Compare commits

...

27 Commits

Author SHA1 Message Date
Aiden McClelland
08c672c024 passthrough feature 2026-03-04 16:29:02 -07:00
Aiden McClelland
2fd87298bf fix build 2026-03-04 01:09:12 -07:00
Aiden McClelland
ee7f77b5db chore: todos and formatting 2026-03-04 00:55:00 -07:00
Matt Hill
cdf30196ca fix link 2026-03-03 20:07:56 -07:00
Matt Hill
e999d89bbc multiple bugs and better port forward ux 2026-03-03 19:04:20 -07:00
Aiden McClelland
16a2fe4e08 new checkPort types 2026-03-03 13:07:12 -07:00
Aiden McClelland
6778f37307 sdk version bump 2026-03-03 11:49:01 -07:00
Aiden McClelland
b51bfb8d59 fix: preserve z namespace types for sdk consumers 2026-03-03 11:42:02 -07:00
Aiden McClelland
0e15a6e7ed create manage-release script (untested) 2026-03-02 18:04:00 -07:00
Aiden McClelland
f004c46977 misc bugfixes 2026-03-02 18:02:20 -07:00
Aiden McClelland
011a3f9d9f chore: split out nvidia variant 2026-03-02 16:04:53 -07:00
Matt Hill
b1c533d670 diable actions when in error state 2026-02-26 18:30:43 -07:00
Matt Hill
d0ac073651 Merge branch 'feat/preferred-port-design' of github.com:Start9Labs/start-os into feat/preferred-port-design 2026-02-26 14:53:15 -07:00
Matt Hill
6c86146e94 update snake 2026-02-26 14:53:08 -07:00
Aiden McClelland
e74f8db887 fix: add --no-nvram to efi grub-install to preserve built-in boot order 2026-02-26 14:48:33 -07:00
Aiden McClelland
d422cd3c66 chore: bump sdk to beta.54, add device-info RPC, improve SDK abort handling and InputSpec filtering
- Bump SDK version to 0.4.0-beta.54
- Add `server.device-info` RPC endpoint and `s9pk select` CLI command
- Extract `HardwareRequirements::is_compatible()` method, reuse in registry filtering
- Add `AbortedError` class with `muteUnhandled` flag, replace generic abort errors
- Handle unhandled promise rejections in container-runtime with mute support
- Improve `InputSpec.filter()` with `keepByDefault` param and boolean filter values
- Accept readonly tuples in `CommandType` and `splitCommand`
- Remove `sync_host` calls from host API handlers (binding/address changes)
- Filter mDNS hostnames by secure gateway availability
- Derive mDNS enabled state from LAN IPs in web UI
- Add "Open UI" action to address table, disable mDNS toggle
- Hide debug details in service error component
- Update rpc-toolkit docs for no-params handlers
2026-02-26 14:08:33 -07:00
Matt Hill
7f66c62848 action failure show dialog 2026-02-26 13:53:19 -07:00
Matt Hill
7e8be5852d reset instead of reset defaults 2026-02-26 11:03:35 -07:00
Aiden McClelland
72d573dbd1 chore: bump sdk to beta.53, wrap z.deepPartial with passthrough 2026-02-25 17:31:22 -07:00
Matt Hill
827458562b update snake and add about this server to system general 2026-02-25 17:15:20 -07:00
Alex Inkin
803dd38d96 fix: header color in zoom (#3128)
* fix: merge version ranges when adding existing package signer (#3125)

* fix: merge version ranges when adding existing package signer

   Previously, add_package_signer unconditionally inserted the new
   version range, overwriting any existing authorization for that signer.
   Now it OR-merges the new range with the existing one, so running
   signer add multiple times accumulates permissions rather than
   replacing them.

* add --merge flag to registry package signer add

  Default behavior remains overwrite. When --merge is passed, the new
  version range is OR-merged with the existing one, allowing admins to
  accumulate permissions incrementally.

* add missing attribute to TS type

* make merge optional

* upsert instead of insert

* VersionRange::None on upsert

* fix: header color in zoom

---------

Co-authored-by: Dominion5254 <musashidisciple@proton.me>
2026-02-25 15:09:25 -07:00
Aiden McClelland
8da9d76cb4 feat: add zod-deep-partial, partialValidator on InputSpec, and z.deepPartial re-export 2026-02-25 13:35:52 -07:00
Matt Hill
b466e71b3b clean up copy around addresses table 2026-02-24 17:16:34 -07:00
Aiden McClelland
3743a0d2e4 Merge branch 'feat/preferred-port-design' of github.com:Start9Labs/start-os into feat/preferred-port-design 2026-02-24 16:06:21 -07:00
Aiden McClelland
33a51bc663 setup changes 2026-02-24 16:06:19 -07:00
Matt Hill
d69e5b9f1a implement server name 2026-02-24 16:02:09 -07:00
Matt Hill
d4e019c87b add comments to everything potentially consumer facing (#3127)
* add comments to everything potentially consumer facing

* rework smtp

---------

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
2026-02-24 14:29:09 -07:00
188 changed files with 4912 additions and 10416 deletions

View File

@@ -25,10 +25,13 @@ on:
- ALL
- x86_64
- x86_64-nonfree
- x86_64-nvidia
- aarch64
- aarch64-nonfree
- aarch64-nvidia
# - raspberrypi
- riscv64
- riscv64-nonfree
deploy:
type: choice
description: Deploy
@@ -65,10 +68,13 @@ jobs:
fromJson('{
"x86_64": ["x86_64"],
"x86_64-nonfree": ["x86_64"],
"x86_64-nvidia": ["x86_64"],
"aarch64": ["aarch64"],
"aarch64-nonfree": ["aarch64"],
"aarch64-nvidia": ["aarch64"],
"raspberrypi": ["aarch64"],
"riscv64": ["riscv64"],
"riscv64-nonfree": ["riscv64"],
"ALL": ["x86_64", "aarch64", "riscv64"]
}')[github.event.inputs.platform || 'ALL']
}}
@@ -125,7 +131,7 @@ jobs:
format(
'[
["{0}"],
["x86_64", "x86_64-nonfree", "aarch64", "aarch64-nonfree", "riscv64"]
["x86_64", "x86_64-nonfree", "x86_64-nvidia", "aarch64", "aarch64-nonfree", "aarch64-nvidia", "riscv64", "riscv64-nonfree"]
]',
github.event.inputs.platform || 'ALL'
)
@@ -139,18 +145,24 @@ jobs:
fromJson('{
"x86_64": "ubuntu-latest",
"x86_64-nonfree": "ubuntu-latest",
"x86_64-nvidia": "ubuntu-latest",
"aarch64": "ubuntu-24.04-arm",
"aarch64-nonfree": "ubuntu-24.04-arm",
"aarch64-nvidia": "ubuntu-24.04-arm",
"raspberrypi": "ubuntu-24.04-arm",
"riscv64": "ubuntu-24.04-arm",
"riscv64-nonfree": "ubuntu-24.04-arm",
}')[matrix.platform],
fromJson('{
"x86_64": "buildjet-8vcpu-ubuntu-2204",
"x86_64-nonfree": "buildjet-8vcpu-ubuntu-2204",
"x86_64-nvidia": "buildjet-8vcpu-ubuntu-2204",
"aarch64": "buildjet-8vcpu-ubuntu-2204-arm",
"aarch64-nonfree": "buildjet-8vcpu-ubuntu-2204-arm",
"aarch64-nvidia": "buildjet-8vcpu-ubuntu-2204-arm",
"raspberrypi": "buildjet-8vcpu-ubuntu-2204-arm",
"riscv64": "buildjet-8vcpu-ubuntu-2204",
"riscv64-nonfree": "buildjet-8vcpu-ubuntu-2204",
}')[matrix.platform]
)
)[github.event.inputs.runner == 'fast']
@@ -161,10 +173,13 @@ jobs:
fromJson('{
"x86_64": "x86_64",
"x86_64-nonfree": "x86_64",
"x86_64-nvidia": "x86_64",
"aarch64": "aarch64",
"aarch64-nonfree": "aarch64",
"aarch64-nvidia": "aarch64",
"raspberrypi": "aarch64",
"riscv64": "riscv64",
"riscv64-nonfree": "riscv64",
}')[matrix.platform]
}}
steps:

View File

@@ -11,12 +11,14 @@ Each major component has its own `CLAUDE.md` with detailed guidance: `core/`, `w
## Build & Development
See [CONTRIBUTING.md](CONTRIBUTING.md) for:
- Environment setup and requirements
- Build commands and make targets
- Testing and formatting commands
- Environment variables
**Quick reference:**
```bash
. ./devmode.sh # Enable dev mode
make update-startbox REMOTE=start9@<ip> # Fastest iteration (binary + UI)
@@ -28,6 +30,7 @@ make test-core # Run Rust tests
- Always verify cross-layer changes using the order described in [ARCHITECTURE.md](ARCHITECTURE.md#cross-layer-verification)
- Check component-level CLAUDE.md files for component-specific conventions. ALWAYS read it before operating on that component.
- Follow existing patterns before inventing new ones
- Always use `make` recipes when they exist for testing builds rather than manually invoking build commands
## Supplementary Documentation
@@ -47,6 +50,7 @@ On startup:
1. **Check for `docs/USER.md`** - If it doesn't exist, prompt the user for their name/identifier and create it. This file is gitignored since it varies per developer.
2. **Check `docs/TODO.md` for relevant tasks** - Show TODOs that either:
- Have no `@username` tag (relevant to everyone)
- Are tagged with the current user's identifier

View File

@@ -7,7 +7,7 @@ GIT_HASH_FILE := $(shell ./build/env/check-git-hash.sh)
VERSION_FILE := $(shell ./build/env/check-version.sh)
BASENAME := $(shell PROJECT=startos ./build/env/basename.sh)
PLATFORM := $(shell if [ -f $(PLATFORM_FILE) ]; then cat $(PLATFORM_FILE); else echo unknown; fi)
ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; else echo $(PLATFORM) | sed 's/-nonfree$$//g'; fi)
ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; elif [ "$(PLATFORM)" = "rockchip64" ]; then echo aarch64; else echo $(PLATFORM) | sed 's/-nonfree$$//g; s/-nvidia$$//g'; fi)
RUST_ARCH := $(shell if [ "$(ARCH)" = "riscv64" ]; then echo riscv64gc; else echo $(ARCH); fi)
REGISTRY_BASENAME := $(shell PROJECT=start-registry PLATFORM=$(ARCH) ./build/env/basename.sh)
TUNNEL_BASENAME := $(shell PROJECT=start-tunnel PLATFORM=$(ARCH) ./build/env/basename.sh)

View File

@@ -52,7 +52,7 @@ The easiest path. [Buy a server](https://store.start9.com) from Start9 and plug
### Build your own
Install StartOS on your own hardware. Follow one of the [DIY guides](https://start9.com/latest/diy). Reasons to go this route:
Follow the [install guide](https://docs.start9.com/start-os/installing.html) to install StartOS on your own hardware. Reasons to go this route:
1. You already have compatible hardware
2. You want to save on shipping costs

View File

@@ -12,6 +12,10 @@ fi
if [[ "$PLATFORM" =~ -nonfree$ ]]; then
FEATURES+=("nonfree")
fi
if [[ "$PLATFORM" =~ -nvidia$ ]]; then
FEATURES+=("nonfree")
FEATURES+=("nvidia")
fi
feature_file_checker='
/^#/ { next }

View File

@@ -4,7 +4,4 @@
+ firmware-iwlwifi
+ firmware-libertas
+ firmware-misc-nonfree
+ firmware-realtek
+ nvidia-container-toolkit
# + nvidia-driver
# + nvidia-kernel-dkms
+ firmware-realtek

View File

@@ -0,0 +1 @@
+ nvidia-container-toolkit

View File

@@ -34,11 +34,11 @@ fi
IMAGE_BASENAME=startos-${VERSION_FULL}_${IB_TARGET_PLATFORM}
BOOTLOADERS=grub-efi
if [ "$IB_TARGET_PLATFORM" = "x86_64" ] || [ "$IB_TARGET_PLATFORM" = "x86_64-nonfree" ]; then
if [ "$IB_TARGET_PLATFORM" = "x86_64" ] || [ "$IB_TARGET_PLATFORM" = "x86_64-nonfree" ] || [ "$IB_TARGET_PLATFORM" = "x86_64-nvidia" ]; then
IB_TARGET_ARCH=amd64
QEMU_ARCH=x86_64
BOOTLOADERS=grub-efi,syslinux
elif [ "$IB_TARGET_PLATFORM" = "aarch64" ] || [ "$IB_TARGET_PLATFORM" = "aarch64-nonfree" ] || [ "$IB_TARGET_PLATFORM" = "raspberrypi" ] || [ "$IB_TARGET_PLATFORM" = "rockchip64" ]; then
elif [ "$IB_TARGET_PLATFORM" = "aarch64" ] || [ "$IB_TARGET_PLATFORM" = "aarch64-nonfree" ] || [ "$IB_TARGET_PLATFORM" = "aarch64-nvidia" ] || [ "$IB_TARGET_PLATFORM" = "raspberrypi" ] || [ "$IB_TARGET_PLATFORM" = "rockchip64" ]; then
IB_TARGET_ARCH=arm64
QEMU_ARCH=aarch64
elif [ "$IB_TARGET_PLATFORM" = "riscv64" ] || [ "$IB_TARGET_PLATFORM" = "riscv64-nonfree" ]; then
@@ -60,9 +60,13 @@ mkdir -p $prep_results_dir
cd $prep_results_dir
NON_FREE=
if [[ "${IB_TARGET_PLATFORM}" =~ -nonfree$ ]] || [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
if [[ "${IB_TARGET_PLATFORM}" =~ -nonfree$ ]] || [[ "${IB_TARGET_PLATFORM}" =~ -nvidia$ ]] || [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
NON_FREE=1
fi
NVIDIA=
if [[ "${IB_TARGET_PLATFORM}" =~ -nvidia$ ]]; then
NVIDIA=1
fi
IMAGE_TYPE=iso
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ] || [ "${IB_TARGET_PLATFORM}" = "rockchip64" ]; then
IMAGE_TYPE=img
@@ -101,7 +105,7 @@ lb config \
--iso-preparer "START9 LABS; HTTPS://START9.COM" \
--iso-publisher "START9 LABS; HTTPS://START9.COM" \
--backports true \
--bootappend-live "boot=live noautologin" \
--bootappend-live "boot=live noautologin console=tty0" \
--bootloaders $BOOTLOADERS \
--cache false \
--mirror-bootstrap "https://deb.debian.org/debian/" \
@@ -177,7 +181,7 @@ if [ "${IB_TARGET_PLATFORM}" = "rockchip64" ]; then
echo "deb https://apt.armbian.com/ ${IB_SUITE} main" > config/archives/armbian.list
fi
if [ "$NON_FREE" = 1 ]; then
if [ "$NVIDIA" = 1 ]; then
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | gpg --dearmor -o config/archives/nvidia-container-toolkit.key
curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list \
| sed 's#deb https://#deb [signed-by=/etc/apt/trusted.gpg.d/nvidia-container-toolkit.key.gpg] https://#g' \
@@ -205,11 +209,11 @@ cat > config/hooks/normal/9000-install-startos.hook.chroot << EOF
set -e
if [ "${NON_FREE}" = "1" ] && [ "${IB_TARGET_PLATFORM}" != "raspberrypi" ] && [ "${IB_TARGET_PLATFORM}" != "riscv64-nonfree" ]; then
if [ "${NVIDIA}" = "1" ]; then
# install a specific NVIDIA driver version
# ---------------- configuration ----------------
NVIDIA_DRIVER_VERSION="\${NVIDIA_DRIVER_VERSION:-580.119.02}"
NVIDIA_DRIVER_VERSION="\${NVIDIA_DRIVER_VERSION:-580.126.09}"
BASE_URL="https://download.nvidia.com/XFree86/Linux-${QEMU_ARCH}"
@@ -259,12 +263,15 @@ if [ "${NON_FREE}" = "1" ] && [ "${IB_TARGET_PLATFORM}" != "raspberrypi" ] && [
echo "[nvidia-hook] Running NVIDIA installer for kernel \${KVER}" >&2
sh "\${RUN_PATH}" \
if ! sh "\${RUN_PATH}" \
--silent \
--kernel-name="\${KVER}" \
--no-x-check \
--no-nouveau-check \
--no-runlevel-check
--no-runlevel-check; then
cat /var/log/nvidia-installer.log
exit 1
fi
# Rebuild module metadata
echo "[nvidia-hook] Running depmod for \${KVER}" >&2

View File

@@ -62,7 +62,7 @@ fi
chroot /media/startos/next bash -e << "EOF"
if [ -f /boot/grub/grub.cfg ]; then
grub-install /dev/$(eval $(lsblk -o MOUNTPOINT,PKNAME -P | grep 'MOUNTPOINT="/media/startos/root"') && echo $PKNAME)
grub-install --no-nvram /dev/$(eval $(lsblk -o MOUNTPOINT,PKNAME -P | grep 'MOUNTPOINT="/media/startos/root"') && echo $PKNAME)
update-grub
fi

364
build/manage-release.sh Executable file
View File

@@ -0,0 +1,364 @@
#!/bin/bash
# manage-release.sh — download, publish, sign, and announce StartOS releases.
set -e

# Release coordinates and endpoints.
REPO="Start9Labs/start-os"
REGISTRY="https://alpha-registry-x.start9.com"
S3_BUCKET="s3://startos-images"
S3_CDN="https://startos-images.nyc3.cdn.digitaloceanspaces.com"
# Start9 org GPG key ID used by cmd_sign.
START9_GPG_KEY="2D63C217"
# Platforms that produce OS images, and the subset that ships CLI/tunnel builds.
ARCHES="aarch64 aarch64-nonfree aarch64-nvidia riscv64 riscv64-nonfree x86_64 x86_64-nonfree x86_64-nvidia"
CLI_ARCHES="aarch64 riscv64 x86_64"
# Extract a numeric GitHub Actions run ID from either a bare ID or a full
# .../actions/runs/<id>... URL. Unrecognized input is echoed back unchanged.
parse_run_id() {
  local raw="$1"
  if [[ "$raw" =~ /actions/runs/([0-9]+) ]]; then
    printf '%s\n' "${BASH_REMATCH[1]}"
  else
    printf '%s\n' "$raw"
  fi
}
# Ensure $VERSION is set, prompting interactively when missing.
# Exits 2 if the user provides nothing.
require_version() {
  # Already provided via the environment — nothing to do.
  if [ -n "${VERSION:-}" ]; then
    return 0
  fi
  read -rp "VERSION: " VERSION
  if [ -z "$VERSION" ]; then
    >&2 echo '$VERSION required'
    exit 2
  fi
}
# Print the local working directory holding this release's artifacts.
release_dir() {
  printf '%s/Downloads/v%s\n' "$HOME" "$VERSION"
}
# Create the release directory (wiping it first when CLEAN=1) and cd into it.
ensure_release_dir() {
  local target
  target="$(release_dir)"
  if [ "$CLEAN" = "1" ]; then
    rm -rf "$target"
  fi
  mkdir -p "$target"
  cd "$target"
}
# cd into the release directory, failing loudly if it was never created.
enter_release_dir() {
  local dir
  dir="$(release_dir)"
  if [ -d "$dir" ]; then
    cd "$dir"
  else
    >&2 echo "Release directory $dir does not exist. Run 'download' or 'pull' first."
    exit 1
  fi
}
# Map (arch, os) to the cargo target triple used for start-cli artifacts.
# Returns non-zero for unsupported combinations — riscv64 on macOS, or an
# unrecognized os — so callers can `|| continue`.
cli_target_for() {
  local arch=$1 os=$2
  local pair="${arch}-${os}"
  if [ "$pair" = "riscv64-linux" ]; then
    echo "riscv64gc-unknown-linux-musl"
  elif [ "$pair" = "riscv64-macos" ]; then
    return 1
  elif [ "$os" = "linux" ]; then
    echo "${arch}-unknown-linux-musl"
  elif [ "$os" = "macos" ]; then
    echo "${arch}-apple-darwin"
  else
    # Previously an unknown os printed nothing but still exited 0, so the
    # caller's `|| continue` never fired and an empty target leaked into
    # the downloaded artifact name.
    return 1
  fi
}
# List the release artifacts in the current directory: disk images, debs,
# and start-cli binaries (excluding their .asc signature files). Unmatched
# glob patterns stay literal and are filtered out by the -f test.
release_files() {
  local file
  for file in *.iso *.squashfs *.deb; do
    [ -f "$file" ] && echo "$file"
  done
  for file in start-cli_*; do
    [[ "$file" == *.asc ]] && continue
    [ -f "$file" ] && echo "$file"
  done
  # Don't let the final failed [ -f ] test become the function's exit
  # status: under `set -e` a bare call to release_files would then abort
  # the script even though nothing is wrong.
  return 0
}
# Best-effort discovery of the signer identity: the GitHub login (unless
# GH_USER is already set) and the git-configured GPG signing key. Either
# may end up empty; callers decide whether that is fatal.
resolve_gh_user() {
  if [ -z "${GH_USER:-}" ]; then
    GH_USER=$(gh api user -q .login 2>/dev/null || true)
  fi
  GH_GPG_KEY=$(git config user.signingkey 2>/dev/null || true)
}
# --- Subcommands ---

# Download freshly-built artifacts from GitHub Actions runs into the
# release directory. Each run ID may come from the environment, be pasted
# as a full run URL (parse_run_id handles both), or be left blank to skip
# that artifact class.
cmd_download() {
  require_version
  if [ -z "${RUN_ID:-}" ]; then
    read -rp "RUN_ID (OS images, leave blank to skip): " RUN_ID
  fi
  RUN_ID=$(parse_run_id "${RUN_ID:-}")
  if [ -z "${ST_RUN_ID:-}" ]; then
    read -rp "ST_RUN_ID (start-tunnel, leave blank to skip): " ST_RUN_ID
  fi
  ST_RUN_ID=$(parse_run_id "${ST_RUN_ID:-}")
  if [ -z "${CLI_RUN_ID:-}" ]; then
    read -rp "CLI_RUN_ID (start-cli, leave blank to skip): " CLI_RUN_ID
  fi
  CLI_RUN_ID=$(parse_run_id "${CLI_RUN_ID:-}")
  ensure_release_dir
  if [ -n "$RUN_ID" ]; then
    # Poll until gh succeeds — presumably to ride out artifacts that are
    # still uploading while the run finishes (TODO confirm).
    for arch in $ARCHES; do
      while ! gh run download -R $REPO "$RUN_ID" -n "$arch.squashfs" -D "$(pwd)"; do sleep 1; done
    done
    for arch in $ARCHES; do
      while ! gh run download -R $REPO "$RUN_ID" -n "$arch.iso" -D "$(pwd)"; do sleep 1; done
    done
  fi
  if [ -n "$ST_RUN_ID" ]; then
    for arch in $CLI_ARCHES; do
      while ! gh run download -R $REPO "$ST_RUN_ID" -n "start-tunnel_$arch.deb" -D "$(pwd)"; do sleep 1; done
    done
  fi
  if [ -n "$CLI_RUN_ID" ]; then
    for arch in $CLI_ARCHES; do
      for os in linux macos; do
        local target
        # cli_target_for fails for unsupported pairs (riscv64/macos).
        target=$(cli_target_for "$arch" "$os") || continue
        while ! gh run download -R $REPO "$CLI_RUN_ID" -n "start-cli_$target" -D "$(pwd)"; do sleep 1; done
        # The artifact extracts as a bare "start-cli"; rename it per arch/os.
        mv start-cli "start-cli_${arch}-${os}"
      done
    done
  fi
}
# Re-download an already-published release: debs and CLI binaries from the
# GitHub release assets, disk images (iso/squashfs) from the S3 CDN.
cmd_pull() {
  require_version
  ensure_release_dir
  echo "Downloading release assets from tag v$VERSION..."
  # Download debs and CLI binaries from the GH release
  for file in $(gh release view -R $REPO "v$VERSION" --json assets -q '.assets[].name' | grep -E '\.(deb)$|^start-cli_'); do
    gh release download -R $REPO "v$VERSION" -p "$file" -D "$(pwd)" --clobber
  done
  # Download ISOs and squashfs from S3 CDN
  for arch in $ARCHES; do
    for ext in squashfs iso; do
      # Get the actual filename from the GH release asset list or body
      local filename
      filename=$(gh release view -R $REPO "v$VERSION" --json assets -q ".assets[].name" | grep "_${arch}\\.${ext}$" || true)
      if [ -z "$filename" ]; then
        # Fallback: scrape the filename out of the release-notes body text.
        filename=$(gh release view -R $REPO "v$VERSION" --json body -q .body | grep -oP "[^ ]*_${arch}\\.${ext}" | head -1 || true)
      fi
      if [ -n "$filename" ]; then
        echo "Downloading $filename from S3..."
        curl -fSL -o "$filename" "$S3_CDN/v$VERSION/$filename"
      fi
    done
  done
}
# Register this version in the Start9 registry. ">=0.3.5 <=$VERSION" is the
# range of source versions allowed to update to it; the empty '' argument
# is presumably a notes/URL field — confirm against start-cli's help.
cmd_register() {
  require_version
  enter_release_dir
  start-cli --registry=$REGISTRY registry os version add "$VERSION" "v$VERSION" '' ">=0.3.5 <=$VERSION"
}
# Push every release artifact to its destination: disk images go to the
# public S3 bucket, everything else (debs, CLI binaries) is attached to
# the GitHub release for v$VERSION.
cmd_upload() {
  require_version
  enter_release_dir
  local file
  for file in $(release_files); do
    if [[ "$file" == *.iso || "$file" == *.squashfs ]]; then
      s3cmd put -P "$file" "$S3_BUCKET/v$VERSION/$file"
    else
      gh release upload -R $REPO "v$VERSION" "$file"
    fi
  done
}
# Record each image artifact in the registry's asset index, pointing at
# its public CDN URL.
cmd_index() {
  require_version
  enter_release_dir
  local arch file
  for arch in $ARCHES; do
    for file in *_"$arch".squashfs *_"$arch".iso; do
      # Guard against unmatched globs: bash leaves the literal pattern in
      # $file, and previously that literal was passed verbatim to
      # start-cli. The rest of this script filters globs with -f too.
      [ -f "$file" ] || continue
      start-cli --registry=$REGISTRY registry os asset add --platform="$arch" --version="$VERSION" "$file" "$S3_CDN/v$VERSION/$file"
    done
  done
}
# Detach-sign every release artifact: always with the Start9 org key, and
# additionally with the operator's personal key when resolve_gh_user finds
# one. All .asc files (signatures + exported public keys) are bundled into
# signatures.tar.gz and attached to the GitHub release.
cmd_sign() {
  require_version
  enter_release_dir
  resolve_gh_user
  for file in $(release_files); do
    gpg -u $START9_GPG_KEY --detach-sign --armor -o "${file}.start9.asc" "$file"
    if [ -n "$GH_USER" ] && [ -n "$GH_GPG_KEY" ]; then
      gpg -u "$GH_GPG_KEY" --detach-sign --armor -o "${file}.${GH_USER}.asc" "$file"
    fi
  done
  # Export the public keys so downstream verifiers can import them.
  gpg --export -a $START9_GPG_KEY > start9.key.asc
  if [ -n "$GH_USER" ] && [ -n "$GH_GPG_KEY" ]; then
    gpg --export -a "$GH_GPG_KEY" > "${GH_USER}.key.asc"
  else
    >&2 echo 'Warning: could not determine GitHub user or GPG signing key, skipping personal signature'
  fi
  tar -czvf signatures.tar.gz *.asc
  gh release upload -R $REPO "v$VERSION" signatures.tar.gz --clobber
}
# Add a second (personal) signature set to an already-signed release:
# fetch the existing signatures.tar.gz, sign every artifact with the
# operator's key, and re-upload the combined bundle. Unlike cmd_sign,
# missing signer identity is fatal here.
cmd_cosign() {
  require_version
  enter_release_dir
  resolve_gh_user
  if [ -z "$GH_USER" ] || [ -z "$GH_GPG_KEY" ]; then
    >&2 echo 'Error: could not determine GitHub user or GPG signing key'
    >&2 echo "Set GH_USER and/or configure git user.signingkey"
    exit 1
  fi
  echo "Downloading existing signatures..."
  gh release download -R $REPO "v$VERSION" -p "signatures.tar.gz" -D "$(pwd)" --clobber
  tar -xzf signatures.tar.gz
  echo "Adding personal signatures as $GH_USER..."
  for file in $(release_files); do
    gpg -u "$GH_GPG_KEY" --detach-sign --armor -o "${file}.${GH_USER}.asc" "$file"
  done
  # Ship the public key alongside the signatures so they can be verified.
  gpg --export -a "$GH_GPG_KEY" > "${GH_USER}.key.asc"
  echo "Re-packing signatures..."
  tar -czvf signatures.tar.gz *.asc
  gh release upload -R $REPO "v$VERSION" signatures.tar.gz --clobber
  echo "Done. Personal signatures for $GH_USER added to v$VERSION."
}
# Print the markdown release-notes body: per-platform ISO download links,
# then SHA-256 and BLAKE-3 checksums for OS images, start-tunnel debs, and
# start-cli binaries.
cmd_notes() {
  require_version
  enter_release_dir
  # Unquoted EOF: $S3_CDN, $VERSION, and the $(ls ...) globs expand here.
  cat << EOF
# ISO Downloads
- [x86_64/AMD64]($S3_CDN/v$VERSION/$(ls *_x86_64-nonfree.iso))
- [x86_64/AMD64 + NVIDIA]($S3_CDN/v$VERSION/$(ls *_x86_64-nvidia.iso))
- [x86_64/AMD64-slim (FOSS-only)]($S3_CDN/v$VERSION/$(ls *_x86_64.iso) "Without proprietary software or drivers")
- [aarch64/ARM64]($S3_CDN/v$VERSION/$(ls *_aarch64-nonfree.iso))
- [aarch64/ARM64 + NVIDIA]($S3_CDN/v$VERSION/$(ls *_aarch64-nvidia.iso))
- [aarch64/ARM64-slim (FOSS-Only)]($S3_CDN/v$VERSION/$(ls *_aarch64.iso) "Without proprietary software or drivers")
- [RISCV64 (RVA23)]($S3_CDN/v$VERSION/$(ls *_riscv64-nonfree.iso))
- [RISCV64 (RVA23)-slim (FOSS-only)]($S3_CDN/v$VERSION/$(ls *_riscv64.iso) "Without proprietary software or drivers")
EOF
  # Quoted 'EOF' heredocs below are literal; the checksum commands run
  # between the markdown fragments so their output lands inside the fences.
  cat << 'EOF'
# StartOS Checksums
## SHA-256
```
EOF
  sha256sum *.iso *.squashfs
  cat << 'EOF'
```
## BLAKE-3
```
EOF
  b3sum *.iso *.squashfs
  cat << 'EOF'
```
# Start-Tunnel Checksums
## SHA-256
```
EOF
  sha256sum start-tunnel*.deb
  cat << 'EOF'
```
## BLAKE-3
```
EOF
  b3sum start-tunnel*.deb
  cat << 'EOF'
```
# start-cli Checksums
## SHA-256
```
EOF
  release_files | grep '^start-cli_' | xargs sha256sum
  cat << 'EOF'
```
## BLAKE-3
```
EOF
  release_files | grep '^start-cli_' | xargs b3sum
  cat << 'EOF'
```
EOF
}
# Run the whole pipeline for a fresh release, in the order documented in
# usage(): download → register → upload → index → sign → notes.
cmd_full_release() {
  cmd_download
  cmd_register
  cmd_upload
  cmd_index
  cmd_sign
  cmd_notes
}
# Print CLI help to stdout.
usage() {
  cat << 'EOF'
Usage: manage-release.sh <subcommand>
Subcommands:
  download      Download artifacts from GitHub Actions runs
                Requires: RUN_ID, ST_RUN_ID, CLI_RUN_ID (any combination)
  pull          Download an existing release from the GH tag and S3
  register      Register the version in the Start9 registry
  upload        Upload artifacts to GitHub Releases and S3
  index         Add assets to the registry index
  sign          Sign all artifacts with Start9 org key (+ personal key if available)
                and upload signatures.tar.gz
  cosign        Add personal GPG signature to an existing release's signatures
                (requires 'pull' first so you can verify assets before signing)
  notes         Print release notes with download links and checksums
  full-release  Run: download → register → upload → index → sign → notes
Environment variables:
  VERSION       (required) Release version
  RUN_ID        GitHub Actions run ID for OS images (download subcommand)
  ST_RUN_ID     GitHub Actions run ID for start-tunnel (download subcommand)
  CLI_RUN_ID    GitHub Actions run ID for start-cli (download subcommand)
  GH_USER       Override GitHub username (default: autodetected via gh cli)
  CLEAN         Set to 1 to wipe and recreate the release directory
EOF
}
# Dispatch: the first CLI argument selects the subcommand; anything else
# (including no argument) prints usage and exits non-zero.
case "${1:-}" in
  download) cmd_download ;;
  pull) cmd_pull ;;
  register) cmd_register ;;
  upload) cmd_upload ;;
  index) cmd_index ;;
  sign) cmd_sign ;;
  cosign) cmd_cosign ;;
  notes) cmd_notes ;;
  full-release) cmd_full_release ;;
  *) usage; exit 1 ;;
esac

View File

@@ -1,142 +0,0 @@
#!/bin/bash
if [ -z "$VERSION" ]; then
>&2 echo '$VERSION required'
exit 2
fi
set -e
if [ "$SKIP_DL" != "1" ]; then
if [ "$SKIP_CLEAN" != "1" ]; then
rm -rf ~/Downloads/v$VERSION
mkdir ~/Downloads/v$VERSION
cd ~/Downloads/v$VERSION
fi
if [ -n "$RUN_ID" ]; then
for arch in aarch64 aarch64-nonfree riscv64 x86_64 x86_64-nonfree; do
while ! gh run download -R Start9Labs/start-os $RUN_ID -n $arch.squashfs -D $(pwd); do sleep 1; done
done
for arch in aarch64 aarch64-nonfree riscv64 x86_64 x86_64-nonfree; do
while ! gh run download -R Start9Labs/start-os $RUN_ID -n $arch.iso -D $(pwd); do sleep 1; done
done
fi
if [ -n "$ST_RUN_ID" ]; then
for arch in aarch64 riscv64 x86_64; do
while ! gh run download -R Start9Labs/start-os $ST_RUN_ID -n start-tunnel_$arch.deb -D $(pwd); do sleep 1; done
done
fi
if [ -n "$CLI_RUN_ID" ]; then
for arch in aarch64 riscv64 x86_64; do
for os in linux macos; do
pair=${arch}-${os}
if [ "${pair}" = "riscv64-linux" ]; then
target=riscv64gc-unknown-linux-musl
elif [ "${pair}" = "riscv64-macos" ]; then
continue
elif [ "${os}" = "linux" ]; then
target="${arch}-unknown-linux-musl"
elif [ "${os}" = "macos" ]; then
target="${arch}-apple-darwin"
fi
while ! gh run download -R Start9Labs/start-os $CLI_RUN_ID -n start-cli_$target -D $(pwd); do sleep 1; done
mv start-cli "start-cli_${pair}"
done
done
fi
else
cd ~/Downloads/v$VERSION
fi
start-cli --registry=https://alpha-registry-x.start9.com registry os version add $VERSION "v$VERSION" '' ">=0.3.5 <=$VERSION"
if [ "$SKIP_UL" = "2" ]; then
exit 2
elif [ "$SKIP_UL" != "1" ]; then
for file in *.deb start-cli_*; do
gh release upload -R Start9Labs/start-os v$VERSION $file
done
for file in *.iso *.squashfs; do
s3cmd put -P $file s3://startos-images/v$VERSION/$file
done
fi
if [ "$SKIP_INDEX" != "1" ]; then
for arch in aarch64 aarch64-nonfree riscv64 x86_64 x86_64-nonfree; do
for file in *_$arch.squashfs *_$arch.iso; do
start-cli --registry=https://alpha-registry-x.start9.com registry os asset add --platform=$arch --version=$VERSION $file https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$file
done
done
fi
for file in *.iso *.squashfs *.deb start-cli_*; do
gpg -u 7CFFDA41CA66056A --detach-sign --armor -o "${file}.asc" "$file"
done
gpg --export -a 7CFFDA41CA66056A > dr-bonez.key.asc
tar -czvf signatures.tar.gz *.asc
gh release upload -R Start9Labs/start-os v$VERSION signatures.tar.gz
cat << EOF
# ISO Downloads
- [x86_64/AMD64](https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$(ls *_x86_64-nonfree.iso))
- [x86_64/AMD64-slim (FOSS-only)](https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$(ls *_x86_64.iso) "Without proprietary software or drivers")
- [aarch64/ARM64](https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$(ls *_aarch64-nonfree.iso))
- [aarch64/ARM64-slim (FOSS-Only)](https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$(ls *_aarch64.iso) "Without proprietary software or drivers")
- [RISCV64 (RVA23)](https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$(ls *_riscv64.iso))
EOF
cat << 'EOF'
# StartOS Checksums
## SHA-256
```
EOF
sha256sum *.iso *.squashfs
cat << 'EOF'
```
## BLAKE-3
```
EOF
b3sum *.iso *.squashfs
cat << 'EOF'
```
# Start-Tunnel Checksums
## SHA-256
```
EOF
sha256sum start-tunnel*.deb
cat << 'EOF'
```
## BLAKE-3
```
EOF
b3sum start-tunnel*.deb
cat << 'EOF'
```
# start-cli Checksums
## SHA-256
```
EOF
sha256sum start-cli_*
cat << 'EOF'
```
## BLAKE-3
```
EOF
b3sum start-cli_*
cat << 'EOF'
```
EOF

View File

@@ -0,0 +1,30 @@
// Mock for ESM-only mime package — Jest's module loader doesn't support require(esm)
const types = {
".png": "image/png",
".jpg": "image/jpeg",
".jpeg": "image/jpeg",
".gif": "image/gif",
".svg": "image/svg+xml",
".webp": "image/webp",
".ico": "image/x-icon",
".json": "application/json",
".js": "application/javascript",
".html": "text/html",
".css": "text/css",
".txt": "text/plain",
".md": "text/markdown",
}
module.exports = {
default: {
getType(path) {
const ext = "." + path.split(".").pop()
return types[ext] || null
},
getExtension(type) {
const entry = Object.entries(types).find(([, v]) => v === type)
return entry ? entry[0].slice(1) : null
},
},
__esModule: true,
}

View File

@@ -5,4 +5,7 @@ module.exports = {
testEnvironment: "node",
rootDir: "./src/",
modulePathIgnorePatterns: ["./dist/"],
moduleNameMapper: {
"^mime$": "<rootDir>/../__mocks__/mime.js",
},
}

View File

@@ -37,7 +37,7 @@
},
"../sdk/dist": {
"name": "@start9labs/start-sdk",
"version": "0.4.0-beta.51",
"version": "0.4.0-beta.55",
"license": "MIT",
"dependencies": {
"@iarna/toml": "^3.0.0",
@@ -49,7 +49,8 @@
"isomorphic-fetch": "^3.0.0",
"mime": "^4.0.7",
"yaml": "^2.7.1",
"zod": "^4.3.6"
"zod": "^4.3.6",
"zod-deep-partial": "^1.2.0"
},
"devDependencies": {
"@types/jest": "^29.4.0",

View File

@@ -1,5 +1,4 @@
import { RpcListener } from "./Adapters/RpcListener"
import { SystemForEmbassy } from "./Adapters/Systems/SystemForEmbassy"
import { AllGetDependencies } from "./Interfaces/AllGetDependencies"
import { getSystem } from "./Adapters/Systems"
@@ -7,6 +6,18 @@ const getDependencies: AllGetDependencies = {
system: getSystem,
}
// Global unhandledRejection hook: rejections whose Error carries a truthy
// `muteUnhandled` flag are deliberately silenced (presumably set by the
// SDK's AbortedError for expected aborts — confirm against the SDK);
// every other rejection is logged with its reason.
process.on("unhandledRejection", (reason) => {
  if (
    reason instanceof Error &&
    "muteUnhandled" in reason &&
    reason.muteUnhandled
  ) {
    // mute
  } else {
    console.error("Unhandled promise rejection", reason)
  }
})
for (let s of ["SIGTERM", "SIGINT", "SIGHUP"]) {
process.on(s, (s) => {
console.log(`Caught ${s}`)

View File

@@ -197,6 +197,13 @@ setup.transferring-data:
fr_FR: "Transfert de données"
pl_PL: "Przesyłanie danych"
setup.password-required:
en_US: "Password is required for fresh setup"
de_DE: "Passwort ist für die Ersteinrichtung erforderlich"
es_ES: "Se requiere contraseña para la configuración inicial"
fr_FR: "Le mot de passe est requis pour la première configuration"
pl_PL: "Hasło jest wymagane do nowej konfiguracji"
# system.rs
system.governor-not-available:
en_US: "Governor %{governor} not available"
@@ -3145,7 +3152,7 @@ help.arg.smtp-from:
fr_FR: "Adresse de l'expéditeur"
pl_PL: "Adres nadawcy e-mail"
help.arg.smtp-login:
help.arg.smtp-username:
en_US: "SMTP authentication username"
de_DE: "SMTP-Authentifizierungsbenutzername"
es_ES: "Nombre de usuario de autenticación SMTP"
@@ -3166,13 +3173,20 @@ help.arg.smtp-port:
fr_FR: "Port du serveur SMTP"
pl_PL: "Port serwera SMTP"
help.arg.smtp-server:
help.arg.smtp-host:
en_US: "SMTP server hostname"
de_DE: "SMTP-Server-Hostname"
es_ES: "Nombre de host del servidor SMTP"
fr_FR: "Nom d'hôte du serveur SMTP"
pl_PL: "Nazwa hosta serwera SMTP"
help.arg.smtp-security:
en_US: "Connection security mode (starttls or tls)"
de_DE: "Verbindungssicherheitsmodus (starttls oder tls)"
es_ES: "Modo de seguridad de conexión (starttls o tls)"
fr_FR: "Mode de sécurité de connexion (starttls ou tls)"
pl_PL: "Tryb zabezpieczeń połączenia (starttls lub tls)"
help.arg.smtp-to:
en_US: "Email recipient address"
de_DE: "E-Mail-Empfängeradresse"
@@ -3670,6 +3684,13 @@ help.arg.s9pk-file-path:
fr_FR: "Chemin vers le fichier de paquet s9pk"
pl_PL: "Ścieżka do pliku pakietu s9pk"
help.arg.s9pk-file-paths:
en_US: "Paths to s9pk package files"
de_DE: "Pfade zu s9pk-Paketdateien"
es_ES: "Rutas a los archivos de paquete s9pk"
fr_FR: "Chemins vers les fichiers de paquet s9pk"
pl_PL: "Ścieżki do plików pakietów s9pk"
help.arg.session-ids:
en_US: "Session identifiers"
de_DE: "Sitzungskennungen"
@@ -4966,6 +4987,13 @@ about.publish-s9pk:
fr_FR: "Publier s9pk dans le bucket S3 et indexer dans le registre"
pl_PL: "Opublikuj s9pk do bucketu S3 i zindeksuj w rejestrze"
about.select-s9pk-for-device:
en_US: "Select the best compatible s9pk for a target device"
de_DE: "Das beste kompatible s9pk für ein Zielgerät auswählen"
es_ES: "Seleccionar el s9pk más compatible para un dispositivo destino"
fr_FR: "Sélectionner le meilleur s9pk compatible pour un appareil cible"
pl_PL: "Wybierz najlepiej kompatybilny s9pk dla urządzenia docelowego"
about.rebuild-service-container:
en_US: "Rebuild service container"
de_DE: "Dienst-Container neu erstellen"

View File

@@ -21,6 +21,14 @@ pub async fn my_handler(ctx: RpcContext, params: MyParams) -> Result<MyResponse,
from_fn_async(my_handler)
```
If a handler takes no params, simply omit the params argument entirely (no need for `_: Empty`):
```rust
pub async fn no_params_handler(ctx: RpcContext) -> Result<MyResponse, Error> {
// ...
}
```
### `from_fn_async_local` - Non-thread-safe async handlers
For async functions that are not `Send` (cannot be safely moved between threads). Use when working with non-thread-safe types.
@@ -181,9 +189,9 @@ pub struct MyParams {
### Adding a New RPC Endpoint
1. Define params struct with `Deserialize, Serialize, Parser, TS`
1. Define params struct with `Deserialize, Serialize, Parser, TS` (skip if no params needed)
2. Choose handler type based on sync/async and thread-safety
3. Write handler function taking `(Context, Params) -> Result<Response, Error>`
3. Write handler function taking `(Context, Params) -> Result<Response, Error>` (omit Params if none needed)
4. Add to parent handler with appropriate extensions (display modifiers before `with_about`)
5. TypeScript types auto-generated via `make ts-bindings`

View File

@@ -86,7 +86,7 @@ pub async fn restore_packages_rpc(
pub async fn recover_full_server(
ctx: &SetupContext,
disk_guid: InternedString,
password: String,
password: Option<String>,
recovery_source: TmpMountGuard,
server_id: &str,
recovery_password: &str,
@@ -110,12 +110,14 @@ pub async fn recover_full_server(
.with_ctx(|_| (ErrorKind::Filesystem, os_backup_path.display().to_string()))?,
)?;
os_backup.account.password = argon2::hash_encoded(
password.as_bytes(),
&rand::random::<[u8; 16]>()[..],
&argon2::Config::rfc9106_low_mem(),
)
.with_kind(ErrorKind::PasswordHashGeneration)?;
if let Some(password) = password {
os_backup.account.password = argon2::hash_encoded(
password.as_bytes(),
&rand::random::<[u8; 16]>()[..],
&argon2::Config::rfc9106_low_mem(),
)
.with_kind(ErrorKind::PasswordHashGeneration)?;
}
if let Some(h) = hostname {
os_backup.account.hostname = h;

View File

@@ -10,7 +10,6 @@ use std::time::Duration;
use chrono::{TimeDelta, Utc};
use imbl::OrdMap;
use imbl_value::InternedString;
use itertools::Itertools;
use josekit::jwk::Jwk;
use reqwest::{Client, Proxy};
use rpc_toolkit::yajrc::RpcError;
@@ -25,7 +24,6 @@ use crate::account::AccountInfo;
use crate::auth::Sessions;
use crate::context::config::ServerConfig;
use crate::db::model::Database;
use crate::db::model::package::TaskSeverity;
use crate::disk::OsPartitionInfo;
use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::block_dev::BlockDev;
@@ -44,7 +42,6 @@ use crate::prelude::*;
use crate::progress::{FullProgressTracker, PhaseProgressTrackerHandle};
use crate::rpc_continuations::{Guid, OpenAuthedContinuations, RpcContinuations};
use crate::service::ServiceMap;
use crate::service::action::update_tasks;
use crate::service::effects::callbacks::ServiceCallbacks;
use crate::service::effects::subcontainer::NVIDIA_OVERLAY_PATH;
use crate::shutdown::Shutdown;
@@ -53,7 +50,7 @@ use crate::util::future::NonDetachingJoinHandle;
use crate::util::io::{TmpDir, delete_file};
use crate::util::lshw::LshwDevice;
use crate::util::sync::{SyncMutex, SyncRwLock, Watch};
use crate::{ActionId, DATA_DIR, PLATFORM, PackageId};
use crate::{DATA_DIR, PLATFORM, PackageId};
pub struct RpcContextSeed {
is_closed: AtomicBool,
@@ -114,7 +111,6 @@ pub struct CleanupInitPhases {
cleanup_sessions: PhaseProgressTrackerHandle,
init_services: PhaseProgressTrackerHandle,
prune_s9pks: PhaseProgressTrackerHandle,
check_tasks: PhaseProgressTrackerHandle,
}
impl CleanupInitPhases {
pub fn new(handle: &FullProgressTracker) -> Self {
@@ -122,7 +118,6 @@ impl CleanupInitPhases {
cleanup_sessions: handle.add_phase("Cleaning up sessions".into(), Some(1)),
init_services: handle.add_phase("Initializing services".into(), Some(10)),
prune_s9pks: handle.add_phase("Pruning S9PKs".into(), Some(1)),
check_tasks: handle.add_phase("Checking action requests".into(), Some(1)),
}
}
}
@@ -173,7 +168,7 @@ impl RpcContext {
init_net_ctrl.complete();
tracing::info!("{}", t!("context.rpc.initialized-net-controller"));
if PLATFORM.ends_with("-nonfree") {
if PLATFORM.ends_with("-nvidia") {
if let Err(e) = Command::new("nvidia-smi")
.invoke(ErrorKind::ParseSysInfo)
.await
@@ -411,7 +406,6 @@ impl RpcContext {
mut cleanup_sessions,
mut init_services,
mut prune_s9pks,
mut check_tasks,
}: CleanupInitPhases,
) -> Result<(), Error> {
cleanup_sessions.start();
@@ -503,76 +497,6 @@ impl RpcContext {
}
prune_s9pks.complete();
check_tasks.start();
let mut action_input: OrdMap<PackageId, BTreeMap<ActionId, Value>> = OrdMap::new();
let tasks: BTreeSet<_> = peek
.as_public()
.as_package_data()
.as_entries()?
.into_iter()
.map(|(_, pde)| {
Ok(pde
.as_tasks()
.as_entries()?
.into_iter()
.map(|(_, r)| {
let t = r.as_task();
Ok::<_, Error>(if t.as_input().transpose_ref().is_some() {
Some((t.as_package_id().de()?, t.as_action_id().de()?))
} else {
None
})
})
.filter_map_ok(|a| a))
})
.flatten_ok()
.map(|a| a.and_then(|a| a))
.try_collect()?;
let procedure_id = Guid::new();
for (package_id, action_id) in tasks {
if let Some(service) = self.services.get(&package_id).await.as_ref() {
if let Some(input) = service
.get_action_input(procedure_id.clone(), action_id.clone(), Value::Null)
.await
.log_err()
.flatten()
.and_then(|i| i.value)
{
action_input
.entry(package_id)
.or_default()
.insert(action_id, input);
}
}
}
self.db
.mutate(|db| {
for (package_id, action_input) in &action_input {
for (action_id, input) in action_input {
for (_, pde) in db.as_public_mut().as_package_data_mut().as_entries_mut()? {
pde.as_tasks_mut().mutate(|tasks| {
Ok(update_tasks(tasks, package_id, action_id, input, false))
})?;
}
}
}
for (_, pde) in db.as_public_mut().as_package_data_mut().as_entries_mut()? {
if pde
.as_tasks()
.de()?
.into_iter()
.any(|(_, t)| t.active && t.task.severity == TaskSeverity::Critical)
{
pde.as_status_info_mut().stop()?;
}
}
Ok(())
})
.await
.result?;
check_tasks.complete();
Ok(())
}
pub async fn call_remote<RemoteContext>(

View File

@@ -24,7 +24,7 @@ use crate::net::host::Host;
use crate::net::host::binding::{
AddSslOptions, BindInfo, BindOptions, Bindings, DerivedAddressInfo, NetInfo,
};
use crate::net::vhost::AlpnInfo;
use crate::net::vhost::{AlpnInfo, PassthroughInfo};
use crate::prelude::*;
use crate::progress::FullProgress;
use crate::system::{KeyboardOptions, SmtpValue};
@@ -121,6 +121,7 @@ impl Public {
},
dns: Default::default(),
default_outbound: None,
passthroughs: Vec::new(),
},
status_info: ServerStatus {
backup_progress: None,
@@ -233,6 +234,8 @@ pub struct NetworkInfo {
#[serde(default)]
#[ts(type = "string | null")]
pub default_outbound: Option<GatewayId>,
#[serde(default)]
pub passthroughs: Vec<PassthroughInfo>,
}
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]

View File

@@ -251,18 +251,35 @@ pub async fn set_hostname_rpc(
ctx: RpcContext,
SetServerHostnameParams { name, hostname }: SetServerHostnameParams,
) -> Result<(), Error> {
let Some(hostname) = ServerHostnameInfo::new_opt(name, hostname)? else {
let name = name.filter(|n| !n.is_empty());
let hostname = hostname
.filter(|h| !h.is_empty())
.map(ServerHostname::new)
.transpose()?;
if name.is_none() && hostname.is_none() {
return Err(Error::new(
eyre!("{}", t!("hostname.must-provide-name-or-hostname")),
ErrorKind::InvalidRequest,
));
};
ctx.db
.mutate(|db| hostname.save(db.as_public_mut().as_server_info_mut()))
let info = ctx
.db
.mutate(|db| {
let server_info = db.as_public_mut().as_server_info_mut();
if let Some(name) = name {
server_info.as_name_mut().ser(&name)?;
}
if let Some(hostname) = &hostname {
hostname.save(server_info)?;
}
ServerHostnameInfo::load(server_info)
})
.await
.result?;
ctx.account.mutate(|a| a.hostname = hostname.clone());
sync_hostname(&hostname.hostname).await?;
ctx.account.mutate(|a| a.hostname = info.clone());
if let Some(h) = hostname {
sync_hostname(&h).await?;
}
Ok(())
}

View File

@@ -25,6 +25,9 @@ pub fn platform_to_arch(platform: &str) -> &str {
if let Some(arch) = platform.strip_suffix("-nonfree") {
return arch;
}
if let Some(arch) = platform.strip_suffix("-nvidia") {
return arch;
}
match platform {
"raspberrypi" | "rockchip64" => "aarch64",
_ => platform,
@@ -268,6 +271,18 @@ pub fn server<C: Context>() -> ParentHandler<C> {
.with_about("about.display-time-uptime")
.with_call_remote::<CliContext>(),
)
.subcommand(
"device-info",
ParentHandler::<C, WithIoFormat<Empty>>::new().root_handler(
from_fn_async(system::device_info)
.with_display_serializable()
.with_custom_display_fn(|handle, result| {
system::display_device_info(handle.params, result)
})
.with_about("about.get-device-info")
.with_call_remote::<CliContext>(),
),
)
.subcommand(
"experimental",
system::experimental::<C>().with_about("about.commands-experimental"),

View File

@@ -20,9 +20,6 @@ use crate::context::RpcContext;
use crate::middleware::auth::DbContext;
use crate::prelude::*;
use crate::rpc_continuations::OpenAuthedContinuations;
use crate::util::Invoke;
use crate::util::io::{create_file_mod, read_file_to_string};
use crate::util::serde::{BASE64, const_true};
use crate::util::sync::SyncMutex;
pub trait SessionAuthContext: DbContext {

View File

@@ -27,7 +27,7 @@ use crate::db::model::public::AcmeSettings;
use crate::db::{DbAccess, DbAccessByKey, DbAccessMut};
use crate::error::ErrorData;
use crate::net::ssl::should_use_cert;
use crate::net::tls::{SingleCertResolver, TlsHandler};
use crate::net::tls::{SingleCertResolver, TlsHandler, TlsHandlerAction};
use crate::net::web_server::Accept;
use crate::prelude::*;
use crate::util::FromStrParser;
@@ -173,7 +173,7 @@ where
&'a mut self,
hello: &'a ClientHello<'a>,
_: &'a <A as Accept>::Metadata,
) -> Option<ServerConfig> {
) -> Option<TlsHandlerAction> {
let domain = hello.server_name()?;
if hello
.alpn()
@@ -207,20 +207,20 @@ where
cfg.alpn_protocols = vec![ACME_TLS_ALPN_NAME.to_vec()];
tracing::info!("performing ACME auth challenge");
return Some(cfg);
return Some(TlsHandlerAction::Tls(cfg));
}
let domains: BTreeSet<InternedString> = [domain.into()].into_iter().collect();
let crypto_provider = self.crypto_provider.clone();
if let Some(cert) = self.get_cert(&domains).await {
return Some(
return Some(TlsHandlerAction::Tls(
ServerConfig::builder_with_provider(crypto_provider)
.with_safe_default_protocol_versions()
.log_err()?
.with_no_client_auth()
.with_cert_resolver(Arc::new(SingleCertResolver(Arc::new(cert)))),
);
));
}
None

View File

@@ -185,6 +185,16 @@ struct CheckPortParams {
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct CheckPortRes {
    /// Externally-visible IPv4 address the check was performed against.
    pub ip: Ipv4Addr,
    /// TCP port that was probed.
    pub port: u16,
    /// True if the remote check service could reach the port from outside.
    pub open_externally: bool,
    /// True if a TCP connect to the port on a local interface address succeeded.
    pub open_internally: bool,
    /// True if connecting to the external ip:port from inside succeeded
    /// (i.e. the router supports NAT hairpinning).
    pub hairpinning: bool,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct IfconfigPortRes {
pub ip: Ipv4Addr,
pub port: u16,
pub reachable: bool,
@@ -211,15 +221,33 @@ async fn check_port(
ErrorKind::NotFound,
)
})?;
let iface = &*ip_info.name;
let internal_ips = ip_info
.subnets
.iter()
.map(|i| i.addr())
.filter(|a| a.is_ipv4())
.map(|a| SocketAddr::new(a, port))
.collect::<Vec<_>>();
let open_internally = tokio::time::timeout(
Duration::from_secs(5),
tokio::net::TcpStream::connect(&*internal_ips),
)
.await
.map_or(false, |r| r.is_ok());
let client = reqwest::Client::builder();
#[cfg(target_os = "linux")]
let client = client.interface(iface);
let client = client.interface(gateway.as_str());
let url = base_url
.join(&format!("/port/{port}"))
.with_kind(ErrorKind::ParseUrl)?;
let res: CheckPortRes = client
let IfconfigPortRes {
ip,
port,
reachable: open_externally,
} = client
.build()?
.get(url)
.timeout(Duration::from_secs(10))
@@ -228,7 +256,21 @@ async fn check_port(
.error_for_status()?
.json()
.await?;
Ok(res)
let hairpinning = tokio::time::timeout(
Duration::from_secs(5),
tokio::net::TcpStream::connect(SocketAddr::new(ip.into(), port)),
)
.await
.map_or(false, |r| r.is_ok());
Ok(CheckPortRes {
ip,
port,
open_externally,
open_internally,
hairpinning,
})
}
#[derive(Debug, Clone, Deserialize, Serialize, Parser, TS)]

View File

@@ -204,7 +204,6 @@ pub async fn add_public_domain<Kind: HostApiKind>(
})
.await
.result?;
Kind::sync_host(&ctx, inheritance).await?;
tokio::task::spawn_blocking(|| {
crate::net::dns::query_dns(ctx, crate::net::dns::QueryDnsParams { fqdn })
@@ -242,7 +241,6 @@ pub async fn remove_public_domain<Kind: HostApiKind>(
})
.await
.result?;
Kind::sync_host(&ctx, inheritance).await?;
Ok(())
}
@@ -279,7 +277,6 @@ pub async fn add_private_domain<Kind: HostApiKind>(
})
.await
.result?;
Kind::sync_host(&ctx, inheritance).await?;
Ok(())
}
@@ -306,7 +303,6 @@ pub async fn remove_private_domain<Kind: HostApiKind>(
})
.await
.result?;
Kind::sync_host(&ctx, inheritance).await?;
Ok(())
}

View File

@@ -358,5 +358,5 @@ pub async fn set_address_enabled<Kind: HostApiKind>(
})
.await
.result?;
Kind::sync_host(&ctx, inheritance).await
Ok(())
}

View File

@@ -1,5 +1,4 @@
use std::collections::{BTreeMap, BTreeSet};
use std::future::Future;
use std::net::{IpAddr, SocketAddrV4};
use std::panic::RefUnwindSafe;
@@ -182,15 +181,26 @@ impl Model<Host> {
opt.secure
.map_or(true, |s| !(s.ssl && opt.add_ssl.is_some()))
}) {
available.insert(HostnameInfo {
ssl: opt.secure.map_or(false, |s| s.ssl),
public: false,
hostname: mdns_host.clone(),
port: Some(port),
metadata: HostnameMetadata::Mdns {
gateways: mdns_gateways.clone(),
},
});
let mdns_gateways = if opt.secure.is_some() {
mdns_gateways.clone()
} else {
mdns_gateways
.iter()
.filter(|g| gateways.get(*g).map_or(false, |g| g.secure()))
.cloned()
.collect()
};
if !mdns_gateways.is_empty() {
available.insert(HostnameInfo {
ssl: opt.secure.map_or(false, |s| s.ssl),
public: false,
hostname: mdns_host.clone(),
port: Some(port),
metadata: HostnameMetadata::Mdns {
gateways: mdns_gateways,
},
});
}
}
if let Some(port) = net.assigned_ssl_port {
available.insert(HostnameInfo {
@@ -239,6 +249,20 @@ impl Model<Host> {
port: Some(port),
metadata,
});
} else if opt.secure.map_or(false, |s| s.ssl)
&& opt.add_ssl.is_none()
&& available_ports.is_ssl(opt.preferred_external_port)
&& net.assigned_port != Some(opt.preferred_external_port)
{
// Service handles its own TLS and the preferred port is
// allocated as SSL — add an address for passthrough vhost.
available.insert(HostnameInfo {
ssl: true,
public: true,
hostname: domain,
port: Some(opt.preferred_external_port),
metadata,
});
}
}
@@ -283,6 +307,20 @@ impl Model<Host> {
gateways: domain_gateways,
},
});
} else if opt.secure.map_or(false, |s| s.ssl)
&& opt.add_ssl.is_none()
&& available_ports.is_ssl(opt.preferred_external_port)
&& net.assigned_port != Some(opt.preferred_external_port)
{
available.insert(HostnameInfo {
ssl: true,
public: true,
hostname: domain,
port: Some(opt.preferred_external_port),
metadata: HostnameMetadata::PrivateDomain {
gateways: domain_gateways,
},
});
}
}
bind.as_addresses_mut().as_available_mut().ser(&available)?;
@@ -429,10 +467,6 @@ pub trait HostApiKind: 'static {
inheritance: &Self::Inheritance,
db: &'a mut DatabaseModel,
) -> Result<&'a mut Model<Host>, Error>;
fn sync_host(
ctx: &RpcContext,
inheritance: Self::Inheritance,
) -> impl Future<Output = Result<(), Error>> + Send;
}
pub struct ForPackage;
impl HostApiKind for ForPackage {
@@ -451,12 +485,6 @@ impl HostApiKind for ForPackage {
) -> Result<&'a mut Model<Host>, Error> {
host_for(db, Some(package), host)
}
async fn sync_host(ctx: &RpcContext, (package, host): Self::Inheritance) -> Result<(), Error> {
let service = ctx.services.get(&package).await;
let service_ref = service.as_ref().or_not_found(&package)?;
service_ref.sync_host(host).await?;
Ok(())
}
}
pub struct ForServer;
impl HostApiKind for ForServer {
@@ -472,9 +500,6 @@ impl HostApiKind for ForServer {
) -> Result<&'a mut Model<Host>, Error> {
host_for(db, None, &HostId::default())
}
async fn sync_host(ctx: &RpcContext, _: Self::Inheritance) -> Result<(), Error> {
ctx.os_net_service.sync_host(HostId::default()).await
}
}
pub fn host_api<C: Context>() -> ParentHandler<C, RequiresPackageId> {

View File

@@ -76,9 +76,22 @@ impl NetController {
],
)
.await?;
let passthroughs = db
.peek()
.await
.as_public()
.as_server_info()
.as_network()
.as_passthroughs()
.de()?;
Ok(Self {
db: db.clone(),
vhost: VHostController::new(db.clone(), net_iface.clone(), crypto_provider),
vhost: VHostController::new(
db.clone(),
net_iface.clone(),
crypto_provider,
passthroughs,
),
tls_client_config,
dns: DnsController::init(db, &net_iface.watcher).await?,
forward: InterfacePortForwardController::new(net_iface.watcher.subscribe()),
@@ -237,6 +250,7 @@ impl NetServiceData {
connect_ssl: connect_ssl
.clone()
.map(|_| ctrl.tls_client_config.clone()),
passthrough: false,
},
);
}
@@ -253,7 +267,9 @@ impl NetServiceData {
_ => continue,
}
let domain = &addr_info.hostname;
let domain_ssl_port = addr_info.port.unwrap_or(443);
let Some(domain_ssl_port) = addr_info.port else {
continue;
};
let key = (Some(domain.clone()), domain_ssl_port);
let target = vhosts.entry(key).or_insert_with(|| ProxyTarget {
public: BTreeSet::new(),
@@ -266,6 +282,7 @@ impl NetServiceData {
addr,
add_x_forwarded_headers: ssl.add_x_forwarded_headers,
connect_ssl: connect_ssl.clone().map(|_| ctrl.tls_client_config.clone()),
passthrough: false,
});
if addr_info.public {
for gw in addr_info.metadata.gateways() {
@@ -317,6 +334,53 @@ impl NetServiceData {
),
);
}
// Passthrough vhosts: if the service handles its own TLS
// (secure.ssl && no add_ssl) and a domain address is enabled on
// an SSL port different from assigned_port, add a passthrough
// vhost so the service's TLS endpoint is reachable on that port.
if bind.options.secure.map_or(false, |s| s.ssl) && bind.options.add_ssl.is_none() {
let assigned = bind.net.assigned_port;
for addr_info in &enabled_addresses {
if !addr_info.ssl {
continue;
}
let Some(pt_port) = addr_info.port.filter(|p| assigned != Some(*p)) else {
continue;
};
match &addr_info.metadata {
HostnameMetadata::PublicDomain { .. }
| HostnameMetadata::PrivateDomain { .. } => {}
_ => continue,
}
let domain = &addr_info.hostname;
let key = (Some(domain.clone()), pt_port);
let target = vhosts.entry(key).or_insert_with(|| ProxyTarget {
public: BTreeSet::new(),
private: BTreeSet::new(),
acme: None,
addr,
add_x_forwarded_headers: false,
connect_ssl: Err(AlpnInfo::Reflect),
passthrough: true,
});
if addr_info.public {
for gw in addr_info.metadata.gateways() {
target.public.insert(gw.clone());
}
} else {
for gw in addr_info.metadata.gateways() {
if let Some(info) = net_ifaces.get(gw) {
if let Some(ip_info) = &info.ip_info {
for subnet in &ip_info.subnets {
target.private.insert(subnet.addr());
}
}
}
}
}
}
}
}
// ── Phase 3: Reconcile ──
@@ -725,13 +789,6 @@ impl NetService {
.result
}
pub async fn sync_host(&self, _id: HostId) -> Result<(), Error> {
let current = self.synced.peek(|v| *v);
let mut w = self.synced.clone();
w.wait_for(|v| *v > current).await;
Ok(())
}
pub async fn remove_all(mut self) -> Result<(), Error> {
if Weak::upgrade(&self.data.lock().await.controller).is_none() {
self.shutdown = true;

View File

@@ -36,7 +36,7 @@ use crate::db::{DbAccess, DbAccessMut};
use crate::hostname::ServerHostname;
use crate::init::check_time_is_synchronized;
use crate::net::gateway::GatewayInfo;
use crate::net::tls::TlsHandler;
use crate::net::tls::{TlsHandler, TlsHandlerAction};
use crate::net::web_server::{Accept, ExtractVisitor, TcpMetadata, extract};
use crate::prelude::*;
use crate::util::serde::Pem;
@@ -620,7 +620,7 @@ where
&mut self,
hello: &ClientHello<'_>,
metadata: &<A as Accept>::Metadata,
) -> Option<ServerConfig> {
) -> Option<TlsHandlerAction> {
let hostnames: BTreeSet<InternedString> = hello
.server_name()
.map(InternedString::from)
@@ -684,5 +684,6 @@ where
)
}
.log_err()
.map(TlsHandlerAction::Tls)
}
}

View File

@@ -9,7 +9,7 @@ use async_compression::tokio::bufread::GzipEncoder;
use axum::Router;
use axum::body::Body;
use axum::extract::{self as x, Request};
use axum::response::{IntoResponse, Response};
use axum::response::Response;
use axum::routing::{any, get};
use base64::display::Base64Display;
use digest::Digest;

View File

@@ -16,6 +16,14 @@ use tokio_rustls::rustls::sign::CertifiedKey;
use tokio_rustls::rustls::{ClientConfig, RootCertStore, ServerConfig};
use visit_rs::{Visit, VisitFields};
/// Result of a TLS handler's decision about how to handle a connection.
pub enum TlsHandlerAction {
    /// Complete the TLS handshake with this ServerConfig.
    Tls(ServerConfig),
    /// Don't complete TLS — rewind the BackTrackingIO and return the raw stream,
    /// so the buffered ClientHello bytes are replayed to whatever consumes the
    /// stream next (e.g. a TLS passthrough backend).
    Passthrough,
}
use crate::net::http::handle_http_on_https;
use crate::net::web_server::{Accept, AcceptStream, MetadataVisitor};
use crate::prelude::*;
@@ -50,7 +58,7 @@ pub trait TlsHandler<'a, A: Accept> {
&'a mut self,
hello: &'a ClientHello<'a>,
metadata: &'a A::Metadata,
) -> impl Future<Output = Option<ServerConfig>> + Send + 'a;
) -> impl Future<Output = Option<TlsHandlerAction>> + Send + 'a;
}
#[derive(Clone)]
@@ -66,7 +74,7 @@ where
&'a mut self,
hello: &'a ClientHello<'a>,
metadata: &'a <A as Accept>::Metadata,
) -> Option<ServerConfig> {
) -> Option<TlsHandlerAction> {
if let Some(config) = self.0.get_config(hello, metadata).await {
return Some(config);
}
@@ -86,7 +94,7 @@ pub trait WrapTlsHandler<A: Accept> {
prev: ServerConfig,
hello: &'a ClientHello<'a>,
metadata: &'a <A as Accept>::Metadata,
) -> impl Future<Output = Option<ServerConfig>> + Send + 'a
) -> impl Future<Output = Option<TlsHandlerAction>> + Send + 'a
where
Self: 'a;
}
@@ -102,9 +110,12 @@ where
&'a mut self,
hello: &'a ClientHello<'a>,
metadata: &'a <A as Accept>::Metadata,
) -> Option<ServerConfig> {
let prev = self.inner.get_config(hello, metadata).await?;
self.wrapper.wrap(prev, hello, metadata).await
) -> Option<TlsHandlerAction> {
let action = self.inner.get_config(hello, metadata).await?;
match action {
TlsHandlerAction::Tls(cfg) => self.wrapper.wrap(cfg, hello, metadata).await,
other => Some(other),
}
}
}
@@ -203,34 +214,56 @@ where
}
};
let hello = mid.client_hello();
if let Some(cfg) = tls_handler.get_config(&hello, &metadata).await {
let buffered = mid.io.stop_buffering();
mid.io
.write_all(&buffered)
.await
.with_kind(ErrorKind::Network)?;
return Ok(match mid.into_stream(Arc::new(cfg)).await {
Ok(stream) => {
let s = stream.get_ref().1;
Some((
TlsMetadata {
inner: metadata,
tls_info: TlsHandshakeInfo {
sni: s.server_name().map(InternedString::intern),
alpn: s
.alpn_protocol()
.map(|a| MaybeUtf8String(a.to_vec())),
let sni = hello.server_name().map(InternedString::intern);
match tls_handler.get_config(&hello, &metadata).await {
Some(TlsHandlerAction::Tls(cfg)) => {
let buffered = mid.io.stop_buffering();
mid.io
.write_all(&buffered)
.await
.with_kind(ErrorKind::Network)?;
return Ok(match mid.into_stream(Arc::new(cfg)).await {
Ok(stream) => {
let s = stream.get_ref().1;
Some((
TlsMetadata {
inner: metadata,
tls_info: TlsHandshakeInfo {
sni: s
.server_name()
.map(InternedString::intern),
alpn: s
.alpn_protocol()
.map(|a| MaybeUtf8String(a.to_vec())),
},
},
},
Box::pin(stream) as AcceptStream,
))
}
Err(e) => {
tracing::trace!("Error completing TLS handshake: {e}");
tracing::trace!("{e:?}");
None
}
});
Box::pin(stream) as AcceptStream,
))
}
Err(e) => {
tracing::trace!("Error completing TLS handshake: {e}");
tracing::trace!("{e:?}");
None
}
});
}
Some(TlsHandlerAction::Passthrough) => {
let (dummy, _drop) = tokio::io::duplex(1);
let mut bt = std::mem::replace(
&mut mid.io,
BackTrackingIO::new(Box::pin(dummy) as AcceptStream),
);
drop(mid);
bt.rewind();
return Ok(Some((
TlsMetadata {
inner: metadata,
tls_info: TlsHandshakeInfo { sni, alpn: None },
},
Box::pin(bt) as AcceptStream,
)));
}
None => {}
}
Ok(None)

View File

@@ -6,12 +6,13 @@ use std::sync::{Arc, Weak};
use std::task::{Poll, ready};
use async_acme::acme::ACME_TLS_ALPN_NAME;
use clap::Parser;
use color_eyre::eyre::eyre;
use futures::FutureExt;
use futures::future::BoxFuture;
use imbl::OrdMap;
use imbl_value::{InOMap, InternedString};
use rpc_toolkit::{Context, HandlerArgs, HandlerExt, ParentHandler, from_fn};
use rpc_toolkit::{Context, HandlerArgs, HandlerExt, ParentHandler, from_fn, from_fn_async};
use serde::{Deserialize, Serialize};
use tokio::net::{TcpListener, TcpStream};
use tokio_rustls::TlsConnector;
@@ -35,7 +36,7 @@ use crate::net::gateway::{
};
use crate::net::ssl::{CertStore, RootCaTlsHandler};
use crate::net::tls::{
ChainedHandler, TlsHandlerWrapper, TlsListener, TlsMetadata, WrapTlsHandler,
ChainedHandler, TlsHandlerAction, TlsHandlerWrapper, TlsListener, TlsMetadata, WrapTlsHandler,
};
use crate::net::utils::ipv6_is_link_local;
use crate::net::web_server::{Accept, AcceptStream, ExtractVisitor, TcpMetadata, extract};
@@ -46,68 +47,228 @@ use crate::util::serde::{HandlerExtSerde, MaybeUtf8String, display_serializable}
use crate::util::sync::{SyncMutex, Watch};
use crate::{GatewayId, ResultExt};
/// A vhost passthrough route: forwards matched connections on
/// (hostname, listen_port) to a raw TCP backend without terminating TLS.
/// Persisted under `server-info.network.passthroughs` so routes survive restarts.
#[derive(Debug, Clone, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct PassthroughInfo {
    /// Hostname (SNI) this passthrough matches.
    #[ts(type = "string")]
    pub hostname: InternedString,
    /// Port the vhost controller listens on for this route.
    pub listen_port: u16,
    /// Address matched connections are forwarded to.
    #[ts(type = "string")]
    pub backend: SocketAddr,
    /// Gateways on which this route is exposed publicly.
    #[ts(type = "string[]")]
    pub public_gateways: BTreeSet<GatewayId>,
    /// Local IPs on which this route is exposed privately.
    #[ts(type = "string[]")]
    pub private_ips: BTreeSet<IpAddr>,
}
/// Parameters for the `add-passthrough` subcommand (RPC + CLI via clap).
#[derive(Debug, Clone, Deserialize, Serialize, Parser)]
#[serde(rename_all = "kebab-case")]
struct AddPassthroughParams {
    /// Hostname (SNI) to match.
    #[arg(long)]
    pub hostname: InternedString,
    /// Port to listen on.
    #[arg(long)]
    pub listen_port: u16,
    /// Backend address to forward matched connections to.
    #[arg(long)]
    pub backend: SocketAddr,
    /// Repeatable: gateway(s) on which to expose the route publicly.
    #[arg(long)]
    pub public_gateway: Vec<GatewayId>,
    /// Repeatable: local IP(s) on which to expose the route privately.
    #[arg(long)]
    pub private_ip: Vec<IpAddr>,
}
/// Parameters for the `remove-passthrough` subcommand; a passthrough is
/// identified by its (hostname, listen_port) pair.
#[derive(Debug, Clone, Deserialize, Serialize, Parser)]
#[serde(rename_all = "kebab-case")]
struct RemovePassthroughParams {
    /// Hostname (SNI) of the route to remove.
    #[arg(long)]
    pub hostname: InternedString,
    /// Listening port of the route to remove.
    #[arg(long)]
    pub listen_port: u16,
}
pub fn vhost_api<C: Context>() -> ParentHandler<C> {
ParentHandler::new().subcommand(
"dump-table",
from_fn(|ctx: RpcContext| Ok(ctx.net_controller.vhost.dump_table()))
.with_display_serializable()
.with_custom_display_fn(|HandlerArgs { params, .. }, res| {
use prettytable::*;
ParentHandler::new()
.subcommand(
"dump-table",
from_fn(dump_table)
.with_display_serializable()
.with_custom_display_fn(|HandlerArgs { params, .. }, res| {
use prettytable::*;
if let Some(format) = params.format {
display_serializable(format, res)?;
return Ok::<_, Error>(());
}
if let Some(format) = params.format {
display_serializable(format, res)?;
return Ok::<_, Error>(());
}
let mut table = Table::new();
table.add_row(row![bc => "FROM", "TO", "ACTIVE"]);
let mut table = Table::new();
table.add_row(row![bc => "FROM", "TO", "ACTIVE"]);
for (external, targets) in res {
for (host, targets) in targets {
for (idx, target) in targets.into_iter().enumerate() {
table.add_row(row![
format!(
"{}:{}",
host.as_ref().map(|s| &**s).unwrap_or("*"),
external.0
),
target,
idx == 0
]);
for (external, targets) in res {
for (host, targets) in targets {
for (idx, target) in targets.into_iter().enumerate() {
table.add_row(row![
format!(
"{}:{}",
host.as_ref().map(|s| &**s).unwrap_or("*"),
external.0
),
target,
idx == 0
]);
}
}
}
}
table.print_tty(false)?;
table.print_tty(false)?;
Ok(())
})
.with_call_remote::<CliContext>(),
)
Ok(())
})
.with_call_remote::<CliContext>(),
)
.subcommand(
"add-passthrough",
from_fn_async(add_passthrough)
.no_display()
.with_call_remote::<CliContext>(),
)
.subcommand(
"remove-passthrough",
from_fn_async(remove_passthrough)
.no_display()
.with_call_remote::<CliContext>(),
)
.subcommand(
"list-passthrough",
from_fn(list_passthrough)
.with_display_serializable()
.with_call_remote::<CliContext>(),
)
}
/// RPC handler: snapshot of the vhost routing table
/// (port → hostname → targets), delegated to the controller.
fn dump_table(
    ctx: RpcContext,
) -> Result<BTreeMap<JsonKey<u16>, BTreeMap<JsonKey<Option<InternedString>>, EqSet<String>>>, Error>
{
    let table = ctx.net_controller.vhost.dump_table();
    Ok(table)
}
/// RPC handler: register a new passthrough route with the live vhost
/// controller, then persist it in the database (replacing any existing
/// entry with the same hostname + listen port).
async fn add_passthrough(
    ctx: RpcContext,
    AddPassthroughParams {
        hostname,
        listen_port,
        backend,
        public_gateway,
        private_ip,
    }: AddPassthroughParams,
) -> Result<(), Error> {
    let gateways: BTreeSet<GatewayId> = BTreeSet::from_iter(public_gateway);
    let ips: BTreeSet<IpAddr> = BTreeSet::from_iter(private_ip);

    // Register with the running controller first so the route is live
    // before (and regardless of whether) persistence succeeds.
    ctx.net_controller.vhost.add_passthrough(
        hostname.clone(),
        listen_port,
        backend,
        gateways.clone(),
        ips.clone(),
    )?;

    // Persist: dedupe on (hostname, listen_port), then append the new record.
    ctx.db
        .mutate(|db| {
            let model = db
                .as_public_mut()
                .as_server_info_mut()
                .as_network_mut()
                .as_passthroughs_mut();
            let mut entries: Vec<PassthroughInfo> = model.de()?;
            entries.retain(|e| e.hostname != hostname || e.listen_port != listen_port);
            entries.push(PassthroughInfo {
                hostname,
                listen_port,
                backend,
                public_gateways: gateways,
                private_ips: ips,
            });
            model.ser(&entries)
        })
        .await
        .result?;
    Ok(())
}
/// RPC handler: drop a passthrough route from the live controller, then
/// delete its persisted record from the database.
async fn remove_passthrough(
    ctx: RpcContext,
    RemovePassthroughParams {
        hostname,
        listen_port,
    }: RemovePassthroughParams,
) -> Result<(), Error> {
    // Tear down the live route first.
    ctx.net_controller
        .vhost
        .remove_passthrough(&hostname, listen_port);

    // Then remove the matching persisted entry, if any.
    ctx.db
        .mutate(|db| {
            let model = db
                .as_public_mut()
                .as_server_info_mut()
                .as_network_mut()
                .as_passthroughs_mut();
            let mut entries: Vec<PassthroughInfo> = model.de()?;
            entries.retain(|e| e.hostname != hostname || e.listen_port != listen_port);
            model.ser(&entries)
        })
        .await
        .result?;
    Ok(())
}
/// RPC handler: list the currently registered passthrough routes.
fn list_passthrough(ctx: RpcContext) -> Result<Vec<PassthroughInfo>, Error> {
    let entries = ctx.net_controller.vhost.list_passthrough();
    Ok(entries)
}
// not allowed: <=1024, >=32768, 5355, 5432, 9050, 6010, 9051, 5353
/// Live state for a registered passthrough route.
struct PassthroughHandle {
    // Refcount handle returned by `VHostController::add`; keeps the
    // underlying vhost target registered while this handle is alive.
    _rc: Arc<()>,
    // Backend address connections are forwarded to.
    backend: SocketAddr,
    // Public gateways the route is exposed on.
    public: BTreeSet<GatewayId>,
    // Private IPs the route is exposed on.
    private: BTreeSet<IpAddr>,
}
/// Owns one `VHostServer` per externally bound port and the set of
/// manually registered passthrough routes.
pub struct VHostController {
    db: TypedPatchDb<Database>,
    // Network-interface watcher shared with each per-port listener.
    interfaces: Arc<NetworkInterfaceController>,
    crypto_provider: Arc<CryptoProvider>,
    acme_cache: AcmeTlsAlpnCache,
    // One server per listening port.
    servers: SyncMutex<BTreeMap<u16, VHostServer<VHostBindListener>>>,
    // Passthrough routes keyed by (hostname, listen port).
    passthrough_handles: SyncMutex<BTreeMap<(InternedString, u16), PassthroughHandle>>,
}
impl VHostController {
pub fn new(
db: TypedPatchDb<Database>,
interfaces: Arc<NetworkInterfaceController>,
crypto_provider: Arc<CryptoProvider>,
passthroughs: Vec<PassthroughInfo>,
) -> Self {
Self {
let controller = Self {
db,
interfaces,
crypto_provider,
acme_cache: Arc::new(SyncMutex::new(BTreeMap::new())),
servers: SyncMutex::new(BTreeMap::new()),
passthrough_handles: SyncMutex::new(BTreeMap::new()),
};
for pt in passthroughs {
if let Err(e) = controller.add_passthrough(
pt.hostname,
pt.listen_port,
pt.backend,
pt.public_gateways,
pt.private_ips,
) {
tracing::warn!("failed to restore passthrough: {e}");
}
}
controller
}
#[instrument(skip_all)]
pub fn add(
@@ -120,20 +281,7 @@ impl VHostController {
let server = if let Some(server) = writable.remove(&external) {
server
} else {
let bind_reqs = Watch::new(VHostBindRequirements::default());
let listener = VHostBindListener {
ip_info: self.interfaces.watcher.subscribe(),
port: external,
bind_reqs: bind_reqs.clone_unseen(),
listeners: BTreeMap::new(),
};
VHostServer::new(
listener,
bind_reqs,
self.db.clone(),
self.crypto_provider.clone(),
self.acme_cache.clone(),
)
self.create_server(external)
};
let rc = server.add(hostname, target);
writable.insert(external, server);
@@ -141,6 +289,75 @@ impl VHostController {
})
}
/// Construct a fresh `VHostServer` listening on `port`, wired to this
/// controller's interface watcher, DB, crypto provider, and ACME cache.
fn create_server(&self, port: u16) -> VHostServer<VHostBindListener> {
    let requirements = Watch::new(VHostBindRequirements::default());
    VHostServer::new(
        VHostBindListener {
            ip_info: self.interfaces.watcher.subscribe(),
            port,
            bind_reqs: requirements.clone_unseen(),
            listeners: BTreeMap::new(),
        },
        requirements,
        self.db.clone(),
        self.crypto_provider.clone(),
        self.acme_cache.clone(),
    )
}
/// Register a passthrough target with the vhost layer and record a
/// handle for it so it can later be listed or removed.
pub fn add_passthrough(
    &self,
    hostname: InternedString,
    port: u16,
    backend: SocketAddr,
    public: BTreeSet<GatewayId>,
    private: BTreeSet<IpAddr>,
) -> Result<(), Error> {
    // `passthrough: true` makes the TLS layer hand the raw stream through
    // instead of terminating TLS for this target.
    let rc = self.add(
        Some(hostname.clone()),
        port,
        DynVHostTarget::new(ProxyTarget {
            public: public.clone(),
            private: private.clone(),
            acme: None,
            addr: backend,
            add_x_forwarded_headers: false,
            connect_ssl: Err(AlpnInfo::Reflect),
            passthrough: true,
        }),
    )?;
    let handle = PassthroughHandle {
        _rc: rc,
        backend,
        public,
        private,
    };
    self.passthrough_handles.mutate(|handles| {
        handles.insert((hostname, port), handle);
    });
    Ok(())
}
/// Drop the handle for a passthrough route (releasing its target
/// registration) and garbage-collect the port's server if now unused.
pub fn remove_passthrough(&self, hostname: &InternedString, port: u16) {
    let key = (hostname.clone(), port);
    self.passthrough_handles.mutate(|handles| {
        handles.remove(&key);
    });
    self.gc(Some(hostname.clone()), port);
}
/// Snapshot the currently registered passthrough routes as
/// serializable `PassthroughInfo` records.
pub fn list_passthrough(&self) -> Vec<PassthroughInfo> {
    self.passthrough_handles.peek(|handles| {
        let mut out = Vec::with_capacity(handles.len());
        for ((hostname, port), handle) in handles {
            out.push(PassthroughInfo {
                hostname: hostname.clone(),
                listen_port: *port,
                backend: handle.backend,
                public_gateways: handle.public.clone(),
                private_ips: handle.private.clone(),
            });
        }
        out
    })
}
pub fn dump_table(
&self,
) -> BTreeMap<JsonKey<u16>, BTreeMap<JsonKey<Option<InternedString>>, EqSet<String>>> {
@@ -330,6 +547,9 @@ pub trait VHostTarget<A: Accept>: std::fmt::Debug + Eq {
fn bind_requirements(&self) -> (BTreeSet<GatewayId>, BTreeSet<IpAddr>) {
(BTreeSet::new(), BTreeSet::new())
}
fn is_passthrough(&self) -> bool {
false
}
fn preprocess<'a>(
&'a self,
prev: ServerConfig,
@@ -349,6 +569,7 @@ pub trait DynVHostTargetT<A: Accept>: std::fmt::Debug + Any {
fn filter(&self, metadata: &<A as Accept>::Metadata) -> bool;
fn acme(&self) -> Option<&AcmeProvider>;
fn bind_requirements(&self) -> (BTreeSet<GatewayId>, BTreeSet<IpAddr>);
fn is_passthrough(&self) -> bool;
fn preprocess<'a>(
&'a self,
prev: ServerConfig,
@@ -373,6 +594,9 @@ impl<A: Accept, T: VHostTarget<A> + 'static> DynVHostTargetT<A> for T {
fn acme(&self) -> Option<&AcmeProvider> {
VHostTarget::acme(self)
}
fn is_passthrough(&self) -> bool {
VHostTarget::is_passthrough(self)
}
fn bind_requirements(&self) -> (BTreeSet<GatewayId>, BTreeSet<IpAddr>) {
VHostTarget::bind_requirements(self)
}
@@ -459,6 +683,7 @@ pub struct ProxyTarget {
pub addr: SocketAddr,
pub add_x_forwarded_headers: bool,
pub connect_ssl: Result<Arc<ClientConfig>, AlpnInfo>, // Ok: yes, connect using ssl, pass through alpn; Err: connect tcp, use provided strategy for alpn
pub passthrough: bool,
}
impl PartialEq for ProxyTarget {
fn eq(&self, other: &Self) -> bool {
@@ -466,6 +691,7 @@ impl PartialEq for ProxyTarget {
&& self.private == other.private
&& self.acme == other.acme
&& self.addr == other.addr
&& self.passthrough == other.passthrough
&& self.connect_ssl.as_ref().map(Arc::as_ptr)
== other.connect_ssl.as_ref().map(Arc::as_ptr)
}
@@ -480,6 +706,7 @@ impl fmt::Debug for ProxyTarget {
.field("addr", &self.addr)
.field("add_x_forwarded_headers", &self.add_x_forwarded_headers)
.field("connect_ssl", &self.connect_ssl.as_ref().map(|_| ()))
.field("passthrough", &self.passthrough)
.finish()
}
}
@@ -524,6 +751,9 @@ where
fn bind_requirements(&self) -> (BTreeSet<GatewayId>, BTreeSet<IpAddr>) {
(self.public.clone(), self.private.clone())
}
fn is_passthrough(&self) -> bool {
self.passthrough
}
async fn preprocess<'a>(
&'a self,
mut prev: ServerConfig,
@@ -677,7 +907,7 @@ where
prev: ServerConfig,
hello: &'a ClientHello<'a>,
metadata: &'a <A as Accept>::Metadata,
) -> Option<ServerConfig>
) -> Option<TlsHandlerAction>
where
Self: 'a,
{
@@ -687,7 +917,7 @@ where
.flatten()
.any(|a| a == ACME_TLS_ALPN_NAME)
{
return Some(prev);
return Some(TlsHandlerAction::Tls(prev));
}
let (target, rc) = self.0.peek(|m| {
@@ -700,11 +930,16 @@ where
.map(|(t, rc)| (t.clone(), rc.clone()))
})?;
let is_pt = target.0.is_passthrough();
let (prev, store) = target.into_preprocessed(rc, prev, hello, metadata).await?;
self.1 = Some(store);
Some(prev)
if is_pt {
Some(TlsHandlerAction::Passthrough)
} else {
Some(TlsHandlerAction::Tls(prev))
}
}
}

View File

@@ -359,6 +359,7 @@ pub async fn install_os_to(
"riscv64" => install.arg("--target=riscv64-efi"),
_ => &mut install,
};
install.arg("--no-nvram");
}
install
.arg(disk_path)

View File

@@ -255,30 +255,7 @@ impl Model<PackageVersionInfo> {
}
if let Some(hw) = &device_info.hardware {
self.as_s9pks_mut().mutate(|s9pks| {
s9pks.retain(|(hw_req, _)| {
if let Some(arch) = &hw_req.arch {
if !arch.contains(&hw.arch) {
return false;
}
}
if let Some(ram) = hw_req.ram {
if hw.ram < ram {
return false;
}
}
if let Some(dev) = &hw.devices {
for device_filter in &hw_req.device {
if !dev
.iter()
.filter(|d| d.class() == &*device_filter.class)
.any(|d| device_filter.matches(d))
{
return false;
}
}
}
true
});
s9pks.retain(|(hw_req, _)| hw_req.is_compatible(hw));
if hw.devices.is_some() {
s9pks.sort_by_key(|(req, _)| req.specificity_desc());
} else {

View File

@@ -58,6 +58,9 @@ pub struct AddPackageSignerParams {
#[arg(long, help = "help.arg.version-range")]
#[ts(type = "string | null")]
pub versions: Option<VersionRange>,
#[arg(long, help = "help.arg.merge")]
#[ts(optional)]
pub merge: Option<bool>,
}
pub async fn add_package_signer(
@@ -66,6 +69,7 @@ pub async fn add_package_signer(
id,
signer,
versions,
merge,
}: AddPackageSignerParams,
) -> Result<(), Error> {
ctx.db
@@ -76,13 +80,22 @@ pub async fn add_package_signer(
"unknown signer {signer}"
);
let versions = versions.unwrap_or_default();
db.as_index_mut()
.as_package_mut()
.as_packages_mut()
.as_idx_mut(&id)
.or_not_found(&id)?
.as_authorized_mut()
.insert(&signer, &versions.unwrap_or_default())?;
.upsert(&signer, || Ok(VersionRange::None))?
.mutate(|existing| {
*existing = if merge.unwrap_or(false) {
VersionRange::or(existing.clone(), versions)
} else {
versions
};
Ok(())
})?;
Ok(())
})

View File

@@ -3,16 +3,17 @@ use std::path::PathBuf;
use std::sync::Arc;
use clap::Parser;
use rpc_toolkit::{Empty, HandlerExt, ParentHandler, from_fn_async};
use rpc_toolkit::{Empty, HandlerArgs, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize};
use tokio::process::Command;
use ts_rs::TS;
use url::Url;
use crate::ImageId;
use crate::context::CliContext;
use crate::context::{CliContext, RpcContext};
use crate::prelude::*;
use crate::s9pk::manifest::Manifest;
use crate::registry::device_info::DeviceInfo;
use crate::s9pk::manifest::{HardwareRequirements, Manifest};
use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile;
use crate::s9pk::v2::SIG_CONTEXT;
use crate::s9pk::v2::pack::ImageConfig;
@@ -70,6 +71,15 @@ pub fn s9pk() -> ParentHandler<CliContext> {
.no_display()
.with_about("about.publish-s9pk"),
)
.subcommand(
"select",
from_fn_async(select)
.with_custom_display_fn(|_, path: PathBuf| {
println!("{}", path.display());
Ok(())
})
.with_about("about.select-s9pk-for-device"),
)
}
#[derive(Deserialize, Serialize, Parser)]
@@ -323,3 +333,97 @@ async fn publish(ctx: CliContext, S9pkPath { s9pk: s9pk_path }: S9pkPath) -> Res
.await?;
crate::registry::package::add::cli_add_package_impl(ctx, s9pk, vec![s3url], false).await
}
/// CLI parameters for `s9pk select`: zero or more candidate s9pk file paths.
/// When empty, the current directory is scanned for `*.s9pk` files instead.
#[derive(Deserialize, Serialize, Parser)]
struct SelectParams {
    #[arg(help = "help.arg.s9pk-file-paths")]
    s9pks: Vec<PathBuf>,
}
async fn select(
HandlerArgs {
context,
params: SelectParams { s9pks },
..
}: HandlerArgs<CliContext, SelectParams>,
) -> Result<PathBuf, Error> {
// Resolve file list: use provided paths or scan cwd for *.s9pk
let paths = if s9pks.is_empty() {
let mut found = Vec::new();
let mut entries = tokio::fs::read_dir(".").await?;
while let Some(entry) = entries.next_entry().await? {
let path = entry.path();
if path.extension().and_then(|e| e.to_str()) == Some("s9pk") {
found.push(path);
}
}
if found.is_empty() {
return Err(Error::new(
eyre!("no .s9pk files found in current directory"),
ErrorKind::NotFound,
));
}
found
} else {
s9pks
};
// Fetch DeviceInfo from the target server
let device_info: DeviceInfo = from_value(
context
.call_remote::<RpcContext>("server.device-info", imbl_value::json!({}))
.await?,
)?;
// Filter and rank s9pk files by compatibility
let mut compatible: Vec<(PathBuf, HardwareRequirements)> = Vec::new();
for path in &paths {
let s9pk = match super::S9pk::open(path, None).await {
Ok(s9pk) => s9pk,
Err(e) => {
tracing::warn!("skipping {}: {e}", path.display());
continue;
}
};
let manifest = s9pk.as_manifest();
// OS version check: package's required OS version must be in server's compat range
if !manifest
.metadata
.os_version
.satisfies(&device_info.os.compat)
{
continue;
}
let hw_req = &manifest.hardware_requirements;
if let Some(hw) = &device_info.hardware {
if !hw_req.is_compatible(hw) {
continue;
}
}
compatible.push((path.clone(), hw_req.clone()));
}
if compatible.is_empty() {
return Err(Error::new(
eyre!(
"no compatible s9pk found for device (arch: {}, os: {})",
device_info
.hardware
.as_ref()
.map(|h| h.arch.to_string())
.unwrap_or_else(|| "unknown".into()),
device_info.os.version,
),
ErrorKind::NotFound,
));
}
// Sort by specificity (most specific first)
compatible.sort_by_key(|(_, req)| req.specificity_desc());
Ok(compatible.into_iter().next().unwrap().0)
}

View File

@@ -154,6 +154,32 @@ pub struct HardwareRequirements {
pub arch: Option<BTreeSet<InternedString>>,
}
impl HardwareRequirements {
/// Returns true if this s9pk's hardware requirements are satisfied by the given hardware.
pub fn is_compatible(&self, hw: &crate::registry::device_info::HardwareInfo) -> bool {
    // An absent requirement never disqualifies the hardware.
    let arch_ok = self
        .arch
        .as_ref()
        .map_or(true, |archs| archs.contains(&hw.arch));
    let ram_ok = self.ram.map_or(true, |min_ram| hw.ram >= min_ram);
    // Device filters can only be evaluated when the server reported its
    // device list; each filter must match at least one device of its class.
    let devices_ok = match &hw.devices {
        Some(devices) => self.device.iter().all(|filter| {
            devices
                .iter()
                .any(|d| d.class() == &*filter.class && filter.matches(d))
        }),
        None => true,
    };
    arch_ok && ram_ok && devices_ok
}
/// returns a value that can be used as a sort key to get most specific requirements first
pub fn specificity_desc(&self) -> (u32, u32, u64) {
(

View File

@@ -251,11 +251,12 @@ async fn create_task(
.get(&task.package_id)
.await
.as_ref()
.filter(|s| s.is_initialized())
{
let Some(prev) = service
let prev = service
.get_action_input(procedure_id.clone(), task.action_id.clone(), Value::Null)
.await?
else {
.await?;
let Some(prev) = prev else {
return Err(Error::new(
eyre!(
"{}",
@@ -278,7 +279,9 @@ async fn create_task(
true
}
} else {
true // update when service is installed
// Service not installed or not yet initialized — assume active.
// Will be retested when service init completes (Service::recheck_tasks).
true
}
}
},

View File

@@ -52,7 +52,7 @@ use crate::util::serde::Pem;
use crate::util::sync::SyncMutex;
use crate::util::tui::choose;
use crate::volume::data_dir;
use crate::{ActionId, CAP_1_KiB, DATA_DIR, HostId, ImageId, PackageId};
use crate::{ActionId, CAP_1_KiB, DATA_DIR, ImageId, PackageId};
pub mod action;
pub mod cli;
@@ -215,6 +215,84 @@ pub struct Service {
seed: Arc<ServiceActorSeed>,
}
impl Service {
/// Whether this service's container runtime has completed initialization
/// (reads the `rt_initialized` flag from the persistent container's state).
pub fn is_initialized(&self) -> bool {
    self.seed.persistent_container.state.borrow().rt_initialized
}
/// Re-evaluate all tasks that reference this service's actions.
/// Called after every service init to update task active state.
///
/// Steps: (1) collect the action ids of every task across all packages that
/// targets this service and carries a stored input; (2) fetch each action's
/// current input from the service; (3) in one DB transaction, update task
/// state against the fresh inputs and stop any package still holding an
/// active critical task.
#[instrument(skip_all)]
async fn recheck_tasks(&self) -> Result<(), Error> {
    let service_id = &self.seed.id;
    let peek = self.seed.ctx.db.peek().await;
    let mut action_input: BTreeMap<ActionId, Value> = BTreeMap::new();
    // (1) Walk every package's task list; keep only tasks addressed to this
    // service that have an input to re-check against.
    let tasks: BTreeSet<_> = peek
        .as_public()
        .as_package_data()
        .as_entries()?
        .into_iter()
        .map(|(_, pde)| {
            Ok(pde
                .as_tasks()
                .as_entries()?
                .into_iter()
                .map(|(_, r)| {
                    let t = r.as_task();
                    Ok::<_, Error>(
                        if t.as_package_id().de()? == *service_id
                            && t.as_input().transpose_ref().is_some()
                        {
                            Some(t.as_action_id().de()?)
                        } else {
                            None
                        },
                    )
                })
                .filter_map_ok(|a| a))
        })
        .flatten_ok()
        .map(|a| a.and_then(|a| a))
        .try_collect()?;
    // (2) Ask the service for each action's current input. Failures are
    // logged and skipped so one bad action doesn't block the rest.
    let procedure_id = Guid::new();
    for action_id in tasks {
        if let Some(input) = self
            .get_action_input(procedure_id.clone(), action_id.clone(), Value::Null)
            .await
            .log_err()
            .flatten()
            .and_then(|i| i.value)
        {
            action_input.insert(action_id, input);
        }
    }
    // (3) Single transaction: refresh task state, then enforce critical tasks.
    self.seed
        .ctx
        .db
        .mutate(|db| {
            // NOTE(review): the trailing `false` flag's meaning isn't visible
            // here — confirm against `update_tasks`'s signature.
            for (action_id, input) in &action_input {
                for (_, pde) in db.as_public_mut().as_package_data_mut().as_entries_mut()? {
                    pde.as_tasks_mut().mutate(|tasks| {
                        Ok(update_tasks(tasks, service_id, action_id, input, false))
                    })?;
                }
            }
            // Any package left with an active critical task is stopped.
            for (_, pde) in db.as_public_mut().as_package_data_mut().as_entries_mut()? {
                if pde
                    .as_tasks()
                    .de()?
                    .into_iter()
                    .any(|(_, t)| t.active && t.task.severity == TaskSeverity::Critical)
                {
                    pde.as_status_info_mut().stop()?;
                }
            }
            Ok(())
        })
        .await
        .result?;
    Ok(())
}
#[instrument(skip_all)]
async fn new(
ctx: RpcContext,
@@ -263,6 +341,7 @@ impl Service {
.persistent_container
.init(service.weak(), procedure_id, init_kind)
.await?;
service.recheck_tasks().await?;
if let Some(recovery_guard) = recovery_guard {
recovery_guard.unmount(true).await?;
}
@@ -489,70 +568,8 @@ impl Service {
)
.await?;
if let Some(mut progress) = progress {
progress.finalization_progress.complete();
progress.progress.complete();
tokio::task::yield_now().await;
}
let peek = ctx.db.peek().await;
let mut action_input: BTreeMap<ActionId, Value> = BTreeMap::new();
let tasks: BTreeSet<_> = peek
.as_public()
.as_package_data()
.as_entries()?
.into_iter()
.map(|(_, pde)| {
Ok(pde
.as_tasks()
.as_entries()?
.into_iter()
.map(|(_, r)| {
let t = r.as_task();
Ok::<_, Error>(
if t.as_package_id().de()? == manifest.id
&& t.as_input().transpose_ref().is_some()
{
Some(t.as_action_id().de()?)
} else {
None
},
)
})
.filter_map_ok(|a| a))
})
.flatten_ok()
.map(|a| a.and_then(|a| a))
.try_collect()?;
for action_id in tasks {
if peek
.as_public()
.as_package_data()
.as_idx(&manifest.id)
.or_not_found(&manifest.id)?
.as_actions()
.contains_key(&action_id)?
{
if let Some(input) = service
.get_action_input(procedure_id.clone(), action_id.clone(), Value::Null)
.await
.log_err()
.flatten()
.and_then(|i| i.value)
{
action_input.insert(action_id, input);
}
}
}
ctx.db
.mutate(|db| {
for (action_id, input) in &action_input {
for (_, pde) in db.as_public_mut().as_package_data_mut().as_entries_mut()? {
pde.as_tasks_mut().mutate(|tasks| {
Ok(update_tasks(tasks, &manifest.id, action_id, input, false))
})?;
}
}
let entry = db
.as_public_mut()
.as_package_data_mut()
@@ -594,6 +611,12 @@ impl Service {
.await
.result?;
if let Some(mut progress) = progress {
progress.finalization_progress.complete();
progress.progress.complete();
tokio::task::yield_now().await;
}
// Trigger manifest callbacks after successful installation
let manifest = service.seed.persistent_container.s9pk.as_manifest();
if let Some(callbacks) = ctx.callbacks.get_service_manifest(&manifest.id) {
@@ -683,14 +706,6 @@ impl Service {
memory_usage: MiB::from_MiB(used),
})
}
pub async fn sync_host(&self, host_id: HostId) -> Result<(), Error> {
self.seed
.persistent_container
.net_service
.sync_host(host_id)
.await
}
}
struct ServiceActorSeed {

View File

@@ -176,8 +176,6 @@ pub struct AttachParams {
pub guid: InternedString,
#[ts(optional)]
pub kiosk: Option<bool>,
pub name: Option<InternedString>,
pub hostname: Option<InternedString>,
}
#[instrument(skip_all)]
@@ -187,8 +185,6 @@ pub async fn attach(
password,
guid: disk_guid,
kiosk,
name,
hostname,
}: AttachParams,
) -> Result<SetupProgress, Error> {
let setup_ctx = ctx.clone();
@@ -242,10 +238,8 @@ pub async fn attach(
}
disk_phase.complete();
let hostname = ServerHostnameInfo::new_opt(name, hostname)?;
let (account, net_ctrl) =
setup_init(&setup_ctx, password, kiosk, hostname, init_phases).await?;
setup_init(&setup_ctx, password, kiosk, None, init_phases).await?;
let rpc_ctx = RpcContext::init(
&setup_ctx.webserver,
@@ -414,7 +408,7 @@ pub async fn setup_data_drive(
#[ts(export)]
pub struct SetupExecuteParams {
guid: InternedString,
password: EncryptedWire,
password: Option<EncryptedWire>,
recovery_source: Option<RecoverySource<EncryptedWire>>,
#[ts(optional)]
kiosk: Option<bool>,
@@ -434,15 +428,16 @@ pub async fn execute(
hostname,
}: SetupExecuteParams,
) -> Result<SetupProgress, Error> {
let password = match password.decrypt(&ctx) {
Some(a) => a,
None => {
return Err(Error::new(
color_eyre::eyre::eyre!("{}", t!("setup.couldnt-decode-startos-password")),
crate::ErrorKind::Unknown,
));
}
};
let password = password
.map(|p| {
p.decrypt(&ctx).ok_or_else(|| {
Error::new(
color_eyre::eyre::eyre!("{}", t!("setup.couldnt-decode-startos-password")),
crate::ErrorKind::Unknown,
)
})
})
.transpose()?;
let recovery = match recovery_source {
Some(RecoverySource::Backup {
target,
@@ -551,7 +546,7 @@ pub async fn shutdown(ctx: SetupContext) -> Result<(), Error> {
pub async fn execute_inner(
ctx: SetupContext,
guid: InternedString,
password: String,
password: Option<String>,
recovery_source: Option<RecoverySource<String>>,
kiosk: Option<bool>,
hostname: Option<ServerHostnameInfo>,
@@ -597,7 +592,22 @@ pub async fn execute_inner(
Some(RecoverySource::Migrate { guid: old_guid }) => {
migrate(&ctx, guid, &old_guid, password, kiosk, hostname, progress).await
}
None => fresh_setup(&ctx, guid, &password, kiosk, hostname, progress).await,
None => {
fresh_setup(
&ctx,
guid,
&password.ok_or_else(|| {
Error::new(
eyre!("{}", t!("setup.password-required")),
ErrorKind::InvalidRequest,
)
})?,
kiosk,
hostname,
progress,
)
.await
}
}
}
@@ -668,7 +678,7 @@ async fn fresh_setup(
async fn recover(
ctx: &SetupContext,
guid: InternedString,
password: String,
password: Option<String>,
recovery_source: BackupTargetFS,
server_id: String,
recovery_password: String,
@@ -696,7 +706,7 @@ async fn migrate(
ctx: &SetupContext,
guid: InternedString,
old_guid: &str,
password: String,
password: Option<String>,
kiosk: Option<bool>,
hostname: Option<ServerHostnameInfo>,
SetupExecuteProgress {
@@ -777,8 +787,7 @@ async fn migrate(
crate::disk::main::export(&old_guid, "/media/startos/migrate").await?;
restore_phase.complete();
let (account, net_ctrl) =
setup_init(&ctx, Some(password), kiosk, hostname, init_phases).await?;
let (account, net_ctrl) = setup_init(&ctx, password, kiosk, hostname, init_phases).await?;
let rpc_ctx = RpcContext::init(
&ctx.webserver,

View File

@@ -20,6 +20,7 @@ use crate::context::{CliContext, RpcContext};
use crate::disk::util::{get_available, get_used};
use crate::logs::{LogSource, LogsParams, SYSTEM_UNIT};
use crate::prelude::*;
use crate::registry::device_info::DeviceInfo;
use crate::rpc_continuations::{Guid, RpcContinuation, RpcContinuations};
use crate::shutdown::Shutdown;
use crate::util::Invoke;
@@ -249,6 +250,64 @@ pub async fn time(ctx: RpcContext, _: Empty) -> Result<TimeInfo, Error> {
})
}
/// Handler for the `server.device-info` RPC: reports this server's OS and
/// (when available) hardware details.
pub async fn device_info(ctx: RpcContext) -> Result<DeviceInfo, Error> {
    DeviceInfo::load(&ctx).await
}
/// Renders a `DeviceInfo` for the CLI: serialized output if a `--format`
/// was requested, otherwise a human-readable two-column table.
pub fn display_device_info(params: WithIoFormat<Empty>, info: DeviceInfo) -> Result<(), Error> {
    use prettytable::*;
    if let Some(format) = params.format {
        return display_serializable(format, info);
    }
    let mut table = Table::new();
    table.add_row(row![br -> "PLATFORM", &*info.os.platform]);
    table.add_row(row![br -> "OS VERSION", info.os.version.to_string()]);
    table.add_row(row![br -> "OS COMPAT", info.os.compat.to_string()]);
    if let Some(lang) = &info.os.language {
        table.add_row(row![br -> "LANGUAGE", &**lang]);
    }
    // Hardware details are optional (the server may not report them).
    if let Some(hw) = &info.hardware {
        table.add_row(row![br -> "ARCH", &*hw.arch]);
        table.add_row(row![br -> "RAM", format_ram(hw.ram)]);
        if let Some(devices) = &hw.devices {
            // One row per lshw device; display adapters also show their
            // driver name when known.
            for dev in devices {
                let (class, desc) = match dev {
                    crate::util::lshw::LshwDevice::Processor(p) => (
                        "PROCESSOR",
                        p.product.as_deref().unwrap_or("unknown").to_string(),
                    ),
                    crate::util::lshw::LshwDevice::Display(d) => (
                        "DISPLAY",
                        format!(
                            "{}{}",
                            d.product.as_deref().unwrap_or("unknown"),
                            d.driver
                                .as_deref()
                                .map(|drv| format!(" ({})", drv))
                                .unwrap_or_default()
                        ),
                    ),
                };
                table.add_row(row![br -> class, desc]);
            }
        }
    }
    table.print_tty(false)?;
    Ok(())
}
/// Formats a byte count as a human-readable RAM size with one decimal place:
/// "X.Y GiB" for 1 GiB and above, otherwise "X.Y MiB".
fn format_ram(bytes: u64) -> String {
    const GIB: u64 = 1024 * 1024 * 1024;
    const MIB: u64 = 1024 * 1024;
    // Anything below 1 GiB (including 0) is rendered in MiB.
    let (divisor, unit) = if bytes >= GIB { (GIB, "GiB") } else { (MIB, "MiB") };
    format!("{:.1} {}", bytes as f64 / divisor as f64, unit)
}
/// Builds the `server.logs` handler tree, sourcing log lines from the
/// StartOS systemd unit (`SYSTEM_UNIT`).
pub fn logs<C: Context + AsRef<RpcContinuations>>() -> ParentHandler<C, LogsParams> {
    crate::logs::logs(|_: &C, _| async { Ok(LogSource::Unit(SYSTEM_UNIT)) })
}
@@ -1049,20 +1108,36 @@ async fn get_disk_info() -> Result<MetricsDisk, Error> {
})
}
/// Transport-security mode used when connecting to an SMTP relay.
#[derive(
    Debug, Clone, Copy, Default, serde::Serialize, serde::Deserialize, TS, clap::ValueEnum,
)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub enum SmtpSecurity {
    /// Opportunistic upgrade to TLS via STARTTLS (the default).
    #[default]
    Starttls,
    /// Implicit TLS for the whole connection (TLS wrapper mode).
    Tls,
}
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, Parser, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct SmtpValue {
#[arg(long, help = "help.arg.smtp-server")]
pub server: String,
#[arg(long, help = "help.arg.smtp-host")]
#[serde(alias = "server")]
pub host: String,
#[arg(long, help = "help.arg.smtp-port")]
pub port: u16,
#[arg(long, help = "help.arg.smtp-from")]
pub from: String,
#[arg(long, help = "help.arg.smtp-login")]
pub login: String,
#[arg(long, help = "help.arg.smtp-username")]
#[serde(alias = "login")]
pub username: String,
#[arg(long, help = "help.arg.smtp-password")]
pub password: Option<String>,
#[arg(long, help = "help.arg.smtp-security")]
#[serde(default)]
pub security: SmtpSecurity,
}
pub async fn set_system_smtp(ctx: RpcContext, smtp: SmtpValue) -> Result<(), Error> {
let smtp = Some(smtp);
@@ -1121,47 +1196,63 @@ pub async fn set_ifconfig_url(
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct TestSmtpParams {
#[arg(long, help = "help.arg.smtp-server")]
pub server: String,
#[arg(long, help = "help.arg.smtp-host")]
pub host: String,
#[arg(long, help = "help.arg.smtp-port")]
pub port: u16,
#[arg(long, help = "help.arg.smtp-from")]
pub from: String,
#[arg(long, help = "help.arg.smtp-to")]
pub to: String,
#[arg(long, help = "help.arg.smtp-login")]
pub login: String,
#[arg(long, help = "help.arg.smtp-username")]
pub username: String,
#[arg(long, help = "help.arg.smtp-password")]
pub password: String,
#[arg(long, help = "help.arg.smtp-security")]
#[serde(default)]
pub security: SmtpSecurity,
}
pub async fn test_smtp(
_: RpcContext,
TestSmtpParams {
server,
host,
port,
from,
to,
login,
username,
password,
security,
}: TestSmtpParams,
) -> Result<(), Error> {
use lettre::message::header::ContentType;
use lettre::transport::smtp::authentication::Credentials;
use lettre::transport::smtp::client::{Tls, TlsParameters};
use lettre::{AsyncSmtpTransport, AsyncTransport, Message, Tokio1Executor};
AsyncSmtpTransport::<Tokio1Executor>::relay(&server)?
.port(port)
.credentials(Credentials::new(login, password))
.build()
.send(
Message::builder()
.from(from.parse()?)
.to(to.parse()?)
.subject("StartOS Test Email")
.header(ContentType::TEXT_PLAIN)
.body("This is a test email sent from your StartOS Server".to_owned())?,
)
.await?;
let creds = Credentials::new(username, password);
let message = Message::builder()
.from(from.parse()?)
.to(to.parse()?)
.subject("StartOS Test Email")
.header(ContentType::TEXT_PLAIN)
.body("This is a test email sent from your StartOS Server".to_owned())?;
let transport = match security {
SmtpSecurity::Starttls => AsyncSmtpTransport::<Tokio1Executor>::relay(&host)?
.port(port)
.credentials(creds)
.build(),
SmtpSecurity::Tls => {
let tls = TlsParameters::new(host.clone())?;
AsyncSmtpTransport::<Tokio1Executor>::relay(&host)?
.port(port)
.tls(Tls::Wrapper(tls))
.credentials(creds)
.build()
}
};
transport.send(message).await?;
Ok(())
}

View File

@@ -20,7 +20,7 @@ use ts_rs::TS;
use crate::context::CliContext;
use crate::hostname::ServerHostname;
use crate::net::ssl::{SANInfo, root_ca_start_time};
use crate::net::tls::TlsHandler;
use crate::net::tls::{TlsHandler, TlsHandlerAction};
use crate::net::web_server::Accept;
use crate::prelude::*;
use crate::tunnel::auth::SetPasswordParams;
@@ -59,7 +59,7 @@ where
&'a mut self,
_: &'a ClientHello<'a>,
_: &'a <A as Accept>::Metadata,
) -> Option<ServerConfig> {
) -> Option<TlsHandlerAction> {
let cert_info = self
.db
.peek()
@@ -88,7 +88,7 @@ where
.log_err()?;
cfg.alpn_protocols
.extend([b"http/1.1".into(), b"h2".into()]);
Some(cfg)
Some(TlsHandlerAction::Tls(cfg))
}
}

View File

@@ -166,6 +166,9 @@ impl VersionT for Version {
// Rebuild from actual assigned ports in all bindings
migrate_available_ports(db);
// Migrate SMTP: rename server->host, login->username, add security field
migrate_smtp(db);
// Delete ui.name (moved to serverInfo.name)
if let Some(ui) = db
.get_mut("public")
@@ -269,6 +272,25 @@ fn migrate_available_ports(db: &mut Value) {
}
}
/// Migrates the stored SMTP settings at `public.serverInfo.smtp`:
/// renames `server` -> `host` and `login` -> `username`, and backfills the
/// `security` field with `"starttls"` when absent. No-op if the SMTP object
/// is missing or not an object.
fn migrate_smtp(db: &mut Value) {
    let smtp = db
        .get_mut("public")
        .and_then(|public| public.get_mut("serverInfo"))
        .and_then(|info| info.get_mut("smtp"))
        .and_then(|value| value.as_object_mut());
    if let Some(smtp) = smtp {
        // Field renames: move each old key's value under its new name.
        for (old, new) in [("server", "host"), ("login", "username")] {
            if let Some(value) = smtp.remove(old) {
                smtp.insert(new.into(), value);
            }
        }
        // Backfill the new field without clobbering an existing value.
        if !smtp.contains_key("security") {
            smtp.insert("security".into(), json!("starttls"));
        }
    }
}
fn denormalize_hostname(s: &str) -> String {
let mut cap = true;
s.chars()

View File

@@ -7,10 +7,12 @@ cd "$(dirname "${BASH_SOURCE[0]}")/.."
PROJECT=${PROJECT:-"startos"}
BASENAME=${BASENAME:-"$(./build/env/basename.sh)"}
VERSION=${VERSION:-$(cat ./build/env/VERSION.txt)}
if [ "$PLATFORM" = "x86_64" ] || [ "$PLATFORM" = "x86_64-nonfree" ]; then
if [ "$PLATFORM" = "x86_64" ] || [ "$PLATFORM" = "x86_64-nonfree" ] || [ "$PLATFORM" = "x86_64-nvidia" ]; then
DEB_ARCH=amd64
elif [ "$PLATFORM" = "aarch64" ] || [ "$PLATFORM" = "aarch64-nonfree" ] || [ "$PLATFORM" = "raspberrypi" ]; then
elif [ "$PLATFORM" = "aarch64" ] || [ "$PLATFORM" = "aarch64-nonfree" ] || [ "$PLATFORM" = "aarch64-nvidia" ] || [ "$PLATFORM" = "raspberrypi" ] || [ "$PLATFORM" = "rockchip64" ]; then
DEB_ARCH=arm64
elif [ "$PLATFORM" = "riscv64" ] || [ "$PLATFORM" = "riscv64-nonfree" ]; then
DEB_ARCH=riscv64
else
DEB_ARCH="$PLATFORM"
fi

View File

@@ -23,15 +23,6 @@ Pending tasks for AI agents. Remove items when completed.
other crate types. Extracting them requires either moving the type definitions into the sub-crate
(and importing them back into `start-os`) or restructuring to share a common types crate.
- [ ] Make `SetupExecuteParams.password` optional in the backend - @dr-bonez
**Problem**: In `core/src/setup.rs`, `SetupExecuteParams` has `password: EncryptedWire` (non-nullable),
but the frontend needs to send `null` for restore/transfer flows where the user keeps their existing
password. The `AttachParams` type correctly uses `Option<EncryptedWire>` for this purpose.
**Fix**: Change `password: EncryptedWire` to `password: Option<EncryptedWire>` in `SetupExecuteParams`
and handle the `None` case in the `execute` handler (similar to how `attach` handles it).
- [ ] Auto-configure port forwards via UPnP/NAT-PMP/PCP - @dr-bonez
**Goal**: When a binding is marked public, automatically configure port forwards on the user's router
@@ -39,10 +30,11 @@ Pending tasks for AI agents. Remove items when completed.
displaying manual instructions (the port forward mapping from patch-db) when auto-configuration is
unavailable or fails.
- [ ] Decouple createTask from service running state - @dr-bonez
- [ ] Use TLS-ALPN challenges for check-port when addSsl - @dr-bonez
**Problem**: `createTask` currently depends on the service being in a running state.
**Problem**: The `check_port` RPC in `core/src/net/gateway.rs` currently uses an external HTTP
service (`ifconfig_url`) to verify port reachability. This doesn't check whether the port is forwarded to the right place, just that it's open. there's nothing we can do about this if it's a raw forward, but if it goes through the ssl proxy we can do a better verification.
**Goal**: The `input-not-matches` handler in StartOS should queue the task, check it once the
service is ready, then clear it if it matches. This allows tasks to be created regardless of
whether the service is currently running.
**Goal**: When a binding has `addSsl` enabled, use TLS-ALPN-01 challenges to verify port
reachability instead of (or in addition to) the plain TCP check. This more accurately validates
that the SSL port is properly configured and reachable.

View File

@@ -27,16 +27,33 @@ bundle: baseDist dist | test fmt
base/lib/exver/exver.ts: base/node_modules base/lib/exver/exver.pegjs
cd base && npm run peggy
baseDist: $(PACKAGE_TS_FILES) $(BASE_TS_FILES) base/package.json base/node_modules base/README.md base/LICENSE
baseDist: $(PACKAGE_TS_FILES) $(BASE_TS_FILES) base/package.json base/node_modules base/README.md base/LICENSE
(cd base && npm run tsc)
# Copy hand-written .js/.d.ts pairs (no corresponding .ts source) into the output.
cd base/lib && find . -name '*.js' | while read f; do \
base="$${f%.js}"; \
if [ -f "$$base.d.ts" ] && [ ! -f "$$base.ts" ]; then \
mkdir -p "../../baseDist/$$(dirname "$$f")"; \
cp "$$f" "../../baseDist/$$f"; \
cp "$$base.d.ts" "../../baseDist/$$base.d.ts"; \
fi; \
done
rsync -ac base/node_modules baseDist/
cp base/package.json baseDist/package.json
cp base/README.md baseDist/README.md
cp base/LICENSE baseDist/LICENSE
touch baseDist
dist: $(PACKAGE_TS_FILES) $(BASE_TS_FILES) package/package.json package/.npmignore package/node_modules package/README.md package/LICENSE
dist: $(PACKAGE_TS_FILES) $(BASE_TS_FILES) package/package.json package/.npmignore package/node_modules package/README.md package/LICENSE
(cd package && npm run tsc)
cd base/lib && find . -name '*.js' | while read f; do \
base="$${f%.js}"; \
if [ -f "$$base.d.ts" ] && [ ! -f "$$base.ts" ]; then \
mkdir -p "../../dist/base/lib/$$(dirname "$$f")"; \
cp "$$f" "../../dist/base/lib/$$f"; \
cp "$$base.d.ts" "../../dist/base/lib/$$base.d.ts"; \
fi; \
done
rsync -ac package/node_modules dist/
cp package/.npmignore dist/.npmignore
cp package/package.json dist/package.json
@@ -70,7 +87,7 @@ base/node_modules: base/package-lock.json
node_modules: package/node_modules base/node_modules
publish: bundle package/package.json package/README.md package/LICENSE
cd dist && npm publish --access=public
cd dist && npm publish --access=public --tag=latest
link: bundle
cd dist && npm link

View File

@@ -3,22 +3,83 @@ import { Value } from './value'
import { _ } from '../../../util'
import { Effects } from '../../../Effects'
import { z } from 'zod'
import { zodDeepPartial } from 'zod-deep-partial'
import { DeepPartial } from '../../../types'
import { InputSpecTools, createInputSpecTools } from './inputSpecTools'
/** Options passed to a lazy builder function when resolving dynamic form field values. */
export type LazyBuildOptions<Type> = {
/** The effects interface for runtime operations (e.g. reading files, querying state). */
effects: Effects
/** Previously saved form data to pre-fill the form with, or `null` for fresh creation. */
prefill: DeepPartial<Type> | null
}
/**
* A function that lazily produces a value, potentially using effects and prefill data.
* Used by `dynamic*` variants of {@link Value} to compute form field options at runtime.
*/
export type LazyBuild<ExpectedOut, Type> = (
options: LazyBuildOptions<Type>,
) => Promise<ExpectedOut> | ExpectedOut
/**
* Defines which keys to keep when filtering an InputSpec.
* Use `true` to keep a field as-is, or a nested object to filter sub-fields of an object-typed field.
*/
export type FilterKeys<F> = {
[K in keyof F]?: F[K] extends Record<string, any>
? boolean | FilterKeys<F[K]>
: boolean
}
type RetainKey<T, F, Default extends boolean> = {
[K in keyof T]: K extends keyof F
? F[K] extends false
? never
: K
: Default extends true
? K
: never
}[keyof T]
/**
* Computes the resulting type after applying a {@link FilterKeys} shape to a type.
*/
export type ApplyFilter<T, F, Default extends boolean = false> = {
[K in RetainKey<T, F, Default>]: K extends keyof F
? true extends F[K]
? F[K] extends true
? T[K]
: T[K] | undefined
: T[K] extends Record<string, any>
? F[K] extends FilterKeys<T[K]>
? ApplyFilter<T[K], F[K]>
: undefined
: undefined
: Default extends true
? T[K]
: undefined
}
/**
* Computes the union of all valid key-path tuples through a nested type.
* Each tuple represents a path from root to a field, recursing into object-typed sub-fields.
*/
export type KeyPaths<T> = {
[K in keyof T & string]: T[K] extends any[]
? [K]
: T[K] extends Record<string, any>
? [K] | [K, ...KeyPaths<T[K]>]
: [K]
}[keyof T & string]
/** Extracts the runtime type from an {@link InputSpec}. */
// prettier-ignore
export type ExtractInputSpecType<A extends InputSpec<Record<string, any>, any>> =
export type ExtractInputSpecType<A extends InputSpec<Record<string, any>, any>> =
A extends InputSpec<infer B, any> ? B :
never
/** Extracts the static validation type from an {@link InputSpec}. */
export type ExtractInputSpecStaticValidatedAs<
A extends InputSpec<any, Record<string, any>>,
> = A extends InputSpec<any, infer B> ? B : never
@@ -27,10 +88,12 @@ export type ExtractInputSpecStaticValidatedAs<
// A extends Record<string, any> | InputSpec<Record<string, any>>,
// > = A extends InputSpec<infer B> ? DeepPartial<B> : DeepPartial<A>
/** Maps an object type to a record of {@link Value} entries for use with `InputSpec.of`. */
export type InputSpecOf<A extends Record<string, any>> = {
[K in keyof A]: Value<A[K]>
}
/** A value that is either directly provided or lazily computed via a {@link LazyBuild} function. */
export type MaybeLazyValues<A, T> = LazyBuild<A, T> | A
/**
* InputSpecs are the specs that are used by the os input specification form for this service.
@@ -100,6 +163,13 @@ export class InputSpec<
) {}
public _TYPE: Type = null as any as Type
public _PARTIAL: DeepPartial<Type> = null as any as DeepPartial<Type>
public readonly partialValidator: z.ZodType<DeepPartial<StaticValidatedAs>> =
zodDeepPartial(this.validator) as any
/**
* Builds the runtime form specification and combined Zod validator from this InputSpec's fields.
*
* @returns An object containing the resolved `spec` (field specs keyed by name) and a combined `validator`
*/
async build<OuterType>(options: LazyBuildOptions<OuterType>): Promise<{
spec: {
[K in keyof Type]: ValueSpec
@@ -123,29 +193,11 @@ export class InputSpec<
}
}
addKey<Key extends string, V extends Value<any, any, any>>(
key: Key,
build: V | ((tools: InputSpecTools<Type>) => V),
): InputSpec<
Type & { [K in Key]: V extends Value<infer T, any, any> ? T : never },
StaticValidatedAs & {
[K in Key]: V extends Value<any, infer S, any> ? S : never
}
> {
const value =
build instanceof Function ? build(createInputSpecTools<Type>()) : build
const newSpec = { ...this.spec, [key]: value } as any
const newValidator = z.object(
Object.fromEntries(
Object.entries(newSpec).map(([k, v]) => [
k,
(v as Value<any>).validator,
]),
),
)
return new InputSpec(newSpec, newValidator as any)
}
/**
* Adds multiple fields to this spec at once, returning a new `InputSpec` with extended types.
*
* @param build - A record of {@link Value} entries, or a function receiving typed tools that returns one
*/
add<AddSpec extends Record<string, Value<any, any, any>>>(
build: AddSpec | ((tools: InputSpecTools<Type>) => AddSpec),
): InputSpec<
@@ -174,6 +226,258 @@ export class InputSpec<
return new InputSpec(newSpec, newValidator as any)
}
/**
 * Returns a new InputSpec containing only the specified keys.
 * Use `true` to keep a field as-is, or a nested object to filter sub-fields of
 * object-typed fields. With `keepByDefault`, unmentioned keys are retained and
 * only keys explicitly set to `false` are dropped.
 *
 * @param keys - Which fields to keep (`true`, `false`, or a nested filter object)
 * @param keepByDefault - When `true`, keys absent from `keys` are kept instead of dropped
 *
 * @example
 * ```ts
 * const full = InputSpec.of({
 *   name: Value.text({ name: 'Name', required: true, default: null }),
 *   settings: Value.object({ name: 'Settings' }, InputSpec.of({
 *     debug: Value.toggle({ name: 'Debug', default: false }),
 *     port: Value.number({ name: 'Port', required: true, default: 8080, integer: true }),
 *   })),
 * })
 * const filtered = full.filter({ name: true, settings: { debug: true } })
 * ```
 */
filter<F extends FilterKeys<Type>, Default extends boolean = false>(
  keys: F,
  keepByDefault?: Default,
): InputSpec<
  ApplyFilter<Type, F, Default> & ApplyFilter<StaticValidatedAs, F, Default>,
  ApplyFilter<StaticValidatedAs, F, Default>
> {
  const kept: Record<string, Value<any>> = {}
  for (const key of Object.keys(this.spec)) {
    const field = (this.spec as any)[key] as Value<any> | undefined
    if (!field) continue
    const selector = (keys as any)[key]
    if (selector === true) {
      // Explicitly kept, unchanged.
      kept[key] = field
    } else if (typeof selector === 'object' && selector !== null) {
      // Nested filter: recurse into the object's inner spec when available;
      // otherwise keep the field as-is (nothing to filter inside).
      const nested = field._objectSpec
      kept[key] = nested
        ? Value.object(
            nested.params,
            nested.inputSpec.filter(selector, keepByDefault),
          )
        : field
    } else if (keepByDefault && selector !== false) {
      // keepByDefault mode: retain anything not explicitly excluded.
      kept[key] = field
    }
  }
  const shape: Record<string, any> = {}
  for (const [k, v] of Object.entries(kept)) shape[k] = v.validator
  return new InputSpec(kept as any, z.object(shape) as any) as any
}
/**
 * Returns a new InputSpec with the specified keys disabled.
 * Use `true` to disable a field, or a nested object to disable sub-fields of
 * object-typed fields. All fields remain in the spec — disabled fields simply
 * cannot be edited by the user.
 *
 * @param keys - Which fields to disable, using the same shape as {@link FilterKeys}
 * @param message - The reason the fields are disabled, displayed to the user
 *
 * @example
 * ```ts
 * const spec = InputSpec.of({
 *   name: Value.text({ name: 'Name', required: true, default: null }),
 *   settings: Value.object({ name: 'Settings' }, InputSpec.of({
 *     debug: Value.toggle({ name: 'Debug', default: false }),
 *     port: Value.number({ name: 'Port', required: true, default: 8080, integer: true }),
 *   })),
 * })
 * const disabled = spec.disable({ name: true, settings: { debug: true } }, 'Managed by the system')
 * ```
 */
disable(
  keys: FilterKeys<Type>,
  message: string,
): InputSpec<Type, StaticValidatedAs> {
  const result: Record<string, Value<any>> = {}
  for (const key in this.spec) {
    const field = (this.spec as any)[key] as Value<any>
    const selector = (keys as any)[key]
    if (!selector) {
      // Not mentioned (or falsy): left editable, unchanged.
      result[key] = field
    } else if (selector === true) {
      result[key] = field.withDisabled(message)
    } else if (typeof selector === 'object') {
      // Nested selector: recurse into the object's inner spec when available;
      // a non-object field is disabled wholesale.
      const nested = field._objectSpec
      result[key] = nested
        ? Value.object(
            nested.params,
            nested.inputSpec.disable(selector, message),
          )
        : field.withDisabled(message)
    }
  }
  const shape: Record<string, any> = {}
  for (const [k, v] of Object.entries(result)) shape[k] = v.validator
  return new InputSpec(result as any, z.object(shape) as any) as any
}
/**
* Resolves a key path to its corresponding display name path.
* Each key is mapped to the `name` property of its built {@link ValueSpec}.
* Recurses into `Value.object` sub-specs for nested paths.
*
* @param path - Typed tuple of field keys (e.g. `["settings", "debug"]`)
* @param options - Build options providing effects and prefill data
* @returns Array of display names (e.g. `["Settings", "Debug"]`)
*/
async namePath<OuterType>(
path: KeyPaths<Type>,
options: LazyBuildOptions<OuterType>,
): Promise<string[]> {
if (path.length === 0) return []
const [key, ...rest] = path as [string, ...string[]]
const value = (this.spec as any)[key] as Value<any> | undefined
if (!value) return []
const built = await value.build(options as any)
const name =
'name' in built.spec ? (built.spec as { name: string }).name : key
if (rest.length === 0) return [name]
const objectMeta = value._objectSpec
if (objectMeta) {
const innerNames = await objectMeta.inputSpec.namePath(
rest as any,
options,
)
return [name, ...innerNames]
}
return [name]
}
/**
 * Resolves a key path to the description of the target field.
 * Recurses into `Value.object` sub-specs for nested paths.
 *
 * @param path - Typed tuple of field keys (e.g. `["settings", "debug"]`)
 * @param options - Build options providing effects and prefill data
 * @returns The description string, or `null` if the field has no description or was not found
 */
async description<OuterType>(
  path: KeyPaths<Type>,
  options: LazyBuildOptions<OuterType>,
): Promise<string | null> {
  const segments = path as [string, ...string[]]
  if (segments.length === 0) return null
  const [head, ...tail] = segments
  const field = (this.spec as any)[head] as Value<any> | undefined
  if (!field) return null
  if (tail.length > 0) {
    // Deeper path: only object-typed fields can be descended into.
    const nested = field._objectSpec
    return nested ? nested.inputSpec.description(tail as any, options) : null
  }
  // Terminal segment: build the field and read its description, if any.
  const built = await field.build(options as any)
  if ('description' in built.spec) {
    return (built.spec as { description: string | null }).description
  }
  return null
}
/**
 * Returns a new InputSpec filtered to only include keys present in the given partial object.
 * For nested `Value.object` fields, recurses into the partial value to filter sub-fields.
 * Keys in `partial` that do not exist on this spec are ignored.
 *
 * @param partial - A deep-partial object whose defined keys determine which fields to keep
 */
filterFromPartial(
  partial: DeepPartial<Type>,
): InputSpec<
  DeepPartial<Type> & DeepPartial<StaticValidatedAs>,
  DeepPartial<StaticValidatedAs>
> {
  const kept: Record<string, Value<any>> = {}
  for (const key of Object.keys(partial)) {
    const field = (this.spec as any)[key] as Value<any> | undefined
    if (field === undefined) continue
    const nested = field._objectSpec
    const sub = (partial as any)[key]
    if (nested && typeof sub === 'object' && sub !== null) {
      // Object field with an object partial: filter the inner spec recursively.
      kept[key] = Value.object(
        nested.params,
        nested.inputSpec.filterFromPartial(sub),
      )
    } else {
      kept[key] = field
    }
  }
  const shape: Record<string, any> = {}
  for (const [k, v] of Object.entries(kept)) shape[k] = v.validator
  return new InputSpec(kept as any, z.object(shape) as any) as any
}
/**
 * Returns a new InputSpec with fields disabled based on which keys are present in the given partial object.
 * For nested `Value.object` fields, recurses into the partial value to disable sub-fields.
 * All fields remain in the spec — disabled fields simply cannot be edited by the user.
 *
 * @param partial - A deep-partial object whose defined keys determine which fields to disable
 * @param message - The reason the fields are disabled, displayed to the user
 */
disableFromPartial(
  partial: DeepPartial<Type>,
  message: string,
): InputSpec<Type, StaticValidatedAs> {
  const result: Record<string, Value<any>> = {}
  for (const key in this.spec) {
    const field = (this.spec as any)[key] as Value<any>
    if (!(key in (partial as any))) {
      // Absent from the partial: left editable, unchanged.
      result[key] = field
      continue
    }
    const nested = field._objectSpec
    const sub = (partial as any)[key]
    if (nested && typeof sub === 'object' && sub !== null) {
      // Object field with an object partial: disable the inner spec recursively.
      result[key] = Value.object(
        nested.params,
        nested.inputSpec.disableFromPartial(sub, message),
      )
    } else {
      result[key] = field.withDisabled(message)
    }
  }
  const shape: Record<string, any> = {}
  for (const [k, v] of Object.entries(result)) shape[k] = v.validator
  return new InputSpec(result as any, z.object(shape) as any) as any
}
/**
* Creates an `InputSpec` from a plain record of {@link Value} entries.
*
* @example
* ```ts
* const spec = InputSpec.of({
* username: Value.text({ name: 'Username', required: true, default: null }),
* verbose: Value.toggle({ name: 'Verbose Logging', default: false }),
* })
* ```
*/
static of<Spec extends Record<string, Value<any, any>>>(spec: Spec) {
const validator = z.object(
Object.fromEntries(

View File

@@ -9,6 +9,14 @@ import {
} from '../inputSpecTypes'
import { z } from 'zod'
/**
* Builder class for defining list-type form fields.
*
* A list presents an interface to add, remove, and reorder items. Items can be
* either text strings ({@link List.text}) or structured objects ({@link List.obj}).
*
* Used with {@link Value.list} to include a list field in an {@link InputSpec}.
*/
export class List<
Type extends StaticValidatedAs,
StaticValidatedAs = Type,
@@ -26,6 +34,12 @@ export class List<
) {}
readonly _TYPE: Type = null as any
/**
* Creates a list of text input items.
*
* @param a - List-level options (name, description, min/max length, defaults)
* @param aSpec - Item-level options (patterns, input mode, masking, generation)
*/
static text(
a: {
name: string
@@ -97,6 +111,7 @@ export class List<
}, validator)
}
/** Like {@link List.text} but options are resolved lazily at runtime via a builder function. */
static dynamicText<OuterType = unknown>(
getA: LazyBuild<
{
@@ -150,6 +165,12 @@ export class List<
}, validator)
}
/**
* Creates a list of structured object items, each defined by a nested {@link InputSpec}.
*
* @param a - List-level options (name, description, min/max length)
* @param aSpec - Item-level options (the nested spec, display expression, uniqueness constraint)
*/
static obj<
Type extends StaticValidatedAs,
StaticValidatedAs extends Record<string, any>,

View File

@@ -15,12 +15,15 @@ import { _, once } from '../../../util'
import { z } from 'zod'
import { DeepPartial } from '../../../types'
/** Zod schema for a file upload result — validates `{ path, commitment: { hash, size } }`. */
export const fileInfoParser = z.object({
path: z.string(),
commitment: z.object({ hash: z.string(), size: z.number() }),
})
/** The parsed result of a file upload, containing the file path and its content commitment (hash + size). */
export type FileInfo = z.infer<typeof fileInfoParser>
/** Conditional type: returns `T` if `Required` is `true`, otherwise `T | null`. */
export type AsRequired<T, Required extends boolean> = Required extends true
? T
: T | null
@@ -37,6 +40,19 @@ function asRequiredParser<Type, Input extends { required: boolean }>(
return parser.nullable() as any
}
/**
* Core builder class for defining a single form field in a service configuration spec.
*
* Each static factory method (e.g. `Value.text()`, `Value.toggle()`, `Value.select()`) creates
* a typed `Value` instance representing a specific field type. Dynamic variants (e.g. `Value.dynamicText()`)
* allow the field options to be computed lazily at runtime.
*
* Use with {@link InputSpec} to compose complete form specifications.
*
* @typeParam Type - The runtime type this field produces when filled in
* @typeParam StaticValidatedAs - The compile-time validated type (usually same as Type)
* @typeParam OuterType - The parent form's type context (used by dynamic variants)
*/
export class Value<
Type extends StaticValidatedAs,
StaticValidatedAs = Type,
@@ -54,6 +70,11 @@ export class Value<
) {}
public _TYPE: Type = null as any as Type
public _PARTIAL: DeepPartial<Type> = null as any as DeepPartial<Type>
/** @internal Used by {@link InputSpec.filter} to support nested filtering of object-typed fields. */
_objectSpec?: {
inputSpec: InputSpec<any, any>
params: { name: string; description?: string | null }
}
/**
* @description Displays a boolean toggle to enable/disable
@@ -99,6 +120,7 @@ export class Value<
validator,
)
}
/** Like {@link Value.toggle} but options are resolved lazily at runtime via a builder function. */
static dynamicToggle<OuterType = unknown>(
a: LazyBuild<
{
@@ -225,6 +247,7 @@ export class Value<
validator,
)
}
/** Like {@link Value.text} but options are resolved lazily at runtime via a builder function. */
static dynamicText<Required extends boolean, OuterType = unknown>(
getA: LazyBuild<
{
@@ -345,6 +368,7 @@ export class Value<
return { spec: built, validator }
}, validator)
}
/** Like {@link Value.textarea} but options are resolved lazily at runtime via a builder function. */
static dynamicTextarea<Required extends boolean, OuterType = unknown>(
getA: LazyBuild<
{
@@ -467,6 +491,7 @@ export class Value<
validator,
)
}
/** Like {@link Value.number} but options are resolved lazily at runtime via a builder function. */
static dynamicNumber<Required extends boolean, OuterType = unknown>(
getA: LazyBuild<
{
@@ -562,6 +587,7 @@ export class Value<
)
}
/** Like {@link Value.color} but options are resolved lazily at runtime via a builder function. */
static dynamicColor<Required extends boolean, OuterType = unknown>(
getA: LazyBuild<
{
@@ -659,6 +685,7 @@ export class Value<
validator,
)
}
/** Like {@link Value.datetime} but options are resolved lazily at runtime via a builder function. */
static dynamicDatetime<Required extends boolean, OuterType = unknown>(
getA: LazyBuild<
{
@@ -769,6 +796,7 @@ export class Value<
validator,
)
}
/** Like {@link Value.select} but options are resolved lazily at runtime via a builder function. */
static dynamicSelect<
Values extends Record<string, string>,
OuterType = unknown,
@@ -889,6 +917,7 @@ export class Value<
validator,
)
}
/** Like {@link Value.multiselect} but options are resolved lazily at runtime via a builder function. */
static dynamicMultiselect<
Values extends Record<string, string>,
OuterType = unknown,
@@ -963,7 +992,7 @@ export class Value<
},
spec: InputSpec<Type, StaticValidatedAs>,
) {
return new Value<Type, StaticValidatedAs>(async (options) => {
const value = new Value<Type, StaticValidatedAs>(async (options) => {
const built = await spec.build(options as any)
return {
spec: {
@@ -976,7 +1005,15 @@ export class Value<
validator: built.validator,
}
}, spec.validator)
value._objectSpec = { inputSpec: spec, params: a }
return value
}
/**
* Displays a file upload input field.
*
* @param a.extensions - Allowed file extensions (e.g. `[".pem", ".crt"]`)
* @param a.required - Whether a file must be selected
*/
static file<Required extends boolean>(a: {
name: string
description?: string | null
@@ -1000,6 +1037,7 @@ export class Value<
asRequiredParser(fileInfoParser, a),
)
}
/** Like {@link Value.file} but options are resolved lazily at runtime via a builder function. */
static dynamicFile<Required extends boolean, OuterType = unknown>(
a: LazyBuild<
{
@@ -1102,6 +1140,7 @@ export class Value<
}
}, a.variants.validator)
}
/** Like {@link Value.union} but options (including which variants are available) are resolved lazily at runtime. */
static dynamicUnion<
VariantValues extends {
[K in string]: {
@@ -1123,6 +1162,7 @@ export class Value<
OuterType
>,
): Value<UnionRes<VariantValues>, UnionRes<VariantValues>, OuterType>
/** Like {@link Value.union} but options are resolved lazily, with an explicit static validator type. */
static dynamicUnion<
StaticVariantValues extends {
[K in string]: {
@@ -1300,6 +1340,31 @@ export class Value<
}, z.any())
}
/**
 * Returns a new Value that produces the same field spec but with `disabled` set to the given message.
 * The field remains in the form but cannot be edited by the user.
 * The `_objectSpec` metadata is carried over so nested filtering/disabling still works.
 *
 * @param message - The reason the field is disabled, displayed to the user
 */
withDisabled(message: string): Value<Type, StaticValidatedAs, OuterType> {
  const source = this
  const wrapped = new Value<Type, StaticValidatedAs, OuterType>(
    async (options) => {
      // Build the underlying spec, then overlay the disabled marker.
      const built = await source.build(options)
      return {
        spec: { ...built.spec, disabled: message } as ValueSpec,
        validator: built.validator,
      }
    },
    this.validator,
  )
  wrapped._objectSpec = this._objectSpec
  return wrapped
}
/**
* Transforms the validated output value using a mapping function.
* The form field itself remains unchanged, but the value is transformed after validation.
*
* @param fn - A function to transform the validated value
*/
map<U>(fn: (value: StaticValidatedAs) => U): Value<U, U, OuterType> {
return new Value<U, U, OuterType>(async (options) => {
const built = await this.build(options)

View File

@@ -8,6 +8,11 @@ import {
} from './inputSpec'
import { z } from 'zod'
/**
* The runtime result type of a discriminated union form field.
* Contains `selection` (the chosen variant key), `value` (the variant's form data),
* and optionally `other` (partial data from previously selected variants).
*/
export type UnionRes<
VariantValues extends {
[K in string]: {
@@ -28,6 +33,7 @@ export type UnionRes<
}
}[K]
/** Like {@link UnionRes} but using the static (Zod-inferred) validated types. */
export type UnionResStaticValidatedAs<
VariantValues extends {
[K in string]: {
@@ -118,6 +124,11 @@ export class Variants<
>,
) {}
readonly _TYPE: UnionRes<VariantValues> = null as any
/**
* Creates a `Variants` instance from a record mapping variant keys to their display name and form spec.
*
* @param a - A record of `{ name: string, spec: InputSpec }` entries, one per variant
*/
static of<
VariantValues extends {
[K in string]: {

View File

@@ -5,42 +5,124 @@ import { Value } from './builder/value'
import { Variants } from './builder/variants'
/**
* Base SMTP settings, to be used by StartOS for system wide SMTP
* Creates an SMTP field spec with provider-specific defaults pre-filled.
*/
export const customSmtp: InputSpec<SmtpValue> = InputSpec.of<
InputSpecOf<SmtpValue>
>({
server: Value.text({
name: 'SMTP Server',
required: true,
default: null,
}),
port: Value.number({
name: 'Port',
required: true,
default: 587,
min: 1,
max: 65535,
integer: true,
}),
from: Value.text({
name: 'From Address',
required: true,
default: null,
placeholder: 'Example Name <test@example.com>',
inputmode: 'email',
patterns: [Patterns.emailWithName],
}),
login: Value.text({
name: 'Login',
required: true,
default: null,
}),
password: Value.text({
name: 'Password',
required: false,
default: null,
masked: true,
/**
 * Builds the common SMTP credential fields (host, port, connection security,
 * from address, username, password), optionally pre-filling host/port/security
 * with a provider's recommended values.
 *
 * @param defaults - Optional preset values; omitted fields fall back to
 *                   no host default, port 587, and STARTTLS
 * @returns An {@link InputSpec} validated as {@link SmtpValue}
 */
function smtpFields(
  defaults: {
    host?: string
    port?: number
    security?: 'starttls' | 'tls'
  } = {},
): InputSpec<SmtpValue> {
  return InputSpec.of<InputSpecOf<SmtpValue>>({
    host: Value.text({
      name: 'Host',
      required: true,
      // No default host unless a provider preset supplies one.
      default: defaults.host ?? null,
      placeholder: 'smtp.example.com',
    }),
    port: Value.number({
      name: 'Port',
      required: true,
      // 587 is the submission port used when no preset overrides it.
      default: defaults.port ?? 587,
      min: 1,
      max: 65535,
      integer: true,
    }),
    security: Value.select({
      name: 'Connection Security',
      default: defaults.security ?? 'starttls',
      values: {
        starttls: 'STARTTLS',
        tls: 'TLS',
      },
    }),
    from: Value.text({
      name: 'From Address',
      required: true,
      default: null,
      placeholder: 'Example Name <test@example.com>',
      patterns: [Patterns.emailWithName],
    }),
    username: Value.text({
      name: 'Username',
      required: true,
      default: null,
    }),
    password: Value.text({
      name: 'Password',
      // Optional: some relays authenticate by username/IP only.
      // NOTE(review): presumed rationale — confirm against consumers.
      required: false,
      default: null,
      masked: true,
    }),
  })
}
/**
 * Base SMTP settings with no provider-specific defaults.
 * Equivalent to calling `smtpFields()` with no arguments: empty host,
 * port 587, STARTTLS.
 */
export const customSmtp = smtpFields()
/**
 * Provider presets for SMTP configuration.
 * Each variant has SMTP fields pre-filled with the provider's recommended settings
 * (all presets here use port 587 with STARTTLS); the `other` variant falls back
 * to the blank {@link customSmtp} spec.
 */
export const smtpProviderVariants = Variants.of({
  gmail: {
    name: 'Gmail',
    spec: smtpFields({
      host: 'smtp.gmail.com',
      port: 587,
      security: 'starttls',
    }),
  },
  ses: {
    name: 'Amazon SES',
    // NOTE(review): host is pinned to us-east-1; users in other AWS regions
    // must edit the host manually — confirm this is the intended UX.
    spec: smtpFields({
      host: 'email-smtp.us-east-1.amazonaws.com',
      port: 587,
      security: 'starttls',
    }),
  },
  sendgrid: {
    name: 'SendGrid',
    spec: smtpFields({
      host: 'smtp.sendgrid.net',
      port: 587,
      security: 'starttls',
    }),
  },
  mailgun: {
    name: 'Mailgun',
    spec: smtpFields({
      host: 'smtp.mailgun.org',
      port: 587,
      security: 'starttls',
    }),
  },
  protonmail: {
    name: 'Proton Mail',
    spec: smtpFields({
      host: 'smtp.protonmail.ch',
      port: 587,
      security: 'starttls',
    }),
  },
  other: {
    name: 'Other',
    // No prefilled values — the user supplies every field.
    spec: customSmtp,
  },
})
/**
 * System SMTP settings with provider presets.
 * Wraps smtpProviderVariants in a union for use by the system email settings page.
 */
export const systemSmtpSpec = InputSpec.of({
  provider: Value.union({
    name: 'Provider',
    // No variant pre-selected; the cast is needed because the declared default
    // type is a variant key. NOTE(review): confirm a null default is handled
    // by the form renderer.
    default: null as any,
    variants: smtpProviderVariants,
  }),
})
@@ -55,19 +137,24 @@ const smtpVariants = Variants.of({
'A custom from address for this service. If not provided, the system from address will be used.',
required: false,
default: null,
placeholder: '<name>test@example.com',
inputmode: 'email',
patterns: [Patterns.email],
placeholder: 'Name <test@example.com>',
patterns: [Patterns.emailWithName],
}),
}),
},
custom: {
name: 'Custom Credentials',
spec: customSmtp,
spec: InputSpec.of({
provider: Value.union({
name: 'Provider',
default: null as any,
variants: smtpProviderVariants,
}),
}),
},
})
/**
* For service inputSpec. Gives users 3 options for SMTP: (1) disabled, (2) use system SMTP settings, (3) use custom SMTP settings
* For service inputSpec. Gives users 3 options for SMTP: (1) disabled, (2) use system SMTP settings, (3) use custom SMTP settings with provider presets
*/
export const smtpInputSpec = Value.dynamicUnion(async ({ effects }) => {
const smtp = await new GetSystemSmtp(effects).once()

View File

@@ -1,4 +1,12 @@
/**
* A record mapping field keys to their {@link ValueSpec} definitions.
* This is the root shape of a dynamic form specification — it defines the complete set
* of configurable fields for a service or action.
*/
export type InputSpec = Record<string, ValueSpec>
/**
* The discriminator for all supported form field types.
*/
export type ValueType =
| 'text'
| 'textarea'
@@ -13,6 +21,7 @@ export type ValueType =
| 'file'
| 'union'
| 'hidden'
/** Union of all concrete form field spec types. Discriminate on the `type` field. */
export type ValueSpec = ValueSpecOf<ValueType>
/** core spec types. These types provide the metadata for performing validations */
// prettier-ignore
@@ -32,37 +41,56 @@ export type ValueSpecOf<T extends ValueType> =
T extends "hidden" ? ValueSpecHidden :
never
/** Spec for a single-line text input field. */
export type ValueSpecText = {
/** Display label for the field. */
name: string
/** Optional help text displayed below the field. */
description: string | null
/** Optional warning message displayed to the user. */
warning: string | null
type: 'text'
/** Regex patterns used to validate the input value. */
patterns: Pattern[]
/** Minimum character length, or `null` for no minimum. */
minLength: number | null
/** Maximum character length, or `null` for no maximum. */
maxLength: number | null
/** Whether the field should obscure input (e.g. for passwords). */
masked: boolean
/** HTML input mode hint for mobile keyboards. */
inputmode: 'text' | 'email' | 'tel' | 'url'
/** Placeholder text shown when the field is empty. */
placeholder: string | null
/** Whether the field must have a value. */
required: boolean
/** Default value, which may be a literal string or a {@link RandomString} generation spec. */
default: DefaultString | null
/** `false` if editable, or a string message explaining why the field is disabled. */
disabled: false | string
/** If set, provides a "generate" button that fills the field with a random string matching this spec. */
generate: null | RandomString
/** Whether the field value cannot be changed after initial configuration. */
immutable: boolean
}
/** Spec for a multi-line textarea input field. */
export type ValueSpecTextarea = {
name: string
description: string | null
warning: string | null
type: 'textarea'
/** Regex patterns used to validate the input value. */
patterns: Pattern[]
placeholder: string | null
minLength: number | null
maxLength: number | null
/** Minimum number of visible rows. */
minRows: number
/** Maximum number of visible rows before scrolling. */
maxRows: number
required: boolean
default: string | null
@@ -70,12 +98,18 @@ export type ValueSpecTextarea = {
immutable: boolean
}
/** Spec for a numeric input field. */
export type ValueSpecNumber = {
type: 'number'
/** Minimum allowed value, or `null` for unbounded. */
min: number | null
/** Maximum allowed value, or `null` for unbounded. */
max: number | null
/** Whether only whole numbers are accepted. */
integer: boolean
/** Step increment for the input spinner, or `null` for any precision. */
step: number | null
/** Display label for the unit (e.g. `"MB"`, `"seconds"`), shown next to the field. */
units: string | null
placeholder: string | null
name: string
@@ -86,6 +120,7 @@ export type ValueSpecNumber = {
disabled: false | string
immutable: boolean
}
/** Spec for a browser-native color picker field. */
export type ValueSpecColor = {
name: string
description: string | null
@@ -93,34 +128,44 @@ export type ValueSpecColor = {
type: 'color'
required: boolean
/** Default hex color string (e.g. `"#ff0000"`), or `null`. */
default: string | null
disabled: false | string
immutable: boolean
}
/** Spec for a date, time, or datetime input field. */
export type ValueSpecDatetime = {
name: string
description: string | null
warning: string | null
type: 'datetime'
required: boolean
/** Controls which kind of picker is displayed. */
inputmode: 'date' | 'time' | 'datetime-local'
/** Minimum selectable date/time as an ISO string, or `null`. */
min: string | null
/** Maximum selectable date/time as an ISO string, or `null`. */
max: string | null
default: string | null
disabled: false | string
immutable: boolean
}
/** Spec for a single-select field displayed as radio buttons in a modal. */
export type ValueSpecSelect = {
/** Map of option keys to display labels. */
values: Record<string, string>
name: string
description: string | null
warning: string | null
type: 'select'
default: string | null
/** `false` if all enabled, a string disabling the whole field, or an array of disabled option keys. */
disabled: false | string | string[]
immutable: boolean
}
/** Spec for a multi-select field displayed as checkboxes in a modal. */
export type ValueSpecMultiselect = {
/** Map of option keys to display labels. */
values: Record<string, string>
name: string
@@ -128,12 +173,17 @@ export type ValueSpecMultiselect = {
warning: string | null
type: 'multiselect'
/** Minimum number of selections required, or `null`. */
minLength: number | null
/** Maximum number of selections allowed, or `null`. */
maxLength: number | null
/** `false` if all enabled, a string disabling the whole field, or an array of disabled option keys. */
disabled: false | string | string[]
/** Array of option keys selected by default. */
default: string[]
immutable: boolean
}
/** Spec for a boolean toggle (on/off switch). */
export type ValueSpecToggle = {
name: string
description: string | null
@@ -144,57 +194,81 @@ export type ValueSpecToggle = {
disabled: false | string
immutable: boolean
}
/**
* Spec for a discriminated union field — displays a dropdown for variant selection,
* and each variant can have its own nested sub-form.
*/
export type ValueSpecUnion = {
name: string
description: string | null
warning: string | null
type: 'union'
/** Map of variant keys to their display name and nested form spec. */
variants: Record<
string,
{
/** Display name for this variant in the dropdown. */
name: string
/** Nested form spec shown when this variant is selected. */
spec: InputSpec
}
>
/** `false` if all enabled, a string disabling the whole field, or an array of disabled variant keys. */
disabled: false | string | string[]
default: string | null
immutable: boolean
}
/** Spec for a file upload input field. */
export type ValueSpecFile = {
  name: string
  description: string | null
  warning: string | null
  type: 'file'
  /** Allowed file extensions (e.g. `[".pem", ".crt"]`). */
  extensions: string[]
  /** Whether a file must be selected. */
  required: boolean
  // NOTE(review): unlike the other value specs in this file, file fields carry
  // no `disabled`/`immutable`/`default` flags — confirm this is intentional.
}
/** Spec for a collapsible grouping of nested fields (a "sub-form"). */
export type ValueSpecObject = {
name: string
description: string | null
warning: string | null
type: 'object'
/** The nested form spec containing this object's fields. */
spec: InputSpec
}
/** Spec for a hidden field — not displayed to the user but included in the form data. */
export type ValueSpecHidden = {
type: 'hidden'
}
/** The two supported list item types. */
export type ListValueSpecType = 'text' | 'object'
/** Maps a {@link ListValueSpecType} to its concrete list item spec. */
// prettier-ignore
export type ListValueSpecOf<T extends ListValueSpecType> =
export type ListValueSpecOf<T extends ListValueSpecType> =
T extends "text" ? ListValueSpecText :
T extends "object" ? ListValueSpecObject :
never
/** A list field spec — union of text-list and object-list variants. */
export type ValueSpecList = ValueSpecListOf<ListValueSpecType>
/**
* Spec for a list field — an interface to add, remove, and edit items in an ordered collection.
* The `spec` field determines whether list items are text strings or structured objects.
*/
export type ValueSpecListOf<T extends ListValueSpecType> = {
name: string
description: string | null
warning: string | null
type: 'list'
/** The item spec — determines whether this is a list of text values or objects. */
spec: ListValueSpecOf<T>
/** Minimum number of items, or `null` for no minimum. */
minLength: number | null
/** Maximum number of items, or `null` for no maximum. */
maxLength: number | null
disabled: false | string
/** Default list items to populate on creation. */
default:
| string[]
| DefaultString[]
@@ -203,10 +277,14 @@ export type ValueSpecListOf<T extends ListValueSpecType> = {
| readonly DefaultString[]
| readonly Record<string, unknown>[]
}
/** A regex validation pattern with a human-readable description of what it enforces. */
export type Pattern = {
/** The regex pattern string (without delimiters). */
regex: string
/** A user-facing explanation shown when validation fails (e.g. `"Must be a valid email"`). */
description: string
}
/** Spec for text items within a list field. */
export type ListValueSpecText = {
type: 'text'
patterns: Pattern[]
@@ -218,13 +296,24 @@ export type ListValueSpecText = {
inputmode: 'text' | 'email' | 'tel' | 'url'
placeholder: string | null
}
/** Spec for object items within a list field. */
export type ListValueSpecObject = {
type: 'object'
/** The form spec for each object item. */
spec: InputSpec
/** Defines how uniqueness is determined among list items. */
uniqueBy: UniqueBy
/** An expression used to generate the display string for each item in the list summary (e.g. a key path). */
displayAs: string | null
}
/**
* Describes how list items determine uniqueness.
* - `null`: no uniqueness constraint
* - `string`: unique by a specific field key
* - `{ any: UniqueBy[] }`: unique if any of the sub-constraints match
* - `{ all: UniqueBy[] }`: unique if all sub-constraints match together
*/
export type UniqueBy =
| null
| string
@@ -234,12 +323,21 @@ export type UniqueBy =
| {
all: readonly UniqueBy[] | UniqueBy[]
}
/** A default value that is either a literal string or a {@link RandomString} generation spec. */
export type DefaultString = string | RandomString

/** Spec for generating a random string — used for default passwords, API keys, etc. */
export type RandomString = {
  /** The character set to draw from (e.g. `"a-zA-Z0-9"`). */
  charset: string
  /** The length of the generated string, in characters. */
  len: number
}
// sometimes the type checker needs just a little bit of help
/**
* Type guard that narrows a {@link ValueSpec} to a {@link ValueSpecListOf} of a specific item type.
*
* @param t - The value spec to check
* @param s - The list item type to narrow to (`"text"` or `"object"`)
*/
export function isValueSpecListOf<S extends ListValueSpecType>(
t: ValueSpec,
s: S,

View File

@@ -16,10 +16,12 @@ export type GetInput<A extends Record<string, any>> = (options: {
prefill: T.DeepPartial<A> | null
}) => Promise<null | void | undefined | T.DeepPartial<A>>
export type MaybeFn<T> = T | ((options: { effects: T.Effects }) => Promise<T>)
function callMaybeFn<T>(
maybeFn: MaybeFn<T>,
options: { effects: T.Effects },
export type MaybeFn<T, Opts = { effects: T.Effects }> =
| T
| ((options: Opts) => Promise<T>)
function callMaybeFn<T, Opts = { effects: T.Effects }>(
maybeFn: MaybeFn<T, Opts>,
options: Opts,
): Promise<T> {
if (maybeFn instanceof Function) {
return maybeFn(options)
@@ -57,7 +59,13 @@ export class Action<Id extends T.ActionId, Type extends Record<string, any>>
private constructor(
readonly id: Id,
private readonly metadataFn: MaybeFn<T.ActionMetadata>,
private readonly inputSpec: MaybeInputSpec<Type>,
private readonly inputSpec: MaybeFn<
MaybeInputSpec<Type>,
{
effects: T.Effects
prefill: unknown | null
}
>,
private readonly getInputFn: GetInput<Type>,
private readonly runFn: Run<Type>,
) {}
@@ -67,7 +75,13 @@ export class Action<Id extends T.ActionId, Type extends Record<string, any>>
>(
id: Id,
metadata: MaybeFn<Omit<T.ActionMetadata, 'hasInput'>>,
inputSpec: InputSpecType,
inputSpec: MaybeFn<
InputSpecType,
{
effects: T.Effects
prefill: unknown | null
}
>,
getInput: GetInput<ExtractInputSpecType<InputSpecType>>,
run: Run<ExtractInputSpecType<InputSpecType>>,
): Action<Id, ExtractInputSpecType<InputSpecType>> {
@@ -111,9 +125,12 @@ export class Action<Id extends T.ActionId, Type extends Record<string, any>>
}): Promise<T.ActionInput> {
let spec = {}
if (this.inputSpec) {
const built = await this.inputSpec.build(options)
this.prevInputSpec[options.effects.eventId!] = built
spec = built.spec
const inputSpec = await callMaybeFn(this.inputSpec, options)
const built = await inputSpec?.build(options)
if (built) {
this.prevInputSpec[options.effects.eventId!] = built
spec = built.spec
}
}
return {
eventId: options.effects.eventId!,

View File

@@ -1,6 +1,17 @@
import { DeepMap } from 'deep-equality-data-structures'
import * as P from './exver'
/**
* Compile-time utility type that validates a version string literal conforms to semver format.
*
* Resolves to `unknown` if valid, `never` if invalid. Used with {@link testTypeVersion}.
*
* @example
* ```ts
* type Valid = ValidateVersion<"1.2.3"> // unknown (valid)
* type Invalid = ValidateVersion<"-3"> // never (invalid)
* ```
*/
// prettier-ignore
export type ValidateVersion<T extends String> =
T extends `-${infer A}` ? never :
@@ -9,12 +20,32 @@ T extends `${infer A}-${string}` ? ValidateVersion<A> :
T extends `${bigint}.${infer A}` ? ValidateVersion<A> :
never
/**
* Compile-time utility type that validates an extended version string literal.
*
* Extended versions have the format `upstream:downstream` or `#flavor:upstream:downstream`.
*
* @example
* ```ts
* type Valid = ValidateExVer<"1.2.3:0"> // valid
* type Flavored = ValidateExVer<"#bitcoin:1.0:0"> // valid
* type Bad = ValidateExVer<"1.2-3"> // never (invalid)
* ```
*/
// prettier-ignore
export type ValidateExVer<T extends string> =
T extends `#${string}:${infer A}:${infer B}` ? ValidateVersion<A> & ValidateVersion<B> :
T extends `${infer A}:${infer B}` ? ValidateVersion<A> & ValidateVersion<B> :
never
/**
* Validates a tuple of extended version string literals at compile time.
*
* @example
* ```ts
* type Valid = ValidateExVers<["1.0:0", "2.0:0"]> // valid
* ```
*/
// prettier-ignore
export type ValidateExVers<T> =
T extends [] ? unknown[] :
@@ -460,6 +491,28 @@ class VersionRangeTable {
}
}
/**
* Represents a parsed version range expression used to match against {@link Version} or {@link ExtendedVersion} values.
*
* Version ranges support standard comparison operators (`=`, `>`, `<`, `>=`, `<=`, `!=`),
* caret (`^`) and tilde (`~`) ranges, boolean logic (`&&`, `||`, `!`), and flavor matching (`#flavor`).
*
* @example
* ```ts
* const range = VersionRange.parse(">=1.0.0:0 && <2.0.0:0")
* const version = ExtendedVersion.parse("1.5.0:0")
* console.log(range.satisfiedBy(version)) // true
*
* // Combine ranges with boolean logic
* const combined = VersionRange.and(
* VersionRange.parse(">=1.0:0"),
* VersionRange.parse("<3.0:0"),
* )
*
* // Match a specific flavor
* const flavored = VersionRange.parse("#bitcoin")
* ```
*/
export class VersionRange {
constructor(public atom: Anchor | And | Or | Not | P.Any | P.None | Flavor) {}
@@ -488,6 +541,7 @@ export class VersionRange {
}
}
/** Serializes this version range back to its canonical string representation. */
toString(): string {
switch (this.atom.type) {
case 'Anchor':
@@ -563,38 +617,69 @@ export class VersionRange {
return result
}
/**
* Parses a version range string into a `VersionRange`.
*
* @param range - A version range expression, e.g. `">=1.0.0:0 && <2.0.0:0"`, `"^1.2:0"`, `"*"`
* @returns The parsed `VersionRange`
* @throws If the string is not a valid version range expression
*/
static parse(range: string): VersionRange {
return VersionRange.parseRange(
P.parse(range, { startRule: 'VersionRange' }),
)
}
/**
* Creates a version range from a comparison operator and an {@link ExtendedVersion}.
*
* @param operator - One of `"="`, `">"`, `"<"`, `">="`, `"<="`, `"!="`, `"^"`, `"~"`
* @param version - The version to compare against
*/
static anchor(operator: P.CmpOp, version: ExtendedVersion) {
return new VersionRange({ type: 'Anchor', operator, version })
}
/**
* Creates a version range that matches only versions with the specified flavor.
*
* @param flavor - The flavor string to match, or `null` for the default (unflavored) variant
*/
static flavor(flavor: string | null) {
return new VersionRange({ type: 'Flavor', flavor })
}
/**
* Parses a legacy "emver" format version range string.
*
* @param range - A version range in the legacy emver format
* @returns The parsed `VersionRange`
*/
static parseEmver(range: string): VersionRange {
return VersionRange.parseRange(
P.parse(range, { startRule: 'EmverVersionRange' }),
)
}
/** Returns the intersection of this range with another (logical AND). */
and(right: VersionRange) {
return new VersionRange({ type: 'And', left: this, right })
}
/** Returns the union of this range with another (logical OR). */
or(right: VersionRange) {
return new VersionRange({ type: 'Or', left: this, right })
}
/** Returns the negation of this range (logical NOT). */
not() {
return new VersionRange({ type: 'Not', value: this })
}
/**
* Returns the logical AND (intersection) of multiple version ranges.
* Short-circuits on `none()` and skips `any()`.
*/
static and(...xs: Array<VersionRange>) {
let y = VersionRange.any()
for (let x of xs) {
@@ -613,6 +698,10 @@ export class VersionRange {
return y
}
/**
* Returns the logical OR (union) of multiple version ranges.
* Short-circuits on `any()` and skips `none()`.
*/
static or(...xs: Array<VersionRange>) {
let y = VersionRange.none()
for (let x of xs) {
@@ -631,14 +720,21 @@ export class VersionRange {
return y
}
/** Returns a version range that matches all versions (wildcard `*`). */
static any() {
return new VersionRange({ type: 'Any' })
}
/** Returns a version range that matches no versions (`!`). */
static none() {
return new VersionRange({ type: 'None' })
}
/**
* Returns `true` if the given version satisfies this range.
*
* @param version - A {@link Version} or {@link ExtendedVersion} to test
*/
satisfiedBy(version: Version | ExtendedVersion) {
return version.satisfies(this)
}
@@ -714,29 +810,60 @@ export class VersionRange {
}
}
/** Returns `true` if any version exists that could satisfy this range. */
satisfiable(): boolean {
return VersionRangeTable.collapse(this.tables()) !== false
}
/** Returns `true` if this range and `other` share at least one satisfying version. */
intersects(other: VersionRange): boolean {
return VersionRange.and(this, other).satisfiable()
}
/**
* Returns a canonical (simplified) form of this range using minterm expansion.
* Useful for normalizing complex boolean expressions into a minimal representation.
*/
normalize(): VersionRange {
return VersionRangeTable.minterms(this.tables())
}
}
/**
* Represents a semantic version number with numeric segments and optional prerelease identifiers.
*
* Follows semver precedence rules: numeric segments are compared left-to-right,
* and a version with prerelease identifiers has lower precedence than the same version without.
*
* @example
* ```ts
* const v = Version.parse("1.2.3")
* console.log(v.toString()) // "1.2.3"
* console.log(v.compare(Version.parse("1.3.0"))) // "less"
*
* const pre = Version.parse("2.0.0-beta.1")
* console.log(pre.compare(Version.parse("2.0.0"))) // "less" (prerelease < release)
* ```
*/
export class Version {
constructor(
/** The numeric version segments (e.g. `[1, 2, 3]` for `"1.2.3"`). */
public number: number[],
/** Optional prerelease identifiers (e.g. `["beta", 1]` for `"-beta.1"`). */
public prerelease: (string | number)[],
) {}
/** Serializes this version to its string form (e.g. `"1.2.3"` or `"1.0.0-beta.1"`). */
toString(): string {
return `${this.number.join('.')}${this.prerelease.length > 0 ? `-${this.prerelease.join('.')}` : ''}`
}
/**
* Compares this version against another using semver precedence rules.
*
* @param other - The version to compare against
* @returns `'greater'`, `'equal'`, or `'less'`
*/
compare(other: Version): 'greater' | 'equal' | 'less' {
const numLen = Math.max(this.number.length, other.number.length)
for (let i = 0; i < numLen; i++) {
@@ -783,6 +910,11 @@ export class Version {
return 'equal'
}
/**
* Compares two versions, returning a numeric value suitable for use with `Array.sort()`.
*
* @returns `-1` if less, `0` if equal, `1` if greater
*/
compareForSort(other: Version): -1 | 0 | 1 {
switch (this.compare(other)) {
case 'greater':
@@ -794,11 +926,21 @@ export class Version {
}
}
/**
* Parses a version string into a `Version` instance.
*
* @param version - A semver-compatible string, e.g. `"1.2.3"` or `"1.0.0-beta.1"`
* @throws If the string is not a valid version
*/
static parse(version: string): Version {
const parsed = P.parse(version, { startRule: 'Version' })
return new Version(parsed.number, parsed.prerelease)
}
/**
* Returns `true` if this version satisfies the given {@link VersionRange}.
* Internally treats this as an unflavored {@link ExtendedVersion} with downstream `0`.
*/
satisfies(versionRange: VersionRange): boolean {
return new ExtendedVersion(null, this, new Version([0], [])).satisfies(
versionRange,
@@ -806,18 +948,50 @@ export class Version {
}
}
// #flavor:0.1.2-beta.1:0
/**
* Represents an extended version with an optional flavor, an upstream version, and a downstream version.
*
* The format is `#flavor:upstream:downstream` (e.g. `#bitcoin:1.2.3:0`) or `upstream:downstream`
* for unflavored versions. Flavors allow multiple variants of a package to coexist.
*
* - **flavor**: An optional string identifier for the variant (e.g. `"bitcoin"`, `"litecoin"`)
* - **upstream**: The version of the upstream software being packaged
* - **downstream**: The version of the StartOS packaging itself
*
* Versions with different flavors are incomparable (comparison returns `null`).
*
* @example
* ```ts
* const v = ExtendedVersion.parse("#bitcoin:1.2.3:0")
* console.log(v.flavor) // "bitcoin"
* console.log(v.upstream) // Version { number: [1, 2, 3] }
* console.log(v.downstream) // Version { number: [0] }
* console.log(v.toString()) // "#bitcoin:1.2.3:0"
*
* const range = VersionRange.parse(">=1.0.0:0")
* console.log(v.satisfies(range)) // true
* ```
*/
export class ExtendedVersion {
constructor(
/** The flavor identifier (e.g. `"bitcoin"`), or `null` for unflavored versions. */
public flavor: string | null,
/** The upstream software version. */
public upstream: Version,
/** The downstream packaging version. */
public downstream: Version,
) {}
/** Serializes this extended version to its string form (e.g. `"#bitcoin:1.2.3:0"` or `"1.0.0:1"`). */
toString(): string {
return `${this.flavor ? `#${this.flavor}:` : ''}${this.upstream.toString()}:${this.downstream.toString()}`
}
/**
* Compares this extended version against another.
*
* @returns `'greater'`, `'equal'`, `'less'`, or `null` if the flavors differ (incomparable)
*/
compare(other: ExtendedVersion): 'greater' | 'equal' | 'less' | null {
if (this.flavor !== other.flavor) {
return null
@@ -829,6 +1003,10 @@ export class ExtendedVersion {
return this.downstream.compare(other.downstream)
}
/**
* Lexicographic comparison — compares flavors alphabetically first, then versions.
* Unlike {@link compare}, this never returns `null`: different flavors are ordered alphabetically.
*/
compareLexicographic(other: ExtendedVersion): 'greater' | 'equal' | 'less' {
if ((this.flavor || '') > (other.flavor || '')) {
return 'greater'
@@ -839,6 +1017,10 @@ export class ExtendedVersion {
}
}
/**
* Returns a numeric comparison result suitable for use with `Array.sort()`.
* Uses lexicographic ordering (flavors sorted alphabetically, then by version).
*/
compareForSort(other: ExtendedVersion): 1 | 0 | -1 {
switch (this.compareLexicographic(other)) {
case 'greater':
@@ -850,26 +1032,37 @@ export class ExtendedVersion {
}
}
/** Returns `true` if this version is strictly greater than `other`. Returns `false` if flavors differ. */
greaterThan(other: ExtendedVersion): boolean {
return this.compare(other) === 'greater'
}
/** Returns `true` if this version is greater than or equal to `other`. Returns `false` if flavors differ. */
greaterThanOrEqual(other: ExtendedVersion): boolean {
return ['greater', 'equal'].includes(this.compare(other) as string)
}
/** Returns `true` if this version equals `other` (same flavor, upstream, and downstream). */
equals(other: ExtendedVersion): boolean {
return this.compare(other) === 'equal'
}
/** Returns `true` if this version is strictly less than `other`. Returns `false` if flavors differ. */
lessThan(other: ExtendedVersion): boolean {
return this.compare(other) === 'less'
}
/** Returns `true` if this version is less than or equal to `other`. Returns `false` if flavors differ. */
lessThanOrEqual(other: ExtendedVersion): boolean {
return ['less', 'equal'].includes(this.compare(other) as string)
}
/**
* Parses an extended version string into an `ExtendedVersion`.
*
* @param extendedVersion - A string like `"1.2.3:0"` or `"#bitcoin:1.0.0:0"`
* @throws If the string is not a valid extended version
*/
static parse(extendedVersion: string): ExtendedVersion {
const parsed = P.parse(extendedVersion, { startRule: 'ExtendedVersion' })
return new ExtendedVersion(
@@ -879,6 +1072,12 @@ export class ExtendedVersion {
)
}
/**
* Parses a legacy "emver" format extended version string.
*
* @param extendedVersion - A version string in the legacy emver format
* @throws If the string is not a valid emver version (error message includes the input string)
*/
static parseEmver(extendedVersion: string): ExtendedVersion {
try {
const parsed = P.parse(extendedVersion, { startRule: 'Emver' })
@@ -1014,8 +1213,29 @@ export class ExtendedVersion {
}
}
/**
 * Compile-time type-checking helper that validates an extended version string literal.
 * If the string is invalid, TypeScript reports a type error at the call site.
 * At runtime this is simply the identity function.
 *
 * @param t - The extended version string literal to validate
 * @returns The same string, unchanged
 *
 * @example
 * ```ts
 * testTypeExVer("1.2.3:0") // compiles
 * testTypeExVer("#bitcoin:1.0:0") // compiles
 * testTypeExVer("invalid") // type error
 * ```
 */
export function testTypeExVer<T extends string>(t: T & ValidateExVer<T>) {
  return t
}
/**
 * Compile-time type-checking helper that validates a version string literal.
 * If the string is invalid, TypeScript reports a type error at the call site.
 * At runtime this is simply the identity function.
 *
 * @param t - The version string literal to validate
 * @returns The same string, unchanged
 *
 * @example
 * ```ts
 * testTypeVersion("1.2.3") // compiles
 * testTypeVersion("-3") // type error
 * ```
 */
export function testTypeVersion<T extends string>(t: T & ValidateVersion<T>) {
  return t
}

View File

@@ -8,6 +8,6 @@ export * as types from './types'
export * as T from './types'
export * as yaml from 'yaml'
export * as inits from './inits'
export { z } from 'zod'
export { z } from './zExport'
export * as utils from './util'

View File

@@ -2,21 +2,37 @@ import { VersionRange } from '../../../base/lib/exver'
import * as T from '../../../base/lib/types'
import { once } from '../util'
/**
 * The reason a service's init function is being called:
 * - `'install'` — first-time installation
 * - `'update'` — after a package update
 * - `'restore'` — after restoring from backup
 * - `null` — regular startup (no special lifecycle event)
 */
export type InitKind = 'install' | 'update' | 'restore' | null

/**
 * Function signature for an init handler that runs during service startup.
 * May resolve to `void`, `null`, or `undefined`; the resolved value is ignored.
 */
export type InitFn<Kind extends InitKind = InitKind> = (
  effects: T.Effects,
  kind: Kind,
) => Promise<void | null | undefined>

/** Object form of an init handler — implements an `init()` method. */
export interface InitScript<Kind extends InitKind = InitKind> {
  init(effects: T.Effects, kind: Kind): Promise<void>
}

/** Either an {@link InitScript} object or an {@link InitFn} function. */
export type InitScriptOrFn<Kind extends InitKind = InitKind> =
  | InitScript<Kind>
  | InitFn<Kind>
/**
* Composes multiple init handlers into a single `ExpectedExports.init`-compatible function.
* Handlers are executed sequentially in the order provided.
*
* @param inits - One or more init handlers to compose
*/
export function setupInit(...inits: InitScriptOrFn[]): T.ExpectedExports.init {
return async (opts) => {
for (const idx in inits) {
@@ -42,6 +58,7 @@ export function setupInit(...inits: InitScriptOrFn[]): T.ExpectedExports.init {
}
}
/** Normalizes an {@link InitScriptOrFn} into an {@link InitScript} object. */
export function setupOnInit(onInit: InitScriptOrFn): InitScript {
return 'init' in onInit
? onInit

View File

@@ -1,6 +1,9 @@
import { ExtendedVersion, VersionRange } from '../../../base/lib/exver'
import * as T from '../../../base/lib/types'
/**
* Function signature for an uninit handler that runs during service shutdown/uninstall.
*/
export type UninitFn = (
effects: T.Effects,
/**
@@ -13,6 +16,7 @@ export type UninitFn = (
target: VersionRange | ExtendedVersion | null,
) => Promise<void | null | undefined>
/** Object form of an uninit handler — implements an `uninit()` method. */
export interface UninitScript {
uninit(
effects: T.Effects,
@@ -27,8 +31,15 @@ export interface UninitScript {
): Promise<void>
}
/** Either a {@link UninitScript} object or a {@link UninitFn} function. */
export type UninitScriptOrFn = UninitScript | UninitFn
/**
* Composes multiple uninit handlers into a single `ExpectedExports.uninit`-compatible function.
* Handlers are executed sequentially in the order provided.
*
* @param uninits - One or more uninit handlers to compose
*/
export function setupUninit(
...uninits: UninitScriptOrFn[]
): T.ExpectedExports.uninit {
@@ -40,6 +51,7 @@ export function setupUninit(
}
}
/** Normalizes a {@link UninitScriptOrFn} into a {@link UninitScript} object. */
export function setupOnUninit(onUninit: UninitScriptOrFn): UninitScript {
return 'uninit' in onUninit
? onUninit

View File

@@ -6,4 +6,5 @@ export type AddPackageSignerParams = {
id: PackageId
signer: Guid
versions: string | null
merge?: boolean
}

View File

@@ -5,6 +5,4 @@ export type AttachParams = {
password: EncryptedWire | null
guid: string
kiosk?: boolean
name: string | null
hostname: string | null
}

View File

@@ -1,3 +1,9 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type CheckPortRes = { ip: string; port: number; reachable: boolean }
export type CheckPortRes = {
ip: string
port: number
openExternally: boolean
openInternally: boolean
hairpinning: boolean
}

View File

@@ -4,7 +4,7 @@ import type { RecoverySource } from './RecoverySource'
export type SetupExecuteParams = {
guid: string
password: EncryptedWire
password: EncryptedWire | null
recoverySource: RecoverySource<EncryptedWire> | null
kiosk?: boolean
name: string | null

View File

@@ -0,0 +1,3 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type SmtpSecurity = 'starttls' | 'tls'

View File

@@ -1,9 +1,11 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { SmtpSecurity } from './SmtpSecurity'
export type SmtpValue = {
server: string
host: string
port: number
from: string
login: string
username: string
password: string | null
security: SmtpSecurity
}

View File

@@ -1,10 +1,12 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { SmtpSecurity } from './SmtpSecurity'
export type TestSmtpParams = {
server: string
host: string
port: number
from: string
to: string
login: string
username: string
password: string
security: SmtpSecurity
}

View File

@@ -270,6 +270,7 @@ export { SideloadResponse } from './SideloadResponse'
export { SignalStrength } from './SignalStrength'
export { SignAssetParams } from './SignAssetParams'
export { SignerInfo } from './SignerInfo'
export { SmtpSecurity } from './SmtpSecurity'
export { SmtpValue } from './SmtpValue'
export { SshAddParams } from './SshAddParams'
export { SshDeleteParams } from './SshDeleteParams'

View File

@@ -12,6 +12,11 @@ import { FileContents } from './merkleArchive/fileContents'
const magicAndVersion = new Uint8Array([59, 59, 2])
/**
* Compares two `Uint8Array` instances byte-by-byte for equality.
*
* @returns `true` if both arrays have the same length and identical bytes
*/
export function compare(a: Uint8Array, b: Uint8Array) {
if (a.length !== b.length) return false
for (let i = 0; i < a.length; i++) {
@@ -20,12 +25,41 @@ export function compare(a: Uint8Array, b: Uint8Array) {
return true
}
/**
* Represents a parsed `.s9pk` package archive — the binary distribution format for StartOS services.
*
* An `S9pk` wraps a verified {@link Manifest}, a {@link MerkleArchive} containing the package's
* assets (icon, license, dependency metadata), and the total archive size in bytes.
*
* @example
* ```ts
* const s9pk = await S9pk.deserialize(file, null)
* console.log(s9pk.manifest.id) // e.g. "bitcoind"
* console.log(s9pk.size) // archive size in bytes
* const icon = await s9pk.icon() // base64 data URL
* const license = await s9pk.license()
* ```
*/
export class S9pk {
private constructor(
/** The parsed package manifest containing metadata, dependencies, and interface definitions. */
readonly manifest: Manifest,
/** The Merkle-verified archive containing the package's files. */
readonly archive: MerkleArchive,
/** The total size of the archive in bytes. */
readonly size: number,
) {}
/**
* Deserializes an `S9pk` from a `Blob` (e.g. a `File` from a browser file input).
*
* Validates the magic bytes and version header, then parses the Merkle archive structure.
* If a `commitment` is provided, the archive is cryptographically verified against it.
*
* @param source - The raw `.s9pk` file as a `Blob`
* @param commitment - An optional Merkle commitment to verify the archive against, or `null` to skip verification
* @returns A fully parsed `S9pk` instance
* @throws If the magic bytes are invalid or the archive fails verification
*/
static async deserialize(
source: Blob,
commitment: MerkleArchiveCommitment | null,
@@ -57,6 +91,14 @@ export class S9pk {
return new S9pk(manifest, archive, source.size)
}
/**
* Extracts the package icon from the archive and returns it as a base64-encoded data URL.
*
* Looks for a file named `icon.*` with an image MIME type (e.g. `icon.png`, `icon.svg`).
*
* @returns A data URL string like `"data:image/png;base64,..."` suitable for use in `<img src>`.
* @throws If no icon file is found in the archive
*/
async icon(): Promise<DataUrl> {
const iconName = Object.keys(this.archive.contents.contents).find(
(name) =>
@@ -73,6 +115,12 @@ export class S9pk {
)
}
/**
* Returns the metadata (e.g. `{ title }`) for a specific dependency by its package ID.
*
* @param id - The dependency's package identifier (e.g. `"bitcoind"`)
* @returns The dependency metadata object, or `null` if the dependency is not present in the archive
*/
async dependencyMetadataFor(id: PackageId) {
const entry = this.archive.contents.getPath([
'dependencies',
@@ -85,6 +133,12 @@ export class S9pk {
) as { title: string }
}
/**
* Returns the icon for a specific dependency as a base64 data URL.
*
* @param id - The dependency's package identifier
* @returns A data URL string, or `null` if the dependency or its icon is not present
*/
async dependencyIconFor(id: PackageId) {
const dir = this.archive.contents.getPath(['dependencies', id])
if (!dir || !(dir.contents instanceof DirectoryContents)) return null
@@ -101,6 +155,12 @@ export class S9pk {
)
}
/**
* Returns a merged record of all dependency metadata (title, icon, description, optional flag)
* for every dependency declared in the manifest.
*
* @returns A record keyed by package ID, each containing `{ title, icon, description, optional }`
*/
async dependencyMetadata() {
return Object.fromEntries(
await Promise.all(
@@ -119,6 +179,12 @@ export class S9pk {
)
}
/**
* Reads and returns the `LICENSE.md` file from the archive as a UTF-8 string.
*
* @returns The full license text
* @throws If `LICENSE.md` is not found in the archive
*/
async license(): Promise<string> {
const file = this.archive.contents.getPath(['LICENSE.md'])
if (!file || !(file.contents instanceof FileContents))

View File

@@ -1,4 +1,5 @@
export * as inputSpecTypes from './actions/input/inputSpecTypes'
import { InputSpec as InputSpecClass } from './actions/input/builder/inputSpec'
import {
DependencyRequirement,
@@ -20,20 +21,32 @@ export {
CurrentDependenciesResult,
} from './dependencies/setupDependencies'
/** An object that can be built into a terminable daemon process. */
export type DaemonBuildable = {
  /** Starts the daemon and returns a handle exposing only termination. */
  build(): Promise<{
    term(): Promise<void>
  }>
}

/** The three categories of service network interfaces. */
export type ServiceInterfaceType = 'ui' | 'p2p' | 'api'
/** A Node.js signal name (e.g. `"SIGTERM"`, `"SIGKILL"`). */
export type Signals = NodeJS.Signals
/** The SIGTERM signal — used for graceful daemon termination. */
export const SIGTERM: Signals = 'SIGTERM'
/** The SIGKILL signal — used for forceful daemon termination. */
export const SIGKILL: Signals = 'SIGKILL'
/** Sentinel value (`-1`) indicating that no timeout should be applied. */
export const NO_TIMEOUT = -1
/** A function that builds an absolute file path from a volume name and relative path. */
export type PathMaker = (options: { volume: string; path: string }) => string
/** A value that may or may not be wrapped in a `Promise`. */
export type MaybePromise<A> = Promise<A> | A
/**
* Namespace defining the required exports for a StartOS service package.
* Every package must export implementations matching these types.
*/
export namespace ExpectedExports {
version: 1
@@ -62,10 +75,16 @@ export namespace ExpectedExports {
target: ExtendedVersion | VersionRange | null
}) => Promise<unknown>
/** The package manifest describing the service's metadata, dependencies, and interfaces. */
export type manifest = Manifest
/** The map of user-invocable actions defined by this service. */
export type actions = Actions<Record<ActionId, Action<ActionId, any>>>
}
/**
* The complete ABI (Application Binary Interface) for a StartOS service package.
* Maps all required exports to their expected types.
*/
export type ABI = {
createBackup: ExpectedExports.createBackup
main: ExpectedExports.main
@@ -74,53 +93,82 @@ export type ABI = {
manifest: ExpectedExports.manifest
actions: ExpectedExports.actions
}
/** A time value in milliseconds. */
export type TimeMs = number
/** A version string in string form. */
export type VersionString = string
declare const DaemonProof: unique symbol
/** Opaque branded type proving that a daemon was started. Cannot be constructed directly. */
export type DaemonReceipt = {
[DaemonProof]: never
}
/** A running daemon with methods to wait for completion or terminate it. */
export type Daemon = {
/** Waits for the daemon to exit and returns its exit message. */
wait(): Promise<string>
/** Terminates the daemon. */
term(): Promise<null>
[DaemonProof]: never
}
/** The result status of a health check (extracted from `NamedHealthCheckResult`). */
export type HealthStatus = NamedHealthCheckResult['result']
/** SMTP mail server configuration values. */
export type SmtpValue = {
server: string
host: string
port: number
from: string
login: string
username: string
password: string | null | undefined
security: 'starttls' | 'tls'
}
/**
 * Marker class indicating that a container should use its own built-in entrypoint
 * rather than a custom command. Optionally accepts an override command array.
 */
export class UseEntrypoint {
  /** Discriminant property checked by {@link isUseEntrypoint}. */
  readonly USE_ENTRYPOINT = 'USE_ENTRYPOINT'
  constructor(
    // NOTE(review): exact semantics of the override array (replace vs. append
    // to the entrypoint's args) are not visible here — confirm in the runtime.
    readonly overridCmd?: string[],
  ) {}
  /**
   * Correctly-spelled accessor for {@link overridCmd}.
   * The misspelled constructor property is preserved for backward compatibility
   * with existing callers; new code should prefer this getter.
   */
  get overrideCmd(): string[] | undefined {
    return this.overridCmd
  }
}
/** Type guard that checks whether a {@link CommandType} is a {@link UseEntrypoint} instance. */
export function isUseEntrypoint(
  command: CommandType,
): command is UseEntrypoint {
  // Strings (and anything else non-object) can never be UseEntrypoint.
  if (typeof command !== 'object') return false
  // Argv arrays are objects too, but lack the discriminant property.
  return 'USE_ENTRYPOINT' in command
}
export type CommandType = string | [string, ...string[]] | UseEntrypoint
/**
 * The ways to specify a command to run in a container:
 * - A shell string (run via `sh -c`)
 * - An explicit argv array (mutable or readonly tuple, first element is the executable)
 * - A {@link UseEntrypoint} to use the container's built-in entrypoint
 */
export type CommandType =
  | string
  | [string, ...string[]]
  | readonly [string, ...string[]]
  | UseEntrypoint
/** The return type from starting a daemon — provides `wait()` and `term()` controls. */
export type DaemonReturned = {
  /** Waits for the daemon process to exit. */
  wait(): Promise<unknown>
  /** Sends a signal to terminate the daemon. If it doesn't exit within `timeout` ms, sends SIGKILL. */
  // NOTE(review): default signal/timeout values are applied by the implementation,
  // not visible here — presumably SIGTERM; confirm in the daemon runtime.
  term(options?: { signal?: Signals; timeout?: number }): Promise<null>
}
export declare const hostName: unique symbol
// asdflkjadsf.onion | 1.2.3.4
/** A branded string type for hostnames (e.g. `.onion` addresses or IP addresses). */
export type Hostname = string & { [hostName]: never }
/** A string identifier for a service network interface. */
export type ServiceInterfaceId = string
export { ServiceInterface }
/** Maps effect method names to their kebab-case RPC equivalents. */
export type EffectMethod<T extends StringObject = Effects> = {
[K in keyof T]-?: K extends string
? T[K] extends Function
@@ -131,6 +179,7 @@ export type EffectMethod<T extends StringObject = Effects> = {
: never
}[keyof T]
/** Options for rsync-based file synchronization (used in backup/restore). */
export type SyncOptions = {
/** delete files that exist in the target directory, but not in the source directory */
delete: boolean
@@ -156,51 +205,75 @@ export type Metadata = {
mode: number
}
/** Result type for setting a service's dependency configuration and restart signal. */
export type SetResult = {
  /** Health checks per dependency that must pass (see {@link DependsOn}). */
  dependsOn: DependsOn
  /** The signal to deliver to the service after the change. */
  signal: Signals
}

/** A string identifier for a StartOS package (e.g. `"bitcoind"`). */
export type PackageId = string
/** A user-facing message string. */
export type Message = string
/** Whether a dependency needs to be actively running or merely installed. */
export type DependencyKind = 'running' | 'exists'

/**
 * Maps package IDs to the health check IDs that must pass before this service considers
 * the dependency satisfied.
 */
export type DependsOn = {
  [packageId: string]: string[] | readonly string[]
}

/**
 * A typed error that can be displayed to the user.
 * Either a plain error message string, or a structured error code with description.
 */
export type KnownError =
  | { error: string }
  | {
      errorCode: [number, string] | readonly [number, string]
    }

/** An array of dependency requirements for a service. */
export type Dependencies = Array<DependencyRequirement>

/**
 * Recursively makes all properties of `T` optional.
 * Tuples keep their arity, with each element made deeply partial in place.
 */
export type DeepPartial<T> = T extends [infer A, ...infer Rest]
  ? [DeepPartial<A>, ...DeepPartial<Rest>]
  : T extends {}
    ? { [P in keyof T]?: DeepPartial<T[P]> }
    : T
/** Recursively removes all `readonly` modifiers from `T`. */
export type DeepWritable<T> = {
-readonly [K in keyof T]: T[K]
}
/** Identity cast that strips `readonly` modifiers at the type level (see {@link DeepWritable}); the runtime value is returned untouched. */
export function writable<T>(value: T): DeepWritable<T> {
  // No runtime transformation — the cast exists purely for the type checker.
  return value as DeepWritable<T>
}
/** Recursively makes all properties of `T` readonly. */
export type DeepReadonly<T> = {
readonly [P in keyof T]: DeepReadonly<T[P]>
}
/** Identity cast that adds `readonly` modifiers at the type level (see {@link DeepReadonly}); the runtime value is returned untouched. */
export function readonly<T>(value: T): DeepReadonly<T> {
  // Runtime no-op; only the static type changes.
  return value as DeepReadonly<T>
}
/** Accepts either a mutable or deeply-readonly version of `T`. */
export type AllowReadonly<T> =
| T
| {
readonly [P in keyof T]: AllowReadonly<T[P]>
}
/**
 * Public alias for {@link InputSpecClass}, pairing the runtime-validated type
 * with its statically-validated shape (defaults to the same type).
 */
export type InputSpec<
  Type extends StaticValidatedAs,
  StaticValidatedAs extends Record<string, unknown> = Type,
> = InputSpecClass<Type, StaticValidatedAs>

View File

@@ -0,0 +1,10 @@
/**
 * Error used to signal that a watched/long-polled operation was aborted.
 * The `muteUnhandled` marker flags the rejection as safe for the runtime's
 * unhandled-rejection handler to suppress (see container-runtime handling).
 */
export class AbortedError extends Error {
  readonly muteUnhandled = true as const
  declare cause?: unknown
  constructor(message?: string, options?: { cause?: unknown }) {
    super(message)
    this.name = 'AbortedError'
    // Attach `cause` manually so the class works on targets without ES2022 Error options.
    const cause = options?.cause
    if (cause !== undefined) this.cause = cause
  }
}

View File

@@ -1,4 +1,5 @@
import { Effects } from '../Effects'
import { AbortedError } from './AbortedError'
import { DropGenerator, DropPromise } from './Drop'
export class GetOutboundGateway {
@@ -38,7 +39,7 @@ export class GetOutboundGateway {
})
await waitForNext
}
return new Promise<never>((_, rej) => rej(new Error('aborted')))
return new Promise<never>((_, rej) => rej(new AbortedError()))
}
/**

View File

@@ -1,5 +1,6 @@
import { Effects } from '../Effects'
import * as T from '../types'
import { AbortedError } from './AbortedError'
import { DropGenerator, DropPromise } from './Drop'
export class GetSystemSmtp {
@@ -39,7 +40,7 @@ export class GetSystemSmtp {
})
await waitForNext
}
return new Promise<never>((_, rej) => rej(new Error('aborted')))
return new Promise<never>((_, rej) => rej(new AbortedError()))
}
/**

View File

@@ -1,3 +1,11 @@
/**
* Converts an unknown thrown value into an Error instance.
* If `e` is already an Error, wraps it; if a string, uses it as the message;
* otherwise JSON-serializes it as the error message.
*
* @param e - The unknown value to convert
* @returns An Error instance
*/
export const asError = (e: unknown) => {
if (e instanceof Error) {
return new Error(e as any)

View File

@@ -1,3 +1,18 @@
/**
* Performs a deep structural equality check across all provided arguments.
* Returns true only if every argument is deeply equal to every other argument.
* Handles primitives, arrays, and plain objects recursively.
*
* @param args - Two or more values to compare for deep equality
* @returns True if all arguments are deeply equal
*
* @example
* ```ts
* deepEqual({ a: 1 }, { a: 1 }) // true
* deepEqual([1, 2], [1, 2], [1, 2]) // true
* deepEqual({ a: 1 }, { a: 2 }) // false
* ```
*/
export function deepEqual(...args: unknown[]) {
const objects = args.filter(
(x): x is object => typeof x === 'object' && x !== null,

View File

@@ -1,3 +1,13 @@
/**
* Computes the partial difference between two values.
* Returns `undefined` if the values are equal, or `{ diff }` containing only the changed parts.
* For arrays, the diff contains only items in `next` that have no deep-equal counterpart in `prev`.
* For objects, the diff contains only keys whose values changed.
*
* @param prev - The original value
* @param next - The updated value
* @returns An object containing the diff, or `undefined` if the values are equal
*/
export function partialDiff<T>(
prev: T,
next: T,
@@ -46,6 +56,14 @@ export function partialDiff<T>(
}
}
/**
* Deeply merges multiple values together. Objects are merged key-by-key recursively.
* Arrays are merged by appending items that are not already present (by deep equality).
* Primitives are resolved by taking the last argument.
*
* @param args - The values to merge, applied left to right
* @returns The merged result
*/
export function deepMerge(...args: unknown[]): unknown {
const lastItem = (args as any)[args.length - 1]
if (typeof lastItem !== 'object' || !lastItem) return lastItem

View File

@@ -1,6 +1,14 @@
import { DefaultString } from '../actions/input/inputSpecTypes'
import { getRandomString } from './getRandomString'
/**
* Resolves a DefaultString spec into a concrete string value.
* If the spec is a plain string, returns it directly.
* If it is a random-string specification, generates a random string accordingly.
*
* @param defaultSpec - A string literal or a random-string generation spec
* @returns The resolved default string value
*/
export function getDefaultString(defaultSpec: DefaultString): string {
if (typeof defaultSpec === 'string') {
return defaultSpec

View File

@@ -8,6 +8,7 @@ import {
HostnameInfo,
} from '../types'
import { Effects } from '../Effects'
import { AbortedError } from './AbortedError'
import { DropGenerator, DropPromise } from './Drop'
import { IpAddress, IPV6_LINK_LOCAL } from './ip'
import { deepEqual } from './deepEqual'
@@ -394,7 +395,7 @@ export class GetServiceInterface<Mapped = ServiceInterfaceFilled | null> {
}
await waitForNext
}
return new Promise<never>((_, rej) => rej(new Error('aborted')))
return new Promise<never>((_, rej) => rej(new AbortedError()))
}
/**

View File

@@ -1,5 +1,6 @@
import { Effects } from '../Effects'
import { PackageId } from '../osBindings'
import { AbortedError } from './AbortedError'
import { deepEqual } from './deepEqual'
import { DropGenerator, DropPromise } from './Drop'
import { ServiceInterfaceFilled, filledAddress } from './getServiceInterface'
@@ -105,7 +106,7 @@ export class GetServiceInterfaces<Mapped = ServiceInterfaceFilled[]> {
}
await waitForNext
}
return new Promise<never>((_, rej) => rej(new Error('aborted')))
return new Promise<never>((_, rej) => rej(new AbortedError()))
}
/**

View File

@@ -1,19 +1,41 @@
import { ExtendedVersion } from '../exver'
/**
* A vertex (node) in a directed graph, holding metadata and a list of connected edges.
* @typeParam VMetadata - The type of metadata stored on vertices
* @typeParam EMetadata - The type of metadata stored on edges
*/
export type Vertex<VMetadata = null, EMetadata = null> = {
metadata: VMetadata
edges: Array<Edge<EMetadata, VMetadata>>
}
/**
* A directed edge connecting two vertices, with its own metadata.
* @typeParam EMetadata - The type of metadata stored on edges
* @typeParam VMetadata - The type of metadata stored on the connected vertices
*/
export type Edge<EMetadata = null, VMetadata = null> = {
metadata: EMetadata
from: Vertex<VMetadata, EMetadata>
to: Vertex<VMetadata, EMetadata>
}
/**
* A directed graph data structure supporting vertex/edge management and graph traversal algorithms
* including breadth-first search, reverse BFS, and shortest path computation.
*
* @typeParam VMetadata - The type of metadata stored on vertices
* @typeParam EMetadata - The type of metadata stored on edges
*/
export class Graph<VMetadata = null, EMetadata = null> {
private readonly vertices: Array<Vertex<VMetadata, EMetadata>> = []
constructor() {}
/**
* Serializes the graph to a JSON string for debugging.
* @param metadataRepr - Optional function to transform metadata values before serialization
* @returns A pretty-printed JSON string of the graph structure
*/
dump(
metadataRepr: (metadata: VMetadata | EMetadata) => any = (a) => a,
): string {
@@ -30,6 +52,13 @@ export class Graph<VMetadata = null, EMetadata = null> {
2,
)
}
/**
* Adds a new vertex to the graph, optionally connecting it to existing vertices via edges.
* @param metadata - The metadata to attach to the new vertex
* @param fromEdges - Edges pointing from existing vertices to this new vertex
* @param toEdges - Edges pointing from this new vertex to existing vertices
* @returns The newly created vertex
*/
addVertex(
metadata: VMetadata,
fromEdges: Array<Omit<Edge<EMetadata, VMetadata>, 'to'>>,
@@ -60,6 +89,11 @@ export class Graph<VMetadata = null, EMetadata = null> {
this.vertices.push(vertex)
return vertex
}
/**
* Returns a generator that yields all vertices matching the predicate.
* @param predicate - A function to test each vertex
* @returns A generator of matching vertices
*/
findVertex(
predicate: (vertex: Vertex<VMetadata, EMetadata>) => boolean,
): Generator<Vertex<VMetadata, EMetadata>, null> {
@@ -74,6 +108,13 @@ export class Graph<VMetadata = null, EMetadata = null> {
}
return gen()
}
/**
* Adds a directed edge between two existing vertices.
* @param metadata - The metadata to attach to the edge
* @param from - The source vertex
* @param to - The destination vertex
* @returns The newly created edge
*/
addEdge(
metadata: EMetadata,
from: Vertex<VMetadata, EMetadata>,
@@ -88,6 +129,11 @@ export class Graph<VMetadata = null, EMetadata = null> {
edge.to.edges.push(edge)
return edge
}
/**
* Performs a breadth-first traversal following outgoing edges from the starting vertex or vertices.
* @param from - A starting vertex, or a predicate to select multiple starting vertices
* @returns A generator yielding vertices in BFS order
*/
breadthFirstSearch(
from:
| Vertex<VMetadata, EMetadata>
@@ -139,6 +185,11 @@ export class Graph<VMetadata = null, EMetadata = null> {
return rec(from)
}
}
/**
* Performs a reverse breadth-first traversal following incoming edges from the starting vertex or vertices.
* @param to - A starting vertex, or a predicate to select multiple starting vertices
* @returns A generator yielding vertices in reverse BFS order
*/
reverseBreadthFirstSearch(
to:
| Vertex<VMetadata, EMetadata>
@@ -190,6 +241,12 @@ export class Graph<VMetadata = null, EMetadata = null> {
return rec(to)
}
}
/**
* Finds the shortest path (by edge count) between two vertices using BFS.
* @param from - The starting vertex, or a predicate to select starting vertices
* @param to - The target vertex, or a predicate to identify target vertices
* @returns An array of edges forming the shortest path, or `null` if no path exists
*/
shortestPath(
from:
| Vertex<VMetadata, EMetadata>

View File

@@ -15,6 +15,21 @@ const digitsMs = (digits: string | null, multiplier: number) => {
const divideBy = multiplier / Math.pow(10, digits.length - 1)
return Math.round(value * divideBy)
}
/**
* Converts a human-readable time string to milliseconds.
* Supports units: `ms`, `s`, `m`, `h`, `d`. If a number is passed, it is returned as-is.
*
* @param time - A time string (e.g. `"500ms"`, `"1.5s"`, `"2h"`) or a numeric millisecond value
* @returns The time in milliseconds, or `undefined` if `time` is falsy
* @throws Error if the string format is invalid
*
* @example
* ```ts
* inMs("2s") // 2000
* inMs("1.5h") // 5400000
* inMs(500) // 500
* ```
*/
export const inMs = (time?: string | number) => {
if (typeof time === 'number') return time
if (!time) return undefined

View File

@@ -22,5 +22,6 @@ export { splitCommand } from './splitCommand'
export { nullIfEmpty } from './nullIfEmpty'
export { deepMerge, partialDiff } from './deepMerge'
export { deepEqual } from './deepEqual'
export { AbortedError } from './AbortedError'
export * as regexes from './regexes'
export { stringFromStdErrOut } from './stringFromStdErrOut'

View File

@@ -1,3 +1,14 @@
/**
* Represents an IPv4 or IPv6 address as raw octets with arithmetic and comparison operations.
*
* IPv4 addresses have 4 octets, IPv6 addresses have 16 octets.
*
* @example
* ```ts
* const ip = IpAddress.parse("192.168.1.1")
* const next = ip.add(1) // 192.168.1.2
* ```
*/
export class IpAddress {
private renderedOctets: number[]
protected constructor(
@@ -6,6 +17,13 @@ export class IpAddress {
) {
this.renderedOctets = [...octets]
}
/**
* Parses an IP address string into an IpAddress instance.
* Supports both IPv4 dotted-decimal and IPv6 colon-hex notation (including `::` shorthand).
* @param address - The IP address string to parse
* @returns A new IpAddress instance
* @throws Error if the address format is invalid
*/
static parse(address: string): IpAddress {
let octets
if (address.includes(':')) {
@@ -39,6 +57,12 @@ export class IpAddress {
}
return new IpAddress(octets, address)
}
/**
* Creates an IpAddress from a raw octet array.
* @param octets - Array of 4 octets (IPv4) or 16 octets (IPv6), each 0-255
* @returns A new IpAddress instance
* @throws Error if the octet array length is not 4 or 16, or any octet exceeds 255
*/
static fromOctets(octets: number[]) {
if (octets.length == 4) {
if (octets.some((o) => o > 255)) {
@@ -66,15 +90,24 @@ export class IpAddress {
throw new Error('invalid ip address')
}
}
/** Returns true if this is an IPv4 address (4 octets). */
isIpv4(): boolean {
return this.octets.length === 4
}
/** Returns true if this is an IPv6 address (16 octets). */
isIpv6(): boolean {
return this.octets.length === 16
}
/** Returns true if this is a public IPv4 address (not in any private range). */
isPublic(): boolean {
return this.isIpv4() && !PRIVATE_IPV4_RANGES.some((r) => r.contains(this))
}
/**
* Returns a new IpAddress incremented by `n`.
* @param n - The integer amount to add (fractional part is truncated)
* @returns A new IpAddress with the result
* @throws Error on overflow
*/
add(n: number): IpAddress {
let octets = [...this.octets]
n = Math.floor(n)
@@ -92,6 +125,12 @@ export class IpAddress {
}
return IpAddress.fromOctets(octets)
}
/**
* Returns a new IpAddress decremented by `n`.
* @param n - The integer amount to subtract (fractional part is truncated)
* @returns A new IpAddress with the result
* @throws Error on underflow
*/
sub(n: number): IpAddress {
let octets = [...this.octets]
n = Math.floor(n)
@@ -109,6 +148,11 @@ export class IpAddress {
}
return IpAddress.fromOctets(octets)
}
/**
* Compares this address to another, returning -1, 0, or 1.
* @param other - An IpAddress instance or string to compare against
* @returns -1 if this < other, 0 if equal, 1 if this > other
*/
cmp(other: string | IpAddress): -1 | 0 | 1 {
if (typeof other === 'string') other = IpAddress.parse(other)
const len = Math.max(this.octets.length, other.octets.length)
@@ -123,6 +167,7 @@ export class IpAddress {
}
return 0
}
/** The string representation of this IP address (e.g. `"192.168.1.1"` or `"::1"`). Cached and recomputed only when octets change. */
get address(): string {
if (
this.renderedOctets.length === this.octets.length &&
@@ -160,6 +205,17 @@ export class IpAddress {
}
}
/**
* Represents an IP network (CIDR notation) combining an IP address with a prefix length.
* Extends IpAddress with network-specific operations like containment checks and broadcast calculation.
*
* @example
* ```ts
* const net = IpNet.parse("192.168.1.0/24")
* net.contains("192.168.1.100") // true
* net.broadcast() // 192.168.1.255
* ```
*/
export class IpNet extends IpAddress {
private constructor(
octets: number[],
@@ -168,18 +224,35 @@ export class IpNet extends IpAddress {
) {
super(octets, address)
}
/**
* Creates an IpNet from an IpAddress and prefix length.
* @param ip - The base IP address
* @param prefix - The CIDR prefix length (0-32 for IPv4, 0-128 for IPv6)
* @returns A new IpNet instance
* @throws Error if prefix exceeds the address bit length
*/
static fromIpPrefix(ip: IpAddress, prefix: number): IpNet {
if (prefix > ip.octets.length * 8) {
throw new Error('invalid prefix')
}
return new IpNet(ip.octets, prefix, ip.address)
}
/**
* Parses a CIDR notation string (e.g. `"192.168.1.0/24"`) into an IpNet.
* @param ipnet - The CIDR string to parse
* @returns A new IpNet instance
*/
static parse(ipnet: string): IpNet {
const [address, prefixStr] = ipnet.split('/', 2)
const ip = IpAddress.parse(address)
const prefix = Number(prefixStr)
return IpNet.fromIpPrefix(ip, prefix)
}
/**
* Checks whether this network contains the given address or subnet.
* @param address - An IP address or subnet (string, IpAddress, or IpNet)
* @returns True if the address falls within this network's range
*/
contains(address: string | IpAddress | IpNet): boolean {
if (typeof address === 'string') address = IpAddress.parse(address)
if (address instanceof IpNet && address.prefix < this.prefix) return false
@@ -197,6 +270,7 @@ export class IpNet extends IpAddress {
const mask = 255 ^ (255 >> prefix)
return (this.octets[idx] & mask) === (address.octets[idx] & mask)
}
/** Returns the network address (all host bits zeroed) for this subnet. */
zero(): IpAddress {
let octets: number[] = []
let prefix = this.prefix
@@ -213,6 +287,7 @@ export class IpNet extends IpAddress {
return IpAddress.fromOctets(octets)
}
/** Returns the broadcast address (all host bits set to 1) for this subnet. */
broadcast(): IpAddress {
let octets: number[] = []
let prefix = this.prefix
@@ -229,11 +304,13 @@ export class IpNet extends IpAddress {
return IpAddress.fromOctets(octets)
}
/** The CIDR notation string for this network (e.g. `"192.168.1.0/24"`). */
get ipnet() {
return `${this.address}/${this.prefix}`
}
}
/** All private IPv4 ranges: loopback (127.0.0.0/8), Class A (10.0.0.0/8), Class B (172.16.0.0/12), Class C (192.168.0.0/16). */
export const PRIVATE_IPV4_RANGES = [
IpNet.parse('127.0.0.0/8'),
IpNet.parse('10.0.0.0/8'),
@@ -241,8 +318,12 @@ export const PRIVATE_IPV4_RANGES = [
IpNet.parse('192.168.0.0/16'),
]
/** IPv4 loopback network (127.0.0.0/8). */
export const IPV4_LOOPBACK = IpNet.parse('127.0.0.0/8')
/** IPv6 loopback address (::1/128). */
export const IPV6_LOOPBACK = IpNet.parse('::1/128')
/** IPv6 link-local network (fe80::/10). */
export const IPV6_LINK_LOCAL = IpNet.parse('fe80::/10')
/** Carrier-Grade NAT (CGNAT) address range (100.64.0.0/10), per RFC 6598. */
export const CGNAT = IpNet.parse('100.64.0.0/10')

View File

@@ -1,3 +1,16 @@
/**
* Wraps a function so it is only executed once. Subsequent calls return the cached result.
*
* @param fn - The function to execute at most once
* @returns A wrapper that lazily evaluates `fn` on first call and caches the result
*
* @example
* ```ts
* const getConfig = once(() => loadExpensiveConfig())
* getConfig() // loads config
* getConfig() // returns cached result
* ```
*/
export function once<B>(fn: () => B): () => B {
let result: [B] | [] = []
return () => {

View File

@@ -1,57 +1,68 @@
import { Pattern } from '../actions/input/inputSpecTypes'
import * as regexes from './regexes'
/** Pattern for validating IPv6 addresses. */
export const ipv6: Pattern = {
regex: regexes.ipv6.matches(),
description: 'Must be a valid IPv6 address',
}
/** Pattern for validating IPv4 addresses. */
export const ipv4: Pattern = {
regex: regexes.ipv4.matches(),
description: 'Must be a valid IPv4 address',
}
/** Pattern for validating hostnames (RFC-compliant). */
export const hostname: Pattern = {
regex: regexes.hostname.matches(),
description: 'Must be a valid hostname',
}
/** Pattern for validating `.local` mDNS hostnames. */
export const localHostname: Pattern = {
regex: regexes.localHostname.matches(),
description: 'Must be a valid ".local" hostname',
}
/** Pattern for validating HTTP/HTTPS URLs. */
export const url: Pattern = {
regex: regexes.url.matches(),
description: 'Must be a valid URL',
}
/** Pattern for validating `.local` URLs (mDNS/LAN). */
export const localUrl: Pattern = {
regex: regexes.localUrl.matches(),
description: 'Must be a valid ".local" URL',
}
/** Pattern for validating ASCII-only strings (printable characters). */
export const ascii: Pattern = {
regex: regexes.ascii.matches(),
description:
'May only contain ASCII characters. See https://www.w3schools.com/charsets/ref_html_ascii.asp',
}
/** Pattern for validating fully qualified domain names (FQDNs). */
export const domain: Pattern = {
regex: regexes.domain.matches(),
description: 'Must be a valid Fully Qualified Domain Name',
}
/** Pattern for validating email addresses. */
export const email: Pattern = {
regex: regexes.email.matches(),
description: 'Must be a valid email address',
}
/** Pattern for validating email addresses, optionally with a display name (e.g. `"John Doe <john@example.com>"`). */
export const emailWithName: Pattern = {
regex: regexes.emailWithName.matches(),
description: 'Must be a valid email address, optionally with a name',
}
/** Pattern for validating base64-encoded strings. */
export const base64: Pattern = {
regex: regexes.base64.matches(),
description:

View File

@@ -1,3 +1,16 @@
/**
* A wrapper around RegExp that supports composition into larger patterns.
* Provides helpers to produce anchored (full-match), grouped (sub-expression),
* and unanchored (contains) regex source strings.
*
* @example
* ```ts
* const digit = new ComposableRegex(/\d+/)
* digit.matches() // "^\\d+$"
* digit.contains() // "\\d+"
* digit.asExpr() // "(\\d+)"
* ```
*/
export class ComposableRegex {
readonly regex: RegExp
constructor(regex: RegExp | string) {
@@ -7,69 +20,94 @@ export class ComposableRegex {
this.regex = new RegExp(regex)
}
}
/** Wraps the regex source in a capturing group so it can be embedded inside a larger pattern. */
asExpr(): string {
  return '(' + this.regex.source + ')'
}
/** Anchors the regex source with `^` and `$` so it must match the entire string. */
matches(): string {
  return '^' + this.regex.source + '$'
}
/** Exposes the raw, unanchored regex source for substring/containment matching. */
contains(): string {
  const { source } = this.regex
  return source
}
}
/**
 * Escapes every regex metacharacter in `str` so the result matches the
 * input literally when interpolated into a RegExp.
 * @param str - The raw string to escape
 * @returns A regex-safe version of `str`
 */
export const escapeLiteral = (str: string) =>
  str.replace(/[.*+?^${}()|[\]\\]/g, (ch) => `\\${ch}`)
/** Composable regex for matching IPv6 addresses (all standard forms including `::` shorthand). */
// https://ihateregex.io/expr/ipv6/
export const ipv6 = new ComposableRegex(
/(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))/,
)
/** Composable regex for matching IPv4 addresses in dotted-decimal notation. */
// https://ihateregex.io/expr/ipv4/
export const ipv4 = new ComposableRegex(
/(\b25[0-5]|\b2[0-4][0-9]|\b[01]?[0-9][0-9]?)(\.(25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)){3}/,
)
/** Composable regex for matching RFC-compliant hostnames. */
export const hostname = new ComposableRegex(
/(([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9\-]*[a-zA-Z0-9])\.)*([A-Za-z0-9]|[A-Za-z0-9][A-Za-z0-9\-]*[A-Za-z0-9])/,
)
/** Composable regex for matching `.local` mDNS hostnames. */
export const localHostname = new ComposableRegex(
/[-a-zA-Z0-9@:%._\+~#=]{1,256}\.local/,
)
/** Composable regex for matching HTTP/HTTPS URLs. */
// https://ihateregex.io/expr/url/
export const url = new ComposableRegex(
/https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()!@:%_\+.~#?&\/\/=]*)/,
)
/** Composable regex for matching `.local` URLs (mDNS/LAN). */
export const localUrl = new ComposableRegex(
/https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.local\b([-a-zA-Z0-9()!@:%_\+.~#?&\/\/=]*)/,
)
/** Composable regex for matching printable ASCII characters (space through tilde). */
// https://ihateregex.io/expr/ascii/
export const ascii = new ComposableRegex(/[ -~]*/)
/** Composable regex for matching fully qualified domain names. */
export const domain = new ComposableRegex(/[A-Za-z0-9.-]+\.[A-Za-z]{2,}/)
/** Composable regex for matching email addresses. */
// https://www.regular-expressions.info/email.html
export const email = new ComposableRegex(`[A-Za-z0-9._%+-]+@${domain.asExpr()}`)
/** Composable regex for matching email addresses optionally preceded by a display name (e.g. `"Name <email>"`). */
export const emailWithName = new ComposableRegex(
`${email.asExpr()}|([^<]*<${email.asExpr()}>)`,
)
/** Composable regex for matching base64-encoded strings (no whitespace). */
//https://rgxdb.com/r/1NUN74O6
export const base64 = new ComposableRegex(
/(?:[a-zA-Z0-9+\/]{4})*(?:|(?:[a-zA-Z0-9+\/]{3}=)|(?:[a-zA-Z0-9+\/]{2}==)|(?:[a-zA-Z0-9+\/]{1}===))/,
)
/** Composable regex for matching base64-encoded strings that may contain interspersed whitespace. */
//https://rgxdb.com/r/1NUN74O6
export const base64Whitespace = new ComposableRegex(
/(?:([a-zA-Z0-9+\/]\s*){4})*(?:|(?:([a-zA-Z0-9+\/]\s*){3}=)|(?:([a-zA-Z0-9+\/]\s*){2}==)|(?:([a-zA-Z0-9+\/]\s*){1}===))/,
)
/**
* Creates a composable regex for matching PEM-encoded blocks with the given label.
* @param label - The PEM label (e.g. `"CERTIFICATE"`, `"RSA PRIVATE KEY"`)
* @returns A ComposableRegex matching `-----BEGIN <label>-----...-----END <label>-----`
*/
export const pem = (label: string) =>
new ComposableRegex(
`-----BEGIN ${escapeLiteral(label)}-----\r?\n[a-zA-Z0-9+/\n\r=]*?\r?\n-----END ${escapeLiteral(label)}-----`,

View File

@@ -1,6 +1,22 @@
import { AllowReadonly } from '../types'
/**
* Normalizes a command into an argv-style string array.
* If given a string, wraps it as `["sh", "-c", command]`.
* If given a tuple, returns it as-is.
*
* @param command - A shell command string or a pre-split argv tuple
* @returns An argv-style string array suitable for process execution
*
* @example
* ```ts
* splitCommand("echo hello") // ["sh", "-c", "echo hello"]
* splitCommand(["node", "index.js"]) // ["node", "index.js"]
* ```
*/
export const splitCommand = (
command: string | [string, ...string[]],
command: string | AllowReadonly<[string, ...string[]]>,
): string[] => {
if (Array.isArray(command)) return command
return ['sh', '-c', command]
return ['sh', '-c', command as string]
}

View File

@@ -1,3 +1,10 @@
/**
* Extracts a string result from a stdout/stderr pair.
* Returns `stdout` on success; rejects with `stderr` if it is non-empty.
*
* @param x - An object containing `stdout` and `stderr` strings
* @returns A promise resolving to `stdout`, or rejecting with `stderr`
*/
export async function stringFromStdErrOut(x: {
stdout: string
stderr: string

View File

@@ -1,21 +1,47 @@
import * as T from '../types'
/**
* Flattens an intersection type into a single object type for improved readability in IDE tooltips.
* Arrays pass through unchanged; objects are remapped to a single flat type.
*
* @example
* ```ts
* type Merged = FlattenIntersection<{ a: 1 } & { b: 2 }>
* // Result: { a: 1; b: 2 }
* ```
*/
// prettier-ignore
export type FlattenIntersection<T> =
export type FlattenIntersection<T> =
T extends ArrayLike<any> ? T :
T extends object ? {} & {[P in keyof T]: T[P]} :
T;
/** Shorthand alias for {@link FlattenIntersection}. */
export type _<T> = FlattenIntersection<T>
/**
* Type guard that checks whether a value is a {@link T.KnownError}.
* Returns true if the value is an object containing an `error` or `error-code` property.
*
* @param e - The value to check
* @returns True if `e` is a KnownError
*/
export const isKnownError = (e: unknown): e is T.KnownError =>
e instanceof Object && ('error' in e || 'error-code' in e)
declare const affine: unique symbol
/**
* A branded/nominal type wrapper using a unique symbol to make structurally identical types incompatible.
* Useful for creating distinct type identities at the type level.
*/
export type Affine<A> = { [affine]: A }
type NeverPossible = { [affine]: string }
/**
* Evaluates to `never` if `A` is `any`, otherwise resolves to `A`.
* Useful for preventing `any` from silently propagating through generic constraints.
*/
export type NoAny<A> = NeverPossible extends A
? keyof NeverPossible extends keyof A
? never
@@ -54,6 +80,14 @@ type Numbers = '0' | '1' | '2' | '3' | '4' | '5' | '6' | '7' | '8' | '9'
type CapitalChars = CapitalLetters | Numbers
/**
* Converts a PascalCase or camelCase string type to kebab-case at the type level.
*
* @example
* ```ts
* type Result = ToKebab<"FooBar"> // "foo-bar"
* ```
*/
export type ToKebab<S extends string> = S extends string
? S extends `${infer Head}${CapitalChars}${infer Tail}` // string has a capital char somewhere
? Head extends '' // there is a capital char in the first position
@@ -101,6 +135,7 @@ export type ToKebab<S extends string> = S extends string
: S /* 'abc' */
: never
/** A generic object type with string keys and unknown values. */
export type StringObject = Record<string, unknown>
function test() {

14
sdk/base/lib/zExport.d.ts vendored Normal file
View File

@@ -0,0 +1,14 @@
import { z as _z } from 'zod'
import type { DeepPartial } from './types'
/** Function type: maps a zod schema validating `T` to one validating `DeepPartial<T>`. */
type ZodDeepPartial = <T>(a: _z.ZodType<T>) => _z.ZodType<DeepPartial<T>>
/** Function type: returns a schema validating the same `T` (objects are loosened, type unchanged). */
type ZodDeepLoose = <T>(a: _z.ZodType<T>) => _z.ZodType<T>
// Module augmentation so consumers see the helpers added to `z` at runtime by zExport.js.
declare module 'zod' {
  namespace z {
    const deepPartial: ZodDeepPartial
    const deepLoose: ZodDeepLoose
  }
}
export { _z as z }

92
sdk/base/lib/zExport.js Normal file
View File

@@ -0,0 +1,92 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const zod_1 = require("zod");
const zod_deep_partial_1 = require("zod-deep-partial");
/**
 * Recursively makes every ZodObject in a schema "loose" — extra keys are
 * preserved rather than stripped — at every nesting level.
 *
 * Uses `_zod.def.type` duck-typing instead of `instanceof` to avoid issues
 * with mismatched zod versions (the comment in the original notes this).
 *
 * @param schema - A zod schema; non-schema values (no `_zod.def`) pass through unchanged
 * @returns The loosened schema, or `schema` itself for leaf/unsupported types
 */
function deepLoose(schema) {
  // Duck-type check: real zod schemas expose an internal `_zod.def` descriptor.
  const def = schema._zod?.def;
  if (!def) return schema;
  let result;
  switch (def.type) {
    // Wrapper types: recurse into the inner schema, then re-apply the wrapper.
    case "optional":
      result = deepLoose(def.innerType).optional();
      break;
    case "nullable":
      result = deepLoose(def.innerType).nullable();
      break;
    // Core case: rebuild the object's shape key-by-key with loose semantics.
    case "object": {
      const newShape = {};
      for (const key in schema.shape) {
        newShape[key] = deepLoose(schema.shape[key]);
      }
      result = zod_1.z.looseObject(newShape);
      break;
    }
    // Container/combinator types: recurse into each child schema.
    case "array":
      result = zod_1.z.array(deepLoose(def.element));
      break;
    case "union":
      result = zod_1.z.union(def.options.map((o) => deepLoose(o)));
      break;
    case "intersection":
      result = zod_1.z.intersection(deepLoose(def.left), deepLoose(def.right));
      break;
    case "record":
      // NOTE(review): only the value schema is loosened; the key schema passes through as-is.
      result = zod_1.z.record(def.keyType, deepLoose(def.valueType));
      break;
    case "tuple":
      result = zod_1.z.tuple(def.items.map((i) => deepLoose(i)));
      break;
    case "lazy":
      // Defer recursion through the getter so self-referential schemas don't expand infinitely.
      result = zod_1.z.lazy(() => deepLoose(def.getter()));
      break;
    // Leaf or unsupported types (string, number, enum, ...) are returned unchanged.
    default:
      return schema;
  }
  return result;
}
// Add deepPartial and deepLoose to z at runtime so `z.deepPartial(...)` and
// `z.deepLoose(...)` are callable by SDK consumers (typed via zExport.d.ts).
zod_1.z.deepPartial = (a) =>
  deepLoose((0, zod_deep_partial_1.zodDeepPartial)(a));
zod_1.z.deepLoose = deepLoose;
// Override z.object to produce loose objects by default (extra keys are preserved, not stripped).
const _origObject = zod_1.z.object;
const _patchedObject = (...args) => _origObject(...args).loose();
// In CJS (Node.js), patch the source module in require.cache where 'object' is a writable property;
// the CJS getter chain (index → external → schemas) then relays the patched version.
// We walk only the zod entry module's dependency tree and match by identity (=== origObject).
try {
  const _zodModule = require.cache[require.resolve("zod")];
  for (const child of _zodModule?.children ?? []) {
    for (const grandchild of child.children ?? []) {
      // Only swap exports whose `object` is the exact function captured above,
      // and only when the property descriptor is writable (assigning a
      // non-writable property would throw in strict mode).
      const desc = Object.getOwnPropertyDescriptor(
        grandchild.exports,
        "object",
      );
      if (desc?.value === _origObject && desc.writable) {
        grandchild.exports.object = _patchedObject;
      }
    }
  }
} catch (_) {
  // Not in CJS/Node environment (e.g. browser) — require.cache unavailable
}
// z.object is a non-configurable getter on the zod namespace, so we can't override it directly.
// Shadow it by exporting a new object with _z as prototype and our patched object on the instance.
// Prototype delegation keeps every other zod export (z.string, z.union, ...) reachable.
const z = Object.create(zod_1.z, {
  object: {
    value: _patchedObject,
    writable: true,
    configurable: true,
    enumerable: true,
  },
});
exports.z = z;

View File

@@ -14,7 +14,8 @@
"isomorphic-fetch": "^3.0.0",
"mime": "^4.0.7",
"yaml": "^2.7.1",
"zod": "^4.3.6"
"zod": "^4.3.6",
"zod-deep-partial": "^1.2.0"
},
"devDependencies": {
"@types/jest": "^29.4.0",
@@ -5006,9 +5007,19 @@
"resolved": "https://registry.npmjs.org/zod/-/zod-4.3.6.tgz",
"integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==",
"license": "MIT",
"peer": true,
"funding": {
"url": "https://github.com/sponsors/colinhacks"
}
},
"node_modules/zod-deep-partial": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/zod-deep-partial/-/zod-deep-partial-1.2.0.tgz",
"integrity": "sha512-dXfte+/YN0aFYs0kMGz6xfPQWEYNaKz/LsbfxrbwL+oY3l/aR9HOBTyWCpHZ5AJXMGWKSq+0X0oVPpRliUFcjQ==",
"license": "MIT",
"peerDependencies": {
"zod": "^4.1.13"
}
}
}
}

View File

@@ -28,7 +28,8 @@
"isomorphic-fetch": "^3.0.0",
"mime": "^4.0.7",
"yaml": "^2.7.1",
"zod": "^4.3.6"
"zod": "^4.3.6",
"zod-deep-partial": "^1.2.0"
},
"prettier": {
"trailingComma": "all",

View File

@@ -9,7 +9,12 @@ import {
import { ServiceInterfaceType, Effects } from '../../base/lib/types'
import * as patterns from '../../base/lib/util/patterns'
import { Backups } from './backup/Backups'
import { smtpInputSpec } from '../../base/lib/actions/input/inputSpecConstants'
import {
smtpInputSpec,
systemSmtpSpec,
customSmtp,
smtpProviderVariants,
} from '../../base/lib/actions/input/inputSpecConstants'
import { Daemon, Daemons } from './mainFn/Daemons'
import { checkPortListening } from './health/checkFns/checkPortListening'
import { checkWebUrl, runHealthScript } from './health/checkFns'
@@ -62,6 +67,7 @@ import {
import { getOwnServiceInterfaces } from '../../base/lib/util/getServiceInterfaces'
import { Volumes, createVolumes } from './util/Volume'
/** The minimum StartOS version required by this SDK release */
export const OSVersion = testTypeVersion('0.4.0-alpha.20')
// prettier-ignore
@@ -71,11 +77,29 @@ type AnyNeverCond<T extends any[], Then, Else> =
T extends [any, ...infer U] ? AnyNeverCond<U,Then, Else> :
never
/**
* The top-level SDK facade for building StartOS service packages.
*
* Use `StartSdk.of()` to create an uninitialized instance, then call `.withManifest()`
* to bind it to a manifest, and finally `.build()` to obtain the full toolkit of helpers
* for actions, daemons, backups, interfaces, health checks, and more.
*
* @typeParam Manifest - The service manifest type; starts as `never` until `.withManifest()` is called.
*/
export class StartSdk<Manifest extends T.SDKManifest> {
private constructor(readonly manifest: Manifest) {}
/**
* Create an uninitialized StartSdk instance. Call `.withManifest()` next.
* @returns A new StartSdk with no manifest bound.
*/
static of() {
return new StartSdk<never>(null as never)
}
/**
* Bind a manifest to the SDK, producing a typed SDK instance.
* @param manifest - The service manifest definition
* @returns A new StartSdk instance parameterized by the given manifest type
*/
withManifest<Manifest extends T.SDKManifest = never>(manifest: Manifest) {
return new StartSdk<Manifest>(manifest)
}
@@ -88,6 +112,14 @@ export class StartSdk<Manifest extends T.SDKManifest> {
return null as any
}
/**
* Finalize the SDK and return the full set of helpers for building a StartOS service.
*
* This method is only callable after `.withManifest()` has been called (enforced at the type level).
*
* @param isReady - Type-level gate; resolves to `true` only when a manifest is bound.
* @returns An object containing all SDK utilities: actions, daemons, backups, interfaces, health checks, volumes, triggers, and more.
*/
build(isReady: AnyNeverCond<[Manifest], 'Build not ready', true>) {
type NestedEffects = 'subcontainer' | 'store' | 'action' | 'plugin'
type InterfaceEffects =
@@ -137,13 +169,19 @@ export class StartSdk<Manifest extends T.SDKManifest> {
}
return {
/** The bound service manifest */
manifest: this.manifest,
/** Volume path helpers derived from the manifest volume definitions */
volumes: createVolumes(this.manifest),
...startSdkEffectWrapper,
/** Persist the current data version to the StartOS effect system */
setDataVersion,
/** Retrieve the current data version from the StartOS effect system */
getDataVersion,
action: {
/** Execute an action by its ID, optionally providing input */
run: actions.runAction,
/** Create a task notification for a specific package's action */
createTask: <T extends ActionInfo<T.ActionId, any>>(
effects: T.Effects,
packageId: T.PackageId,
@@ -158,6 +196,7 @@ export class StartSdk<Manifest extends T.SDKManifest> {
severity,
options: options,
}),
/** Create a task notification for this service's own action (uses manifest.id automatically) */
createOwnTask: <T extends ActionInfo<T.ActionId, any>>(
effects: T.Effects,
action: T,
@@ -171,9 +210,20 @@ export class StartSdk<Manifest extends T.SDKManifest> {
severity,
options: options,
}),
/**
* Clear one or more task notifications by their replay IDs
* @param effects - The effects context
* @param replayIds - One or more replay IDs of the tasks to clear
*/
clearTask: (effects: T.Effects, ...replayIds: string[]) =>
effects.action.clearTasks({ only: replayIds }),
},
/**
* Check whether the specified (or all) dependencies are satisfied.
* @param effects - The effects context
* @param packageIds - Optional subset of dependency IDs to check; defaults to all
* @returns An object describing which dependencies are satisfied and which are not
*/
checkDependencies: checkDependencies as <
DependencyId extends keyof Manifest['dependencies'] &
T.PackageId = keyof Manifest['dependencies'] & T.PackageId,
@@ -182,11 +232,25 @@ export class StartSdk<Manifest extends T.SDKManifest> {
packageIds?: DependencyId[],
) => Promise<CheckDependencies<DependencyId>>,
serviceInterface: {
/** Retrieve a single service interface belonging to this package by its ID */
getOwn: getOwnServiceInterface,
/** Retrieve a single service interface from any package */
get: getServiceInterface,
/** Retrieve all service interfaces belonging to this package */
getAllOwn: getOwnServiceInterfaces,
/** Retrieve all service interfaces, optionally filtering by package */
getAll: getServiceInterfaces,
},
/**
* Get the container IP address with reactive subscription support.
*
* Returns an object with multiple read strategies: `const()` for a value
* that retries on change, `once()` for a single read, `watch()` for an async
* generator, `onChange()` for a callback, and `waitFor()` to block until a predicate is met.
*
* @param effects - The effects context
* @param options - Optional filtering options (e.g. `containerId`)
*/
getContainerIp: (
effects: T.Effects,
options: Omit<
@@ -279,9 +343,22 @@ export class StartSdk<Manifest extends T.SDKManifest> {
},
MultiHost: {
/**
* Create a new MultiHost instance for binding ports and exporting interfaces.
* @param effects - The effects context
* @param id - A unique identifier for this multi-host group
*/
of: (effects: Effects, id: string) => new MultiHost({ id, effects }),
},
/**
* Return `null` if the given string is empty, otherwise return the string unchanged.
* Useful for converting empty user input into explicit null values.
*/
nullIfEmpty,
/**
* Indicate that a daemon should use the container image's configured entrypoint.
* @param overrideCmd - Optional command arguments to append after the entrypoint
*/
useEntrypoint: (overrideCmd?: string[]) =>
new T.UseEntrypoint(overrideCmd),
/**
@@ -396,7 +473,12 @@ export class StartSdk<Manifest extends T.SDKManifest> {
run: Run<{}>,
) => Action.withoutInput(id, metadata, run),
},
inputSpecConstants: { smtpInputSpec },
inputSpecConstants: {
smtpInputSpec,
systemSmtpSpec,
customSmtp,
smtpProviderVariants,
},
/**
* @description Use this function to create a service interface.
* @param effects
@@ -444,21 +526,37 @@ export class StartSdk<Manifest extends T.SDKManifest> {
masked: boolean
},
) => new ServiceInterfaceBuilder({ ...options, effects }),
/**
* Get the system SMTP configuration with reactive subscription support.
* @param effects - The effects context
*/
getSystemSmtp: <E extends Effects>(effects: E) =>
new GetSystemSmtp(effects),
/**
* Get the outbound network gateway address with reactive subscription support.
* @param effects - The effects context
*/
getOutboundGateway: <E extends Effects>(effects: E) =>
new GetOutboundGateway(effects),
/**
* Get an SSL certificate for the given hostnames with reactive subscription support.
* @param effects - The effects context
* @param hostnames - The hostnames to obtain a certificate for
* @param algorithm - Optional algorithm preference (e.g. Ed25519)
*/
getSslCertificate: <E extends Effects>(
effects: E,
hostnames: string[],
algorithm?: T.Algorithm,
) => new GetSslCertificate(effects, hostnames, algorithm),
/** Retrieve the manifest of any installed service package by its ID */
getServiceManifest,
healthCheck: {
checkPortListening,
checkWebUrl,
runHealthScript,
},
/** Common utility patterns (e.g. hostname regex, port validators) */
patterns,
/**
* @description Use this function to list every Action offered by the service. Actions will be displayed in the provided order.
@@ -638,21 +736,47 @@ export class StartSdk<Manifest extends T.SDKManifest> {
* ```
*/
setupInterfaces: setupServiceInterfaces,
/**
* Define the main entrypoint for the service. The provided function should
* configure and return a `Daemons` instance describing all long-running processes.
* @param fn - Async function that receives `effects` and returns a `Daemons` instance
*/
setupMain: (
fn: (o: { effects: Effects }) => Promise<Daemons<Manifest, any>>,
) => setupMain<Manifest>(fn),
/** Built-in trigger strategies for controlling health-check polling intervals */
trigger: {
/** Default trigger: polls at a fixed interval */
defaultTrigger,
/** Trigger with a cooldown period between checks */
cooldownTrigger,
/** Switches to a different interval after the first successful check */
changeOnFirstSuccess,
/** Uses different intervals based on success vs failure results */
successFailure,
},
Mounts: {
/**
* Create an empty Mounts builder for declaring volume, asset, dependency, and backup mounts.
* @returns A new Mounts instance with no mounts configured
*/
of: Mounts.of<Manifest>,
},
Backups: {
/**
* Create a Backups configuration that backs up entire volumes by name.
* @param volumeNames - Volume IDs from the manifest to include in backups
*/
ofVolumes: Backups.ofVolumes<Manifest>,
/**
* Create a Backups configuration from explicit sync path pairs.
* @param syncs - Array of `{ dataPath, backupPath }` objects
*/
ofSyncs: Backups.ofSyncs<Manifest>,
/**
* Create a Backups configuration with custom rsync options (e.g. exclude patterns).
* @param options - Partial sync options to override defaults
*/
withOptions: Backups.withOptions<Manifest>,
},
InputSpec: {
@@ -687,11 +811,20 @@ export class StartSdk<Manifest extends T.SDKManifest> {
InputSpec.of<Spec>(spec),
},
Daemon: {
/**
* Create a single Daemon that wraps a long-running process with automatic restart logic.
* Returns a curried function: call with `(effects, subcontainer, exec)`.
*/
get of() {
return Daemon.of<Manifest>()
},
},
Daemons: {
/**
* Create a new Daemons builder for defining the service's daemon topology.
* Chain `.addDaemon()` calls to register each long-running process.
* @param effects - The effects context
*/
of(effects: Effects) {
return Daemons.of<Manifest>({ effects })
},
@@ -798,6 +931,19 @@ export class StartSdk<Manifest extends T.SDKManifest> {
}
}
/**
* Run a one-shot command inside a temporary subcontainer.
*
* Creates a subcontainer, executes the command, and destroys the subcontainer when finished.
* Throws an {@link ExitError} if the command exits with a non-zero code or signal.
*
* @param effects - The effects context
* @param image - The container image to use
* @param command - The command to execute (string array or UseEntrypoint)
* @param options - Mount and command options
* @param name - Optional human-readable name for debugging
* @returns The stdout and stderr output of the command
*/
export async function runCommand<Manifest extends T.SDKManifest>(
effects: Effects,
image: { imageId: keyof Manifest['images'] & T.ImageId; sharedRun?: boolean },

View File

@@ -5,10 +5,12 @@ import { Affine, asError } from '../util'
import { ExtendedVersion, VersionRange } from '../../../base/lib'
import { InitKind, InitScript } from '../../../base/lib/inits'
/** Default rsync options used for backup and restore operations */
export const DEFAULT_OPTIONS: T.SyncOptions = {
delete: true,
exclude: [],
}
/** A single source-to-destination sync pair for backup and restore */
export type BackupSync<Volumes extends string> = {
dataPath: `/media/startos/volumes/${Volumes}/${string}`
backupPath: `/media/startos/backup/${string}`
@@ -17,8 +19,18 @@ export type BackupSync<Volumes extends string> = {
restoreOptions?: Partial<T.SyncOptions>
}
/** Effects type narrowed for backup/restore contexts, preventing reuse outside that scope */
export type BackupEffects = T.Effects & Affine<'Backups'>
/**
* Configures backup and restore operations using rsync.
*
* Supports syncing entire volumes or custom path pairs, with optional pre/post hooks
* for both backup and restore phases. Implements {@link InitScript} so it can be used
* as a restore-init step in `setupInit`.
*
* @typeParam M - The service manifest type
*/
export class Backups<M extends T.SDKManifest> implements InitScript {
private constructor(
private options = DEFAULT_OPTIONS,
@@ -31,6 +43,11 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
private postRestore = async (effects: BackupEffects) => {},
) {}
/**
* Create a Backups configuration that backs up entire volumes by name.
* Each volume is synced to a corresponding directory under `/media/startos/backup/volumes/`.
* @param volumeNames - One or more volume IDs from the manifest
*/
static ofVolumes<M extends T.SDKManifest = never>(
...volumeNames: Array<M['volumes'][number]>
): Backups<M> {
@@ -42,18 +59,31 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
)
}
/**
* Create a Backups configuration from explicit source/destination sync pairs.
* @param syncs - Array of `{ dataPath, backupPath }` objects with optional per-sync options
*/
static ofSyncs<M extends T.SDKManifest = never>(
...syncs: BackupSync<M['volumes'][number]>[]
) {
return syncs.reduce((acc, x) => acc.addSync(x), new Backups<M>())
}
/**
* Create an empty Backups configuration with custom default rsync options.
* Chain `.addVolume()` or `.addSync()` to add sync targets.
* @param options - Partial rsync options to override defaults (e.g. `{ exclude: ['cache'] }`)
*/
static withOptions<M extends T.SDKManifest = never>(
options?: Partial<T.SyncOptions>,
) {
return new Backups<M>({ ...DEFAULT_OPTIONS, ...options })
}
/**
* Override the default rsync options for both backup and restore.
* @param options - Partial rsync options to merge with current defaults
*/
setOptions(options?: Partial<T.SyncOptions>) {
this.options = {
...this.options,
@@ -62,6 +92,10 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
return this
}
/**
* Override rsync options used only during backup (not restore).
* @param options - Partial rsync options for the backup phase
*/
setBackupOptions(options?: Partial<T.SyncOptions>) {
this.backupOptions = {
...this.backupOptions,
@@ -70,6 +104,10 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
return this
}
/**
* Override rsync options used only during restore (not backup).
* @param options - Partial rsync options for the restore phase
*/
setRestoreOptions(options?: Partial<T.SyncOptions>) {
this.restoreOptions = {
...this.restoreOptions,
@@ -78,26 +116,47 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
return this
}
/**
* Register a hook to run before backup rsync begins (e.g. dump a database).
* @param fn - Async function receiving backup-scoped effects
*/
setPreBackup(fn: (effects: BackupEffects) => Promise<void>) {
this.preBackup = fn
return this
}
/**
* Register a hook to run after backup rsync completes.
* @param fn - Async function receiving backup-scoped effects
*/
setPostBackup(fn: (effects: BackupEffects) => Promise<void>) {
this.postBackup = fn
return this
}
/**
* Register a hook to run before restore rsync begins.
* @param fn - Async function receiving backup-scoped effects
*/
setPreRestore(fn: (effects: BackupEffects) => Promise<void>) {
this.preRestore = fn
return this
}
/**
* Register a hook to run after restore rsync completes.
* @param fn - Async function receiving backup-scoped effects
*/
setPostRestore(fn: (effects: BackupEffects) => Promise<void>) {
this.postRestore = fn
return this
}
/**
* Add a volume to the backup set by its ID.
* @param volume - The volume ID from the manifest
* @param options - Optional per-volume rsync overrides
*/
addVolume(
volume: M['volumes'][number],
options?: Partial<{
@@ -113,11 +172,19 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
})
}
/**
* Add a custom sync pair to the backup set.
* @param sync - A `{ dataPath, backupPath }` object with optional per-sync rsync options
*/
addSync(sync: BackupSync<M['volumes'][0]>) {
this.backupSet.push(sync)
return this
}
/**
* Execute the backup: runs pre-hook, rsyncs all configured paths, saves the data version, then runs post-hook.
* @param effects - The effects context
*/
async createBackup(effects: T.Effects) {
await this.preBackup(effects as BackupEffects)
for (const item of this.backupSet) {
@@ -149,6 +216,10 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
}
}
/**
* Execute the restore: runs pre-hook, rsyncs all configured paths from backup to data, restores the data version, then runs post-hook.
* @param effects - The effects context
*/
async restoreBackup(effects: T.Effects) {
this.preRestore(effects as BackupEffects)

View File

@@ -3,6 +3,11 @@ import * as T from '../../../base/lib/types'
import { _ } from '../util'
import { InitScript } from '../../../base/lib/inits'
/**
* Parameters for `setupBackups`. Either:
* - An array of volume IDs to back up entirely, or
* - An async factory function that returns a fully configured {@link Backups} instance
*/
export type SetupBackupsParams<M extends T.SDKManifest> =
| M['volumes'][number][]
| ((_: { effects: T.Effects }) => Promise<Backups<M>>)
@@ -12,6 +17,15 @@ type SetupBackupsRes = {
restoreInit: InitScript
}
/**
* Set up backup and restore exports for the service.
*
* Returns `{ createBackup, restoreInit }` which should be exported and wired into
* the service's init and backup entry points.
*
* @param options - Either an array of volume IDs or an async factory returning a Backups instance
* @returns An object with `createBackup` (the backup export) and `restoreInit` (an InitScript for restore)
*/
export function setupBackups<M extends T.SDKManifest>(
options: SetupBackupsParams<M>,
) {

View File

@@ -5,6 +5,7 @@ import { TriggerInput } from '../trigger/TriggerInput'
import { defaultTrigger } from '../trigger/defaultTrigger'
import { once, asError, Drop } from '../util'
/** Parameters for creating a health check */
export type HealthCheckParams = {
id: HealthCheckId
name: string
@@ -13,6 +14,13 @@ export type HealthCheckParams = {
fn(): Promise<HealthCheckResult> | HealthCheckResult
}
/**
* A periodic health check that reports daemon readiness to the StartOS UI.
*
* Polls at an interval controlled by a {@link Trigger}, reporting results as
* "starting" (during the grace period), "success", or "failure". Automatically
* pauses when the daemon is stopped and resumes when restarted.
*/
export class HealthCheck extends Drop {
private started: number | null = null
private setStarted = (started: number | null) => {
@@ -91,13 +99,21 @@ export class HealthCheck extends Drop {
}
})
}
/**
* Create a new HealthCheck instance and begin its polling loop.
* @param effects - The effects context for reporting health status
* @param options - Health check configuration (ID, name, check function, trigger, grace period)
* @returns A new HealthCheck instance
*/
static of(effects: Effects, options: HealthCheckParams): HealthCheck {
return new HealthCheck(effects, options)
}
/** Signal that the daemon is running, enabling health check polling */
start() {
if (this.started) return
this.setStarted(performance.now())
}
/** Signal that the daemon has stopped, pausing health check polling */
stop() {
if (!this.started) return
this.setStarted(null)

View File

@@ -1,3 +1,9 @@
import { T } from '../../../../base/lib'
/**
* The result of a single health check invocation.
*
* Contains a `result` field ("success", "failure", or "starting") and an optional `message`.
* This is the unnamed variant -- the health check name is added by the framework.
*/
export type HealthCheckResult = Omit<T.NamedHealthCheckResult, 'name'>

View File

@@ -3,6 +3,14 @@ export { checkPortListening } from './checkPortListening'
export { HealthCheckResult } from './HealthCheckResult'
export { checkWebUrl } from './checkWebUrl'
/**
* Create a promise that rejects after the specified timeout.
* Useful for racing against long-running health checks.
*
* @param ms - Timeout duration in milliseconds
* @param options.message - Custom error message (defaults to "Timed out")
* @returns A promise that never resolves, only rejects after the timeout
*/
export function timeoutPromise(ms: number, { message = 'Timed out' } = {}) {
return new Promise<never>((resolve, reject) =>
setTimeout(() => reject(new Error(message)), ms),

View File

@@ -8,6 +8,15 @@ import * as cp from 'child_process'
import * as fs from 'node:fs/promises'
import { DaemonCommandType, ExecCommandOptions, ExecFnOptions } from './Daemons'
/**
* Low-level controller for a single running process inside a subcontainer (or as a JS function).
*
* Manages the child process lifecycle: spawning, waiting, and signal-based termination.
* Used internally by {@link Daemon} to manage individual command executions.
*
* @typeParam Manifest - The service manifest type
* @typeParam C - The subcontainer type, or `null` for JS-only commands
*/
export class CommandController<
Manifest extends T.SDKManifest,
C extends SubContainer<Manifest> | null,
@@ -21,6 +30,13 @@ export class CommandController<
) {
super()
}
/**
* Factory method to create a new CommandController.
*
* Returns a curried async function: `(effects, subcontainer, exec) => CommandController`.
* If the exec spec has an `fn` property, runs the function; otherwise spawns a shell command
* in the subcontainer.
*/
static of<
Manifest extends T.SDKManifest,
C extends SubContainer<Manifest> | null,
@@ -130,6 +146,10 @@ export class CommandController<
}
}
}
/**
* Wait for the command to finish. Optionally terminate after a timeout.
* @param options.timeout - Milliseconds to wait before terminating. Defaults to no timeout.
*/
async wait({ timeout = NO_TIMEOUT } = {}) {
if (timeout > 0)
setTimeout(() => {
@@ -156,6 +176,15 @@ export class CommandController<
await this.subcontainer?.destroy()
}
}
/**
* Terminate the running command by sending a signal.
*
* Sends the specified signal (default: SIGTERM), then escalates to SIGKILL
* after the timeout expires. Destroys the subcontainer after the process exits.
*
* @param options.signal - The signal to send (default: SIGTERM)
* @param options.timeout - Milliseconds before escalating to SIGKILL
*/
async term({ signal = SIGTERM, timeout = this.sigtermTimeout } = {}) {
try {
if (!this.state.exited) {

View File

@@ -13,10 +13,15 @@ import { Oneshot } from './Oneshot'
const TIMEOUT_INCREMENT_MS = 1000
const MAX_TIMEOUT_MS = 30000
/**
* This is a wrapper around CommandController that has a state of off, where the command shouldn't be running
* and the others state of running, where it will keep a living running command
* A managed long-running process wrapper around {@link CommandController}.
*
* When started, the daemon automatically restarts its underlying command on failure
* with exponential backoff (up to 30 seconds). When stopped, the command is terminated
* gracefully. Implements {@link Drop} for automatic cleanup when the context is left.
*
* @typeParam Manifest - The service manifest type
* @typeParam C - The subcontainer type, or `null` for JS-only daemons
*/
export class Daemon<
Manifest extends T.SDKManifest,
C extends SubContainer<Manifest> | null = SubContainer<Manifest> | null,
@@ -33,9 +38,16 @@ export class Daemon<
) {
super()
}
/** Returns true if this daemon is a one-shot process (exits after success) */
isOneshot(): this is Oneshot<Manifest> {
return this.oneshot
}
/**
* Factory method to create a new Daemon.
*
* Returns a curried function: `(effects, subcontainer, exec) => Daemon`.
* The daemon auto-terminates when the effects context is left.
*/
static of<Manifest extends T.SDKManifest>() {
return <C extends SubContainer<Manifest> | null>(
effects: T.Effects,
@@ -57,6 +69,12 @@ export class Daemon<
return res
}
}
/**
* Start the daemon. If it is already running, this is a no-op.
*
* The daemon will automatically restart on failure with increasing backoff
* until {@link term} is called.
*/
async start() {
if (this.commandController) {
return
@@ -105,6 +123,17 @@ export class Daemon<
console.error(asError(err))
})
}
/**
* Terminate the daemon, stopping its underlying command.
*
* Sends the configured signal (default SIGTERM) and waits for the process to exit.
* Optionally destroys the subcontainer after termination.
*
* @param termOptions - Optional termination settings
* @param termOptions.signal - The signal to send (default: SIGTERM)
* @param termOptions.timeout - Milliseconds to wait before SIGKILL
* @param termOptions.destroySubcontainer - Whether to destroy the subcontainer after exit
*/
async term(termOptions?: {
signal?: NodeJS.Signals | undefined
timeout?: number | undefined
@@ -125,14 +154,20 @@ export class Daemon<
this.exiting = null
}
}
/** Get a reference-counted handle to the daemon's subcontainer, or null if there is none */
subcontainerRc(): SubContainerRc<Manifest> | null {
return this.subcontainer?.rc() ?? null
}
/** Check whether this daemon shares the same subcontainer as another daemon */
sharesSubcontainerWith(
other: Daemon<Manifest, SubContainer<Manifest> | null>,
): boolean {
return this.subcontainer?.guid === other.subcontainer?.guid
}
/**
* Register a callback to be invoked each time the daemon's process exits.
* @param fn - Callback receiving `true` on clean exit, `false` on error
*/
onExit(fn: (success: boolean) => void) {
this.onExitFns.push(fn)
}

View File

@@ -16,8 +16,15 @@ import { Daemon } from './Daemon'
import { CommandController } from './CommandController'
import { Oneshot } from './Oneshot'
/** Promisified version of `child_process.exec` */
export const cpExec = promisify(CP.exec)
/** Promisified version of `child_process.execFile` */
export const cpExecFile = promisify(CP.execFile)
/**
* Configuration for a daemon's health-check readiness probe.
*
* Determines how the system knows when a daemon is healthy and ready to serve.
*/
export type Ready = {
  /** A human-readable display name for the health check. If null, the health check itself will be hidden from the UI */
display: string | null
@@ -45,6 +52,10 @@ export type Ready = {
trigger?: Trigger
}
/**
* Options for running a daemon as a shell command inside a subcontainer.
* Includes the command to run, optional signal/timeout, environment, user, and stdio callbacks.
*/
export type ExecCommandOptions = {
command: T.CommandType
// Defaults to the DEFAULT_SIGTERM_TIMEOUT = 30_000ms
@@ -61,6 +72,11 @@ export type ExecCommandOptions = {
onStderr?: (chunk: Buffer | string | any) => void
}
/**
* Options for running a daemon via an async function that may optionally return
* a command to execute in the subcontainer. The function receives an `AbortSignal`
* for cooperative cancellation.
*/
export type ExecFnOptions<
Manifest extends T.SDKManifest,
C extends SubContainer<Manifest> | null,
@@ -73,6 +89,10 @@ export type ExecFnOptions<
sigtermTimeout?: number
}
/**
* The execution specification for a daemon: either an {@link ExecFnOptions} (async function)
* or an {@link ExecCommandOptions} (shell command, only valid when a subcontainer is provided).
*/
export type DaemonCommandType<
Manifest extends T.SDKManifest,
C extends SubContainer<Manifest> | null,
@@ -385,6 +405,13 @@ export class Daemons<Manifest extends T.SDKManifest, Ids extends string>
return null
}
/**
* Gracefully terminate all daemons in reverse dependency order.
*
* Daemons with no remaining dependents are shut down first, proceeding
* until all daemons have been terminated. Falls back to a bulk shutdown
* if a dependency cycle is detected.
*/
async term() {
const remaining = new Set(this.healthDaemons)
@@ -427,6 +454,10 @@ export class Daemons<Manifest extends T.SDKManifest, Ids extends string>
}
}
/**
* Start all registered daemons and their health checks.
* @returns This `Daemons` instance, now running
*/
async build() {
for (const daemon of this.healthDaemons) {
await daemon.updateStatus()

Some files were not shown because too many files have changed in this diff Show More