diff --git a/.github/actions/setup-build/action.yml b/.github/actions/setup-build/action.yml index b8efc4ebb..7cfcd4cc8 100644 --- a/.github/actions/setup-build/action.yml +++ b/.github/actions/setup-build/action.yml @@ -54,11 +54,11 @@ runs: - name: Set up Python if: inputs.setup-python == 'true' - uses: actions/setup-python@v5 + uses: actions/setup-python@v6 with: python-version: "3.x" - - uses: actions/setup-node@v4 + - uses: actions/setup-node@v6 with: node-version: ${{ inputs.nodejs-version }} cache: npm @@ -66,15 +66,15 @@ runs: - name: Set up Docker QEMU if: inputs.setup-docker == 'true' - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@v4 - name: Set up Docker Buildx if: inputs.setup-docker == 'true' - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@v4 - name: Configure sccache if: inputs.setup-sccache == 'true' - uses: actions/github-script@v7 + uses: actions/github-script@v8 with: script: | core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || ''); diff --git a/.github/workflows/start-cli.yaml b/.github/workflows/start-cli.yaml index d536e6faf..7baf2e7f0 100644 --- a/.github/workflows/start-cli.yaml +++ b/.github/workflows/start-cli.yaml @@ -68,7 +68,7 @@ jobs: - name: Mount tmpfs if: ${{ github.event.inputs.runner == 'fast' }} run: sudo mount -t tmpfs tmpfs . 
- - uses: actions/checkout@v4 + - uses: actions/checkout@v6 with: submodules: recursive - uses: ./.github/actions/setup-build @@ -82,7 +82,7 @@ jobs: SCCACHE_GHA_ENABLED: on SCCACHE_GHA_VERSION: 0 - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v7 with: name: start-cli_${{ matrix.triple }} path: core/target/${{ matrix.triple }}/release/start-cli diff --git a/.github/workflows/start-registry.yaml b/.github/workflows/start-registry.yaml index 03dcd95fb..3e763cb7c 100644 --- a/.github/workflows/start-registry.yaml +++ b/.github/workflows/start-registry.yaml @@ -64,7 +64,7 @@ jobs: - name: Mount tmpfs if: ${{ github.event.inputs.runner == 'fast' }} run: sudo mount -t tmpfs tmpfs . - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 with: submodules: recursive - uses: ./.github/actions/setup-build @@ -78,7 +78,7 @@ jobs: SCCACHE_GHA_ENABLED: on SCCACHE_GHA_VERSION: 0 - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v7 with: name: start-registry_${{ matrix.arch }}.deb path: results/start-registry-*_${{ matrix.arch }}.deb @@ -102,13 +102,13 @@ jobs: if: ${{ github.event.inputs.runner == 'fast' }} - name: Set up docker QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@v4 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v3 + uses: docker/setup-buildx-action@v4 - name: "Login to GitHub Container Registry" - uses: docker/login-action@v3 + uses: docker/login-action@v4 with: registry: ghcr.io username: ${{github.actor}} @@ -116,14 +116,14 @@ jobs: - name: Docker meta id: meta - uses: docker/metadata-action@v5 + uses: docker/metadata-action@v6 with: images: ghcr.io/Start9Labs/startos-registry tags: | type=raw,value=${{ github.ref_name }} - name: Download debian package - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v8 with: pattern: start-registry_*.deb @@ -162,7 +162,7 @@ jobs: ADD *.deb . 
- RUN apt-get install -y ./*_$(uname -m).deb && rm *.deb + RUN apt-get update && apt-get install -y ./*_$(uname -m).deb && rm -rf *.deb /var/lib/apt/lists/* VOLUME /var/lib/startos diff --git a/.github/workflows/start-tunnel.yaml b/.github/workflows/start-tunnel.yaml index 1e15c324a..43b7fb5de 100644 --- a/.github/workflows/start-tunnel.yaml +++ b/.github/workflows/start-tunnel.yaml @@ -64,7 +64,7 @@ jobs: - name: Mount tmpfs if: ${{ github.event.inputs.runner == 'fast' }} run: sudo mount -t tmpfs tmpfs . - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 with: submodules: recursive - uses: ./.github/actions/setup-build @@ -78,7 +78,7 @@ jobs: SCCACHE_GHA_ENABLED: on SCCACHE_GHA_VERSION: 0 - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v7 with: name: start-tunnel_${{ matrix.arch }}.deb path: results/start-tunnel-*_${{ matrix.arch }}.deb diff --git a/.github/workflows/startos-iso.yaml b/.github/workflows/startos-iso.yaml index 40dec852b..3bf33d9a2 100644 --- a/.github/workflows/startos-iso.yaml +++ b/.github/workflows/startos-iso.yaml @@ -100,7 +100,7 @@ jobs: - name: Mount tmpfs if: ${{ github.event.inputs.runner == 'fast' }} run: sudo mount -t tmpfs tmpfs . 
- - uses: actions/checkout@v4 + - uses: actions/checkout@v6 with: submodules: recursive - uses: ./.github/actions/setup-build @@ -114,7 +114,7 @@ jobs: SCCACHE_GHA_ENABLED: on SCCACHE_GHA_VERSION: 0 - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v7 with: name: compiled-${{ matrix.arch }}.tar path: compiled-${{ matrix.arch }}.tar @@ -124,14 +124,13 @@ jobs: strategy: fail-fast: false matrix: - # TODO: re-add "raspberrypi" to the platform list below platform: >- ${{ fromJson( format( '[ ["{0}"], - ["x86_64", "x86_64-nonfree", "x86_64-nvidia", "aarch64", "aarch64-nonfree", "aarch64-nvidia", "riscv64", "riscv64-nonfree"] + ["x86_64", "x86_64-nonfree", "x86_64-nvidia", "aarch64", "aarch64-nonfree", "aarch64-nvidia", "raspberrypi", "riscv64", "riscv64-nonfree"] ]', github.event.inputs.platform || 'ALL' ) @@ -209,14 +208,14 @@ jobs: run: sudo mkdir -p /opt/hostedtoolcache && sudo chown $USER:$USER /opt/hostedtoolcache - name: Set up docker QEMU - uses: docker/setup-qemu-action@v3 + uses: docker/setup-qemu-action@v4 - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 with: submodules: recursive - name: Download compiled artifacts - uses: actions/download-artifact@v4 + uses: actions/download-artifact@v8 with: name: compiled-${{ env.ARCH }}.tar @@ -253,18 +252,18 @@ jobs: run: PLATFORM=${{ matrix.platform }} make img if: ${{ matrix.platform == 'raspberrypi' }} - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v7 with: name: ${{ matrix.platform }}.squashfs path: results/*.squashfs - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v7 with: name: ${{ matrix.platform }}.iso path: results/*.iso if: ${{ matrix.platform != 'raspberrypi' }} - - uses: actions/upload-artifact@v4 + - uses: actions/upload-artifact@v7 with: name: ${{ matrix.platform }}.img path: results/*.img diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index 94fac399f..426acfaee 100644 --- a/.github/workflows/test.yaml 
+++ b/.github/workflows/test.yaml @@ -24,7 +24,7 @@ jobs: if: github.event.pull_request.draft != true runs-on: ubuntu-latest steps: - - uses: actions/checkout@v4 + - uses: actions/checkout@v6 with: submodules: recursive - uses: ./.github/actions/setup-build diff --git a/.gitignore b/.gitignore index 10d8b5424..32c700eef 100644 --- a/.gitignore +++ b/.gitignore @@ -22,3 +22,4 @@ secrets.db tmp web/.i18n-checked docs/USER.md +*.s9pk diff --git a/ARCHITECTURE.md b/ARCHITECTURE.md index 967978a54..9adb1bd9f 100644 --- a/ARCHITECTURE.md +++ b/ARCHITECTURE.md @@ -5,7 +5,7 @@ StartOS is an open-source Linux distribution for running personal servers. It ma ## Tech Stack - Backend: Rust (async/Tokio, Axum web framework) -- Frontend: Angular 20 + TypeScript + TaigaUI +- Frontend: Angular 21 + TypeScript + Taiga UI 5 - Container runtime: Node.js/TypeScript with LXC - Database/State: Patch-DB (git submodule) - storage layer with reactive frontend sync - API: JSON-RPC via rpc-toolkit (see `core/rpc-toolkit.md`) @@ -30,7 +30,7 @@ StartOS is an open-source Linux distribution for running personal servers. It ma - **`core/`** — Rust backend daemon. Produces a single binary `startbox` that is symlinked as `startd` (main daemon), `start-cli` (CLI), `start-container` (runs inside LXC containers), `registrybox` (package registry), and `tunnelbox` (VPN/tunnel). Handles all backend logic: RPC API, service lifecycle, networking (DNS, ACME, WiFi, Tor, WireGuard), backups, and database state management. See [core/ARCHITECTURE.md](core/ARCHITECTURE.md). -- **`web/`** — Angular 20 + TypeScript workspace using Taiga UI. Contains three applications (admin UI, setup wizard, VPN management) and two shared libraries (common components/services, marketplace). Communicates with the backend exclusively via JSON-RPC. See [web/ARCHITECTURE.md](web/ARCHITECTURE.md). +- **`web/`** — Angular 21 + TypeScript workspace using Taiga UI 5. 
Contains three applications (admin UI, setup wizard, VPN management) and two shared libraries (common components/services, marketplace). Communicates with the backend exclusively via JSON-RPC. See [web/ARCHITECTURE.md](web/ARCHITECTURE.md). - **`container-runtime/`** — Node.js runtime that runs inside each service's LXC container. Loads the service's JavaScript from its S9PK package and manages subcontainers. Communicates with the host daemon via JSON-RPC over Unix socket. See [container-runtime/CLAUDE.md](container-runtime/CLAUDE.md). diff --git a/CLAUDE.md b/CLAUDE.md index 7464695cf..da1c05c6d 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -31,6 +31,7 @@ make test-core # Run Rust tests - Check component-level CLAUDE.md files for component-specific conventions. ALWAYS read it before operating on that component. - Follow existing patterns before inventing new ones - Always use `make` recipes when they exist for testing builds rather than manually invoking build commands +- **Commit signing:** Never push unsigned commits. Before pushing, check all unpushed commits for signatures with `git log --show-signature @{upstream}..HEAD`. If any are unsigned, prompt the user to sign them with `git rebase --exec 'git commit --amend -S --no-edit' @{upstream}`. ## Supplementary Documentation @@ -50,7 +51,6 @@ On startup: 1. **Check for `docs/USER.md`** - If it doesn't exist, prompt the user for their name/identifier and create it. This file is gitignored since it varies per developer. 2. 
**Check `docs/TODO.md` for relevant tasks** - Show TODOs that either: - - Have no `@username` tag (relevant to everyone) - Are tagged with the current user's identifier diff --git a/Makefile b/Makefile index 7ab474909..151f19ee6 100644 --- a/Makefile +++ b/Makefile @@ -15,7 +15,8 @@ IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo WEB_UIS := web/dist/raw/ui/index.html web/dist/raw/setup-wizard/index.html COMPRESSED_WEB_UIS := web/dist/static/ui/index.html web/dist/static/setup-wizard/index.html FIRMWARE_ROMS := build/lib/firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./build/lib/firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json) -BUILD_SRC := $(call ls-files, build/lib) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS) +TOR_S9PK := build/lib/tor_$(ARCH).s9pk +BUILD_SRC := $(call ls-files, build/lib) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS) $(TOR_S9PK) IMAGE_RECIPE_SRC := $(call ls-files, build/image-recipe/) STARTD_SRC := core/startd.service $(BUILD_SRC) CORE_SRC := $(call ls-files, core) $(shell git ls-files --recurse-submodules patch-db) $(GIT_HASH_FILE) @@ -155,7 +156,7 @@ results/$(BASENAME).deb: debian/dpkg-build.sh $(call ls-files,debian/startos) $( registry-deb: results/$(REGISTRY_BASENAME).deb results/$(REGISTRY_BASENAME).deb: debian/dpkg-build.sh $(call ls-files,debian/start-registry) $(REGISTRY_TARGETS) - PROJECT=start-registry PLATFORM=$(ARCH) REQUIRES=debian ./build/os-compat/run-compat.sh ./debian/dpkg-build.sh + PROJECT=start-registry PLATFORM=$(ARCH) REQUIRES=debian DEPENDS=ca-certificates ./build/os-compat/run-compat.sh ./debian/dpkg-build.sh tunnel-deb: results/$(TUNNEL_BASENAME).deb @@ -188,6 +189,9 @@ install: $(STARTOS_TARGETS) $(call mkdir,$(DESTDIR)/lib/systemd/system) $(call cp,core/startd.service,$(DESTDIR)/lib/systemd/system/startd.service) + if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then \ + sed -i 
'/^Environment=/a Environment=RUST_BACKTRACE=full' $(DESTDIR)/lib/systemd/system/startd.service; \ + fi $(call mkdir,$(DESTDIR)/usr/lib) $(call rm,$(DESTDIR)/usr/lib/startos) @@ -283,6 +287,10 @@ core/bindings/index.ts: $(call ls-files, core) $(ENVIRONMENT_FILE) rm -rf core/bindings ./core/build/build-ts.sh ls core/bindings/*.ts | sed 's/core\/bindings\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' | grep -v '"./index"' | tee core/bindings/index.ts + if [ -d core/bindings/tunnel ]; then \ + ls core/bindings/tunnel/*.ts | sed 's/core\/bindings\/tunnel\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' | grep -v '"./index"' > core/bindings/tunnel/index.ts; \ + echo 'export * as Tunnel from "./tunnel";' >> core/bindings/index.ts; \ + fi npm --prefix sdk/base exec -- prettier --config=./sdk/base/package.json -w './core/bindings/**/*.ts' touch core/bindings/index.ts @@ -308,6 +316,9 @@ build/lib/depends build/lib/conflicts: $(ENVIRONMENT_FILE) $(PLATFORM_FILE) $(sh $(FIRMWARE_ROMS): build/lib/firmware.json ./build/download-firmware.sh $(PLATFORM_FILE) ./build/download-firmware.sh $(PLATFORM) +$(TOR_S9PK): ./build/download-tor-s9pk.sh + ./build/download-tor-s9pk.sh $(ARCH) + core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox: $(CORE_SRC) $(COMPRESSED_WEB_UIS) web/patchdb-ui-seed.json $(ENVIRONMENT_FILE) ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build/build-startbox.sh touch core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox diff --git a/build/download-tor-s9pk.sh b/build/download-tor-s9pk.sh new file mode 100755 index 000000000..8feb9f597 --- /dev/null +++ b/build/download-tor-s9pk.sh @@ -0,0 +1,14 @@ +#!/bin/bash + +cd "$(dirname "${BASH_SOURCE[0]}")" + +set -e + +ARCH=$1 + +if [ -z "$ARCH" ]; then + >&2 echo "usage: $0 <arch>" + exit 1 +fi + +curl --fail -L -o "./lib/tor_${ARCH}.s9pk" "https://s9pks.nyc3.cdn.digitaloceanspaces.com/tor_${ARCH}.s9pk" diff --git a/build/dpkg-deps/depends b/build/dpkg-deps/depends index da2012ae2..3e527e4d9 100644 ---
a/build/dpkg-deps/depends +++ b/build/dpkg-deps/depends @@ -11,6 +11,7 @@ cifs-utils conntrack cryptsetup curl +dkms dmidecode dnsutils dosfstools @@ -36,6 +37,7 @@ lvm2 lxc magic-wormhole man-db +mokutil ncdu net-tools network-manager diff --git a/build/dpkg-deps/raspberrypi.depends b/build/dpkg-deps/raspberrypi.depends index b8f74d108..9066caffd 100644 --- a/build/dpkg-deps/raspberrypi.depends +++ b/build/dpkg-deps/raspberrypi.depends @@ -1,5 +1,6 @@ -- grub-efi ++ gdisk + parted ++ u-boot-rpi + raspberrypi-net-mods + raspberrypi-sys-mods + raspi-config diff --git a/build/image-recipe/Dockerfile b/build/image-recipe/Dockerfile index c53627214..13d1a80b0 100644 --- a/build/image-recipe/Dockerfile +++ b/build/image-recipe/Dockerfile @@ -23,6 +23,8 @@ RUN apt-get update && \ squashfs-tools \ rsync \ b3sum \ + btrfs-progs \ + gdisk \ dpkg-dev diff --git a/build/image-recipe/build.sh b/build/image-recipe/build.sh index 8bd27daf3..6648e719a 100755 --- a/build/image-recipe/build.sh +++ b/build/image-recipe/build.sh @@ -1,7 +1,6 @@ #!/bin/bash set -e -MAX_IMG_LEN=$((4 * 1024 * 1024 * 1024)) # 4GB echo "==== StartOS Image Build ====" @@ -132,6 +131,15 @@ ff02::1 ip6-allnodes ff02::2 ip6-allrouters EOT +if [[ "${IB_OS_ENV}" =~ (^|-)dev($|-) ]]; then + mkdir -p config/includes.chroot/etc/ssh/sshd_config.d + echo "PasswordAuthentication yes" > config/includes.chroot/etc/ssh/sshd_config.d/dev-password-auth.conf +fi + +# Installer marker file (used by installed GRUB to detect the live USB) +mkdir -p config/includes.binary +touch config/includes.binary/.startos-installer + if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then mkdir -p config/includes.chroot git clone --depth=1 --branch=stable https://github.com/raspberrypi/rpi-firmware.git config/includes.chroot/boot @@ -172,7 +180,13 @@ sed -i -e '2i set timeout=5' config/bootloaders/grub-pc/config.cfg mkdir -p config/archives if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then - curl -fsSL 
https://archive.raspberrypi.com/debian/raspberrypi.gpg.key | gpg --dearmor -o config/archives/raspi.key + # Fetch the keyring package (not the old raspberrypi.gpg.key, which has + # SHA1-only binding signatures that sqv on Trixie rejects). + KEYRING_DEB=$(mktemp) + curl -fsSL -o "$KEYRING_DEB" https://archive.raspberrypi.com/debian/pool/main/r/raspberrypi-archive-keyring/raspberrypi-archive-keyring_2025.1+rpt1_all.deb + dpkg-deb -x "$KEYRING_DEB" "$KEYRING_DEB.d" + cp "$KEYRING_DEB.d/usr/share/keyrings/raspberrypi-archive-keyring.gpg" config/archives/raspi.key + rm -rf "$KEYRING_DEB" "$KEYRING_DEB.d" echo "deb [arch=${IB_TARGET_ARCH} signed-by=/etc/apt/trusted.gpg.d/raspi.key.gpg] https://archive.raspberrypi.com/debian/ ${IB_SUITE} main" > config/archives/raspi.list fi @@ -209,6 +223,10 @@ cat > config/hooks/normal/9000-install-startos.hook.chroot << EOF set -e +if [ "${IB_TARGET_PLATFORM}" != "raspberrypi" ]; then + /usr/lib/startos/scripts/enable-kiosk +fi + if [ "${NVIDIA}" = "1" ]; then # install a specific NVIDIA driver version @@ -236,7 +254,7 @@ if [ "${NVIDIA}" = "1" ]; then echo "[nvidia-hook] Target kernel version: \${KVER}" >&2 # Ensure kernel headers are present - TEMP_APT_DEPS=(build-essential) + TEMP_APT_DEPS=(build-essential pkg-config) if [ ! -e "/lib/modules/\${KVER}/build" ]; then TEMP_APT_DEPS+=(linux-headers-\${KVER}) fi @@ -279,12 +297,32 @@ if [ "${NVIDIA}" = "1" ]; then echo "[nvidia-hook] NVIDIA \${NVIDIA_DRIVER_VERSION} installation complete for kernel \${KVER}" >&2 + echo "[nvidia-hook] Removing .run installer..." >&2 + rm -f "\${RUN_PATH}" + + echo "[nvidia-hook] Blacklisting nouveau..." >&2 + echo "blacklist nouveau" > /etc/modprobe.d/blacklist-nouveau.conf + echo "options nouveau modeset=0" >> /etc/modprobe.d/blacklist-nouveau.conf + + echo "[nvidia-hook] Rebuilding initramfs..." >&2 + update-initramfs -u -k "\${KVER}" + echo "[nvidia-hook] Removing build dependencies..." 
>&2 apt-get purge -y nvidia-depends apt-get autoremove -y echo "[nvidia-hook] Removed build dependencies." >&2 fi +# Install linux-kbuild for sign-file (Secure Boot module signing) +KVER_ALL="\$(ls -1t /boot/vmlinuz-* 2>/dev/null | head -n1 | sed 's|.*/vmlinuz-||')" +if [ -n "\${KVER_ALL}" ]; then + KBUILD_VER="\$(echo "\${KVER_ALL}" | grep -oP '^\d+\.\d+')" + if [ -n "\${KBUILD_VER}" ]; then + echo "[build] Installing linux-kbuild-\${KBUILD_VER} for Secure Boot support" >&2 + apt-get install -y "linux-kbuild-\${KBUILD_VER}" || echo "[build] WARNING: linux-kbuild-\${KBUILD_VER} not available" >&2 + fi +fi + cp /etc/resolv.conf /etc/resolv.conf.bak if [ "${IB_SUITE}" = trixie ] && [ "${IB_TARGET_ARCH}" != riscv64 ]; then @@ -298,9 +336,10 @@ fi if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then ln -sf /usr/bin/pi-beep /usr/local/bin/beep - KERNEL_VERSION=${RPI_KERNEL_VERSION} sh /boot/config.sh > /boot/config.txt + sh /boot/firmware/config.sh > /boot/firmware/config.txt mkinitramfs -c gzip -o /boot/initrd.img-${RPI_KERNEL_VERSION}-rpi-v8 ${RPI_KERNEL_VERSION}-rpi-v8 mkinitramfs -c gzip -o /boot/initrd.img-${RPI_KERNEL_VERSION}-rpi-2712 ${RPI_KERNEL_VERSION}-rpi-2712 + cp /usr/lib/u-boot/rpi_arm64/u-boot.bin /boot/firmware/u-boot.bin fi useradd --shell /bin/bash -G startos -m start9 @@ -310,14 +349,14 @@ usermod -aG systemd-journal start9 echo "start9 ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee "/etc/sudoers.d/010_start9-nopasswd" -if [ "${IB_TARGET_PLATFORM}" != "raspberrypi" ]; then - /usr/lib/startos/scripts/enable-kiosk -fi - if ! 
[[ "${IB_OS_ENV}" =~ (^|-)dev($|-) ]]; then passwd -l start9 fi +mkdir -p /media/startos +chmod 750 /media/startos +chown root:startos /media/startos + EOF SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(date '+%s')}" @@ -370,38 +409,85 @@ if [ "${IMAGE_TYPE}" = iso ]; then elif [ "${IMAGE_TYPE}" = img ]; then SECTOR_LEN=512 - BOOT_START=$((1024 * 1024)) # 1MiB - BOOT_LEN=$((512 * 1024 * 1024)) # 512MiB + FW_START=$((1024 * 1024)) # 1MiB (sector 2048) — Pi-specific + FW_LEN=$((128 * 1024 * 1024)) # 128MiB (Pi firmware + U-Boot + DTBs) + FW_END=$((FW_START + FW_LEN - 1)) + ESP_START=$((FW_END + 1)) # 100MB EFI System Partition (matches os_install) + ESP_LEN=$((100 * 1024 * 1024)) + ESP_END=$((ESP_START + ESP_LEN - 1)) + BOOT_START=$((ESP_END + 1)) # 2GB /boot (matches os_install) + BOOT_LEN=$((2 * 1024 * 1024 * 1024)) BOOT_END=$((BOOT_START + BOOT_LEN - 1)) ROOT_START=$((BOOT_END + 1)) - ROOT_LEN=$((MAX_IMG_LEN - ROOT_START)) - ROOT_END=$((MAX_IMG_LEN - 1)) + + # Size root partition to fit the squashfs + 256MB overhead for btrfs + # metadata and config overlay, avoiding the need for btrfs resize + SQUASHFS_SIZE=$(stat -c %s $prep_results_dir/binary/live/filesystem.squashfs) + ROOT_LEN=$(( SQUASHFS_SIZE + 256 * 1024 * 1024 )) + # Align to sector boundary + ROOT_LEN=$(( (ROOT_LEN + SECTOR_LEN - 1) / SECTOR_LEN * SECTOR_LEN )) + + # Total image: partitions + GPT backup header (34 sectors) + IMG_LEN=$((ROOT_START + ROOT_LEN + 34 * SECTOR_LEN)) + + # Fixed GPT partition UUIDs (deterministic, based on old MBR disk ID cb15ae4d) + FW_UUID=cb15ae4d-0001-4000-8000-000000000001 + ESP_UUID=cb15ae4d-0002-4000-8000-000000000002 + BOOT_UUID=cb15ae4d-0003-4000-8000-000000000003 + ROOT_UUID=cb15ae4d-0004-4000-8000-000000000004 TARGET_NAME=$prep_results_dir/${IMAGE_BASENAME}.img - truncate -s $MAX_IMG_LEN $TARGET_NAME + truncate -s $IMG_LEN $TARGET_NAME sfdisk $TARGET_NAME <<-EOF - label: dos - label-id: 0xcb15ae4d - unit: sectors - sector-size: 512 + label: gpt - ${TARGET_NAME}1 : 
start=$((BOOT_START / SECTOR_LEN)), size=$((BOOT_LEN / SECTOR_LEN)), type=c, bootable - ${TARGET_NAME}2 : start=$((ROOT_START / SECTOR_LEN)), size=$((ROOT_LEN / SECTOR_LEN)), type=83 + ${TARGET_NAME}1 : start=$((FW_START / SECTOR_LEN)), size=$((FW_LEN / SECTOR_LEN)), type=EBD0A0A2-B9E5-4433-87C0-68B6B72699C7, uuid=${FW_UUID}, name="firmware" + ${TARGET_NAME}2 : start=$((ESP_START / SECTOR_LEN)), size=$((ESP_LEN / SECTOR_LEN)), type=C12A7328-F81F-11D2-BA4B-00A0C93EC93B, uuid=${ESP_UUID}, name="efi" + ${TARGET_NAME}3 : start=$((BOOT_START / SECTOR_LEN)), size=$((BOOT_LEN / SECTOR_LEN)), type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, uuid=${BOOT_UUID}, name="boot" + ${TARGET_NAME}4 : start=$((ROOT_START / SECTOR_LEN)), size=$((ROOT_LEN / SECTOR_LEN)), type=B921B045-1DF0-41C3-AF44-4C6F280D3FAE, uuid=${ROOT_UUID}, name="root" EOF - BOOT_DEV=$(losetup --show -f --offset $BOOT_START --sizelimit $BOOT_LEN $TARGET_NAME) - ROOT_DEV=$(losetup --show -f --offset $ROOT_START --sizelimit $ROOT_LEN $TARGET_NAME) + # Create named loop device nodes (high minor numbers to avoid conflicts) + # and detach any stale ones from previous failed builds + FW_DEV=/dev/startos-loop-fw + ESP_DEV=/dev/startos-loop-esp + BOOT_DEV=/dev/startos-loop-boot + ROOT_DEV=/dev/startos-loop-root + for dev in $FW_DEV:200 $ESP_DEV:201 $BOOT_DEV:202 $ROOT_DEV:203; do + name=${dev%:*} + minor=${dev#*:} + [ -e $name ] || mknod $name b 7 $minor + losetup -d $name 2>/dev/null || true + done - mkfs.vfat -F32 $BOOT_DEV - mkfs.ext4 $ROOT_DEV + losetup $FW_DEV --offset $FW_START --sizelimit $FW_LEN $TARGET_NAME + losetup $ESP_DEV --offset $ESP_START --sizelimit $ESP_LEN $TARGET_NAME + losetup $BOOT_DEV --offset $BOOT_START --sizelimit $BOOT_LEN $TARGET_NAME + losetup $ROOT_DEV --offset $ROOT_START --sizelimit $ROOT_LEN $TARGET_NAME + + mkfs.vfat -F32 -n firmware $FW_DEV + mkfs.vfat -F32 -n efi $ESP_DEV + mkfs.vfat -F32 -n boot $BOOT_DEV + mkfs.btrfs -f -L rootfs $ROOT_DEV TMPDIR=$(mktemp -d) + # Extract boot files from 
squashfs to staging area + BOOT_STAGING=$(mktemp -d) + unsquashfs -n -f -d $BOOT_STAGING $prep_results_dir/binary/live/filesystem.squashfs boot + + # Mount partitions (nested: firmware and efi inside boot) mkdir -p $TMPDIR/boot $TMPDIR/root - mount $ROOT_DEV $TMPDIR/root mount $BOOT_DEV $TMPDIR/boot - unsquashfs -n -f -d $TMPDIR $prep_results_dir/binary/live/filesystem.squashfs boot + mkdir -p $TMPDIR/boot/firmware $TMPDIR/boot/efi + mount $FW_DEV $TMPDIR/boot/firmware + mount $ESP_DEV $TMPDIR/boot/efi + mount $ROOT_DEV $TMPDIR/root + + # Copy boot files — nested mounts route firmware/* to the firmware partition + cp -a $BOOT_STAGING/boot/. $TMPDIR/boot/ + rm -rf $BOOT_STAGING mkdir $TMPDIR/root/images $TMPDIR/root/config B3SUM=$(b3sum $prep_results_dir/binary/live/filesystem.squashfs | head -c 16) @@ -414,40 +500,46 @@ elif [ "${IMAGE_TYPE}" = img ]; then mount -t overlay -o lowerdir=$TMPDIR/lower,workdir=$TMPDIR/root/config/work,upperdir=$TMPDIR/root/config/overlay overlay $TMPDIR/next if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then - sed -i 's| boot=startos| boot=startos init=/usr/lib/startos/scripts/init_resize\.sh|' $TMPDIR/boot/cmdline.txt rsync -a $SOURCE_DIR/raspberrypi/img/ $TMPDIR/next/ + + # Install GRUB: ESP at /boot/efi (Part 2), /boot (Part 3) + mkdir -p $TMPDIR/next/boot \ + $TMPDIR/next/dev $TMPDIR/next/proc $TMPDIR/next/sys $TMPDIR/next/media/startos/root + mount --rbind $TMPDIR/boot $TMPDIR/next/boot + mount --bind /dev $TMPDIR/next/dev + mount -t proc proc $TMPDIR/next/proc + mount -t sysfs sysfs $TMPDIR/next/sys + mount --bind $TMPDIR/root $TMPDIR/next/media/startos/root + + chroot $TMPDIR/next grub-install --target=arm64-efi --removable --efi-directory=/boot/efi --boot-directory=/boot --no-nvram + chroot $TMPDIR/next update-grub + + umount $TMPDIR/next/media/startos/root + umount $TMPDIR/next/sys + umount $TMPDIR/next/proc + umount $TMPDIR/next/dev + umount -l $TMPDIR/next/boot + + # Fix root= in grub.cfg: update-grub sees loop devices, 
but the + # real device uses a fixed GPT PARTUUID for root (Part 4). + sed -i "s|root=[^ ]*|root=PARTUUID=${ROOT_UUID}|g" $TMPDIR/boot/grub/grub.cfg + + # Inject first-boot resize script into GRUB config + sed -i 's| boot=startos| boot=startos init=/usr/lib/startos/scripts/init_resize\.sh|' $TMPDIR/boot/grub/grub.cfg fi umount $TMPDIR/next umount $TMPDIR/lower + umount $TMPDIR/boot/firmware + umount $TMPDIR/boot/efi umount $TMPDIR/boot umount $TMPDIR/root - - e2fsck -fy $ROOT_DEV - resize2fs -M $ROOT_DEV - - BLOCK_COUNT=$(dumpe2fs -h $ROOT_DEV | awk '/^Block count:/ { print $3 }') - BLOCK_SIZE=$(dumpe2fs -h $ROOT_DEV | awk '/^Block size:/ { print $3 }') - ROOT_LEN=$((BLOCK_COUNT * BLOCK_SIZE)) - losetup -d $ROOT_DEV losetup -d $BOOT_DEV - - # Recreate partition 2 with the new size using sfdisk - sfdisk $TARGET_NAME <<-EOF - label: dos - label-id: 0xcb15ae4d - unit: sectors - sector-size: 512 - - ${TARGET_NAME}1 : start=$((BOOT_START / SECTOR_LEN)), size=$((BOOT_LEN / SECTOR_LEN)), type=c, bootable - ${TARGET_NAME}2 : start=$((ROOT_START / SECTOR_LEN)), size=$((ROOT_LEN / SECTOR_LEN)), type=83 - EOF - - TARGET_SIZE=$((ROOT_START + ROOT_LEN)) - truncate -s $TARGET_SIZE $TARGET_NAME + losetup -d $ESP_DEV + losetup -d $FW_DEV mv $TARGET_NAME $RESULTS_DIR/$IMAGE_BASENAME.img diff --git a/build/image-recipe/raspberrypi/img/etc/fstab b/build/image-recipe/raspberrypi/img/etc/fstab index 5f5164232..ece1fb4c7 100644 --- a/build/image-recipe/raspberrypi/img/etc/fstab +++ b/build/image-recipe/raspberrypi/img/etc/fstab @@ -1,2 +1,4 @@ -/dev/mmcblk0p1 /boot vfat umask=0077 0 2 -/dev/mmcblk0p2 / ext4 defaults 0 1 +PARTUUID=cb15ae4d-0001-4000-8000-000000000001 /boot/firmware vfat umask=0077 0 2 +PARTUUID=cb15ae4d-0002-4000-8000-000000000002 /boot/efi vfat umask=0077 0 1 +PARTUUID=cb15ae4d-0003-4000-8000-000000000003 /boot vfat umask=0077 0 2 +PARTUUID=cb15ae4d-0004-4000-8000-000000000004 / btrfs defaults 0 1 diff --git 
a/build/image-recipe/raspberrypi/img/usr/lib/startos/scripts/init_resize.sh b/build/image-recipe/raspberrypi/img/usr/lib/startos/scripts/init_resize.sh index 1fdca1c83..67e0629df 100755 --- a/build/image-recipe/raspberrypi/img/usr/lib/startos/scripts/init_resize.sh +++ b/build/image-recipe/raspberrypi/img/usr/lib/startos/scripts/init_resize.sh @@ -12,15 +12,16 @@ get_variables () { BOOT_DEV_NAME=$(echo /sys/block/*/"${BOOT_PART_NAME}" | cut -d "/" -f 4) BOOT_PART_NUM=$(cat "/sys/block/${BOOT_DEV_NAME}/${BOOT_PART_NAME}/partition") - OLD_DISKID=$(fdisk -l "$ROOT_DEV" | sed -n 's/Disk identifier: 0x\([^ ]*\)/\1/p') - ROOT_DEV_SIZE=$(cat "/sys/block/${ROOT_DEV_NAME}/size") - if [ "$ROOT_DEV_SIZE" -le 67108864 ]; then - TARGET_END=$((ROOT_DEV_SIZE - 1)) + # GPT backup header/entries occupy last 33 sectors + USABLE_END=$((ROOT_DEV_SIZE - 34)) + + if [ "$USABLE_END" -le 67108864 ]; then + TARGET_END=$USABLE_END else TARGET_END=$((33554432 - 1)) DATA_PART_START=33554432 - DATA_PART_END=$((ROOT_DEV_SIZE - 1)) + DATA_PART_END=$USABLE_END fi PARTITION_TABLE=$(parted -m "$ROOT_DEV" unit s print | tr -d 's') @@ -57,37 +58,30 @@ check_variables () { main () { get_variables + # Fix GPT backup header first — the image was built with a tight root + # partition, so the backup GPT is not at the end of the SD card. parted + # will prompt interactively if this isn't fixed before we use it. + sgdisk -e "$ROOT_DEV" 2>/dev/null || true + if ! check_variables; then return 1 fi -# if [ "$ROOT_PART_END" -eq "$TARGET_END" ]; then -# reboot_pi -# fi - if ! echo Yes | parted -m --align=optimal "$ROOT_DEV" ---pretend-input-tty u s resizepart "$ROOT_PART_NUM" "$TARGET_END" ; then FAIL_REASON="Root partition resize failed" return 1 fi if [ -n "$DATA_PART_START" ]; then - if ! parted -ms --align=optimal "$ROOT_DEV" u s mkpart primary "$DATA_PART_START" "$DATA_PART_END"; then + if ! 
parted -ms --align=optimal "$ROOT_DEV" u s mkpart data "$DATA_PART_START" "$DATA_PART_END"; then FAIL_REASON="Data partition creation failed" return 1 fi fi - ( - echo x - echo i - echo "0xcb15ae4d" - echo r - echo w - ) | fdisk $ROOT_DEV - mount / -o remount,rw - resize2fs $ROOT_PART_DEV + btrfs filesystem resize max /media/startos/root if ! systemd-machine-id-setup --root=/media/startos/config/overlay/; then FAIL_REASON="systemd-machine-id-setup failed" @@ -111,7 +105,7 @@ mount / -o remount,ro beep if main; then - sed -i 's| init=/usr/lib/startos/scripts/init_resize\.sh||' /boot/cmdline.txt + sed -i 's| init=/usr/lib/startos/scripts/init_resize\.sh||' /boot/grub/grub.cfg echo "Resized root filesystem. Rebooting in 5 seconds..." sleep 5 else diff --git a/build/image-recipe/raspberrypi/squashfs/boot/cmdline.txt b/build/image-recipe/raspberrypi/squashfs/boot/cmdline.txt deleted file mode 100644 index f10c50da5..000000000 --- a/build/image-recipe/raspberrypi/squashfs/boot/cmdline.txt +++ /dev/null @@ -1 +0,0 @@ -usb-storage.quirks=152d:0562:u,14cd:121c:u,0781:cfcb:u console=serial0,115200 console=tty1 root=PARTUUID=cb15ae4d-02 rootfstype=ext4 fsck.repair=yes rootwait cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory boot=startos \ No newline at end of file diff --git a/build/image-recipe/raspberrypi/squashfs/boot/config.sh b/build/image-recipe/raspberrypi/squashfs/boot/firmware/config.sh similarity index 77% rename from build/image-recipe/raspberrypi/squashfs/boot/config.sh rename to build/image-recipe/raspberrypi/squashfs/boot/firmware/config.sh index 1c74bc1b2..d23120bef 100644 --- a/build/image-recipe/raspberrypi/squashfs/boot/config.sh +++ b/build/image-recipe/raspberrypi/squashfs/boot/firmware/config.sh @@ -27,20 +27,18 @@ disable_overscan=1 # (e.g. for USB device mode) or if USB support is not required. 
otg_mode=1 -[all] - [pi4] # Run as fast as firmware / board allows arm_boost=1 -kernel=vmlinuz-${KERNEL_VERSION}-rpi-v8 -initramfs initrd.img-${KERNEL_VERSION}-rpi-v8 followkernel - -[pi5] -kernel=vmlinuz-${KERNEL_VERSION}-rpi-2712 -initramfs initrd.img-${KERNEL_VERSION}-rpi-2712 followkernel [all] gpu_mem=16 dtoverlay=pwm-2chan,disable-bt -EOF \ No newline at end of file +# Enable UART for U-Boot and serial console +enable_uart=1 + +# Load U-Boot as the bootloader (GRUB is chainloaded from U-Boot) +kernel=u-boot.bin + +EOF diff --git a/build/image-recipe/raspberrypi/squashfs/boot/config.txt b/build/image-recipe/raspberrypi/squashfs/boot/firmware/config.txt similarity index 93% rename from build/image-recipe/raspberrypi/squashfs/boot/config.txt rename to build/image-recipe/raspberrypi/squashfs/boot/firmware/config.txt index 4e1962a65..5bf25925d 100644 --- a/build/image-recipe/raspberrypi/squashfs/boot/config.txt +++ b/build/image-recipe/raspberrypi/squashfs/boot/firmware/config.txt @@ -84,4 +84,8 @@ arm_boost=1 gpu_mem=16 dtoverlay=pwm-2chan,disable-bt -auto_initramfs=1 \ No newline at end of file +# Enable UART for U-Boot and serial console +enable_uart=1 + +# Load U-Boot as the bootloader (GRUB is chainloaded from U-Boot) +kernel=u-boot.bin \ No newline at end of file diff --git a/build/image-recipe/raspberrypi/squashfs/etc/default/grub.d/raspberrypi.cfg b/build/image-recipe/raspberrypi/squashfs/etc/default/grub.d/raspberrypi.cfg new file mode 100644 index 000000000..0dc217b8b --- /dev/null +++ b/build/image-recipe/raspberrypi/squashfs/etc/default/grub.d/raspberrypi.cfg @@ -0,0 +1,4 @@ +# Raspberry Pi-specific GRUB overrides +# Overrides GRUB_CMDLINE_LINUX from /etc/default/grub with Pi-specific +# console devices and hardware quirks. 
+GRUB_CMDLINE_LINUX="boot=startos console=serial0,115200 console=tty1 usb-storage.quirks=152d:0562:u,14cd:121c:u,0781:cfcb:u cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory" diff --git a/build/image-recipe/raspberrypi/squashfs/etc/startos/config.yaml b/build/image-recipe/raspberrypi/squashfs/etc/startos/config.yaml index 7c81ad513..a7d1a5eae 100644 --- a/build/image-recipe/raspberrypi/squashfs/etc/startos/config.yaml +++ b/build/image-recipe/raspberrypi/squashfs/etc/startos/config.yaml @@ -1,6 +1,3 @@ -os-partitions: - boot: /dev/mmcblk0p1 - root: /dev/mmcblk0p2 ethernet-interface: end0 wifi-interface: wlan0 disable-encryption: true diff --git a/build/lib/scripts/chroot-and-upgrade b/build/lib/scripts/chroot-and-upgrade index c8e16acaf..f5dd417aa 100755 --- a/build/lib/scripts/chroot-and-upgrade +++ b/build/lib/scripts/chroot-and-upgrade @@ -34,7 +34,7 @@ set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters if [ -z "$NO_SYNC" ]; then echo 'Syncing...' - umount -R /media/startos/next 2> /dev/null + umount -l /media/startos/next 2> /dev/null umount /media/startos/upper 2> /dev/null rm -rf /media/startos/upper /media/startos/next mkdir /media/startos/upper @@ -58,13 +58,13 @@ mkdir -p /media/startos/next/media/startos/root mount --bind /run /media/startos/next/run mount --bind /tmp /media/startos/next/tmp mount --bind /dev /media/startos/next/dev -mount --bind /sys /media/startos/next/sys -mount --bind /proc /media/startos/next/proc +mount -t sysfs sysfs /media/startos/next/sys +mount -t proc proc /media/startos/next/proc mount --bind /boot /media/startos/next/boot mount --bind /media/startos/root /media/startos/next/media/startos/root if mountpoint /sys/firmware/efi/efivars 2>&1 > /dev/null; then - mount --bind /sys/firmware/efi/efivars /media/startos/next/sys/firmware/efi/efivars + mount -t efivarfs efivarfs /media/startos/next/sys/firmware/efi/efivars fi if [ -z "$*" ]; then @@ -111,6 +111,6 @@ if [ "$CHROOT_RES" -eq 0 ]; then reboot fi -umount 
/media/startos/next -umount /media/startos/upper +umount -l /media/startos/next +umount -l /media/startos/upper rm -rf /media/startos/upper /media/startos/next \ No newline at end of file diff --git a/build/lib/scripts/sign-unsigned-modules b/build/lib/scripts/sign-unsigned-modules new file mode 100755 index 000000000..fdaf11e88 --- /dev/null +++ b/build/lib/scripts/sign-unsigned-modules @@ -0,0 +1,76 @@ +#!/bin/bash + +# sign-unsigned-modules [--source --dest ] [--sign-file ] +# [--mok-key ] [--mok-pub ] +# +# Signs all unsigned kernel modules using the DKMS MOK key. +# +# Default (install) mode: +# Run inside a chroot. Finds and signs unsigned modules in /lib/modules in-place. +# sign-file and MOK key are auto-detected from standard paths. +# +# Overlay mode (--source/--dest): +# Finds unsigned modules in , copies to , signs the copies. +# Clears old signed modules in first. Used during upgrades where the +# overlay upper is tmpfs and writes would be lost. + +set -e + +SOURCE="" +DEST="" +SIGN_FILE="" +MOK_KEY="/var/lib/dkms/mok.key" +MOK_PUB="/var/lib/dkms/mok.pub" + +while [[ $# -gt 0 ]]; do + case $1 in + --source) SOURCE="$2"; shift 2;; + --dest) DEST="$2"; shift 2;; + --sign-file) SIGN_FILE="$2"; shift 2;; + --mok-key) MOK_KEY="$2"; shift 2;; + --mok-pub) MOK_PUB="$2"; shift 2;; + *) echo "Unknown option: $1" >&2; exit 1;; + esac +done + +# Auto-detect sign-file if not specified +if [ -z "$SIGN_FILE" ]; then + SIGN_FILE="$(ls -1 /usr/lib/linux-kbuild-*/scripts/sign-file 2>/dev/null | head -1)" +fi + +if [ -z "$SIGN_FILE" ] || [ ! -x "$SIGN_FILE" ]; then + exit 0 +fi + +if [ ! -f "$MOK_KEY" ] || [ ! -f "$MOK_PUB" ]; then + exit 0 +fi + +COUNT=0 + +if [ -n "$SOURCE" ] && [ -n "$DEST" ]; then + # Overlay mode: find unsigned in source, copy to dest, sign in dest + rm -rf "${DEST}"/lib/modules + + for ko in $(find "${SOURCE}"/lib/modules -name '*.ko' 2>/dev/null); do + if ! 
modinfo "$ko" 2>/dev/null | grep -q '^sig_id:'; then + rel_path="${ko#${SOURCE}}" + mkdir -p "${DEST}$(dirname "$rel_path")" + cp "$ko" "${DEST}${rel_path}" + "$SIGN_FILE" sha256 "$MOK_KEY" "$MOK_PUB" "${DEST}${rel_path}" + COUNT=$((COUNT + 1)) + fi + done +else + # In-place mode: sign modules directly + for ko in $(find /lib/modules -name '*.ko' 2>/dev/null); do + if ! modinfo "$ko" 2>/dev/null | grep -q '^sig_id:'; then + "$SIGN_FILE" sha256 "$MOK_KEY" "$MOK_PUB" "$ko" + COUNT=$((COUNT + 1)) + fi + done +fi + +if [ $COUNT -gt 0 ]; then + echo "[sign-modules] Signed $COUNT unsigned kernel modules" +fi diff --git a/build/lib/scripts/startos-initramfs-module b/build/lib/scripts/startos-initramfs-module index f093328cc..6299edd5b 100755 --- a/build/lib/scripts/startos-initramfs-module +++ b/build/lib/scripts/startos-initramfs-module @@ -104,6 +104,7 @@ local_mount_root() -olowerdir=/startos/config/overlay:/lower,upperdir=/upper/data,workdir=/upper/work \ overlay ${rootmnt} + mkdir -m 750 -p ${rootmnt}/media/startos mkdir -p ${rootmnt}/media/startos/config mount --bind /startos/config ${rootmnt}/media/startos/config mkdir -p ${rootmnt}/media/startos/images diff --git a/build/lib/scripts/upgrade b/build/lib/scripts/upgrade index 35230eb0a..488372e2d 100755 --- a/build/lib/scripts/upgrade +++ b/build/lib/scripts/upgrade @@ -24,7 +24,7 @@ fi unsquashfs -f -d / $1 boot -umount -R /media/startos/next 2> /dev/null || true +umount -l /media/startos/next 2> /dev/null || true umount /media/startos/upper 2> /dev/null || true umount /media/startos/lower 2> /dev/null || true @@ -45,18 +45,13 @@ mkdir -p /media/startos/next/media/startos/root mount --bind /run /media/startos/next/run mount --bind /tmp /media/startos/next/tmp mount --bind /dev /media/startos/next/dev -mount --bind /sys /media/startos/next/sys -mount --bind /proc /media/startos/next/proc -mount --bind /boot /media/startos/next/boot +mount -t sysfs sysfs /media/startos/next/sys +mount -t proc proc 
/media/startos/next/proc +mount --rbind /boot /media/startos/next/boot mount --bind /media/startos/root /media/startos/next/media/startos/root -if mountpoint /boot/efi 2>&1 > /dev/null; then - mkdir -p /media/startos/next/boot/efi - mount --bind /boot/efi /media/startos/next/boot/efi -fi - if mountpoint /sys/firmware/efi/efivars 2>&1 > /dev/null; then - mount --bind /sys/firmware/efi/efivars /media/startos/next/sys/firmware/efi/efivars + mount -t efivarfs efivarfs /media/startos/next/sys/firmware/efi/efivars fi chroot /media/startos/next bash -e << "EOF" @@ -68,24 +63,18 @@ fi EOF -# Promote the USB installer boot entry back to first in EFI boot order. -# The entry number was saved during initial OS install. -if [ -d /sys/firmware/efi ] && [ -f /media/startos/config/efi-installer-entry ]; then - USB_ENTRY=$(cat /media/startos/config/efi-installer-entry) - if [ -n "$USB_ENTRY" ]; then - CURRENT_ORDER=$(efibootmgr | grep BootOrder | sed 's/BootOrder: //') - OTHER_ENTRIES=$(echo "$CURRENT_ORDER" | tr ',' '\n' | grep -v "$USB_ENTRY" | tr '\n' ',' | sed 's/,$//') - if [ -n "$OTHER_ENTRIES" ]; then - efibootmgr -o "$USB_ENTRY,$OTHER_ENTRIES" - else - efibootmgr -o "$USB_ENTRY" - fi - fi -fi +# Sign unsigned kernel modules for Secure Boot +SIGN_FILE="$(ls -1 /media/startos/next/usr/lib/linux-kbuild-*/scripts/sign-file 2>/dev/null | head -1)" +/media/startos/next/usr/lib/startos/scripts/sign-unsigned-modules \ + --source /media/startos/lower \ + --dest /media/startos/config/overlay \ + --sign-file "$SIGN_FILE" \ + --mok-key /media/startos/config/overlay/var/lib/dkms/mok.key \ + --mok-pub /media/startos/config/overlay/var/lib/dkms/mok.pub sync -umount -Rl /media/startos/next +umount -l /media/startos/next umount /media/startos/upper umount /media/startos/lower diff --git a/build/manage-release.sh b/build/manage-release.sh index c3b71717a..94387d2a6 100755 --- a/build/manage-release.sh +++ b/build/manage-release.sh @@ -198,20 +198,22 @@ cmd_sign() { enter_release_dir 
resolve_gh_user + mkdir -p signatures + for file in $(release_files); do - gpg -u $START9_GPG_KEY --detach-sign --armor -o "${file}.start9.asc" "$file" + gpg -u $START9_GPG_KEY --detach-sign --armor -o "signatures/${file}.start9.asc" "$file" if [ -n "$GH_USER" ] && [ -n "$GH_GPG_KEY" ]; then - gpg -u "$GH_GPG_KEY" --detach-sign --armor -o "${file}.${GH_USER}.asc" "$file" + gpg -u "$GH_GPG_KEY" --detach-sign --armor -o "signatures/${file}.${GH_USER}.asc" "$file" fi done - gpg --export -a $START9_GPG_KEY > start9.key.asc + gpg --export -a $START9_GPG_KEY > signatures/start9.key.asc if [ -n "$GH_USER" ] && [ -n "$GH_GPG_KEY" ]; then - gpg --export -a "$GH_GPG_KEY" > "${GH_USER}.key.asc" + gpg --export -a "$GH_GPG_KEY" > "signatures/${GH_USER}.key.asc" else >&2 echo 'Warning: could not determine GitHub user or GPG signing key, skipping personal signature' fi - tar -czvf signatures.tar.gz *.asc + tar -czvf signatures.tar.gz -C signatures . gh release upload -R $REPO "v$VERSION" signatures.tar.gz --clobber } @@ -229,17 +231,18 @@ cmd_cosign() { echo "Downloading existing signatures..." gh release download -R $REPO "v$VERSION" -p "signatures.tar.gz" -D "$(pwd)" --clobber - tar -xzf signatures.tar.gz + mkdir -p signatures + tar -xzf signatures.tar.gz -C signatures echo "Adding personal signatures as $GH_USER..." for file in $(release_files); do - gpg -u "$GH_GPG_KEY" --detach-sign --armor -o "${file}.${GH_USER}.asc" "$file" + gpg -u "$GH_GPG_KEY" --detach-sign --armor -o "signatures/${file}.${GH_USER}.asc" "$file" done - gpg --export -a "$GH_GPG_KEY" > "${GH_USER}.key.asc" + gpg --export -a "$GH_GPG_KEY" > "signatures/${GH_USER}.key.asc" echo "Re-packing signatures..." - tar -czvf signatures.tar.gz *.asc + tar -czvf signatures.tar.gz -C signatures . gh release upload -R $REPO "v$VERSION" signatures.tar.gz --clobber echo "Done. Personal signatures for $GH_USER added to v$VERSION." 
diff --git a/container-runtime/container-runtime.service b/container-runtime/container-runtime.service index ed9d142f7..f04150969 100644 --- a/container-runtime/container-runtime.service +++ b/container-runtime/container-runtime.service @@ -5,7 +5,7 @@ OnFailure=container-runtime-failure.service [Service] Type=simple Environment=RUST_LOG=startos=debug -ExecStart=/usr/bin/node --experimental-detect-module --trace-warnings --unhandled-rejections=warn /usr/lib/startos/init/index.js +ExecStart=/usr/bin/node --experimental-detect-module --trace-warnings /usr/lib/startos/init/index.js Restart=no [Install] diff --git a/container-runtime/package-lock.json b/container-runtime/package-lock.json index b690a6d74..ff2e560de 100644 --- a/container-runtime/package-lock.json +++ b/container-runtime/package-lock.json @@ -37,7 +37,7 @@ }, "../sdk/dist": { "name": "@start9labs/start-sdk", - "version": "0.4.0-beta.58", + "version": "0.4.0-beta.61", "license": "MIT", "dependencies": { "@iarna/toml": "^3.0.0", diff --git a/container-runtime/src/Adapters/EffectCreator.ts b/container-runtime/src/Adapters/EffectCreator.ts index c244347eb..22328bbb9 100644 --- a/container-runtime/src/Adapters/EffectCreator.ts +++ b/container-runtime/src/Adapters/EffectCreator.ts @@ -187,9 +187,10 @@ export function makeEffects(context: EffectContext): Effects { getServiceManifest( ...[options]: Parameters ) { - return rpcRound("get-service-manifest", options) as ReturnType< - T.Effects["getServiceManifest"] - > + return rpcRound("get-service-manifest", { + ...options, + callback: context.callbacks?.addCallback(options.callback) || null, + }) as ReturnType }, subcontainer: { createFs(options: { imageId: string; name: string }) { @@ -211,9 +212,10 @@ export function makeEffects(context: EffectContext): Effects { > }) as Effects["exportServiceInterface"], getContainerIp(...[options]: Parameters) { - return rpcRound("get-container-ip", options) as ReturnType< - T.Effects["getContainerIp"] - > + return 
rpcRound("get-container-ip", { + ...options, + callback: context.callbacks?.addCallback(options.callback) || null, + }) as ReturnType }, getOsIp(...[]: Parameters) { return rpcRound("get-os-ip", {}) as ReturnType @@ -244,9 +246,10 @@ export function makeEffects(context: EffectContext): Effects { > }, getSslCertificate(options: Parameters[0]) { - return rpcRound("get-ssl-certificate", options) as ReturnType< - T.Effects["getSslCertificate"] - > + return rpcRound("get-ssl-certificate", { + ...options, + callback: context.callbacks?.addCallback(options.callback) || null, + }) as ReturnType }, getSslKey(options: Parameters[0]) { return rpcRound("get-ssl-key", options) as ReturnType< @@ -308,7 +311,10 @@ export function makeEffects(context: EffectContext): Effects { }, getStatus(...[o]: Parameters) { - return rpcRound("get-status", o) as ReturnType + return rpcRound("get-status", { + ...o, + callback: context.callbacks?.addCallback(o.callback) || null, + }) as ReturnType }, /// DEPRECATED setMainStatus(o: { status: "running" | "stopped" }): Promise { diff --git a/container-runtime/src/Adapters/RpcListener.ts b/container-runtime/src/Adapters/RpcListener.ts index f9dd0fac2..4f51e5278 100644 --- a/container-runtime/src/Adapters/RpcListener.ts +++ b/container-runtime/src/Adapters/RpcListener.ts @@ -298,13 +298,10 @@ export class RpcListener { } case "stop": { const { id } = stopType.parse(input) + this.callbacks?.removeChild("main") return handleRpc( id, - this.system.stop().then((result) => { - this.callbacks?.removeChild("main") - - return { result } - }), + this.system.stop().then((result) => ({ result })), ) } case "exit": { diff --git a/container-runtime/src/Adapters/Systems/SystemForEmbassy/index.ts b/container-runtime/src/Adapters/Systems/SystemForEmbassy/index.ts index 15a97178d..10b1d7ddc 100644 --- a/container-runtime/src/Adapters/Systems/SystemForEmbassy/index.ts +++ b/container-runtime/src/Adapters/Systems/SystemForEmbassy/index.ts @@ -42,6 +42,74 @@ function 
todo(): never { throw new Error("Not implemented") } +function getStatus( + effects: Effects, + options: Omit[0], "callback"> = {}, +) { + async function* watch(abort?: AbortSignal) { + const resolveCell = { resolve: () => {} } + effects.onLeaveContext(() => { + resolveCell.resolve() + }) + abort?.addEventListener("abort", () => resolveCell.resolve()) + while (effects.isInContext && !abort?.aborted) { + let callback: () => void = () => {} + const waitForNext = new Promise((resolve) => { + callback = resolve + resolveCell.resolve = resolve + }) + yield await effects.getStatus({ ...options, callback }) + await waitForNext + } + } + return { + const: () => + effects.getStatus({ + ...options, + callback: + effects.constRetry && + (() => effects.constRetry && effects.constRetry()), + }), + once: () => effects.getStatus(options), + watch: (abort?: AbortSignal) => { + const ctrl = new AbortController() + abort?.addEventListener("abort", () => ctrl.abort()) + return watch(ctrl.signal) + }, + onChange: ( + callback: ( + value: T.StatusInfo | null, + error?: Error, + ) => { cancel: boolean } | Promise<{ cancel: boolean }>, + ) => { + ;(async () => { + const ctrl = new AbortController() + for await (const value of watch(ctrl.signal)) { + try { + const res = await callback(value) + if (res.cancel) { + ctrl.abort() + break + } + } catch (e) { + console.error( + "callback function threw an error @ getStatus.onChange", + e, + ) + } + } + })() + .catch((e) => callback(null, e as Error)) + .catch((e) => + console.error( + "callback function threw an error @ getStatus.onChange", + e, + ), + ) + }, + } +} + /** * Local type for procedure values from the manifest. * The manifest's zod schemas use ZodTypeAny casts that produce `unknown` in zod v4. 
@@ -1046,16 +1114,26 @@ export class SystemForEmbassy implements System { timeoutMs: number | null, ): Promise { // TODO: docker - await effects.mount({ - location: `/media/embassy/${id}`, - target: { - packageId: id, - volumeId: "embassy", - subpath: null, - readonly: true, - idmap: [], - }, - }) + const status = await getStatus(effects, { packageId: id }).const() + if (!status) return + try { + await effects.mount({ + location: `/media/embassy/${id}`, + target: { + packageId: id, + volumeId: "embassy", + subpath: null, + readonly: true, + idmap: [], + }, + }) + } catch (e) { + console.error( + `Failed to mount dependency volume for ${id}, skipping autoconfig:`, + e, + ) + return + } configFile .withPath(`/media/embassy/${id}/config.json`) .read() @@ -1204,6 +1282,11 @@ async function updateConfig( if (specValue.target === "config") { const jp = require("jsonpath") const depId = specValue["package-id"] + const depStatus = await getStatus(effects, { packageId: depId }).const() + if (!depStatus) { + mutConfigValue[key] = null + continue + } await effects.mount({ location: `/media/embassy/${depId}`, target: { diff --git a/container-runtime/src/Adapters/Systems/SystemForEmbassy/matchManifest.ts b/container-runtime/src/Adapters/Systems/SystemForEmbassy/matchManifest.ts index f3fe101eb..d3a309d18 100644 --- a/container-runtime/src/Adapters/Systems/SystemForEmbassy/matchManifest.ts +++ b/container-runtime/src/Adapters/Systems/SystemForEmbassy/matchManifest.ts @@ -10,6 +10,11 @@ const matchJsProcedure = z.object({ const matchProcedure = z.union([matchDockerProcedure, matchJsProcedure]) export type Procedure = z.infer +const healthCheckFields = { + name: z.string(), + "success-message": z.string().nullable().optional(), +} + const matchAction = z.object({ name: z.string(), description: z.string(), @@ -32,13 +37,10 @@ export const matchManifest = z.object({ .optional(), "health-checks": z.record( z.string(), - z.intersection( - matchProcedure, - z.object({ - name: 
z.string(), - "success-message": z.string().nullable().optional(), - }), - ), + z.union([ + matchDockerProcedure.extend(healthCheckFields), + matchJsProcedure.extend(healthCheckFields), + ]), ), config: z .object({ diff --git a/container-runtime/src/Adapters/Systems/SystemForStartOs.ts b/container-runtime/src/Adapters/Systems/SystemForStartOs.ts index 3b0d767ed..adf76aa44 100644 --- a/container-runtime/src/Adapters/Systems/SystemForStartOs.ts +++ b/container-runtime/src/Adapters/Systems/SystemForStartOs.ts @@ -71,7 +71,7 @@ export class SystemForStartOs implements System { this.starting = true effects.constRetry = utils.once(() => { console.debug(".const() triggered") - effects.restart() + if (effects.isInContext) effects.restart() }) let mainOnTerm: () => Promise | undefined const daemons = await ( diff --git a/core/CLAUDE.md b/core/CLAUDE.md index 883e68991..b0fd6e7ce 100644 --- a/core/CLAUDE.md +++ b/core/CLAUDE.md @@ -22,7 +22,7 @@ cd sdk && make baseDist dist # Rebuild SDK after ts-bindings - Always run `cargo check -p start-os` after modifying Rust code - When adding RPC endpoints, follow the patterns in [rpc-toolkit.md](rpc-toolkit.md) - When modifying `#[ts(export)]` types, regenerate bindings and rebuild the SDK (see [ARCHITECTURE.md](../ARCHITECTURE.md#build-pipeline)) -- When adding i18n keys, add all 5 locales in `core/locales/i18n.yaml` (see [i18n-patterns.md](i18n-patterns.md)) +- **i18n is mandatory** — any user-facing string must go in `core/locales/i18n.yaml` with all 5 locales (`en_US`, `de_DE`, `es_ES`, `fr_FR`, `pl_PL`). This includes CLI subcommand descriptions (`about.`), CLI arg help (`help.arg.`), error messages (`error.`), notifications, setup messages, and any other text shown to users. Entries are alphabetically ordered within their section. 
See [i18n-patterns.md](i18n-patterns.md) - When using DB watches, follow the `TypedDbWatch` patterns in [patchdb.md](patchdb.md) - **Always use `.invoke(ErrorKind::...)` instead of `.status()` when running CLI commands** via `tokio::process::Command`. The `Invoke` trait (from `crate::util::Invoke`) captures stdout/stderr and checks exit codes properly. Using `.status()` leaks stderr directly to system logs, creating noise. For check-then-act patterns (e.g. `iptables -C`), use `.invoke(...).await.is_ok()` / `.is_err()` instead of `.status().await.map_or(false, |s| s.success())`. - Always use file utils in util::io instead of tokio::fs when available diff --git a/core/Cargo.lock b/core/Cargo.lock index 739b886cf..8bb465168 100644 --- a/core/Cargo.lock +++ b/core/Cargo.lock @@ -3376,6 +3376,15 @@ dependencies = [ "serde_json", ] +[[package]] +name = "keccak" +version = "0.1.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "cb26cec98cce3a3d96cbb7bced3c4b16e3d13f27ec56dbd62cbc8f39cfb9d653" +dependencies = [ + "cpufeatures", +] + [[package]] name = "kv" version = "0.24.0" @@ -4355,7 +4364,7 @@ dependencies = [ "nix 0.30.1", "patch-db-macro", "serde", - "serde_cbor 0.11.1", + "serde_cbor_2", "thiserror 2.0.18", "tokio", "tracing", @@ -5377,7 +5386,7 @@ dependencies = [ "pin-project", "reqwest", "serde", - "serde_cbor 0.11.2", + "serde_cbor", "serde_json", "thiserror 2.0.18", "tokio", @@ -5785,19 +5794,20 @@ dependencies = [ [[package]] name = "serde_cbor" -version = "0.11.1" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" dependencies = [ "half 1.8.3", "serde", ] [[package]] -name = "serde_cbor" -version = "0.11.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5" +name = "serde_cbor_2" +version = "0.13.0" +source = 
"git+https://github.com/dr-bonez/cbor.git#2ce7fe5a5ca5700aa095668b5ba67154b7f213a4" dependencies = [ - "half 1.8.3", + "half 2.7.1", "serde", ] @@ -5984,6 +5994,16 @@ dependencies = [ "digest 0.10.7", ] +[[package]] +name = "sha3" +version = "0.10.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60" +dependencies = [ + "digest 0.10.7", + "keccak", +] + [[package]] name = "sharded-slab" version = "0.1.7" @@ -6414,7 +6434,7 @@ dependencies = [ [[package]] name = "start-os" -version = "0.4.0-alpha.20" +version = "0.4.0-alpha.21" dependencies = [ "aes", "async-acme", @@ -6518,6 +6538,7 @@ dependencies = [ "serde_yml", "sha-crypt", "sha2 0.10.9", + "sha3", "signal-hook", "socket2 0.6.2", "socks5-impl", diff --git a/core/Cargo.toml b/core/Cargo.toml index 9937dfaa1..581ea58e2 100644 --- a/core/Cargo.toml +++ b/core/Cargo.toml @@ -15,7 +15,7 @@ license = "MIT" name = "start-os" readme = "README.md" repository = "https://github.com/Start9Labs/start-os" -version = "0.4.0-alpha.20" # VERSION_BUMP +version = "0.4.0-alpha.21" # VERSION_BUMP [lib] name = "startos" @@ -170,9 +170,7 @@ once_cell = "1.19.0" openssh-keys = "0.6.2" openssl = { version = "0.10.57", features = ["vendored"] } p256 = { version = "0.13.2", features = ["pem"] } -patch-db = { version = "*", path = "../patch-db/patch-db", features = [ - "trace", -] } +patch-db = { version = "*", path = "../patch-db/core", features = ["trace"] } pbkdf2 = "0.12.2" pin-project = "1.1.3" pkcs8 = { version = "0.10.2", features = ["std"] } @@ -202,6 +200,7 @@ serde_toml = { package = "toml", version = "0.9.9+spec-1.0.0" } serde_yaml = { package = "serde_yml", version = "0.0.12" } sha-crypt = "0.5.0" sha2 = "0.10.2" +sha3 = "0.10" signal-hook = "0.3.17" socket2 = { version = "0.6.0", features = ["all"] } socks5-impl = { version = "0.7.2", features = ["client", "server"] } diff --git a/core/build/build-cli.sh b/core/build/build-cli.sh 
index d809a189f..889c5a766 100755 --- a/core/build/build-cli.sh +++ b/core/build/build-cli.sh @@ -67,6 +67,10 @@ if [[ "${ENVIRONMENT:-}" =~ (^|-)console($|-) ]]; then RUSTFLAGS="--cfg tokio_unstable" fi +if [[ "${ENVIRONMENT:-}" =~ (^|-)unstable($|-) ]]; then + RUSTFLAGS="$RUSTFLAGS -C debuginfo=1" +fi + echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin start-cli --target=$TARGET diff --git a/core/build/build-registrybox.sh b/core/build/build-registrybox.sh index 263a3ae6d..1d70895e3 100755 --- a/core/build/build-registrybox.sh +++ b/core/build/build-registrybox.sh @@ -38,6 +38,10 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then RUSTFLAGS="--cfg tokio_unstable" fi +if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then + RUSTFLAGS="$RUSTFLAGS -C debuginfo=1" +fi + echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin registrybox --target=$RUST_ARCH-unknown-linux-musl diff --git a/core/build/build-start-container.sh b/core/build/build-start-container.sh index d5a56549e..12f47063b 100755 --- a/core/build/build-start-container.sh +++ b/core/build/build-start-container.sh @@ -38,6 +38,10 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then RUSTFLAGS="--cfg tokio_unstable" fi +if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then + RUSTFLAGS="$RUSTFLAGS -C debuginfo=1" +fi + echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin start-container --target=$RUST_ARCH-unknown-linux-musl diff --git a/core/build/build-startbox.sh b/core/build/build-startbox.sh index 5a6df1771..86907e2db 100755 --- a/core/build/build-startbox.sh +++ b/core/build/build-startbox.sh @@ -38,6 +38,10 @@ if [[ 
"${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then RUSTFLAGS="--cfg tokio_unstable" fi +if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then + RUSTFLAGS="$RUSTFLAGS -C debuginfo=1" +fi + echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin startbox --target=$RUST_ARCH-unknown-linux-musl diff --git a/core/build/build-tunnelbox.sh b/core/build/build-tunnelbox.sh index 181af3644..1326a2422 100755 --- a/core/build/build-tunnelbox.sh +++ b/core/build/build-tunnelbox.sh @@ -38,6 +38,10 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then RUSTFLAGS="--cfg tokio_unstable" fi +if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then + RUSTFLAGS="$RUSTFLAGS -C debuginfo=1" +fi + echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin tunnelbox --target=$RUST_ARCH-unknown-linux-musl diff --git a/core/locales/i18n.yaml b/core/locales/i18n.yaml index 45a75fdc9..94d1225a2 100644 --- a/core/locales/i18n.yaml +++ b/core/locales/i18n.yaml @@ -857,6 +857,13 @@ error.set-sys-info: fr_FR: "Erreur de Définition des Infos Système" pl_PL: "Błąd Ustawiania Informacji o Systemie" +error.bios: + en_US: "BIOS/UEFI Error" + de_DE: "BIOS/UEFI-Fehler" + es_ES: "Error de BIOS/UEFI" + fr_FR: "Erreur BIOS/UEFI" + pl_PL: "Błąd BIOS/UEFI" + # disk/main.rs disk.main.disk-not-found: en_US: "StartOS disk not found." 
@@ -1248,6 +1255,13 @@ backup.bulk.leaked-reference: fr_FR: "référence fuitée vers BackupMountGuard" pl_PL: "wyciekła referencja do BackupMountGuard" +backup.bulk.service-not-ready: + en_US: "Cannot create a backup of a service that is still initializing or in an error state" + de_DE: "Es kann keine Sicherung eines Dienstes erstellt werden, der noch initialisiert wird oder sich im Fehlerzustand befindet" + es_ES: "No se puede crear una copia de seguridad de un servicio que aún se está inicializando o está en estado de error" + fr_FR: "Impossible de créer une sauvegarde d'un service encore en cours d'initialisation ou en état d'erreur" + pl_PL: "Nie można utworzyć kopii zapasowej usługi, która jest jeszcze inicjalizowana lub znajduje się w stanie błędu" + # backup/restore.rs backup.restore.package-error: en_US: "Error restoring package %{id}: %{error}" @@ -1372,6 +1386,21 @@ net.tor.client-error: fr_FR: "Erreur du client Tor : %{error}" pl_PL: "Błąd klienta Tor: %{error}" +# net/tunnel.rs +net.tunnel.timeout-waiting-for-add: + en_US: "timed out waiting for gateway %{gateway} to appear in database" + de_DE: "Zeitüberschreitung beim Warten auf das Erscheinen von Gateway %{gateway} in der Datenbank" + es_ES: "se agotó el tiempo esperando que la puerta de enlace %{gateway} aparezca en la base de datos" + fr_FR: "délai d'attente dépassé pour l'apparition de la passerelle %{gateway} dans la base de données" + pl_PL: "upłynął limit czasu oczekiwania na pojawienie się bramy %{gateway} w bazie danych" + +net.tunnel.timeout-waiting-for-remove: + en_US: "timed out waiting for gateway %{gateway} to be removed from database" + de_DE: "Zeitüberschreitung beim Warten auf das Entfernen von Gateway %{gateway} aus der Datenbank" + es_ES: "se agotó el tiempo esperando que la puerta de enlace %{gateway} sea eliminada de la base de datos" + fr_FR: "délai d'attente dépassé pour la suppression de la passerelle %{gateway} de la base de données" + pl_PL: "upłynął limit czasu oczekiwania na 
usunięcie bramy %{gateway} z bazy danych" + # net/wifi.rs net.wifi.ssid-no-special-characters: en_US: "SSID may not have special characters" @@ -1585,6 +1614,13 @@ net.gateway.cannot-delete-without-connection: fr_FR: "Impossible de supprimer l'appareil sans connexion active" pl_PL: "Nie można usunąć urządzenia bez aktywnego połączenia" +net.gateway.no-configured-echoip-urls: + en_US: "No configured echoip URLs" + de_DE: "Keine konfigurierten EchoIP-URLs" + es_ES: "No hay URLs de echoip configuradas" + fr_FR: "Aucune URL echoip configurée" + pl_PL: "Brak skonfigurowanych adresów URL echoip" + # net/dns.rs net.dns.timeout-updating-catalog: en_US: "timed out waiting to update dns catalog" @@ -2746,6 +2782,13 @@ help.arg.download-directory: fr_FR: "Chemin du répertoire de téléchargement" pl_PL: "Ścieżka katalogu do pobrania" +help.arg.echoip-urls: + en_US: "Echo IP service URLs for external IP detection" + de_DE: "Echo-IP-Dienst-URLs zur externen IP-Erkennung" + es_ES: "URLs del servicio Echo IP para detección de IP externa" + fr_FR: "URLs du service Echo IP pour la détection d'IP externe" + pl_PL: "Adresy URL usługi Echo IP do wykrywania zewnętrznego IP" + help.arg.emulate-missing-arch: en_US: "Emulate missing architecture using this one" de_DE: "Fehlende Architektur mit dieser emulieren" @@ -2914,6 +2957,13 @@ help.arg.log-limit: fr_FR: "Nombre maximum d'entrées de journal" pl_PL: "Maksymalna liczba wpisów logu" +help.arg.merge: + en_US: "Merge with existing version range instead of replacing" + de_DE: "Mit vorhandenem Versionsbereich zusammenführen statt ersetzen" + es_ES: "Combinar con el rango de versiones existente en lugar de reemplazar" + fr_FR: "Fusionner avec la plage de versions existante au lieu de remplacer" + pl_PL: "Połącz z istniejącym zakresem wersji zamiast zastępować" + help.arg.mirror-url: en_US: "URL of the mirror" de_DE: "URL des Spiegels" @@ -5204,12 +5254,12 @@ about.reset-user-interface-password: fr_FR: "Réinitialiser le mot de passe de 
l'interface utilisateur" pl_PL: "Zresetuj hasło interfejsu użytkownika" -about.reset-webserver: - en_US: "Reset the webserver" - de_DE: "Den Webserver zurücksetzen" - es_ES: "Restablecer el servidor web" - fr_FR: "Réinitialiser le serveur web" - pl_PL: "Zresetuj serwer internetowy" +about.uninitialize-webserver: + en_US: "Uninitialize the webserver" + de_DE: "Den Webserver deinitialisieren" + es_ES: "Desinicializar el servidor web" + fr_FR: "Désinitialiser le serveur web" + pl_PL: "Zdezinicjalizuj serwer internetowy" about.restart-server: en_US: "Restart the server" @@ -5246,6 +5296,13 @@ about.set-country: fr_FR: "Définir le pays" pl_PL: "Ustaw kraj" +about.set-echoip-urls: + en_US: "Set the Echo IP service URLs" + de_DE: "Die Echo-IP-Dienst-URLs festlegen" + es_ES: "Establecer las URLs del servicio Echo IP" + fr_FR: "Définir les URLs du service Echo IP" + pl_PL: "Ustaw adresy URL usługi Echo IP" + about.set-hostname: en_US: "Set the server hostname" de_DE: "Den Server-Hostnamen festlegen" diff --git a/core/src/backup/backup_bulk.rs b/core/src/backup/backup_bulk.rs index 722498f3c..cb70a77a1 100644 --- a/core/src/backup/backup_bulk.rs +++ b/core/src/backup/backup_bulk.rs @@ -300,6 +300,15 @@ async fn perform_backup( error: backup_result, }, ); + } else { + backup_report.insert( + id.clone(), + PackageBackupReport { + error: Some( + t!("backup.bulk.service-not-ready").to_string(), + ), + }, + ); } } @@ -323,9 +332,7 @@ async fn perform_backup( os_backup_file.save().await?; let luks_folder_old = backup_guard.path().join("luks.old"); - if tokio::fs::metadata(&luks_folder_old).await.is_ok() { - tokio::fs::remove_dir_all(&luks_folder_old).await?; - } + crate::util::io::delete_dir(&luks_folder_old).await?; let luks_folder_bak = backup_guard.path().join("luks"); if tokio::fs::metadata(&luks_folder_bak).await.is_ok() { tokio::fs::rename(&luks_folder_bak, &luks_folder_old).await?; diff --git a/core/src/backup/restore.rs b/core/src/backup/restore.rs index 
bc96d8823..77e7181f5 100644 --- a/core/src/backup/restore.rs +++ b/core/src/backup/restore.rs @@ -10,6 +10,7 @@ use tracing::instrument; use ts_rs::TS; use super::target::BackupTargetId; +use crate::PackageId; use crate::backup::os::OsBackup; use crate::context::setup::SetupResult; use crate::context::{RpcContext, SetupContext}; @@ -26,7 +27,6 @@ use crate::service::service_map::DownloadInstallFuture; use crate::setup::SetupExecuteProgress; use crate::system::{save_language, sync_kiosk}; use crate::util::serde::{IoFormat, Pem}; -use crate::{PLATFORM, PackageId}; #[derive(Deserialize, Serialize, Parser, TS)] #[serde(rename_all = "camelCase")] @@ -90,7 +90,7 @@ pub async fn recover_full_server( recovery_source: TmpMountGuard, server_id: &str, recovery_password: &str, - kiosk: Option, + kiosk: bool, hostname: Option, SetupExecuteProgress { init_phases, @@ -123,7 +123,6 @@ pub async fn recover_full_server( os_backup.account.hostname = h; } - let kiosk = Some(kiosk.unwrap_or(true)).filter(|_| &*PLATFORM != "raspberrypi"); sync_kiosk(kiosk).await?; let language = ctx.language.peek(|a| a.clone()); diff --git a/core/src/bins/container_cli.rs b/core/src/bins/container_cli.rs index a03204107..0f5c65226 100644 --- a/core/src/bins/container_cli.rs +++ b/core/src/bins/container_cli.rs @@ -7,10 +7,6 @@ use crate::service::cli::{ContainerCliContext, ContainerClientConfig}; use crate::util::logger::LOGGER; use crate::version::{Current, VersionT}; -lazy_static::lazy_static! 
{ - static ref VERSION_STRING: String = Current::default().semver().to_string(); -} - pub fn main(args: impl IntoIterator) { LOGGER.enable(); if let Err(e) = CliApp::new( @@ -18,6 +14,10 @@ pub fn main(args: impl IntoIterator) { crate::service::effects::handler(), ) .mutate_command(super::translate_cli) + .mutate_command(|cmd| { + cmd.name("start-container") + .version(Current::default().semver().to_string()) + }) .run(args) { match e.data { diff --git a/core/src/bins/mod.rs b/core/src/bins/mod.rs index 2b1959db7..f790bf5c1 100644 --- a/core/src/bins/mod.rs +++ b/core/src/bins/mod.rs @@ -149,6 +149,11 @@ impl MultiExecutable { } pub fn execute(&self) { + #[cfg(feature = "backtrace-on-stack-overflow")] + unsafe { + backtrace_on_stack_overflow::enable() + }; + set_locale_from_env(); let mut popped = Vec::with_capacity(2); diff --git a/core/src/bins/registry.rs b/core/src/bins/registry.rs index 13d0c54c2..49892247c 100644 --- a/core/src/bins/registry.rs +++ b/core/src/bins/registry.rs @@ -8,6 +8,7 @@ use tokio::signal::unix::signal; use tracing::instrument; use crate::context::CliContext; +use crate::version::{Current, VersionT}; use crate::context::config::ClientConfig; use crate::net::web_server::{Acceptor, WebServer}; use crate::prelude::*; @@ -101,6 +102,10 @@ pub fn cli(args: impl IntoIterator) { crate::registry::registry_api(), ) .mutate_command(super::translate_cli) + .mutate_command(|cmd| { + cmd.name("start-registry") + .version(Current::default().semver().to_string()) + }) .run(args) { match e.data { diff --git a/core/src/bins/start_cli.rs b/core/src/bins/start_cli.rs index e1d737be4..85847f110 100644 --- a/core/src/bins/start_cli.rs +++ b/core/src/bins/start_cli.rs @@ -8,10 +8,6 @@ use crate::context::config::ClientConfig; use crate::util::logger::LOGGER; use crate::version::{Current, VersionT}; -lazy_static::lazy_static! 
{ - static ref VERSION_STRING: String = Current::default().semver().to_string(); -} - pub fn main(args: impl IntoIterator) { LOGGER.enable(); @@ -20,6 +16,10 @@ pub fn main(args: impl IntoIterator) { crate::main_api(), ) .mutate_command(super::translate_cli) + .mutate_command(|cmd| { + cmd.name("start-cli") + .version(Current::default().semver().to_string()) + }) .run(args) { match e.data { diff --git a/core/src/bins/startd.rs b/core/src/bins/startd.rs index 314d3dc7a..487170b82 100644 --- a/core/src/bins/startd.rs +++ b/core/src/bins/startd.rs @@ -190,7 +190,7 @@ pub fn main(args: impl IntoIterator) { } } }); - rt.shutdown_timeout(Duration::from_secs(60)); + rt.shutdown_timeout(Duration::from_millis(100)); res }; diff --git a/core/src/bins/tunnel.rs b/core/src/bins/tunnel.rs index 07db8f671..3c72e556a 100644 --- a/core/src/bins/tunnel.rs +++ b/core/src/bins/tunnel.rs @@ -13,6 +13,7 @@ use tracing::instrument; use visit_rs::Visit; use crate::context::CliContext; +use crate::version::{Current, VersionT}; use crate::context::config::ClientConfig; use crate::net::tls::TlsListener; use crate::net::web_server::{Accept, Acceptor, MetadataVisitor, WebServer}; @@ -186,6 +187,10 @@ pub fn cli(args: impl IntoIterator) { crate::tunnel::api::tunnel_api(), ) .mutate_command(super::translate_cli) + .mutate_command(|cmd| { + cmd.name("start-tunnel") + .version(Current::default().semver().to_string()) + }) .run(args) { match e.data { diff --git a/core/src/context/config.rs b/core/src/context/config.rs index fb76f96ce..4e1dd4827 100644 --- a/core/src/context/config.rs +++ b/core/src/context/config.rs @@ -9,7 +9,6 @@ use serde::de::DeserializeOwned; use serde::{Deserialize, Serialize}; use crate::MAIN_DATA; -use crate::disk::OsPartitionInfo; use crate::prelude::*; use crate::util::serde::IoFormat; use crate::version::VersionT; @@ -120,8 +119,6 @@ impl ClientConfig { pub struct ServerConfig { #[arg(short, long, help = "help.arg.config-file-path")] pub config: Option, - #[arg(skip)] - 
pub os_partitions: Option, #[arg(long, help = "help.arg.socks-listen-address")] pub socks_listen: Option, #[arg(long, help = "help.arg.revision-cache-size")] @@ -138,7 +135,6 @@ impl ContextConfig for ServerConfig { self.config.take() } fn merge_with(&mut self, other: Self) { - self.os_partitions = self.os_partitions.take().or(other.os_partitions); self.socks_listen = self.socks_listen.take().or(other.socks_listen); self.revision_cache_size = self .revision_cache_size diff --git a/core/src/context/diagnostic.rs b/core/src/context/diagnostic.rs index c069d017f..bf27da071 100644 --- a/core/src/context/diagnostic.rs +++ b/core/src/context/diagnostic.rs @@ -39,7 +39,7 @@ impl DiagnosticContext { shutdown, disk_guid, error: Arc::new(error.into()), - rpc_continuations: RpcContinuations::new(), + rpc_continuations: RpcContinuations::new(None), }))) } } diff --git a/core/src/context/init.rs b/core/src/context/init.rs index b7d5eac6a..5f6c35222 100644 --- a/core/src/context/init.rs +++ b/core/src/context/init.rs @@ -32,7 +32,7 @@ impl InitContext { error: watch::channel(None).0, progress, shutdown, - rpc_continuations: RpcContinuations::new(), + rpc_continuations: RpcContinuations::new(None), }))) } } diff --git a/core/src/context/rpc.rs b/core/src/context/rpc.rs index f1fb6343d..204b000b5 100644 --- a/core/src/context/rpc.rs +++ b/core/src/context/rpc.rs @@ -62,8 +62,8 @@ pub struct RpcContextSeed { pub db: TypedPatchDb, pub sync_db: watch::Sender, pub account: SyncRwLock, - pub net_controller: Arc, pub os_net_service: NetService, + pub net_controller: Arc, pub s9pk_arch: Option<&'static str>, pub services: ServiceMap, pub cancellable_installs: SyncMutex>>, @@ -327,12 +327,7 @@ impl RpcContext { let seed = Arc::new(RpcContextSeed { is_closed: AtomicBool::new(false), - os_partitions: config.os_partitions.clone().ok_or_else(|| { - Error::new( - eyre!("{}", t!("context.rpc.os-partition-info-missing")), - ErrorKind::Filesystem, - ) - })?, + os_partitions: 
OsPartitionInfo::from_fstab().await?, wifi_interface: wifi_interface.clone(), ethernet_interface: find_eth_iface().await?, disk_guid, @@ -351,10 +346,10 @@ impl RpcContext { services, cancellable_installs: SyncMutex::new(BTreeMap::new()), metrics_cache, + rpc_continuations: RpcContinuations::new(Some(shutdown.clone())), shutdown, lxc_manager: Arc::new(LxcManager::new()), open_authed_continuations: OpenAuthedContinuations::new(), - rpc_continuations: RpcContinuations::new(), wifi_manager: Arc::new(RwLock::new(wifi_interface.clone().map(|i| WpaCli::init(i)))), current_secret: Arc::new( Jwk::generate_ec_key(josekit::jwk::alg::ec::EcCurve::P256).map_err(|e| { diff --git a/core/src/context/setup.rs b/core/src/context/setup.rs index d4d0bb9de..3d16624ef 100644 --- a/core/src/context/setup.rs +++ b/core/src/context/setup.rs @@ -85,7 +85,7 @@ impl SetupContext { result: OnceCell::new(), disk_guid: OnceCell::new(), shutdown, - rpc_continuations: RpcContinuations::new(), + rpc_continuations: RpcContinuations::new(None), install_rootfs: SyncMutex::new(None), language: SyncMutex::new(None), keyboard: SyncMutex::new(None), diff --git a/core/src/db/model/mod.rs b/core/src/db/model/mod.rs index 05fc8502d..1cdc7f9bf 100644 --- a/core/src/db/model/mod.rs +++ b/core/src/db/model/mod.rs @@ -31,7 +31,7 @@ pub struct Database { impl Database { pub fn init( account: &AccountInfo, - kiosk: Option, + kiosk: bool, language: Option, keyboard: Option, ) -> Result { diff --git a/core/src/db/model/public.rs b/core/src/db/model/public.rs index 30ee515fd..9477dac82 100644 --- a/core/src/db/model/public.rs +++ b/core/src/db/model/public.rs @@ -49,7 +49,7 @@ pub struct Public { impl Public { pub fn init( account: &AccountInfo, - kiosk: Option, + kiosk: bool, language: Option, keyboard: Option, ) -> Result { @@ -146,10 +146,10 @@ impl Public { zram: true, governor: None, smtp: None, - ifconfig_url: default_ifconfig_url(), + echoip_urls: default_echoip_urls(), ram: 0, devices: Vec::new(), - kiosk, + 
kiosk: Some(kiosk).filter(|_| &*PLATFORM != "raspberrypi"), language, keyboard, }, @@ -168,8 +168,11 @@ fn get_platform() -> InternedString { (&*PLATFORM).into() } -pub fn default_ifconfig_url() -> Url { - "https://ifconfig.co".parse().unwrap() +pub fn default_echoip_urls() -> Vec { + vec![ + "https://ipconfig.io".parse().unwrap(), + "https://ifconfig.co".parse().unwrap(), + ] } #[derive(Debug, Deserialize, Serialize, HasModel, TS)] @@ -206,9 +209,9 @@ pub struct ServerInfo { pub zram: bool, pub governor: Option, pub smtp: Option, - #[serde(default = "default_ifconfig_url")] - #[ts(type = "string")] - pub ifconfig_url: Url, + #[serde(default = "default_echoip_urls")] + #[ts(type = "string[]")] + pub echoip_urls: Vec, #[ts(type = "number")] pub ram: u64, pub devices: Vec, diff --git a/core/src/disk/fsck/mod.rs b/core/src/disk/fsck/mod.rs index 1c6949138..f2d210d46 100644 --- a/core/src/disk/fsck/mod.rs +++ b/core/src/disk/fsck/mod.rs @@ -25,20 +25,28 @@ pub enum RepairStrategy { Preen, Aggressive, } +/// Detects the filesystem type of a block device using `grub-probe`. +/// Returns e.g. `"ext2"` (for ext4), `"btrfs"`, etc. +pub async fn detect_filesystem( + logicalname: impl AsRef + std::fmt::Debug, +) -> Result { + Ok(String::from_utf8( + Command::new("grub-probe") + .arg("-d") + .arg(logicalname.as_ref()) + .invoke(crate::ErrorKind::DiskManagement) + .await?, + )? + .trim() + .to_owned()) +} + impl RepairStrategy { pub async fn fsck( &self, logicalname: impl AsRef + std::fmt::Debug, ) -> Result { - match &*String::from_utf8( - Command::new("grub-probe") - .arg("-d") - .arg(logicalname.as_ref()) - .invoke(crate::ErrorKind::DiskManagement) - .await?, - )? - .trim() - { + match &*detect_filesystem(&logicalname).await? 
{ "ext2" => self.e2fsck(logicalname).await, "btrfs" => self.btrfs_check(logicalname).await, fs => { diff --git a/core/src/disk/main.rs b/core/src/disk/main.rs index 349cb045e..da0007caf 100644 --- a/core/src/disk/main.rs +++ b/core/src/disk/main.rs @@ -7,7 +7,7 @@ use rust_i18n::t; use tokio::process::Command; use tracing::instrument; -use super::fsck::{RepairStrategy, RequiresReboot}; +use super::fsck::{RepairStrategy, RequiresReboot, detect_filesystem}; use super::util::pvscan; use crate::disk::mount::filesystem::block_dev::BlockDev; use crate::disk::mount::filesystem::{FileSystem, ReadWrite}; @@ -301,6 +301,37 @@ pub async fn mount_fs>( .with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?; blockdev_path = Path::new("/dev/mapper").join(&full_name); } + + // Convert ext4 → btrfs on the package-data partition if needed + let fs_type = detect_filesystem(&blockdev_path).await?; + if fs_type == "ext2" { + tracing::info!("Running e2fsck before converting {name} from ext4 to btrfs"); + Command::new("e2fsck") + .arg("-fy") + .arg(&blockdev_path) + .invoke(ErrorKind::DiskManagement) + .await?; + tracing::info!("Converting {name} from ext4 to btrfs"); + Command::new("btrfs-convert") + .arg("--no-progress") + .arg(&blockdev_path) + .invoke(ErrorKind::DiskManagement) + .await?; + // Defragment after conversion for optimal performance + let tmp_mount = datadir.as_ref().join(format!("{name}.convert-tmp")); + tokio::fs::create_dir_all(&tmp_mount).await?; + BlockDev::new(&blockdev_path) + .mount(&tmp_mount, ReadWrite) + .await?; + Command::new("btrfs") + .args(["filesystem", "defragment", "-r"]) + .arg(&tmp_mount) + .invoke(ErrorKind::DiskManagement) + .await?; + unmount(&tmp_mount, false).await?; + tokio::fs::remove_dir(&tmp_mount).await?; + } + let reboot = repair.fsck(&blockdev_path).await?; if !guid.ends_with("_UNENC") { @@ -342,3 +373,99 @@ pub async fn mount_all_fs>( reboot |= mount_fs(guid, &datadir, "package-data", repair, password).await?; Ok(reboot) } + +/// 
Temporarily activates a VG and opens LUKS to probe the `package-data` +/// filesystem type. Returns `None` if probing fails (e.g. LV doesn't exist). +#[instrument(skip_all)] +pub async fn probe_package_data_fs(guid: &str) -> Result, Error> { + // Import and activate the VG + match Command::new("vgimport") + .arg(guid) + .invoke(ErrorKind::DiskManagement) + .await + { + Ok(_) => {} + Err(e) + if format!("{}", e.source) + .lines() + .any(|l| l.trim() == format!("Volume group \"{}\" is not exported", guid)) => + { + // Already imported, that's fine + } + Err(e) => { + tracing::warn!("Could not import VG {guid} for filesystem probe: {e}"); + return Ok(None); + } + } + if let Err(e) = Command::new("vgchange") + .arg("-ay") + .arg(guid) + .invoke(ErrorKind::DiskManagement) + .await + { + tracing::warn!("Could not activate VG {guid} for filesystem probe: {e}"); + return Ok(None); + } + + let mut opened_luks = false; + let result = async { + let lv_path = Path::new("/dev").join(guid).join("package-data"); + if tokio::fs::metadata(&lv_path).await.is_err() { + return Ok(None); + } + + let blockdev_path = if !guid.ends_with("_UNENC") { + let full_name = format!("{guid}_package-data"); + let password = DEFAULT_PASSWORD; + if let Some(parent) = Path::new(PASSWORD_PATH).parent() { + tokio::fs::create_dir_all(parent).await?; + } + tokio::fs::write(PASSWORD_PATH, password) + .await + .with_ctx(|_| (ErrorKind::Filesystem, PASSWORD_PATH))?; + Command::new("cryptsetup") + .arg("-q") + .arg("luksOpen") + .arg("--allow-discards") + .arg(format!("--key-file={PASSWORD_PATH}")) + .arg(format!("--keyfile-size={}", password.len())) + .arg(&lv_path) + .arg(&full_name) + .invoke(ErrorKind::DiskManagement) + .await?; + let _ = tokio::fs::remove_file(PASSWORD_PATH).await; + opened_luks = true; + PathBuf::from(format!("/dev/mapper/{full_name}")) + } else { + lv_path.clone() + }; + + detect_filesystem(&blockdev_path).await.map(Some) + } + .await; + + // Always clean up: close LUKS, deactivate VG, 
export VG + if opened_luks { + let full_name = format!("{guid}_package-data"); + Command::new("cryptsetup") + .arg("-q") + .arg("luksClose") + .arg(&full_name) + .invoke(ErrorKind::DiskManagement) + .await + .log_err(); + } + Command::new("vgchange") + .arg("-an") + .arg(guid) + .invoke(ErrorKind::DiskManagement) + .await + .log_err(); + Command::new("vgexport") + .arg(guid) + .invoke(ErrorKind::DiskManagement) + .await + .log_err(); + + result +} diff --git a/core/src/disk/mod.rs b/core/src/disk/mod.rs index aea0ad9a3..ed312aede 100644 --- a/core/src/disk/mod.rs +++ b/core/src/disk/mod.rs @@ -1,13 +1,17 @@ +use std::collections::BTreeMap; use std::path::{Path, PathBuf}; use itertools::Itertools; use lazy_format::lazy_format; use rpc_toolkit::{CallRemoteHandler, Context, Empty, HandlerExt, ParentHandler, from_fn_async}; use serde::{Deserialize, Serialize}; +use tokio::process::Command; -use crate::Error; +use crate::{Error, ErrorKind}; use crate::context::{CliContext, RpcContext}; use crate::disk::util::DiskInfo; +use crate::prelude::*; +use crate::util::Invoke; use crate::util::serde::{HandlerExtSerde, WithIoFormat, display_serializable}; pub mod fsck; @@ -21,27 +25,143 @@ pub const REPAIR_DISK_PATH: &str = "/media/startos/config/repair-disk"; #[derive(Clone, Debug, Default, Deserialize, Serialize)] #[serde(rename_all = "camelCase")] pub struct OsPartitionInfo { - pub efi: Option, pub bios: Option, pub boot: PathBuf, pub root: PathBuf, - #[serde(skip)] // internal use only + #[serde(default)] + pub extra_boot: BTreeMap, + #[serde(skip)] pub data: Option, } impl OsPartitionInfo { pub fn contains(&self, logicalname: impl AsRef) -> bool { - self.efi - .as_ref() - .map(|p| p == logicalname.as_ref()) - .unwrap_or(false) - || self - .bios - .as_ref() - .map(|p| p == logicalname.as_ref()) - .unwrap_or(false) - || &*self.boot == logicalname.as_ref() - || &*self.root == logicalname.as_ref() + let p = logicalname.as_ref(); + self.bios.as_deref() == Some(p) + || p == 
&*self.boot + || p == &*self.root + || self.extra_boot.values().any(|v| v == p) } + + /// Build partition info by parsing /etc/fstab and resolving device specs, + /// then discovering the BIOS boot partition (which is never mounted). + pub async fn from_fstab() -> Result { + let fstab = tokio::fs::read_to_string("/etc/fstab") + .await + .with_ctx(|_| (ErrorKind::Filesystem, "/etc/fstab"))?; + + let mut boot = None; + let mut root = None; + let mut extra_boot = BTreeMap::new(); + + for line in fstab.lines() { + let line = line.trim(); + if line.is_empty() || line.starts_with('#') { + continue; + } + let mut fields = line.split_whitespace(); + let Some(source) = fields.next() else { + continue; + }; + let Some(target) = fields.next() else { + continue; + }; + + let dev = match resolve_fstab_source(source).await { + Ok(d) => d, + Err(e) => { + tracing::warn!("Failed to resolve fstab source {source}: {e}"); + continue; + } + }; + + match target { + "/" => root = Some(dev), + "/boot" => boot = Some(dev), + t if t.starts_with("/boot/") => { + if let Some(name) = t.strip_prefix("/boot/") { + extra_boot.insert(name.to_string(), dev); + } + } + _ => {} + } + } + + let boot = boot.unwrap_or_default(); + let bios = if !boot.as_os_str().is_empty() { + find_bios_boot_partition(&boot).await.ok().flatten() + } else { + None + }; + + Ok(Self { + bios, + boot, + root: root.unwrap_or_default(), + extra_boot, + data: None, + }) + } +} + +const BIOS_BOOT_TYPE_GUID: &str = "21686148-6449-6e6f-744e-656564726548"; + +/// Find the BIOS boot partition on the same disk as `known_part`. 
+async fn find_bios_boot_partition(known_part: &Path) -> Result, Error> { + let output = Command::new("lsblk") + .args(["-n", "-l", "-o", "NAME,PKNAME,PARTTYPE"]) + .arg(known_part) + .invoke(ErrorKind::DiskManagement) + .await?; + let text = String::from_utf8(output)?; + + let parent_disk = text.lines().find_map(|line| { + let mut fields = line.split_whitespace(); + let _name = fields.next()?; + let pkname = fields.next()?; + (!pkname.is_empty()).then(|| pkname.to_string()) + }); + + let Some(parent_disk) = parent_disk else { + return Ok(None); + }; + + let output = Command::new("lsblk") + .args(["-n", "-l", "-o", "NAME,PARTTYPE"]) + .arg(format!("/dev/{parent_disk}")) + .invoke(ErrorKind::DiskManagement) + .await?; + let text = String::from_utf8(output)?; + + for line in text.lines() { + let mut fields = line.split_whitespace(); + let Some(name) = fields.next() else { continue }; + let Some(parttype) = fields.next() else { + continue; + }; + if parttype.eq_ignore_ascii_case(BIOS_BOOT_TYPE_GUID) { + return Ok(Some(PathBuf::from(format!("/dev/{name}")))); + } + } + + Ok(None) +} + +/// Resolve an fstab device spec (e.g. /dev/sda1, PARTUUID=..., UUID=...) to a +/// canonical device path. +async fn resolve_fstab_source(source: &str) -> Result { + if source.starts_with('/') { + return Ok( + tokio::fs::canonicalize(source) + .await + .unwrap_or_else(|_| PathBuf::from(source)), + ); + } + // PARTUUID=, UUID=, LABEL= — resolve via blkid + let output = Command::new("blkid") + .args(["-o", "device", "-t", source]) + .invoke(ErrorKind::DiskManagement) + .await?; + Ok(PathBuf::from(String::from_utf8(output)?.trim())) } pub fn disk() -> ParentHandler { diff --git a/core/src/disk/mount/backup.rs b/core/src/disk/mount/backup.rs index 2c89981dc..f2b232ca0 100644 --- a/core/src/disk/mount/backup.rs +++ b/core/src/disk/mount/backup.rs @@ -53,9 +53,7 @@ impl BackupMountGuard { })?, )? 
} else { - if tokio::fs::metadata(&crypt_path).await.is_ok() { - tokio::fs::remove_dir_all(&crypt_path).await?; - } + crate::util::io::delete_dir(&crypt_path).await?; Default::default() }; let enc_key = if let (Some(hash), Some(wrapped_key)) = ( diff --git a/core/src/disk/mount/util.rs b/core/src/disk/mount/util.rs index 327bb2169..30b6a5435 100644 --- a/core/src/disk/mount/util.rs +++ b/core/src/disk/mount/util.rs @@ -52,13 +52,19 @@ pub async fn bind, P1: AsRef>( pub async fn unmount>(mountpoint: P, lazy: bool) -> Result<(), Error> { tracing::debug!("Unmounting {}.", mountpoint.as_ref().display()); let mut cmd = tokio::process::Command::new("umount"); + cmd.env("LANG", "C.UTF-8"); if lazy { cmd.arg("-l"); } - cmd.arg(mountpoint.as_ref()) + match cmd + .arg(mountpoint.as_ref()) .invoke(crate::ErrorKind::Filesystem) - .await?; - Ok(()) + .await + { + Ok(_) => Ok(()), + Err(e) if e.to_string().contains("not mounted") => Ok(()), + Err(e) => Err(e), + } } /// Unmounts all mountpoints under (and including) the given path, in reverse diff --git a/core/src/disk/util.rs b/core/src/disk/util.rs index 9cf2b6882..fdff3a966 100644 --- a/core/src/disk/util.rs +++ b/core/src/disk/util.rs @@ -41,6 +41,7 @@ pub struct DiskInfo { pub partitions: Vec, pub capacity: u64, pub guid: Option, + pub filesystem: Option, } #[derive(Clone, Debug, Deserialize, Serialize, ts_rs::TS)] @@ -55,6 +56,7 @@ pub struct PartitionInfo { pub used: Option, pub start_os: BTreeMap, pub guid: Option, + pub filesystem: Option, } #[derive(Clone, Debug, Default, Deserialize, Serialize, ts_rs::TS)] @@ -374,6 +376,15 @@ pub async fn list(os: &OsPartitionInfo) -> Result, Error> { disk_info.capacity = part_info.capacity; if let Some(g) = disk_guids.get(&disk_info.logicalname) { disk_info.guid = g.clone(); + if let Some(guid) = g { + disk_info.filesystem = + crate::disk::main::probe_package_data_fs(guid) + .await + .unwrap_or_else(|e| { + tracing::warn!("Failed to probe filesystem for {guid}: {e}"); + None + }); + 
} } else { disk_info.partitions = vec![part_info]; } @@ -384,11 +395,31 @@ pub async fn list(os: &OsPartitionInfo) -> Result, Error> { disk_info.partitions = Vec::with_capacity(index.parts.len()); if let Some(g) = disk_guids.get(&disk_info.logicalname) { disk_info.guid = g.clone(); + if let Some(guid) = g { + disk_info.filesystem = + crate::disk::main::probe_package_data_fs(guid) + .await + .unwrap_or_else(|e| { + tracing::warn!("Failed to probe filesystem for {guid}: {e}"); + None + }); + } } else { for part in index.parts { let mut part_info = part_info(part).await; if let Some(g) = disk_guids.get(&part_info.logicalname) { part_info.guid = g.clone(); + if let Some(guid) = g { + part_info.filesystem = + crate::disk::main::probe_package_data_fs(guid) + .await + .unwrap_or_else(|e| { + tracing::warn!( + "Failed to probe filesystem for {guid}: {e}" + ); + None + }); + } } disk_info.partitions.push(part_info); } @@ -461,6 +492,7 @@ async fn disk_info(disk: PathBuf) -> DiskInfo { partitions: Vec::new(), capacity, guid: None, + filesystem: None, } } @@ -544,6 +576,7 @@ async fn part_info(part: PathBuf) -> PartitionInfo { used, start_os, guid: None, + filesystem: None, } } diff --git a/core/src/error.rs b/core/src/error.rs index 55b4494b1..88f664394 100644 --- a/core/src/error.rs +++ b/core/src/error.rs @@ -101,6 +101,7 @@ pub enum ErrorKind { UpdateFailed = 77, Smtp = 78, SetSysInfo = 79, + Bios = 80, } impl ErrorKind { pub fn as_str(&self) -> String { @@ -185,6 +186,7 @@ impl ErrorKind { UpdateFailed => t!("error.update-failed"), Smtp => t!("error.smtp"), SetSysInfo => t!("error.set-sys-info"), + Bios => t!("error.bios"), } .to_string() } diff --git a/core/src/init.rs b/core/src/init.rs index 8b6a91625..ce489c0e9 100644 --- a/core/src/init.rs +++ b/core/src/init.rs @@ -173,6 +173,13 @@ pub async fn init( RpcContext::init_auth_cookie().await?; local_auth.complete(); + // Re-enroll MOK on every boot if Secure Boot key exists but isn't enrolled yet + if let Err(e) = + 
crate::util::mok::enroll_mok(std::path::Path::new(crate::util::mok::DKMS_MOK_PUB)).await + { + tracing::warn!("MOK enrollment failed: {e}"); + } + load_database.start(); let db = cfg.db().await?; crate::version::Current::default().pre_init(&db).await?; @@ -291,21 +298,15 @@ pub async fn init( init_tmp.start(); let tmp_dir = Path::new(PACKAGE_DATA).join("tmp"); - if tokio::fs::metadata(&tmp_dir).await.is_ok() { - tokio::fs::remove_dir_all(&tmp_dir).await?; - } + crate::util::io::delete_dir(&tmp_dir).await?; if tokio::fs::metadata(&tmp_dir).await.is_err() { tokio::fs::create_dir_all(&tmp_dir).await?; } let tmp_var = Path::new(PACKAGE_DATA).join("tmp/var"); - if tokio::fs::metadata(&tmp_var).await.is_ok() { - tokio::fs::remove_dir_all(&tmp_var).await?; - } + crate::util::io::delete_dir(&tmp_var).await?; crate::disk::mount::util::bind(&tmp_var, "/var/tmp", false).await?; let downloading = Path::new(PACKAGE_DATA).join("archive/downloading"); - if tokio::fs::metadata(&downloading).await.is_ok() { - tokio::fs::remove_dir_all(&downloading).await?; - } + crate::util::io::delete_dir(&downloading).await?; let tmp_docker = Path::new(PACKAGE_DATA).join("tmp").join(*CONTAINER_TOOL); crate::disk::mount::util::bind(&tmp_docker, *CONTAINER_DATADIR, false).await?; init_tmp.complete(); @@ -370,7 +371,7 @@ pub async fn init( enable_zram.complete(); update_server_info.start(); - sync_kiosk(server_info.as_kiosk().de()?).await?; + sync_kiosk(server_info.as_kiosk().de()?.unwrap_or(false)).await?; let ram = get_mem_info().await?.total.0 as u64 * 1024 * 1024; let devices = lshw().await?; let status_info = ServerStatus { diff --git a/core/src/lib.rs b/core/src/lib.rs index 10913503d..bf5805e88 100644 --- a/core/src/lib.rs +++ b/core/src/lib.rs @@ -400,10 +400,10 @@ pub fn server() -> ParentHandler { .with_call_remote::(), ) .subcommand( - "set-ifconfig-url", - from_fn_async(system::set_ifconfig_url) + "set-echoip-urls", + from_fn_async(system::set_echoip_urls) .no_display() - 
.with_about("about.set-ifconfig-url") + .with_about("about.set-echoip-urls") .with_call_remote::(), ) .subcommand( diff --git a/core/src/net/dns.rs b/core/src/net/dns.rs index 1083b74dc..49c667297 100644 --- a/core/src/net/dns.rs +++ b/core/src/net/dns.rs @@ -32,6 +32,7 @@ use crate::context::{CliContext, RpcContext}; use crate::db::model::Database; use crate::db::model::public::NetworkInterfaceInfo; use crate::net::gateway::NetworkInterfaceWatcher; +use crate::net::utils::is_private_ip; use crate::prelude::*; use crate::util::future::NonDetachingJoinHandle; use crate::util::io::file_string_stream; @@ -400,6 +401,18 @@ impl Resolver { }) }) { return Some(res); + } else if is_private_ip(src) { + // Source is a private IP not in any known subnet (e.g. VPN on a different VLAN). + // Return all private IPs from all interfaces. + let res: Vec = self.net_iface.peek(|i| { + i.values() + .filter_map(|i| i.ip_info.as_ref()) + .flat_map(|ip_info| ip_info.subnets.iter().map(|s| s.addr())) + .collect() + }); + if !res.is_empty() { + return Some(res); + } } else { tracing::warn!( "{}", diff --git a/core/src/net/gateway.rs b/core/src/net/gateway.rs index a377eb453..91a012df1 100644 --- a/core/src/net/gateway.rs +++ b/core/src/net/gateway.rs @@ -205,7 +205,7 @@ pub async fn check_port( CheckPortParams { port, gateway }: CheckPortParams, ) -> Result { let db = ctx.db.peek().await; - let base_url = db.as_public().as_server_info().as_ifconfig_url().de()?; + let base_urls = db.as_public().as_server_info().as_echoip_urls().de()?; let gateways = db .as_public() .as_server_info() @@ -240,22 +240,41 @@ pub async fn check_port( let client = reqwest::Client::builder(); #[cfg(target_os = "linux")] let client = client.interface(gateway.as_str()); - let url = base_url - .join(&format!("/port/{port}")) - .with_kind(ErrorKind::ParseUrl)?; - let IfconfigPortRes { + let client = client.build()?; + + let mut res = None; + for base_url in base_urls { + let url = base_url + 
.join(&format!("/port/{port}")) + .with_kind(ErrorKind::ParseUrl)?; + res = Some( + async { + client + .get(url) + .timeout(Duration::from_secs(5)) + .send() + .await? + .error_for_status()? + .json() + .await + } + .await, + ); + if res.as_ref().map_or(false, |r| r.is_ok()) { + break; + } + } + let Some(IfconfigPortRes { ip, port, reachable: open_externally, - } = client - .build()? - .get(url) - .timeout(Duration::from_secs(10)) - .send() - .await? - .error_for_status()? - .json() - .await?; + }) = res.transpose()? + else { + return Err(Error::new( + eyre!("{}", t!("net.gateway.no-configured-echoip-urls")), + ErrorKind::Network, + )); + }; let hairpinning = tokio::time::timeout( Duration::from_secs(5), @@ -761,7 +780,7 @@ async fn get_wan_ipv4(iface: &str, base_url: &Url) -> Result, E let text = client .build()? .get(url) - .timeout(Duration::from_secs(10)) + .timeout(Duration::from_secs(5)) .send() .await? .error_for_status()? @@ -857,7 +876,7 @@ async fn watch_ip( .fuse() }); - let mut prev_attempt: Option = None; + let mut echoip_ratelimit_state: BTreeMap = BTreeMap::new(); loop { until @@ -967,7 +986,7 @@ async fn watch_ip( &dhcp4_proxy, &policy_guard, &iface, - &mut prev_attempt, + &mut echoip_ratelimit_state, db, write_to, device_type, @@ -999,18 +1018,16 @@ async fn apply_policy_routing( }) .copied(); - // Flush and rebuild per-interface routing table. - // Clone all non-default routes from the main table so that LAN IPs on - // other subnets remain reachable when the priority-75 catch-all overrides - // default routing, then replace the default route with this interface's. - Command::new("ip") - .arg("route") - .arg("flush") - .arg("table") - .arg(&table_str) - .invoke(ErrorKind::Network) - .await - .log_err(); + // Rebuild per-interface routing table using `ip route replace` to avoid + // the connectivity gap that a flush+add cycle would create. 
We replace + // every desired route in-place (each replace is atomic in the kernel), + // then delete any stale routes that are no longer in the desired set. + + // Collect the set of desired non-default route prefixes (the first + // whitespace-delimited token of each `ip route show` line is the + // destination prefix, e.g. "192.168.1.0/24" or "10.0.0.0/8"). + let mut desired_prefixes = BTreeSet::::new(); + if let Ok(main_routes) = Command::new("ip") .arg("route") .arg("show") @@ -1025,11 +1042,14 @@ async fn apply_policy_routing( if line.is_empty() || line.starts_with("default") { continue; } + if let Some(prefix) = line.split_whitespace().next() { + desired_prefixes.insert(prefix.to_owned()); + } let mut cmd = Command::new("ip"); - cmd.arg("route").arg("add"); + cmd.arg("route").arg("replace"); for part in line.split_whitespace() { // Skip status flags that appear in route output but - // are not valid for `ip route add`. + // are not valid for `ip route replace`. if part == "linkdown" || part == "dead" { continue; } @@ -1039,10 +1059,11 @@ async fn apply_policy_routing( cmd.invoke(ErrorKind::Network).await.log_err(); } } - // Add default route via this interface's gateway + + // Replace the default route via this interface's gateway. { let mut cmd = Command::new("ip"); - cmd.arg("route").arg("add").arg("default"); + cmd.arg("route").arg("replace").arg("default"); if let Some(gw) = ipv4_gateway { cmd.arg("via").arg(gw.to_string()); } @@ -1056,6 +1077,40 @@ async fn apply_policy_routing( cmd.invoke(ErrorKind::Network).await.log_err(); } + // Delete stale routes: any non-default route in the per-interface table + // whose prefix is not in the desired set. 
+ if let Ok(existing_routes) = Command::new("ip") + .arg("route") + .arg("show") + .arg("table") + .arg(&table_str) + .invoke(ErrorKind::Network) + .await + .and_then(|b| String::from_utf8(b).with_kind(ErrorKind::Utf8)) + { + for line in existing_routes.lines() { + let line = line.trim(); + if line.is_empty() || line.starts_with("default") { + continue; + } + let Some(prefix) = line.split_whitespace().next() else { + continue; + }; + if desired_prefixes.contains(prefix) { + continue; + } + Command::new("ip") + .arg("route") + .arg("del") + .arg(prefix) + .arg("table") + .arg(&table_str) + .invoke(ErrorKind::Network) + .await + .log_err(); + } + } + // Ensure global CONNMARK restore rules in mangle PREROUTING (forwarded // packets) and OUTPUT (locally-generated replies). Both are needed: // PREROUTING handles DNAT-forwarded traffic, OUTPUT handles replies from @@ -1174,7 +1229,7 @@ async fn poll_ip_info( dhcp4_proxy: &Option>, policy_guard: &Option, iface: &GatewayId, - prev_attempt: &mut Option, + echoip_ratelimit_state: &mut BTreeMap, db: Option<&TypedPatchDb>, write_to: &Watch>, device_type: Option, @@ -1221,43 +1276,49 @@ async fn poll_ip_info( apply_policy_routing(guard, iface, &lan_ip).await?; } - let ifconfig_url = if let Some(db) = db { + let echoip_urls = if let Some(db) = db { db.peek() .await .as_public() .as_server_info() - .as_ifconfig_url() + .as_echoip_urls() .de() - .unwrap_or_else(|_| crate::db::model::public::default_ifconfig_url()) + .unwrap_or_else(|_| crate::db::model::public::default_echoip_urls()) } else { - crate::db::model::public::default_ifconfig_url() + crate::db::model::public::default_echoip_urls() }; - let wan_ip = if prev_attempt.map_or(true, |i| i.elapsed() > Duration::from_secs(300)) - && !subnets.is_empty() - && !matches!( - device_type, - Some(NetworkInterfaceType::Bridge | NetworkInterfaceType::Loopback) - ) { - let res = match get_wan_ipv4(iface.as_str(), &ifconfig_url).await { - Ok(a) => a, - Err(e) => { - tracing::error!( - 
"{}", - t!( - "net.gateway.failed-to-determine-wan-ip", - iface = iface.to_string(), - error = e.to_string() - ) - ); - tracing::debug!("{e:?}"); - None + let mut wan_ip = None; + for echoip_url in echoip_urls { + let wan_ip = if echoip_ratelimit_state + .get(&echoip_url) + .map_or(true, |i| i.elapsed() > Duration::from_secs(300)) + && !subnets.is_empty() + && !matches!( + device_type, + Some(NetworkInterfaceType::Bridge | NetworkInterfaceType::Loopback) + ) { + match get_wan_ipv4(iface.as_str(), &echoip_url).await { + Ok(a) => { + wan_ip = a; + } + Err(e) => { + tracing::error!( + "{}", + t!( + "net.gateway.failed-to-determine-wan-ip", + iface = iface.to_string(), + error = e.to_string() + ) + ); + tracing::debug!("{e:?}"); + } + }; + echoip_ratelimit_state.insert(echoip_url, Instant::now()); + if wan_ip.is_some() { + break; } }; - *prev_attempt = Some(Instant::now()); - res - } else { - None - }; + } let mut ip_info = IpInfo { name: name.clone(), scope_id, diff --git a/core/src/net/host/mod.rs b/core/src/net/host/mod.rs index c77b4aa26..941cc15f3 100644 --- a/core/src/net/host/mod.rs +++ b/core/src/net/host/mod.rs @@ -283,7 +283,7 @@ impl Model { }; available.insert(HostnameInfo { ssl: opt.secure.map_or(false, |s| s.ssl), - public: true, + public: false, hostname: domain.clone(), port: Some(port), metadata: HostnameMetadata::PrivateDomain { gateways }, @@ -300,7 +300,7 @@ impl Model { } available.insert(HostnameInfo { ssl: true, - public: true, + public: false, hostname: domain, port: Some(port), metadata: HostnameMetadata::PrivateDomain { @@ -314,7 +314,7 @@ impl Model { { available.insert(HostnameInfo { ssl: true, - public: true, + public: false, hostname: domain, port: Some(opt.preferred_external_port), metadata: HostnameMetadata::PrivateDomain { diff --git a/core/src/net/net_controller.rs b/core/src/net/net_controller.rs index 529b8824a..c48c7c9c6 100644 --- a/core/src/net/net_controller.rs +++ b/core/src/net/net_controller.rs @@ -820,7 +820,6 @@ impl 
NetService { break; } } - self.shutdown = true; Ok(()) } @@ -832,6 +831,7 @@ impl NetService { impl Drop for NetService { fn drop(&mut self) { if !self.shutdown { + self.shutdown = true; let svc = std::mem::replace(self, Self::dummy()); tokio::spawn(async move { svc.remove_all().await.log_err() }); } diff --git a/core/src/net/service_interface.rs b/core/src/net/service_interface.rs index 7c4b294aa..6fc2aa52e 100644 --- a/core/src/net/service_interface.rs +++ b/core/src/net/service_interface.rs @@ -145,9 +145,10 @@ pub struct GatewayInfo { pub public: bool, } -#[derive(Clone, Debug, Deserialize, Serialize, TS)] -#[ts(export)] +#[derive(Clone, Debug, Deserialize, Serialize, HasModel, TS)] #[serde(rename_all = "camelCase")] +#[model = "Model"] +#[ts(export)] pub struct ServiceInterface { pub id: ServiceInterfaceId, pub name: String, diff --git a/core/src/net/ssl.rs b/core/src/net/ssl.rs index 3b8e69c8e..e2dfc6eea 100644 --- a/core/src/net/ssl.rs +++ b/core/src/net/ssl.rs @@ -188,7 +188,7 @@ lazy_static::lazy_static! 
{ } fn asn1_time_to_system_time(time: &Asn1TimeRef) -> Result { - let diff = time.diff(&**ASN1_UNIX_EPOCH)?; + let diff = ASN1_UNIX_EPOCH.diff(time)?; let mut res = UNIX_EPOCH; if diff.days >= 0 { res += Duration::from_secs(diff.days as u64 * 86400); diff --git a/core/src/net/tunnel.rs b/core/src/net/tunnel.rs index da0f6d84c..694434514 100644 --- a/core/src/net/tunnel.rs +++ b/core/src/net/tunnel.rs @@ -1,3 +1,5 @@ +use std::time::Duration; + use clap::Parser; use imbl_value::InternedString; use patch_db::json_ptr::JsonPointer; @@ -8,7 +10,9 @@ use ts_rs::TS; use crate::GatewayId; use crate::context::{CliContext, RpcContext}; -use crate::db::model::public::{GatewayType, NetworkInterfaceInfo, NetworkInterfaceType}; +use crate::db::model::public::{ + GatewayType, NetworkInfo, NetworkInterfaceInfo, NetworkInterfaceType, +}; use crate::net::host::all_hosts; use crate::prelude::*; use crate::util::Invoke; @@ -139,6 +143,34 @@ pub async fn add_tunnel( .result?; } + // Wait for the sync loop to fully commit gateway state (addresses, hosts) + // to the database, with a 15-second timeout. + if tokio::time::timeout(Duration::from_secs(15), async { + let mut watch = ctx + .db + .watch("/public/serverInfo/network".parse::().unwrap()) + .await + .typed::(); + loop { + if watch + .peek()? + .as_gateways() + .as_idx(&iface) + .and_then(|g| g.as_ip_info().transpose_ref()) + .is_some() + { + break; + } + watch.changed().await?; + } + Ok::<_, Error>(()) + }) + .await + .is_err() + { + tracing::warn!("{}", t!("net.tunnel.timeout-waiting-for-add", gateway = iface.as_str())); + } + Ok(iface) } @@ -224,5 +256,27 @@ pub async fn remove_tunnel( .await .result?; + // Wait for the sync loop to fully commit gateway removal to the database, + // with a 15-second timeout. 
+ if tokio::time::timeout(Duration::from_secs(15), async { + let mut watch = ctx + .db + .watch("/public/serverInfo/network".parse::().unwrap()) + .await + .typed::(); + loop { + if watch.peek()?.as_gateways().as_idx(&id).is_none() { + break; + } + watch.changed().await?; + } + Ok::<_, Error>(()) + }) + .await + .is_err() + { + tracing::warn!("{}", t!("net.tunnel.timeout-waiting-for-remove", gateway = id.as_str())); + } + Ok(()) } diff --git a/core/src/net/utils.rs b/core/src/net/utils.rs index 9f3a3682c..61466ee71 100644 --- a/core/src/net/utils.rs +++ b/core/src/net/utils.rs @@ -66,6 +66,13 @@ pub fn ipv6_is_local(addr: Ipv6Addr) -> bool { addr.is_loopback() || (addr.segments()[0] & 0xfe00) == 0xfc00 || ipv6_is_link_local(addr) } +pub fn is_private_ip(addr: IpAddr) -> bool { + match addr { + IpAddr::V4(v4) => v4.is_private() || v4.is_loopback() || v4.is_link_local(), + IpAddr::V6(v6) => ipv6_is_local(v6), + } +} + fn parse_iface_ip(output: &str) -> Result, Error> { let output = output.trim(); if output.is_empty() { diff --git a/core/src/net/vhost.rs b/core/src/net/vhost.rs index 970a9ccb9..6b4962e50 100644 --- a/core/src/net/vhost.rs +++ b/core/src/net/vhost.rs @@ -38,7 +38,7 @@ use crate::net::ssl::{CertStore, RootCaTlsHandler}; use crate::net::tls::{ ChainedHandler, TlsHandlerAction, TlsHandlerWrapper, TlsListener, TlsMetadata, WrapTlsHandler, }; -use crate::net::utils::ipv6_is_link_local; +use crate::net::utils::{ipv6_is_link_local, is_private_ip}; use crate::net::web_server::{Accept, AcceptStream, ExtractVisitor, TcpMetadata, extract}; use crate::prelude::*; use crate::util::collections::EqSet; @@ -732,8 +732,9 @@ where }; let src = tcp.peer_addr.ip(); - // Public: source is outside all known subnets (direct internet) - let is_public = !ip_info.subnets.iter().any(|s| s.contains(&src)); + // Private: source is in a known subnet or is a private IP (e.g. 
VPN on a different VLAN) + let is_public = + !ip_info.subnets.iter().any(|s| s.contains(&src)) && !is_private_ip(src); if is_public { self.public.contains(&gw.id) diff --git a/core/src/net/web_server.rs b/core/src/net/web_server.rs index 8ffe9deaa..eed7cf37c 100644 --- a/core/src/net/web_server.rs +++ b/core/src/net/web_server.rs @@ -509,7 +509,7 @@ where drop(queue_cell.replace(None)); if !runner.is_empty() { - tokio::time::timeout(Duration::from_secs(60), runner) + tokio::time::timeout(Duration::from_millis(100), runner) .await .log_err(); } diff --git a/core/src/os_install/fstab.template b/core/src/os_install/fstab.template index c299a0aae..196a06be7 100644 --- a/core/src/os_install/fstab.template +++ b/core/src/os_install/fstab.template @@ -1,3 +1,3 @@ {boot} /boot vfat umask=0077 0 2 {efi} /boot/efi vfat umask=0077 0 1 -{root} / ext4 defaults 0 1 \ No newline at end of file +{root} / btrfs defaults 0 1 \ No newline at end of file diff --git a/core/src/os_install/gpt.rs b/core/src/os_install/gpt.rs index 0fe5d0665..932538d98 100644 --- a/core/src/os_install/gpt.rs +++ b/core/src/os_install/gpt.rs @@ -197,11 +197,19 @@ pub async fn partition( .invoke(crate::ErrorKind::DiskManagement) .await?; + let mut extra_boot = std::collections::BTreeMap::new(); + let bios; + if efi { + extra_boot.insert("efi".to_string(), partition_for(&disk_path, 1)); + bios = None; + } else { + bios = Some(partition_for(&disk_path, 1)); + } Ok(OsPartitionInfo { - efi: efi.then(|| partition_for(&disk_path, 1)), - bios: (!efi).then(|| partition_for(&disk_path, 1)), + bios, boot: partition_for(&disk_path, 2), root: partition_for(&disk_path, 3), + extra_boot, data: data_part, }) } diff --git a/core/src/os_install/mbr.rs b/core/src/os_install/mbr.rs index b121198f8..090fa9554 100644 --- a/core/src/os_install/mbr.rs +++ b/core/src/os_install/mbr.rs @@ -164,10 +164,10 @@ pub async fn partition( .await?; Ok(OsPartitionInfo { - efi: None, bios: None, boot: partition_for(&disk_path, 1), root: 
partition_for(&disk_path, 2), + extra_boot: Default::default(), data: data_part, }) } diff --git a/core/src/os_install/mod.rs b/core/src/os_install/mod.rs index d04491acd..72918042d 100644 --- a/core/src/os_install/mod.rs +++ b/core/src/os_install/mod.rs @@ -21,69 +21,12 @@ use crate::prelude::*; use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile; use crate::setup::SetupInfo; use crate::util::Invoke; -use crate::util::io::{TmpDir, delete_file, open_file, write_file_atomic}; +use crate::util::io::{TmpDir, delete_dir, delete_file, open_file, write_file_atomic}; use crate::util::serde::IoFormat; mod gpt; mod mbr; -/// Get the EFI BootCurrent entry number (the entry firmware used to boot). -/// Returns None on non-EFI systems or if BootCurrent is not set. -async fn get_efi_boot_current() -> Result, Error> { - let efi_output = String::from_utf8( - Command::new("efibootmgr") - .invoke(ErrorKind::Grub) - .await?, - ) - .map_err(|e| Error::new(eyre!("efibootmgr output not valid UTF-8: {e}"), ErrorKind::Grub))?; - - Ok(efi_output - .lines() - .find(|line| line.starts_with("BootCurrent:")) - .and_then(|line| line.strip_prefix("BootCurrent:")) - .map(|s| s.trim().to_string())) -} - -/// Promote a specific boot entry to first in the EFI boot order. 
-async fn promote_efi_entry(entry: &str) -> Result<(), Error> { - let efi_output = String::from_utf8( - Command::new("efibootmgr") - .invoke(ErrorKind::Grub) - .await?, - ) - .map_err(|e| Error::new(eyre!("efibootmgr output not valid UTF-8: {e}"), ErrorKind::Grub))?; - - let current_order = efi_output - .lines() - .find(|line| line.starts_with("BootOrder:")) - .and_then(|line| line.strip_prefix("BootOrder:")) - .map(|s| s.trim()) - .unwrap_or(""); - - if current_order.is_empty() || current_order.starts_with(entry) { - return Ok(()); - } - - let other_entries: Vec<&str> = current_order - .split(',') - .filter(|e| e.trim() != entry) - .collect(); - - let new_order = if other_entries.is_empty() { - entry.to_string() - } else { - format!("{},{}", entry, other_entries.join(",")) - }; - - Command::new("efibootmgr") - .arg("-o") - .arg(&new_order) - .invoke(ErrorKind::Grub) - .await?; - - Ok(()) -} - /// Probe a squashfs image to determine its target architecture async fn probe_squashfs_arch(squashfs_path: &Path) -> Result { let output = String::from_utf8( @@ -182,6 +125,7 @@ struct DataDrive { pub struct InstallOsResult { pub part_info: OsPartitionInfo, pub rootfs: TmpMountGuard, + pub mok_enrolled: bool, } pub async fn install_os_to( @@ -199,7 +143,7 @@ pub async fn install_os_to( let part_info = partition(disk_path, capacity, partition_table, protect, use_efi).await?; - if let Some(efi) = &part_info.efi { + if let Some(efi) = part_info.extra_boot.get("efi") { Command::new("mkfs.vfat") .arg(efi) .invoke(crate::ErrorKind::DiskManagement) @@ -230,6 +174,7 @@ pub async fn install_os_to( delete_file(guard.path().join("config/upgrade")).await?; delete_file(guard.path().join("config/overlay/etc/hostname")).await?; delete_file(guard.path().join("config/disk.guid")).await?; + delete_dir(guard.path().join("config/lib/modules")).await?; Command::new("cp") .arg("-r") .arg(guard.path().join("config")) @@ -265,9 +210,7 @@ pub async fn install_os_to( let config_path = 
rootfs.path().join("config"); if tokio::fs::metadata("/tmp/config.bak").await.is_ok() { - if tokio::fs::metadata(&config_path).await.is_ok() { - tokio::fs::remove_dir_all(&config_path).await?; - } + crate::util::io::delete_dir(&config_path).await?; Command::new("cp") .arg("-r") .arg("/tmp/config.bak") @@ -317,10 +260,7 @@ pub async fn install_os_to( tokio::fs::write( rootfs.path().join("config/config.yaml"), - IoFormat::Yaml.to_vec(&ServerConfig { - os_partitions: Some(part_info.clone()), - ..Default::default() - })?, + IoFormat::Yaml.to_vec(&ServerConfig::default())?, ) .await?; @@ -339,7 +279,7 @@ pub async fn install_os_to( ReadWrite, ) .await?; - let efi = if let Some(efi) = &part_info.efi { + let efi = if let Some(efi) = part_info.extra_boot.get("efi") { Some( MountGuard::mount( &BlockDev::new(efi), @@ -380,8 +320,8 @@ pub async fn install_os_to( include_str!("fstab.template"), boot = part_info.boot.display(), efi = part_info - .efi - .as_ref() + .extra_boot + .get("efi") .map(|p| p.display().to_string()) .unwrap_or_else(|| "# N/A".to_owned()), root = part_info.root.display(), @@ -402,6 +342,28 @@ pub async fn install_os_to( .invoke(crate::ErrorKind::OpenSsh) .await?; + // Secure Boot: generate MOK key, sign unsigned modules, enroll MOK + let mut mok_enrolled = false; + if use_efi && crate::util::mok::is_secure_boot_enabled().await { + let new_key = crate::util::mok::ensure_dkms_key(overlay.path()).await?; + tracing::info!( + "DKMS MOK key: {}", + if new_key { + "generated" + } else { + "already exists" + } + ); + + crate::util::mok::sign_unsigned_modules(overlay.path()).await?; + + let mok_pub = overlay.path().join(crate::util::mok::DKMS_MOK_PUB.trim_start_matches('/')); + match crate::util::mok::enroll_mok(&mok_pub).await { + Ok(enrolled) => mok_enrolled = enrolled, + Err(e) => tracing::warn!("MOK enrollment failed: {e}"), + } + } + let mut install = Command::new("chroot"); install.arg(overlay.path()).arg("grub-install"); if !use_efi { @@ -443,7 +405,11 @@ 
pub async fn install_os_to( tokio::fs::remove_dir_all(&work).await?; lower.unmount().await?; - Ok(InstallOsResult { part_info, rootfs }) + Ok(InstallOsResult { + part_info, + rootfs, + mok_enrolled, + }) } pub async fn install_os( @@ -486,21 +452,11 @@ pub async fn install_os( let use_efi = tokio::fs::metadata("/sys/firmware/efi").await.is_ok(); - // Save the boot entry we booted from (the USB installer) before grub-install - // overwrites the boot order. - let boot_current = if use_efi { - match get_efi_boot_current().await { - Ok(entry) => entry, - Err(e) => { - tracing::warn!("Failed to get EFI BootCurrent: {e}"); - None - } - } - } else { - None - }; - - let InstallOsResult { part_info, rootfs } = install_os_to( + let InstallOsResult { + part_info, + rootfs, + mok_enrolled, + } = install_os_to( "/run/live/medium/live/filesystem.squashfs", &disk.logicalname, disk.capacity, @@ -511,24 +467,8 @@ pub async fn install_os( ) .await?; - // grub-install prepends its new entry to the EFI boot order, overriding the - // USB-first priority. Promote the USB entry (identified by BootCurrent from - // when we booted the installer) back to first, and persist the entry number - // so the upgrade script can do the same. 
- if let Some(ref entry) = boot_current { - if let Err(e) = promote_efi_entry(entry).await { - tracing::warn!("Failed to restore EFI boot order: {e}"); - } - let efi_entry_path = rootfs.path().join("config/efi-installer-entry"); - if let Err(e) = tokio::fs::write(&efi_entry_path, entry).await { - tracing::warn!("Failed to save EFI installer entry number: {e}"); - } - } - - ctx.config - .mutate(|c| c.os_partitions = Some(part_info.clone())); - let mut setup_info = SetupInfo::default(); + setup_info.mok_enrolled = mok_enrolled; if let Some(data_drive) = data_drive { let mut logicalname = &*data_drive.logicalname; @@ -612,7 +552,11 @@ pub async fn cli_install_os( let use_efi = efi.unwrap_or_else(|| !matches!(partition_table, Some(PartitionTable::Mbr))); - let InstallOsResult { part_info, rootfs } = install_os_to( + let InstallOsResult { + part_info, + rootfs, + mok_enrolled: _, + } = install_os_to( &squashfs, &disk, capacity, diff --git a/core/src/registry/context.rs b/core/src/registry/context.rs index c9773d14b..2aa5739f2 100644 --- a/core/src/registry/context.rs +++ b/core/src/registry/context.rs @@ -141,7 +141,7 @@ impl RegistryContext { listen: config.registry_listen.unwrap_or(DEFAULT_REGISTRY_LISTEN), db, datadir, - rpc_continuations: RpcContinuations::new(), + rpc_continuations: RpcContinuations::new(None), client: Client::builder() .proxy(Proxy::custom(move |url| { if url.host_str().map_or(false, |h| h.ends_with(".onion")) { diff --git a/core/src/registry/package/signer.rs b/core/src/registry/package/signer.rs index 47ec7b13d..ee1cbc47a 100644 --- a/core/src/registry/package/signer.rs +++ b/core/src/registry/package/signer.rs @@ -59,8 +59,7 @@ pub struct AddPackageSignerParams { #[ts(type = "string | null")] pub versions: Option, #[arg(long, help = "help.arg.merge")] - #[ts(optional)] - pub merge: Option, + pub merge: bool, } pub async fn add_package_signer( @@ -89,7 +88,7 @@ pub async fn add_package_signer( .as_authorized_mut() .upsert(&signer, || 
Ok(VersionRange::None))? .mutate(|existing| { - *existing = if merge.unwrap_or(false) { + *existing = if merge { VersionRange::or(existing.clone(), versions) } else { versions diff --git a/core/src/rpc_continuations.rs b/core/src/rpc_continuations.rs index 42c3ae858..e084264ab 100644 --- a/core/src/rpc_continuations.rs +++ b/core/src/rpc_continuations.rs @@ -17,6 +17,7 @@ use ts_rs::TS; #[allow(unused_imports)] use crate::prelude::*; +use crate::shutdown::Shutdown; use crate::util::future::TimedResource; use crate::util::net::WebSocket; use crate::util::{FromStrParser, new_guid}; @@ -98,12 +99,15 @@ pub type RestHandler = Box RestFuture + Send>; pub struct WebSocketFuture { kill: Option>, + shutdown: Option>>, fut: BoxFuture<'static, ()>, } impl Future for WebSocketFuture { type Output = (); fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { - if self.kill.as_ref().map_or(false, |k| !k.is_empty()) { + if self.kill.as_ref().map_or(false, |k| !k.is_empty()) + || self.shutdown.as_ref().map_or(false, |s| !s.is_empty()) + { Poll::Ready(()) } else { self.fut.poll_unpin(cx) @@ -138,6 +142,7 @@ impl RpcContinuation { RpcContinuation::WebSocket(TimedResource::new( Box::new(|ws| WebSocketFuture { kill: None, + shutdown: None, fut: handler(ws.into()).boxed(), }), timeout, @@ -170,6 +175,7 @@ impl RpcContinuation { RpcContinuation::WebSocket(TimedResource::new( Box::new(|ws| WebSocketFuture { kill, + shutdown: None, fut: handler(ws.into()).boxed(), }), timeout, @@ -183,15 +189,21 @@ impl RpcContinuation { } } -pub struct RpcContinuations(AsyncMutex>); +pub struct RpcContinuations { + continuations: AsyncMutex>, + shutdown: Option>>, +} impl RpcContinuations { - pub fn new() -> Self { - RpcContinuations(AsyncMutex::new(BTreeMap::new())) + pub fn new(shutdown: Option>>) -> Self { + RpcContinuations { + continuations: AsyncMutex::new(BTreeMap::new()), + shutdown, + } } #[instrument(skip_all)] pub async fn clean(&self) { - let mut continuations = 
self.0.lock().await; + let mut continuations = self.continuations.lock().await; let mut to_remove = Vec::new(); for (guid, cont) in &*continuations { if cont.is_timed_out() { @@ -206,23 +218,28 @@ impl RpcContinuations { #[instrument(skip_all)] pub async fn add(&self, guid: Guid, handler: RpcContinuation) { self.clean().await; - self.0.lock().await.insert(guid, handler); + self.continuations.lock().await.insert(guid, handler); } pub async fn get_ws_handler(&self, guid: &Guid) -> Option { - let mut continuations = self.0.lock().await; + let mut continuations = self.continuations.lock().await; if !matches!(continuations.get(guid), Some(RpcContinuation::WebSocket(_))) { return None; } let Some(RpcContinuation::WebSocket(x)) = continuations.remove(guid) else { return None; }; - x.get().await + let handler = x.get().await?; + let shutdown = self.shutdown.as_ref().map(|s| s.subscribe()); + Some(Box::new(move |ws| { + let mut fut = handler(ws); + fut.shutdown = shutdown; + fut + })) } pub async fn get_rest_handler(&self, guid: &Guid) -> Option { - let mut continuations: tokio::sync::MutexGuard<'_, BTreeMap> = - self.0.lock().await; + let mut continuations = self.continuations.lock().await; if !matches!(continuations.get(guid), Some(RpcContinuation::Rest(_))) { return None; } diff --git a/core/src/s9pk/rpc.rs b/core/src/s9pk/rpc.rs index 2fafd7e0c..e84be8406 100644 --- a/core/src/s9pk/rpc.rs +++ b/core/src/s9pk/rpc.rs @@ -17,6 +17,7 @@ use crate::s9pk::manifest::{HardwareRequirements, Manifest}; use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile; use crate::s9pk::v2::SIG_CONTEXT; use crate::s9pk::v2::pack::ImageConfig; +use crate::sign::commitment::merkle_archive::MerkleArchiveCommitment; use crate::util::io::{TmpDir, create_file, open_file}; use crate::util::serde::{HandlerExtSerde, apply_expr}; use crate::util::{Apply, Invoke}; @@ -131,6 +132,13 @@ fn inspect() -> ParentHandler { .with_display_serializable() 
.with_about("about.display-s9pk-manifest"), ) + .subcommand( + "commitment", + from_fn_async(inspect_commitment) + .with_inherited(only_parent) + .with_display_serializable() + .with_about("about.display-s9pk-root-sighash-and-maxsize"), + ) } #[derive(Deserialize, Serialize, Parser, TS)] @@ -262,6 +270,15 @@ async fn inspect_manifest( Ok(s9pk.as_manifest().clone()) } +async fn inspect_commitment( + _: CliContext, + _: Empty, + S9pkPath { s9pk: s9pk_path }: S9pkPath, +) -> Result { + let s9pk = super::S9pk::open(&s9pk_path, None).await?; + s9pk.as_archive().commitment().await +} + async fn convert(ctx: CliContext, S9pkPath { s9pk: s9pk_path }: S9pkPath) -> Result<(), Error> { let mut s9pk = super::load( MultiCursorFile::from(open_file(&s9pk_path).await?), diff --git a/core/src/service/effects/callbacks.rs b/core/src/service/effects/callbacks.rs index d30665c96..a3acc5b18 100644 --- a/core/src/service/effects/callbacks.rs +++ b/core/src/service/effects/callbacks.rs @@ -1,6 +1,6 @@ use std::cmp::min; use std::collections::{BTreeMap, BTreeSet}; -use std::sync::{Arc, Mutex, Weak}; +use std::sync::{Arc, Weak}; use std::time::{Duration, SystemTime}; use clap::Parser; @@ -8,185 +8,72 @@ use futures::future::join_all; use imbl::{OrdMap, Vector, vector}; use imbl_value::InternedString; use patch_db::TypedDbWatch; -use patch_db::json_ptr::JsonPointer; use serde::{Deserialize, Serialize}; use tracing::warn; use ts_rs::TS; -use crate::db::model::Database; +use crate::db::model::package::PackageState; use crate::db::model::public::NetworkInterfaceInfo; +use crate::net::host::Host; +use crate::net::service_interface::ServiceInterface; use crate::net::ssl::FullchainCertData; use crate::prelude::*; use crate::service::effects::context::EffectContext; use crate::service::effects::net::ssl::Algorithm; use crate::service::rpc::{CallbackHandle, CallbackId}; use crate::service::{Service, ServiceActorSeed}; +use crate::status::StatusInfo; use crate::util::collections::EqMap; use 
crate::util::future::NonDetachingJoinHandle; +use crate::util::sync::SyncMutex; use crate::{GatewayId, HostId, PackageId, ServiceInterfaceId}; -#[derive(Default)] -pub struct ServiceCallbacks(Mutex); - -#[derive(Default)] -struct ServiceCallbackMap { - get_service_interface: BTreeMap<(PackageId, ServiceInterfaceId), Vec>, - list_service_interfaces: BTreeMap>, - get_system_smtp: Vec, - get_host_info: - BTreeMap<(PackageId, HostId), (NonDetachingJoinHandle<()>, Vec)>, - get_ssl_certificate: EqMap< - (BTreeSet, FullchainCertData, Algorithm), - (NonDetachingJoinHandle<()>, Vec), - >, - get_status: BTreeMap>, - get_container_ip: BTreeMap>, - get_service_manifest: BTreeMap>, - get_outbound_gateway: BTreeMap, Vec)>, +/// Abstraction for callbacks that are triggered by patchdb subscriptions. +/// +/// Handles the subscribe-wait-fire-remove pattern: when a callback is first +/// registered for a key, a patchdb subscription is spawned. When the subscription +/// fires, all handlers are consumed and invoked, then the subscription stops. +/// A new subscription is created if a handler is registered again. 
+pub struct DbWatchedCallbacks { + label: &'static str, + inner: SyncMutex, Vec)>>, } -impl ServiceCallbacks { - fn mutate(&self, f: impl FnOnce(&mut ServiceCallbackMap) -> T) -> T { - let mut this = self.0.lock().unwrap(); - f(&mut *this) +impl DbWatchedCallbacks { + pub fn new(label: &'static str) -> Self { + Self { + label, + inner: SyncMutex::new(BTreeMap::new()), + } } - pub fn gc(&self) { - self.mutate(|this| { - this.get_service_interface.retain(|_, v| { - v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0); - !v.is_empty() - }); - this.list_service_interfaces.retain(|_, v| { - v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0); - !v.is_empty() - }); - this.get_system_smtp - .retain(|h| h.handle.is_active() && h.seed.strong_count() > 0); - this.get_host_info.retain(|_, (_, v)| { - v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0); - !v.is_empty() - }); - this.get_ssl_certificate.retain(|_, (_, v)| { - v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0); - !v.is_empty() - }); - this.get_status.retain(|_, v| { - v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0); - !v.is_empty() - }); - this.get_service_manifest.retain(|_, v| { - v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0); - !v.is_empty() - }); - this.get_outbound_gateway.retain(|_, (_, v)| { - v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0); - !v.is_empty() - }); - }) - } - - pub(super) fn add_get_service_interface( - &self, - package_id: PackageId, - service_interface_id: ServiceInterfaceId, - handler: CallbackHandler, - ) { - self.mutate(|this| { - this.get_service_interface - .entry((package_id, service_interface_id)) - .or_default() - .push(handler); - }) - } - - #[must_use] - pub fn get_service_interface( - &self, - id: &(PackageId, ServiceInterfaceId), - ) -> Option { - self.mutate(|this| { - Some(CallbackHandlers( - this.get_service_interface.remove(id).unwrap_or_default(), - )) - .filter(|cb| 
!cb.0.is_empty()) - }) - } - - pub(super) fn add_list_service_interfaces( - &self, - package_id: PackageId, - handler: CallbackHandler, - ) { - self.mutate(|this| { - this.list_service_interfaces - .entry(package_id) - .or_default() - .push(handler); - }) - } - - #[must_use] - pub fn list_service_interfaces(&self, id: &PackageId) -> Option { - self.mutate(|this| { - Some(CallbackHandlers( - this.list_service_interfaces.remove(id).unwrap_or_default(), - )) - .filter(|cb| !cb.0.is_empty()) - }) - } - - pub(super) fn add_get_system_smtp(&self, handler: CallbackHandler) { - self.mutate(|this| { - this.get_system_smtp.push(handler); - }) - } - - #[must_use] - pub fn get_system_smtp(&self) -> Option { - self.mutate(|this| { - Some(CallbackHandlers(std::mem::take(&mut this.get_system_smtp))) - .filter(|cb| !cb.0.is_empty()) - }) - } - - pub(super) fn add_get_host_info( + pub fn add( self: &Arc, - db: &TypedPatchDb, - package_id: PackageId, - host_id: HostId, + key: K, + watch: TypedDbWatch, handler: CallbackHandler, ) { - self.mutate(|this| { - this.get_host_info - .entry((package_id.clone(), host_id.clone())) + self.inner.mutate(|map| { + map.entry(key.clone()) .or_insert_with(|| { - let ptr: JsonPointer = - format!("/public/packageData/{}/hosts/{}", package_id, host_id) - .parse() - .expect("valid json pointer"); - let db = db.clone(); - let callbacks = Arc::clone(self); - let key = (package_id, host_id); + let this = Arc::clone(self); + let k = key; + let label = self.label; ( tokio::spawn(async move { - let mut sub = db.subscribe(ptr).await; - while sub.recv().await.is_some() { - if let Some(cbs) = callbacks.mutate(|this| { - this.get_host_info - .remove(&key) + let mut watch = watch.untyped(); + if watch.changed().await.is_ok() { + if let Some(cbs) = this.inner.mutate(|map| { + map.remove(&k) .map(|(_, handlers)| CallbackHandlers(handlers)) .filter(|cb| !cb.0.is_empty()) }) { - if let Err(e) = cbs.call(vector![]).await { - tracing::error!("Error in host info 
callback: {e}"); + let value = watch.peek_and_mark_seen().unwrap_or_default(); + if let Err(e) = cbs.call(vector![value]).await { + tracing::error!("Error in {label} callback: {e}"); tracing::debug!("{e:?}"); } } - // entry was removed when we consumed handlers, - // so stop watching — a new subscription will be - // created if the service re-registers - break; } }) .into(), @@ -198,6 +85,113 @@ impl ServiceCallbacks { }) } + pub fn gc(&self) { + self.inner.mutate(|map| { + map.retain(|_, (_, v)| { + v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0); + !v.is_empty() + }); + }) + } +} + +pub struct ServiceCallbacks { + inner: SyncMutex, + get_host_info: Arc>, + get_status: Arc>, + get_service_interface: Arc>, + list_service_interfaces: Arc>, + get_system_smtp: Arc>, + get_service_manifest: Arc>, +} + +impl Default for ServiceCallbacks { + fn default() -> Self { + Self { + inner: SyncMutex::new(ServiceCallbackMap::default()), + get_host_info: Arc::new(DbWatchedCallbacks::new("host info")), + get_status: Arc::new(DbWatchedCallbacks::new("get_status")), + get_service_interface: Arc::new(DbWatchedCallbacks::new("get_service_interface")), + list_service_interfaces: Arc::new(DbWatchedCallbacks::new("list_service_interfaces")), + get_system_smtp: Arc::new(DbWatchedCallbacks::new("get_system_smtp")), + get_service_manifest: Arc::new(DbWatchedCallbacks::new("get_service_manifest")), + } + } +} + +#[derive(Default)] +struct ServiceCallbackMap { + get_ssl_certificate: EqMap< + (BTreeSet, FullchainCertData, Algorithm), + (NonDetachingJoinHandle<()>, Vec), + >, + get_container_ip: BTreeMap>, + get_outbound_gateway: BTreeMap, Vec)>, +} + +impl ServiceCallbacks { + fn mutate(&self, f: impl FnOnce(&mut ServiceCallbackMap) -> T) -> T { + self.inner.mutate(f) + } + + pub fn gc(&self) { + self.mutate(|this| { + this.get_ssl_certificate.retain(|_, (_, v)| { + v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0); + !v.is_empty() + }); + 
this.get_outbound_gateway.retain(|_, (_, v)| { + v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0); + !v.is_empty() + }); + }); + self.get_host_info.gc(); + self.get_status.gc(); + self.get_service_interface.gc(); + self.list_service_interfaces.gc(); + self.get_system_smtp.gc(); + self.get_service_manifest.gc(); + } + + pub(super) fn add_get_service_interface( + &self, + package_id: PackageId, + service_interface_id: ServiceInterfaceId, + watch: TypedDbWatch, + handler: CallbackHandler, + ) { + self.get_service_interface + .add((package_id, service_interface_id), watch, handler); + } + + pub(super) fn add_list_service_interfaces( + &self, + package_id: PackageId, + watch: TypedDbWatch, + handler: CallbackHandler, + ) { + self.list_service_interfaces.add(package_id, watch, handler); + } + + pub(super) fn add_get_system_smtp( + &self, + watch: TypedDbWatch, + handler: CallbackHandler, + ) { + self.get_system_smtp.add((), watch, handler); + } + + pub(super) fn add_get_host_info( + &self, + package_id: PackageId, + host_id: HostId, + watch: TypedDbWatch, + handler: CallbackHandler, + ) { + self.get_host_info + .add((package_id, host_id), watch, handler); + } + pub(super) fn add_get_ssl_certificate( &self, ctx: EffectContext, @@ -256,19 +250,14 @@ impl ServiceCallbacks { .push(handler); }) } - pub(super) fn add_get_status(&self, package_id: PackageId, handler: CallbackHandler) { - self.mutate(|this| this.get_status.entry(package_id).or_default().push(handler)) - } - #[must_use] - pub fn get_status(&self, package_id: &PackageId) -> Option { - self.mutate(|this| { - if let Some(watched) = this.get_status.remove(package_id) { - Some(CallbackHandlers(watched)) - } else { - None - } - .filter(|cb| !cb.0.is_empty()) - }) + + pub(super) fn add_get_status( + &self, + package_id: PackageId, + watch: TypedDbWatch, + handler: CallbackHandler, + ) { + self.get_status.add(package_id, watch, handler); } pub(super) fn add_get_container_ip(&self, package_id: PackageId, 
handler: CallbackHandler) { @@ -345,23 +334,13 @@ impl ServiceCallbacks { }) } - pub(super) fn add_get_service_manifest(&self, package_id: PackageId, handler: CallbackHandler) { - self.mutate(|this| { - this.get_service_manifest - .entry(package_id) - .or_default() - .push(handler) - }) - } - - #[must_use] - pub fn get_service_manifest(&self, package_id: &PackageId) -> Option { - self.mutate(|this| { - this.get_service_manifest - .remove(package_id) - .map(CallbackHandlers) - .filter(|cb| !cb.0.is_empty()) - }) + pub(super) fn add_get_service_manifest( + &self, + package_id: PackageId, + watch: TypedDbWatch, + handler: CallbackHandler, + ) { + self.get_service_manifest.add(package_id, watch, handler); } } diff --git a/core/src/service/effects/control.rs b/core/src/service/effects/control.rs index 88931812f..4333cf907 100644 --- a/core/src/service/effects/control.rs +++ b/core/src/service/effects/control.rs @@ -80,27 +80,32 @@ pub async fn get_status( package_id, callback, }: GetStatusParams, -) -> Result { +) -> Result, Error> { let context = context.deref()?; let id = package_id.unwrap_or_else(|| context.seed.id.clone()); - let db = context.seed.ctx.db.peek().await; + + let ptr = format!("/public/packageData/{}/statusInfo", id) + .parse() + .expect("valid json pointer"); + let mut watch = context + .seed + .ctx + .db + .watch(ptr) + .await + .typed::(); + + let status = watch.peek_and_mark_seen()?.de().ok(); if let Some(callback) = callback { let callback = callback.register(&context.seed.persistent_container); context.seed.ctx.callbacks.add_get_status( id.clone(), + watch, super::callbacks::CallbackHandler::new(&context, callback), ); } - let status = db - .as_public() - .as_package_data() - .as_idx(&id) - .or_not_found(&id)? 
- .as_status_info() - .de()?; - Ok(status) } @@ -158,7 +163,7 @@ pub async fn set_main_status( if prev.is_none() && status == SetMainStatusStatus::Running { s.as_desired_mut().map_mutate(|s| { Ok(match s { - DesiredStatus::Restarting => DesiredStatus::Running, + DesiredStatus::Restarting { .. } => DesiredStatus::Running, x => x, }) })?; diff --git a/core/src/service/effects/dependency.rs b/core/src/service/effects/dependency.rs index 7cf233452..cf50c8674 100644 --- a/core/src/service/effects/dependency.rs +++ b/core/src/service/effects/dependency.rs @@ -399,27 +399,38 @@ pub async fn get_service_manifest( callback, }: GetServiceManifestParams, ) -> Result { + use crate::db::model::package::PackageState; + let context = context.deref()?; + let ptr = format!("/public/packageData/{}/stateInfo", package_id) + .parse() + .expect("valid json pointer"); + let mut watch = context + .seed + .ctx + .db + .watch(ptr) + .await + .typed::(); + + let manifest = watch + .peek_and_mark_seen()? + .as_manifest(ManifestPreference::Old) + .de()?; + if let Some(callback) = callback { let callback = callback.register(&context.seed.persistent_container); context .seed .ctx .callbacks - .add_get_service_manifest(package_id.clone(), CallbackHandler::new(&context, callback)); + .add_get_service_manifest( + package_id.clone(), + watch, + CallbackHandler::new(&context, callback), + ); } - let db = context.seed.ctx.db.peek().await; - - let manifest = db - .as_public() - .as_package_data() - .as_idx(&package_id) - .or_not_found(&package_id)? 
- .as_state_info() - .as_manifest(ManifestPreference::New) - .de()?; - Ok(manifest) } diff --git a/core/src/service/effects/net/host.rs b/core/src/service/effects/net/host.rs index a20fcf189..193826aac 100644 --- a/core/src/service/effects/net/host.rs +++ b/core/src/service/effects/net/host.rs @@ -23,26 +23,30 @@ pub async fn get_host_info( }: GetHostInfoParams, ) -> Result, Error> { let context = context.deref()?; - let db = context.seed.ctx.db.peek().await; let package_id = package_id.unwrap_or_else(|| context.seed.id.clone()); + let ptr = format!("/public/packageData/{}/hosts/{}", package_id, host_id) + .parse() + .expect("valid json pointer"); + let mut watch = context + .seed + .ctx + .db + .watch(ptr) + .await + .typed::(); + + let res = watch.peek_and_mark_seen()?.de().ok(); + if let Some(callback) = callback { let callback = callback.register(&context.seed.persistent_container); context.seed.ctx.callbacks.add_get_host_info( - &context.seed.ctx.db, package_id.clone(), host_id.clone(), + watch, CallbackHandler::new(&context, callback), ); } - let res = db - .as_public() - .as_package_data() - .as_idx(&package_id) - .and_then(|m| m.as_hosts().as_idx(&host_id)) - .map(|m| m.de()) - .transpose()?; - Ok(res) } diff --git a/core/src/service/effects/net/interface.rs b/core/src/service/effects/net/interface.rs index ff0452976..0716efffc 100644 --- a/core/src/service/effects/net/interface.rs +++ b/core/src/service/effects/net/interface.rs @@ -1,7 +1,5 @@ use std::collections::BTreeMap; -use imbl::vector; - use crate::net::service_interface::{AddressInfo, ServiceInterface, ServiceInterfaceType}; use crate::service::effects::callbacks::CallbackHandler; use crate::service::effects::prelude::*; @@ -42,7 +40,7 @@ pub async fn export_service_interface( interface_type: r#type, }; - let res = context + context .seed .ctx .db @@ -56,27 +54,8 @@ pub async fn export_service_interface( ifaces.insert(&id, &service_interface)?; Ok(()) }) - .await; - res.result?; - - if 
res.revision.is_some() { - if let Some(callbacks) = context - .seed - .ctx - .callbacks - .get_service_interface(&(package_id.clone(), id)) - { - callbacks.call(vector![]).await?; - } - if let Some(callbacks) = context - .seed - .ctx - .callbacks - .list_service_interfaces(&package_id) - { - callbacks.call(vector![]).await?; - } - } + .await + .result?; Ok(()) } @@ -101,26 +80,34 @@ pub async fn get_service_interface( ) -> Result, Error> { let context = context.deref()?; let package_id = package_id.unwrap_or_else(|| context.seed.id.clone()); - let db = context.seed.ctx.db.peek().await; + + let ptr = format!( + "/public/packageData/{}/serviceInterfaces/{}", + package_id, service_interface_id + ) + .parse() + .expect("valid json pointer"); + let mut watch = context + .seed + .ctx + .db + .watch(ptr) + .await + .typed::(); + + let res = watch.peek_and_mark_seen()?.de().ok(); if let Some(callback) = callback { let callback = callback.register(&context.seed.persistent_container); context.seed.ctx.callbacks.add_get_service_interface( package_id.clone(), service_interface_id.clone(), + watch, CallbackHandler::new(&context, callback), ); } - let interface = db - .as_public() - .as_package_data() - .as_idx(&package_id) - .and_then(|m| m.as_service_interfaces().as_idx(&service_interface_id)) - .map(|m| m.de()) - .transpose()?; - - Ok(interface) + Ok(res) } #[derive(Debug, Clone, Serialize, Deserialize, TS)] @@ -142,27 +129,23 @@ pub async fn list_service_interfaces( let context = context.deref()?; let package_id = package_id.unwrap_or_else(|| context.seed.id.clone()); + let ptr = format!("/public/packageData/{}/serviceInterfaces", package_id) + .parse() + .expect("valid json pointer"); + let mut watch = context.seed.ctx.db.watch(ptr).await; + + let res = imbl_value::from_value(watch.peek_and_mark_seen()?) 
+ .unwrap_or_default(); + if let Some(callback) = callback { let callback = callback.register(&context.seed.persistent_container); context.seed.ctx.callbacks.add_list_service_interfaces( package_id.clone(), + watch.typed::>(), CallbackHandler::new(&context, callback), ); } - let res = context - .seed - .ctx - .db - .peek() - .await - .into_public() - .into_package_data() - .into_idx(&package_id) - .map(|m| m.into_service_interfaces().de()) - .transpose()? - .unwrap_or_default(); - Ok(res) } @@ -180,52 +163,22 @@ pub async fn clear_service_interfaces( let context = context.deref()?; let package_id = context.seed.id.clone(); - let res = context + context .seed .ctx .db .mutate(|db| { - let mut removed = Vec::new(); db.as_public_mut() .as_package_data_mut() .as_idx_mut(&package_id) .or_not_found(&package_id)? .as_service_interfaces_mut() .mutate(|s| { - Ok(s.retain(|id, _| { - if except.contains(id) { - true - } else { - removed.push(id.clone()); - false - } - })) - })?; - Ok(removed) + Ok(s.retain(|id, _| except.contains(id))) + }) }) - .await; - let removed = res.result?; - - if res.revision.is_some() { - for id in removed { - if let Some(callbacks) = context - .seed - .ctx - .callbacks - .get_service_interface(&(package_id.clone(), id)) - { - callbacks.call(vector![]).await?; - } - } - if let Some(callbacks) = context - .seed - .ctx - .callbacks - .list_service_interfaces(&package_id) - { - callbacks.call(vector![]).await?; - } - } + .await + .result?; Ok(()) } diff --git a/core/src/service/effects/system.rs b/core/src/service/effects/system.rs index abf6f36ad..e6a9afcba 100644 --- a/core/src/service/effects/system.rs +++ b/core/src/service/effects/system.rs @@ -16,25 +16,25 @@ pub async fn get_system_smtp( ) -> Result, Error> { let context = context.deref()?; + let ptr = "/public/serverInfo/smtp" + .parse() + .expect("valid json pointer"); + let mut watch = context.seed.ctx.db.watch(ptr).await; + + let res = imbl_value::from_value(watch.peek_and_mark_seen()?) 
+ .with_kind(ErrorKind::Deserialization)?; + if let Some(callback) = callback { let callback = callback.register(&context.seed.persistent_container); context .seed .ctx .callbacks - .add_get_system_smtp(CallbackHandler::new(&context, callback)); + .add_get_system_smtp( + watch.typed::>(), + CallbackHandler::new(&context, callback), + ); } - let res = context - .seed - .ctx - .db - .peek() - .await - .into_public() - .into_server_info() - .into_smtp() - .de()?; - Ok(res) } diff --git a/core/src/service/effects/version.rs b/core/src/service/effects/version.rs index 185e1f629..7b82e060c 100644 --- a/core/src/service/effects/version.rs +++ b/core/src/service/effects/version.rs @@ -2,7 +2,7 @@ use std::path::Path; use crate::DATA_DIR; use crate::service::effects::prelude::*; -use crate::util::io::{delete_file, maybe_read_file_to_string, write_file_atomic}; +use crate::util::io::{delete_file, write_file_atomic}; use crate::volume::PKG_VOLUME_DIR; #[derive(Debug, Clone, Serialize, Deserialize, TS, Parser)] @@ -36,11 +36,5 @@ pub async fn set_data_version( #[instrument(skip_all)] pub async fn get_data_version(context: EffectContext) -> Result, Error> { let context = context.deref()?; - let package_id = &context.seed.id; - let path = Path::new(DATA_DIR) - .join(PKG_VOLUME_DIR) - .join(package_id) - .join("data") - .join(".version"); - maybe_read_file_to_string(path).await + crate::service::get_data_version(&context.seed.id).await } diff --git a/core/src/service/mod.rs b/core/src/service/mod.rs index f17f2d266..e490130bd 100644 --- a/core/src/service/mod.rs +++ b/core/src/service/mod.rs @@ -46,12 +46,14 @@ use crate::service::uninstall::cleanup; use crate::util::Never; use crate::util::actor::concurrent::ConcurrentActor; use crate::util::future::NonDetachingJoinHandle; -use crate::util::io::{AsyncReadStream, AtomicFile, TermSize, delete_file}; +use crate::util::io::{ + AsyncReadStream, AtomicFile, TermSize, delete_file, maybe_read_file_to_string, +}; use 
crate::util::net::WebSocket; use crate::util::serde::Pem; use crate::util::sync::SyncMutex; use crate::util::tui::choose; -use crate::volume::data_dir; +use crate::volume::{PKG_VOLUME_DIR, data_dir}; use crate::{ActionId, CAP_1_KiB, DATA_DIR, ImageId, PackageId}; pub mod action; @@ -81,6 +83,17 @@ pub enum LoadDisposition { Undo, } +/// Read the data version file for a service from disk. +/// Returns `Ok(None)` if the file does not exist (fresh install). +pub async fn get_data_version(id: &PackageId) -> Result, Error> { + let path = Path::new(DATA_DIR) + .join(PKG_VOLUME_DIR) + .join(id) + .join("data") + .join(".version"); + maybe_read_file_to_string(&path).await +} + struct RootCommand(pub String); #[derive(Clone, Debug, Serialize, Deserialize, Default, TS)] @@ -390,12 +403,17 @@ impl Service { tracing::error!("Error opening s9pk for install: {e}"); tracing::debug!("{e:?}") }) { + let init_kind = if get_data_version(id).await.ok().flatten().is_some() { + InitKind::Update + } else { + InitKind::Install + }; if let Ok(service) = Self::install( ctx.clone(), s9pk, &s9pk_path, &None, - InitKind::Install, + init_kind, None::, None, ) @@ -404,11 +422,15 @@ impl Service { tracing::error!("Error installing service: {e}"); tracing::debug!("{e:?}") }) { + crate::volume::remove_install_backup(id).await.log_err(); return Ok(Some(service)); } } } cleanup(ctx, id, false).await.log_err(); + crate::volume::restore_volumes_from_install_backup(id) + .await + .log_err(); ctx.db .mutate(|v| v.as_public_mut().as_package_data_mut().remove(id)) .await @@ -424,12 +446,17 @@ impl Service { tracing::error!("Error opening s9pk for update: {e}"); tracing::debug!("{e:?}") }) { + let init_kind = if get_data_version(id).await.ok().flatten().is_some() { + InitKind::Update + } else { + InitKind::Install + }; if let Ok(service) = Self::install( ctx.clone(), s9pk, &s9pk_path, &None, - InitKind::Update, + init_kind, None::, None, ) @@ -438,37 +465,60 @@ impl Service { tracing::error!("Error 
installing service: {e}"); tracing::debug!("{e:?}") }) { + crate::volume::remove_install_backup(id).await.log_err(); return Ok(Some(service)); } } } - let s9pk = S9pk::open(s9pk_path, Some(id)).await?; - ctx.db - .mutate({ - |db| { - db.as_public_mut() - .as_package_data_mut() - .as_idx_mut(id) - .or_not_found(id)? - .as_state_info_mut() - .map_mutate(|s| { - if let PackageState::Updating(UpdatingState { - manifest, .. - }) = s - { - Ok(PackageState::Installed(InstalledState { manifest })) - } else { - Err(Error::new( - eyre!("{}", t!("service.mod.race-condition-detected")), - ErrorKind::Database, - )) - } - }) - } - }) - .await - .result?; - handle_installed(s9pk).await + match async { + let s9pk = S9pk::open(s9pk_path, Some(id)).await?; + ctx.db + .mutate({ + |db| { + db.as_public_mut() + .as_package_data_mut() + .as_idx_mut(id) + .or_not_found(id)? + .as_state_info_mut() + .map_mutate(|s| { + if let PackageState::Updating(UpdatingState { + manifest, + .. + }) = s + { + Ok(PackageState::Installed(InstalledState { manifest })) + } else { + Err(Error::new( + eyre!( + "{}", + t!("service.mod.race-condition-detected") + ), + ErrorKind::Database, + )) + } + }) + } + }) + .await + .result?; + handle_installed(s9pk).await + } + .await + { + Ok(service) => { + crate::volume::remove_install_backup(id).await.log_err(); + Ok(service) + } + Err(e) => { + tracing::error!( + "Update rollback failed for {id}, restoring volume snapshot: {e}" + ); + crate::volume::restore_volumes_from_install_backup(id) + .await + .log_err(); + Err(e) + } + } } PackageStateMatchModelRef::Removing(_) | PackageStateMatchModelRef::Restoring(_) => { if let Ok(s9pk) = S9pk::open(s9pk_path, Some(id)).await.map_err(|e| { @@ -617,17 +667,6 @@ impl Service { tokio::task::yield_now().await; } - // Trigger manifest callbacks after successful installation - let manifest = service.seed.persistent_container.s9pk.as_manifest(); - if let Some(callbacks) = ctx.callbacks.get_service_manifest(&manifest.id) { - let 
manifest_value = - serde_json::to_value(manifest).with_kind(ErrorKind::Serialization)?; - callbacks - .call(imbl::vector![manifest_value.into()]) - .await - .log_err(); - } - Ok(service) } diff --git a/core/src/service/rpc.rs b/core/src/service/rpc.rs index b5c8ed01c..94c15f42f 100644 --- a/core/src/service/rpc.rs +++ b/core/src/service/rpc.rs @@ -107,6 +107,12 @@ impl ExitParams { target: Some(InternedString::from_display(range)), } } + pub fn target_str(s: &str) -> Self { + Self { + id: Guid::new(), + target: Some(InternedString::intern(s)), + } + } pub fn uninstall() -> Self { Self { id: Guid::new(), diff --git a/core/src/service/service_actor.rs b/core/src/service/service_actor.rs index 4fec11a08..cac038cb3 100644 --- a/core/src/service/service_actor.rs +++ b/core/src/service/service_actor.rs @@ -1,7 +1,6 @@ use std::sync::Arc; use std::time::Duration; -use imbl::vector; use patch_db::TypedDbWatch; use super::ServiceActorSeed; @@ -99,19 +98,12 @@ async fn service_actor_loop<'a>( seed: &'a Arc, transition: &mut Option>, ) -> Result<(), Error> { - let id = &seed.id; let status_model = watch.peek_and_mark_seen()?; let status = status_model.de()?; - if let Some(callbacks) = seed.ctx.callbacks.get_status(id) { - callbacks - .call(vector![patch_db::ModelExt::into_value(status_model)]) - .await?; - } - match status { StatusInfo { - desired: DesiredStatus::Running | DesiredStatus::Restarting, + desired: DesiredStatus::Running | DesiredStatus::Restarting { .. }, started: None, .. } => { @@ -122,7 +114,7 @@ async fn service_actor_loop<'a>( } StatusInfo { desired: - DesiredStatus::Stopped | DesiredStatus::Restarting | DesiredStatus::BackingUp { .. }, + DesiredStatus::Stopped | DesiredStatus::Restarting { .. } | DesiredStatus::BackingUp { .. }, started: Some(_), .. 
} => { diff --git a/core/src/service/service_map.rs b/core/src/service/service_map.rs index 697578a7c..7c03caa0b 100644 --- a/core/src/service/service_map.rs +++ b/core/src/service/service_map.rs @@ -28,7 +28,7 @@ use crate::s9pk::S9pk; use crate::s9pk::manifest::PackageId; use crate::s9pk::merkle_archive::source::FileSource; use crate::service::rpc::{ExitParams, InitKind}; -use crate::service::{LoadDisposition, Service, ServiceRef}; +use crate::service::{LoadDisposition, Service, ServiceRef, get_data_version}; use crate::sign::commitment::merkle_archive::MerkleArchiveCommitment; use crate::status::{DesiredStatus, StatusInfo}; use crate::util::future::NonDetachingJoinHandle; @@ -243,12 +243,7 @@ impl ServiceMap { PackageState::Installing(installing) }, s9pk: installed_path, - status_info: StatusInfo { - error: None, - health: BTreeMap::new(), - started: None, - desired: DesiredStatus::Stopped, - }, + status_info: StatusInfo::default(), registry, developer_key: Pem::new(developer_key), icon, @@ -299,10 +294,11 @@ impl ServiceMap { s9pk.serialize(&mut progress_writer, true).await?; let (file, mut unpack_progress) = progress_writer.into_inner(); file.sync_all().await?; - unpack_progress.complete(); crate::util::io::rename(&download_path, &installed_path).await?; + unpack_progress.complete(); + Ok::<_, Error>(sync_progress_task) }) .await?; @@ -310,36 +306,52 @@ impl ServiceMap { .handle_last(async move { finalization_progress.start(); let s9pk = S9pk::open(&installed_path, Some(&id)).await?; + let data_version = get_data_version(&id).await?; + // Snapshot existing volumes before install/update modifies them + crate::volume::snapshot_volumes_for_install(&id).await?; let prev = if let Some(service) = service.take() { ensure_code!( recovery_source.is_none(), ErrorKind::InvalidRequest, "cannot restore over existing package" ); - let prev_version = service - .seed - .persistent_container - .s9pk - .as_manifest() - .version - .clone(); - let prev_can_migrate_to = &service - 
.seed - .persistent_container - .s9pk - .as_manifest() - .can_migrate_to; - let next_version = &s9pk.as_manifest().version; - let next_can_migrate_from = &s9pk.as_manifest().can_migrate_from; - let uninit = if prev_version.satisfies(next_can_migrate_from) { - ExitParams::target_version(&*prev_version) - } else if next_version.satisfies(prev_can_migrate_to) { - ExitParams::target_version(&s9pk.as_manifest().version) + let uninit = if let Some(ref data_ver) = data_version { + let prev_can_migrate_to = &service + .seed + .persistent_container + .s9pk + .as_manifest() + .can_migrate_to; + let next_version = &s9pk.as_manifest().version; + let next_can_migrate_from = &s9pk.as_manifest().can_migrate_from; + if let Ok(data_ver_ev) = data_ver.parse::() { + if data_ver_ev.satisfies(next_can_migrate_from) { + ExitParams::target_str(data_ver) + } else if next_version.satisfies(prev_can_migrate_to) { + ExitParams::target_version(&s9pk.as_manifest().version) + } else { + ExitParams::target_range(&VersionRange::and( + prev_can_migrate_to.clone(), + next_can_migrate_from.clone(), + )) + } + } else if let Ok(data_ver_range) = data_ver.parse::() { + ExitParams::target_range(&VersionRange::and( + data_ver_range, + next_can_migrate_from.clone(), + )) + } else if next_version.satisfies(prev_can_migrate_to) { + ExitParams::target_version(&s9pk.as_manifest().version) + } else { + ExitParams::target_range(&VersionRange::and( + prev_can_migrate_to.clone(), + next_can_migrate_from.clone(), + )) + } } else { - ExitParams::target_range(&VersionRange::and( - prev_can_migrate_to.clone(), - next_can_migrate_from.clone(), - )) + ExitParams::target_version( + &*service.seed.persistent_container.s9pk.as_manifest().version, + ) }; let cleanup = service.uninstall(uninit, false, false).await?; progress.complete(); @@ -354,7 +366,7 @@ impl ServiceMap { ®istry, if recovery_source.is_some() { InitKind::Restore - } else if prev.is_some() { + } else if data_version.is_some() { InitKind::Update } else { 
InitKind::Install @@ -372,6 +384,8 @@ impl ServiceMap { cleanup.await?; } + crate::volume::remove_install_backup(&id).await.log_err(); + drop(service); sync_progress_task.await.map_err(|_| { diff --git a/core/src/service/uninstall.rs b/core/src/service/uninstall.rs index 2f6515024..8f4bd8ad1 100644 --- a/core/src/service/uninstall.rs +++ b/core/src/service/uninstall.rs @@ -1,8 +1,6 @@ use std::collections::BTreeSet; use std::path::Path; -use imbl::vector; - use crate::context::RpcContext; use crate::db::model::package::{InstalledState, InstallingInfo, InstallingState, PackageState}; use crate::net::host::all_hosts; @@ -94,20 +92,13 @@ pub async fn cleanup(ctx: &RpcContext, id: &PackageId, soft: bool) -> Result<(), )); } }; - // Trigger manifest callbacks with null to indicate uninstall - if let Some(callbacks) = ctx.callbacks.get_service_manifest(&manifest.id) { - callbacks.call(vector![Value::Null]).await.log_err(); - } - if !soft { let path = Path::new(DATA_DIR).join(PKG_VOLUME_DIR).join(&manifest.id); - if tokio::fs::metadata(&path).await.is_ok() { - tokio::fs::remove_dir_all(&path).await?; - } - let logs_dir = Path::new(PACKAGE_DATA).join("logs").join(&manifest.id); - if tokio::fs::metadata(&logs_dir).await.is_ok() { - #[cfg(not(feature = "dev"))] - tokio::fs::remove_dir_all(&logs_dir).await?; + crate::util::io::delete_dir(&path).await?; + #[cfg(not(feature = "dev"))] + { + let logs_dir = Path::new(PACKAGE_DATA).join("logs").join(&manifest.id); + crate::util::io::delete_dir(&logs_dir).await?; } } }, diff --git a/core/src/setup.rs b/core/src/setup.rs index 2166cfc08..e6aa68212 100644 --- a/core/src/setup.rs +++ b/core/src/setup.rs @@ -95,8 +95,8 @@ const LIVE_MEDIUM_PATH: &str = "/run/live/medium"; pub async fn list_disks(ctx: SetupContext) -> Result, Error> { let mut disks = crate::disk::util::list( - &ctx.config - .peek(|c| c.os_partitions.clone()) + &crate::disk::OsPartitionInfo::from_fstab() + .await .unwrap_or_default(), ) .await?; @@ -115,7 +115,7 @@ pub 
async fn list_disks(ctx: SetupContext) -> Result, Error> { async fn setup_init( ctx: &SetupContext, password: Option, - kiosk: Option, + kiosk: bool, hostname: Option, init_phases: InitPhases, ) -> Result<(AccountInfo, InitResult), Error> { @@ -137,9 +137,8 @@ async fn setup_init( account.save(m)?; let info = m.as_public_mut().as_server_info_mut(); info.as_password_hash_mut().ser(&account.password)?; - if let Some(kiosk) = kiosk { - info.as_kiosk_mut().ser(&Some(kiosk))?; - } + info.as_kiosk_mut() + .ser(&Some(kiosk).filter(|_| &*PLATFORM != "raspberrypi"))?; if let Some(language) = language.clone() { info.as_language_mut().ser(&Some(language))?; } @@ -174,8 +173,7 @@ async fn setup_init( pub struct AttachParams { pub password: Option, pub guid: InternedString, - #[ts(optional)] - pub kiosk: Option, + pub kiosk: bool, } #[instrument(skip_all)] @@ -279,6 +277,7 @@ pub enum SetupStatusRes { pub struct SetupInfo { pub guid: Option, pub attach: bool, + pub mok_enrolled: bool, } #[derive(Debug, Deserialize, Serialize, TS)] @@ -410,8 +409,7 @@ pub struct SetupExecuteParams { guid: InternedString, password: Option, recovery_source: Option>, - #[ts(optional)] - kiosk: Option, + kiosk: bool, name: Option, hostname: Option, } @@ -548,7 +546,7 @@ pub async fn execute_inner( guid: InternedString, password: Option, recovery_source: Option>, - kiosk: Option, + kiosk: bool, hostname: Option, ) -> Result<(SetupResult, RpcContext), Error> { let progress = &ctx.progress; @@ -621,7 +619,7 @@ async fn fresh_setup( ctx: &SetupContext, guid: InternedString, password: &str, - kiosk: Option, + kiosk: bool, hostname: Option, SetupExecuteProgress { init_phases, @@ -630,8 +628,8 @@ async fn fresh_setup( }: SetupExecuteProgress, ) -> Result<(SetupResult, RpcContext), Error> { let account = AccountInfo::new(password, root_ca_start_time().await, hostname)?; + let db = ctx.db().await?; - let kiosk = Some(kiosk.unwrap_or(true)).filter(|_| &*PLATFORM != "raspberrypi"); sync_kiosk(kiosk).await?; 
let language = ctx.language.peek(|a| a.clone()); @@ -682,7 +680,7 @@ async fn recover( recovery_source: BackupTargetFS, server_id: String, recovery_password: String, - kiosk: Option, + kiosk: bool, hostname: Option, progress: SetupExecuteProgress, ) -> Result<(SetupResult, RpcContext), Error> { @@ -707,7 +705,7 @@ async fn migrate( guid: InternedString, old_guid: &str, password: Option, - kiosk: Option, + kiosk: bool, hostname: Option, SetupExecuteProgress { init_phases, @@ -738,9 +736,7 @@ async fn migrate( ); let tmpdir = Path::new(package_data_transfer_args.0).join("tmp"); - if tokio::fs::metadata(&tmpdir).await.is_ok() { - tokio::fs::remove_dir_all(&tmpdir).await?; - } + crate::util::io::delete_dir(&tmpdir).await?; let ordering = std::sync::atomic::Ordering::Relaxed; diff --git a/core/src/status/mod.rs b/core/src/status/mod.rs index ea4d0da98..a9e23cdb2 100644 --- a/core/src/status/mod.rs +++ b/core/src/status/mod.rs @@ -38,7 +38,17 @@ impl Model { .map_mutate(|s| Ok(Some(s.unwrap_or_else(|| Utc::now()))))?; self.as_desired_mut().map_mutate(|s| { Ok(match s { - DesiredStatus::Restarting => DesiredStatus::Running, + DesiredStatus::Restarting { + restart_again: true, + } => { + // Clear the flag but stay Restarting so actor will stop→start again + DesiredStatus::Restarting { + restart_again: false, + } + } + DesiredStatus::Restarting { + restart_again: false, + } => DesiredStatus::Running, a => a, }) })?; @@ -55,7 +65,9 @@ impl Model { Ok(()) } pub fn restart(&mut self) -> Result<(), Error> { - self.as_desired_mut().map_mutate(|s| Ok(s.restart()))?; + let started = self.as_started().transpose_ref().is_some(); + self.as_desired_mut() + .map_mutate(|s| Ok(s.restart(started)))?; self.as_health_mut().ser(&Default::default())?; Ok(()) } @@ -69,7 +81,7 @@ impl Model { DesiredStatus::BackingUp { on_complete: StartStop::Stop, } => DesiredStatus::Stopped, - DesiredStatus::Restarting => DesiredStatus::Running, + DesiredStatus::Restarting { .. 
} => DesiredStatus::Running, x => x, }) })?; @@ -84,9 +96,14 @@ impl Model { #[serde(rename_all_fields = "camelCase")] pub enum DesiredStatus { Stopped, - Restarting, + Restarting { + #[serde(default)] + restart_again: bool, + }, Running, - BackingUp { on_complete: StartStop }, + BackingUp { + on_complete: StartStop, + }, } impl Default for DesiredStatus { fn default() -> Self { @@ -97,7 +114,7 @@ impl DesiredStatus { pub fn running(&self) -> bool { match self { Self::Running - | Self::Restarting + | Self::Restarting { .. } | Self::BackingUp { on_complete: StartStop::Start, } => true, @@ -140,10 +157,15 @@ impl DesiredStatus { } } - pub fn restart(&self) -> Self { + pub fn restart(&self, started: bool) -> Self { match self { - Self::Running => Self::Restarting, - x => *x, // no-op: restart is meaningless in any other state + Self::Running => Self::Restarting { + restart_again: false, + }, + Self::Restarting { .. } if !started => Self::Restarting { + restart_again: true, + }, + x => *x, } } } diff --git a/core/src/system/mod.rs b/core/src/system/mod.rs index b0570379b..4bb404e8b 100644 --- a/core/src/system/mod.rs +++ b/core/src/system/mod.rs @@ -6,7 +6,6 @@ use chrono::Utc; use clap::Parser; use color_eyre::eyre::eyre; use futures::FutureExt; -use imbl::vector; use imbl_value::InternedString; use rpc_toolkit::{Context, Empty, HandlerExt, ParentHandler, from_fn_async}; use serde::{Deserialize, Deserializer, Serialize, Serializer}; @@ -319,13 +318,11 @@ pub fn kernel_logs>() -> ParentHandler) -> Result<(), Error> { - if let Some(kiosk) = kiosk { - if kiosk { - enable_kiosk().await?; - } else { - disable_kiosk().await?; - } +pub async fn sync_kiosk(kiosk: bool) -> Result<(), Error> { + if kiosk { + enable_kiosk().await?; + } else { + disable_kiosk().await?; } Ok(()) } @@ -1150,9 +1147,6 @@ pub async fn set_system_smtp(ctx: RpcContext, smtp: SmtpValue) -> Result<(), Err }) .await .result?; - if let Some(callbacks) = ctx.callbacks.get_system_smtp() { - 
callbacks.call(vector![to_value(&smtp)?]).await?; - } Ok(()) } pub async fn clear_system_smtp(ctx: RpcContext) -> Result<(), Error> { @@ -1165,28 +1159,25 @@ pub async fn clear_system_smtp(ctx: RpcContext) -> Result<(), Error> { }) .await .result?; - if let Some(callbacks) = ctx.callbacks.get_system_smtp() { - callbacks.call(vector![Value::Null]).await?; - } Ok(()) } #[derive(Debug, Clone, Deserialize, Serialize, Parser)] -pub struct SetIfconfigUrlParams { - #[arg(help = "help.arg.ifconfig-url")] - pub url: url::Url, +pub struct SetEchoipUrlsParams { + #[arg(help = "help.arg.echoip-urls")] + pub urls: Vec, } -pub async fn set_ifconfig_url( +pub async fn set_echoip_urls( ctx: RpcContext, - SetIfconfigUrlParams { url }: SetIfconfigUrlParams, + SetEchoipUrlsParams { urls }: SetEchoipUrlsParams, ) -> Result<(), Error> { ctx.db .mutate(|db| { db.as_public_mut() .as_server_info_mut() - .as_ifconfig_url_mut() - .ser(&url) + .as_echoip_urls_mut() + .ser(&urls) }) .await .result diff --git a/core/src/tunnel/api.rs b/core/src/tunnel/api.rs index 10c2f21c2..51fff1714 100644 --- a/core/src/tunnel/api.rs +++ b/core/src/tunnel/api.rs @@ -5,12 +5,14 @@ use imbl_value::InternedString; use ipnet::Ipv4Net; use rpc_toolkit::{Context, Empty, HandlerArgs, HandlerExt, ParentHandler, from_fn_async}; use serde::{Deserialize, Serialize}; +use ts_rs::TS; use crate::context::CliContext; use crate::db::model::public::NetworkInterfaceType; use crate::net::forward::add_iptables_rule; use crate::prelude::*; use crate::tunnel::context::TunnelContext; +use crate::tunnel::db::PortForwardEntry; use crate::tunnel::wg::{WIREGUARD_INTERFACE_NAME, WgConfig, WgSubnetClients, WgSubnetConfig}; use crate::util::serde::{HandlerExtSerde, display_serializable}; @@ -51,6 +53,22 @@ pub fn tunnel_api() -> ParentHandler { .no_display() .with_about("about.remove-port-forward") .with_call_remote::(), + ) + .subcommand( + "update-label", + from_fn_async(update_forward_label) + .with_metadata("sync_db", 
Value::Bool(true)) + .no_display() + .with_about("about.update-port-forward-label") + .with_call_remote::(), + ) + .subcommand( + "set-enabled", + from_fn_async(set_forward_enabled) + .with_metadata("sync_db", Value::Bool(true)) + .no_display() + .with_about("about.enable-or-disable-port-forward") + .with_call_remote::(), ), ) .subcommand( @@ -73,9 +91,10 @@ pub fn tunnel_api() -> ParentHandler { ) } -#[derive(Deserialize, Serialize, Parser)] +#[derive(Deserialize, Serialize, Parser, TS)] #[serde(rename_all = "camelCase")] pub struct SubnetParams { + #[ts(type = "string")] subnet: Ipv4Net, } @@ -151,7 +170,7 @@ pub fn device_api() -> ParentHandler { ) } -#[derive(Deserialize, Serialize, Parser)] +#[derive(Deserialize, Serialize, Parser, TS)] #[serde(rename_all = "camelCase")] pub struct AddSubnetParams { name: InternedString, @@ -276,11 +295,13 @@ pub async fn remove_subnet( Ok(()) } -#[derive(Deserialize, Serialize, Parser)] +#[derive(Deserialize, Serialize, Parser, TS)] #[serde(rename_all = "camelCase")] pub struct AddDeviceParams { + #[ts(type = "string")] subnet: Ipv4Net, name: InternedString, + #[ts(type = "string | null")] ip: Option, } @@ -337,10 +358,12 @@ pub async fn add_device( server.sync().await } -#[derive(Deserialize, Serialize, Parser)] +#[derive(Deserialize, Serialize, Parser, TS)] #[serde(rename_all = "camelCase")] pub struct RemoveDeviceParams { + #[ts(type = "string")] subnet: Ipv4Net, + #[ts(type = "string")] ip: Ipv4Addr, } @@ -366,9 +389,10 @@ pub async fn remove_device( ctx.gc_forwards(&keep).await } -#[derive(Deserialize, Serialize, Parser)] +#[derive(Deserialize, Serialize, Parser, TS)] #[serde(rename_all = "camelCase")] pub struct ListDevicesParams { + #[ts(type = "string")] subnet: Ipv4Net, } @@ -386,14 +410,18 @@ pub async fn list_devices( .de() } -#[derive(Deserialize, Serialize, Parser)] +#[derive(Deserialize, Serialize, Parser, TS)] #[serde(rename_all = "camelCase")] pub struct ShowConfigParams { + #[ts(type = "string")] subnet: 
Ipv4Net, + #[ts(type = "string")] ip: Ipv4Addr, + #[ts(type = "string | null")] wan_addr: Option, #[serde(rename = "__ConnectInfo_local_addr")] #[arg(skip)] + #[ts(skip)] local_addr: Option, } @@ -448,16 +476,24 @@ pub async fn show_config( .to_string()) } -#[derive(Deserialize, Serialize, Parser)] +#[derive(Deserialize, Serialize, Parser, TS)] #[serde(rename_all = "camelCase")] pub struct AddPortForwardParams { + #[ts(type = "string")] source: SocketAddrV4, + #[ts(type = "string")] target: SocketAddrV4, + #[arg(long)] + label: Option, } pub async fn add_forward( ctx: TunnelContext, - AddPortForwardParams { source, target }: AddPortForwardParams, + AddPortForwardParams { + source, + target, + label, + }: AddPortForwardParams, ) -> Result<(), Error> { let prefix = ctx .net_iface @@ -482,10 +518,16 @@ pub async fn add_forward( m.insert(source, rc); }); + let entry = PortForwardEntry { + target, + label, + enabled: true, + }; + ctx.db .mutate(|db| { db.as_port_forwards_mut() - .insert(&source, &target) + .insert(&source, &entry) .and_then(|replaced| { if replaced.is_some() { Err(Error::new( @@ -503,9 +545,10 @@ pub async fn add_forward( Ok(()) } -#[derive(Deserialize, Serialize, Parser)] +#[derive(Deserialize, Serialize, Parser, TS)] #[serde(rename_all = "camelCase")] pub struct RemovePortForwardParams { + #[ts(type = "string")] source: SocketAddrV4, } @@ -523,3 +566,94 @@ pub async fn remove_forward( } Ok(()) } + +#[derive(Deserialize, Serialize, Parser, TS)] +#[serde(rename_all = "camelCase")] +pub struct UpdatePortForwardLabelParams { + #[ts(type = "string")] + source: SocketAddrV4, + label: Option, +} + +pub async fn update_forward_label( + ctx: TunnelContext, + UpdatePortForwardLabelParams { source, label }: UpdatePortForwardLabelParams, +) -> Result<(), Error> { + ctx.db + .mutate(|db| { + db.as_port_forwards_mut().mutate(|pf| { + let entry = pf.0.get_mut(&source).ok_or_else(|| { + Error::new( + eyre!("Port forward from {source} not found"), + 
ErrorKind::NotFound, + ) + })?; + entry.label = label; + Ok(()) + }) + }) + .await + .result +} + +#[derive(Deserialize, Serialize, Parser, TS)] +#[serde(rename_all = "camelCase")] +pub struct SetPortForwardEnabledParams { + #[ts(type = "string")] + source: SocketAddrV4, + enabled: bool, +} + +pub async fn set_forward_enabled( + ctx: TunnelContext, + SetPortForwardEnabledParams { source, enabled }: SetPortForwardEnabledParams, +) -> Result<(), Error> { + let target = ctx + .db + .mutate(|db| { + db.as_port_forwards_mut().mutate(|pf| { + let entry = pf.0.get_mut(&source).ok_or_else(|| { + Error::new( + eyre!("Port forward from {source} not found"), + ErrorKind::NotFound, + ) + })?; + entry.enabled = enabled; + Ok(entry.target) + }) + }) + .await + .result?; + + if enabled { + let prefix = ctx + .net_iface + .peek(|i| { + i.iter() + .find_map(|(_, i)| { + i.ip_info.as_ref().and_then(|i| { + i.subnets + .iter() + .find(|s| s.contains(&IpAddr::from(*target.ip()))) + }) + }) + .cloned() + }) + .map(|s| s.prefix_len()) + .unwrap_or(32); + let rc = ctx + .forward + .add_forward(source, target, prefix, None) + .await?; + ctx.active_forwards.mutate(|m| { + m.insert(source, rc); + }); + } else { + if let Some(rc) = ctx.active_forwards.mutate(|m| m.remove(&source)) { + drop(rc); + ctx.forward.gc().await?; + } + } + + Ok(()) +} diff --git a/core/src/tunnel/context.rs b/core/src/tunnel/context.rs index ac56eaa36..0d6ab5df8 100644 --- a/core/src/tunnel/context.rs +++ b/core/src/tunnel/context.rs @@ -10,8 +10,8 @@ use http::HeaderMap; use imbl::OrdMap; use imbl_value::InternedString; use include_dir::Dir; -use ipnet::Ipv4Net; use patch_db::PatchDb; +use patch_db::json_ptr::ROOT; use rpc_toolkit::yajrc::RpcError; use rpc_toolkit::{CallRemote, Context, Empty, ParentHandler}; use serde::{Deserialize, Serialize}; @@ -34,7 +34,8 @@ use crate::rpc_continuations::{OpenAuthedContinuations, RpcContinuations}; use crate::tunnel::TUNNEL_DEFAULT_LISTEN; use crate::tunnel::api::tunnel_api; 
use crate::tunnel::db::TunnelDatabase; -use crate::tunnel::wg::{WIREGUARD_INTERFACE_NAME, WgSubnetConfig}; +use crate::tunnel::migrations::run_migrations; +use crate::tunnel::wg::WIREGUARD_INTERFACE_NAME; use crate::util::collections::OrdMapIterMut; use crate::util::io::read_file_to_string; use crate::util::sync::{SyncMutex, Watch}; @@ -98,21 +99,11 @@ impl TunnelContext { tokio::fs::create_dir_all(&datadir).await?; } let db_path = datadir.join("tunnel.db"); - let db = TypedPatchDb::::load_or_init( - PatchDb::open(&db_path).await?, - || async { - let mut db = TunnelDatabase::default(); - db.wg.subnets.0.insert( - Ipv4Net::new_assert([10, 59, rand::random(), 1].into(), 24), - WgSubnetConfig { - name: "Default Subnet".into(), - ..Default::default() - }, - ); - Ok(db) - }, - ) - .await?; + let db = TypedPatchDb::::load_unchecked(PatchDb::open(&db_path).await?); + if db.dump(&ROOT).await.value.is_null() { + db.put(&ROOT, &TunnelDatabase::init()).await?; + } + db.mutate(|db| run_migrations(db)).await.result?; let listen = config.tunnel_listen.unwrap_or(TUNNEL_DEFAULT_LISTEN); let ip_info = crate::net::utils::load_ip_info().await?; let net_iface = db @@ -184,7 +175,11 @@ impl TunnelContext { } let mut active_forwards = BTreeMap::new(); - for (from, to) in peek.as_port_forwards().de()?.0 { + for (from, entry) in peek.as_port_forwards().de()?.0 { + if !entry.enabled { + continue; + } + let to = entry.target; let prefix = net_iface .peek(|i| { i.iter() @@ -206,7 +201,7 @@ impl TunnelContext { listen, db, datadir, - rpc_continuations: RpcContinuations::new(), + rpc_continuations: RpcContinuations::new(None), open_authed_continuations: OpenAuthedContinuations::new(), ephemeral_sessions: SyncMutex::new(Sessions::new()), net_iface, diff --git a/core/src/tunnel/db.rs b/core/src/tunnel/db.rs index bd83305fd..46197ce84 100644 --- a/core/src/tunnel/db.rs +++ b/core/src/tunnel/db.rs @@ -7,6 +7,7 @@ use axum::extract::ws; use clap::Parser; use imbl::{HashMap, OrdMap}; use 
imbl_value::InternedString; +use ipnet::Ipv4Net; use itertools::Itertools; use patch_db::Dump; use patch_db::json_ptr::{JsonPointer, ROOT}; @@ -25,25 +26,49 @@ use crate::rpc_continuations::{Guid, RpcContinuation}; use crate::sign::AnyVerifyingKey; use crate::tunnel::auth::SignerInfo; use crate::tunnel::context::TunnelContext; +use crate::tunnel::migrations; use crate::tunnel::web::WebserverInfo; -use crate::tunnel::wg::WgServer; +use crate::tunnel::wg::{WgServer, WgSubnetConfig}; use crate::util::serde::{HandlerExtSerde, apply_expr}; #[derive(Default, Deserialize, Serialize, HasModel, TS)] #[serde(rename_all = "camelCase")] #[model = "Model"] pub struct TunnelDatabase { + #[serde(default)] + #[ts(skip)] + pub migrations: BTreeSet, pub webserver: WebserverInfo, pub sessions: Sessions, pub password: Option, #[ts(as = "std::collections::HashMap::")] pub auth_pubkeys: HashMap, - #[ts(as = "std::collections::BTreeMap::")] + #[ts(as = "std::collections::BTreeMap::")] pub gateways: OrdMap, pub wg: WgServer, pub port_forwards: PortForwards, } +impl TunnelDatabase { + pub fn init() -> Self { + let mut db = Self { + migrations: migrations::MIGRATIONS + .iter() + .map(|m| m.name().into()) + .collect(), + ..Default::default() + }; + db.wg.subnets.0.insert( + Ipv4Net::new_assert([10, 59, rand::random(), 1].into(), 24), + WgSubnetConfig { + name: "Default Subnet".into(), + ..Default::default() + }, + ); + db + } +} + impl Model { pub fn gc_forwards(&mut self) -> Result, Error> { let mut keep_sources = BTreeSet::new(); @@ -53,7 +78,7 @@ impl Model { } self.as_port_forwards_mut().mutate(|pf| { Ok(pf.0.retain(|k, v| { - if keep_targets.contains(v.ip()) { + if keep_targets.contains(v.target.ip()) { keep_sources.insert(*k); true } else { @@ -67,14 +92,43 @@ impl Model { #[test] fn export_bindings_tunnel_db() { + use crate::tunnel::api::*; + use crate::tunnel::auth::{AddKeyParams, RemoveKeyParams, SetPasswordParams}; + TunnelDatabase::export_all_to("bindings/tunnel").unwrap(); + 
SubnetParams::export_all_to("bindings/tunnel").unwrap(); + AddSubnetParams::export_all_to("bindings/tunnel").unwrap(); + AddDeviceParams::export_all_to("bindings/tunnel").unwrap(); + RemoveDeviceParams::export_all_to("bindings/tunnel").unwrap(); + ListDevicesParams::export_all_to("bindings/tunnel").unwrap(); + ShowConfigParams::export_all_to("bindings/tunnel").unwrap(); + AddPortForwardParams::export_all_to("bindings/tunnel").unwrap(); + RemovePortForwardParams::export_all_to("bindings/tunnel").unwrap(); + UpdatePortForwardLabelParams::export_all_to("bindings/tunnel").unwrap(); + SetPortForwardEnabledParams::export_all_to("bindings/tunnel").unwrap(); + AddKeyParams::export_all_to("bindings/tunnel").unwrap(); + RemoveKeyParams::export_all_to("bindings/tunnel").unwrap(); + SetPasswordParams::export_all_to("bindings/tunnel").unwrap(); +} + +#[derive(Clone, Debug, Deserialize, Serialize, TS)] +#[serde(rename_all = "camelCase")] +pub struct PortForwardEntry { + pub target: SocketAddrV4, + pub label: Option, + #[serde(default = "default_true")] + pub enabled: bool, +} + +fn default_true() -> bool { + true } #[derive(Clone, Debug, Default, Deserialize, Serialize, TS)] -pub struct PortForwards(pub BTreeMap); +pub struct PortForwards(pub BTreeMap); impl Map for PortForwards { type Key = SocketAddrV4; - type Value = SocketAddrV4; + type Value = PortForwardEntry; fn key_str(key: &Self::Key) -> Result, Error> { Self::key_string(key) } diff --git a/core/src/tunnel/migrations/m_00_port_forward_entry.rs b/core/src/tunnel/migrations/m_00_port_forward_entry.rs new file mode 100644 index 000000000..32603ea9d --- /dev/null +++ b/core/src/tunnel/migrations/m_00_port_forward_entry.rs @@ -0,0 +1,20 @@ +use imbl_value::json; + +use super::TunnelMigration; +use crate::prelude::*; + +pub struct PortForwardEntry; +impl TunnelMigration for PortForwardEntry { + fn action(&self, db: &mut Value) -> Result<(), Error> { + for (_, value) in db["portForwards"].as_object_mut().unwrap().iter_mut() { 
+ if value.is_string() { + *value = json!({ + "target": value.clone(), + "label": null, + "enabled": true, + }); + } + } + Ok(()) + } +} diff --git a/core/src/tunnel/migrations/mod.rs b/core/src/tunnel/migrations/mod.rs new file mode 100644 index 000000000..79c60403c --- /dev/null +++ b/core/src/tunnel/migrations/mod.rs @@ -0,0 +1,34 @@ +use patch_db::ModelExt; + +use crate::prelude::*; +use crate::tunnel::db::TunnelDatabase; + +mod m_00_port_forward_entry; + +pub trait TunnelMigration { + fn name(&self) -> &'static str { + let val = std::any::type_name_of_val(self); + val.rsplit_once("::").map_or(val, |v| v.1) + } + fn action(&self, db: &mut Value) -> Result<(), Error>; +} + +pub const MIGRATIONS: &[&dyn TunnelMigration] = &[ + &m_00_port_forward_entry::PortForwardEntry, +]; + +#[instrument(skip_all)] +pub fn run_migrations(db: &mut Model) -> Result<(), Error> { + let mut migrations = db.as_migrations().de().unwrap_or_default(); + for migration in MIGRATIONS { + let name = migration.name(); + if !migrations.contains(name) { + migration.action(ModelExt::as_value_mut(db))?; + migrations.insert(name.into()); + } + } + let mut db_deser = db.de()?; + db_deser.migrations = migrations; + db.ser(&db_deser)?; + Ok(()) +} diff --git a/core/src/tunnel/mod.rs b/core/src/tunnel/mod.rs index 5d69de7c0..ffb3f89b5 100644 --- a/core/src/tunnel/mod.rs +++ b/core/src/tunnel/mod.rs @@ -9,6 +9,7 @@ pub mod api; pub mod auth; pub mod context; pub mod db; +pub(crate) mod migrations; pub mod update; pub mod web; pub mod wg; diff --git a/core/src/tunnel/web.rs b/core/src/tunnel/web.rs index 791ae9a17..05be63c5b 100644 --- a/core/src/tunnel/web.rs +++ b/core/src/tunnel/web.rs @@ -168,10 +168,10 @@ pub fn web_api() -> ParentHandler { .with_call_remote::(), ) .subcommand( - "reset", + "uninit", from_fn_async(reset_web) .no_display() - .with_about("about.reset-webserver") + .with_about("about.uninitialize-webserver") .with_call_remote::(), ) } @@ -519,32 +519,7 @@ pub async fn init_web(ctx: 
CliContext) -> Result<(), Error> { .or_not_found("certificate in chain")?; println!("📝 Root CA:"); print!("{cert}\n"); - - println!(concat!( - "To access your Web URL securely, trust your Root CA (displayed above) on your client device(s):\n", - " - MacOS\n", - " 1. Open the Terminal app\n", - " 2. Type or copy/paste the following command (**DO NOT** click Enter/Return yet): pbpaste > ~/Desktop/tunnel-ca.crt\n", - " 3. Copy your Root CA (including -----BEGIN CERTIFICATE----- and -----END CERTIFICATE-----)\n", - " 4. Back in Terminal, click Enter/Return. tunnel-ca.crt is saved to your Desktop\n", - " 5. Complete by trusting your Root CA: https://docs.start9.com/device-guides/mac/ca.html\n", - " - Linux\n", - " 1. Open gedit, nano, or any editor\n", - " 2. Copy/paste your Root CA (including -----BEGIN CERTIFICATE----- and -----END CERTIFICATE-----)\n", - " 3. Name the file tunnel-ca.crt and save as plaintext\n", - " 4. Complete by trusting your Root CA: https://docs.start9.com/device-guides/linux/ca.html\n", - " - Windows\n", - " 1. Open the Notepad app\n", - " 2. Copy/paste your Root CA (including -----BEGIN CERTIFICATE----- and -----END CERTIFICATE-----)\n", - " 3. Name the file tunnel-ca.crt and save as plaintext\n", - " 4. Complete by trusting your Root CA: https://docs.start9.com/device-guides/windows/ca.html\n", - " - Android/Graphene\n", - " 1. Send the tunnel-ca.crt file (created above) to yourself\n", - " 2. Complete by trusting your Root CA: https://docs.start9.com/device-guides/android/ca.html\n", - " - iOS\n", - " 1. Send the tunnel-ca.crt file (created above) to yourself\n", - " 2. 
Complete by trusting your Root CA: https://docs.start9.com/device-guides/ios/ca.html\n", - )); + println!("Follow instructions to trust your Root CA (recommended): https://docs.start9.com/start-tunnel/installing/index.html#trust-your-root-ca"); return Ok(()); } diff --git a/core/src/upload.rs b/core/src/upload.rs index 5812834da..96f82b812 100644 --- a/core/src/upload.rs +++ b/core/src/upload.rs @@ -13,7 +13,6 @@ use futures::{FutureExt, Stream, StreamExt, ready}; use http::header::CONTENT_LENGTH; use http::{HeaderMap, StatusCode}; use imbl_value::InternedString; -use tokio::fs::File; use tokio::io::{AsyncRead, AsyncSeek, AsyncSeekExt, AsyncWrite, AsyncWriteExt}; use tokio::sync::watch; @@ -23,6 +22,7 @@ use crate::progress::{PhaseProgressTrackerHandle, ProgressUnits}; use crate::rpc_continuations::{Guid, RpcContinuation}; use crate::s9pk::merkle_archive::source::ArchiveSource; use crate::s9pk::merkle_archive::source::multi_cursor_file::{FileCursor, MultiCursorFile}; +use crate::util::direct_io::DirectIoFile; use crate::util::io::{TmpDir, create_file}; pub async fn upload( @@ -69,16 +69,6 @@ impl Progress { false } } - fn handle_write(&mut self, res: &std::io::Result) -> bool { - match res { - Ok(a) => { - self.written += *a as u64; - self.tracker += *a as u64; - true - } - Err(e) => self.handle_error(e), - } - } async fn expected_size(watch: &mut watch::Receiver) -> Option { watch .wait_for(|progress| progress.error.is_some() || progress.expected_size.is_some()) @@ -192,16 +182,19 @@ impl UploadingFile { complete: false, }); let file = create_file(path).await?; + let multi_cursor = MultiCursorFile::open(&file).await?; + let direct_file = DirectIoFile::from_tokio_file(file).await?; let uploading = Self { tmp_dir: None, - file: MultiCursorFile::open(&file).await?, + file: multi_cursor, progress: progress.1, }; Ok(( UploadHandle { tmp_dir: None, - file, + file: direct_file, progress: progress.0, + last_synced: 0, }, uploading, )) @@ -346,8 +339,9 @@ impl AsyncSeek 
for UploadingFileReader { pub struct UploadHandle { tmp_dir: Option>, #[pin] - file: File, + file: DirectIoFile, progress: watch::Sender, + last_synced: u64, } impl UploadHandle { pub async fn upload(&mut self, request: Request) { @@ -394,6 +388,19 @@ impl UploadHandle { if let Err(e) = self.file.sync_all().await { self.progress.send_if_modified(|p| p.handle_error(&e)); } + // Update progress with final synced bytes + self.update_sync_progress(); + } + fn update_sync_progress(&mut self) { + let synced = self.file.bytes_synced(); + let delta = synced - self.last_synced; + if delta > 0 { + self.last_synced = synced; + self.progress.send_modify(|p| { + p.written += delta; + p.tracker += delta; + }); + } } } #[pin_project::pinned_drop] @@ -410,13 +417,23 @@ impl AsyncWrite for UploadHandle { buf: &[u8], ) -> Poll> { let this = self.project(); + // Update progress based on bytes actually flushed to disk + let synced = this.file.bytes_synced(); + let delta = synced - *this.last_synced; + if delta > 0 { + *this.last_synced = synced; + this.progress.send_modify(|p| { + p.written += delta; + p.tracker += delta; + }); + } match this.file.poll_write(cx, buf) { - Poll::Ready(res) => { + Poll::Ready(Err(e)) => { this.progress - .send_if_modified(|progress| progress.handle_write(&res)); - Poll::Ready(res) + .send_if_modified(|progress| progress.handle_error(&e)); + Poll::Ready(Err(e)) } - Poll::Pending => Poll::Pending, + a => a, } } fn poll_flush( diff --git a/core/src/util/direct_io.rs b/core/src/util/direct_io.rs new file mode 100644 index 000000000..67617c22c --- /dev/null +++ b/core/src/util/direct_io.rs @@ -0,0 +1,292 @@ +use std::alloc::Layout; +use std::io::Write; +use std::os::fd::AsRawFd; +use std::pin::Pin; +use std::task::{Context, Poll}; + +use tokio::io::AsyncWrite; +use tokio::task::JoinHandle; + +const BLOCK_SIZE: usize = 4096; +const BUF_CAP: usize = 256 * 1024; // 256KB + +/// Aligned buffer for O_DIRECT I/O. 
+struct AlignedBuf { + ptr: *mut u8, + len: usize, +} + +// SAFETY: We have exclusive ownership of the allocation. +unsafe impl Send for AlignedBuf {} + +impl AlignedBuf { + fn new() -> Self { + let layout = Layout::from_size_align(BUF_CAP, BLOCK_SIZE).unwrap(); + // SAFETY: layout has non-zero size + let ptr = unsafe { std::alloc::alloc(layout) }; + if ptr.is_null() { + std::alloc::handle_alloc_error(layout); + } + Self { ptr, len: 0 } + } + + fn as_slice(&self) -> &[u8] { + // SAFETY: ptr is valid for len bytes, properly aligned, exclusively owned + unsafe { std::slice::from_raw_parts(self.ptr, self.len) } + } + + fn push(&mut self, data: &[u8]) -> usize { + let n = data.len().min(BUF_CAP - self.len); + // SAFETY: src and dst don't overlap, both valid for n bytes + unsafe { + std::ptr::copy_nonoverlapping(data.as_ptr(), self.ptr.add(self.len), n); + } + self.len += n; + n + } + + fn aligned_len(&self) -> usize { + self.len & !(BLOCK_SIZE - 1) + } + + fn drain_front(&mut self, n: usize) { + debug_assert!(n <= self.len); + let remaining = self.len - n; + if remaining > 0 { + // SAFETY: regions may overlap, so we use copy (memmove) + unsafe { + std::ptr::copy(self.ptr.add(n), self.ptr, remaining); + } + } + self.len = remaining; + } + + /// Extract aligned data into a new buffer for flushing, leaving remainder. 
+ fn take_aligned(&mut self) -> Option<(AlignedBuf, u64)> { + let aligned = self.aligned_len(); + if aligned == 0 { + return None; + } + let mut flush_buf = AlignedBuf::new(); + flush_buf.push(&self.as_slice()[..aligned]); + self.drain_front(aligned); + Some((flush_buf, aligned as u64)) + } +} + +impl Drop for AlignedBuf { + fn drop(&mut self) { + let layout = Layout::from_size_align(BUF_CAP, BLOCK_SIZE).unwrap(); + // SAFETY: ptr was allocated with this layout in new() + unsafe { std::alloc::dealloc(self.ptr, layout) }; + } +} + +enum FileState { + Idle(std::fs::File), + Flushing(JoinHandle>), + Done, +} + +/// A file writer that uses O_DIRECT to bypass the kernel page cache. +/// +/// Buffers writes in an aligned buffer and flushes to disk in the background. +/// New writes can proceed while a flush is in progress (double-buffering). +/// Progress is tracked via [`bytes_synced`](Self::bytes_synced), which reflects +/// bytes actually written to disk. +pub struct DirectIoFile { + file_state: FileState, + buf: AlignedBuf, + synced: u64, +} + +impl DirectIoFile { + fn new(file: std::fs::File) -> Self { + Self { + file_state: FileState::Idle(file), + buf: AlignedBuf::new(), + synced: 0, + } + } + + /// Convert an existing tokio File into a DirectIoFile by adding O_DIRECT. + pub async fn from_tokio_file(file: tokio::fs::File) -> std::io::Result { + let std_file = file.into_std().await; + let fd = std_file.as_raw_fd(); + // SAFETY: fd is valid, F_GETFL/F_SETFL are standard fcntl ops + unsafe { + let flags = libc::fcntl(fd, libc::F_GETFL); + if flags == -1 { + return Err(std::io::Error::last_os_error()); + } + #[cfg(target_os = "linux")] + if libc::fcntl(fd, libc::F_SETFL, flags | libc::O_DIRECT) == -1 { + return Err(std::io::Error::last_os_error()); + } + } + Ok(Self::new(std_file)) + } + + /// Number of bytes confirmed written to disk. + pub fn bytes_synced(&self) -> u64 { + self.synced + } + + /// Flush any remaining buffered data and sync to disk. 
+ /// + /// Removes the O_DIRECT flag for the final partial-block write, then + /// calls fsync. Updates `bytes_synced` to the final total. + pub async fn sync_all(&mut self) -> std::io::Result<()> { + // Wait for any in-flight flush + self.await_flush().await?; + + let FileState::Idle(file) = std::mem::replace(&mut self.file_state, FileState::Done) else { + return Ok(()); + }; + + let mut buf = std::mem::replace(&mut self.buf, AlignedBuf::new()); + let remaining = buf.len as u64; + + tokio::task::spawn_blocking(move || { + let mut file = file; + + // Write any aligned portion + let aligned = buf.aligned_len(); + if aligned > 0 { + let slice = unsafe { std::slice::from_raw_parts(buf.ptr, aligned) }; + file.write_all(slice)?; + buf.drain_front(aligned); + } + + // Write remainder with O_DIRECT disabled + if buf.len > 0 { + let fd = file.as_raw_fd(); + // SAFETY: fd is valid, F_GETFL/F_SETFL are standard fcntl ops + #[cfg(target_os = "linux")] + unsafe { + let flags = libc::fcntl(fd, libc::F_GETFL); + libc::fcntl(fd, libc::F_SETFL, flags & !libc::O_DIRECT); + } + file.write_all(buf.as_slice())?; + } + + file.sync_all() + }) + .await + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))??; + + self.synced += remaining; + Ok(()) + } + + async fn await_flush(&mut self) -> std::io::Result<()> { + if let FileState::Flushing(handle) = &mut self.file_state { + let (file, flushed) = handle + .await + .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))??; + self.synced += flushed; + self.file_state = FileState::Idle(file); + } + Ok(()) + } + + /// Non-blocking poll: try to complete a pending flush. + /// Returns Ready(Ok(())) if idle (or just became idle), Pending if still flushing. 
+ fn poll_complete_flush(&mut self, cx: &mut Context<'_>) -> Poll> { + if let FileState::Flushing(handle) = &mut self.file_state { + match Pin::new(handle).poll(cx) { + Poll::Ready(Ok(Ok((file, flushed)))) => { + self.synced += flushed; + self.file_state = FileState::Idle(file); + } + Poll::Ready(Ok(Err(e))) => { + self.file_state = FileState::Done; + return Poll::Ready(Err(e)); + } + Poll::Ready(Err(e)) => { + self.file_state = FileState::Done; + return Poll::Ready(Err(std::io::Error::new(std::io::ErrorKind::Other, e))); + } + Poll::Pending => return Poll::Pending, + } + } + Poll::Ready(Ok(())) + } + + /// Start a background flush of aligned data if the file is idle. + fn maybe_start_flush(&mut self) { + if !matches!(self.file_state, FileState::Idle(_)) { + return; + } + let Some((flush_buf, count)) = self.buf.take_aligned() else { + return; + }; + let FileState::Idle(file) = std::mem::replace(&mut self.file_state, FileState::Done) else { + unreachable!() + }; + let handle = tokio::task::spawn_blocking(move || { + let mut file = file; + file.write_all(flush_buf.as_slice())?; + Ok((file, count)) + }); + self.file_state = FileState::Flushing(handle); + } +} + +impl AsyncWrite for DirectIoFile { + fn poll_write( + mut self: Pin<&mut Self>, + cx: &mut Context<'_>, + buf: &[u8], + ) -> Poll> { + // Try to complete any pending flush (non-blocking, registers waker) + match self.poll_complete_flush(cx) { + Poll::Ready(Err(e)) => return Poll::Ready(Err(e)), + _ => {} // Pending is fine — we can still accept writes into the buffer + } + + // If file just became idle and buffer has aligned data, start a flush + // to free buffer space before accepting new data + self.maybe_start_flush(); + + // Accept data into the buffer + let n = self.buf.push(buf); + if n == 0 { + // Buffer full, must wait for flush to complete and free space. + // Waker was already registered by poll_complete_flush above. 
+ return Poll::Pending; + } + + // If file is idle and we now have aligned data, start flushing + self.maybe_start_flush(); + + Poll::Ready(Ok(n)) + } + + fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match self.poll_complete_flush(cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(Err(e)) => return Poll::Ready(Err(e)), + Poll::Ready(Ok(())) => {} + } + + if self.buf.aligned_len() > 0 { + self.maybe_start_flush(); + // Poll the just-started flush + return self.poll_complete_flush(cx).map(|r| r.map(|_| ())); + } + + Poll::Ready(Ok(())) + } + + fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + match self.poll_complete_flush(cx) { + Poll::Pending => return Poll::Pending, + Poll::Ready(Err(e)) => return Poll::Ready(Err(e)), + Poll::Ready(Ok(())) => {} + } + + self.file_state = FileState::Done; + Poll::Ready(Ok(())) + } +} diff --git a/core/src/util/io.rs b/core/src/util/io.rs index 99940c373..f1478e8b0 100644 --- a/core/src/util/io.rs +++ b/core/src/util/io.rs @@ -1047,6 +1047,20 @@ pub async fn delete_file(path: impl AsRef) -> Result<(), Error> { .with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("delete {path:?}"))) } +pub async fn delete_dir(path: impl AsRef) -> Result<(), Error> { + let path = path.as_ref(); + tokio::fs::remove_dir_all(path) + .await + .or_else(|e| { + if e.kind() == std::io::ErrorKind::NotFound { + Ok(()) + } else { + Err(e) + } + }) + .with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("delete dir {path:?}"))) +} + #[instrument(skip_all)] pub async fn rename(src: impl AsRef, dst: impl AsRef) -> Result<(), Error> { let src = src.as_ref(); diff --git a/core/src/util/mod.rs b/core/src/util/mod.rs index 9aac08fd8..e9e98e039 100644 --- a/core/src/util/mod.rs +++ b/core/src/util/mod.rs @@ -38,6 +38,7 @@ pub mod collections; pub mod cpupower; pub mod crypto; pub mod data_url; +pub mod direct_io; pub mod future; pub mod http_reader; pub mod io; @@ -45,6 +46,7 @@ pub mod iter; pub mod 
logger; pub mod lshw; pub mod mime; +pub mod mok; pub mod net; pub mod rpc; pub mod rpc_client; diff --git a/core/src/util/mok.rs b/core/src/util/mok.rs new file mode 100644 index 000000000..129e30167 --- /dev/null +++ b/core/src/util/mok.rs @@ -0,0 +1,125 @@ +use std::path::Path; + +use tokio::process::Command; + +use crate::prelude::*; +use crate::util::Invoke; +use crate::util::io::{delete_file, maybe_open_file, write_file_atomic}; + +pub const DKMS_MOK_KEY: &str = "/var/lib/dkms/mok.key"; +pub const DKMS_MOK_PUB: &str = "/var/lib/dkms/mok.pub"; + +pub async fn is_secure_boot_enabled() -> bool { + String::from_utf8_lossy( + &Command::new("mokutil") + .arg("--sb-state") + .env("LANG", "C.UTF-8") + .invoke(ErrorKind::Bios) + .await + .unwrap_or_default(), + ) + .contains("SecureBoot enabled") +} + +/// Generate a DKMS MOK key pair if one doesn't exist. +pub async fn ensure_dkms_key(root: &Path) -> Result { + let key_path = root.join(DKMS_MOK_KEY.trim_start_matches('/')); + if maybe_open_file(&key_path).await?.is_some() { + return Ok(false); // Already exists + } + Command::new("chroot") + .arg(root) + .arg("dkms") + .arg("generate_mok") + .invoke(ErrorKind::Bios) + .await?; + Ok(true) // Newly generated +} + +/// Sign all unsigned kernel modules in the given root using the DKMS MOK key. +/// Calls the sign-unsigned-modules script inside the chroot. +pub async fn sign_unsigned_modules(root: &Path) -> Result<(), Error> { + Command::new("chroot") + .arg(root) + .arg("/usr/lib/startos/scripts/sign-unsigned-modules") + .invoke(ErrorKind::OpenSsl) + .await?; + Ok(()) +} + +/// Read the start9 user's password hash from /etc/shadow. +/// Returns None if the user doesn't exist or the password is locked. 
+async fn start9_shadow_hash() -> Result, Error> { + let shadow = tokio::fs::read_to_string("/etc/shadow").await?; + for line in shadow.lines() { + if let Some(("start9", rest)) = line.split_once(':') { + if let Some((hash, _)) = rest.split_once(':') { + let hash = hash.trim_start_matches("!"); + if hash.starts_with('$') { + return Ok(Some(hash.to_owned())); + } + // Locked or invalid password + return Ok(None); + } + } + } + Ok(None) +} + +/// Enroll the DKMS MOK certificate using the start9 user's password from /etc/shadow. +/// Idempotent: skips if already enrolled, or if the user's password is not yet set. +/// `mok_pub` is the path to the MOK public certificate (may be inside a chroot overlay during install). +/// Returns true if a new enrollment was staged. +pub async fn enroll_mok(mok_pub: &Path) -> Result { + tracing::info!("enroll_mok: checking EFI and mok_pub={}", mok_pub.display()); + if tokio::fs::metadata("/sys/firmware/efi").await.is_err() { + tracing::info!("enroll_mok: no EFI, skipping"); + return Ok(false); + } + if maybe_open_file(mok_pub).await?.is_none() { + tracing::info!("enroll_mok: mok_pub not found, skipping"); + return Ok(false); + } + + // Check if already enrolled in firmware + let test_output = Command::new("mokutil") + .arg("--test-key") + .arg(mok_pub) + .env("LANG", "C.UTF-8") + .invoke(ErrorKind::Bios) + .await?; + let test_str = String::from_utf8(test_output)?; + tracing::info!("enroll_mok: mokutil --test-key output: {test_str:?}"); + if test_str.contains("is enrolled") { + tracing::info!("enroll_mok: already enrolled, skipping"); + return Ok(false); + } + + let Some(hash) = start9_shadow_hash().await? 
else { + tracing::info!("enroll_mok: start9 user password not set, skipping"); + return Ok(false); + }; + + // Revoke any pending enrollment (so we can re-import with current password) + let _ = Command::new("mokutil") + .arg("--revoke-import") + .arg(mok_pub) + .invoke(ErrorKind::Bios) + .await; + + let hash_file = Path::new("/tmp/mok-password-hash"); + write_file_atomic(hash_file, &hash).await?; + + tracing::info!("Enrolling DKMS MOK certificate"); + let result = Command::new("mokutil") + .arg("--import") + .arg(mok_pub) + .arg("--hash-file") + .arg(hash_file) + .invoke(ErrorKind::Bios) + .await; + + delete_file(hash_file).await.log_err(); + result?; + Ok(true) +} diff --git a/core/src/version/mod.rs b/core/src/version/mod.rs index 4c17bc32f..a4585358f 100644 --- a/core/src/version/mod.rs +++ b/core/src/version/mod.rs @@ -60,8 +60,9 @@ mod v0_4_0_alpha_17; mod v0_4_0_alpha_18; mod v0_4_0_alpha_19; mod v0_4_0_alpha_20; +mod v0_4_0_alpha_21; -pub type Current = v0_4_0_alpha_20::Version; // VERSION_BUMP +pub type Current = v0_4_0_alpha_21::Version; // VERSION_BUMP impl Current { #[instrument(skip(self, db))] @@ -189,7 +190,8 @@ enum Version { V0_4_0_alpha_17(Wrapper), V0_4_0_alpha_18(Wrapper), V0_4_0_alpha_19(Wrapper), - V0_4_0_alpha_20(Wrapper), // VERSION_BUMP + V0_4_0_alpha_20(Wrapper), + V0_4_0_alpha_21(Wrapper), // VERSION_BUMP Other(exver::Version), } @@ -252,7 +254,8 @@ impl Version { Self::V0_4_0_alpha_17(v) => DynVersion(Box::new(v.0)), Self::V0_4_0_alpha_18(v) => DynVersion(Box::new(v.0)), Self::V0_4_0_alpha_19(v) => DynVersion(Box::new(v.0)), - Self::V0_4_0_alpha_20(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP + Self::V0_4_0_alpha_20(v) => DynVersion(Box::new(v.0)), + Self::V0_4_0_alpha_21(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP Self::Other(v) => { return Err(Error::new( eyre!("unknown version {v}"), @@ -307,7 +310,8 @@ impl Version { Version::V0_4_0_alpha_17(Wrapper(x)) => x.semver(), Version::V0_4_0_alpha_18(Wrapper(x)) => x.semver(), 
Version::V0_4_0_alpha_19(Wrapper(x)) => x.semver(), - Version::V0_4_0_alpha_20(Wrapper(x)) => x.semver(), // VERSION_BUMP + Version::V0_4_0_alpha_20(Wrapper(x)) => x.semver(), + Version::V0_4_0_alpha_21(Wrapper(x)) => x.semver(), // VERSION_BUMP Version::Other(x) => x.clone(), } } diff --git a/core/src/version/v0_3_6_alpha_0.rs b/core/src/version/v0_3_6_alpha_0.rs index fbae2fc2f..e9c623bad 100644 --- a/core/src/version/v0_3_6_alpha_0.rs +++ b/core/src/version/v0_3_6_alpha_0.rs @@ -143,7 +143,8 @@ pub struct Version; impl VersionT for Version { type Previous = v0_3_5_2::Version; - type PreUpRes = (AccountInfo, SshKeys, CifsTargets); + /// (package_id, host_id, expanded_key) + type PreUpRes = (AccountInfo, SshKeys, CifsTargets, Vec<(String, String, [u8; 64])>); fn semver(self) -> exver::Version { V0_3_6_alpha_0.clone() } @@ -158,15 +159,17 @@ impl VersionT for Version { let cifs = previous_cifs(&pg).await?; + let tor_keys = previous_tor_keys(&pg).await?; + Command::new("systemctl") .arg("stop") .arg("postgresql@*.service") .invoke(crate::ErrorKind::Database) .await?; - Ok((account, ssh_keys, cifs)) + Ok((account, ssh_keys, cifs, tor_keys)) } - fn up(self, db: &mut Value, (account, ssh_keys, cifs): Self::PreUpRes) -> Result { + fn up(self, db: &mut Value, (account, ssh_keys, cifs, tor_keys): Self::PreUpRes) -> Result { let prev_package_data = db["package-data"].clone(); let wifi = json!({ @@ -183,6 +186,11 @@ impl VersionT for Version { "shuttingDown": db["server-info"]["status-info"]["shutting-down"], "restarting": db["server-info"]["status-info"]["restarting"], }); + let tor_address: String = from_value(db["server-info"]["tor-address"].clone())?; + let onion_address = tor_address + .replace("https://", "") + .replace("http://", "") + .replace(".onion/", ""); let server_info = { let mut server_info = json!({ "arch": db["server-info"]["arch"], @@ -196,15 +204,9 @@ impl VersionT for Version { }); server_info["postInitMigrationTodos"] = json!({}); - let tor_address: 
String = from_value(db["server-info"]["tor-address"].clone())?; // Maybe we do this like the Public::init does - server_info["torAddress"] = json!(tor_address); - server_info["onionAddress"] = json!( - tor_address - .replace("https://", "") - .replace("http://", "") - .replace(".onion/", "") - ); + server_info["torAddress"] = json!(&tor_address); + server_info["onionAddress"] = json!(&onion_address); server_info["networkInterfaces"] = json!({}); server_info["statusInfo"] = status_info; server_info["wifi"] = wifi; @@ -233,6 +235,30 @@ impl VersionT for Version { let private = { let mut value = json!({}); value["keyStore"] = crate::dbg!(to_value(&keystore)?); + // Preserve tor onion keys so later migrations (v0_4_0_alpha_20) can + // include them in onion-migration.json for the tor service. + if !tor_keys.is_empty() { + let mut onion_map: Value = json!({}); + let onion_obj = onion_map.as_object_mut().unwrap(); + let mut tor_migration = imbl::Vector::::new(); + for (package_id, host_id, key_bytes) in &tor_keys { + let onion_addr = onion_address_from_key(key_bytes); + let encoded_key = + base64::Engine::encode(&crate::util::serde::BASE64, key_bytes); + onion_obj.insert( + onion_addr.as_str().into(), + Value::String(encoded_key.clone().into()), + ); + tor_migration.push_back(json!({ + "hostname": &onion_addr, + "packageId": package_id, + "hostId": host_id, + "key": &encoded_key, + })); + } + value["keyStore"]["onion"] = onion_map; + value["torMigration"] = Value::Array(tor_migration); + } value["password"] = to_value(&account.password)?; value["compatS9pkKey"] = to_value(&crate::db::model::private::generate_developer_key())?; @@ -498,3 +524,109 @@ async fn previous_ssh_keys(pg: &sqlx::Pool) -> Result`. +/// Server key uses `("STARTOS", "STARTOS")`. +#[tracing::instrument(skip_all)] +async fn previous_tor_keys( + pg: &sqlx::Pool, +) -> Result, Error> { + let mut keys = Vec::new(); + + // Server tor key from the account table. + // Older installs have tor_key (64 bytes). 
Newer installs (post-NetworkKeys migration) + // made tor_key nullable and use network_key (32 bytes, needs expansion) instead. + let row = sqlx::query(r#"SELECT tor_key, network_key FROM account"#) + .fetch_one(pg) + .await + .with_kind(ErrorKind::Database)?; + if let Ok(tor_key) = row.try_get::, _>("tor_key") { + if let Ok(key) = <[u8; 64]>::try_from(tor_key) { + keys.push(("STARTOS".to_owned(), "STARTOS".to_owned(), key)); + } + } else if let Ok(net_key) = row.try_get::, _>("network_key") { + if let Ok(seed) = <[u8; 32]>::try_from(net_key) { + keys.push(( + "STARTOS".to_owned(), + "STARTOS".to_owned(), + crate::util::crypto::ed25519_expand_key(&seed), + )); + } + } + + // Package tor keys from the network_keys table (32-byte keys that need expansion) + if let Ok(rows) = sqlx::query(r#"SELECT package, interface, key FROM network_keys"#) + .fetch_all(pg) + .await + { + for row in rows { + let Ok(package) = row.try_get::("package") else { + continue; + }; + let Ok(interface) = row.try_get::("interface") else { + continue; + }; + let Ok(key_bytes) = row.try_get::, _>("key") else { + continue; + }; + if let Ok(seed) = <[u8; 32]>::try_from(key_bytes) { + keys.push(( + package, + interface, + crate::util::crypto::ed25519_expand_key(&seed), + )); + } + } + } + + // Package tor keys from the tor table (already 64-byte expanded keys) + if let Ok(rows) = sqlx::query(r#"SELECT package, interface, key FROM tor"#) + .fetch_all(pg) + .await + { + for row in rows { + let Ok(package) = row.try_get::("package") else { + continue; + }; + let Ok(interface) = row.try_get::("interface") else { + continue; + }; + let Ok(key_bytes) = row.try_get::, _>("key") else { + continue; + }; + if let Ok(key) = <[u8; 64]>::try_from(key_bytes) { + keys.push((package, interface, key)); + } + } + } + + Ok(keys) +} + +/// Derive the tor v3 onion address (without .onion suffix) from a 64-byte +/// expanded ed25519 secret key. 
+fn onion_address_from_key(expanded_key: &[u8; 64]) -> String { + use sha3::Digest; + + // Derive public key from expanded secret key using ed25519-dalek v1 + let esk = + ed25519_dalek_v1::ExpandedSecretKey::from_bytes(expanded_key).expect("invalid tor key"); + let pk = ed25519_dalek_v1::PublicKey::from(&esk); + let pk_bytes = pk.to_bytes(); + + // Compute onion v3 address: base32(pubkey || checksum || version) + // checksum = SHA3-256(".onion checksum" || pubkey || version)[0..2] + let mut hasher = sha3::Sha3_256::new(); + hasher.update(b".onion checksum"); + hasher.update(&pk_bytes); + hasher.update(b"\x03"); + let hash = hasher.finalize(); + + let mut raw = [0u8; 35]; + raw[..32].copy_from_slice(&pk_bytes); + raw[32] = hash[0]; // checksum byte 0 + raw[33] = hash[1]; // checksum byte 1 + raw[34] = 0x03; // version + + base32::encode(base32::Alphabet::Rfc4648 { padding: false }, &raw).to_ascii_lowercase() +} diff --git a/core/src/version/v0_4_0_alpha_20.rs b/core/src/version/v0_4_0_alpha_20.rs index 62b454bb1..01bff2251 100644 --- a/core/src/version/v0_4_0_alpha_20.rs +++ b/core/src/version/v0_4_0_alpha_20.rs @@ -2,11 +2,13 @@ use std::path::Path; use exver::{PreReleaseSegment, VersionRange}; use imbl_value::json; +use reqwest::Url; use super::v0_3_5::V0_3_0_COMPAT; use super::{VersionT, v0_4_0_alpha_19}; use crate::context::RpcContext; use crate::prelude::*; +use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile; lazy_static::lazy_static! { static ref V0_4_0_alpha_20: exver::Version = exver::Version::new( @@ -33,74 +35,106 @@ impl VersionT for Version { } #[instrument(skip_all)] fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result { - // Extract onion migration data before removing it - let onion_store = db + // Use the pre-built torMigration data from v0_3_6_alpha_0 if available. 
+ // This contains all (hostname, packageId, hostId, key) entries with keys + // already resolved, avoiding the issue where packageData is empty during + // migration (packages aren't reinstalled until post_up). + let migration_data = if let Some(tor_migration) = db .get("private") - .and_then(|p| p.get("keyStore")) - .and_then(|k| k.get("onion")) - .cloned() - .unwrap_or(Value::Object(Default::default())); - - let mut addresses = imbl::Vector::::new(); - - // Extract OS host onion addresses - if let Some(onions) = db - .get("public") - .and_then(|p| p.get("serverInfo")) - .and_then(|s| s.get("network")) - .and_then(|n| n.get("host")) - .and_then(|h| h.get("onions")) - .and_then(|o| o.as_array()) + .and_then(|p| p.get("torMigration")) + .and_then(|t| t.as_array()) { - for onion in onions { - if let Some(hostname) = onion.as_str() { - let key = onion_store - .get(hostname) - .and_then(|v| v.as_str()) - .unwrap_or_default(); - addresses.push_back(json!({ - "hostname": hostname, - "packageId": "STARTOS", - "hostId": "STARTOS", - "key": key, - })); + json!({ + "addresses": tor_migration.clone(), + }) + } else { + // Fallback for fresh installs or installs that didn't go through + // v0_3_6_alpha_0 with the torMigration field. 
+ let onion_store = db + .get("private") + .and_then(|p| p.get("keyStore")) + .and_then(|k| k.get("onion")) + .cloned() + .unwrap_or(Value::Object(Default::default())); + + let mut addresses = imbl::Vector::::new(); + + // Extract OS host onion addresses + if let Some(onions) = db + .get("public") + .and_then(|p| p.get("serverInfo")) + .and_then(|s| s.get("network")) + .and_then(|n| n.get("host")) + .and_then(|h| h.get("onions")) + .and_then(|o| o.as_array()) + { + for onion in onions { + if let Some(hostname) = onion.as_str() { + let key = onion_store + .get(hostname) + .and_then(|v| v.as_str()) + .ok_or_else(|| { + Error::new( + eyre!("missing tor key for onion address {hostname}"), + ErrorKind::Database, + ) + })?; + addresses.push_back(json!({ + "hostname": hostname, + "packageId": "STARTOS", + "hostId": "startos-ui", + "key": key, + })); + } } } - } - // Extract package host onion addresses - if let Some(packages) = db - .get("public") - .and_then(|p| p.get("packageData")) - .and_then(|p| p.as_object()) - { - for (package_id, package) in packages.iter() { - if let Some(hosts) = package.get("hosts").and_then(|h| h.as_object()) { - for (host_id, host) in hosts.iter() { - if let Some(onions) = host.get("onions").and_then(|o| o.as_array()) { - for onion in onions { - if let Some(hostname) = onion.as_str() { - let key = onion_store - .get(hostname) - .and_then(|v| v.as_str()) - .unwrap_or_default(); - addresses.push_back(json!({ - "hostname": hostname, - "packageId": &**package_id, - "hostId": &**host_id, - "key": key, - })); + // Extract package host onion addresses + if let Some(packages) = db + .get("public") + .and_then(|p| p.get("packageData")) + .and_then(|p| p.as_object()) + { + for (package_id, package) in packages.iter() { + if let Some(hosts) = package.get("hosts").and_then(|h| h.as_object()) { + for (host_id, host) in hosts.iter() { + if let Some(onions) = host.get("onions").and_then(|o| o.as_array()) { + for onion in onions { + if let Some(hostname) = 
onion.as_str() { + let key = onion_store + .get(hostname) + .and_then(|v| v.as_str()) + .ok_or_else(|| { + Error::new( + eyre!( + "missing tor key for onion address {hostname}" + ), + ErrorKind::Database, + ) + })?; + addresses.push_back(json!({ + "hostname": hostname, + "packageId": &**package_id, + "hostId": &**host_id, + "key": key, + })); + } } } } } } } - } - let migration_data = json!({ - "addresses": addresses, - }); + json!({ + "addresses": addresses, + }) + }; + + // Clean up torMigration from private + if let Some(private) = db.get_mut("private").and_then(|p| p.as_object_mut()) { + private.remove("torMigration"); + } // Remove onions and tor-related fields from server host if let Some(host) = db @@ -200,7 +234,7 @@ impl VersionT for Version { } #[instrument(skip_all)] - async fn post_up(self, _ctx: &RpcContext, input: Value) -> Result<(), Error> { + async fn post_up(self, ctx: &RpcContext, input: Value) -> Result<(), Error> { let path = Path::new( "/media/startos/data/package-data/volumes/tor/data/startos/onion-migration.json", ); @@ -209,6 +243,53 @@ impl VersionT for Version { crate::util::io::write_file_atomic(path, json).await?; + // Sideload the bundled tor s9pk + let s9pk_path_str = format!("/usr/lib/startos/tor_{}.s9pk", crate::ARCH); + let s9pk_path = Path::new(&s9pk_path_str); + if tokio::fs::metadata(s9pk_path).await.is_ok() { + if let Err(e) = async { + let package_s9pk = tokio::fs::File::open(s9pk_path).await?; + let file = MultiCursorFile::open(&package_s9pk).await?; + + let key = ctx.db.peek().await.into_private().into_developer_key(); + let registry_url = + Url::parse("https://registry.start9.com/").with_kind(ErrorKind::ParseUrl)?; + + ctx.services + .install( + ctx.clone(), + || crate::s9pk::load(file.clone(), || Ok(key.de()?.0), None), + None, + None::, + None, + ) + .await? + .await? 
+ .await?; + + // Set the marketplace URL on the installed tor package + let tor_id = "tor".parse::()?; + ctx.db + .mutate(|db| { + if let Some(pkg) = + db.as_public_mut().as_package_data_mut().as_idx_mut(&tor_id) + { + pkg.as_registry_mut().ser(&Some(registry_url))?; + } + Ok(()) + }) + .await + .result?; + + Ok::<_, Error>(()) + } + .await + { + tracing::error!("Error installing tor package: {e}"); + tracing::debug!("{e:?}"); + } + } + Ok(()) } fn down(self, _db: &mut Value) -> Result<(), Error> { diff --git a/core/src/version/v0_4_0_alpha_21.rs b/core/src/version/v0_4_0_alpha_21.rs new file mode 100644 index 000000000..2ca9631b3 --- /dev/null +++ b/core/src/version/v0_4_0_alpha_21.rs @@ -0,0 +1,37 @@ +use exver::{PreReleaseSegment, VersionRange}; + +use super::v0_3_5::V0_3_0_COMPAT; +use super::{VersionT, v0_4_0_alpha_20}; +use crate::prelude::*; + +lazy_static::lazy_static! { + static ref V0_4_0_alpha_21: exver::Version = exver::Version::new( + [0, 4, 0], + [PreReleaseSegment::String("alpha".into()), 21.into()] + ); +} + +#[derive(Clone, Copy, Debug, Default)] +pub struct Version; + +impl VersionT for Version { + type Previous = v0_4_0_alpha_20::Version; + type PreUpRes = (); + + async fn pre_up(self) -> Result { + Ok(()) + } + fn semver(self) -> exver::Version { + V0_4_0_alpha_21.clone() + } + fn compat(self) -> &'static VersionRange { + &V0_3_0_COMPAT + } + #[instrument(skip_all)] + fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result { + Ok(Value::Null) + } + fn down(self, _db: &mut Value) -> Result<(), Error> { + Ok(()) + } +} diff --git a/core/src/volume.rs b/core/src/volume.rs index 2755fd809..22bce54f1 100644 --- a/core/src/volume.rs +++ b/core/src/volume.rs @@ -1,13 +1,19 @@ use std::path::{Path, PathBuf}; +use tokio::process::Command; + use crate::PackageId; pub use crate::VolumeId; use crate::prelude::*; +use crate::util::Invoke; use crate::util::VersionString; +use crate::DATA_DIR; pub const PKG_VOLUME_DIR: &str = "package-data/volumes"; pub 
const BACKUP_DIR: &str = "/media/startos/backups"; +const INSTALL_BACKUP_SUFFIX: &str = ".install-backup"; + pub fn data_dir>(datadir: P, pkg_id: &PackageId, volume_id: &VolumeId) -> PathBuf { datadir .as_ref() @@ -33,3 +39,70 @@ pub fn asset_dir>( pub fn backup_dir(pkg_id: &PackageId) -> PathBuf { Path::new(BACKUP_DIR).join(pkg_id).join("data") } + +fn pkg_volume_dir(pkg_id: &PackageId) -> PathBuf { + Path::new(DATA_DIR).join(PKG_VOLUME_DIR).join(pkg_id) +} + +fn install_backup_path(pkg_id: &PackageId) -> PathBuf { + Path::new(DATA_DIR) + .join(PKG_VOLUME_DIR) + .join(format!("{pkg_id}{INSTALL_BACKUP_SUFFIX}")) +} + +/// Creates a COW snapshot of the package volume directory before install. +/// Uses `cp --reflink=always` so it's instant on btrfs and fails gracefully +/// on ext4 (no backup, current behavior preserved). +/// Returns `true` if a backup was created, `false` if no data existed or +/// the filesystem doesn't support reflinks. +pub async fn snapshot_volumes_for_install(pkg_id: &PackageId) -> Result { + let src = pkg_volume_dir(pkg_id); + if tokio::fs::metadata(&src).await.is_err() { + return Ok(false); + } + let dst = install_backup_path(pkg_id); + // Remove any stale backup from a previous failed attempt + crate::util::io::delete_dir(&dst).await?; + match Command::new("cp") + .arg("-a") + .arg("--reflink=always") + .arg(&src) + .arg(&dst) + .invoke(ErrorKind::Filesystem) + .await + { + Ok(_) => { + tracing::info!("Created install backup for {pkg_id} at {dst:?}"); + Ok(true) + } + Err(e) => { + tracing::warn!( + "Could not create install backup for {pkg_id} \ + (filesystem may not support reflinks): {e}" + ); + // Clean up partial copy if any + crate::util::io::delete_dir(&dst).await?; + Ok(false) + } + } +} + +/// Restores the package volume directory from a COW snapshot after a failed +/// install. The current (possibly corrupted) volume dir is deleted first. +/// No-op if no backup exists. 
+pub async fn restore_volumes_from_install_backup(pkg_id: &PackageId) -> Result<(), Error> { + let backup = install_backup_path(pkg_id); + if tokio::fs::metadata(&backup).await.is_err() { + return Ok(()); + } + let dst = pkg_volume_dir(pkg_id); + crate::util::io::delete_dir(&dst).await?; + crate::util::io::rename(&backup, &dst).await?; + tracing::info!("Restored volumes from install backup for {pkg_id}"); + Ok(()) +} + +/// Removes the install backup after a successful install. +pub async fn remove_install_backup(pkg_id: &PackageId) -> Result<(), Error> { + crate::util::io::delete_dir(&install_backup_path(pkg_id)).await +} diff --git a/core/start-tunneld.service b/core/start-tunneld.service index b0d0a2043..402326614 100644 --- a/core/start-tunneld.service +++ b/core/start-tunneld.service @@ -1,5 +1,7 @@ [Unit] Description=StartTunnel +After=network-online.target +Wants=network-online.target [Service] Type=simple diff --git a/debian/startos/postinst b/debian/startos/postinst index 246589f57..a3e3e946a 100755 --- a/debian/startos/postinst +++ b/debian/startos/postinst @@ -28,13 +28,12 @@ if [ -f /etc/default/grub ]; then sed -i '/\(^\|#\)'"$1"'=/d' /etc/default/grub printf '%s="%s"\n' "$1" "$2" >> /etc/default/grub } - # Enable both graphical and serial terminal output - grub_set GRUB_TERMINAL_INPUT 'console serial' - grub_set GRUB_TERMINAL_OUTPUT 'gfxterm serial' - # Remove GRUB_TERMINAL if present (replaced by INPUT/OUTPUT above) + # Graphical terminal (serial added conditionally via /etc/grub.d/01_serial) + grub_set GRUB_TERMINAL_INPUT 'console' + grub_set GRUB_TERMINAL_OUTPUT 'gfxterm' + # Remove GRUB_TERMINAL and GRUB_SERIAL_COMMAND if present sed -i '/^\(#\|\)GRUB_TERMINAL=/d' /etc/default/grub - # Serial console settings - grub_set GRUB_SERIAL_COMMAND 'serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1' + sed -i '/^\(#\|\)GRUB_SERIAL_COMMAND=/d' /etc/default/grub # Graphics mode and splash background grub_set GRUB_GFXMODE 800x600 grub_set 
GRUB_GFXPAYLOAD_LINUX keep @@ -49,6 +48,42 @@ if [ -f /etc/default/grub ]; then mkdir -p /boot/grub/startos-theme cp -r /usr/lib/startos/grub-theme/* /boot/grub/startos-theme/ fi + # Copy font to boot partition so GRUB can load it without accessing rootfs + if [ -f /usr/share/grub/unicode.pf2 ]; then + mkdir -p /boot/grub/fonts + cp /usr/share/grub/unicode.pf2 /boot/grub/fonts/unicode.pf2 + fi + # Install conditional serial console script for GRUB + cat > /etc/grub.d/01_serial <<-'GRUBEOF' + #!/bin/sh + cat << 'EOF' + # Conditionally enable serial console (avoids breaking gfxterm on EFI + # systems where the serial port is unavailable) + if serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1; then + terminal_input console serial + terminal_output gfxterm serial + fi + EOF + GRUBEOF + chmod +x /etc/grub.d/01_serial + # Install GRUB script to boot from StartOS installer USB when present. + # At boot, GRUB searches for the .startos-installer marker and, + # if found, loads the USB's own grub.cfg as the default boot entry. 
+ cat > /etc/grub.d/07_startos_installer <<-'GRUBEOF' + #!/bin/sh + cat << 'EOF' + search --no-floppy --file --set=installer_dev /.startos-installer + if [ -n "$installer_dev" ]; then + menuentry "StartOS Installer" --id startos-installer { + set root=$installer_dev + configfile /boot/grub/grub.cfg + } + set default=startos-installer + set timeout=5 + fi + EOF + GRUBEOF + chmod +x /etc/grub.d/07_startos_installer fi VERSION="$(cat /usr/lib/startos/VERSION.txt)" diff --git a/patch-db b/patch-db index 05c93290c..12227eab1 160000 --- a/patch-db +++ b/patch-db @@ -1 +1 @@ -Subproject commit 05c93290c759bdf5e7308a24cf0d4a440ed287a0 +Subproject commit 12227eab18ec2f56b66fa16f3e49411a6eaae6f2 diff --git a/sdk/ARCHITECTURE.md b/sdk/ARCHITECTURE.md new file mode 100644 index 000000000..f785b2494 --- /dev/null +++ b/sdk/ARCHITECTURE.md @@ -0,0 +1,422 @@ +# SDK Architecture + +The Start SDK is split into two npm packages that form a layered architecture: **base** provides the foundational types, ABI contract, and effects interface; **package** builds on base to provide the developer-facing SDK facade. 
+ +``` +┌─────────────────────────────────────────────────────────────┐ +│ package/ (@start9labs/start-sdk) │ +│ Developer-facing facade, daemon management, health checks, │ +│ backup system, file helpers, triggers, subcontainers │ +│ │ +│ ┌───────────────────────────────────────────────────────┐ │ +│ │ base/ (@start9labs/start-sdk-base) │ │ +│ │ ABI, Effects, OS bindings, actions/input builders, │ │ +│ │ ExVer parser, interfaces, dependencies, S9pk, utils │ │ +│ └───────────────────────────────────────────────────────┘ │ +└─────────────────────────────────────────────────────────────┘ + │ ▲ + │ Effects calls (RPC) │ Callbacks + ▼ │ +┌─────────────────────────────────────────────────────────────┐ +│ StartOS Runtime (Rust supervisor) │ +│ Executes effects, manages containers, networking, storage │ +└─────────────────────────────────────────────────────────────┘ +``` + +The SDK follows [Semantic Versioning](https://semver.org/) within the `0.4.0-beta.*` pre-release series. The SDK version tracks independently from the StartOS release versions. + +## Base Package (`base/`) + +The base package is a self-contained library of types, interfaces, and low-level builders. It has no dependency on the package layer and can be used independently when only type definitions or validation are needed. + +### OS Bindings (`base/lib/osBindings/`) + +~325 auto-generated TypeScript files defining every type exchanged between the SDK and the StartOS runtime. These cover the full surface area of the system: manifests, actions, health checks, service interfaces, bind parameters, dependency requirements, alerts, SSL, domains, SMTP, networking, images, and more. + +All bindings are re-exported through `base/lib/osBindings/index.ts`. 
+ +Key types include: +- `Manifest` — The full service package manifest as understood by the OS +- `ActionMetadata` — Describes an action's name, description, visibility, and availability +- `BindParams` — Port binding configuration (protocol, hostId, internal port) +- `ServiceInterface` — A network endpoint exported to users +- `DependencyRequirement` — Version range and health check requirements for a dependency +- `SetHealth` — Health check result reporting +- `HostnameInfo` / `Host` — Hostname and host metadata + +### ABI and Core Types (`base/lib/types.ts`) + +Defines the Application Binary Interface — the contract every service package must fulfill: + +```typescript +namespace ExpectedExports { + main // Start the service daemon(s) + init // Initialize on install/update/restore + uninit // Clean up on uninstall/update/shutdown + manifest // Service metadata + actions // User-invocable operations + createBackup // Export service data +} +``` + +Also defines foundational types used throughout the SDK: +- `Daemon` / `DaemonReturned` — Running process handles with `wait()` and `term()` +- `CommandType` — Shell string, argv array, or `UseEntrypoint` +- `ServiceInterfaceType` — `'ui' | 'api' | 'p2p'` +- `SmtpValue` — SMTP server configuration +- `KnownError` — Structured user-facing errors +- `DependsOn` — Package-to-health-check dependency mapping +- `PathMaker`, `MaybePromise`, `DeepPartial`, `DeepReadonly` — Utility types + +### Effects Interface (`base/lib/Effects.ts`) + +The bridge between TypeScript service code and the StartOS runtime. Every runtime capability is accessed through an `Effects` object passed to lifecycle hooks. 
+ +Effects are organized by subsystem: + +| Subsystem | Methods | Purpose | +|-----------|---------|---------| +| **Action** | `export`, `clear`, `getInput`, `run`, `createTask`, `clearTasks` | Register and invoke user actions | +| **Control** | `restart`, `shutdown`, `getStatus`, `setMainStatus` | Service lifecycle control | +| **Dependency** | `setDependencies`, `getDependencies`, `checkDependencies`, `mount`, `getInstalledPackages`, `getServiceManifest` | Inter-service dependency management | +| **Health** | `setHealth` | Report health check results | +| **Subcontainer** | `createFs`, `destroyFs` | Container filesystem management | +| **Networking** | `bind`, `getServicePortForward`, `clearBindings`, `getHostInfo`, `getContainerIp`, `getOsIp`, `getOutboundGateway` | Port binding and network info | +| **Interfaces** | `exportServiceInterface`, `getServiceInterface`, `listServiceInterfaces`, `clearServiceInterfaces` | Service endpoint management | +| **Plugin** | `plugin.url.register`, `plugin.url.exportUrl`, `plugin.url.clearUrls` | Plugin system hooks | +| **SSL** | `getSslCertificate`, `getSslKey` | TLS certificate management | +| **System** | `getSystemSmtp`, `setDataVersion`, `getDataVersion` | System-wide configuration | + +Effects also support reactive callbacks: many methods accept an optional `callback` parameter that the runtime invokes when the underlying value changes, enabling the reactive subscription patterns (`const()`, `watch()`, etc.). + +### Action and Input System (`base/lib/actions/`) + +#### Actions (`setupActions.ts`) + +The `Action` class defines user-invocable operations. Two factory methods: +- `Action.withInput(id, metadata, inputSpec, prefill, execute)` — Action with a validated form +- `Action.withoutInput(id, metadata, execute)` — Action without user input + +`Actions` is a typed map accumulated via `.addAction()` chaining. 
+ +#### Input Specification (`actions/input/`) + +A builder-pattern system for declaring validated form inputs: + +``` +inputSpec/ +├── builder/ +│ ├── inputSpec.ts — InputSpec.of() entry point +│ ├── value.ts — Value class (individual form fields) +│ ├── list.ts — List builder (arrays of values) +│ └── variants.ts — Variants/Union builder (conditional fields) +├── inputSpecTypes.ts — Type definitions for all field types +└── inputSpecConstants.ts — Pre-built specs (SMTP, etc.) +``` + +Supported field types via `Value`: +- `text`, `textarea`, `number` — Text and numeric input +- `toggle` — Boolean switch +- `select`, `multiselect` — Single/multi-choice dropdown +- `list` — Repeatable array of sub-values +- `color`, `datetime` — Specialized pickers +- `object` — Nested sub-form +- `union` / `dynamicUnion` — Conditional fields based on a discriminator + +### Dependencies (`base/lib/dependencies/`) + +- `setupDependencies.ts` — Declare what the service depends on (package IDs, version ranges, health checks) +- `dependencies.ts` — Runtime dependency checking via `checkDependencies()` + +### Interfaces (`base/lib/interfaces/`) + +Network interface declaration and port binding: + +- `setupInterfaces.ts` — Top-level `setupServiceInterfaces()` function +- `Host.ts` — `MultiHost` class for binding ports and exporting interfaces. A single MultiHost can bind a port and export multiple interfaces (e.g. 
a primary UI and admin UI on the same port with different paths) +- `ServiceInterfaceBuilder.ts` — Builder for constructing `ServiceInterface` objects with name, type, description, scheme overrides, username, path, and query params +- `setupExportedUrls.ts` — URL plugin support for exporting URLs to other services + +### Initialization (`base/lib/inits/`) + +- `setupInit.ts` — Compose init scripts that run on install, update, restore, or boot +- `setupUninit.ts` — Compose uninit scripts that run on uninstall, update, or shutdown +- `setupOnInit` / `setupOnUninit` — Register callbacks for specific init/uninit events + +Init scripts receive a `kind` parameter (`'install' | 'update' | 'restore' | null`) so they can branch logic based on the initialization context. + +### Extended Versioning (`base/lib/exver/`) + +A PEG parser-based versioning system that extends semver: + +- **`Version`** — Standard semantic version (`1.2.3-beta.1`) +- **`ExtendedVersion` (ExVer)** — Adds an optional flavor prefix and a downstream version: `#flavor:upstream:downstream` +- **`VersionRange`** — Boolean expressions over version comparisons (`>=1.0.0 && <2.0.0 || =3.0.0`) + +The parser is generated from `exver.pegjs` via Peggy and emitted as `exver.ts`. + +ExVer separates upstream project versions from StartOS wrapper versions, allowing the package maintainer's versioning to evolve independently from the upstream software. + +### S9pk Format (`base/lib/s9pk/`) + +Parser and verifier for `.s9pk` service package archives: + +- `S9pk` class — Deserialize and inspect package contents +- Merkle archive support for cryptographic verification of package integrity +- Methods: `deserialize()`, `icon()`, `license()`, etc. 
+ +### Utilities (`base/lib/util/`) + +~28 utility modules including: + +**Reactive subscription wrappers** — Each wraps an Effects callback-based method into a consistent reactive API: +- `Watchable` — Base class providing `const()`, `once()`, `watch()`, `onChange()`, `waitFor()` +- `GetContainerIp`, `GetStatus`, `GetSystemSmtp`, `GetOutboundGateway`, `GetSslCertificate`, `GetHostInfo`, `GetServiceManifest` — Typed wrappers for specific Effects methods + +**General utilities:** +- `deepEqual` / `deepMerge` — Deep object comparison and merging +- `patterns` — Hostname regex, port validators +- `splitCommand` — Parse shell command strings into argv arrays +- `Drop` — RAII-style cleanup utility +- `graph` — Dependency graph utilities + +## Package Layer (`package/`) + +The package layer provides the developer-facing API. It re-exports everything from base and adds higher-level abstractions. + +### StartSdk Facade (`package/lib/StartSdk.ts`) + +The primary entry point for service developers. 
Constructed via a builder chain: + +```typescript +const sdk = StartSdk.of() + .withManifest(manifest) + .build(true) +``` + +The `.build()` method returns an object containing the entire SDK surface area, organized by concern: + +| Category | Members | Purpose | +|----------|---------|---------| +| **Manifest** | `manifest`, `volumes` | Access manifest data and volume paths | +| **Actions** | `Action.withInput`, `Action.withoutInput`, `Actions`, `action.run`, `action.createTask`, `action.createOwnTask`, `action.clearTask` | Define and manage user actions | +| **Daemons** | `Daemons.of`, `Daemon.of`, `setupMain` | Configure service processes | +| **Health** | `healthCheck.checkPortListening`, `.checkWebUrl`, `.runHealthScript` | Built-in health checks | +| **Interfaces** | `createInterface`, `MultiHost.of`, `setupInterfaces`, `serviceInterface.*` | Network endpoint management | +| **Backups** | `setupBackups`, `Backups.ofVolumes`, `Backups.ofSyncs`, `Backups.withOptions` | Backup configuration | +| **Dependencies** | `setupDependencies`, `checkDependencies` | Dependency declaration and verification | +| **Init/Uninit** | `setupInit`, `setupUninit`, `setupOnInit`, `setupOnUninit` | Lifecycle hooks | +| **Containers** | `SubContainer.of`, `SubContainer.withTemp`, `Mounts.of` | Container execution with mounts | +| **Forms** | `InputSpec.of`, `Value`, `Variants`, `List` | Form input builders | +| **Triggers** | `trigger.defaultTrigger`, `.cooldownTrigger`, `.changeOnFirstSuccess`, `.successFailure` | Health check polling strategies | +| **Reactive** | `getContainerIp`, `getStatus`, `getSystemSmtp`, `getOutboundGateway`, `getSslCertificate`, `getServiceManifest` | Subscription-based data access | +| **Plugins** | `plugin.url.register`, `plugin.url.exportUrl` | Plugin system (gated by manifest `plugins` field) | +| **Effects** | `restart`, `shutdown`, `setHealth`, `mount`, `clearBindings`, ... 
| Direct effect wrappers | +| **Utilities** | `nullIfEmpty`, `useEntrypoint`, `patterns`, `setDataVersion`, `getDataVersion` | Misc helpers | + +### Daemon Management (`package/lib/mainFn/`) + +The daemon subsystem manages long-running processes: + +``` +mainFn/ +├── Daemons.ts — Multi-daemon topology builder +├── Daemon.ts — Single daemon wrapper +├── HealthDaemon.ts — Health check executor +├── CommandController.ts — Command execution controller +├── Mounts.ts — Volume/asset/dependency mount builder +├── Oneshot.ts — One-time startup commands +└── index.ts — setupMain() entry point +``` + +**Daemons** is a builder that accumulates process definitions: +```typescript +sdk.Daemons.of(effects) + .addDaemon('db', { /* command, ready probe, mounts */ }) + .addDaemon('app', { requires: ['db'], /* ... */ }) + .addHealthCheck('primary', { /* ... */ }) +``` + +Features: +- Startup ordering via `requires` (dependency graph between daemons) +- Ready probes (wait for a daemon to be ready before starting dependents) +- Graceful shutdown with configurable signals and timeouts +- One-shot commands that run before daemons start + +**Mounts** declares what to attach to a container: +```typescript +sdk.Mounts.of() + .mountVolume('main', '/data') + .mountAssets('scripts', '/scripts') + .mountDependency('bitcoind', 'main', '/bitcoin-data', { readonly: true }) + .mountBackup('/backup') +``` + +### Health Checks (`package/lib/health/`) + +``` +health/ +├── HealthCheck.ts — Periodic probe with startup grace period +└── checkFns/ + ├── checkPortListening.ts — TCP port connectivity check + ├── checkWebUrl.ts — HTTP(S) status code check + └── runHealthScript.ts — Script exit code check +``` + +Health checks are paired with **triggers** that control polling behavior: +- `defaultTrigger` — Fixed interval (e.g. 
every 30s) +- `cooldownTrigger` — Wait longer after failures +- `changeOnFirstSuccess` — Rapid polling until first success, then slow down +- `successFailure` — Different intervals for healthy vs unhealthy states + +### Backup System (`package/lib/backup/`) + +``` +backup/ +├── setupBackups.ts — Top-level setup function +└── Backups.ts — Volume selection and rsync options +``` + +Three builder patterns: +- `Backups.ofVolumes('main', 'data')` — Back up entire volumes +- `Backups.ofSyncs([{ dataPath, backupPath }])` — Custom sync pairs +- `Backups.withOptions({ exclude: ['cache/'] })` — Rsync options + +### File Helpers (`package/lib/util/fileHelper.ts`) + +Type-safe configuration file management: + +```typescript +const configFile = FileHelper.yaml(effects, sdk.volumes.main.path('config.yml'), { + port: 8080, + debug: false, +}) + +// Reactive reading +const config = await configFile.read.const(effects) + +// Partial merge +await configFile.merge({ debug: true }) + +// Full write +await configFile.write({ port: 9090, debug: true }) +``` + +Supported formats: JSON, YAML, TOML, INI, ENV, and custom parsers. + +### Subcontainers (`package/lib/util/SubContainer.ts`) + +Execute commands in isolated container environments: + +```typescript +// Long-lived subcontainer +const container = await sdk.SubContainer.of(effects, { imageId: 'main' }, mounts, 'app') + +// One-shot execution +await sdk.SubContainer.withTemp(effects, { imageId: 'main' }, mounts, 'migrate', async (c) => { + await c.exec(['run-migrations']) +}) +``` + +### Manifest Building (`package/lib/manifest/`) + +```typescript +const manifest = setupManifest({ + id: 'my-service', + title: 'My Service', + license: 'MIT', + description: { short: '...', long: '...' }, + images: { main: { source: { dockerTag: 'myimage:1.0' } } }, + volumes: { main: {} }, + dependencies: {}, + // ... 
+}) + +export default buildManifest(manifest) +``` + +`buildManifest()` finalizes the manifest with the current SDK version, OS version compatibility, and migration version ranges. + +### Versioning (`package/lib/version/`) + +Helpers for data version management during migrations: + +```typescript +await sdk.setDataVersion(effects, '1.2.0:0') +const version = await sdk.getDataVersion(effects) +``` + +Used in init scripts to track which migration version the service's data has been brought to. + +### Internationalization (`package/lib/i18n/`) + +```typescript +const t = setupI18n({ en_US: enStrings, es_ES: esStrings }) +const greeting = t('hello', { name: 'World' }) // "Hello, World!" or "Hola, World!" +``` + +Supports locale fallback and Intl-based formatting. + +### Triggers (`package/lib/trigger/`) + +Polling strategy functions that determine when health checks run: + +```typescript +sdk.trigger.defaultTrigger({ timeout: 30_000 }) +sdk.trigger.cooldownTrigger({ timeout: 30_000, cooldown: 60_000 }) +sdk.trigger.changeOnFirstSuccess({ first: 5_000, then: 30_000 }) +sdk.trigger.successFailure({ success: 60_000, failure: 10_000 }) +``` + +## Build Pipeline + +See [CONTRIBUTING.md](CONTRIBUTING.md) for detailed build instructions, make targets, and development workflow. + +At a high level: Peggy generates the ExVer parser, TypeScript compiles both packages in strict mode (base to `baseDist/`, package to `dist/`), hand-written `.js`/`.d.ts` pairs are copied into the output, and `node_modules` are bundled for self-contained distribution. + +## Data Flow + +A typical service package lifecycle: + +``` +1. INSTALL / UPDATE / RESTORE + ├── init({ effects, kind }) + │ ├── Version migrations (if update) + │ ├── setupDependencies() + │ ├── setupInterfaces() → bind ports, export interfaces + │ └── Actions registration → export actions to OS + │ +2. 
MAIN + │ setupMain() → Daemons.of(effects) + │ ├── Oneshots run first + │ ├── Daemons start in dependency order + │ ├── Health checks begin polling + │ └── Service runs until shutdown/restart + │ +3. SHUTDOWN / UNINSTALL / UPDATE + │ uninit({ effects, target }) + │ └── Version down-migrations (if needed) + │ +4. BACKUP (user-triggered) + createBackup({ effects }) + └── rsync volumes to backup location +``` + +## Key Design Patterns + +### Builder Pattern +Most SDK APIs use immutable builder chains: `Daemons.of().addDaemon().addHealthCheck()`, `Mounts.of().mountVolume().mountAssets()`, `Actions.of().addAction()`. This provides type accumulation — each chained call narrows the type to reflect what has been configured. + +### Effects as Capability System +All runtime interactions go through the `Effects` object rather than direct system calls. This makes the runtime boundary explicit, enables the OS to mediate all side effects, and makes service code testable by providing mock effects. + +### Reactive Subscriptions +The `Watchable` base class provides a consistent API for values that can change over time: +- `const(effects)` — Read once; if the value changes, triggers a retry of the enclosing context +- `once()` — Read once without reactivity +- `watch()` — Async generator yielding on each change +- `onChange(callback)` — Invoke callback on each change +- `waitFor(predicate)` — Block until a condition is met + +### Type-safe Manifest Threading +The manifest type flows through the entire SDK via generics. When you call `StartSdk.of().withManifest(manifest)`, the manifest's volume names, image IDs, dependency IDs, and plugin list become available as type constraints throughout all subsequent API calls. For example, `Mounts.of().mountVolume()` only accepts volume names declared in the manifest. 
diff --git a/sdk/CHANGELOG.md b/sdk/CHANGELOG.md new file mode 100644 index 000000000..f980422b2 --- /dev/null +++ b/sdk/CHANGELOG.md @@ -0,0 +1,133 @@ +# Changelog + +## 0.4.0-beta.61 — StartOS v0.4.0-alpha.21 (2026-03-16) + +### Fixed + +- Fixed bug where leaving the effect context triggered consts + +## 0.4.0-beta.60 — StartOS v0.4.0-alpha.20 (2026-03-16) + +### Added + +- Tunnel TS type exports and port forward labels +- Secure Boot MOK key enrollment fields in `SetupInfo` + +### Changed + +- Consolidated `Watchable` base class with generic `map`/`eq` support; renamed `call` to `fetch` +- Moved `GetServiceManifest` and `GetSslCertificate` from `package/` to `base/` +- Simplified `getServiceInterface`, `getServiceInterfaces`, `GetOutboundGateway`, `GetSystemSmtp`, and `fileHelper` using `Watchable` base class +- Simplified SDK Makefile with rsync + +### Fixed + +- Added `restart_again` flag to `DesiredStatus::Restarting` + +## 0.4.0-beta.59 — StartOS v0.4.0-alpha.20 (2026-03-06) + +### Added + +- Support for preferred external ports besides 443 +- Bridge filter kind on service interfaces + +### Fixed + +- Merge version ranges when adding existing package signer +- Task fix for action task system + +## 0.4.0-beta.56 — StartOS v0.4.0-alpha.19 (2026-02-02) + +### Added + +- `getOutboundGateway` effect and SDK wrapper +- Improved service version migration and data version handling +- `zod-deep-partial` integration with `partialValidator` on `InputSpec` +- SMTP rework with improved provider variants and system SMTP spec + +### Changed + +- Migrated from `ts-matches` to `zod` across all TypeScript packages +- Builder-style `InputSpec` API with prefill plumbing +- Split `row_actions` into `remove_action` and `overflow_actions` for URL plugins + +### Fixed + +- Scoped public domain to single binding and return single port check +- Preserved `z` namespace types for SDK consumers +- `--arch` flag falls back to emulation when native image unavailable + +## 0.4.0-beta.54 — 
StartOS v0.4.0-alpha.18 (2026-01-27) + +### Added + +- Device info RPC +- Hardware acceleration and NVIDIA card support on nonfree images + +### Changed + +- Consolidated setup flow +- Improved SDK abort handling and `InputSpec` filtering + +## 0.4.0-beta.49 — StartOS v0.4.0-alpha.17 (2026-01-10) + +### Added + +- JSDoc comments on all consumer-facing APIs +- StartTunnel random subnet support +- Port 80 to 5443 tunnel mapping + +### Fixed + +- `EffectCreator` type corrections +- Allow multiple equal signs in ENV `FileHelper` values +- Miscellaneous alpha.16 follow-up fixes + +## 0.4.0-beta.45 — StartOS v0.4.0-alpha.16 (2025-12-18) + +### Added + +- `map` and `eq` on `getServiceInterface` watcher +- Flavor-aware version range handling + +### Changed + +- Refactored `StatusInfo` types +- Improved shutdown ordering for daemons +- Improved StartTunnel validation and garbage collection + +## 0.4.0-beta.43 — StartOS v0.4.0-alpha.15 (2025-11-26) + +### Fixed + +- Minor bugfixes for alpha.14 + +## 0.4.0-beta.42 — StartOS v0.4.0-alpha.14 (2025-11-20) + +### Fixed + +- Bugfixes for alpha.13 + +## 0.4.0-beta.41 — StartOS v0.4.0-alpha.13 (2025-11-15) + +### Fixed + +- Bugfixes for alpha.12 + +## 0.4.0-beta.40 — StartOS v0.4.0-alpha.12 (2025-11-07) + +### Added + +- StartTunnel integration +- Configurable `textarea` rows in `InputSpec` + +## 0.4.0-beta.39 — StartOS v0.4.0-alpha.11 (2025-09-24) + +### Added + +- Gateway limiting for StartTunnel +- Improved copy UX around Tor SSL + +### Changed + +- SDK type updates and internal improvements diff --git a/sdk/CLAUDE.md b/sdk/CLAUDE.md index d03111f86..6ebd1ce4c 100644 --- a/sdk/CLAUDE.md +++ b/sdk/CLAUDE.md @@ -6,3 +6,10 @@ TypeScript SDK for packaging services for StartOS (`@start9labs/start-sdk`). 
2. Use the format: `## <sdk-version> — StartOS <startos-version> (<date>)`
+ +## Building + +### Full Build + +```bash +make bundle +``` + +This runs the complete pipeline: TypeScript compilation, hand-written pair copying, node_modules bundling, formatting, and tests. Outputs land in `baseDist/` (base) and `dist/` (package). + +### Individual Targets + +| Target | Description | +|--------|-------------| +| `make bundle` | Full build: compile + format + test | +| `make baseDist` | Compile base package only | +| `make dist` | Compile full package (depends on base) | +| `make fmt` | Run Prettier on all `.ts` files | +| `make check` | Type-check without emitting (both packages) | +| `make clean` | Remove all build artifacts and node_modules | + +### What the Build Does + +1. **Peggy parser generation** — `base/lib/exver/exver.pegjs` is compiled to `exver.ts` (the ExVer version parser) +2. **TypeScript compilation** — Strict mode, CommonJS output, declaration files + - `base/` compiles to `baseDist/` + - `package/` compiles to `dist/` +3. **Hand-written pair copying** — `.js`/`.d.ts` files without a corresponding `.ts` source are copied into the output directories. These are manually maintained JavaScript files with hand-written type declarations. +4. **Dependency bundling** — `node_modules/` is rsynced into both output directories so the published package is self-contained +5. **Formatting** — Prettier formats all TypeScript source +6. **Testing** — Jest runs both test suites + +## Testing + +```bash +# Run all tests (base + package) +make test + +# Run base tests only +make base/test + +# Run package tests only +make package/test + +# Run a specific test file directly +cd base && npx jest --testPathPattern=exver +cd package && npx jest --testPathPattern=host +``` + +Tests use [Jest](https://jestjs.io/) with [ts-jest](https://kulshekhar.github.io/ts-jest/) for TypeScript support. Configuration is in each sub-package's `jest.config.js`. 
+ +### Test Files + +Tests live alongside their subjects or in dedicated `test/` directories: + +- `base/lib/test/` — ExVer parsing, input spec types, deep merge, graph utilities, type validation +- `base/lib/util/inMs.test.ts` — Time conversion utility +- `package/lib/test/` — Health checks, host binding, input spec builder + +Test files use the `.test.ts` extension and are excluded from compilation via `tsconfig.json`. + +## Formatting + +```bash +make fmt +``` + +Runs Prettier with the project config (single quotes, no semicolons, trailing commas, 2-space indent). The Prettier config lives in each sub-package's `package.json`: + +```json +{ + "trailingComma": "all", + "tabWidth": 2, + "semi": false, + "singleQuote": true +} +``` + +## Type Checking + +To check types without building: + +```bash +make check +``` + +Or directly per package: + +```bash +cd base && npm run check +cd package && npm run check +``` + +Both packages use strict TypeScript (`"strict": true`) targeting ES2021 with CommonJS module output. + +## Local Development with a Service Package + +To test SDK changes against a local service package without publishing to npm: + +```bash +# Build and create a local npm link +make link + +# In your service package directory +npm link @start9labs/start-sdk +``` + +This symlinks the built `dist/` into your global node_modules so your service package picks up local changes. + +## Publishing + +```bash +make publish +``` + +This builds the full bundle, then runs `npm publish --access=public --tag=latest` from `dist/`. The published package is `@start9labs/start-sdk`. + +Only the `dist/` directory is published — it contains the compiled JavaScript, declaration files, bundled dependencies, and package metadata. + +## Adding New Features + +### Base vs Package + +Decide where new code belongs: + +- **`base/`** — Types, interfaces, ABI contracts, OS bindings, and low-level builders that have no dependency on the package layer. 
Code here should be usable independently. +- **`package/`** — Developer-facing API, convenience wrappers, runtime helpers (daemons, health checks, backups, file helpers, subcontainers). Code here imports from base and adds higher-level abstractions. + +### Key Conventions + +- **Builder pattern** — Most APIs use immutable builder chains (`.addDaemon()`, `.mountVolume()`, `.addAction()`). Each call returns a new type that accumulates configuration. +- **Effects boundary** — All runtime interactions go through the `Effects` interface. Never call system APIs directly. +- **Manifest type threading** — The manifest type flows through generics so that volume names, image IDs, and dependency IDs are type-constrained. +- **Re-export from package** — If you add a new export to base, also re-export it from `package/lib/index.ts` (or expose it through `StartSdk.build()`). + +### Adding OS Bindings + +Types in `base/lib/osBindings/` mirror Rust types from the StartOS core. When Rust types change, the corresponding TypeScript bindings need updating. These are re-exported through `base/lib/osBindings/index.ts`. + +### Writing Tests + +- Place test files next to the code they test, or in the `test/` directory +- Use the `.test.ts` extension +- Tests run in Node.js with ts-jest — no browser environment + +## Commit Messages + +Follow [Conventional Commits](https://www.conventionalcommits.org/): + +``` +feat(sdk): add WebSocket health check +fix(sdk): correct ExVer range parsing for pre-release versions +test(sdk): add coverage for MultiHost port binding +``` + +See the root [CONTRIBUTING.md](../CONTRIBUTING.md#commit-messages) for the full convention. 
diff --git a/sdk/base/LICENSE b/sdk/LICENSE similarity index 100% rename from sdk/base/LICENSE rename to sdk/LICENSE diff --git a/sdk/Makefile b/sdk/Makefile index 9370ab372..c793b3594 100644 --- a/sdk/Makefile +++ b/sdk/Makefile @@ -27,38 +27,24 @@ bundle: baseDist dist | test fmt base/lib/exver/exver.ts: base/node_modules base/lib/exver/exver.pegjs cd base && npm run peggy -baseDist: $(PACKAGE_TS_FILES) $(BASE_TS_FILES) base/package.json base/node_modules base/README.md base/LICENSE +baseDist: $(PACKAGE_TS_FILES) $(BASE_TS_FILES) base/package.json base/node_modules base/README.md LICENSE (cd base && npm run tsc) - # Copy hand-written .js/.d.ts pairs (no corresponding .ts source) into the output. - cd base/lib && find . -name '*.js' | while read f; do \ - base="$${f%.js}"; \ - if [ -f "$$base.d.ts" ] && [ ! -f "$$base.ts" ]; then \ - mkdir -p "../../baseDist/$$(dirname "$$f")"; \ - cp "$$f" "../../baseDist/$$f"; \ - cp "$$base.d.ts" "../../baseDist/$$base.d.ts"; \ - fi; \ - done + rsync -ac --include='*.js' --include='*.d.ts' --include='*/' --exclude='*' base/lib/ baseDist/ rsync -ac base/node_modules baseDist/ cp base/package.json baseDist/package.json cp base/README.md baseDist/README.md - cp base/LICENSE baseDist/LICENSE + cp LICENSE baseDist/LICENSE touch baseDist -dist: $(PACKAGE_TS_FILES) $(BASE_TS_FILES) package/package.json package/.npmignore package/node_modules package/README.md package/LICENSE +dist: $(PACKAGE_TS_FILES) $(BASE_TS_FILES) package/package.json package/.npmignore package/node_modules README.md LICENSE CHANGELOG.md (cd package && npm run tsc) - cd base/lib && find . -name '*.js' | while read f; do \ - base="$${f%.js}"; \ - if [ -f "$$base.d.ts" ] && [ ! 
-f "$$base.ts" ]; then \ - mkdir -p "../../dist/base/lib/$$(dirname "$$f")"; \ - cp "$$f" "../../dist/base/lib/$$f"; \ - cp "$$base.d.ts" "../../dist/base/lib/$$base.d.ts"; \ - fi; \ - done + rsync -ac --include='*.js' --include='*.d.ts' --include='*/' --exclude='*' base/lib/ dist/base/lib/ rsync -ac package/node_modules dist/ cp package/.npmignore dist/.npmignore cp package/package.json dist/package.json - cp package/README.md dist/README.md - cp package/LICENSE dist/LICENSE + cp README.md dist/README.md + cp LICENSE dist/LICENSE + cp CHANGELOG.md dist/CHANGELOG.md touch dist full-bundle: bundle @@ -86,7 +72,7 @@ base/node_modules: base/package-lock.json node_modules: package/node_modules base/node_modules -publish: bundle package/package.json package/README.md package/LICENSE +publish: bundle package/package.json README.md LICENSE CHANGELOG.md cd dist && npm publish --access=public --tag=latest link: bundle diff --git a/sdk/README.md b/sdk/README.md new file mode 100644 index 000000000..e06b79d8c --- /dev/null +++ b/sdk/README.md @@ -0,0 +1,103 @@ +# Start SDK + +The Start SDK (`@start9labs/start-sdk`) is a TypeScript framework for packaging services to run on [StartOS](https://github.com/Start9Labs/start-os). It provides a strongly-typed, builder-pattern API for defining every aspect of a service package: manifests, daemons, health checks, networking interfaces, actions, backups, dependencies, configuration, and more. + +## Features + +- **Type-safe manifest definitions** - Declare your service's identity, metadata, images, volumes, and dependencies with full TypeScript inference. +- **Daemon management** - Define multi-process topologies with startup ordering, ready probes, and graceful shutdown via `Daemons.of().addDaemon()`. +- **Health checks** - Built-in checks for TCP port listening, HTTP(S) endpoints, and custom scripts, with configurable polling strategies (fixed interval, cooldown, adaptive). 
+- **Network interfaces** - Bind ports, export UI/API/P2P interfaces, and manage hostnames with MultiHost and ServiceInterfaceBuilder. +- **User actions** - Create interactive operations with validated form inputs (text, select, toggle, list, union/variants, and more) that users can trigger from the StartOS UI. +- **Backup and restore** - Rsync-based volume backups with exclude patterns and custom sync paths. +- **Dependency management** - Declare inter-service dependencies with version ranges, health check requirements, and volume mounts. +- **Configuration file helpers** - Read, write, and merge JSON, YAML, TOML, INI, and ENV files with type-safe `FileHelper`. +- **Reactive subscriptions** - Watch for changes to container IPs, SSL certificates, SMTP config, service status, and more with `const()`, `once()`, `watch()`, `onChange()`, and `waitFor()` patterns. +- **Extended versioning (ExVer)** - Flavor-aware semantic versioning with range matching, supporting independent upstream and downstream version tracking. +- **Internationalization** - Built-in i18n support with locale fallback and parameter substitution. +- **Container execution** - Run commands in subcontainers with volume mounts, environment variables, and entrypoint overrides. +- **Plugin system** - Extensible plugin architecture (e.g. `url-v0` for URL management). + +## Quick Start + +```typescript +import { setupManifest, buildManifest } from '@start9labs/start-sdk' + +const manifest = setupManifest({ + id: 'my-service', + title: 'My Service', + license: 'MIT', + // ... 
+}) + +export default buildManifest(manifest) +``` + +The primary entry point is the `StartSdk` facade: + +```typescript +import { StartSdk } from '@start9labs/start-sdk' +import { manifest } from './manifest' + +export const sdk = StartSdk.of().withManifest(manifest).build(true) +``` + +From there, `sdk` exposes the full toolkit: + +```typescript +// Define daemons +export const main = sdk.setupMain(async ({ effects }) => + sdk.Daemons.of(effects) + .addDaemon('primary', { /* ... */ }) +) + +// Define actions +export const setName = sdk.Action.withInput('set-name', /* ... */) + +// Define interfaces +export const setInterfaces = sdk.setupInterfaces(async ({ effects }) => { + const multi = sdk.MultiHost.of(effects, 'web') + const origin = await multi.bindPort(80, { protocol: 'http' }) + const ui = sdk.createInterface(effects, { name: 'Web UI', id: 'ui', /* ... */ }) + return [await origin.export([ui])] +}) + +// Define backups +export const { createBackup, restoreBackup } = sdk.setupBackups( + async () => sdk.Backups.ofVolumes('main') +) +``` + +## Packages + +| Package | npm | Description | +|---------|-----|-------------| +| `package/` | `@start9labs/start-sdk` | Full SDK for service developers | +| `base/` | `@start9labs/start-sdk-base` | Core types, ABI definitions, and effects interface | + +## Documentation + +For comprehensive packaging guides, tutorials, and API reference: + +**[docs.start9.com/packaging](https://docs.start9.com/packaging)** + +The packaging docs cover: +- Environment setup and prerequisites +- Project structure and conventions +- Manifest, main, interfaces, actions, and all other service modules +- File models and configuration management +- Versioning, migrations, and initialization +- Dependencies and cross-service communication +- Building and installing `.s9pk` packages + +## Contributing + +See [CONTRIBUTING.md](CONTRIBUTING.md) for environment setup, building from source, testing, and development workflow. 
+ +## Architecture + +See [ARCHITECTURE.md](ARCHITECTURE.md) for a detailed overview of the SDK's internal structure, module responsibilities, and data flow. + +## License + +MIT diff --git a/sdk/base/lib/Effects.ts b/sdk/base/lib/Effects.ts index d3d0b8923..554390654 100644 --- a/sdk/base/lib/Effects.ts +++ b/sdk/base/lib/Effects.ts @@ -69,7 +69,7 @@ export type Effects = { getStatus(options: { packageId?: PackageId callback?: () => void - }): Promise + }): Promise /** DEPRECATED: indicate to the host os what runstate the service is in */ setMainStatus(options: SetMainStatus): Promise diff --git a/sdk/base/lib/osBindings/AddPackageSignerParams.ts b/sdk/base/lib/osBindings/AddPackageSignerParams.ts index e9a7788ff..6baebf0c8 100644 --- a/sdk/base/lib/osBindings/AddPackageSignerParams.ts +++ b/sdk/base/lib/osBindings/AddPackageSignerParams.ts @@ -6,5 +6,5 @@ export type AddPackageSignerParams = { id: PackageId signer: Guid versions: string | null - merge?: boolean + merge: boolean } diff --git a/sdk/base/lib/osBindings/AttachParams.ts b/sdk/base/lib/osBindings/AttachParams.ts index 31283fec6..e469833c2 100644 --- a/sdk/base/lib/osBindings/AttachParams.ts +++ b/sdk/base/lib/osBindings/AttachParams.ts @@ -4,5 +4,5 @@ import type { EncryptedWire } from './EncryptedWire' export type AttachParams = { password: EncryptedWire | null guid: string - kiosk?: boolean + kiosk: boolean } diff --git a/sdk/base/lib/osBindings/DesiredStatus.ts b/sdk/base/lib/osBindings/DesiredStatus.ts index 72411a339..56afa2bca 100644 --- a/sdk/base/lib/osBindings/DesiredStatus.ts +++ b/sdk/base/lib/osBindings/DesiredStatus.ts @@ -3,6 +3,6 @@ import type { StartStop } from './StartStop' export type DesiredStatus = | { main: 'stopped' } - | { main: 'restarting' } + | { main: 'restarting'; restartAgain: boolean } | { main: 'running' } | { main: 'backing-up'; onComplete: StartStop } diff --git a/sdk/base/lib/osBindings/ServerInfo.ts b/sdk/base/lib/osBindings/ServerInfo.ts index a0eb98e0a..540110109 
100644 --- a/sdk/base/lib/osBindings/ServerInfo.ts +++ b/sdk/base/lib/osBindings/ServerInfo.ts @@ -26,7 +26,7 @@ export type ServerInfo = { zram: boolean governor: Governor | null smtp: SmtpValue | null - ifconfigUrl: string + echoipUrls: string[] ram: number devices: Array kiosk: boolean | null diff --git a/sdk/base/lib/osBindings/SetupExecuteParams.ts b/sdk/base/lib/osBindings/SetupExecuteParams.ts index 69f358c54..a1581c752 100644 --- a/sdk/base/lib/osBindings/SetupExecuteParams.ts +++ b/sdk/base/lib/osBindings/SetupExecuteParams.ts @@ -6,7 +6,7 @@ export type SetupExecuteParams = { guid: string password: EncryptedWire | null recoverySource: RecoverySource | null - kiosk?: boolean + kiosk: boolean name: string | null hostname: string | null } diff --git a/sdk/base/lib/osBindings/SetupInfo.ts b/sdk/base/lib/osBindings/SetupInfo.ts index 06b6447e6..5f78d28c8 100644 --- a/sdk/base/lib/osBindings/SetupInfo.ts +++ b/sdk/base/lib/osBindings/SetupInfo.ts @@ -1,3 +1,7 @@ // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
-export type SetupInfo = { guid: string | null; attach: boolean } +export type SetupInfo = { + guid: string | null + attach: boolean + mokEnrolled: boolean +} diff --git a/sdk/base/lib/osBindings/index.ts b/sdk/base/lib/osBindings/index.ts index 3df8c985f..25e45f0f0 100644 --- a/sdk/base/lib/osBindings/index.ts +++ b/sdk/base/lib/osBindings/index.ts @@ -306,3 +306,4 @@ export { WifiInfo } from './WifiInfo' export { WifiListInfo } from './WifiListInfo' export { WifiListOut } from './WifiListOut' export { WifiSsidParams } from './WifiSsidParams' +export * as Tunnel from './tunnel' diff --git a/sdk/base/lib/osBindings/tunnel/AddDeviceParams.ts b/sdk/base/lib/osBindings/tunnel/AddDeviceParams.ts new file mode 100644 index 000000000..c5ff2738d --- /dev/null +++ b/sdk/base/lib/osBindings/tunnel/AddDeviceParams.ts @@ -0,0 +1,7 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +export type AddDeviceParams = { + subnet: string + name: string + ip: string | null +} diff --git a/sdk/base/lib/osBindings/tunnel/AddKeyParams.ts b/sdk/base/lib/osBindings/tunnel/AddKeyParams.ts new file mode 100644 index 000000000..5bb62746d --- /dev/null +++ b/sdk/base/lib/osBindings/tunnel/AddKeyParams.ts @@ -0,0 +1,4 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. +import type { AnyVerifyingKey } from './AnyVerifyingKey' + +export type AddKeyParams = { name: string; key: AnyVerifyingKey } diff --git a/sdk/base/lib/osBindings/tunnel/AddPortForwardParams.ts b/sdk/base/lib/osBindings/tunnel/AddPortForwardParams.ts new file mode 100644 index 000000000..ea50dca51 --- /dev/null +++ b/sdk/base/lib/osBindings/tunnel/AddPortForwardParams.ts @@ -0,0 +1,7 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
+ +export type AddPortForwardParams = { + source: string + target: string + label: string | null +} diff --git a/sdk/base/lib/osBindings/tunnel/AddSubnetParams.ts b/sdk/base/lib/osBindings/tunnel/AddSubnetParams.ts new file mode 100644 index 000000000..8790ad8a4 --- /dev/null +++ b/sdk/base/lib/osBindings/tunnel/AddSubnetParams.ts @@ -0,0 +1,3 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +export type AddSubnetParams = { name: string } diff --git a/sdk/base/lib/osBindings/tunnel/GatewayId.ts b/sdk/base/lib/osBindings/tunnel/GatewayId.ts new file mode 100644 index 000000000..1b0cc9b38 --- /dev/null +++ b/sdk/base/lib/osBindings/tunnel/GatewayId.ts @@ -0,0 +1,3 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +export type GatewayId = string diff --git a/sdk/base/lib/osBindings/tunnel/GatewayType.ts b/sdk/base/lib/osBindings/tunnel/GatewayType.ts new file mode 100644 index 000000000..aa7a2d6ed --- /dev/null +++ b/sdk/base/lib/osBindings/tunnel/GatewayType.ts @@ -0,0 +1,3 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +export type GatewayType = 'inbound-outbound' | 'outbound-only' diff --git a/sdk/base/lib/osBindings/tunnel/IpInfo.ts b/sdk/base/lib/osBindings/tunnel/IpInfo.ts new file mode 100644 index 000000000..8cc7e206e --- /dev/null +++ b/sdk/base/lib/osBindings/tunnel/IpInfo.ts @@ -0,0 +1,13 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
+import type { NetworkInterfaceType } from './NetworkInterfaceType' + +export type IpInfo = { + name: string + scopeId: number + deviceType: NetworkInterfaceType | null + subnets: string[] + lanIp: string[] + wanIp: string | null + ntpServers: string[] + dnsServers: string[] +} diff --git a/sdk/base/lib/osBindings/tunnel/ListDevicesParams.ts b/sdk/base/lib/osBindings/tunnel/ListDevicesParams.ts new file mode 100644 index 000000000..2e2c17085 --- /dev/null +++ b/sdk/base/lib/osBindings/tunnel/ListDevicesParams.ts @@ -0,0 +1,3 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +export type ListDevicesParams = { subnet: string } diff --git a/sdk/base/lib/osBindings/tunnel/NetworkInterfaceInfo.ts b/sdk/base/lib/osBindings/tunnel/NetworkInterfaceInfo.ts new file mode 100644 index 000000000..a57f3c1e9 --- /dev/null +++ b/sdk/base/lib/osBindings/tunnel/NetworkInterfaceInfo.ts @@ -0,0 +1,10 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. +import type { GatewayType } from './GatewayType' +import type { IpInfo } from './IpInfo' + +export type NetworkInterfaceInfo = { + name: string | null + secure: boolean | null + ipInfo: IpInfo | null + type: GatewayType | null +} diff --git a/sdk/base/lib/osBindings/tunnel/NetworkInterfaceType.ts b/sdk/base/lib/osBindings/tunnel/NetworkInterfaceType.ts new file mode 100644 index 000000000..6c0d9c363 --- /dev/null +++ b/sdk/base/lib/osBindings/tunnel/NetworkInterfaceType.ts @@ -0,0 +1,8 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
+ +export type NetworkInterfaceType = + | 'ethernet' + | 'wireless' + | 'bridge' + | 'wireguard' + | 'loopback' diff --git a/sdk/base/lib/osBindings/tunnel/PortForwardEntry.ts b/sdk/base/lib/osBindings/tunnel/PortForwardEntry.ts new file mode 100644 index 000000000..1619d3f40 --- /dev/null +++ b/sdk/base/lib/osBindings/tunnel/PortForwardEntry.ts @@ -0,0 +1,7 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +export type PortForwardEntry = { + target: string + label: string | null + enabled: boolean +} diff --git a/sdk/base/lib/osBindings/tunnel/PortForwards.ts b/sdk/base/lib/osBindings/tunnel/PortForwards.ts index aa9991452..f2d249dd7 100644 --- a/sdk/base/lib/osBindings/tunnel/PortForwards.ts +++ b/sdk/base/lib/osBindings/tunnel/PortForwards.ts @@ -1,3 +1,4 @@ // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. +import type { PortForwardEntry } from './PortForwardEntry' -export type PortForwards = { [key: string]: string } +export type PortForwards = { [key: string]: PortForwardEntry } diff --git a/sdk/base/lib/osBindings/tunnel/RemoveDeviceParams.ts b/sdk/base/lib/osBindings/tunnel/RemoveDeviceParams.ts new file mode 100644 index 000000000..5fb6bb42c --- /dev/null +++ b/sdk/base/lib/osBindings/tunnel/RemoveDeviceParams.ts @@ -0,0 +1,3 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +export type RemoveDeviceParams = { subnet: string; ip: string } diff --git a/sdk/base/lib/osBindings/tunnel/RemoveKeyParams.ts b/sdk/base/lib/osBindings/tunnel/RemoveKeyParams.ts new file mode 100644 index 000000000..cb1cf9049 --- /dev/null +++ b/sdk/base/lib/osBindings/tunnel/RemoveKeyParams.ts @@ -0,0 +1,4 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
+import type { AnyVerifyingKey } from './AnyVerifyingKey' + +export type RemoveKeyParams = { key: AnyVerifyingKey } diff --git a/sdk/base/lib/osBindings/tunnel/RemovePortForwardParams.ts b/sdk/base/lib/osBindings/tunnel/RemovePortForwardParams.ts new file mode 100644 index 000000000..2e85f5e77 --- /dev/null +++ b/sdk/base/lib/osBindings/tunnel/RemovePortForwardParams.ts @@ -0,0 +1,3 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +export type RemovePortForwardParams = { source: string } diff --git a/sdk/base/lib/osBindings/tunnel/SetPasswordParams.ts b/sdk/base/lib/osBindings/tunnel/SetPasswordParams.ts new file mode 100644 index 000000000..f92cb8e7a --- /dev/null +++ b/sdk/base/lib/osBindings/tunnel/SetPasswordParams.ts @@ -0,0 +1,3 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +export type SetPasswordParams = { password: string } diff --git a/sdk/base/lib/osBindings/tunnel/SetPortForwardEnabledParams.ts b/sdk/base/lib/osBindings/tunnel/SetPortForwardEnabledParams.ts new file mode 100644 index 000000000..51f923436 --- /dev/null +++ b/sdk/base/lib/osBindings/tunnel/SetPortForwardEnabledParams.ts @@ -0,0 +1,3 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +export type SetPortForwardEnabledParams = { source: string; enabled: boolean } diff --git a/sdk/base/lib/osBindings/tunnel/ShowConfigParams.ts b/sdk/base/lib/osBindings/tunnel/ShowConfigParams.ts new file mode 100644 index 000000000..3f7eecf25 --- /dev/null +++ b/sdk/base/lib/osBindings/tunnel/ShowConfigParams.ts @@ -0,0 +1,7 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
+ +export type ShowConfigParams = { + subnet: string + ip: string + wanAddr: string | null +} diff --git a/sdk/base/lib/osBindings/tunnel/SubnetParams.ts b/sdk/base/lib/osBindings/tunnel/SubnetParams.ts new file mode 100644 index 000000000..72981f8ae --- /dev/null +++ b/sdk/base/lib/osBindings/tunnel/SubnetParams.ts @@ -0,0 +1,3 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. + +export type SubnetParams = { subnet: string } diff --git a/sdk/base/lib/osBindings/tunnel/TunnelDatabase.ts b/sdk/base/lib/osBindings/tunnel/TunnelDatabase.ts index 2f484b5b7..74b8eacd9 100644 --- a/sdk/base/lib/osBindings/tunnel/TunnelDatabase.ts +++ b/sdk/base/lib/osBindings/tunnel/TunnelDatabase.ts @@ -1,5 +1,7 @@ // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. import type { AnyVerifyingKey } from './AnyVerifyingKey' +import type { GatewayId } from './GatewayId' +import type { NetworkInterfaceInfo } from './NetworkInterfaceInfo' import type { PortForwards } from './PortForwards' import type { Sessions } from './Sessions' import type { SignerInfo } from './SignerInfo' @@ -11,7 +13,7 @@ export type TunnelDatabase = { sessions: Sessions password: string | null authPubkeys: { [key: AnyVerifyingKey]: SignerInfo } - gateways: { [key: AnyVerifyingKey]: SignerInfo } + gateways: { [key: GatewayId]: NetworkInterfaceInfo } wg: WgServer portForwards: PortForwards } diff --git a/sdk/base/lib/osBindings/tunnel/UpdatePortForwardLabelParams.ts b/sdk/base/lib/osBindings/tunnel/UpdatePortForwardLabelParams.ts new file mode 100644 index 000000000..1697a1250 --- /dev/null +++ b/sdk/base/lib/osBindings/tunnel/UpdatePortForwardLabelParams.ts @@ -0,0 +1,6 @@ +// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
+ +export type UpdatePortForwardLabelParams = { + source: string + label: string | null +} diff --git a/sdk/base/lib/osBindings/tunnel/index.ts b/sdk/base/lib/osBindings/tunnel/index.ts new file mode 100644 index 000000000..7c92639bb --- /dev/null +++ b/sdk/base/lib/osBindings/tunnel/index.ts @@ -0,0 +1,35 @@ +export { AddDeviceParams } from './AddDeviceParams' +export { AddKeyParams } from './AddKeyParams' +export { AddPortForwardParams } from './AddPortForwardParams' +export { AddSubnetParams } from './AddSubnetParams' +export { AnyVerifyingKey } from './AnyVerifyingKey' +export { Base64 } from './Base64' +export { GatewayId } from './GatewayId' +export { GatewayType } from './GatewayType' +export { IpInfo } from './IpInfo' +export { ListDevicesParams } from './ListDevicesParams' +export { NetworkInterfaceInfo } from './NetworkInterfaceInfo' +export { NetworkInterfaceType } from './NetworkInterfaceType' +export { Pem } from './Pem' +export { PortForwardEntry } from './PortForwardEntry' +export { PortForwards } from './PortForwards' +export { RemoveDeviceParams } from './RemoveDeviceParams' +export { RemoveKeyParams } from './RemoveKeyParams' +export { RemovePortForwardParams } from './RemovePortForwardParams' +export { Sessions } from './Sessions' +export { Session } from './Session' +export { SetPasswordParams } from './SetPasswordParams' +export { SetPortForwardEnabledParams } from './SetPortForwardEnabledParams' +export { ShowConfigParams } from './ShowConfigParams' +export { SignerInfo } from './SignerInfo' +export { SubnetParams } from './SubnetParams' +export { TunnelCertData } from './TunnelCertData' +export { TunnelDatabase } from './TunnelDatabase' +export { TunnelUpdateResult } from './TunnelUpdateResult' +export { UpdatePortForwardLabelParams } from './UpdatePortForwardLabelParams' +export { WebserverInfo } from './WebserverInfo' +export { WgConfig } from './WgConfig' +export { WgServer } from './WgServer' +export { WgSubnetClients } from 
'./WgSubnetClients' +export { WgSubnetConfig } from './WgSubnetConfig' +export { WgSubnetMap } from './WgSubnetMap' diff --git a/sdk/base/lib/util/GetContainerIp.ts b/sdk/base/lib/util/GetContainerIp.ts new file mode 100644 index 000000000..7fb2cee2f --- /dev/null +++ b/sdk/base/lib/util/GetContainerIp.ts @@ -0,0 +1,18 @@ +import { Effects } from '../Effects' +import { PackageId } from '../osBindings' +import { Watchable } from './Watchable' + +export class GetContainerIp extends Watchable { + protected readonly label = 'GetContainerIp' + + constructor( + effects: Effects, + readonly opts: { packageId?: PackageId } = {}, + ) { + super(effects) + } + + protected fetch(callback?: () => void) { + return this.effects.getContainerIp({ ...this.opts, callback }) + } +} diff --git a/sdk/base/lib/util/GetHostInfo.ts b/sdk/base/lib/util/GetHostInfo.ts new file mode 100644 index 000000000..bd822781c --- /dev/null +++ b/sdk/base/lib/util/GetHostInfo.ts @@ -0,0 +1,18 @@ +import { Effects } from '../Effects' +import { Host, HostId, PackageId } from '../osBindings' +import { Watchable } from './Watchable' + +export class GetHostInfo extends Watchable { + protected readonly label = 'GetHostInfo' + + constructor( + effects: Effects, + readonly opts: { hostId: HostId; packageId?: PackageId }, + ) { + super(effects) + } + + protected fetch(callback?: () => void) { + return this.effects.getHostInfo({ ...this.opts, callback }) + } +} diff --git a/sdk/base/lib/util/GetOutboundGateway.ts b/sdk/base/lib/util/GetOutboundGateway.ts index 460bb8b90..7d8d70880 100644 --- a/sdk/base/lib/util/GetOutboundGateway.ts +++ b/sdk/base/lib/util/GetOutboundGateway.ts @@ -1,106 +1,14 @@ import { Effects } from '../Effects' -import { AbortedError } from './AbortedError' -import { DropGenerator, DropPromise } from './Drop' +import { Watchable } from './Watchable' -export class GetOutboundGateway { - constructor(readonly effects: Effects) {} +export class GetOutboundGateway extends Watchable { + protected 
readonly label = 'GetOutboundGateway' - /** - * Returns the effective outbound gateway. Reruns the context from which it has been called if the underlying value changes - */ - const() { - return this.effects.getOutboundGateway({ - callback: - this.effects.constRetry && - (() => this.effects.constRetry && this.effects.constRetry()), - }) - } - /** - * Returns the effective outbound gateway. Does nothing if the value changes - */ - once() { - return this.effects.getOutboundGateway({}) + constructor(effects: Effects) { + super(effects) } - private async *watchGen(abort?: AbortSignal) { - const resolveCell = { resolve: () => {} } - this.effects.onLeaveContext(() => { - resolveCell.resolve() - }) - abort?.addEventListener('abort', () => resolveCell.resolve()) - while (this.effects.isInContext && !abort?.aborted) { - let callback: () => void = () => {} - const waitForNext = new Promise((resolve) => { - callback = resolve - resolveCell.resolve = resolve - }) - yield await this.effects.getOutboundGateway({ - callback: () => callback(), - }) - await waitForNext - } - return new Promise((_, rej) => rej(new AbortedError())) - } - - /** - * Watches the effective outbound gateway. Returns an async iterator that yields whenever the value changes - */ - watch(abort?: AbortSignal): AsyncGenerator { - const ctrl = new AbortController() - abort?.addEventListener('abort', () => ctrl.abort()) - return DropGenerator.of(this.watchGen(ctrl.signal), () => ctrl.abort()) - } - - /** - * Watches the effective outbound gateway. 
Takes a custom callback function to run whenever the value changes - */ - onChange( - callback: ( - value: string, - error?: Error, - ) => { cancel: boolean } | Promise<{ cancel: boolean }>, - ) { - ;(async () => { - const ctrl = new AbortController() - for await (const value of this.watch(ctrl.signal)) { - try { - const res = await callback(value) - if (res.cancel) { - ctrl.abort() - break - } - } catch (e) { - console.error( - 'callback function threw an error @ GetOutboundGateway.onChange', - e, - ) - } - } - })() - .catch((e) => callback('', e)) - .catch((e) => - console.error( - 'callback function threw an error @ GetOutboundGateway.onChange', - e, - ), - ) - } - - /** - * Watches the effective outbound gateway. Returns when the predicate is true - */ - waitFor(pred: (value: string) => boolean): Promise { - const ctrl = new AbortController() - return DropPromise.of( - Promise.resolve().then(async () => { - for await (const next of this.watchGen(ctrl.signal)) { - if (pred(next)) { - return next - } - } - return '' - }), - () => ctrl.abort(), - ) + protected fetch(callback?: () => void) { + return this.effects.getOutboundGateway({ callback }) } } diff --git a/sdk/base/lib/util/GetServiceManifest.ts b/sdk/base/lib/util/GetServiceManifest.ts new file mode 100644 index 000000000..09075f27b --- /dev/null +++ b/sdk/base/lib/util/GetServiceManifest.ts @@ -0,0 +1,52 @@ +import { Effects } from '../Effects' +import { Manifest, PackageId } from '../osBindings' +import { deepEqual } from './deepEqual' +import { Watchable } from './Watchable' + +export class GetServiceManifest extends Watchable< + Manifest | null, + Mapped +> { + protected readonly label = 'GetServiceManifest' + + constructor( + effects: Effects, + readonly opts: { packageId: PackageId }, + options?: { + map?: (value: Manifest | null) => Mapped + eq?: (a: Mapped, b: Mapped) => boolean + }, + ) { + super(effects, options) + } + + protected fetch(callback?: () => void) { + return 
this.effects.getServiceManifest({ ...this.opts, callback }) + } +} + +export function getServiceManifest( + effects: Effects, + packageId: PackageId, +): GetServiceManifest +export function getServiceManifest( + effects: Effects, + packageId: PackageId, + map: (manifest: Manifest | null) => Mapped, + eq?: (a: Mapped, b: Mapped) => boolean, +): GetServiceManifest +export function getServiceManifest( + effects: Effects, + packageId: PackageId, + map?: (manifest: Manifest | null) => Mapped, + eq?: (a: Mapped, b: Mapped) => boolean, +): GetServiceManifest { + return new GetServiceManifest( + effects, + { packageId }, + { + map: map ?? ((a) => a as Mapped), + eq: eq ?? ((a, b) => deepEqual(a, b)), + }, + ) +} diff --git a/sdk/base/lib/util/GetSslCertificate.ts b/sdk/base/lib/util/GetSslCertificate.ts new file mode 100644 index 000000000..72daee306 --- /dev/null +++ b/sdk/base/lib/util/GetSslCertificate.ts @@ -0,0 +1,20 @@ +import { Effects } from '../Effects' +import { Watchable } from './Watchable' + +export class GetSslCertificate extends Watchable<[string, string, string]> { + protected readonly label = 'GetSslCertificate' + + constructor( + effects: Effects, + readonly opts: { + hostnames: string[] + algorithm?: 'ecdsa' | 'ed25519' + }, + ) { + super(effects) + } + + protected fetch(callback?: () => void) { + return this.effects.getSslCertificate({ ...this.opts, callback }) + } +} diff --git a/sdk/base/lib/util/GetStatus.ts b/sdk/base/lib/util/GetStatus.ts new file mode 100644 index 000000000..c1d3df38a --- /dev/null +++ b/sdk/base/lib/util/GetStatus.ts @@ -0,0 +1,18 @@ +import { Effects } from '../Effects' +import { PackageId, StatusInfo } from '../osBindings' +import { Watchable } from './Watchable' + +export class GetStatus extends Watchable { + protected readonly label = 'GetStatus' + + constructor( + effects: Effects, + readonly opts: { packageId?: PackageId } = {}, + ) { + super(effects) + } + + protected fetch(callback?: () => void) { + return 
this.effects.getStatus({ ...this.opts, callback }) + } +} diff --git a/sdk/base/lib/util/GetSystemSmtp.ts b/sdk/base/lib/util/GetSystemSmtp.ts index 03cedba6f..2da804437 100644 --- a/sdk/base/lib/util/GetSystemSmtp.ts +++ b/sdk/base/lib/util/GetSystemSmtp.ts @@ -1,111 +1,15 @@ import { Effects } from '../Effects' import * as T from '../types' -import { AbortedError } from './AbortedError' -import { DropGenerator, DropPromise } from './Drop' +import { Watchable } from './Watchable' -export class GetSystemSmtp { - constructor(readonly effects: Effects) {} +export class GetSystemSmtp extends Watchable { + protected readonly label = 'GetSystemSmtp' - /** - * Returns the system SMTP credentials. Reruns the context from which it has been called if the underlying value changes - */ - const() { - return this.effects.getSystemSmtp({ - callback: - this.effects.constRetry && - (() => this.effects.constRetry && this.effects.constRetry()), - }) - } - /** - * Returns the system SMTP credentials. Does nothing if the credentials change - */ - once() { - return this.effects.getSystemSmtp({}) + constructor(effects: Effects) { + super(effects) } - private async *watchGen(abort?: AbortSignal) { - const resolveCell = { resolve: () => {} } - this.effects.onLeaveContext(() => { - resolveCell.resolve() - }) - abort?.addEventListener('abort', () => resolveCell.resolve()) - while (this.effects.isInContext && !abort?.aborted) { - let callback: () => void = () => {} - const waitForNext = new Promise((resolve) => { - callback = resolve - resolveCell.resolve = resolve - }) - yield await this.effects.getSystemSmtp({ - callback: () => callback(), - }) - await waitForNext - } - return new Promise((_, rej) => rej(new AbortedError())) - } - - /** - * Watches the system SMTP credentials. 
Returns an async iterator that yields whenever the value changes - */ - watch( - abort?: AbortSignal, - ): AsyncGenerator { - const ctrl = new AbortController() - abort?.addEventListener('abort', () => ctrl.abort()) - return DropGenerator.of(this.watchGen(ctrl.signal), () => ctrl.abort()) - } - - /** - * Watches the system SMTP credentials. Takes a custom callback function to run whenever the credentials change - */ - onChange( - callback: ( - value: T.SmtpValue | null, - error?: Error, - ) => { cancel: boolean } | Promise<{ cancel: boolean }>, - ) { - ;(async () => { - const ctrl = new AbortController() - for await (const value of this.watch(ctrl.signal)) { - try { - const res = await callback(value) - if (res.cancel) { - ctrl.abort() - break - } - } catch (e) { - console.error( - 'callback function threw an error @ GetSystemSmtp.onChange', - e, - ) - } - } - })() - .catch((e) => callback(null, e)) - .catch((e) => - console.error( - 'callback function threw an error @ GetSystemSmtp.onChange', - e, - ), - ) - } - - /** - * Watches the system SMTP credentials. 
Returns when the predicate is true - */ - waitFor( - pred: (value: T.SmtpValue | null) => boolean, - ): Promise { - const ctrl = new AbortController() - return DropPromise.of( - Promise.resolve().then(async () => { - for await (const next of this.watchGen(ctrl.signal)) { - if (pred(next)) { - return next - } - } - return null - }), - () => ctrl.abort(), - ) + protected fetch(callback?: () => void) { + return this.effects.getSystemSmtp({ callback }) } } diff --git a/sdk/base/lib/util/Watchable.ts b/sdk/base/lib/util/Watchable.ts new file mode 100644 index 000000000..b65d24a2b --- /dev/null +++ b/sdk/base/lib/util/Watchable.ts @@ -0,0 +1,178 @@ +import { Effects } from '../Effects' +import { AbortedError } from './AbortedError' +import { deepEqual } from './deepEqual' +import { DropGenerator, DropPromise } from './Drop' + +export abstract class Watchable { + protected readonly mapFn: (value: Raw) => Mapped + protected readonly eqFn: (a: Mapped, b: Mapped) => boolean + + constructor( + readonly effects: Effects, + options?: { + map?: (value: Raw) => Mapped + eq?: (a: Mapped, b: Mapped) => boolean + }, + ) { + this.mapFn = options?.map ?? ((a) => a as unknown as Mapped) + this.eqFn = options?.eq ?? ((a, b) => deepEqual(a, b)) + } + + /** + * Fetch the current value, optionally registering a callback for change notification. + * The callback should be invoked when the underlying data changes. + */ + protected abstract fetch(callback?: () => void): Promise + protected abstract readonly label: string + + /** + * Produce a stream of raw values. Default implementation uses fetch() with + * effects callback in a loop. Override for custom subscription mechanisms + * (e.g. fs.watch). 
+ */ + protected async *produce(abort: AbortSignal): AsyncGenerator { + const resolveCell = { resolve: () => {} } + this.effects.onLeaveContext(() => { + resolveCell.resolve() + }) + abort.addEventListener('abort', () => resolveCell.resolve()) + while (this.effects.isInContext && !abort.aborted) { + let callback: () => void = () => {} + const waitForNext = new Promise((resolve) => { + callback = resolve + resolveCell.resolve = resolve + }) + yield await this.fetch(() => callback()) + await waitForNext + } + } + + /** + * Lifecycle hook called when const() registers a subscription. + * Return a cleanup function to be called when the subscription ends. + * Override for side effects like FileHelper's consts tracking. + */ + protected onConstRegistered(_value: Mapped): (() => void) | void {} + + /** + * Internal generator that maps raw values and deduplicates using eq. + */ + private async *watchGen( + abort: AbortSignal, + ): AsyncGenerator { + let prev: { value: Mapped } | null = null + for await (const raw of this.produce(abort)) { + if (abort.aborted) return + const mapped = this.mapFn(raw) + if (!prev || !this.eqFn(prev.value, mapped)) { + prev = { value: mapped } + yield mapped + } + } + } + + /** + * Returns the value. Reruns the context from which it has been called if the underlying value changes + */ + async const(): Promise { + const abort = new AbortController() + const gen = this.watchGen(abort.signal) + const res = await gen.next() + const value = res.value as Mapped + if (this.effects.constRetry) { + const constRetry = this.effects.constRetry + const cleanup = this.onConstRegistered(value) + gen.next().then( + (a) => { + abort.abort() + cleanup?.() + if (!a.done) { + constRetry() + } + }, + () => { + abort.abort() + cleanup?.() + }, + ) + } else { + abort.abort() + } + return value + } + + /** + * Returns the value. Does nothing if the value changes + */ + async once(): Promise { + return this.mapFn(await this.fetch()) + } + + /** + * Watches the value. 
Returns an async iterator that yields whenever the value changes + */ + watch(abort?: AbortSignal): AsyncGenerator { + const ctrl = new AbortController() + abort?.addEventListener('abort', () => ctrl.abort()) + return DropGenerator.of( + (async function* (gen): AsyncGenerator { + yield* gen + throw new AbortedError() + })(this.watchGen(ctrl.signal)), + () => ctrl.abort(), + ) + } + + /** + * Watches the value. Takes a custom callback function to run whenever the value changes + */ + onChange( + callback: ( + value: Mapped | undefined, + error?: Error, + ) => { cancel: boolean } | Promise<{ cancel: boolean }>, + ) { + ;(async () => { + const ctrl = new AbortController() + for await (const value of this.watchGen(ctrl.signal)) { + try { + const res = await callback(value) + if (res.cancel) { + ctrl.abort() + break + } + } catch (e) { + console.error( + `callback function threw an error @ ${this.label}.onChange`, + e, + ) + } + } + })() + .catch((e) => callback(undefined, e)) + .catch((e) => + console.error( + `callback function threw an error @ ${this.label}.onChange`, + e, + ), + ) + } + + /** + * Watches the value. 
Returns when the predicate is true + */ + waitFor(pred: (value: Mapped) => boolean): Promise { + const ctrl = new AbortController() + return DropPromise.of( + Promise.resolve().then(async () => { + for await (const next of this.watchGen(ctrl.signal)) { + if (pred(next)) { + return next + } + } + throw new AbortedError() + }), + () => ctrl.abort(), + ) + } +} diff --git a/sdk/base/lib/util/getServiceInterface.ts b/sdk/base/lib/util/getServiceInterface.ts index e0cedc529..b5b7af325 100644 --- a/sdk/base/lib/util/getServiceInterface.ts +++ b/sdk/base/lib/util/getServiceInterface.ts @@ -8,11 +8,10 @@ import { HostnameInfo, } from '../types' import { Effects } from '../Effects' -import { AbortedError } from './AbortedError' -import { DropGenerator, DropPromise } from './Drop' import { IpAddress, IPV6_LINK_LOCAL } from './ip' import { deepEqual } from './deepEqual' import { once } from './once' +import { Watchable } from './Watchable' export type UrlString = string export type HostId = string @@ -36,6 +35,7 @@ export const getHostname = (url: string): Hostname | null => { * - `'ipv6'` — IPv6 addresses only * - `'localhost'` — loopback addresses (`localhost`, `127.0.0.1`, `::1`) * - `'link-local'` — IPv6 link-local addresses (fe80::/10) + * - `'bridge'` — The LXC bridge interface * - `'plugin'` — hostnames provided by a plugin package */ type FilterKinds = @@ -46,6 +46,7 @@ type FilterKinds = | 'ipv6' | 'localhost' | 'link-local' + | 'bridge' | 'plugin' /** @@ -120,7 +121,11 @@ type FilterReturnTy = F extends { const nonLocalFilter = { exclude: { - kind: ['localhost', 'link-local'] as ('localhost' | 'link-local')[], + kind: ['localhost', 'link-local', 'bridge'] as ( + | 'localhost' + | 'link-local' + | 'bridge' + )[], }, } as const const publicFilter = { @@ -284,6 +289,9 @@ function filterRec( (kind.has('link-local') && h.metadata.kind === 'ipv6' && IPV6_LINK_LOCAL.contains(IpAddress.parse(h.hostname))) || + (kind.has('bridge') && + h.metadata.kind === 'ipv4' && + 
h.metadata.gateway === 'lxcbr0') || (kind.has('plugin') && h.metadata.kind === 'plugin')), ) } @@ -431,136 +439,29 @@ const makeInterfaceFilled = async ({ return interfaceFilled } -export class GetServiceInterface { +export class GetServiceInterface< + Mapped = ServiceInterfaceFilled | null, +> extends Watchable { + protected readonly label = 'GetServiceInterface' + constructor( - readonly effects: Effects, + effects: Effects, readonly opts: { id: string; packageId?: string }, - readonly map: (interfaces: ServiceInterfaceFilled | null) => Mapped, - readonly eq: (a: Mapped, b: Mapped) => boolean, - ) {} - - /** - * Returns the requested service interface. Reruns the context from which it has been called if the underlying value changes - */ - async const() { - let abort = new AbortController() - const watch = this.watch(abort.signal) - const res = await watch.next() - if (this.effects.constRetry) { - watch - .next() - .then(() => { - abort.abort() - this.effects.constRetry && this.effects.constRetry() - }) - .catch() - } - return res.value - } - /** - * Returns the requested service interface. 
Does nothing if the value changes - */ - async once() { - const { id, packageId } = this.opts - const interfaceFilled = await makeInterfaceFilled({ - effects: this.effects, - id, - packageId, - }) - - return this.map(interfaceFilled) - } - - private async *watchGen(abort?: AbortSignal) { - let prev = null as { value: Mapped } | null - const { id, packageId } = this.opts - const resolveCell = { resolve: () => {} } - this.effects.onLeaveContext(() => { - resolveCell.resolve() - }) - abort?.addEventListener('abort', () => resolveCell.resolve()) - while (this.effects.isInContext && !abort?.aborted) { - let callback: () => void = () => {} - const waitForNext = new Promise((resolve) => { - callback = resolve - resolveCell.resolve = resolve - }) - const next = this.map( - await makeInterfaceFilled({ - effects: this.effects, - id, - packageId, - callback, - }), - ) - if (!prev || !this.eq(prev.value, next)) { - yield next - } - await waitForNext - } - return new Promise((_, rej) => rej(new AbortedError())) - } - - /** - * Watches the requested service interface. Returns an async iterator that yields whenever the value changes - */ - watch(abort?: AbortSignal): AsyncGenerator { - const ctrl = new AbortController() - abort?.addEventListener('abort', () => ctrl.abort()) - return DropGenerator.of(this.watchGen(ctrl.signal), () => ctrl.abort()) - } - - /** - * Watches the requested service interface. 
Takes a custom callback function to run whenever the value changes - */ - onChange( - callback: ( - value: Mapped | null, - error?: Error, - ) => { cancel: boolean } | Promise<{ cancel: boolean }>, + options?: { + map?: (value: ServiceInterfaceFilled | null) => Mapped + eq?: (a: Mapped, b: Mapped) => boolean + }, ) { - ;(async () => { - const ctrl = new AbortController() - for await (const value of this.watch(ctrl.signal)) { - try { - const res = await callback(value) - if (res.cancel) { - ctrl.abort() - break - } - } catch (e) { - console.error( - 'callback function threw an error @ GetServiceInterface.onChange', - e, - ) - } - } - })() - .catch((e) => callback(null, e)) - .catch((e) => - console.error( - 'callback function threw an error @ GetServiceInterface.onChange', - e, - ), - ) + super(effects, options) } - /** - * Watches the requested service interface. Returns when the predicate is true - */ - waitFor(pred: (value: Mapped) => boolean): Promise { - const ctrl = new AbortController() - return DropPromise.of( - Promise.resolve().then(async () => { - for await (const next of this.watchGen(ctrl.signal)) { - if (pred(next)) { - return next - } - } - throw new Error('context left before predicate passed') - }), - () => ctrl.abort(), - ) + protected fetch(callback?: () => void) { + return makeInterfaceFilled({ + effects: this.effects, + id: this.opts.id, + packageId: this.opts.packageId, + callback, + }) } } @@ -580,11 +481,13 @@ export function getOwnServiceInterface( map?: (interfaces: ServiceInterfaceFilled | null) => Mapped, eq?: (a: Mapped, b: Mapped) => boolean, ): GetServiceInterface { - return new GetServiceInterface( + return new GetServiceInterface( effects, { id }, - map ?? ((a) => a as Mapped), - eq ?? ((a, b) => deepEqual(a, b)), + { + map: map ?? ((a) => a as Mapped), + eq: eq ?? 
((a, b) => deepEqual(a, b)), + }, ) } @@ -604,10 +507,8 @@ export function getServiceInterface( map?: (interfaces: ServiceInterfaceFilled | null) => Mapped, eq?: (a: Mapped, b: Mapped) => boolean, ): GetServiceInterface { - return new GetServiceInterface( - effects, - opts, - map ?? ((a) => a as Mapped), - eq ?? ((a, b) => deepEqual(a, b)), - ) + return new GetServiceInterface(effects, opts, { + map: map ?? ((a) => a as Mapped), + eq: eq ?? ((a, b) => deepEqual(a, b)), + }) } diff --git a/sdk/base/lib/util/getServiceInterfaces.ts b/sdk/base/lib/util/getServiceInterfaces.ts index e6a745d56..a11199927 100644 --- a/sdk/base/lib/util/getServiceInterfaces.ts +++ b/sdk/base/lib/util/getServiceInterfaces.ts @@ -1,9 +1,8 @@ import { Effects } from '../Effects' import { PackageId } from '../osBindings' -import { AbortedError } from './AbortedError' import { deepEqual } from './deepEqual' -import { DropGenerator, DropPromise } from './Drop' import { ServiceInterfaceFilled, filledAddress } from './getServiceInterface' +import { Watchable } from './Watchable' const makeManyInterfaceFilled = async ({ effects, @@ -40,135 +39,28 @@ const makeManyInterfaceFilled = async ({ return serviceInterfacesFilled } -export class GetServiceInterfaces { +export class GetServiceInterfaces< + Mapped = ServiceInterfaceFilled[], +> extends Watchable { + protected readonly label = 'GetServiceInterfaces' + constructor( - readonly effects: Effects, + effects: Effects, readonly opts: { packageId?: string }, - readonly map: (interfaces: ServiceInterfaceFilled[]) => Mapped, - readonly eq: (a: Mapped, b: Mapped) => boolean, - ) {} - - /** - * Returns the service interfaces for the package. 
Reruns the context from which it has been called if the underlying value changes - */ - async const() { - let abort = new AbortController() - const watch = this.watch(abort.signal) - const res = await watch.next() - if (this.effects.constRetry) { - watch - .next() - .then(() => { - abort.abort() - this.effects.constRetry && this.effects.constRetry() - }) - .catch() - } - return res.value - } - /** - * Returns the service interfaces for the package. Does nothing if the value changes - */ - async once() { - const { packageId } = this.opts - const interfaceFilled: ServiceInterfaceFilled[] = - await makeManyInterfaceFilled({ - effects: this.effects, - packageId, - }) - - return this.map(interfaceFilled) - } - - private async *watchGen(abort?: AbortSignal) { - let prev = null as { value: Mapped } | null - const { packageId } = this.opts - const resolveCell = { resolve: () => {} } - this.effects.onLeaveContext(() => { - resolveCell.resolve() - }) - abort?.addEventListener('abort', () => resolveCell.resolve()) - while (this.effects.isInContext && !abort?.aborted) { - let callback: () => void = () => {} - const waitForNext = new Promise((resolve) => { - callback = resolve - resolveCell.resolve = resolve - }) - const next = this.map( - await makeManyInterfaceFilled({ - effects: this.effects, - packageId, - callback, - }), - ) - if (!prev || !this.eq(prev.value, next)) { - yield next - } - await waitForNext - } - return new Promise((_, rej) => rej(new AbortedError())) - } - - /** - * Watches the service interfaces for the package. Returns an async iterator that yields whenever the value changes - */ - watch(abort?: AbortSignal): AsyncGenerator { - const ctrl = new AbortController() - abort?.addEventListener('abort', () => ctrl.abort()) - return DropGenerator.of(this.watchGen(ctrl.signal), () => ctrl.abort()) - } - - /** - * Watches the service interfaces for the package. 
Takes a custom callback function to run whenever the value changes - */ - onChange( - callback: ( - value: Mapped | null, - error?: Error, - ) => { cancel: boolean } | Promise<{ cancel: boolean }>, + options?: { + map?: (value: ServiceInterfaceFilled[]) => Mapped + eq?: (a: Mapped, b: Mapped) => boolean + }, ) { - ;(async () => { - const ctrl = new AbortController() - for await (const value of this.watch(ctrl.signal)) { - try { - const res = await callback(value) - if (res.cancel) { - ctrl.abort() - break - } - } catch (e) { - console.error( - 'callback function threw an error @ GetServiceInterfaces.onChange', - e, - ) - } - } - })() - .catch((e) => callback(null, e)) - .catch((e) => - console.error( - 'callback function threw an error @ GetServiceInterfaces.onChange', - e, - ), - ) + super(effects, options) } - /** - * Watches the service interfaces for the package. Returns when the predicate is true - */ - waitFor(pred: (value: Mapped) => boolean): Promise { - const ctrl = new AbortController() - return DropPromise.of( - Promise.resolve().then(async () => { - for await (const next of this.watchGen(ctrl.signal)) { - if (pred(next)) { - return next - } - } - throw new Error('context left before predicate passed') - }), - () => ctrl.abort(), - ) + protected fetch(callback?: () => void) { + return makeManyInterfaceFilled({ + effects: this.effects, + packageId: this.opts.packageId, + callback, + }) } } @@ -183,11 +75,13 @@ export function getOwnServiceInterfaces( map?: (interfaces: ServiceInterfaceFilled[]) => Mapped, eq?: (a: Mapped, b: Mapped) => boolean, ): GetServiceInterfaces { - return new GetServiceInterfaces( + return new GetServiceInterfaces( effects, {}, - map ?? ((a) => a as Mapped), - eq ?? ((a, b) => deepEqual(a, b)), + { + map: map ?? ((a) => a as Mapped), + eq: eq ?? 
((a, b) => deepEqual(a, b)), + }, ) } @@ -207,10 +101,8 @@ export function getServiceInterfaces( map?: (interfaces: ServiceInterfaceFilled[]) => Mapped, eq?: (a: Mapped, b: Mapped) => boolean, ): GetServiceInterfaces { - return new GetServiceInterfaces( - effects, - opts, - map ?? ((a) => a as Mapped), - eq ?? ((a, b) => deepEqual(a, b)), - ) + return new GetServiceInterfaces(effects, opts, { + map: map ?? ((a) => a as Mapped), + eq: eq ?? ((a, b) => deepEqual(a, b)), + }) } diff --git a/sdk/base/lib/util/index.ts b/sdk/base/lib/util/index.ts index e156cb97b..22cf037ba 100644 --- a/sdk/base/lib/util/index.ts +++ b/sdk/base/lib/util/index.ts @@ -15,7 +15,13 @@ export { once } from './once' export { asError } from './asError' export * as Patterns from './patterns' export * from './typeHelpers' +export { Watchable } from './Watchable' +export { GetContainerIp } from './GetContainerIp' +export { GetHostInfo } from './GetHostInfo' export { GetOutboundGateway } from './GetOutboundGateway' +export { GetServiceManifest, getServiceManifest } from './GetServiceManifest' +export { GetSslCertificate } from './GetSslCertificate' +export { GetStatus } from './GetStatus' export { GetSystemSmtp } from './GetSystemSmtp' export { Graph, Vertex } from './graph' export { inMs } from './inMs' diff --git a/sdk/package/LICENSE b/sdk/package/LICENSE deleted file mode 100644 index 793257b96..000000000 --- a/sdk/package/LICENSE +++ /dev/null @@ -1,21 +0,0 @@ -MIT License - -Copyright (c) 2022 Start9 Labs - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission 
notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. diff --git a/sdk/package/README.md b/sdk/package/README.md deleted file mode 100644 index d51b25b58..000000000 --- a/sdk/package/README.md +++ /dev/null @@ -1,18 +0,0 @@ -# Start SDK - -## Config Conversion - -- Copy the old config json (from the getConfig.ts) -- Install the start-sdk with `npm i` -- paste the config into makeOutput.ts::oldSpecToBuilder (second param) -- Make the third param - -```ts - { - StartSdk: "start-sdk/lib", - } -``` - -- run the script `npm run buildOutput` to make the output.ts -- Copy this whole file into startos/procedures/config/spec.ts -- Fix all the TODO diff --git a/sdk/package/lib/StartSdk.ts b/sdk/package/lib/StartSdk.ts index 9450e6b8e..6bf925141 100644 --- a/sdk/package/lib/StartSdk.ts +++ b/sdk/package/lib/StartSdk.ts @@ -59,7 +59,8 @@ import { setupOnInit, setupOnUninit, } from '../../base/lib/inits' -import { DropGenerator } from '../../base/lib/util/Drop' +import { GetContainerIp } from '../../base/lib/util/GetContainerIp' +import { GetStatus } from '../../base/lib/util/GetStatus' import { getOwnServiceInterface, ServiceInterfaceFilled, @@ -68,7 +69,7 @@ import { getOwnServiceInterfaces } from '../../base/lib/util/getServiceInterface import { Volumes, createVolumes } from './util/Volume' /** The minimum StartOS version required by this SDK release */ -export const OSVersion = testTypeVersion('0.4.0-alpha.20') +export const OSVersion = testTypeVersion('0.4.0-alpha.21') // 
prettier-ignore type AnyNeverCond = @@ -257,90 +258,7 @@ export class StartSdk { Parameters[0], 'callback' > = {}, - ) => { - async function* watch(abort?: AbortSignal) { - const resolveCell = { resolve: () => {} } - effects.onLeaveContext(() => { - resolveCell.resolve() - }) - abort?.addEventListener('abort', () => resolveCell.resolve()) - while (effects.isInContext && !abort?.aborted) { - let callback: () => void = () => {} - const waitForNext = new Promise((resolve) => { - callback = resolve - resolveCell.resolve = resolve - }) - yield await effects.getContainerIp({ ...options, callback }) - await waitForNext - } - } - return { - const: () => - effects.getContainerIp({ - ...options, - callback: - effects.constRetry && - (() => effects.constRetry && effects.constRetry()), - }), - once: () => effects.getContainerIp(options), - watch: (abort?: AbortSignal) => { - const ctrl = new AbortController() - abort?.addEventListener('abort', () => ctrl.abort()) - return DropGenerator.of(watch(ctrl.signal), () => ctrl.abort()) - }, - onChange: ( - callback: ( - value: string | null, - error?: Error, - ) => { cancel: boolean } | Promise<{ cancel: boolean }>, - ) => { - ;(async () => { - const ctrl = new AbortController() - for await (const value of watch(ctrl.signal)) { - try { - const res = await callback(value) - if (res.cancel) { - ctrl.abort() - break - } - } catch (e) { - console.error( - 'callback function threw an error @ getContainerIp.onChange', - e, - ) - } - } - })() - .catch((e) => callback(null, e)) - .catch((e) => - console.error( - 'callback function threw an error @ getContainerIp.onChange', - e, - ), - ) - }, - waitFor: async (pred: (value: string | null) => boolean) => { - const resolveCell = { resolve: () => {} } - effects.onLeaveContext(() => { - resolveCell.resolve() - }) - while (effects.isInContext) { - let callback: () => void = () => {} - const waitForNext = new Promise((resolve) => { - callback = resolve - resolveCell.resolve = resolve - }) - const 
res = await effects.getContainerIp({ ...options, callback }) - if (pred(res)) { - resolveCell.resolve() - return res - } - await waitForNext - } - return null - }, - } - }, + ) => new GetContainerIp(effects, options), /** * Get the service's current status with reactive subscription support. @@ -355,90 +273,7 @@ export class StartSdk { getStatus: ( effects: T.Effects, options: Omit[0], 'callback'> = {}, - ) => { - async function* watch(abort?: AbortSignal) { - const resolveCell = { resolve: () => {} } - effects.onLeaveContext(() => { - resolveCell.resolve() - }) - abort?.addEventListener('abort', () => resolveCell.resolve()) - while (effects.isInContext && !abort?.aborted) { - let callback: () => void = () => {} - const waitForNext = new Promise((resolve) => { - callback = resolve - resolveCell.resolve = resolve - }) - yield await effects.getStatus({ ...options, callback }) - await waitForNext - } - } - return { - const: () => - effects.getStatus({ - ...options, - callback: - effects.constRetry && - (() => effects.constRetry && effects.constRetry()), - }), - once: () => effects.getStatus(options), - watch: (abort?: AbortSignal) => { - const ctrl = new AbortController() - abort?.addEventListener('abort', () => ctrl.abort()) - return DropGenerator.of(watch(ctrl.signal), () => ctrl.abort()) - }, - onChange: ( - callback: ( - value: T.StatusInfo | null, - error?: Error, - ) => { cancel: boolean } | Promise<{ cancel: boolean }>, - ) => { - ;(async () => { - const ctrl = new AbortController() - for await (const value of watch(ctrl.signal)) { - try { - const res = await callback(value) - if (res.cancel) { - ctrl.abort() - break - } - } catch (e) { - console.error( - 'callback function threw an error @ getStatus.onChange', - e, - ) - } - } - })() - .catch((e) => callback(null, e)) - .catch((e) => - console.error( - 'callback function threw an error @ getStatus.onChange', - e, - ), - ) - }, - waitFor: async (pred: (value: T.StatusInfo | null) => boolean) => { - const 
resolveCell = { resolve: () => {} } - effects.onLeaveContext(() => { - resolveCell.resolve() - }) - while (effects.isInContext) { - let callback: () => void = () => {} - const waitForNext = new Promise((resolve) => { - callback = resolve - resolveCell.resolve = resolve - }) - const res = await effects.getStatus({ ...options, callback }) - if (pred(res)) { - resolveCell.resolve() - return res - } - await waitForNext - } - return null - }, - } - }, + ) => new GetStatus(effects, options), MultiHost: { /** @@ -646,7 +481,7 @@ export class StartSdk { effects: E, hostnames: string[], algorithm?: T.Algorithm, - ) => new GetSslCertificate(effects, hostnames, algorithm), + ) => new GetSslCertificate(effects, { hostnames, algorithm }), /** Retrieve the manifest of any installed service package by its ID */ getServiceManifest, healthCheck: { diff --git a/sdk/package/lib/util/GetServiceManifest.ts b/sdk/package/lib/util/GetServiceManifest.ts deleted file mode 100644 index 9f85570d2..000000000 --- a/sdk/package/lib/util/GetServiceManifest.ts +++ /dev/null @@ -1,156 +0,0 @@ -import { Effects } from '../../../base/lib/Effects' -import { Manifest, PackageId } from '../../../base/lib/osBindings' -import { AbortedError } from '../../../base/lib/util/AbortedError' -import { DropGenerator, DropPromise } from '../../../base/lib/util/Drop' -import { deepEqual } from '../../../base/lib/util/deepEqual' - -export class GetServiceManifest { - constructor( - readonly effects: Effects, - readonly packageId: PackageId, - readonly map: (manifest: Manifest | null) => Mapped, - readonly eq: (a: Mapped, b: Mapped) => boolean, - ) {} - - /** - * Returns the manifest of a service. 
Reruns the context from which it has been called if the underlying value changes - */ - async const() { - let abort = new AbortController() - const watch = this.watch(abort.signal) - const res = await watch.next() - if (this.effects.constRetry) { - watch - .next() - .then(() => { - abort.abort() - this.effects.constRetry && this.effects.constRetry() - }) - .catch() - } - return res.value - } - /** - * Returns the manifest of a service. Does nothing if it changes - */ - async once() { - const manifest = await this.effects.getServiceManifest({ - packageId: this.packageId, - }) - return this.map(manifest) - } - - private async *watchGen(abort?: AbortSignal) { - let prev = null as { value: Mapped } | null - const resolveCell = { resolve: () => {} } - this.effects.onLeaveContext(() => { - resolveCell.resolve() - }) - abort?.addEventListener('abort', () => resolveCell.resolve()) - while (this.effects.isInContext && !abort?.aborted) { - let callback: () => void = () => {} - const waitForNext = new Promise((resolve) => { - callback = resolve - resolveCell.resolve = resolve - }) - const next = this.map( - await this.effects.getServiceManifest({ - packageId: this.packageId, - callback: () => callback(), - }), - ) - if (!prev || !this.eq(prev.value, next)) { - prev = { value: next } - yield next - } - await waitForNext - } - return new Promise((_, rej) => rej(new AbortedError())) - } - - /** - * Watches the manifest of a service. Returns an async iterator that yields whenever the value changes - */ - watch(abort?: AbortSignal): AsyncGenerator { - const ctrl = new AbortController() - abort?.addEventListener('abort', () => ctrl.abort()) - return DropGenerator.of(this.watchGen(ctrl.signal), () => ctrl.abort()) - } - - /** - * Watches the manifest of a service. 
Takes a custom callback function to run whenever it changes - */ - onChange( - callback: ( - value: Mapped | null, - error?: Error, - ) => { cancel: boolean } | Promise<{ cancel: boolean }>, - ) { - ;(async () => { - const ctrl = new AbortController() - for await (const value of this.watch(ctrl.signal)) { - try { - const res = await callback(value) - if (res.cancel) { - ctrl.abort() - break - } - } catch (e) { - console.error( - 'callback function threw an error @ GetServiceManifest.onChange', - e, - ) - } - } - })() - .catch((e) => callback(null, e)) - .catch((e) => - console.error( - 'callback function threw an error @ GetServiceManifest.onChange', - e, - ), - ) - } - - /** - * Watches the manifest of a service. Returns when the predicate is true - */ - waitFor(pred: (value: Mapped) => boolean): Promise { - const ctrl = new AbortController() - return DropPromise.of( - Promise.resolve().then(async () => { - for await (const next of this.watchGen(ctrl.signal)) { - if (pred(next)) { - return next - } - } - throw new Error('context left before predicate passed') - }), - () => ctrl.abort(), - ) - } -} - -export function getServiceManifest( - effects: Effects, - packageId: PackageId, -): GetServiceManifest -export function getServiceManifest( - effects: Effects, - packageId: PackageId, - map: (manifest: Manifest | null) => Mapped, - eq?: (a: Mapped, b: Mapped) => boolean, -): GetServiceManifest -export function getServiceManifest( - effects: Effects, - packageId: PackageId, - map?: (manifest: Manifest | null) => Mapped, - eq?: (a: Mapped, b: Mapped) => boolean, -): GetServiceManifest { - return new GetServiceManifest( - effects, - packageId, - map ?? ((a) => a as Mapped), - eq ?? ((a, b) => deepEqual(a, b)), - ) -} diff --git a/sdk/package/lib/util/GetSslCertificate.ts b/sdk/package/lib/util/GetSslCertificate.ts deleted file mode 100644 index b9967bf22..000000000 --- a/sdk/package/lib/util/GetSslCertificate.ts +++ /dev/null @@ -1,122 +0,0 @@ -import { T } from '..' 
-import { Effects } from '../../../base/lib/Effects' -import { AbortedError } from '../../../base/lib/util/AbortedError' -import { DropGenerator, DropPromise } from '../../../base/lib/util/Drop' - -export class GetSslCertificate { - constructor( - readonly effects: Effects, - readonly hostnames: string[], - readonly algorithm?: T.Algorithm, - ) {} - - /** - * Returns the an SSL Certificate for the given hostnames if permitted. Restarts the service if it changes - */ - const() { - return this.effects.getSslCertificate({ - hostnames: this.hostnames, - algorithm: this.algorithm, - callback: - this.effects.constRetry && - (() => this.effects.constRetry && this.effects.constRetry()), - }) - } - /** - * Returns the an SSL Certificate for the given hostnames if permitted. Does nothing if it changes - */ - once() { - return this.effects.getSslCertificate({ - hostnames: this.hostnames, - algorithm: this.algorithm, - }) - } - - private async *watchGen(abort?: AbortSignal) { - const resolveCell = { resolve: () => {} } - this.effects.onLeaveContext(() => { - resolveCell.resolve() - }) - abort?.addEventListener('abort', () => resolveCell.resolve()) - while (this.effects.isInContext && !abort?.aborted) { - let callback: () => void = () => {} - const waitForNext = new Promise((resolve) => { - callback = resolve - resolveCell.resolve = resolve - }) - yield await this.effects.getSslCertificate({ - hostnames: this.hostnames, - algorithm: this.algorithm, - callback: () => callback(), - }) - await waitForNext - } - return new Promise((_, rej) => rej(new AbortedError())) - } - - /** - * Watches the SSL Certificate for the given hostnames if permitted. 
Returns an async iterator that yields whenever the value changes - */ - watch( - abort?: AbortSignal, - ): AsyncGenerator<[string, string, string], never, unknown> { - const ctrl = new AbortController() - abort?.addEventListener('abort', () => ctrl.abort()) - return DropGenerator.of(this.watchGen(ctrl.signal), () => ctrl.abort()) - } - - /** - * Watches the SSL Certificate for the given hostnames if permitted. Takes a custom callback function to run whenever it changes - */ - onChange( - callback: ( - value: [string, string, string] | null, - error?: Error, - ) => { cancel: boolean } | Promise<{ cancel: boolean }>, - ) { - ;(async () => { - const ctrl = new AbortController() - for await (const value of this.watch(ctrl.signal)) { - try { - const res = await callback(value) - if (res.cancel) { - ctrl.abort() - break - } - } catch (e) { - console.error( - 'callback function threw an error @ GetSslCertificate.onChange', - e, - ) - } - } - })() - .catch((e) => callback(null, e)) - .catch((e) => - console.error( - 'callback function threw an error @ GetSslCertificate.onChange', - e, - ), - ) - } - - /** - * Watches the SSL Certificate for the given hostnames if permitted. 
Returns when the predicate is true - */ - waitFor( - pred: (value: [string, string, string] | null) => boolean, - ): Promise<[string, string, string] | null> { - const ctrl = new AbortController() - return DropPromise.of( - Promise.resolve().then(async () => { - for await (const next of this.watchGen(ctrl.signal)) { - if (pred(next)) { - return next - } - } - return null - }), - () => ctrl.abort(), - ) - } -} diff --git a/sdk/package/lib/util/fileHelper.ts b/sdk/package/lib/util/fileHelper.ts index 6b428edfd..1bcde0c90 100644 --- a/sdk/package/lib/util/fileHelper.ts +++ b/sdk/package/lib/util/fileHelper.ts @@ -4,8 +4,8 @@ import * as TOML from '@iarna/toml' import * as INI from 'ini' import * as T from '../../../base/lib/types' import * as fs from 'node:fs/promises' -import { AbortedError, asError, deepEqual } from '../../../base/lib/util' -import { DropGenerator, DropPromise } from '../../../base/lib/util/Drop' +import { asError, deepEqual } from '../../../base/lib/util' +import { Watchable } from '../../../base/lib/util/Watchable' import { PathBase } from './Volume' const previousPath = /(.+?)\/([^/]*)$/ @@ -228,132 +228,72 @@ export class FileHelper { return map(this.validate(data)) } - private async readConst( + private createFileWatchable( effects: T.Effects, map: (value: A) => B, - eq: (left: B | null | undefined, right: B | null) => boolean, - ): Promise { - const watch = this.readWatch(effects, map, eq) - const res = await watch.next() - if (effects.constRetry) { - const record: (typeof this.consts)[number] = [ - effects.constRetry, - res.value, - map, - eq, - ] - this.consts.push(record) - watch - .next() - .then(() => { - this.consts = this.consts.filter((r) => r !== record) - effects.constRetry && effects.constRetry() - }) - .catch() - } - return res.value - } - - private async *readWatch( - effects: T.Effects, - map: (value: A) => B, - eq: (left: B | null | undefined, right: B | null) => boolean, - abort?: AbortSignal, + eq: (left: B | null, right: B | 
null) => boolean, ) { - let prev: { value: B | null } | null = null - while (effects.isInContext && !abort?.aborted) { - if (await exists(this.path)) { - const ctrl = new AbortController() - abort?.addEventListener('abort', () => ctrl.abort()) - const watch = fs.watch(this.path, { - persistent: false, - signal: ctrl.signal, - }) - const newRes = await this.readOnce(map) - const listen = Promise.resolve() - .then(async () => { - for await (const _ of watch) { - ctrl.abort() - return null - } - }) - .catch((e) => console.error(asError(e))) - if (!prev || !eq(prev.value, newRes)) { - console.error('yielding', JSON.stringify({ prev: prev, newRes })) - yield newRes - } - prev = { value: newRes } - await listen - } else { - yield null - await onCreated(this.path).catch((e) => console.error(asError(e))) - } + const doRead = async (): Promise => { + const data = await this.readFile() + if (!data) return null + return this.validate(data) } - return new Promise((_, rej) => rej(new AbortedError())) - } + const filePath = this.path + const fileHelper = this - private readOnChange( - effects: T.Effects, - callback: ( - value: B | null, - error?: Error, - ) => { cancel: boolean } | Promise<{ cancel: boolean }>, - map: (value: A) => B, - eq: (left: B | null | undefined, right: B | null) => boolean, - ) { - ;(async () => { - const ctrl = new AbortController() - for await (const value of this.readWatch(effects, map, eq, ctrl.signal)) { - try { - const res = await callback(value) - if (res.cancel) ctrl.abort() - } catch (e) { - console.error( - 'callback function threw an error @ FileHelper.read.onChange', - e, - ) - } + const wrappedMap = (raw: A | null): B | null => { + if (raw === null) return null + return map(raw) + } + + return new (class extends Watchable { + protected readonly label = 'FileHelper' + + protected async fetch() { + return doRead() } - })() - .catch((e) => callback(null, e)) - .catch((e) => - console.error( - 'callback function threw an error @ 
FileHelper.read.onChange', - e, - ), - ) - } - private readWaitFor( - effects: T.Effects, - pred: (value: B | null, error?: Error) => boolean, - map: (value: A) => B, - ): Promise { - const ctrl = new AbortController() - return DropPromise.of( - Promise.resolve().then(async () => { - const watch = this.readWatch(effects, map, (_) => false, ctrl.signal) - while (true) { - try { - const res = await watch.next() - if (pred(res.value)) { - ctrl.abort() - return res.value - } - if (res.done) { - break - } - } catch (e) { - if (pred(null, e as Error)) { - break - } + protected async *produce( + abort: AbortSignal, + ): AsyncGenerator { + while (this.effects.isInContext && !abort.aborted) { + if (await exists(filePath)) { + const ctrl = new AbortController() + abort.addEventListener('abort', () => ctrl.abort()) + const watch = fs.watch(filePath, { + persistent: false, + signal: ctrl.signal, + }) + yield await doRead() + await Promise.resolve() + .then(async () => { + for await (const _ of watch) { + ctrl.abort() + return null + } + }) + .catch((e) => console.error(asError(e))) + } else { + yield null + await onCreated(filePath).catch((e) => console.error(asError(e))) } } - ctrl.abort() - return null - }), - () => ctrl.abort(), - ) + } + + protected onConstRegistered(value: B | null): (() => void) | void { + if (!this.effects.constRetry) return + const record: (typeof fileHelper.consts)[number] = [ + this.effects.constRetry, + value, + wrappedMap, + eq, + ] + fileHelper.consts.push(record) + return () => { + fileHelper.consts = fileHelper.consts.filter((r) => r !== record) + } + } + })(effects, { map: wrappedMap, eq }) } /** @@ -372,7 +312,7 @@ export class FileHelper { read(): ReadType read( map: (value: A) => B, - eq?: (left: B | null | undefined, right: B | null) => boolean, + eq?: (left: B | null, right: B | null) => boolean, ): ReadType read( map?: (value: A) => any, @@ -382,24 +322,19 @@ export class FileHelper { eq = eq ?? 
deepEqual return { once: () => this.readOnce(map), - const: (effects: T.Effects) => this.readConst(effects, map, eq), - watch: (effects: T.Effects, abort?: AbortSignal) => { - const ctrl = new AbortController() - abort?.addEventListener('abort', () => ctrl.abort()) - return DropGenerator.of( - this.readWatch(effects, map, eq, ctrl.signal), - () => ctrl.abort(), - ) - }, + const: (effects: T.Effects) => + this.createFileWatchable(effects, map, eq).const(), + watch: (effects: T.Effects, abort?: AbortSignal) => + this.createFileWatchable(effects, map, eq).watch(abort), onChange: ( effects: T.Effects, callback: ( value: A | null, error?: Error, ) => { cancel: boolean } | Promise<{ cancel: boolean }>, - ) => this.readOnChange(effects, callback, map, eq), + ) => this.createFileWatchable(effects, map, eq).onChange(callback), waitFor: (effects: T.Effects, pred: (value: A | null) => boolean) => - this.readWaitFor(effects, pred, map), + this.createFileWatchable(effects, map, eq).waitFor(pred), } } diff --git a/sdk/package/lib/util/index.ts b/sdk/package/lib/util/index.ts index 5facab2f8..cdb59d470 100644 --- a/sdk/package/lib/util/index.ts +++ b/sdk/package/lib/util/index.ts @@ -1,6 +1,4 @@ export * from '../../../base/lib/util' -export { GetSslCertificate } from './GetSslCertificate' -export { GetServiceManifest, getServiceManifest } from './GetServiceManifest' export { Drop } from '../../../base/lib/util/Drop' export { Volume, Volumes } from './Volume' diff --git a/sdk/package/lib/version/VersionGraph.ts b/sdk/package/lib/version/VersionGraph.ts index 84d24269e..2b82c67c3 100644 --- a/sdk/package/lib/version/VersionGraph.ts +++ b/sdk/package/lib/version/VersionGraph.ts @@ -331,6 +331,9 @@ export class VersionGraph target: VersionRange | ExtendedVersion | null, ): Promise { if (target) { + if (isRange(target) && !target.satisfiable()) { + return + } const from = await getDataVersion(effects) if (from) { target = await this.migrate({ diff --git 
a/sdk/package/package-lock.json b/sdk/package/package-lock.json index 868def13d..2e49b06f3 100644 --- a/sdk/package/package-lock.json +++ b/sdk/package/package-lock.json @@ -1,12 +1,12 @@ { "name": "@start9labs/start-sdk", - "version": "0.4.0-beta.58", + "version": "0.4.0-beta.61", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@start9labs/start-sdk", - "version": "0.4.0-beta.58", + "version": "0.4.0-beta.61", "license": "MIT", "dependencies": { "@iarna/toml": "^3.0.0", diff --git a/sdk/package/package.json b/sdk/package/package.json index 14cc168bc..64dbf2d21 100644 --- a/sdk/package/package.json +++ b/sdk/package/package.json @@ -1,6 +1,6 @@ { "name": "@start9labs/start-sdk", - "version": "0.4.0-beta.58", + "version": "0.4.0-beta.61", "description": "Software development kit to facilitate packaging services for StartOS", "main": "./package/lib/index.js", "types": "./package/lib/index.d.ts", diff --git a/web/ARCHITECTURE.md b/web/ARCHITECTURE.md index 33d92abcf..7d8edc31f 100644 --- a/web/ARCHITECTURE.md +++ b/web/ARCHITECTURE.md @@ -1,6 +1,6 @@ # Web Architecture -Angular 20 + TypeScript workspace using [Taiga UI](https://taiga-ui.dev/) component library. +Angular 21 + TypeScript workspace using [Taiga UI 5](https://taiga-ui.dev/) component library. ## API Layer (JSON-RPC) diff --git a/web/CLAUDE.md b/web/CLAUDE.md index b6b7f2a67..d454f696e 100644 --- a/web/CLAUDE.md +++ b/web/CLAUDE.md @@ -1,6 +1,6 @@ # Web — Angular Frontend -Angular 20 + TypeScript workspace using [Taiga UI](https://taiga-ui.dev/) component library. +Angular 21 + TypeScript workspace using [Taiga UI 5](https://taiga-ui.dev/) component library. ## Projects @@ -21,15 +21,22 @@ npm run check # Type check all projects ## Golden Rules -1. **Taiga-first.** Use Taiga components, directives, and APIs whenever possible. Avoid hand-rolled HTML/CSS unless absolutely necessary. If Taiga has a component for it, use it. +1. 
**Taiga does it all.** We use Taiga UI 5 for everything — components, directives, layout, dialogs, forms, icons, and styling. Do not hand-roll HTML/CSS when Taiga provides a solution. If you think Taiga can't do something, you're probably wrong — look it up first. -2. **Pattern-match.** Nearly anything we build has a similar example elsewhere in this codebase. Search for existing patterns before writing new code. Copy the conventions used in neighboring components. +2. **Follow existing patterns.** Before writing new code, search this codebase for a similar example. Nearly anything we build has a precedent. Copy the conventions used in neighboring components. Do not invent new patterns when established ones exist. -3. **When unsure about Taiga, ask or look it up.** Use `WebFetch` against `https://taiga-ui.dev/llms-full.txt` to search for component usage, or ask the user. Taiga docs are authoritative. See [Taiga UI Docs](#taiga-ui-docs) below. +3. **Never guess Taiga APIs.** Taiga UI 5 has its own way of doing things. Do not make up component names, directive names, input bindings, or usage patterns from memory. Always verify against the official docs or the MCP server. Getting it wrong wastes everyone's time. + +4. **Use the Taiga MCP server.** If a `taiga-ui-mcp` MCP server is available, use it to look up components and get documentation with code examples. It provides two tools: `get_list_components` (search/filter components) and `get_component_example` (get full docs and examples for a component). This is the fastest and most accurate way to get Taiga usage information. + +5. **Fall back to the Taiga docs.** If the MCP server is not available, use `WebFetch` against `https://taiga-ui.dev/llms-full.txt` to search for component usage. Taiga docs are authoritative — this project's code is not. See [Taiga UI Docs](#taiga-ui-docs) below. 
## Taiga UI Docs -Taiga provides an LLM-friendly reference at `https://taiga-ui.dev/llms-full.txt` (~2200 lines covering all components with code examples). Use `WebFetch` to search it when you need to look up a component, directive, or API: +Taiga provides AI-friendly references at [taiga-ui.dev/ai-support](https://taiga-ui.dev/ai-support): + +- **MCP server** — [`taiga-ui-mcp`](https://github.com/taiga-family/taiga-ui-mcp) provides full access to Taiga UI component docs and Angular code examples via the Model Context Protocol. +- **llms-full.txt** — `https://taiga-ui.dev/llms-full.txt` (~2200 lines covering all components with code examples). Use `WebFetch` to search it: ``` WebFetch url=https://taiga-ui.dev/llms-full.txt prompt="How to use TuiTextfield with a select dropdown" @@ -50,63 +57,3 @@ See [ARCHITECTURE.md](ARCHITECTURE.md) for the web architecture: API layer, Patc - **`toSignal()`** to convert Observables (e.g., PatchDB watches) to signals. - **`ChangeDetectionStrategy.OnPush`** on almost all components. - **`takeUntilDestroyed(inject(DestroyRef))`** for subscription cleanup. 
- -## Common Taiga Patterns - -### Textfield + Select (dropdown) - -```html - - - - - - - -``` - -Provider to remove the X clear button: - -```typescript -providers: [tuiTextfieldOptionsProvider({ cleaner: signal(false) })] -``` - -### Buttons - -```html - - - -``` - -### Dialogs - -```typescript -// Confirmation -this.dialog.openConfirm({ label: 'Warning', data: { content: '...', yes: 'Confirm', no: 'Cancel' } }) - -// Custom component in dialog -this.dialog.openComponent(new PolymorpheusComponent(MyComponent, injector), { label: 'Title' }) -``` - -### Toggle - -```html - -``` - -### Errors & Tooltips - -```html - - - -``` - -### Layout - -```html - - - -``` diff --git a/web/package-lock.json b/web/package-lock.json index ec86a389c..8757e3e75 100644 --- a/web/package-lock.json +++ b/web/package-lock.json @@ -1,12 +1,12 @@ { "name": "startos-ui", - "version": "0.4.0-alpha.20", + "version": "0.4.0-alpha.21", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "startos-ui", - "version": "0.4.0-alpha.20", + "version": "0.4.0-alpha.21", "license": "MIT", "dependencies": { "@angular/cdk": "^21.2.1", @@ -363,13 +363,13 @@ } }, "node_modules/@angular-devkit/architect": { - "version": "0.2102.1", - "resolved": "https://registry.npmjs.org/@angular-devkit/architect/-/architect-0.2102.1.tgz", - "integrity": "sha512-x2Qqz6oLYvEh9UBUG0AP1A4zROO/VP+k+zM9+4c2uZw1uqoBQFmutqgzncjVU7cR9R0RApgx9JRZHDFtQru68w==", + "version": "0.2102.2", + "resolved": "https://registry.npmjs.org/@angular-devkit/architect/-/architect-0.2102.2.tgz", + "integrity": "sha512-CDvFtXwyBtMRkTQnm+LfBNLL0yLV8ZGskrM1T6VkcGwXGFDott1FxUdj96ViodYsYL5fbJr0MNA6TlLcanV3kQ==", "devOptional": true, "license": "MIT", "dependencies": { - "@angular-devkit/core": "21.2.1", + "@angular-devkit/core": "21.2.2", "rxjs": "7.8.2" }, "bin": { @@ -382,9 +382,9 @@ } }, "node_modules/@angular-devkit/core": { - "version": "21.2.1", - "resolved": "https://registry.npmjs.org/@angular-devkit/core/-/core-21.2.1.tgz", - 
"integrity": "sha512-TpXGjERqVPN8EPt7LdmWAwh0oNQ/6uWFutzGZiXhJy81n1zb1O1XrqhRAmvP1cAo5O+na6IV2JkkCmxL6F8GUg==", + "version": "21.2.2", + "resolved": "https://registry.npmjs.org/@angular-devkit/core/-/core-21.2.2.tgz", + "integrity": "sha512-xUeKGe4BDQpkz0E6fnAPIJXE0y0nqtap0KhJIBhvN7xi3NenIzTmoi6T9Yv5OOBUdLZbOm4SOel8MhdXiIBpAQ==", "license": "MIT", "peer": true, "dependencies": { @@ -410,13 +410,13 @@ } }, "node_modules/@angular-devkit/schematics": { - "version": "21.2.1", - "resolved": "https://registry.npmjs.org/@angular-devkit/schematics/-/schematics-21.2.1.tgz", - "integrity": "sha512-CWoamHaasAHMjHcYqxbj0tMnoXxdGotcAz2SpiuWtH28Lnf5xfbTaJn/lwdMP8Wdh4tgA+uYh2l45A5auCwmkw==", + "version": "21.2.2", + "resolved": "https://registry.npmjs.org/@angular-devkit/schematics/-/schematics-21.2.2.tgz", + "integrity": "sha512-CCeyQxGUq+oyGnHd7PfcYIVbj9pRnqjQq0rAojoAqs1BJdtInx9weLBCLy+AjM3NHePeZrnwm+wEVr8apED8kg==", "license": "MIT", "peer": true, "dependencies": { - "@angular-devkit/core": "21.2.1", + "@angular-devkit/core": "21.2.2", "jsonc-parser": "3.3.1", "magic-string": "0.30.21", "ora": "9.3.0", @@ -447,14 +447,14 @@ } }, "node_modules/@angular/build": { - "version": "21.2.1", - "resolved": "https://registry.npmjs.org/@angular/build/-/build-21.2.1.tgz", - "integrity": "sha512-cUpLNHJp9taII/FOcJHHfQYlMcZSRaf6eIxgSNS6Xfx1CeGoJNDN+J8+GFk+H1CPJt1EvbfyZ+dE5DbsgTD/QQ==", + "version": "21.2.2", + "resolved": "https://registry.npmjs.org/@angular/build/-/build-21.2.2.tgz", + "integrity": "sha512-Vq2eIneNxzhHm1MwEmRqEJDwHU9ODfSRDaMWwtysGMhpoMQmLdfTqkQDmkC2qVUr8mV8Z1i5I+oe5ZJaMr/PlQ==", "dev": true, "license": "MIT", "dependencies": { "@ampproject/remapping": "2.3.0", - "@angular-devkit/architect": "0.2102.1", + "@angular-devkit/architect": "0.2102.2", "@babel/core": "7.29.0", "@babel/helper-annotate-as-pure": "7.27.3", "@babel/helper-split-export-declaration": "7.24.7", @@ -497,7 +497,7 @@ "@angular/platform-browser": "^21.0.0", "@angular/platform-server": "^21.0.0", 
"@angular/service-worker": "^21.0.0", - "@angular/ssr": "^21.2.1", + "@angular/ssr": "^21.2.2", "karma": "^6.4.0", "less": "^4.2.0", "ng-packagr": "^21.0.0", @@ -547,9 +547,9 @@ } }, "node_modules/@angular/cdk": { - "version": "21.2.1", - "resolved": "https://registry.npmjs.org/@angular/cdk/-/cdk-21.2.1.tgz", - "integrity": "sha512-JUFV8qLnO7CU5v4W0HzXSQrFkkJ4RH/qqdwrf9lup7YEnsLxB7cTGhsVisc9pWKAJsoNZ4pXCVOkqKc1mFL7dw==", + "version": "21.2.2", + "resolved": "https://registry.npmjs.org/@angular/cdk/-/cdk-21.2.2.tgz", + "integrity": "sha512-9AsZkwqy07No7+0qPydcJfXB6SpA9qLDBanoesNj5KsiZJ62PJH3oIjVyNeQEEe1HQWmSwBnhwN12OPLNMUlnw==", "license": "MIT", "peer": true, "dependencies": { @@ -564,20 +564,20 @@ } }, "node_modules/@angular/cli": { - "version": "21.2.1", - "resolved": "https://registry.npmjs.org/@angular/cli/-/cli-21.2.1.tgz", - "integrity": "sha512-5SRfMTgwFj1zXOpfeZWHsxZBni0J4Xz7/CbewG47D6DmbstOrSdgt6eNzJ62R650t0G9dpri2YvToZgImtbjOQ==", + "version": "21.2.2", + "resolved": "https://registry.npmjs.org/@angular/cli/-/cli-21.2.2.tgz", + "integrity": "sha512-eZo8/qX+ZIpIWc0CN+cCX13Lbgi/031wAp8DRVhDDO6SMVtcr/ObOQ2S16+pQdOMXxiG3vby6IhzJuz9WACzMQ==", "devOptional": true, "license": "MIT", "peer": true, "dependencies": { - "@angular-devkit/architect": "0.2102.1", - "@angular-devkit/core": "21.2.1", - "@angular-devkit/schematics": "21.2.1", + "@angular-devkit/architect": "0.2102.2", + "@angular-devkit/core": "21.2.2", + "@angular-devkit/schematics": "21.2.2", "@inquirer/prompts": "7.10.1", "@listr2/prompt-adapter-inquirer": "3.0.5", "@modelcontextprotocol/sdk": "1.26.0", - "@schematics/angular": "21.2.1", + "@schematics/angular": "21.2.2", "@yarnpkg/lockfile": "1.1.0", "algoliasearch": "5.48.1", "ini": "6.0.0", @@ -600,9 +600,9 @@ } }, "node_modules/@angular/common": { - "version": "21.2.1", - "resolved": "https://registry.npmjs.org/@angular/common/-/common-21.2.1.tgz", - "integrity": 
"sha512-xhv2i1Q9s1kpGbGsfj+o36+XUC/TQLcZyRuRxn3GwaN7Rv34FabC88ycpvoE+sW/txj4JRx9yPA0dRSZjwZ+Gg==", + "version": "21.2.4", + "resolved": "https://registry.npmjs.org/@angular/common/-/common-21.2.4.tgz", + "integrity": "sha512-NrP6qOuUpo3fqq14UJ1b2bIRtWsfvxh1qLqOyFV4gfBrHhXd0XffU1LUlUw1qp4w1uBSgPJ0/N5bSPUWrAguVg==", "license": "MIT", "peer": true, "dependencies": { @@ -612,14 +612,14 @@ "node": "^20.19.0 || ^22.12.0 || >=24.0.0" }, "peerDependencies": { - "@angular/core": "21.2.1", + "@angular/core": "21.2.4", "rxjs": "^6.5.3 || ^7.4.0" } }, "node_modules/@angular/compiler": { - "version": "21.2.1", - "resolved": "https://registry.npmjs.org/@angular/compiler/-/compiler-21.2.1.tgz", - "integrity": "sha512-FxWaSaii1vfHIFA+JksqQ8NGB2frfqCrs7Ju50a44kbwR4fmanfn/VsiS/CbwBp9vcyT/Br9X/jAG4RuK/U2nw==", + "version": "21.2.4", + "resolved": "https://registry.npmjs.org/@angular/compiler/-/compiler-21.2.4.tgz", + "integrity": "sha512-9+ulVK3idIo/Tu4X2ic7/V0+Uj7pqrOAbOuIirYe6Ymm3AjexuFRiGBbfcH0VJhQ5cf8TvIJ1fuh+MI4JiRIxA==", "license": "MIT", "peer": true, "dependencies": { @@ -630,9 +630,9 @@ } }, "node_modules/@angular/compiler-cli": { - "version": "21.2.1", - "resolved": "https://registry.npmjs.org/@angular/compiler-cli/-/compiler-cli-21.2.1.tgz", - "integrity": "sha512-qYCWLGtEju4cDtYLi4ZzbwKoF0lcGs+Lc31kuESvAzYvWNgk2EUOtwWo8kbgpAzAwSYodtxW6Q90iWEwfU6elw==", + "version": "21.2.4", + "resolved": "https://registry.npmjs.org/@angular/compiler-cli/-/compiler-cli-21.2.4.tgz", + "integrity": "sha512-vGjd7DZo/Ox50pQCm5EycmBu91JclimPtZoyNXu/2hSxz3oAkzwiHCwlHwk2g58eheSSp+lYtYRLmHAqSVZLjg==", "dev": true, "license": "MIT", "peer": true, @@ -654,7 +654,7 @@ "node": "^20.19.0 || ^22.12.0 || >=24.0.0" }, "peerDependencies": { - "@angular/compiler": "21.2.1", + "@angular/compiler": "21.2.4", "typescript": ">=5.9 <6.1" }, "peerDependenciesMeta": { @@ -664,9 +664,9 @@ } }, "node_modules/@angular/core": { - "version": "21.2.1", - "resolved": 
"https://registry.npmjs.org/@angular/core/-/core-21.2.1.tgz", - "integrity": "sha512-pFTbg03s2ZI5cHNT+eWsGjwIIKiYkeAnodFbCAHjwFi9KCEYlTykFLjr9lcpGrBddfmAH7GE08Q73vgmsdcNHw==", + "version": "21.2.4", + "resolved": "https://registry.npmjs.org/@angular/core/-/core-21.2.4.tgz", + "integrity": "sha512-2+gd67ZuXHpGOqeb2o7XZPueEWEP81eJza2tSHkT5QMV8lnYllDEmaNnkPxnIjSLGP1O3PmiXxo4z8ibHkLZwg==", "license": "MIT", "peer": true, "dependencies": { @@ -676,7 +676,7 @@ "node": "^20.19.0 || ^22.12.0 || >=24.0.0" }, "peerDependencies": { - "@angular/compiler": "21.2.1", + "@angular/compiler": "21.2.4", "rxjs": "^6.5.3 || ^7.4.0", "zone.js": "~0.15.0 || ~0.16.0" }, @@ -690,9 +690,9 @@ } }, "node_modules/@angular/forms": { - "version": "21.2.1", - "resolved": "https://registry.npmjs.org/@angular/forms/-/forms-21.2.1.tgz", - "integrity": "sha512-6aqOPk9xoa0dfeUDeEbhaiPhmt6MQrdn59qbGAomn9RMXA925TrHbJhSIkp9tXc2Fr4aJRi8zkD/cdXEc1IYeA==", + "version": "21.2.4", + "resolved": "https://registry.npmjs.org/@angular/forms/-/forms-21.2.4.tgz", + "integrity": "sha512-1fOhctA9ADEBYjI3nPQUR5dHsK2+UWAjup37Ksldk/k0w8UpD5YsN7JVNvsDMZRFMucKYcGykPblU7pABtsqnQ==", "license": "MIT", "peer": true, "dependencies": { @@ -703,16 +703,16 @@ "node": "^20.19.0 || ^22.12.0 || >=24.0.0" }, "peerDependencies": { - "@angular/common": "21.2.1", - "@angular/core": "21.2.1", - "@angular/platform-browser": "21.2.1", + "@angular/common": "21.2.4", + "@angular/core": "21.2.4", + "@angular/platform-browser": "21.2.4", "rxjs": "^6.5.3 || ^7.4.0" } }, "node_modules/@angular/language-service": { - "version": "21.2.1", - "resolved": "https://registry.npmjs.org/@angular/language-service/-/language-service-21.2.1.tgz", - "integrity": "sha512-L8EaNhWDKMny18RURg/Ju2Dix2e7qLL/s2yDQrawgjQRmXAMnjimz10w/EiiG7FMK/Hj5fLycS5X8VITq1f2rg==", + "version": "21.2.4", + "resolved": "https://registry.npmjs.org/@angular/language-service/-/language-service-21.2.4.tgz", + "integrity": 
"sha512-seWlXWhayTwuL62Cfz+Ky/Wv67oYLX+cXplYoIinDVJPgQaU9jXpakLfKq8RwdRXVmgTE0HQ5dyoTozuWgJ8Nw==", "dev": true, "license": "MIT", "engines": { @@ -720,9 +720,9 @@ } }, "node_modules/@angular/platform-browser": { - "version": "21.2.1", - "resolved": "https://registry.npmjs.org/@angular/platform-browser/-/platform-browser-21.2.1.tgz", - "integrity": "sha512-k4SJLxIaLT26vLjLuFL+ho0BiG5PrdxEsjsXFC7w5iUhomeouzkHVTZ4t7gaLNKrdRD7QNtU4Faw0nL0yx0ZPQ==", + "version": "21.2.4", + "resolved": "https://registry.npmjs.org/@angular/platform-browser/-/platform-browser-21.2.4.tgz", + "integrity": "sha512-1A9e/cQVu+3BkRCktLcO3RZGuw8NOTHw1frUUrpAz+iMyvIT4sDRFbL+U1g8qmOCZqRNC1Pi1HZfZ1kl6kvrcQ==", "license": "MIT", "peer": true, "dependencies": { @@ -732,9 +732,9 @@ "node": "^20.19.0 || ^22.12.0 || >=24.0.0" }, "peerDependencies": { - "@angular/animations": "21.2.1", - "@angular/common": "21.2.1", - "@angular/core": "21.2.1" + "@angular/animations": "21.2.4", + "@angular/common": "21.2.4", + "@angular/core": "21.2.4" }, "peerDependenciesMeta": { "@angular/animations": { @@ -743,13 +743,13 @@ } }, "node_modules/@angular/pwa": { - "version": "21.2.1", - "resolved": "https://registry.npmjs.org/@angular/pwa/-/pwa-21.2.1.tgz", - "integrity": "sha512-oi4amOGT7g6voeBeyr9/8TBug+RSghmAVoyBNIrAPLNRXmhJGRtCom7WZoYHR4mJ8+Tf+PXcjm8RDO2Zq4AqpQ==", + "version": "21.2.2", + "resolved": "https://registry.npmjs.org/@angular/pwa/-/pwa-21.2.2.tgz", + "integrity": "sha512-/JU3gBXA/3LYNngR0dJF8H9RU1M5IaFh2CG2BCugIx8vmRFDC/GThCJKZqzPaYFutEg7ieXzr3cSj+2rtdr7vg==", "license": "MIT", "dependencies": { - "@angular-devkit/schematics": "21.2.1", - "@schematics/angular": "21.2.1", + "@angular-devkit/schematics": "21.2.2", + "@schematics/angular": "21.2.2", "parse5-html-rewriting-stream": "8.0.0" }, "engines": { @@ -758,7 +758,7 @@ "yarn": ">= 1.13.0" }, "peerDependencies": { - "@angular/cli": "^21.2.1" + "@angular/cli": "^21.2.2" }, "peerDependenciesMeta": { "@angular/cli": { @@ -767,9 +767,9 @@ } }, 
"node_modules/@angular/router": { - "version": "21.2.1", - "resolved": "https://registry.npmjs.org/@angular/router/-/router-21.2.1.tgz", - "integrity": "sha512-FUKG+8ImQYxmlDUdAs7+VeS/VrBNrbo0zGiKkzVNU/bbcCyroKXJLXFtkFI3qmROiJNyIta2IMBCHJvIjLIMig==", + "version": "21.2.4", + "resolved": "https://registry.npmjs.org/@angular/router/-/router-21.2.4.tgz", + "integrity": "sha512-OjWze4XT8i2MThcBXMv7ru1k6/5L6QYZbcXuseqimFCHm2avEJ+mXPovY066fMBZJhqbXdjB82OhHAWkIHjglQ==", "license": "MIT", "peer": true, "dependencies": { @@ -779,16 +779,16 @@ "node": "^20.19.0 || ^22.12.0 || >=24.0.0" }, "peerDependencies": { - "@angular/common": "21.2.1", - "@angular/core": "21.2.1", - "@angular/platform-browser": "21.2.1", + "@angular/common": "21.2.4", + "@angular/core": "21.2.4", + "@angular/platform-browser": "21.2.4", "rxjs": "^6.5.3 || ^7.4.0" } }, "node_modules/@angular/service-worker": { - "version": "21.2.1", - "resolved": "https://registry.npmjs.org/@angular/service-worker/-/service-worker-21.2.1.tgz", - "integrity": "sha512-mFyEVh5KazB6wr9uoXhlDQQDaicH9/t2m6lsN+/t2y6iMPpTIuTbWYHXX1uVbLKcxne54ei78NgD3wNS7DMfmg==", + "version": "21.2.4", + "resolved": "https://registry.npmjs.org/@angular/service-worker/-/service-worker-21.2.4.tgz", + "integrity": "sha512-YcPMb0co2hEDwzOG5S27b6f8rotXEUDx88nQuhHDl/ztuzXaxKklJ21qVDVZ0R433YBCRQJl2D6ZrpJojsnBFw==", "license": "MIT", "peer": true, "dependencies": { @@ -801,7 +801,7 @@ "node": "^20.19.0 || ^22.12.0 || >=24.0.0" }, "peerDependencies": { - "@angular/core": "21.2.1", + "@angular/core": "21.2.4", "rxjs": "^6.5.3 || ^7.4.0" } }, @@ -1124,21 +1124,21 @@ } }, "node_modules/@emnapi/core": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.8.1.tgz", - "integrity": "sha512-AvT9QFpxK0Zd8J0jopedNm+w/2fIzvtPKPjqyw9jwvBaReTTqPBk9Hixaz7KbjimP+QNz605/XnjFcDAL2pqBg==", + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@emnapi/core/-/core-1.9.0.tgz", + "integrity": 
"sha512-0DQ98G9ZQZOxfUcQn1waV2yS8aWdZ6kJMbYCJB3oUBecjWYO1fqJ+a1DRfPF3O5JEkwqwP1A9QEN/9mYm2Yd0w==", "dev": true, "license": "MIT", "optional": true, "dependencies": { - "@emnapi/wasi-threads": "1.1.0", + "@emnapi/wasi-threads": "1.2.0", "tslib": "^2.4.0" } }, "node_modules/@emnapi/runtime": { - "version": "1.8.1", - "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.8.1.tgz", - "integrity": "sha512-mehfKSMWjjNol8659Z8KxEMrdSJDDot5SXMq00dM8BN4o+CLNXQ0xH2V7EchNHV4RmbZLmmPdEaXZc5H2FXmDg==", + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@emnapi/runtime/-/runtime-1.9.0.tgz", + "integrity": "sha512-QN75eB0IH2ywSpRpNddCRfQIhmJYBCJ1x5Lb3IscKAL8bMnVAKnRg8dCoXbHzVLLH7P38N2Z3mtulB7W0J0FKw==", "dev": true, "license": "MIT", "optional": true, @@ -1147,9 +1147,9 @@ } }, "node_modules/@emnapi/wasi-threads": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.1.0.tgz", - "integrity": "sha512-WI0DdZ8xFSbgMjR1sFsKABJ/C5OnRrjT06JXbZKexJGrDuPTzZdDYfFlsgcCXCyf+suG5QU2e/y1Wo2V/OapLQ==", + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@emnapi/wasi-threads/-/wasi-threads-1.2.0.tgz", + "integrity": "sha512-N10dEJNSsUx41Z6pZsXU8FjPjpBEplgH24sfkmITrBED1/U2Esum9F3lfLrMjKHHjmi557zQn7kR9R+XWXu5Rg==", "dev": true, "license": "MIT", "optional": true, @@ -1984,16 +1984,6 @@ } } }, - "node_modules/@isaacs/cliui": { - "version": "9.0.0", - "resolved": "https://registry.npmjs.org/@isaacs/cliui/-/cliui-9.0.0.tgz", - "integrity": "sha512-AokJm4tuBHillT+FpMtxQ60n8ObyXBatq7jD2/JA9dxbDDokKQm8KMht5ibGzLVU9IJDIKK4TPKgMHEYMn3lMg==", - "devOptional": true, - "license": "BlueOak-1.0.0", - "engines": { - "node": ">=18" - } - }, "node_modules/@isaacs/fs-minipass": { "version": "4.0.1", "resolved": "https://registry.npmjs.org/@isaacs/fs-minipass/-/fs-minipass-4.0.1.tgz", @@ -2182,9 +2172,9 @@ ] }, "node_modules/@maskito/angular": { - "version": "5.1.1", - "resolved": 
"https://registry.npmjs.org/@maskito/angular/-/angular-5.1.1.tgz", - "integrity": "sha512-NCKvUpIF3KCJDRpDWcI0DAeVQaYgsJmGAycq7h/LVu6h7P0nSd5xanYpoM2/F1MdXvGh/aEAF37KooTsfwUqog==", + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/@maskito/angular/-/angular-5.1.2.tgz", + "integrity": "sha512-VLeO9+80AC0OE1JJf7OIHahCDejNVrT5gMfcBUYov0N7yT43GnzeyOtbwOuo0QETZEHdFGJkwWoNwZs0+nyZmw==", "license": "Apache-2.0", "peer": true, "dependencies": { @@ -2193,35 +2183,35 @@ "peerDependencies": { "@angular/core": ">=19.0.0", "@angular/forms": ">=19.0.0", - "@maskito/core": "^5.1.1" + "@maskito/core": "^5.1.2" } }, "node_modules/@maskito/core": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/@maskito/core/-/core-5.1.1.tgz", - "integrity": "sha512-grxipbOGKBs++kgAM/5K/lCghY/AfkSKNcRDSp8Nspf9dngxWiraMiPQVFsS8E0sab1wHEWlMXBdqEa3WyQXPA==", + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/@maskito/core/-/core-5.1.2.tgz", + "integrity": "sha512-eoeQ41uDu9AuhFQDzAPTNTr5VM+hMpRsrJjtHzCH3FM7u+/mOGLgtEeGE1+5Up5UCtY7h/N1hPaZ/qT5mcNWXQ==", "license": "Apache-2.0", "peer": true }, "node_modules/@maskito/kit": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/@maskito/kit/-/kit-5.1.1.tgz", - "integrity": "sha512-YlMPGzyX/zuYaAxWRFTwBYoP3bV1WzMhrW8D+7Tn6nUfCBUTogvnJrVhVyOi6dZzXGX8qP1yRVT6W/IVt79F5g==", + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/@maskito/kit/-/kit-5.1.2.tgz", + "integrity": "sha512-inVxaa36dLQp1NQ/a5dM791qgDZUulPDs299pS6KNXKN7wrisybSIoRVrpjoZt/QIe2TMtku313sBgtf2LhFAQ==", "license": "Apache-2.0", "peer": true, "peerDependencies": { - "@maskito/core": "^5.1.1" + "@maskito/core": "^5.1.2" } }, "node_modules/@maskito/phone": { - "version": "5.1.1", - "resolved": "https://registry.npmjs.org/@maskito/phone/-/phone-5.1.1.tgz", - "integrity": "sha512-yBb42/7FZpEjlAvvT5SabWX8jZdMjhGv4g7G6McarVM1RcG6vcV+kZd0xjAdDvOv8s5zGww+pN39Scn0tt+QMg==", + "version": "5.1.2", + "resolved": 
"https://registry.npmjs.org/@maskito/phone/-/phone-5.1.2.tgz", + "integrity": "sha512-NlM9k/UpWxE9SNg00UtAe0DcobiTp0VklzYLAvxCkpg5zAOQDNipNeHUQobzuebK8QsLbMJnoGvDeqxEkO5VqA==", "license": "Apache-2.0", "peer": true, "peerDependencies": { - "@maskito/core": "^5.1.1", - "@maskito/kit": "^5.1.1", + "@maskito/core": "^5.1.2", + "@maskito/kit": "^5.1.2", "libphonenumber-js": ">=1.0.0" } }, @@ -2280,9 +2270,9 @@ } }, "node_modules/@modelcontextprotocol/sdk/node_modules/jose": { - "version": "6.1.3", - "resolved": "https://registry.npmjs.org/jose/-/jose-6.1.3.tgz", - "integrity": "sha512-0TpaTfihd4QMNwrz/ob2Bp7X04yuxJkjRGi4aKmOqwhov54i6u79oCv7T+C7lo70MKH6BesI3vscD1yb/yzKXQ==", + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/jose/-/jose-6.2.1.tgz", + "integrity": "sha512-jUaKr1yrbfaImV7R2TN/b3IcZzsw38/chqMpo2XJ7i2F8AfM/lA4G1goC3JVEwg0H7UldTmSt3P68nt31W7/mw==", "devOptional": true, "license": "MIT", "funding": { @@ -2879,9 +2869,9 @@ } }, "node_modules/@npmcli/agent/node_modules/lru-cache": { - "version": "11.2.6", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.6.tgz", - "integrity": "sha512-ESL2CrkS/2wTPfuend7Zhkzo2u0daGJ/A2VucJOgQ/C48S/zB8MMeMHSGKYpXhIjbPxfuezITkaBH1wqv00DDQ==", + "version": "11.2.7", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.7.tgz", + "integrity": "sha512-aY/R+aEsRelme17KGQa/1ZSIpLpNYYrhcrepKTZgE+W3WM16YMCaPwOHLHsmopZHELU0Ojin1lPVxKR0MihncA==", "devOptional": true, "license": "BlueOak-1.0.0", "engines": { @@ -2932,9 +2922,9 @@ } }, "node_modules/@npmcli/git/node_modules/lru-cache": { - "version": "11.2.6", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.6.tgz", - "integrity": "sha512-ESL2CrkS/2wTPfuend7Zhkzo2u0daGJ/A2VucJOgQ/C48S/zB8MMeMHSGKYpXhIjbPxfuezITkaBH1wqv00DDQ==", + "version": "11.2.7", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.7.tgz", + "integrity": 
"sha512-aY/R+aEsRelme17KGQa/1ZSIpLpNYYrhcrepKTZgE+W3WM16YMCaPwOHLHsmopZHELU0Ojin1lPVxKR0MihncA==", "devOptional": true, "license": "BlueOak-1.0.0", "engines": { @@ -4054,13 +4044,13 @@ "license": "MIT" }, "node_modules/@schematics/angular": { - "version": "21.2.1", - "resolved": "https://registry.npmjs.org/@schematics/angular/-/angular-21.2.1.tgz", - "integrity": "sha512-DjrHRMoILhbZ6tc7aNZWuHA1wCm1iU/JN1TxAwNEyIBgyU3Fx8Z5baK4w0TCpOIPt0RLWVgP2L7kka9aXWCUFA==", + "version": "21.2.2", + "resolved": "https://registry.npmjs.org/@schematics/angular/-/angular-21.2.2.tgz", + "integrity": "sha512-Ywa6HDtX7TRBQZTVMMnxX3Mk7yVnG8KtSFaXWrkx779+q8tqYdBwNwAqbNd4Zatr1GccKaz9xcptHJta5+DTxw==", "license": "MIT", "dependencies": { - "@angular-devkit/core": "21.2.1", - "@angular-devkit/schematics": "21.2.1", + "@angular-devkit/core": "21.2.2", + "@angular-devkit/schematics": "21.2.2", "jsonc-parser": "3.3.1" }, "engines": { @@ -4676,9 +4666,9 @@ "license": "MIT" }, "node_modules/@types/node": { - "version": "22.19.11", - "resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.11.tgz", - "integrity": "sha512-BH7YwL6rA93ReqeQS1c4bsPpcfOmJasG+Fkr6Y59q83f9M1WcBRHR2vM+P9eOisYRcN3ujQoiZY8uk5W+1WL8w==", + "version": "22.19.15", + "resolved": "https://registry.npmjs.org/@types/node/-/node-22.19.15.tgz", + "integrity": "sha512-F0R/h2+dsy5wJAUe3tAU6oqa2qbWY5TpNfL/RGmo1y38hiyO1w3x2jPtt76wmuaJI4DQnOBu21cNXQ2STIUUWg==", "dev": true, "license": "MIT", "peer": true, @@ -4772,9 +4762,9 @@ } }, "node_modules/acorn": { - "version": "8.15.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", - "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", + "version": "8.16.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.16.0.tgz", + "integrity": "sha512-UVJyE9MttOsBQIDKw1skb9nAwQuR5wuGD3+82K6JgJlm/Y+KI92oNsMNGZCYdDsVtRHSak0pcV5Dno5+4jh9sw==", "dev": true, "license": "MIT", "bin": { @@ -4785,9 +4775,9 @@ } }, 
"node_modules/acorn-walk": { - "version": "8.3.4", - "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.4.tgz", - "integrity": "sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==", + "version": "8.3.5", + "resolved": "https://registry.npmjs.org/acorn-walk/-/acorn-walk-8.3.5.tgz", + "integrity": "sha512-HEHNfbars9v4pgpW6SO1KSPkfoS0xVOM/9UzkJltjlsHZmJasxg8aXkuZa7SMf8vKGIBhpUsPluQSqhJFCqebw==", "dev": true, "license": "MIT", "dependencies": { @@ -5034,13 +5024,16 @@ } }, "node_modules/baseline-browser-mapping": { - "version": "2.9.19", - "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.9.19.tgz", - "integrity": "sha512-ipDqC8FrAl/76p2SSWKSI+H9tFwm7vYqXQrItCuiVPt26Km0jS+NzSsBWAaBusvSbQcfJG+JitdMm+wZAgTYqg==", + "version": "2.10.8", + "resolved": "https://registry.npmjs.org/baseline-browser-mapping/-/baseline-browser-mapping-2.10.8.tgz", + "integrity": "sha512-PCLz/LXGBsNTErbtB6i5u4eLpHeMfi93aUv5duMmj6caNu6IphS4q6UevDnL36sZQv9lrP11dbPKGMaXPwMKfQ==", "dev": true, "license": "Apache-2.0", "bin": { - "baseline-browser-mapping": "dist/cli.js" + "baseline-browser-mapping": "dist/cli.cjs" + }, + "engines": { + "node": ">=6.0.0" } }, "node_modules/basic-auth": { @@ -5258,9 +5251,9 @@ } }, "node_modules/cacache/node_modules/lru-cache": { - "version": "11.2.6", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.6.tgz", - "integrity": "sha512-ESL2CrkS/2wTPfuend7Zhkzo2u0daGJ/A2VucJOgQ/C48S/zB8MMeMHSGKYpXhIjbPxfuezITkaBH1wqv00DDQ==", + "version": "11.2.7", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.7.tgz", + "integrity": "sha512-aY/R+aEsRelme17KGQa/1ZSIpLpNYYrhcrepKTZgE+W3WM16YMCaPwOHLHsmopZHELU0Ojin1lPVxKR0MihncA==", "devOptional": true, "license": "BlueOak-1.0.0", "engines": { @@ -5334,9 +5327,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001769", - "resolved": 
"https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001769.tgz", - "integrity": "sha512-BCfFL1sHijQlBGWBMuJyhZUhzo7wer5sVj9hqekB/7xn0Ypy+pER/edCYQm4exbXj4WiySGp40P8UuTh6w1srg==", + "version": "1.0.30001779", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001779.tgz", + "integrity": "sha512-U5og2PN7V4DMgF50YPNtnZJGWVLFjjsN3zb6uMT5VGYIewieDj1upwfuVNXf4Kor+89c3iCRJnSzMD5LmTvsfA==", "dev": true, "funding": [ { @@ -5501,56 +5494,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/cli-truncate/node_modules/is-fullwidth-code-point": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-5.1.0.tgz", - "integrity": "sha512-5XHYaSyiqADb4RnZ1Bdad6cPp8Toise4TzEjcOYDHZkTCbKgiUl7WTUCpNWHuxmDt91wnsZBc9xinNzopv3JMQ==", - "devOptional": true, - "license": "MIT", - "dependencies": { - "get-east-asian-width": "^1.3.1" - }, - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/cli-truncate/node_modules/slice-ansi": { - "version": "8.0.0", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-8.0.0.tgz", - "integrity": "sha512-stxByr12oeeOyY2BlviTNQlYV5xOj47GirPr4yA1hE9JCtxfQN0+tVbkxwCtYDQWhEKWFHsEK48ORg5jrouCAg==", - "devOptional": true, - "license": "MIT", - "dependencies": { - "ansi-styles": "^6.2.3", - "is-fullwidth-code-point": "^5.1.0" - }, - "engines": { - "node": ">=20" - }, - "funding": { - "url": "https://github.com/chalk/slice-ansi?sponsor=1" - } - }, - "node_modules/cli-truncate/node_modules/string-width": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-8.2.0.tgz", - "integrity": "sha512-6hJPQ8N0V0P3SNmP6h2J99RLuzrWz2gvT7VnK5tKvrNqJoyS9W4/Fb8mo31UiPvy00z7DQXkP2hnKBVav76thw==", - "devOptional": true, - "license": "MIT", - "dependencies": { - "get-east-asian-width": "^1.5.0", - "strip-ansi": "^7.1.2" - }, - "engines": { - 
"node": ">=20" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/cli-width": { "version": "4.1.0", "resolved": "https://registry.npmjs.org/cli-width/-/cli-width-4.1.0.tgz", @@ -5576,6 +5519,24 @@ "node": ">=20" } }, + "node_modules/cliui/node_modules/string-width": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", + "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^10.3.0", + "get-east-asian-width": "^1.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/cliui/node_modules/wrap-ansi": { "version": "9.0.2", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz", @@ -5709,13 +5670,16 @@ } }, "node_modules/copy-anything": { - "version": "2.0.6", - "resolved": "https://registry.npmjs.org/copy-anything/-/copy-anything-2.0.6.tgz", - "integrity": "sha512-1j20GZTsvKNkc4BY3NpMOM8tt///wY3FpIzozTOFO2ffuZcV61nojHXVKIy3WM+7ADCy5FVhdZYHYDdgTU0yJw==", + "version": "3.0.5", + "resolved": "https://registry.npmjs.org/copy-anything/-/copy-anything-3.0.5.tgz", + "integrity": "sha512-yCEafptTtb4bk7GLEQoM8KVJpxAfdBJYaXyzQEgQQQgYrZiDp8SJmGKlYza6CYjEDNstAdNdKA3UuoULlEbS6w==", "dev": true, "license": "MIT", "dependencies": { - "is-what": "^3.14.1" + "is-what": "^4.1.8" + }, + "engines": { + "node": ">=12.13" }, "funding": { "url": "https://github.com/sponsors/mesqueeb" @@ -6092,14 +6056,11 @@ } }, "node_modules/dompurify": { - "version": "3.3.2", - "resolved": "https://registry.npmjs.org/dompurify/-/dompurify-3.3.2.tgz", - "integrity": "sha512-6obghkliLdmKa56xdbLOpUZ43pAR6xFy1uOrxBaIDjT+yaRuuybLjGS9eVBoSR/UPU5fq3OXClEHLJNGvbxKpQ==", + "version": "3.3.3", + "resolved": 
"https://registry.npmjs.org/dompurify/-/dompurify-3.3.3.tgz", + "integrity": "sha512-Oj6pzI2+RqBfFG+qOaOLbFXLQ90ARpcGG6UePL82bJLtdsa6CYJD7nmiU8MW9nQNOtCHV3lZ/Bzq1X0QYbBZCA==", "license": "(MPL-2.0 OR Apache-2.0)", "peer": true, - "engines": { - "node": ">=20" - }, "optionalDependencies": { "@types/trusted-types": "^2.0.7" } @@ -6148,9 +6109,9 @@ "license": "MIT" }, "node_modules/electron-to-chromium": { - "version": "1.5.286", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.286.tgz", - "integrity": "sha512-9tfDXhJ4RKFNerfjdCcZfufu49vg620741MNs26a9+bhLThdB+plgMeou98CAaHu/WATj2iHOOHTp1hWtABj2A==", + "version": "1.5.313", + "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.5.313.tgz", + "integrity": "sha512-QBMrTWEf00GXZmJyx2lbYD45jpI3TUFnNIzJ5BBc8piGUDwMPa1GV6HJWTZVvY/eiN3fSopl7NRbgGp9sZ9LTA==", "dev": true, "license": "ISC" }, @@ -6484,9 +6445,9 @@ } }, "node_modules/express-rate-limit": { - "version": "8.3.0", - "resolved": "https://registry.npmjs.org/express-rate-limit/-/express-rate-limit-8.3.0.tgz", - "integrity": "sha512-KJzBawY6fB9FiZGdE/0aftepZ91YlaGIrV8vgblRM3J8X+dHx/aiowJWwkx6LIGyuqGiANsjSwwrbb8mifOJ4Q==", + "version": "8.3.1", + "resolved": "https://registry.npmjs.org/express-rate-limit/-/express-rate-limit-8.3.1.tgz", + "integrity": "sha512-D1dKN+cmyPWuvB+G2SREQDzPY1agpBIcTa9sJxOPMCNeH3gwzhqJRDWCXW3gg0y//+LQ/8j52JbMROWyrKdMdw==", "devOptional": true, "license": "MIT", "dependencies": { @@ -6907,18 +6868,18 @@ } }, "node_modules/glob": { - "version": "13.0.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-13.0.3.tgz", - "integrity": "sha512-/g3B0mC+4x724v1TgtBlBtt2hPi/EWptsIAmXUx9Z2rvBYleQcsrmaOzd5LyL50jf/Soi83ZDJmw2+XqvH/EeA==", + "version": "13.0.6", + "resolved": "https://registry.npmjs.org/glob/-/glob-13.0.6.tgz", + "integrity": "sha512-Wjlyrolmm8uDpm/ogGyXZXb1Z+Ca2B8NbJwqBVg0axK9GbBeoS7yGV6vjXnYdGm6X53iehEuxxbyiKp8QmN4Vw==", "devOptional": true, "license": 
"BlueOak-1.0.0", "dependencies": { - "minimatch": "^10.2.0", - "minipass": "^7.1.2", - "path-scurry": "^2.0.0" + "minimatch": "^10.2.2", + "minipass": "^7.1.3", + "path-scurry": "^2.0.2" }, "engines": { - "node": "20 || >=22" + "node": "18 || 20 || >=22" }, "funding": { "url": "https://github.com/sponsors/isaacs" @@ -6945,29 +6906,26 @@ "license": "BSD-2-Clause" }, "node_modules/glob/node_modules/balanced-match": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.2.tgz", - "integrity": "sha512-x0K50QvKQ97fdEz2kPehIerj+YTeptKF9hyYkKf6egnwmMWAkADiO0QCzSp0R5xN8FTZgYaBfSaue46Ej62nMg==", + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-4.0.4.tgz", + "integrity": "sha512-BLrgEcRTwX2o6gGxGOCNyMvGSp35YofuYzw9h1IMTRmKqttAZZVU67bdb9Pr2vUHA8+j3i2tJfjO6C6+4myGTA==", "devOptional": true, "license": "MIT", - "dependencies": { - "jackspeak": "^4.2.3" - }, "engines": { - "node": "20 || >=22" + "node": "18 || 20 || >=22" } }, "node_modules/glob/node_modules/brace-expansion": { - "version": "5.0.2", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.2.tgz", - "integrity": "sha512-Pdk8c9poy+YhOgVWw1JNN22/HcivgKWwpxKq04M/jTmHyCZn12WPJebZxdjSa5TmBqISrUSgNYU3eRORljfCCw==", + "version": "5.0.4", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-5.0.4.tgz", + "integrity": "sha512-h+DEnpVvxmfVefa4jFbCf5HdH5YMDXRsmKflpf1pILZWRFlTbJpxeU55nJl4Smt5HQaGzg1o6RHFPJaOqnmBDg==", "devOptional": true, "license": "MIT", "dependencies": { "balanced-match": "^4.0.2" }, "engines": { - "node": "20 || >=22" + "node": "18 || 20 || >=22" } }, "node_modules/glob/node_modules/minimatch": { @@ -7112,9 +7070,9 @@ } }, "node_modules/hono": { - "version": "4.12.5", - "resolved": "https://registry.npmjs.org/hono/-/hono-4.12.5.tgz", - "integrity": "sha512-3qq+FUBtlTHhtYxbxheZgY8NIFnkkC/MR8u5TTsr7YZ3wixryQ3cCwn3iZbg8p8B88iDBBAYSfZDS75t8MN7Vg==", + "version": 
"4.12.8", + "resolved": "https://registry.npmjs.org/hono/-/hono-4.12.8.tgz", + "integrity": "sha512-VJCEvtrezO1IAR+kqEYnxUOoStaQPGrCmX3j4wDTNOcD1uRPFpGlwQUIW8niPuvHXaTUxeOUl5MMDGrl+tmO9A==", "devOptional": true, "license": "MIT", "peer": true, @@ -7136,9 +7094,9 @@ } }, "node_modules/hosted-git-info/node_modules/lru-cache": { - "version": "11.2.6", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.6.tgz", - "integrity": "sha512-ESL2CrkS/2wTPfuend7Zhkzo2u0daGJ/A2VucJOgQ/C48S/zB8MMeMHSGKYpXhIjbPxfuezITkaBH1wqv00DDQ==", + "version": "11.2.7", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.7.tgz", + "integrity": "sha512-aY/R+aEsRelme17KGQa/1ZSIpLpNYYrhcrepKTZgE+W3WM16YMCaPwOHLHsmopZHELU0Ojin1lPVxKR0MihncA==", "devOptional": true, "license": "BlueOak-1.0.0", "engines": { @@ -7694,13 +7652,16 @@ } }, "node_modules/is-fullwidth-code-point": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-4.0.0.tgz", - "integrity": "sha512-O4L094N2/dZ7xqVdrXhh9r1KODPJpFms8B5sGdJLPy664AgvXsreZUyCQQNItZRDlYug4xStLjNp/sz3HvBowQ==", - "dev": true, + "version": "5.1.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-5.1.0.tgz", + "integrity": "sha512-5XHYaSyiqADb4RnZ1Bdad6cPp8Toise4TzEjcOYDHZkTCbKgiUl7WTUCpNWHuxmDt91wnsZBc9xinNzopv3JMQ==", + "devOptional": true, "license": "MIT", + "dependencies": { + "get-east-asian-width": "^1.3.1" + }, "engines": { - "node": ">=12" + "node": ">=18" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" @@ -7808,16 +7769,22 @@ } }, "node_modules/is-what": { - "version": "3.14.1", - "resolved": "https://registry.npmjs.org/is-what/-/is-what-3.14.1.tgz", - "integrity": "sha512-sNxgpk9793nzSs7bA6JQJGeIuRBQhAaNGG77kzYQgMkrID+lS6SlK07K5LaptscDlSaIgH+GPFzf+d75FVxozA==", + "version": "4.1.16", + "resolved": "https://registry.npmjs.org/is-what/-/is-what-4.1.16.tgz", + "integrity": 
"sha512-ZhMwEosbFJkA0YhFnNDgTM4ZxDRsS6HqTo7qsZM08fehyRYIYa0yHu5R6mgo1n/8MgaPBXiPimPD77baVFYg+A==", "dev": true, - "license": "MIT" + "license": "MIT", + "engines": { + "node": ">=12.13" + }, + "funding": { + "url": "https://github.com/sponsors/mesqueeb" + } }, "node_modules/is-wsl": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-3.1.0.tgz", - "integrity": "sha512-UcVfVfaK4Sc4m7X3dUSoHoozQGBEFeDC+zVo06t98xe8CzHSZZBekNXH+tu0NalHolcJ/QAGqS46Hef7QXBIMw==", + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/is-wsl/-/is-wsl-3.1.1.tgz", + "integrity": "sha512-e6rvdUCiQCAuumZslxRJWR/Doq4VpPR82kqclvcS0efgt430SlGIk05vdCN58+VrzgtIcfNODjozVielycD4Sw==", "dev": true, "license": "MIT", "dependencies": { @@ -7870,22 +7837,6 @@ "node": ">=10" } }, - "node_modules/jackspeak": { - "version": "4.2.3", - "resolved": "https://registry.npmjs.org/jackspeak/-/jackspeak-4.2.3.tgz", - "integrity": "sha512-ykkVRwrYvFm1nb2AJfKKYPr0emF6IiXDYUaFx4Zn9ZuIH7MrzEZ3sD5RlqGXNRpHtvUHJyOnCEFxOlNDtGo7wg==", - "devOptional": true, - "license": "BlueOak-1.0.0", - "dependencies": { - "@isaacs/cliui": "^9.0.0" - }, - "engines": { - "node": "20 || >=22" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/jose": { "version": "4.15.9", "resolved": "https://registry.npmjs.org/jose/-/jose-4.15.9.tgz", @@ -7980,23 +7931,21 @@ "license": "MIT" }, "node_modules/less": { - "version": "4.5.1", - "resolved": "https://registry.npmjs.org/less/-/less-4.5.1.tgz", - "integrity": "sha512-UKgI3/KON4u6ngSsnDADsUERqhZknsVZbnuzlRZXLQCmfC/MDld42fTydUE9B+Mla1AL6SJ/Pp6SlEFi/AVGfw==", + "version": "4.6.4", + "resolved": "https://registry.npmjs.org/less/-/less-4.6.4.tgz", + "integrity": "sha512-OJmO5+HxZLLw0RLzkqaNHzcgEAQG7C0y3aMbwtCzIUFZsLMNNq/1IdAdHEycQ58CwUO3jPTHmoN+tE5I7FQxNg==", "dev": true, - "hasInstallScript": true, "license": "Apache-2.0", "peer": true, "dependencies": { - "copy-anything": "^2.0.1", - "parse-node-version": "^1.0.1", - 
"tslib": "^2.3.0" + "copy-anything": "^3.0.5", + "parse-node-version": "^1.0.1" }, "bin": { "lessc": "bin/lessc" }, "engines": { - "node": ">=14" + "node": ">=18" }, "optionalDependencies": { "errno": "^0.1.1", @@ -8034,9 +7983,9 @@ } }, "node_modules/libphonenumber-js": { - "version": "1.12.38", - "resolved": "https://registry.npmjs.org/libphonenumber-js/-/libphonenumber-js-1.12.38.tgz", - "integrity": "sha512-vwzxmasAy9hZigxtqTbFEwp8ZdZ975TiqVDwj5bKx5sR+zi5ucUQy9mbVTkKM9GzqdLdxux/hTw2nmN5J7POMA==", + "version": "1.12.40", + "resolved": "https://registry.npmjs.org/libphonenumber-js/-/libphonenumber-js-1.12.40.tgz", + "integrity": "sha512-HKGs7GowShNls3Zh+7DTr6wYpPk5jC78l508yQQY3e8ZgJChM3A9JZghmMJZuK+5bogSfuTafpjksGSR3aMIEg==", "license": "MIT", "peer": true }, @@ -8189,6 +8138,19 @@ "dev": true, "license": "MIT" }, + "node_modules/lint-staged/node_modules/is-fullwidth-code-point": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-4.0.0.tgz", + "integrity": "sha512-O4L094N2/dZ7xqVdrXhh9r1KODPJpFms8B5sGdJLPy664AgvXsreZUyCQQNItZRDlYug4xStLjNp/sz3HvBowQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/lint-staged/node_modules/listr2": { "version": "6.6.1", "resolved": "https://registry.npmjs.org/listr2/-/listr2-6.6.1.tgz", @@ -8292,6 +8254,23 @@ "dev": true, "license": "ISC" }, + "node_modules/lint-staged/node_modules/slice-ansi": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-5.0.0.tgz", + "integrity": "sha512-FC+lgizVPfie0kkhqUScwRu1O/lF6NOgJmlCgK+/LYxDCTk8sGelYaHDhFcDN+Sn3Cv+3VSa4Byeo+IMCzpMgQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.0.0", + "is-fullwidth-code-point": "^4.0.0" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/slice-ansi?sponsor=1" + } + }, 
"node_modules/lint-staged/node_modules/string-width": { "version": "5.1.2", "resolved": "https://registry.npmjs.org/string-width/-/string-width-5.1.2.tgz", @@ -8364,6 +8343,24 @@ "devOptional": true, "license": "MIT" }, + "node_modules/listr2/node_modules/string-width": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", + "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^10.3.0", + "get-east-asian-width": "^1.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/listr2/node_modules/wrap-ansi": { "version": "9.0.2", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz", @@ -8469,22 +8466,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/log-update/node_modules/is-fullwidth-code-point": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/is-fullwidth-code-point/-/is-fullwidth-code-point-5.1.0.tgz", - "integrity": "sha512-5XHYaSyiqADb4RnZ1Bdad6cPp8Toise4TzEjcOYDHZkTCbKgiUl7WTUCpNWHuxmDt91wnsZBc9xinNzopv3JMQ==", - "devOptional": true, - "license": "MIT", - "dependencies": { - "get-east-asian-width": "^1.3.1" - }, - "engines": { - "node": ">=18" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/log-update/node_modules/slice-ansi": { "version": "7.1.2", "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-7.1.2.tgz", @@ -8502,6 +8483,24 @@ "url": "https://github.com/chalk/slice-ansi?sponsor=1" } }, + "node_modules/log-update/node_modules/string-width": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", + "integrity": 
"sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^10.3.0", + "get-east-asian-width": "^1.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/log-update/node_modules/wrap-ansi": { "version": "9.0.2", "resolved": "https://registry.npmjs.org/wrap-ansi/-/wrap-ansi-9.0.2.tgz", @@ -8803,11 +8802,11 @@ } }, "node_modules/minipass": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.2.tgz", - "integrity": "sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==", + "version": "7.1.3", + "resolved": "https://registry.npmjs.org/minipass/-/minipass-7.1.3.tgz", + "integrity": "sha512-tEBHqDnIoM/1rXME1zgka9g6Q2lcoCkxHLuc7ODJ5BxbP5d4c2Z5cGgtXAku59200Cx7diuHTOYfSBD8n6mm8A==", "devOptional": true, - "license": "ISC", + "license": "BlueOak-1.0.0", "engines": { "node": ">=16 || 14 >=14.17" } @@ -8975,9 +8974,9 @@ "license": "MIT" }, "node_modules/msgpackr": { - "version": "1.11.8", - "resolved": "https://registry.npmjs.org/msgpackr/-/msgpackr-1.11.8.tgz", - "integrity": "sha512-bC4UGzHhVvgDNS7kn9tV8fAucIYUBuGojcaLiz7v+P63Lmtm0Xeji8B/8tYKddALXxJLpwIeBmUN3u64C4YkRA==", + "version": "1.11.9", + "resolved": "https://registry.npmjs.org/msgpackr/-/msgpackr-1.11.9.tgz", + "integrity": "sha512-FkoAAyyA6HM8wL882EcEyFZ9s7hVADSwG9xrVx3dxxNQAtgADTrJoEWivID82Iv1zWDsv/OtbrrcZAzGzOMdNw==", "dev": true, "license": "MIT", "optional": true, @@ -9091,9 +9090,9 @@ } }, "node_modules/needle": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/needle/-/needle-3.3.1.tgz", - "integrity": "sha512-6k0YULvhpw+RoLNiQCRKOl09Rv1dPLr8hHnVjHqdolKwDrdNyk+Hmrthi4lIGPPz3r39dLx0hsF5s40sZ3Us4Q==", + "version": "3.5.0", + "resolved": "https://registry.npmjs.org/needle/-/needle-3.5.0.tgz", + 
"integrity": "sha512-jaQyPKKk2YokHrEg+vFDYxXIHTCBgiZwSHOoVx/8V3GIBS8/VN6NdVRmg8q1ERtPkMvmOvebsgga4sAj5hls/w==", "dev": true, "license": "MIT", "optional": true, @@ -9427,9 +9426,9 @@ } }, "node_modules/node-releases": { - "version": "2.0.27", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.27.tgz", - "integrity": "sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==", + "version": "2.0.36", + "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-2.0.36.tgz", + "integrity": "sha512-TdC8FSgHz8Mwtw9g5L4gR/Sh9XhSP/0DEkQxfEFXOpiul5IiHgHan2VhYYb6agDSfp4KuvltmGApc8HMgUrIkA==", "dev": true, "license": "MIT" }, @@ -9725,22 +9724,6 @@ "url": "https://github.com/sponsors/sindresorhus" } }, - "node_modules/ora/node_modules/string-width": { - "version": "8.2.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-8.2.0.tgz", - "integrity": "sha512-6hJPQ8N0V0P3SNmP6h2J99RLuzrWz2gvT7VnK5tKvrNqJoyS9W4/Fb8mo31UiPvy00z7DQXkP2hnKBVav76thw==", - "license": "MIT", - "dependencies": { - "get-east-asian-width": "^1.5.0", - "strip-ansi": "^7.1.2" - }, - "engines": { - "node": ">=20" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, "node_modules/ordered-binary": { "version": "1.6.1", "resolved": "https://registry.npmjs.org/ordered-binary/-/ordered-binary-1.6.1.tgz", @@ -10017,9 +10000,9 @@ "license": "MIT" }, "node_modules/path-scurry": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-2.0.1.tgz", - "integrity": "sha512-oWyT4gICAu+kaA7QWk/jvCHWarMKNs6pXOGWKDTr7cw4IGcUbW+PeTfbaQiLGheFRpjo6O9J0PmyMfQPjH71oA==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/path-scurry/-/path-scurry-2.0.2.tgz", + "integrity": "sha512-3O/iVVsJAPsOnpwWIeD+d6z/7PmqApyQePUtCndjatj/9I5LylHvt5qluFaBT3I5h3r1ejfR056c+FCv+NnNXg==", "devOptional": true, "license": "BlueOak-1.0.0", "dependencies": { @@ -10027,16 +10010,16 @@ 
"minipass": "^7.1.2" }, "engines": { - "node": "20 || >=22" + "node": "18 || 20 || >=22" }, "funding": { "url": "https://github.com/sponsors/isaacs" } }, "node_modules/path-scurry/node_modules/lru-cache": { - "version": "11.2.6", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.6.tgz", - "integrity": "sha512-ESL2CrkS/2wTPfuend7Zhkzo2u0daGJ/A2VucJOgQ/C48S/zB8MMeMHSGKYpXhIjbPxfuezITkaBH1wqv00DDQ==", + "version": "11.2.7", + "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-11.2.7.tgz", + "integrity": "sha512-aY/R+aEsRelme17KGQa/1ZSIpLpNYYrhcrepKTZgE+W3WM16YMCaPwOHLHsmopZHELU0Ojin1lPVxKR0MihncA==", "devOptional": true, "license": "BlueOak-1.0.0", "engines": { @@ -10223,9 +10206,9 @@ } }, "node_modules/postcss": { - "version": "8.5.6", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.6.tgz", - "integrity": "sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==", + "version": "8.5.8", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.5.8.tgz", + "integrity": "sha512-OW/rX8O/jXnm82Ey1k44pObPtdblfiuWnrd8X7GJ7emImCOstunGbXUpp7HdBrFQX6rJzn3sPT397Wp5aCwCHg==", "dev": true, "funding": [ { @@ -10545,9 +10528,9 @@ } }, "node_modules/qs": { - "version": "6.14.2", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.2.tgz", - "integrity": "sha512-V/yCWTTF7VJ9hIh18Ugr2zhJMP01MY7c5kh4J870L7imm6/DIzBsNLTXzMwUA3yZ5b/KBqLx8Kp3uRvd7xSe3Q==", + "version": "6.15.0", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.15.0.tgz", + "integrity": "sha512-mAZTtNCeetKMH+pSjrb76NAM8V9a05I9aBZOHztWy/UqcJdQYNsf59vrRKWnojAT9Y+GbIvoTBC++CPHqpDBhQ==", "devOptional": true, "license": "BSD-3-Clause", "dependencies": { @@ -10675,13 +10658,13 @@ "license": "MIT" }, "node_modules/resolve": { - "version": "1.22.10", - "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.10.tgz", - "integrity": "sha512-NPRy+/ncIMeDlTAsuqwKIiferiawhefFJtkNSW0qZJEqMEb+qBt/77B/jGeeek+F0uOeN05CDa6HXbbIgtVX4w==", + 
"version": "1.22.11", + "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.22.11.tgz", + "integrity": "sha512-RfqAvLnMl313r7c9oclB1HhUEAezcpLjz95wFH4LVuhk9JF/r22qmVP9AMmOU4vMX7Q8pN8jwNg/CSpdFnMjTQ==", "dev": true, "license": "MIT", "dependencies": { - "is-core-module": "^2.16.0", + "is-core-module": "^2.16.1", "path-parse": "^1.0.7", "supports-preserve-symlinks-flag": "^1.0.0" }, @@ -10765,13 +10748,13 @@ "license": "MIT" }, "node_modules/rimraf": { - "version": "6.1.2", - "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-6.1.2.tgz", - "integrity": "sha512-cFCkPslJv7BAXJsYlK1dZsbP8/ZNLkCAQ0bi1hf5EKX2QHegmDFEFA6QhuYJlk7UDdc+02JjO80YSOrWPpw06g==", + "version": "6.1.3", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-6.1.3.tgz", + "integrity": "sha512-LKg+Cr2ZF61fkcaK1UdkH2yEBBKnYjTyWzTJT6KNPcSPaiT7HSdhtMXQuN5wkTX0Xu72KQ1l8S42rlmexS2hSA==", "dev": true, "license": "BlueOak-1.0.0", "dependencies": { - "glob": "^13.0.0", + "glob": "^13.0.3", "package-json-from-dist": "^1.0.1" }, "bin": { @@ -10876,12 +10859,15 @@ } }, "node_modules/rollup-plugin-dts": { - "version": "6.3.0", - "resolved": "https://registry.npmjs.org/rollup-plugin-dts/-/rollup-plugin-dts-6.3.0.tgz", - "integrity": "sha512-d0UrqxYd8KyZ6i3M2Nx7WOMy708qsV/7fTHMHxCMCBOAe3V/U7OMPu5GkX8hC+cmkHhzGnfeYongl1IgiooddA==", + "version": "6.4.0", + "resolved": "https://registry.npmjs.org/rollup-plugin-dts/-/rollup-plugin-dts-6.4.0.tgz", + "integrity": "sha512-2i00A5UoPCoDecLEs13Eu105QegSGfrbp1sDeUj/54LKGmv6XFHDxWKC6Wsb4BobGUWYVCWWjmjAc8bXXbXH/Q==", "dev": true, "license": "LGPL-3.0-only", "dependencies": { + "@jridgewell/remapping": "^2.3.5", + "@jridgewell/sourcemap-codec": "^1.5.5", + "convert-source-map": "^2.0.0", "magic-string": "^0.30.21" }, "engines": { @@ -10891,13 +10877,20 @@ "url": "https://github.com/sponsors/Swatinem" }, "optionalDependencies": { - "@babel/code-frame": "^7.27.1" + "@babel/code-frame": "^7.29.0" }, "peerDependencies": { "rollup": "^3.29.4 || ^4", - "typescript": 
"^4.5 || ^5.0" + "typescript": "^4.5 || ^5.0 || ^6.0" } }, + "node_modules/rollup-plugin-dts/node_modules/convert-source-map": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/convert-source-map/-/convert-source-map-2.0.0.tgz", + "integrity": "sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==", + "dev": true, + "license": "MIT" + }, "node_modules/rollup/node_modules/@types/estree": { "version": "1.0.8", "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.8.tgz", @@ -11352,17 +11345,17 @@ } }, "node_modules/slice-ansi": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-5.0.0.tgz", - "integrity": "sha512-FC+lgizVPfie0kkhqUScwRu1O/lF6NOgJmlCgK+/LYxDCTk8sGelYaHDhFcDN+Sn3Cv+3VSa4Byeo+IMCzpMgQ==", - "dev": true, + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/slice-ansi/-/slice-ansi-8.0.0.tgz", + "integrity": "sha512-stxByr12oeeOyY2BlviTNQlYV5xOj47GirPr4yA1hE9JCtxfQN0+tVbkxwCtYDQWhEKWFHsEK48ORg5jrouCAg==", + "devOptional": true, "license": "MIT", "dependencies": { - "ansi-styles": "^6.0.0", - "is-fullwidth-code-point": "^4.0.0" + "ansi-styles": "^6.2.3", + "is-fullwidth-code-point": "^5.1.0" }, "engines": { - "node": ">=12" + "node": ">=20" }, "funding": { "url": "https://github.com/chalk/slice-ansi?sponsor=1" @@ -11536,30 +11529,28 @@ } }, "node_modules/string-width": { - "version": "7.2.0", - "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", - "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", - "devOptional": true, + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-8.2.0.tgz", + "integrity": "sha512-6hJPQ8N0V0P3SNmP6h2J99RLuzrWz2gvT7VnK5tKvrNqJoyS9W4/Fb8mo31UiPvy00z7DQXkP2hnKBVav76thw==", "license": "MIT", "dependencies": { - "emoji-regex": "^10.3.0", - "get-east-asian-width": "^1.0.0", - "strip-ansi": 
"^7.1.0" + "get-east-asian-width": "^1.5.0", + "strip-ansi": "^7.1.2" }, "engines": { - "node": ">=18" + "node": ">=20" }, "funding": { "url": "https://github.com/sponsors/sindresorhus" } }, "node_modules/strip-ansi": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.1.2.tgz", - "integrity": "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA==", + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-7.2.0.tgz", + "integrity": "sha512-yDPMNjp4WyfYBkHnjIRLfca1i6KMyGCtsVgoKe/z1+6vukgaENdgGBZt+ZmKPc4gavvEZ5OgHfHdrazhgNyG7w==", "license": "MIT", "dependencies": { - "ansi-regex": "^6.0.1" + "ansi-regex": "^6.2.2" }, "engines": { "node": ">=12" @@ -11608,9 +11599,9 @@ } }, "node_modules/tar": { - "version": "7.5.10", - "resolved": "https://registry.npmjs.org/tar/-/tar-7.5.10.tgz", - "integrity": "sha512-8mOPs1//5q/rlkNSPcCegA6hiHJYDmSLEI8aMH/CdSQJNWztHC9WHNam5zdQlfpTwB9Xp7IBEsHfV5LKMJGVAw==", + "version": "7.5.11", + "resolved": "https://registry.npmjs.org/tar/-/tar-7.5.11.tgz", + "integrity": "sha512-ChjMH33/KetonMTAtpYdgUFr0tbz69Fp2v7zWxQfYZX4g5ZN2nOBXm1R2xyA+lMIKrLKIoKAwFj93jE/avX9cQ==", "devOptional": true, "license": "BlueOak-1.0.0", "dependencies": { @@ -11652,21 +11643,21 @@ } }, "node_modules/tldts": { - "version": "7.0.23", - "resolved": "https://registry.npmjs.org/tldts/-/tldts-7.0.23.tgz", - "integrity": "sha512-ASdhgQIBSay0R/eXggAkQ53G4nTJqTXqC2kbaBbdDwM7SkjyZyO0OaaN1/FH7U/yCeqOHDwFO5j8+Os/IS1dXw==", + "version": "7.0.25", + "resolved": "https://registry.npmjs.org/tldts/-/tldts-7.0.25.tgz", + "integrity": "sha512-keinCnPbwXEUG3ilrWQZU+CqcTTzHq9m2HhoUP2l7Xmi8l1LuijAXLpAJ5zRW+ifKTNscs4NdCkfkDCBYm352w==", "license": "MIT", "dependencies": { - "tldts-core": "^7.0.23" + "tldts-core": "^7.0.25" }, "bin": { "tldts": "bin/cli.js" } }, "node_modules/tldts-core": { - "version": "7.0.23", - "resolved": 
"https://registry.npmjs.org/tldts-core/-/tldts-core-7.0.23.tgz", - "integrity": "sha512-0g9vrtDQLrNIiCj22HSe9d4mLVG3g5ph5DZ8zCKBr4OtrspmNB6ss7hVyzArAeE88ceZocIEGkyW1Ime7fxPtQ==", + "version": "7.0.25", + "resolved": "https://registry.npmjs.org/tldts-core/-/tldts-core-7.0.25.tgz", + "integrity": "sha512-ZjCZK0rppSBu7rjHYDYsEaMOIbbT+nWF57hKkv4IUmZWBNrBWBOjIElc0mKRgLM8bm7x/BBlof6t2gi/Oq/Asw==", "license": "MIT" }, "node_modules/to-buffer": { @@ -12563,6 +12554,24 @@ "node": "^20.19.0 || ^22.12.0 || >=23" } }, + "node_modules/yargs/node_modules/string-width": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/string-width/-/string-width-7.2.0.tgz", + "integrity": "sha512-tsaTIkKW9b4N+AEj+SVA+WhJzV7/zMhcSu78mLKWSk7cXMOSHsBKFWUs0fWwq8QyK3MgJBQRX6Gbi4kYbdvGkQ==", + "devOptional": true, + "license": "MIT", + "dependencies": { + "emoji-regex": "^10.3.0", + "get-east-asian-width": "^1.0.0", + "strip-ansi": "^7.1.0" + }, + "engines": { + "node": ">=18" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, "node_modules/yn": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", diff --git a/web/package.json b/web/package.json index a4f9635ca..8aa0e0799 100644 --- a/web/package.json +++ b/web/package.json @@ -1,6 +1,6 @@ { "name": "startos-ui", - "version": "0.4.0-alpha.20", + "version": "0.4.0-alpha.21", "author": "Start9 Labs, Inc", "homepage": "https://start9.com/", "license": "MIT", diff --git a/web/projects/setup-wizard/src/app/app.component.ts b/web/projects/setup-wizard/src/app/app.component.ts index 7a9a3ecee..839a68e74 100644 --- a/web/projects/setup-wizard/src/app/app.component.ts +++ b/web/projects/setup-wizard/src/app/app.component.ts @@ -42,6 +42,7 @@ export class AppComponent implements OnInit { this.stateService.dataDriveGuid = status.guid } this.stateService.attach = status.attach + this.stateService.mokEnrolled = status.mokEnrolled await this.router.navigate(['/language']) break diff --git 
a/web/projects/setup-wizard/src/app/components/documentation.component.ts b/web/projects/setup-wizard/src/app/components/documentation.component.ts index 07d1b4a12..f8ccc25f4 100644 --- a/web/projects/setup-wizard/src/app/components/documentation.component.ts +++ b/web/projects/setup-wizard/src/app/components/documentation.component.ts @@ -46,7 +46,7 @@ import { DocsLinkDirective } from '@start9labs/shared' Download your server's Root CA and follow instructions diff --git a/web/projects/setup-wizard/src/app/components/mok-enrollment.dialog.ts b/web/projects/setup-wizard/src/app/components/mok-enrollment.dialog.ts new file mode 100644 index 000000000..2bb3d1ccc --- /dev/null +++ b/web/projects/setup-wizard/src/app/components/mok-enrollment.dialog.ts @@ -0,0 +1,204 @@ +import { Component, inject } from '@angular/core' +import { i18nPipe } from '@start9labs/shared' +import { TuiButton, TuiDialogContext, TuiIcon } from '@taiga-ui/core' +import { injectContext } from '@taiga-ui/polymorpheus' +import { StateService } from '../services/state.service' + +@Component({ + standalone: true, + imports: [TuiButton, TuiIcon, i18nPipe], + template: ` + @if (!stateService.kiosk) { +
+
+
+
+
+
+
+
+
+

+ {{ + 'Connect a monitor and keyboard to your server before rebooting.' + | i18n + }} +

+ } @else { +
+ +
+

+ {{ 'Keep your monitor connected for the next reboot.' | i18n }} +

+ } + +
+

+ {{ + 'Your system has Secure Boot enabled, which requires all kernel modules to be signed with a trusted key. Some hardware drivers — such as those for NVIDIA GPUs — are not signed by the default distribution key. Enrolling the StartOS signing key allows your firmware to trust these modules so your hardware can be fully utilized.' + | i18n + }} +

+

+ {{ + 'On the next boot, a blue screen (MokManager) will appear. You will have 10 seconds to select "Enroll MOK" before it dismisses.' + | i18n + }} +

+

+ {{ + 'If you miss the window, simply reboot to try again. The blue screen will appear on every boot until the key is enrolled.' + | i18n + }} +

+

+ {{ 'After clicking "Enroll MOK":' | i18n }} +

+
    +
  1. Click "Continue"
  2. +
  3. + {{ 'When prompted, enter your StartOS password' | i18n }} +
  4. +
  5. Click "Reboot"
  6. +
+
+ +
+ +
+ `, + styles: ` + :host { + display: flex; + flex-direction: column; + align-items: center; + text-align: center; + } + + .icon-container { + margin-bottom: 1rem; + } + + .monitor-icon { + width: 3rem; + height: 3rem; + color: var(--tui-status-info); + } + + .animation-container { + position: relative; + width: 160px; + height: 69px; + } + + .port { + position: absolute; + left: 20px; + top: 50%; + transform: translateY(-50%); + width: 28px; + height: 18px; + background: var(--tui-background-neutral-1); + border: 2px solid var(--tui-border-normal); + border-radius: 2px; + } + + .port-inner { + position: absolute; + top: 3px; + left: 3px; + right: 3px; + bottom: 3px; + background: var(--tui-background-neutral-2); + border-radius: 1px; + } + + .cable { + position: absolute; + top: 50%; + transform: translateY(-50%); + display: flex; + align-items: center; + animation: slide-in 2s ease-in-out 0.5s infinite; + left: 130px; + } + + .cable-connector { + width: 18px; + height: 12px; + background: var(--tui-text-secondary); + border-radius: 1px; + } + + .cable-body { + width: 50px; + height: 6px; + background: var(--tui-text-tertiary); + border-radius: 0 3px 3px 0; + } + + @keyframes slide-in { + 0% { + left: 130px; + opacity: 0; + } + 5% { + left: 130px; + opacity: 1; + } + 60% { + left: 32px; + opacity: 1; + } + 80% { + left: 32px; + opacity: 1; + } + 100% { + left: 32px; + opacity: 0; + } + } + + .mok-info { + text-align: left; + margin-top: 0.5rem; + + p { + margin: 0 0 0.75rem; + color: var(--tui-text-secondary); + } + + .steps-label { + margin-bottom: 0.25rem; + font-weight: 500; + color: var(--tui-text-primary); + } + + ol { + margin: 0 0 1rem; + padding-left: 1.5rem; + + li { + margin-bottom: 0.25rem; + } + } + } + + p { + margin: 0 0 1rem; + } + + footer { + display: flex; + justify-content: center; + } + `, +}) +export class MokEnrollmentDialog { + protected readonly context = injectContext>() + readonly stateService = inject(StateService) +} diff --git 
a/web/projects/setup-wizard/src/app/components/preserve-overwrite.dialog.ts b/web/projects/setup-wizard/src/app/components/preserve-overwrite.dialog.ts index 0cd4dace2..180120b30 100644 --- a/web/projects/setup-wizard/src/app/components/preserve-overwrite.dialog.ts +++ b/web/projects/setup-wizard/src/app/components/preserve-overwrite.dialog.ts @@ -1,12 +1,30 @@ import { Component } from '@angular/core' +import { FormsModule } from '@angular/forms' import { i18nPipe } from '@start9labs/shared' -import { TuiButton, TuiTitle } from '@taiga-ui/core' -import { TuiDialogContext } from '@taiga-ui/core' +import { + TuiButton, + TuiCheckbox, + TuiDialogContext, + TuiNotification, + TuiTitle, +} from '@taiga-ui/core' import { TuiHeader } from '@taiga-ui/layout' import { injectContext, PolymorpheusComponent } from '@taiga-ui/polymorpheus' +export interface PreserveOverwriteData { + isExt4: boolean +} + @Component({ - imports: [TuiButton, TuiHeader, TuiTitle, i18nPipe], + imports: [ + FormsModule, + TuiButton, + TuiCheckbox, + TuiHeader, + TuiNotification, + TuiTitle, + i18nPipe, + ], template: `
@@ -24,6 +42,18 @@ import { injectContext, PolymorpheusComponent } from '@taiga-ui/polymorpheus' {{ 'to discard' | i18n }} + @if (context.data.isExt4) { +

+ {{ + 'This drive uses ext4 and will be automatically converted to btrfs. A backup is strongly recommended before proceeding.' + | i18n + }} +

+ + }
@@ -24,7 +24,7 @@ import { StateService } from '../services/state.service' @if (!isFresh) {
`, @@ -147,7 +147,7 @@ export default class PasswordPage { Validators.maxLength(64), ]), confirm: new FormControl(''), - name: new FormControl('', [Validators.required]), + name: new FormControl('', this.isFresh ? [Validators.required] : []), }) readonly validator = (value: string) => (control: AbstractControl) => diff --git a/web/projects/setup-wizard/src/app/pages/success.page.ts b/web/projects/setup-wizard/src/app/pages/success.page.ts index 010c8bb05..f21d5f4ba 100644 --- a/web/projects/setup-wizard/src/app/pages/success.page.ts +++ b/web/projects/setup-wizard/src/app/pages/success.page.ts @@ -1,10 +1,10 @@ import { AfterViewInit, Component, + DOCUMENT, ElementRef, inject, ViewChild, - DOCUMENT, } from '@angular/core' import { DialogService, @@ -12,36 +12,37 @@ import { ErrorService, i18nPipe, } from '@start9labs/shared' -import { TuiIcon, TuiLoader, TuiTitle, TuiCell } from '@taiga-ui/core' +import { T } from '@start9labs/start-sdk' +import { TuiCell, TuiIcon, TuiLoader, TuiTitle } from '@taiga-ui/core' import { TuiAvatar } from '@taiga-ui/kit' import { TuiCardLarge, TuiHeader } from '@taiga-ui/layout' -import { ApiService } from '../services/api.service' -import { StateService } from '../services/state.service' +import { PolymorpheusComponent } from '@taiga-ui/polymorpheus' import { DocumentationComponent } from '../components/documentation.component' import { MatrixComponent } from '../components/matrix.component' +import { MokEnrollmentDialog } from '../components/mok-enrollment.dialog' import { RemoveMediaDialog } from '../components/remove-media.dialog' -import { T } from '@start9labs/start-sdk' -import { PolymorpheusComponent } from '@taiga-ui/polymorpheus' +import { ApiService } from '../services/api.service' +import { StateService } from '../services/state.service' @Component({ template: `
-

- +
+

{{ 'Setup Complete!' | i18n }} - - @if (!stateService.kiosk) { - - {{ - 'http://start.local was for setup only. It will no longer work.' - | i18n - }} - - } -

+

+
+ @if (!stateService.kiosk) { +

+ {{ + 'http://start.local was for setup only. It will no longer work.' + | i18n + }} +

+ }
@if (!result) { @@ -49,57 +50,85 @@ import { PolymorpheusComponent } from '@taiga-ui/polymorpheus' } @else { @if (!stateService.kiosk) { - } - + @if (result.needsRestart) { + + @if (stateService.mokEnrolled) { + + } + + } @else if (stateService.kiosk) { + } @@ -126,42 +165,18 @@ import { PolymorpheusComponent } from '@taiga-ui/polymorpheus' (click)="openLocalAddress()" > -
- {{ 'Open Local Address' | i18n }} -
{{ lanAddress }}
-
+ + {{ 'Open Local Address' | i18n }} + {{ lanAddress }} +
- `, styles: ` :host { @@ -82,12 +67,6 @@ import { UpdateService } from 'src/app/services/update.service' } } - button { - width: 100%; - border-radius: 0; - justify-content: flex-start; - } - :host-context(tui-root._mobile) { position: absolute; top: 3.5rem; @@ -109,12 +88,7 @@ import { UpdateService } from 'src/app/services/update.service' }, }) export class Nav { - private readonly service = inject(AuthService) - private readonly router = inject(Router) protected readonly sidebars = inject(SidebarService) - protected readonly api = inject(ApiService) - private readonly loader = inject(TuiNotificationMiddleService) - private readonly errorService = inject(ErrorService) protected readonly update = inject(UpdateService) protected readonly routes = [ @@ -134,18 +108,4 @@ export class Nav { link: 'port-forwards', }, ] as const - - protected async logout() { - const loader = this.loader.open('').subscribe() - try { - await this.api.logout() - this.service.authenticated.set(false) - this.router.navigate(['.']) - } catch (e: any) { - console.error(e) - this.errorService.handleError(e) - } finally { - loader.unsubscribe() - } - } } diff --git a/web/projects/start-tunnel/src/app/routes/home/components/placeholder.ts b/web/projects/start-tunnel/src/app/routes/home/components/placeholder.ts new file mode 100644 index 000000000..ee8326c6a --- /dev/null +++ b/web/projects/start-tunnel/src/app/routes/home/components/placeholder.ts @@ -0,0 +1,35 @@ +import { ChangeDetectionStrategy, Component, input } from '@angular/core' +import { TuiIcon } from '@taiga-ui/core' + +@Component({ + selector: 'app-placeholder', + template: ` + @if (icon(); as icon) { + + } + + `, + styles: ` + :host { + display: flex; + flex: 1; + flex-direction: column; + gap: 0.5rem; + align-items: center; + justify-content: center; + text-align: center; + padding: 1rem; + font: var(--tui-typography-body-l); + color: var(--tui-text-tertiary); + + tui-icon { + font-size: 2.5rem; + } + } + `, + changeDetection: 
ChangeDetectionStrategy.OnPush, + imports: [TuiIcon], +}) +export class PlaceholderComponent { + readonly icon = input() +} diff --git a/web/projects/start-tunnel/src/app/routes/home/routes/devices/add.ts b/web/projects/start-tunnel/src/app/routes/home/routes/devices/add.ts index 4ae171590..790ea6f9c 100644 --- a/web/projects/start-tunnel/src/app/routes/home/routes/devices/add.ts +++ b/web/projects/start-tunnel/src/app/routes/home/routes/devices/add.ts @@ -4,9 +4,8 @@ import { ReactiveFormsModule, Validators, } from '@angular/forms' -import { ErrorService } from '@start9labs/shared' -import { utils } from '@start9labs/start-sdk' import { WA_IS_MOBILE } from '@ng-web-apis/platform' +import { ErrorService } from '@start9labs/shared' import { TuiResponsiveDialogService } from '@taiga-ui/addon-mobile' import { TuiAnimated, @@ -20,7 +19,7 @@ import { TuiNotificationMiddleService, TuiSelect, } from '@taiga-ui/kit' -import { TuiForm, TuiElasticContainer } from '@taiga-ui/layout' +import { TuiElasticContainer, TuiForm } from '@taiga-ui/layout' import { injectContext, PolymorpheusComponent } from '@taiga-ui/polymorpheus' import { ApiService } from 'src/app/services/api/api.service' @@ -107,15 +106,23 @@ export class DevicesAdd { protected readonly context = injectContext>() + private readonly autoSubnet = + !this.context.data.device && this.context.data.subnets().length === 1 + ? this.context.data.subnets().at(0) + : undefined + protected readonly form = inject(NonNullableFormBuilder).group({ name: [this.context.data.device?.name || '', Validators.required], subnet: [ - this.context.data.device?.subnet, + this.context.data.device?.subnet ?? this.autoSubnet, [Validators.required, subnetValidator], ], ip: [ - this.context.data.device?.ip || '', - [Validators.required, Validators.pattern(utils.Patterns.ipv4.regex)], + this.context.data.device?.ip || + (this.autoSubnet ? getIp(this.autoSubnet) : ''), + this.autoSubnet + ? 
[Validators.required, ipInSubnetValidator(this.autoSubnet.range)] + : [], ], }) @@ -161,7 +168,9 @@ export class DevicesAdd { ip, }) - this.dialogs.open(DEVICES_CONFIG, { data: config }).subscribe() + this.dialogs + .open(DEVICES_CONFIG, { data: config, closable: false }) + .subscribe() } } catch (e: any) { console.error(e) diff --git a/web/projects/start-tunnel/src/app/routes/home/routes/devices/index.ts b/web/projects/start-tunnel/src/app/routes/home/routes/devices/index.ts index 6fbdb58c8..6867d00f7 100644 --- a/web/projects/start-tunnel/src/app/routes/home/routes/devices/index.ts +++ b/web/projects/start-tunnel/src/app/routes/home/routes/devices/index.ts @@ -3,24 +3,28 @@ import { Component, computed, inject, - Signal, } from '@angular/core' import { toSignal } from '@angular/core/rxjs-interop' import { ErrorService } from '@start9labs/shared' import { TuiResponsiveDialogService } from '@taiga-ui/addon-mobile' import { TuiButton, TuiDataList, TuiDropdown } from '@taiga-ui/core' -import { TUI_CONFIRM, TuiNotificationMiddleService } from '@taiga-ui/kit' +import { + TUI_CONFIRM, + TuiNotificationMiddleService, + TuiSkeleton, +} from '@taiga-ui/kit' import { PatchDB } from 'patch-db-client' import { filter, map } from 'rxjs' +import { PlaceholderComponent } from 'src/app/routes/home/components/placeholder' import { ApiService } from 'src/app/services/api/api.service' import { TunnelData } from 'src/app/services/patch-db/data-model' import { DEVICES_ADD } from './add' import { DEVICES_CONFIG } from './config' -import { MappedDevice, MappedSubnet } from './utils' +import { MappedDevice } from './utils' @Component({ template: ` - +
@@ -49,7 +53,11 @@ import { MappedDevice, MappedSubnet } from './utils' iconStart="@tui.ellipsis-vertical" > Actions - + } @empty { -
No devices
+ + + }
Name
+ No devices +
`, changeDetection: ChangeDetectionStrategy.OnPush, - imports: [TuiButton, TuiDropdown, TuiDataList], + imports: [ + TuiButton, + TuiDropdown, + TuiDataList, + PlaceholderComponent, + TuiSkeleton, + ], }) export default class Devices { private readonly dialogs = inject(TuiResponsiveDialogService) @@ -90,7 +108,7 @@ export default class Devices { private readonly loading = inject(TuiNotificationMiddleService) private readonly errorService = inject(ErrorService) - protected readonly subnets: Signal = toSignal( + protected readonly subnets = toSignal( inject>(PatchDB) .watch$('wg', 'subnets') .pipe( @@ -102,11 +120,11 @@ export default class Devices { })), ), ), - { initialValue: [] }, + { initialValue: null }, ) protected readonly devices = computed(() => - this.subnets().flatMap(subnet => + this.subnets()?.flatMap(subnet => Object.entries(subnet.clients).map(([ip, { name }]) => ({ subnet: { name: subnet.name, @@ -141,7 +159,7 @@ export default class Devices { try { const data = await this.api.showDeviceConfig({ subnet: subnet.range, ip }) - this.dialogs.open(DEVICES_CONFIG, { data }).subscribe() + this.dialogs.open(DEVICES_CONFIG, { data, closable: false }).subscribe() } catch (e: any) { console.log(e) this.errorService.handleError(e) diff --git a/web/projects/start-tunnel/src/app/routes/home/routes/devices/utils.ts b/web/projects/start-tunnel/src/app/routes/home/routes/devices/utils.ts index 8d3bf45cd..4ef719191 100644 --- a/web/projects/start-tunnel/src/app/routes/home/routes/devices/utils.ts +++ b/web/projects/start-tunnel/src/app/routes/home/routes/devices/utils.ts @@ -1,8 +1,7 @@ import { Signal } from '@angular/core' import { AbstractControl } from '@angular/forms' -import { utils } from '@start9labs/start-sdk' +import { T, utils } from '@start9labs/start-sdk' import { IpNet } from '@start9labs/start-sdk/util' -import { WgServer } from 'src/app/services/patch-db/data-model' export interface MappedDevice { readonly subnet: { @@ -16,7 +15,7 @@ export interface 
MappedDevice { export interface MappedSubnet { readonly range: string readonly name: string - readonly clients: WgServer['subnets']['']['clients'] + readonly clients: T.Tunnel.WgSubnetClients } export interface DeviceData { diff --git a/web/projects/start-tunnel/src/app/routes/home/routes/port-forwards/add.ts b/web/projects/start-tunnel/src/app/routes/home/routes/port-forwards/add.ts index 6e8125ae8..21d203947 100644 --- a/web/projects/start-tunnel/src/app/routes/home/routes/port-forwards/add.ts +++ b/web/projects/start-tunnel/src/app/routes/home/routes/port-forwards/add.ts @@ -4,18 +4,19 @@ import { ReactiveFormsModule, Validators, } from '@angular/forms' -import { ErrorService } from '@start9labs/shared' import { WA_IS_MOBILE } from '@ng-web-apis/platform' +import { ErrorService } from '@start9labs/shared' import { tuiMarkControlAsTouchedAndValidate, TuiValueChanges, } from '@taiga-ui/cdk' import { TuiButton, + TuiCheckbox, TuiDialogContext, TuiError, + TuiInput, TuiNumberFormat, - TuiCheckbox, } from '@taiga-ui/core' import { TuiChevron, @@ -24,7 +25,7 @@ import { TuiNotificationMiddleService, TuiSelect, } from '@taiga-ui/kit' -import { TuiForm, TuiElasticContainer } from '@taiga-ui/layout' +import { TuiElasticContainer, TuiForm } from '@taiga-ui/layout' import { injectContext, PolymorpheusComponent } from '@taiga-ui/polymorpheus' import { ApiService } from 'src/app/services/api/api.service' @@ -33,6 +34,11 @@ import { MappedDevice, PortForwardsData } from './utils' @Component({ template: `
+ + + + + @if (mobile) { @@ -124,6 +130,7 @@ import { MappedDevice, PortForwardsData } from './utils' TuiCheckbox, TuiValueChanges, TuiElasticContainer, + TuiInput, ], }) export class PortForwardsAdd { @@ -138,7 +145,13 @@ export class PortForwardsAdd { injectContext>() protected readonly form = inject(NonNullableFormBuilder).group({ - externalip: ['', Validators.required], + label: ['', Validators.required], + externalip: [ + this.context.data.ips().length === 1 + ? (this.context.data.ips().at(0) ?? '') + : '', + Validators.required, + ], externalport: [null as number | null, Validators.required], device: [null as MappedDevice | null, Validators.required], internalport: [null as number | null, Validators.required], @@ -162,19 +175,21 @@ export class PortForwardsAdd { const loader = this.loading.open('').subscribe() - const { externalip, externalport, device, internalport, also80 } = + const { label, externalip, externalport, device, internalport, also80 } = this.form.getRawValue() try { await this.api.addForward({ source: `${externalip}:${externalport}`, target: `${device!.ip}:${internalport}`, + label, }) if (externalport === 443 && internalport === 443 && also80) { await this.api.addForward({ source: `${externalip}:80`, target: `${device!.ip}:443`, + label: `${label} (HTTP redirect)`, }) } } catch (e: any) { diff --git a/web/projects/start-tunnel/src/app/routes/home/routes/port-forwards/edit-label.ts b/web/projects/start-tunnel/src/app/routes/home/routes/port-forwards/edit-label.ts new file mode 100644 index 000000000..73d013199 --- /dev/null +++ b/web/projects/start-tunnel/src/app/routes/home/routes/port-forwards/edit-label.ts @@ -0,0 +1,70 @@ +import { ChangeDetectionStrategy, Component, inject } from '@angular/core' +import { + NonNullableFormBuilder, + ReactiveFormsModule, + Validators, +} from '@angular/forms' +import { ErrorService } from '@start9labs/shared' +import { T } from '@start9labs/start-sdk' +import { TuiButton, TuiDialogContext, TuiError, 
TuiInput } from '@taiga-ui/core' +import { TuiNotificationMiddleService } from '@taiga-ui/kit' +import { TuiForm } from '@taiga-ui/layout' +import { injectContext, PolymorpheusComponent } from '@taiga-ui/polymorpheus' +import { ApiService } from 'src/app/services/api/api.service' + +export interface EditLabelData { + readonly source: string + readonly label: T.Tunnel.PortForwardEntry['label'] +} + +@Component({ + template: ` + + + + + + +
+ +
+ + `, + changeDetection: ChangeDetectionStrategy.OnPush, + imports: [ReactiveFormsModule, TuiButton, TuiError, TuiInput, TuiForm], +}) +export class PortForwardsEditLabel { + private readonly api = inject(ApiService) + private readonly loading = inject(TuiNotificationMiddleService) + private readonly errorService = inject(ErrorService) + + protected readonly context = + injectContext>() + + protected readonly form = inject(NonNullableFormBuilder).group({ + label: [this.context.data.label, Validators.required], + }) + + protected async onSave() { + const loader = this.loading.open('').subscribe() + + try { + await this.api.updateForwardLabel({ + source: this.context.data.source, + label: this.form.getRawValue().label, + }) + } catch (e: any) { + console.error(e) + this.errorService.handleError(e) + } finally { + loader.unsubscribe() + this.context.$implicit.complete() + } + } +} + +export const PORT_FORWARDS_EDIT_LABEL = new PolymorpheusComponent( + PortForwardsEditLabel, +) diff --git a/web/projects/start-tunnel/src/app/routes/home/routes/port-forwards/index.ts b/web/projects/start-tunnel/src/app/routes/home/routes/port-forwards/index.ts index 8b7339c7d..f737cbb46 100644 --- a/web/projects/start-tunnel/src/app/routes/home/routes/port-forwards/index.ts +++ b/web/projects/start-tunnel/src/app/routes/home/routes/port-forwards/index.ts @@ -3,18 +3,32 @@ import { Component, computed, inject, + signal, Signal, } from '@angular/core' import { toSignal } from '@angular/core/rxjs-interop' -import { ReactiveFormsModule } from '@angular/forms' +import { FormsModule } from '@angular/forms' import { ErrorService } from '@start9labs/shared' import { utils } from '@start9labs/start-sdk' import { TuiResponsiveDialogService } from '@taiga-ui/addon-mobile' -import { TuiButton } from '@taiga-ui/core' -import { TUI_CONFIRM, TuiNotificationMiddleService } from '@taiga-ui/kit' +import { + TuiButton, + TuiDataList, + TuiDropdown, + TuiLoader, + TuiTextfield, +} from '@taiga-ui/core' 
+import { + TUI_CONFIRM, + TuiNotificationMiddleService, + TuiSkeleton, + TuiSwitch, +} from '@taiga-ui/kit' import { PatchDB } from 'patch-db-client' import { filter, map } from 'rxjs' +import { PlaceholderComponent } from 'src/app/routes/home/components/placeholder' import { PORT_FORWARDS_ADD } from 'src/app/routes/home/routes/port-forwards/add' +import { PORT_FORWARDS_EDIT_LABEL } from 'src/app/routes/home/routes/port-forwards/edit-label' import { ApiService } from 'src/app/services/api/api.service' import { TunnelData } from 'src/app/services/patch-db/data-model' @@ -22,9 +36,11 @@ import { MappedDevice, MappedForward } from './utils' @Component({ template: ` - +
+ + @@ -39,6 +55,23 @@ import { MappedDevice, MappedForward } from './utils' @for (forward of forwards(); track $index) { + + @@ -47,22 +80,59 @@ import { MappedDevice, MappedForward } from './utils' + + } @empty { -
No port forwards
+ + + }
Label External IP External Port Device
+ + + + {{ forward.label || '—' }} {{ forward.externalip }} {{ forward.externalport }} {{ forward.device.name }}
+ + No port forwards + +
`, changeDetection: ChangeDetectionStrategy.OnPush, - imports: [ReactiveFormsModule, TuiButton], + imports: [ + FormsModule, + TuiButton, + TuiDropdown, + TuiDataList, + TuiLoader, + TuiSwitch, + TuiTextfield, + PlaceholderComponent, + TuiSkeleton, + ], }) export default class PortForwards { private readonly dialogs = inject(TuiResponsiveDialogService) @@ -70,8 +140,6 @@ export default class PortForwards { private readonly loading = inject(TuiNotificationMiddleService) private readonly patch = inject>(PatchDB) private readonly errorService = inject(ErrorService) - - private readonly portForwards = toSignal(this.patch.watch$('portForwards')) private readonly ips = toSignal( this.patch.watch$('gateways').pipe( map(g => @@ -99,20 +167,38 @@ export default class PortForwards { { initialValue: [] }, ) + protected readonly portForwards = toSignal(this.patch.watch$('portForwards')) protected readonly forwards = computed(() => - Object.entries(this.portForwards() || {}).map(([source, target]) => { + Object.entries(this.portForwards() || {}).map(([source, entry]) => { const sourceSplit = source.split(':') - const targetSplit = target.split(':') + const targetSplit = entry.target.split(':') return { externalip: sourceSplit[0]!, externalport: sourceSplit[1]!, device: this.devices().find(d => d.ip === targetSplit[0])!, internalport: targetSplit[1]!, + label: entry.label, + enabled: entry.enabled, } }), ) + protected readonly toggling = signal(null) + + protected async onToggle(forward: MappedForward, index: number) { + this.toggling.set(index) + const source = `${forward.externalip}:${forward.externalport}` + + try { + await this.api.setForwardEnabled({ source, enabled: !forward.enabled }) + } catch (e: any) { + this.errorService.handleError(e) + } finally { + this.toggling.set(null) + } + } + protected onAdd(): void { this.dialogs .open(PORT_FORWARDS_ADD, { @@ -122,6 +208,18 @@ export default class PortForwards { .subscribe() } + protected onEditLabel(forward: MappedForward): 
void { + this.dialogs + .open(PORT_FORWARDS_EDIT_LABEL, { + label: 'Edit label', + data: { + source: `${forward.externalip}:${forward.externalport}`, + label: forward.label, + }, + }) + .subscribe() + } + protected onDelete({ externalip, externalport }: MappedForward): void { this.dialogs .open(TUI_CONFIRM, { label: 'Are you sure?' }) diff --git a/web/projects/start-tunnel/src/app/routes/home/routes/port-forwards/utils.ts b/web/projects/start-tunnel/src/app/routes/home/routes/port-forwards/utils.ts index 101c1eba9..0afb346f5 100644 --- a/web/projects/start-tunnel/src/app/routes/home/routes/port-forwards/utils.ts +++ b/web/projects/start-tunnel/src/app/routes/home/routes/port-forwards/utils.ts @@ -1,4 +1,5 @@ import { Signal } from '@angular/core' +import { T } from '@start9labs/start-sdk' export interface MappedDevice { readonly ip: string @@ -10,9 +11,11 @@ export interface MappedForward { readonly externalport: string readonly device: MappedDevice readonly internalport: string + readonly label: T.Tunnel.PortForwardEntry['label'] + readonly enabled: T.Tunnel.PortForwardEntry['enabled'] } export interface PortForwardsData { - readonly ips: Signal + readonly ips: Signal readonly devices: Signal } diff --git a/web/projects/start-tunnel/src/app/routes/home/routes/settings/index.ts b/web/projects/start-tunnel/src/app/routes/home/routes/settings/index.ts index 7b8d70ac5..f701a35ad 100644 --- a/web/projects/start-tunnel/src/app/routes/home/routes/settings/index.ts +++ b/web/projects/start-tunnel/src/app/routes/home/routes/settings/index.ts @@ -4,13 +4,19 @@ import { inject, signal, } from '@angular/core' +import { Router } from '@angular/router' import { ErrorService } from '@start9labs/shared' import { TuiResponsiveDialogService } from '@taiga-ui/addon-mobile' import { TuiButton, TuiCell, TuiTitle } from '@taiga-ui/core' -import { TuiBadge, TuiButtonLoading } from '@taiga-ui/kit' +import { + TuiBadge, + TuiButtonLoading, + TuiNotificationMiddleService, +} from 
'@taiga-ui/kit' import { TuiCard } from '@taiga-ui/layout' +import { ApiService } from 'src/app/services/api/api.service' +import { AuthService } from 'src/app/services/auth.service' import { UpdateService } from 'src/app/services/update.service' - import { CHANGE_PASSWORD } from './change-password' @Component({ @@ -50,6 +56,20 @@ import { CHANGE_PASSWORD } from './change-password' +
+ + Logout + + +
`, styles: ` @@ -67,6 +87,10 @@ import { CHANGE_PASSWORD } from './change-password' export default class Settings { private readonly dialogs = inject(TuiResponsiveDialogService) private readonly errorService = inject(ErrorService) + private readonly api = inject(ApiService) + private readonly auth = inject(AuthService) + private readonly router = inject(Router) + private readonly loading = inject(TuiNotificationMiddleService) protected readonly update = inject(UpdateService) protected readonly checking = signal(false) @@ -99,4 +123,18 @@ export default class Settings { this.applying.set(false) } } + + protected async onLogout() { + const loader = this.loading.open('').subscribe() + + try { + await this.api.logout() + this.auth.authenticated.set(false) + this.router.navigate(['/']) + } catch (e: any) { + this.errorService.handleError(e) + } finally { + loader.unsubscribe() + } + } } diff --git a/web/projects/start-tunnel/src/app/routes/home/routes/subnets/index.ts b/web/projects/start-tunnel/src/app/routes/home/routes/subnets/index.ts index c849b508e..c980c8463 100644 --- a/web/projects/start-tunnel/src/app/routes/home/routes/subnets/index.ts +++ b/web/projects/start-tunnel/src/app/routes/home/routes/subnets/index.ts @@ -3,9 +3,14 @@ import { toSignal } from '@angular/core/rxjs-interop' import { utils } from '@start9labs/start-sdk' import { TuiResponsiveDialogService } from '@taiga-ui/addon-mobile' import { TuiButton, TuiDataList, TuiDropdown } from '@taiga-ui/core' -import { TUI_CONFIRM, TuiNotificationMiddleService } from '@taiga-ui/kit' +import { + TUI_CONFIRM, + TuiNotificationMiddleService, + TuiSkeleton, +} from '@taiga-ui/kit' import { PatchDB } from 'patch-db-client' import { filter, map } from 'rxjs' +import { PlaceholderComponent } from 'src/app/routes/home/components/placeholder' import { ApiService } from 'src/app/services/api/api.service' import { TunnelData } from 'src/app/services/patch-db/data-model' @@ -13,7 +18,7 @@ import { SUBNETS_ADD } from 
'./add' @Component({ template: ` - +
@@ -40,7 +45,11 @@ import { SUBNETS_ADD } from './add' iconStart="@tui.ellipsis-vertical" > Actions - + } @empty { -
No subnets
+ + + }
Name
+ No subnets +
`, changeDetection: ChangeDetectionStrategy.OnPush, - imports: [TuiButton, TuiDropdown, TuiDataList], + imports: [ + TuiButton, + TuiDropdown, + TuiDataList, + PlaceholderComponent, + TuiSkeleton, + ], }) export default class Subnets { private readonly dialogs = inject(TuiResponsiveDialogService) private readonly api = inject(ApiService) private readonly loading = inject(TuiNotificationMiddleService) - protected readonly subnets = toSignal( + protected readonly subnets = toSignal( inject>(PatchDB) .watch$('wg', 'subnets') .pipe( @@ -85,7 +104,7 @@ export default class Subnets { })), ), ), - { initialValue: [] }, + { initialValue: null }, ) protected onAdd(): void { @@ -111,7 +130,7 @@ export default class Subnets { .open(TUI_CONFIRM, { label: 'Are you sure?' }) .pipe(filter(Boolean)) .subscribe(async () => { - const subnet = this.subnets()[index]?.range || '' + const subnet = this.subnets()?.[index]?.range || '' const loader = this.loading.open('').subscribe() try { @@ -125,13 +144,13 @@ export default class Subnets { } private getNext(): string { - const current = this.subnets().map(s => utils.IpNet.parse(s.range)) + const current = this.subnets()?.map(s => utils.IpNet.parse(s.range)) const suggestion = utils.IpNet.parse('10.59.0.1/24') for (let i = 0; i < 256; i++) { suggestion.octets[2] = Math.floor(Math.random() * 256) if ( - !current.some( + !current?.some( s => s.contains(suggestion), // inverse check unnecessary since we don't allow subnets smaller than /24 ) ) { diff --git a/web/projects/start-tunnel/src/app/services/api/api.service.ts b/web/projects/start-tunnel/src/app/services/api/api.service.ts index 401d7f43c..f25d22b93 100644 --- a/web/projects/start-tunnel/src/app/services/api/api.service.ts +++ b/web/projects/start-tunnel/src/app/services/api/api.service.ts @@ -1,7 +1,8 @@ import { Injectable } from '@angular/core' +import { T } from '@start9labs/start-sdk' import { Dump } from 'patch-db-client' -import { TunnelData } from '../patch-db/data-model' 
import { Observable } from 'rxjs' +import { TunnelData } from '../patch-db/data-model' @Injectable({ providedIn: 'root', @@ -10,64 +11,43 @@ export abstract class ApiService { abstract openWebsocket$(guid: string): Observable abstract subscribe(): Promise // db.subscribe // auth - abstract login(params: LoginReq): Promise // auth.login + abstract login(params: T.Tunnel.SetPasswordParams): Promise // auth.login abstract logout(): Promise // auth.logout - abstract setPassword(params: LoginReq): Promise // auth.set-password + abstract setPassword(params: T.Tunnel.SetPasswordParams): Promise // auth.set-password // subnets - abstract addSubnet(params: UpsertSubnetReq): Promise // subnet.add - abstract editSubnet(params: UpsertSubnetReq): Promise // subnet.add - abstract deleteSubnet(params: DeleteSubnetReq): Promise // subnet.remove + abstract addSubnet( + params: T.Tunnel.SubnetParams & T.Tunnel.AddSubnetParams, + ): Promise // subnet.add + abstract editSubnet( + params: T.Tunnel.SubnetParams & T.Tunnel.AddSubnetParams, + ): Promise // subnet.edit + abstract deleteSubnet(params: T.Tunnel.SubnetParams): Promise // subnet.remove // devices - abstract addDevice(params: UpsertDeviceReq): Promise // device.add - abstract editDevice(params: UpsertDeviceReq): Promise // device.add - abstract deleteDevice(params: DeleteDeviceReq): Promise // device.remove - abstract showDeviceConfig(params: DeleteDeviceReq): Promise // device.show-config + abstract addDevice(params: T.Tunnel.AddDeviceParams): Promise // device.add + abstract editDevice(params: T.Tunnel.AddDeviceParams): Promise // device.edit + abstract deleteDevice(params: T.Tunnel.RemoveDeviceParams): Promise // device.remove + abstract showDeviceConfig( + params: T.Tunnel.RemoveDeviceParams, + ): Promise // device.show-config // forwards - abstract addForward(params: AddForwardReq): Promise // port-forward.add - abstract deleteForward(params: DeleteForwardReq): Promise // port-forward.remove + abstract addForward( + 
params: T.Tunnel.AddPortForwardParams, + ): Promise // port-forward.add + abstract deleteForward( + params: T.Tunnel.RemovePortForwardParams, + ): Promise // port-forward.remove + abstract updateForwardLabel( + params: T.Tunnel.UpdatePortForwardLabelParams, + ): Promise // port-forward.update-label + abstract setForwardEnabled( + params: T.Tunnel.SetPortForwardEnabledParams, + ): Promise // port-forward.set-enabled // update - abstract checkUpdate(): Promise // update.check - abstract applyUpdate(): Promise // update.apply + abstract checkUpdate(): Promise // update.check + abstract applyUpdate(): Promise // update.apply } export type SubscribeRes = { dump: Dump guid: string } - -export type LoginReq = { password: string } - -export type UpsertSubnetReq = { - name: string - subnet: string -} - -export type DeleteSubnetReq = { - subnet: string -} - -export type UpsertDeviceReq = { - name: string - subnet: string - ip: string -} - -export type DeleteDeviceReq = { - subnet: string - ip: string -} - -export type AddForwardReq = { - source: string // externalip:port - target: string // internalip:port -} - -export type DeleteForwardReq = { - source: string -} - -export type TunnelUpdateResult = { - status: string - installed: string - candidate: string -} diff --git a/web/projects/start-tunnel/src/app/services/api/live-api.service.ts b/web/projects/start-tunnel/src/app/services/api/live-api.service.ts index cabf8200f..83aaa8515 100644 --- a/web/projects/start-tunnel/src/app/services/api/live-api.service.ts +++ b/web/projects/start-tunnel/src/app/services/api/live-api.service.ts @@ -8,18 +8,8 @@ import { } from '@start9labs/shared' import { filter, firstValueFrom, Observable } from 'rxjs' import { webSocket } from 'rxjs/webSocket' -import { - AddForwardReq, - ApiService, - DeleteDeviceReq, - DeleteForwardReq, - DeleteSubnetReq, - LoginReq, - SubscribeRes, - TunnelUpdateResult, - UpsertDeviceReq, - UpsertSubnetReq, -} from './api.service' +import { T } from 
'@start9labs/start-sdk' +import { ApiService, SubscribeRes } from './api.service' import { AuthService } from '../auth.service' import { PATCH_CACHE } from '../patch-db/patch-db-source' @@ -52,7 +42,7 @@ export class LiveApiService extends ApiService { // auth - async login(params: LoginReq): Promise { + async login(params: T.Tunnel.SetPasswordParams): Promise { return this.rpcRequest({ method: 'auth.login', params }) } @@ -60,67 +50,87 @@ export class LiveApiService extends ApiService { return this.rpcRequest({ method: 'auth.logout', params: {} }) } - async setPassword(params: LoginReq): Promise { + async setPassword(params: T.Tunnel.SetPasswordParams): Promise { return this.rpcRequest({ method: 'auth.set-password', params }) } - async addSubnet(params: UpsertSubnetReq): Promise { + async addSubnet( + params: T.Tunnel.SubnetParams & T.Tunnel.AddSubnetParams, + ): Promise { return this.upsertSubnet(params) } - async editSubnet(params: UpsertSubnetReq): Promise { + async editSubnet( + params: T.Tunnel.SubnetParams & T.Tunnel.AddSubnetParams, + ): Promise { return this.upsertSubnet(params) } - async deleteSubnet(params: DeleteSubnetReq): Promise { + async deleteSubnet(params: T.Tunnel.SubnetParams): Promise { return this.rpcRequest({ method: 'subnet.remove', params }) } // devices - async addDevice(params: UpsertDeviceReq): Promise { + async addDevice(params: T.Tunnel.AddDeviceParams): Promise { return this.upsertDevice(params) } - async editDevice(params: UpsertDeviceReq): Promise { + async editDevice(params: T.Tunnel.AddDeviceParams): Promise { return this.upsertDevice(params) } - async deleteDevice(params: DeleteDeviceReq): Promise { + async deleteDevice(params: T.Tunnel.RemoveDeviceParams): Promise { return this.rpcRequest({ method: 'device.remove', params }) } - async showDeviceConfig(params: DeleteDeviceReq): Promise { + async showDeviceConfig(params: T.Tunnel.RemoveDeviceParams): Promise { return this.rpcRequest({ method: 'device.show-config', params }) } // 
forwards - async addForward(params: AddForwardReq): Promise { + async addForward(params: T.Tunnel.AddPortForwardParams): Promise { return this.rpcRequest({ method: 'port-forward.add', params }) } - async deleteForward(params: DeleteForwardReq): Promise { + async deleteForward( + params: T.Tunnel.RemovePortForwardParams, + ): Promise { return this.rpcRequest({ method: 'port-forward.remove', params }) } + async updateForwardLabel( + params: T.Tunnel.UpdatePortForwardLabelParams, + ): Promise { + return this.rpcRequest({ method: 'port-forward.update-label', params }) + } + + async setForwardEnabled( + params: T.Tunnel.SetPortForwardEnabledParams, + ): Promise { + return this.rpcRequest({ method: 'port-forward.set-enabled', params }) + } + // update - async checkUpdate(): Promise { + async checkUpdate(): Promise { return this.rpcRequest({ method: 'update.check', params: {} }) } - async applyUpdate(): Promise { + async applyUpdate(): Promise { return this.rpcRequest({ method: 'update.apply', params: {} }) } // private - private async upsertSubnet(params: UpsertSubnetReq): Promise { + private async upsertSubnet( + params: T.Tunnel.SubnetParams & T.Tunnel.AddSubnetParams, + ): Promise { return this.rpcRequest({ method: 'subnet.add', params }) } - private async upsertDevice(params: UpsertDeviceReq): Promise { + private async upsertDevice(params: T.Tunnel.AddDeviceParams): Promise { return this.rpcRequest({ method: 'device.add', params }) } diff --git a/web/projects/start-tunnel/src/app/services/api/mock-api.service.ts b/web/projects/start-tunnel/src/app/services/api/mock-api.service.ts index 6f82c597f..ed3ebe95e 100644 --- a/web/projects/start-tunnel/src/app/services/api/mock-api.service.ts +++ b/web/projects/start-tunnel/src/app/services/api/mock-api.service.ts @@ -1,19 +1,9 @@ import { inject, Injectable } from '@angular/core' import { shareReplay, Subject, tap } from 'rxjs' import { WebSocketSubject } from 'rxjs/webSocket' -import { - AddForwardReq, - ApiService, - 
DeleteDeviceReq, - DeleteForwardReq, - DeleteSubnetReq, - LoginReq, - SubscribeRes, - TunnelUpdateResult, - UpsertDeviceReq, - UpsertSubnetReq, -} from './api.service' +import { ApiService, SubscribeRes } from './api.service' import { pauseFor } from '@start9labs/shared' +import { T } from '@start9labs/start-sdk' import { AuthService } from '../auth.service' import { AddOperation, @@ -24,7 +14,7 @@ import { Revision, } from 'patch-db-client' import { toObservable } from '@angular/core/rxjs-interop' -import { mockTunnelData, WgClient, WgSubnet } from '../patch-db/data-model' +import { mockTunnelData } from '../patch-db/data-model' @Injectable({ providedIn: 'root', @@ -59,7 +49,7 @@ export class MockApiService extends ApiService { } } - async login(params: LoginReq): Promise { + async login(params: T.Tunnel.SetPasswordParams): Promise { await pauseFor(1000) return null } @@ -69,15 +59,15 @@ export class MockApiService extends ApiService { return null } - async setPassword(params: LoginReq): Promise { + async setPassword(params: T.Tunnel.SetPasswordParams): Promise { await pauseFor(1000) return null } - async addSubnet(params: UpsertSubnetReq): Promise { + async addSubnet(params: T.Tunnel.SubnetParams & T.Tunnel.AddSubnetParams): Promise { await pauseFor(1000) - const patch: AddOperation[] = [ + const patch: AddOperation[] = [ { op: PatchOp.ADD, path: `/wg/subnets/${replaceSlashes(params.subnet)}`, @@ -89,7 +79,7 @@ export class MockApiService extends ApiService { return null } - async editSubnet(params: UpsertSubnetReq): Promise { + async editSubnet(params: T.Tunnel.SubnetParams & T.Tunnel.AddSubnetParams): Promise { await pauseFor(1000) const patch: ReplaceOperation[] = [ @@ -104,7 +94,7 @@ export class MockApiService extends ApiService { return null } - async deleteSubnet(params: DeleteSubnetReq): Promise { + async deleteSubnet(params: T.Tunnel.SubnetParams): Promise { await pauseFor(1000) const patch: RemoveOperation[] = [ @@ -118,14 +108,14 @@ export class 
MockApiService extends ApiService { return null } - async addDevice(params: UpsertDeviceReq): Promise { + async addDevice(params: T.Tunnel.AddDeviceParams): Promise { await pauseFor(1000) - const patch: AddOperation[] = [ + const patch: AddOperation[] = [ { op: PatchOp.ADD, path: `/wg/subnets/${replaceSlashes(params.subnet)}/clients/${params.ip}`, - value: { name: params.name }, + value: { name: params.name, key: '', psk: '' }, }, ] this.mockRevision(patch) @@ -133,7 +123,7 @@ export class MockApiService extends ApiService { return null } - async editDevice(params: UpsertDeviceReq): Promise { + async editDevice(params: T.Tunnel.AddDeviceParams): Promise { await pauseFor(1000) const patch: ReplaceOperation[] = [ @@ -148,7 +138,7 @@ export class MockApiService extends ApiService { return null } - async deleteDevice(params: DeleteDeviceReq): Promise { + async deleteDevice(params: T.Tunnel.RemoveDeviceParams): Promise { await pauseFor(1000) const patch: RemoveOperation[] = [ @@ -162,20 +152,24 @@ export class MockApiService extends ApiService { return null } - async showDeviceConfig(params: DeleteDeviceReq): Promise { + async showDeviceConfig(params: T.Tunnel.RemoveDeviceParams): Promise { await pauseFor(1000) return MOCK_CONFIG } - async addForward(params: AddForwardReq): Promise { + async addForward(params: T.Tunnel.AddPortForwardParams): Promise { await pauseFor(1000) - const patch: AddOperation[] = [ + const patch: AddOperation[] = [ { op: PatchOp.ADD, path: `/portForwards/${params.source}`, - value: params.target, + value: { + target: params.target, + label: params.label || null, + enabled: true, + }, }, ] this.mockRevision(patch) @@ -183,7 +177,37 @@ export class MockApiService extends ApiService { return null } - async deleteForward(params: DeleteForwardReq): Promise { + async updateForwardLabel(params: T.Tunnel.UpdatePortForwardLabelParams): Promise { + await pauseFor(1000) + + const patch: ReplaceOperation[] = [ + { + op: PatchOp.REPLACE, + path: 
`/portForwards/${params.source}/label`, + value: params.label, + }, + ] + this.mockRevision(patch) + + return null + } + + async setForwardEnabled(params: T.Tunnel.SetPortForwardEnabledParams): Promise { + await pauseFor(1000) + + const patch: ReplaceOperation[] = [ + { + op: PatchOp.REPLACE, + path: `/portForwards/${params.source}/enabled`, + value: params.enabled, + }, + ] + this.mockRevision(patch) + + return null + } + + async deleteForward(params: T.Tunnel.RemovePortForwardParams): Promise { await pauseFor(1000) const patch: RemoveOperation[] = [ @@ -197,7 +221,7 @@ export class MockApiService extends ApiService { return null } - async checkUpdate(): Promise { + async checkUpdate(): Promise { await pauseFor(1000) return { status: 'update-available', @@ -206,7 +230,7 @@ export class MockApiService extends ApiService { } } - async applyUpdate(): Promise { + async applyUpdate(): Promise { await pauseFor(2000) return { status: 'updating', diff --git a/web/projects/start-tunnel/src/app/services/patch-db/data-model.ts b/web/projects/start-tunnel/src/app/services/patch-db/data-model.ts index 9df4fac6d..0ac9d0c30 100644 --- a/web/projects/start-tunnel/src/app/services/patch-db/data-model.ts +++ b/web/projects/start-tunnel/src/app/services/patch-db/data-model.ts @@ -1,46 +1,32 @@ import { T } from '@start9labs/start-sdk' -export type TunnelData = { - wg: WgServer - portForwards: Record - gateways: Record -} - -export type WgServer = { - subnets: Record -} - -export type WgSubnet = { - name: string - clients: Record -} - -export type WgClient = { - name: string -} +export type TunnelData = Pick< + T.Tunnel.TunnelDatabase, + 'wg' | 'portForwards' | 'gateways' +> export const mockTunnelData: TunnelData = { wg: { + port: 51820, + key: '', subnets: { '10.59.0.0/24': { name: 'Family', clients: { - '10.59.0.2': { - name: 'Start9 Server', - }, - '10.59.0.3': { - name: 'Phone', - }, - '10.59.0.4': { - name: 'Laptop', - }, + '10.59.0.2': { name: 'Start9 Server', key: '', psk: '' 
}, + '10.59.0.3': { name: 'Phone', key: '', psk: '' }, + '10.59.0.4': { name: 'Laptop', key: '', psk: '' }, }, }, }, }, portForwards: { - '69.1.1.42:443': '10.59.0.2:443', - '69.1.1.42:3000': '10.59.0.2:3000', + '69.1.1.42:443': { target: '10.59.0.2:443', label: 'HTTPS', enabled: true }, + '69.1.1.42:3000': { + target: '10.59.0.2:3000', + label: 'Grafana', + enabled: true, + }, }, gateways: { eth0: { diff --git a/web/projects/start-tunnel/src/app/services/update.service.ts b/web/projects/start-tunnel/src/app/services/update.service.ts index 7b91e12d7..79ec824ec 100644 --- a/web/projects/start-tunnel/src/app/services/update.service.ts +++ b/web/projects/start-tunnel/src/app/services/update.service.ts @@ -1,9 +1,8 @@ -import { Component, computed, inject, Injectable, signal } from '@angular/core' +import { computed, inject, Injectable, signal } from '@angular/core' import { toObservable } from '@angular/core/rxjs-interop' import { ErrorService } from '@start9labs/shared' -import { TuiResponsiveDialogService } from '@taiga-ui/addon-mobile' -import { TuiLoader } from '@taiga-ui/core' -import { PolymorpheusComponent } from '@taiga-ui/polymorpheus' +import { T } from '@start9labs/start-sdk' +import { TuiNotificationMiddleService } from '@taiga-ui/kit' import { catchError, EMPTY, @@ -14,27 +13,19 @@ import { switchMap, takeWhile, } from 'rxjs' -import { ApiService, TunnelUpdateResult } from './api/api.service' +import { ApiService } from './api/api.service' import { AuthService } from './auth.service' -@Component({ - template: '', - imports: [TuiLoader], -}) -class UpdatingDialog { - protected readonly text = 'StartTunnel is updating...' 
-} - @Injectable({ providedIn: 'root', }) export class UpdateService { private readonly api = inject(ApiService) private readonly auth = inject(AuthService) - private readonly dialogs = inject(TuiResponsiveDialogService) + private readonly loading = inject(TuiNotificationMiddleService) private readonly errorService = inject(ErrorService) - readonly result = signal(null) + readonly result = signal(null) readonly hasUpdate = computed( () => this.result()?.status === 'update-available', ) @@ -60,7 +51,7 @@ export class UpdateService { this.setResult(result) } - private setResult(result: TunnelUpdateResult): void { + private setResult(result: T.Tunnel.TunnelUpdateResult): void { this.result.set(result) if (result.status === 'updating') { @@ -105,11 +96,8 @@ export class UpdateService { private showUpdatingDialog(): void { if (this.updatingDialog) return - this.updatingDialog = this.dialogs - .open(new PolymorpheusComponent(UpdatingDialog), { - closable: false, - dismissible: false, - }) + this.updatingDialog = this.loading + .open('StartTunnel is updating...') .subscribe({ complete: () => (this.updatingDialog = null) }) } diff --git a/web/projects/start-tunnel/src/styles.scss b/web/projects/start-tunnel/src/styles.scss index 1cc21ec0e..6829b5003 100644 --- a/web/projects/start-tunnel/src/styles.scss +++ b/web/projects/start-tunnel/src/styles.scss @@ -66,6 +66,10 @@ tui-notification-middle { background: var(--tui-background-neutral-1); box-shadow: inset 0 0 0 1px var(--tui-background-neutral-1); + &:has(app-placeholder) thead { + display: none; + } + thead tr { position: sticky; top: 0; @@ -93,12 +97,6 @@ tui-notification-middle { } } -.placeholder { - padding: 1rem; - font: var(--tui-font-text-l); - color: var(--tui-text-tertiary); -} - qr-code { display: flex; justify-content: center; @@ -107,3 +105,21 @@ qr-code { tui-data-list { --tui-text-action: var(--tui-text-primary); } + +[tuiTheme='dark'] tui-notification-middle[style] { + &.tui-enter, + &.tui-leave { + 
--tui-scale: 0; + animation-name: tuiScale; + } + + &::before { + background: var(--tui-background-neutral-1); + backdrop-filter: blur(1rem); + box-shadow: inset 0 1px 1px var(--tui-background-neutral-2); + } + + tui-loader svg { + stroke: white; + } +} diff --git a/web/projects/ui/src/app/routes/login/ca-wizard/ca-wizard.component.html b/web/projects/ui/src/app/routes/login/ca-wizard/ca-wizard.component.html index 52c87c61c..18bb8b3e9 100644 --- a/web/projects/ui/src/app/routes/login/ca-wizard/ca-wizard.component.html +++ b/web/projects/ui/src/app/routes/login/ca-wizard/ca-wizard.component.html @@ -46,7 +46,7 @@ tuiButton docsLink size="s" - path="/start-os/user-manual/trust-ca.html" + path="/start-os/trust-ca.html" iconEnd="@tui.external-link" > {{ 'View instructions' | i18n }} diff --git a/web/projects/ui/src/app/routes/portal/components/header/menu.component.ts b/web/projects/ui/src/app/routes/portal/components/header/menu.component.ts index 77c76d5cf..851faeec3 100644 --- a/web/projects/ui/src/app/routes/portal/components/header/menu.component.ts +++ b/web/projects/ui/src/app/routes/portal/components/header/menu.component.ts @@ -52,7 +52,7 @@ import { ABOUT } from './about.component' tuiOption docsLink iconStart="@tui.book-open-text" - path="/start-os/user-manual" + path="/start-os/index.html" > {{ 'User manual' | i18n }} diff --git a/web/projects/ui/src/app/routes/portal/routes/backups/modals/jobs.component.ts b/web/projects/ui/src/app/routes/portal/routes/backups/modals/jobs.component.ts index 26e154787..c964787c3 100644 --- a/web/projects/ui/src/app/routes/portal/routes/backups/modals/jobs.component.ts +++ b/web/projects/ui/src/app/routes/portal/routes/backups/modals/jobs.component.ts @@ -31,7 +31,7 @@ import { EDIT } from './edit.component' Scheduling automatic backups is an excellent way to ensure your StartOS data is safely backed up. StartOS will issue a notification whenever one of your scheduled backups succeeds or fails. 
- + View instructions diff --git a/web/projects/ui/src/app/routes/portal/routes/backups/modals/targets.component.ts b/web/projects/ui/src/app/routes/portal/routes/backups/modals/targets.component.ts index 7d0dea1ad..926128bcf 100644 --- a/web/projects/ui/src/app/routes/portal/routes/backups/modals/targets.component.ts +++ b/web/projects/ui/src/app/routes/portal/routes/backups/modals/targets.component.ts @@ -28,7 +28,7 @@ import { backups. They can be physical drives plugged into your server, shared folders on your Local Area Network (LAN), or third party clouds such as Dropbox or Google Drive. - + View instructions diff --git a/web/projects/ui/src/app/routes/portal/routes/marketplace/components/notification.component.ts b/web/projects/ui/src/app/routes/portal/routes/marketplace/components/notification.component.ts index 3a6401e8c..e39cb9a77 100644 --- a/web/projects/ui/src/app/routes/portal/routes/marketplace/components/notification.component.ts +++ b/web/projects/ui/src/app/routes/portal/routes/marketplace/components/notification.component.ts @@ -7,7 +7,7 @@ import { TuiNotification } from '@taiga-ui/core' template: `
@@ -78,4 +78,15 @@ export class MarketplaceNotificationComponent { return null } + + get appearance() { + switch (this.status) { + case 'success': + return 'positive' + case 'error': + return 'negative' + default: + return 'info' + } + } } diff --git a/web/projects/ui/src/app/routes/portal/routes/metrics/time.component.ts b/web/projects/ui/src/app/routes/portal/routes/metrics/time.component.ts index aa64b2324..172475c9e 100644 --- a/web/projects/ui/src/app/routes/portal/routes/metrics/time.component.ts +++ b/web/projects/ui/src/app/routes/portal/routes/metrics/time.component.ts @@ -49,7 +49,7 @@ import { TimeService } from 'src/app/services/time.service' docsLink iconEnd="@tui.external-link" appearance="" - path="/start-os/faq/index.html" + path="/start-os/faq.html" fragment="#clock-sync-failure" [textContent]="'the docs' | i18n" > diff --git a/web/projects/ui/src/app/routes/portal/routes/notifications/item.component.ts b/web/projects/ui/src/app/routes/portal/routes/notifications/item.component.ts index 212e20fff..7323c285d 100644 --- a/web/projects/ui/src/app/routes/portal/routes/notifications/item.component.ts +++ b/web/projects/ui/src/app/routes/portal/routes/notifications/item.component.ts @@ -44,7 +44,7 @@ import { DataModel } from 'src/app/services/patch-db/data-model' {{ item.packageId || '-' }} } } @else { - - + {{ item.packageId || '-' }} } diff --git a/web/projects/ui/src/app/routes/portal/routes/services/components/health-check.component.ts b/web/projects/ui/src/app/routes/portal/routes/services/components/health-check.component.ts index a246a3c54..a9fb82f7a 100644 --- a/web/projects/ui/src/app/routes/portal/routes/services/components/health-check.component.ts +++ b/web/projects/ui/src/app/routes/portal/routes/services/components/health-check.component.ts @@ -101,7 +101,7 @@ export class ServiceHealthCheckComponent { case 'starting': return this.i18n.transform('Starting')! 
case 'success': - return `${this.i18n.transform('Success')}: ${message || 'health check passing'}` + return message || this.i18n.transform('Success')! case 'waiting': return message ? `${this.i18n.transform('Waiting on')} ${message}...` diff --git a/web/projects/ui/src/app/routes/portal/routes/services/routes/outlet.component.ts b/web/projects/ui/src/app/routes/portal/routes/services/routes/outlet.component.ts index 0613e7d49..31380252c 100644 --- a/web/projects/ui/src/app/routes/portal/routes/services/routes/outlet.component.ts +++ b/web/projects/ui/src/app/routes/portal/routes/services/routes/outlet.component.ts @@ -98,6 +98,7 @@ import { getManifest } from 'src/app/utils/get-package-data' padding-top: 1rem; border-radius: 0; cursor: pointer; + overflow: hidden; box-shadow: 0 -1px rgba(255, 255, 255, 0.1); } diff --git a/web/projects/ui/src/app/routes/portal/routes/system/routes/authorities/authorities.component.ts b/web/projects/ui/src/app/routes/portal/routes/system/routes/authorities/authorities.component.ts index 330849f52..590da78ae 100644 --- a/web/projects/ui/src/app/routes/portal/routes/system/routes/authorities/authorities.component.ts +++ b/web/projects/ui/src/app/routes/portal/routes/system/routes/authorities/authorities.component.ts @@ -21,7 +21,7 @@ import { AuthoritiesTableComponent } from './table.component' tuiIconButton size="xs" docsLink - path="/start-os/user-manual/trust-ca.html" + path="/start-os/trust-ca.html" appearance="icon" iconStart="@tui.book-open-text" > diff --git a/web/projects/ui/src/app/routes/portal/routes/system/routes/backups/backups.component.ts b/web/projects/ui/src/app/routes/portal/routes/system/routes/backups/backups.component.ts index f71e1d18d..94e2aeb89 100644 --- a/web/projects/ui/src/app/routes/portal/routes/system/routes/backups/backups.component.ts +++ b/web/projects/ui/src/app/routes/portal/routes/system/routes/backups/backups.component.ts @@ -61,7 +61,7 @@ import { BACKUP_RESTORE } from './restore.component' diff 
--git a/web/projects/ui/src/app/routes/portal/routes/system/routes/gateways/gateways.component.ts b/web/projects/ui/src/app/routes/portal/routes/system/routes/gateways/gateways.component.ts index 241fbe298..0b9776e2e 100644 --- a/web/projects/ui/src/app/routes/portal/routes/system/routes/gateways/gateways.component.ts +++ b/web/projects/ui/src/app/routes/portal/routes/system/routes/gateways/gateways.component.ts @@ -42,7 +42,7 @@ import { GatewaysTableComponent } from './table.component' tuiIconButton size="xs" docsLink - path="/start-os/user-manual/gateways.html" + path="/start-os/gateways.html" appearance="icon" iconStart="@tui.book-open-text" > @@ -71,7 +71,7 @@ import { GatewaysTableComponent } from './table.component' tuiIconButton size="xs" docsLink - path="/start-os/user-manual/gateways.html" + path="/start-os/gateways.html" fragment="#outbound-traffic" appearance="icon" iconStart="@tui.book-open-text" diff --git a/web/projects/ui/src/app/routes/portal/routes/system/routes/general/general.component.ts b/web/projects/ui/src/app/routes/portal/routes/system/routes/general/general.component.ts index c241f7b60..9f1057977 100644 --- a/web/projects/ui/src/app/routes/portal/routes/system/routes/general/general.component.ts +++ b/web/projects/ui/src/app/routes/portal/routes/system/routes/general/general.component.ts @@ -306,6 +306,7 @@ export default class SystemGeneralComponent { onLanguageChange(language: Language) { this.i18nService.setLang(language.name) + this.promptLanguageRestart() } // Expose shared utilities for template use @@ -562,6 +563,21 @@ export default class SystemGeneralComponent { .subscribe(() => this.restart()) } + private promptLanguageRestart() { + this.dialog + .openConfirm({ + label: 'Restart to apply', + data: { + content: + 'OS-level translations are already in effect. 
A restart is required for service-level translations to take effect.', + yes: 'Restart now', + no: 'Later', + }, + }) + .pipe(filter(Boolean)) + .subscribe(() => this.restart()) + } + private update() { this.dialogs .open(UPDATE, { diff --git a/web/projects/ui/src/app/routes/portal/routes/system/routes/smtp/smtp.component.ts b/web/projects/ui/src/app/routes/portal/routes/system/routes/smtp/smtp.component.ts index 4c35baee9..6ee478ba5 100644 --- a/web/projects/ui/src/app/routes/portal/routes/system/routes/smtp/smtp.component.ts +++ b/web/projects/ui/src/app/routes/portal/routes/system/routes/smtp/smtp.component.ts @@ -55,7 +55,7 @@ function detectProviderKey(host: string | undefined): string { tuiIconButton size="xs" docsLink - path="/start-os/user-manual/smtp.html" + path="/start-os/smtp.html" appearance="icon" iconStart="@tui.book-open-text" > diff --git a/web/projects/ui/src/app/routes/portal/routes/system/routes/ssh/ssh.component.ts b/web/projects/ui/src/app/routes/portal/routes/system/routes/ssh/ssh.component.ts index cb9aaafd0..e25e9f86f 100644 --- a/web/projects/ui/src/app/routes/portal/routes/system/routes/ssh/ssh.component.ts +++ b/web/projects/ui/src/app/routes/portal/routes/system/routes/ssh/ssh.component.ts @@ -39,7 +39,7 @@ import { SSHTableComponent } from './table.component' tuiIconButton size="xs" docsLink - path="/start-os/user-manual/ssh.html" + path="/start-os/ssh.html" appearance="icon" iconStart="@tui.book-open-text" > diff --git a/web/projects/ui/src/app/routes/portal/routes/system/routes/wifi/wifi.component.ts b/web/projects/ui/src/app/routes/portal/routes/system/routes/wifi/wifi.component.ts index 678bf36d5..6ae42a6b8 100644 --- a/web/projects/ui/src/app/routes/portal/routes/system/routes/wifi/wifi.component.ts +++ b/web/projects/ui/src/app/routes/portal/routes/system/routes/wifi/wifi.component.ts @@ -48,7 +48,7 @@ import { wifiSpec } from './wifi.const' tuiIconButton size="xs" docsLink - path="/start-os/user-manual/wifi.html" + 
path="/start-os/wifi.html" appearance="icon" iconStart="@tui.book-open-text" > diff --git a/web/projects/ui/src/app/services/api/embassy-mock-api.service.ts b/web/projects/ui/src/app/services/api/embassy-mock-api.service.ts index b6e03d477..ac89c1dca 100644 --- a/web/projects/ui/src/app/services/api/embassy-mock-api.service.ts +++ b/web/projects/ui/src/app/services/api/embassy-mock-api.service.ts @@ -1289,7 +1289,7 @@ export class MockApiService extends ApiService { op: PatchOp.REPLACE, path, value: { - desired: { main: 'restarting' }, + desired: { main: 'restarting', restartAgain: false }, started: null, error: null, health: {}, diff --git a/web/projects/ui/src/app/services/api/mock-patch.ts b/web/projects/ui/src/app/services/api/mock-patch.ts index 557310204..56bb797c9 100644 --- a/web/projects/ui/src/app/services/api/mock-patch.ts +++ b/web/projects/ui/src/app/services/api/mock-patch.ts @@ -239,7 +239,7 @@ export const mockPatchData: DataModel = { caFingerprint: '63:2B:11:99:44:40:17:DF:37:FC:C3:DF:0F:3D:15', ntpSynced: false, smtp: null, - ifconfigUrl: 'https://ifconfig.co', + echoipUrls: ['https://ipconfig.me', 'https://ifconfig.co'], platform: 'x86_64-nonfree', zram: true, governor: 'performance', diff --git a/web/projects/ui/src/styles.scss b/web/projects/ui/src/styles.scss index 0e4fbb17e..8e590dbb7 100644 --- a/web/projects/ui/src/styles.scss +++ b/web/projects/ui/src/styles.scss @@ -171,8 +171,7 @@ ul { .g-table { width: stretch; border: 1px solid var(--tui-background-neutral-1); - border-spacing: 0; - border-collapse: collapse; + border-collapse: collapse !important; border-radius: var(--tui-radius-s); overflow: hidden; box-shadow: inset 0 0 0 1px var(--tui-background-neutral-1); @@ -248,6 +247,10 @@ ul { display: none; } + &:has(app-placeholder) thead { + display: none; + } + tr:not(:last-child) { box-shadow: inset 0 -1px var(--tui-background-neutral-1); }