diff --git a/.github/workflows/startos-iso.yaml b/.github/workflows/startos-iso.yaml index 913a25327..d862fc109 100644 --- a/.github/workflows/startos-iso.yaml +++ b/.github/workflows/startos-iso.yaml @@ -102,12 +102,6 @@ jobs: core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || ''); core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || ''); - - name: Use Beta Toolchain - run: rustup default beta - - - name: Setup Cross - run: cargo install cross --git https://github.com/cross-rs/cross - - name: Make run: make ARCH=${{ matrix.arch }} compiled-${{ matrix.arch }}.tar env: diff --git a/.github/workflows/test.yaml b/.github/workflows/test.yaml index d79e2f597..81b75975c 100644 --- a/.github/workflows/test.yaml +++ b/.github/workflows/test.yaml @@ -27,11 +27,5 @@ jobs: with: node-version: ${{ env.NODEJS_VERSION }} - - name: Use Beta Toolchain - run: rustup default beta - - - name: Setup Cross - run: cargo install cross --git https://github.com/cross-rs/cross - - name: Build And Run Tests run: make test diff --git a/Makefile b/Makefile index b437810a9..def2fa8dd 100644 --- a/Makefile +++ b/Makefile @@ -160,7 +160,7 @@ results/$(REGISTRY_BASENAME).deb: dpkg-build.sh $(call ls-files,debian/start-reg tunnel-deb: results/$(TUNNEL_BASENAME).deb results/$(TUNNEL_BASENAME).deb: dpkg-build.sh $(call ls-files,debian/start-tunnel) $(TUNNEL_TARGETS) - PROJECT=start-tunnel PLATFORM=$(ARCH) REQUIRES=debian DEPENDS=wireguard-tools,iptables ./build/os-compat/run-compat.sh ./dpkg-build.sh + PROJECT=start-tunnel PLATFORM=$(ARCH) REQUIRES=debian DEPENDS=wireguard-tools,iptables,conntrack ./build/os-compat/run-compat.sh ./dpkg-build.sh $(IMAGE_TYPE): results/$(BASENAME).$(IMAGE_TYPE) @@ -226,7 +226,7 @@ wormhole-squashfs: results/$(BASENAME).squashfs $(eval SQFS_SIZE := $(shell du -s --bytes results/$(BASENAME).squashfs | awk '{print $$1}')) @echo "Paste the following command into the shell of your StartOS server:" @echo - @wormhole 
send results/$(BASENAME).squashfs 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo sh -c '"'"'/usr/lib/startos/scripts/prune-images $(SQFS_SIZE) && /usr/lib/startos/scripts/prune-boot && cd /media/startos/images && wormhole receive --accept-file %s && CHECKSUM=$(SQFS_SUM) /usr/lib/startos/scripts/use-img ./$(BASENAME).squashfs'"'"'\n", $$3 }' + @wormhole send results/$(BASENAME).squashfs 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo sh -c '"'"'/usr/lib/startos/scripts/prune-images $(SQFS_SIZE) && /usr/lib/startos/scripts/prune-boot && cd /media/startos/images && wormhole receive --accept-file %s && CHECKSUM=$(SQFS_SUM) /usr/lib/startos/scripts/upgrade ./$(BASENAME).squashfs'"'"'\n", $$3 }' update: $(STARTOS_TARGETS) @if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi @@ -254,7 +254,7 @@ update-squashfs: results/$(BASENAME).squashfs $(call ssh,'/usr/lib/startos/scripts/prune-images $(SQFS_SIZE)') $(call ssh,'/usr/lib/startos/scripts/prune-boot') $(call cp,results/$(BASENAME).squashfs,/media/startos/images/next.rootfs) - $(call ssh,'sudo CHECKSUM=$(SQFS_SUM) /usr/lib/startos/scripts/use-img /media/startos/images/next.rootfs') + $(call ssh,'sudo CHECKSUM=$(SQFS_SUM) /usr/lib/startos/scripts/upgrade /media/startos/images/next.rootfs') emulate-reflash: $(STARTOS_TARGETS) @if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi diff --git a/START-TUNNEL.md b/START-TUNNEL.md index 474e4d775..6d507f4de 100644 --- a/START-TUNNEL.md +++ b/START-TUNNEL.md @@ -14,6 +14,14 @@ Use it for private remote access to self-hosted services running on a personal s - **Forward Ports**: Forwarding a port creates a "reverse tunnel", exposing a specific port on a specific device to the public Internet. +## Features + +- **Create Subnets**: Each subnet creates a private, virtual local area network (VLAN), similar to the LAN created by a home router. 
+ +- **Add Devices**: When you add a device (server, phone, laptop) to a subnet, it receives a LAN IP address on that subnet as well as a unique Wireguard config that must be copied, downloaded, or scanned into the device. + +- **Forward Ports**: Forwarding a port creates a "reverse tunnel", exposing a specific port on a specific device to the public Internet. + ## Installation 1. Rent a low cost VPS. For most use cases, the cheapest option should be enough. diff --git a/build/dpkg-deps/depends b/build/dpkg-deps/depends index 9703cc99e..00625f41b 100644 --- a/build/dpkg-deps/depends +++ b/build/dpkg-deps/depends @@ -7,6 +7,7 @@ bmon btrfs-progs ca-certificates cifs-utils +conntrack cryptsetup curl dmidecode @@ -19,7 +20,6 @@ flashrom fuse3 grub-common grub-efi -grub2-common htop httpdirfs iotop diff --git a/build/dpkg-deps/raspberrypi.depends b/build/dpkg-deps/raspberrypi.depends index 95673daea..64f847a4a 100644 --- a/build/dpkg-deps/raspberrypi.depends +++ b/build/dpkg-deps/raspberrypi.depends @@ -1,6 +1,5 @@ - grub-common - grub-efi -- grub2-common + parted + raspberrypi-net-mods + raspberrypi-sys-mods diff --git a/build/lib/scripts/chroot-and-upgrade b/build/lib/scripts/chroot-and-upgrade index 346596d13..69cd57202 100755 --- a/build/lib/scripts/chroot-and-upgrade +++ b/build/lib/scripts/chroot-and-upgrade @@ -10,24 +10,24 @@ fi POSITIONAL_ARGS=() while [[ $# -gt 0 ]]; do - case $1 in - --no-sync) - NO_SYNC=1 - shift - ;; - --create) - ONLY_CREATE=1 - shift - ;; - -*|--*) - echo "Unknown option $1" - exit 1 - ;; - *) - POSITIONAL_ARGS+=("$1") # save positional arg - shift # past argument - ;; - esac + case $1 in + --no-sync) + NO_SYNC=1 + shift + ;; + --create) + ONLY_CREATE=1 + shift + ;; + -*|--*) + echo "Unknown option $1" + exit 1 + ;; + *) + POSITIONAL_ARGS+=("$1") # save positional arg + shift # past argument + ;; + esac done set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters @@ -35,7 +35,7 @@ set -- "${POSITIONAL_ARGS[@]}" # restore 
positional parameters if [ -z "$NO_SYNC" ]; then echo 'Syncing...' umount -R /media/startos/next 2> /dev/null - umount -R /media/startos/upper 2> /dev/null + umount /media/startos/upper 2> /dev/null rm -rf /media/startos/upper /media/startos/next mkdir /media/startos/upper mount -t tmpfs tmpfs /media/startos/upper @@ -43,8 +43,6 @@ if [ -z "$NO_SYNC" ]; then mount -t overlay \ -olowerdir=/media/startos/current,upperdir=/media/startos/upper/data,workdir=/media/startos/upper/work \ overlay /media/startos/next - mkdir -p /media/startos/next/media/startos/root - mount --bind /media/startos/root /media/startos/next/media/startos/root fi if [ -n "$ONLY_CREATE" ]; then @@ -56,12 +54,18 @@ mkdir -p /media/startos/next/dev mkdir -p /media/startos/next/sys mkdir -p /media/startos/next/proc mkdir -p /media/startos/next/boot +mkdir -p /media/startos/next/media/startos/root mount --bind /run /media/startos/next/run mount --bind /tmp /media/startos/next/tmp mount --bind /dev /media/startos/next/dev mount --bind /sys /media/startos/next/sys mount --bind /proc /media/startos/next/proc mount --bind /boot /media/startos/next/boot +mount --bind /media/startos/root /media/startos/next/media/startos/root + +if mountpoint /sys/firmware/efi/efivars 2> /dev/null; then + mount --bind /sys/firmware/efi/efivars /media/startos/next/sys/firmware/efi/efivars +fi if [ -z "$*" ]; then chroot /media/startos/next @@ -71,6 +75,10 @@ else CHROOT_RES=$? fi +if mountpoint /media/startos/next/sys/firmware/efi/efivars 2> /dev/null; then + umount /media/startos/next/sys/firmware/efi/efivars +fi + umount /media/startos/next/run umount /media/startos/next/tmp umount /media/startos/next/dev @@ -87,11 +95,12 @@ if [ "$CHROOT_RES" -eq 0 ]; then echo 'Upgrading...' + rm -f /media/startos/images/next.squashfs if ! 
time mksquashfs /media/startos/next /media/startos/images/next.squashfs -b 4096 -comp gzip; then - umount -R /media/startos/next - umount -R /media/startos/upper - rm -rf /media/startos/upper /media/startos/next - exit 1 + umount -l /media/startos/next + umount -l /media/startos/upper + rm -rf /media/startos/upper /media/startos/next + exit 1 fi hash=$(b3sum /media/startos/images/next.squashfs | head -c 32) mv /media/startos/images/next.squashfs /media/startos/images/${hash}.rootfs @@ -103,5 +112,5 @@ if [ "$CHROOT_RES" -eq 0 ]; then fi umount -R /media/startos/next -umount -R /media/startos/upper +umount /media/startos/upper rm -rf /media/startos/upper /media/startos/next \ No newline at end of file diff --git a/build/lib/scripts/forward-port b/build/lib/scripts/forward-port index 8152c6b7a..5d1e0ba45 100755 --- a/build/lib/scripts/forward-port +++ b/build/lib/scripts/forward-port @@ -5,34 +5,25 @@ if [ -z "$sip" ] || [ -z "$dip" ] || [ -z "$sport" ] || [ -z "$dport" ]; then exit 1 fi -# Helper function to check if a rule exists -nat_rule_exists() { +rule_exists() { iptables -t nat -C "$@" 2>/dev/null } -# Helper function to add or delete a rule idempotently -# Usage: apply_rule [add|del] -apply_nat_rule() { - local action="$1" - shift - - if [ "$action" = "add" ]; then - # Only add if rule doesn't exist - if ! rule_exists "$@"; then - iptables -t nat -A "$@" - fi - elif [ "$action" = "del" ]; then +apply_rule() { + if [ "$UNDO" = "1" ]; then if rule_exists "$@"; then iptables -t nat -D "$@" fi + else + if ! 
rule_exists "$@"; then + iptables -t nat -A "$@" + fi fi } -if [ "$UNDO" = 1 ]; then - action="del" -else - action="add" -fi +apply_rule PREROUTING -p tcp -d $sip --dport $sport -j DNAT --to-destination $dip:$dport +apply_rule OUTPUT -p tcp -d $sip --dport $sport -j DNAT --to-destination $dip:$dport -apply_nat_rule "$action" PREROUTING -p tcp -d $sip --dport $sport -j DNAT --to-destination $dip:$dport -apply_nat_rule "$action" OUTPUT -p tcp -d $sip --dport $sport -j DNAT --to-destination $dip:$dport \ No newline at end of file +if [ "$UNDO" = 1 ]; then + conntrack -D -p tcp -d $sip --dport $sport +fi \ No newline at end of file diff --git a/build/lib/scripts/upgrade b/build/lib/scripts/upgrade new file mode 100755 index 000000000..ee56e5c76 --- /dev/null +++ b/build/lib/scripts/upgrade @@ -0,0 +1,82 @@ +#!/bin/bash + +set -e + +SOURCE_DIR="$(dirname $(realpath "${BASH_SOURCE[0]}"))" + +if [ "$UID" -ne 0 ]; then + >&2 echo 'Must be run as root' + exit 1 +fi + +if ! [ -f "$1" ]; then + >&2 echo "usage: $0 " + exit 1 +fi + +echo 'Upgrading...' 
+ +hash=$(b3sum $1 | head -c 32) +if [ -n "$CHECKSUM" ] && [ "$hash" != "$CHECKSUM" ]; then + >&2 echo 'Checksum mismatch' + exit 2 +fi + +unsquashfs -f -d / $1 boot + +umount -R /media/startos/next 2> /dev/null || true +umount /media/startos/upper 2> /dev/null || true +umount /media/startos/lower 2> /dev/null || true + +mkdir -p /media/startos/upper +mount -t tmpfs tmpfs /media/startos/upper +mkdir -p /media/startos/lower /media/startos/upper/data /media/startos/upper/work /media/startos/next +mount $1 /media/startos/lower +mount -t overlay \ + -olowerdir=/media/startos/lower,upperdir=/media/startos/upper/data,workdir=/media/startos/upper/work \ + overlay /media/startos/next + +mkdir -p /media/startos/next/run +mkdir -p /media/startos/next/dev +mkdir -p /media/startos/next/sys +mkdir -p /media/startos/next/proc +mkdir -p /media/startos/next/boot +mkdir -p /media/startos/next/media/startos/root +mount --bind /run /media/startos/next/run +mount --bind /tmp /media/startos/next/tmp +mount --bind /dev /media/startos/next/dev +mount --bind /sys /media/startos/next/sys +mount --bind /proc /media/startos/next/proc +mount --bind /boot /media/startos/next/boot +mount --bind /media/startos/root /media/startos/next/media/startos/root + +if mountpoint /boot/efi 2> /dev/null; then + mkdir -p /media/startos/next/boot/efi + mount --bind /boot/efi /media/startos/next/boot/efi +fi + +if mountpoint /sys/firmware/efi/efivars 2> /dev/null; then + mount --bind /sys/firmware/efi/efivars /media/startos/next/sys/firmware/efi/efivars +fi + +chroot /media/startos/next bash -e << "EOF" + +if dpkg -s grub-common > /dev/null 2>&1; then + grub-install /dev/$(eval $(lsblk -o MOUNTPOINT,PKNAME -P | grep 'MOUNTPOINT="/media/startos/root"') && echo $PKNAME) + update-grub +fi + +EOF + +sync + +umount -R /media/startos/next +umount /media/startos/upper +umount /media/startos/lower + +mv $1 /media/startos/images/${hash}.rootfs +ln -rsf /media/startos/images/${hash}.rootfs /media/startos/config/current.rootfs 
+ +sync + +echo 'System upgrade complete. Reboot to apply changes...' \ No newline at end of file diff --git a/build/lib/scripts/use-img b/build/lib/scripts/use-img deleted file mode 100755 index bbe530302..000000000 --- a/build/lib/scripts/use-img +++ /dev/null @@ -1,61 +0,0 @@ -#!/bin/bash - -set -e - -if [ "$UID" -ne 0 ]; then - >&2 echo 'Must be run as root' - exit 1 -fi - -if [ -z "$1" ]; then - >&2 echo "usage: $0 " - exit 1 -fi - -VERSION=$(unsquashfs -cat $1 /usr/lib/startos/VERSION.txt) -GIT_HASH=$(unsquashfs -cat $1 /usr/lib/startos/GIT_HASH.txt) -B3SUM=$(b3sum $1 | head -c 32) - -if [ -n "$CHECKSUM" ] && [ "$CHECKSUM" != "$B3SUM" ]; then - >&2 echo "CHECKSUM MISMATCH" - exit 2 -fi - -mv $1 /media/startos/images/${B3SUM}.rootfs -ln -rsf /media/startos/images/${B3SUM}.rootfs /media/startos/config/current.rootfs - -unsquashfs -n -f -d / /media/startos/images/${B3SUM}.rootfs boot - -umount -R /media/startos/next 2> /dev/null || true -umount -R /media/startos/lower 2> /dev/null || true -umount -R /media/startos/upper 2> /dev/null || true - -rm -rf /media/startos/lower /media/startos/upper /media/startos/next -mkdir /media/startos/upper -mount -t tmpfs tmpfs /media/startos/upper -mkdir -p /media/startos/lower /media/startos/upper/data /media/startos/upper/work /media/startos/next -mount /media/startos/images/${B3SUM}.rootfs /media/startos/lower -mount -t overlay \ - -olowerdir=/media/startos/lower,upperdir=/media/startos/upper/data,workdir=/media/startos/upper/work \ - overlay /media/startos/next -mkdir -p /media/startos/next/media/startos/root -mount --bind /media/startos/root /media/startos/next/media/startos/root -mkdir -p /media/startos/next/dev -mkdir -p /media/startos/next/sys -mkdir -p /media/startos/next/proc -mkdir -p /media/startos/next/boot -mount --bind /dev /media/startos/next/dev -mount --bind /sys /media/startos/next/sys -mount --bind /proc /media/startos/next/proc -mount --bind /boot /media/startos/next/boot - -chroot /media/startos/next 
update-grub2 - -umount -R /media/startos/next -umount -R /media/startos/upper -umount -R /media/startos/lower -rm -rf /media/startos/lower /media/startos/upper /media/startos/next - -sync - -reboot \ No newline at end of file diff --git a/container-runtime/package-lock.json b/container-runtime/package-lock.json index 8f2bc713f..756f31d5d 100644 --- a/container-runtime/package-lock.json +++ b/container-runtime/package-lock.json @@ -38,7 +38,7 @@ }, "../sdk/dist": { "name": "@start9labs/start-sdk", - "version": "0.4.0-beta.42", + "version": "0.4.0-beta.43", "license": "MIT", "dependencies": { "@iarna/toml": "^3.0.0", diff --git a/container-runtime/update-image.sh b/container-runtime/update-image.sh index 287117f46..069d6a07d 100755 --- a/container-runtime/update-image.sh +++ b/container-runtime/update-image.sh @@ -9,7 +9,7 @@ if [ "$ARCH" = "riscv64" ]; then RUST_ARCH="riscv64gc" fi -if mountpoint -q tmp/combined; then sudo umount -R tmp/combined; fi +if mountpoint -q tmp/combined; then sudo umount -l tmp/combined; fi if mountpoint -q tmp/lower; then sudo umount tmp/lower; fi sudo rm -rf tmp mkdir -p tmp/lower tmp/upper tmp/work tmp/combined diff --git a/core/Cargo.lock b/core/Cargo.lock index 6475294c7..ab19c8c97 100644 --- a/core/Cargo.lock +++ b/core/Cargo.lock @@ -3458,7 +3458,7 @@ dependencies = [ "lazy_async_pool", "models", "pin-project", - "rpc-toolkit", + "rpc-toolkit 0.3.2 (git+https://github.com/Start9Labs/rpc-toolkit.git?branch=master)", "serde", "serde_json", "tokio", @@ -4835,7 +4835,7 @@ dependencies = [ "rand 0.9.2", "regex", "reqwest", - "rpc-toolkit", + "rpc-toolkit 0.3.2 (git+https://github.com/Start9Labs/rpc-toolkit.git?branch=master)", "rustls 0.23.35", "serde", "serde_json", @@ -6744,6 +6744,34 @@ dependencies = [ "yajrc", ] +[[package]] +name = "rpc-toolkit" +version = "0.3.2" +source = "git+https://github.com/Start9Labs/rpc-toolkit.git?rev=068db90#068db905ee38a7da97cc4a43b806409204e73723" +dependencies = [ + "async-stream", + "async-trait", + 
"axum 0.8.6", + "clap", + "futures", + "http", + "http-body-util", + "imbl-value 0.4.3 (registry+https://github.com/rust-lang/crates.io-index)", + "itertools 0.14.0", + "lazy_format", + "lazy_static", + "openssl", + "pin-project", + "reqwest", + "serde", + "serde_json", + "thiserror 2.0.17", + "tokio", + "tokio-stream", + "url", + "yajrc", +] + [[package]] name = "rsa" version = "0.9.8" @@ -7880,7 +7908,7 @@ dependencies = [ [[package]] name = "start-os" -version = "0.4.0-alpha.12" +version = "0.4.0-alpha.13" dependencies = [ "aes 0.7.5", "arti-client", @@ -7888,7 +7916,6 @@ dependencies = [ "async-compression", "async-stream", "async-trait", - "aws-lc-sys", "axum 0.8.6", "backtrace-on-stack-overflow", "barrage", @@ -7981,7 +8008,7 @@ dependencies = [ "reqwest", "reqwest_cookie_store", "rpassword", - "rpc-toolkit", + "rpc-toolkit 0.3.2 (git+https://github.com/Start9Labs/rpc-toolkit.git?rev=068db90)", "rust-argon2", "safelog", "semver", diff --git a/core/build-cli.sh b/core/build-cli.sh index e575b8be7..4cff77981 100755 --- a/core/build-cli.sh +++ b/core/build-cli.sh @@ -2,12 +2,19 @@ cd "$(dirname "${BASH_SOURCE[0]}")" +source ./builder-alias.sh + set -ea shopt -s expand_aliases PROFILE=${PROFILE:-release} if [ "${PROFILE}" = "release" ]; then BUILD_FLAGS="--release" +else + if [ "$PROFILE" != "debug" ]; then + >&2 echo "Unknown profile $PROFILE: falling back to debug..." 
+ PROFILE=debug + fi fi if [ -z "${ARCH:-}" ]; then @@ -18,15 +25,20 @@ if [ "$ARCH" = "arm64" ]; then ARCH="aarch64" fi +RUST_ARCH="$ARCH" +if [ "$ARCH" = "riscv64" ]; then + RUST_ARCH="riscv64gc" +fi + if [ -z "${KERNEL_NAME:-}" ]; then KERNEL_NAME=$(uname -s) fi if [ -z "${TARGET:-}" ]; then if [ "$KERNEL_NAME" = "Linux" ]; then - TARGET="$ARCH-unknown-linux-musl" + TARGET="$RUST_ARCH-unknown-linux-musl" elif [ "$KERNEL_NAME" = "Darwin" ]; then - TARGET="$ARCH-apple-darwin" + TARGET="$RUST_ARCH-apple-darwin" else >&2 echo "unknown kernel $KERNEL_NAME" exit 1 @@ -53,4 +65,7 @@ fi echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" -cross build --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features $FEATURE_ARGS --locked --bin start-cli --target=$TARGET \ No newline at end of file +rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features $FEATURE_ARGS --locked --bin start-cli --target=$TARGET +if [ "$(ls -nd "core/target/$TARGET/$PROFILE/start-cli" | awk '{ print $3 }')" != "$UID" ]; then + rust-zig-builder sh -c "cd core && chown -R $UID:$UID target && chown -R $UID:$UID /root/.cargo" +fi \ No newline at end of file diff --git a/core/build-containerbox.sh b/core/build-containerbox.sh index efa0f6fce..46c023d4c 100755 --- a/core/build-containerbox.sh +++ b/core/build-containerbox.sh @@ -2,12 +2,19 @@ cd "$(dirname "${BASH_SOURCE[0]}")" +source ./builder-alias.sh + set -ea shopt -s expand_aliases PROFILE=${PROFILE:-release} if [ "${PROFILE}" = "release" ]; then BUILD_FLAGS="--release" +else + if [ "$PROFILE" != "debug" ]; then + >&2 echo "Unknown profile $PROFILE: falling back to debug..." 
+ PROFILE=debug + fi fi if [ -z "$ARCH" ]; then @@ -33,4 +40,7 @@ fi echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" -cross build --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features cli-container,$FEATURES --locked --bin containerbox --target=$RUST_ARCH-unknown-linux-musl \ No newline at end of file +rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features cli-container,$FEATURES --locked --bin containerbox --target=$RUST_ARCH-unknown-linux-musl +if [ "$(ls -nd "core/target/$RUST_ARCH-unknown-linux-musl/$PROFILE/containerbox" | awk '{ print $3 }')" != "$UID" ]; then + rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID /root/.cargo" +fi \ No newline at end of file diff --git a/core/build-registrybox.sh b/core/build-registrybox.sh index ec6630d0f..ad1c6dce4 100755 --- a/core/build-registrybox.sh +++ b/core/build-registrybox.sh @@ -2,12 +2,19 @@ cd "$(dirname "${BASH_SOURCE[0]}")" +source ./builder-alias.sh + set -ea shopt -s expand_aliases PROFILE=${PROFILE:-release} if [ "${PROFILE}" = "release" ]; then BUILD_FLAGS="--release" +else + if [ "$PROFILE" != "debug" ]; then + >&2 echo "Unknown profile $PROFILE: falling back to debug..." 
+ PROFILE=debug + fi fi if [ -z "$ARCH" ]; then @@ -33,4 +40,7 @@ fi echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" -cross build --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features cli-registry,registry,$FEATURES --locked --bin registrybox --target=$RUST_ARCH-unknown-linux-musl +rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features cli-registry,registry,$FEATURES --locked --bin registrybox --target=$RUST_ARCH-unknown-linux-musl +if [ "$(ls -nd "core/target/$RUST_ARCH-unknown-linux-musl/$PROFILE/registrybox" | awk '{ print $3 }')" != "$UID" ]; then + rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID /root/.cargo" +fi \ No newline at end of file diff --git a/core/build-startbox.sh b/core/build-startbox.sh index 655566879..c637c1d97 100755 --- a/core/build-startbox.sh +++ b/core/build-startbox.sh @@ -2,12 +2,19 @@ cd "$(dirname "${BASH_SOURCE[0]}")" +source ./builder-alias.sh + set -ea shopt -s expand_aliases PROFILE=${PROFILE:-release} if [ "${PROFILE}" = "release" ]; then BUILD_FLAGS="--release" +else + if [ "$PROFILE" != "debug" ]; then + >&2 echo "Unknown profile $PROFILE: falling back to debug..." 
+ PROFILE=debug + fi fi if [ -z "$ARCH" ]; then @@ -33,4 +40,7 @@ fi echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" -cross build --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features cli,startd,$FEATURES --locked --bin startbox --target=$RUST_ARCH-unknown-linux-musl +rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features cli,startd,$FEATURES --locked --bin startbox --target=$RUST_ARCH-unknown-linux-musl +if [ "$(ls -nd "core/target/$RUST_ARCH-unknown-linux-musl/$PROFILE/startbox" | awk '{ print $3 }')" != "$UID" ]; then + rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID /root/.cargo" +fi \ No newline at end of file diff --git a/core/build-ts.sh b/core/build-ts.sh index e29a43a66..757ed6991 100755 --- a/core/build-ts.sh +++ b/core/build-ts.sh @@ -2,12 +2,19 @@ cd "$(dirname "${BASH_SOURCE[0]}")" +source ./builder-alias.sh + set -ea shopt -s expand_aliases PROFILE=${PROFILE:-release} if [ "${PROFILE}" = "release" ]; then BUILD_FLAGS="--release" +else + if [ "$PROFILE" != "debug" ]; then + >&2 echo "Unknown profile $PROFILE: falling back to debug..." 
+ PROFILE=debug + fi fi if [ -z "$ARCH" ]; then @@ -31,4 +38,7 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then fi echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" -cross test --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features test,$FEATURES --locked 'export_bindings_' +rust-zig-builder cargo test --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features test,$FEATURES --locked 'export_bindings_' +if [ "$(ls -nd "core/startos/bindings" | awk '{ print $3 }')" != "$UID" ]; then + rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID core/startos/bindings && chown -R $UID:$UID /root/.cargo" +fi \ No newline at end of file diff --git a/core/build-tunnelbox.sh b/core/build-tunnelbox.sh index da662c9b7..1a6516388 100755 --- a/core/build-tunnelbox.sh +++ b/core/build-tunnelbox.sh @@ -2,12 +2,19 @@ cd "$(dirname "${BASH_SOURCE[0]}")" +source ./builder-alias.sh + set -ea shopt -s expand_aliases PROFILE=${PROFILE:-release} if [ "${PROFILE}" = "release" ]; then BUILD_FLAGS="--release" +else + if [ "$PROFILE" != "debug" ]; then + >&2 echo "Unknown profile $PROFILE: falling back to debug..." 
+ PROFILE=debug + fi fi if [ -z "$ARCH" ]; then @@ -33,4 +40,7 @@ fi echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" -cross build --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features cli-tunnel,tunnel,$FEATURES --locked --bin tunnelbox --target=$RUST_ARCH-unknown-linux-musl +rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features cli-tunnel,tunnel,$FEATURES --locked --bin tunnelbox --target=$RUST_ARCH-unknown-linux-musl +if [ "$(ls -nd "core/target/$RUST_ARCH-unknown-linux-musl/$PROFILE/tunnelbox" | awk '{ print $3 }')" != "$UID" ]; then + rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID /root/.cargo" +fi \ No newline at end of file diff --git a/core/builder-alias.sh b/core/builder-alias.sh index fa8d545d6..e940efd76 100644 --- a/core/builder-alias.sh +++ b/core/builder-alias.sh @@ -1,3 +1,8 @@ #!/bin/bash -alias 'rust-musl-builder'='docker run $USE_TTY --rm -e "RUSTFLAGS=$RUSTFLAGS" -e SCCACHE_GHA_ENABLED -e SCCACHE_GHA_VERSION -e ACTIONS_RESULTS_URL -e ACTIONS_RUNTIME_TOKEN -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$HOME/.cargo/git":/root/.cargo/git -v "$HOME/.cache/sccache":/root/.cache/sccache -v "$(pwd)":/home/rust/src -w /home/rust/src -P start9/rust-musl-cross:$ARCH-musl' +USE_TTY= +if tty -s; then + USE_TTY="-it" +fi + +alias 'rust-zig-builder'='docker run '"$USE_TTY"' --rm -e "RUSTFLAGS=$RUSTFLAGS" -e "CFLAGS=-D_FORTIFY_SOURCE=2" -e "CXXFLAGS=-D_FORTIFY_SOURCE=2" -e SCCACHE_GHA_ENABLED -e SCCACHE_GHA_VERSION -e ACTIONS_RESULTS_URL -e ACTIONS_RUNTIME_TOKEN -v "$HOME/.cargo/registry":/usr/local/cargo/registry -v "$HOME/.cargo/git":/root/.cargo/git -v "$HOME/.cache/sccache":/root/.cache/sccache -v "$(pwd)":/workdir -w /workdir -P start9/cargo-zigbuild' diff --git a/core/run-tests.sh b/core/run-tests.sh index 8798b29b8..9273bc066 100755 --- a/core/run-tests.sh +++ b/core/run-tests.sh @@ -2,12 +2,19 @@ cd "$(dirname 
"${BASH_SOURCE[0]}")" +source ./builder-alias.sh + set -ea shopt -s expand_aliases PROFILE=${PROFILE:-release} if [ "${PROFILE}" = "release" ]; then BUILD_FLAGS="--release" +else + if [ "$PROFILE" != "debug" ]; then + >&2 echo "Unknown profile $PROFILE: falling back to debug..." + PROFILE=debug + fi fi if [ -z "$ARCH" ]; then @@ -31,8 +38,8 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then RUSTFLAGS="--cfg tokio_unstable" fi -source ./core/builder-alias.sh echo "FEATURES=\"$FEATURES\"" echo "RUSTFLAGS=\"$RUSTFLAGS\"" -cross test --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=test,$FEATURES --workspace --locked --target=$ARCH-unknown-linux-musl -- --skip export_bindings_ \ No newline at end of file +rust-zig-builder cargo test --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=test,$FEATURES --workspace --locked -- --skip export_bindings_ +rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID /root/.cargo" \ No newline at end of file diff --git a/core/startos/Cargo.toml b/core/startos/Cargo.toml index c3ccdbb9d..4fd67cebf 100644 --- a/core/startos/Cargo.toml +++ b/core/startos/Cargo.toml @@ -15,7 +15,7 @@ license = "MIT" name = "start-os" readme = "README.md" repository = "https://github.com/Start9Labs/start-os" -version = "0.4.0-alpha.12" # VERSION_BUMP +version = "0.4.0-alpha.13" # VERSION_BUMP [lib] name = "startos" @@ -93,7 +93,6 @@ async-compression = { version = "0.4.32", features = [ ] } async-stream = "0.3.5" async-trait = "0.1.74" -aws-lc-sys = { version = "0.32", features = ["bindgen"] } axum = { version = "0.8.4", features = ["ws"] } backtrace-on-stack-overflow = { version = "0.3.0", optional = true } barrage = "0.2.3" @@ -223,7 +222,7 @@ regex = "1.10.2" reqwest = { version = "0.12.4", features = ["json", "socks", "stream"] } reqwest_cookie_store = "0.8.0" rpassword = "7.2.0" -rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git", branch = "master" } +rpc-toolkit = { git = 
"https://github.com/Start9Labs/rpc-toolkit.git", rev = "068db90" } rust-argon2 = "2.0.0" safelog = { version = "0.4.8", git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit", optional = true } semver = { version = "1.0.20", features = ["serde"] } @@ -252,7 +251,7 @@ termion = "4.0.5" textwrap = "0.16.1" thiserror = "2.0.12" tokio = { version = "1.38.1", features = ["full"] } -tokio-rustls = "0.26.0" +tokio-rustls = "0.26.4" tokio-stream = { version = "0.1.14", features = ["io-util", "net", "sync"] } tokio-tar = { git = "https://github.com/dr-bonez/tokio-tar.git" } tokio-tungstenite = { version = "0.26.2", features = ["native-tls", "url"] } diff --git a/core/startos/src/bins/tunnel.rs b/core/startos/src/bins/tunnel.rs index 3a2e2337a..66921c28f 100644 --- a/core/startos/src/bins/tunnel.rs +++ b/core/startos/src/bins/tunnel.rs @@ -22,7 +22,7 @@ use crate::tunnel::tunnel_router; use crate::tunnel::web::TunnelCertHandler; use crate::util::logger::LOGGER; -#[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] +#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)] enum WebserverListener { Http, Https(SocketAddr), diff --git a/core/startos/src/disk/mount/util.rs b/core/startos/src/disk/mount/util.rs index 292345a59..1e0e84952 100644 --- a/core/startos/src/disk/mount/util.rs +++ b/core/startos/src/disk/mount/util.rs @@ -48,7 +48,6 @@ pub async fn bind, P1: AsRef>( pub async fn unmount>(mountpoint: P, lazy: bool) -> Result<(), Error> { tracing::debug!("Unmounting {}.", mountpoint.as_ref().display()); let mut cmd = tokio::process::Command::new("umount"); - cmd.arg("-R"); if lazy { cmd.arg("-l"); } diff --git a/core/startos/src/disk/util.rs b/core/startos/src/disk/util.rs index a6b5bea39..596b31f2b 100644 --- a/core/startos/src/disk/util.rs +++ b/core/startos/src/disk/util.rs @@ -280,6 +280,9 @@ pub async fn list(os: &OsPartitionInfo) -> Result, Error> { .try_fold( BTreeMap::::new(), |mut disks, dir_entry| async move { + if 
dir_entry.file_type().await?.is_dir() { + return Ok(disks); + } if let Some(disk_path) = dir_entry.path().file_name().and_then(|s| s.to_str()) { let (disk_path, part_path) = if let Some(end) = PARTITION_REGEX.find(disk_path) { ( diff --git a/core/startos/src/net/acme.rs b/core/startos/src/net/acme.rs index 2d562b0df..857ec3e65 100644 --- a/core/startos/src/net/acme.rs +++ b/core/startos/src/net/acme.rs @@ -26,6 +26,7 @@ use crate::context::{CliContext, RpcContext}; use crate::db::model::Database; use crate::db::model::public::AcmeSettings; use crate::db::{DbAccess, DbAccessByKey, DbAccessMut}; +use crate::net::ssl::should_use_cert; use crate::net::tls::{SingleCertResolver, TlsHandler}; use crate::net::web_server::Accept; use crate::prelude::*; @@ -63,20 +64,27 @@ where .and_then(|p| p.as_idx(JsonKey::new_ref(san_info))) { let cert = cert.de().log_err()?; - return Some( - CertifiedKey::from_der( - cert.fullchain - .into_iter() - .map(|c| Ok(CertificateDer::from(c.to_der()?))) - .collect::>() - .log_err()?, - PrivateKeyDer::from(PrivatePkcs8KeyDer::from( - cert.key.0.private_key_to_pkcs8().log_err()?, - )), - &*self.crypto_provider, - ) - .log_err()?, - ); + if cert + .fullchain + .get(0) + .and_then(|c| should_use_cert(&c.0).log_err()) + .unwrap_or(false) + { + return Some( + CertifiedKey::from_der( + cert.fullchain + .into_iter() + .map(|c| Ok(CertificateDer::from(c.to_der()?))) + .collect::>() + .log_err()?, + PrivateKeyDer::from(PrivatePkcs8KeyDer::from( + cert.key.0.private_key_to_pkcs8().log_err()?, + )), + &*self.crypto_provider, + ) + .log_err()?, + ); + } } if !self.in_progress.send_if_modified(|x| { @@ -307,6 +315,16 @@ where return Ok(None); }; let cert = cert.de()?; + if !cert + .fullchain + .get(0) + .map(|c| should_use_cert(&c.0)) + .transpose() + .map_err(Error::from)? 
+ .unwrap_or(false) + { + return Ok(None); + } Ok(Some(( String::from_utf8( cert.key diff --git a/core/startos/src/net/forward.rs b/core/startos/src/net/forward.rs index d02c8d1af..a46e02c7a 100644 --- a/core/startos/src/net/forward.rs +++ b/core/startos/src/net/forward.rs @@ -437,7 +437,8 @@ impl InterfaceForwardState { for mut entry in self.state.iter_mut() { entry.gc(ip_info, &self.port_forward).await?; } - Ok(()) + + self.port_forward.gc().await } } @@ -537,7 +538,6 @@ impl InterfacePortForwardController { _ = ip_info.changed() => { interfaces = ip_info.read(); state.sync(&interfaces).await.log_err(); - state.port_forward.gc().await.log_err(); } } } diff --git a/core/startos/src/net/gateway.rs b/core/startos/src/net/gateway.rs index 2db91cd70..c2479eff1 100644 --- a/core/startos/src/net/gateway.rs +++ b/core/startos/src/net/gateway.rs @@ -1,5 +1,6 @@ use std::any::Any; use std::collections::{BTreeMap, BTreeSet, HashMap}; +use std::fmt; use std::future::Future; use std::net::{IpAddr, Ipv4Addr, SocketAddr, SocketAddrV6}; use std::sync::{Arc, Weak}; @@ -130,7 +131,6 @@ async fn list_interfaces( } #[derive(Debug, Clone, Deserialize, Serialize, Parser, TS)] -#[ts(export)] struct NetworkInterfaceSetPublicParams { gateway: GatewayId, public: Option, @@ -147,7 +147,6 @@ async fn set_public( } #[derive(Debug, Clone, Deserialize, Serialize, Parser, TS)] -#[ts(export)] struct UnsetPublicParams { gateway: GatewayId, } @@ -163,7 +162,6 @@ async fn unset_public( } #[derive(Debug, Clone, Deserialize, Serialize, Parser, TS)] -#[ts(export)] struct ForgetGatewayParams { gateway: GatewayId, } @@ -176,7 +174,6 @@ async fn forget_iface( } #[derive(Debug, Clone, Deserialize, Serialize, Parser, TS)] -#[ts(export)] struct RenameGatewayParams { id: GatewayId, name: InternedString, @@ -404,6 +401,12 @@ async fn watcher( ) { loop { let res: Result<(), Error> = async { + Command::new("systemctl") + .arg("start") + .arg("NetworkManager") + .invoke(ErrorKind::Network) + .await?; + let 
connection = Connection::system().await?; let netman_proxy = NetworkManagerProxy::new(&connection).await?; @@ -436,6 +439,11 @@ async fn watcher( until .run(async { let devices = netman_proxy.all_devices().await?; + ensure_code!( + !devices.is_empty(), + ErrorKind::Network, + "NetworkManager returned no devices. Trying again..." + ); let mut ifaces = BTreeSet::new(); let mut jobs = Vec::new(); for device in devices { @@ -1538,6 +1546,14 @@ pub struct NetworkInterfaceListenerAcceptMetadata { pub inner: ::Metadata, pub info: GatewayInfo, } +impl fmt::Debug for NetworkInterfaceListenerAcceptMetadata { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("NetworkInterfaceListenerAcceptMetadata") + .field("inner", &self.inner) + .field("info", &self.info) + .finish() + } +} impl Clone for NetworkInterfaceListenerAcceptMetadata where ::Metadata: Clone, @@ -1614,3 +1630,39 @@ where Self::new(Some(Either::Left(listener))) } } + +#[test] +fn test_filter() { + use crate::net::host::binding::NetInfo; + let wg1 = "wg1".parse::().unwrap(); + assert!(!InterfaceFilter::filter( + &AndFilter( + NetInfo { + private_disabled: [wg1.clone()].into_iter().collect(), + public_enabled: Default::default(), + assigned_port: None, + assigned_ssl_port: None, + }, + AndFilter(IdFilter(wg1.clone()), PublicFilter { public: false }), + ) + .into_dyn(), + &wg1, + &NetworkInterfaceInfo { + name: None, + public: None, + secure: None, + ip_info: Some(Arc::new(IpInfo { + name: "".into(), + scope_id: 3, + device_type: Some(NetworkInterfaceType::Wireguard), + subnets: ["10.59.0.2/24".parse::().unwrap()] + .into_iter() + .collect(), + lan_ip: Default::default(), + wan_ip: None, + ntp_servers: Default::default(), + dns_servers: Default::default(), + })), + }, + )); +} diff --git a/core/startos/src/net/ssl.rs b/core/startos/src/net/ssl.rs index aa34bab72..d3a5f8d1c 100644 --- a/core/startos/src/net/ssl.rs +++ b/core/startos/src/net/ssl.rs @@ -19,7 +19,7 @@ use 
openssl::x509::extension::{ AuthorityKeyIdentifier, BasicConstraints, KeyUsage, SubjectAlternativeName, SubjectKeyIdentifier, }; -use openssl::x509::{X509, X509Builder, X509NameBuilder}; +use openssl::x509::{X509, X509Builder, X509NameBuilder, X509Ref}; use openssl::*; use patch_db::HasModel; use serde::{Deserialize, Serialize}; @@ -48,6 +48,17 @@ pub fn gen_nistp256() -> Result, ErrorStack> { )?)?) } +pub fn should_use_cert(cert: &X509Ref) -> Result { + Ok(cert + .not_before() + .compare(Asn1Time::days_from_now(0)?.as_ref())? + == Ordering::Less + && cert + .not_after() + .compare(Asn1Time::days_from_now(30)?.as_ref())? + == Ordering::Greater) +} + #[derive(Debug, Deserialize, Serialize, HasModel)] #[model = "Model"] #[serde(rename_all = "camelCase")] @@ -83,30 +94,8 @@ impl Model { .map(|m| m.de()) .transpose()? { - if cert_data - .certs - .ed25519 - .not_before() - .compare(Asn1Time::days_from_now(0)?.as_ref())? - == Ordering::Less - && cert_data - .certs - .ed25519 - .not_after() - .compare(Asn1Time::days_from_now(30)?.as_ref())? - == Ordering::Greater - && cert_data - .certs - .nistp256 - .not_before() - .compare(Asn1Time::days_from_now(0)?.as_ref())? - == Ordering::Less - && cert_data - .certs - .nistp256 - .not_after() - .compare(Asn1Time::days_from_now(30)?.as_ref())? - == Ordering::Greater + if should_use_cert(&cert_data.certs.ed25519)? + && should_use_cert(&cert_data.certs.nistp256)? 
{ return Ok(FullchainCertData { root: self.as_root_cert().de()?.0, diff --git a/core/startos/src/net/tor/ctor.rs b/core/startos/src/net/tor/ctor.rs index 83011bd36..e956e551e 100644 --- a/core/startos/src/net/tor/ctor.rs +++ b/core/startos/src/net/tor/ctor.rs @@ -889,7 +889,8 @@ async fn torctl( } } } - Err(Error::new(eyre!("Log stream terminated"), ErrorKind::Tor)) + // Err(Error::new(eyre!("Log stream terminated"), ErrorKind::Tor)) + Ok(()) }; let health_checker = async { let mut last_success = Instant::now(); diff --git a/core/startos/src/net/vhost.rs b/core/startos/src/net/vhost.rs index 80c3325fd..7447b8b76 100644 --- a/core/startos/src/net/vhost.rs +++ b/core/startos/src/net/vhost.rs @@ -1,5 +1,6 @@ use std::any::Any; use std::collections::{BTreeMap, BTreeSet}; +use std::fmt; use std::net::{IpAddr, SocketAddr}; use std::sync::{Arc, Weak}; use std::task::{Poll, ready}; @@ -41,6 +42,7 @@ use crate::net::tls::{ use crate::net::web_server::{Accept, AcceptStream, ExtractVisitor, TcpMetadata, extract}; use crate::prelude::*; use crate::util::collections::EqSet; +use crate::util::future::WeakFuture; use crate::util::serde::{HandlerExtSerde, MaybeUtf8String, display_serializable}; use crate::util::sync::{SyncMutex, Watch}; @@ -134,7 +136,6 @@ impl VHostController { pub fn dump_table( &self, ) -> BTreeMap, BTreeMap>, EqSet>> { - let ip_info = self.interfaces.watcher.ip_info(); self.servers.peek(|s| { s.iter() .map(|(k, v)| { @@ -187,7 +188,7 @@ pub trait VHostTarget: std::fmt::Debug + Eq { hello: &'a ClientHello<'a>, metadata: &'a ::Metadata, ) -> impl Future> + Send + 'a; - fn handle_stream(&self, stream: AcceptStream, prev: Self::PreprocessRes); + fn handle_stream(&self, stream: AcceptStream, prev: Self::PreprocessRes, rc: Weak<()>); } pub trait DynVHostTargetT: std::fmt::Debug + Any { @@ -199,7 +200,7 @@ pub trait DynVHostTargetT: std::fmt::Debug + Any { hello: &'a ClientHello<'a>, metadata: &'a ::Metadata, ) -> BoxFuture<'a, Option<(ServerConfig, Box)>>; - fn 
handle_stream(&self, stream: AcceptStream, prev: Box); + fn handle_stream(&self, stream: AcceptStream, prev: Box, rc: Weak<()>); fn eq(&self, other: &dyn DynVHostTargetT) -> bool; } impl + 'static> DynVHostTargetT for T { @@ -219,9 +220,9 @@ impl + 'static> DynVHostTargetT for T { .map(|o| o.map(|(cfg, res)| (cfg, Box::new(res) as Box))) .boxed() } - fn handle_stream(&self, stream: AcceptStream, prev: Box) { + fn handle_stream(&self, stream: AcceptStream, prev: Box, rc: Weak<()>) { if let Ok(prev) = prev.downcast() { - VHostTarget::handle_stream(self, stream, *prev); + VHostTarget::handle_stream(self, stream, *prev, rc); } } fn eq(&self, other: &dyn DynVHostTargetT) -> bool { @@ -251,21 +252,27 @@ impl PartialEq for DynVHostTarget { } } impl Eq for DynVHostTarget {} -struct Preprocessed(DynVHostTarget, Box); +struct Preprocessed(DynVHostTarget, Weak<()>, Box); +impl fmt::Debug for Preprocessed { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + (self.0).0.fmt(f) + } +} impl DynVHostTarget { async fn into_preprocessed( self, + rc: Weak<()>, prev: ServerConfig, hello: &ClientHello<'_>, metadata: &::Metadata, ) -> Option<(ServerConfig, Preprocessed)> { let (cfg, res) = self.0.preprocess(prev, hello, metadata).await?; - Some((cfg, Preprocessed(self, res))) + Some((cfg, Preprocessed(self, rc, res))) } } impl Preprocessed { fn finish(self, stream: AcceptStream) { - (self.0).0.handle_stream(stream, self.1); + (self.0).0.handle_stream(stream, self.2, self.1); } } @@ -279,6 +286,7 @@ pub struct ProxyTarget { impl PartialEq for ProxyTarget { fn eq(&self, other: &Self) -> bool { self.filter == other.filter + && self.acme == other.acme && self.addr == other.addr && self.connect_ssl.as_ref().map(Arc::as_ptr) == other.connect_ssl.as_ref().map(Arc::as_ptr) @@ -294,6 +302,9 @@ where type PreprocessRes = AcceptStream; fn filter(&self, metadata: &::Metadata) -> bool { let info = extract::(metadata); + if info.is_none() { + tracing::warn!("No GatewayInfo on metadata"); + 
} info.as_ref() .map_or(true, |i| self.filter.filter(&i.id, &i.info)) } @@ -304,7 +315,7 @@ where &'a self, mut prev: ServerConfig, hello: &'a ClientHello<'a>, - metadata: &'a ::Metadata, + _: &'a ::Metadata, ) -> Option<(ServerConfig, Self::PreprocessRes)> { let tcp_stream = TcpStream::connect(self.addr) .await @@ -345,8 +356,10 @@ where } Some((prev, Box::pin(tcp_stream))) } - fn handle_stream(&self, mut stream: AcceptStream, mut prev: Self::PreprocessRes) { - tokio::spawn(async move { tokio::io::copy_bidirectional(&mut stream, &mut prev).await }); + fn handle_stream(&self, mut stream: AcceptStream, mut prev: Self::PreprocessRes, rc: Weak<()>) { + tokio::spawn(async move { + WeakFuture::new(rc, tokio::io::copy_bidirectional(&mut stream, &mut prev)).await + }); } } @@ -436,16 +449,16 @@ where return Some(prev); } - let target = self.0.peek(|m| { + let (target, rc) = self.0.peek(|m| { m.get(&hello.server_name().map(InternedString::from)) .into_iter() .flatten() .filter(|(_, rc)| rc.strong_count() > 0) .find(|(t, _)| t.0.filter(metadata)) - .map(|(e, _)| e.clone()) + .map(|(t, rc)| (t.clone(), rc.clone())) })?; - let (prev, store) = target.into_preprocessed(prev, hello, metadata).await?; + let (prev, store) = target.into_preprocessed(rc, prev, hello, metadata).await?; self.1 = Some(store); @@ -480,6 +493,14 @@ struct VHostListenerMetadata { inner: TlsMetadata, preprocessed: Preprocessed, } +impl fmt::Debug for VHostListenerMetadata { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_struct("VHostListenerMetadata") + .field("inner", &self.inner) + .field("preprocessed", &self.preprocessed) + .finish() + } +} impl Accept for VHostListener where for<'a> M: HasModel> @@ -637,6 +658,7 @@ impl VHostServer { changed = true; Arc::new(()) }; + targets.retain(|_, rc| rc.strong_count() > 0); targets.insert(target, Arc::downgrade(&rc)); writable.insert(hostname, targets); res = Ok(rc); diff --git a/core/startos/src/net/web_server.rs 
b/core/startos/src/net/web_server.rs index a03365417..c1844ec44 100644 --- a/core/startos/src/net/web_server.rs +++ b/core/startos/src/net/web_server.rs @@ -1,3 +1,4 @@ +use core::fmt; use std::any::Any; use std::collections::BTreeMap; use std::future::Future; @@ -68,7 +69,7 @@ pub fn extract< metadata: &M, ) -> Option { let mut visitor = ExtractVisitor(None); - visitor.visit(metadata); + metadata.visit(&mut visitor); visitor.0 } @@ -84,7 +85,7 @@ impl Visit for TcpMetadata { } pub trait Accept { - type Metadata; + type Metadata: fmt::Debug; fn poll_accept( &mut self, cx: &mut std::task::Context<'_>, @@ -144,7 +145,7 @@ where } } -#[derive(Clone, VisitFields)] +#[derive(Debug, Clone, VisitFields)] pub struct MapListenerMetadata { pub inner: M, pub key: K, @@ -162,7 +163,7 @@ where impl Accept for BTreeMap where - K: Clone, + K: Clone + fmt::Debug, A: Accept, { type Metadata = MapListenerMetadata; @@ -218,40 +219,38 @@ trait DynAcceptT: Send + Sync { fn poll_accept( &mut self, cx: &mut std::task::Context<'_>, - ) -> Poll< - Result< - ( - Box Visit> + Send + Sync>, - AcceptStream, - ), - Error, - >, - >; + ) -> Poll>; } impl DynAcceptT for A where A: Accept + Send + Sync, - for<'a> ::Metadata: Visit> + Send + Sync + 'static, + ::Metadata: DynMetadataT + 'static, { fn poll_accept( &mut self, cx: &mut std::task::Context<'_>, - ) -> Poll< - Result< - ( - Box Visit> + Send + Sync>, - AcceptStream, - ), - Error, - >, - > { + ) -> Poll> { let (metadata, stream) = ready!(Accept::poll_accept(self, cx)?); - Poll::Ready(Ok((Box::new(metadata), stream))) + Poll::Ready(Ok((DynMetadata(Box::new(metadata)), stream))) } } pub struct DynAccept(Box); +trait DynMetadataT: for<'a> Visit> + fmt::Debug + Send + Sync {} +impl DynMetadataT for T where for<'a> T: Visit> + fmt::Debug + Send + Sync {} + +#[derive(Debug)] +pub struct DynMetadata(Box); +impl<'a> Visit> for DynMetadata { + fn visit( + &self, + visitor: &mut ExtensionVisitor<'a>, + ) -> as Visitor>::Result { + 
self.0.visit(visitor) + } +} + impl Accept for DynAccept { - type Metadata = Box Visit> + Send + Sync>; + type Metadata = DynMetadata; fn poll_accept( &mut self, cx: &mut std::task::Context<'_>, @@ -325,7 +324,7 @@ impl Acceptor> { } impl Acceptor> where - K: Ord + Clone + Send + Sync + 'static, + K: Ord + Clone + fmt::Debug + Send + Sync + 'static, { pub async fn bind_map( listen: impl IntoIterator, @@ -347,7 +346,7 @@ where } impl Acceptor> where - K: Ord + Clone + Send + Sync + 'static, + K: Ord + Clone + fmt::Debug + Send + Sync + 'static, { pub async fn bind_map_dyn( listen: impl IntoIterator, diff --git a/core/startos/src/os_install/mod.rs b/core/startos/src/os_install/mod.rs index 7b5609b6a..892cc52ae 100644 --- a/core/startos/src/os_install/mod.rs +++ b/core/startos/src/os_install/mod.rs @@ -356,7 +356,10 @@ pub async fn execute( let mut install = Command::new("chroot"); install.arg(overlay.path()).arg("grub-install"); if tokio::fs::metadata("/sys/firmware/efi").await.is_err() { - install.arg("--target=i386-pc"); + match ARCH { + "x86_64" => install.arg("--target=i386-pc"), + _ => &mut install, + }; } else { match ARCH { "x86_64" => install.arg("--target=x86_64-efi"), @@ -372,7 +375,7 @@ pub async fn execute( Command::new("chroot") .arg(overlay.path()) - .arg("update-grub2") + .arg("update-grub") .invoke(crate::ErrorKind::Grub) .await?; dev.unmount(false).await?; diff --git a/core/startos/src/service/effects/subcontainer/sync.rs b/core/startos/src/service/effects/subcontainer/sync.rs index 16ffca3cd..6e4930c40 100644 --- a/core/startos/src/service/effects/subcontainer/sync.rs +++ b/core/startos/src/service/effects/subcontainer/sync.rs @@ -150,31 +150,39 @@ impl ExecParams { cmd.env(k, v); } - if let Some(uid) = user.as_deref().and_then(|u| u.parse::().ok()) { - cmd.uid(uid); - } else if let Some(user) = user { - let passwd = std::fs::read_to_string("/etc/passwd") - .with_ctx(|_| (ErrorKind::Filesystem, "read /etc/passwd")); - if passwd.is_err() && user == 
"root" { - cmd.uid(0); - cmd.gid(0); + if let Some((uid, gid)) = + if let Some(uid) = user.as_deref().and_then(|u| u.parse::().ok()) { + Some((uid, uid)) + } else if let Some(user) = user { + let passwd = std::fs::read_to_string("/etc/passwd") + .with_ctx(|_| (ErrorKind::Filesystem, "read /etc/passwd")); + Some(if passwd.is_err() && user == "root" { + (0, 0) + } else { + let (uid, gid) = passwd? + .lines() + .find_map(|l| { + let mut split = l.trim().split(":"); + if user != split.next()? { + return None; + } + split.next(); // throw away x + Some((split.next()?.parse().ok()?, split.next()?.parse().ok()?)) + // uid gid + }) + .or_not_found(lazy_format!("{user} in /etc/passwd"))?; + (uid, gid) + }) } else { - let (uid, gid) = passwd? - .lines() - .find_map(|l| { - let mut split = l.trim().split(":"); - if user != split.next()? { - return None; - } - split.next(); // throw away x - Some((split.next()?.parse().ok()?, split.next()?.parse().ok()?)) - // uid gid - }) - .or_not_found(lazy_format!("{user} in /etc/passwd"))?; - cmd.uid(uid); - cmd.gid(gid); + None } - }; + { + std::os::unix::fs::chown("/proc/self/fd/0", Some(uid), Some(gid)).log_err(); + std::os::unix::fs::chown("/proc/self/fd/1", Some(uid), Some(gid)).log_err(); + std::os::unix::fs::chown("/proc/self/fd/2", Some(uid), Some(gid)).log_err(); + cmd.uid(uid); + cmd.gid(gid); + } if let Some(workdir) = workdir { cmd.current_dir(workdir); } else { diff --git a/core/startos/src/service/mod.rs b/core/startos/src/service/mod.rs index b4c895ee0..9f8621d2a 100644 --- a/core/startos/src/service/mod.rs +++ b/core/startos/src/service/mod.rs @@ -725,6 +725,8 @@ pub struct AttachParams { name: Option, #[ts(type = "string | null")] image_id: Option, + #[ts(type = "string | null")] + user: Option, } pub async fn attach( ctx: RpcContext, @@ -738,6 +740,7 @@ pub async fn attach( subcontainer, image_id, name, + user, }: AttachParams, ) -> Result { let (container_id, subcontainer_id, image_id, workdir, root_command) = { @@ 
-814,9 +817,26 @@ pub async fn attach( .join("etc") .join("passwd"); - let root_command = get_passwd_root_command(passwd).await; + let image_meta = serde_json::from_str::( + &tokio::fs::read_to_string( + root_dir + .join("media/startos/images/") + .join(&image_id) + .with_extension("json"), + ) + .await?, + ) + .with_kind(ErrorKind::Deserialization)?; - let workdir = attach_workdir(&image_id, &root_dir).await?; + let root_command = get_passwd_command( + passwd, + user.as_deref() + .or_else(|| image_meta["user"].as_str()) + .unwrap_or("root"), + ) + .await; + + let workdir = image_meta["workdir"].as_str().map(|s| s.to_owned()); if subcontainer_ids.len() > 1 { let subcontainer_ids = subcontainer_ids @@ -849,6 +869,7 @@ pub async fn attach( pty_size: Option, image_id: ImageId, workdir: Option, + user: Option, root_command: &RootCommand, ) -> Result<(), Error> { use axum::extract::ws::Message; @@ -871,6 +892,10 @@ pub async fn attach( .with_extension("env"), ); + if let Some(user) = user { + cmd.arg("--user").arg(&*user); + } + if let Some(workdir) = workdir { cmd.arg("--workdir").arg(workdir); } @@ -1032,6 +1057,7 @@ pub async fn attach( pty_size, image_id, workdir, + user, &root_command, ) .await @@ -1051,19 +1077,46 @@ pub async fn attach( Ok(guid) } -async fn attach_workdir(image_id: &ImageId, root_dir: &Path) -> Result, Error> { - let path_str = root_dir.join("media/startos/images/"); - - let mut subcontainer_json = - tokio::fs::File::open(path_str.join(image_id).with_extension("json")).await?; - let mut contents = vec![]; - subcontainer_json.read_to_end(&mut contents).await?; - let subcontainer_json: serde_json::Value = - serde_json::from_slice(&contents).with_kind(ErrorKind::Filesystem)?; - Ok(subcontainer_json["workdir"].as_str().map(|x| x.to_string())) +#[derive(Deserialize, Serialize, TS)] +#[serde(rename_all = "camelCase")] +pub struct ListSubcontainersParams { + pub id: PackageId, } -async fn get_passwd_root_command(etc_passwd_path: PathBuf) -> RootCommand 
{ +#[derive(Clone, Debug, Serialize, Deserialize, TS)] +#[serde(rename_all = "camelCase")] +pub struct SubcontainerInfo { + pub name: InternedString, + pub image_id: ImageId, +} + +pub async fn list_subcontainers( + ctx: RpcContext, + ListSubcontainersParams { id }: ListSubcontainersParams, +) -> Result, Error> { + let service = ctx.services.get(&id).await; + let service_ref = service.as_ref().or_not_found(&id)?; + let container = &service_ref.seed.persistent_container; + + let subcontainers = container.subcontainers.lock().await; + + let result: BTreeMap = subcontainers + .iter() + .map(|(guid, subcontainer)| { + ( + guid.clone(), + SubcontainerInfo { + name: subcontainer.name.clone(), + image_id: subcontainer.image_id.clone(), + }, + ) + }) + .collect(); + + Ok(result) +} + +async fn get_passwd_command(etc_passwd_path: PathBuf, user: &str) -> RootCommand { async { let mut file = tokio::fs::File::open(etc_passwd_path).await?; @@ -1074,8 +1127,8 @@ async fn get_passwd_root_command(etc_passwd_path: PathBuf) -> RootCommand { for line in contents.split('\n') { let line_information = line.split(':').collect::>(); - if let (Some(&"root"), Some(shell)) = - (line_information.first(), line_information.last()) + if let (Some(&u), Some(shell)) = (line_information.first(), line_information.last()) + && u == user { return Ok(shell.to_string()); } @@ -1106,6 +1159,8 @@ pub struct CliAttachParams { #[arg(long, short)] name: Option, #[arg(long, short)] + user: Option, + #[arg(long, short)] image_id: Option, } #[instrument[skip_all]] @@ -1147,6 +1202,7 @@ pub async fn cli_attach( "subcontainer": params.subcontainer, "imageId": params.image_id, "name": params.name, + "user": params.user, }), ) .await?, diff --git a/core/startos/src/tunnel/api.rs b/core/startos/src/tunnel/api.rs index e2e60ffe7..5b88a5464 100644 --- a/core/startos/src/tunnel/api.rs +++ b/core/startos/src/tunnel/api.rs @@ -353,6 +353,7 @@ pub async fn show_config( Ok(client .client_config( ip, + subnet, 
wg.as_key().de()?.verifying_key(), (wan_addr, wg.as_port().de()?).into(), ) diff --git a/core/startos/src/tunnel/auth.rs b/core/startos/src/tunnel/auth.rs index 21482980f..62b960f96 100644 --- a/core/startos/src/tunnel/auth.rs +++ b/core/startos/src/tunnel/auth.rs @@ -293,14 +293,7 @@ pub async fn set_password_cli( Ok(()) } -pub async fn reset_password( - HandlerArgs { - context, - parent_method, - method, - .. - }: HandlerArgs, -) -> Result<(), Error> { +pub async fn reset_password(ctx: CliContext) -> Result<(), Error> { println!("Generating a random password..."); let params = SetPasswordParams { password: base32::encode( @@ -309,11 +302,7 @@ pub async fn reset_password( ), }; - context - .call_remote::( - &parent_method.iter().chain(method.iter()).join("."), - to_value(¶ms)?, - ) + ctx.call_remote::("auth.set-password", to_value(¶ms)?) .await?; println!("Your new password is:"); diff --git a/core/startos/src/tunnel/client.conf.template b/core/startos/src/tunnel/client.conf.template index 58673b890..c7e811d48 100644 --- a/core/startos/src/tunnel/client.conf.template +++ b/core/startos/src/tunnel/client.conf.template @@ -7,6 +7,6 @@ PrivateKey = {privkey} [Peer] PublicKey = {server_pubkey} PresharedKey = {psk} -AllowedIPs = 0.0.0.0/0,::/0 +AllowedIPs = {subnet} Endpoint = {server_addr} PersistentKeepalive = 25 \ No newline at end of file diff --git a/core/startos/src/tunnel/wg.rs b/core/startos/src/tunnel/wg.rs index 539a7438e..efe487e46 100644 --- a/core/startos/src/tunnel/wg.rs +++ b/core/startos/src/tunnel/wg.rs @@ -170,12 +170,14 @@ impl WgConfig { pub fn client_config( self, addr: Ipv4Addr, + subnet: Ipv4Net, server_pubkey: Base64, server_addr: SocketAddr, ) -> ClientConfig { ClientConfig { client_config: self, client_addr: addr, + subnet, server_pubkey, server_addr, } @@ -213,6 +215,7 @@ where pub struct ClientConfig { client_config: WgConfig, client_addr: Ipv4Addr, + subnet: Ipv4Net, #[serde(deserialize_with = "deserialize_verifying_key")] server_pubkey: 
Base64, server_addr: SocketAddr, @@ -226,6 +229,7 @@ impl std::fmt::Display for ClientConfig { privkey = self.client_config.key.to_padded_string(), psk = self.client_config.psk.to_padded_string(), addr = self.client_addr, + subnet = self.subnet, server_pubkey = self.server_pubkey.to_padded_string(), server_addr = self.server_addr, ) diff --git a/core/startos/src/update/mod.rs b/core/startos/src/update/mod.rs index 635ab59c1..18d23320e 100644 --- a/core/startos/src/update/mod.rs +++ b/core/startos/src/update/mod.rs @@ -19,12 +19,6 @@ use ts_rs::TS; use crate::PLATFORM; use crate::context::{CliContext, RpcContext}; -use crate::disk::mount::filesystem::MountType; -use crate::disk::mount::filesystem::bind::Bind; -use crate::disk::mount::filesystem::block_dev::BlockDev; -use crate::disk::mount::filesystem::efivarfs::EfiVarFs; -use crate::disk::mount::filesystem::overlayfs::OverlayGuard; -use crate::disk::mount::guard::{GenericMountGuard, MountGuard, TmpMountGuard}; use crate::notifications::{NotificationLevel, notify}; use crate::prelude::*; use crate::progress::{ @@ -275,7 +269,6 @@ async fn maybe_do_update( download_phase.set_total(asset.commitment.size); download_phase.set_units(Some(ProgressUnits::Bytes)); let reverify_phase = progress.add_phase("Reverifying File".into(), Some(10)); - let sync_boot_phase = progress.add_phase("Syncing Boot Files".into(), Some(1)); let finalize_phase = progress.add_phase("Finalizing Update".into(), Some(1)); let start_progress = progress.snapshot(); @@ -331,7 +324,6 @@ async fn maybe_do_update( prune_phase, download_phase, reverify_phase, - sync_boot_phase, finalize_phase, }, ) @@ -388,7 +380,6 @@ struct UpdateProgressHandles { prune_phase: PhaseProgressTrackerHandle, download_phase: PhaseProgressTrackerHandle, reverify_phase: PhaseProgressTrackerHandle, - sync_boot_phase: PhaseProgressTrackerHandle, finalize_phase: PhaseProgressTrackerHandle, } @@ -401,7 +392,6 @@ async fn do_update( mut prune_phase, mut download_phase, mut 
reverify_phase, - mut sync_boot_phase, mut finalize_phase, }: UpdateProgressHandles, ) -> Result<(), Error> { @@ -416,9 +406,7 @@ async fn do_update( prune_phase.complete(); download_phase.start(); - let path = Path::new("/media/startos/images") - .join(hex::encode(&asset.commitment.hash[..16])) - .with_extension("rootfs"); + let path = Path::new("/media/startos/images/next.squashfs"); let mut dst = AtomicFile::new(&path, None::<&Path>) .await .with_kind(ErrorKind::Filesystem)?; @@ -438,92 +426,24 @@ async fn do_update( dst.save().await.with_kind(ErrorKind::Filesystem)?; reverify_phase.complete(); - sync_boot_phase.start(); + finalize_phase.start(); Command::new("unsquashfs") .arg("-n") .arg("-f") .arg("-d") .arg("/") .arg(&path) - .arg("boot") + .arg("/usr/lib/startos/scripts/upgrade") .invoke(crate::ErrorKind::Filesystem) .await?; - if &*PLATFORM != "raspberrypi" { - let mountpoint = "/media/startos/next"; - let root_guard = OverlayGuard::mount( - TmpMountGuard::mount(&BlockDev::new(&path), MountType::ReadOnly).await?, - mountpoint, - ) - .await?; - let startos = MountGuard::mount( - &Bind::new("/media/startos/root"), - root_guard.path().join("media/startos/root"), - MountType::ReadOnly, - ) - .await?; - let boot_guard = MountGuard::mount( - &Bind::new("/boot"), - root_guard.path().join("boot"), - MountType::ReadWrite, - ) - .await?; - let dev = MountGuard::mount( - &Bind::new("/dev"), - root_guard.path().join("dev"), - MountType::ReadWrite, - ) - .await?; - let proc = MountGuard::mount( - &Bind::new("/proc"), - root_guard.path().join("proc"), - MountType::ReadWrite, - ) - .await?; - let sys = MountGuard::mount( - &Bind::new("/sys"), - root_guard.path().join("sys"), - MountType::ReadWrite, - ) - .await?; - let efivarfs = if tokio::fs::metadata("/sys/firmware/efi").await.is_ok() { - Some( - MountGuard::mount( - &EfiVarFs, - root_guard.path().join("sys/firmware/efi/efivars"), - MountType::ReadWrite, - ) - .await?, - ) - } else { - None - }; - Command::new("chroot") 
- .arg(root_guard.path()) - .arg("update-grub2") - .invoke(ErrorKind::Grub) - .await?; + let checksum = hex::encode(&asset.commitment.hash[..16]); - if let Some(efivarfs) = efivarfs { - efivarfs.unmount(false).await?; - } - sys.unmount(false).await?; - proc.unmount(false).await?; - dev.unmount(false).await?; - boot_guard.unmount(false).await?; - startos.unmount(false).await?; - root_guard.unmount(false).await?; - } - sync_boot_phase.complete(); - - finalize_phase.start(); - Command::new("ln") - .arg("-rsf") + Command::new("/usr/lib/startos/scripts/upgrade") + .env("CHECKSUM", &checksum) .arg(&path) - .arg("/media/startos/config/current.rootfs") - .invoke(crate::ErrorKind::Filesystem) + .invoke(ErrorKind::Grub) .await?; - Command::new("sync").invoke(ErrorKind::Filesystem).await?; finalize_phase.complete(); progress.complete(); diff --git a/core/startos/src/util/future.rs b/core/startos/src/util/future.rs index d9a8369e2..4807411c8 100644 --- a/core/startos/src/util/future.rs +++ b/core/startos/src/util/future.rs @@ -1,11 +1,10 @@ use std::pin::Pin; +use std::sync::Weak; use std::task::{Context, Poll}; -use axum::middleware::FromFn; use futures::future::{BoxFuture, FusedFuture, abortable, pending}; use futures::stream::{AbortHandle, Abortable, BoxStream}; use futures::{Future, FutureExt, Stream, StreamExt}; -use rpc_toolkit::from_fn_blocking; use tokio::sync::watch; use tokio::task::LocalSet; @@ -201,3 +200,26 @@ async fn test_cancellable() { handle.cancel_and_wait().await; assert!(weak.strong_count() == 0); } + +#[pin_project::pin_project] +pub struct WeakFuture { + rc: Weak<()>, + #[pin] + fut: Fut, +} +impl WeakFuture { + pub fn new(rc: Weak<()>, fut: Fut) -> Self { + Self { rc, fut } + } +} +impl Future for WeakFuture { + type Output = Option; + fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll { + let this = self.project(); + if this.rc.strong_count() > 0 { + this.fut.poll(cx).map(Some) + } else { + Poll::Ready(None) + } + } +} diff --git 
a/core/startos/src/util/mod.rs b/core/startos/src/util/mod.rs index 61e1bcfb4..772c8cd6e 100644 --- a/core/startos/src/util/mod.rs +++ b/core/startos/src/util/mod.rs @@ -49,7 +49,7 @@ pub mod net; pub mod rpc; pub mod rpc_client; pub mod serde; -//pub mod squashfs; +// pub mod squashfs; pub mod sync; pub mod tui; diff --git a/core/startos/src/util/squashfs.rs b/core/startos/src/util/squashfs.rs index 4f63e846a..d2ab135c2 100644 --- a/core/startos/src/util/squashfs.rs +++ b/core/startos/src/util/squashfs.rs @@ -98,8 +98,7 @@ impl Visit> for Superblock { #[pin_project::pin_project] pub struct MetadataBlocksWriter { - input: [u8; 8192], - size: usize, + input: PartialBuffer<[u8; 8192]>, size_addr: Option, output: PartialBuffer<[u8; 8192]>, output_flushed: usize, @@ -123,25 +122,29 @@ enum WriteState { WritingSizeHeader(u16), WritingOutput(Box), EncodingInput, - FinishingCompression, - WritingFinalSizeHeader(u64, u64), SeekingToEnd(u64), } fn poll_seek_helper( - writer: std::pin::Pin<&mut W>, + mut writer: std::pin::Pin<&mut W>, seek_state: &mut SeekState, cx: &mut std::task::Context<'_>, pos: u64, ) -> std::task::Poll> { match *seek_state { SeekState::Idle => { - writer.start_seek(std::io::SeekFrom::Start(pos))?; + writer.as_mut().start_seek(std::io::SeekFrom::Start(pos))?; *seek_state = SeekState::Seeking(pos); - Poll::Pending + match writer.as_mut().poll_complete(cx)? 
{ + Poll::Ready(result) => { + *seek_state = SeekState::Idle; + Poll::Ready(Ok(result)) + } + Poll::Pending => Poll::Pending, + } } SeekState::Seeking(target) if target == pos => { - let result = ready!(writer.poll_complete(cx))?; + let result = ready!(writer.as_mut().poll_complete(cx))?; *seek_state = SeekState::Idle; Poll::Ready(Ok(result)) } @@ -151,35 +154,53 @@ fn poll_seek_helper( pos, old_target ); - writer.start_seek(std::io::SeekFrom::Start(pos))?; + writer.as_mut().start_seek(std::io::SeekFrom::Start(pos))?; *seek_state = SeekState::Seeking(pos); - Poll::Pending + match writer.as_mut().poll_complete(cx)? { + Poll::Ready(result) => { + *seek_state = SeekState::Idle; + Poll::Ready(Ok(result)) + } + Poll::Pending => Poll::Pending, + } } SeekState::GettingPosition => { tracing::warn!( "poll_seek({}) called while getting stream position, canceling", pos ); - writer.start_seek(std::io::SeekFrom::Start(pos))?; + writer.as_mut().start_seek(std::io::SeekFrom::Start(pos))?; *seek_state = SeekState::Seeking(pos); - Poll::Pending + match writer.as_mut().poll_complete(cx)? { + Poll::Ready(result) => { + *seek_state = SeekState::Idle; + Poll::Ready(Ok(result)) + } + Poll::Pending => Poll::Pending, + } } } } fn poll_stream_position_helper( - writer: std::pin::Pin<&mut W>, + mut writer: std::pin::Pin<&mut W>, seek_state: &mut SeekState, cx: &mut std::task::Context<'_>, ) -> std::task::Poll> { match *seek_state { SeekState::Idle => { - writer.start_seek(std::io::SeekFrom::Current(0))?; + writer.as_mut().start_seek(std::io::SeekFrom::Current(0))?; *seek_state = SeekState::GettingPosition; - Poll::Pending + match writer.as_mut().poll_complete(cx)? 
{ + Poll::Ready(result) => { + *seek_state = SeekState::Idle; + Poll::Ready(Ok(result)) + } + Poll::Pending => Poll::Pending, + } } SeekState::GettingPosition => { - let result = ready!(writer.poll_complete(cx))?; + let result = ready!(writer.as_mut().poll_complete(cx))?; *seek_state = SeekState::Idle; Poll::Ready(Ok(result)) } @@ -188,18 +209,22 @@ fn poll_stream_position_helper( "poll_stream_position called while seeking to {}, canceling", target ); - writer.start_seek(std::io::SeekFrom::Current(0))?; + writer.as_mut().start_seek(std::io::SeekFrom::Current(0))?; *seek_state = SeekState::GettingPosition; - Poll::Pending + match writer.as_mut().poll_complete(cx)? { + Poll::Ready(result) => { + *seek_state = SeekState::Idle; + Poll::Ready(Ok(result)) + } + Poll::Pending => Poll::Pending, + } } } } impl Write for MetadataBlocksWriter { fn write(&mut self, buf: &[u8]) -> std::io::Result { - let n = buf.len().min(self.input.len() - self.size); - self.input[self.size..self.size + n].copy_from_slice(&buf[..n]); - self.size += n; + let n = self.input.copy_unwritten_from(&mut PartialBuffer::new(buf)); if n < buf.len() { self.flush()?; } @@ -207,9 +232,9 @@ impl Write for MetadataBlocksWriter { } fn flush(&mut self) -> std::io::Result<()> { loop { - match self.write_state { + match &self.write_state { WriteState::Idle => { - if self.size == 0 { + if self.input.written().is_empty() { return Ok(()); } self.write_state = WriteState::WritingSizeHeader(0); @@ -218,12 +243,12 @@ impl Write for MetadataBlocksWriter { WriteState::WritingSizeHeader(size) => { let done = if let Some(size_addr) = self.size_addr { self.writer.seek(SeekFrom::Start(size_addr))?; - Some(size_addr + size as u64) + Some(size_addr + 2 + *size as u64) } else { self.size_addr = Some(self.writer.stream_position()?); None }; - self.output.unwritten_mut()[..2].copy_from_slice(&u16::to_le_bytes(size)[..]); + self.output.unwritten_mut()[..2].copy_from_slice(&u16::to_le_bytes(*size)[..]); self.output.advance(2); 
self.write_state = WriteState::WritingOutput(Box::new(if let Some(end) = done { @@ -242,80 +267,33 @@ impl Write for MetadataBlocksWriter { } else { self.output.reset(); self.output_flushed = 0; - self.write_state = *next; + self.write_state = *next.clone(); } } WriteState::EncodingInput => { let encoder = self.zstd.get_or_insert_with(|| ZstdEncoder::new(22)); - let mut input = PartialBuffer::new(&self.input[..self.size]); - while !self.output.unwritten().is_empty() && !input.unwritten().is_empty() { - encoder.encode(&mut input, &mut self.output)?; - } - while !encoder.flush(&mut self.output)? {} - while !encoder.finish(&mut self.output)? {} - if !self.output.unwritten().is_empty() { - let mut input = - PartialBuffer::new(&self.input[self.input_flushed..self.size]); - encoder.encode(&mut input, &mut self.output)?; - self.input_flushed += input.written().len(); - } - self.write_state = WriteState::WritingOutput(Box::new()); - continue; - } - - WriteState::FinishingCompression => { - if !self.output.unwritten().is_empty() { - if self.zstd.as_mut().unwrap().finish(&mut self.output)? 
{ - self.zstd = None; - } - } - if self.output.written().len() > self.output_flushed { - self.write_state = WriteState::WritingOutput; - continue; - } - if self.zstd.is_none() && self.output.written().len() == self.output_flushed { - self.output_flushed = 0; - self.output.reset(); - let end_addr = self.writer.stream_position()?; - let size_addr = self.size_addr.ok_or_else(|| { - std::io::Error::new( - std::io::ErrorKind::InvalidData, - "size_addr not set when finishing compression", - ) - })?; - self.write_state = WriteState::WritingFinalSizeHeader(size_addr, end_addr); - continue; - } - return Ok(()); - } - - WriteState::WritingFinalSizeHeader(size_addr, end_addr) => { - if self.output.written().len() > self.output_flushed { - let n = self - .writer - .write(&self.output.written()[self.output_flushed..])?; - self.output_flushed += n; - continue; - } - self.writer.seek(std::io::SeekFrom::Start(size_addr))?; - self.output.unwritten_mut()[..2] - .copy_from_slice(&((end_addr - size_addr - 2) as u16).to_le_bytes()); - self.output.advance(2); - let n = self.writer.write(&self.output.written())?; - self.output_flushed = n; - if n == 2 { - self.output_flushed = 0; - self.output.reset(); - self.write_state = WriteState::SeekingToEnd(end_addr); - } - continue; + encoder.encode( + &mut PartialBuffer::new(&self.input.written()), + &mut self.output, + )?; + let compressed = if !encoder.finish(&mut self.output)? 
{ + std::mem::swap(&mut self.output, &mut self.input); + false + } else { + true + }; + self.zstd = None; + self.input.reset(); + self.write_state = + WriteState::WritingOutput(Box::new(WriteState::WritingSizeHeader( + self.output.written().len() as u16 + | if compressed { 0 } else { 0x8000 }, + ))); } WriteState::SeekingToEnd(end_addr) => { - self.writer.seek(std::io::SeekFrom::Start(end_addr))?; - self.input_flushed = 0; - self.size = 0; + self.writer.seek(std::io::SeekFrom::Start(*end_addr))?; self.size_addr = None; self.write_state = WriteState::Idle; return Ok(()); @@ -332,11 +310,9 @@ impl AsyncWrite for MetadataBlocksWriter { buf: &[u8], ) -> std::task::Poll> { let this = self.as_mut().project(); - let n = buf.len().min(this.input.len() - *this.size); - this.input[*this.size..*this.size + n].copy_from_slice(&buf[..n]); - *this.size += n; + let n = this.input.copy_unwritten_from(&mut PartialBuffer::new(buf)); if n < buf.len() { - ready!(self.poll_flush(cx)?); + ready!(self.poll_flush(cx))?; } Poll::Ready(Ok(n)) } @@ -347,115 +323,76 @@ impl AsyncWrite for MetadataBlocksWriter { ) -> std::task::Poll> { loop { let mut this = self.as_mut().project(); - match *this.write_state { + match this.write_state.clone() { WriteState::Idle => { - if *this.size == 0 { + if this.input.written().is_empty() { return Poll::Ready(Ok(())); } - if this.size_addr.is_none() { + *this.write_state = WriteState::WritingSizeHeader(0); + } + + WriteState::WritingSizeHeader(size) => { + let done = if let Some(size_addr) = *this.size_addr { + ready!(poll_seek_helper( + this.writer.as_mut(), + this.seek_state, + cx, + size_addr + ))?; + Some(size_addr + 2 + size as u64) + } else { let pos = ready!(poll_stream_position_helper( this.writer.as_mut(), this.seek_state, cx ))?; *this.size_addr = Some(pos); - this.output.unwritten_mut()[..2].copy_from_slice(&[0; 2]); - this.output.advance(2); - } - *this.write_state = WriteState::WritingOutput; - continue; - } - - WriteState::WritingOutput => { - 
if this.output.written().len() > *this.output_flushed { - let n = ready!( - this.writer - .as_mut() - .poll_write(cx, &this.output.written()[*this.output_flushed..]) - )?; - *this.output_flushed += n; - continue; - } - if this.output.written().len() == *this.output_flushed { - *this.output_flushed = 0; - this.output.reset(); - } - if *this.input_flushed < *this.size { - if !this.output.unwritten().is_empty() { - let mut input = - PartialBuffer::new(&this.input[*this.input_flushed..*this.size]); - this.zstd - .get_or_insert_with(|| ZstdEncoder::new(22)) - .encode(&mut input, this.output)?; - *this.input_flushed += input.written().len(); - } - continue; + None + }; + this.output.unwritten_mut()[..2] + .copy_from_slice(&u16::to_le_bytes(size)[..]); + this.output.advance(2); + *this.write_state = WriteState::WritingOutput(Box::new(if let Some(end) = done { + WriteState::SeekingToEnd(end) } else { - if !this.output.unwritten().is_empty() { - if this.zstd.as_mut().unwrap().finish(this.output)? 
{ - *this.zstd = None; - } - continue; - } - if this.zstd.is_none() - && this.output.written().len() == *this.output_flushed - { - *this.output_flushed = 0; - this.output.reset(); - if let Some(size_addr) = *this.size_addr { - let end_addr = ready!(poll_stream_position_helper( - this.writer.as_mut(), - this.seek_state, - cx - ))?; - *this.write_state = - WriteState::WritingFinalSizeHeader(size_addr, end_addr); - ready!(poll_seek_helper( - this.writer.as_mut(), - this.seek_state, - cx, - size_addr - ))?; - this.output.unwritten_mut()[..2].copy_from_slice( - &((end_addr - size_addr - 2) as u16).to_le_bytes(), - ); - this.output.advance(2); - continue; - } - } - } - return Poll::Ready(Ok(())); + WriteState::EncodingInput + })); } - WriteState::WritingSizeHeader(_size_addr) => { - *this.write_state = WriteState::WritingOutput; - continue; + WriteState::WritingOutput(next) => { + if this.output.written().len() > *this.output_flushed { + let n = ready!(this + .writer + .as_mut() + .poll_write(cx, &this.output.written()[*this.output_flushed..]))?; + *this.output_flushed += n; + } else { + this.output.reset(); + *this.output_flushed = 0; + *this.write_state = *next; + } } WriteState::EncodingInput => { - *this.write_state = WriteState::WritingOutput; - continue; - } - - WriteState::FinishingCompression => { - *this.write_state = WriteState::WritingOutput; - continue; - } - - WriteState::WritingFinalSizeHeader(_size_addr, end_addr) => { - if this.output.written().len() > *this.output_flushed { - let n = ready!( - this.writer - .as_mut() - .poll_write(cx, &this.output.written()[*this.output_flushed..]) - )?; - *this.output_flushed += n; - continue; - } - *this.output_flushed = 0; - this.output.reset(); - *this.write_state = WriteState::SeekingToEnd(end_addr); - continue; + let encoder = this.zstd.get_or_insert_with(|| ZstdEncoder::new(22)); + encoder.encode( + &mut PartialBuffer::new(this.input.written()), + this.output, + )?; + let compressed = if 
!encoder.finish(this.output)? { + std::mem::swap(this.output, this.input); + false + } else { + true + }; + *this.zstd = None; + this.input.reset(); + *this.write_state = WriteState::WritingOutput(Box::new( + WriteState::WritingSizeHeader( + this.output.written().len() as u16 + | if compressed { 0 } else { 0x8000 }, + ), + )); } WriteState::SeekingToEnd(end_addr) => { @@ -466,8 +403,6 @@ impl AsyncWrite for MetadataBlocksWriter { end_addr ))?; *this.size_addr = None; - *this.input_flushed = 0; - *this.size = 0; *this.write_state = WriteState::Idle; return Poll::Ready(Ok(())); } @@ -486,11 +421,9 @@ impl AsyncWrite for MetadataBlocksWriter { impl MetadataBlocksWriter { pub fn new(writer: W) -> Self { Self { - input: [0; 8192], - input_flushed: 0, - size: 0, + input: PartialBuffer::new([0; 8192]), size_addr: None, - output: PartialBuffer::new([0; 4096]), + output: PartialBuffer::new([0; 8192]), output_flushed: 0, zstd: None, seek_state: SeekState::Idle, @@ -507,11 +440,10 @@ use tokio::io::AsyncRead; pub struct MetadataBlocksReader { #[pin] reader: R, - size_buf: [u8; 2], - size_bytes_read: usize, - compressed: [u8; 8192], + size_buf: PartialBuffer<[u8; 2]>, + compressed: PartialBuffer<[u8; 8192]>, compressed_size: usize, - compressed_pos: usize, + is_compressed: bool, output: PartialBuffer<[u8; 8192]>, output_pos: usize, zstd: Option, @@ -531,11 +463,10 @@ impl MetadataBlocksReader { pub fn new(reader: R) -> Self { Self { reader, - size_buf: [0; 2], - size_bytes_read: 0, - compressed: [0; 8192], + size_buf: PartialBuffer::new([0; 2]), + compressed: PartialBuffer::new([0; 8192]), compressed_size: 0, - compressed_pos: 0, + is_compressed: false, output: PartialBuffer::new([0; 8192]), output_pos: 0, zstd: None, @@ -551,11 +482,9 @@ impl Read for MetadataBlocksReader { loop { match self.state { ReadState::ReadingSize => { - let n = self - .reader - .read(&mut self.size_buf[self.size_bytes_read..])?; + let n = self.reader.read(self.size_buf.unwritten_mut())?; if n == 0 { 
- if self.size_bytes_read == 0 { + if self.size_buf.written().is_empty() { self.state = ReadState::Eof; return Ok(0); } else { @@ -566,56 +495,57 @@ impl Read for MetadataBlocksReader { } } - self.size_bytes_read += n; - if self.size_bytes_read < 2 { + self.size_buf.advance(n); + + if self.size_buf.written().len() < 2 { continue; } - let size_header = u16::from_le_bytes(self.size_buf); - let is_compressed = (size_header & 0x8000) == 0; - let size = (size_header & 0x7FFF) as usize; + let size_header = u16::from_le_bytes([ + self.size_buf.written()[0], + self.size_buf.written()[1], + ]); + self.is_compressed = (size_header & 0x8000) == 0; + self.compressed_size = (size_header & 0x7FFF) as usize; - if !is_compressed { + if self.compressed_size == 0 || self.compressed_size > 8192 { return Err(std::io::Error::new( std::io::ErrorKind::InvalidData, - "Uncompressed metadata blocks not supported", + format!("Invalid metadata block size: {}", self.compressed_size), )); } - if size == 0 || size > 8192 { - return Err(std::io::Error::new( - std::io::ErrorKind::InvalidData, - format!("Invalid metadata block size: {}", size), - )); - } - - self.compressed_size = size; - self.compressed_pos = 0; - self.size_bytes_read = 0; + self.compressed.reset(); + self.size_buf.reset(); self.state = ReadState::ReadingData; continue; } ReadState::ReadingData => { - let n = self - .reader - .read(&mut self.compressed[self.compressed_pos..self.compressed_size])?; + let n = self.reader.read(self.compressed.unwritten_mut())?; if n == 0 { return Err(std::io::Error::new( std::io::ErrorKind::UnexpectedEof, - "Unexpected EOF reading compressed data", + "Unexpected EOF reading data", )); } - self.compressed_pos += n; - if self.compressed_pos < self.compressed_size { + self.compressed.advance(n); + + if !self.compressed.unwritten().is_empty() { continue; } - self.zstd = Some(ZstdDecoder::new()); self.output_pos = 0; self.output.reset(); - self.state = ReadState::Decompressing; + if self.is_compressed { + 
self.zstd = Some(ZstdDecoder::new()); + self.state = ReadState::Decompressing; + } else { + self.output + .copy_unwritten_from(&mut PartialBuffer::new(self.compressed.written())); + self.state = ReadState::Outputting; + } continue; } @@ -625,7 +555,7 @@ impl Read for MetadataBlocksReader { continue; } - let mut input = PartialBuffer::new(&self.compressed[..self.compressed_size]); + let mut input = PartialBuffer::new(self.compressed.written()); let decoder = self.zstd.as_mut().unwrap(); if decoder.decode(&mut input, &mut self.output)? { @@ -676,13 +606,13 @@ impl AsyncRead for MetadataBlocksReader { match *this.state { ReadState::ReadingSize => { - let mut read_buf = - tokio::io::ReadBuf::new(&mut this.size_buf[*this.size_bytes_read..]); + let mut read_buf = tokio::io::ReadBuf::new(this.size_buf.unwritten_mut()); + let before = read_buf.filled().len(); ready!(this.reader.as_mut().poll_read(cx, &mut read_buf))?; + let n = read_buf.filled().len() - before; - let n = read_buf.filled().len(); if n == 0 { - if *this.size_bytes_read == 0 { + if this.size_buf.written().is_empty() { *this.state = ReadState::Eof; return Poll::Ready(Ok(())); } else { @@ -693,22 +623,16 @@ impl AsyncRead for MetadataBlocksReader { } } - *this.size_bytes_read += n; - if *this.size_bytes_read < 2 { + this.size_buf.advance(n); + + if this.size_buf.written().len() < 2 { continue; } - let size_header = u16::from_le_bytes(*this.size_buf); - let is_compressed = (size_header & 0x8000) == 0; + let size_header = u16::from_le_bytes(*this.size_buf.written()); + *this.is_compressed = (size_header & 0x8000) == 0; let size = (size_header & 0x7FFF) as usize; - if !is_compressed { - return Poll::Ready(Err(std::io::Error::new( - std::io::ErrorKind::InvalidData, - "Uncompressed metadata blocks not supported", - ))); - } - if size == 0 || size > 8192 { return Poll::Ready(Err(std::io::Error::new( std::io::ErrorKind::InvalidData, @@ -716,36 +640,42 @@ impl AsyncRead for MetadataBlocksReader { ))); } - 
*this.compressed_size = size; - *this.compressed_pos = 0; - *this.size_bytes_read = 0; + this.compressed.reset(); + this.compressed.reserve(size); + this.size_buf.reset(); *this.state = ReadState::ReadingData; continue; } ReadState::ReadingData => { - let mut read_buf = tokio::io::ReadBuf::new( - &mut this.compressed[*this.compressed_pos..*this.compressed_size], - ); + let mut read_buf = tokio::io::ReadBuf::new(this.compressed.unwritten_mut()); + let before = read_buf.filled().len(); ready!(this.reader.as_mut().poll_read(cx, &mut read_buf))?; + let n = read_buf.filled().len() - before; - let n = read_buf.filled().len(); if n == 0 { return Poll::Ready(Err(std::io::Error::new( std::io::ErrorKind::UnexpectedEof, - "Unexpected EOF reading compressed data", + "Unexpected EOF reading data", ))); } - *this.compressed_pos += n; - if *this.compressed_pos < *this.compressed_size { + this.compressed.advance(n); + + if !this.compressed.unwritten().is_empty() { continue; } - *this.zstd = Some(ZstdDecoder::new()); *this.output_pos = 0; this.output.reset(); - *this.state = ReadState::Decompressing; + if *this.is_compressed { + *this.zstd = Some(ZstdDecoder::new()); + *this.state = ReadState::Decompressing; + } else { + this.output + .copy_unwritten_from(&mut PartialBuffer::new(this.compressed.written())); + *this.state = ReadState::Outputting; + } continue; } @@ -755,7 +685,7 @@ impl AsyncRead for MetadataBlocksReader { continue; } - let mut input = PartialBuffer::new(&this.compressed[..*this.compressed_size]); + let mut input = PartialBuffer::new(this.compressed.written()); let decoder = this.zstd.as_mut().unwrap(); if decoder.decode(&mut input, this.output)? 
{ diff --git a/core/startos/src/version/mod.rs b/core/startos/src/version/mod.rs index 6f8bae32a..108a51d4b 100644 --- a/core/startos/src/version/mod.rs +++ b/core/startos/src/version/mod.rs @@ -52,8 +52,9 @@ mod v0_4_0_alpha_9; mod v0_4_0_alpha_10; mod v0_4_0_alpha_11; mod v0_4_0_alpha_12; +mod v0_4_0_alpha_13; -pub type Current = v0_4_0_alpha_12::Version; // VERSION_BUMP +pub type Current = v0_4_0_alpha_13::Version; // VERSION_BUMP impl Current { #[instrument(skip(self, db))] @@ -167,7 +168,8 @@ enum Version { V0_4_0_alpha_9(Wrapper), V0_4_0_alpha_10(Wrapper), V0_4_0_alpha_11(Wrapper), - V0_4_0_alpha_12(Wrapper), // VERSION_BUMP + V0_4_0_alpha_12(Wrapper), + V0_4_0_alpha_13(Wrapper), // VERSION_BUMP Other(exver::Version), } @@ -222,7 +224,8 @@ impl Version { Self::V0_4_0_alpha_9(v) => DynVersion(Box::new(v.0)), Self::V0_4_0_alpha_10(v) => DynVersion(Box::new(v.0)), Self::V0_4_0_alpha_11(v) => DynVersion(Box::new(v.0)), - Self::V0_4_0_alpha_12(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP + Self::V0_4_0_alpha_12(v) => DynVersion(Box::new(v.0)), + Self::V0_4_0_alpha_13(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP Self::Other(v) => { return Err(Error::new( eyre!("unknown version {v}"), @@ -269,7 +272,8 @@ impl Version { Version::V0_4_0_alpha_9(Wrapper(x)) => x.semver(), Version::V0_4_0_alpha_10(Wrapper(x)) => x.semver(), Version::V0_4_0_alpha_11(Wrapper(x)) => x.semver(), - Version::V0_4_0_alpha_12(Wrapper(x)) => x.semver(), // VERSION_BUMP + Version::V0_4_0_alpha_12(Wrapper(x)) => x.semver(), + Version::V0_4_0_alpha_13(Wrapper(x)) => x.semver(), // VERSION_BUMP Version::Other(x) => x.clone(), } } diff --git a/core/startos/src/version/v0_4_0_alpha_13.rs b/core/startos/src/version/v0_4_0_alpha_13.rs new file mode 100644 index 000000000..86b3d7dbb --- /dev/null +++ b/core/startos/src/version/v0_4_0_alpha_13.rs @@ -0,0 +1,37 @@ +use exver::{PreReleaseSegment, VersionRange}; + +use super::v0_3_5::V0_3_0_COMPAT; +use super::{VersionT, v0_4_0_alpha_12}; +use 
crate::prelude::*; + +lazy_static::lazy_static! { + static ref V0_4_0_alpha_13: exver::Version = exver::Version::new( + [0, 4, 0], + [PreReleaseSegment::String("alpha".into()), 13.into()] + ); +} + +#[derive(Clone, Copy, Debug, Default)] +pub struct Version; + +impl VersionT for Version { + type Previous = v0_4_0_alpha_12::Version; + type PreUpRes = (); + + async fn pre_up(self) -> Result { + Ok(()) + } + fn semver(self) -> exver::Version { + V0_4_0_alpha_13.clone() + } + fn compat(self) -> &'static VersionRange { + &V0_3_0_COMPAT + } + #[instrument(skip_all)] + fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result { + Ok(Value::Null) + } + fn down(self, _db: &mut Value) -> Result<(), Error> { + Ok(()) + } +} diff --git a/debian/startos/postinst b/debian/startos/postinst index cca71d3e8..afc71cb5f 100755 --- a/debian/startos/postinst +++ b/debian/startos/postinst @@ -122,7 +122,6 @@ ln -sf /usr/lib/startos/scripts/wireguard-vps-proxy-setup /usr/bin/wireguard-vps echo "fs.inotify.max_user_watches=1048576" > /etc/sysctl.d/97-startos.conf -locale-gen en_US.UTF-8 dpkg-reconfigure --frontend noninteractive locales if ! getent group | grep '^startos:'; then diff --git a/dpkg-build.sh b/dpkg-build.sh index e9a539d10..8f9a2677f 100755 --- a/dpkg-build.sh +++ b/dpkg-build.sh @@ -63,7 +63,7 @@ find . -type f -not -path "./DEBIAN/*" -exec md5sum {} \; | sort -k 2 | sed 's/\ cd ../.. 
cd dpkg-workdir -dpkg-deb --root-owner-group -b $BASENAME +dpkg-deb --root-owner-group -Zzstd -b $BASENAME mkdir -p ../results mv $BASENAME.deb ../results/$BASENAME.deb rm -rf $BASENAME \ No newline at end of file diff --git a/image-recipe/run-local-build.sh b/image-recipe/run-local-build.sh index 307f065d0..78b0e49fb 100755 --- a/image-recipe/run-local-build.sh +++ b/image-recipe/run-local-build.sh @@ -7,6 +7,11 @@ BASEDIR="$(pwd -P)" SUITE=trixie +USE_TTY= +if tty -s; then + USE_TTY="-it" +fi + dockerfile_hash=$(sha256sum ${BASEDIR}/image-recipe/Dockerfile | head -c 7) docker_img_name="startos_build:${SUITE}-${dockerfile_hash}" diff --git a/sdk/base/lib/osBindings/ForgetGatewayParams.ts b/sdk/base/lib/osBindings/ForgetGatewayParams.ts deleted file mode 100644 index 8d43c3a3b..000000000 --- a/sdk/base/lib/osBindings/ForgetGatewayParams.ts +++ /dev/null @@ -1,4 +0,0 @@ -// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. -import type { GatewayId } from "./GatewayId" - -export type ForgetGatewayParams = { gateway: GatewayId } diff --git a/sdk/base/lib/osBindings/NetworkInterfaceSetPublicParams.ts b/sdk/base/lib/osBindings/NetworkInterfaceSetPublicParams.ts deleted file mode 100644 index c81719031..000000000 --- a/sdk/base/lib/osBindings/NetworkInterfaceSetPublicParams.ts +++ /dev/null @@ -1,7 +0,0 @@ -// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. -import type { GatewayId } from "./GatewayId" - -export type NetworkInterfaceSetPublicParams = { - gateway: GatewayId - public: boolean | null -} diff --git a/sdk/base/lib/osBindings/RenameGatewayParams.ts b/sdk/base/lib/osBindings/RenameGatewayParams.ts deleted file mode 100644 index 5c282e683..000000000 --- a/sdk/base/lib/osBindings/RenameGatewayParams.ts +++ /dev/null @@ -1,4 +0,0 @@ -// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. 
-import type { GatewayId } from "./GatewayId" - -export type RenameGatewayParams = { id: GatewayId; name: string } diff --git a/sdk/base/lib/osBindings/UnsetPublicParams.ts b/sdk/base/lib/osBindings/UnsetPublicParams.ts deleted file mode 100644 index 1e0673fd4..000000000 --- a/sdk/base/lib/osBindings/UnsetPublicParams.ts +++ /dev/null @@ -1,4 +0,0 @@ -// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. -import type { GatewayId } from "./GatewayId" - -export type UnsetPublicParams = { gateway: GatewayId } diff --git a/sdk/base/lib/osBindings/index.ts b/sdk/base/lib/osBindings/index.ts index 502bd992f..72f9d4745 100644 --- a/sdk/base/lib/osBindings/index.ts +++ b/sdk/base/lib/osBindings/index.ts @@ -76,7 +76,6 @@ export { EventId } from "./EventId" export { ExportActionParams } from "./ExportActionParams" export { ExportServiceInterfaceParams } from "./ExportServiceInterfaceParams" export { FileType } from "./FileType" -export { ForgetGatewayParams } from "./ForgetGatewayParams" export { FullIndex } from "./FullIndex" export { FullProgress } from "./FullProgress" export { GatewayId } from "./GatewayId" @@ -143,7 +142,6 @@ export { NamedProgress } from "./NamedProgress" export { NetInfo } from "./NetInfo" export { NetworkInfo } from "./NetworkInfo" export { NetworkInterfaceInfo } from "./NetworkInterfaceInfo" -export { NetworkInterfaceSetPublicParams } from "./NetworkInterfaceSetPublicParams" export { NetworkInterfaceType } from "./NetworkInterfaceType" export { OnionHostname } from "./OnionHostname" export { OsIndex } from "./OsIndex" @@ -175,7 +173,6 @@ export { RemovePackageFromCategoryParams } from "./RemovePackageFromCategoryPara export { RemovePackageParams } from "./RemovePackageParams" export { RemoveTunnelParams } from "./RemoveTunnelParams" export { RemoveVersionParams } from "./RemoveVersionParams" -export { RenameGatewayParams } from "./RenameGatewayParams" export { ReplayId } from "./ReplayId" 
export { RequestCommitment } from "./RequestCommitment" export { RunActionParams } from "./RunActionParams" @@ -211,7 +208,6 @@ export { TaskSeverity } from "./TaskSeverity" export { TaskTrigger } from "./TaskTrigger" export { Task } from "./Task" export { TestSmtpParams } from "./TestSmtpParams" -export { UnsetPublicParams } from "./UnsetPublicParams" export { UpdatingState } from "./UpdatingState" export { VerifyCifsParams } from "./VerifyCifsParams" export { VersionSignerParams } from "./VersionSignerParams" diff --git a/sdk/package/lib/StartSdk.ts b/sdk/package/lib/StartSdk.ts index f5e9a41fa..8ad2b3b3b 100644 --- a/sdk/package/lib/StartSdk.ts +++ b/sdk/package/lib/StartSdk.ts @@ -61,7 +61,7 @@ import { } from "../../base/lib/inits" import { DropGenerator } from "../../base/lib/util/Drop" -export const OSVersion = testTypeVersion("0.4.0-alpha.12") +export const OSVersion = testTypeVersion("0.4.0-alpha.13") // prettier-ignore type AnyNeverCond = diff --git a/sdk/package/lib/mainFn/CommandController.ts b/sdk/package/lib/mainFn/CommandController.ts index 156792dcf..b325ac585 100644 --- a/sdk/package/lib/mainFn/CommandController.ts +++ b/sdk/package/lib/mainFn/CommandController.ts @@ -77,10 +77,14 @@ export class CommandController< if (exec.runAsInit) { childProcess = await subcontainer!.launch(commands, { env: exec.env, + user: exec.user, + cwd: exec.cwd, }) } else { childProcess = await subcontainer!.spawn(commands, { env: exec.env, + user: exec.user, + cwd: exec.cwd, stdio: exec.onStdout || exec.onStderr ? 
"pipe" : "inherit", }) } diff --git a/sdk/package/package-lock.json b/sdk/package/package-lock.json index 6f02a18fd..e2a15252b 100644 --- a/sdk/package/package-lock.json +++ b/sdk/package/package-lock.json @@ -1,12 +1,12 @@ { "name": "@start9labs/start-sdk", - "version": "0.4.0-beta.42", + "version": "0.4.0-beta.43", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "@start9labs/start-sdk", - "version": "0.4.0-beta.42", + "version": "0.4.0-beta.43", "license": "MIT", "dependencies": { "@iarna/toml": "^3.0.0", diff --git a/sdk/package/package.json b/sdk/package/package.json index 1c09a1f13..fcbed7bfc 100644 --- a/sdk/package/package.json +++ b/sdk/package/package.json @@ -1,6 +1,6 @@ { "name": "@start9labs/start-sdk", - "version": "0.4.0-beta.42", + "version": "0.4.0-beta.43", "description": "Software development kit to facilitate packaging services for StartOS", "main": "./package/lib/index.js", "types": "./package/lib/index.d.ts", diff --git a/web/package-lock.json b/web/package-lock.json index e5198c0cb..24f189f93 100644 --- a/web/package-lock.json +++ b/web/package-lock.json @@ -1,12 +1,12 @@ { "name": "startos-ui", - "version": "0.4.0-alpha.12", + "version": "0.4.0-alpha.13", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "startos-ui", - "version": "0.4.0-alpha.12", + "version": "0.4.0-alpha.13", "license": "MIT", "dependencies": { "@angular/animations": "^20.3.0", diff --git a/web/package.json b/web/package.json index 7159f5e24..e634aae1e 100644 --- a/web/package.json +++ b/web/package.json @@ -1,6 +1,6 @@ { "name": "startos-ui", - "version": "0.4.0-alpha.12", + "version": "0.4.0-alpha.13", "author": "Start9 Labs, Inc", "homepage": "https://start9.com/", "license": "MIT", diff --git a/web/projects/install-wizard/src/app/app.component.ts b/web/projects/install-wizard/src/app/app.component.ts index 62c2b030d..37f360b09 100644 --- a/web/projects/install-wizard/src/app/app.component.ts +++ 
b/web/projects/install-wizard/src/app/app.component.ts @@ -59,7 +59,9 @@ export class AppComponent { await this.api.reboot() this.dialogs .open( - 'Please wait 1-2 minutes, then refresh this page to access the StartOS setup wizard.', + window.location.host === 'localhost' + ? 'Please wait 1-2 minutes for your server to restart' + : 'Please wait 1-2 minutes, then refresh this page to access the StartOS setup wizard.', { label: 'Rebooting', size: 's', diff --git a/web/projects/ui/src/app/services/api/api.fixures.ts b/web/projects/ui/src/app/services/api/api.fixures.ts index 30a1fea2a..9924a6d43 100644 --- a/web/projects/ui/src/app/services/api/api.fixures.ts +++ b/web/projects/ui/src/app/services/api/api.fixures.ts @@ -110,7 +110,7 @@ export namespace Mock { squashfs: { aarch64: { publishedAt: '2025-04-21T20:58:48.140749883Z', - url: 'https://alpha-registry-x.start9.com/startos/v0.4.0-alpha.12/startos-0.4.0-alpha.12-33ae46f~dev_aarch64.squashfs', + url: 'https://alpha-registry-x.start9.com/startos/v0.4.0-alpha.13/startos-0.4.0-alpha.13-33ae46f~dev_aarch64.squashfs', commitment: { hash: '4elBFVkd/r8hNadKmKtLIs42CoPltMvKe2z3LRqkphk=', size: 1343500288, @@ -122,7 +122,7 @@ export namespace Mock { }, 'aarch64-nonfree': { publishedAt: '2025-04-21T21:07:00.249285116Z', - url: 'https://alpha-registry-x.start9.com/startos/v0.4.0-alpha.12/startos-0.4.0-alpha.12-33ae46f~dev_aarch64-nonfree.squashfs', + url: 'https://alpha-registry-x.start9.com/startos/v0.4.0-alpha.13/startos-0.4.0-alpha.13-33ae46f~dev_aarch64-nonfree.squashfs', commitment: { hash: 'MrCEi4jxbmPS7zAiGk/JSKlMsiuKqQy6RbYOxlGHOIQ=', size: 1653075968, @@ -134,7 +134,7 @@ export namespace Mock { }, raspberrypi: { publishedAt: '2025-04-21T21:16:12.933319237Z', - url: 'https://alpha-registry-x.start9.com/startos/v0.4.0-alpha.12/startos-0.4.0-alpha.12-33ae46f~dev_raspberrypi.squashfs', + url: 'https://alpha-registry-x.start9.com/startos/v0.4.0-alpha.13/startos-0.4.0-alpha.13-33ae46f~dev_raspberrypi.squashfs', 
commitment: { hash: '/XTVQRCqY3RK544PgitlKu7UplXjkmzWoXUh2E4HCw0=', size: 1490731008, @@ -146,7 +146,7 @@ export namespace Mock { }, x86_64: { publishedAt: '2025-04-21T21:14:20.246908903Z', - url: 'https://alpha-registry-x.start9.com/startos/v0.4.0-alpha.12/startos-0.4.0-alpha.12-33ae46f~dev_x86_64.squashfs', + url: 'https://alpha-registry-x.start9.com/startos/v0.4.0-alpha.13/startos-0.4.0-alpha.13-33ae46f~dev_x86_64.squashfs', commitment: { hash: '/6romKTVQGSaOU7FqSZdw0kFyd7P+NBSYNwM3q7Fe44=', size: 1411657728, @@ -158,7 +158,7 @@ export namespace Mock { }, 'x86_64-nonfree': { publishedAt: '2025-04-21T21:15:17.955265284Z', - url: 'https://alpha-registry-x.start9.com/startos/v0.4.0-alpha.12/startos-0.4.0-alpha.12-33ae46f~dev_x86_64-nonfree.squashfs', + url: 'https://alpha-registry-x.start9.com/startos/v0.4.0-alpha.13/startos-0.4.0-alpha.13-33ae46f~dev_x86_64-nonfree.squashfs', commitment: { hash: 'HCRq9sr/0t85pMdrEgNBeM4x11zVKHszGnD1GDyZbSE=', size: 1731035136, @@ -385,7 +385,7 @@ export namespace Mock { docsUrl: 'https://bitcoin.org', releaseNotes: 'Even better support for Bitcoin and wallets!', osVersion: '0.3.6', - sdkVersion: '0.4.0-beta.42', + sdkVersion: '0.4.0-beta.43', gitHash: 'fakehash', icon: BTC_ICON, sourceVersion: null, @@ -420,7 +420,7 @@ export namespace Mock { docsUrl: 'https://bitcoinknots.org', releaseNotes: 'Even better support for Bitcoin and wallets!', osVersion: '0.3.6', - sdkVersion: '0.4.0-beta.42', + sdkVersion: '0.4.0-beta.43', gitHash: 'fakehash', icon: BTC_ICON, sourceVersion: null, @@ -465,7 +465,7 @@ export namespace Mock { docsUrl: 'https://bitcoin.org', releaseNotes: 'Even better support for Bitcoin and wallets!', osVersion: '0.3.6', - sdkVersion: '0.4.0-beta.42', + sdkVersion: '0.4.0-beta.43', gitHash: 'fakehash', icon: BTC_ICON, sourceVersion: null, @@ -500,7 +500,7 @@ export namespace Mock { docsUrl: 'https://bitcoinknots.org', releaseNotes: 'Even better support for Bitcoin and wallets!', osVersion: '0.3.6', - sdkVersion: 
'0.4.0-beta.42', + sdkVersion: '0.4.0-beta.43', gitHash: 'fakehash', icon: BTC_ICON, sourceVersion: null, @@ -547,7 +547,7 @@ export namespace Mock { docsUrl: 'https://lightning.engineering/', releaseNotes: 'Upstream release to 0.17.5', osVersion: '0.3.6', - sdkVersion: '0.4.0-beta.42', + sdkVersion: '0.4.0-beta.43', gitHash: 'fakehash', icon: LND_ICON, sourceVersion: null, @@ -595,7 +595,7 @@ export namespace Mock { docsUrl: 'https://lightning.engineering/', releaseNotes: 'Upstream release to 0.17.4', osVersion: '0.3.6', - sdkVersion: '0.4.0-beta.42', + sdkVersion: '0.4.0-beta.43', gitHash: 'fakehash', icon: LND_ICON, sourceVersion: null, @@ -647,7 +647,7 @@ export namespace Mock { docsUrl: 'https://bitcoin.org', releaseNotes: 'Even better support for Bitcoin and wallets!', osVersion: '0.3.6', - sdkVersion: '0.4.0-beta.42', + sdkVersion: '0.4.0-beta.43', gitHash: 'fakehash', icon: BTC_ICON, sourceVersion: null, @@ -682,7 +682,7 @@ export namespace Mock { docsUrl: 'https://bitcoinknots.org', releaseNotes: 'Even better support for Bitcoin and wallets!', osVersion: '0.3.6', - sdkVersion: '0.4.0-beta.42', + sdkVersion: '0.4.0-beta.43', gitHash: 'fakehash', icon: BTC_ICON, sourceVersion: null, @@ -727,7 +727,7 @@ export namespace Mock { docsUrl: 'https://lightning.engineering/', releaseNotes: 'Upstream release and minor fixes.', osVersion: '0.3.6', - sdkVersion: '0.4.0-beta.42', + sdkVersion: '0.4.0-beta.43', gitHash: 'fakehash', icon: LND_ICON, sourceVersion: null, @@ -775,7 +775,7 @@ export namespace Mock { marketingSite: '', releaseNotes: 'Upstream release and minor fixes.', osVersion: '0.3.6', - sdkVersion: '0.4.0-beta.42', + sdkVersion: '0.4.0-beta.43', gitHash: 'fakehash', icon: PROXY_ICON, sourceVersion: null,