Feature/new registry (#2612)

* wip

* overhaul boot process

* wip: new registry

* wip

* wip

* wip

* wip

* wip

* wip

* os registry complete

* ui fixes

* fixes

* fixes

* more fixes

* fix merkle archive
This commit is contained in:
Aiden McClelland
2024-05-06 10:20:44 -06:00
committed by GitHub
parent 8a38666105
commit 9b14d714ca
167 changed files with 6297 additions and 3190 deletions

1
.gitignore vendored
View File

@@ -20,7 +20,6 @@ secrets.db
/ENVIRONMENT.txt
/GIT_HASH.txt
/VERSION.txt
/eos-*.tar.gz
/*.deb
/target
/*.squashfs

View File

@@ -120,7 +120,6 @@ install: $(ALL_TARGETS)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/startd)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-cli)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-sdk)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/embassy-cli)
if [ "$(PLATFORM)" = "raspberrypi" ]; then $(call cp,cargo-deps/aarch64-unknown-linux-musl/release/pi-beep,$(DESTDIR)/usr/bin/pi-beep); fi
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then $(call cp,cargo-deps/$(ARCH)-unknown-linux-musl/release/tokio-console,$(DESTDIR)/usr/bin/tokio-console); fi
@@ -164,15 +163,16 @@ wormhole-deb: results/$(BASENAME).deb
update: $(ALL_TARGETS)
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(call ssh,"sudo rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next/")
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/embassy/next PLATFORM=$(PLATFORM)
$(call ssh,'sudo NO_SYNC=1 /media/embassy/next/usr/lib/startos/scripts/chroot-and-upgrade "apt-get install -y $(shell cat ./build/lib/depends)"')
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/startos/next PLATFORM=$(PLATFORM)
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y $(shell cat ./build/lib/depends)"')
emulate-reflash: $(ALL_TARGETS)
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(call ssh,"sudo rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next/")
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/embassy/next PLATFORM=$(PLATFORM)
$(call ssh,"sudo touch /media/embassy/config/upgrade && sudo rm -f /media/embassy/config/disk.guid && sudo sync && sudo reboot")
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/startos/next PLATFORM=$(PLATFORM)
$(call ssh,'sudo rm -f /media/startos/config/disk.guid')
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y $(shell cat ./build/lib/depends)"')
upload-ota: results/$(BASENAME).squashfs
TARGET=$(TARGET) KEY=$(KEY) ./upload-ota.sh

View File

@@ -1,107 +0,0 @@
# Building StartOS
⚠️ The commands given assume a Debian or Ubuntu-based environment. _Building in
a VM is NOT yet supported_ ⚠️
## Prerequisites
1. Install dependencies
- Avahi
- `sudo apt install -y avahi-daemon`
- Installed by default on most Debian systems - https://avahi.org
- Build Essentials (needed to run `make`)
- `sudo apt install -y build-essential`
- Docker
- `curl -fsSL https://get.docker.com | sh`
- https://docs.docker.com/get-docker
- Add your user to the docker group: `sudo usermod -a -G docker $USER`
- Reload user environment `exec sudo su -l $USER`
- Prepare Docker environment
- Setup buildx (https://docs.docker.com/buildx/working-with-buildx/)
- Create a builder: `docker buildx create --use`
- Add multi-arch build ability:
`docker run --rm --privileged linuxkit/binfmt:v0.8`
- Node Version 12+
- snap: `sudo snap install node`
- [nvm](https://github.com/nvm-sh/nvm#installing-and-updating):
`nvm install --lts`
- https://nodejs.org/en/docs
- NPM Version 7+
- apt: `sudo apt install -y npm`
- [nvm](https://github.com/nvm-sh/nvm#installing-and-updating):
`nvm install --lts`
- https://docs.npmjs.com/downloading-and-installing-node-js-and-npm
- jq
- `sudo apt install -y jq`
- https://stedolan.github.io/jq
- yq
- snap: `sudo snap install yq`
- binaries: https://github.com/mikefarah/yq/releases/
- https://mikefarah.gitbook.io/yq
2. Clone the latest repo with required submodules
> :information_source: You can check the latest available version
> [here](https://github.com/Start9Labs/start-os/releases)
```
git clone --recursive https://github.com/Start9Labs/start-os.git --branch latest
```
## Build Raspberry Pi Image
```
cd start-os
make embassyos-raspi.img ARCH=aarch64
```
## Flash
Flash the resulting `embassyos-raspi.img` to your SD Card
We recommend [Balena Etcher](https://www.balena.io/etcher/)
## Setup
Visit http://start.local from any web browser - We recommend
[Firefox](https://www.mozilla.org/firefox/browsers)
Enter your product key. This is generated during the build process and can be
found in `product_key.txt`, located in the root directory.
## Troubleshooting
1. I just flashed my SD card, fired up StartOS, bootup sounds and all, but my
browser is saying "Unable to connect" with start.local.
- Try doing a hard refresh on your browser, or opening the url in a
private/incognito window. If you've run an instance of StartOS before,
sometimes you can have a stale cache that will block you from navigating to
the page.
2. Flashing the image isn't working with balenaEtcher. I'm getting
`Cannot read property 'message' of null` when I try.
- The latest versions of Balena may not flash properly. This version here:
https://github.com/balena-io/etcher/releases/tag/v1.5.122 should work
properly.
3. Startup isn't working properly and I'm curious as to why. How can I view logs
regarding startup for debugging?
- Find the IP of your device
- Run `nc <ip> 8080` and it will print the logs
4. I need to ssh into my server to fix something, but I cannot get to the
console to add ssh keys normally.
- During the Build step, instead of running just
`make embassyos-raspi.img ARCH=aarch64` run
`ENVIRONMENT=dev make embassyos-raspi.img ARCH=aarch64`. Flash like normal,
and insert into your server. Boot up StartOS, then on another computer on
the same network, ssh into the server with the username `start9` and password
`embassy`.
5. I need to reset my password. How can I do that?
- You will need to reflash your device. Select "Use Existing Drive" once you are
in setup, and it will prompt you to set a new password.

View File

@@ -1,76 +0,0 @@
# Release Process
## `embassyos_0.3.x-1_amd64.deb`
- Description: debian package for x86_64 - intended to be installed on pureos
- Destination: GitHub Release Tag
- Requires: N/A
- Build steps:
- Clone `https://github.com/Start9Labs/embassy-os-deb` at `master`
- Run `make TAG=master` from that folder
- Artifact: `./embassyos_0.3.x-1_amd64.deb`
## `eos-<version>-<git hash>-<date>_amd64.iso`
- Description: live usb image for x86_64
- Destination: GitHub Release Tag
- Requires: `embassyos_0.3.x-1_amd64.deb`
- Build steps:
- Clone `https://github.com/Start9Labs/eos-image-recipes` at `master`
- Copy `embassyos_0.3.x-1_amd64.deb` to
`overlays/vendor/root/embassyos_0.3.x-1_amd64.deb`
- Run `./run-local-build.sh byzantium` from that folder
- Artifact: `./results/eos-<version>-<git hash>-<date>_amd64.iso`
## `eos.x86_64.squashfs`
- Description: compressed embassyOS x86_64 filesystem image
- Destination: GitHub Release Tag, Registry @
`resources/eos/<version>/eos.x86_64.squashfs`
- Requires: `eos-<version>-<git hash>-<date>_amd64.iso`
- Build steps:
- From `https://github.com/Start9Labs/eos-image-recipes` at `master`
- `./extract-squashfs.sh results/eos-<version>-<git hash>-<date>_amd64.iso`
- Artifact: `./results/eos.x86_64.squashfs`
## `eos.raspberrypi.squashfs`
- Description: compressed embassyOS raspberrypi filesystem image
- Destination: GitHub Release Tag, Registry @
`resources/eos/<version>/eos.raspberrypi.squashfs`
- Requires: N/A
- Build steps:
- Clone `https://github.com/Start9Labs/embassy-os` at `master`
- `make embassyos-raspi.img`
- flash `embassyos-raspi.img` to raspberry pi
- boot raspberry pi with ethernet
- wait for chime
- you can watch logs using `nc <ip> 8080`
- unplug raspberry pi, put sd card back in build machine
- `./build/raspberry-pi/rip-image.sh`
- Artifact: `./eos.raspberrypi.squashfs`
## `lite-upgrade.img`
- Description: update image for users coming from 0.3.2.1 and before
- Destination: Registry @ `resources/eos/<version>/eos.img`
- Requires: `eos.raspberrypi.squashfs`
- Build steps:
- From `https://github.com/Start9Labs/embassy-os` at `master`
- `make lite-upgrade.img`
- Artifact `./lite-upgrade.img`
## `eos-<version>-<git hash>-<date>_raspberrypi.tar.gz`
- Description: pre-initialized raspberrypi image
- Destination: GitHub Release Tag (as tar.gz)
- Requires: `eos.raspberrypi.squashfs`
- Build steps:
- From `https://github.com/Start9Labs/embassy-os` at `master`
- `make eos_raspberrypi.img`
- `tar --format=posix -cS -f- eos-<version>-<git hash>-<date>_raspberrypi.img | gzip > eos-<version>-<git hash>-<date>_raspberrypi.tar.gz`
- Artifact `./eos-<version>-<git hash>-<date>_raspberrypi.tar.gz`
## `embassy-sdk`
- Build and deploy to all registries

View File

@@ -34,7 +34,6 @@ network-manager
nvme-cli
nyx
openssh-server
podman
postgresql
psmisc
qemu-guest-agent

View File

@@ -5,44 +5,104 @@ if [ "$UID" -ne 0 ]; then
exit 1
fi
POSITIONAL_ARGS=()
while [[ $# -gt 0 ]]; do
case $1 in
--no-sync)
NO_SYNC=1
shift
;;
--create)
ONLY_CREATE=1
shift
;;
-*|--*)
echo "Unknown option $1"
exit 1
;;
*)
POSITIONAL_ARGS+=("$1") # save positional arg
shift # past argument
;;
esac
done
set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters
if [ -z "$NO_SYNC" ]; then
echo 'Syncing...'
rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next
umount -R /media/startos/next 2> /dev/null
rm -rf /media/startos/upper /media/startos/next
mkdir /media/startos/upper
mount -t tmpfs tmpfs /media/startos/upper
mkdir -p /media/startos/upper/data /media/startos/upper/work /media/startos/next
mount -t overlay \
-olowerdir=/media/startos/current,upperdir=/media/startos/upper/data,workdir=/media/startos/upper/work \
overlay /media/startos/next
fi
mkdir -p /media/embassy/next/run
mkdir -p /media/embassy/next/dev
mkdir -p /media/embassy/next/sys
mkdir -p /media/embassy/next/proc
mkdir -p /media/embassy/next/boot
mount --bind /run /media/embassy/next/run
mount --bind /tmp /media/embassy/next/tmp
mount --bind /dev /media/embassy/next/dev
mount --bind /sys /media/embassy/next/sys
mount --bind /proc /media/embassy/next/proc
mount --bind /boot /media/embassy/next/boot
if [ -n "$ONLY_CREATE" ]; then
exit 0
fi
mkdir -p /media/startos/next/run
mkdir -p /media/startos/next/dev
mkdir -p /media/startos/next/sys
mkdir -p /media/startos/next/proc
mkdir -p /media/startos/next/boot
mount --bind /run /media/startos/next/run
mount --bind /tmp /media/startos/next/tmp
mount --bind /dev /media/startos/next/dev
mount --bind /sys /media/startos/next/sys
mount --bind /proc /media/startos/next/proc
mount --bind /boot /media/startos/next/boot
if [ -z "$*" ]; then
chroot /media/embassy/next
chroot /media/startos/next
CHROOT_RES=$?
else
chroot /media/embassy/next "$SHELL" -c "$*"
chroot /media/startos/next "$SHELL" -c "$*"
CHROOT_RES=$?
fi
umount /media/embassy/next/run
umount /media/embassy/next/tmp
umount /media/embassy/next/dev
umount /media/embassy/next/sys
umount /media/embassy/next/proc
umount /media/embassy/next/boot
umount /media/startos/next/run
umount /media/startos/next/tmp
umount /media/startos/next/dev
umount /media/startos/next/sys
umount /media/startos/next/proc
umount /media/startos/next/boot
if [ "$CHROOT_RES" -eq 0 ]; then
if [ -h /media/startos/config/current.rootfs ] && [ -e /media/startos/config/current.rootfs ]; then
echo 'Pruning...'
current="$(readlink -f /media/startos/config/current.rootfs)"
needed=$(du -s --bytes /media/startos/next | awk '{print $1}')
while [[ "$(df -B1 --output=avail --sync /media/startos/images | tail -n1)" -lt "$needed" ]]; do
to_prune="$(ls -t1 /media/startos/images/*.rootfs | grep -v "$current" | tail -n1)"
if [ -e "$to_prune" ]; then
echo " Pruning $to_prune"
rm -rf "$to_prune"
else
>&2 echo "Not enough space and nothing to prune!"
exit 1
fi
done
echo 'done.'
fi
echo 'Upgrading...'
touch /media/embassy/config/upgrade
time mksquashfs /media/startos/next /media/startos/images/next.squashfs -b 4096 -comp gzip
hash=$(start-cli util b3sum /media/startos/images/next.squashfs | head -c 32)
mv /media/startos/images/next.squashfs /media/startos/images/${hash}.rootfs
ln -rsf /media/startos/images/${hash}.rootfs /media/startos/config/current.rootfs
sync
reboot
fi
fi
umount -R /media/startos/next
rm -rf /media/startos/upper /media/startos/next

View File

@@ -1,98 +0,0 @@
# Local filesystem mounting -*- shell-script -*-
#
# This script overrides local_mount_root() in /scripts/local
# and mounts root as a read-only filesystem with a temporary (rw)
# overlay filesystem.
#
. /scripts/local
local_mount_root()
{
echo 'using embassy initramfs module'
local_top
local_device_setup "${ROOT}" "root file system"
ROOT="${DEV}"
# Get the root filesystem type if not set
if [ -z "${ROOTFSTYPE}" ]; then
FSTYPE=$(get_fstype "${ROOT}")
else
FSTYPE=${ROOTFSTYPE}
fi
local_premount
# CHANGES TO THE ORIGINAL FUNCTION BEGIN HERE
# N.B. this code still lacks error checking
modprobe ${FSTYPE}
checkfs ${ROOT} root "${FSTYPE}"
ROOTFLAGS="$(echo "${ROOTFLAGS}" | sed 's/subvol=\(next\|current\)//' | sed 's/^-o *$//')"
if [ "${FSTYPE}" != "unknown" ]; then
mount -t ${FSTYPE} ${ROOTFLAGS} ${ROOT} ${rootmnt}
else
mount ${ROOTFLAGS} ${ROOT} ${rootmnt}
fi
echo 'mounting embassyfs'
mkdir /embassyfs
mount --move ${rootmnt} /embassyfs
if ! [ -d /embassyfs/current ] && [ -d /embassyfs/prev ]; then
mv /embassyfs/prev /embassyfs/current
fi
if ! [ -d /embassyfs/current ]; then
mkdir /embassyfs/current
for FILE in $(ls /embassyfs); do
if [ "$FILE" != current ]; then
mv /embassyfs/$FILE /embassyfs/current/
fi
done
fi
mkdir -p /embassyfs/config
if [ -f /embassyfs/config/upgrade ] && [ -d /embassyfs/next ]; then
mv /embassyfs/current /embassyfs/prev
mv /embassyfs/next /embassyfs/current
rm /embassyfs/config/upgrade
fi
if ! [ -d /embassyfs/next ]; then
if [ -d /embassyfs/prev ]; then
mv /embassyfs/prev /embassyfs/next
else
mkdir /embassyfs/next
fi
fi
mkdir /lower /upper
mount -r --bind /embassyfs/current /lower
modprobe overlay || insmod "/lower/lib/modules/$(uname -r)/kernel/fs/overlayfs/overlay.ko"
# Mount a tmpfs for the overlay in /upper
mount -t tmpfs tmpfs /upper
mkdir /upper/data /upper/work
# Mount the final overlay-root in $rootmnt
mount -t overlay \
-olowerdir=/lower,upperdir=/upper/data,workdir=/upper/work \
overlay ${rootmnt}
mkdir -p ${rootmnt}/media/embassy/config
mount --bind /embassyfs/config ${rootmnt}/media/embassy/config
mkdir -p ${rootmnt}/media/embassy/next
mount --bind /embassyfs/next ${rootmnt}/media/embassy/next
mkdir -p ${rootmnt}/media/embassy/embassyfs
mount -r --bind /embassyfs ${rootmnt}/media/embassy/embassyfs
}

View File

@@ -3,8 +3,8 @@
ARGS=
for ARG in $@; do
if [ -d "/media/embassy/embassyfs" ] && [ "$ARG" = "/" ]; then
ARG=/media/embassy/embassyfs
if [ -d "/media/startos/root" ] && [ "$ARG" = "/" ]; then
ARG=/media/startos/root
fi
ARGS="$ARGS $ARG"
done

View File

@@ -0,0 +1,114 @@
# Local filesystem mounting -*- shell-script -*-
#
# This script overrides local_mount_root() in /scripts/local
# and mounts root as a read-only filesystem with a temporary (rw)
# overlay filesystem.
#
. /scripts/local
local_mount_root()
{
echo 'using startos initramfs module'
local_top
local_device_setup "${ROOT}" "root file system"
ROOT="${DEV}"
# Get the root filesystem type if not set
if [ -z "${ROOTFSTYPE}" ]; then
FSTYPE=$(get_fstype "${ROOT}")
else
FSTYPE=${ROOTFSTYPE}
fi
local_premount
# CHANGES TO THE ORIGINAL FUNCTION BEGIN HERE
# N.B. this code still lacks error checking
modprobe ${FSTYPE}
checkfs ${ROOT} root "${FSTYPE}"
echo 'mounting startos'
mkdir /startos
ROOTFLAGS="$(echo "${ROOTFLAGS}" | sed 's/subvol=\(next\|current\)//' | sed 's/^-o *$//')"
if [ "${FSTYPE}" != "unknown" ]; then
mount -t ${FSTYPE} ${ROOTFLAGS} ${ROOT} /startos
else
mount ${ROOTFLAGS} ${ROOT} /startos
fi
if [ -d /startos/images ]; then
if [ -h /startos/config/current.rootfs ] && [ -e /startos/config/current.rootfs ]; then
image=$(readlink -f /startos/config/current.rootfs)
else
image="$(ls -t1 /startos/images/*.rootfs | head -n1)"
fi
if ! [ -f "$image" ]; then
>&2 echo "image $image not available to boot"
exit 1
fi
else
if [ -f /startos/config/upgrade ] && [ -d /startos/next ]; then
oldroot=/startos/next
elif [ -d /startos/current ]; then
oldroot=/startos/current
elif [ -d /startos/prev ]; then
oldroot=/startos/prev
else
>&2 echo no StartOS filesystem found
exit 1
fi
mkdir -p /startos/config/overlay/etc
mv $oldroot/etc/fstab /startos/config/overlay/etc/fstab
mv $oldroot/etc/machine-id /startos/config/overlay/etc/machine-id
mv $oldroot/etc/ssh /startos/config/overlay/etc/ssh
mkdir -p /startos/images
mv $oldroot /startos/images/legacy.rootfs
rm -rf /startos/next /startos/current /startos/prev
ln -rsf /startos/images/old.squashfs /startos/config/current.rootfs
image=$(readlink -f /startos/config/current.rootfs)
fi
mkdir /lower /upper
if [ -d "$image" ]; then
mount -r --bind $image /lower
elif [ -f "$image" ]; then
modprobe squashfs
mount -r $image /lower
else
>&2 echo "not a regular file or directory: $image"
exit 1
fi
modprobe overlay || insmod "/lower/lib/modules/$(uname -r)/kernel/fs/overlayfs/overlay.ko"
# Mount a tmpfs for the overlay in /upper
mount -t tmpfs tmpfs /upper
mkdir /upper/data /upper/work
mkdir -p /startos/config/overlay
# Mount the final overlay-root in $rootmnt
mount -t overlay \
-olowerdir=/startos/config/overlay:/lower,upperdir=/upper/data,workdir=/upper/work \
overlay ${rootmnt}
mkdir -p ${rootmnt}/media/startos/config
mount --bind /startos/config ${rootmnt}/media/startos/config
mkdir -p ${rootmnt}/media/startos/images
mount --bind /startos/images ${rootmnt}/media/startos/images
mkdir -p ${rootmnt}/media/startos/root
mount -r --bind /startos ${rootmnt}/media/startos/root
mkdir -p ${rootmnt}/media/startos/current
mount -r --bind /lower ${rootmnt}/media/startos/current
}

View File

@@ -63,7 +63,7 @@ sudo unsquashfs -f -d $TMPDIR startos.raspberrypi.squashfs
REAL_GIT_HASH=$(cat $TMPDIR/usr/lib/startos/GIT_HASH.txt)
REAL_VERSION=$(cat $TMPDIR/usr/lib/startos/VERSION.txt)
REAL_ENVIRONMENT=$(cat $TMPDIR/usr/lib/startos/ENVIRONMENT.txt)
sudo sed -i 's| boot=embassy| init=/usr/lib/startos/scripts/init_resize\.sh|' $TMPDIR/boot/cmdline.txt
sudo sed -i 's| boot=startos| init=/usr/lib/startos/scripts/init_resize\.sh|' $TMPDIR/boot/cmdline.txt
sudo cp ./build/raspberrypi/fstab $TMPDIR/etc/
sudo cp ./build/raspberrypi/init_resize.sh $TMPDIR/usr/lib/startos/scripts/init_resize.sh
sudo umount $TMPDIR/boot

1675
core/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -28,7 +28,7 @@ set +e
fail=
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
if ! rust-musl-builder sh -c "(cd core && cargo build --release $(if [ -n "$FEATURES" ]; then echo "--features $FEATURES"; fi) --locked --bin startbox --target=$ARCH-unknown-linux-musl)"; then
if ! rust-musl-builder sh -c "(cd core && cargo build --release --no-default-features --features cli,daemon,$FEATURES --locked --bin startbox --target=$ARCH-unknown-linux-musl)"; then
fail=true
fi
if ! rust-musl-builder sh -c "(cd core && cargo build --release --no-default-features --features container-runtime,$FEATURES --locked --bin containerbox --target=$ARCH-unknown-linux-musl)"; then

42
core/build-reg.sh Executable file
View File

@@ -0,0 +1,42 @@
#!/bin/bash
cd "$(dirname "${BASH_SOURCE[0]}")"
set -e
shopt -s expand_aliases
if [ -z "$ARCH" ]; then
ARCH=$(uname -m)
fi
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
cd ..
FEATURES="$(echo $ENVIRONMENT | sed 's/-/,/g')"
RUSTFLAGS=""
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
alias 'rust-musl-builder'='docker run $USE_TTY --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$HOME/.cargo/git":/root/.cargo/git -v "$(pwd)":/home/rust/src -w /home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
set +e
fail=
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
if ! rust-musl-builder sh -c "(cd core && cargo build --release --no-default-features --features cli,registry,$FEATURES --locked --bin registrybox --target=$ARCH-unknown-linux-musl)"; then
fail=true
fi
set -e
cd core
sudo chown -R $USER target
sudo chown -R $USER ~/.cargo
if [ -n "$fail" ]; then
exit 1
fi

View File

@@ -11,7 +11,7 @@ futures = "0.3.28"
lazy_async_pool = "0.3.3"
models = { path = "../models" }
pin-project = "1.1.3"
rpc-toolkit = "0.2.3"
rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git", branch = "refactor/no-dyn-ctx" }
serde = { version = "1.0", features = ["derive", "rc"] }
serde_json = "1.0"
tokio = { version = "1", features = ["full"] }

View File

@@ -12,4 +12,4 @@ if [ -z "$PLATFORM" ]; then
export PLATFORM=$(uname -m)
fi
cargo install --path=./startos --no-default-features --features=cli,docker --bin start-cli --locked
cargo install --path=./startos --no-default-features --features=cli,docker,registry --bin start-cli --locked

View File

@@ -6,6 +6,7 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
axum = "0.7.5"
base64 = "0.21.4"
color-eyre = "0.6.2"
ed25519-dalek = { version = "2.0.0", features = ["serde"] }
@@ -22,8 +23,8 @@ patch-db = { version = "*", path = "../../patch-db/patch-db", features = [
] }
rand = "0.8.5"
regex = "1.10.2"
reqwest = "0.11.22"
rpc-toolkit = "0.2.2"
reqwest = "0.12"
rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git", branch = "refactor/no-dyn-ctx" }
serde = { version = "1.0", features = ["derive", "rc"] }
serde_json = "1.0"
sqlx = { version = "0.7.2", features = [

View File

@@ -1,9 +1,10 @@
use std::fmt::{Debug, Display};
use axum::http::uri::InvalidUri;
use axum::http::StatusCode;
use color_eyre::eyre::eyre;
use num_enum::TryFromPrimitive;
use patch_db::Revision;
use rpc_toolkit::hyper::http::uri::InvalidUri;
use rpc_toolkit::reqwest;
use rpc_toolkit::yajrc::{
RpcError, INVALID_PARAMS_ERROR, INVALID_REQUEST_ERROR, METHOD_NOT_FOUND_ERROR, PARSE_ERROR,
@@ -207,6 +208,13 @@ impl Error {
}
}
}
impl axum::response::IntoResponse for Error {
fn into_response(self) -> axum::response::Response {
let mut res = axum::Json(RpcError::from(self)).into_response();
*res.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
res
}
}
impl From<std::convert::Infallible> for Error {
fn from(value: std::convert::Infallible) -> Self {
match value {}

View File

@@ -22,7 +22,7 @@ name = "startos"
path = "src/lib.rs"
[[bin]]
name = "containerbox"
name = "startbox"
path = "src/main.rs"
[[bin]]
@@ -30,14 +30,19 @@ name = "start-cli"
path = "src/main.rs"
[[bin]]
name = "startbox"
name = "containerbox"
path = "src/main.rs"
[[bin]]
name = "registrybox"
path = "src/main.rs"
[features]
cli = []
container-runtime = []
daemon = []
default = ["cli", "daemon"]
registry = []
default = ["cli", "daemon", "registry"]
dev = []
unstable = ["console-subscriber", "tokio/tracing"]
docker = []
@@ -58,7 +63,7 @@ base32 = "0.4.0"
base64 = "0.21.4"
base64ct = "1.6.0"
basic-cookies = "0.1.4"
blake3 = "1.5.0"
blake3 = { version = "1.5.0", features = ["mmap", "rayon"] }
bytes = "1"
chrono = { version = "0.4.31", features = ["serde"] }
clap = "4.4.12"
@@ -89,6 +94,7 @@ helpers = { path = "../helpers" }
hex = "0.4.3"
hmac = "0.12.1"
http = "1.0.0"
http-body-util = "0.1"
id-pool = { version = "0.2.2", default-features = false, features = [
"serde",
"u16",
@@ -134,10 +140,10 @@ proptest = "1.3.1"
proptest-derive = "0.4.0"
rand = { version = "0.8.5", features = ["std"] }
regex = "1.10.2"
reqwest = { version = "0.11.23", features = ["stream", "json", "socks"] }
reqwest_cookie_store = "0.6.0"
reqwest = { version = "0.12.4", features = ["stream", "json", "socks"] }
reqwest_cookie_store = "0.7.0"
rpassword = "7.2.0"
rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git", branch = "refactor/traits" }
rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git", branch = "refactor/no-dyn-ctx" }
rust-argon2 = "2.0.0"
rustyline-async = "0.4.1"
semver = { version = "1.0.20", features = ["serde"] }
@@ -145,6 +151,7 @@ serde = { version = "1.0", features = ["derive", "rc"] }
serde_cbor = { package = "ciborium", version = "0.2.1" }
serde_json = "1.0"
serde_toml = { package = "toml", version = "0.8.2" }
serde_urlencoded = "0.7"
serde_with = { version = "3.4.0", features = ["macros", "json"] }
serde_yaml = "0.9.25"
sha2 = "0.10.2"

View File

@@ -0,0 +1,7 @@
# Seeds for failure cases proptest has generated in the past. It is
# automatically read and these particular cases re-run before any
# novel cases are generated.
#
# It is recommended to check this file in to source control so that
# everyone who runs the test benefits from these saved cases.
cc dbb4790c31f9e400ed29a9ba2dbd61e3c55ce8a3fbae16601ca3512e803020ed # shrinks to files = []

View File

@@ -1,7 +1,6 @@
use clap::Parser;
pub use models::ActionId;
use models::PackageId;
use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use tracing::instrument;
use ts_rs::TS;

View File

@@ -4,9 +4,10 @@ use chrono::{DateTime, Utc};
use clap::Parser;
use color_eyre::eyre::eyre;
use imbl_value::{json, InternedString};
use itertools::Itertools;
use josekit::jwk::Jwk;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{command, from_fn_async, AnyContext, CallRemote, HandlerExt, ParentHandler};
use rpc_toolkit::{from_fn_async, Context, HandlerArgs, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use tracing::instrument;
use ts_rs::TS;
@@ -82,7 +83,7 @@ impl std::str::FromStr for PasswordType {
})
}
}
pub fn auth() -> ParentHandler {
pub fn auth<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"login",
@@ -94,11 +95,11 @@ pub fn auth() -> ParentHandler {
.subcommand(
"logout",
from_fn_async(logout)
.with_metadata("get-session", Value::Bool(true))
.with_remote_cli::<CliContext>()
.no_display(),
.with_metadata("get_session", Value::Bool(true))
.no_display()
.with_call_remote::<CliContext>(),
)
.subcommand("session", session())
.subcommand("session", session::<C>())
.subcommand(
"reset-password",
from_fn_async(reset_password_impl).no_cli(),
@@ -112,7 +113,7 @@ pub fn auth() -> ParentHandler {
from_fn_async(get_pubkey)
.with_metadata("authenticated", Value::Bool(false))
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
}
@@ -128,26 +129,20 @@ fn gen_pwd() {
.unwrap()
)
}
#[derive(Deserialize, Serialize, Parser)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct CliLoginParams {
password: Option<PasswordType>,
}
#[instrument(skip_all)]
async fn cli_login(
ctx: CliContext,
CliLoginParams { password }: CliLoginParams,
HandlerArgs {
context: ctx,
parent_method,
method,
..
}: HandlerArgs<CliContext>,
) -> Result<(), RpcError> {
let password = if let Some(password) = password {
password.decrypt(&ctx)?
} else {
rpassword::prompt_password("Password: ")?
};
let password = rpassword::prompt_password("Password: ")?;
ctx.call_remote(
"auth.login",
ctx.call_remote::<RpcContext>(
&parent_method.into_iter().chain(method).join("."),
json!({
"password": password,
"metadata": {
@@ -185,7 +180,8 @@ pub fn check_password_against_db(db: &DatabaseModel, password: &str) -> Result<(
#[command(rename_all = "kebab-case")]
pub struct LoginParams {
password: Option<PasswordType>,
#[serde(default)]
#[ts(skip)]
#[serde(rename = "__auth_userAgent")] // from Auth middleware
user_agent: Option<String>,
#[serde(default)]
#[ts(type = "any")]
@@ -226,7 +222,8 @@ pub async fn login_impl(
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct LogoutParams {
#[ts(type = "string")]
#[ts(skip)]
#[serde(rename = "__auth_session")] // from Auth middleware
session: InternedString,
}
@@ -262,23 +259,23 @@ pub struct SessionList {
sessions: Sessions,
}
pub fn session() -> ParentHandler {
pub fn session<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"list",
from_fn_async(list)
.with_metadata("get-session", Value::Bool(true))
.with_metadata("get_session", Value::Bool(true))
.with_display_serializable()
.with_custom_display_fn::<AnyContext, _>(|handle, result| {
.with_custom_display_fn(|handle, result| {
Ok(display_sessions(handle.params, result))
})
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"kill",
from_fn_async(kill)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
}
@@ -374,21 +371,16 @@ pub struct ResetPasswordParams {
#[instrument(skip_all)]
async fn cli_reset_password(
ctx: CliContext,
ResetPasswordParams {
old_password,
new_password,
}: ResetPasswordParams,
HandlerArgs {
context: ctx,
parent_method,
method,
..
}: HandlerArgs<CliContext>,
) -> Result<(), RpcError> {
let old_password = if let Some(old_password) = old_password {
old_password.decrypt(&ctx)?
} else {
rpassword::prompt_password("Current Password: ")?
};
let old_password = rpassword::prompt_password("Current Password: ")?;
let new_password = if let Some(new_password) = new_password {
new_password.decrypt(&ctx)?
} else {
let new_password = {
let new_password = rpassword::prompt_password("New Password: ")?;
if new_password != rpassword::prompt_password("Confirm: ")? {
return Err(Error::new(
@@ -400,8 +392,8 @@ async fn cli_reset_password(
new_password
};
ctx.call_remote(
"auth.reset-password",
ctx.call_remote::<RpcContext>(
&parent_method.into_iter().chain(method).join("."),
imbl_value::json!({ "old-password": old_password, "new-password": new_password }),
)
.await?;
@@ -447,7 +439,7 @@ pub async fn reset_password_impl(
#[instrument(skip_all)]
pub async fn get_pubkey(ctx: RpcContext) -> Result<Jwk, RpcError> {
let secret = ctx.as_ref().clone();
let secret = <RpcContext as AsRef<Jwk>>::as_ref(&ctx).clone();
let pub_key = secret.to_public_key()?;
Ok(pub_key)
}

View File

@@ -20,7 +20,7 @@ use crate::backup::os::OsBackup;
use crate::backup::{BackupReport, ServerBackupReport};
use crate::context::RpcContext;
use crate::db::model::public::BackupProgress;
use crate::db::model::DatabaseModel;
use crate::db::model::{Database, DatabaseModel};
use crate::disk::mount::backup::BackupMountGuard;
use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::{GenericMountGuard, TmpMountGuard};
@@ -42,9 +42,9 @@ pub struct BackupParams {
password: crate::auth::PasswordType,
}
struct BackupStatusGuard(Option<PatchDb>);
struct BackupStatusGuard(Option<TypedPatchDb<Database>>);
impl BackupStatusGuard {
fn new(db: PatchDb) -> Self {
fn new(db: TypedPatchDb<Database>) -> Self {
Self(Some(db))
}
async fn handle_result(
@@ -296,7 +296,7 @@ async fn perform_backup(
if tokio::fs::metadata(&luks_folder_bak).await.is_ok() {
tokio::fs::rename(&luks_folder_bak, &luks_folder_old).await?;
}
let luks_folder = Path::new("/media/embassy/config/luks");
let luks_folder = Path::new("/media/startos/config/luks");
if tokio::fs::metadata(&luks_folder).await.is_ok() {
dir_copy(&luks_folder, &luks_folder_bak, None).await?;
}

View File

@@ -3,7 +3,7 @@ use std::collections::BTreeMap;
use chrono::{DateTime, Utc};
use models::{HostId, PackageId};
use reqwest::Url;
use rpc_toolkit::{from_fn_async, HandlerExt, ParentHandler};
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use crate::context::CliContext;
@@ -34,23 +34,23 @@ pub struct PackageBackupReport {
}
// #[command(subcommands(backup_bulk::backup_all, target::target))]
pub fn backup() -> ParentHandler {
pub fn backup<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"create",
from_fn_async(backup_bulk::backup_all)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand("target", target::target())
.subcommand("target", target::target::<C>())
}
pub fn package_backup() -> ParentHandler {
pub fn package_backup<C: Context>() -> ParentHandler<C> {
ParentHandler::new().subcommand(
"restore",
from_fn_async(restore::restore_packages_rpc)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
}
@@ -61,5 +61,5 @@ struct BackupMetadata {
pub network_keys: BTreeMap<HostId, Base64<[u8; 32]>>,
#[serde(default)]
pub tor_keys: BTreeMap<HostId, Base32<[u8; 64]>>, // DEPRECATED
pub marketplace_url: Option<Url>,
pub registry: Option<Url>,
}

View File

@@ -4,7 +4,7 @@ use std::path::{Path, PathBuf};
use clap::Parser;
use color_eyre::eyre::eyre;
use imbl_value::InternedString;
use rpc_toolkit::{command, from_fn_async, HandlerExt, ParentHandler};
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use ts_rs::TS;
@@ -46,25 +46,25 @@ pub struct CifsBackupTarget {
start_os: Option<EmbassyOsRecoveryInfo>,
}
pub fn cifs() -> ParentHandler {
pub fn cifs<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"add",
from_fn_async(add)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"update",
from_fn_async(update)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"remove",
from_fn_async(remove)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
}

View File

@@ -8,7 +8,7 @@ use color_eyre::eyre::eyre;
use digest::generic_array::GenericArray;
use digest::OutputSizeUser;
use models::PackageId;
use rpc_toolkit::{command, from_fn_async, AnyContext, HandlerExt, ParentHandler};
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use tokio::sync::Mutex;
@@ -138,23 +138,23 @@ impl FileSystem for BackupTargetFS {
}
// #[command(subcommands(cifs::cifs, list, info, mount, umount))]
pub fn target() -> ParentHandler {
pub fn target<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand("cifs", cifs::cifs())
.subcommand("cifs", cifs::cifs::<C>())
.subcommand(
"list",
from_fn_async(list)
.with_display_serializable()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"info",
from_fn_async(info)
.with_display_serializable()
.with_custom_display_fn::<AnyContext, _>(|params, info| {
.with_custom_display_fn::<CliContext, _>(|params, info| {
Ok(display_backup_info(params.params, info))
})
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
}

View File

@@ -5,6 +5,8 @@ use std::path::Path;
#[cfg(feature = "container-runtime")]
pub mod container_cli;
pub mod deprecated;
#[cfg(feature = "registry")]
pub mod registry;
#[cfg(feature = "cli")]
pub mod start_cli;
#[cfg(feature = "daemon")]
@@ -20,6 +22,8 @@ fn select_executable(name: &str) -> Option<fn(VecDeque<OsString>)> {
"start-cli" => Some(container_cli::main),
#[cfg(feature = "daemon")]
"startd" => Some(startd::main),
#[cfg(feature = "registry")]
"registry" => Some(registry::main),
"embassy-cli" => Some(|_| deprecated::renamed("embassy-cli", "start-cli")),
"embassy-sdk" => Some(|_| deprecated::renamed("embassy-sdk", "start-sdk")),
"embassyd" => Some(|_| deprecated::renamed("embassyd", "startd")),

View File

@@ -0,0 +1,86 @@
use std::ffi::OsString;
use clap::Parser;
use futures::FutureExt;
use tokio::signal::unix::signal;
use tracing::instrument;
use crate::net::web_server::WebServer;
use crate::prelude::*;
use crate::registry::context::{RegistryConfig, RegistryContext};
use crate::util::logger::EmbassyLogger;
/// Run the registry server until shutdown is requested.
///
/// Boots a `RegistryContext` from `config`, starts the registry web server on
/// `ctx.listen`, and spawns a task that waits for SIGINT/SIGQUIT/SIGTERM and
/// translates the first such signal into a broadcast on the context's
/// `shutdown` channel. Blocks until that channel fires, then aborts the
/// signal task and gracefully shuts the server down.
///
/// # Errors
/// Fails if context init fails, or if the shutdown channel closes without a
/// message (mapped to `ErrorKind::Unknown`).
#[instrument(skip_all)]
async fn inner_main(config: &RegistryConfig) -> Result<(), Error> {
    let server = async {
        let ctx = RegistryContext::init(config).await?;
        let server = WebServer::registry(ctx.listen, ctx.clone());
        // Subscribe before moving `ctx` into the signal task so a shutdown
        // broadcast cannot be missed.
        let mut shutdown_recv = ctx.shutdown.subscribe();
        let sig_handler_ctx = ctx;
        let sig_handler = tokio::spawn(async move {
            use tokio::signal::unix::SignalKind;
            // Race the three termination signals; whichever arrives first wins.
            futures::future::select_all(
                [
                    SignalKind::interrupt(),
                    SignalKind::quit(),
                    SignalKind::terminate(),
                ]
                .iter()
                .map(|s| {
                    async move {
                        signal(*s)
                            .unwrap_or_else(|_| panic!("register {:?} handler", s))
                            .recv()
                            .await
                    }
                    .boxed()
                }),
            )
            .await;
            // Propagate the signal as a shutdown broadcast; `expect` panics
            // only if every receiver is gone, i.e. boot was already broken.
            sig_handler_ctx
                .shutdown
                .send(())
                .map_err(|_| ())
                .expect("send shutdown signal");
        });
        shutdown_recv
            .recv()
            .await
            .with_kind(crate::ErrorKind::Unknown)?;
        sig_handler.abort();
        Ok::<_, Error>(server)
    }
    .await?;
    server.shutdown().await;
    Ok(())
}
/// Entry point for the `registry` executable (dispatched via `select_executable`).
///
/// Initializes logging, parses CLI args into a `RegistryConfig`, builds a
/// multi-threaded tokio runtime, and drives `inner_main` to completion. On
/// error, prints the source to stderr and exits with the error kind as the
/// process exit code.
pub fn main(args: impl IntoIterator<Item = OsString>) {
    EmbassyLogger::init();
    // NOTE(review): `.unwrap()` aborts on a malformed/unreadable config —
    // presumably acceptable for a top-level binary entry point.
    let config = RegistryConfig::parse_from(args).load().unwrap();
    let res = {
        let rt = tokio::runtime::Builder::new_multi_thread()
            .enable_all()
            .build()
            .expect("failed to initialize runtime");
        rt.block_on(inner_main(&config))
    };
    match res {
        Ok(()) => (),
        Err(e) => {
            eprintln!("{}", e.source);
            tracing::debug!("{:?}", e.source);
            // Drop the source before exiting so its destructor runs;
            // `process::exit` would otherwise skip it.
            drop(e.source);
            std::process::exit(e.kind as i32)
        }
    }
}

View File

@@ -16,7 +16,7 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
EmbassyLogger::init();
if let Err(e) = CliApp::new(
|cfg: ClientConfig| Ok(CliContext::init(cfg.load()?)?),
crate::main_api(),
crate::expanded_api(),
)
.run(args)
{

View File

@@ -104,7 +104,7 @@ async fn setup_or_init(config: &ServerConfig) -> Result<Option<Shutdown>, Error>
Command::new("reboot")
.invoke(crate::ErrorKind::Unknown)
.await?;
} else if tokio::fs::metadata("/media/embassy/config/disk.guid")
} else if tokio::fs::metadata("/media/startos/config/disk.guid")
.await
.is_err()
{
@@ -136,7 +136,7 @@ async fn setup_or_init(config: &ServerConfig) -> Result<Option<Shutdown>, Error>
tracing::debug!("{:?}", e);
}
} else {
let guid_string = tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
let guid_string = tokio::fs::read_to_string("/media/startos/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?;
let guid = guid_string.trim();
let requires_reboot = crate::disk::main::import(
@@ -202,7 +202,7 @@ async fn inner_main(config: &ServerConfig) -> Result<Option<Shutdown>, Error> {
crate::sound::BEP.play().await?;
run_script_if_exists("/media/embassy/config/preinit.sh").await;
run_script_if_exists("/media/startos/config/preinit.sh").await;
let res = match setup_or_init(config).await {
Err(e) => {
@@ -213,12 +213,12 @@ async fn inner_main(config: &ServerConfig) -> Result<Option<Shutdown>, Error> {
let ctx = DiagnosticContext::init(
config,
if tokio::fs::metadata("/media/embassy/config/disk.guid")
if tokio::fs::metadata("/media/startos/config/disk.guid")
.await
.is_ok()
{
Some(Arc::new(
tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
tokio::fs::read_to_string("/media/startos/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?
.trim()
.to_owned(),
@@ -245,7 +245,7 @@ async fn inner_main(config: &ServerConfig) -> Result<Option<Shutdown>, Error> {
Ok(s) => Ok(s),
};
run_script_if_exists("/media/embassy/config/postinit.sh").await;
run_script_if_exists("/media/startos/config/postinit.sh").await;
res
}

View File

@@ -23,7 +23,7 @@ async fn inner_main(config: &ServerConfig) -> Result<Option<Shutdown>, Error> {
let rpc_ctx = RpcContext::init(
config,
Arc::new(
tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
tokio::fs::read_to_string("/media/startos/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?
.trim()
.to_owned(),
@@ -129,12 +129,12 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
crate::sound::BEETHOVEN.play().await?;
let ctx = DiagnosticContext::init(
&config,
if tokio::fs::metadata("/media/embassy/config/disk.guid")
if tokio::fs::metadata("/media/startos/config/disk.guid")
.await
.is_ok()
{
Some(Arc::new(
tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
tokio::fs::read_to_string("/media/startos/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?
.trim()
.to_owned(),

View File

@@ -9,7 +9,7 @@ use models::{ErrorKind, OptionExt, PackageId};
use patch_db::value::InternedString;
use patch_db::Value;
use regex::Regex;
use rpc_toolkit::{from_fn_async, Empty, HandlerExt, ParentHandler};
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use tracing::instrument;
use ts_rs::TS;
@@ -134,16 +134,19 @@ pub struct ConfigParams {
}
// #[command(subcommands(get, set))]
pub fn config() -> ParentHandler<ConfigParams> {
pub fn config<C: Context>() -> ParentHandler<C, ConfigParams> {
ParentHandler::new()
.subcommand(
"get",
from_fn_async(get)
.with_inherited(|ConfigParams { id }, _| id)
.with_display_serializable()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"set",
set::<C>().with_inherited(|ConfigParams { id }, _| id),
)
.subcommand("set", set().with_inherited(|ConfigParams { id }, _| id))
}
#[instrument(skip_all)]
@@ -173,13 +176,13 @@ pub struct SetParams {
// metadata(sync_db = true)
// )]
#[instrument(skip_all)]
pub fn set() -> ParentHandler<SetParams, PackageId> {
pub fn set<C: Context>() -> ParentHandler<C, SetParams, PackageId> {
ParentHandler::new().root_handler(
from_fn_async(set_impl)
.with_metadata("sync_db", Value::Bool(true))
.with_inherited(|set_params, id| (id, set_params))
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
}

View File

@@ -10,7 +10,7 @@ use reqwest::Proxy;
use reqwest_cookie_store::CookieStoreMutex;
use rpc_toolkit::reqwest::{Client, Url};
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{call_remote_http, CallRemote, Context};
use rpc_toolkit::{call_remote_http, CallRemote, Context, Empty};
use tokio::net::TcpStream;
use tokio::runtime::Runtime;
use tokio_tungstenite::{MaybeTlsStream, WebSocketStream};
@@ -18,15 +18,17 @@ use tracing::instrument;
use super::setup::CURRENT_SECRET;
use crate::context::config::{local_config_path, ClientConfig};
use crate::core::rpc_continuations::RequestGuid;
use crate::context::{DiagnosticContext, InstallContext, RpcContext, SetupContext};
use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH;
use crate::prelude::*;
use crate::rpc_continuations::RequestGuid;
#[derive(Debug)]
pub struct CliContextSeed {
pub runtime: OnceCell<Runtime>,
pub base_url: Url,
pub rpc_url: Url,
pub registry_url: Option<Url>,
pub client: Client,
pub cookie_store: Arc<CookieStoreMutex>,
pub cookie_path: PathBuf,
@@ -66,6 +68,8 @@ impl CliContext {
"http://localhost".parse()?
};
let registry = config.registry.clone();
let cookie_path = config.cookie_path.unwrap_or_else(|| {
local_config_path()
.as_deref()
@@ -104,6 +108,17 @@ impl CliContext {
.push("v1");
url
},
registry_url: registry
.map(|mut registry| {
registry
.path_segments_mut()
.map_err(|_| eyre!("Url cannot be base"))
.with_kind(crate::ErrorKind::ParseUrl)?
.push("rpc")
.push("v0");
Ok::<_, Error>(registry)
})
.transpose()?,
client: {
let mut builder = Client::builder().cookie_provider(cookie_store.clone());
if let Some(proxy) = config.proxy {
@@ -198,6 +213,29 @@ impl CliContext {
.await
.with_kind(ErrorKind::Network)
}
/// Dispatch an RPC to the remote server, selecting the target context type
/// via the `RemoteContext` type parameter.
///
/// Convenience wrapper that fixes the `CallRemote` extra-data parameter to
/// `Empty`, so callers can write `ctx.call_remote::<RpcContext>(...)`.
pub async fn call_remote<RemoteContext>(
    &self,
    method: &str,
    params: Value,
) -> Result<Value, RpcError>
where
    Self: CallRemote<RemoteContext>,
{
    <Self as CallRemote<RemoteContext, Empty>>::call_remote(&self, method, params, Empty {})
        .await
}
/// Like `call_remote`, but forwards caller-supplied `extra` data of type `T`
/// to the `CallRemote<RemoteContext, T>` implementation.
pub async fn call_remote_with<RemoteContext, T>(
    &self,
    method: &str,
    params: Value,
    extra: T,
) -> Result<Value, RpcError>
where
    Self: CallRemote<RemoteContext, T>,
{
    <Self as CallRemote<RemoteContext, T>>::call_remote(&self, method, params, extra).await
}
}
impl AsRef<Jwk> for CliContext {
fn as_ref(&self) -> &Jwk {
@@ -223,9 +261,23 @@ impl Context for CliContext {
.clone()
}
}
#[async_trait::async_trait]
impl CallRemote for CliContext {
async fn call_remote(&self, method: &str, params: Value) -> Result<Value, RpcError> {
impl CallRemote<RpcContext> for CliContext {
async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
call_remote_http(&self.client, self.rpc_url.clone(), method, params).await
}
}
// CLI -> diagnostic-mode server: same HTTP transport and URL as the normal
// RPC path; only the target context type differs.
impl CallRemote<DiagnosticContext> for CliContext {
    async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
        call_remote_http(&self.client, self.rpc_url.clone(), method, params).await
    }
}
// CLI -> setup-mode server: identical transport to the normal RPC path.
impl CallRemote<SetupContext> for CliContext {
    async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
        call_remote_http(&self.client, self.rpc_url.clone(), method, params).await
    }
}
// CLI -> install-mode server: identical transport to the normal RPC path.
impl CallRemote<InstallContext> for CliContext {
    async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
        call_remote_http(&self.client, self.rpc_url.clone(), method, params).await
    }
}

View File

@@ -14,7 +14,7 @@ use crate::init::init_postgres;
use crate::prelude::*;
use crate::util::serde::IoFormat;
pub const DEVICE_CONFIG_PATH: &str = "/media/embassy/config/config.yaml"; // "/media/startos/config/config.yaml";
pub const DEVICE_CONFIG_PATH: &str = "/media/startos/config/config.yaml"; // "/media/startos/config/config.yaml";
pub const CONFIG_PATH: &str = "/etc/startos/config.yaml";
pub const CONFIG_PATH_LOCAL: &str = ".startos/config.yaml";
@@ -58,6 +58,8 @@ pub struct ClientConfig {
pub config: Option<PathBuf>,
#[arg(short = 'h', long = "host")]
pub host: Option<Url>,
#[arg(short = 'r', long = "registry")]
pub registry: Option<Url>,
#[arg(short = 'p', long = "proxy")]
pub proxy: Option<Url>,
#[arg(long = "cookie-path")]
@@ -71,8 +73,10 @@ impl ContextConfig for ClientConfig {
}
fn merge_with(&mut self, other: Self) {
self.host = self.host.take().or(other.host);
self.registry = self.registry.take().or(other.registry);
self.proxy = self.proxy.take().or(other.proxy);
self.cookie_path = self.cookie_path.take().or(other.cookie_path);
self.developer_key_path = self.developer_key_path.take().or(other.developer_key_path);
}
}
impl ClientConfig {

View File

@@ -8,6 +8,7 @@ use tokio::sync::broadcast::Sender;
use tracing::instrument;
use crate::context::config::ServerConfig;
use crate::rpc_continuations::RpcContinuations;
use crate::shutdown::Shutdown;
use crate::Error;
@@ -16,6 +17,7 @@ pub struct DiagnosticContextSeed {
pub shutdown: Sender<Option<Shutdown>>,
pub error: Arc<RpcError>,
pub disk_guid: Option<Arc<String>>,
pub rpc_continuations: RpcContinuations,
}
#[derive(Clone)]
@@ -37,10 +39,15 @@ impl DiagnosticContext {
shutdown,
disk_guid,
error: Arc::new(error.into()),
rpc_continuations: RpcContinuations::new(),
})))
}
}
// Expose the continuation registry so generic continuation handlers can be
// used with a DiagnosticContext.
impl AsRef<RpcContinuations> for DiagnosticContext {
    fn as_ref(&self) -> &RpcContinuations {
        &self.rpc_continuations
    }
}
impl Context for DiagnosticContext {}
impl Deref for DiagnosticContext {
type Target = DiagnosticContextSeed;

View File

@@ -6,33 +6,32 @@ use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;
use imbl_value::InternedString;
use josekit::jwk::Jwk;
use patch_db::PatchDb;
use reqwest::{Client, Proxy};
use rpc_toolkit::Context;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{CallRemote, Context, Empty};
use tokio::sync::{broadcast, oneshot, Mutex, RwLock};
use tokio::time::Instant;
use tracing::instrument;
use super::setup::CURRENT_SECRET;
use crate::account::AccountInfo;
use crate::context::config::ServerConfig;
use crate::core::rpc_continuations::{RequestGuid, RestHandler, RpcContinuation, WebSocketHandler};
use crate::db::prelude::PatchDbExt;
use crate::db::model::Database;
use crate::dependencies::compute_dependency_config_errs;
use crate::disk::OsPartitionInfo;
use crate::init::check_time_is_synchronized;
use crate::lxc::{LxcContainer, LxcManager};
use crate::lxc::{ContainerId, LxcContainer, LxcManager};
use crate::middleware::auth::HashSessionToken;
use crate::net::net_controller::NetController;
use crate::net::utils::{find_eth_iface, find_wifi_iface};
use crate::net::wifi::WpaCli;
use crate::prelude::*;
use crate::rpc_continuations::RpcContinuations;
use crate::service::ServiceMap;
use crate::shutdown::Shutdown;
use crate::system::get_mem_info;
use crate::util::lshw::{lshw, LshwDevice};
use crate::{account::AccountInfo, lxc::ContainerId};
pub struct RpcContextSeed {
is_closed: AtomicBool,
@@ -41,7 +40,7 @@ pub struct RpcContextSeed {
pub ethernet_interface: String,
pub datadir: PathBuf,
pub disk_guid: Arc<String>,
pub db: PatchDb,
pub db: TypedPatchDb<Database>,
pub account: RwLock<AccountInfo>,
pub net_controller: Arc<NetController>,
pub services: ServiceMap,
@@ -50,7 +49,7 @@ pub struct RpcContextSeed {
pub tor_socks: SocketAddr,
pub lxc_manager: Arc<LxcManager>,
pub open_authed_websockets: Mutex<BTreeMap<HashSessionToken, Vec<oneshot::Sender<()>>>>,
pub rpc_stream_continuations: Mutex<BTreeMap<RequestGuid, RpcContinuation>>,
pub rpc_continuations: RpcContinuations,
pub wifi_manager: Option<Arc<RwLock<WpaCli>>>,
pub current_secret: Arc<Jwk>,
pub client: Client,
@@ -80,7 +79,7 @@ impl RpcContext {
)));
let (shutdown, _) = tokio::sync::broadcast::channel(1);
let db = config.db().await?;
let db = TypedPatchDb::<Database>::load(config.db().await?).await?;
let peek = db.peek().await;
let account = AccountInfo::load(&peek)?;
tracing::info!("Opened PatchDB");
@@ -159,7 +158,7 @@ impl RpcContext {
tor_socks: tor_proxy,
lxc_manager: Arc::new(LxcManager::new()),
open_authed_websockets: Mutex::new(BTreeMap::new()),
rpc_stream_continuations: Mutex::new(BTreeMap::new()),
rpc_continuations: RpcContinuations::new(),
wifi_manager: wifi_interface
.clone()
.map(|i| Arc::new(RwLock::new(WpaCli::init(i)))),
@@ -236,54 +235,27 @@ impl RpcContext {
Ok(())
}
#[instrument(skip_all)]
pub async fn clean_continuations(&self) {
let mut continuations = self.rpc_stream_continuations.lock().await;
let mut to_remove = Vec::new();
for (guid, cont) in &*continuations {
if cont.is_timed_out() {
to_remove.push(guid.clone());
}
}
for guid in to_remove {
continuations.remove(&guid);
}
}
#[instrument(skip_all)]
pub async fn add_continuation(&self, guid: RequestGuid, handler: RpcContinuation) {
self.clean_continuations().await;
self.rpc_stream_continuations
.lock()
.await
.insert(guid, handler);
}
pub async fn get_ws_continuation_handler(
pub async fn call_remote<RemoteContext>(
&self,
guid: &RequestGuid,
) -> Option<WebSocketHandler> {
let mut continuations = self.rpc_stream_continuations.lock().await;
if !matches!(continuations.get(guid), Some(RpcContinuation::WebSocket(_))) {
return None;
}
let Some(RpcContinuation::WebSocket(x)) = continuations.remove(guid) else {
return None;
};
x.get().await
method: &str,
params: Value,
) -> Result<Value, RpcError>
where
Self: CallRemote<RemoteContext>,
{
<Self as CallRemote<RemoteContext, Empty>>::call_remote(&self, method, params, Empty {})
.await
}
pub async fn get_rest_continuation_handler(&self, guid: &RequestGuid) -> Option<RestHandler> {
let mut continuations: tokio::sync::MutexGuard<'_, BTreeMap<RequestGuid, RpcContinuation>> =
self.rpc_stream_continuations.lock().await;
if !matches!(continuations.get(guid), Some(RpcContinuation::Rest(_))) {
return None;
}
let Some(RpcContinuation::Rest(x)) = continuations.remove(guid) else {
return None;
};
x.get().await
pub async fn call_remote_with<RemoteContext, T>(
&self,
method: &str,
params: Value,
extra: T,
) -> Result<Value, RpcError>
where
Self: CallRemote<RemoteContext, T>,
{
<Self as CallRemote<RemoteContext, T>>::call_remote(&self, method, params, extra).await
}
}
impl AsRef<Jwk> for RpcContext {
@@ -291,6 +263,11 @@ impl AsRef<Jwk> for RpcContext {
&CURRENT_SECRET
}
}
// Expose the continuation registry so generic continuation handlers can be
// used with an RpcContext (replaces the removed per-context handler methods).
impl AsRef<RpcContinuations> for RpcContext {
    fn as_ref(&self) -> &RpcContinuations {
        &self.rpc_continuations
    }
}
impl Context for RpcContext {}
impl Deref for RpcContext {
type Target = RpcContextSeed;

View File

@@ -1,7 +1,6 @@
use clap::Parser;
use color_eyre::eyre::eyre;
use models::PackageId;
use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use tracing::instrument;
use ts_rs::TS;

View File

@@ -1 +0,0 @@
pub mod rpc_continuations;

View File

@@ -1,75 +0,0 @@
use std::time::Duration;
use axum::extract::ws::WebSocket;
use axum::extract::Request;
use axum::response::Response;
use futures::future::BoxFuture;
use helpers::TimedResource;
use imbl_value::InternedString;
#[allow(unused_imports)]
use crate::prelude::*;
use crate::util::new_guid;
/// Opaque identifier for an RPC continuation request.
///
/// Wraps an interned 32-character base32 string (A-Z, 2-7); see
/// `RequestGuid::from` for the accepted format.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, serde::Serialize, serde::Deserialize)]
pub struct RequestGuid(InternedString);
impl RequestGuid {
    /// Generate a fresh random guid via `new_guid`.
    pub fn new() -> Self {
        Self(new_guid())
    }
    /// Parse a guid from a string.
    ///
    /// Returns `None` unless `r` is exactly 32 characters drawn from the
    /// RFC 4648 base32 alphabet (uppercase A-Z and digits 2-7).
    pub fn from(r: &str) -> Option<RequestGuid> {
        if r.len() != 32 {
            return None;
        }
        for c in r.chars() {
            if !(c >= 'A' && c <= 'Z' || c >= '2' && c <= '7') {
                return None;
            }
        }
        Some(RequestGuid(InternedString::intern(r)))
    }
}
// Borrow the underlying guid string without allocating.
impl AsRef<str> for RequestGuid {
    fn as_ref(&self) -> &str {
        self.0.as_ref()
    }
}
// Round-trip smoke test: a freshly generated guid fed back through
// `RequestGuid::from`.
// NOTE(review): this only prints the result; it would be stronger as
// `assert!(... .is_some())`.
#[test]
fn parse_guid() {
    println!(
        "{:?}",
        RequestGuid::from(&format!("{}", RequestGuid::new()))
    )
}
// Display delegates to the inner interned string, so `to_string()` yields
// the raw 32-character guid accepted by `RequestGuid::from`.
impl std::fmt::Display for RequestGuid {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.0.fmt(f)
    }
}
/// Boxed one-shot handler that serves a plain HTTP request for a continuation.
pub type RestHandler =
    Box<dyn FnOnce(Request) -> BoxFuture<'static, Result<Response, crate::Error>> + Send>;
/// Boxed one-shot handler that takes over an upgraded websocket connection.
pub type WebSocketHandler = Box<dyn FnOnce(WebSocket) -> BoxFuture<'static, ()> + Send>;
/// A pending RPC continuation: either a REST or a websocket handler, each
/// wrapped in a `TimedResource` so unclaimed handlers expire.
pub enum RpcContinuation {
    Rest(TimedResource<RestHandler>),
    WebSocket(TimedResource<WebSocketHandler>),
}
impl RpcContinuation {
    /// Wrap a REST handler that expires after `timeout` if never claimed.
    pub fn rest(handler: RestHandler, timeout: Duration) -> Self {
        RpcContinuation::Rest(TimedResource::new(handler, timeout))
    }
    /// Wrap a websocket handler that expires after `timeout` if never claimed.
    pub fn ws(handler: WebSocketHandler, timeout: Duration) -> Self {
        RpcContinuation::WebSocket(TimedResource::new(handler, timeout))
    }
    /// Whether the underlying timed resource has expired (used by cleanup
    /// passes to garbage-collect stale continuations).
    pub fn is_timed_out(&self) -> bool {
        match self {
            RpcContinuation::Rest(a) => a.is_timed_out(),
            RpcContinuation::WebSocket(a) => a.is_timed_out(),
        }
    }
}

View File

@@ -11,10 +11,11 @@ use clap::Parser;
use futures::{FutureExt, StreamExt};
use http::header::COOKIE;
use http::HeaderMap;
use itertools::Itertools;
use patch_db::json_ptr::{JsonPointer, ROOT};
use patch_db::{Dump, Revision};
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{command, from_fn_async, CallRemote, HandlerExt, ParentHandler};
use rpc_toolkit::{from_fn_async, Context, HandlerArgs, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use tokio::sync::oneshot;
@@ -167,11 +168,11 @@ pub async fn subscribe(
}))
}
pub fn db() -> ParentHandler {
pub fn db<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand("dump", from_fn_async(cli_dump).with_display_serializable())
.subcommand("dump", from_fn_async(dump).no_cli())
.subcommand("put", put())
.subcommand("put", put::<C>())
.subcommand("apply", from_fn_async(cli_apply).no_display())
.subcommand("apply", from_fn_async(apply).no_cli())
}
@@ -195,21 +196,28 @@ pub struct CliDumpParams {
#[instrument(skip_all)]
async fn cli_dump(
ctx: CliContext,
CliDumpParams {
path,
include_private,
}: CliDumpParams,
HandlerArgs {
context,
parent_method,
method,
params: CliDumpParams {
include_private,
path,
},
..
}: HandlerArgs<CliContext, CliDumpParams>,
) -> Result<Dump, RpcError> {
let dump = if let Some(path) = path {
PatchDb::open(path).await?.dump(&ROOT).await
} else {
let method = parent_method.into_iter().chain(method).join(".");
from_value::<Dump>(
ctx.call_remote(
"db.dump",
imbl_value::json!({ "includePrivate":include_private }),
)
.await?,
context
.call_remote::<RpcContext>(
&method,
imbl_value::json!({ "includePrivate":include_private }),
)
.await?,
)?
};
@@ -237,36 +245,54 @@ pub async fn dump(
})
}
#[derive(Deserialize, Serialize, Parser)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct CliApplyParams {
expr: String,
path: Option<PathBuf>,
}
#[instrument(skip_all)]
async fn cli_apply(
ctx: CliContext,
ApplyParams { expr, path }: ApplyParams,
HandlerArgs {
context,
parent_method,
method,
params: CliApplyParams { expr, path },
..
}: HandlerArgs<CliContext, CliApplyParams>,
) -> Result<(), RpcError> {
if let Some(path) = path {
PatchDb::open(path)
.await?
.mutate(|db| {
.apply_function(|db| {
let res = apply_expr(
serde_json::to_value(patch_db::Value::from(db.clone()))
serde_json::to_value(patch_db::Value::from(db))
.with_kind(ErrorKind::Deserialization)?
.into(),
&expr,
)?;
db.ser(
&serde_json::from_value::<model::Database>(res.clone().into()).with_ctx(
|_| {
(
crate::ErrorKind::Deserialization,
"result does not match database model",
)
},
Ok::<_, Error>((
to_value(
&serde_json::from_value::<model::Database>(res.clone().into()).with_ctx(
|_| {
(
crate::ErrorKind::Deserialization,
"result does not match database model",
)
},
)?,
)?,
)
(),
))
})
.await?;
} else {
ctx.call_remote("db.apply", imbl_value::json!({ "expr": expr }))
let method = parent_method.into_iter().chain(method).join(".");
context
.call_remote::<RpcContext>(&method, imbl_value::json!({ "expr": expr }))
.await?;
}
@@ -278,10 +304,9 @@ async fn cli_apply(
#[command(rename_all = "kebab-case")]
pub struct ApplyParams {
expr: String,
path: Option<PathBuf>,
}
pub async fn apply(ctx: RpcContext, ApplyParams { expr, .. }: ApplyParams) -> Result<(), Error> {
pub async fn apply(ctx: RpcContext, ApplyParams { expr }: ApplyParams) -> Result<(), Error> {
ctx.db
.mutate(|db| {
let res = apply_expr(
@@ -303,12 +328,12 @@ pub async fn apply(ctx: RpcContext, ApplyParams { expr, .. }: ApplyParams) -> Re
.await
}
pub fn put() -> ParentHandler {
pub fn put<C: Context>() -> ParentHandler<C> {
ParentHandler::new().subcommand(
"ui",
from_fn_async(ui)
.with_display_serializable()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
}
#[derive(Deserialize, Serialize, Parser, TS)]

View File

@@ -54,7 +54,7 @@ impl PackageState {
pub fn expect_installed(&self) -> Result<&InstalledState, Error> {
match self {
Self::Installed(a) => Ok(a),
a => Err(Error::new(
_ => Err(Error::new(
eyre!(
"Package {} is not in installed state",
self.as_manifest(ManifestPreference::Old).id
@@ -161,7 +161,7 @@ impl Model<PackageState> {
pub fn expect_installed(&self) -> Result<&Model<InstalledState>, Error> {
match self.as_match() {
PackageStateMatchModelRef::Installed(a) => Ok(a),
a => Err(Error::new(
_ => Err(Error::new(
eyre!(
"Package {} is not in installed state",
self.as_manifest(ManifestPreference::Old).as_id().de()?
@@ -251,7 +251,7 @@ impl Model<PackageState> {
PackageStateMatchModelMut::Installed(s) | PackageStateMatchModelMut::Removing(s) => {
s.as_manifest_mut()
}
PackageStateMatchModelMut::Error(s) => {
PackageStateMatchModelMut::Error(_) => {
return Err(Error::new(
eyre!("could not determine package state to get manifest"),
ErrorKind::Database,
@@ -325,7 +325,7 @@ pub struct PackageDataEntry {
pub state_info: PackageState,
pub status: Status,
#[ts(type = "string | null")]
pub marketplace_url: Option<Url>,
pub registry: Option<Url>,
#[ts(type = "string")]
pub developer_key: Pem<ed25519_dalek::VerifyingKey>,
pub icon: DataUrl<'static>,

View File

@@ -19,6 +19,7 @@ use crate::account::AccountInfo;
use crate::db::model::package::AllPackageData;
use crate::net::utils::{get_iface_ipv4_addr, get_iface_ipv6_addr};
use crate::prelude::*;
use crate::progress::FullProgress;
use crate::util::cpupower::Governor;
use crate::util::Version;
use crate::version::{Current, VersionT};
@@ -175,24 +176,13 @@ pub struct BackupProgress {
pub struct ServerStatus {
pub backup_progress: Option<BTreeMap<PackageId, BackupProgress>>,
pub updated: bool,
pub update_progress: Option<UpdateProgress>,
pub update_progress: Option<FullProgress>,
#[serde(default)]
pub shutting_down: bool,
#[serde(default)]
pub restarting: bool,
}
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct UpdateProgress {
#[ts(type = "number | null")]
pub size: Option<u64>,
#[ts(type = "number")]
pub downloaded: u64,
}
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]

View File

@@ -1,22 +1,16 @@
use std::collections::BTreeMap;
use std::future::Future;
use std::marker::PhantomData;
use std::panic::UnwindSafe;
use std::str::FromStr;
use chrono::{DateTime, Utc};
pub use imbl_value::Value;
use patch_db::json_ptr::ROOT;
use patch_db::value::InternedString;
pub use patch_db::{HasModel, PatchDb};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use crate::db::model::DatabaseModel;
use crate::prelude::*;
pub type Peeked = Model<super::model::Database>;
pub fn to_value<T>(value: &T) -> Result<Value, Error>
where
T: Serialize,
@@ -31,45 +25,7 @@ where
patch_db::value::from_value(value).with_kind(ErrorKind::Deserialization)
}
pub trait PatchDbExt {
fn peek(&self) -> impl Future<Output = DatabaseModel> + Send;
fn mutate<U: UnwindSafe + Send>(
&self,
f: impl FnOnce(&mut DatabaseModel) -> Result<U, Error> + UnwindSafe + Send,
) -> impl Future<Output = Result<U, Error>> + Send;
fn map_mutate(
&self,
f: impl FnOnce(DatabaseModel) -> Result<DatabaseModel, Error> + UnwindSafe + Send,
) -> impl Future<Output = Result<DatabaseModel, Error>> + Send;
}
impl PatchDbExt for PatchDb {
async fn peek(&self) -> DatabaseModel {
DatabaseModel::from(self.dump(&ROOT).await.value)
}
async fn mutate<U: UnwindSafe + Send>(
&self,
f: impl FnOnce(&mut DatabaseModel) -> Result<U, Error> + UnwindSafe + Send,
) -> Result<U, Error> {
Ok(self
.apply_function(|mut v| {
let model = <&mut DatabaseModel>::from(&mut v);
let res = f(model)?;
Ok::<_, Error>((v, res))
})
.await?
.1)
}
async fn map_mutate(
&self,
f: impl FnOnce(DatabaseModel) -> Result<DatabaseModel, Error> + UnwindSafe + Send,
) -> Result<DatabaseModel, Error> {
Ok(DatabaseModel::from(
self.apply_function(|v| f(DatabaseModel::from(v)).map(|a| (a.into(), ())))
.await?
.0,
))
}
}
pub type TypedPatchDb<T> = patch_db::TypedPatchDb<T, Error>;
/// &mut Model<T> <=> &mut Value
#[repr(transparent)]
@@ -125,7 +81,7 @@ impl<T: Serialize + DeserializeOwned> Model<T> {
Ok(res)
}
pub fn map_mutate(&mut self, f: impl FnOnce(T) -> Result<T, Error>) -> Result<T, Error> {
let mut orig = self.de()?;
let orig = self.de()?;
let res = f(orig)?;
self.ser(&res)?;
Ok(res)
@@ -262,10 +218,9 @@ where
.into()),
}
}
pub fn upsert<F, D>(&mut self, key: &T::Key, value: F) -> Result<&mut Model<T::Value>, Error>
pub fn upsert<F>(&mut self, key: &T::Key, value: F) -> Result<&mut Model<T::Value>, Error>
where
F: FnOnce() -> D,
D: AsRef<T::Value>,
F: FnOnce() -> T::Value,
{
use serde::ser::Error;
match &mut self.value {
@@ -278,7 +233,7 @@ where
s.as_ref().index_or_insert(v)
});
if !exists {
res.ser(value().as_ref())?;
res.ser(&value())?;
}
Ok(res)
}
@@ -375,6 +330,18 @@ where
}
}
impl<T: Map> Model<T> {
/// Check whether `key` is present in this map model without deserializing
/// the value.
///
/// # Errors
/// Errors (rather than returning `false`) when the underlying value is not
/// a JSON object, since that indicates the model does not match the data.
pub fn contains_key(&self, key: &T::Key) -> Result<bool, Error> {
    use serde::de::Error;
    let s = T::key_str(key)?;
    match &self.value {
        Value::Object(o) => Ok(o.contains_key(s.as_ref())),
        v => Err(patch_db::value::Error {
            source: patch_db::value::ErrorSource::custom(format!("expected object found {v}")),
            kind: patch_db::value::ErrorKind::Deserialization,
        }
        .into()),
    }
}
pub fn into_idx(self, key: &T::Key) -> Option<Model<T::Value>> {
use patch_db::ModelExt;
let s = T::key_str(key).ok()?;

View File

@@ -4,7 +4,7 @@ use std::time::Duration;
use clap::Parser;
use models::PackageId;
use patch_db::json_patch::merge;
use rpc_toolkit::{command, from_fn_async, Empty, HandlerExt, ParentHandler};
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use tracing::instrument;
use ts_rs::TS;
@@ -15,8 +15,8 @@ use crate::db::model::package::CurrentDependencies;
use crate::prelude::*;
use crate::Error;
pub fn dependency() -> ParentHandler {
ParentHandler::new().subcommand("configure", configure())
pub fn dependency<C: Context>() -> ParentHandler<C> {
ParentHandler::new().subcommand("configure", configure::<C>())
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel, TS)]
@@ -50,7 +50,7 @@ pub struct ConfigureParams {
dependent_id: PackageId,
dependency_id: PackageId,
}
pub fn configure() -> ParentHandler<ConfigureParams> {
pub fn configure<C: Context>() -> ParentHandler<C, ConfigureParams> {
ParentHandler::new().root_handler(
from_fn_async(configure_impl)
.with_inherited(|params, _| params)

View File

@@ -8,6 +8,7 @@ use ed25519_dalek::{SigningKey, VerifyingKey};
use tracing::instrument;
use crate::context::CliContext;
use crate::util::serde::Pem;
use crate::{Error, ResultExt};
#[instrument(skip_all)]
@@ -45,3 +46,7 @@ pub fn init(ctx: CliContext) -> Result<(), Error> {
}
Ok(())
}
/// Derive the public verifying key from the locally stored developer
/// signing key and wrap it for PEM serialization.
pub fn pubkey(ctx: CliContext) -> Result<Pem<ed25519_dalek::VerifyingKey>, Error> {
    let signing_key = ctx.developer_key()?;
    Ok(Pem(signing_key.verifying_key()))
}

View File

@@ -1,38 +1,48 @@
use std::path::Path;
use std::sync::Arc;
use clap::Parser;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{command, from_fn, from_fn_async, AnyContext, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use rpc_toolkit::{
from_fn, from_fn_async, CallRemoteHandler, Context, Empty, HandlerExt, ParentHandler,
};
use crate::context::{CliContext, DiagnosticContext};
use crate::context::{CliContext, DiagnosticContext, RpcContext};
use crate::init::SYSTEM_REBUILD_PATH;
use crate::logs::{fetch_logs, LogResponse, LogSource};
use crate::shutdown::Shutdown;
use crate::Error;
pub fn diagnostic() -> ParentHandler {
pub fn diagnostic<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand("error", from_fn(error).with_remote_cli::<CliContext>())
.subcommand("logs", from_fn_async(logs).no_cli())
.subcommand("error", from_fn(error).with_call_remote::<CliContext>())
.subcommand("logs", crate::system::logs::<DiagnosticContext>())
.subcommand(
"logs",
from_fn_async(crate::logs::cli_logs::<DiagnosticContext, Empty>).no_display(),
)
.subcommand(
"kernel-logs",
crate::system::kernel_logs::<DiagnosticContext>(),
)
.subcommand(
"kernel-logs",
from_fn_async(crate::logs::cli_logs::<DiagnosticContext, Empty>).no_display(),
)
.subcommand(
"exit",
from_fn(exit).no_display().with_remote_cli::<CliContext>(),
from_fn(exit).no_display().with_call_remote::<CliContext>(),
)
.subcommand(
"restart",
from_fn(restart)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand("disk", disk())
.subcommand("disk", disk::<C>())
.subcommand(
"rebuild",
from_fn_async(rebuild)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
}
@@ -41,26 +51,6 @@ pub fn error(ctx: DiagnosticContext) -> Result<Arc<RpcError>, Error> {
Ok(ctx.error.clone())
}
// Pagination parameters for the diagnostic `logs` endpoint.
// NOTE: plain `//` comments on purpose — `///` doc comments would become
// clap help text / ts_rs docs and change generated output.
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct LogsParams {
    // Maximum number of entries to return; behavior for `None` is decided by
    // `fetch_logs` — TODO(review): confirm the server-side default.
    #[ts(type = "number | null")]
    limit: Option<usize>,
    // Opaque cursor from a previous response, used to continue paging.
    cursor: Option<String>,
    // When true, page backwards (entries before the cursor) instead of forwards.
    before: bool,
}
pub async fn logs(
_: AnyContext,
LogsParams {
limit,
cursor,
before,
}: LogsParams,
) -> Result<LogResponse, Error> {
Ok(fetch_logs(LogSource::System, limit, cursor, before).await?)
}
pub fn exit(ctx: DiagnosticContext) -> Result<(), Error> {
ctx.shutdown.send(None).expect("receiver dropped");
Ok(())
@@ -83,17 +73,20 @@ pub async fn rebuild(ctx: DiagnosticContext) -> Result<(), Error> {
restart(ctx)
}
pub fn disk() -> ParentHandler {
ParentHandler::new().subcommand(
"forget",
from_fn_async(forget_disk)
.no_display()
.with_remote_cli::<CliContext>(),
)
pub fn disk<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand("forget", from_fn_async(forget_disk::<C>).no_cli())
.subcommand(
"forget",
CallRemoteHandler::<CliContext, _, _>::new(
from_fn_async(forget_disk::<RpcContext>).no_display(),
)
.no_display(),
)
}
pub async fn forget_disk(_: AnyContext) -> Result<(), Error> {
let disk_guid = Path::new("/media/embassy/config/disk.guid");
pub async fn forget_disk<C: Context>(_: C) -> Result<(), Error> {
let disk_guid = Path::new("/media/startos/config/disk.guid");
if tokio::fs::metadata(disk_guid).await.is_ok() {
tokio::fs::remove_file(disk_guid).await?;
}

View File

@@ -38,7 +38,7 @@ fn backup_existing_undo_file<'a>(path: &'a Path) -> BoxFuture<'a, Result<(), Err
pub async fn e2fsck_aggressive(
logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> {
let undo_path = Path::new("/media/embassy/config")
let undo_path = Path::new("/media/startos/config")
.join(
logicalname
.as_ref()

View File

@@ -302,7 +302,7 @@ pub async fn mount_fs<P: AsRef<Path>>(
if !guid.ends_with("_UNENC") {
// Backup LUKS header if e2fsck succeeded
let luks_folder = Path::new("/media/embassy/config/luks");
let luks_folder = Path::new("/media/startos/config/luks");
tokio::fs::create_dir_all(luks_folder).await?;
let tmp_luks_bak = luks_folder.join(format!(".{full_name}.luks.bak.tmp"));
if tokio::fs::metadata(&tmp_luks_bak).await.is_ok() {

View File

@@ -1,6 +1,8 @@
use std::path::{Path, PathBuf};
use rpc_toolkit::{from_fn_async, AnyContext, Empty, HandlerExt, ParentHandler};
use rpc_toolkit::{
from_fn_async, CallRemoteHandler, Context, Empty, HandlerExt, ParentHandler,
};
use serde::{Deserialize, Serialize};
use crate::context::{CliContext, RpcContext};
@@ -14,7 +16,7 @@ pub mod mount;
pub mod util;
pub const BOOT_RW_PATH: &str = "/media/boot-rw";
pub const REPAIR_DISK_PATH: &str = "/media/embassy/config/repair-disk";
pub const REPAIR_DISK_PATH: &str = "/media/startos/config/repair-disk";
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
@@ -40,22 +42,23 @@ impl OsPartitionInfo {
}
}
pub fn disk() -> ParentHandler {
pub fn disk<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"list",
from_fn_async(list)
.with_display_serializable()
.with_custom_display_fn::<AnyContext, _>(|handle, result| {
.with_custom_display_fn(|handle, result| {
Ok(display_disk_info(handle.params, result))
})
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand("repair", from_fn_async(|_: C| repair()).no_cli())
.subcommand(
"repair",
from_fn_async(repair)
.no_display()
.with_remote_cli::<CliContext>(),
CallRemoteHandler::<CliContext, _, _>::new(
from_fn_async(|_: RpcContext| repair()).no_display(),
),
)
}

View File

@@ -11,17 +11,21 @@ use crate::disk::mount::guard::{GenericMountGuard, MountGuard, TmpMountGuard};
use crate::prelude::*;
use crate::util::io::TmpDir;
struct OverlayFs<P0: AsRef<Path>, P1: AsRef<Path>> {
pub struct OverlayFs<P0: AsRef<Path>, P1: AsRef<Path>, P2: AsRef<Path>> {
lower: P0,
upper: P1,
work: P2,
}
impl<P0: AsRef<Path>, P1: AsRef<Path>> OverlayFs<P0, P1> {
pub fn new(lower: P0, upper: P1) -> Self {
Self { lower, upper }
impl<P0: AsRef<Path>, P1: AsRef<Path>, P2: AsRef<Path>> OverlayFs<P0, P1, P2> {
pub fn new(lower: P0, upper: P1, work: P2) -> Self {
Self { lower, upper, work }
}
}
impl<P0: AsRef<Path> + Send + Sync, P1: AsRef<Path> + Send + Sync> FileSystem
for OverlayFs<P0, P1>
impl<
P0: AsRef<Path> + Send + Sync,
P1: AsRef<Path> + Send + Sync,
P2: AsRef<Path> + Send + Sync,
> FileSystem for OverlayFs<P0, P1, P2>
{
fn mount_type(&self) -> Option<impl AsRef<str>> {
Some("overlay")
@@ -33,24 +37,20 @@ impl<P0: AsRef<Path> + Send + Sync, P1: AsRef<Path> + Send + Sync> FileSystem
[
Box::new(lazy_format!("lowerdir={}", self.lower.as_ref().display()))
as Box<dyn Display>,
Box::new(lazy_format!(
"upperdir={}/upper",
self.upper.as_ref().display()
)),
Box::new(lazy_format!(
"workdir={}/work",
self.upper.as_ref().display()
)),
Box::new(lazy_format!("upperdir={}", self.upper.as_ref().display())),
Box::new(lazy_format!("workdir={}", self.work.as_ref().display())),
]
}
async fn pre_mount(&self) -> Result<(), Error> {
tokio::fs::create_dir_all(self.upper.as_ref().join("upper")).await?;
tokio::fs::create_dir_all(self.upper.as_ref().join("work")).await?;
tokio::fs::create_dir_all(self.upper.as_ref()).await?;
tokio::fs::create_dir_all(self.work.as_ref()).await?;
Ok(())
}
async fn source_hash(
&self,
) -> Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error> {
tokio::fs::create_dir_all(self.upper.as_ref()).await?;
tokio::fs::create_dir_all(self.work.as_ref()).await?;
let mut sha = Sha256::new();
sha.update("OverlayFs");
sha.update(
@@ -77,6 +77,18 @@ impl<P0: AsRef<Path> + Send + Sync, P1: AsRef<Path> + Send + Sync> FileSystem
.as_os_str()
.as_bytes(),
);
sha.update(
tokio::fs::canonicalize(self.work.as_ref())
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
self.upper.as_ref().display().to_string(),
)
})?
.as_os_str()
.as_bytes(),
);
Ok(sha.finalize())
}
}
@@ -95,7 +107,11 @@ impl OverlayGuard {
let lower = TmpMountGuard::mount(base, ReadOnly).await?;
let upper = TmpDir::new().await?;
let inner_guard = MountGuard::mount(
&OverlayFs::new(lower.path(), upper.as_ref()),
&OverlayFs::new(
lower.path(),
upper.as_ref().join("upper"),
upper.as_ref().join("work"),
),
mountpoint,
ReadWrite,
)

View File

@@ -2,7 +2,6 @@ use std::collections::BTreeMap;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Weak};
use bytes::Buf;
use lazy_static::lazy_static;
use models::ResultExt;
use tokio::sync::Mutex;
@@ -13,7 +12,7 @@ use super::util::unmount;
use crate::util::{Invoke, Never};
use crate::Error;
pub const TMP_MOUNTPOINT: &'static str = "/media/embassy/tmp";
pub const TMP_MOUNTPOINT: &'static str = "/media/startos/tmp";
#[async_trait::async_trait]
pub trait GenericMountGuard: std::fmt::Debug + Send + Sync + 'static {

View File

@@ -58,7 +58,7 @@ impl std::error::Error for ErrorCollection {}
macro_rules! ensure_code {
($x:expr, $c:expr, $fmt:expr $(, $arg:expr)*) => {
if !($x) {
return Err(crate::error::Error::new(color_eyre::eyre::eyre!($fmt, $($arg, )*), $c));
Err::<(), _>(crate::error::Error::new(color_eyre::eyre::eyre!($fmt, $($arg, )*), $c))?;
}
};
}

View File

@@ -12,6 +12,7 @@ use tracing::instrument;
use crate::account::AccountInfo;
use crate::context::config::ServerConfig;
use crate::db::model::public::ServerStatus;
use crate::db::model::Database;
use crate::disk::mount::util::unmount;
use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH;
use crate::prelude::*;
@@ -20,8 +21,8 @@ use crate::util::cpupower::{get_available_governors, get_preferred_governor, set
use crate::util::Invoke;
use crate::{Error, ARCH};
pub const SYSTEM_REBUILD_PATH: &str = "/media/embassy/config/system-rebuild";
pub const STANDBY_MODE_PATH: &str = "/media/embassy/config/standby";
pub const SYSTEM_REBUILD_PATH: &str = "/media/startos/config/system-rebuild";
pub const STANDBY_MODE_PATH: &str = "/media/startos/config/standby";
pub async fn check_time_is_synchronized() -> Result<bool, Error> {
Ok(String::from_utf8(
@@ -179,7 +180,7 @@ pub async fn init_postgres(datadir: impl AsRef<Path>) -> Result<(), Error> {
}
pub struct InitResult {
pub db: patch_db::PatchDb,
pub db: TypedPatchDb<Database>,
}
#[instrument(skip_all)]
@@ -207,7 +208,7 @@ pub async fn init(cfg: &ServerConfig) -> Result<InitResult, Error> {
.await?;
}
let db = cfg.db().await?;
let db = TypedPatchDb::<Database>::load_unchecked(cfg.db().await?);
let peek = db.peek().await;
tracing::info!("Opened PatchDB");

View File

@@ -6,11 +6,12 @@ use clap::{value_parser, CommandFactory, FromArgMatches, Parser};
use color_eyre::eyre::eyre;
use emver::VersionRange;
use futures::{FutureExt, StreamExt};
use itertools::Itertools;
use patch_db::json_ptr::JsonPointer;
use reqwest::header::{HeaderMap, CONTENT_LENGTH};
use reqwest::Url;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::CallRemote;
use rpc_toolkit::HandlerArgs;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use tokio::sync::oneshot;
@@ -18,10 +19,10 @@ use tracing::instrument;
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::core::rpc_continuations::{RequestGuid, RpcContinuation};
use crate::db::model::package::{ManifestPreference, PackageState, PackageStateMatchModelRef};
use crate::prelude::*;
use crate::progress::{FullProgress, PhasedProgressBar};
use crate::rpc_continuations::{RequestGuid, RpcContinuation};
use crate::s9pk::manifest::PackageId;
use crate::s9pk::merkle_archive::source::http::HttpSource;
use crate::s9pk::S9pk;
@@ -110,7 +111,7 @@ pub struct InstallParams {
id: PackageId,
#[arg(short = 'm', long = "marketplace-url")]
#[ts(type = "string | null")]
marketplace_url: Option<Url>,
registry: Option<Url>,
#[arg(short = 'v', long = "version-spec")]
version_spec: Option<String>,
#[arg(long = "version-priority")]
@@ -125,7 +126,7 @@ pub async fn install(
ctx: RpcContext,
InstallParams {
id,
marketplace_url,
registry,
version_spec,
version_priority,
}: InstallParams,
@@ -135,15 +136,14 @@ pub async fn install(
Some(v) => &*v,
};
let version: VersionRange = version_str.parse()?;
let marketplace_url =
marketplace_url.unwrap_or_else(|| crate::DEFAULT_MARKETPLACE.parse().unwrap());
let registry = registry.unwrap_or_else(|| crate::DEFAULT_MARKETPLACE.parse().unwrap());
let version_priority = version_priority.unwrap_or_default();
let s9pk = S9pk::deserialize(
&HttpSource::new(
ctx.client.clone(),
format!(
"{}/package/v0/{}.s9pk?spec={}&version-priority={}",
marketplace_url, id, version, version_priority,
registry, id, version, version_priority,
)
.parse()?,
)
@@ -188,7 +188,7 @@ pub async fn sideload(ctx: RpcContext) -> Result<SideloadResponse, Error> {
.with_kind(ErrorKind::Database)?,
)
.await;
ctx.add_continuation(
ctx.rpc_continuations.add(
progress.clone(),
RpcContinuation::ws(
Box::new(|mut ws| {
@@ -203,28 +203,26 @@ pub async fn sideload(ctx: RpcContext) -> Result<SideloadResponse, Error> {
})?;
tokio::select! {
res = async {
while let Some(rev) = sub.recv().await {
if !rev.patch.0.is_empty() { // TODO: don't send empty patches?
ws.send(Message::Text(
serde_json::to_string(&if let Some(p) = db
.peek()
.await
.as_public()
.as_package_data()
.as_idx(&id)
.and_then(|e| e.as_state_info().as_installing_info()).map(|i| i.as_progress())
{
Ok::<_, ()>(p.de()?)
} else {
let mut p = FullProgress::new();
p.overall.complete();
Ok(p)
})
.with_kind(ErrorKind::Serialization)?,
))
.await
.with_kind(ErrorKind::Network)?;
}
while let Some(_) = sub.recv().await {
ws.send(Message::Text(
serde_json::to_string(&if let Some(p) = db
.peek()
.await
.as_public()
.as_package_data()
.as_idx(&id)
.and_then(|e| e.as_state_info().as_installing_info()).map(|i| i.as_progress())
{
Ok::<_, ()>(p.de()?)
} else {
let mut p = FullProgress::new();
p.overall.complete();
Ok(p)
})
.with_kind(ErrorKind::Serialization)?,
))
.await
.with_kind(ErrorKind::Network)?;
}
Ok::<_, Error>(())
} => res?,
@@ -322,15 +320,30 @@ impl FromArgMatches for CliInstallParams {
}
#[instrument(skip_all)]
pub async fn cli_install(ctx: CliContext, params: CliInstallParams) -> Result<(), RpcError> {
pub async fn cli_install(
HandlerArgs {
context: ctx,
parent_method,
method,
params,
..
}: HandlerArgs<CliContext, CliInstallParams>,
) -> Result<(), RpcError> {
let method = parent_method.into_iter().chain(method).collect_vec();
match params {
CliInstallParams::Sideload(path) => {
let file = crate::s9pk::load(&ctx, path).await?;
// rpc call remote sideload
let SideloadResponse { upload, progress } = from_value::<SideloadResponse>(
ctx.call_remote("package.sideload", imbl_value::json!({}))
.await?,
ctx.call_remote::<RpcContext>(
&method[..method.len() - 1]
.into_iter()
.chain(std::iter::once(&"sideload"))
.join("."),
imbl_value::json!({}),
)
.await?,
)?;
let upload = async {
@@ -387,7 +400,7 @@ pub async fn cli_install(ctx: CliContext, params: CliInstallParams) -> Result<()
upload?;
}
CliInstallParams::Marketplace(params) => {
ctx.call_remote("package.install", to_value(&params)?)
ctx.call_remote::<RpcContext>(&method.join("."), to_value(&params)?)
.await?;
}
}

View File

@@ -28,7 +28,6 @@ pub mod bins;
pub mod config;
pub mod context;
pub mod control;
pub mod core;
pub mod db;
pub mod dependencies;
pub mod developer;
@@ -49,6 +48,7 @@ pub mod prelude;
pub mod progress;
pub mod properties;
pub mod registry;
pub mod rpc_continuations;
pub mod s9pk;
pub mod service;
pub mod setup;
@@ -71,12 +71,14 @@ pub use error::{Error, ErrorKind, ResultExt};
use imbl_value::Value;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{
command, from_fn, from_fn_async, from_fn_blocking, AnyContext, HandlerExt, ParentHandler,
from_fn, from_fn_async, from_fn_blocking, CallRemoteHandler, Context, Empty, HandlerExt,
ParentHandler,
};
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use crate::context::CliContext;
use crate::context::{CliContext, DiagnosticContext, InstallContext, RpcContext, SetupContext};
use crate::registry::context::{RegistryContext, RegistryUrlParams};
use crate::util::serde::HandlerExtSerde;
#[derive(Deserialize, Serialize, Parser, TS)]
@@ -86,102 +88,117 @@ pub struct EchoParams {
message: String,
}
pub fn echo(_: AnyContext, EchoParams { message }: EchoParams) -> Result<String, RpcError> {
/// Echo the provided message back to the caller; works under any context,
/// which is why the context parameter is generic and ignored.
pub fn echo<C: Context>(_: C, params: EchoParams) -> Result<String, RpcError> {
    let EchoParams { message } = params;
    Ok(message)
}
pub fn main_api() -> ParentHandler {
pub fn main_api<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand("git-info", from_fn(version::git_info))
.subcommand::<C, _>("git-info", from_fn(version::git_info))
.subcommand(
"echo",
from_fn(echo)
from_fn(echo::<RpcContext>)
.with_metadata("authenticated", Value::Bool(false))
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand("init", from_fn_blocking(developer::init).no_display())
.subcommand("server", server())
.subcommand("package", package())
.subcommand("net", net::net())
.subcommand("auth", auth::auth())
.subcommand("db", db::db())
.subcommand("ssh", ssh::ssh())
.subcommand("wifi", net::wifi::wifi())
.subcommand("disk", disk::disk())
.subcommand("notification", notifications::notification())
.subcommand("backup", backup::backup())
.subcommand("marketplace", registry::marketplace::marketplace())
.subcommand("lxc", lxc::lxc())
.subcommand("server", server::<C>())
.subcommand("package", package::<C>())
.subcommand("net", net::net::<C>())
.subcommand("auth", auth::auth::<C>())
.subcommand("db", db::db::<C>())
.subcommand("ssh", ssh::ssh::<C>())
.subcommand("wifi", net::wifi::wifi::<C>())
.subcommand("disk", disk::disk::<C>())
.subcommand("notification", notifications::notification::<C>())
.subcommand("backup", backup::backup::<C>())
.subcommand(
"registry",
CallRemoteHandler::<RpcContext, _, _, RegistryUrlParams>::new(
registry::registry_api::<RegistryContext>(),
)
.no_cli(),
)
.subcommand("lxc", lxc::lxc::<C>())
.subcommand("s9pk", s9pk::rpc::s9pk())
.subcommand("util", util::rpc::util::<C>())
}
pub fn server() -> ParentHandler {
pub fn server<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"time",
from_fn_async(system::time)
.with_display_serializable()
.with_custom_display_fn::<AnyContext, _>(|handle, result| {
.with_custom_display_fn(|handle, result| {
Ok(system::display_time(handle.params, result))
})
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand("experimental", system::experimental::<C>())
.subcommand("logs", system::logs::<RpcContext>())
.subcommand(
"logs",
from_fn_async(logs::cli_logs::<RpcContext, Empty>).no_display(),
)
.subcommand("kernel-logs", system::kernel_logs::<RpcContext>())
.subcommand(
"kernel-logs",
from_fn_async(logs::cli_logs::<RpcContext, Empty>).no_display(),
)
.subcommand("experimental", system::experimental())
.subcommand("logs", system::logs())
.subcommand("kernel-logs", system::kernel_logs())
.subcommand(
"metrics",
from_fn_async(system::metrics)
.with_display_serializable()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"shutdown",
from_fn_async(shutdown::shutdown)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"restart",
from_fn_async(shutdown::restart)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"rebuild",
from_fn_async(shutdown::rebuild)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"update",
from_fn_async(update::update_system)
.with_metadata("sync_db", Value::Bool(true))
.with_custom_display_fn::<AnyContext, _>(|handle, result| {
Ok(update::display_update_result(handle.params, result))
})
.with_remote_cli::<CliContext>(),
.no_cli(),
)
.subcommand(
"update",
from_fn_async(update::cli_update_system).no_display(),
)
.subcommand(
"update-firmware",
from_fn_async(firmware::update_firmware)
.with_custom_display_fn::<AnyContext, _>(|_handle, result| {
from_fn_async(|_: RpcContext| firmware::update_firmware())
.with_custom_display_fn(|_handle, result| {
Ok(firmware::display_firmware_update_result(result))
})
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
}
pub fn package() -> ParentHandler {
pub fn package<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"action",
from_fn_async(action::action)
.with_display_serializable()
.with_custom_display_fn::<AnyContext, _>(|handle, result| {
.with_custom_display_fn(|handle, result| {
Ok(action::display_action_result(handle.params, result))
})
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"install",
@@ -196,47 +213,51 @@ pub fn package() -> ParentHandler {
from_fn_async(install::uninstall)
.with_metadata("sync_db", Value::Bool(true))
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"list",
from_fn_async(install::list)
.with_display_serializable()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand("config", config::config())
.subcommand("config", config::config::<C>())
.subcommand(
"start",
from_fn_async(control::start)
.with_metadata("sync_db", Value::Bool(true))
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"stop",
from_fn_async(control::stop)
.with_metadata("sync_db", Value::Bool(true))
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"restart",
from_fn_async(control::restart)
.with_metadata("sync_db", Value::Bool(true))
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand("logs", logs::package_logs())
.subcommand(
"logs",
from_fn_async(logs::cli_logs::<RpcContext, logs::PackageIdParams>).no_display(),
)
.subcommand("logs", logs::logs())
.subcommand(
"properties",
from_fn_async(properties::properties)
.with_custom_display_fn::<AnyContext, _>(|_handle, result| {
.with_custom_display_fn(|_handle, result| {
Ok(properties::display_properties(result))
})
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand("dependency", dependencies::dependency())
.subcommand("backup", backup::package_backup())
.subcommand("dependency", dependencies::dependency::<C>())
.subcommand("backup", backup::package_backup::<C>())
.subcommand("connect", from_fn_async(service::connect_rpc).no_cli())
.subcommand(
"connect",
@@ -244,32 +265,51 @@ pub fn package() -> ParentHandler {
)
}
pub fn diagnostic_api() -> ParentHandler {
pub fn diagnostic_api() -> ParentHandler<DiagnosticContext> {
ParentHandler::new()
.subcommand(
.subcommand::<DiagnosticContext, _>(
"git-info",
from_fn(version::git_info).with_metadata("authenticated", Value::Bool(false)),
)
.subcommand("echo", from_fn(echo).with_remote_cli::<CliContext>())
.subcommand("diagnostic", diagnostic::diagnostic())
.subcommand(
"echo",
from_fn(echo::<DiagnosticContext>).with_call_remote::<CliContext>(),
)
.subcommand("diagnostic", diagnostic::diagnostic::<DiagnosticContext>())
}
pub fn setup_api() -> ParentHandler {
pub fn setup_api() -> ParentHandler<SetupContext> {
ParentHandler::new()
.subcommand(
.subcommand::<SetupContext, _>(
"git-info",
from_fn(version::git_info).with_metadata("authenticated", Value::Bool(false)),
)
.subcommand("echo", from_fn(echo).with_remote_cli::<CliContext>())
.subcommand("setup", setup::setup())
.subcommand(
"echo",
from_fn(echo::<SetupContext>).with_call_remote::<CliContext>(),
)
.subcommand("setup", setup::setup::<SetupContext>())
}
pub fn install_api() -> ParentHandler {
pub fn install_api() -> ParentHandler<InstallContext> {
ParentHandler::new()
.subcommand(
.subcommand::<InstallContext, _>(
"git-info",
from_fn(version::git_info).with_metadata("authenticated", Value::Bool(false)),
)
.subcommand("echo", from_fn(echo).with_remote_cli::<CliContext>())
.subcommand("install", os_install::install())
.subcommand(
"echo",
from_fn(echo::<InstallContext>).with_call_remote::<CliContext>(),
)
.subcommand("install", os_install::install::<InstallContext>())
}
/// CLI-facing handler tree: everything from `main_api`, plus subcommands
/// that are otherwise exposed only under their own API roots (diagnostic,
/// setup, install, registry) and developer-key helpers (`init`, `pubkey`).
pub fn expanded_api() -> ParentHandler<CliContext> {
    main_api()
        .subcommand("init", from_fn_blocking(developer::init).no_display())
        .subcommand("pubkey", from_fn_blocking(developer::pubkey))
        .subcommand("diagnostic", diagnostic::diagnostic::<CliContext>())
        .subcommand("setup", setup::setup::<CliContext>())
        .subcommand("install", os_install::install::<CliContext>())
        .subcommand("registry", registry::registry_api::<CliContext>())
}

View File

@@ -4,13 +4,17 @@ use std::time::{Duration, UNIX_EPOCH};
use axum::extract::ws::{self, WebSocket};
use chrono::{DateTime, Utc};
use clap::Parser;
use clap::{Args, FromArgMatches, Parser};
use color_eyre::eyre::eyre;
use futures::stream::BoxStream;
use futures::{FutureExt, Stream, StreamExt, TryStreamExt};
use futures::{Future, FutureExt, Stream, StreamExt, TryStreamExt};
use itertools::Itertools;
use models::PackageId;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{command, from_fn_async, CallRemote, Empty, HandlerExt, ParentHandler};
use rpc_toolkit::{
from_fn_async, CallRemote, Context, Empty, HandlerArgs, HandlerExt, HandlerFor, ParentHandler,
};
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio::process::{Child, Command};
@@ -19,10 +23,10 @@ use tokio_tungstenite::tungstenite::Message;
use tracing::instrument;
use crate::context::{CliContext, RpcContext};
use crate::core::rpc_continuations::{RequestGuid, RpcContinuation};
use crate::error::ResultExt;
use crate::lxc::ContainerId;
use crate::prelude::*;
use crate::rpc_continuations::{RequestGuid, RpcContinuation, RpcContinuations};
use crate::util::serde::Reversible;
use crate::util::Invoke;
@@ -211,7 +215,6 @@ fn deserialize_log_message<'de, D: serde::de::Deserializer<'de>>(
pub enum LogSource {
Kernel,
Unit(&'static str),
System,
Container(ContainerId),
}
@@ -220,168 +223,195 @@ pub const SYSTEM_UNIT: &str = "startd";
#[derive(Deserialize, Serialize, Parser)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct LogsParam {
pub struct PackageIdParams {
id: PackageId,
}
#[derive(Deserialize, Serialize, Parser)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct LogsParams<Extra: FromArgMatches + Args = Empty> {
#[command(flatten)]
#[serde(flatten)]
extra: Extra,
#[arg(short = 'l', long = "limit")]
limit: Option<usize>,
#[arg(short = 'c', long = "cursor")]
#[arg(short = 'c', long = "cursor", conflicts_with = "follow")]
cursor: Option<String>,
#[arg(short = 'B', long = "before")]
#[arg(short = 'B', long = "before", conflicts_with = "follow")]
#[serde(default)]
before: bool,
}
// CLI-side wrapper around `LogsParams` adding a local `--follow` flag.
// `follow` is not forwarded as a parameter: `cli_logs` uses it to select the
// `follow` sub-endpoint instead. Plain `//` comments on purpose — `///`
// would become clap help text and change CLI output.
#[derive(Deserialize, Serialize, Parser)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct CliLogsParams<Extra: FromArgMatches + Args = Empty> {
    // Forwarded verbatim to the remote logs endpoint.
    #[command(flatten)]
    #[serde(flatten)]
    rpc_params: LogsParams<Extra>,
    // Stream new entries over a websocket continuation instead of one page.
    #[arg(short = 'f', long = "follow")]
    #[serde(default)]
    follow: bool,
}
pub fn logs() -> ParentHandler<LogsParam> {
ParentHandler::<LogsParam>::new()
#[allow(private_bounds)]
pub fn logs<
C: Context + AsRef<RpcContinuations>,
Extra: FromArgMatches + Serialize + DeserializeOwned + Args + Send + Sync + 'static,
>(
source: impl for<'a> LogSourceFn<'a, C, Extra>,
) -> ParentHandler<C, LogsParams<Extra>> {
ParentHandler::new()
.root_handler(
from_fn_async(cli_logs)
.no_display()
.with_inherited(|params, _| params),
)
.root_handler(
from_fn_async(logs_nofollow)
logs_nofollow::<C, Extra>(source.clone())
.with_inherited(|params, _| params)
.no_cli(),
)
.subcommand(
"follow",
from_fn_async(logs_follow)
logs_follow::<C, Extra>(source)
.with_inherited(|params, _| params)
.no_cli(),
)
}
pub async fn cli_logs(
ctx: CliContext,
_: Empty,
LogsParam {
id,
limit,
cursor,
before,
follow,
}: LogsParam,
) -> Result<(), RpcError> {
if follow {
if cursor.is_some() {
return Err(RpcError::from(Error::new(
eyre!("The argument '--cursor <cursor>' cannot be used with '--follow'"),
crate::ErrorKind::InvalidRequest,
)));
}
if before {
return Err(RpcError::from(Error::new(
eyre!("The argument '--before' cannot be used with '--follow'"),
crate::ErrorKind::InvalidRequest,
)));
}
cli_logs_generic_follow(ctx, "package.logs.follow", Some(id), limit).await
} else {
cli_logs_generic_nofollow(ctx, "package.logs", Some(id), limit, cursor, before).await
}
}
pub async fn logs_nofollow(
ctx: RpcContext,
_: Empty,
LogsParam {
id,
limit,
cursor,
before,
pub async fn cli_logs<RemoteContext, Extra>(
HandlerArgs {
context: ctx,
parent_method,
method,
params: CliLogsParams { rpc_params, follow },
..
}: LogsParam,
) -> Result<LogResponse, Error> {
let container_id = ctx
.services
.get(&id)
.await
.as_ref()
.map(|x| x.container_id())
.ok_or_else(|| {
Error::new(
eyre!("No service found with id: {}", id),
ErrorKind::NotFound,
)
})??;
fetch_logs(LogSource::Container(container_id), limit, cursor, before).await
}
pub async fn logs_follow(
ctx: RpcContext,
_: Empty,
LogsParam { id, limit, .. }: LogsParam,
) -> Result<LogFollowResponse, Error> {
let container_id = ctx
.services
.get(&id)
.await
.as_ref()
.map(|x| x.container_id())
.ok_or_else(|| {
Error::new(
eyre!("No service found with id: {}", id),
ErrorKind::NotFound,
)
})??;
follow_logs(ctx, LogSource::Container(container_id), limit).await
}
}: HandlerArgs<CliContext, CliLogsParams<Extra>>,
) -> Result<(), RpcError>
where
CliContext: CallRemote<RemoteContext>,
Extra: FromArgMatches + Args + Serialize + Send + Sync,
{
let method = parent_method
.into_iter()
.chain(method)
.chain(follow.then_some("follow"))
.join(".");
pub async fn cli_logs_generic_nofollow(
ctx: CliContext,
method: &str,
id: Option<PackageId>,
limit: Option<usize>,
cursor: Option<String>,
before: bool,
) -> Result<(), RpcError> {
let res = from_value::<LogResponse>(
ctx.call_remote(
method,
imbl_value::json!({
"id": id,
"limit": limit,
"cursor": cursor,
"before": before,
}),
)
.await?,
)?;
if follow {
let res = from_value::<LogFollowResponse>(
ctx.call_remote::<RemoteContext>(&method, to_value(&rpc_params)?)
.await?,
)?;
for entry in res.entries.iter() {
println!("{}", entry);
}
let mut stream = ctx.ws_continuation(res.guid).await?;
while let Some(log) = stream.try_next().await? {
if let Message::Text(log) = log {
println!("{}", serde_json::from_str::<LogEntry>(&log)?);
}
}
} else {
let res = from_value::<LogResponse>(
ctx.call_remote::<RemoteContext>(&method, to_value(&rpc_params)?)
.await?,
)?;
Ok(())
}
pub async fn cli_logs_generic_follow(
ctx: CliContext,
method: &str,
id: Option<PackageId>,
limit: Option<usize>,
) -> Result<(), RpcError> {
let res = from_value::<LogFollowResponse>(
ctx.call_remote(
method,
imbl_value::json!({
"id": id,
"limit": limit,
}),
)
.await?,
)?;
let mut stream = ctx.ws_continuation(res.guid).await?;
while let Some(log) = stream.try_next().await? {
if let Message::Text(log) = log {
println!("{}", serde_json::from_str::<LogEntry>(&log)?);
for entry in res.entries.iter() {
println!("{}", entry);
}
}
Ok(())
}
/// Abstraction over an async resolver that produces a [`LogSource`] from a
/// borrowed context plus extra (handler-provided) parameters.
///
/// A named trait is used here (instead of a plain `Fn` bound) so that the
/// returned future can carry the `'a` lifetime of the borrowed context —
/// note the `Send + 'a` bound on [`Self::Fut`].
trait LogSourceFn<'a, Context, Extra>: Clone + Send + Sync + 'static {
    // Future returned by `call`; may borrow the context for `'a`.
    type Fut: Future<Output = Result<LogSource, Error>> + Send + 'a;
    // Resolve the log source for the given context and extra parameters.
    fn call(&self, ctx: &'a Context, extra: Extra) -> Self::Fut;
}
/// Blanket implementation: any cloneable, thread-safe async closure of shape
/// `Fn(&C, Extra) -> impl Future<Output = Result<LogSource, Error>>`
/// automatically satisfies [`LogSourceFn`], so callers can pass ordinary
/// async fns/closures (e.g. `get_package_id`) directly.
impl<'a, C: Context, Extra, F, Fut> LogSourceFn<'a, C, Extra> for F
where
    F: Fn(&'a C, Extra) -> Fut + Clone + Send + Sync + 'static,
    Fut: Future<Output = Result<LogSource, Error>> + Send + 'a,
{
    type Fut = Fut;
    fn call(&self, ctx: &'a C, extra: Extra) -> Self::Fut {
        // Simply invoke the closure itself.
        self(ctx, extra)
    }
}
/// Build the non-streaming (one-shot fetch) logs handler around a log-source
/// resolver.
///
/// The produced handler takes no direct params ([`Empty`]); it reads the
/// inherited [`LogsParams`] (limit / cursor / before plus the `Extra`
/// source-selection data), resolves the [`LogSource`] via `source`, and
/// delegates to [`fetch_logs`].
fn logs_nofollow<C, Extra>(
    source: impl for<'a> LogSourceFn<'a, C, Extra>,
) -> impl HandlerFor<C, Params = Empty, InheritedParams = LogsParams<Extra>, Ok = LogResponse, Err = Error>
where
    C: Context,
    Extra: FromArgMatches + Args + Send + Sync + 'static,
{
    from_fn_async(
        move |handler_args: HandlerArgs<C, Empty, LogsParams<Extra>>| {
            // Clone the resolver so each invocation's async block can own it.
            let source = source.clone();
            let HandlerArgs {
                context,
                inherited_params:
                    LogsParams {
                        extra,
                        limit,
                        cursor,
                        before,
                    },
                ..
            } = handler_args;
            async move {
                let src = source.call(&context, extra).await?;
                fetch_logs(src, limit, cursor, before).await
            }
        },
    )
}
/// Build the streaming (`follow`) logs handler around a log-source resolver.
///
/// Mirrors `logs_nofollow`, but ignores cursor/before and delegates to
/// [`follow_logs`], which registers a websocket continuation — hence the
/// `AsRef<RpcContinuations>` bound on the context.
fn logs_follow<
    C: Context + AsRef<RpcContinuations>,
    Extra: FromArgMatches + Args + Send + Sync + 'static,
>(
    source: impl for<'a> LogSourceFn<'a, C, Extra>,
) -> impl HandlerFor<
    C,
    Params = Empty,
    InheritedParams = LogsParams<Extra>,
    Ok = LogFollowResponse,
    Err = Error,
> {
    from_fn_async(
        move |handler_args: HandlerArgs<C, Empty, LogsParams<Extra>>| {
            // Clone the resolver so each invocation's async block can own it.
            let source = source.clone();
            let HandlerArgs {
                context,
                inherited_params: LogsParams { extra, limit, .. },
                ..
            } = handler_args;
            async move {
                // Resolve the source first; `context` is then moved into
                // `follow_logs`, which owns the continuation registration.
                let src = source.call(&context, extra).await?;
                follow_logs(context, src, limit).await
            }
        },
    )
}
async fn get_package_id(
ctx: &RpcContext,
PackageIdParams { id }: PackageIdParams,
) -> Result<LogSource, Error> {
let container_id = ctx
.services
.get(&id)
.await
.as_ref()
.map(|x| x.container_id())
.ok_or_else(|| {
Error::new(
eyre!("No service found with id: {}", id),
ErrorKind::NotFound,
)
})??;
Ok(LogSource::Container(container_id))
}
/// Logs handler subtree for per-package logs: wires the generic `logs`
/// handler to `get_package_id`, which selects the package's container as the
/// log source based on the `PackageIdParams` extra parameters.
pub fn package_logs() -> ParentHandler<RpcContext, LogsParams<PackageIdParams>> {
    logs::<RpcContext, PackageIdParams>(get_package_id)
}
pub async fn journalctl(
id: LogSource,
limit: usize,
@@ -474,10 +504,6 @@ fn gen_journalctl_command(id: &LogSource) -> Command {
cmd.arg("-u");
cmd.arg(id);
}
LogSource::System => {
cmd.arg("-u");
cmd.arg(SYSTEM_UNIT);
}
LogSource::Container(_container_id) => {
cmd.arg("-u").arg("container-runtime.service");
}
@@ -533,8 +559,8 @@ pub async fn fetch_logs(
}
#[instrument(skip_all)]
pub async fn follow_logs(
ctx: RpcContext,
pub async fn follow_logs<Context: AsRef<RpcContinuations>>(
ctx: Context,
id: LogSource,
limit: Option<usize>,
) -> Result<LogFollowResponse, Error> {
@@ -556,23 +582,24 @@ pub async fn follow_logs(
}
let guid = RequestGuid::new();
ctx.add_continuation(
guid.clone(),
RpcContinuation::ws(
Box::new(move |socket| {
ws_handler(first_entry, stream, socket)
.map(|x| match x {
Ok(_) => (),
Err(e) => {
tracing::error!("Error in log stream: {}", e);
}
})
.boxed()
}),
Duration::from_secs(30),
),
)
.await;
ctx.as_ref()
.add(
guid.clone(),
RpcContinuation::ws(
Box::new(move |socket| {
ws_handler(first_entry, stream, socket)
.map(|x| match x {
Ok(_) => (),
Err(e) => {
tracing::error!("Error in log stream: {}", e);
}
})
.boxed()
}),
Duration::from_secs(30),
),
)
.await;
Ok(LogFollowResponse { start_cursor, guid })
}

View File

@@ -12,8 +12,8 @@ use imbl_value::{InOMap, InternedString};
use models::InvalidId;
use rpc_toolkit::yajrc::{RpcError, RpcResponse};
use rpc_toolkit::{
from_fn_async, AnyContext, CallRemoteHandler, GenericRpcMethod, Handler, HandlerArgs,
HandlerExt, ParentHandler, RpcRequest,
from_fn_async, CallRemoteHandler, Context, Empty, GenericRpcMethod, HandlerArgs, HandlerExt,
HandlerFor, ParentHandler, RpcRequest,
};
use rustyline_async::{ReadlineEvent, SharedWriter};
use serde::{Deserialize, Serialize};
@@ -25,7 +25,6 @@ use tokio::time::Instant;
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::core::rpc_continuations::{RequestGuid, RpcContinuation};
use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::block_dev::BlockDev;
use crate::disk::mount::filesystem::idmapped::IdMapped;
@@ -34,6 +33,7 @@ use crate::disk::mount::filesystem::{MountType, ReadWrite};
use crate::disk::mount::guard::{GenericMountGuard, MountGuard, TmpMountGuard};
use crate::disk::mount::util::unmount;
use crate::prelude::*;
use crate::rpc_continuations::{RequestGuid, RpcContinuation};
use crate::util::clap::FromStrParser;
use crate::util::rpc_client::UnixRpcClient;
use crate::util::{new_guid, Invoke};
@@ -370,16 +370,16 @@ impl Drop for LxcContainer {
#[derive(Default, Serialize)]
pub struct LxcConfig {}
pub fn lxc() -> ParentHandler {
pub fn lxc<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"create",
from_fn_async(create).with_remote_cli::<CliContext>(),
from_fn_async(create).with_call_remote::<CliContext>(),
)
.subcommand(
"list",
from_fn_async(list)
.with_custom_display_fn::<AnyContext, _>(|_, res| {
.with_custom_display_fn(|_, res| {
use prettytable::*;
let mut table = table!([bc => "GUID"]);
for guid in res {
@@ -388,13 +388,13 @@ pub fn lxc() -> ParentHandler {
table.printstd();
Ok(())
})
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"remove",
from_fn_async(remove)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand("connect", from_fn_async(connect_rpc).no_cli())
.subcommand("connect", from_fn_async(connect_rpc_cli).no_display())
@@ -448,59 +448,59 @@ pub async fn connect(ctx: &RpcContext, container: &LxcContainer) -> Result<Reque
let rpc = container.connect_rpc(Some(Duration::from_secs(30))).await?;
let guid = RequestGuid::new();
ctx.add_continuation(
guid.clone(),
RpcContinuation::ws(
Box::new(|mut ws| {
async move {
if let Err(e) = async {
loop {
match ws.next().await {
None => break,
Some(Ok(Message::Text(txt))) => {
let mut id = None;
let result = async {
let req: RpcRequest =
serde_json::from_str(&txt).map_err(|e| RpcError {
data: Some(serde_json::Value::String(
e.to_string(),
)),
..rpc_toolkit::yajrc::PARSE_ERROR
})?;
id = req.id;
rpc.request(req.method, req.params).await
ctx.rpc_continuations
.add(
guid.clone(),
RpcContinuation::ws(
Box::new(|mut ws| {
async move {
if let Err(e) = async {
loop {
match ws.next().await {
None => break,
Some(Ok(Message::Text(txt))) => {
let mut id = None;
let result = async {
let req: RpcRequest = serde_json::from_str(&txt)
.map_err(|e| RpcError {
data: Some(serde_json::Value::String(
e.to_string(),
)),
..rpc_toolkit::yajrc::PARSE_ERROR
})?;
id = req.id;
rpc.request(req.method, req.params).await
}
.await;
ws.send(Message::Text(
serde_json::to_string(
&RpcResponse::<GenericRpcMethod> { id, result },
)
.with_kind(ErrorKind::Serialization)?,
))
.await
.with_kind(ErrorKind::Network)?;
}
Some(Ok(_)) => (),
Some(Err(e)) => {
return Err(Error::new(e, ErrorKind::Network));
}
.await;
ws.send(Message::Text(
serde_json::to_string(&RpcResponse::<GenericRpcMethod> {
id,
result,
})
.with_kind(ErrorKind::Serialization)?,
))
.await
.with_kind(ErrorKind::Network)?;
}
Some(Ok(_)) => (),
Some(Err(e)) => {
return Err(Error::new(e, ErrorKind::Network));
}
}
Ok::<_, Error>(())
}
.await
{
tracing::error!("{e}");
tracing::debug!("{e:?}");
}
Ok::<_, Error>(())
}
.await
{
tracing::error!("{e}");
tracing::debug!("{e:?}");
}
}
.boxed()
}),
Duration::from_secs(30),
),
)
.await;
.boxed()
}),
Duration::from_secs(30),
),
)
.await;
Ok(guid)
}
@@ -614,11 +614,25 @@ pub async fn connect_cli(ctx: &CliContext, guid: RequestGuid) -> Result<(), Erro
}
pub async fn connect_rpc_cli(
handle_args: HandlerArgs<CliContext, ConnectParams>,
HandlerArgs {
context,
parent_method,
method,
params,
inherited_params,
raw_params,
}: HandlerArgs<CliContext, ConnectParams>,
) -> Result<(), Error> {
let ctx = handle_args.context.clone();
let guid = CallRemoteHandler::<CliContext, _>::new(from_fn_async(connect_rpc))
.handle_async(handle_args)
let ctx = context.clone();
let guid = CallRemoteHandler::<CliContext, _, _>::new(from_fn_async(connect_rpc))
.handle_async(HandlerArgs {
context,
parent_method,
method,
params: rpc_toolkit::util::Flat(params, Empty {}),
inherited_params,
raw_params,
})
.await?;
connect_cli(&ctx, guid).await

View File

@@ -245,7 +245,6 @@ impl Borrow<str> for HashSessionToken {
}
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Metadata {
#[serde(default = "const_true")]
authenticated: bool,
@@ -274,7 +273,6 @@ impl Auth {
}
}
}
#[async_trait::async_trait]
impl Middleware<RpcContext> for Auth {
type Metadata = Metadata;
async fn process_http_request(
@@ -306,7 +304,7 @@ impl Middleware<RpcContext> for Auth {
});
}
if let Some(user_agent) = self.user_agent.as_ref().and_then(|h| h.to_str().ok()) {
request.params["user-agent"] = Value::String(Arc::new(user_agent.to_owned()))
request.params["__auth_userAgent"] = Value::String(Arc::new(user_agent.to_owned()))
// TODO: will this panic?
}
} else if metadata.authenticated {
@@ -318,7 +316,7 @@ impl Middleware<RpcContext> for Auth {
})
}
Ok(HasValidSession(SessionType::Session(s))) if metadata.get_session => {
request.params["session"] =
request.params["__auth_session"] =
Value::String(Arc::new(s.hashed().deref().to_owned()));
// TODO: will this panic?
}

View File

@@ -44,8 +44,7 @@ impl Cors {
}
}
}
#[async_trait::async_trait]
impl<Context: Send + 'static> Middleware<Context> for Cors {
impl<Context: Send + Sync + 'static> Middleware<Context> for Cors {
type Metadata = Empty;
async fn process_http_request(
&mut self,

View File

@@ -23,7 +23,6 @@ impl SyncDb {
}
}
#[async_trait::async_trait]
impl Middleware<RpcContext> for SyncDb {
type Metadata = Metadata;
async fn process_rpc_request(

View File

@@ -14,7 +14,6 @@ impl DiagnosticMode {
}
}
#[async_trait::async_trait]
impl Middleware<DiagnosticContext> for DiagnosticMode {
type Metadata = Empty;
async fn process_rpc_request(

View File

@@ -3,7 +3,7 @@ use std::net::IpAddr;
use clap::Parser;
use futures::TryStreamExt;
use rpc_toolkit::{from_fn_async, HandlerExt, ParentHandler};
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use tokio::sync::RwLock;
use ts_rs::TS;
@@ -53,12 +53,12 @@ pub async fn init_ips() -> Result<BTreeMap<String, IpInfo>, Error> {
}
// #[command(subcommands(update))]
pub fn dhcp() -> ParentHandler {
pub fn dhcp<C: Context>() -> ParentHandler<C> {
ParentHandler::new().subcommand(
"update",
from_fn_async::<_, _, (), Error, (RpcContext, UpdateParams)>(update)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
}
#[derive(Deserialize, Serialize, Parser, TS)]

View File

@@ -1,4 +1,4 @@
use rpc_toolkit::ParentHandler;
use rpc_toolkit::{Context, ParentHandler};
pub mod dhcp;
pub mod dns;
@@ -18,8 +18,8 @@ pub mod wifi;
pub const PACKAGE_CERT_PATH: &str = "/var/lib/embassy/ssl";
pub fn net() -> ParentHandler {
pub fn net<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand("tor", tor::tor())
.subcommand("dhcp", dhcp::dhcp())
.subcommand("tor", tor::tor::<C>())
.subcommand("dhcp", dhcp::dhcp::<C>())
}

View File

@@ -6,12 +6,10 @@ use color_eyre::eyre::eyre;
use imbl::OrdMap;
use lazy_format::lazy_format;
use models::{HostId, OptionExt, PackageId};
use patch_db::PatchDb;
use tokio::sync::Mutex;
use torut::onion::{OnionAddressV3, TorSecretKeyV3};
use tracing::instrument;
use crate::db::prelude::PatchDbExt;
use crate::db::model::Database;
use crate::error::ErrorCollection;
use crate::hostname::Hostname;
use crate::net::dns::DnsController;
@@ -21,11 +19,12 @@ use crate::net::host::binding::{AddSslOptions, BindOptions};
use crate::net::host::{Host, HostKind};
use crate::net::tor::TorController;
use crate::net::vhost::{AlpnInfo, VHostController};
use crate::prelude::*;
use crate::util::serde::MaybeUtf8String;
use crate::{Error, HOST_IP};
use crate::HOST_IP;
pub struct NetController {
db: PatchDb,
db: TypedPatchDb<Database>,
pub(super) tor: TorController,
pub(super) vhost: VHostController,
pub(super) dns: DnsController,
@@ -36,7 +35,7 @@ pub struct NetController {
impl NetController {
#[instrument(skip_all)]
pub async fn init(
db: PatchDb,
db: TypedPatchDb<Database>,
tor_control: SocketAddr,
tor_socks: SocketAddr,
dns_bind: &[SocketAddr],
@@ -394,14 +393,23 @@ impl NetService {
pub fn get_ext_port(&self, host_id: HostId, internal_port: u16) -> Result<u16, Error> {
let host_id_binds = self.binds.get_key_value(&host_id);
match host_id_binds {
Some((id, binds)) => {
Some((_, binds)) => {
if let Some(ext_port_info) = binds.lan.get(&internal_port) {
Ok(ext_port_info.0)
} else {
Err(Error::new(eyre!("Internal Port {} not found in NetService binds", internal_port), crate::ErrorKind::NotFound))
Err(Error::new(
eyre!(
"Internal Port {} not found in NetService binds",
internal_port
),
crate::ErrorKind::NotFound,
))
}
},
None => Err(Error::new(eyre!("HostID {} not found in NetService binds", host_id), crate::ErrorKind::NotFound))
}
None => Err(Error::new(
eyre!("HostID {} not found in NetService binds", host_id),
crate::ErrorKind::NotFound,
)),
}
}
}

View File

@@ -17,7 +17,6 @@ use openssl::x509::{X509Builder, X509Extension, X509NameBuilder, X509};
use openssl::*;
use patch_db::HasModel;
use serde::{Deserialize, Serialize};
use tokio::sync::Mutex;
use tracing::instrument;
use crate::account::AccountInfo;

View File

@@ -24,18 +24,19 @@ use tokio::io::BufReader;
use tokio_util::io::ReaderStream;
use crate::context::{DiagnosticContext, InstallContext, RpcContext, SetupContext};
use crate::core::rpc_continuations::RequestGuid;
use crate::db::subscribe;
use crate::hostname::Hostname;
use crate::middleware::auth::{Auth, HasValidSession};
use crate::middleware::cors::Cors;
use crate::middleware::db::SyncDb;
use crate::middleware::diagnostic::DiagnosticMode;
use crate::rpc_continuations::RequestGuid;
use crate::{diagnostic_api, install_api, main_api, setup_api, Error, ErrorKind, ResultExt};
const NOT_FOUND: &[u8] = b"Not Found";
const METHOD_NOT_ALLOWED: &[u8] = b"Method Not Allowed";
const NOT_AUTHORIZED: &[u8] = b"Not Authorized";
const INTERNAL_SERVER_ERROR: &[u8] = b"Internal Server Error";
#[cfg(all(feature = "daemon", not(feature = "test")))]
const EMBEDDED_UIS: Dir<'_> =
@@ -112,7 +113,7 @@ pub fn main_ui_server_router(ctx: RpcContext) -> Router {
.route("/rpc/*path", {
let ctx = ctx.clone();
post(
Server::new(move || ready(Ok(ctx.clone())), main_api())
Server::new(move || ready(Ok(ctx.clone())), main_api::<RpcContext>())
.middleware(Cors::new())
.middleware(Auth::new())
.middleware(SyncDb::new()),
@@ -140,7 +141,7 @@ pub fn main_ui_server_router(ctx: RpcContext) -> Router {
tracing::debug!("No Guid Path");
bad_request()
}
Some(guid) => match ctx.get_ws_continuation_handler(&guid).await {
Some(guid) => match ctx.rpc_continuations.get_ws_handler(&guid).await {
Some(cont) => ws.on_upgrade(cont),
_ => not_found(),
},
@@ -163,7 +164,7 @@ pub fn main_ui_server_router(ctx: RpcContext) -> Router {
tracing::debug!("No Guid Path");
bad_request()
}
Some(guid) => match ctx.get_rest_continuation_handler(&guid).await {
Some(guid) => match ctx.rpc_continuations.get_rest_handler(&guid).await {
None => not_found(),
Some(cont) => cont(request).await.unwrap_or_else(server_error),
},
@@ -216,7 +217,7 @@ async fn if_authorized<
) -> Result<Response, Error> {
if let Err(e) = HasValidSession::from_header(parts.headers.get(http::header::COOKIE), ctx).await
{
un_authorized(e, parts.uri.path())
Ok(unauthorized(e, parts.uri.path()))
} else {
f().await
}
@@ -305,17 +306,17 @@ async fn main_start_os_ui(req: Request, ctx: RpcContext) -> Result<Response, Err
}
}
fn un_authorized(err: Error, path: &str) -> Result<Response, Error> {
pub fn unauthorized(err: Error, path: &str) -> Response {
tracing::warn!("unauthorized for {} @{:?}", err, path);
tracing::debug!("{:?}", err);
Ok(Response::builder()
Response::builder()
.status(StatusCode::UNAUTHORIZED)
.body(NOT_AUTHORIZED.into())
.unwrap())
.unwrap()
}
/// HTTP status code 404
fn not_found() -> Response {
pub fn not_found() -> Response {
Response::builder()
.status(StatusCode::NOT_FOUND)
.body(NOT_FOUND.into())
@@ -323,21 +324,23 @@ fn not_found() -> Response {
}
/// HTTP status code 405
fn method_not_allowed() -> Response {
pub fn method_not_allowed() -> Response {
Response::builder()
.status(StatusCode::METHOD_NOT_ALLOWED)
.body(METHOD_NOT_ALLOWED.into())
.unwrap()
}
fn server_error(err: Error) -> Response {
pub fn server_error(err: Error) -> Response {
tracing::error!("internal server error: {}", err);
tracing::debug!("{:?}", err);
Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body(err.to_string().into())
.body(INTERNAL_SERVER_ERROR.into())
.unwrap()
}
fn bad_request() -> Response {
pub fn bad_request() -> Response {
Response::builder()
.status(StatusCode::BAD_REQUEST)
.body(Body::empty())

View File

@@ -12,8 +12,7 @@ use helpers::NonDetachingJoinHandle;
use itertools::Itertools;
use lazy_static::lazy_static;
use regex::Regex;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{command, from_fn_async, AnyContext, Empty, HandlerExt, ParentHandler};
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use tokio::net::TcpStream;
use tokio::process::Command;
@@ -25,10 +24,7 @@ use tracing::instrument;
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::logs::{
cli_logs_generic_follow, cli_logs_generic_nofollow, fetch_logs, follow_logs, journalctl,
LogFollowResponse, LogResponse, LogSource,
};
use crate::logs::{journalctl, LogSource, LogsParams};
use crate::prelude::*;
use crate::util::serde::{display_serializable, HandlerExtSerde, WithIoFormat};
use crate::util::Invoke;
@@ -86,23 +82,27 @@ lazy_static! {
static ref PROGRESS_REGEX: Regex = Regex::new("PROGRESS=([0-9]+)").unwrap();
}
pub fn tor() -> ParentHandler {
pub fn tor<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"list-services",
from_fn_async(list_services)
.with_display_serializable()
.with_custom_display_fn::<AnyContext, _>(|handle, result| {
.with_custom_display_fn(|handle, result| {
Ok(display_services(handle.params, result))
})
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand("logs", logs())
.subcommand(
"logs",
from_fn_async(crate::logs::cli_logs::<RpcContext, Empty>).no_display(),
)
.subcommand(
"reset",
from_fn_async(reset)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
}
#[derive(Deserialize, Serialize, Parser, TS)]
@@ -143,89 +143,10 @@ pub async fn list_services(ctx: RpcContext, _: Empty) -> Result<Vec<OnionAddress
ctx.net_controller.tor.list_services().await
}
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct LogsParams {
#[arg(short = 'l', long = "limit")]
#[ts(type = "number | null")]
limit: Option<usize>,
#[arg(short = 'c', long = "cursor")]
cursor: Option<String>,
#[arg(short = 'B', long = "before")]
#[serde(default)]
before: bool,
#[arg(short = 'f', long = "follow")]
#[serde(default)]
follow: bool,
}
pub fn logs() -> ParentHandler<LogsParams> {
ParentHandler::new()
.root_handler(
from_fn_async(cli_logs)
.no_display()
.with_inherited(|params, _| params),
)
.root_handler(
from_fn_async(logs_nofollow)
.with_inherited(|params, _| params)
.no_cli(),
)
.subcommand(
"follow",
from_fn_async(logs_follow)
.with_inherited(|params, _| params)
.no_cli(),
)
}
pub async fn cli_logs(
ctx: CliContext,
_: Empty,
LogsParams {
limit,
cursor,
before,
follow,
}: LogsParams,
) -> Result<(), RpcError> {
if follow {
if cursor.is_some() {
return Err(RpcError::from(Error::new(
eyre!("The argument '--cursor <cursor>' cannot be used with '--follow'"),
crate::ErrorKind::InvalidRequest,
)));
}
if before {
return Err(RpcError::from(Error::new(
eyre!("The argument '--before' cannot be used with '--follow'"),
crate::ErrorKind::InvalidRequest,
)));
}
cli_logs_generic_follow(ctx, "net.tor.logs.follow", None, limit).await
} else {
cli_logs_generic_nofollow(ctx, "net.tor.logs", None, limit, cursor, before).await
}
}
pub async fn logs_nofollow(
_: AnyContext,
_: Empty,
LogsParams {
limit,
cursor,
before,
..
}: LogsParams,
) -> Result<LogResponse, Error> {
fetch_logs(LogSource::Unit(SYSTEMD_UNIT), limit, cursor, before).await
}
pub async fn logs_follow(
ctx: RpcContext,
_: Empty,
LogsParams { limit, .. }: LogsParams,
) -> Result<LogFollowResponse, Error> {
follow_logs(ctx, LogSource::Unit(SYSTEMD_UNIT), limit).await
pub fn logs() -> ParentHandler<RpcContext, LogsParams> {
crate::logs::logs::<RpcContext, Empty>(|_: &RpcContext, _| async {
Ok(LogSource::Unit(SYSTEMD_UNIT))
})
}
fn event_handler(_event: AsyncEvent<'static>) -> BoxFuture<'static, Result<(), ConnError>> {

View File

@@ -19,6 +19,7 @@ use tokio_rustls::{LazyConfigAcceptor, TlsConnector};
use tracing::instrument;
use ts_rs::TS;
use crate::db::model::Database;
use crate::prelude::*;
use crate::util::io::{BackTrackingReader, TimeoutStream};
use crate::util::serde::MaybeUtf8String;
@@ -26,11 +27,11 @@ use crate::util::serde::MaybeUtf8String;
// not allowed: <=1024, >=32768, 5355, 5432, 9050, 6010, 9051, 5353
pub struct VHostController {
db: PatchDb,
db: TypedPatchDb<Database>,
servers: Mutex<BTreeMap<u16, VHostServer>>,
}
impl VHostController {
pub fn new(db: PatchDb) -> Self {
pub fn new(db: TypedPatchDb<Database>) -> Self {
Self {
db,
servers: Mutex::new(BTreeMap::new()),
@@ -100,7 +101,7 @@ struct VHostServer {
}
impl VHostServer {
#[instrument(skip_all)]
async fn new(port: u16, db: PatchDb) -> Result<Self, Error> {
async fn new(port: u16, db: TypedPatchDb<Database>) -> Result<Self, Error> {
// check if port allowed
let listener = TcpListener::bind(SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), port))
.await

View File

@@ -8,7 +8,7 @@ use clap::Parser;
use isocountry::CountryCode;
use lazy_static::lazy_static;
use regex::Regex;
use rpc_toolkit::{command, from_fn_async, AnyContext, Empty, HandlerExt, ParentHandler};
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use tokio::process::Command;
use tokio::sync::RwLock;
@@ -17,6 +17,7 @@ use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::db::model::public::WifiInfo;
use crate::db::model::Database;
use crate::net::utils::find_wifi_iface;
use crate::prelude::*;
use crate::util::serde::{display_serializable, HandlerExtSerde, WithIoFormat};
@@ -36,57 +37,55 @@ pub fn wifi_manager(ctx: &RpcContext) -> Result<&WifiManager, Error> {
}
}
pub fn wifi() -> ParentHandler {
pub fn wifi<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"add",
from_fn_async(add)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"connect",
from_fn_async(connect)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"delete",
from_fn_async(delete)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"get",
from_fn_async(get)
.with_display_serializable()
.with_custom_display_fn::<AnyContext, _>(|handle, result| {
.with_custom_display_fn(|handle, result| {
Ok(display_wifi_info(handle.params, result))
})
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand("country", country())
.subcommand("available", available())
.subcommand("country", country::<C>())
.subcommand("available", available::<C>())
}
pub fn available() -> ParentHandler {
pub fn available<C: Context>() -> ParentHandler<C> {
ParentHandler::new().subcommand(
"get",
from_fn_async(get_available)
.with_display_serializable()
.with_custom_display_fn::<AnyContext, _>(|handle, result| {
Ok(display_wifi_list(handle.params, result))
})
.with_remote_cli::<CliContext>(),
.with_custom_display_fn(|handle, result| Ok(display_wifi_list(handle.params, result)))
.with_call_remote::<CliContext>(),
)
}
pub fn country() -> ParentHandler {
pub fn country<C: Context>() -> ParentHandler<C> {
ParentHandler::new().subcommand(
"set",
from_fn_async(set_country)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
}
@@ -113,7 +112,7 @@ pub async fn add(ctx: RpcContext, AddParams { ssid, password }: AddParams) -> Re
));
}
async fn add_procedure(
db: PatchDb,
db: TypedPatchDb<Database>,
wifi_manager: WifiManager,
ssid: &Ssid,
password: &Psk,
@@ -170,7 +169,7 @@ pub async fn connect(ctx: RpcContext, SsidParams { ssid }: SsidParams) -> Result
));
}
async fn connect_procedure(
db: PatchDb,
db: TypedPatchDb<Database>,
wifi_manager: WifiManager,
ssid: &Ssid,
) -> Result<(), Error> {
@@ -718,7 +717,7 @@ impl WpaCli {
Ok(())
}
pub async fn save_config(&mut self, db: PatchDb) -> Result<(), Error> {
pub async fn save_config(&mut self, db: TypedPatchDb<Database>) -> Result<(), Error> {
let new_country = self.get_country_low().await?;
db.mutate(|d| {
d.as_public_mut()
@@ -758,7 +757,11 @@ impl WpaCli {
.collect())
}
#[instrument(skip_all)]
pub async fn select_network(&mut self, db: PatchDb, ssid: &Ssid) -> Result<bool, Error> {
pub async fn select_network(
&mut self,
db: TypedPatchDb<Database>,
ssid: &Ssid,
) -> Result<bool, Error> {
let m_id = self.check_active_network(ssid).await?;
match m_id {
None => Err(Error::new(
@@ -810,7 +813,11 @@ impl WpaCli {
}
}
#[instrument(skip_all)]
pub async fn remove_network(&mut self, db: PatchDb, ssid: &Ssid) -> Result<bool, Error> {
pub async fn remove_network(
&mut self,
db: TypedPatchDb<Database>,
ssid: &Ssid,
) -> Result<bool, Error> {
let found_networks = self.find_networks(ssid).await?;
if found_networks.is_empty() {
return Ok(true);
@@ -824,7 +831,7 @@ impl WpaCli {
#[instrument(skip_all)]
pub async fn set_add_network(
&mut self,
db: PatchDb,
db: TypedPatchDb<Database>,
ssid: &Ssid,
psk: &Psk,
) -> Result<(), Error> {
@@ -833,7 +840,12 @@ impl WpaCli {
Ok(())
}
#[instrument(skip_all)]
pub async fn add_network(&mut self, db: PatchDb, ssid: &Ssid, psk: &Psk) -> Result<(), Error> {
pub async fn add_network(
&mut self,
db: TypedPatchDb<Database>,
ssid: &Ssid,
psk: &Psk,
) -> Result<(), Error> {
self.add_network_low(ssid, psk).await?;
self.save_config(db).await?;
Ok(())

View File

@@ -8,7 +8,7 @@ use clap::Parser;
use color_eyre::eyre::eyre;
use imbl_value::InternedString;
use models::PackageId;
use rpc_toolkit::{command, from_fn_async, HandlerExt, ParentHandler};
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use tracing::instrument;
use ts_rs::TS;
@@ -21,31 +21,31 @@ use crate::util::clap::FromStrParser;
use crate::util::serde::HandlerExtSerde;
// #[command(subcommands(list, delete, delete_before, create))]
pub fn notification() -> ParentHandler {
pub fn notification<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"list",
from_fn_async(list)
.with_display_serializable()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"delete",
from_fn_async(delete)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"delete-before",
from_fn_async(delete_before)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"create",
from_fn_async(create)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
}

View File

@@ -3,7 +3,7 @@ use std::path::{Path, PathBuf};
use clap::Parser;
use color_eyre::eyre::eyre;
use models::Error;
use rpc_toolkit::{command, from_fn_async, AnyContext, HandlerExt, ParentHandler};
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use tokio::process::Command;
use ts_rs::TS;
@@ -13,11 +13,15 @@ use crate::context::{CliContext, InstallContext};
use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::block_dev::BlockDev;
use crate::disk::mount::filesystem::efivarfs::EfiVarFs;
use crate::disk::mount::filesystem::overlayfs::OverlayFs;
use crate::disk::mount::filesystem::{MountType, ReadWrite};
use crate::disk::mount::guard::{GenericMountGuard, MountGuard, TmpMountGuard};
use crate::disk::util::{DiskInfo, PartitionTable};
use crate::disk::OsPartitionInfo;
use crate::net::utils::find_eth_iface;
use crate::prelude::*;
use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile;
use crate::util::io::TmpDir;
use crate::util::serde::IoFormat;
use crate::util::Invoke;
use crate::ARCH;
@@ -25,33 +29,33 @@ use crate::ARCH;
mod gpt;
mod mbr;
pub fn install() -> ParentHandler {
pub fn install<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand("disk", disk())
.subcommand("disk", disk::<C>())
.subcommand(
"execute",
from_fn_async(execute)
from_fn_async(execute::<InstallContext>)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
.subcommand(
"reboot",
from_fn_async(reboot)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
}
pub fn disk() -> ParentHandler {
pub fn disk<C: Context>() -> ParentHandler<C> {
ParentHandler::new().subcommand(
"list",
from_fn_async(list)
.no_display()
.with_remote_cli::<CliContext>(),
.with_call_remote::<CliContext>(),
)
}
pub async fn list() -> Result<Vec<DiskInfo>, Error> {
pub async fn list(_: InstallContext) -> Result<Vec<DiskInfo>, Error> {
let skip = match async {
Ok::<_, Error>(
Path::new(
@@ -122,8 +126,8 @@ pub struct ExecuteParams {
overwrite: bool,
}
pub async fn execute(
_: AnyContext,
pub async fn execute<C: Context>(
_: C,
ExecuteParams {
logicalname,
mut overwrite,
@@ -215,44 +219,62 @@ pub async fn execute(
.arg("rootfs")
.invoke(crate::ErrorKind::DiskManagement)
.await?;
let rootfs = TmpMountGuard::mount(&BlockDev::new(&part_info.root), ReadWrite).await?;
let config_path = rootfs.path().join("config");
if tokio::fs::metadata("/tmp/config.bak").await.is_ok() {
if tokio::fs::metadata(&config_path).await.is_ok() {
tokio::fs::remove_dir_all(&config_path).await?;
}
Command::new("cp")
.arg("-r")
.arg("/tmp/config.bak")
.arg(rootfs.path().join("config"))
.arg(&config_path)
.invoke(crate::ErrorKind::Filesystem)
.await?;
} else {
tokio::fs::create_dir(rootfs.path().join("config")).await?;
tokio::fs::create_dir_all(&config_path).await?;
}
tokio::fs::create_dir(rootfs.path().join("next")).await?;
let current = rootfs.path().join("current");
tokio::fs::create_dir(&current).await?;
tokio::fs::create_dir(current.join("boot")).await?;
let boot = MountGuard::mount(
let images_path = rootfs.path().join("images");
tokio::fs::create_dir_all(&images_path).await?;
let image_path = images_path
.join(hex::encode(
&MultiCursorFile::from(
tokio::fs::File::open("/run/live/medium/live/filesystem.squashfs").await?,
)
.blake3_mmap()
.await?
.as_bytes()[..16],
))
.with_extension("rootfs");
tokio::fs::copy("/run/live/medium/live/filesystem.squashfs", &image_path).await?;
// TODO: check hash of fs
let unsquash_target = TmpDir::new().await?;
let bootfs = MountGuard::mount(
&BlockDev::new(&part_info.boot),
current.join("boot"),
unsquash_target.join("boot"),
ReadWrite,
)
.await?;
let efi = if let Some(efi) = &part_info.efi {
Some(MountGuard::mount(&BlockDev::new(efi), current.join("boot/efi"), ReadWrite).await?)
} else {
None
};
Command::new("unsquashfs")
.arg("-n")
.arg("-f")
.arg("-d")
.arg(&current)
.arg(&*unsquash_target)
.arg("/run/live/medium/live/filesystem.squashfs")
.arg("boot")
.invoke(crate::ErrorKind::Filesystem)
.await?;
bootfs.unmount(true).await?;
unsquash_target.delete().await?;
Command::new("ln")
.arg("-rsf")
.arg(&image_path)
.arg(config_path.join("current.rootfs"))
.invoke(ErrorKind::DiskManagement)
.await?;
tokio::fs::write(
rootfs.path().join("config/config.yaml"),
@@ -264,8 +286,55 @@ pub async fn execute(
)
.await?;
let lower = TmpMountGuard::mount(&BlockDev::new(&image_path), MountType::ReadOnly).await?;
let work = config_path.join("work");
let upper = config_path.join("overlay");
let overlay =
TmpMountGuard::mount(&OverlayFs::new(&lower.path(), &upper, &work), ReadWrite).await?;
let boot = MountGuard::mount(
&BlockDev::new(&part_info.boot),
overlay.path().join("boot"),
ReadWrite,
)
.await?;
let efi = if let Some(efi) = &part_info.efi {
Some(
MountGuard::mount(
&BlockDev::new(efi),
overlay.path().join("boot/efi"),
ReadWrite,
)
.await?,
)
} else {
None
};
let start_os_fs = MountGuard::mount(
&Bind::new(rootfs.path()),
overlay.path().join("media/startos/root"),
MountType::ReadOnly,
)
.await?;
let dev = MountGuard::mount(&Bind::new("/dev"), overlay.path().join("dev"), ReadWrite).await?;
let proc =
MountGuard::mount(&Bind::new("/proc"), overlay.path().join("proc"), ReadWrite).await?;
let sys = MountGuard::mount(&Bind::new("/sys"), overlay.path().join("sys"), ReadWrite).await?;
let efivarfs = if tokio::fs::metadata("/sys/firmware/efi").await.is_ok() {
Some(
MountGuard::mount(
&EfiVarFs,
overlay.path().join("sys/firmware/efi/efivars"),
ReadWrite,
)
.await?,
)
} else {
None
};
tokio::fs::write(
current.join("etc/fstab"),
overlay.path().join("etc/fstab"),
format!(
include_str!("fstab.template"),
boot = part_info.boot.display(),
@@ -280,42 +349,20 @@ pub async fn execute(
.await?;
Command::new("chroot")
.arg(&current)
.arg(overlay.path())
.arg("systemd-machine-id-setup")
.invoke(crate::ErrorKind::Systemd)
.await?;
Command::new("chroot")
.arg(&current)
.arg(overlay.path())
.arg("ssh-keygen")
.arg("-A")
.invoke(crate::ErrorKind::OpenSsh)
.await?;
let start_os_fs = MountGuard::mount(
&Bind::new(rootfs.path()),
current.join("media/embassy/embassyfs"),
MountType::ReadOnly,
)
.await?;
let dev = MountGuard::mount(&Bind::new("/dev"), current.join("dev"), ReadWrite).await?;
let proc = MountGuard::mount(&Bind::new("/proc"), current.join("proc"), ReadWrite).await?;
let sys = MountGuard::mount(&Bind::new("/sys"), current.join("sys"), ReadWrite).await?;
let efivarfs = if tokio::fs::metadata("/sys/firmware/efi").await.is_ok() {
Some(
MountGuard::mount(
&EfiVarFs,
current.join("sys/firmware/efi/efivars"),
ReadWrite,
)
.await?,
)
} else {
None
};
let mut install = Command::new("chroot");
install.arg(&current).arg("grub-install");
install.arg(overlay.path()).arg("grub-install");
if tokio::fs::metadata("/sys/firmware/efi").await.is_err() {
install.arg("--target=i386-pc");
} else {
@@ -331,7 +378,7 @@ pub async fn execute(
.await?;
Command::new("chroot")
.arg(&current)
.arg(overlay.path())
.arg("update-grub2")
.invoke(crate::ErrorKind::Grub)
.await?;
@@ -346,7 +393,13 @@ pub async fn execute(
efi.unmount(false).await?;
}
boot.unmount(false).await?;
overlay.unmount().await?;
tokio::fs::remove_dir_all(&work).await?;
lower.unmount().await?;
rootfs.unmount().await?;
Ok(())
}

View File

@@ -11,7 +11,7 @@ use tokio::io::{AsyncSeek, AsyncWrite};
use tokio::sync::{mpsc, watch};
use ts_rs::TS;
use crate::db::model::DatabaseModel;
use crate::db::model::{Database, DatabaseModel};
use crate::prelude::*;
lazy_static::lazy_static! {
@@ -23,6 +23,7 @@ lazy_static::lazy_static! {
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord, TS)]
#[serde(untagged)]
pub enum Progress {
NotStarted(()),
Complete(bool),
Progress {
#[ts(type = "number")]
@@ -33,10 +34,13 @@ pub enum Progress {
}
impl Progress {
pub fn new() -> Self {
Progress::Complete(false)
Progress::NotStarted(())
}
pub fn update_bar(self, bar: &ProgressBar) {
match self {
Self::NotStarted(()) => {
bar.set_style(SPINNER.clone());
}
Self::Complete(false) => {
bar.set_style(SPINNER.clone());
bar.tick();
@@ -60,9 +64,15 @@ impl Progress {
}
}
}
pub fn start(&mut self) {
*self = match *self {
Self::NotStarted(()) => Self::Complete(false),
a => a,
};
}
pub fn set_done(&mut self, done: u64) {
*self = match *self {
Self::Complete(false) => Self::Progress { done, total: None },
Self::Complete(false) | Self::NotStarted(()) => Self::Progress { done, total: None },
Self::Progress { mut done, total } => {
if let Some(total) = total {
if done > total {
@@ -76,7 +86,7 @@ impl Progress {
}
pub fn set_total(&mut self, total: u64) {
*self = match *self {
Self::Complete(false) => Self::Progress {
Self::Complete(false) | Self::NotStarted(()) => Self::Progress {
done: 0,
total: Some(total),
},
@@ -104,12 +114,15 @@ impl Progress {
pub fn complete(&mut self) {
*self = Self::Complete(true);
}
pub fn is_complete(&self) -> bool {
matches!(self, Self::Complete(true))
}
}
impl std::ops::Add<u64> for Progress {
type Output = Self;
fn add(self, rhs: u64) -> Self::Output {
match self {
Self::Complete(false) => Self::Progress {
Self::Complete(false) | Self::NotStarted(()) => Self::Progress {
done: rhs,
total: None,
},
@@ -218,7 +231,7 @@ impl FullProgressTracker {
}
pub fn sync_to_db<DerefFn>(
mut self,
db: PatchDb,
db: TypedPatchDb<Database>,
deref: DerefFn,
min_interval: Option<Duration>,
) -> impl Future<Output = Result<(), Error>> + 'static
@@ -308,6 +321,9 @@ impl PhaseProgressTrackerHandle {
}
}
}
pub fn start(&mut self) {
self.progress.send_modify(|p| p.start());
}
pub fn set_done(&mut self, done: u64) {
self.progress.send_modify(|p| p.set_done(done));
self.update_overall();
@@ -324,6 +340,12 @@ impl PhaseProgressTrackerHandle {
self.progress.send_modify(|p| p.complete());
self.update_overall();
}
pub fn writer<W>(self, writer: W) -> ProgressTrackerWriter<W> {
ProgressTrackerWriter {
writer,
progress: self,
}
}
}
impl std::ops::AddAssign<u64> for PhaseProgressTrackerHandle {
fn add_assign(&mut self, rhs: u64) {

View File

@@ -1,7 +1,6 @@
use clap::Parser;
use imbl_value::{json, Value};
use models::PackageId;
use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use crate::context::RpcContext;

View File

@@ -1,234 +1,255 @@
use std::collections::BTreeMap;
use std::path::PathBuf;
use std::time::Duration;
use clap::Parser;
use color_eyre::eyre::eyre;
use console::style;
use futures::StreamExt;
use indicatif::{ProgressBar, ProgressStyle};
use reqwest::{header, Body, Client, Url};
use rpc_toolkit::command;
use itertools::Itertools;
use rpc_toolkit::{from_fn_async, Context, HandlerArgs, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use crate::context::CliContext;
use crate::s9pk::S9pk;
use crate::{Error, ErrorKind};
use crate::prelude::*;
use crate::registry::context::RegistryContext;
use crate::registry::signer::{ContactInfo, SignerInfo, SignerKey};
use crate::registry::RegistryDatabase;
use crate::rpc_continuations::RequestGuid;
use crate::util::serde::{display_serializable, HandlerExtSerde, Pem, WithIoFormat};
async fn registry_user_pass(location: &str) -> Result<(Url, String, String), Error> {
let mut url = Url::parse(location)?;
let user = url.username().to_string();
let pass = url.password().map(str::to_string);
if user.is_empty() || url.path() != "/" {
return Err(Error::new(
eyre!("{location:?} is not like \"https://user@registry.example.com/\""),
ErrorKind::ParseUrl,
));
pub fn admin_api<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand("signer", signers_api::<C>())
.subcommand("add", from_fn_async(add_admin).no_cli())
.subcommand("add", from_fn_async(cli_add_admin).no_display())
.subcommand(
"list",
from_fn_async(list_admins)
.with_display_serializable()
.with_custom_display_fn(|handle, result| Ok(display_signers(handle.params, result)))
.with_call_remote::<CliContext>(),
)
}
fn signers_api<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"list",
from_fn_async(list_signers)
.with_metadata("admin", Value::Bool(true))
.with_display_serializable()
.with_custom_display_fn(|handle, result| Ok(display_signers(handle.params, result)))
.with_call_remote::<CliContext>(),
)
.subcommand(
"add",
from_fn_async(add_signer)
.with_metadata("admin", Value::Bool(true))
.no_cli(),
)
.subcommand("add", from_fn_async(cli_add_signer).no_display())
}
impl Model<BTreeMap<RequestGuid, SignerInfo>> {
pub fn get_signer(&self, key: &SignerKey) -> Result<RequestGuid, Error> {
self.as_entries()?
.into_iter()
.map(|(guid, s)| Ok::<_, Error>((guid, s.as_keys().de()?)))
.filter_ok(|(_, s)| s.contains(key))
.next()
.transpose()?
.map(|(a, _)| a)
.ok_or_else(|| Error::new(eyre!("unknown signer"), ErrorKind::Authorization))
}
let _ = url.set_username("");
let _ = url.set_password(None);
let pass = match pass {
Some(p) => p,
None => {
let pass_prompt = format!("{} Password for {user}: ", style("?").yellow());
tokio::task::spawn_blocking(move || rpassword::prompt_password(pass_prompt))
.await
.unwrap()?
pub fn get_signer_info(&self, key: &SignerKey) -> Result<(RequestGuid, SignerInfo), Error> {
self.as_entries()?
.into_iter()
.map(|(guid, s)| Ok::<_, Error>((guid, s.de()?)))
.filter_ok(|(_, s)| s.keys.contains(key))
.next()
.transpose()?
.ok_or_else(|| Error::new(eyre!("unknown signer"), ErrorKind::Authorization))
}
pub fn add_signer(&mut self, signer: &SignerInfo) -> Result<(), Error> {
if let Some((guid, s)) = self
.as_entries()?
.into_iter()
.map(|(guid, s)| Ok::<_, Error>((guid, s.de()?)))
.filter_ok(|(_, s)| !s.keys.is_disjoint(&signer.keys))
.next()
.transpose()?
{
return Err(Error::new(
eyre!(
"A signer {} ({}) already exists with a matching key",
guid,
s.name
),
ErrorKind::InvalidRequest,
));
}
};
Ok((url, user.to_string(), pass.to_string()))
}
#[derive(serde::Serialize, Debug)]
struct Package {
id: String,
version: String,
arches: Option<Vec<String>>,
}
async fn do_index(
httpc: &Client,
mut url: Url,
user: &str,
pass: &str,
pkg: &Package,
) -> Result<(), Error> {
url.set_path("/admin/v0/index");
let req = httpc
.post(url)
.header(header::ACCEPT, "text/plain")
.basic_auth(user, Some(pass))
.json(pkg)
.build()?;
let res = httpc.execute(req).await?;
if !res.status().is_success() {
let info = res.text().await?;
return Err(Error::new(eyre!("{}", info), ErrorKind::Registry));
self.insert(&RequestGuid::new(), signer)
}
Ok(())
}
async fn do_upload(
httpc: &Client,
mut url: Url,
user: &str,
pass: &str,
pkg_id: &str,
body: Body,
) -> Result<(), Error> {
url.set_path("/admin/v0/upload");
let req = httpc
.post(url)
.header(header::ACCEPT, "text/plain")
.query(&["id", pkg_id])
.basic_auth(user, Some(pass))
.body(body)
.build()?;
let res = httpc.execute(req).await?;
if !res.status().is_success() {
let info = res.text().await?;
return Err(Error::new(eyre!("{}", info), ErrorKind::Registry));
/// RPC handler: returns every registered signer, keyed by its id.
pub async fn list_signers(
    ctx: RegistryContext,
) -> Result<BTreeMap<RequestGuid, SignerInfo>, Error> {
    ctx.db.peek().await.into_index().into_signers().de()
}
/// CLI display helper: serializes the signer map when an explicit output
/// format was requested, otherwise renders it as a table on the terminal.
pub fn display_signers<T>(params: WithIoFormat<T>, signers: BTreeMap<RequestGuid, SignerInfo>) {
    use prettytable::*;
    // Machine-readable output (e.g. JSON/YAML) requested: skip the table.
    if let Some(format) = params.format {
        return display_serializable(format, signers);
    }
    let mut table = Table::new();
    table.add_row(row![bc =>
        "ID",
        "NAME",
        "CONTACT",
        "KEYS",
    ]);
    for (id, info) in signers {
        // Multi-valued columns are joined with newlines within one cell.
        table.add_row(row![
            id.as_ref(),
            &info.name,
            &info.contact.into_iter().join("\n"),
            &info.keys.into_iter().join("\n"),
        ]);
    }
    // NOTE(review): `unwrap` here panics if writing to the terminal fails —
    // confirm that aborting on a broken pipe is acceptable for this CLI path.
    table.print_tty(false).unwrap();
}
#[derive(Deserialize, Serialize, Parser)]
#[serde(rename_all = "camelCase")]
/// RPC handler: registers a new signer. The underlying model rejects the
/// insert if any of the new signer's keys already belong to an existing one.
pub async fn add_signer(ctx: RegistryContext, signer: SignerInfo) -> Result<(), Error> {
    ctx.db
        .mutate(|db| db.as_index_mut().as_signers_mut().add_signer(&signer))
        .await
}
#[derive(Debug, Deserialize, Serialize, Parser)]
#[command(rename_all = "kebab-case")]
pub struct PublishParams {
location: String,
path: PathBuf,
#[arg(name = "no-verify", long = "no-verify")]
no_verify: bool,
#[arg(name = "no-upload", long = "no-upload")]
no_upload: bool,
#[arg(name = "no-index", long = "no-index")]
no_index: bool,
#[serde(rename_all = "camelCase")]
pub struct CliAddSignerParams {
#[arg(long = "name", short = 'n')]
pub name: String,
#[arg(long = "contact", short = 'c')]
pub contact: Vec<ContactInfo>,
#[arg(long = "ed25519-key")]
pub ed25519_keys: Vec<Pem<ed25519_dalek::VerifyingKey>>,
pub database: Option<PathBuf>,
}
pub async fn publish(
_: CliContext,
PublishParams {
location,
no_index,
no_upload,
no_verify,
path,
}: PublishParams,
pub async fn cli_add_signer(
HandlerArgs {
context: ctx,
parent_method,
method,
params:
CliAddSignerParams {
name,
contact,
ed25519_keys,
database,
},
..
}: HandlerArgs<CliContext, CliAddSignerParams>,
) -> Result<(), Error> {
// Prepare for progress bars.
let bytes_bar_style =
ProgressStyle::with_template("{percent}% {wide_bar} [{bytes}/{total_bytes}] [{eta}]")
.unwrap();
let plain_line_style =
ProgressStyle::with_template("{prefix:.bold.dim} {wide_msg}...").unwrap();
let spinner_line_style =
ProgressStyle::with_template("{prefix:.bold.dim} {spinner} {wide_msg}...").unwrap();
// Read the file to get manifest information and check validity..
// Open file right away so it can not change out from under us.
let file = tokio::fs::File::open(&path).await?;
let manifest = if no_verify {
let pb = ProgressBar::new(1)
.with_style(spinner_line_style.clone())
.with_prefix("[1/3]")
.with_message("Querying s9pk");
pb.enable_steady_tick(Duration::from_millis(200));
let s9pk = S9pk::open(&path, None, false).await?;
let m = s9pk.as_manifest().clone();
pb.set_style(plain_line_style.clone());
pb.abandon();
m
} else {
let pb = ProgressBar::new(1)
.with_style(spinner_line_style.clone())
.with_prefix("[1/3]")
.with_message("Verifying s9pk");
pb.enable_steady_tick(Duration::from_millis(200));
let s9pk = S9pk::open(&path, None, false).await?;
// s9pk.validate().await?;
todo!();
let m = s9pk.as_manifest().clone();
pb.set_style(plain_line_style.clone());
pb.abandon();
m
let signer = SignerInfo {
name,
contact,
keys: ed25519_keys.into_iter().map(SignerKey::Ed25519).collect(),
};
let pkg = Package {
id: manifest.id.to_string(),
version: manifest.version.to_string(),
arches: manifest.hardware_requirements.arch.clone(),
};
println!("{} id = {}", style(">").green(), pkg.id);
println!("{} version = {}", style(">").green(), pkg.version);
if let Some(arches) = &pkg.arches {
println!("{} arches = {:?}", style(">").green(), arches);
if let Some(database) = database {
TypedPatchDb::<RegistryDatabase>::load(PatchDb::open(database).await?)
.await?
.mutate(|db| db.as_index_mut().as_signers_mut().add_signer(&signer))
.await?;
} else {
println!(
"{} No architecture listed in hardware_requirements",
style(">").red()
);
}
// Process the url and get the user's password.
let (registry, user, pass) = registry_user_pass(&location).await?;
// Now prepare a stream of the file which will show a progress bar as it is consumed.
let file_size = file.metadata().await?.len();
let file_stream = tokio_util::io::ReaderStream::new(file);
ProgressBar::new(0)
.with_style(plain_line_style.clone())
.with_prefix("[2/3]")
.with_message("Uploading s9pk")
.abandon();
let pb = ProgressBar::new(file_size).with_style(bytes_bar_style.clone());
let stream_pb = pb.clone();
let file_stream = file_stream.inspect(move |bytes| {
if let Ok(bytes) = bytes {
stream_pb.inc(bytes.len() as u64);
}
});
let httpc = Client::builder().build().unwrap();
// And upload!
if no_upload {
println!("{} Skipping upload", style(">").yellow());
} else {
do_upload(
&httpc,
registry.clone(),
&user,
&pass,
&pkg.id,
Body::wrap_stream(file_stream),
ctx.call_remote::<RegistryContext>(
&parent_method.into_iter().chain(method).join("."),
to_value(&signer)?,
)
.await?;
}
pb.finish_and_clear();
Ok(())
}
// Also index, so it will show up in the registry.
let pb = ProgressBar::new(0)
.with_style(spinner_line_style.clone())
.with_prefix("[3/3]")
.with_message("Indexing registry");
pb.enable_steady_tick(Duration::from_millis(200));
if no_index {
println!("{} Skipping index", style(">").yellow());
#[derive(Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct AddAdminParams {
    /// Id of an already-registered signer to promote to admin.
    #[ts(type = "string")]
    pub signer: RequestGuid,
}
/// RPC handler: adds an existing signer to the admin set. Fails if the id is
/// not a registered signer.
pub async fn add_admin(
    ctx: RegistryContext,
    AddAdminParams { signer }: AddAdminParams,
) -> Result<(), Error> {
    ctx.db
        .mutate(|db| {
            // Only known signers may be promoted.
            ensure_code!(
                db.as_index().as_signers().contains_key(&signer)?,
                ErrorKind::InvalidRequest,
                "unknown signer {signer}"
            );
            db.as_admins_mut().mutate(|a| Ok(a.insert(signer)))?;
            Ok(())
        })
        .await
}
#[derive(Debug, Deserialize, Serialize, Parser)]
#[command(rename_all = "kebab-case")]
#[serde(rename_all = "camelCase")]
pub struct CliAddAdminParams {
pub signer: RequestGuid,
pub database: Option<PathBuf>,
}
pub async fn cli_add_admin(
HandlerArgs {
context: ctx,
parent_method,
method,
params: CliAddAdminParams { signer, database },
..
}: HandlerArgs<CliContext, CliAddAdminParams>,
) -> Result<(), Error> {
if let Some(database) = database {
TypedPatchDb::<RegistryDatabase>::load(PatchDb::open(database).await?)
.await?
.mutate(|db| {
ensure_code!(
db.as_index().as_signers().contains_key(&signer)?,
ErrorKind::InvalidRequest,
"unknown signer {signer}"
);
db.as_admins_mut().mutate(|a| Ok(a.insert(signer)))?;
Ok(())
})
.await?;
} else {
do_index(&httpc, registry.clone(), &user, &pass, &pkg).await?;
}
pb.set_style(plain_line_style.clone());
pb.abandon();
// All done
if !no_index {
println!(
"{} Package {} is now published to {}",
style(">").green(),
pkg.id,
registry
);
ctx.call_remote::<RegistryContext>(
&parent_method.into_iter().chain(method).join("."),
to_value(&AddAdminParams { signer })?,
)
.await?;
}
Ok(())
}
/// RPC handler: returns the subset of registered signers that are admins,
/// keyed by signer id.
pub async fn list_admins(ctx: RegistryContext) -> Result<BTreeMap<RequestGuid, SignerInfo>, Error> {
    let db = ctx.db.peek().await;
    // Snapshot the admin id set first, then narrow the full signer map to it.
    let admin_ids = db.as_admins().de()?;
    let mut signers = db.into_index().into_signers().de()?;
    signers.retain(|id, _| admin_ids.contains(id));
    Ok(signers)
}

View File

@@ -0,0 +1,36 @@
use reqwest::Client;
use serde::{Deserialize, Serialize};
use tokio::io::AsyncWrite;
use ts_rs::TS;
use url::Url;
use crate::prelude::*;
use crate::registry::signer::{AcceptSigners, FileValidator, SignatureInfo};
/// A downloadable artifact published in the registry, together with the
/// signature material needed to verify it.
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct RegistryAsset {
    /// Location the asset can be fetched from.
    #[ts(type = "string")]
    pub url: Url,
    /// Signatures covering the asset's contents.
    pub signature_info: SignatureInfo,
}
// Identity impl so generic code bounded on `AsRef<RegistryAsset>` can take
// the value itself as well as wrappers.
impl AsRef<RegistryAsset> for RegistryAsset {
    fn as_ref(&self) -> &RegistryAsset {
        self
    }
}
impl RegistryAsset {
    /// Builds a [`FileValidator`] from this asset's signature info, accepting
    /// only signers permitted by `accept`.
    pub fn validate(&self, accept: AcceptSigners) -> Result<FileValidator, Error> {
        self.signature_info.validate(accept)
    }
    /// Downloads the asset from `self.url` into `dst`, delegating transfer
    /// and verification to `validator`.
    pub async fn download(
        &self,
        client: Client,
        dst: &mut (impl AsyncWrite + Unpin + Send + ?Sized),
        validator: &FileValidator,
    ) -> Result<(), Error> {
        validator.download(self.url.clone(), client, dst).await
    }
}

View File

@@ -0,0 +1,214 @@
use std::collections::BTreeMap;
use std::sync::Arc;
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
use axum::body::Body;
use axum::extract::Request;
use axum::response::Response;
use chrono::Utc;
use http_body_util::BodyExt;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{Middleware, RpcRequest, RpcResponse};
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha512};
use tokio::io::AsyncWriteExt;
use tokio::sync::Mutex;
use ts_rs::TS;
use crate::prelude::*;
use crate::registry::context::RegistryContext;
use crate::registry::signer::SignerKey;
use crate::util::serde::{Base64, Pem};
pub const AUTH_SIG_HEADER: &str = "X-StartOS-Registry-Auth-Sig";
/// Per-handler auth requirements, read from handler metadata.
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Metadata {
    // Handler requires a verified admin signer; admin calls are also logged
    // (see `process_rpc_request`).
    #[serde(default)]
    admin: bool,
    // Handler wants the verified signer key injected into its params as
    // `__auth_signer`.
    #[serde(default)]
    get_signer: bool,
}
/// Middleware verifying the `X-StartOS-Registry-Auth-Sig` header.
#[derive(Clone)]
pub struct Auth {
    nonce_cache: Arc<Mutex<BTreeMap<Instant, u64>>>, // for replay protection
    // Result of signature verification for the current request; `None` when
    // no auth header was present.
    signer: Option<Result<SignerKey, RpcError>>,
}
impl Auth {
pub fn new() -> Self {
Self {
nonce_cache: Arc::new(Mutex::new(BTreeMap::new())),
signer: None,
}
}
async fn handle_nonce(&mut self, nonce: u64) -> Result<(), Error> {
let mut cache = self.nonce_cache.lock().await;
if cache.values().any(|n| *n == nonce) {
return Err(Error::new(
eyre!("replay attack detected"),
ErrorKind::Authorization,
));
}
while let Some(entry) = cache.first_entry() {
if entry.key().elapsed() > Duration::from_secs(60) {
entry.remove_entry();
} else {
break;
}
}
Ok(())
}
}
/// One record of the append-only `admin.log`: which signer invoked which
/// admin RPC, and when.
#[derive(Serialize, Deserialize, TS)]
pub struct RegistryAdminLogRecord {
    pub timestamp: String,
    pub name: String,
    #[ts(type = "{ id: string | number | null; method: string; params: any }")]
    pub request: RpcRequest,
    pub key: SignerKey,
}
/// Contents of the `X-StartOS-Registry-Auth-Sig` header (urlencoded on the
/// wire via `serde_urlencoded`).
#[derive(Serialize, Deserialize)]
pub struct SignatureHeader {
    pub timestamp: i64,
    pub nonce: u64,
    #[serde(flatten)]
    pub signer: SignerKey,
    pub signature: Base64<[u8; 64]>,
}
impl SignatureHeader {
    /// Signs `body` with an ed25519 key for use as the auth-signature header.
    ///
    /// The signature covers `sha512(timestamp_be || nonce_be || body)` as an
    /// ed25519ph prehash, with `context` as the domain-separation string
    /// (callers pass the registry hostname). `timestamp` is unix seconds,
    /// encoded as a negative value if the system clock is before the epoch.
    pub fn sign_ed25519(
        key: &ed25519_dalek::SigningKey,
        body: &[u8],
        context: &str,
    ) -> Result<Self, Error> {
        let timestamp = SystemTime::now()
            .duration_since(UNIX_EPOCH)
            .map(|d| d.as_secs() as i64)
            .unwrap_or_else(|e| e.duration().as_secs() as i64 * -1);
        // Random nonce; the server caches recent nonces for replay protection.
        let nonce = rand::random();
        let signer = SignerKey::Ed25519(Pem(key.verifying_key()));
        // The hash input order (timestamp, nonce, body) must match the
        // verifier in `process_http_request`.
        let mut hasher = Sha512::new();
        hasher.update(&i64::to_be_bytes(timestamp));
        hasher.update(&u64::to_be_bytes(nonce));
        hasher.update(body);
        let signature = Base64(
            key.sign_prehashed(hasher, Some(context.as_bytes()))?
                .to_bytes(),
        );
        Ok(Self {
            timestamp,
            nonce,
            signer,
            signature,
        })
    }
}
impl Middleware<RegistryContext> for Auth {
    type Metadata = Metadata;
    /// HTTP phase: if the auth-signature header is present, verify it against
    /// the request body and stash the outcome in `self.signer` for the RPC
    /// phase. Requests without the header pass through unauthenticated.
    async fn process_http_request(
        &mut self,
        ctx: &RegistryContext,
        request: &mut Request,
    ) -> Result<(), Response> {
        if request.headers().contains_key(AUTH_SIG_HEADER) {
            self.signer = Some(
                async {
                    let request = request;
                    // Parse the urlencoded header value.
                    let SignatureHeader {
                        timestamp,
                        nonce,
                        signer,
                        signature,
                    } = serde_urlencoded::from_str(
                        request
                            .headers()
                            .get(AUTH_SIG_HEADER)
                            .or_not_found("missing X-StartOS-Registry-Auth-Sig")
                            .with_kind(ErrorKind::InvalidRequest)?
                            .to_str()
                            .with_kind(ErrorKind::Utf8)?,
                    )
                    .with_kind(ErrorKind::Deserialization)?;
                    let now = SystemTime::now()
                        .duration_since(UNIX_EPOCH)
                        .map(|d| d.as_secs() as i64)
                        .unwrap_or_else(|e| e.duration().as_secs() as i64 * -1);
                    // Reject stale (or future-dated) signatures; this bounds
                    // how long a captured signature is useful.
                    if (now - timestamp).abs() > 30 {
                        return Err(Error::new(
                            eyre!("timestamp not within 30s of now"),
                            ErrorKind::InvalidSignature,
                        ));
                    }
                    self.handle_nonce(nonce).await?;
                    // Buffer the entire body so it can be hashed, then put it
                    // back so the handler can still read it.
                    let body = std::mem::replace(request.body_mut(), Body::empty())
                        .collect()
                        .await
                        .with_kind(ErrorKind::Network)?
                        .to_bytes();
                    // Hash input order must match `SignatureHeader::sign_ed25519`:
                    // timestamp, nonce, body.
                    let mut verifier = signer.verifier();
                    verifier.update(&i64::to_be_bytes(timestamp));
                    verifier.update(&u64::to_be_bytes(nonce));
                    verifier.update(&body);
                    *request.body_mut() = Body::from(body);
                    // The registry hostname is the domain-separation context.
                    verifier.verify(&*signature, &ctx.hostname)?;
                    Ok(signer)
                }
                .await
                .map_err(RpcError::from),
            );
        }
        Ok(())
    }
    /// RPC phase: enforces per-handler auth metadata. Injects the verified
    /// signer into the params when requested, and for admin-gated handlers
    /// checks admin membership and appends the call to `admin.log`.
    async fn process_rpc_request(
        &mut self,
        ctx: &RegistryContext,
        metadata: Self::Metadata,
        request: &mut RpcRequest,
    ) -> Result<(), RpcResponse> {
        async move {
            // A failed verification surfaces here as an error via `transpose`.
            let signer = self.signer.take().transpose()?;
            if metadata.get_signer {
                if let Some(signer) = &signer {
                    request.params["__auth_signer"] = to_value(signer)?;
                }
            }
            if metadata.admin {
                let signer = signer
                    .ok_or_else(|| Error::new(eyre!("UNAUTHORIZED"), ErrorKind::Authorization))?;
                let db = ctx.db.peek().await;
                let (guid, admin) = db.as_index().as_signers().get_signer_info(&signer)?;
                if db.into_admins().de()?.contains(&guid) {
                    // Append an audit record before letting the call proceed.
                    let mut log = tokio::fs::OpenOptions::new()
                        .create(true)
                        .append(true)
                        .open(ctx.datadir.join("admin.log"))
                        .await?;
                    log.write_all(
                        (serde_json::to_string(&RegistryAdminLogRecord {
                            timestamp: Utc::now().to_rfc3339(),
                            name: admin.name,
                            request: request.clone(),
                            key: signer,
                        })
                        .with_kind(ErrorKind::Serialization)?
                        + "\n")
                        .as_bytes(),
                    )
                    .await?;
                } else {
                    return Err(Error::new(eyre!("UNAUTHORIZED"), ErrorKind::Authorization));
                }
            }
            Ok(())
        }
        .await
        .map_err(|e| RpcResponse::from_result(Err(e)))
    }
}

View File

@@ -0,0 +1,242 @@
use std::net::{Ipv4Addr, SocketAddr};
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use clap::Parser;
use imbl_value::InternedString;
use patch_db::PatchDb;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{CallRemote, Context, Empty};
use serde::{Deserialize, Serialize};
use tokio::sync::broadcast::Sender;
use tracing::instrument;
use url::Url;
use crate::context::config::{ContextConfig, CONFIG_PATH};
use crate::context::{CliContext, RpcContext};
use crate::prelude::*;
use crate::registry::auth::{SignatureHeader, AUTH_SIG_HEADER};
use crate::registry::RegistryDatabase;
use crate::rpc_continuations::RpcContinuations;
use crate::version::VersionT;
/// Registry server configuration, populated from CLI flags and optionally
/// layered with config files (see `ContextConfig`).
#[derive(Debug, Clone, Default, Deserialize, Serialize, Parser)]
#[serde(rename_all = "kebab-case")]
#[command(rename_all = "kebab-case")]
pub struct RegistryConfig {
    /// Path to an additional config file to load.
    #[arg(short = 'c', long = "config")]
    pub config: Option<PathBuf>,
    /// Socket address to serve on (defaults to localhost:5959 in `init`).
    #[arg(short = 'l', long = "listen")]
    pub listen: Option<SocketAddr>,
    /// Public hostname of this registry; used as the signature context for
    /// request authentication.
    #[arg(short = 'h', long = "hostname")]
    pub hostname: InternedString,
    /// Data directory (defaults to /var/lib/startos in `init`).
    #[arg(short = 'd', long = "datadir")]
    pub datadir: Option<PathBuf>,
}
impl ContextConfig for RegistryConfig {
    /// Pops the next config-file path to load; taken so recursive loading
    /// terminates.
    fn next(&mut self) -> Option<PathBuf> {
        self.config.take()
    }
    /// Merges values from a loaded config file; values already set (e.g. by
    /// CLI flags) take precedence over the file's.
    fn merge_with(&mut self, other: Self) {
        self.datadir = self.datadir.take().or(other.datadir);
        // BUG FIX: `listen` was dropped during merge, so a listen address in
        // a config file was silently ignored.
        self.listen = self.listen.take().or(other.listen);
    }
}
impl RegistryConfig {
    /// Loads configuration: first any file named by `--config`, then the
    /// default `CONFIG_PATH`, each merged in via `merge_with`.
    pub fn load(mut self) -> Result<Self, Error> {
        let path = self.next();
        self.load_path_rec(path)?;
        self.load_path_rec(Some(CONFIG_PATH))?;
        Ok(self)
    }
}
/// Shared server state held behind a [`RegistryContext`].
pub struct RegistryContextSeed {
    pub hostname: InternedString,
    pub listen: SocketAddr,
    pub db: TypedPatchDb<RegistryDatabase>,
    pub datadir: PathBuf,
    pub rpc_continuations: RpcContinuations,
    // Broadcast channel used to signal shutdown to subscribers.
    pub shutdown: Sender<()>,
}
/// Cheaply clonable handle to the registry server's shared state.
#[derive(Clone)]
pub struct RegistryContext(Arc<RegistryContextSeed>);
impl RegistryContext {
    /// Initializes the registry context: ensures the data directory exists,
    /// opens (or initializes) the registry database inside it, and wires up
    /// the shutdown channel and RPC continuations.
    #[instrument(skip_all)]
    pub async fn init(config: &RegistryConfig) -> Result<Self, Error> {
        let (shutdown, _) = tokio::sync::broadcast::channel(1);
        let datadir = config
            .datadir
            .as_deref()
            .unwrap_or_else(|| Path::new("/var/lib/startos"))
            .to_owned();
        if tokio::fs::metadata(&datadir).await.is_err() {
            tokio::fs::create_dir_all(&datadir).await?;
        }
        let db_path = datadir.join("registry.db");
        // Initializes an empty database on first run.
        let db = TypedPatchDb::<RegistryDatabase>::load_or_init(
            PatchDb::open(&db_path).await?,
            || async { Ok(Default::default()) },
        )
        .await?;
        Ok(Self(Arc::new(RegistryContextSeed {
            hostname: config.hostname.clone(),
            listen: config
                .listen
                .unwrap_or(SocketAddr::new(Ipv4Addr::LOCALHOST.into(), 5959)),
            db,
            datadir,
            rpc_continuations: RpcContinuations::new(),
            shutdown,
        })))
    }
}
// Lets generic continuation plumbing borrow the continuations table directly.
impl AsRef<RpcContinuations> for RegistryContext {
    fn as_ref(&self) -> &RpcContinuations {
        &self.rpc_continuations
    }
}
impl Context for RegistryContext {}
// Transparent access to the shared seed's fields (e.g. `ctx.db`).
impl Deref for RegistryContext {
    type Target = RegistryContextSeed;
    fn deref(&self) -> &Self::Target {
        &*self.0
    }
}
/// Extra parameter bundle naming the registry URL to call, used by the
/// `CallRemote` impl for `RpcContext` below.
#[derive(Debug, Deserialize, Serialize, Parser)]
pub struct RegistryUrlParams {
    pub registry: Url,
}
/// Lets the CLI forward `registry.*` methods to a registry server, signing
/// each request with the developer key so admin-gated handlers accept it.
impl CallRemote<RegistryContext> for CliContext {
    async fn call_remote(
        &self,
        mut method: &str,
        params: Value,
        _: Empty,
    ) -> Result<Value, RpcError> {
        use reqwest::header::{ACCEPT, CONTENT_LENGTH, CONTENT_TYPE};
        use reqwest::Method;
        use rpc_toolkit::yajrc::{GenericRpcMethod, Id, RpcRequest};
        use rpc_toolkit::RpcResponse;
        let url = self
            .registry_url
            .clone()
            .ok_or_else(|| Error::new(eyre!("`--registry` required"), ErrorKind::InvalidRequest))?;
        // The server's handler tree does not include the `registry.` prefix.
        method = method.strip_prefix("registry.").unwrap_or(method);
        let rpc_req = RpcRequest {
            id: Some(Id::Number(0.into())),
            method: GenericRpcMethod::<_, _, Value>::new(method),
            params,
        };
        let body = serde_json::to_vec(&rpc_req)?;
        // The registry hostname is the signature's domain-separation context;
        // it must match `ctx.hostname` on the server side.
        let host = url.host().or_not_found("registry hostname")?.to_string();
        let res = self
            .client
            .request(Method::POST, url)
            .header(CONTENT_TYPE, "application/json")
            .header(ACCEPT, "application/json")
            .header(CONTENT_LENGTH, body.len())
            .header(
                AUTH_SIG_HEADER,
                serde_urlencoded::to_string(&SignatureHeader::sign_ed25519(
                    self.developer_key()?,
                    &body,
                    &host,
                )?)
                .with_kind(ErrorKind::Serialization)?,
            )
            .body(body)
            .send()
            .await?;
        match res
            .headers()
            .get(CONTENT_TYPE)
            .and_then(|v| v.to_str().ok())
        {
            Some("application/json") => {
                serde_json::from_slice::<RpcResponse>(&*res.bytes().await?)
                    .with_kind(ErrorKind::Deserialization)?
                    .result
            }
            _ => Err(Error::new(eyre!("missing content type"), ErrorKind::Network).into()),
        }
    }
}
/// Serializes OS version/compat, platform architecture, and hardware facts
/// (RAM, devices) into a URL query string, sent as the `X-StartOS-Hardware`
/// header value.
fn hardware_header(ctx: &RpcContext) -> String {
    // A throwaway base URL is used purely for its query-pair serializer,
    // which handles percent-encoding for us.
    let mut scratch: Url = "http://localhost".parse().unwrap();
    {
        let mut pairs = scratch.query_pairs_mut();
        pairs
            .append_pair(
                "os.version",
                &crate::version::Current::new().semver().to_string(),
            )
            .append_pair(
                "os.compat",
                &crate::version::Current::new().compat().to_string(),
            )
            .append_pair("os.arch", &*crate::PLATFORM)
            .append_pair("hardware.arch", &*crate::ARCH)
            .append_pair("hardware.ram", &ctx.hardware.ram.to_string());
        // One pair per detected device, keyed by its device class.
        for device in &ctx.hardware.devices {
            pairs.append_pair(
                &format!("hardware.device.{}", device.class()),
                device.product(),
            );
        }
    }
    scratch.query().unwrap_or_default().to_string()
}
/// Lets a running StartOS node call a registry's RPC endpoint, attaching its
/// hardware/OS facts so the registry can filter results. Unlike the CLI impl
/// above, requests are unsigned.
impl CallRemote<RegistryContext, RegistryUrlParams> for RpcContext {
    async fn call_remote(
        &self,
        mut method: &str,
        params: Value,
        RegistryUrlParams { registry }: RegistryUrlParams,
    ) -> Result<Value, RpcError> {
        use reqwest::header::{ACCEPT, CONTENT_LENGTH, CONTENT_TYPE};
        use reqwest::Method;
        use rpc_toolkit::yajrc::{GenericRpcMethod, Id, RpcRequest};
        use rpc_toolkit::RpcResponse;
        let url = registry.join("rpc/v0")?;
        // The server's handler tree does not include the `registry.` prefix.
        method = method.strip_prefix("registry.").unwrap_or(method);
        let rpc_req = RpcRequest {
            id: Some(Id::Number(0.into())),
            method: GenericRpcMethod::<_, _, Value>::new(method),
            params,
        };
        let body = serde_json::to_vec(&rpc_req)?;
        let res = self
            .client
            .request(Method::POST, url)
            .header(CONTENT_TYPE, "application/json")
            .header(ACCEPT, "application/json")
            .header(CONTENT_LENGTH, body.len())
            .header("X-StartOS-Hardware", &hardware_header(self))
            .body(body)
            .send()
            .await?;
        match res
            .headers()
            .get(CONTENT_TYPE)
            .and_then(|v| v.to_str().ok())
        {
            Some("application/json") => {
                serde_json::from_slice::<RpcResponse>(&*res.bytes().await?)
                    .with_kind(ErrorKind::Deserialization)?
                    .result
            }
            _ => Err(Error::new(eyre!("missing content type"), ErrorKind::Network).into()),
        }
    }
}

View File

@@ -0,0 +1,171 @@
use std::path::PathBuf;
use clap::Parser;
use itertools::Itertools;
use patch_db::json_ptr::{JsonPointer, ROOT};
use patch_db::Dump;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{from_fn_async, Context, HandlerArgs, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use tracing::instrument;
use ts_rs::TS;
use crate::context::CliContext;
use crate::prelude::*;
use crate::registry::context::RegistryContext;
use crate::registry::RegistryDatabase;
use crate::util::serde::{apply_expr, HandlerExtSerde};
/// Handler tree for inspecting and mutating the registry database.
///
/// Each name is deliberately registered twice: once as the CLI-side handler
/// (which may read a local db file or forward over RPC) and once as the
/// server-side handler (`no_cli`) gated on the `admin` auth metadata.
pub fn db_api<C: Context>() -> ParentHandler<C> {
    ParentHandler::new()
        .subcommand("dump", from_fn_async(cli_dump).with_display_serializable())
        .subcommand(
            "dump",
            from_fn_async(dump)
                .with_metadata("admin", Value::Bool(true))
                .no_cli(),
        )
        .subcommand("apply", from_fn_async(cli_apply).no_display())
        .subcommand(
            "apply",
            from_fn_async(apply)
                .with_metadata("admin", Value::Bool(true))
                .no_cli(),
        )
}
#[derive(Deserialize, Serialize, Parser)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct CliDumpParams {
    /// Subtree to dump when forwarding to a remote registry.
    #[arg(long = "pointer", short = 'p')]
    pointer: Option<JsonPointer>,
    /// Local database file to dump instead of calling a registry.
    path: Option<PathBuf>,
}
/// CLI handler: dumps the registry database — directly from a local file when
/// `path` is given, otherwise by forwarding the request to the registry.
#[instrument(skip_all)]
async fn cli_dump(
    HandlerArgs {
        context,
        parent_method,
        method,
        params: CliDumpParams { pointer, path },
        ..
    }: HandlerArgs<CliContext, CliDumpParams>,
) -> Result<Dump, RpcError> {
    // Local file: open and dump the whole tree, no registry round-trip.
    if let Some(path) = path {
        return Ok(PatchDb::open(path).await?.dump(&ROOT).await);
    }
    // Remote: reconstruct the dotted method name and call the registry,
    // passing the optional pointer through.
    let method = parent_method.into_iter().chain(method).join(".");
    let raw = context
        .call_remote::<RegistryContext>(&method, imbl_value::json!({ "pointer": pointer }))
        .await?;
    Ok(from_value::<Dump>(raw)?)
}
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct DumpParams {
    /// Subtree to dump; the whole database when absent.
    #[arg(long = "pointer", short = 'p')]
    #[ts(type = "string | null")]
    pointer: Option<JsonPointer>,
}
/// RPC handler (admin): dumps the registry database, optionally narrowed to
/// the subtree at `pointer`.
pub async fn dump(ctx: RegistryContext, DumpParams { pointer }: DumpParams) -> Result<Dump, Error> {
    Ok(ctx
        .db
        .dump(&pointer.as_ref().map_or(ROOT, |p| p.borrowed()))
        .await)
}
#[derive(Deserialize, Serialize, Parser)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct CliApplyParams {
    /// jq-style expression to apply to the database (see `apply_expr`).
    expr: String,
    /// Local database file to mutate instead of calling a registry.
    path: Option<PathBuf>,
}
/// CLI handler: applies `expr` to the registry database — directly against a
/// local file when `path` is given, otherwise by forwarding to the registry.
#[instrument(skip_all)]
async fn cli_apply(
    HandlerArgs {
        context,
        parent_method,
        method,
        params: CliApplyParams { expr, path },
        ..
    }: HandlerArgs<CliContext, CliApplyParams>,
) -> Result<(), RpcError> {
    if let Some(path) = path {
        PatchDb::open(path)
            .await?
            .apply_function(|db| {
                let res = apply_expr(
                    serde_json::to_value(patch_db::Value::from(db))
                        .with_kind(ErrorKind::Deserialization)?
                        .into(),
                    &expr,
                )?;
                // Round-trip through `RegistryDatabase` so an expression that
                // produces a schema-invalid result is rejected before commit.
                Ok::<_, Error>((
                    to_value(
                        &serde_json::from_value::<RegistryDatabase>(res.clone().into()).with_ctx(
                            |_| {
                                (
                                    crate::ErrorKind::Deserialization,
                                    "result does not match database model",
                                )
                            },
                        )?,
                    )?,
                    (),
                ))
            })
            .await?;
    } else {
        // Remote: reconstruct the dotted method name and forward the expression.
        let method = parent_method.into_iter().chain(method).join(".");
        context
            .call_remote::<RegistryContext>(&method, imbl_value::json!({ "expr": expr }))
            .await?;
    }
    Ok(())
}
/// Server-side parameters for `db apply`.
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct ApplyParams {
    /// Expression evaluated against the database value (see `apply_expr`).
    expr: String,
    /// NOTE(review): unused by the server handler (`apply` destructures it
    /// away); appears to exist only for shape parity with the CLI params —
    /// confirm before removing.
    path: Option<PathBuf>,
}
/// Server-side handler for `db apply`: evaluates `expr` against the current
/// registry database and commits the result, but only if the result still
/// deserializes as a valid [`RegistryDatabase`].
pub async fn apply(
    ctx: RegistryContext,
    ApplyParams { expr, .. }: ApplyParams,
) -> Result<(), Error> {
    ctx.db
        .mutate(|db| {
            // Evaluate the expression against a JSON copy of the DB.
            let res = apply_expr(
                serde_json::to_value(patch_db::Value::from(db.clone()))
                    .with_kind(ErrorKind::Deserialization)?
                    .into(),
                &expr,
            )?;
            // Round-trip through the typed model so a bad expression cannot
            // corrupt the database shape, then serialize back in place.
            db.ser(
                &serde_json::from_value::<RegistryDatabase>(res.clone().into()).with_ctx(|_| {
                    (
                        crate::ErrorKind::Deserialization,
                        "result does not match database model",
                    )
                })?,
            )
        })
        .await
}

View File

@@ -1,101 +0,0 @@
use base64::Engine;
use clap::Parser;
use color_eyre::eyre::eyre;
use reqwest::{StatusCode, Url};
use rpc_toolkit::{command, from_fn_async, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::version::VersionT;
use crate::{Error, ResultExt};
/// RPC handlers for marketplace interaction; `get` proxies an arbitrary
/// marketplace URL with hardware/OS query parameters attached.
pub fn marketplace() -> ParentHandler {
    let get_handler = from_fn_async(get).with_remote_cli::<CliContext>();
    ParentHandler::new().subcommand("get", get_handler)
}
/// Append OS-version and hardware identification query parameters to `url`
/// so the marketplace can tailor its response to this device.
pub fn with_query_params(ctx: RpcContext, mut url: Url) -> Url {
    {
        // Single serializer scope: all pairs are appended through one
        // `query_pairs_mut()` borrow, then the guard is dropped.
        let mut pairs = url.query_pairs_mut();
        pairs.append_pair(
            "os.version",
            &crate::version::Current::new().semver().to_string(),
        );
        pairs.append_pair(
            "os.compat",
            &crate::version::Current::new().compat().to_string(),
        );
        pairs.append_pair("os.arch", &*crate::PLATFORM);
        pairs.append_pair("hardware.arch", &*crate::ARCH);
        pairs.append_pair("hardware.ram", &ctx.hardware.ram.to_string());
        // One pair per detected hardware device, keyed by its class.
        for hw in &ctx.hardware.devices {
            pairs.append_pair(&format!("hardware.device.{}", hw.class()), hw.product());
        }
    }
    url
}
/// Parameters for `marketplace get`.
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct GetParams {
    /// Marketplace URL to fetch; device query parameters are appended
    /// before the request is sent.
    #[ts(type = "string")]
    url: Url,
}
/// Proxy a GET request to `url` (with hardware/OS query parameters
/// attached) and translate the response into a JSON value:
/// JSON bodies are parsed, plain text becomes a string, and any other
/// content type is wrapped as a base64 data URL.
pub async fn get(ctx: RpcContext, GetParams { url }: GetParams) -> Result<Value, Error> {
    let mut response = ctx
        .client
        .get(with_query_params(ctx.clone(), url))
        .send()
        .await
        .with_kind(crate::ErrorKind::Network)?;
    let status = response.status();
    if status.is_success() {
        // Dispatch on the media type, discarding any parameters
        // (e.g. "; charset=utf-8").
        match response
            .headers_mut()
            .remove("Content-Type")
            .as_ref()
            .and_then(|h| h.to_str().ok())
            .and_then(|h| h.split(";").next())
            .map(|h| h.trim())
        {
            Some("application/json") => response
                .json()
                .await
                .with_kind(crate::ErrorKind::Deserialization),
            Some("text/plain") => Ok(Value::String(
                response
                    .text()
                    .await
                    .with_kind(crate::ErrorKind::Registry)?,
            )),
            // Unknown media types: embed the raw bytes as a data URL.
            Some(ctype) => Ok(Value::String(format!(
                "data:{};base64,{}",
                ctype,
                base64::engine::general_purpose::URL_SAFE.encode(
                    &response
                        .bytes()
                        .await
                        .with_kind(crate::ErrorKind::Registry)?
                )
            ))),
            _ => Err(Error::new(
                eyre!("missing Content-Type"),
                crate::ErrorKind::Registry,
            )),
        }
    } else {
        // Error responses: surface the body text, mapping the HTTP status
        // to an error kind.
        let message = response.text().await.with_kind(crate::ErrorKind::Network)?;
        Err(Error::new(
            eyre!("{}", message),
            match status {
                StatusCode::BAD_REQUEST => crate::ErrorKind::InvalidRequest,
                StatusCode::NOT_FOUND => crate::ErrorKind::NotFound,
                _ => crate::ErrorKind::Registry,
            },
        ))
    }
}

View File

@@ -1,2 +1,126 @@
use std::collections::{BTreeMap, BTreeSet};
use std::net::SocketAddr;
use axum::Router;
use futures::future::ready;
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler, Server};
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use crate::context::{CliContext};
use crate::middleware::cors::Cors;
use crate::net::static_server::{bad_request, not_found, server_error};
use crate::net::web_server::WebServer;
use crate::prelude::*;
use crate::registry::auth::Auth;
use crate::registry::context::{RegistryContext};
use crate::registry::os::index::OsIndex;
use crate::registry::signer::SignerInfo;
use crate::rpc_continuations::RequestGuid;
use crate::util::serde::HandlerExtSerde;
pub mod admin;
pub mod marketplace;
pub mod asset;
pub mod auth;
pub mod context;
pub mod db;
pub mod os;
pub mod signer;
/// Root document of the registry's patch-db database.
#[derive(Debug, Default, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
pub struct RegistryDatabase {
    /// Guids of accounts with admin privileges on this registry.
    pub admins: BTreeSet<RequestGuid>,
    /// The publicly queryable index of registry content.
    pub index: FullIndex,
}
/// The full registry index: OS release information plus known signers.
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct FullIndex {
    // pub package: PackageIndex,
    /// Index of OS versions and their downloadable assets.
    pub os: OsIndex,
    /// Known signers, keyed by their registry guid.
    #[ts(as = "BTreeMap::<String, SignerInfo>")]
    pub signers: BTreeMap<RequestGuid, SignerInfo>,
}
/// Fetch the entire registry index (OS versions plus signer info).
pub async fn get_full_index(ctx: RegistryContext) -> Result<FullIndex, Error> {
    let db = ctx.db.peek().await;
    db.into_index().de()
}
/// Top-level RPC API served by the registry: index dump plus the os,
/// admin, and db sub-APIs.
pub fn registry_api<C: Context>() -> ParentHandler<C> {
    let index_handler = from_fn_async(get_full_index)
        .with_display_serializable()
        .with_call_remote::<CliContext>();
    ParentHandler::new()
        .subcommand("index", index_handler)
        .subcommand("os", os::os_api::<C>())
        .subcommand("admin", admin::admin_api::<C>())
        .subcommand("db", db::db_api::<C>())
}
/// Build the axum router for the registry server: the JSON-RPC endpoint
/// plus websocket and REST continuation endpoints keyed by request guid.
pub fn registry_server_router(ctx: RegistryContext) -> Router {
    use axum::extract as x;
    use axum::routing::{any, get, post};
    Router::new()
        // Main JSON-RPC endpoint, wrapped in CORS and registry auth.
        .route("/rpc/*path", {
            let ctx = ctx.clone();
            post(
                Server::new(move || ready(Ok(ctx.clone())), registry_api())
                    .middleware(Cors::new())
                    .middleware(Auth::new()),
            )
        })
        // Websocket continuations: the path segment is the continuation guid.
        .route(
            "/ws/rpc/*path",
            get({
                let ctx = ctx.clone();
                move |x::Path(path): x::Path<String>,
                      ws: axum::extract::ws::WebSocketUpgrade| async move {
                    match RequestGuid::from(&path) {
                        None => {
                            tracing::debug!("No Guid Path");
                            bad_request()
                        }
                        Some(guid) => match ctx.rpc_continuations.get_ws_handler(&guid).await {
                            Some(cont) => ws.on_upgrade(cont),
                            _ => not_found(),
                        },
                    }
                }
            }),
        )
        // REST continuations: any method; guid parsed from the raw URI path.
        .route(
            "/rest/rpc/*path",
            any({
                let ctx = ctx.clone();
                move |request: x::Request| async move {
                    let path = request
                        .uri()
                        .path()
                        .strip_prefix("/rest/rpc/")
                        .unwrap_or_default();
                    match RequestGuid::from(&path) {
                        None => {
                            tracing::debug!("No Guid Path");
                            bad_request()
                        }
                        Some(guid) => match ctx.rpc_continuations.get_rest_handler(&guid).await {
                            None => not_found(),
                            Some(cont) => cont(request).await.unwrap_or_else(server_error),
                        },
                    }
                }
            }),
        )
}
impl WebServer {
    /// Construct a web server bound to `bind` that serves the registry router.
    pub fn registry(bind: SocketAddr, ctx: RegistryContext) -> Self {
        let router = registry_server_router(ctx);
        Self::new(bind, router)
    }
}

View File

@@ -0,0 +1,341 @@
use std::collections::BTreeMap;
use std::panic::UnwindSafe;
use std::path::PathBuf;
use std::time::Duration;
use axum::response::Response;
use clap::Parser;
use futures::{FutureExt, TryStreamExt};
use helpers::NonDetachingJoinHandle;
use imbl_value::InternedString;
use itertools::Itertools;
use rpc_toolkit::{from_fn_async, CallRemote, Context, HandlerArgs, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha512};
use ts_rs::TS;
use url::Url;
use crate::context::CliContext;
use crate::prelude::*;
use crate::progress::{FullProgressTracker, PhasedProgressBar};
use crate::registry::asset::RegistryAsset;
use crate::registry::context::RegistryContext;
use crate::registry::os::index::OsVersionInfo;
use crate::registry::os::SIG_CONTEXT;
use crate::registry::signer::{Blake3Ed25519Signature, Signature, SignatureInfo, SignerKey};
use crate::rpc_continuations::{RequestGuid, RpcContinuation};
use crate::s9pk::merkle_archive::source::ArchiveSource;
use crate::util::{Apply, Version};
/// Server-side handlers for adding OS assets to the registry index, one per
/// asset kind. All are server-only (`no_cli`) and require the auth layer to
/// attach the request signer (`getSigner` metadata).
pub fn add_api<C: Context>() -> ParentHandler<C> {
    let iso = from_fn_async(add_iso)
        .with_metadata("getSigner", Value::Bool(true))
        .no_cli();
    let img = from_fn_async(add_img)
        .with_metadata("getSigner", Value::Bool(true))
        .no_cli();
    let squashfs = from_fn_async(add_squashfs)
        .with_metadata("getSigner", Value::Bool(true))
        .no_cli();
    ParentHandler::new()
        .subcommand("iso", iso)
        .subcommand("img", img)
        .subcommand("squashfs", squashfs)
}
/// Server-side parameters for `asset add <kind>`.
#[derive(Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct AddAssetParams {
    /// Public URL where the asset can be downloaded from.
    #[ts(type = "string")]
    pub url: Url,
    /// Signature over the asset file; must come from `signer`.
    pub signature: Signature,
    /// OS version the asset belongs to.
    #[ts(type = "string")]
    pub version: Version,
    /// Target platform (e.g. `x86_64-nonfree`, `raspberrypi`).
    #[ts(type = "string")]
    pub platform: InternedString,
    /// When true, the server issues an upload continuation guid.
    #[serde(default)]
    pub upload: bool,
    /// Authenticated signer key, injected by the auth middleware.
    #[serde(rename = "__auth_signer")]
    pub signer: SignerKey,
}
/// Shared implementation for `asset add <kind>`.
///
/// Records `url`/`signature` for (`version`, `platform`) in the asset map
/// selected by `accessor`, provided the request signer is authorized for
/// that version. When `upload` is requested, also registers a short-lived
/// REST continuation the CLI must hit (with a signed header) before
/// uploading the file; returns that continuation's guid.
async fn add_asset(
    ctx: RegistryContext,
    AddAssetParams {
        url,
        signature,
        version,
        platform,
        upload,
        signer,
    }: AddAssetParams,
    accessor: impl FnOnce(&mut Model<OsVersionInfo>) -> &mut Model<BTreeMap<InternedString, RegistryAsset>>
        + UnwindSafe
        + Send,
) -> Result<Option<RequestGuid>, Error> {
    // The asset signature must come from the authenticated request signer.
    ensure_code!(
        signature.signer() == signer,
        ErrorKind::InvalidSignature,
        "asset signature does not match request signer"
    );
    ctx.db
        .mutate(|db| {
            let signer_guid = db.as_index().as_signers().get_signer(&signer)?;
            // Only signers authorized for this version may add assets to it.
            if db
                .as_index()
                .as_os()
                .as_versions()
                .as_idx(&version)
                .or_not_found(&version)?
                .as_signers()
                .de()?
                .contains(&signer_guid)
            {
                accessor(
                    db.as_index_mut()
                        .as_os_mut()
                        .as_versions_mut()
                        .as_idx_mut(&version)
                        .or_not_found(&version)?,
                )
                // Create the platform entry if absent, then record the signature.
                .upsert(&platform, || RegistryAsset {
                    url,
                    signature_info: SignatureInfo::new(SIG_CONTEXT),
                })?
                .as_signature_info_mut()
                .mutate(|s| s.add_sig(&signature))?;
                Ok(())
            } else {
                Err(Error::new(eyre!("UNAUTHORIZED"), ErrorKind::Authorization))
            }
        })
        .await?;
    let guid = if upload {
        let guid = RequestGuid::new();
        let auth_guid = guid.clone();
        let signer = signature.signer();
        let hostname = ctx.hostname.clone();
        ctx.rpc_continuations
            .add(
                guid.clone(),
                RpcContinuation::rest(
                    Box::new(|req| {
                        async move {
                            Ok(
                                // 200 only if the caller proves key possession by
                                // signing the continuation guid for this hostname.
                                // NOTE(review): `base64::decode` uses the crate's
                                // default engine while responses elsewhere use
                                // URL_SAFE — confirm the CLI encodes to match.
                                if async move {
                                    let auth_sig = base64::decode(
                                        req.headers().get("X-StartOS-Registry-Auth-Sig")?,
                                    )
                                    .ok()?;
                                    signer
                                        .verify_message(
                                            auth_guid.as_ref().as_bytes(),
                                            &auth_sig,
                                            &hostname,
                                        )
                                        .ok()?;
                                    Some(())
                                }
                                .await
                                .is_some()
                                {
                                    Response::builder()
                                        .status(200)
                                        .body(axum::body::Body::empty())
                                        .with_kind(ErrorKind::Network)?
                                } else {
                                    Response::builder()
                                        .status(401)
                                        .body(axum::body::Body::empty())
                                        .with_kind(ErrorKind::Network)?
                                },
                            )
                        }
                        .boxed()
                    }),
                    // Continuation expires after 30 seconds.
                    Duration::from_secs(30),
                ),
            )
            .await;
        Some(guid)
    } else {
        None
    };
    Ok(guid)
}
/// Add an ISO asset for a version/platform (see [`add_asset`]).
pub async fn add_iso(
    ctx: RegistryContext,
    params: AddAssetParams,
) -> Result<Option<RequestGuid>, Error> {
    add_asset(ctx, params, |version_info| version_info.as_iso_mut()).await
}

/// Add a disk-image asset for a version/platform (see [`add_asset`]).
pub async fn add_img(
    ctx: RegistryContext,
    params: AddAssetParams,
) -> Result<Option<RequestGuid>, Error> {
    add_asset(ctx, params, |version_info| version_info.as_img_mut()).await
}

/// Add a squashfs asset for a version/platform (see [`add_asset`]).
pub async fn add_squashfs(
    ctx: RegistryContext,
    params: AddAssetParams,
) -> Result<Option<RequestGuid>, Error> {
    add_asset(ctx, params, |version_info| version_info.as_squashfs_mut()).await
}
/// CLI arguments for `asset add`.
#[derive(Debug, Deserialize, Serialize, Parser)]
#[command(rename_all = "kebab-case")]
#[serde(rename_all = "camelCase")]
pub struct CliAddAssetParams {
    /// Target platform (e.g. x86_64-nonfree, raspberrypi).
    #[arg(short = 'p', long = "platform")]
    pub platform: InternedString,
    /// OS version the asset belongs to.
    #[arg(short = 'v', long = "version")]
    pub version: Version,
    /// Local asset file; its extension (iso/img/squashfs) picks the subcommand.
    pub file: PathBuf,
    /// Public URL the registry should record for this asset.
    pub url: Url,
    /// Also upload the file to the registry after indexing it.
    #[arg(short = 'u', long = "upload")]
    pub upload: bool,
}
/// CLI-side handler for `asset add`.
///
/// Signs the local file with the developer key, registers it in the
/// registry index under the subcommand matching the file extension, and —
/// when `--upload` is given — streams the file to the registry using the
/// returned continuation guid for authentication. Progress is reported via
/// a phased progress bar.
pub async fn cli_add_asset(
    HandlerArgs {
        context: ctx,
        parent_method,
        method,
        params:
            CliAddAssetParams {
                platform,
                version,
                file: path,
                url,
                upload,
            },
        ..
    }: HandlerArgs<CliContext, CliAddAssetParams>,
) -> Result<(), Error> {
    // The file extension selects which remote subcommand (iso/img/squashfs) to call.
    let ext = match path.extension().and_then(|e| e.to_str()) {
        Some("iso") => "iso",
        Some("img") => "img",
        Some("squashfs") => "squashfs",
        _ => {
            return Err(Error::new(
                eyre!("Unknown extension"),
                ErrorKind::InvalidRequest,
            ))
        }
    };
    let file = tokio::fs::File::open(&path).await?.into();
    let mut progress = FullProgressTracker::new();
    let progress_handle = progress.handle();
    let mut sign_phase =
        progress_handle.add_phase(InternedString::intern("Signing File"), Some(10));
    let mut index_phase = progress_handle.add_phase(
        InternedString::intern("Adding File to Registry Index"),
        Some(1),
    );
    let mut upload_phase = if upload {
        Some(progress_handle.add_phase(InternedString::intern("Uploading File"), Some(100)))
    } else {
        None
    };
    // Background task that renders the progress bar until all phases complete.
    let progress_task: NonDetachingJoinHandle<()> = tokio::spawn(async move {
        let mut bar = PhasedProgressBar::new(&format!("Adding {} to registry...", path.display()));
        loop {
            let snap = progress.snapshot();
            bar.update(&snap);
            if snap.overall.is_complete() {
                break;
            }
            progress.changed().await
        }
    })
    .into();
    sign_phase.start();
    let blake3_sig =
        Blake3Ed25519Signature::sign_file(ctx.developer_key()?, &file, SIG_CONTEXT).await?;
    let size = blake3_sig.size;
    let signature = Signature::Blake3Ed25519(blake3_sig);
    sign_phase.complete();
    index_phase.start();
    // Register the asset; the server returns an upload continuation guid
    // only when `upload` was requested.
    let add_res = from_value::<Option<RequestGuid>>(
        ctx.call_remote::<RegistryContext>(
            &parent_method
                .into_iter()
                .chain(method)
                .chain([ext])
                .join("."),
            imbl_value::json!({
                "platform": platform,
                "version": version,
                "url": &url,
                "signature": signature,
                "upload": upload,
            }),
        )
        .await?,
    )?;
    index_phase.complete();
    if let Some(guid) = add_res {
        upload_phase.as_mut().map(|p| p.start());
        upload_phase.as_mut().map(|p| p.set_total(size));
        let reg_url = ctx.registry_url.as_ref().or_not_found("--registry")?;
        // Prove key possession: sign the continuation guid with the registry
        // hostname as signing context and send both headers with the upload.
        ctx.client
            .post(url)
            .header("X-StartOS-Registry-Token", guid.as_ref())
            .header(
                "X-StartOS-Registry-Auth-Sig",
                base64::encode(
                    ctx.developer_key()?
                        .sign_prehashed(
                            Sha512::new_with_prefix(guid.as_ref().as_bytes()),
                            Some(
                                reg_url
                                    .host()
                                    .or_not_found("registry hostname")?
                                    .to_string()
                                    .as_bytes(),
                            ),
                        )?
                        .to_bytes(),
                ),
            )
            // Stream the file, ticking the upload phase as chunks go out.
            .body(reqwest::Body::wrap_stream(
                tokio_util::io::ReaderStream::new(file.fetch(0, size).await?).inspect_ok(
                    move |b| {
                        upload_phase
                            .as_mut()
                            .map(|p| *p += b.len() as u64)
                            .apply(|_| ())
                    },
                ),
            ))
            .send()
            .await?;
        // upload_phase.as_mut().map(|p| p.complete());
    }
    progress_handle.complete();
    progress_task.await.with_kind(ErrorKind::Unknown)?;
    Ok(())
}

View File

@@ -0,0 +1,182 @@
use std::collections::BTreeMap;
use std::panic::UnwindSafe;
use std::path::{Path, PathBuf};
use clap::Parser;
use helpers::{AtomicFile, NonDetachingJoinHandle};
use imbl_value::{json, InternedString};
use itertools::Itertools;
use rpc_toolkit::{from_fn_async, Context, HandlerArgs, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use crate::context::CliContext;
use crate::prelude::*;
use crate::progress::{FullProgressTracker, PhasedProgressBar};
use crate::registry::asset::RegistryAsset;
use crate::registry::context::RegistryContext;
use crate::registry::os::index::OsVersionInfo;
use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile;
use crate::util::Version;
/// Handlers for fetching OS assets by kind. Each name is registered twice:
/// a server-only handler and a CLI-side handler; registration order matches
/// the original definition.
pub fn get_api<C: Context>() -> ParentHandler<C> {
    let iso_remote = from_fn_async(get_iso).no_cli();
    let iso_cli = from_fn_async(cli_get_os_asset).no_display();
    let img_remote = from_fn_async(get_img).no_cli();
    let img_cli = from_fn_async(cli_get_os_asset).no_display();
    let squashfs_remote = from_fn_async(get_squashfs).no_cli();
    let squashfs_cli = from_fn_async(cli_get_os_asset).no_display();
    ParentHandler::new()
        .subcommand("iso", iso_remote)
        .subcommand("iso", iso_cli)
        .subcommand("img", img_remote)
        .subcommand("img", img_cli)
        .subcommand("squashfs", squashfs_remote)
        .subcommand("squashfs", squashfs_cli)
}
/// Server-side parameters for `asset get <kind>`.
#[derive(Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct GetOsAssetParams {
    /// OS version to look up.
    #[ts(type = "string")]
    pub version: Version,
    /// Target platform (e.g. x86_64-nonfree, raspberrypi).
    #[ts(type = "string")]
    pub platform: InternedString,
}
/// Shared implementation for `asset get <kind>`: look up the asset for
/// (`version`, `platform`) in the map selected by `accessor`, erroring with
/// not-found if either key is missing.
async fn get_os_asset(
    ctx: RegistryContext,
    GetOsAssetParams { version, platform }: GetOsAssetParams,
    accessor: impl FnOnce(&Model<OsVersionInfo>) -> &Model<BTreeMap<InternedString, RegistryAsset>>
        + UnwindSafe
        + Send,
) -> Result<RegistryAsset, Error> {
    let db = ctx.db.peek().await;
    let version_info = db
        .as_index()
        .as_os()
        .as_versions()
        .as_idx(&version)
        .or_not_found(&version)?;
    let assets = accessor(version_info);
    assets.as_idx(&platform).or_not_found(&platform)?.de()
}
/// Fetch the ISO asset entry for a version/platform (see [`get_os_asset`]).
pub async fn get_iso(
    ctx: RegistryContext,
    params: GetOsAssetParams,
) -> Result<RegistryAsset, Error> {
    get_os_asset(ctx, params, |version_info| version_info.as_iso()).await
}

/// Fetch the disk-image asset entry for a version/platform (see [`get_os_asset`]).
pub async fn get_img(
    ctx: RegistryContext,
    params: GetOsAssetParams,
) -> Result<RegistryAsset, Error> {
    get_os_asset(ctx, params, |version_info| version_info.as_img()).await
}

/// Fetch the squashfs asset entry for a version/platform (see [`get_os_asset`]).
pub async fn get_squashfs(
    ctx: RegistryContext,
    params: GetOsAssetParams,
) -> Result<RegistryAsset, Error> {
    get_os_asset(ctx, params, |version_info| version_info.as_squashfs()).await
}
/// CLI arguments for `asset get <kind>`.
#[derive(Debug, Deserialize, Serialize, Parser)]
#[command(rename_all = "kebab-case")]
#[serde(rename_all = "camelCase")]
pub struct CliGetOsAssetParams {
    /// OS version to look up.
    pub version: Version,
    /// Target platform (e.g. x86_64-nonfree, raspberrypi).
    pub platform: InternedString,
    /// If set, download the asset to this path after fetching its metadata.
    #[arg(long = "download", short = 'd')]
    pub download: Option<PathBuf>,
    #[arg(
        long = "reverify",
        short = 'r',
        help = "verify the hash of the file a second time after download"
    )]
    pub reverify: bool,
}
/// CLI-side handler for `asset get <kind>`.
///
/// Fetches the asset metadata from the registry, validates its signatures,
/// and optionally downloads (and re-verifies) the file to `--download`,
/// with a phased progress bar. Returns the asset metadata either way.
async fn cli_get_os_asset(
    HandlerArgs {
        context: ctx,
        parent_method,
        method,
        params:
            CliGetOsAssetParams {
                version,
                platform,
                download,
                reverify,
            },
        ..
    }: HandlerArgs<CliContext, CliGetOsAssetParams>,
) -> Result<RegistryAsset, Error> {
    let res = from_value::<RegistryAsset>(
        ctx.call_remote::<RegistryContext>(
            &parent_method.into_iter().chain(method).join("."),
            json!({
                "version": version,
                "platform": platform,
            }),
        )
        .await?,
    )?;
    // Build a validator from all recorded signers before touching any bytes.
    let validator = res.validate(res.signature_info.all_signers())?;
    if let Some(download) = download {
        // Write through an AtomicFile so a failed download leaves no partial file.
        let mut file = AtomicFile::new(&download, None::<&Path>)
            .await
            .with_kind(ErrorKind::Filesystem)?;
        let mut progress = FullProgressTracker::new();
        let progress_handle = progress.handle();
        let mut download_phase =
            progress_handle.add_phase(InternedString::intern("Downloading File"), Some(100));
        download_phase.set_total(validator.size()?);
        let reverify_phase = if reverify {
            Some(progress_handle.add_phase(InternedString::intern("Reverifying File"), Some(10)))
        } else {
            None
        };
        // Background task that renders the progress bar until completion.
        let progress_task: NonDetachingJoinHandle<()> = tokio::spawn(async move {
            let mut bar = PhasedProgressBar::new("Downloading...");
            loop {
                let snap = progress.snapshot();
                bar.update(&snap);
                if snap.overall.is_complete() {
                    break;
                }
                progress.changed().await
            }
        })
        .into();
        download_phase.start();
        let mut download_writer = download_phase.writer(&mut *file);
        res.download(ctx.client.clone(), &mut download_writer, &validator)
            .await?;
        let (_, mut download_phase) = download_writer.into_inner();
        file.save().await.with_kind(ErrorKind::Filesystem)?;
        download_phase.complete();
        if let Some(mut reverify_phase) = reverify_phase {
            reverify_phase.start();
            // Second pass: re-hash the saved file from disk.
            validator
                .validate_file(&MultiCursorFile::from(
                    tokio::fs::File::open(download).await?,
                ))
                .await?;
            reverify_phase.complete();
        }
        progress_handle.complete();
        progress_task.await.with_kind(ErrorKind::Unknown)?;
    }
    Ok(res)
}

View File

@@ -0,0 +1,14 @@
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
pub mod add;
pub mod get;
pub mod sign;
/// RPC API for OS assets: add, sign, and get. "add" and "sign" each have a
/// server-side sub-API plus a CLI-side handler registered under the same
/// name; registration order matches the original definition.
pub fn asset_api<C: Context>() -> ParentHandler<C> {
    let add_cli = from_fn_async(add::cli_add_asset).no_display();
    let sign_cli = from_fn_async(sign::cli_sign_asset).no_display();
    ParentHandler::new()
        .subcommand("add", add::add_api::<C>())
        .subcommand("add", add_cli)
        .subcommand("sign", sign::sign_api::<C>())
        .subcommand("sign", sign_cli)
        .subcommand("get", get::get_api::<C>())
}

View File

@@ -0,0 +1,188 @@
use std::collections::BTreeMap;
use std::panic::UnwindSafe;
use std::path::PathBuf;
use clap::Parser;
use helpers::NonDetachingJoinHandle;
use imbl_value::InternedString;
use itertools::Itertools;
use rpc_toolkit::{from_fn_async, Context, HandlerArgs, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use crate::context::CliContext;
use crate::prelude::*;
use crate::progress::{FullProgressTracker, PhasedProgressBar};
use crate::registry::asset::RegistryAsset;
use crate::registry::context::RegistryContext;
use crate::registry::os::index::OsVersionInfo;
use crate::registry::os::SIG_CONTEXT;
use crate::registry::signer::{Blake3Ed25519Signature, Signature};
use crate::util::Version;
/// Server-only handlers for attaching additional signatures to existing
/// OS assets, one per asset kind.
pub fn sign_api<C: Context>() -> ParentHandler<C> {
    let iso = from_fn_async(sign_iso).no_cli();
    let img = from_fn_async(sign_img).no_cli();
    let squashfs = from_fn_async(sign_squashfs).no_cli();
    ParentHandler::new()
        .subcommand("iso", iso)
        .subcommand("img", img)
        .subcommand("squashfs", squashfs)
}
/// Server-side parameters for `asset sign <kind>`.
#[derive(Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct SignAssetParams {
    /// OS version the asset belongs to.
    #[ts(type = "string")]
    version: Version,
    /// Target platform of the asset being signed.
    #[ts(type = "string")]
    platform: InternedString,
    /// The additional signature to record on the asset.
    signature: Signature,
}
/// Shared implementation for `asset sign <kind>`: record an additional
/// signature on the existing asset for (`version`, `platform`) in the map
/// selected by `accessor`. Fails if the signature's signer is unknown or
/// not authorized for the version, or if the asset does not exist.
async fn sign_asset(
    ctx: RegistryContext,
    SignAssetParams {
        version,
        platform,
        signature,
    }: SignAssetParams,
    accessor: impl FnOnce(&mut Model<OsVersionInfo>) -> &mut Model<BTreeMap<InternedString, RegistryAsset>>
        + UnwindSafe
        + Send,
) -> Result<(), Error> {
    ctx.db
        .mutate(|db| {
            // Resolve the signature's key to a registered signer guid.
            let guid = db.as_index().as_signers().get_signer(&signature.signer())?;
            // The signer must be authorized for this OS version.
            if !db
                .as_index()
                .as_os()
                .as_versions()
                .as_idx(&version)
                .or_not_found(&version)?
                .as_signers()
                .de()?
                .contains(&guid)
            {
                return Err(Error::new(
                    eyre!("signer {guid} is not authorized"),
                    ErrorKind::Authorization,
                ));
            }
            // Unlike `add_asset`, this does NOT upsert: the asset must exist.
            accessor(
                db.as_index_mut()
                    .as_os_mut()
                    .as_versions_mut()
                    .as_idx_mut(&version)
                    .or_not_found(&version)?,
            )
            .as_idx_mut(&platform)
            .or_not_found(&platform)?
            .as_signature_info_mut()
            .mutate(|s| s.add_sig(&signature))?;
            Ok(())
        })
        .await
}
/// Record an additional signature on an ISO asset (see [`sign_asset`]).
pub async fn sign_iso(ctx: RegistryContext, params: SignAssetParams) -> Result<(), Error> {
    sign_asset(ctx, params, |version_info| version_info.as_iso_mut()).await
}

/// Record an additional signature on a disk-image asset (see [`sign_asset`]).
pub async fn sign_img(ctx: RegistryContext, params: SignAssetParams) -> Result<(), Error> {
    sign_asset(ctx, params, |version_info| version_info.as_img_mut()).await
}

/// Record an additional signature on a squashfs asset (see [`sign_asset`]).
pub async fn sign_squashfs(ctx: RegistryContext, params: SignAssetParams) -> Result<(), Error> {
    sign_asset(ctx, params, |version_info| version_info.as_squashfs_mut()).await
}
/// CLI arguments for `asset sign`.
#[derive(Debug, Deserialize, Serialize, Parser)]
#[command(rename_all = "kebab-case")]
#[serde(rename_all = "camelCase")]
pub struct CliSignAssetParams {
    /// Target platform of the asset being signed.
    #[arg(short = 'p', long = "platform")]
    pub platform: InternedString,
    /// OS version the asset belongs to.
    #[arg(short = 'v', long = "version")]
    pub version: Version,
    /// Local copy of the asset file; its extension (iso/img/squashfs)
    /// selects the subcommand, and the signature is computed over it.
    pub file: PathBuf,
}
/// CLI-side handler for `asset sign`.
///
/// Signs the local file with the developer key and submits the signature to
/// the registry under the subcommand matching the file extension, with a
/// phased progress bar.
pub async fn cli_sign_asset(
    HandlerArgs {
        context: ctx,
        parent_method,
        method,
        params:
            CliSignAssetParams {
                platform,
                version,
                file: path,
            },
        ..
    }: HandlerArgs<CliContext, CliSignAssetParams>,
) -> Result<(), Error> {
    // The file extension selects which remote subcommand (iso/img/squashfs) to call.
    let ext = match path.extension().and_then(|e| e.to_str()) {
        Some("iso") => "iso",
        Some("img") => "img",
        Some("squashfs") => "squashfs",
        _ => {
            return Err(Error::new(
                eyre!("Unknown extension"),
                ErrorKind::InvalidRequest,
            ))
        }
    };
    let file = tokio::fs::File::open(&path).await?.into();
    let mut progress = FullProgressTracker::new();
    let progress_handle = progress.handle();
    let mut sign_phase =
        progress_handle.add_phase(InternedString::intern("Signing File"), Some(10));
    let mut index_phase = progress_handle.add_phase(
        InternedString::intern("Adding Signature to Registry Index"),
        Some(1),
    );
    // Background task that renders the progress bar until all phases complete.
    let progress_task: NonDetachingJoinHandle<()> = tokio::spawn(async move {
        let mut bar = PhasedProgressBar::new(&format!("Adding {} to registry...", path.display()));
        loop {
            let snap = progress.snapshot();
            bar.update(&snap);
            if snap.overall.is_complete() {
                break;
            }
            progress.changed().await
        }
    })
    .into();
    sign_phase.start();
    let blake3_sig =
        Blake3Ed25519Signature::sign_file(ctx.developer_key()?, &file, SIG_CONTEXT).await?;
    let signature = Signature::Blake3Ed25519(blake3_sig);
    sign_phase.complete();
    index_phase.start();
    ctx.call_remote::<RegistryContext>(
        &parent_method
            .into_iter()
            .chain(method)
            .chain([ext])
            .join("."),
        imbl_value::json!({
            "platform": platform,
            "version": version,
            "signature": signature,
        }),
    )
    .await?;
    index_phase.complete();
    progress_handle.complete();
    progress_task.await.with_kind(ErrorKind::Unknown)?;
    Ok(())
}

View File

@@ -0,0 +1,44 @@
use std::collections::{BTreeMap, BTreeSet};
use emver::VersionRange;
use imbl_value::InternedString;
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use crate::prelude::*;
use crate::registry::asset::RegistryAsset;
use crate::registry::context::RegistryContext;
use crate::rpc_continuations::RequestGuid;
use crate::util::Version;
/// Index of all OS versions known to the registry.
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct OsIndex {
    /// Per-version release metadata and downloadable assets.
    #[ts(as = "BTreeMap::<String, OsVersionInfo>")]
    pub versions: BTreeMap<Version, OsVersionInfo>,
}
/// Release metadata and downloadable assets for a single OS version.
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct OsVersionInfo {
    /// Short human-readable summary of the release.
    pub headline: String,
    /// Full release notes.
    pub release_notes: String,
    /// Range of versions that may upgrade directly to this one.
    #[ts(type = "string")]
    pub source_version: VersionRange,
    /// Guids of signers authorized to publish/sign assets for this version.
    #[ts(type = "string[]")]
    pub signers: BTreeSet<RequestGuid>,
    #[ts(as = "BTreeMap::<String, RegistryAsset>")]
    pub iso: BTreeMap<InternedString, RegistryAsset>, // platform (i.e. x86_64-nonfree) -> asset
    #[ts(as = "BTreeMap::<String, RegistryAsset>")]
    pub squashfs: BTreeMap<InternedString, RegistryAsset>, // platform (i.e. x86_64-nonfree) -> asset
    #[ts(as = "BTreeMap::<String, RegistryAsset>")]
    pub img: BTreeMap<InternedString, RegistryAsset>, // platform (i.e. raspberrypi) -> asset
}
/// Fetch the OS portion of the registry index.
pub async fn get_os_index(ctx: RegistryContext) -> Result<OsIndex, Error> {
    let db = ctx.db.peek().await;
    db.into_index().into_os().de()
}

View File

@@ -0,0 +1,22 @@
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use crate::context::CliContext;
use crate::util::serde::HandlerExtSerde;
pub const SIG_CONTEXT: &str = "startos";
pub mod asset;
pub mod index;
pub mod version;
/// RPC API for the OS section of the registry: index dump, asset
/// management, and version management.
pub fn os_api<C: Context>() -> ParentHandler<C> {
    let index_handler = from_fn_async(index::get_os_index)
        .with_display_serializable()
        .with_call_remote::<CliContext>();
    ParentHandler::new()
        .subcommand("index", index_handler)
        .subcommand("asset", asset::asset_api::<C>())
        .subcommand("version", version::version_api::<C>())
}

View File

@@ -0,0 +1,183 @@
use std::collections::BTreeMap;
use clap::Parser;
use emver::VersionRange;
use itertools::Itertools;
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use crate::context::CliContext;
use crate::prelude::*;
use crate::registry::context::RegistryContext;
use crate::registry::os::index::OsVersionInfo;
use crate::registry::signer::SignerKey;
use crate::util::serde::{display_serializable, HandlerExtSerde, WithIoFormat};
use crate::util::Version;
pub mod signer;
/// RPC handlers for managing OS version entries: add/remove (admin only),
/// signer management, and version queries with tabular display.
pub fn version_api<C: Context>() -> ParentHandler<C> {
    let add = from_fn_async(add_version)
        .with_metadata("admin", Value::Bool(true))
        .with_metadata("getSigner", Value::Bool(true))
        .no_display()
        .with_call_remote::<CliContext>();
    let remove = from_fn_async(remove_version)
        .with_metadata("admin", Value::Bool(true))
        .no_display()
        .with_call_remote::<CliContext>();
    let get = from_fn_async(get_version)
        .with_display_serializable()
        .with_custom_display_fn(|handle, result| {
            Ok(display_version_info(handle.params, result))
        })
        .with_call_remote::<CliContext>();
    ParentHandler::new()
        .subcommand("add", add)
        .subcommand("remove", remove)
        .subcommand("signer", signer::signer_api::<C>())
        .subcommand("get", get)
}
/// Parameters for `version add`.
#[derive(Debug, Deserialize, Serialize, Parser, TS)]
#[command(rename_all = "kebab-case")]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct AddVersionParams {
    /// The OS version to create or update.
    #[ts(type = "string")]
    pub version: Version,
    /// Short human-readable summary of the release.
    pub headline: String,
    /// Full release notes.
    pub release_notes: String,
    /// Range of versions that may upgrade directly to this one.
    #[ts(type = "string")]
    pub source_version: VersionRange,
    /// Authenticated signer key injected by the auth middleware; not a CLI
    /// argument and not part of the TS-exported shape.
    #[arg(skip)]
    #[ts(skip)]
    #[serde(rename = "__auth_signer")]
    pub signer: Option<SignerKey>,
}
/// Create or update the registry index entry for an OS version.
///
/// Overwrites the version's metadata (headline, release notes, source
/// version range). If the request was authenticated with a known signer
/// key, that signer is additionally authorized for the version; existing
/// authorized signers are preserved.
pub async fn add_version(
    ctx: RegistryContext,
    AddVersionParams {
        version,
        headline,
        release_notes,
        source_version,
        signer,
    }: AddVersionParams,
) -> Result<(), Error> {
    ctx.db
        .mutate(|db| {
            // Resolve the optional authenticated signer key to its guid;
            // an unknown key is an error rather than silently ignored.
            let signer = signer
                .map(|s| db.as_index().as_signers().get_signer(&s))
                .transpose()?;
            db.as_index_mut()
                .as_os_mut()
                .as_versions_mut()
                // `OsVersionInfo::default` directly — the closure wrapper was
                // redundant (clippy::redundant_closure).
                .upsert(&version, OsVersionInfo::default)?
                .mutate(|i| {
                    i.headline = headline;
                    i.release_notes = release_notes;
                    i.source_version = source_version;
                    // `extend` over the Option adds the signer only when present.
                    i.signers.extend(signer);
                    Ok(())
                })
        })
        .await
}
/// Parameters for `version remove`.
#[derive(Debug, Deserialize, Serialize, Parser, TS)]
#[command(rename_all = "kebab-case")]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct RemoveVersionParams {
    /// The OS version entry to delete from the index.
    #[ts(type = "string")]
    pub version: Version,
}
/// Delete an OS version entry (and its asset records) from the index.
///
/// NOTE(review): succeeds silently when the version does not exist, unlike
/// `remove_version_signer` which errors on a missing target — confirm the
/// idempotent behavior is intended.
pub async fn remove_version(
    ctx: RegistryContext,
    RemoveVersionParams { version }: RemoveVersionParams,
) -> Result<(), Error> {
    ctx.db
        .mutate(|db| {
            db.as_index_mut()
                .as_os_mut()
                .as_versions_mut()
                .remove(&version)?;
            Ok(())
        })
        .await
}
/// Parameters for `version get`.
#[derive(Debug, Deserialize, Serialize, Parser, TS)]
#[command(rename_all = "kebab-case")]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct GetVersionParams {
    /// If set, only return versions upgradable from this source version.
    #[ts(type = "string | null")]
    #[arg(long = "src")]
    pub source: Option<Version>,
    /// If set, only return versions within this range (default: any).
    #[ts(type = "string | null")]
    #[arg(long = "target")]
    pub target: Option<VersionRange>,
}
/// Query OS versions, optionally filtered by a target range and/or a source
/// version that must satisfy each candidate's `source_version` range.
pub async fn get_version(
    ctx: RegistryContext,
    GetVersionParams { source, target }: GetVersionParams,
) -> Result<BTreeMap<Version, OsVersionInfo>, Error> {
    // No target range means "match everything".
    let target = target.unwrap_or(VersionRange::Any);
    ctx.db
        .peek()
        .await
        .into_index()
        .into_os()
        .into_versions()
        .into_entries()?
        .into_iter()
        // Deserialize each entry, keeping the version key alongside it.
        .map(|(v, i)| i.de().map(|i| (v, i)))
        .filter_ok(|(version, info)| {
            version.satisfies(&target)
                && source
                    .as_ref()
                    .map_or(true, |s| s.satisfies(&info.source_version))
        })
        .collect()
}
/// Render version query results: serialized if an IO format was requested,
/// otherwise as a human-readable table on stdout.
pub fn display_version_info<T>(params: WithIoFormat<T>, info: BTreeMap<Version, OsVersionInfo>) {
    use prettytable::*;
    // An explicit output format (e.g. JSON) takes precedence over the table.
    if let Some(format) = params.format {
        return display_serializable(format, info);
    }
    let mut table = Table::new();
    table.add_row(row![bc =>
        "VERSION",
        "HEADLINE",
        "RELEASE NOTES",
        "ISO PLATFORMS",
        "IMG PLATFORMS",
        "SQUASHFS PLATFORMS",
    ]);
    for (version, info) in &info {
        table.add_row(row![
            version.as_str(),
            &info.headline,
            &info.release_notes,
            // `keys()` already yields an iterator — the previous
            // `.into_iter()` calls were redundant (clippy::useless_conversion).
            &info.iso.keys().join(", "),
            &info.img.keys().join(", "),
            &info.squashfs.keys().join(", "),
        ]);
    }
    // Writing a table to stdout is not expected to fail; panic if it does.
    table.print_tty(false).unwrap();
}

View File

@@ -0,0 +1,133 @@
use std::collections::BTreeMap;
use clap::Parser;
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use crate::context::CliContext;
use crate::prelude::*;
use crate::registry::admin::display_signers;
use crate::registry::context::RegistryContext;
use crate::registry::signer::SignerInfo;
use crate::rpc_continuations::RequestGuid;
use crate::util::serde::HandlerExtSerde;
use crate::util::Version;
/// RPC handlers for managing which signers are authorized for an OS
/// version: add/remove (admin only) and list with tabular display.
pub fn signer_api<C: Context>() -> ParentHandler<C> {
    let add = from_fn_async(add_version_signer)
        .with_metadata("admin", Value::Bool(true))
        .no_display()
        .with_call_remote::<CliContext>();
    let remove = from_fn_async(remove_version_signer)
        .with_metadata("admin", Value::Bool(true))
        .no_display()
        .with_call_remote::<CliContext>();
    let list = from_fn_async(list_version_signers)
        .with_display_serializable()
        .with_custom_display_fn(|handle, result| Ok(display_signers(handle.params, result)))
        .with_call_remote::<CliContext>();
    ParentHandler::new()
        .subcommand("add", add)
        .subcommand("remove", remove)
        .subcommand("list", list)
}
/// Parameters for `version signer add`/`remove`.
#[derive(Debug, Deserialize, Serialize, Parser, TS)]
#[command(rename_all = "kebab-case")]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct VersionSignerParams {
    /// The OS version whose signer set is being modified.
    #[ts(type = "string")]
    pub version: Version,
    /// Registry guid of the signer to add or remove.
    #[ts(type = "string")]
    pub signer: RequestGuid,
}
/// Authorize a registered signer for an OS version.
///
/// Fails if the signer guid is not registered in the index or the version
/// does not exist; adding an already-authorized signer is a no-op.
pub async fn add_version_signer(
    ctx: RegistryContext,
    VersionSignerParams { version, signer }: VersionSignerParams,
) -> Result<(), Error> {
    ctx.db
        .mutate(|db| {
            // The guid must refer to a signer registered in the index.
            ensure_code!(
                db.as_index().as_signers().contains_key(&signer)?,
                ErrorKind::InvalidRequest,
                "unknown signer {signer}"
            );
            db.as_index_mut()
                .as_os_mut()
                .as_versions_mut()
                .as_idx_mut(&version)
                .or_not_found(&version)?
                .as_signers_mut()
                .mutate(|s| Ok(s.insert(signer)))?;
            Ok(())
        })
        .await
}
/// Revoke a signer's authorization for an OS version.
///
/// Errors when the version does not exist or the signer was not authorized
/// for it in the first place.
pub async fn remove_version_signer(
    ctx: RegistryContext,
    VersionSignerParams { version, signer }: VersionSignerParams,
) -> Result<(), Error> {
    ctx.db
        .mutate(|db| {
            // `BTreeSet::remove` returns false when the guid was absent.
            if !db
                .as_index_mut()
                .as_os_mut()
                .as_versions_mut()
                .as_idx_mut(&version)
                .or_not_found(&version)?
                .as_signers_mut()
                .mutate(|s| Ok(s.remove(&signer)))?
            {
                return Err(Error::new(
                    eyre!("signer {signer} is not authorized to sign for v{version}"),
                    ErrorKind::NotFound,
                ));
            }
            Ok(())
        })
        .await
}
/// Parameters for `version signer list`.
#[derive(Debug, Deserialize, Serialize, Parser, TS)]
#[command(rename_all = "kebab-case")]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct ListVersionSignersParams {
    /// The OS version whose authorized signers are listed.
    #[ts(type = "string")]
    pub version: Version,
}
/// List the signers authorized for an OS version, resolved to their full
/// [`SignerInfo`] records.
///
/// NOTE(review): guids authorized for the version but no longer present in
/// the signer index are silently dropped by the `filter_map` — confirm this
/// is preferable to surfacing the dangling reference.
pub async fn list_version_signers(
    ctx: RegistryContext,
    ListVersionSignersParams { version }: ListVersionSignersParams,
) -> Result<BTreeMap<RequestGuid, SignerInfo>, Error> {
    let db = ctx.db.peek().await;
    db.as_index()
        .as_os()
        .as_versions()
        .as_idx(&version)
        .or_not_found(&version)?
        .as_signers()
        .de()?
        .into_iter()
        .filter_map(|guid| {
            db.as_index()
                .as_signers()
                .as_idx(&guid)
                .map(|s| s.de().map(|s| (guid, s)))
        })
        .collect()
}

View File

@@ -0,0 +1,477 @@
use std::collections::{HashMap, HashSet};
use std::path::Path;
use std::str::FromStr;
use clap::builder::ValueParserFactory;
use imbl_value::InternedString;
use reqwest::Client;
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha512};
use tokio::io::AsyncWrite;
use ts_rs::TS;
use url::Url;
use crate::prelude::*;
use crate::s9pk::merkle_archive::source::http::HttpSource;
use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile;
use crate::s9pk::merkle_archive::source::{ArchiveSource, FileSource};
use crate::util::clap::FromStrParser;
use crate::util::serde::{Base64, Pem};
/// Registry-side record of a party authorized to sign artifacts.
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct SignerInfo {
    /// Human-readable display name of the signer.
    pub name: String,
    /// Ways to reach the signer (email / matrix / website).
    pub contact: Vec<ContactInfo>,
    /// Public keys attributed to this signer.
    pub keys: HashSet<SignerKey>,
}
/// A public key used to verify signatures, tagged by algorithm.
///
/// Serialized as an adjacently-tagged object: `{ "alg": ..., "pubkey": ... }`.
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, Hash, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
#[serde(tag = "alg", content = "pubkey")]
pub enum SignerKey {
    /// PEM-encoded ed25519 verifying key.
    Ed25519(Pem<ed25519_dalek::VerifyingKey>),
}
impl SignerKey {
    /// Creates a streaming verifier for this key's algorithm.
    ///
    /// For ed25519 the message is accumulated into a SHA-512 prehash before
    /// verification.
    pub fn verifier(&self) -> Verifier {
        match self {
            Self::Ed25519(k) => Verifier::Ed25519(*k, Sha512::new()),
        }
    }
    /// Convenience wrapper: verifies `signature` over the whole `message`
    /// under domain-separation string `context` in one call.
    pub fn verify_message(
        &self,
        message: &[u8],
        signature: &[u8],
        context: &str,
    ) -> Result<(), Error> {
        let mut v = self.verifier();
        v.update(message);
        v.verify(signature, context)
    }
}
impl std::fmt::Display for SignerKey {
    /// Renders the key as its PEM representation (delegates to the inner
    /// `Pem` value's `Display`).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Single-variant enum: the pattern is irrefutable.
        let Self::Ed25519(key) = self;
        std::fmt::Display::fmt(key, f)
    }
}
/// Incremental signature verifier: feed message bytes with `update`, then
/// consume with `verify`.
pub enum Verifier {
    /// Verifying key plus the running SHA-512 prehash of the message.
    Ed25519(Pem<ed25519_dalek::VerifyingKey>, Sha512),
}
impl Verifier {
    /// Absorbs more message bytes into the running hash.
    pub fn update(&mut self, data: &[u8]) {
        match self {
            Self::Ed25519(_, h) => h.update(data),
        }
    }
    /// Consumes the verifier and checks `signature` over the accumulated
    /// message, domain-separated by `context`.
    pub fn verify(self, signature: &[u8], context: &str) -> Result<(), Error> {
        match self {
            Self::Ed25519(k, h) => k.verify_prehashed_strict(
                h,
                Some(context.as_bytes()),
                &ed25519_dalek::Signature::from_slice(signature)?,
            )?,
        }
        Ok(())
    }
}
/// A way to contact a signer; round-trips through `Display`/`FromStr` as a
/// URI-style string.
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
// TODO: better types
pub enum ContactInfo {
    /// Bare email address (rendered as `mailto:<addr>`).
    Email(String),
    /// Matrix handle (rendered as a matrix.to link).
    Matrix(String),
    /// Arbitrary website URL.
    Website(#[ts(type = "string")] Url),
}
impl std::fmt::Display for ContactInfo {
    /// Renders the contact method as a URI-style string; the inverse of the
    /// `FromStr` impl below.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Email(address) => {
                f.write_str("mailto:")?;
                f.write_str(address)
            }
            Self::Matrix(handle) => {
                f.write_str("https://matrix.to/#/")?;
                f.write_str(handle)
            }
            Self::Website(url) => std::fmt::Display::fmt(url, f),
        }
    }
}
impl FromStr for ContactInfo {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(if let Some(s) = s.strip_prefix("mailto:") {
Self::Email(s.to_owned())
} else if let Some(s) = s.strip_prefix("https://matrix.to/#/") {
Self::Matrix(s.to_owned())
} else {
Self::Website(s.parse()?)
})
}
}
// Lets clap parse `ContactInfo` CLI arguments via the `FromStr` impl above.
impl ValueParserFactory for ContactInfo {
    type Parser = FromStrParser<Self>;
    fn value_parser() -> Self::Parser {
        Self::Parser::new()
    }
}
/// Signature metadata for a single file, grouped by signature scheme.
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct SignatureInfo {
    /// Domain-separation string every signature must be made under.
    #[ts(type = "string")]
    pub context: InternedString,
    // NOTE(review): the field name looks like a typo of "blake3_ed25519",
    // but it is part of the serialized (kebab-case) schema and the TS
    // bindings — renaming it would break the wire format. Confirm upstream
    // before changing.
    pub blake3_ed255i9: Option<Blake3Ed2551SignatureInfo>,
}
impl SignatureInfo {
    /// Creates an empty signature set bound to the given context string.
    pub fn new(context: &str) -> Self {
        Self {
            context: context.into(),
            blake3_ed255i9: None,
        }
    }
    /// Checks the stored signatures against the `accept` policy, yielding a
    /// validator carrying the signed hash/size on success.
    pub fn validate(&self, accept: AcceptSigners) -> Result<FileValidator, Error> {
        FileValidator::from_signatures(self.signatures(), accept, &self.context)
    }
    /// Builds an `All` policy requiring every signer present here.
    pub fn all_signers(&self) -> AcceptSigners {
        AcceptSigners::All(
            self.signatures()
                .map(|s| AcceptSigners::Signer(s.signer()))
                .collect(),
        )
        .flatten()
    }
    /// Iterates over the stored signatures as individual `Signature` values,
    /// each paired with the shared hash and size.
    pub fn signatures(&self) -> impl Iterator<Item = Signature> + '_ {
        self.blake3_ed255i9.iter().flat_map(|info| {
            info.signatures
                .iter()
                .map(|(k, s)| (k.clone(), *s))
                .map(|(pubkey, signature)| {
                    Signature::Blake3Ed25519(Blake3Ed25519Signature {
                        hash: info.hash,
                        size: info.size,
                        pubkey,
                        signature,
                    })
                })
        })
    }
    /// Cryptographically validates `signature` and records it.
    ///
    /// All signatures must attest to the same hash: a signature over a
    /// different hash is rejected with `InvalidSignature`.
    pub fn add_sig(&mut self, signature: &Signature) -> Result<(), Error> {
        signature.validate(&self.context)?;
        match signature {
            Signature::Blake3Ed25519(s) => {
                if self
                    .blake3_ed255i9
                    .as_ref()
                    .map_or(true, |info| info.hash == s.hash)
                {
                    // First signature initializes the info record; later ones
                    // are merged into the existing signature map.
                    let new = if let Some(mut info) = self.blake3_ed255i9.take() {
                        info.signatures.insert(s.pubkey, s.signature);
                        info
                    } else {
                        s.info()
                    };
                    self.blake3_ed255i9 = Some(new);
                    Ok(())
                } else {
                    Err(Error::new(
                        eyre!("hash sum mismatch"),
                        ErrorKind::InvalidSignature,
                    ))
                }
            }
        }
    }
}
/// Policy describing which signer combinations are acceptable.
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub enum AcceptSigners {
    /// A requirement already satisfied by this signature. Never serialized;
    /// produced internally by `process_signature`.
    #[serde(skip)]
    Accepted(Signature),
    /// Satisfied by a signature from this specific key.
    Signer(SignerKey),
    /// Satisfied if any one of the nested policies is satisfied.
    Any(Vec<AcceptSigners>),
    /// Satisfied only if all nested policies are satisfied.
    All(Vec<AcceptSigners>),
}
impl AcceptSigners {
    /// Placeholder value used to move out of `&mut self`; an empty `Any` is
    /// never satisfied.
    const fn null() -> Self {
        Self::Any(Vec::new())
    }
    /// Recursively collapses single-element `Any`/`All` wrappers into their
    /// child.
    pub fn flatten(self) -> Self {
        match self {
            Self::Any(mut s) | Self::All(mut s) if s.len() == 1 => s.swap_remove(0).flatten(),
            s => s,
        }
    }
    /// Whether this policy has been fully satisfied.
    pub fn accepted(&self) -> bool {
        match self {
            Self::Accepted(_) => true,
            Self::All(s) => s.iter().all(|s| s.accepted()),
            _ => false,
        }
    }
    /// Consumes the policy, yielding the accepted signatures (re-validated
    /// against `context`), or error items for unmet requirements.
    pub fn try_accept(
        self,
        context: &str,
    ) -> Box<dyn Iterator<Item = Result<Signature, Error>> + Send + Sync + '_> {
        match self {
            Self::Accepted(s) => Box::new(std::iter::once(s).map(|s| {
                s.validate(context)?;
                Ok(s)
            })),
            Self::All(s) => Box::new(s.into_iter().flat_map(|s| s.try_accept(context))),
            // A `Signer` or `Any` reaching this point means no acceptable
            // signature was processed for it.
            _ => Box::new(std::iter::once(Err(Error::new(
                eyre!("signer(s) not accepted"),
                ErrorKind::InvalidSignature,
            )))),
        }
    }
    /// Marks any requirement satisfied by `sig` as `Accepted`. An `Any` node
    /// collapses to its first satisfied child.
    pub fn process_signature(&mut self, sig: &Signature) {
        // Move `self` out (leaving a throwaway `null`) so the value can be
        // rebuilt without fighting the borrow checker.
        let new = match std::mem::replace(self, Self::null()) {
            Self::Accepted(s) => Self::Accepted(s),
            Self::Signer(s) => {
                if s == sig.signer() {
                    Self::Accepted(sig.clone())
                } else {
                    Self::Signer(s)
                }
            }
            Self::All(mut s) => {
                s.iter_mut().for_each(|s| s.process_signature(sig));
                Self::All(s)
            }
            Self::Any(mut s) => {
                if let Some(s) = s
                    .iter_mut()
                    .map(|s| {
                        s.process_signature(sig);
                        s
                    })
                    .filter(|s| s.accepted())
                    .next()
                {
                    // One branch satisfied: the whole `Any` becomes that
                    // branch (replace just moves it out of the Vec slot).
                    std::mem::replace(s, Self::null())
                } else {
                    Self::Any(s)
                }
            }
        };
        *self = new;
    }
}
/// Expected hash and size extracted from accepted signatures, used to verify
/// downloaded or on-disk files.
#[must_use]
pub struct FileValidator {
    // Expected BLAKE3 hash, if any BLAKE3 signature was accepted.
    blake3: Option<blake3::Hash>,
    // Expected file size in bytes, if any signature was accepted.
    size: Option<u64>,
}
impl FileValidator {
    /// Records an expected (hash, size) pair, erroring if it disagrees with
    /// one recorded earlier.
    fn add_blake3(&mut self, hash: [u8; 32], size: u64) -> Result<(), Error> {
        if let Some(h) = self.blake3 {
            ensure_code!(h == hash, ErrorKind::InvalidSignature, "hash sum mismatch");
        }
        self.blake3 = Some(blake3::Hash::from_bytes(hash));
        if let Some(s) = self.size {
            ensure_code!(s == size, ErrorKind::InvalidSignature, "file size mismatch");
        }
        self.size = Some(size);
        Ok(())
    }
    /// Returns the expected BLAKE3 hash, or an error if none was recorded.
    pub fn blake3(&self) -> Result<blake3::Hash, Error> {
        if let Some(hash) = self.blake3 {
            Ok(hash)
        } else {
            Err(Error::new(
                eyre!("no BLAKE3 signatures found"),
                ErrorKind::InvalidSignature,
            ))
        }
    }
    /// Returns the expected file size, or an error if none was recorded.
    pub fn size(&self) -> Result<u64, Error> {
        if let Some(size) = self.size {
            Ok(size)
        } else {
            Err(Error::new(
                eyre!("no signatures found"),
                ErrorKind::InvalidSignature,
            ))
        }
    }
    /// Runs `signatures` through the `accept` policy and collects the
    /// attested (hash, size) from every accepted signature.
    pub fn from_signatures(
        signatures: impl IntoIterator<Item = Signature>,
        mut accept: AcceptSigners,
        context: &str,
    ) -> Result<Self, Error> {
        let mut res = Self {
            blake3: None,
            size: None,
        };
        for signature in signatures {
            accept.process_signature(&signature);
        }
        for signature in accept.try_accept(context) {
            match signature? {
                Signature::Blake3Ed25519(s) => res.add_blake3(*s.hash, s.size)?,
            }
        }
        Ok(res)
    }
    /// Streams `url` into `dst`, verifying hash and size during the copy.
    pub async fn download(
        &self,
        url: Url,
        client: Client,
        dst: &mut (impl AsyncWrite + Unpin + Send + ?Sized),
    ) -> Result<(), Error> {
        let src = HttpSource::new(client, url).await?;
        let (Some(hash), Some(size)) = (self.blake3, self.size) else {
            return Err(Error::new(
                eyre!("no BLAKE3 signatures found"),
                ErrorKind::InvalidSignature,
            ));
        };
        // `copy_verify` checks both hash and byte count as it copies.
        src.section(0, size)
            .copy_verify(dst, Some((hash, size)))
            .await?;
        Ok(())
    }
    /// Verifies an already-downloaded file against the expected size and
    /// hash (hashing via mmap).
    pub async fn validate_file(&self, file: &MultiCursorFile) -> Result<(), Error> {
        ensure_code!(
            file.size().await == Some(self.size()?),
            ErrorKind::InvalidSignature,
            "file size mismatch"
        );
        ensure_code!(
            file.blake3_mmap().await? == self.blake3()?,
            ErrorKind::InvalidSignature,
            "hash sum mismatch"
        );
        Ok(())
    }
}
/// Aggregated ed25519 signatures over a file's BLAKE3 hash and size.
///
/// NOTE(review): "Ed2551" looks like a typo of "Ed25519", but the name is
/// referenced throughout the schema / TS bindings — confirm before renaming.
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct Blake3Ed2551SignatureInfo {
    /// BLAKE3 hash of the signed file.
    pub hash: Base64<[u8; 32]>,
    /// Size of the signed file in bytes.
    pub size: u64,
    /// Verifying key -> signature bytes over (hash, size).
    pub signatures: HashMap<Pem<ed25519_dalek::VerifyingKey>, Base64<[u8; 64]>>,
}
impl Blake3Ed2551SignatureInfo {
    /// Validates every stored signature against the recorded hash/size under
    /// `context`, returning the signing keys. Fails on the first invalid
    /// signature.
    pub fn validate(&self, context: &str) -> Result<Vec<Pem<ed25519_dalek::VerifyingKey>>, Error> {
        self.signatures
            .iter()
            .map(|(k, s)| {
                let sig = Blake3Ed25519Signature {
                    hash: self.hash,
                    size: self.size,
                    pubkey: k.clone(),
                    signature: *s,
                };
                sig.validate(context)?;
                Ok(sig.pubkey)
            })
            .collect()
    }
}
/// A single signature over a file, tagged by scheme.
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub enum Signature {
    /// ed25519 signature over the file's BLAKE3 hash and size.
    Blake3Ed25519(Blake3Ed25519Signature),
}
impl Signature {
    /// Cryptographically checks the signature under the given context string.
    pub fn validate(&self, context: &str) -> Result<(), Error> {
        // Single-variant enum: the pattern is irrefutable.
        let Self::Blake3Ed25519(sig) = self;
        sig.validate(context)
    }
    /// Returns the public key that produced this signature, wrapped as a
    /// `SignerKey`.
    pub fn signer(&self) -> SignerKey {
        let Self::Blake3Ed25519(sig) = self;
        SignerKey::Ed25519(sig.pubkey.clone())
    }
}
/// One ed25519 signature attesting to a file's BLAKE3 hash and size.
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct Blake3Ed25519Signature {
    /// BLAKE3 hash of the signed file.
    pub hash: Base64<[u8; 32]>,
    /// Size of the signed file in bytes.
    pub size: u64,
    /// Key that produced (and verifies) the signature.
    pub pubkey: Pem<ed25519_dalek::VerifyingKey>,
    // ed25519-sig(sha512(blake3(file) + len_u64_be(file)))
    pub signature: Base64<[u8; 64]>,
}
impl Blake3Ed25519Signature {
    /// Hashes `file` with BLAKE3 (via mmap) and signs
    /// `sha512(hash || size_be)` under `context` with `key`.
    pub async fn sign_file(
        key: &ed25519_dalek::SigningKey,
        file: &MultiCursorFile,
        context: &str,
    ) -> Result<Self, Error> {
        let size = file
            .size()
            .await
            .ok_or_else(|| Error::new(eyre!("failed to get file size"), ErrorKind::Filesystem))?;
        let hash = file.blake3_mmap().await?;
        let signature = key.sign_prehashed(
            Sha512::new_with_prefix(hash.as_bytes()).chain_update(u64::to_be_bytes(size)),
            Some(context.as_bytes()),
        )?;
        Ok(Self {
            hash: Base64(*hash.as_bytes()),
            size,
            pubkey: Pem::new(key.verifying_key()),
            signature: Base64(signature.to_bytes()),
        })
    }
    /// Cryptographically checks the signature over the recorded hash and
    /// size. Does NOT inspect any file contents; see [`Self::check_file`].
    pub fn validate(&self, context: &str) -> Result<(), Error> {
        let sig = ed25519_dalek::Signature::from_bytes(&*self.signature);
        self.pubkey.verify_prehashed_strict(
            Sha512::new_with_prefix(*self.hash).chain_update(u64::to_be_bytes(self.size)),
            Some(context.as_bytes()),
            &sig,
        )?;
        Ok(())
    }
    /// Confirms that `file`'s actual size and BLAKE3 hash match the values
    /// this signature attests to.
    pub async fn check_file(&self, file: &MultiCursorFile) -> Result<(), Error> {
        let size = file
            .size()
            .await
            .ok_or_else(|| Error::new(eyre!("failed to get file size"), ErrorKind::Filesystem))?;
        if self.size != size {
            return Err(Error::new(
                eyre!("incorrect file size: expected {} got {}", self.size, size),
                ErrorKind::InvalidSignature,
            ));
        }
        let hash = file.blake3_mmap().await?;
        if &*self.hash != hash.as_bytes() {
            return Err(Error::new(
                eyre!("hash sum mismatch"),
                ErrorKind::InvalidSignature,
            ));
        }
        Ok(())
    }
    /// Converts this single signature into an info record containing it.
    pub fn info(&self) -> Blake3Ed2551SignatureInfo {
        Blake3Ed2551SignatureInfo {
            hash: self.hash,
            size: self.size,
            signatures: [(self.pubkey, self.signature)].into_iter().collect(),
        }
    }
}

View File

@@ -0,0 +1,142 @@
use std::collections::BTreeMap;
use std::str::FromStr;
use std::time::Duration;
use axum::extract::ws::WebSocket;
use axum::extract::Request;
use axum::response::Response;
use clap::builder::ValueParserFactory;
use futures::future::BoxFuture;
use helpers::TimedResource;
use imbl_value::InternedString;
use tokio::sync::Mutex;
#[allow(unused_imports)]
use crate::prelude::*;
use crate::util::clap::FromStrParser;
use crate::util::new_guid;
/// 32-character base32 (uppercase A-Z / 2-7) identifier for an RPC
/// continuation.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, serde::Serialize, serde::Deserialize)]
pub struct RequestGuid(InternedString);
impl RequestGuid {
    /// Generates a fresh random guid.
    pub fn new() -> Self {
        Self(new_guid())
    }

    /// Parses a guid from a string, returning `None` unless it is exactly 32
    /// characters of the uppercase RFC 4648 base32 alphabet (`A-Z`, `2-7`).
    ///
    /// NOTE: the name shadows the `From` trait convention while being
    /// fallible; kept as-is because callers rely on it.
    pub fn from(r: &str) -> Option<RequestGuid> {
        // Byte length works here: all accepted characters are ASCII.
        let is_base32_char = |c: char| c.is_ascii_uppercase() || ('2'..='7').contains(&c);
        if r.len() == 32 && r.chars().all(is_base32_char) {
            Some(RequestGuid(InternedString::intern(r)))
        } else {
            None
        }
    }
}
// Cheap borrowed view of the guid's string form.
impl AsRef<str> for RequestGuid {
    fn as_ref(&self) -> &str {
        self.0.as_ref()
    }
}
// Fallible parse with a proper error, delegating to `RequestGuid::from`.
impl FromStr for RequestGuid {
    type Err = Error;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Self::from(s).ok_or_else(|| Error::new(eyre!("invalid guid"), ErrorKind::Deserialization))
    }
}
// Lets clap parse `RequestGuid` CLI arguments via its `FromStr` impl.
impl ValueParserFactory for RequestGuid {
    type Parser = FromStrParser<Self>;
    fn value_parser() -> Self::Parser {
        Self::Parser::new()
    }
}
/// Round-trip test: a freshly generated guid must re-parse successfully.
/// (The previous version only printed the result and asserted nothing.)
#[test]
fn parse_guid() {
    let guid = RequestGuid::new();
    let reparsed = RequestGuid::from(&guid.to_string());
    assert!(
        reparsed.is_some(),
        "generated guid failed to re-parse: {guid}"
    );
}
// Renders the guid as its raw 32-character string.
impl std::fmt::Display for RequestGuid {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        self.0.fmt(f)
    }
}
/// One-shot handler for a deferred HTTP (REST) request continuation.
pub type RestHandler =
    Box<dyn FnOnce(Request) -> BoxFuture<'static, Result<Response, crate::Error>> + Send>;
/// One-shot handler invoked once a websocket upgrade completes.
pub type WebSocketHandler = Box<dyn FnOnce(WebSocket) -> BoxFuture<'static, ()> + Send>;
/// A deferred response handler — either a plain HTTP (REST) continuation or
/// a websocket upgrade — guarded by an expiry timeout.
pub enum RpcContinuation {
    Rest(TimedResource<RestHandler>),
    WebSocket(TimedResource<WebSocketHandler>),
}
impl RpcContinuation {
    /// Wraps a REST handler with an expiry timeout.
    pub fn rest(handler: RestHandler, timeout: Duration) -> Self {
        RpcContinuation::Rest(TimedResource::new(handler, timeout))
    }
    /// Wraps a websocket handler with an expiry timeout.
    pub fn ws(handler: WebSocketHandler, timeout: Duration) -> Self {
        RpcContinuation::WebSocket(TimedResource::new(handler, timeout))
    }
    /// Whether the underlying timed resource has expired.
    pub fn is_timed_out(&self) -> bool {
        match self {
            RpcContinuation::Rest(a) => a.is_timed_out(),
            RpcContinuation::WebSocket(a) => a.is_timed_out(),
        }
    }
}
/// Table of pending RPC continuations, keyed by request guid.
pub struct RpcContinuations(Mutex<BTreeMap<RequestGuid, RpcContinuation>>);
impl RpcContinuations {
    /// Creates an empty continuation table.
    pub fn new() -> Self {
        RpcContinuations(Mutex::new(BTreeMap::new()))
    }

    /// Drops all continuations whose timeout has elapsed.
    #[instrument(skip_all)]
    pub async fn clean(&self) {
        // `BTreeMap::retain` removes expired entries in one pass, avoiding
        // the collect-guids-then-remove two-phase sweep (and its clones).
        self.0
            .lock()
            .await
            .retain(|_, cont| !cont.is_timed_out());
    }

    /// Registers `handler` under `guid`, expiring stale entries first.
    #[instrument(skip_all)]
    pub async fn add(&self, guid: RequestGuid, handler: RpcContinuation) {
        self.clean().await;
        self.0.lock().await.insert(guid, handler);
    }

    /// Takes the websocket handler registered under `guid`, if any.
    ///
    /// The entry is removed only when it actually is a websocket
    /// continuation; a REST continuation under the same guid is untouched.
    /// Returns `None` if the handler has already timed out.
    pub async fn get_ws_handler(&self, guid: &RequestGuid) -> Option<WebSocketHandler> {
        let mut continuations = self.0.lock().await;
        if !matches!(continuations.get(guid), Some(RpcContinuation::WebSocket(_))) {
            return None;
        }
        let Some(RpcContinuation::WebSocket(x)) = continuations.remove(guid) else {
            return None;
        };
        x.get().await
    }

    /// Takes the REST handler registered under `guid`, if any.
    ///
    /// Mirrors [`Self::get_ws_handler`]: a websocket continuation under the
    /// same guid is not disturbed.
    pub async fn get_rest_handler(&self, guid: &RequestGuid) -> Option<RestHandler> {
        let mut continuations = self.0.lock().await;
        if !matches!(continuations.get(guid), Some(RpcContinuation::Rest(_))) {
            return None;
        }
        let Some(RpcContinuation::Rest(x)) = continuations.remove(guid) else {
            return None;
        };
        x.get().await
    }
}

View File

@@ -3,6 +3,7 @@ use std::fmt::Debug;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use blake3::Hash;
use futures::future::BoxFuture;
use futures::FutureExt;
use imbl::OrdMap;
@@ -11,11 +12,11 @@ use itertools::Itertools;
use tokio::io::AsyncRead;
use crate::prelude::*;
use crate::s9pk::merkle_archive::hash::{Hash, HashWriter};
use crate::s9pk::merkle_archive::sink::{Sink, TrackingWriter};
use crate::s9pk::merkle_archive::source::{ArchiveSource, DynFileSource, FileSource, Section};
use crate::s9pk::merkle_archive::write_queue::WriteQueue;
use crate::s9pk::merkle_archive::{varint, Entry, EntryContents};
use crate::util::io::ParallelBlake3Writer;
#[derive(Clone)]
pub struct DirectoryContents<S> {
@@ -155,7 +156,7 @@ impl<S: ArchiveSource> DirectoryContents<Section<S>> {
pub fn deserialize<'a>(
source: &'a S,
header: &'a mut (impl AsyncRead + Unpin + Send),
sighash: Hash,
(sighash, max_size): (Hash, u64),
) -> BoxFuture<'a, Result<Self, Error>> {
async move {
use tokio::io::AsyncReadExt;
@@ -168,15 +169,20 @@ impl<S: ArchiveSource> DirectoryContents<Section<S>> {
header.read_exact(&mut size).await?;
let size = u64::from_be_bytes(size);
ensure_code!(
size <= max_size,
ErrorKind::InvalidSignature,
"size is greater than signed"
);
let mut toc_reader = source.fetch(position, size).await?;
let len = varint::deserialize_varint(&mut toc_reader).await?;
let mut entries = OrdMap::new();
for _ in 0..len {
entries.insert(
varint::deserialize_varstring(&mut toc_reader).await?.into(),
Entry::deserialize(source, &mut toc_reader).await?,
);
let name = varint::deserialize_varstring(&mut toc_reader).await?;
let entry = Entry::deserialize(source, &mut toc_reader).await?;
entries.insert(name.into(), entry);
}
let res = Self {
@@ -233,7 +239,8 @@ impl<S: FileSource> DirectoryContents<S> {
#[instrument(skip_all)]
pub fn sighash<'a>(&'a self) -> BoxFuture<'a, Result<Hash, Error>> {
async move {
let mut hasher = TrackingWriter::new(0, HashWriter::new());
let mut hasher =
TrackingWriter::new(0, ParallelBlake3Writer::new(super::hash::BUFFER_CAPACITY));
let mut sig_contents = OrdMap::new();
for (name, entry) in &**self {
sig_contents.insert(name.clone(), entry.to_missing().await?);
@@ -244,7 +251,8 @@ impl<S: FileSource> DirectoryContents<S> {
}
.serialize_toc(&mut WriteQueue::new(0), &mut hasher)
.await?;
Ok(hasher.into_inner().finalize())
let hash = hasher.into_inner().finalize().await?;
Ok(hash)
}
.boxed()
}
@@ -267,7 +275,9 @@ impl<S: FileSource> DirectoryContents<S> {
_ => std::cmp::Ordering::Equal,
}) {
varint::serialize_varstring(&**name, w).await?;
entry.serialize_header(queue.add(entry).await?, w).await?;
if let Some(pos) = entry.serialize_header(queue.add(entry).await?, w).await? {
eprintln!("DEBUG ====> {name} @ {pos}");
}
}
Ok(())

View File

@@ -1,9 +1,10 @@
use blake3::Hash;
use tokio::io::AsyncRead;
use crate::prelude::*;
use crate::s9pk::merkle_archive::hash::{Hash, HashWriter};
use crate::s9pk::merkle_archive::sink::{Sink, TrackingWriter};
use crate::s9pk::merkle_archive::source::{ArchiveSource, DynFileSource, FileSource, Section};
use crate::util::io::ParallelBlake3Writer;
#[derive(Debug, Clone)]
pub struct FileContents<S>(S);
@@ -13,7 +14,6 @@ impl<S> FileContents<S> {
}
pub const fn header_size() -> u64 {
8 // position: u64 BE
+ 8 // size: u64 BE
}
}
impl<S: ArchiveSource> FileContents<Section<S>> {
@@ -21,6 +21,7 @@ impl<S: ArchiveSource> FileContents<Section<S>> {
pub async fn deserialize(
source: &S,
header: &mut (impl AsyncRead + Unpin + Send),
size: u64,
) -> Result<Self, Error> {
use tokio::io::AsyncReadExt;
@@ -28,27 +29,23 @@ impl<S: ArchiveSource> FileContents<Section<S>> {
header.read_exact(&mut position).await?;
let position = u64::from_be_bytes(position);
let mut size = [0u8; 8];
header.read_exact(&mut size).await?;
let size = u64::from_be_bytes(size);
Ok(Self(source.section(position, size)))
}
}
impl<S: FileSource> FileContents<S> {
pub async fn hash(&self) -> Result<Hash, Error> {
let mut hasher = TrackingWriter::new(0, HashWriter::new());
pub async fn hash(&self) -> Result<(Hash, u64), Error> {
let mut hasher =
TrackingWriter::new(0, ParallelBlake3Writer::new(super::hash::BUFFER_CAPACITY));
self.serialize_body(&mut hasher, None).await?;
Ok(hasher.into_inner().finalize())
let size = hasher.position();
let hash = hasher.into_inner().finalize().await?;
Ok((hash, size))
}
#[instrument(skip_all)]
pub async fn serialize_header<W: Sink>(&self, position: u64, w: &mut W) -> Result<u64, Error> {
use tokio::io::AsyncWriteExt;
let size = self.0.size().await?;
w.write_all(&position.to_be_bytes()).await?;
w.write_all(&size.to_be_bytes()).await?;
Ok(position)
}
@@ -56,21 +53,9 @@ impl<S: FileSource> FileContents<S> {
pub async fn serialize_body<W: Sink>(
&self,
w: &mut W,
verify: Option<Hash>,
verify: Option<(Hash, u64)>,
) -> Result<(), Error> {
let start = if verify.is_some() {
Some(w.current_position().await?)
} else {
None
};
self.0.copy_verify(w, verify).await?;
if let Some(start) = start {
ensure_code!(
w.current_position().await? - start == self.0.size().await?,
ErrorKind::Pack,
"FileSource::copy wrote a number of bytes that does not match FileSource::size"
);
}
Ok(())
}
pub fn into_dyn(self) -> FileContents<DynFileSource> {

View File

@@ -1,68 +1,57 @@
pub use blake3::Hash;
use blake3::Hasher;
use std::task::Poll;
use blake3::Hash;
use tokio::io::AsyncWrite;
use tokio_util::either::Either;
use crate::prelude::*;
use crate::util::io::{ParallelBlake3Writer, TeeWriter};
#[pin_project::pin_project]
pub struct HashWriter {
hasher: Hasher,
}
impl HashWriter {
pub fn new() -> Self {
Self {
hasher: Hasher::new(),
}
}
pub fn finalize(self) -> Hash {
self.hasher.finalize()
}
}
impl AsyncWrite for HashWriter {
fn poll_write(
self: std::pin::Pin<&mut Self>,
_cx: &mut std::task::Context<'_>,
buf: &[u8],
) -> std::task::Poll<Result<usize, std::io::Error>> {
self.project().hasher.update(buf);
std::task::Poll::Ready(Ok(buf.len()))
}
fn poll_flush(
self: std::pin::Pin<&mut Self>,
_cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Result<(), std::io::Error>> {
std::task::Poll::Ready(Ok(()))
}
fn poll_shutdown(
self: std::pin::Pin<&mut Self>,
_cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Result<(), std::io::Error>> {
std::task::Poll::Ready(Ok(()))
}
}
pub const BUFFER_CAPACITY: usize = 10 * 1024 * 1024; // 10MiB
#[pin_project::pin_project]
pub struct VerifyingWriter<W> {
verify: Option<(Hasher, Hash)>,
verify: Option<(Hash, u64)>,
#[pin]
writer: W,
writer: Either<TeeWriter<W, ParallelBlake3Writer>, W>,
}
impl<W: AsyncWrite> VerifyingWriter<W> {
pub fn new(w: W, verify: Option<Hash>) -> Self {
pub fn new(w: W, verify: Option<(Hash, u64)>) -> Self {
Self {
verify: verify.map(|v| (Hasher::new(), v)),
writer: w,
writer: if verify.is_some() {
Either::Left(TeeWriter::new(
w,
ParallelBlake3Writer::new(BUFFER_CAPACITY),
BUFFER_CAPACITY,
))
} else {
Either::Right(w)
},
verify,
}
}
pub fn verify(self) -> Result<W, Error> {
if let Some((actual, expected)) = self.verify {
ensure_code!(
actual.finalize() == expected,
ErrorKind::InvalidSignature,
"hash sum does not match"
);
}
impl<W: AsyncWrite + Unpin> VerifyingWriter<W> {
pub async fn verify(self) -> Result<W, Error> {
match self.writer {
Either::Left(writer) => {
let (writer, actual) = writer.into_inner().await?;
if let Some((expected, remaining)) = self.verify {
ensure_code!(
actual.finalize().await? == expected,
ErrorKind::InvalidSignature,
"hash sum mismatch"
);
ensure_code!(
remaining == 0,
ErrorKind::InvalidSignature,
"file size mismatch"
);
}
Ok(writer)
}
Either::Right(writer) => Ok(writer),
}
Ok(self.writer)
}
}
impl<W: AsyncWrite> AsyncWrite for VerifyingWriter<W> {
@@ -70,28 +59,35 @@ impl<W: AsyncWrite> AsyncWrite for VerifyingWriter<W> {
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
buf: &[u8],
) -> std::task::Poll<Result<usize, std::io::Error>> {
) -> Poll<Result<usize, std::io::Error>> {
let this = self.project();
match this.writer.poll_write(cx, buf) {
std::task::Poll::Ready(Ok(written)) => {
if let Some((h, _)) = this.verify {
h.update(&buf[..written]);
}
std::task::Poll::Ready(Ok(written))
if let Some((_, remaining)) = this.verify {
if *remaining < buf.len() as u64 {
return Poll::Ready(Err(std::io::Error::other(eyre!(
"attempted to write more bytes than signed"
))));
}
}
match this.writer.poll_write(cx, buf)? {
Poll::Pending => Poll::Pending,
Poll::Ready(n) => {
if let Some((_, remaining)) = this.verify {
*remaining -= n as u64;
}
Poll::Ready(Ok(n))
}
a => a,
}
}
fn poll_flush(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Result<(), std::io::Error>> {
) -> Poll<Result<(), std::io::Error>> {
self.project().writer.poll_flush(cx)
}
fn poll_shutdown(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Result<(), std::io::Error>> {
) -> Poll<Result<(), std::io::Error>> {
self.project().writer.poll_shutdown(cx)
}
}

View File

@@ -1,12 +1,14 @@
use std::path::Path;
use blake3::Hash;
use ed25519_dalek::{Signature, SigningKey, VerifyingKey};
use imbl_value::InternedString;
use sha2::{Digest, Sha512};
use tokio::io::AsyncRead;
use crate::prelude::*;
use crate::s9pk::merkle_archive::directory_contents::DirectoryContents;
use crate::s9pk::merkle_archive::file_contents::FileContents;
use crate::s9pk::merkle_archive::hash::Hash;
use crate::s9pk::merkle_archive::sink::Sink;
use crate::s9pk::merkle_archive::source::{ArchiveSource, DynFileSource, FileSource, Section};
use crate::s9pk::merkle_archive::write_queue::WriteQueue;
@@ -23,8 +25,8 @@ pub mod write_queue;
#[derive(Debug, Clone)]
enum Signer {
Signed(VerifyingKey, Signature),
Signer(SigningKey),
Signed(VerifyingKey, Signature, u64, InternedString),
Signer(SigningKey, InternedString),
}
#[derive(Debug, Clone)]
@@ -33,22 +35,23 @@ pub struct MerkleArchive<S> {
contents: DirectoryContents<S>,
}
impl<S> MerkleArchive<S> {
pub fn new(contents: DirectoryContents<S>, signer: SigningKey) -> Self {
pub fn new(contents: DirectoryContents<S>, signer: SigningKey, context: &str) -> Self {
Self {
signer: Signer::Signer(signer),
signer: Signer::Signer(signer, context.into()),
contents,
}
}
pub fn signer(&self) -> VerifyingKey {
match &self.signer {
Signer::Signed(k, _) => *k,
Signer::Signer(k) => k.verifying_key(),
Signer::Signed(k, _, _, _) => *k,
Signer::Signer(k, _) => k.verifying_key(),
}
}
pub const fn header_size() -> u64 {
32 // pubkey
+ 64 // signature
+ 32 // sighash
+ 8 // size
+ DirectoryContents::<Section<S>>::header_size()
}
pub fn contents(&self) -> &DirectoryContents<S> {
@@ -57,8 +60,8 @@ impl<S> MerkleArchive<S> {
pub fn contents_mut(&mut self) -> &mut DirectoryContents<S> {
&mut self.contents
}
pub fn set_signer(&mut self, key: SigningKey) {
self.signer = Signer::Signer(key);
pub fn set_signer(&mut self, key: SigningKey, context: &str) {
self.signer = Signer::Signer(key, context.into());
}
pub fn sort_by(
&mut self,
@@ -71,6 +74,7 @@ impl<S: ArchiveSource> MerkleArchive<Section<S>> {
#[instrument(skip_all)]
pub async fn deserialize(
source: &S,
context: &str,
header: &mut (impl AsyncRead + Unpin + Send),
) -> Result<Self, Error> {
use tokio::io::AsyncReadExt;
@@ -87,12 +91,20 @@ impl<S: ArchiveSource> MerkleArchive<Section<S>> {
header.read_exact(&mut sighash).await?;
let sighash = Hash::from_bytes(sighash);
let contents = DirectoryContents::deserialize(source, header, sighash).await?;
let mut max_size = [0u8; 8];
header.read_exact(&mut max_size).await?;
let max_size = u64::from_be_bytes(max_size);
pubkey.verify_strict(contents.sighash().await?.as_bytes(), &signature)?;
pubkey.verify_prehashed_strict(
Sha512::new_with_prefix(sighash.as_bytes()).chain_update(&u64::to_be_bytes(max_size)),
Some(context.as_bytes()),
&signature,
)?;
let contents = DirectoryContents::deserialize(source, header, (sighash, max_size)).await?;
Ok(Self {
signer: Signer::Signed(pubkey, signature),
signer: Signer::Signed(pubkey, signature, max_size, context.into()),
contents,
})
}
@@ -109,15 +121,26 @@ impl<S: FileSource> MerkleArchive<S> {
use tokio::io::AsyncWriteExt;
let sighash = self.contents.sighash().await?;
let size = self.contents.toc_size();
let (pubkey, signature) = match &self.signer {
Signer::Signed(pubkey, signature) => (*pubkey, *signature),
Signer::Signer(s) => (s.into(), ed25519_dalek::Signer::sign(s, sighash.as_bytes())),
let (pubkey, signature, max_size) = match &self.signer {
Signer::Signed(pubkey, signature, max_size, _) => (*pubkey, *signature, *max_size),
Signer::Signer(s, context) => (
s.into(),
ed25519_dalek::SigningKey::sign_prehashed(
s,
Sha512::new_with_prefix(sighash.as_bytes())
.chain_update(&u64::to_be_bytes(size)),
Some(context.as_bytes()),
)?,
size,
),
};
w.write_all(pubkey.as_bytes()).await?;
w.write_all(&signature.to_bytes()).await?;
w.write_all(sighash.as_bytes()).await?;
w.write_all(&u64::to_be_bytes(max_size)).await?;
let mut next_pos = w.current_position().await?;
next_pos += DirectoryContents::<S>::header_size();
self.contents.serialize_header(next_pos, w).await?;
@@ -137,7 +160,7 @@ impl<S: FileSource> MerkleArchive<S> {
#[derive(Debug, Clone)]
pub struct Entry<S> {
hash: Option<Hash>,
hash: Option<(Hash, u64)>,
contents: EntryContents<S>,
}
impl<S> Entry<S> {
@@ -150,7 +173,7 @@ impl<S> Entry<S> {
pub fn file(source: S) -> Self {
Self::new(EntryContents::File(FileContents::new(source)))
}
pub fn hash(&self) -> Option<Hash> {
pub fn hash(&self) -> Option<(Hash, u64)> {
self.hash
}
pub fn as_contents(&self) -> &EntryContents<S> {
@@ -189,6 +212,7 @@ impl<S> Entry<S> {
}
pub fn header_size(&self) -> u64 {
32 // hash
+ 8 // size: u64 BE
+ self.contents.header_size()
}
}
@@ -205,10 +229,14 @@ impl<S: ArchiveSource> Entry<Section<S>> {
header.read_exact(&mut hash).await?;
let hash = Hash::from_bytes(hash);
let contents = EntryContents::deserialize(source, header, hash).await?;
let mut size = [0u8; 8];
header.read_exact(&mut size).await?;
let size = u64::from_be_bytes(size);
let contents = EntryContents::deserialize(source, header, (hash, size)).await?;
Ok(Self {
hash: Some(hash),
hash: Some((hash, size)),
contents,
})
}
@@ -258,12 +286,13 @@ impl<S: FileSource> Entry<S> {
) -> Result<Option<u64>, Error> {
use tokio::io::AsyncWriteExt;
let hash = if let Some(hash) = self.hash {
let (hash, size) = if let Some(hash) = self.hash {
hash
} else {
self.contents.hash().await?
};
w.write_all(hash.as_bytes()).await?;
w.write_all(&u64::to_be_bytes(size)).await?;
self.contents.serialize_header(position, w).await
}
pub fn into_dyn(self) -> Entry<DynFileSource> {
@@ -305,7 +334,7 @@ impl<S: ArchiveSource> EntryContents<Section<S>> {
pub async fn deserialize(
source: &S,
header: &mut (impl AsyncRead + Unpin + Send),
hash: Hash,
(hash, size): (Hash, u64),
) -> Result<Self, Error> {
use tokio::io::AsyncReadExt;
@@ -313,9 +342,11 @@ impl<S: ArchiveSource> EntryContents<Section<S>> {
header.read_exact(&mut type_id).await?;
match type_id[0] {
0 => Ok(Self::Missing),
1 => Ok(Self::File(FileContents::deserialize(source, header).await?)),
1 => Ok(Self::File(
FileContents::deserialize(source, header, size).await?,
)),
2 => Ok(Self::Directory(
DirectoryContents::deserialize(source, header, hash).await?,
DirectoryContents::deserialize(source, header, (hash, size)).await?,
)),
id => Err(Error::new(
eyre!("Unknown type id {id} found in MerkleArchive"),
@@ -325,14 +356,14 @@ impl<S: ArchiveSource> EntryContents<Section<S>> {
}
}
impl<S: FileSource> EntryContents<S> {
pub async fn hash(&self) -> Result<Hash, Error> {
pub async fn hash(&self) -> Result<(Hash, u64), Error> {
match self {
Self::Missing => Err(Error::new(
eyre!("Cannot compute hash of missing file"),
ErrorKind::Pack,
)),
Self::File(f) => f.hash().await,
Self::Directory(d) => d.sighash().await,
Self::Directory(d) => Ok((d.sighash().await?, d.toc_size())),
}
}
#[instrument(skip_all)]

View File

@@ -36,6 +36,9 @@ impl<W> TrackingWriter<W> {
writer: w,
}
}
pub fn position(&self) -> u64 {
self.position
}
pub fn into_inner(self) -> W {
self.writer
}

View File

@@ -3,7 +3,7 @@ use futures::stream::BoxStream;
use futures::{StreamExt, TryStreamExt};
use reqwest::header::{ACCEPT_RANGES, CONTENT_LENGTH, RANGE};
use reqwest::{Client, Url};
use tokio::io::AsyncRead;
use tokio::io::{AsyncRead, AsyncReadExt, Take};
use tokio_util::io::StreamReader;
use crate::prelude::*;
@@ -50,9 +50,8 @@ impl HttpSource {
})
}
}
#[async_trait::async_trait]
impl ArchiveSource for HttpSource {
type Reader = HttpReader;
type Reader = Take<HttpReader>;
async fn size(&self) -> Option<u64> {
self.size
}
@@ -72,7 +71,8 @@ impl ArchiveSource for HttpSource {
.boxed()
} else {
futures::stream::empty().boxed()
}))),
}))
.take(size)),
_ => todo!(),
}
}

View File

@@ -2,6 +2,8 @@ use std::path::PathBuf;
use std::sync::Arc;
use blake3::Hash;
use futures::future::BoxFuture;
use futures::{Future, FutureExt};
use tokio::fs::File;
use tokio::io::{AsyncRead, AsyncWrite};
@@ -11,29 +13,51 @@ use crate::s9pk::merkle_archive::hash::VerifyingWriter;
pub mod http;
pub mod multi_cursor_file;
#[async_trait::async_trait]
pub trait FileSource: Clone + Send + Sync + Sized + 'static {
type Reader: AsyncRead + Unpin + Send;
async fn size(&self) -> Result<u64, Error>;
async fn reader(&self) -> Result<Self::Reader, Error>;
async fn copy<W: AsyncWrite + Unpin + Send + ?Sized>(&self, w: &mut W) -> Result<(), Error> {
tokio::io::copy(&mut self.reader().await?, w).await?;
Ok(())
}
async fn copy_verify<W: AsyncWrite + Unpin + Send + ?Sized>(
fn size(&self) -> impl Future<Output = Result<u64, Error>> + Send;
fn reader(&self) -> impl Future<Output = Result<Self::Reader, Error>> + Send;
fn copy<W: AsyncWrite + Unpin + Send + ?Sized>(
&self,
w: &mut W,
verify: Option<Hash>,
) -> Result<(), Error> {
let mut w = VerifyingWriter::new(w, verify);
tokio::io::copy(&mut self.reader().await?, &mut w).await?;
w.verify()?;
Ok(())
) -> impl Future<Output = Result<(), Error>> + Send {
async move {
tokio::io::copy(&mut self.reader().await?, w).await?;
Ok(())
}
}
async fn to_vec(&self, verify: Option<Hash>) -> Result<Vec<u8>, Error> {
let mut vec = Vec::with_capacity(self.size().await? as usize);
self.copy_verify(&mut vec, verify).await?;
Ok(vec)
fn copy_verify<W: AsyncWrite + Unpin + Send + ?Sized>(
&self,
w: &mut W,
verify: Option<(Hash, u64)>,
) -> impl Future<Output = Result<(), Error>> + Send {
async move {
let mut w = VerifyingWriter::new(w, verify);
tokio::io::copy(&mut self.reader().await?, &mut w).await?;
w.verify().await?;
Ok(())
}
}
fn to_vec(
&self,
verify: Option<(Hash, u64)>,
) -> impl Future<Output = Result<Vec<u8>, Error>> + Send {
fn to_vec(
src: &impl FileSource,
verify: Option<(Hash, u64)>,
) -> BoxFuture<Result<Vec<u8>, Error>> {
async move {
let mut vec = Vec::with_capacity(if let Some((_, size)) = &verify {
*size
} else {
src.size().await?
} as usize);
src.copy_verify(&mut vec, verify).await?;
Ok(vec)
}
.boxed()
}
to_vec(self, verify)
}
}
@@ -44,7 +68,6 @@ impl DynFileSource {
Self(Arc::new(source))
}
}
#[async_trait::async_trait]
impl FileSource for DynFileSource {
type Reader = Box<dyn AsyncRead + Unpin + Send>;
async fn size(&self) -> Result<u64, Error> {
@@ -62,11 +85,11 @@ impl FileSource for DynFileSource {
async fn copy_verify<W: AsyncWrite + Unpin + Send + ?Sized>(
&self,
mut w: &mut W,
verify: Option<Hash>,
verify: Option<(Hash, u64)>,
) -> Result<(), Error> {
self.0.copy_verify(&mut w, verify).await
}
async fn to_vec(&self, verify: Option<Hash>) -> Result<Vec<u8>, Error> {
async fn to_vec(&self, verify: Option<(Hash, u64)>) -> Result<Vec<u8>, Error> {
self.0.to_vec(verify).await
}
}
@@ -79,9 +102,9 @@ trait DynableFileSource: Send + Sync + 'static {
async fn copy_verify(
&self,
w: &mut (dyn AsyncWrite + Unpin + Send),
verify: Option<Hash>,
verify: Option<(Hash, u64)>,
) -> Result<(), Error>;
async fn to_vec(&self, verify: Option<Hash>) -> Result<Vec<u8>, Error>;
async fn to_vec(&self, verify: Option<(Hash, u64)>) -> Result<Vec<u8>, Error>;
}
#[async_trait::async_trait]
impl<T: FileSource> DynableFileSource for T {
@@ -97,16 +120,15 @@ impl<T: FileSource> DynableFileSource for T {
async fn copy_verify(
&self,
w: &mut (dyn AsyncWrite + Unpin + Send),
verify: Option<Hash>,
verify: Option<(Hash, u64)>,
) -> Result<(), Error> {
FileSource::copy_verify(self, w, verify).await
}
async fn to_vec(&self, verify: Option<Hash>) -> Result<Vec<u8>, Error> {
async fn to_vec(&self, verify: Option<(Hash, u64)>) -> Result<Vec<u8>, Error> {
FileSource::to_vec(self, verify).await
}
}
#[async_trait::async_trait]
impl FileSource for PathBuf {
type Reader = File;
async fn size(&self) -> Result<u64, Error> {
@@ -117,7 +139,6 @@ impl FileSource for PathBuf {
}
}
#[async_trait::async_trait]
impl FileSource for Arc<[u8]> {
type Reader = std::io::Cursor<Self>;
async fn size(&self) -> Result<u64, Error> {
@@ -134,21 +155,26 @@ impl FileSource for Arc<[u8]> {
}
}
#[async_trait::async_trait]
pub trait ArchiveSource: Clone + Send + Sync + Sized + 'static {
type Reader: AsyncRead + Unpin + Send;
async fn size(&self) -> Option<u64> {
None
fn size(&self) -> impl Future<Output = Option<u64>> + Send {
async { None }
}
async fn fetch(&self, position: u64, size: u64) -> Result<Self::Reader, Error>;
async fn copy_to<W: AsyncWrite + Unpin + Send + ?Sized>(
fn fetch(
&self,
position: u64,
size: u64,
) -> impl Future<Output = Result<Self::Reader, Error>> + Send;
fn copy_to<W: AsyncWrite + Unpin + Send + ?Sized>(
&self,
position: u64,
size: u64,
w: &mut W,
) -> Result<(), Error> {
tokio::io::copy(&mut self.fetch(position, size).await?, w).await?;
Ok(())
) -> impl Future<Output = Result<(), Error>> + Send {
async move {
tokio::io::copy(&mut self.fetch(position, size).await?, w).await?;
Ok(())
}
}
fn section(&self, position: u64, size: u64) -> Section<Self> {
Section {
@@ -159,7 +185,6 @@ pub trait ArchiveSource: Clone + Send + Sync + Sized + 'static {
}
}
#[async_trait::async_trait]
impl ArchiveSource for Arc<[u8]> {
type Reader = tokio::io::Take<std::io::Cursor<Self>>;
async fn fetch(&self, position: u64, size: u64) -> Result<Self::Reader, Error> {
@@ -177,7 +202,6 @@ pub struct Section<S> {
position: u64,
size: u64,
}
#[async_trait::async_trait]
impl<S: ArchiveSource> FileSource for Section<S> {
type Reader = S::Reader;
async fn size(&self) -> Result<u64, Error> {

View File

@@ -31,6 +31,16 @@ impl MultiCursorFile {
file: Arc::new(Mutex::new(File::open(path_from_fd(fd)).await?)),
})
}
pub async fn blake3_mmap(&self) -> Result<blake3::Hash, Error> {
let path = self.path();
tokio::task::spawn_blocking(move || {
let mut hasher = blake3::Hasher::new();
hasher.update_mmap_rayon(path)?;
Ok(hasher.finalize())
})
.await
.with_kind(ErrorKind::Unknown)?
}
}
impl From<File> for MultiCursorFile {
fn from(value: File) -> Self {
@@ -67,7 +77,6 @@ impl AsyncRead for FileSectionReader {
}
}
#[async_trait::async_trait]
impl ArchiveSource for MultiCursorFile {
type Reader = FileSectionReader;
async fn size(&self) -> Option<u64> {

View File

@@ -52,7 +52,7 @@ fn test(files: Vec<(PathBuf, String)>) -> Result<(), Error> {
}
}
let key = SigningKey::generate(&mut rand::thread_rng());
let mut a1 = MerkleArchive::new(root, key);
let mut a1 = MerkleArchive::new(root, key, "test");
tokio::runtime::Builder::new_current_thread()
.enable_io()
.build()
@@ -63,7 +63,7 @@ fn test(files: Vec<(PathBuf, String)>) -> Result<(), Error> {
a1.serialize(&mut TrackingWriter::new(0, &mut s1), true)
.await?;
let s1: Arc<[u8]> = s1.into();
let a2 = MerkleArchive::deserialize(&s1, &mut Cursor::new(s1.clone())).await?;
let a2 = MerkleArchive::deserialize(&s1, "test", &mut Cursor::new(s1.clone())).await?;
for (path, content) in check_set {
match a2

View File

@@ -17,6 +17,7 @@ use crate::s9pk::manifest::Manifest;
use crate::s9pk::merkle_archive::source::DynFileSource;
use crate::s9pk::merkle_archive::Entry;
use crate::s9pk::v2::compat::CONTAINER_TOOL;
use crate::s9pk::v2::SIG_CONTEXT;
use crate::s9pk::S9pk;
use crate::util::io::TmpDir;
use crate::util::serde::{apply_expr, HandlerExtSerde};
@@ -24,7 +25,7 @@ use crate::util::Invoke;
pub const SKIP_ENV: &[&str] = &["TERM", "container", "HOME", "HOSTNAME"];
pub fn s9pk() -> ParentHandler {
pub fn s9pk() -> ParentHandler<CliContext> {
ParentHandler::new()
.subcommand("edit", edit())
.subcommand("inspect", inspect())
@@ -35,9 +36,9 @@ struct S9pkPath {
s9pk: PathBuf,
}
fn edit() -> ParentHandler<S9pkPath> {
fn edit() -> ParentHandler<CliContext, S9pkPath> {
let only_parent = |a, _| a;
ParentHandler::<S9pkPath>::new()
ParentHandler::new()
.subcommand(
"add-image",
from_fn_async(add_image)
@@ -52,9 +53,9 @@ fn edit() -> ParentHandler<S9pkPath> {
)
}
fn inspect() -> ParentHandler<S9pkPath> {
fn inspect() -> ParentHandler<CliContext, S9pkPath> {
let only_parent = |a, _| a;
ParentHandler::<S9pkPath>::new()
ParentHandler::new()
.subcommand(
"file-tree",
from_fn_async(file_tree)
@@ -158,7 +159,7 @@ async fn add_image(
.invoke(ErrorKind::Docker)
.await?;
let archive = s9pk.as_archive_mut();
archive.set_signer(ctx.developer_key()?.clone());
archive.set_signer(ctx.developer_key()?.clone(), SIG_CONTEXT);
archive.contents_mut().insert_path(
Path::new("images")
.join(&arch)
@@ -213,7 +214,7 @@ async fn edit_manifest(
let tmp_path = s9pk_path.with_extension("s9pk.tmp");
let mut tmp_file = File::create(&tmp_path).await?;
s9pk.as_archive_mut()
.set_signer(ctx.developer_key()?.clone());
.set_signer(ctx.developer_key()?.clone(), SIG_CONTEXT);
s9pk.serialize(&mut tmp_file, true).await?;
tmp_file.sync_all().await?;
tokio::fs::rename(&tmp_path, &s9pk_path).await?;

View File

@@ -55,7 +55,6 @@ impl<R: AsyncRead + AsyncSeek + Unpin + Send + Sync> DockerReader<R> {
if let Some(image) = tokio_tar::Archive::new(rdr)
.entries()?
.try_filter_map(|e| {
let arch = arch.clone();
async move {
Ok(if &*e.path()? == Path::new(&format!("{}.tar", arch)) {
Some(e)

Some files were not shown because too many files have changed in this diff Show More