Compare commits

..

11 Commits

Author SHA1 Message Date
Aiden McClelland af08504576 use qemu for iso stage 2025-12-22 12:20:22 -07:00
Aiden McClelland e550c5db91 undo removal of sorting 2025-12-22 12:08:43 -07:00
Matt Hill 940feebff7 fix bug with dep error exists attr 2025-12-20 20:10:22 -07:00
Aiden McClelland 4f09d7e302 handle riscv compat 2025-12-20 15:45:15 -07:00
Mariusz Kogen b61ed14675 Fix docker platform spec in run-compat.sh 2025-12-20 17:24:52 +01:00
Aiden McClelland c93e40dc06 simplify compat 2025-12-20 08:47:35 -07:00
Aiden McClelland 74036c71cb use docker platform for iso build emulation 2025-12-20 07:21:56 -07:00
Aiden McClelland fd07470cab update build container 2025-12-20 04:59:00 -07:00
Aiden McClelland 58553182a2 fix tests 2025-12-20 03:52:59 -07:00
Aiden McClelland e8a423e67b environment-based default registry 2025-12-20 03:44:23 -07:00
Aiden McClelland e859c1adb1 refactor project structure 2025-12-20 02:55:49 -07:00
200 changed files with 2527 additions and 4872 deletions

View File

@@ -278,7 +278,7 @@ ts-bindings: core/bindings/index.ts
core/bindings/index.ts: $(call ls-files, core) $(ENVIRONMENT_FILE)
rm -rf core/bindings
./core/build/build-ts.sh
ls core/bindings/*.ts | sed 's/core\/bindings\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' | grep -v '"./index"' | tee core/bindings/index.ts
ls core/bindings/*.ts | sed 's/core\/startos\/bindings\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' | grep -v '"./index"' | tee core/bindings/index.ts
npm --prefix sdk exec -- prettier --config ./sdk/base/package.json -w ./core/bindings/*.ts
touch core/bindings/index.ts
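
As a side note, a minimal sketch of what that ls | sed pipeline emits, using made-up binding file names (runs anywhere):

printf '%s\n' core/bindings/Manifest.ts core/bindings/Config.ts \
  | sed 's/core\/bindings\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' \
  | grep -v '"./index"'
# prints:
#   export { Manifest } from "./Manifest";
#   export { Config } from "./Config";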

View File

@@ -3,7 +3,6 @@ avahi-utils
b3sum
bash-completion
beep
binfmt-support
bmon
btrfs-progs
ca-certificates
@@ -16,7 +15,6 @@ dnsutils
dosfstools
e2fsprogs
ecryptfs-utils
equivs
exfatprogs
flashrom
fuse3

View File

@@ -9,9 +9,6 @@ FEATURES+=("${ARCH}")
if [ "$ARCH" != "$PLATFORM" ]; then
FEATURES+=("${PLATFORM}")
fi
if [[ "$PLATFORM" =~ -nonfree$ ]]; then
FEATURES+=("nonfree")
fi
feature_file_checker='
/^#/ { next }

View File

@@ -1,10 +0,0 @@
+ firmware-amd-graphics
+ firmware-atheros
+ firmware-brcm80211
+ firmware-iwlwifi
+ firmware-libertas
+ firmware-misc-nonfree
+ firmware-realtek
+ nvidia-container-toolkit
# + nvidia-driver
# + nvidia-kernel-dkms

View File

@@ -73,7 +73,7 @@ if [ "$NON_FREE" = 1 ]; then
if [ "$IB_SUITE" = "bullseye" ]; then
ARCHIVE_AREAS="$ARCHIVE_AREAS non-free"
else
ARCHIVE_AREAS="$ARCHIVE_AREAS non-free non-free-firmware"
ARCHIVE_AREAS="$ARCHIVE_AREAS non-free-firmware"
fi
fi
@@ -174,123 +174,40 @@ if [ "${IB_TARGET_PLATFORM}" = "rockchip64" ]; then
echo "deb https://apt.armbian.com/ ${IB_SUITE} main" > config/archives/armbian.list
fi
if [ "$NON_FREE" = 1 ]; then
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | gpg --dearmor -o config/archives/nvidia-container-toolkit.key
curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list \
| sed 's#deb https://#deb [signed-by=/etc/apt/trusted.gpg.d/nvidia-container-toolkit.key.gpg] https://#g' \
> config/archives/nvidia-container-toolkit.list
fi
cat > config/archives/backports.pref <<-EOF
cat > config/archives/backports.pref <<- EOF
Package: linux-image-*
Pin: release n=${IB_SUITE}-backports
Pin-Priority: 500
Package: linux-headers-*
Pin: release n=${IB_SUITE}-backports
Pin-Priority: 500
Package: *nvidia*
Pin: release n=${IB_SUITE}-backports
Pin-Priority: 500
EOF
# Hooks
# Dependencies
## Firmware
if [ "$NON_FREE" = 1 ]; then
echo 'firmware-iwlwifi firmware-misc-nonfree firmware-brcm80211 firmware-realtek firmware-atheros firmware-libertas firmware-amd-graphics' > config/package-lists/nonfree.list.chroot
fi
cat > config/hooks/normal/9000-install-startos.hook.chroot << EOF
#!/bin/bash
set -e
if [ "${NON_FREE}" = "1" ] && [ "${IB_TARGET_PLATFORM}" != "raspberrypi" ]; then
# install a specific NVIDIA driver version
# ---------------- configuration ----------------
NVIDIA_DRIVER_VERSION="\${NVIDIA_DRIVER_VERSION:-580.119.02}"
BASE_URL="https://download.nvidia.com/XFree86/Linux-${QEMU_ARCH}"
echo "[nvidia-hook] Using NVIDIA driver: \${NVIDIA_DRIVER_VERSION}" >&2
# ---------------- kernel version ----------------
# Determine target kernel version from newest /boot/vmlinuz-* in the chroot.
KVER="\$(
ls -1t /boot/vmlinuz-* 2>/dev/null \
| head -n1 \
| sed 's|.*/vmlinuz-||'
)"
if [ -z "\${KVER}" ]; then
echo "[nvidia-hook] ERROR: no /boot/vmlinuz-* found; cannot determine kernel version" >&2
exit 1
fi
echo "[nvidia-hook] Target kernel version: \${KVER}" >&2
# Ensure kernel headers are present
TEMP_APT_DEPS=(build-essential)
if [ ! -e "/lib/modules/\${KVER}/build" ]; then
TEMP_APT_DEPS+=(linux-headers-\${KVER})
fi
echo "[nvidia-hook] Installing build dependencies" >&2
/usr/lib/startos/scripts/install-equivs <<-EOF
Package: nvidia-depends
Version: \${NVIDIA_DRIVER_VERSION}
Section: unknown
Priority: optional
Depends: \${dep_list="\$(IFS=', '; echo "\${TEMP_APT_DEPS[*]}")"}
EOF
# ---------------- download and run installer ----------------
RUN_NAME="NVIDIA-Linux-${QEMU_ARCH}-\${NVIDIA_DRIVER_VERSION}.run"
RUN_PATH="/root/\${RUN_NAME}"
RUN_URL="\${BASE_URL}/\${NVIDIA_DRIVER_VERSION}/\${RUN_NAME}"
echo "[nvidia-hook] Downloading \${RUN_URL}" >&2
wget -O "\${RUN_PATH}" "\${RUN_URL}"
chmod +x "\${RUN_PATH}"
echo "[nvidia-hook] Running NVIDIA installer for kernel \${KVER}" >&2
sh "\${RUN_PATH}" \
--silent \
--kernel-name="\${KVER}" \
--no-x-check \
--no-nouveau-check \
--no-runlevel-check
# Rebuild module metadata
echo "[nvidia-hook] Running depmod for \${KVER}" >&2
depmod -a "\${KVER}"
echo "[nvidia-hook] NVIDIA \${NVIDIA_DRIVER_VERSION} installation complete for kernel \${KVER}" >&2
echo "[nvidia-hook] Removing build dependencies..." >&2
apt-get purge -y nvidia-depends
apt-get autoremove -y
echo "[nvidia-hook] Removed build dependencies." >&2
fi
cp /etc/resolv.conf /etc/resolv.conf.bak
if [ "${IB_SUITE}" = trixie ] && [ "${IB_TARGET_ARCH}" != riscv64 ]; then
echo 'deb https://deb.debian.org/debian/ bookworm main' > /etc/apt/sources.list.d/bookworm.list
apt-get update
apt-get install -y postgresql-15
rm /etc/apt/sources.list.d/bookworm.list
apt-get update
systemctl mask postgresql
echo 'deb https://deb.debian.org/debian/ bookworm main' > /etc/apt/sources.list.d/bookworm.list
apt-get update
apt-get install -y postgresql-15
rm /etc/apt/sources.list.d/bookworm.list
apt-get update
systemctl mask postgresql
fi
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
ln -sf /usr/bin/pi-beep /usr/local/bin/beep
KERNEL_VERSION=${RPI_KERNEL_VERSION} sh /boot/config.sh > /boot/config.txt
mkinitramfs -c gzip -o initrd.img-${RPI_KERNEL_VERSION}-rpi-v8 ${RPI_KERNEL_VERSION}-rpi-v8
mkinitramfs -c gzip -o initrd.img-${RPI_KERNEL_VERSION}-rpi-2712 ${RPI_KERNEL_VERSION}-rpi-2712
ln -sf /usr/bin/pi-beep /usr/local/bin/beep
KERNEL_VERSION=${RPI_KERNEL_VERSION} sh /boot/config.sh > /boot/config.txt
mkinitramfs -c gzip -o initrd.img-${RPI_KERNEL_VERSION}-rpi-v8 ${RPI_KERNEL_VERSION}-rpi-v8
mkinitramfs -c gzip -o initrd.img-${RPI_KERNEL_VERSION}-rpi-2712 ${RPI_KERNEL_VERSION}-rpi-2712
fi
useradd --shell /bin/bash -G startos -m start9
@@ -301,11 +218,11 @@ usermod -aG systemd-journal start9
echo "start9 ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee "/etc/sudoers.d/010_start9-nopasswd"
if [ "${IB_TARGET_PLATFORM}" != "raspberrypi" ]; then
/usr/lib/startos/scripts/enable-kiosk
/usr/lib/startos/scripts/enable-kiosk
fi
if ! [[ "${IB_OS_ENV}" =~ (^|-)dev($|-) ]]; then
passwd -l start9
passwd -l start9
fi
EOF
@@ -443,4 +360,4 @@ elif [ "${IMAGE_TYPE}" = img ]; then
fi
chown $IB_UID:$IB_UID $RESULTS_DIR/$IMAGE_BASENAME.*
chown $IB_UID:$IB_UID $RESULTS_DIR/$IMAGE_BASENAME.*
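
The backports.pref heredoc earlier in this hook pins linux-image-*, linux-headers-*, and *nvidia* to the backports suite at priority 500. A quick way to confirm the pin took effect inside the chroot (the package name here is just an example):

apt-cache policy linux-image-amd64   # shows candidate version and pin priority per source
apt-cache policy                     # summary of all sources and their priorities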

View File

@@ -1 +1 @@
usb-storage.quirks=152d:0562:u,14cd:121c:u,0781:cfcb:u console=serial0,115200 console=tty1 root=PARTUUID=cb15ae4d-02 rootfstype=ext4 fsck.repair=yes rootwait cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory boot=startos
usb-storage.quirks=152d:0562:u,14cd:121c:u,0781:cfcb:u console=serial0,115200 console=tty1 root=PARTUUID=cb15ae4d-02 rootfstype=ext4 fsck.repair=yes rootwait cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory quiet boot=startos

View File

@@ -4,7 +4,7 @@ parse_essential_db_info() {
DB_DUMP="/tmp/startos_db.json"
if command -v start-cli >/dev/null 2>&1; then
timeout 30 start-cli db dump > "$DB_DUMP" 2>/dev/null || return 1
start-cli db dump > "$DB_DUMP" 2>/dev/null || return 1
else
return 1
fi
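
For context on the timeout wrapper: coreutils timeout exits with status 124 when it kills the command, so the || return 1 fallback covers hangs as well as hard failures. A standalone sketch:

if timeout 2 sleep 5; then
  echo "finished"
else
  echo "timed out or failed (status $?)"   # 124 here, since sleep 5 outlives the 2s limit
fi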

View File

@@ -63,7 +63,7 @@ mount --bind /proc /media/startos/next/proc
mount --bind /boot /media/startos/next/boot
mount --bind /media/startos/root /media/startos/next/media/startos/root
if mountpoint /sys/firmware/efi/efivars 2>&1 > /dev/null; then
if mountpoint /sys/firmware/efi/efivars 2> /dev/null; then
mount --bind /sys/firmware/efi/efivars /media/startos/next/sys/firmware/efi/efivars
fi
@@ -75,7 +75,7 @@ else
CHROOT_RES=$?
fi
if mountpoint /media/startos/next/sys/firmware/efi/efivars 2>&1 > /dev/null; then
if mountpoint /media/startos/next/sys/firmware/efi/efivars 2> /dev/null; then
umount /media/startos/next/sys/firmware/efi/efivars
fi
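
The redirection fix above matters because redirections apply left to right. A sketch of the difference (mountpoint ships with util-linux):

mountpoint / 2>&1 > /dev/null   # stderr duplicated to the tty first, then only stdout silenced
mountpoint / 2> /dev/null       # stderr silenced; stdout still prints
mountpoint / > /dev/null 2>&1   # both silenced: stdout first, then stderr duplicated onto it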

View File

@@ -35,20 +35,16 @@ if [ "$UNDO" = 1 ]; then
exit $err
fi
# DNAT: rewrite destination for incoming packets (external traffic)
iptables -t nat -A ${NAME}_PREROUTING -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_PREROUTING -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
# DNAT: rewrite destination for locally-originated packets (hairpin from host itself)
iptables -t nat -A ${NAME}_OUTPUT -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_OUTPUT -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
# MASQUERADE: rewrite source for all forwarded traffic to the destination
# This ensures responses are routed back through the host regardless of source IP
iptables -t nat -A ${NAME}_POSTROUTING -d "$dip" -p tcp --dport "$dport" -j MASQUERADE
iptables -t nat -A ${NAME}_POSTROUTING -d "$dip" -p udp --dport "$dport" -j MASQUERADE
iptables -t nat -A ${NAME}_PREROUTING -s "$dip/$dprefix" -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_PREROUTING -s "$dip/$dprefix" -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_POSTROUTING -s "$dip/$dprefix" -d "$dip" -p tcp --dport "$dport" -j MASQUERADE
iptables -t nat -A ${NAME}_POSTROUTING -s "$dip/$dprefix" -d "$dip" -p udp --dport "$dport" -j MASQUERADE
# Allow new connections to be forwarded to the destination
iptables -A ${NAME}_FORWARD -d $dip -p tcp --dport $dport -m state --state NEW -j ACCEPT
iptables -A ${NAME}_FORWARD -d $dip -p udp --dport $dport -m state --state NEW -j ACCEPT
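
A minimal standalone sketch of the DNAT plus MASQUERADE pattern above, with made-up addresses (run as root): forward 203.0.113.10:8080 to 10.0.0.5:80 and masquerade so replies route back through this host.

sip=203.0.113.10 sport=8080 dip=10.0.0.5 dport=80
iptables -t nat -A PREROUTING  -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A POSTROUTING -d "$dip" -p tcp --dport "$dport" -j MASQUERADE
iptables -A FORWARD -d "$dip" -p tcp --dport "$dport" -m state --state NEW -j ACCEPT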

View File

@@ -1,20 +0,0 @@
#!/bin/bash
export DEBIAN_FRONTEND=noninteractive
export DEBCONF_NONINTERACTIVE_SEEN=true
TMP_DIR=$(mktemp -d)
(
set -e
cd $TMP_DIR
cat > control.equivs
equivs-build control.equivs
apt-get install -y ./*.deb < /dev/null
)
rm -rf $TMP_DIR
echo Install complete. >&2
exit 0
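
For reference, the removed helper was driven by a Debian control file on stdin; equivs-build turns it into an installable metapackage. A hypothetical invocation (package name and dependencies made up):

/usr/lib/startos/scripts/install-equivs <<EOF
Package: build-depends-meta
Version: 1.0
Depends: build-essential, linux-headers-amd64
EOF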

View File

@@ -50,12 +50,12 @@ mount --bind /proc /media/startos/next/proc
mount --bind /boot /media/startos/next/boot
mount --bind /media/startos/root /media/startos/next/media/startos/root
if mountpoint /boot/efi 2>&1 > /dev/null; then
if mountpoint /boot/efi 2> /dev/null; then
mkdir -p /media/startos/next/boot/efi
mount --bind /boot/efi /media/startos/next/boot/efi
fi
if mountpoint /sys/firmware/efi/efivars 2>&1 > /dev/null; then
if mountpoint /sys/firmware/efi/efivars 2> /dev/null; then
mount --bind /sys/firmware/efi/efivars /media/startos/next/sys/firmware/efi/efivars
fi


build/raspberrypi/make-image.sh Executable file
View File

@@ -0,0 +1,87 @@
#!/bin/bash
set -e
function partition_for () {
if [[ "$1" =~ [0-9]+$ ]]; then
echo "$1p$2"
else
echo "$1$2"
fi
}
VERSION=$(cat VERSION.txt)
ENVIRONMENT=$(cat ENVIRONMENT.txt)
GIT_HASH=$(cat GIT_HASH.txt | head -c 7)
DATE=$(date +%Y%m%d)
ROOT_PART_END=7217792
VERSION_FULL="$VERSION-$GIT_HASH"
if [ -n "$ENVIRONMENT" ]; then
VERSION_FULL="$VERSION_FULL~$ENVIRONMENT"
fi
TARGET_NAME=startos-${VERSION_FULL}-${DATE}_raspberrypi.img
TARGET_SIZE=$[($ROOT_PART_END+1)*512]
rm -f $TARGET_NAME
truncate -s $TARGET_SIZE $TARGET_NAME
(
echo o
echo x
echo i
echo "0xcb15ae4d"
echo r
echo n
echo p
echo 1
echo 2048
echo 526335
echo t
echo c
echo n
echo p
echo 2
echo 526336
echo $ROOT_PART_END
echo a
echo 1
echo w
) | fdisk $TARGET_NAME
OUTPUT_DEVICE=$(sudo losetup --show -fP $TARGET_NAME)
sudo mkfs.ext4 `partition_for ${OUTPUT_DEVICE} 2`
sudo mkfs.vfat `partition_for ${OUTPUT_DEVICE} 1`
TMPDIR=$(mktemp -d)
sudo mount `partition_for ${OUTPUT_DEVICE} 2` $TMPDIR
sudo mkdir $TMPDIR/boot
sudo mount `partition_for ${OUTPUT_DEVICE} 1` $TMPDIR/boot
sudo unsquashfs -f -d $TMPDIR startos.raspberrypi.squashfs
REAL_GIT_HASH=$(cat $TMPDIR/usr/lib/startos/GIT_HASH.txt)
REAL_VERSION=$(cat $TMPDIR/usr/lib/startos/VERSION.txt)
REAL_ENVIRONMENT=$(cat $TMPDIR/usr/lib/startos/ENVIRONMENT.txt)
sudo sed -i 's| boot=startos| init=/usr/lib/startos/scripts/init_resize\.sh|' $TMPDIR/boot/cmdline.txt
sudo cp ./build/raspberrypi/fstab $TMPDIR/etc/
sudo cp ./build/raspberrypi/init_resize.sh $TMPDIR/usr/lib/startos/scripts/init_resize.sh
sudo umount $TMPDIR/boot
sudo umount $TMPDIR
sudo losetup -d $OUTPUT_DEVICE
if [ "$ALLOW_VERSION_MISMATCH" != 1 ]; then
if [ "$(cat GIT_HASH.txt)" != "$REAL_GIT_HASH" ]; then
>&2 echo "startos.raspberrypi.squashfs GIT_HASH.txt mismatch"
>&2 echo "expected $REAL_GIT_HASH (dpkg) found $(cat GIT_HASH.txt) (repo)"
exit 1
fi
if [ "$(cat VERSION.txt)" != "$REAL_VERSION" ]; then
>&2 echo "startos.raspberrypi.squashfs VERSION.txt mismatch"
exit 1
fi
if [ "$(cat ENVIRONMENT.txt)" != "$REAL_ENVIRONMENT" ]; then
>&2 echo "startos.raspberrypi.squashfs ENVIRONMENT.txt mismatch"
exit 1
fi
fi
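
The partition_for helper at the top of this script mirrors kernel block-device naming: a "p" separator is inserted only when the device name ends in a digit. Illustrative calls, assuming the function above is sourced:

partition_for /dev/sda 2      # -> /dev/sda2
partition_for /dev/nvme0n1 2  # -> /dev/nvme0n1p2
partition_for /dev/loop0 1    # -> /dev/loop0p1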

View File

@@ -5,24 +5,25 @@ if [ -z "$VERSION" ]; then
exit 2
fi
if [ -z "$RUN_ID" ]; then
>&2 echo '$RUN_ID required'
exit 2
fi
set -e
if [ "$SKIP_DL" != "1" ]; then
if [ "$SKIP_CLEAN" != "1" ]; then
rm -rf ~/Downloads/v$VERSION
mkdir ~/Downloads/v$VERSION
cd ~/Downloads/v$VERSION
fi
rm -rf ~/Downloads/v$VERSION
mkdir ~/Downloads/v$VERSION
cd ~/Downloads/v$VERSION
if [ -n "$RUN_ID" ]; then
for arch in aarch64 aarch64-nonfree riscv64 riscv64-nonfree x86_64 x86_64-nonfree raspberrypi; do
while ! gh run download -R Start9Labs/start-os $RUN_ID -n $arch.squashfs -D $(pwd); do sleep 1; done
done
for arch in aarch64 aarch64-nonfree riscv64 riscv64-nonfree x86_64 x86_64-nonfree; do
while ! gh run download -R Start9Labs/start-os $RUN_ID -n $arch.iso -D $(pwd); do sleep 1; done
done
while ! gh run download -R Start9Labs/start-os $RUN_ID -n raspberrypi.img -D $(pwd); do sleep 1; done
fi
for arch in aarch64 aarch64-nonfree riscv64 x86_64 x86_64-nonfree raspberrypi; do
while ! gh run download -R Start9Labs/start-os $RUN_ID -n $arch.squashfs -D $(pwd); do sleep 1; done
done
for arch in aarch64 aarch64-nonfree riscv64 x86_64 x86_64-nonfree; do
while ! gh run download -R Start9Labs/start-os $RUN_ID -n $arch.iso -D $(pwd); do sleep 1; done
done
while ! gh run download -R Start9Labs/start-os $RUN_ID -n raspberrypi.img -D $(pwd); do sleep 1; done
if [ -n "$ST_RUN_ID" ]; then
for arch in aarch64 riscv64 x86_64; do
@@ -69,7 +70,7 @@ elif [ "$SKIP_UL" != "1" ]; then
fi
if [ "$SKIP_INDEX" != "1" ]; then
for arch in aarch64 aarch64-nonfree riscv64 riscv64-nonfree x86_64 x86_64-nonfree; do
for arch in aarch64 aarch64-nonfree x86_64 x86_64-nonfree; do
for file in *_$arch.squashfs *_$arch.iso; do
start-cli --registry=https://alpha-registry-x.start9.com registry os asset add --platform=$arch --version=$VERSION $file https://github.com/Start9Labs/start-os/releases/download/v$VERSION/$(echo -n "$file" | sed 's/~/./g')
done
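
The while ! gh run download ...; do sleep 1; done lines are a bare retry loop. The same idiom factored into a helper (hypothetical, not in the repo):

retry() {
  until "$@"; do sleep 1; done   # rerun the command until it succeeds
}
retry gh run download -R Start9Labs/start-os "$RUN_ID" -n x86_64.iso -D "$(pwd)"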

View File

@@ -38,7 +38,7 @@
},
"../sdk/dist": {
"name": "@start9labs/start-sdk",
"version": "0.4.0-beta.47",
"version": "0.4.0-beta.45",
"license": "MIT",
"dependencies": {
"@iarna/toml": "^3.0.0",

View File

@@ -178,13 +178,6 @@ export function makeEffects(context: EffectContext): Effects {
T.Effects["getInstalledPackages"]
>
},
getServiceManifest(
...[options]: Parameters<T.Effects["getServiceManifest"]>
) {
return rpcRound("get-service-manifest", options) as ReturnType<
T.Effects["getServiceManifest"]
>
},
subcontainer: {
createFs(options: { imageId: string; name: string }) {
return rpcRound("subcontainer.create-fs", options) as ReturnType<

View File

@@ -10,6 +10,7 @@ import { SDKManifest } from "@start9labs/start-sdk/base/lib/types"
import { SubContainerRc } from "@start9labs/start-sdk/package/lib/util/SubContainer"
const EMBASSY_HEALTH_INTERVAL = 15 * 1000
const EMBASSY_PROPERTIES_LOOP = 30 * 1000
/**
* We wanted something to represent what the main loop is doing, and
* in this case it used to run the properties, health, and the docker/ js main.

View File

@@ -50,7 +50,6 @@ import {
transformOldConfigToNew,
} from "./transformConfigSpec"
import { partialDiff } from "@start9labs/start-sdk/base/lib/util"
import { Volume } from "@start9labs/start-sdk/package/lib/util/Volume"
type Optional<A> = A | undefined | null
function todo(): never {
@@ -62,14 +61,14 @@ export const EMBASSY_JS_LOCATION = "/usr/lib/startos/package/embassy.js"
const configFile = FileHelper.json(
{
base: new Volume("embassy"),
volumeId: "embassy",
subpath: "config.json",
},
matches.any,
)
const dependsOnFile = FileHelper.json(
{
base: new Volume("embassy"),
volumeId: "embassy",
subpath: "dependsOn.json",
},
dictionary([string, array(string)]),
@@ -331,10 +330,6 @@ export class SystemForEmbassy implements System {
) {
this.version.upstream.prerelease = ["alpha"]
}
if (this.manifest.id === "nostr") {
this.manifest.id = "nostr-rs-relay"
}
}
async init(

View File

@@ -15,7 +15,4 @@ case $ARCH in
DOCKER_PLATFORM=linux/arm64;;
esac
docker run --rm $USE_TTY --platform=$DOCKER_PLATFORM -eARCH --privileged -v "$(pwd):/root/start-os" start9/build-env /root/start-os/container-runtime/update-image.sh
if [ "$(ls -nd "rootfs.${ARCH}.squashfs" | awk '{ print $3 }')" != "$UID" ]; then
docker run --rm $USE_TTY -v "$(pwd):/root/start-os" start9/build-env chown -R $UID:$UID /root/start-os/container-runtime
fi
docker run --rm $USE_TTY --platform=$DOCKER_PLATFORM -eARCH --privileged -v "$(pwd):/root/start-os" start9/build-env /root/start-os/container-runtime/update-image.sh

core/.gitignore vendored
View File

@@ -8,4 +8,4 @@ secrets.db
.env
.editorconfig
proptest-regressions/**/*
/bindings/*
/startos/bindings/*

core/Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -15,7 +15,7 @@ license = "MIT"
name = "start-os"
readme = "README.md"
repository = "https://github.com/Start9Labs/start-os"
version = "0.4.0-alpha.17" # VERSION_BUMP
version = "0.4.0-alpha.16" # VERSION_BUMP
[lib]
name = "startos"

View File

@@ -184,11 +184,7 @@ async fn cli_login<C: SessionAuthContext>(
where
CliContext: CallRemote<C>,
{
let password = if let Ok(password) = std::env::var("PASSWORD") {
password
} else {
rpassword::prompt_password("Password: ")?
};
let password = rpassword::prompt_password("Password: ")?;
ctx.call_remote::<C>(
&parent_method.into_iter().chain(method).join("."),

View File

@@ -11,89 +11,67 @@ pub mod startd;
pub mod tunnel;
#[derive(Default)]
pub struct MultiExecutable {
default: Option<&'static str>,
bins: BTreeMap<&'static str, fn(VecDeque<OsString>)>,
}
pub struct MultiExecutable(BTreeMap<&'static str, fn(VecDeque<OsString>)>);
impl MultiExecutable {
pub fn enable_startd(&mut self) -> &mut Self {
self.bins.insert("startd", startd::main);
self.bins
self.0.insert("startd", startd::main);
self.0
.insert("embassyd", |_| deprecated::renamed("embassyd", "startd"));
self.bins
self.0
.insert("embassy-init", |_| deprecated::removed("embassy-init"));
self
}
pub fn enable_start_cli(&mut self) -> &mut Self {
self.bins.insert("start-cli", start_cli::main);
self.bins.insert("embassy-cli", |_| {
self.0.insert("start-cli", start_cli::main);
self.0.insert("embassy-cli", |_| {
deprecated::renamed("embassy-cli", "start-cli")
});
self.bins
self.0
.insert("embassy-sdk", |_| deprecated::removed("embassy-sdk"));
self
}
pub fn enable_start_container(&mut self) -> &mut Self {
self.bins.insert("start-container", container_cli::main);
self.0.insert("start-container", container_cli::main);
self
}
pub fn enable_start_registryd(&mut self) -> &mut Self {
self.bins.insert("start-registryd", registry::main);
self.0.insert("start-registryd", registry::main);
self
}
pub fn enable_start_registry(&mut self) -> &mut Self {
self.bins.insert("start-registry", registry::cli);
self.0.insert("start-registry", registry::cli);
self
}
pub fn enable_start_tunneld(&mut self) -> &mut Self {
self.bins.insert("start-tunneld", tunnel::main);
self.0.insert("start-tunneld", tunnel::main);
self
}
pub fn enable_start_tunnel(&mut self) -> &mut Self {
self.bins.insert("start-tunnel", tunnel::cli);
self
}
pub fn set_default(&mut self, name: &str) -> &mut Self {
if let Some((name, _)) = self.bins.get_key_value(name) {
self.default = Some(*name);
} else {
panic!("{name} does not exist in MultiExecutable");
}
self.0.insert("start-tunnel", tunnel::cli);
self
}
fn select_executable(&self, name: &str) -> Option<fn(VecDeque<OsString>)> {
self.bins.get(&name).copied()
self.0.get(&name).copied()
}
pub fn execute(&self) {
let mut popped = Vec::with_capacity(2);
let mut args = std::env::args_os().collect::<VecDeque<_>>();
for _ in 0..2 {
if let Some(s) = args.pop_front() {
if let Some(name) = Path::new(&*s).file_name().and_then(|s| s.to_str()) {
if name == "--contents" {
for name in self.bins.keys() {
for name in self.0.keys() {
println!("{name}");
}
return;
}
if let Some(x) = self.select_executable(&name) {
args.push_front(s);
return x(args);
}
}
popped.push(s);
}
}
if let Some(default) = self.default {
while let Some(arg) = popped.pop() {
args.push_front(arg);
}
return self.bins[default](args);
}
let args = std::env::args().collect::<VecDeque<_>>();
eprintln!(
"unknown executable: {}",

View File

@@ -7,7 +7,6 @@ use std::sync::Arc;
use cookie::{Cookie, Expiration, SameSite};
use cookie_store::CookieStore;
use http::HeaderMap;
use imbl::OrdMap;
use imbl_value::InternedString;
use josekit::jwk::Jwk;
use once_cell::sync::OnceCell;
@@ -239,16 +238,10 @@ impl CliContext {
where
Self: CallRemote<RemoteContext>,
{
<Self as CallRemote<RemoteContext, Empty>>::call_remote(
&self,
method,
OrdMap::new(),
params,
Empty {},
)
.await
.map_err(Error::from)
.with_ctx(|e| (e.kind, method))
<Self as CallRemote<RemoteContext, Empty>>::call_remote(&self, method, params, Empty {})
.await
.map_err(Error::from)
.with_ctx(|e| (e.kind, method))
}
pub async fn call_remote_with<RemoteContext, T>(
&self,
@@ -259,16 +252,10 @@ impl CliContext {
where
Self: CallRemote<RemoteContext, T>,
{
<Self as CallRemote<RemoteContext, T>>::call_remote(
&self,
method,
OrdMap::new(),
params,
extra,
)
.await
.map_err(Error::from)
.with_ctx(|e| (e.kind, method))
<Self as CallRemote<RemoteContext, T>>::call_remote(&self, method, params, extra)
.await
.map_err(Error::from)
.with_ctx(|e| (e.kind, method))
}
}
impl AsRef<Jwk> for CliContext {
@@ -305,13 +292,7 @@ impl AsRef<Client> for CliContext {
}
impl CallRemote<RpcContext> for CliContext {
async fn call_remote(
&self,
method: &str,
_: OrdMap<&'static str, Value>,
params: Value,
_: Empty,
) -> Result<Value, RpcError> {
async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
if let Ok(local) = read_file_to_string(RpcContext::LOCAL_AUTH_COOKIE_PATH).await {
self.cookie_store
.lock()
@@ -338,13 +319,7 @@ impl CallRemote<RpcContext> for CliContext {
}
}
impl CallRemote<DiagnosticContext> for CliContext {
async fn call_remote(
&self,
method: &str,
_: OrdMap<&'static str, Value>,
params: Value,
_: Empty,
) -> Result<Value, RpcError> {
async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
crate::middleware::auth::signature::call_remote(
self,
self.rpc_url.clone(),
@@ -357,13 +332,7 @@ impl CallRemote<DiagnosticContext> for CliContext {
}
}
impl CallRemote<InitContext> for CliContext {
async fn call_remote(
&self,
method: &str,
_: OrdMap<&'static str, Value>,
params: Value,
_: Empty,
) -> Result<Value, RpcError> {
async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
crate::middleware::auth::signature::call_remote(
self,
self.rpc_url.clone(),
@@ -376,13 +345,7 @@ impl CallRemote<InitContext> for CliContext {
}
}
impl CallRemote<SetupContext> for CliContext {
async fn call_remote(
&self,
method: &str,
_: OrdMap<&'static str, Value>,
params: Value,
_: Empty,
) -> Result<Value, RpcError> {
async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
crate::middleware::auth::signature::call_remote(
self,
self.rpc_url.clone(),
@@ -395,13 +358,7 @@ impl CallRemote<SetupContext> for CliContext {
}
}
impl CallRemote<InstallContext> for CliContext {
async fn call_remote(
&self,
method: &str,
_: OrdMap<&'static str, Value>,
params: Value,
_: Empty,
) -> Result<Value, RpcError> {
async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
crate::middleware::auth::signature::call_remote(
self,
self.rpc_url.clone(),

View File

@@ -15,7 +15,6 @@ use josekit::jwk::Jwk;
use reqwest::{Client, Proxy};
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{CallRemote, Context, Empty};
use tokio::process::Command;
use tokio::sync::{RwLock, broadcast, oneshot, watch};
use tokio::time::Instant;
use tracing::instrument;
@@ -27,10 +26,6 @@ use crate::context::config::ServerConfig;
use crate::db::model::Database;
use crate::db::model::package::TaskSeverity;
use crate::disk::OsPartitionInfo;
use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::block_dev::BlockDev;
use crate::disk::mount::filesystem::{FileSystem, ReadOnly};
use crate::disk::mount::guard::MountGuard;
use crate::init::{InitResult, check_time_is_synchronized};
use crate::install::PKG_ARCHIVE_DIR;
use crate::lxc::LxcManager;
@@ -46,14 +41,12 @@ use crate::rpc_continuations::{Guid, OpenAuthedContinuations, RpcContinuations};
use crate::service::ServiceMap;
use crate::service::action::update_tasks;
use crate::service::effects::callbacks::ServiceCallbacks;
use crate::service::effects::subcontainer::NVIDIA_OVERLAY_PATH;
use crate::shutdown::Shutdown;
use crate::util::Invoke;
use crate::util::future::NonDetachingJoinHandle;
use crate::util::io::{TmpDir, delete_file};
use crate::util::io::delete_file;
use crate::util::lshw::LshwDevice;
use crate::util::sync::{SyncMutex, SyncRwLock, Watch};
use crate::{ActionId, DATA_DIR, PLATFORM, PackageId};
use crate::{ActionId, DATA_DIR, PackageId};
pub struct RpcContextSeed {
is_closed: AtomicBool,
@@ -174,124 +167,6 @@ impl RpcContext {
init_net_ctrl.complete();
tracing::info!("Initialized Net Controller");
if PLATFORM.ends_with("-nonfree") {
if let Err(e) = Command::new("nvidia-smi")
.invoke(ErrorKind::ParseSysInfo)
.await
{
tracing::warn!("nvidia-smi: {e}");
tracing::info!("The above warning can be ignored if no NVIDIA card is present");
} else {
async {
let version: InternedString = String::from_utf8(
Command::new("modinfo")
.arg("-F")
.arg("version")
.arg("nvidia")
.invoke(ErrorKind::ParseSysInfo)
.await?,
)?
.trim()
.into();
let nvidia_dir =
Path::new("/media/startos/data/package-data/nvidia").join(&*version);
// Generate single squashfs with both debian and generic overlays
let sqfs = nvidia_dir.join("container-overlay.squashfs");
if tokio::fs::metadata(&sqfs).await.is_err() {
let tmp = TmpDir::new().await?;
// Generate debian overlay (libs in /usr/lib/aarch64-linux-gnu/)
let debian_dir = tmp.join("debian");
tokio::fs::create_dir_all(&debian_dir).await?;
// Create /etc/debian_version to trigger debian path detection
tokio::fs::create_dir_all(debian_dir.join("etc")).await?;
tokio::fs::write(debian_dir.join("etc/debian_version"), "").await?;
let procfs = MountGuard::mount(
&Bind::new("/proc"),
debian_dir.join("proc"),
ReadOnly,
)
.await?;
Command::new("nvidia-container-cli")
.arg("configure")
.arg("--no-devbind")
.arg("--no-cgroups")
.arg("--utility")
.arg("--compute")
.arg("--graphics")
.arg("--video")
.arg(&debian_dir)
.invoke(ErrorKind::Unknown)
.await?;
procfs.unmount(true).await?;
// Run ldconfig to create proper symlinks for all NVIDIA libraries
Command::new("ldconfig")
.arg("-r")
.arg(&debian_dir)
.invoke(ErrorKind::Unknown)
.await?;
// Remove /etc/debian_version - it was only needed for nvidia-container-cli detection
tokio::fs::remove_file(debian_dir.join("etc/debian_version")).await?;
// Generate generic overlay (libs in /usr/lib64/)
let generic_dir = tmp.join("generic");
tokio::fs::create_dir_all(&generic_dir).await?;
// No /etc/debian_version - will use generic /usr/lib64 paths
let procfs = MountGuard::mount(
&Bind::new("/proc"),
generic_dir.join("proc"),
ReadOnly,
)
.await?;
Command::new("nvidia-container-cli")
.arg("configure")
.arg("--no-devbind")
.arg("--no-cgroups")
.arg("--utility")
.arg("--compute")
.arg("--graphics")
.arg("--video")
.arg(&generic_dir)
.invoke(ErrorKind::Unknown)
.await?;
procfs.unmount(true).await?;
// Run ldconfig to create proper symlinks for all NVIDIA libraries
Command::new("ldconfig")
.arg("-r")
.arg(&generic_dir)
.invoke(ErrorKind::Unknown)
.await?;
// Create squashfs with UID/GID mapping (avoids chown on readonly mounts)
if let Some(p) = sqfs.parent() {
tokio::fs::create_dir_all(p)
.await
.with_ctx(|_| (ErrorKind::Filesystem, format!("mkdir -p {p:?}")))?;
}
Command::new("mksquashfs")
.arg(&*tmp)
.arg(&sqfs)
.arg("-force-uid")
.arg("100000")
.arg("-force-gid")
.arg("100000")
.invoke(ErrorKind::Filesystem)
.await?;
tmp.unmount_and_delete().await?;
}
BlockDev::new(&sqfs)
.mount(NVIDIA_OVERLAY_PATH, ReadOnly)
.await?;
Ok::<_, Error>(())
}
.await
.log_err();
}
}
let services = ServiceMap::default();
let metrics_cache = Watch::<Option<crate::system::Metrics>>::new(None);
let socks_proxy_url = format!("socks5h://{socks_proxy}");
@@ -585,14 +460,8 @@ impl RpcContext {
where
Self: CallRemote<RemoteContext>,
{
<Self as CallRemote<RemoteContext, Empty>>::call_remote(
&self,
method,
OrdMap::new(),
params,
Empty {},
)
.await
<Self as CallRemote<RemoteContext, Empty>>::call_remote(&self, method, params, Empty {})
.await
}
pub async fn call_remote_with<RemoteContext, T>(
&self,
@@ -603,14 +472,7 @@ impl RpcContext {
where
Self: CallRemote<RemoteContext, T>,
{
<Self as CallRemote<RemoteContext, T>>::call_remote(
&self,
method,
OrdMap::new(),
params,
extra,
)
.await
<Self as CallRemote<RemoteContext, T>>::call_remote(&self, method, params, extra).await
}
}
impl AsRef<Client> for RpcContext {

View File

@@ -56,7 +56,8 @@ pub async fn restart(ctx: RpcContext, ControlParams { id }: ControlParams) -> Re
.as_idx_mut(&id)
.or_not_found(&id)?
.as_status_info_mut()
.restart()
.as_desired_mut()
.map_mutate(|s| Ok(s.restart()))
})
.await
.result?;

View File

@@ -94,23 +94,7 @@ impl Public {
..Default::default()
},
gateways: OrdMap::new(),
acme: {
let mut acme: BTreeMap<AcmeProvider, AcmeSettings> = Default::default();
acme.insert(
"letsencrypt".parse()?,
AcmeSettings {
contact: Vec::new(),
},
);
#[cfg(feature = "dev")]
acme.insert(
"letsencrypt-staging".parse()?,
AcmeSettings {
contact: Vec::new(),
},
);
acme
},
acme: BTreeMap::new(),
dns: Default::default(),
},
status_info: ServerStatus {

View File

@@ -416,51 +416,6 @@ impl<T: Map> Model<T> {
}
}
impl<T: Map> Model<T>
where
T::Key: FromStr,
Error: From<<T::Key as FromStr>::Err>,
{
/// Retains only the elements specified by the predicate.
/// The predicate can mutate the values and returns whether to keep each entry.
pub fn retain<F>(&mut self, mut f: F) -> Result<(), Error>
where
F: FnMut(&T::Key, &mut Model<T::Value>) -> Result<bool, Error>,
{
let mut to_remove = Vec::new();
match &mut self.value {
Value::Object(o) => {
for (k, v) in o.iter_mut() {
let key = T::Key::from_str(&**k)?;
if !f(&key, patch_db::ModelExt::value_as_mut(v))? {
to_remove.push(k.clone());
}
}
}
v => {
use serde::de::Error;
return Err(patch_db::value::Error {
source: patch_db::value::ErrorSource::custom(format!(
"expected object found {v}"
)),
kind: patch_db::value::ErrorKind::Deserialization,
}
.into());
}
}
// Remove entries that didn't pass the filter
if let Value::Object(o) = &mut self.value {
for k in to_remove {
o.remove(&k);
}
}
Ok(())
}
}
#[repr(transparent)]
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct JsonKey<T>(pub T);

View File

@@ -4,7 +4,6 @@ use std::path::Path;
use digest::generic_array::GenericArray;
use digest::{Digest, OutputSizeUser};
use itertools::Itertools;
use sha2::Sha256;
use crate::disk::mount::filesystem::{FileSystem, MountType, ReadWrite};
@@ -13,13 +12,12 @@ use crate::prelude::*;
use crate::util::io::TmpDir;
pub struct OverlayFs<P0: AsRef<Path>, P1: AsRef<Path>, P2: AsRef<Path>> {
lower: Vec<P0>,
lower: P0,
upper: P1,
work: P2,
}
impl<P0: AsRef<Path>, P1: AsRef<Path>, P2: AsRef<Path>> OverlayFs<P0, P1, P2> {
/// layers are top to bottom
pub fn new(lower: Vec<P0>, upper: P1, work: P2) -> Self {
pub fn new(lower: P0, upper: P1, work: P2) -> Self {
Self { lower, upper, work }
}
}
@@ -34,10 +32,8 @@ impl<P0: AsRef<Path> + Send + Sync, P1: AsRef<Path> + Send + Sync, P2: AsRef<Pat
}
fn mount_options(&self) -> impl IntoIterator<Item = impl Display> {
[
Box::new(lazy_format!(
"lowerdir={}",
self.lower.iter().map(|p| p.as_ref().display()).join(":")
)) as Box<dyn Display>,
Box::new(lazy_format!("lowerdir={}", self.lower.as_ref().display()))
as Box<dyn Display>,
Box::new(lazy_format!("upperdir={}", self.upper.as_ref().display())),
Box::new(lazy_format!("workdir={}", self.work.as_ref().display())),
]
@@ -55,21 +51,18 @@ impl<P0: AsRef<Path> + Send + Sync, P1: AsRef<Path> + Send + Sync, P2: AsRef<Pat
tokio::fs::create_dir_all(self.work.as_ref()).await?;
let mut sha = Sha256::new();
sha.update("OverlayFs");
for lower in &self.lower {
sha.update(
tokio::fs::canonicalize(lower.as_ref())
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
lower.as_ref().display().to_string(),
)
})?
.as_os_str()
.as_bytes(),
);
sha.update(b"\0");
}
sha.update(
tokio::fs::canonicalize(self.lower.as_ref())
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
self.lower.as_ref().display().to_string(),
)
})?
.as_os_str()
.as_bytes(),
);
sha.update(
tokio::fs::canonicalize(self.upper.as_ref())
.await
@@ -82,7 +75,6 @@ impl<P0: AsRef<Path> + Send + Sync, P1: AsRef<Path> + Send + Sync, P2: AsRef<Pat
.as_os_str()
.as_bytes(),
);
sha.update(b"\0");
sha.update(
tokio::fs::canonicalize(self.work.as_ref())
.await
@@ -95,7 +87,6 @@ impl<P0: AsRef<Path> + Send + Sync, P1: AsRef<Path> + Send + Sync, P2: AsRef<Pat
.as_os_str()
.as_bytes(),
);
sha.update(b"\0");
Ok(sha.finalize())
}
}
@@ -107,20 +98,11 @@ pub struct OverlayGuard<G: GenericMountGuard> {
inner_guard: MountGuard,
}
impl<G: GenericMountGuard> OverlayGuard<G> {
pub async fn mount_layers<P: AsRef<Path>>(
pre: &[P],
guard: G,
post: &[P],
mountpoint: impl AsRef<Path>,
) -> Result<Self, Error> {
pub async fn mount(lower: G, mountpoint: impl AsRef<Path>) -> Result<Self, Error> {
let upper = TmpDir::new().await?;
let inner_guard = MountGuard::mount(
&OverlayFs::new(
std::iter::empty()
.chain(pre.into_iter().map(|p| p.as_ref()))
.chain([guard.path()])
.chain(post.into_iter().map(|p| p.as_ref()))
.collect(),
lower.path(),
upper.as_ref().join("upper"),
upper.as_ref().join("work"),
),
@@ -129,14 +111,11 @@ impl<G: GenericMountGuard> OverlayGuard<G> {
)
.await?;
Ok(Self {
lower: Some(guard),
lower: Some(lower),
upper: Some(upper),
inner_guard,
})
}
pub async fn mount(lower: G, mountpoint: impl AsRef<Path>) -> Result<Self, Error> {
Self::mount_layers::<&Path>(&[], lower, &[], mountpoint).await
}
pub async fn unmount(mut self, delete_mountpoint: bool) -> Result<(), Error> {
self.inner_guard.take().unmount(delete_mountpoint).await?;
if let Some(lower) = self.lower.take() {
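
For context on the multi-layer lowerdir support touched in this hunk: overlayfs takes colon-separated lowerdir entries and stacks them top to bottom, so the first entry shadows the rest. A shell sketch of the mount the Rust code assembles (run as root):

mkdir -p /tmp/top /tmp/bottom /tmp/upper /tmp/work /tmp/merged
mount -t overlay overlay \
  -o lowerdir=/tmp/top:/tmp/bottom,upperdir=/tmp/upper,workdir=/tmp/work \
  /tmp/merged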

View File

@@ -3,7 +3,6 @@ use std::path::Path;
use tracing::instrument;
use crate::Error;
use crate::prelude::*;
use crate::util::Invoke;
pub async fn is_mountpoint(path: impl AsRef<Path>) -> Result<bool, Error> {
@@ -57,42 +56,3 @@ pub async fn unmount<P: AsRef<Path>>(mountpoint: P, lazy: bool) -> Result<(), Er
.await?;
Ok(())
}
/// Unmounts all mountpoints under (and including) the given path, in reverse
/// depth order so that nested mounts are unmounted before their parents.
#[instrument(skip_all)]
pub async fn unmount_all_under<P: AsRef<Path>>(path: P, lazy: bool) -> Result<(), Error> {
let path = path.as_ref();
let canonical_path = tokio::fs::canonicalize(path)
.await
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("canonicalize {path:?}")))?;
let mounts_content = tokio::fs::read_to_string("/proc/mounts")
.await
.with_ctx(|_| (ErrorKind::Filesystem, "read /proc/mounts"))?;
// Collect all mountpoints under our path
let mut mountpoints: Vec<&str> = mounts_content
.lines()
.filter_map(|line| {
let mountpoint = line.split_whitespace().nth(1)?;
// Check if this mountpoint is under our target path
let mp_path = Path::new(mountpoint);
if mp_path.starts_with(&canonical_path) {
Some(mountpoint)
} else {
None
}
})
.collect();
// Sort by path length descending so we unmount deepest first
mountpoints.sort_by(|a, b| b.len().cmp(&a.len()));
for mountpoint in mountpoints {
tracing::debug!("Unmounting nested mountpoint: {}", mountpoint);
unmount(mountpoint, lazy).await?;
}
Ok(())
}

View File

@@ -4,6 +4,7 @@ use std::sync::Arc;
use std::time::{Duration, SystemTime};
use axum::extract::ws;
use const_format::formatcp;
use futures::{StreamExt, TryStreamExt};
use itertools::Itertools;
use rpc_toolkit::{Context, Empty, HandlerArgs, HandlerExt, ParentHandler, from_fn_async};

View File

@@ -142,16 +142,16 @@ pub async fn install(
.await?,
)?;
let (_, asset) = package
let asset = &package
.best
.get(&version)
.and_then(|i| i.s9pks.first())
.ok_or_else(|| {
Error::new(
eyre!("{id}@{version} not found on {registry}"),
ErrorKind::NotFound,
)
})?;
})?
.s9pk;
asset.validate(SIG_CONTEXT, asset.all_signers())?;

View File

@@ -5,13 +5,11 @@ use std::sync::{Arc, Weak};
use std::time::Duration;
use clap::builder::ValueParserFactory;
use futures::future::BoxFuture;
use futures::{FutureExt, StreamExt};
use futures::StreamExt;
use imbl_value::InternedString;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{RpcRequest, RpcResponse};
use serde::{Deserialize, Serialize};
use tokio::fs::ReadDir;
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio::process::Command;
use tokio::sync::Mutex;
@@ -29,7 +27,7 @@ use crate::disk::mount::util::unmount;
use crate::prelude::*;
use crate::rpc_continuations::{Guid, RpcContinuation};
use crate::service::ServiceStats;
use crate::util::io::{open_file, write_file_owned_atomic};
use crate::util::io::open_file;
use crate::util::rpc_client::UnixRpcClient;
use crate::util::{FromStrParser, Invoke, new_guid};
use crate::{InvalidId, PackageId};
@@ -39,7 +37,6 @@ const RPC_DIR: &str = "media/startos/rpc"; // must not be absolute path
pub const CONTAINER_RPC_SERVER_SOCKET: &str = "service.sock"; // must not be absolute path
pub const HOST_RPC_SERVER_SOCKET: &str = "host.sock"; // must not be absolute path
const CONTAINER_DHCP_TIMEOUT: Duration = Duration::from_secs(30);
const HARDWARE_ACCELERATION_PATHS: &[&str] = &["/dev/dri", "/dev/nvidia*", "/dev/kfd"];
#[derive(
Clone, Debug, Serialize, Deserialize, Default, PartialEq, Eq, PartialOrd, Ord, Hash, TS,
@@ -177,8 +174,12 @@ impl LxcContainer {
let machine_id = hex::encode(rand::random::<[u8; 16]>());
let container_dir = Path::new(LXC_CONTAINER_DIR).join(&*guid);
tokio::fs::create_dir_all(&container_dir).await?;
let config_str = format!(include_str!("./config.template"), guid = &*guid);
tokio::fs::write(container_dir.join("config"), config_str).await?;
tokio::fs::write(
container_dir.join("config"),
format!(include_str!("./config.template"), guid = &*guid),
)
.await?;
// TODO: append config
let rootfs_dir = container_dir.join("rootfs");
let rootfs = OverlayGuard::mount(
TmpMountGuard::mount(
@@ -196,25 +197,8 @@ impl LxcContainer {
&rootfs_dir,
)
.await?;
Command::new("chown")
.arg("100000:100000")
.arg(&rootfs_dir)
.invoke(ErrorKind::Filesystem)
.await?;
write_file_owned_atomic(
rootfs_dir.join("etc/machine-id"),
format!("{machine_id}\n"),
100000,
100000,
)
.await?;
write_file_owned_atomic(
rootfs_dir.join("etc/hostname"),
format!("{guid}\n"),
100000,
100000,
)
.await?;
tokio::fs::write(rootfs_dir.join("etc/machine-id"), format!("{machine_id}\n")).await?;
tokio::fs::write(rootfs_dir.join("etc/hostname"), format!("{guid}\n")).await?;
Command::new("sed")
.arg("-i")
.arg(format!("s/LXC_NAME/{guid}/g"))
@@ -264,13 +248,9 @@ impl LxcContainer {
.arg("-d")
.arg("--name")
.arg(&*guid)
.arg("-o")
.arg(format!("/run/startos/LXC_{guid}.log"))
.arg("-l")
.arg("DEBUG")
.invoke(ErrorKind::Lxc)
.await?;
let res = Self {
Ok(Self {
manager: Arc::downgrade(manager),
rootfs,
guid: Arc::new(ContainerId::try_from(&*guid)?),
@@ -278,84 +258,7 @@ impl LxcContainer {
config,
exited: false,
log_mount,
};
if res.config.hardware_acceleration {
res.handle_devices(
tokio::fs::read_dir("/dev")
.await
.with_ctx(|_| (ErrorKind::Filesystem, "readdir /dev"))?,
HARDWARE_ACCELERATION_PATHS,
)
.await?;
}
Ok(res)
}
#[cfg(not(target_os = "linux"))]
async fn handle_devices(&self, _: ReadDir, _: &[&str]) -> Result<(), Error> {
Ok(())
}
#[cfg(target_os = "linux")]
fn handle_devices<'a>(
&'a self,
mut dir: ReadDir,
matches: &'a [&'a str],
) -> BoxFuture<'a, Result<(), Error>> {
use std::os::linux::fs::MetadataExt;
use std::os::unix::fs::FileTypeExt;
async move {
while let Some(ent) = dir.next_entry().await? {
let path = ent.path();
if let Some(matches) = if matches.is_empty() {
Some(Vec::new())
} else {
let mut new_matches = Vec::new();
for mut m in matches.iter().copied() {
let could_match = if let Some(prefix) = m.strip_suffix("*") {
m = prefix;
path.to_string_lossy().starts_with(m)
} else {
path.starts_with(m)
} || Path::new(m).starts_with(&path);
if could_match {
new_matches.push(m);
}
}
if new_matches.is_empty() {
None
} else {
Some(new_matches)
}
} {
let meta = ent.metadata().await?;
let ty = meta.file_type();
if ty.is_dir() {
self.handle_devices(
tokio::fs::read_dir(&path).await.with_ctx(|_| {
(ErrorKind::Filesystem, format!("readdir {path:?}"))
})?,
&matches,
)
.await?;
} else {
let ty = if ty.is_char_device() {
'c'
} else if ty.is_block_device() {
'b'
} else {
continue;
};
let rdev = meta.st_rdev();
let major = ((rdev >> 8) & 0xfff) as u32;
let minor = ((rdev & 0xff) | ((rdev >> 12) & 0xfff00)) as u32;
self.mknod(&path, ty, major, minor).await?;
}
}
}
Ok(())
}
.boxed()
})
}
pub fn rootfs_dir(&self) -> &Path {
@@ -426,7 +329,7 @@ impl LxcContainer {
.await?;
self.rpc_bind.take().unmount().await?;
if let Some(log_mount) = self.log_mount.take() {
log_mount.unmount(false).await?;
log_mount.unmount(true).await?;
}
self.rootfs.take().unmount(true).await?;
let rootfs_path = self.rootfs_dir();
@@ -448,10 +351,7 @@ impl LxcContainer {
.invoke(ErrorKind::Lxc)
.await?;
#[allow(unused_assignments)]
{
self.exited = true;
}
self.exited = true;
Ok(())
}
@@ -461,17 +361,6 @@ impl LxcContainer {
let sock_path = self.rpc_dir().join(CONTAINER_RPC_SERVER_SOCKET);
while tokio::fs::metadata(&sock_path).await.is_err() {
if timeout.map_or(false, |t| started.elapsed() > t) {
tracing::error!(
"{:?}",
Command::new("lxc-attach")
.arg(&**self.guid)
.arg("--")
.arg("systemctl")
.arg("status")
.arg("container-runtime")
.invoke(ErrorKind::Unknown)
.await
);
return Err(Error::new(
eyre!("timed out waiting for socket"),
ErrorKind::Timeout,
@@ -482,88 +371,6 @@ impl LxcContainer {
tracing::info!("Connected to socket in {:?}", started.elapsed());
Ok(UnixRpcClient::new(sock_path))
}
pub async fn mknod(&self, path: &Path, ty: char, major: u32, minor: u32) -> Result<(), Error> {
if let Ok(dev_rel) = path.strip_prefix("/dev") {
let parent = dev_rel.parent();
let media_dev = self.rootfs_dir().join("media/startos/dev");
let target_path = media_dev.join(dev_rel);
if tokio::fs::metadata(&target_path).await.is_ok() {
return Ok(());
}
if let Some(parent) = parent {
let p = media_dev.join(parent);
tokio::fs::create_dir_all(&p)
.await
.with_ctx(|_| (ErrorKind::Filesystem, format!("mkdir -p {p:?}")))?;
for p in parent.ancestors() {
Command::new("chown")
.arg("100000:100000")
.arg(media_dev.join(p))
.invoke(ErrorKind::Filesystem)
.await?;
}
}
Command::new("mknod")
.arg(&target_path)
.arg(&*InternedString::from_display(&ty))
.arg(&*InternedString::from_display(&major))
.arg(&*InternedString::from_display(&minor))
.invoke(ErrorKind::Filesystem)
.await?;
Command::new("chown")
.arg("100000:100000")
.arg(&target_path)
.invoke(ErrorKind::Filesystem)
.await?;
if let Some(parent) = parent {
Command::new("lxc-attach")
.arg(&**self.guid)
.arg("--")
.arg("mkdir")
.arg("-p")
.arg(Path::new("/dev").join(parent))
.invoke(ErrorKind::Lxc)
.await?;
}
Command::new("lxc-attach")
.arg(&**self.guid)
.arg("--")
.arg("touch")
.arg(&path)
.invoke(ErrorKind::Lxc)
.await?;
Command::new("lxc-attach")
.arg(&**self.guid)
.arg("--")
.arg("mount")
.arg("--bind")
.arg(Path::new("/media/startos/dev").join(dev_rel))
.arg(&path)
.invoke(ErrorKind::Lxc)
.await?;
} else {
let target_path = self
.rootfs_dir()
.join(path.strip_prefix("/").unwrap_or(&path));
if tokio::fs::metadata(&target_path).await.is_ok() {
return Ok(());
}
Command::new("mknod")
.arg(&target_path)
.arg(&*InternedString::from_display(&ty))
.arg(&*InternedString::from_display(&major))
.arg(&*InternedString::from_display(&minor))
.invoke(ErrorKind::Filesystem)
.await?;
Command::new("chown")
.arg("100000:100000")
.arg(&target_path)
.invoke(ErrorKind::Filesystem)
.await?;
}
Ok(())
}
}
impl Drop for LxcContainer {
fn drop(&mut self) {
@@ -607,10 +414,7 @@ impl Drop for LxcContainer {
}
#[derive(Default, Serialize)]
pub struct LxcConfig {
pub hardware_acceleration: bool,
}
pub struct LxcConfig {}
pub async fn connect(ctx: &RpcContext, container: &LxcContainer) -> Result<Guid, Error> {
use axum::extract::ws::Message;
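
On the mknod plumbing in this hunk: Linux encodes a device's major/minor numbers in st_rdev, which is what the bit-shifting recovers. They can be cross-checked and reproduced from the shell (run as root for mknod):

stat -c 'type=%F major=%t minor=%T' /dev/null   # %t/%T print major/minor in hex
mknod /tmp/null-copy c 1 3                      # /dev/null is char device 1:3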

View File

@@ -15,8 +15,5 @@ fn main() {
}) {
PREFER_DOCKER.set(true).ok();
}
MultiExecutable::default()
.enable_start_cli()
.set_default("start-cli")
.execute()
MultiExecutable::default().enable_start_cli().execute()
}

View File

@@ -3,6 +3,5 @@ use startos::bins::MultiExecutable;
fn main() {
MultiExecutable::default()
.enable_start_container()
.set_default("start-container")
.execute()
}

View File

@@ -703,22 +703,22 @@ async fn watch_ip(
.into_iter()
.map(IpNet::try_from)
.try_collect()?;
// let tables = ip4_proxy.route_data().await?.into_iter().filter_map(|d|d.table).collect::<Vec<_>>();
// if !tables.is_empty() {
// let rules = String::from_utf8(Command::new("ip").arg("rule").arg("list").invoke(ErrorKind::Network).await?)?;
// for table in tables {
// for subnet in subnets.iter().filter(|s| s.addr().is_ipv4()) {
// let subnet_string = subnet.trunc().to_string();
// let rule = ["from", &subnet_string, "lookup", &table.to_string()];
// if !rules.contains(&rule.join(" ")) {
// if rules.contains(&rule[..2].join(" ")) {
// Command::new("ip").arg("rule").arg("del").args(&rule[..2]).invoke(ErrorKind::Network).await?;
// }
// Command::new("ip").arg("rule").arg("add").args(rule).invoke(ErrorKind::Network).await?;
// }
// }
// }
// }
let tables = ip4_proxy.route_data().await?.into_iter().filter_map(|d|d.table).collect::<Vec<_>>();
if !tables.is_empty() {
let rules = String::from_utf8(Command::new("ip").arg("rule").arg("list").invoke(ErrorKind::Network).await?)?;
for table in tables {
for subnet in subnets.iter().filter(|s| s.addr().is_ipv4()) {
let subnet_string = subnet.trunc().to_string();
let rule = ["from", &subnet_string, "lookup", &table.to_string()];
if !rules.contains(&rule.join(" ")) {
if rules.contains(&rule[..2].join(" ")) {
Command::new("ip").arg("rule").arg("del").args(&rule[..2]).invoke(ErrorKind::Network).await?;
}
Command::new("ip").arg("rule").arg("add").args(rule).invoke(ErrorKind::Network).await?;
}
}
}
}
let wan_ip = if !subnets.is_empty()
&& !matches!(
device_type,
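
The commented-out block in this hunk is idempotent policy routing: add an ip rule for a subnet only if it is not already present. The same check as plain shell, with made-up values (run as root):

subnet=10.0.0.0/24 table=52
if ! ip rule list | grep -qF "from $subnet lookup $table"; then
  ip rule add from "$subnet" lookup "$table"
fi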

View File

@@ -15,7 +15,7 @@ use crate::util::future::NonDetachingJoinHandle;
pub const DEFAULT_SOCKS_LISTEN: SocketAddr = SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(HOST_IP[0], HOST_IP[1], HOST_IP[2], HOST_IP[3]),
1080,
9050,
));
pub struct SocksController {

View File

@@ -472,7 +472,7 @@ fn cert_send(cert: &X509, hostname: &Hostname) -> Result<Response, Error> {
)
.to_lowercase(),
)
.header(http::header::CONTENT_TYPE, "application/octet-stream")
.header(http::header::CONTENT_TYPE, "application/x-x509-ca-cert")
.header(http::header::CONTENT_LENGTH, pem.len())
.header(
http::header::CONTENT_DISPOSITION,

View File

@@ -151,112 +151,103 @@ where
cx: &mut std::task::Context<'_>,
) -> Poll<Result<(Self::Metadata, AcceptStream), Error>> {
self.in_progress.mutate(|in_progress| {
// First, check if any in-progress handshakes have completed
if !in_progress.is_empty() {
if let Poll::Ready(Some((handler, res))) = in_progress.poll_next_unpin(cx) {
if let Some(res) = res.transpose() {
self.tls_handler = handler;
return Poll::Ready(res);
loop {
if !in_progress.is_empty() {
if let Poll::Ready(Some((handler, res))) = in_progress.poll_next_unpin(cx) {
if let Some(res) = res.transpose() {
self.tls_handler = handler;
return Poll::Ready(res);
}
continue;
}
// Connection was rejected (preprocess returned None).
// Yield to the runtime to avoid busy-looping, but wake
// immediately to continue processing.
cx.waker().wake_by_ref();
return Poll::Pending;
}
}
// Try to accept a new connection
let (metadata, stream) = ready!(self.accept.poll_accept(cx)?);
let mut tls_handler = self.tls_handler.clone();
let mut fut = async move {
let res = async {
let mut acceptor = LazyConfigAcceptor::new(
Acceptor::default(),
BackTrackingIO::new(stream),
);
let mut mid: tokio_rustls::StartHandshake<BackTrackingIO<AcceptStream>> =
match (&mut acceptor).await {
Ok(a) => a,
Err(e) => {
let mut stream =
acceptor.take_io().or_not_found("acceptor io")?;
let (_, buf) = stream.rewind();
if std::str::from_utf8(buf)
.ok()
.and_then(|buf| {
buf.lines()
.map(|l| l.trim())
.filter(|l| !l.is_empty())
.next()
})
.map_or(false, |buf| {
regex::Regex::new("[A-Z]+ (.+) HTTP/1")
.unwrap()
.is_match(buf)
})
{
handle_http_on_https(stream).await.log_err();
let (metadata, stream) = ready!(self.accept.poll_accept(cx)?);
let mut tls_handler = self.tls_handler.clone();
let mut fut = async move {
let res = async {
let mut acceptor = LazyConfigAcceptor::new(
Acceptor::default(),
BackTrackingIO::new(stream),
);
let mut mid: tokio_rustls::StartHandshake<BackTrackingIO<AcceptStream>> =
match (&mut acceptor).await {
Ok(a) => a,
Err(e) => {
let mut stream =
acceptor.take_io().or_not_found("acceptor io")?;
let (_, buf) = stream.rewind();
if std::str::from_utf8(buf)
.ok()
.and_then(|buf| {
buf.lines()
.map(|l| l.trim())
.filter(|l| !l.is_empty())
.next()
})
.map_or(false, |buf| {
regex::Regex::new("[A-Z]+ (.+) HTTP/1")
.unwrap()
.is_match(buf)
})
{
handle_http_on_https(stream).await.log_err();
return Ok(None);
} else {
return Err(e).with_kind(ErrorKind::Network);
return Ok(None);
} else {
return Err(e).with_kind(ErrorKind::Network);
}
}
}
};
let hello = mid.client_hello();
if let Some(cfg) = tls_handler.get_config(&hello, &metadata).await {
let buffered = mid.io.stop_buffering();
mid.io
.write_all(&buffered)
.await
.with_kind(ErrorKind::Network)?;
return Ok(match mid.into_stream(Arc::new(cfg)).await {
Ok(stream) => {
let s = stream.get_ref().1;
Some((
TlsMetadata {
inner: metadata,
tls_info: TlsHandshakeInfo {
sni: s.server_name().map(InternedString::intern),
alpn: s
.alpn_protocol()
.map(|a| MaybeUtf8String(a.to_vec())),
};
let hello = mid.client_hello();
if let Some(cfg) = tls_handler.get_config(&hello, &metadata).await {
let buffered = mid.io.stop_buffering();
mid.io
.write_all(&buffered)
.await
.with_kind(ErrorKind::Network)?;
return Ok(match mid.into_stream(Arc::new(cfg)).await {
Ok(stream) => {
let s = stream.get_ref().1;
Some((
TlsMetadata {
inner: metadata,
tls_info: TlsHandshakeInfo {
sni: s.server_name().map(InternedString::intern),
alpn: s
.alpn_protocol()
.map(|a| MaybeUtf8String(a.to_vec())),
},
},
},
Box::pin(stream) as AcceptStream,
))
}
Err(e) => {
tracing::trace!("Error completing TLS handshake: {e}");
tracing::trace!("{e:?}");
None
}
});
}
Box::pin(stream) as AcceptStream,
))
}
Err(e) => {
tracing::trace!("Error completing TLS handshake: {e}");
tracing::trace!("{e:?}");
None
}
});
}
Ok(None)
}
.await;
(tls_handler, res)
}
.boxed();
match fut.poll_unpin(cx) {
Poll::Pending => {
in_progress.push(fut);
Poll::Pending
}
Poll::Ready((handler, res)) => {
if let Some(res) = res.transpose() {
self.tls_handler = handler;
return Poll::Ready(res);
Ok(None)
}
// Connection was rejected (preprocess returned None).
// Yield to the runtime to avoid busy-looping, but wake
// immediately to continue processing.
cx.waker().wake_by_ref();
Poll::Pending
.await;
(tls_handler, res)
}
.boxed();
match fut.poll_unpin(cx) {
Poll::Pending => {
in_progress.push(fut);
return Poll::Pending;
}
Poll::Ready((handler, res)) => {
if let Some(res) = res.transpose() {
self.tls_handler = handler;
return Poll::Ready(res);
}
}
};
}
})
}

View File

@@ -43,6 +43,7 @@ const STARTING_HEALTH_TIMEOUT: u64 = 120; // 2min
const TOR_CONTROL: SocketAddr =
SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 1, 1), 9051));
const TOR_SOCKS: SocketAddr = SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 1, 1), 9050));
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub struct OnionAddress(OnionAddressV3);
@@ -401,15 +402,10 @@ fn event_handler(_event: AsyncEvent<'static>) -> BoxFuture<'static, Result<(), C
#[derive(Clone)]
pub struct TorController(Arc<TorControl>);
impl TorController {
const TOR_SOCKS: &[SocketAddr] = &[
SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(127, 0, 0, 1), 9050)),
SocketAddr::V4(SocketAddrV4::new(Ipv4Addr::new(10, 0, 3, 1), 9050)),
];
pub fn new() -> Result<Self, Error> {
Ok(TorController(Arc::new(TorControl::new(
TOR_CONTROL,
Self::TOR_SOCKS,
TOR_SOCKS,
))))
}
@@ -512,7 +508,7 @@ impl TorController {
}
Ok(Box::new(tcp_stream))
} else {
let mut stream = TcpStream::connect(Self::TOR_SOCKS[0])
let mut stream = TcpStream::connect(TOR_SOCKS)
.await
.with_kind(ErrorKind::Tor)?;
if let Err(e) = socket2::SockRef::from(&stream).set_keepalive(true) {
@@ -599,7 +595,7 @@ enum TorCommand {
#[instrument(skip_all)]
async fn torctl(
tor_control: SocketAddr,
tor_socks: &[SocketAddr],
tor_socks: SocketAddr,
recv: &mut mpsc::UnboundedReceiver<TorCommand>,
services: &mut Watch<
BTreeMap<
@@ -645,21 +641,10 @@ async fn torctl(
tokio::fs::remove_dir_all("/var/lib/tor").await?;
wipe_state.store(false, std::sync::atomic::Ordering::SeqCst);
}
write_file_atomic("/etc/tor/torrc", {
use std::fmt::Write;
let mut conf = String::new();
for tor_socks in tor_socks {
writeln!(&mut conf, "SocksPort {tor_socks}").unwrap();
}
writeln!(
&mut conf,
"ControlPort {tor_control}\nCookieAuthentication 1"
)
.unwrap();
conf
})
write_file_atomic(
"/etc/tor/torrc",
format!("SocksPort {TOR_SOCKS}\nControlPort {TOR_CONTROL}\nCookieAuthentication 1\n"),
)
.await?;
tokio::fs::create_dir_all("/var/lib/tor").await?;
Command::new("chown")
@@ -991,10 +976,7 @@ struct TorControl {
>,
}
impl TorControl {
pub fn new(
tor_control: SocketAddr,
tor_socks: impl AsRef<[SocketAddr]> + Send + 'static,
) -> Self {
pub fn new(tor_control: SocketAddr, tor_socks: SocketAddr) -> Self {
let (send, mut recv) = mpsc::unbounded_channel();
let services = Watch::new(BTreeMap::new());
let mut thread_services = services.clone();
@@ -1005,7 +987,7 @@ impl TorControl {
loop {
if let Err(e) = torctl(
tor_control,
tor_socks.as_ref(),
tor_socks,
&mut recv,
&mut thread_services,
&wipe_state,
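
The torrc this controller writes collapses to a single SocksPort with this change. Its shape, using the TOR_SOCKS/TOR_CONTROL constants defined above (sketch only; on a live system startd writes this file itself):

cat > /etc/tor/torrc <<EOF
SocksPort 127.0.1.1:9050
ControlPort 127.0.1.1:9051
CookieAuthentication 1
EOF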

View File

@@ -241,7 +241,7 @@ pub async fn mark_seen_before(
ctx.db
.mutate(|db| {
let n = db.as_private_mut().as_notifications_mut();
for id in n.keys()?.range(..=before) {
for id in n.keys()?.range(..before) {
n.as_idx_mut(&id)
.or_not_found(lazy_format!("Notification #{id}"))?
.as_seen_mut()

View File

@@ -280,11 +280,8 @@ pub async fn execute<C: Context>(
let lower = TmpMountGuard::mount(&BlockDev::new(&image_path), MountType::ReadOnly).await?;
let work = config_path.join("work");
let upper = config_path.join("overlay");
let overlay = TmpMountGuard::mount(
&OverlayFs::new(vec![lower.path()], &upper, &work),
ReadWrite,
)
.await?;
let overlay =
TmpMountGuard::mount(&OverlayFs::new(&lower.path(), &upper, &work), ReadWrite).await?;
let boot = MountGuard::mount(
&BlockDev::new(&part_info.boot),

View File

@@ -3,7 +3,7 @@ use std::path::Path;
use std::sync::Arc;
use chrono::{DateTime, Utc};
use reqwest::{Client, Response};
use reqwest::Client;
use serde::{Deserialize, Serialize};
use tokio::io::AsyncWrite;
use ts_rs::TS;
@@ -21,14 +21,14 @@ use crate::sign::{AnySignature, AnyVerifyingKey};
use crate::upload::UploadingFile;
use crate::util::future::NonDetachingJoinHandle;
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[derive(Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct RegistryAsset<Commitment> {
#[ts(type = "string")]
pub published_at: DateTime<Utc>,
#[ts(type = "string[]")]
pub urls: Vec<Url>,
#[ts(type = "string")]
pub url: Url,
pub commitment: Commitment,
pub signatures: HashMap<AnyVerifyingKey, AnySignature>,
}
@@ -42,48 +42,6 @@ impl<Commitment> RegistryAsset<Commitment> {
.collect(),
)
}
pub async fn load_http_source(&self, client: Client) -> Result<HttpSource, Error> {
for url in &self.urls {
if let Ok(source) = HttpSource::new(client.clone(), url.clone()).await {
return Ok(source);
}
}
Err(Error::new(
eyre!("Failed to load any http url"),
ErrorKind::Network,
))
}
pub async fn load_buffered_http_source(
&self,
client: Client,
progress: PhaseProgressTrackerHandle,
) -> Result<BufferedHttpSource, Error> {
for url in &self.urls {
if let Ok(response) = client.get(url.clone()).send().await {
return BufferedHttpSource::from_response(response, progress).await;
}
}
Err(Error::new(
eyre!("Failed to load any http url"),
ErrorKind::Network,
))
}
pub async fn load_buffered_http_source_with_path(
&self,
path: impl AsRef<Path>,
client: Client,
progress: PhaseProgressTrackerHandle,
) -> Result<BufferedHttpSource, Error> {
for url in &self.urls {
if let Ok(response) = client.get(url.clone()).send().await {
return BufferedHttpSource::from_response_with_path(path, response, progress).await;
}
}
Err(Error::new(
eyre!("Failed to load any http url"),
ErrorKind::Network,
))
}
}
impl<Commitment: Digestable> RegistryAsset<Commitment> {
pub fn validate(&self, context: &str, mut accept: AcceptSigners) -> Result<&Commitment, Error> {
@@ -101,7 +59,7 @@ impl<C: for<'a> Commitment<&'a HttpSource>> RegistryAsset<C> {
dst: &mut (impl AsyncWrite + Unpin + Send + ?Sized),
) -> Result<(), Error> {
self.commitment
.copy_to(&self.load_http_source(client).await?, dst)
.copy_to(&HttpSource::new(client, self.url.clone()).await?, dst)
.await
}
}
@@ -111,7 +69,7 @@ impl RegistryAsset<MerkleArchiveCommitment> {
client: Client,
) -> Result<S9pk<Section<Arc<HttpSource>>>, Error> {
S9pk::deserialize(
&Arc::new(self.load_http_source(client).await?),
&Arc::new(HttpSource::new(client, self.url.clone()).await?),
Some(&self.commitment),
)
.await
@@ -122,7 +80,7 @@ impl RegistryAsset<MerkleArchiveCommitment> {
progress: PhaseProgressTrackerHandle,
) -> Result<S9pk<Section<Arc<BufferedHttpSource>>>, Error> {
S9pk::deserialize(
&Arc::new(self.load_buffered_http_source(client, progress).await?),
&Arc::new(BufferedHttpSource::new(client, self.url.clone(), progress).await?),
Some(&self.commitment),
)
.await
@@ -140,8 +98,7 @@ impl RegistryAsset<MerkleArchiveCommitment> {
Error,
> {
let source = Arc::new(
self.load_buffered_http_source_with_path(path, client, progress)
.await?,
BufferedHttpSource::with_path(path, client, self.url.clone(), progress).await?,
);
Ok((
S9pk::deserialize(&source, Some(&self.commitment)).await?,
@@ -155,30 +112,26 @@ pub struct BufferedHttpSource {
file: UploadingFile,
}
impl BufferedHttpSource {
pub async fn new(
pub async fn with_path(
path: impl AsRef<Path>,
client: Client,
url: Url,
progress: PhaseProgressTrackerHandle,
) -> Result<Self, Error> {
let (mut handle, file) = UploadingFile::with_path(path, progress).await?;
let response = client.get(url).send().await?;
Self::from_response(response, progress).await
}
pub async fn from_response(
response: Response,
progress: PhaseProgressTrackerHandle,
) -> Result<Self, Error> {
let (mut handle, file) = UploadingFile::new(progress).await?;
Ok(Self {
_download: tokio::spawn(async move { handle.download(response).await }).into(),
file,
})
}
pub async fn from_response_with_path(
path: impl AsRef<Path>,
response: Response,
pub async fn new(
client: Client,
url: Url,
progress: PhaseProgressTrackerHandle,
) -> Result<Self, Error> {
let (mut handle, file) = UploadingFile::with_path(path, progress).await?;
let (mut handle, file) = UploadingFile::new(progress).await?;
let response = client.get(url).send().await?;
Ok(Self {
_download: tokio::spawn(async move { handle.download(response).await }).into(),
file,

View File

@@ -7,7 +7,6 @@ use chrono::Utc;
use clap::Parser;
use cookie::{Cookie, Expiration, SameSite};
use http::HeaderMap;
use imbl::OrdMap;
use imbl_value::InternedString;
use patch_db::PatchDb;
use patch_db::json_ptr::ROOT;
@@ -172,7 +171,6 @@ impl CallRemote<RegistryContext> for CliContext {
async fn call_remote(
&self,
mut method: &str,
_: OrdMap<&'static str, Value>,
params: Value,
_: Empty,
) -> Result<Value, RpcError> {
@@ -242,21 +240,14 @@ impl CallRemote<RegistryContext, RegistryUrlParams> for RpcContext {
async fn call_remote(
&self,
mut method: &str,
metadata: OrdMap<&'static str, Value>,
params: Value,
RegistryUrlParams { mut registry }: RegistryUrlParams,
) -> Result<Value, RpcError> {
let mut headers = HeaderMap::new();
let mut device_info = None;
if metadata
.get("get_device_info")
.and_then(|m| m.as_bool())
.unwrap_or(false)
{
let di = DeviceInfo::load(self).await?;
headers.insert(DEVICE_INFO_HEADER, di.to_header_value());
device_info = Some(di);
}
headers.insert(
DEVICE_INFO_HEADER,
DeviceInfo::load(self).await?.to_header_value(),
);
registry
.path_segments_mut()
@@ -267,21 +258,15 @@ impl CallRemote<RegistryContext, RegistryUrlParams> for RpcContext {
method = method.strip_prefix("registry.").unwrap_or(method);
let sig_context = registry.host_str().map(InternedString::from);
let mut res = crate::middleware::auth::signature::call_remote(
crate::middleware::auth::signature::call_remote(
self,
registry,
headers,
sig_context.as_deref(),
method,
params.clone(),
params,
)
.await?;
if let Some(device_info) = device_info {
device_info.filter_for_hardware(method, params, &mut res)?;
}
Ok(res)
.await
}
}

View File

@@ -1,4 +1,5 @@
use std::collections::BTreeMap;
use std::convert::identity;
use std::ops::Deref;
use axum::extract::Request;
@@ -6,8 +7,6 @@ use axum::response::Response;
use exver::{Version, VersionRange};
use http::HeaderValue;
use imbl_value::InternedString;
use patch_db::ModelExt;
use rpc_toolkit::yajrc::RpcMethod;
use rpc_toolkit::{Middleware, RpcRequest, RpcResponse};
use serde::{Deserialize, Serialize};
use ts_rs::TS;
@@ -16,13 +15,8 @@ use url::Url;
use crate::context::RpcContext;
use crate::prelude::*;
use crate::registry::context::RegistryContext;
use crate::registry::os::index::OsVersionInfoMap;
use crate::registry::package::get::{
GetPackageParams, GetPackageResponse, GetPackageResponseFull, PackageDetailLevel,
};
use crate::registry::package::index::PackageVersionInfo;
use crate::util::VersionString;
use crate::util::lshw::LshwDevice;
use crate::util::lshw::{LshwDevice, LshwDisplay, LshwProcessor};
use crate::version::VersionT;
pub const DEVICE_INFO_HEADER: &str = "X-StartOS-Device-Info";
@@ -31,13 +25,13 @@ pub const DEVICE_INFO_HEADER: &str = "X-StartOS-Device-Info";
#[serde(rename_all = "camelCase")]
pub struct DeviceInfo {
pub os: OsInfo,
pub hardware: Option<HardwareInfo>,
pub hardware: HardwareInfo,
}
impl DeviceInfo {
pub async fn load(ctx: &RpcContext) -> Result<Self, Error> {
Ok(Self {
os: OsInfo::from(ctx),
hardware: Some(HardwareInfo::load(ctx).await?),
hardware: HardwareInfo::load(ctx).await?,
})
}
}
@@ -47,13 +41,21 @@ impl DeviceInfo {
url.query_pairs_mut()
.append_pair("os.version", &self.os.version.to_string())
.append_pair("os.compat", &self.os.compat.to_string())
.append_pair("os.platform", &*self.os.platform);
.append_pair("os.platform", &*self.os.platform)
.append_pair("hardware.arch", &*self.hardware.arch)
.append_pair("hardware.ram", &self.hardware.ram.to_string());
for device in &self.hardware.devices {
url.query_pairs_mut().append_pair(
&format!("hardware.device.{}", device.class()),
device.product(),
);
}
HeaderValue::from_str(url.query().unwrap_or_default()).unwrap()
}
pub fn from_header_value(header: &HeaderValue) -> Result<Self, Error> {
let query: BTreeMap<_, _> = form_urlencoded::parse(header.as_bytes()).collect();
let has_hw_info = query.keys().any(|k| k.starts_with("hardware."));
Ok(Self {
os: OsInfo {
version: query
@@ -67,119 +69,34 @@ impl DeviceInfo {
.deref()
.into(),
},
hardware: has_hw_info
.then(|| {
Ok::<_, Error>(HardwareInfo {
arch: query
.get("hardware.arch")
.or_not_found("hardware.arch")?
.parse()?,
ram: query
.get("hardware.ram")
.or_not_found("hardware.ram")?
.parse()?,
devices: None,
})
})
.transpose()?,
})
}
pub fn filter_for_hardware(
&self,
method: &str,
params: Value,
res: &mut Value,
) -> Result<(), Error> {
match method {
"package.get" => {
let params: Model<GetPackageParams> = ModelExt::from_value(params);
let other = params.as_other_versions().de()?;
if params.as_id().transpose_ref().is_some() {
if other.unwrap_or_default() == PackageDetailLevel::Full {
self.filter_package_get_full(ModelExt::value_as_mut(res))?;
} else {
self.filter_package_get(ModelExt::value_as_mut(res))?;
}
} else {
for (_, v) in res.as_object_mut().into_iter().flat_map(|o| o.iter_mut()) {
if other.unwrap_or_default() == PackageDetailLevel::Full {
self.filter_package_get_full(ModelExt::value_as_mut(v))?;
} else {
self.filter_package_get(ModelExt::value_as_mut(v))?;
hardware: HardwareInfo {
arch: query
.get("hardware.arch")
.or_not_found("hardware.arch")?
.parse()?,
ram: query
.get("hardware.ram")
.or_not_found("hardware.ram")?
.parse()?,
devices: identity(query)
.split_off("hardware.device.")
.into_iter()
.filter_map(|(k, v)| match k.strip_prefix("hardware.device.") {
Some("processor") => Some(LshwDevice::Processor(LshwProcessor {
product: v.into_owned(),
})),
Some("display") => Some(LshwDevice::Display(LshwDisplay {
product: v.into_owned(),
})),
Some(class) => {
tracing::warn!("unknown device class: {class}");
None
}
}
}
Ok(())
}
"os.version.get" => self.filter_os_version(ModelExt::value_as_mut(res)),
_ => Ok(()),
}
}
fn filter_package_versions(
&self,
versions: &mut Model<BTreeMap<VersionString, PackageVersionInfo>>,
) -> Result<(), Error> {
let alpha_17: Version = "0.4.0-alpha.17".parse()?;
// Filter package versions using for_device
versions.retain(|_, info| info.for_device(self))?;
// Alpha.17 compatibility: add legacy fields
if self.os.version <= alpha_17 {
for (_, info) in versions.as_entries_mut()? {
let v = info.as_value_mut();
if let Some(mut tup) = v["s9pks"].get(0).cloned() {
v["s9pk"] = tup[1].take();
v["hardwareRequirements"] = tup[0].take();
v["s9pk"]["url"] = v["s9pk"]["urls"][0].clone();
}
}
}
Ok(())
}
fn filter_package_get(&self, res: &mut Model<GetPackageResponse>) -> Result<(), Error> {
self.filter_package_versions(res.as_best_mut())
}
fn filter_package_get_full(
&self,
res: &mut Model<GetPackageResponseFull>,
) -> Result<(), Error> {
self.filter_package_versions(res.as_best_mut())?;
self.filter_package_versions(res.as_other_versions_mut())
}
fn filter_os_version(&self, res: &mut Model<OsVersionInfoMap>) -> Result<(), Error> {
let alpha_17: Version = "0.4.0-alpha.17".parse()?;
// Filter OS versions based on source_version compatibility
res.retain(|_, info| {
let source_version = info.as_source_version().de()?;
Ok(self.os.version.satisfies(&source_version))
})?;
// Alpha.17 compatibility: add url field from urls array
if self.os.version <= alpha_17 {
for (_, info) in res.as_entries_mut()? {
let v = info.as_value_mut();
for asset_ty in ["iso", "squashfs", "img"] {
for (_, asset) in v[asset_ty]
.as_object_mut()
.into_iter()
.flat_map(|o| o.iter_mut())
{
asset["url"] = asset["urls"][0].clone();
}
}
}
}
Ok(())
_ => None,
})
.collect(),
},
})
}
}
@@ -210,7 +127,7 @@ pub struct HardwareInfo {
pub arch: InternedString,
#[ts(type = "number")]
pub ram: u64,
pub devices: Option<Vec<LshwDevice>>,
pub devices: Vec<LshwDevice>,
}
impl HardwareInfo {
pub async fn load(ctx: &RpcContext) -> Result<Self, Error> {
@@ -218,7 +135,7 @@ impl HardwareInfo {
Ok(Self {
arch: s.as_arch().de()?,
ram: s.as_ram().de()?,
devices: Some(s.as_devices().de()?),
devices: s.as_devices().de()?,
})
}
}
@@ -231,17 +148,11 @@ pub struct Metadata {
#[derive(Clone)]
pub struct DeviceInfoMiddleware {
device_info_header: Option<HeaderValue>,
device_info: Option<DeviceInfo>,
req: Option<RpcRequest>,
device_info: Option<HeaderValue>,
}
impl DeviceInfoMiddleware {
pub fn new() -> Self {
Self {
device_info_header: None,
device_info: None,
req: None,
}
Self { device_info: None }
}
}
@@ -252,7 +163,7 @@ impl Middleware<RegistryContext> for DeviceInfoMiddleware {
_: &RegistryContext,
request: &mut Request,
) -> Result<(), Response> {
self.device_info_header = request.headers_mut().remove(DEVICE_INFO_HEADER);
self.device_info = request.headers_mut().remove(DEVICE_INFO_HEADER);
Ok(())
}
async fn process_rpc_request(
@@ -263,11 +174,9 @@ impl Middleware<RegistryContext> for DeviceInfoMiddleware {
) -> Result<(), RpcResponse> {
async move {
if metadata.get_device_info {
if let Some(device_info) = &self.device_info_header {
let device_info = DeviceInfo::from_header_value(device_info)?;
request.params["__DeviceInfo_device_info"] = to_value(&device_info)?;
self.device_info = Some(device_info);
self.req = Some(request.clone());
if let Some(device_info) = &self.device_info {
request.params["__DeviceInfo_device_info"] =
to_value(&DeviceInfo::from_header_value(device_info)?)?;
}
}
@@ -276,19 +185,4 @@ impl Middleware<RegistryContext> for DeviceInfoMiddleware {
.await
.map_err(|e| RpcResponse::from_result(Err(e)))
}
async fn process_rpc_response(
&mut self,
_: &RegistryContext,
response: &mut RpcResponse,
) -> () {
if let (Some(req), Some(device_info), Ok(res)) =
(&self.req, &self.device_info, &mut response.result)
{
if let Err(e) =
device_info.filter_for_hardware(req.method.as_str(), req.params.clone(), res)
{
response.result = Err(e).map_err(From::from);
}
}
}
}
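
The header round trip above leans on URL query encoding: to_header_value serializes device facts as query pairs, and from_header_value parses them back with form_urlencoded. A minimal standalone sketch of that encoding with invented values, assuming the form_urlencoded crate (the same one the url crate builds on):

    use std::collections::BTreeMap;

    fn main() {
        // Encode: key/value pairs become a percent-encoded query string,
        // suitable for a header value.
        let encoded = form_urlencoded::Serializer::new(String::new())
            .append_pair("os.version", "0.4.0-alpha.18")
            .append_pair("os.platform", "x86_64")
            .append_pair("hardware.ram", "17179869184")
            .finish();

        // Decode: parse back into a map, as from_header_value does.
        let query: BTreeMap<String, String> = form_urlencoded::parse(encoded.as_bytes())
            .map(|(k, v)| (k.into_owned(), v.into_owned()))
            .collect();

        assert_eq!(query["os.platform"], "x86_64");
        assert_eq!(query["hardware.ram"], "17179869184");
    }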

View File

@@ -5,6 +5,9 @@ use crate::prelude::*;
pub struct PackageSignerScopeMigration;
impl RegistryMigration for PackageSignerScopeMigration {
fn name(&self) -> &'static str {
"PackageSignerScopeMigration"
}
fn action(&self, db: &mut Value) -> Result<(), Error> {
for (_, info) in db["index"]["package"]["packages"]
.as_object_mut()

View File

@@ -1,35 +0,0 @@
use imbl::vector;
use super::RegistryMigration;
use crate::prelude::*;
pub struct RegistryAssetArray;
impl RegistryMigration for RegistryAssetArray {
fn action(&self, db: &mut Value) -> Result<(), Error> {
for (_, info) in db["index"]["package"]["packages"]
.as_object_mut()
.unwrap()
.iter_mut()
{
for (_, info) in info["versions"].as_object_mut().unwrap().iter_mut() {
let hw_req = info["hardwareRequirements"].take();
let mut s9pk = info["s9pk"].take();
s9pk["urls"] = Value::Array(vector![s9pk["url"].take()]);
info["s9pks"] = Value::Array(vector![Value::Array(vector![hw_req, s9pk])]);
}
}
for (_, info) in db["index"]["os"]["versions"]
.as_object_mut()
.unwrap()
.iter_mut()
{
for asset_ty in ["iso", "squashfs", "img"] {
for (_, info) in info[asset_ty].as_object_mut().unwrap().iter_mut() {
info["urls"] = Value::Array(vector![info["url"].take()]);
}
}
}
Ok(())
}
}

View File

@@ -4,29 +4,22 @@ use crate::prelude::*;
use crate::registry::RegistryDatabase;
mod m_00_package_signer_scope;
mod m_01_registry_asset_array;
pub trait RegistryMigration {
fn name(&self) -> &'static str {
let val = std::any::type_name_of_val(self);
val.rsplit_once("::").map_or(val, |v| v.1)
}
fn name(&self) -> &'static str;
fn action(&self, db: &mut Value) -> Result<(), Error>;
}
pub const MIGRATIONS: &[&dyn RegistryMigration] = &[
&m_00_package_signer_scope::PackageSignerScopeMigration,
&m_01_registry_asset_array::RegistryAssetArray,
];
pub const MIGRATIONS: &[&dyn RegistryMigration] =
&[&m_00_package_signer_scope::PackageSignerScopeMigration];
#[instrument(skip_all)]
pub fn run_migrations(db: &mut Model<RegistryDatabase>) -> Result<(), Error> {
let mut migrations = db.as_migrations().de().unwrap_or_default();
for migration in MIGRATIONS {
let name = migration.name();
if !migrations.contains(name) {
if !migrations.contains(migration.name()) {
migration.action(ModelExt::as_value_mut(db))?;
migrations.insert(name.into());
migrations.insert(migration.name().into());
}
}
let mut db_deser = db.de()?;

View File

@@ -133,7 +133,7 @@ async fn add_asset(
.upsert(&platform, || {
Ok(RegistryAsset {
published_at: Utc::now(),
urls: vec![url.clone()],
url,
commitment: commitment.clone(),
signatures: HashMap::new(),
})
@@ -146,9 +146,6 @@ async fn add_asset(
))
} else {
s.signatures.insert(signer, signature);
if !s.urls.contains(&url) {
s.urls.push(url);
}
Ok(())
}
})?;

View File

@@ -187,8 +187,7 @@ pub async fn get_version(
platform,
device_info,
}: GetOsVersionParams,
) -> Result<Value, Error> // BTreeMap<Version, OsVersionInfo>
{
) -> Result<BTreeMap<Version, OsVersionInfo>, Error> {
let source = source.or_else(|| device_info.as_ref().map(|d| d.os.version.clone()));
let platform = platform.or_else(|| device_info.as_ref().map(|d| d.os.platform.clone()));
if let (Some(pool), Some(server_id), Some(arch)) = (&ctx.pool, server_id, &platform) {
@@ -203,63 +202,33 @@ pub async fn get_version(
.with_kind(ErrorKind::Database)?;
}
let target = target.unwrap_or(VersionRange::Any);
let mut res = to_value::<BTreeMap<Version, OsVersionInfo>>(
&ctx.db
.peek()
.await
.into_index()
.into_os()
.into_versions()
.into_entries()?
.into_iter()
.map(|(v, i)| i.de().map(|i| (v, i)))
.filter_ok(|(version, info)| {
platform
ctx.db
.peek()
.await
.into_index()
.into_os()
.into_versions()
.into_entries()?
.into_iter()
.map(|(v, i)| i.de().map(|i| (v, i)))
.filter_ok(|(version, info)| {
platform
.as_ref()
.map_or(true, |p| info.squashfs.contains_key(p))
&& version.satisfies(&target)
&& source
.as_ref()
.map_or(true, |p| info.squashfs.contains_key(p))
&& version.satisfies(&target)
&& source
.as_ref()
.map_or(true, |s| s.satisfies(&info.source_version))
})
.collect::<Result<_, _>>()?,
)?;
// TODO: remove
if device_info.map_or(false, |d| {
"0.4.0-alpha.17"
.parse::<Version>()
.map_or(false, |v| d.os.version <= v)
}) {
for (_, v) in res
.as_object_mut()
.into_iter()
.map(|v| v.iter_mut())
.flatten()
{
for asset_ty in ["iso", "squashfs", "img"] {
for (_, v) in v[asset_ty]
.as_object_mut()
.into_iter()
.map(|v| v.iter_mut())
.flatten()
{
v["url"] = v["urls"][0].clone();
}
}
}
}
Ok(res)
.map_or(true, |s| s.satisfies(&info.source_version))
})
.collect()
}
pub fn display_version_info<T>(
params: WithIoFormat<T>,
info: Value, // BTreeMap<Version, OsVersionInfo>,
info: BTreeMap<Version, OsVersionInfo>,
) -> Result<(), Error> {
use prettytable::*;
let info = from_value::<BTreeMap<Version, OsVersionInfo>>(info)?;
if let Some(format) = params.format {
return display_serializable(format, info);
}
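
The TODO block above is a back-compat shim: registries moved from a scalar url to a urls array, so responses served to clients at or below 0.4.0-alpha.17 get the first array entry copied back into url. The same rewrite, sketched with serde_json in place of imbl_value:

    use serde_json::{json, Value};

    // Copy urls[0] back into the legacy scalar "url" for every asset entry.
    fn backfill_url(assets: &mut Value) {
        for (_, asset) in assets.as_object_mut().into_iter().flatten() {
            asset["url"] = asset["urls"][0].clone();
        }
    }

    fn main() {
        let mut iso = json!({
            "x86_64": { "urls": ["https://mirror.example/startos.iso"] }
        });
        backfill_url(&mut iso);
        assert_eq!(iso["x86_64"]["url"], "https://mirror.example/startos.iso");
    }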

View File

@@ -12,11 +12,12 @@ use url::Url;
use crate::PackageId;
use crate::context::CliContext;
use crate::prelude::*;
use crate::progress::FullProgressTracker;
use crate::progress::{FullProgressTracker, ProgressTrackerWriter, ProgressUnits};
use crate::registry::asset::BufferedHttpSource;
use crate::registry::context::RegistryContext;
use crate::registry::package::index::PackageVersionInfo;
use crate::s9pk::S9pk;
use crate::s9pk::merkle_archive::source::ArchiveSource;
use crate::s9pk::merkle_archive::source::http::HttpSource;
use crate::s9pk::v2::SIG_CONTEXT;
use crate::sign::commitment::merkle_archive::MerkleArchiveCommitment;
@@ -24,14 +25,13 @@ use crate::sign::ed25519::Ed25519;
use crate::sign::{AnySignature, AnyVerifyingKey, SignatureScheme};
use crate::util::VersionString;
use crate::util::io::TrackingIO;
use crate::util::serde::Base64;
#[derive(Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct AddPackageParams {
#[ts(type = "string[]")]
pub urls: Vec<Url>,
#[ts(type = "string")]
pub url: Url,
#[ts(skip)]
#[serde(rename = "__Auth_signer")]
pub uploader: AnyVerifyingKey,
@@ -42,7 +42,7 @@ pub struct AddPackageParams {
pub async fn add_package(
ctx: RegistryContext,
AddPackageParams {
urls,
url,
uploader,
commitment,
signature,
@@ -53,35 +53,17 @@ pub async fn add_package(
.verify_commitment(&uploader, &commitment, SIG_CONTEXT, &signature)?;
let peek = ctx.db.peek().await;
let uploader_guid = peek.as_index().as_signers().get_signer(&uploader)?;
let Some(([url], rest)) = urls.split_at_checked(1) else {
return Err(Error::new(
eyre!("must specify at least 1 url"),
ErrorKind::InvalidRequest,
));
};
let s9pk = S9pk::deserialize(
&Arc::new(HttpSource::new(ctx.client.clone(), url.clone()).await?),
Some(&commitment),
)
.await?;
for url in rest {
S9pk::deserialize(
&Arc::new(HttpSource::new(ctx.client.clone(), url.clone()).await?),
Some(&commitment),
)
.await?;
}
let manifest = s9pk.as_manifest();
let mut info = PackageVersionInfo::from_s9pk(&s9pk, urls).await?;
for (_, s9pk) in &mut info.s9pks {
if !s9pk.signatures.contains_key(&uploader) && s9pk.commitment == commitment {
s9pk.signatures.insert(uploader.clone(), signature.clone());
}
let mut info = PackageVersionInfo::from_s9pk(&s9pk, url).await?;
if !info.s9pk.signatures.contains_key(&uploader) {
info.s9pk.signatures.insert(uploader.clone(), signature);
}
ctx.db
@@ -103,12 +85,7 @@ pub async fn add_package(
.as_package_mut()
.as_packages_mut()
.upsert(&manifest.id, || Ok(Default::default()))?;
let v = package.as_versions_mut();
if let Some(prev) = v.as_idx_mut(&manifest.version) {
prev.mutate(|p| p.merge_with(info, true))?;
} else {
v.insert(&manifest.version, &info)?;
}
package.as_versions_mut().insert(&manifest.version, &info)?;
Ok(())
} else {
@@ -124,10 +101,7 @@ pub async fn add_package(
#[serde(rename_all = "camelCase")]
pub struct CliAddPackageParams {
pub file: PathBuf,
#[arg(long)]
pub url: Vec<Url>,
#[arg(long)]
pub no_verify: bool,
pub url: Url,
}
pub async fn cli_add_package(
@@ -135,12 +109,7 @@ pub async fn cli_add_package(
context: ctx,
parent_method,
method,
params:
CliAddPackageParams {
file,
url,
no_verify,
},
params: CliAddPackageParams { file, url },
..
}: HandlerArgs<CliContext, CliAddPackageParams>,
) -> Result<(), Error> {
@@ -148,19 +117,7 @@ pub async fn cli_add_package(
let progress = FullProgressTracker::new();
let mut sign_phase = progress.add_phase(InternedString::intern("Signing File"), Some(1));
let verify = if !no_verify {
url.iter()
.map(|url| {
let phase = progress.add_phase(
InternedString::from_display(&lazy_format!("Verifying {url}")),
Some(100),
);
(url.clone(), phase)
})
.collect()
} else {
Vec::new()
};
let mut verify_phase = progress.add_phase(InternedString::intern("Verifying URL"), Some(100));
let mut index_phase = progress.add_phase(
InternedString::intern("Adding File to Registry Index"),
Some(1),
@@ -174,240 +131,11 @@ pub async fn cli_add_package(
let signature = Ed25519.sign_commitment(ctx.developer_key()?, &commitment, SIG_CONTEXT)?;
sign_phase.complete();
for (url, mut phase) in verify {
phase.start();
let source = BufferedHttpSource::new(ctx.client.clone(), url, phase).await?;
let mut src = S9pk::deserialize(&Arc::new(source), Some(&commitment)).await?;
src.serialize(&mut TrackingIO::new(0, &mut tokio::io::sink()), true)
.await?;
}
index_phase.start();
ctx.call_remote::<RegistryContext>(
&parent_method.into_iter().chain(method).join("."),
imbl_value::json!({
"urls": &url,
"signature": AnySignature::Ed25519(signature),
"commitment": commitment,
}),
)
.await?;
index_phase.complete();
progress.complete();
progress_task.await.with_kind(ErrorKind::Unknown)?;
Ok(())
}
#[derive(Debug, Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct RemovePackageParams {
pub id: PackageId,
pub version: VersionString,
#[arg(long)]
pub sighash: Option<Base64<[u8; 32]>>,
#[ts(skip)]
#[arg(skip)]
#[serde(rename = "__Auth_signer")]
pub signer: Option<AnyVerifyingKey>,
}
pub async fn remove_package(
ctx: RegistryContext,
RemovePackageParams {
id,
version,
sighash,
signer,
}: RemovePackageParams,
) -> Result<bool, Error> {
let peek = ctx.db.peek().await;
let signer =
signer.ok_or_else(|| Error::new(eyre!("missing signer"), ErrorKind::InvalidRequest))?;
let signer_guid = peek.as_index().as_signers().get_signer(&signer)?;
let rev = ctx
.db
.mutate(|db| {
if db.as_admins().de()?.contains(&signer_guid)
|| db
.as_index()
.as_package()
.as_packages()
.as_idx(&id)
.or_not_found(&id)?
.as_authorized()
.de()?
.get(&signer_guid)
.map_or(false, |v| version.satisfies(v))
{
if let Some(package) = db
.as_index_mut()
.as_package_mut()
.as_packages_mut()
.as_idx_mut(&id)
{
if let Some(sighash) = sighash {
if if let Some(package) = package.as_versions_mut().as_idx_mut(&version) {
package.as_s9pks_mut().mutate(|s| {
s.retain(|(_, asset)| asset.commitment.root_sighash != sighash);
Ok(s.is_empty())
})?
} else {
false
} {
package.as_versions_mut().remove(&version)?;
}
} else {
package.as_versions_mut().remove(&version)?;
}
}
Ok(())
} else {
Err(Error::new(eyre!("UNAUTHORIZED"), ErrorKind::Authorization))
}
})
.await;
rev.result.map(|_| rev.revision.is_some())
}
#[derive(Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct AddMirrorParams {
#[ts(type = "string")]
pub url: Url,
#[ts(skip)]
#[serde(rename = "__Auth_signer")]
pub uploader: AnyVerifyingKey,
pub commitment: MerkleArchiveCommitment,
pub signature: AnySignature,
}
pub async fn add_mirror(
ctx: RegistryContext,
AddMirrorParams {
url,
uploader,
commitment,
signature,
}: AddMirrorParams,
) -> Result<(), Error> {
uploader
.scheme()
.verify_commitment(&uploader, &commitment, SIG_CONTEXT, &signature)?;
let peek = ctx.db.peek().await;
let uploader_guid = peek.as_index().as_signers().get_signer(&uploader)?;
let s9pk = S9pk::deserialize(
&Arc::new(HttpSource::new(ctx.client.clone(), url.clone()).await?),
Some(&commitment),
)
.await?;
let manifest = s9pk.as_manifest();
let mut info = PackageVersionInfo::from_s9pk(&s9pk, vec![url]).await?;
for (_, s9pk) in &mut info.s9pks {
if !s9pk.signatures.contains_key(&uploader) && s9pk.commitment == commitment {
s9pk.signatures.insert(uploader.clone(), signature.clone());
}
}
ctx.db
.mutate(|db| {
if db.as_admins().de()?.contains(&uploader_guid)
|| db
.as_index()
.as_package()
.as_packages()
.as_idx(&manifest.id)
.or_not_found(&manifest.id)?
.as_authorized()
.de()?
.get(&uploader_guid)
.map_or(false, |v| manifest.version.satisfies(v))
{
let package = db
.as_index_mut()
.as_package_mut()
.as_packages_mut()
.as_idx_mut(&manifest.id)
.and_then(|p| p.as_versions_mut().as_idx_mut(&manifest.version))
.or_not_found(&lazy_format!("{}@{}", &manifest.id, &manifest.version))?;
package.mutate(|p| p.merge_with(info, false))?;
Ok(())
} else {
Err(Error::new(eyre!("UNAUTHORIZED"), ErrorKind::Authorization))
}
})
.await
.result
}
#[derive(Debug, Deserialize, Serialize, Parser)]
#[command(rename_all = "kebab-case")]
#[serde(rename_all = "camelCase")]
pub struct CliAddMirrorParams {
pub file: PathBuf,
pub url: Url,
pub no_verify: bool,
}
pub async fn cli_add_mirror(
HandlerArgs {
context: ctx,
parent_method,
method,
params:
CliAddMirrorParams {
file,
url,
no_verify,
},
..
}: HandlerArgs<CliContext, CliAddMirrorParams>,
) -> Result<(), Error> {
let s9pk = S9pk::open(&file, None).await?;
let progress = FullProgressTracker::new();
let mut sign_phase = progress.add_phase(InternedString::intern("Signing File"), Some(1));
let verify = if !no_verify {
let url = &url;
vec![(
url.clone(),
progress.add_phase(
InternedString::from_display(&lazy_format!("Verifying {url}")),
Some(100),
),
)]
} else {
Vec::new()
};
let mut index_phase = progress.add_phase(
InternedString::intern("Adding File to Registry Index"),
Some(1),
);
let progress_task =
progress.progress_bar_task(&format!("Adding {} to registry...", file.display()));
sign_phase.start();
let commitment = s9pk.as_archive().commitment().await?;
let signature = Ed25519.sign_commitment(ctx.developer_key()?, &commitment, SIG_CONTEXT)?;
sign_phase.complete();
for (url, mut phase) in verify {
phase.start();
let source = BufferedHttpSource::new(ctx.client.clone(), url, phase).await?;
let mut src = S9pk::deserialize(&Arc::new(source), Some(&commitment)).await?;
src.serialize(&mut TrackingIO::new(0, &mut tokio::io::sink()), true)
.await?;
}
verify_phase.start();
let source = BufferedHttpSource::new(ctx.client.clone(), url.clone(), verify_phase).await?;
let mut src = S9pk::deserialize(&Arc::new(source), Some(&commitment)).await?;
src.serialize(&mut TrackingIO::new(0, &mut tokio::io::sink()), true)
.await?;
index_phase.start();
ctx.call_remote::<RegistryContext>(
@@ -431,26 +159,22 @@ pub async fn cli_add_mirror(
#[derive(Debug, Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct RemoveMirrorParams {
pub struct RemovePackageParams {
pub id: PackageId,
pub version: VersionString,
#[arg(long)]
#[ts(type = "string")]
pub url: Url,
#[ts(skip)]
#[arg(skip)]
#[serde(rename = "__Auth_signer")]
pub signer: Option<AnyVerifyingKey>,
}
pub async fn remove_mirror(
pub async fn remove_package(
ctx: RegistryContext,
RemoveMirrorParams {
RemovePackageParams {
id,
version,
url,
signer,
}: RemoveMirrorParams,
}: RemovePackageParams,
) -> Result<(), Error> {
let peek = ctx.db.peek().await;
let signer =
@@ -476,20 +200,8 @@ pub async fn remove_mirror(
.as_package_mut()
.as_packages_mut()
.as_idx_mut(&id)
.and_then(|p| p.as_versions_mut().as_idx_mut(&version))
{
package.as_s9pks_mut().mutate(|s| {
s.iter_mut()
.for_each(|(_, asset)| asset.urls.retain(|u| u != &url));
if s.iter().any(|(_, asset)| asset.urls.is_empty()) {
Err(Error::new(
eyre!("cannot remove last mirror from an s9pk"),
ErrorKind::InvalidRequest,
))
} else {
Ok(())
}
})?;
package.as_versions_mut().remove(&version)?;
}
Ok(())
} else {
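
Both CLI paths verify a mirror by deserializing the s9pk against its Merkle commitment and re-serializing into a sink, which forces every byte to be fetched and checked without keeping any of it. The same stream-and-discard idea with a flat SHA-256 digest and blocking std I/O, assuming the sha2 crate; verify_stream is illustrative only:

    use sha2::{Digest, Sha256};
    use std::io::{self, Read, Write};

    fn verify_stream(mut src: impl Read, expected: &[u8; 32]) -> io::Result<bool> {
        let mut hasher = Sha256::new();
        let mut buf = [0u8; 8192];
        loop {
            let n = src.read(&mut buf)?;
            if n == 0 {
                break;
            }
            hasher.update(&buf[..n]);
            io::sink().write_all(&buf[..n])?; // discard, like tokio::io::sink() in the diff
        }
        Ok(hasher.finalize().as_slice() == expected.as_slice())
    }

    fn main() -> io::Result<()> {
        let payload = b"example s9pk bytes";
        let expected: [u8; 32] = Sha256::digest(payload).into();
        assert!(verify_stream(&payload[..], &expected)?);
        Ok(())
    }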

View File

@@ -20,12 +20,12 @@ use crate::s9pk::v2::SIG_CONTEXT;
use crate::util::VersionString;
use crate::util::io::{TrackingIO, to_tmp_path};
use crate::util::serde::{WithIoFormat, display_serializable};
use crate::util::tui::{choose, choose_custom_display};
use crate::util::tui::choose;
#[derive(
Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize, TS, ValueEnum,
)]
#[serde(rename_all = "kebab-case")]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub enum PackageDetailLevel {
None,
@@ -45,11 +45,10 @@ pub struct PackageInfoShort {
pub release_notes: String,
}
#[derive(Debug, Deserialize, Serialize, TS, Parser, HasModel)]
#[derive(Debug, Deserialize, Serialize, TS, Parser)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
#[ts(export)]
#[model = "Model<Self>"]
pub struct GetPackageParams {
pub id: Option<PackageId>,
#[ts(type = "string | null")]
@@ -61,14 +60,14 @@ pub struct GetPackageParams {
#[arg(skip)]
#[serde(rename = "__DeviceInfo_device_info")]
pub device_info: Option<DeviceInfo>,
#[serde(default)]
#[arg(default_value = "none")]
pub other_versions: Option<PackageDetailLevel>,
pub other_versions: PackageDetailLevel,
}
#[derive(Debug, Deserialize, Serialize, TS, HasModel)]
#[derive(Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
#[model = "Model<Self>"]
pub struct GetPackageResponse {
#[ts(type = "string[]")]
pub categories: BTreeSet<InternedString>,
@@ -109,10 +108,9 @@ impl GetPackageResponse {
}
}
#[derive(Debug, Deserialize, Serialize, TS, HasModel)]
#[derive(Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
#[model = "Model<Self>"]
pub struct GetPackageResponseFull {
#[ts(type = "string[]")]
pub categories: BTreeSet<InternedString>,
@@ -136,15 +134,15 @@ impl GetPackageResponseFull {
pub type GetPackagesResponse = BTreeMap<PackageId, GetPackageResponse>;
pub type GetPackagesResponseFull = BTreeMap<PackageId, GetPackageResponseFull>;
fn get_matching_models(
db: &Model<PackageIndex>,
fn get_matching_models<'a>(
db: &'a Model<PackageIndex>,
GetPackageParams {
id,
source_version,
device_info,
..
}: &GetPackageParams,
) -> Result<Vec<(PackageId, ExtendedVersion, Model<PackageVersionInfo>)>, Error> {
) -> Result<Vec<(PackageId, ExtendedVersion, &'a Model<PackageVersionInfo>)>, Error> {
if let Some(id) = id {
if let Some(pkg) = db.as_packages().as_idx(id) {
vec![(id.clone(), pkg)]
@@ -170,17 +168,11 @@ fn get_matching_models(
.unwrap_or(VersionRange::any()),
),
)
})? {
let mut info = info.clone();
if let Some(device_info) = &device_info {
if info.for_device(device_info)? {
Some((k.clone(), ExtendedVersion::from(v), info))
} else {
None
}
} else {
Some((k.clone(), ExtendedVersion::from(v), info))
}
})? && device_info
.as_ref()
.map_or(Ok(true), |device_info| info.works_for_device(device_info))?
{
Some((k.clone(), ExtendedVersion::from(v), info))
} else {
None
},
@@ -194,10 +186,12 @@ fn get_matching_models(
}
pub async fn get_package(ctx: RegistryContext, params: GetPackageParams) -> Result<Value, Error> {
use patch_db::ModelExt;
let peek = ctx.db.peek().await;
let mut best: BTreeMap<PackageId, BTreeMap<VersionString, Model<PackageVersionInfo>>> =
let mut best: BTreeMap<PackageId, BTreeMap<VersionString, &Model<PackageVersionInfo>>> =
Default::default();
let mut other: BTreeMap<PackageId, BTreeMap<VersionString, Model<PackageVersionInfo>>> =
let mut other: BTreeMap<PackageId, BTreeMap<VersionString, &Model<PackageVersionInfo>>> =
Default::default();
for (id, version, info) in get_matching_models(&peek.as_index().as_package(), &params)? {
let package_best = best.entry(id.clone()).or_default();
@@ -223,23 +217,23 @@ pub async fn get_package(ctx: RegistryContext, params: GetPackageParams) -> Resu
package_other.insert(version.into(), info);
}
}
if let Some(id) = &params.id {
if let Some(id) = params.id {
let categories = peek
.as_index()
.as_package()
.as_packages()
.as_idx(id)
.as_idx(&id)
.map(|p| p.as_categories().de())
.transpose()?
.unwrap_or_default();
let best: BTreeMap<VersionString, PackageVersionInfo> = best
.remove(id)
let best = best
.remove(&id)
.unwrap_or_default()
.into_iter()
.map(|(k, i)| Ok::<_, Error>((k, i.de()?)))
.map(|(k, v)| v.de().map(|v| (k, v)))
.try_collect()?;
let other = other.remove(id).unwrap_or_default();
match params.other_versions.unwrap_or_default() {
let other = other.remove(&id).unwrap_or_default();
match params.other_versions {
PackageDetailLevel::None => to_value(&GetPackageResponse {
categories,
best,
@@ -251,7 +245,7 @@ pub async fn get_package(ctx: RegistryContext, params: GetPackageParams) -> Resu
other_versions: Some(
other
.into_iter()
.map(|(k, i)| from_value(i.into()).map(|v| (k, v)))
.map(|(k, v)| from_value(v.as_value().clone()).map(|v| (k, v)))
.try_collect()?,
),
}),
@@ -260,12 +254,12 @@ pub async fn get_package(ctx: RegistryContext, params: GetPackageParams) -> Resu
best,
other_versions: other
.into_iter()
.map(|(k, i)| Ok::<_, Error>((k, i.de()?)))
.map(|(k, v)| v.de().map(|v| (k, v)))
.try_collect()?,
}),
}
} else {
match params.other_versions.unwrap_or_default() {
match params.other_versions {
PackageDetailLevel::None => to_value(
&best
.into_iter()
@@ -284,7 +278,7 @@ pub async fn get_package(ctx: RegistryContext, params: GetPackageParams) -> Resu
categories,
best: best
.into_iter()
.map(|(k, i)| Ok::<_, Error>((k, i.de()?)))
.map(|(k, v)| v.de().map(|v| (k, v)))
.try_collect()?,
other_versions: None,
},
@@ -311,12 +305,14 @@ pub async fn get_package(ctx: RegistryContext, params: GetPackageParams) -> Resu
categories,
best: best
.into_iter()
.map(|(k, i)| Ok::<_, Error>((k, i.de()?)))
.map(|(k, v)| v.de().map(|v| (k, v)))
.try_collect()?,
other_versions: Some(
other
.into_iter()
.map(|(k, i)| from_value(i.into()).map(|v| (k, v)))
.map(|(k, v)| {
from_value(v.as_value().clone()).map(|v| (k, v))
})
.try_collect()?,
),
},
@@ -343,11 +339,11 @@ pub async fn get_package(ctx: RegistryContext, params: GetPackageParams) -> Resu
categories,
best: best
.into_iter()
.map(|(k, i)| Ok::<_, Error>((k, i.de()?)))
.map(|(k, v)| v.de().map(|v| (k, v)))
.try_collect()?,
other_versions: other
.into_iter()
.map(|(k, i)| Ok::<_, Error>((k, i.de()?)))
.map(|(k, v)| v.de().map(|v| (k, v)))
.try_collect()?,
},
))
@@ -367,7 +363,7 @@ pub fn display_package_info(
}
if let Some(_) = params.rest.id {
if params.rest.other_versions.unwrap_or_default() == PackageDetailLevel::Full {
if params.rest.other_versions == PackageDetailLevel::Full {
for table in from_value::<GetPackageResponseFull>(info)?.tables() {
table.print_tty(false)?;
println!();
@@ -379,7 +375,7 @@ pub fn display_package_info(
}
}
} else {
if params.rest.other_versions.unwrap_or_default() == PackageDetailLevel::Full {
if params.rest.other_versions == PackageDetailLevel::Full {
for (_, package) in from_value::<GetPackagesResponseFull>(info)? {
for table in package.tables() {
table.print_tty(false)?;
@@ -435,9 +431,7 @@ pub async fn cli_download(
)
.await?,
)?;
let PackageVersionInfo {
s9pks: mut s9pk, ..
} = match res.best.len() {
let PackageVersionInfo { s9pk, .. } = match res.best.len() {
0 => {
return Err(Error::new(
eyre!(
@@ -458,75 +452,6 @@ pub async fn cli_download(
res.best.remove(version).unwrap()
}
};
let s9pk = match s9pk.len() {
0 => {
return Err(Error::new(
eyre!(
"Could not find a version of {id} that satisfies {}",
target_version.unwrap_or(VersionRange::Any)
),
ErrorKind::NotFound,
));
}
1 => s9pk.pop().unwrap().1,
_ => {
let (_, asset) = choose_custom_display(
&format!(concat!(
"Multiple packages with different hardware requirements found. ",
"Choose a file to download:"
)),
&s9pk,
|(hw, _)| {
use std::fmt::Write;
let mut res = String::new();
if let Some(arch) = &hw.arch {
write!(
&mut res,
"{}: {}",
if arch.len() == 1 {
"Architecture"
} else {
"Architectures"
},
arch.iter().join(", ")
)
.unwrap();
}
if !hw.device.is_empty() {
if !res.is_empty() {
write!(&mut res, "; ").unwrap();
}
write!(
&mut res,
"{}: {}",
if hw.device.len() == 1 {
"Device"
} else {
"Devices"
},
hw.device.iter().map(|d| &d.description).join(", ")
)
.unwrap();
}
if let Some(ram) = hw.ram {
if !res.is_empty() {
write!(&mut res, "; ").unwrap();
}
write!(
&mut res,
"RAM >={:.2}GiB",
ram as f64 / (1024.0 * 1024.0 * 1024.0)
)
.unwrap();
}
res
},
)
.await?;
asset.clone()
}
};
s9pk.validate(SIG_CONTEXT, s9pk.all_signers())?;
fetching_progress.complete();
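
The removed choose_custom_display closure built one human-readable menu label per s9pk variant from its hardware requirements. A condensed sketch of that label builder; describe and its argument shapes are simplifications, not the real HardwareRequirements type:

    // One clause per requirement field, joined with "; " as in the closure.
    fn describe(arch: &[&str], devices: &[&str], ram: Option<u64>) -> String {
        let mut parts = Vec::new();
        if !arch.is_empty() {
            let label = if arch.len() == 1 { "Architecture" } else { "Architectures" };
            parts.push(format!("{label}: {}", arch.join(", ")));
        }
        if !devices.is_empty() {
            let label = if devices.len() == 1 { "Device" } else { "Devices" };
            parts.push(format!("{label}: {}", devices.join(", ")));
        }
        if let Some(ram) = ram {
            parts.push(format!("RAM >={:.2}GiB", ram as f64 / (1024.0 * 1024.0 * 1024.0)));
        }
        parts.join("; ")
    }

    fn main() {
        assert_eq!(
            describe(&["x86_64"], &[], Some(8 * 1024 * 1024 * 1024)),
            "Architecture: x86_64; RAM >=8.00GiB"
        );
    }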

View File

@@ -1,10 +1,8 @@
use std::collections::{BTreeMap, BTreeSet};
use std::u32;
use chrono::Utc;
use exver::{Version, VersionRange};
use imbl_value::InternedString;
use patch_db::ModelExt;
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use url::Url;
@@ -52,7 +50,7 @@ pub struct Category {
pub name: String,
}
#[derive(Debug, Deserialize, Serialize, HasModel, TS, PartialEq, Eq)]
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
@@ -64,10 +62,11 @@ pub struct DependencyMetadata {
pub optional: bool,
}
#[derive(Debug, Deserialize, Serialize, HasModel, TS, PartialEq, Eq)]
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
pub struct PackageMetadata {
#[ts(export)]
pub struct PackageVersionInfo {
#[ts(type = "string")]
pub title: InternedString,
pub icon: DataUrl<'static>,
@@ -94,11 +93,13 @@ pub struct PackageMetadata {
pub os_version: Version,
#[ts(type = "string | null")]
pub sdk_version: Option<Version>,
#[serde(default)]
pub hardware_acceleration: bool,
pub hardware_requirements: HardwareRequirements,
#[ts(type = "string | null")]
pub source_version: Option<VersionRange>,
pub s9pk: RegistryAsset<MerkleArchiveCommitment>,
}
impl PackageMetadata {
pub async fn load<S: FileSource + Clone>(s9pk: &S9pk<S>) -> Result<Self, Error> {
impl PackageVersionInfo {
pub async fn from_s9pk<S: FileSource + Clone>(s9pk: &S9pk<S>, url: Url) -> Result<Self, Error> {
let manifest = s9pk.as_manifest();
let mut dependency_metadata = BTreeMap::new();
for (id, info) in &manifest.dependencies.0 {
@@ -130,153 +131,67 @@ impl PackageMetadata {
dependency_metadata,
os_version: manifest.os_version.clone(),
sdk_version: manifest.sdk_version.clone(),
hardware_acceleration: manifest.hardware_acceleration.clone(),
})
}
}
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct PackageVersionInfo {
#[serde(flatten)]
pub metadata: PackageMetadata,
#[ts(type = "string | null")]
pub source_version: Option<VersionRange>,
pub s9pks: Vec<(HardwareRequirements, RegistryAsset<MerkleArchiveCommitment>)>,
}
impl PackageVersionInfo {
pub async fn from_s9pk<S: FileSource + Clone>(
s9pk: &S9pk<S>,
urls: Vec<Url>,
) -> Result<Self, Error> {
Ok(Self {
metadata: PackageMetadata::load(s9pk).await?,
hardware_requirements: manifest.hardware_requirements.clone(),
source_version: None, // TODO
s9pks: vec![(
s9pk.as_manifest().hardware_requirements.clone(),
RegistryAsset {
published_at: Utc::now(),
urls,
commitment: s9pk.as_archive().commitment().await?,
signatures: [(
AnyVerifyingKey::Ed25519(s9pk.as_archive().signer()),
AnySignature::Ed25519(s9pk.as_archive().signature().await?),
)]
.into_iter()
.collect(),
},
)],
s9pk: RegistryAsset {
published_at: Utc::now(),
url,
commitment: s9pk.as_archive().commitment().await?,
signatures: [(
AnyVerifyingKey::Ed25519(s9pk.as_archive().signer()),
AnySignature::Ed25519(s9pk.as_archive().signature().await?),
)]
.into_iter()
.collect(),
},
})
}
pub fn merge_with(&mut self, other: Self, replace_urls: bool) -> Result<(), Error> {
for (hw_req, asset) in other.s9pks {
if let Some((_, matching)) = self
.s9pks
.iter_mut()
.find(|(h, s)| s.commitment == asset.commitment && *h == hw_req)
{
if replace_urls {
matching.urls = asset.urls;
} else {
for url in asset.urls {
if matching.urls.contains(&url) {
continue;
}
matching.urls.push(url);
}
}
} else {
if let Some((h, matching)) = self.s9pks.iter_mut().find(|(h, _)| *h == hw_req) {
*matching = asset;
*h = hw_req;
} else {
self.s9pks.push((hw_req, asset));
}
}
}
self.s9pks.sort_by_key(|(h, _)| h.specificity_desc());
Ok(())
}
pub fn table(&self, version: &VersionString) -> prettytable::Table {
use prettytable::*;
let mut table = Table::new();
table.add_row(row![bc => &self.metadata.title]);
table.add_row(row![bc => &self.title]);
table.add_row(row![br -> "VERSION", AsRef::<str>::as_ref(version)]);
table.add_row(row![br -> "RELEASE NOTES", &self.metadata.release_notes]);
table.add_row(
row![br -> "ABOUT", &textwrap::wrap(&self.metadata.description.short, 80).join("\n")],
);
table.add_row(row![br -> "RELEASE NOTES", &self.release_notes]);
table.add_row(row![br -> "ABOUT", &textwrap::wrap(&self.description.short, 80).join("\n")]);
table.add_row(row![
br -> "DESCRIPTION",
&textwrap::wrap(&self.metadata.description.long, 80).join("\n")
&textwrap::wrap(&self.description.long, 80).join("\n")
]);
table.add_row(row![br -> "GIT HASH", self.metadata.git_hash.as_deref().unwrap_or("N/A")]);
table.add_row(row![br -> "LICENSE", &self.metadata.license]);
table.add_row(row![br -> "PACKAGE REPO", &self.metadata.wrapper_repo.to_string()]);
table.add_row(row![br -> "SERVICE REPO", &self.metadata.upstream_repo.to_string()]);
table.add_row(row![br -> "WEBSITE", &self.metadata.marketing_site.to_string()]);
table.add_row(row![br -> "SUPPORT", &self.metadata.support_site.to_string()]);
table.add_row(row![br -> "GIT HASH", self.git_hash.as_deref().unwrap_or("N/A")]);
table.add_row(row![br -> "LICENSE", &self.license]);
table.add_row(row![br -> "PACKAGE REPO", &self.wrapper_repo.to_string()]);
table.add_row(row![br -> "SERVICE REPO", &self.upstream_repo.to_string()]);
table.add_row(row![br -> "WEBSITE", &self.marketing_site.to_string()]);
table.add_row(row![br -> "SUPPORT", &self.support_site.to_string()]);
table
}
}
impl Model<PackageVersionInfo> {
/// Filters this package version for compatibility with the given device.
/// Returns false if the package is incompatible (should be removed).
/// Modifies s9pks in place to only include compatible variants.
pub fn for_device(&mut self, device_info: &DeviceInfo) -> Result<bool, Error> {
if !self
.as_metadata()
.as_os_version()
.de()?
.satisfies(&device_info.os.compat)
{
pub fn works_for_device(&self, device_info: &DeviceInfo) -> Result<bool, Error> {
if !self.as_os_version().de()?.satisfies(&device_info.os.compat) {
return Ok(false);
}
if let Some(hw) = &device_info.hardware {
self.as_s9pks_mut().mutate(|s9pks| {
s9pks.retain(|(hw_req, _)| {
if let Some(arch) = &hw_req.arch {
if !arch.contains(&hw.arch) {
return false;
}
}
if let Some(ram) = hw_req.ram {
if hw.ram < ram {
return false;
}
}
if let Some(dev) = &hw.devices {
for device_filter in &hw_req.device {
if !dev
.iter()
.filter(|d| d.class() == &*device_filter.class)
.any(|d| device_filter.matches(d))
{
return false;
}
}
}
true
});
if hw.devices.is_some() {
s9pks.sort_by_key(|(req, _)| req.specificity_desc());
} else {
s9pks.sort_by_key(|(req, _)| {
let (dev, arch, ram) = req.specificity_desc();
(u32::MAX - dev, arch, ram)
});
}
Ok(())
})?;
if ModelExt::as_value(self.as_s9pks())
.as_array()
.map_or(true, |s| s.is_empty())
let hw = self.as_hardware_requirements().de()?;
if let Some(arch) = hw.arch {
if !arch.contains(&device_info.hardware.arch) {
return Ok(false);
}
}
if let Some(ram) = hw.ram {
if device_info.hardware.ram < ram {
return Ok(false);
}
}
for device_filter in hw.device {
if !device_info
.hardware
.devices
.iter()
.filter(|d| d.class() == &*device_filter.class)
.any(|d| device_filter.pattern.as_ref().is_match(d.product()))
{
return Ok(false);
}
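
One side of this hunk filters and ranks per-variant s9pks where the other short-circuits on a single hardware_requirements check. The filter-then-rank idea, reduced to std types; Req and its specificity_desc only mirror the shape of the real HardwareRequirements::specificity_desc:

    #[derive(Debug)]
    struct Req {
        devices: u32,        // number of device filters
        arches: Option<u32>, // number of allowed architectures, if constrained
        ram: Option<u64>,    // minimum RAM in bytes, if constrained
    }

    impl Req {
        // Ascending sort key: more device filters and fewer arches sort first.
        fn specificity_desc(&self) -> (u32, u32, u64) {
            (
                u32::MAX - self.devices,
                self.arches.unwrap_or(u32::MAX),
                self.ram.unwrap_or(0),
            )
        }
    }

    fn main() {
        let mut s9pks = vec![
            (Req { devices: 0, arches: None, ram: None }, "generic"),
            (Req { devices: 1, arches: Some(1), ram: Some(8) }, "nvidia-x86_64"),
        ];
        // retain(...) with the per-device checks elided, then rank.
        s9pks.sort_by_key(|(req, _)| req.specificity_desc());
        assert_eq!(s9pks[0].1, "nvidia-x86_64");
    }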

View File

@@ -32,44 +32,12 @@ pub fn package_api<C: Context>() -> ParentHandler<C> {
.no_display()
.with_about("Add package to registry index"),
)
.subcommand(
"add-mirror",
from_fn_async(add::add_mirror)
.with_metadata("get_signer", Value::Bool(true))
.no_cli(),
)
.subcommand(
"add-mirror",
from_fn_async(add::cli_add_mirror)
.no_display()
.with_about("Add a mirror for an s9pk"),
)
.subcommand(
"remove",
from_fn_async(add::remove_package)
.with_metadata("get_signer", Value::Bool(true))
.with_custom_display_fn(|args, changed| {
if !changed {
tracing::warn!(
"{}@{}{} does not exist, so not removed",
args.params.id,
args.params.version,
args.params
.sighash
.map_or(String::new(), |h| format!("#{h}"))
);
}
Ok(())
})
.with_about("Remove package from registry index")
.with_call_remote::<CliContext>(),
)
.subcommand(
"remove-mirror",
from_fn_async(add::remove_mirror)
.with_metadata("get_signer", Value::Bool(true))
.no_display()
.with_about("Remove a mirror from a package")
.with_about("Remove package from registry index")
.with_call_remote::<CliContext>(),
)
.subcommand(

View File

@@ -7,7 +7,7 @@ use ts_rs::TS;
use crate::prelude::*;
use crate::util::Invoke;
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, TS, PartialEq, Eq)]
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, TS)]
#[ts(type = "string")]
pub struct GitHash(String);

View File

@@ -176,7 +176,7 @@ impl S9pk<TmpSource<PackSource>> {
impl TryFrom<ManifestV1> for Manifest {
type Error = Error;
fn try_from(mut value: ManifestV1) -> Result<Self, Self::Error> {
fn try_from(value: ManifestV1) -> Result<Self, Self::Error> {
let default_url = value.upstream_repo.clone();
let mut version = ExtendedVersion::from(
exver::emver::Version::from_str(&value.version)
@@ -190,9 +190,6 @@ impl TryFrom<ManifestV1> for Manifest {
} else if &*value.id == "lightning-terminal" || &*value.id == "robosats" {
version = version.map_upstream(|v| v.with_prerelease(["alpha".into()]));
}
if &*value.id == "nostr" {
value.id = "nostr-rs-relay".parse()?;
}
Ok(Self {
id: value.id,
title: format!("{} (Legacy)", value.title).into(),
@@ -242,23 +239,18 @@ impl TryFrom<ManifestV1> for Manifest {
.device
.into_iter()
.map(|(class, product)| DeviceFilter {
description: format!(
pattern_description: format!(
"a {class} device matching the expression {}",
product.as_ref()
),
class,
product: Some(product),
..Default::default()
pattern: product,
})
.collect(),
},
git_hash: value.git_hash,
os_version: value.eos_version,
sdk_version: None,
hardware_acceleration: match value.main {
PackageProcedure::Docker(d) => d.gpu_acceleration,
PackageProcedure::Script(_) => false,
},
})
}
}

View File

@@ -15,7 +15,6 @@ use crate::s9pk::git_hash::GitHash;
use crate::s9pk::merkle_archive::directory_contents::DirectoryContents;
use crate::s9pk::merkle_archive::expected::{Expected, Filter};
use crate::s9pk::v2::pack::ImageConfig;
use crate::util::lshw::{LshwDevice, LshwDisplay, LshwProcessor};
use crate::util::serde::Regex;
use crate::util::{VersionString, mime};
use crate::version::{Current, VersionT};
@@ -63,8 +62,6 @@ pub struct Manifest {
pub dependencies: Dependencies,
#[serde(default)]
pub hardware_requirements: HardwareRequirements,
#[serde(default)]
pub hardware_acceleration: bool,
pub git_hash: Option<GitHash>,
#[serde(default = "current_version")]
#[ts(type = "string")]
@@ -116,7 +113,7 @@ impl Manifest {
if let Some(emulate_as) = &config.emulate_missing_as {
expected.check_file(
Path::new("images")
.join(emulate_as)
.join(arch)
.join(image_id)
.with_extension("squashfs"),
)?;
@@ -168,7 +165,7 @@ impl Manifest {
}
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, TS, PartialEq)]
#[derive(Clone, Debug, Default, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct HardwareRequirements {
@@ -179,122 +176,19 @@ pub struct HardwareRequirements {
#[ts(type = "string[] | null")]
pub arch: Option<BTreeSet<InternedString>>,
}
impl HardwareRequirements {
/// returns a value that can be used as a sort key to get most specific requirements first
pub fn specificity_desc(&self) -> (u32, u32, u64) {
(
u32::MAX - self.device.len() as u32, // more device requirements = more specific
self.arch.as_ref().map_or(u32::MAX, |a| a.len() as u32), // more arches = less specific
self.ram.map_or(0, |r| r), // more ram = more specific
)
}
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, TS)]
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct DeviceFilter {
pub description: String,
#[ts(type = "\"processor\" | \"display\"")]
pub class: InternedString,
#[ts(type = "string | null")]
pub product: Option<Regex>,
#[ts(type = "string | null")]
pub vendor: Option<Regex>,
#[ts(optional)]
pub capabilities: Option<BTreeSet<InternedString>>,
#[ts(optional)]
pub driver: Option<InternedString>,
}
// Omit description
impl PartialEq for DeviceFilter {
fn eq(&self, other: &Self) -> bool {
self.class == other.class
&& self.product == other.product
&& self.vendor == other.vendor
&& self.capabilities == other.capabilities
&& self.driver == other.driver
}
}
impl DeviceFilter {
pub fn matches(&self, device: &LshwDevice) -> bool {
if &*self.class != device.class() {
return false;
}
match device {
LshwDevice::Processor(LshwProcessor {
product,
vendor,
capabilities,
}) => {
if let Some(match_product) = &self.product {
if !product
.as_deref()
.map_or(false, |p| match_product.as_ref().is_match(p))
{
return false;
}
}
if let Some(match_vendor) = &self.vendor {
if !vendor
.as_deref()
.map_or(false, |v| match_vendor.as_ref().is_match(v))
{
return false;
}
}
if !self
.capabilities
.as_ref()
.map_or(true, |c| c.is_subset(capabilities))
{
return false;
}
true
}
LshwDevice::Display(LshwDisplay {
product,
vendor,
capabilities,
driver,
}) => {
if let Some(match_product) = &self.product {
if !product
.as_deref()
.map_or(false, |p| match_product.as_ref().is_match(p))
{
return false;
}
}
if let Some(match_vendor) = &self.vendor {
if !vendor
.as_deref()
.map_or(false, |v| match_vendor.as_ref().is_match(v))
{
return false;
}
}
if !self
.capabilities
.as_ref()
.map_or(true, |c| c.is_subset(capabilities))
{
return false;
}
if !self
.driver
.as_ref()
.map_or(true, |d| Some(d) == driver.as_ref())
{
return false;
}
true
}
}
}
#[ts(type = "string")]
pub pattern: Regex,
pub pattern_description: String,
}
#[derive(Clone, Debug, Deserialize, Serialize, TS, PartialEq, Eq)]
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[ts(export)]
pub struct Description {
pub short: String,
@@ -318,7 +212,7 @@ impl Description {
}
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, TS, PartialEq, Eq)]
#[derive(Clone, Debug, Default, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct Alerts {
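
The deleted DeviceFilter::matches repeats one pattern per optional criterion: a filter field left as None always passes, while a set pattern must match a value that is actually present. That kernel, isolated, assuming the regex crate; field_matches is illustrative:

    use regex::Regex;

    // None filter -> always passes; Some filter -> the value must exist
    // and match, exactly as in the removed product/vendor checks.
    fn field_matches(pattern: Option<&Regex>, value: Option<&str>) -> bool {
        match pattern {
            None => true,
            Some(re) => value.map_or(false, |v| re.is_match(v)),
        }
    }

    fn main() {
        let nvidia = Regex::new("(?i)nvidia").unwrap();
        assert!(field_matches(Some(&nvidia), Some("NVIDIA GeForce RTX 3060")));
        assert!(!field_matches(Some(&nvidia), None)); // absent value fails a set filter
        assert!(field_matches(None, None)); // unset filter always passes
    }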

View File

@@ -265,7 +265,7 @@ impl PackParams {
}
}
#[derive(Debug, Default, Clone, Deserialize, Serialize, TS)]
#[derive(Debug, Clone, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct ImageConfig {
@@ -274,8 +274,15 @@ pub struct ImageConfig {
pub arch: BTreeSet<InternedString>,
#[ts(type = "string | null")]
pub emulate_missing_as: Option<InternedString>,
#[serde(default)]
pub nvidia_container: bool,
}
impl Default for ImageConfig {
fn default() -> Self {
Self {
source: ImageSource::Packed,
arch: BTreeSet::new(),
emulate_missing_as: None,
}
}
}
#[derive(Parser)]
@@ -292,8 +299,6 @@ struct CliImageConfig {
arch: Vec<InternedString>,
#[arg(long)]
emulate_missing_as: Option<InternedString>,
#[arg(long)]
nvidia_container: bool,
}
impl TryFrom<CliImageConfig> for ImageConfig {
type Error = clap::Error;
@@ -312,7 +317,6 @@ impl TryFrom<CliImageConfig> for ImageConfig {
},
arch: value.arch.into_iter().collect(),
emulate_missing_as: value.emulate_missing_as,
nvidia_container: value.nvidia_container,
};
res.emulate_missing_as
.as_ref()
@@ -375,21 +379,20 @@ pub enum ImageSource {
DockerTag(String),
// Recipe(DirRecipe),
}
impl Default for ImageSource {
fn default() -> Self {
ImageSource::Packed
}
}
impl ImageSource {
pub fn ingredients(&self) -> Vec<PathBuf> {
match self {
Self::Packed => Vec::new(),
Self::DockerBuild { dockerfile, .. } => {
Self::DockerBuild {
dockerfile,
workdir,
..
} => {
vec![
dockerfile
workdir
.as_deref()
.unwrap_or(Path::new("Dockerfile"))
.to_owned(),
.unwrap_or(Path::new("."))
.join(dockerfile.as_deref().unwrap_or(Path::new("Dockerfile"))),
]
}
Self::DockerTag(_) => Vec::new(),

View File

@@ -2,7 +2,6 @@ use std::path::{Path, PathBuf};
use std::sync::Arc;
use clap::Parser;
use imbl::OrdMap;
use imbl_value::Value;
use once_cell::sync::OnceCell;
use rpc_toolkit::yajrc::RpcError;
@@ -54,13 +53,7 @@ impl Context for ContainerCliContext {
}
impl CallRemote<EffectContext> for ContainerCliContext {
async fn call_remote(
&self,
method: &str,
_: OrdMap<&'static str, Value>,
params: Value,
_: Empty,
) -> Result<Value, RpcError> {
async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
call_remote_socket(
tokio::net::UnixStream::connect(&self.0.socket)
.await

View File

@@ -36,7 +36,6 @@ struct ServiceCallbackMap {
>,
get_status: BTreeMap<PackageId, Vec<CallbackHandler>>,
get_container_ip: BTreeMap<PackageId, Vec<CallbackHandler>>,
get_service_manifest: BTreeMap<PackageId, Vec<CallbackHandler>>,
}
impl ServiceCallbacks {
@@ -69,10 +68,6 @@ impl ServiceCallbacks {
v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
!v.is_empty()
});
this.get_service_manifest.retain(|_, v| {
v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
!v.is_empty()
});
})
}
@@ -255,25 +250,6 @@ impl ServiceCallbacks {
.filter(|cb| !cb.0.is_empty())
})
}
pub(super) fn add_get_service_manifest(&self, package_id: PackageId, handler: CallbackHandler) {
self.mutate(|this| {
this.get_service_manifest
.entry(package_id)
.or_default()
.push(handler)
})
}
#[must_use]
pub fn get_service_manifest(&self, package_id: &PackageId) -> Option<CallbackHandlers> {
self.mutate(|this| {
this.get_service_manifest
.remove(package_id)
.map(CallbackHandlers)
.filter(|cb| !cb.0.is_empty())
})
}
}
pub struct CallbackHandler {
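
The retain blocks removed above follow a two-level garbage-collection pattern: prune dead handlers inside each entry, then drop any entry left empty. A std-only sketch where Weak::strong_count stands in for the handle.is_active() and seed liveness checks:

    use std::collections::BTreeMap;
    use std::sync::{Arc, Weak};

    fn prune(map: &mut BTreeMap<String, Vec<Weak<()>>>) {
        map.retain(|_, handlers| {
            handlers.retain(|h| h.strong_count() > 0); // drop dead handlers
            !handlers.is_empty() // drop entries with none left
        });
    }

    fn main() {
        let live = Arc::new(());
        let dead = Arc::new(());
        let mut map = BTreeMap::new();
        map.insert("pkg-a".to_string(), vec![Arc::downgrade(&live)]);
        map.insert("pkg-b".to_string(), vec![Arc::downgrade(&dead)]);
        drop(dead);
        prune(&mut map);
        assert!(map.contains_key("pkg-a"));
        assert!(!map.contains_key("pkg-b"));
    }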

View File

@@ -6,7 +6,7 @@ use crate::prelude::*;
use crate::service::Service;
#[derive(Clone)]
pub struct EffectContext(Weak<Service>);
pub(in crate::service) struct EffectContext(Weak<Service>);
impl EffectContext {
pub fn new(service: Weak<Service>) -> Self {
Self(service)

View File

@@ -36,7 +36,8 @@ pub async fn restart(context: EffectContext) -> Result<(), Error> {
.as_idx_mut(id)
.or_not_found(id)?
.as_status_info_mut()
.restart()
.as_desired_mut()
.map_mutate(|s| Ok(s.restart()))
})
.await
.result?;

View File

@@ -14,10 +14,7 @@ use crate::disk::mount::filesystem::bind::{Bind, FileType};
use crate::disk::mount::filesystem::idmapped::{IdMap, IdMapped};
use crate::disk::mount::filesystem::{FileSystem, MountType};
use crate::disk::mount::util::{is_mountpoint, unmount};
use crate::s9pk::manifest::Manifest;
use crate::service::effects::callbacks::CallbackHandler;
use crate::service::effects::prelude::*;
use crate::service::rpc::CallbackId;
use crate::status::health_check::NamedHealthCheckResult;
use crate::util::{FromStrParser, VersionString};
use crate::volume::data_dir;
@@ -370,45 +367,3 @@ pub async fn check_dependencies(
}
Ok(results)
}
#[derive(Debug, Clone, Serialize, Deserialize, TS, Parser)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct GetServiceManifestParams {
pub package_id: PackageId,
#[ts(optional)]
#[arg(skip)]
pub callback: Option<CallbackId>,
}
pub async fn get_service_manifest(
context: EffectContext,
GetServiceManifestParams {
package_id,
callback,
}: GetServiceManifestParams,
) -> Result<Manifest, Error> {
let context = context.deref()?;
if let Some(callback) = callback {
let callback = callback.register(&context.seed.persistent_container);
context
.seed
.ctx
.callbacks
.add_get_service_manifest(package_id.clone(), CallbackHandler::new(&context, callback));
}
let db = context.seed.ctx.db.peek().await;
let manifest = db
.as_public()
.as_package_data()
.as_idx(&package_id)
.or_not_found(&package_id)?
.as_state_info()
.as_manifest(ManifestPreference::New)
.de()?;
Ok(manifest)
}

View File

@@ -15,7 +15,7 @@ mod dependency;
mod health;
mod net;
mod prelude;
pub mod subcontainer;
mod subcontainer;
mod system;
mod version;
@@ -88,10 +88,6 @@ pub fn handler<C: Context>() -> ParentHandler<C> {
"get-installed-packages",
from_fn_async(dependency::get_installed_packages).no_cli(),
)
.subcommand(
"get-service-manifest",
from_fn_async(dependency::get_service_manifest).no_cli(),
)
// health
.subcommand("set-health", from_fn_async(health::set_health).no_cli())
// subcontainer

View File

@@ -11,10 +11,6 @@ use crate::service::effects::prelude::*;
use crate::service::persistent_container::Subcontainer;
use crate::util::Invoke;
pub const NVIDIA_OVERLAY_PATH: &str = "/var/tmp/startos/nvidia-overlay";
pub const NVIDIA_OVERLAY_DEBIAN: &str = "/var/tmp/startos/nvidia-overlay/debian";
pub const NVIDIA_OVERLAY_GENERIC: &str = "/var/tmp/startos/nvidia-overlay/generic";
#[cfg(target_os = "linux")]
mod sync;
@@ -116,34 +112,8 @@ pub async fn create_subcontainer_fs(
.with_kind(ErrorKind::Incoherent)?,
);
tracing::info!("Mounting overlay {guid} for {image_id}");
// Determine which nvidia overlay to use based on distro detection
let nvidia_overlay: &[&str] = if context
.seed
.persistent_container
.s9pk
.as_manifest()
.images
.get(&image_id)
.map_or(false, |i| i.nvidia_container)
{
// Check if image is debian-based by looking for /etc/debian_version
let is_debian = tokio::fs::metadata(image.path().join("etc/debian_version"))
.await
.is_ok();
if is_debian && tokio::fs::metadata(NVIDIA_OVERLAY_DEBIAN).await.is_ok() {
&[NVIDIA_OVERLAY_DEBIAN]
} else if tokio::fs::metadata(NVIDIA_OVERLAY_GENERIC).await.is_ok() {
&[NVIDIA_OVERLAY_GENERIC]
} else {
&[]
}
} else {
&[]
};
let subcontainer_wrapper = Subcontainer {
overlay: OverlayGuard::mount_layers(&[], image, nvidia_overlay, &mountpoint).await?,
overlay: OverlayGuard::mount(image, &mountpoint).await?,
name: name
.unwrap_or_else(|| InternedString::intern(format!("subcontainer-{}", image_id))),
image_id: image_id.clone(),

View File

@@ -9,6 +9,7 @@ use std::sync::{Arc, Weak};
use std::time::Duration;
use axum::extract::ws::Utf8Bytes;
use crate::util::net::WebSocket;
use clap::Parser;
use futures::future::BoxFuture;
use futures::stream::FusedStream;
@@ -47,7 +48,6 @@ use crate::util::Never;
use crate::util::actor::concurrent::ConcurrentActor;
use crate::util::future::NonDetachingJoinHandle;
use crate::util::io::{AsyncReadStream, AtomicFile, TermSize, delete_file};
use crate::util::net::WebSocket;
use crate::util::serde::Pem;
use crate::util::sync::SyncMutex;
use crate::volume::data_dir;
@@ -575,17 +575,6 @@ impl Service {
.await
.result?;
// Trigger manifest callbacks after successful installation
let manifest = service.seed.persistent_container.s9pk.as_manifest();
if let Some(callbacks) = ctx.callbacks.get_service_manifest(&manifest.id) {
let manifest_value =
serde_json::to_value(manifest).with_kind(ErrorKind::Serialization)?;
callbacks
.call(imbl::vector![manifest_value.into()])
.await
.log_err();
}
Ok(service)
}

View File

@@ -96,9 +96,7 @@ impl PersistentContainer {
.join("logs")
.join(&s9pk.as_manifest().id),
),
LxcConfig {
hardware_acceleration: s9pk.manifest.hardware_acceleration,
},
LxcConfig::default(),
)
.await?;
let rpc_client = lxc_container.connect_rpc(Some(RPC_CONNECT_TIMEOUT)).await?;

View File

@@ -1,7 +1,5 @@
use std::path::Path;
use imbl::vector;
use crate::context::RpcContext;
use crate::db::model::package::{InstalledState, InstallingInfo, InstallingState, PackageState};
use crate::prelude::*;
@@ -67,11 +65,6 @@ pub async fn cleanup(ctx: &RpcContext, id: &PackageId, soft: bool) -> Result<(),
));
}
};
// Trigger manifest callbacks with null to indicate uninstall
if let Some(callbacks) = ctx.callbacks.get_service_manifest(&manifest.id) {
callbacks.call(vector![Value::Null]).await.log_err();
}
if !soft {
let path = Path::new(DATA_DIR).join(PKG_VOLUME_DIR).join(&manifest.id);
if tokio::fs::metadata(&path).await.is_ok() {

View File

@@ -11,7 +11,7 @@ use crate::sign::commitment::{Commitment, Digestable};
use crate::util::io::TrackingIO;
use crate::util::serde::Base64;
#[derive(Clone, Copy, Debug, Deserialize, Serialize, HasModel, TS, PartialEq, Eq)]
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]

View File

@@ -24,7 +24,6 @@ impl FromStr for NamedHealthCheckResult {
"success" => NamedHealthCheckResultKind::Success { message },
"disabled" => NamedHealthCheckResultKind::Disabled { message },
"starting" => NamedHealthCheckResultKind::Starting { message },
"waiting" => NamedHealthCheckResultKind::Waiting { message },
"loading" => NamedHealthCheckResultKind::Loading {
message: message.unwrap_or_default(),
},
@@ -62,7 +61,6 @@ pub enum NamedHealthCheckResultKind {
Success { message: Option<String> },
Disabled { message: Option<String> },
Starting { message: Option<String> },
Waiting { message: Option<String> },
Loading { message: String },
Failure { message: String },
}
@@ -91,13 +89,6 @@ impl std::fmt::Display for NamedHealthCheckResult {
write!(f, "{name}: Starting")
}
}
NamedHealthCheckResultKind::Waiting { message } => {
if let Some(message) = message {
write!(f, "{name}: Waiting ({message})")
} else {
write!(f, "{name}: Waiting")
}
}
NamedHealthCheckResultKind::Loading { message } => {
write!(f, "{name}: Loading ({message})")
}

View File

@@ -51,16 +51,10 @@ impl Model<StatusInfo> {
}
pub fn stopped(&mut self) -> Result<(), Error> {
self.as_started_mut().ser(&None)?;
self.as_health_mut().ser(&Default::default())?;
Ok(())
}
pub fn restart(&mut self) -> Result<(), Error> {
self.as_desired_mut().map_mutate(|s| Ok(s.restart()))?;
self.as_health_mut().ser(&Default::default())?;
Ok(())
}
pub fn init(&mut self) -> Result<(), Error> {
self.stopped()?;
self.as_started_mut().ser(&None)?;
self.as_desired_mut().map_mutate(|s| {
Ok(match s {
DesiredStatus::BackingUp {

View File

@@ -251,8 +251,6 @@ impl CallRemote<TunnelContext> for CliContext {
async fn call_remote(
&self,
mut method: &str,
_: OrdMap<&'static str, Value>,
params: Value,
_: Empty,
) -> Result<Value, RpcError> {
@@ -317,7 +315,6 @@ impl CallRemote<TunnelContext, TunnelUrlParams> for RpcContext {
async fn call_remote(
&self,
mut method: &str,
_: OrdMap<&'static str, Value>,
params: Value,
TunnelUrlParams { tunnel }: TunnelUrlParams,
) -> Result<Value, RpcError> {

View File

@@ -13,7 +13,7 @@ use ts_rs::TS;
use crate::util::mime::{mime, unmime};
use crate::{Error, ErrorKind, ResultExt};
#[derive(Clone, TS, PartialEq, Eq)]
#[derive(Clone, TS)]
#[ts(type = "string")]
pub struct DataUrl<'a> {
pub mime: InternedString,

View File

@@ -16,7 +16,7 @@ use clap::builder::ValueParserFactory;
use futures::future::{BoxFuture, Fuse};
use futures::{FutureExt, Stream, TryStreamExt};
use inotify::{EventMask, EventStream, Inotify, WatchMask};
use nix::unistd::{Gid, Uid, fchown};
use nix::unistd::{Gid, Uid};
use serde::{Deserialize, Serialize};
use tokio::fs::{File, OpenOptions};
use tokio::io::{
@@ -892,16 +892,6 @@ impl TmpDir {
Ok(())
}
pub fn leak(mut self) {
std::mem::take(&mut self.path);
}
pub async fn unmount_and_delete(self) -> Result<(), Error> {
crate::disk::mount::util::unmount_all_under(&self.path, false).await?;
tokio::fs::remove_dir_all(&self.path).await?;
Ok(())
}
pub async fn gc(self: Arc<Self>) -> Result<(), Error> {
if let Ok(dir) = Arc::try_unwrap(self) {
dir.delete().await
@@ -1067,32 +1057,6 @@ pub async fn write_file_atomic(
Ok(())
}
#[instrument(skip_all)]
pub async fn write_file_owned_atomic(
path: impl AsRef<Path>,
contents: impl AsRef<[u8]>,
uid: u32,
gid: u32,
) -> Result<(), Error> {
let path = path.as_ref();
if let Some(parent) = path.parent() {
tokio::fs::create_dir_all(parent)
.await
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("mkdir -p {parent:?}")))?;
}
let mut file = AtomicFile::new(path, None::<&Path>)
.await
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("create {path:?}")))?;
fchown(&*file, Some(uid.into()), Some(gid.into())).with_kind(ErrorKind::Filesystem)?;
file.write_all(contents.as_ref())
.await
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("write {path:?}")))?;
file.save()
.await
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("save {path:?}")))?;
Ok(())
}
fn poll_flush_prefix<W: AsyncWrite>(
mut writer: Pin<&mut W>,
cx: &mut std::task::Context<'_>,

View File

@@ -1,12 +1,9 @@
use std::collections::BTreeSet;
use imbl_value::InternedString;
use serde::{Deserialize, Serialize};
use tokio::process::Command;
use ts_rs::TS;
use crate::prelude::*;
use crate::util::Invoke;
use crate::{Error, ResultExt};
const KNOWN_CLASSES: &[&str] = &["processor", "display"];
@@ -25,57 +22,22 @@ impl LshwDevice {
Self::Display(_) => "display",
}
}
pub fn from_value(value: &Value) -> Option<Self> {
match value["class"].as_str() {
Some("processor") => Some(LshwDevice::Processor(LshwProcessor::from_value(value))),
Some("display") => Some(LshwDevice::Display(LshwDisplay::from_value(value))),
_ => None,
pub fn product(&self) -> &str {
match self {
Self::Processor(hw) => hw.product.as_str(),
Self::Display(hw) => hw.product.as_str(),
}
}
}
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
pub struct LshwProcessor {
pub product: Option<InternedString>,
pub vendor: Option<InternedString>,
pub capabilities: BTreeSet<InternedString>,
}
impl LshwProcessor {
fn from_value(value: &Value) -> Self {
Self {
product: value["product"].as_str().map(From::from),
vendor: value["vendor"].as_str().map(From::from),
capabilities: value["capabilities"]
.as_object()
.into_iter()
.flat_map(|o| o.keys())
.map(|k| k.clone())
.collect(),
}
}
pub product: String,
}
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
pub struct LshwDisplay {
pub product: Option<InternedString>,
pub vendor: Option<InternedString>,
pub capabilities: BTreeSet<InternedString>,
pub driver: Option<InternedString>,
}
impl LshwDisplay {
fn from_value(value: &Value) -> Self {
Self {
product: value["product"].as_str().map(From::from),
vendor: value["vendor"].as_str().map(From::from),
capabilities: value["capabilities"]
.as_object()
.into_iter()
.flat_map(|o| o.keys())
.map(|k| k.clone())
.collect(),
driver: value["configuration"]["driver"].as_str().map(From::from),
}
}
pub product: String,
}
pub async fn lshw() -> Result<Vec<LshwDevice>, Error> {
@@ -85,10 +47,19 @@ pub async fn lshw() -> Result<Vec<LshwDevice>, Error> {
cmd.arg("-class").arg(*class);
}
Ok(
serde_json::from_slice::<Vec<Value>>(&cmd.invoke(crate::ErrorKind::Lshw).await?)
.with_kind(crate::ErrorKind::Deserialization)?
.iter()
.filter_map(LshwDevice::from_value)
.collect(),
serde_json::from_slice::<Vec<serde_json::Value>>(
&cmd.invoke(crate::ErrorKind::Lshw).await?,
)
.with_kind(crate::ErrorKind::Deserialization)?
.into_iter()
.filter_map(|v| match serde_json::from_value(v) {
Ok(a) => Some(a),
Err(e) => {
tracing::error!("Failed to parse lshw output: {e}");
tracing::debug!("{e:?}");
None
}
})
.collect(),
)
}
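The hand-rolled from_value constructors give way to plain serde deserialization, with per-entry error logging so one malformed device no longer poisons the whole list. A minimal sketch of how the slimmed-down shape can parse lshw -json output directly; the serde tag attribute here is an assumption, not visible in these hunks:

    use serde::Deserialize;

    #[derive(Debug, Deserialize)]
    pub struct LshwProcessor {
        pub product: String,
    }

    #[derive(Debug, Deserialize)]
    pub struct LshwDisplay {
        pub product: String,
    }

    // Assumed tagging: lshw emits objects like {"class": "display", "product": "..."}.
    #[derive(Debug, Deserialize)]
    #[serde(tag = "class", rename_all = "lowercase")]
    pub enum LshwDevice {
        Processor(LshwProcessor),
        Display(LshwDisplay),
    }

    fn main() {
        let raw = r#"{"class": "display", "product": "GA102 [GeForce RTX 3090]"}"#;
        match serde_json::from_str::<LshwDevice>(raw) {
            Ok(d) => println!("parsed: {d:?}"),
            Err(e) => eprintln!("Failed to parse lshw output: {e}"),
        }
    }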

View File

@@ -1127,11 +1127,6 @@ impl Serialize for Regex {
serialize_display(&self.0, serializer)
}
}
impl PartialEq for Regex {
fn eq(&self, other: &Self) -> bool {
InternedString::from_display(self.as_ref()) == InternedString::from_display(other.as_ref())
}
}
// TODO: make this not allocate
#[derive(Debug)]

View File

@@ -95,7 +95,7 @@ pub async fn prompt_multiline<
Ok(res)
}
pub async fn choose_custom_display<'t, T>(
pub async fn choose_custom_display<'t, T: std::fmt::Display>(
prompt: &str,
choices: &'t [T],
mut display: impl FnMut(&T) -> String,
@@ -121,7 +121,7 @@ pub async fn choose_custom_display<'t, T>(
if choice.len() < 1 {
return Err(Error::new(eyre!("Aborted"), ErrorKind::Cancelled));
}
let (idx, choice_str) = string_choices
let (idx, _) = string_choices
.iter()
.enumerate()
.find(|(_, s)| s.as_str() == choice[0].as_str())
@@ -132,7 +132,7 @@ pub async fn choose_custom_display<'t, T>(
)
})?;
let choice = &choices[idx];
println!("{prompt} {choice_str}");
println!("{prompt} {choice}");
Ok(&choice)
}

View File

@@ -56,9 +56,8 @@ mod v0_4_0_alpha_13;
mod v0_4_0_alpha_14;
mod v0_4_0_alpha_15;
mod v0_4_0_alpha_16;
mod v0_4_0_alpha_17;
pub type Current = v0_4_0_alpha_17::Version; // VERSION_BUMP
pub type Current = v0_4_0_alpha_16::Version; // VERSION_BUMP
impl Current {
#[instrument(skip(self, db))]
@@ -176,8 +175,7 @@ enum Version {
V0_4_0_alpha_13(Wrapper<v0_4_0_alpha_13::Version>),
V0_4_0_alpha_14(Wrapper<v0_4_0_alpha_14::Version>),
V0_4_0_alpha_15(Wrapper<v0_4_0_alpha_15::Version>),
V0_4_0_alpha_16(Wrapper<v0_4_0_alpha_16::Version>),
V0_4_0_alpha_17(Wrapper<v0_4_0_alpha_17::Version>), // VERSION_BUMP
V0_4_0_alpha_16(Wrapper<v0_4_0_alpha_16::Version>), // VERSION_BUMP
Other(exver::Version),
}
@@ -236,8 +234,7 @@ impl Version {
Self::V0_4_0_alpha_13(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_14(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_15(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_16(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_17(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP
Self::V0_4_0_alpha_16(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP
Self::Other(v) => {
return Err(Error::new(
eyre!("unknown version {v}"),
@@ -288,8 +285,7 @@ impl Version {
Version::V0_4_0_alpha_13(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_14(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_15(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_16(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_17(Wrapper(x)) => x.semver(), // VERSION_BUMP
Version::V0_4_0_alpha_16(Wrapper(x)) => x.semver(), // VERSION_BUMP
Version::Other(x) => x.clone(),
}
}

View File

@@ -286,18 +286,6 @@ impl VersionT for Version {
ErrorKind::Filesystem,
));
}
if tokio::fs::metadata("/media/startos/data/package-data/volumes/nostr")
.await
.is_ok()
{
tokio::fs::rename(
"/media/startos/data/package-data/volumes/nostr",
"/media/startos/data/package-data/volumes/nostr-rs-relay",
)
.await?;
}
// Should be the name of the package
let mut paths = tokio::fs::read_dir(path).await?;
while let Some(path) = paths.next_entry().await? {

View File

@@ -1,7 +1,6 @@
use std::path::Path;
use exver::{PreReleaseSegment, VersionRange};
use imbl_value::json;
use tokio::fs::File;
use super::v0_3_5::V0_3_0_COMPAT;
@@ -11,7 +10,7 @@ use crate::context::RpcContext;
use crate::install::PKG_ARCHIVE_DIR;
use crate::prelude::*;
use crate::s9pk::S9pk;
use crate::s9pk::manifest::Manifest;
use crate::s9pk::manifest::{DeviceFilter, Manifest};
use crate::s9pk::merkle_archive::MerkleArchive;
use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile;
use crate::s9pk::v2::SIG_CONTEXT;
@@ -85,8 +84,28 @@ impl VersionT for Version {
let mut manifest = previous_manifest.clone();
if let Some(_) = previous_manifest["hardwareRequirements"]["device"].as_object() {
manifest["hardwareRequirements"]["device"] = json!([]);
if let Some(device) =
previous_manifest["hardwareRequirements"]["device"].as_object()
{
manifest["hardwareRequirements"]["device"] = to_value(
&device
.into_iter()
.map(|(class, product)| {
Ok::<_, Error>(DeviceFilter {
pattern_description: format!(
"a {class} device matching the expression {}",
&product
),
class: class.clone(),
pattern: from_value(product.clone())?,
})
})
.fold(Ok::<_, Error>(Vec::new()), |acc, value| {
let mut acc = acc?;
acc.push(value?);
Ok(acc)
})?,
)?;
}
if previous_manifest != manifest {
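Concretely, the added block migrates hardwareRequirements.device from the old object form to the new array-of-DeviceFilter form used by the binding further down. Illustrative before/after (the exact quoting inside patternDescription depends on how the Value renders via Display):

    before:  "device": { "display": "(?i)nvidia" }
    after:   "device": [ {
                 "class": "display",
                 "pattern": "(?i)nvidia",
                 "patternDescription": "a display device matching the expression \"(?i)nvidia\""
             } ]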

View File

@@ -1,53 +0,0 @@
use exver::{PreReleaseSegment, VersionRange};
use super::v0_3_5::V0_3_0_COMPAT;
use super::{VersionT, v0_4_0_alpha_16};
use crate::db::model::public::AcmeSettings;
use crate::net::acme::AcmeProvider;
use crate::prelude::*;
lazy_static::lazy_static! {
static ref V0_4_0_alpha_17: exver::Version = exver::Version::new(
[0, 4, 0],
[PreReleaseSegment::String("alpha".into()), 17.into()]
);
}
#[derive(Clone, Copy, Debug, Default)]
pub struct Version;
impl VersionT for Version {
type Previous = v0_4_0_alpha_16::Version;
type PreUpRes = ();
async fn pre_up(self) -> Result<Self::PreUpRes, Error> {
Ok(())
}
fn semver(self) -> exver::Version {
V0_4_0_alpha_17.clone()
}
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
#[instrument(skip_all)]
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
let acme = db["public"]["serverInfo"]["network"]["acme"]
.as_object_mut()
.or_not_found("public.serverInfo.network.acme")?;
let letsencrypt =
InternedString::intern::<&str>("letsencrypt".parse::<AcmeProvider>()?.as_ref());
if !acme.contains_key(&letsencrypt) {
acme.insert(
letsencrypt,
to_value(&AcmeSettings {
contact: Vec::new(),
})?,
);
}
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())
}
}

View File

@@ -3,39 +3,33 @@ set -e
SYSTEMCTL=systemctl
if [ -n "$DPKG_MAINTSCRIPT_PACKAGE" ]; then
SYSTEMCTL=deb-systemd-helper
SYSTEMCTL=deb-systemd-helper
fi
if [ -f /usr/sbin/grub-probe ] && ! [ -L /usr/sbin/grub-probe ]; then
mv /usr/sbin/grub-probe /usr/sbin/grub-probe-default
ln -s /usr/lib/startos/scripts/grub-probe-eos /usr/sbin/grub-probe
mv /usr/sbin/grub-probe /usr/sbin/grub-probe-default
ln -s /usr/lib/startos/scripts/grub-probe-eos /usr/sbin/grub-probe
fi
cp /usr/lib/startos/scripts/startos-initramfs-module /etc/initramfs-tools/scripts/startos
if ! grep overlay /etc/initramfs-tools/modules > /dev/null; then
echo overlay >> /etc/initramfs-tools/modules
echo overlay >> /etc/initramfs-tools/modules
fi
update-initramfs -u -k all
if [ -f /etc/default/grub ]; then
sed -i '/\(^\|#\)GRUB_CMDLINE_LINUX=/c\GRUB_CMDLINE_LINUX="boot=startos console=ttyS0,115200n8 console=tty0"' /etc/default/grub
sed -i '/\(^\|#\)GRUB_CMDLINE_LINUX_DEFAULT=/c\GRUB_CMDLINE_LINUX_DEFAULT=""' /etc/default/grub
sed -i '/\(^\|#\)GRUB_DISTRIBUTOR=/c\GRUB_DISTRIBUTOR="StartOS v$(cat /usr/lib/startos/VERSION.txt)"' /etc/default/grub
sed -i '/\(^\|#\)GRUB_TERMINAL=/c\GRUB_TERMINAL="serial"\nGRUB_SERIAL_COMMAND="serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"' /etc/default/grub
if grep '^GRUB_SERIAL_COMMAND=' /etc/default/grub > /dev/null; then
sed -i '/\(^\|#\)GRUB_SERIAL_COMMAND=/c\GRUB_SERIAL_COMMAND="serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"' /etc/default/grub
else
echo 'GRUB_SERIAL_COMMAND="serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"' >> /etc/default/grub
fi
sed -i '/\(^\|#\)GRUB_CMDLINE_LINUX=/c\GRUB_CMDLINE_LINUX="boot=startos console=ttyS0,115200n8"' /etc/default/grub
sed -i '/\(^\|#\)GRUB_DISTRIBUTOR=/c\GRUB_DISTRIBUTOR="StartOS v$(cat /usr/lib/startos/VERSION.txt)"' /etc/default/grub
sed -i '/\(^\|#\)GRUB_TERMINAL=/c\GRUB_TERMINAL="serial"\nGRUB_SERIAL_COMMAND="serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"' /etc/default/grub
fi
VERSION="$(cat /usr/lib/startos/VERSION.txt)"
ENVIRONMENT=$(cat /usr/lib/startos/ENVIRONMENT.txt)
VERSION_ENV="${VERSION}"
if [ -n "${ENVIRONMENT}" ]; then
VERSION_ENV="${VERSION} (${ENVIRONMENT})"
VERSION_ENV="${VERSION} (${ENVIRONMENT})"
fi
# set /etc/os-release
@@ -95,8 +89,8 @@ $SYSTEMCTL mask hibernate.target
$SYSTEMCTL mask hybrid-sleep.target
if which gsettings > /dev/null; then
gsettings set org.gnome.settings-daemon.plugins.power sleep-inactive-ac-timeout '0'
gsettings set org.gnome.settings-daemon.plugins.power sleep-inactive-battery-timeout '0'
gsettings set org.gnome.settings-daemon.plugins.power sleep-inactive-ac-timeout '0'
gsettings set org.gnome.settings-daemon.plugins.power sleep-inactive-battery-timeout '0'
fi
sed -i 's/PasswordAuthentication no/PasswordAuthentication yes/g' /etc/ssh/sshd_config
@@ -128,7 +122,7 @@ ln -sf /usr/lib/startos/scripts/wireguard-vps-proxy-setup /usr/bin/wireguard-vps
echo "fs.inotify.max_user_watches=1048576" > /etc/sysctl.d/97-startos.conf
if ! getent group | grep '^startos:'; then
groupadd startos
groupadd startos
fi
rm -f /etc/motd
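For reference, the sed edits in the GRUB block above leave /etc/default/grub with entries along these lines (illustrative excerpt; GRUB_SERIAL_COMMAND sets the port parameters GRUB itself uses once GRUB_TERMINAL is "serial"):

    GRUB_DISTRIBUTOR="StartOS v$(cat /usr/lib/startos/VERSION.txt)"
    GRUB_TERMINAL="serial"
    GRUB_SERIAL_COMMAND="serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"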

View File

@@ -15,7 +15,6 @@ import {
CreateTaskParams,
MountParams,
StatusInfo,
Manifest,
} from "./osBindings"
import {
PackageId,
@@ -84,11 +83,6 @@ export type Effects = {
mount(options: MountParams): Promise<string>
/** Returns a list of the ids of all installed packages */
getInstalledPackages(): Promise<string[]>
/** Returns the manifest of a service */
getServiceManifest(options: {
packageId: PackageId
callback?: () => void
}): Promise<Manifest>
// health
/** sets the result of a health check */

View File

@@ -224,6 +224,8 @@ export type ListValueSpecObject = {
uniqueBy: UniqueBy
displayAs: string | null
}
// TODO Aiden do we really want this expressivity? Why not the below. Also what's with the "readonly" portion?
// export type UniqueBy = null | string | { any: string[] } | { all: string[] }
export type UniqueBy =
| null

View File

@@ -1,9 +0,0 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { AnySignature } from "./AnySignature"
import type { MerkleArchiveCommitment } from "./MerkleArchiveCommitment"
export type AddMirrorParams = {
url: string
commitment: MerkleArchiveCommitment
signature: AnySignature
}

View File

@@ -3,7 +3,7 @@ import type { AnySignature } from "./AnySignature"
import type { MerkleArchiveCommitment } from "./MerkleArchiveCommitment"
export type AddPackageParams = {
urls: string[]
url: string
commitment: MerkleArchiveCommitment
signature: AnySignature
}

View File

@@ -1,9 +0,0 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { Guid } from "./Guid"
import type { PackageId } from "./PackageId"
export type AddPackageSignerParams = {
id: PackageId
signer: Guid
versions: string | null
}

View File

@@ -1,10 +1,7 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type DeviceFilter = {
description: string
class: "processor" | "display"
product: string | null
vendor: string | null
capabilities?: Array<string>
driver?: string
pattern: string
patternDescription: string
}

View File

@@ -7,5 +7,5 @@ export type GetPackageParams = {
id: PackageId | null
targetVersion: string | null
sourceVersion: Version | null
otherVersions: PackageDetailLevel | null
otherVersions: PackageDetailLevel
}

View File

@@ -1,8 +0,0 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { CallbackId } from "./CallbackId"
import type { PackageId } from "./PackageId"
export type GetServiceManifestParams = {
packageId: PackageId
callback?: CallbackId
}

View File

@@ -5,5 +5,4 @@ export type ImageConfig = {
source: ImageSource
arch: string[]
emulateMissingAs: string | null
nvidiaContainer: boolean
}

View File

@@ -1,8 +1,3 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type LshwDisplay = {
product: string | null
vendor: string | null
capabilities: Array<string>
driver: string | null
}
export type LshwDisplay = { product: string }

View File

@@ -1,7 +1,3 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type LshwProcessor = {
product: string | null
vendor: string | null
capabilities: Array<string>
}
export type LshwProcessor = { product: string }

View File

@@ -31,7 +31,6 @@ export type Manifest = {
alerts: Alerts
dependencies: Dependencies
hardwareRequirements: HardwareRequirements
hardwareAcceleration: boolean
gitHash: GitHash | null
osVersion: string
sdkVersion: string | null

View File

@@ -4,7 +4,6 @@ export type NamedHealthCheckResult = { name: string } & (
| { result: "success"; message: string | null }
| { result: "disabled"; message: string | null }
| { result: "starting"; message: string | null }
| { result: "waiting"; message: string | null }
| { result: "loading"; message: string }
| { result: "failure"; message: string }
)

View File

@@ -4,7 +4,7 @@ import type { PackageVersionInfo } from "./PackageVersionInfo"
import type { Version } from "./Version"
export type PackageInfo = {
authorized: { [key: Guid]: string }
authorized: Array<Guid>
versions: { [key: Version]: PackageVersionInfo }
categories: string[]
}

View File

@@ -2,4 +2,4 @@
import type { Guid } from "./Guid"
import type { PackageId } from "./PackageId"
export type RemovePackageSignerParams = { id: PackageId; signer: Guid }
export type PackageSignerParams = { id: PackageId; signer: Guid }

View File

@@ -10,8 +10,6 @@ import type { PackageId } from "./PackageId"
import type { RegistryAsset } from "./RegistryAsset"
export type PackageVersionInfo = {
sourceVersion: string | null
s9pks: Array<[HardwareRequirements, RegistryAsset<MerkleArchiveCommitment>]>
title: string
icon: DataUrl
description: Description
@@ -28,5 +26,7 @@ export type PackageVersionInfo = {
dependencyMetadata: { [key: PackageId]: DependencyMetadata }
osVersion: string
sdkVersion: string | null
hardwareAcceleration: boolean
hardwareRequirements: HardwareRequirements
sourceVersion: string | null
s9pk: RegistryAsset<MerkleArchiveCommitment>
}

Some files were not shown because too many files have changed in this diff.