registry updates for multi-hardware-requirements

This commit is contained in:
Aiden McClelland
2026-01-05 11:20:24 -07:00
parent b9c4cd2d96
commit 0e15f18cc2
31 changed files with 668 additions and 195 deletions

View File

@@ -5,5 +5,6 @@
+ firmware-libertas
+ firmware-misc-nonfree
+ firmware-realtek
+ nvidia-driver
+ nvidia-kernel-dkms
+ nvidia-container-toolkit
# + nvidia-driver
# + nvidia-kernel-dkms

View File

@@ -174,40 +174,130 @@ if [ "${IB_TARGET_PLATFORM}" = "rockchip64" ]; then
echo "deb https://apt.armbian.com/ ${IB_SUITE} main" > config/archives/armbian.list
fi
if [ "$NON_FREE" = 1 ]; then
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | gpg --dearmor -o config/archives/nvidia-container-toolkit.key
curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list \
| sed 's#deb https://#deb [signed-by=/etc/apt/trusted.gpg.d/nvidia-container-toolkit.key.gpg] https://#g' \
> config/archives/nvidia-container-toolkit.list
fi
cat > config/archives/backports.pref <<- EOF
Package: linux-image-*
Pin: release n=${IB_SUITE}-backports
Pin-Priority: 500
Package: nvidia-kernel-dkms
Package: linux-headers-*
Pin: release n=${IB_SUITE}-backports
Pin-Priority: 500
Package: *nvidia*
Pin: release n=${IB_SUITE}-backports
Pin-Priority: 500
EOF
# Dependencies
# Hooks
## Firmware
cat > config/hooks/normal/9000-install-startos.hook.chroot << EOF
#!/bin/bash
set -e
if [ "${NON_FREE}" = "1" ]; then
# install latest NVIDIA driver in a given major series
# ---------------- configuration ----------------
# Major driver series, e.g. 550, 560, 570
NVIDIA_MAJOR="\${NVIDIA_MAJOR:-580}"
BASE_URL="https://download.nvidia.com/XFree86/Linux-${QEMU_ARCH}"
echo "[nvidia-hook] Using NVIDIA major series: \${NVIDIA_MAJOR}" >&2
# ---------------- kernel version ----------------
# Determine target kernel version from newest /boot/vmlinuz-* in the chroot.
KVER="\$(
ls -1t /boot/vmlinuz-* 2>/dev/null \
| head -n1 \
| sed 's|.*/vmlinuz-||'
)"
if [ -z "\${KVER}" ]; then
echo "[nvidia-hook] ERROR: no /boot/vmlinuz-* found; cannot determine kernel version" >&2
exit 1
fi
echo "[nvidia-hook] Target kernel version: \${KVER}" >&2
# Ensure kernel headers are present
if [ ! -e "/lib/modules/\${KVER}/build" ]; then
apt-get install linux-headers-\${KVER}-${IB_TARGET_ARCH}
echo "[nvidia-hook] ERROR: /lib/modules/\${KVER}/build missing; install headers for \${KVER} before this hook." >&2
exit 1
fi
# ---------------- find latest driver in major series ----------------
echo "[nvidia-hook] Fetching version list from \${BASE_URL}/" >&2
NVIDIA_VER="\$(
curl -fsSL "\${BASE_URL}/" \
| sed -n 's/.*href="([0-9][0-9][0-9][0-9.]*)/".*/\u0001/p' \
| grep -E "^\${NVIDIA_MAJOR}." \
| sort -V \
| tail -n1
)"
if [ -z "\${NVIDIA_VER}" ]; then
echo "[nvidia-hook] ERROR: could not find NVIDIA series \${NVIDIA_MAJOR} under \${BASE_URL}/" >&2
exit 1
fi
echo "[nvidia-hook] Selected NVIDIA driver version: \${NVIDIA_VER}" >&2
# ---------------- download and run installer ----------------
RUN_NAME="NVIDIA-Linux-${QEMU_ARCH}-\${NVIDIA_VER}.run"
RUN_PATH="/root/\${RUN_NAME}"
RUN_URL="\${BASE_URL}/\${NVIDIA_VER}/\${RUN_NAME}"
echo "[nvidia-hook] Downloading \${RUN_URL}" >&2
wget -O "\${RUN_PATH}" "\${RUN_URL}"
chmod +x "\${RUN_PATH}"
echo "[nvidia-hook] Running NVIDIA installer for kernel \${KVER}" >&2
sh "\${RUN_PATH}" \
--silent \
--kernel-name="\${KVER}" \
--no-x-check \
--no-nouveau-check \
--no-runlevel-check
# Rebuild module metadata
echo "[nvidia-hook] Running depmod for \${KVER}" >&2
depmod -a "\${KVER}"
echo "[nvidia-hook] NVIDIA \${NVIDIA_VER} installation complete for kernel \${KVER}" >&2
fi
cp /etc/resolv.conf /etc/resolv.conf.bak
if [ "${IB_SUITE}" = trixie ] && [ "${IB_TARGET_ARCH}" != riscv64 ]; then
echo 'deb https://deb.debian.org/debian/ bookworm main' > /etc/apt/sources.list.d/bookworm.list
apt-get update
apt-get install -y postgresql-15
rm /etc/apt/sources.list.d/bookworm.list
apt-get update
systemctl mask postgresql
echo 'deb https://deb.debian.org/debian/ bookworm main' > /etc/apt/sources.list.d/bookworm.list
apt-get update
apt-get install -y postgresql-15
rm /etc/apt/sources.list.d/bookworm.list
apt-get update
systemctl mask postgresql
fi
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
ln -sf /usr/bin/pi-beep /usr/local/bin/beep
KERNEL_VERSION=${RPI_KERNEL_VERSION} sh /boot/config.sh > /boot/config.txt
mkinitramfs -c gzip -o initrd.img-${RPI_KERNEL_VERSION}-rpi-v8 ${RPI_KERNEL_VERSION}-rpi-v8
mkinitramfs -c gzip -o initrd.img-${RPI_KERNEL_VERSION}-rpi-2712 ${RPI_KERNEL_VERSION}-rpi-2712
ln -sf /usr/bin/pi-beep /usr/local/bin/beep
KERNEL_VERSION=${RPI_KERNEL_VERSION} sh /boot/config.sh > /boot/config.txt
mkinitramfs -c gzip -o initrd.img-${RPI_KERNEL_VERSION}-rpi-v8 ${RPI_KERNEL_VERSION}-rpi-v8
mkinitramfs -c gzip -o initrd.img-${RPI_KERNEL_VERSION}-rpi-2712 ${RPI_KERNEL_VERSION}-rpi-2712
fi
useradd --shell /bin/bash -G startos -m start9
@@ -218,11 +308,11 @@ usermod -aG systemd-journal start9
echo "start9 ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee "/etc/sudoers.d/010_start9-nopasswd"
if [ "${IB_TARGET_PLATFORM}" != "raspberrypi" ]; then
/usr/lib/startos/scripts/enable-kiosk
/usr/lib/startos/scripts/enable-kiosk
fi
if ! [[ "${IB_OS_ENV}" =~ (^|-)dev($|-) ]]; then
passwd -l start9
passwd -l start9
fi
EOF
@@ -360,4 +450,4 @@ elif [ "${IMAGE_TYPE}" = img ]; then
fi
chown $IB_UID:$IB_UID $RESULTS_DIR/$IMAGE_BASENAME.*
chown $IB_UID:$IB_UID $RESULTS_DIR/$IMAGE_BASENAME.*

View File

@@ -1 +1 @@
usb-storage.quirks=152d:0562:u,14cd:121c:u,0781:cfcb:u console=serial0,115200 console=tty1 root=PARTUUID=cb15ae4d-02 rootfstype=ext4 fsck.repair=yes rootwait cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory quiet boot=startos
usb-storage.quirks=152d:0562:u,14cd:121c:u,0781:cfcb:u console=serial0,115200 console=tty1 root=PARTUUID=cb15ae4d-02 rootfstype=ext4 fsck.repair=yes rootwait cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory boot=startos

View File

@@ -63,7 +63,7 @@ mount --bind /proc /media/startos/next/proc
mount --bind /boot /media/startos/next/boot
mount --bind /media/startos/root /media/startos/next/media/startos/root
if mountpoint /sys/firmware/efi/efivars 2> /dev/null; then
if mountpoint /sys/firmware/efi/efivars 2>&1 > /dev/null; then
mount --bind /sys/firmware/efi/efivars /media/startos/next/sys/firmware/efi/efivars
fi
@@ -75,7 +75,7 @@ else
CHROOT_RES=$?
fi
if mountpoint /media/startos/next/sys/firmware/efi/efivars 2> /dev/null; then
if mountpoint /media/startos/next/sys/firmware/efi/efivars 2>&1 > /dev/null; then
umount /media/startos/next/sys/firmware/efi/efivars
fi

View File

@@ -35,16 +35,20 @@ if [ "$UNDO" = 1 ]; then
exit $err
fi
# DNAT: rewrite destination for incoming packets (external traffic)
iptables -t nat -A ${NAME}_PREROUTING -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_PREROUTING -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
# DNAT: rewrite destination for locally-originated packets (hairpin from host itself)
iptables -t nat -A ${NAME}_OUTPUT -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_OUTPUT -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_PREROUTING -s "$dip/$dprefix" -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_PREROUTING -s "$dip/$dprefix" -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_POSTROUTING -s "$dip/$dprefix" -d "$dip" -p tcp --dport "$dport" -j MASQUERADE
iptables -t nat -A ${NAME}_POSTROUTING -s "$dip/$dprefix" -d "$dip" -p udp --dport "$dport" -j MASQUERADE
# MASQUERADE: rewrite source for all forwarded traffic to the destination
# This ensures responses are routed back through the host regardless of source IP
iptables -t nat -A ${NAME}_POSTROUTING -d "$dip" -p tcp --dport "$dport" -j MASQUERADE
iptables -t nat -A ${NAME}_POSTROUTING -d "$dip" -p udp --dport "$dport" -j MASQUERADE
# Allow new connections to be forwarded to the destination
iptables -A ${NAME}_FORWARD -d $dip -p tcp --dport $dport -m state --state NEW -j ACCEPT
iptables -A ${NAME}_FORWARD -d $dip -p udp --dport $dport -m state --state NEW -j ACCEPT

View File

@@ -50,12 +50,12 @@ mount --bind /proc /media/startos/next/proc
mount --bind /boot /media/startos/next/boot
mount --bind /media/startos/root /media/startos/next/media/startos/root
if mountpoint /boot/efi 2> /dev/null; then
if mountpoint /boot/efi 2>&1 > /dev/null; then
mkdir -p /media/startos/next/boot/efi
mount --bind /boot/efi /media/startos/next/boot/efi
fi
if mountpoint /sys/firmware/efi/efivars 2> /dev/null; then
if mountpoint /sys/firmware/efi/efivars 2>&1 > /dev/null; then
mount --bind /sys/firmware/efi/efivars /media/startos/next/sys/firmware/efi/efivars
fi

View File

@@ -15,6 +15,7 @@ use josekit::jwk::Jwk;
use reqwest::{Client, Proxy};
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{CallRemote, Context, Empty};
use tokio::process::Command;
use tokio::sync::{RwLock, broadcast, oneshot, watch};
use tokio::time::Instant;
use tracing::instrument;
@@ -26,6 +27,9 @@ use crate::context::config::ServerConfig;
use crate::db::model::Database;
use crate::db::model::package::TaskSeverity;
use crate::disk::OsPartitionInfo;
use crate::disk::mount::filesystem::ReadOnly;
use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::guard::MountGuard;
use crate::init::{InitResult, check_time_is_synchronized};
use crate::install::PKG_ARCHIVE_DIR;
use crate::lxc::LxcManager;
@@ -41,12 +45,14 @@ use crate::rpc_continuations::{Guid, OpenAuthedContinuations, RpcContinuations};
use crate::service::ServiceMap;
use crate::service::action::update_tasks;
use crate::service::effects::callbacks::ServiceCallbacks;
use crate::service::effects::subcontainer::NVIDIA_OVERLAY_PATH;
use crate::shutdown::Shutdown;
use crate::util::Invoke;
use crate::util::future::NonDetachingJoinHandle;
use crate::util::io::delete_file;
use crate::util::lshw::LshwDevice;
use crate::util::sync::{SyncMutex, SyncRwLock, Watch};
use crate::{ActionId, DATA_DIR, PackageId};
use crate::{ActionId, DATA_DIR, PLATFORM, PackageId};
pub struct RpcContextSeed {
is_closed: AtomicBool,
@@ -167,6 +173,39 @@ impl RpcContext {
init_net_ctrl.complete();
tracing::info!("Initialized Net Controller");
if PLATFORM.ends_with("-nonfree") {
if let Err(e) = Command::new("nvidia-modprobe")
.invoke(ErrorKind::ParseSysInfo)
.await
{
tracing::warn!("nvidia-modprobe: {e}");
tracing::info!("The above warning can be ignored if no NVIDIA card is present");
} else {
if let Some(procfs) = MountGuard::mount(
&Bind::new("/proc"),
Path::new(NVIDIA_OVERLAY_PATH).join("proc"),
ReadOnly,
)
.await
.log_err()
{
Command::new("nvidia-container-cli")
.arg("configure")
.arg("--no-devbind")
.arg("--no-cgroups")
.arg("--utility")
.arg("--compute")
.arg("--graphics")
.arg("--video")
.arg(NVIDIA_OVERLAY_PATH)
.invoke(ErrorKind::Unknown)
.await
.log_err();
procfs.unmount(true).await.log_err();
}
}
}
let services = ServiceMap::default();
let metrics_cache = Watch::<Option<crate::system::Metrics>>::new(None);
let socks_proxy_url = format!("socks5h://{socks_proxy}");

View File

@@ -4,6 +4,7 @@ use std::path::Path;
use digest::generic_array::GenericArray;
use digest::{Digest, OutputSizeUser};
use itertools::Itertools;
use sha2::Sha256;
use crate::disk::mount::filesystem::{FileSystem, MountType, ReadWrite};
@@ -12,12 +13,13 @@ use crate::prelude::*;
use crate::util::io::TmpDir;
pub struct OverlayFs<P0: AsRef<Path>, P1: AsRef<Path>, P2: AsRef<Path>> {
lower: P0,
lower: Vec<P0>,
upper: P1,
work: P2,
}
impl<P0: AsRef<Path>, P1: AsRef<Path>, P2: AsRef<Path>> OverlayFs<P0, P1, P2> {
pub fn new(lower: P0, upper: P1, work: P2) -> Self {
/// layers are top to bottom
pub fn new(lower: Vec<P0>, upper: P1, work: P2) -> Self {
Self { lower, upper, work }
}
}
@@ -32,8 +34,10 @@ impl<P0: AsRef<Path> + Send + Sync, P1: AsRef<Path> + Send + Sync, P2: AsRef<Pat
}
fn mount_options(&self) -> impl IntoIterator<Item = impl Display> {
[
Box::new(lazy_format!("lowerdir={}", self.lower.as_ref().display()))
as Box<dyn Display>,
Box::new(lazy_format!(
"lowerdir={}",
self.lower.iter().map(|p| p.as_ref().display()).join(":")
)) as Box<dyn Display>,
Box::new(lazy_format!("upperdir={}", self.upper.as_ref().display())),
Box::new(lazy_format!("workdir={}", self.work.as_ref().display())),
]
@@ -51,18 +55,21 @@ impl<P0: AsRef<Path> + Send + Sync, P1: AsRef<Path> + Send + Sync, P2: AsRef<Pat
tokio::fs::create_dir_all(self.work.as_ref()).await?;
let mut sha = Sha256::new();
sha.update("OverlayFs");
sha.update(
tokio::fs::canonicalize(self.lower.as_ref())
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
self.lower.as_ref().display().to_string(),
)
})?
.as_os_str()
.as_bytes(),
);
for lower in &self.lower {
sha.update(
tokio::fs::canonicalize(lower.as_ref())
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
lower.as_ref().display().to_string(),
)
})?
.as_os_str()
.as_bytes(),
);
sha.update(b"\0");
}
sha.update(
tokio::fs::canonicalize(self.upper.as_ref())
.await
@@ -75,6 +82,7 @@ impl<P0: AsRef<Path> + Send + Sync, P1: AsRef<Path> + Send + Sync, P2: AsRef<Pat
.as_os_str()
.as_bytes(),
);
sha.update(b"\0");
sha.update(
tokio::fs::canonicalize(self.work.as_ref())
.await
@@ -87,6 +95,7 @@ impl<P0: AsRef<Path> + Send + Sync, P1: AsRef<Path> + Send + Sync, P2: AsRef<Pat
.as_os_str()
.as_bytes(),
);
sha.update(b"\0");
Ok(sha.finalize())
}
}
@@ -98,11 +107,20 @@ pub struct OverlayGuard<G: GenericMountGuard> {
inner_guard: MountGuard,
}
impl<G: GenericMountGuard> OverlayGuard<G> {
pub async fn mount(lower: G, mountpoint: impl AsRef<Path>) -> Result<Self, Error> {
pub async fn mount_layers<P: AsRef<Path>>(
pre: &[P],
guard: G,
post: &[P],
mountpoint: impl AsRef<Path>,
) -> Result<Self, Error> {
let upper = TmpDir::new().await?;
let inner_guard = MountGuard::mount(
&OverlayFs::new(
lower.path(),
std::iter::empty()
.chain(pre.into_iter().map(|p| p.as_ref()))
.chain([guard.path()])
.chain(post.into_iter().map(|p| p.as_ref()))
.collect(),
upper.as_ref().join("upper"),
upper.as_ref().join("work"),
),
@@ -111,11 +129,14 @@ impl<G: GenericMountGuard> OverlayGuard<G> {
)
.await?;
Ok(Self {
lower: Some(lower),
lower: Some(guard),
upper: Some(upper),
inner_guard,
})
}
pub async fn mount(lower: G, mountpoint: impl AsRef<Path>) -> Result<Self, Error> {
Self::mount_layers::<&Path>(&[], lower, &[], mountpoint).await
}
pub async fn unmount(mut self, delete_mountpoint: bool) -> Result<(), Error> {
self.inner_guard.take().unmount(delete_mountpoint).await?;
if let Some(lower) = self.lower.take() {

View File

@@ -4,7 +4,6 @@ use std::sync::Arc;
use std::time::{Duration, SystemTime};
use axum::extract::ws;
use const_format::formatcp;
use futures::{StreamExt, TryStreamExt};
use itertools::Itertools;
use rpc_toolkit::{Context, Empty, HandlerArgs, HandlerExt, ParentHandler, from_fn_async};

View File

@@ -142,16 +142,16 @@ pub async fn install(
.await?,
)?;
let asset = &package
let (_, asset) = package
.best
.get(&version)
.and_then(|i| i.s9pk.first())
.ok_or_else(|| {
Error::new(
eyre!("{id}@{version} not found on {registry}"),
ErrorKind::NotFound,
)
})?
.s9pk;
})?;
asset.validate(SIG_CONTEXT, asset.all_signers())?;

View File

@@ -39,7 +39,7 @@ const RPC_DIR: &str = "media/startos/rpc"; // must not be absolute path
pub const CONTAINER_RPC_SERVER_SOCKET: &str = "service.sock"; // must not be absolute path
pub const HOST_RPC_SERVER_SOCKET: &str = "host.sock"; // must not be absolute path
const CONTAINER_DHCP_TIMEOUT: Duration = Duration::from_secs(30);
const HARDWARE_ACCELERATION_PATHS: &[&str] = &["/dev/dri/", "/dev/nvidia"];
const HARDWARE_ACCELERATION_PATHS: &[&str] = &["/dev/dri", "/dev/nvidia*", "/dev/kfd"];
#[derive(
Clone, Debug, Serialize, Deserialize, Default, PartialEq, Eq, PartialOrd, Ord, Hash, TS,
@@ -305,14 +305,15 @@ impl LxcContainer {
Some(Vec::new())
} else {
let mut new_matches = Vec::new();
for m in matches {
if if m.ends_with("/") {
path.starts_with(m)
for mut m in matches.iter().copied() {
let could_match = if let Some(prefix) = m.strip_suffix("*") {
m = prefix;
path.to_string_lossy().starts_with(m)
} else {
path.to_string_lossy().starts_with(*m)
} || Path::new(*m).starts_with(&path)
{
new_matches.push(*m);
path.starts_with(m)
} || Path::new(m).starts_with(&path);
if could_match {
new_matches.push(m);
}
}
if new_matches.is_empty() {

View File

@@ -280,8 +280,11 @@ pub async fn execute<C: Context>(
let lower = TmpMountGuard::mount(&BlockDev::new(&image_path), MountType::ReadOnly).await?;
let work = config_path.join("work");
let upper = config_path.join("overlay");
let overlay =
TmpMountGuard::mount(&OverlayFs::new(&lower.path(), &upper, &work), ReadWrite).await?;
let overlay = TmpMountGuard::mount(
&OverlayFs::new(vec![lower.path()], &upper, &work),
ReadWrite,
)
.await?;
let boot = MountGuard::mount(
&BlockDev::new(&part_info.boot),

View File

@@ -3,7 +3,7 @@ use std::path::Path;
use std::sync::Arc;
use chrono::{DateTime, Utc};
use reqwest::Client;
use reqwest::{Client, Response};
use serde::{Deserialize, Serialize};
use tokio::io::AsyncWrite;
use ts_rs::TS;
@@ -21,14 +21,14 @@ use crate::sign::{AnySignature, AnyVerifyingKey};
use crate::upload::UploadingFile;
use crate::util::future::NonDetachingJoinHandle;
#[derive(Debug, Deserialize, Serialize, TS)]
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct RegistryAsset<Commitment> {
#[ts(type = "string")]
pub published_at: DateTime<Utc>,
#[ts(type = "string")]
pub url: Url,
#[ts(type = "string[]")]
pub url: Vec<Url>,
pub commitment: Commitment,
pub signatures: HashMap<AnyVerifyingKey, AnySignature>,
}
@@ -42,6 +42,48 @@ impl<Commitment> RegistryAsset<Commitment> {
.collect(),
)
}
pub async fn load_http_source(&self, client: Client) -> Result<HttpSource, Error> {
for url in &self.url {
if let Ok(source) = HttpSource::new(client.clone(), url.clone()).await {
return Ok(source);
}
}
Err(Error::new(
eyre!("Failed to load any http url"),
ErrorKind::Network,
))
}
pub async fn load_buffered_http_source(
&self,
client: Client,
progress: PhaseProgressTrackerHandle,
) -> Result<BufferedHttpSource, Error> {
for url in &self.url {
if let Ok(response) = client.get(url.clone()).send().await {
return BufferedHttpSource::from_response(response, progress).await;
}
}
Err(Error::new(
eyre!("Failed to load any http url"),
ErrorKind::Network,
))
}
pub async fn load_buffered_http_source_with_path(
&self,
path: impl AsRef<Path>,
client: Client,
progress: PhaseProgressTrackerHandle,
) -> Result<BufferedHttpSource, Error> {
for url in &self.url {
if let Ok(response) = client.get(url.clone()).send().await {
return BufferedHttpSource::from_response_with_path(path, response, progress).await;
}
}
Err(Error::new(
eyre!("Failed to load any http url"),
ErrorKind::Network,
))
}
}
impl<Commitment: Digestable> RegistryAsset<Commitment> {
pub fn validate(&self, context: &str, mut accept: AcceptSigners) -> Result<&Commitment, Error> {
@@ -59,7 +101,7 @@ impl<C: for<'a> Commitment<&'a HttpSource>> RegistryAsset<C> {
dst: &mut (impl AsyncWrite + Unpin + Send + ?Sized),
) -> Result<(), Error> {
self.commitment
.copy_to(&HttpSource::new(client, self.url.clone()).await?, dst)
.copy_to(&self.load_http_source(client).await?, dst)
.await
}
}
@@ -69,7 +111,7 @@ impl RegistryAsset<MerkleArchiveCommitment> {
client: Client,
) -> Result<S9pk<Section<Arc<HttpSource>>>, Error> {
S9pk::deserialize(
&Arc::new(HttpSource::new(client, self.url.clone()).await?),
&Arc::new(self.load_http_source(client).await?),
Some(&self.commitment),
)
.await
@@ -80,7 +122,7 @@ impl RegistryAsset<MerkleArchiveCommitment> {
progress: PhaseProgressTrackerHandle,
) -> Result<S9pk<Section<Arc<BufferedHttpSource>>>, Error> {
S9pk::deserialize(
&Arc::new(BufferedHttpSource::new(client, self.url.clone(), progress).await?),
&Arc::new(self.load_buffered_http_source(client, progress).await?),
Some(&self.commitment),
)
.await
@@ -98,7 +140,8 @@ impl RegistryAsset<MerkleArchiveCommitment> {
Error,
> {
let source = Arc::new(
BufferedHttpSource::with_path(path, client, self.url.clone(), progress).await?,
self.load_buffered_http_source_with_path(path, client, progress)
.await?,
);
Ok((
S9pk::deserialize(&source, Some(&self.commitment)).await?,
@@ -112,26 +155,30 @@ pub struct BufferedHttpSource {
file: UploadingFile,
}
impl BufferedHttpSource {
pub async fn with_path(
path: impl AsRef<Path>,
client: Client,
url: Url,
progress: PhaseProgressTrackerHandle,
) -> Result<Self, Error> {
let (mut handle, file) = UploadingFile::with_path(path, progress).await?;
let response = client.get(url).send().await?;
Ok(Self {
_download: tokio::spawn(async move { handle.download(response).await }).into(),
file,
})
}
pub async fn new(
client: Client,
url: Url,
progress: PhaseProgressTrackerHandle,
) -> Result<Self, Error> {
let (mut handle, file) = UploadingFile::new(progress).await?;
let response = client.get(url).send().await?;
Self::from_response(response, progress).await
}
pub async fn from_response(
response: Response,
progress: PhaseProgressTrackerHandle,
) -> Result<Self, Error> {
let (mut handle, file) = UploadingFile::new(progress).await?;
Ok(Self {
_download: tokio::spawn(async move { handle.download(response).await }).into(),
file,
})
}
pub async fn from_response_with_path(
path: impl AsRef<Path>,
response: Response,
progress: PhaseProgressTrackerHandle,
) -> Result<Self, Error> {
let (mut handle, file) = UploadingFile::with_path(path, progress).await?;
Ok(Self {
_download: tokio::spawn(async move { handle.download(response).await }).into(),
file,

View File

@@ -133,7 +133,7 @@ async fn add_asset(
.upsert(&platform, || {
Ok(RegistryAsset {
published_at: Utc::now(),
url,
url: vec![url.clone()],
commitment: commitment.clone(),
signatures: HashMap::new(),
})
@@ -146,6 +146,9 @@ async fn add_asset(
))
} else {
s.signatures.insert(signer, signature);
if !s.url.contains(&url) {
s.url.push(url);
}
Ok(())
}
})?;

View File

@@ -12,12 +12,11 @@ use url::Url;
use crate::PackageId;
use crate::context::CliContext;
use crate::prelude::*;
use crate::progress::{FullProgressTracker, ProgressTrackerWriter, ProgressUnits};
use crate::progress::FullProgressTracker;
use crate::registry::asset::BufferedHttpSource;
use crate::registry::context::RegistryContext;
use crate::registry::package::index::PackageVersionInfo;
use crate::s9pk::S9pk;
use crate::s9pk::merkle_archive::source::ArchiveSource;
use crate::s9pk::merkle_archive::source::http::HttpSource;
use crate::s9pk::v2::SIG_CONTEXT;
use crate::sign::commitment::merkle_archive::MerkleArchiveCommitment;
@@ -62,8 +61,10 @@ pub async fn add_package(
let manifest = s9pk.as_manifest();
let mut info = PackageVersionInfo::from_s9pk(&s9pk, url).await?;
if !info.s9pk.signatures.contains_key(&uploader) {
info.s9pk.signatures.insert(uploader.clone(), signature);
for (_, s9pk) in &mut info.s9pk {
if !s9pk.signatures.contains_key(&uploader) && s9pk.commitment == commitment {
s9pk.signatures.insert(uploader.clone(), signature.clone());
}
}
ctx.db
@@ -85,7 +86,12 @@ pub async fn add_package(
.as_package_mut()
.as_packages_mut()
.upsert(&manifest.id, || Ok(Default::default()))?;
package.as_versions_mut().insert(&manifest.version, &info)?;
let v = package.as_versions_mut();
if let Some(prev) = v.as_idx_mut(&manifest.version) {
prev.mutate(|p| p.merge_with(info))?;
} else {
v.insert(&manifest.version, &info)?;
}
Ok(())
} else {

View File

@@ -12,15 +12,18 @@ use crate::PackageId;
use crate::context::CliContext;
use crate::prelude::*;
use crate::progress::{FullProgressTracker, ProgressUnits};
use crate::registry::asset::RegistryAsset;
use crate::registry::context::RegistryContext;
use crate::registry::device_info::DeviceInfo;
use crate::registry::package::index::{PackageIndex, PackageVersionInfo};
use crate::s9pk::manifest::HardwareRequirements;
use crate::s9pk::merkle_archive::source::ArchiveSource;
use crate::s9pk::v2::SIG_CONTEXT;
use crate::sign::commitment::merkle_archive::MerkleArchiveCommitment;
use crate::util::VersionString;
use crate::util::io::{TrackingIO, to_tmp_path};
use crate::util::serde::{WithIoFormat, display_serializable};
use crate::util::tui::choose;
use crate::util::tui::{choose, choose_custom_display};
#[derive(
Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize, TS, ValueEnum,
@@ -142,7 +145,15 @@ fn get_matching_models<'a>(
device_info,
..
}: &GetPackageParams,
) -> Result<Vec<(PackageId, ExtendedVersion, &'a Model<PackageVersionInfo>)>, Error> {
) -> Result<
Vec<(
PackageId,
ExtendedVersion,
&'a Model<PackageVersionInfo>,
Vec<(HardwareRequirements, RegistryAsset<MerkleArchiveCommitment>)>,
)>,
Error,
> {
if let Some(id) = id {
if let Some(pkg) = db.as_packages().as_idx(id) {
vec![(id.clone(), pkg)]
@@ -168,11 +179,13 @@ fn get_matching_models<'a>(
.unwrap_or(VersionRange::any()),
),
)
})? && device_info
.as_ref()
.map_or(Ok(true), |device_info| info.works_for_device(device_info))?
{
Some((k.clone(), ExtendedVersion::from(v), info))
})? {
if let Some(device_info) = &device_info {
info.for_device(device_info)?
} else {
Some(info.as_s9pk().de()?)
}
.map(|assets| (k.clone(), ExtendedVersion::from(v), info, assets))
} else {
None
},
@@ -189,11 +202,28 @@ pub async fn get_package(ctx: RegistryContext, params: GetPackageParams) -> Resu
use patch_db::ModelExt;
let peek = ctx.db.peek().await;
let mut best: BTreeMap<PackageId, BTreeMap<VersionString, &Model<PackageVersionInfo>>> =
Default::default();
let mut other: BTreeMap<PackageId, BTreeMap<VersionString, &Model<PackageVersionInfo>>> =
Default::default();
for (id, version, info) in get_matching_models(&peek.as_index().as_package(), &params)? {
let mut best: BTreeMap<
PackageId,
BTreeMap<
VersionString,
(
&Model<PackageVersionInfo>,
Vec<(HardwareRequirements, RegistryAsset<MerkleArchiveCommitment>)>,
),
>,
> = Default::default();
let mut other: BTreeMap<
PackageId,
BTreeMap<
VersionString,
(
&Model<PackageVersionInfo>,
Vec<(HardwareRequirements, RegistryAsset<MerkleArchiveCommitment>)>,
),
>,
> = Default::default();
for (id, version, info, assets) in get_matching_models(&peek.as_index().as_package(), &params)?
{
let package_best = best.entry(id.clone()).or_default();
let package_other = other.entry(id.clone()).or_default();
if params
@@ -212,9 +242,9 @@ pub async fn get_package(ctx: RegistryContext, params: GetPackageParams) -> Resu
package_other.insert(worse_version, info);
}
}
package_best.insert(version.into(), info);
package_best.insert(version.into(), (info, assets));
} else {
package_other.insert(version.into(), info);
package_other.insert(version.into(), (info, assets));
}
}
if let Some(id) = params.id {
@@ -230,7 +260,16 @@ pub async fn get_package(ctx: RegistryContext, params: GetPackageParams) -> Resu
.remove(&id)
.unwrap_or_default()
.into_iter()
.map(|(k, v)| v.de().map(|v| (k, v)))
.map(|(k, (i, a))| {
Ok::<_, Error>((
k,
PackageVersionInfo {
metadata: i.as_metadata().de()?,
source_version: i.as_source_version().de()?,
s9pk: a,
},
))
})
.try_collect()?;
let other = other.remove(&id).unwrap_or_default();
match params.other_versions {
@@ -245,7 +284,7 @@ pub async fn get_package(ctx: RegistryContext, params: GetPackageParams) -> Resu
other_versions: Some(
other
.into_iter()
.map(|(k, v)| from_value(v.as_value().clone()).map(|v| (k, v)))
.map(|(k, (i, _))| from_value(i.as_value().clone()).map(|v| (k, v)))
.try_collect()?,
),
}),
@@ -254,7 +293,16 @@ pub async fn get_package(ctx: RegistryContext, params: GetPackageParams) -> Resu
best,
other_versions: other
.into_iter()
.map(|(k, v)| v.de().map(|v| (k, v)))
.map(|(k, (i, a))| {
Ok::<_, Error>((
k,
PackageVersionInfo {
metadata: i.as_metadata().de()?,
source_version: i.as_source_version().de()?,
s9pk: a,
},
))
})
.try_collect()?,
}),
}
@@ -278,7 +326,9 @@ pub async fn get_package(ctx: RegistryContext, params: GetPackageParams) -> Resu
categories,
best: best
.into_iter()
.map(|(k, v)| v.de().map(|v| (k, v)))
.map(|(k, (i, _))| {
from_value(i.as_value().clone()).map(|v| (k, v))
})
.try_collect()?,
other_versions: None,
},
@@ -305,13 +355,23 @@ pub async fn get_package(ctx: RegistryContext, params: GetPackageParams) -> Resu
categories,
best: best
.into_iter()
.map(|(k, v)| v.de().map(|v| (k, v)))
.into_iter()
.map(|(k, (i, a))| {
Ok::<_, Error>((
k,
PackageVersionInfo {
metadata: i.as_metadata().de()?,
source_version: i.as_source_version().de()?,
s9pk: a,
},
))
})
.try_collect()?,
other_versions: Some(
other
.into_iter()
.map(|(k, v)| {
from_value(v.as_value().clone()).map(|v| (k, v))
.map(|(k, (i, _))| {
from_value(i.as_value().clone()).map(|v| (k, v))
})
.try_collect()?,
),
@@ -339,11 +399,31 @@ pub async fn get_package(ctx: RegistryContext, params: GetPackageParams) -> Resu
categories,
best: best
.into_iter()
.map(|(k, v)| v.de().map(|v| (k, v)))
.into_iter()
.map(|(k, (i, a))| {
Ok::<_, Error>((
k,
PackageVersionInfo {
metadata: i.as_metadata().de()?,
source_version: i.as_source_version().de()?,
s9pk: a,
},
))
})
.try_collect()?,
other_versions: other
.into_iter()
.map(|(k, v)| v.de().map(|v| (k, v)))
.into_iter()
.map(|(k, (i, a))| {
Ok::<_, Error>((
k,
PackageVersionInfo {
metadata: i.as_metadata().de()?,
source_version: i.as_source_version().de()?,
s9pk: a,
},
))
})
.try_collect()?,
},
))
@@ -431,7 +511,7 @@ pub async fn cli_download(
)
.await?,
)?;
let PackageVersionInfo { s9pk, .. } = match res.best.len() {
let PackageVersionInfo { mut s9pk, .. } = match res.best.len() {
0 => {
return Err(Error::new(
eyre!(
@@ -452,6 +532,75 @@ pub async fn cli_download(
res.best.remove(version).unwrap()
}
};
let s9pk = match s9pk.len() {
0 => {
return Err(Error::new(
eyre!(
"Could not find a version of {id} that satisfies {}",
target_version.unwrap_or(VersionRange::Any)
),
ErrorKind::NotFound,
));
}
1 => s9pk.pop().unwrap().1,
_ => {
let (_, asset) = choose_custom_display(
&format!(concat!(
"Multiple packages with different hardware requirements found. ",
"Choose a file to download:"
)),
&s9pk,
|(hw, _)| {
use std::fmt::Write;
let mut res = String::new();
if let Some(arch) = &hw.arch {
write!(
&mut res,
"{}: {}",
if arch.len() == 1 {
"Architecture"
} else {
"Architectures"
},
arch.iter().join(", ")
)
.unwrap();
}
if !hw.device.is_empty() {
if !res.is_empty() {
write!(&mut res, "; ").unwrap();
}
write!(
&mut res,
"{}: {}",
if hw.device.len() == 1 {
"Device"
} else {
"Devices"
},
hw.device.iter().map(|d| &d.pattern_description).join(", ")
)
.unwrap();
}
if let Some(ram) = hw.ram {
if !res.is_empty() {
write!(&mut res, "; ").unwrap();
}
write!(
&mut res,
"RAM >={:.2}GiB",
ram as f64 / (1024.0 * 1024.0 * 1024.0)
)
.unwrap();
}
res
},
)
.await?;
asset.clone()
}
};
s9pk.validate(SIG_CONTEXT, s9pk.all_signers())?;
fetching_progress.complete();

View File

@@ -50,7 +50,7 @@ pub struct Category {
pub name: String,
}
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[derive(Debug, Deserialize, Serialize, HasModel, TS, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
@@ -62,11 +62,10 @@ pub struct DependencyMetadata {
pub optional: bool,
}
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[derive(Debug, Deserialize, Serialize, HasModel, TS, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct PackageVersionInfo {
pub struct PackageMetadata {
#[ts(type = "string")]
pub title: InternedString,
pub icon: DataUrl<'static>,
@@ -93,13 +92,11 @@ pub struct PackageVersionInfo {
pub os_version: Version,
#[ts(type = "string | null")]
pub sdk_version: Option<Version>,
pub hardware_requirements: HardwareRequirements,
#[ts(type = "string | null")]
pub source_version: Option<VersionRange>,
pub s9pk: RegistryAsset<MerkleArchiveCommitment>,
#[serde(default)]
pub hardware_acceleration: bool,
}
impl PackageVersionInfo {
pub async fn from_s9pk<S: FileSource + Clone>(s9pk: &S9pk<S>, url: Url) -> Result<Self, Error> {
impl PackageMetadata {
pub async fn load<S: FileSource + Clone>(s9pk: &S9pk<S>) -> Result<Self, Error> {
let manifest = s9pk.as_manifest();
let mut dependency_metadata = BTreeMap::new();
for (id, info) in &manifest.dependencies.0 {
@@ -131,73 +128,138 @@ impl PackageVersionInfo {
dependency_metadata,
os_version: manifest.os_version.clone(),
sdk_version: manifest.sdk_version.clone(),
hardware_requirements: manifest.hardware_requirements.clone(),
source_version: None, // TODO
s9pk: RegistryAsset {
published_at: Utc::now(),
url,
commitment: s9pk.as_archive().commitment().await?,
signatures: [(
AnyVerifyingKey::Ed25519(s9pk.as_archive().signer()),
AnySignature::Ed25519(s9pk.as_archive().signature().await?),
)]
.into_iter()
.collect(),
},
hardware_acceleration: manifest.hardware_acceleration.clone(),
})
}
}
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct PackageVersionInfo {
#[serde(flatten)]
pub metadata: PackageMetadata,
#[ts(type = "string | null")]
pub source_version: Option<VersionRange>,
pub s9pk: Vec<(HardwareRequirements, RegistryAsset<MerkleArchiveCommitment>)>,
}
impl PackageVersionInfo {
/// Builds version info for a single s9pk artifact published at `url`.
///
/// The resulting info has exactly one `(HardwareRequirements, asset)` entry,
/// taken from the package manifest; additional variants are accumulated later
/// via `merge_with`.
pub async fn from_s9pk<S: FileSource + Clone>(s9pk: &S9pk<S>, url: Url) -> Result<Self, Error> {
    let metadata = PackageMetadata::load(s9pk).await?;
    let hw_req = s9pk.as_manifest().hardware_requirements.clone();
    let archive = s9pk.as_archive();
    // Record the publisher's signature over the archive commitment.
    let asset = RegistryAsset {
        published_at: Utc::now(),
        url: vec![url],
        commitment: archive.commitment().await?,
        signatures: [(
            AnyVerifyingKey::Ed25519(archive.signer()),
            AnySignature::Ed25519(archive.signature().await?),
        )]
        .into_iter()
        .collect(),
    };
    Ok(Self {
        metadata,
        source_version: None, // TODO
        s9pk: vec![(hw_req, asset)],
    })
}
/// Merges the release entries of `other` into `self`.
///
/// For each `(hardware requirements, asset)` pair in `other`:
/// - same requirements AND same commitment: union the mirror URLs;
/// - same requirements, different commitment: the incoming asset supersedes
///   the stored one (the key is replaced too, since `HardwareRequirements`
///   equality ignores display-only fields such as `pattern_description`);
/// - otherwise: appended as a new variant.
///
/// Entries are re-sorted most-specific-first afterwards so lookups can take
/// the first match.
pub fn merge_with(&mut self, other: Self) -> Result<(), Error> {
    for (hw_req, asset) in other.s9pk {
        if let Some((_, matching)) = self
            .s9pk
            .iter_mut()
            .find(|(h, s)| s.commitment == asset.commitment && *h == hw_req)
        {
            // Same artifact under the same requirements: just collect any
            // mirror URLs we don't already know about.
            for url in asset.url {
                if !matching.url.contains(&url) {
                    matching.url.push(url);
                }
            }
        } else if let Some((h, matching)) = self.s9pk.iter_mut().find(|(h, _)| *h == hw_req) {
            // Same requirements, different commitment: newer registry data
            // wins. Replace the key as well to refresh non-identity fields.
            *matching = asset;
            *h = hw_req;
        } else {
            self.s9pk.push((hw_req, asset));
        }
    }
    self.s9pk.sort_by_key(|(h, _)| h.specificity_desc());
    Ok(())
}
pub fn table(&self, version: &VersionString) -> prettytable::Table {
use prettytable::*;
let mut table = Table::new();
table.add_row(row![bc => &self.title]);
table.add_row(row![bc => &self.metadata.title]);
table.add_row(row![br -> "VERSION", AsRef::<str>::as_ref(version)]);
table.add_row(row![br -> "RELEASE NOTES", &self.release_notes]);
table.add_row(row![br -> "ABOUT", &textwrap::wrap(&self.description.short, 80).join("\n")]);
table.add_row(row![br -> "RELEASE NOTES", &self.metadata.release_notes]);
table.add_row(
row![br -> "ABOUT", &textwrap::wrap(&self.metadata.description.short, 80).join("\n")],
);
table.add_row(row![
br -> "DESCRIPTION",
&textwrap::wrap(&self.description.long, 80).join("\n")
&textwrap::wrap(&self.metadata.description.long, 80).join("\n")
]);
table.add_row(row![br -> "GIT HASH", self.git_hash.as_deref().unwrap_or("N/A")]);
table.add_row(row![br -> "LICENSE", &self.license]);
table.add_row(row![br -> "PACKAGE REPO", &self.wrapper_repo.to_string()]);
table.add_row(row![br -> "SERVICE REPO", &self.upstream_repo.to_string()]);
table.add_row(row![br -> "WEBSITE", &self.marketing_site.to_string()]);
table.add_row(row![br -> "SUPPORT", &self.support_site.to_string()]);
table.add_row(row![br -> "GIT HASH", self.metadata.git_hash.as_deref().unwrap_or("N/A")]);
table.add_row(row![br -> "LICENSE", &self.metadata.license]);
table.add_row(row![br -> "PACKAGE REPO", &self.metadata.wrapper_repo.to_string()]);
table.add_row(row![br -> "SERVICE REPO", &self.metadata.upstream_repo.to_string()]);
table.add_row(row![br -> "WEBSITE", &self.metadata.marketing_site.to_string()]);
table.add_row(row![br -> "SUPPORT", &self.metadata.support_site.to_string()]);
table
}
}
impl Model<PackageVersionInfo> {
pub fn works_for_device(&self, device_info: &DeviceInfo) -> Result<bool, Error> {
if !self.as_os_version().de()?.satisfies(&device_info.os.compat) {
return Ok(false);
pub fn for_device(
&self,
device_info: &DeviceInfo,
) -> Result<Option<Vec<(HardwareRequirements, RegistryAsset<MerkleArchiveCommitment>)>>, Error>
{
if !self
.as_metadata()
.as_os_version()
.de()?
.satisfies(&device_info.os.compat)
{
return Ok(None);
}
let hw = self.as_hardware_requirements().de()?;
if let Some(arch) = hw.arch {
if !arch.contains(&device_info.hardware.arch) {
return Ok(false);
let mut s9pk = self.as_s9pk().de()?;
s9pk.retain(|(hw, _)| {
if let Some(arch) = &hw.arch {
if !arch.contains(&device_info.hardware.arch) {
return false;
}
}
}
if let Some(ram) = hw.ram {
if device_info.hardware.ram < ram {
return Ok(false);
if let Some(ram) = hw.ram {
if device_info.hardware.ram < ram {
return false;
}
}
}
for device_filter in hw.device {
if !device_info
.hardware
.devices
.iter()
.filter(|d| d.class() == &*device_filter.class)
.any(|d| device_filter.pattern.as_ref().is_match(d.product()))
{
return Ok(false);
for device_filter in &hw.device {
if !device_info
.hardware
.devices
.iter()
.filter(|d| d.class() == &*device_filter.class)
.any(|d| device_filter.pattern.as_ref().is_match(d.product()))
{
return false;
}
}
}
true
});
Ok(true)
if s9pk.is_empty() {
Ok(None)
} else {
Ok(Some(s9pk))
}
}
}

View File

@@ -7,7 +7,7 @@ use ts_rs::TS;
use crate::prelude::*;
use crate::util::Invoke;
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, TS)]
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, TS, PartialEq, Eq)]
#[ts(type = "string")]
pub struct GitHash(String);

View File

@@ -167,7 +167,7 @@ impl Manifest {
}
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, TS)]
#[derive(Clone, Debug, Default, Deserialize, Serialize, TS, PartialEq)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct HardwareRequirements {
@@ -178,6 +178,16 @@ pub struct HardwareRequirements {
#[ts(type = "string[] | null")]
pub arch: Option<BTreeSet<InternedString>>,
}
impl HardwareRequirements {
    /// Returns a sort key such that an ascending sort (e.g. `sort_by_key`)
    /// places the most specific requirements first.
    ///
    /// Every component therefore encodes "smaller = more specific":
    /// - device: more device filters = more specific, so the count is inverted;
    /// - arch: fewer supported arches = more specific; no constraint at all is
    ///   the least specific (`u32::MAX`);
    /// - ram: a higher minimum = more specific, so the value is inverted; no
    ///   constraint is the least specific (`u64::MAX`).
    pub fn specificity_desc(&self) -> (u32, u32, u64) {
        (
            u32::MAX - self.device.len() as u32, // more device requirements = more specific
            self.arch.as_ref().map_or(u32::MAX, |a| a.len() as u32), // more arches = less specific
            // BUGFIX: previously returned the raw RAM value, which made higher
            // (more specific) RAM requirements sort LAST and "no requirement"
            // sort FIRST under the ascending sort — the opposite of the other
            // two components. Invert so higher RAM yields a smaller key.
            u64::MAX - self.ram.unwrap_or(0),
        )
    }
}
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
@@ -189,8 +199,15 @@ pub struct DeviceFilter {
pub pattern: Regex,
pub pattern_description: String,
}
impl PartialEq for DeviceFilter {
    /// Two filters are equal when they target the same device class and were
    /// compiled from the same regex source. `pattern_description` is
    /// intentionally left out of the comparison: it is display-only text.
    fn eq(&self, other: &Self) -> bool {
        if self.class != other.class {
            return false;
        }
        // Compare the regexes by their textual form, since the compiled
        // pattern type itself does not implement equality.
        let lhs = InternedString::from_display(self.pattern.as_ref());
        let rhs = InternedString::from_display(other.pattern.as_ref());
        lhs == rhs
    }
}
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[derive(Clone, Debug, Deserialize, Serialize, TS, PartialEq, Eq)]
#[ts(export)]
pub struct Description {
pub short: String,
@@ -214,7 +231,7 @@ impl Description {
}
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, TS)]
#[derive(Clone, Debug, Default, Deserialize, Serialize, TS, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct Alerts {

View File

@@ -265,7 +265,7 @@ impl PackParams {
}
}
#[derive(Debug, Clone, Deserialize, Serialize, TS)]
#[derive(Debug, Default, Clone, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct ImageConfig {
@@ -274,15 +274,8 @@ pub struct ImageConfig {
pub arch: BTreeSet<InternedString>,
#[ts(type = "string | null")]
pub emulate_missing_as: Option<InternedString>,
}
impl Default for ImageConfig {
fn default() -> Self {
Self {
source: ImageSource::Packed,
arch: BTreeSet::new(),
emulate_missing_as: None,
}
}
#[serde(default)]
pub nvidia_container: bool,
}
#[derive(Parser)]
@@ -299,6 +292,8 @@ struct CliImageConfig {
arch: Vec<InternedString>,
#[arg(long)]
emulate_missing_as: Option<InternedString>,
#[arg(long)]
nvidia_container: bool,
}
impl TryFrom<CliImageConfig> for ImageConfig {
type Error = clap::Error;
@@ -317,6 +312,7 @@ impl TryFrom<CliImageConfig> for ImageConfig {
},
arch: value.arch.into_iter().collect(),
emulate_missing_as: value.emulate_missing_as,
nvidia_container: value.nvidia_container,
};
res.emulate_missing_as
.as_ref()
@@ -379,6 +375,11 @@ pub enum ImageSource {
DockerTag(String),
// Recipe(DirRecipe),
}
impl Default for ImageSource {
fn default() -> Self {
ImageSource::Packed
}
}
impl ImageSource {
pub fn ingredients(&self) -> Vec<PathBuf> {
match self {

View File

@@ -15,7 +15,7 @@ mod dependency;
mod health;
mod net;
mod prelude;
mod subcontainer;
pub mod subcontainer;
mod system;
mod version;

View File

@@ -11,6 +11,8 @@ use crate::service::effects::prelude::*;
use crate::service::persistent_container::Subcontainer;
use crate::util::Invoke;
pub const NVIDIA_OVERLAY_PATH: &str = "/var/tmp/startos/nvidia-overlay";
#[cfg(target_os = "linux")]
mod sync;
@@ -113,7 +115,26 @@ pub async fn create_subcontainer_fs(
);
tracing::info!("Mounting overlay {guid} for {image_id}");
let subcontainer_wrapper = Subcontainer {
overlay: OverlayGuard::mount(image, &mountpoint).await?,
overlay: OverlayGuard::mount_layers(
if context
.seed
.persistent_container
.s9pk
.as_manifest()
.images
.get(&image_id)
.map_or(false, |i| i.nvidia_container)
&& tokio::fs::metadata(NVIDIA_OVERLAY_PATH).await.is_ok()
{
&[NVIDIA_OVERLAY_PATH]
} else {
&[]
},
image,
&[],
&mountpoint,
)
.await?,
name: name
.unwrap_or_else(|| InternedString::intern(format!("subcontainer-{}", image_id))),
image_id: image_id.clone(),

View File

@@ -11,7 +11,7 @@ use crate::sign::commitment::{Commitment, Digestable};
use crate::util::io::TrackingIO;
use crate::util::serde::Base64;
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[derive(Clone, Copy, Debug, Deserialize, Serialize, HasModel, TS, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]

View File

@@ -13,7 +13,7 @@ use ts_rs::TS;
use crate::util::mime::{mime, unmime};
use crate::{Error, ErrorKind, ResultExt};
#[derive(Clone, TS)]
#[derive(Clone, TS, PartialEq, Eq)]
#[ts(type = "string")]
pub struct DataUrl<'a> {
pub mime: InternedString,

View File

@@ -95,7 +95,7 @@ pub async fn prompt_multiline<
Ok(res)
}
pub async fn choose_custom_display<'t, T: std::fmt::Display>(
pub async fn choose_custom_display<'t, T>(
prompt: &str,
choices: &'t [T],
mut display: impl FnMut(&T) -> String,
@@ -121,7 +121,7 @@ pub async fn choose_custom_display<'t, T: std::fmt::Display>(
if choice.len() < 1 {
return Err(Error::new(eyre!("Aborted"), ErrorKind::Cancelled));
}
let (idx, _) = string_choices
let (idx, choice_str) = string_choices
.iter()
.enumerate()
.find(|(_, s)| s.as_str() == choice[0].as_str())
@@ -132,7 +132,7 @@ pub async fn choose_custom_display<'t, T: std::fmt::Display>(
)
})?;
let choice = &choices[idx];
println!("{prompt} {choice}");
println!("{prompt} {choice_str}");
Ok(&choice)
}

View File

@@ -20,9 +20,15 @@ fi
update-initramfs -u -k all
if [ -f /etc/default/grub ]; then
sed -i '/\(^\|#\)GRUB_CMDLINE_LINUX=/c\GRUB_CMDLINE_LINUX="boot=startos console=ttyS0,115200n8"' /etc/default/grub
sed -i '/\(^\|#\)GRUB_CMDLINE_LINUX=/c\GRUB_CMDLINE_LINUX="boot=startos console=ttyS0,115200n8 console=tty0"' /etc/default/grub
sed -i '/\(^\|#\)GRUB_CMDLINE_LINUX_DEFAULT=/c\GRUB_CMDLINE_LINUX_DEFAULT=""' /etc/default/grub
sed -i '/\(^\|#\)GRUB_DISTRIBUTOR=/c\GRUB_DISTRIBUTOR="StartOS v$(cat /usr/lib/startos/VERSION.txt)"' /etc/default/grub
sed -i '/\(^\|#\)GRUB_TERMINAL=/c\GRUB_TERMINAL="serial"\nGRUB_SERIAL_COMMAND="serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"' /etc/default/grub
if grep '^GRUB_SERIAL_COMMAND=' /etc/default/grub > /dev/null; then
sed -i '/\(^\|#\)GRUB_SERIAL_COMMAND=/c\GRUB_SERIAL_COMMAND="serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"' /etc/default/grub
else
echo 'GRUB_SERIAL_COMMAND="serial --unit=0 --speed=115200 --word=8 --parity=no --stop=1"' >> /etc/default/grub
fi
fi
VERSION="$(cat /usr/lib/startos/VERSION.txt)"

View File

@@ -5,4 +5,5 @@ export type ImageConfig = {
source: ImageSource
arch: string[]
emulateMissingAs: string | null
nvidiaContainer: boolean
}

View File

@@ -10,6 +10,8 @@ import type { PackageId } from "./PackageId"
import type { RegistryAsset } from "./RegistryAsset"
export type PackageVersionInfo = {
sourceVersion: string | null
s9pk: Array<[HardwareRequirements, RegistryAsset<MerkleArchiveCommitment>]>
title: string
icon: DataUrl
description: Description
@@ -26,7 +28,5 @@ export type PackageVersionInfo = {
dependencyMetadata: { [key: PackageId]: DependencyMetadata }
osVersion: string
sdkVersion: string | null
hardwareRequirements: HardwareRequirements
sourceVersion: string | null
s9pk: RegistryAsset<MerkleArchiveCommitment>
hardwareAcceleration: boolean
}

View File

@@ -4,7 +4,7 @@ import type { AnyVerifyingKey } from "./AnyVerifyingKey"
export type RegistryAsset<Commitment> = {
publishedAt: string
url: string
url: string[]
commitment: Commitment
signatures: { [key: AnyVerifyingKey]: AnySignature }
}

View File

@@ -180,6 +180,7 @@ export type SDKImageInputSpec = {
source: Exclude<ImageSource, "packed">
arch?: ArchOptions[A]
emulateMissingAs?: ArchOptions[A][number] | null
nvidiaContainer?: boolean
}
}[keyof ArchOptions]

View File

@@ -47,6 +47,7 @@ export function buildManifest<
v.emulateMissingAs = (v.arch as string[]).includes("aarch64")
? "aarch64"
: v.arch[0] || null
v.nvidiaContainer = !!v.nvidiaContainer
images[k] = v as ImageConfig
return images
},