* add support for idmapped mounts to start-sdk

* misc fixes

* misc fixes

* add default to textarea

* fix iptables masquerade rule

* fix textarea types

* more fixes

* better logging for rsync

* fix tty size

* fix wg conf generation for android

* disable file mounts on dependencies

* mostly there, some styling issues (#3069)

* mostly there, some styling issues

* fix: address comments (#3070)

* fix: address comments

* fix: fix

* show SSL for any address with secure protocol and ssl added

* better sorting and messaging

---------

Co-authored-by: Alex Inkin <alexander@inkin.ru>

* fixes for nextcloud

* allow sidebar navigation during service state traansitions

* wip: x-forwarded headers

* implement x-forwarded-for proxy

* lowercase domain names and fix warning popover bug

* fix http2 websockets

* fix websocket retry behavior

* add arch filters to s9pk pack

* use docker for start-cli install

* add version range to package signer on registry

* fix rcs < 0

* fix user information parsing

* refactor service interface getters

* disable idmaps

* build fixes

* update docker login action

* streamline build

* add start-cli workflow

* rename

* riscv64gc

* fix ui packing

* no default features on cli

* make cli depend on GIT_HASH

* more build fixes

* more build fixes

* interpolate arch within dockerfile

* fix tests

* add launch ui to service page plus other small improvements (#3075)

* add launch ui to service page plus other small improvements

* revert translation disable

* add spinner to service list if service is health and loading

* chore: some visual tune up

* chore: update Taiga UI

---------

Co-authored-by: waterplea <alexander@inkin.ru>

* fix backups

* feat: use arm hosted runners and don't fail when apt package does not exist (#3076)

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
Co-authored-by: Shadowy Super Coder <musashidisciple@proton.me>
Co-authored-by: Matt Hill <MattDHill@users.noreply.github.com>
Co-authored-by: Alex Inkin <alexander@inkin.ru>
Co-authored-by: Remco Ros <remcoros@live.nl>
This commit is contained in:
Aiden McClelland
2025-12-15 13:30:50 -07:00
committed by GitHub
parent b945243d1a
commit 0430e0f930
148 changed files with 2572 additions and 1761 deletions

45
core/Cargo.lock generated
View File

@@ -844,6 +844,23 @@ dependencies = [
"tracing",
]
[[package]]
name = "axum-server"
version = "0.8.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "b1df331683d982a0b9492b38127151e6453639cd34926eb9c07d4cd8c6d22bfc"
dependencies = [
"bytes",
"either",
"fs-err",
"http",
"http-body",
"hyper",
"hyper-util",
"tokio",
"tower-service",
]
[[package]]
name = "backtrace"
version = "0.3.76"
@@ -2961,6 +2978,16 @@ dependencies = [
"itertools 0.8.2",
]
[[package]]
name = "fs-err"
version = "3.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62d91fd049c123429b018c47887d3f75a265540dd3c30ba9cb7bae9197edb03a"
dependencies = [
"autocfg",
"tokio",
]
[[package]]
name = "fs-mistrust"
version = "0.10.0"
@@ -3670,9 +3697,9 @@ dependencies = [
[[package]]
name = "hyper"
version = "1.7.0"
version = "1.8.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e"
checksum = "2ab2d4f250c3d7b1c9fcdff1cece94ea4e2dfbec68614f7b87cb205f24ca9d11"
dependencies = [
"atomic-waker",
"bytes",
@@ -3739,9 +3766,9 @@ dependencies = [
[[package]]
name = "hyper-util"
version = "0.1.17"
version = "0.1.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8"
checksum = "727805d60e7938b76b826a6ef209eb70eaa1812794f9424d4a4e2d740662df5f"
dependencies = [
"base64 0.22.1",
"bytes",
@@ -4824,6 +4851,7 @@ dependencies = [
"ed25519-dalek 2.2.0",
"exver",
"gpt",
"hyper",
"ipnet",
"lazy_static",
"lettre",
@@ -6617,9 +6645,9 @@ checksum = "7a2d987857b319362043e95f5353c0535c1f58eec5336fdfcf626430af7def58"
[[package]]
name = "reqwest"
version = "0.12.24"
version = "0.12.25"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9d0946410b9f7b082a427e4ef5c8ff541a88b357bc6c637c40db3a68ac70a36f"
checksum = "b6eff9328d40131d43bd911d42d79eb6a47312002a4daefc9e37f17e74a7701a"
dependencies = [
"base64 0.22.1",
"bytes",
@@ -7917,6 +7945,7 @@ dependencies = [
"async-stream",
"async-trait",
"axum 0.8.6",
"axum-server",
"backtrace-on-stack-overflow",
"barrage",
"base32 0.5.1",
@@ -9778,9 +9807,9 @@ dependencies = [
[[package]]
name = "tower-http"
version = "0.6.6"
version = "0.6.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2"
checksum = "d4e6559d53cc268e5031cd8429d05415bc4cb4aefc4aa5d6cc35fbf5b924a1f8"
dependencies = [
"bitflags 2.10.0",
"bytes",

View File

@@ -5,6 +5,21 @@ cd "$(dirname "${BASH_SOURCE[0]}")"
source ./builder-alias.sh
set -ea
INSTALL=false
while [[ $# -gt 0 ]]; do
case $1 in
--install)
INSTALL=true
shift
;;
*)
>&2 echo "Unknown option: $1"
exit 1
;;
esac
done
shopt -s expand_aliases
PROFILE=${PROFILE:-release}
@@ -46,18 +61,7 @@ if [ -z "${TARGET:-}" ]; then
fi
cd ..
# Ensure GIT_HASH.txt exists if not created by higher-level build steps
if [ ! -f GIT_HASH.txt ] && command -v git >/dev/null 2>&1; then
git rev-parse HEAD > GIT_HASH.txt || true
fi
FEATURES="$(echo "${ENVIRONMENT:-}" | sed 's/-/,/g')"
FEATURE_ARGS="cli"
if [ -n "$FEATURES" ]; then
FEATURE_ARGS="$FEATURE_ARGS,$FEATURES"
fi
RUSTFLAGS=""
if [[ "${ENVIRONMENT:-}" =~ (^|-)console($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
@@ -65,7 +69,11 @@ fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features $FEATURE_ARGS --locked --bin start-cli --target=$TARGET
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features=docker,$FEATURES --locked --bin start-cli --target=$TARGET
if [ "$(ls -nd "core/target/$TARGET/$PROFILE/start-cli" | awk '{ print $3 }')" != "$UID" ]; then
rust-zig-builder sh -c "cd core && chown -R $UID:$UID target && chown -R $UID:$UID /root/.cargo"
rust-zig-builder sh -c "cd core && chown -R $UID:$UID target && chown -R $UID:$UID /usr/local/cargo"
fi
if [ "$INSTALL" = "true" ]; then
cp "core/target/$TARGET/$PROFILE/start-cli" ~/.cargo/bin/start-cli
fi

View File

@@ -40,7 +40,7 @@ fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features cli-registry,registry,$FEATURES --locked --bin registrybox --target=$RUST_ARCH-unknown-linux-musl
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin registrybox --target=$RUST_ARCH-unknown-linux-musl
if [ "$(ls -nd "core/target/$RUST_ARCH-unknown-linux-musl/$PROFILE/registrybox" | awk '{ print $3 }')" != "$UID" ]; then
rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID /root/.cargo"
rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID /usr/local/cargo"
fi

View File

@@ -40,7 +40,7 @@ fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features cli-container,$FEATURES --locked --bin containerbox --target=$RUST_ARCH-unknown-linux-musl
if [ "$(ls -nd "core/target/$RUST_ARCH-unknown-linux-musl/$PROFILE/containerbox" | awk '{ print $3 }')" != "$UID" ]; then
rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID /root/.cargo"
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin start-container --target=$RUST_ARCH-unknown-linux-musl
if [ "$(ls -nd "core/target/$RUST_ARCH-unknown-linux-musl/$PROFILE/start-container" | awk '{ print $3 }')" != "$UID" ]; then
rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID /usr/local/cargo"
fi

View File

@@ -40,7 +40,7 @@ fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features cli,startd,$FEATURES --locked --bin startbox --target=$RUST_ARCH-unknown-linux-musl
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin startbox --target=$RUST_ARCH-unknown-linux-musl
if [ "$(ls -nd "core/target/$RUST_ARCH-unknown-linux-musl/$PROFILE/startbox" | awk '{ print $3 }')" != "$UID" ]; then
rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID /root/.cargo"
rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID /usr/local/cargo"
fi

View File

@@ -38,7 +38,7 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then
fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo test --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features test,$FEATURES --locked 'export_bindings_'
rust-zig-builder cargo test --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features test,$FEATURES --locked 'export_bindings_'
if [ "$(ls -nd "core/startos/bindings" | awk '{ print $3 }')" != "$UID" ]; then
rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID core/startos/bindings && chown -R $UID:$UID /root/.cargo"
rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID core/startos/bindings && chown -R $UID:$UID /usr/local/cargo"
fi

View File

@@ -40,7 +40,7 @@ fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features cli-tunnel,tunnel,$FEATURES --locked --bin tunnelbox --target=$RUST_ARCH-unknown-linux-musl
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin tunnelbox --target=$RUST_ARCH-unknown-linux-musl
if [ "$(ls -nd "core/target/$RUST_ARCH-unknown-linux-musl/$PROFILE/tunnelbox" | awk '{ print $3 }')" != "$UID" ]; then
rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID /root/.cargo"
rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID /usr/local/cargo"
fi

View File

@@ -5,4 +5,4 @@ if tty -s; then
USE_TTY="-it"
fi
alias 'rust-zig-builder'='docker run '"$USE_TTY"' --rm -e "RUSTFLAGS=$RUSTFLAGS" -e "AWS_LC_SYS_CMAKE_TOOLCHAIN_FILE_riscv64gc_unknown_linux_musl=/root/cmake-overrides/toolchain-riscv64-musl-clang.cmake" -e SCCACHE_GHA_ENABLED -e SCCACHE_GHA_VERSION -e ACTIONS_RESULTS_URL -e ACTIONS_RUNTIME_TOKEN -v "$HOME/.cargo/registry":/usr/local/cargo/registry -v "$HOME/.cargo/git":/root/.cargo/git -v "$HOME/.cache/sccache":/root/.cache/sccache -v "$(pwd)":/workdir -w /workdir -P start9/cargo-zigbuild'
alias 'rust-zig-builder'='docker run '"$USE_TTY"' --rm -e "RUSTFLAGS=$RUSTFLAGS" -e "AWS_LC_SYS_CMAKE_TOOLCHAIN_FILE_riscv64gc_unknown_linux_musl=/root/cmake-overrides/toolchain-riscv64-musl-clang.cmake" -e SCCACHE_GHA_ENABLED -e SCCACHE_GHA_VERSION -e ACTIONS_RESULTS_URL -e ACTIONS_RUNTIME_TOKEN -v "$HOME/.cargo/registry":/usr/local/cargo/registry -v "$HOME/.cargo/git":/usr/local/cargo/git -v "$HOME/.cache/sccache":/root/.cache/sccache -v "$HOME/.cache/cargo-zigbuild:/root/.cache/cargo-zigbuild" -v "$(pwd)":/workdir -w /workdir -P start9/cargo-zigbuild'

View File

@@ -1,19 +0,0 @@
#!/bin/bash
cd "$(dirname "${BASH_SOURCE[0]}")"
set -ea
shopt -s expand_aliases
web="../web/dist/static"
[ -d "$web" ] || mkdir -p "$web"
if [ -z "$PLATFORM" ]; then
PLATFORM=$(uname -m)
fi
if [ "$PLATFORM" = "arm64" ]; then
PLATFORM="aarch64"
fi
cargo install --path=./startos --no-default-features --features=cli,docker --bin start-cli --locked

View File

@@ -36,6 +36,7 @@ rustls = "0.23"
serde = { version = "1.0", features = ["derive", "rc"] }
serde_json = "1.0"
ssh-key = "0.6.2"
hyper = "1.8.1"
thiserror = "2.0"
tokio = { version = "1", features = ["full"] }
torut = "0.2.1"

View File

@@ -395,6 +395,11 @@ impl From<lettre::address::AddressError> for Error {
Error::new(e, ErrorKind::Smtp)
}
}
impl From<hyper::Error> for Error {
fn from(e: hyper::Error) -> Self {
Error::new(e, ErrorKind::Network)
}
}
impl From<patch_db::value::Error> for Error {
fn from(value: patch_db::value::Error) -> Self {
match value.kind {

View File

@@ -41,5 +41,5 @@ fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo test --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=test,$FEATURES --workspace --locked -- --skip export_bindings_
rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID /root/.cargo"
rust-zig-builder cargo test --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=test,$FEATURES --workspace --locked --lib -- --skip export_bindings_
rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID /usr/local/cargo"

View File

@@ -23,23 +23,23 @@ path = "src/lib.rs"
[[bin]]
name = "startbox"
path = "src/main.rs"
path = "src/main/startbox.rs"
[[bin]]
name = "start-cli"
path = "src/main.rs"
path = "src/main/start-cli.rs"
[[bin]]
name = "containerbox"
path = "src/main.rs"
name = "start-container"
path = "src/main/start-container.rs"
[[bin]]
name = "registrybox"
path = "src/main.rs"
path = "src/main/registrybox.rs"
[[bin]]
name = "tunnelbox"
path = "src/main.rs"
path = "src/main/tunnelbox.rs"
[features]
arti = [
@@ -54,19 +54,11 @@ arti = [
"tor-proto",
"tor-rtcompat",
]
cli = ["cli-registry", "cli-startd", "cli-tunnel"]
cli-container = ["procfs", "pty-process"]
cli-registry = []
cli-startd = []
cli-tunnel = []
console = ["console-subscriber", "tokio/tracing"]
default = ["cli", "cli-container", "registry", "startd", "tunnel"]
default = ["procfs", "pty-process"]
dev = ["backtrace-on-stack-overflow"]
docker = []
registry = []
startd = ["procfs", "pty-process"]
test = []
tunnel = []
unstable = ["backtrace-on-stack-overflow"]
[dependencies]
@@ -93,7 +85,8 @@ async-compression = { version = "0.4.32", features = [
] }
async-stream = "0.3.5"
async-trait = "0.1.74"
axum = { version = "0.8.4", features = ["ws"] }
axum = { version = "0.8.4", features = ["ws", "http2"] }
axum-server = "0.8.0"
backtrace-on-stack-overflow = { version = "0.3.0", optional = true }
barrage = "0.2.3"
base32 = "0.5.0"
@@ -219,7 +212,12 @@ qrcode = "0.14.1"
r3bl_tui = "0.7.6"
rand = "0.9.2"
regex = "1.10.2"
reqwest = { version = "0.12.4", features = ["json", "socks", "stream"] }
reqwest = { version = "0.12.25", features = [
"json",
"socks",
"stream",
"http2",
] }
reqwest_cookie_store = "0.8.0"
rpassword = "7.2.0"
rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git", rev = "068db90" }

View File

@@ -301,14 +301,14 @@ lazy_static::lazy_static! {
Mutex::new(BTreeMap::new());
}
#[derive(Deserialize, Serialize, Parser, TS)]
#[derive(Deserialize, Serialize, Parser)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct MountParams {
target_id: BackupTargetId,
#[arg(long)]
server_id: Option<String>,
password: String,
password: String, // TODO: rpassword
#[arg(long)]
allow_partial: bool,
}

View File

@@ -1,91 +1,85 @@
use std::collections::VecDeque;
use std::collections::{BTreeMap, VecDeque};
use std::ffi::OsString;
use std::path::Path;
#[cfg(feature = "cli-container")]
pub mod container_cli;
pub mod deprecated;
#[cfg(any(feature = "registry", feature = "cli-registry"))]
pub mod registry;
#[cfg(feature = "cli")]
pub mod start_cli;
#[cfg(feature = "startd")]
pub mod start_init;
#[cfg(feature = "startd")]
pub mod startd;
#[cfg(any(feature = "tunnel", feature = "cli-tunnel"))]
pub mod tunnel;
fn select_executable(name: &str) -> Option<fn(VecDeque<OsString>)> {
match name {
#[cfg(feature = "startd")]
"startd" => Some(startd::main),
#[cfg(feature = "startd")]
"embassyd" => Some(|_| deprecated::renamed("embassyd", "startd")),
#[cfg(feature = "startd")]
"embassy-init" => Some(|_| deprecated::removed("embassy-init")),
#[cfg(feature = "cli-startd")]
"start-cli" => Some(start_cli::main),
#[cfg(feature = "cli-startd")]
"embassy-cli" => Some(|_| deprecated::renamed("embassy-cli", "start-cli")),
#[cfg(feature = "cli-startd")]
"embassy-sdk" => Some(|_| deprecated::removed("embassy-sdk")),
#[cfg(feature = "cli-container")]
"start-container" => Some(container_cli::main),
#[cfg(feature = "registry")]
"start-registryd" => Some(registry::main),
#[cfg(feature = "cli-registry")]
"start-registry" => Some(registry::cli),
#[cfg(feature = "tunnel")]
"start-tunneld" => Some(tunnel::main),
#[cfg(feature = "cli-tunnel")]
"start-tunnel" => Some(tunnel::cli),
"contents" => Some(|_| {
#[cfg(feature = "startd")]
println!("startd");
#[cfg(feature = "cli-startd")]
println!("start-cli");
#[cfg(feature = "cli-container")]
println!("start-container");
#[cfg(feature = "registry")]
println!("start-registryd");
#[cfg(feature = "cli-registry")]
println!("start-registry");
#[cfg(feature = "tunnel")]
println!("start-tunneld");
#[cfg(feature = "cli-tunnel")]
println!("start-tunnel");
}),
_ => None,
#[derive(Default)]
pub struct MultiExecutable(BTreeMap<&'static str, fn(VecDeque<OsString>)>);
impl MultiExecutable {
pub fn enable_startd(&mut self) -> &mut Self {
self.0.insert("startd", startd::main);
self.0
.insert("embassyd", |_| deprecated::renamed("embassyd", "startd"));
self.0
.insert("embassy-init", |_| deprecated::removed("embassy-init"));
self
}
pub fn enable_start_cli(&mut self) -> &mut Self {
self.0.insert("start-cli", start_cli::main);
self.0.insert("embassy-cli", |_| {
deprecated::renamed("embassy-cli", "start-cli")
});
self.0
.insert("embassy-sdk", |_| deprecated::removed("embassy-sdk"));
self
}
pub fn enable_start_container(&mut self) -> &mut Self {
self.0.insert("start-container", container_cli::main);
self
}
pub fn enable_start_registryd(&mut self) -> &mut Self {
self.0.insert("start-registryd", registry::main);
self
}
pub fn enable_start_registry(&mut self) -> &mut Self {
self.0.insert("start-registry", registry::cli);
self
}
pub fn enable_start_tunneld(&mut self) -> &mut Self {
self.0.insert("start-tunneld", tunnel::main);
self
}
pub fn enable_start_tunnel(&mut self) -> &mut Self {
self.0.insert("start-tunnel", tunnel::cli);
self
}
}
pub fn startbox() {
let mut args = std::env::args_os().collect::<VecDeque<_>>();
for _ in 0..2 {
if let Some(s) = args.pop_front() {
if let Some(x) = Path::new(&*s)
.file_name()
.and_then(|s| s.to_str())
.and_then(|s| select_executable(&s))
{
args.push_front(s);
return x(args);
fn select_executable(&self, name: &str) -> Option<fn(VecDeque<OsString>)> {
self.0.get(&name).copied()
}
pub fn execute(&self) {
let mut args = std::env::args_os().collect::<VecDeque<_>>();
for _ in 0..2 {
if let Some(s) = args.pop_front() {
if let Some(name) = Path::new(&*s).file_name().and_then(|s| s.to_str()) {
if name == "--contents" {
for name in self.0.keys() {
println!("{name}");
}
}
if let Some(x) = self.select_executable(&name) {
args.push_front(s);
return x(args);
}
}
}
}
let args = std::env::args().collect::<VecDeque<_>>();
eprintln!(
"unknown executable: {}",
args.get(1)
.or_else(|| args.get(0))
.map(|s| s.as_str())
.unwrap_or("N/A")
);
std::process::exit(1);
}
let args = std::env::args().collect::<VecDeque<_>>();
eprintln!(
"unknown executable: {}",
args.get(1)
.or_else(|| args.get(0))
.map(|s| s.as_str())
.unwrap_or("N/A")
);
std::process::exit(1);
}

View File

@@ -45,7 +45,6 @@ use crate::service::ServiceMap;
use crate::service::action::update_tasks;
use crate::service::effects::callbacks::ServiceCallbacks;
use crate::shutdown::Shutdown;
use crate::status::DesiredStatus;
use crate::util::io::delete_file;
use crate::util::lshw::LshwDevice;
use crate::util::sync::{SyncMutex, SyncRwLock, Watch};
@@ -436,9 +435,7 @@ impl RpcContext {
.into_iter()
.any(|(_, t)| t.active && t.task.severity == TaskSeverity::Critical)
{
pde.as_status_info_mut()
.as_desired_mut()
.ser(&DesiredStatus::Stopped)?;
pde.as_status_info_mut().stop()?;
}
}
Ok(())

View File

@@ -1,6 +1,6 @@
use std::collections::{BTreeMap, BTreeSet, VecDeque};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::Arc;
use std::sync::{Arc, OnceLock};
use chrono::{DateTime, Utc};
use exver::{Version, VersionRange};
@@ -33,6 +33,8 @@ use crate::util::serde::MaybeUtf8String;
use crate::version::{Current, VersionT};
use crate::{ARCH, PLATFORM};
pub static DB_UI_SEED_CELL: OnceLock<&'static str> = OnceLock::new();
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
@@ -65,9 +67,10 @@ impl Public {
preferred_external_port: 80,
add_ssl: Some(AddSslOptions {
preferred_external_port: 443,
add_x_forwarded_headers: false,
alpn: Some(AlpnInfo::Specified(vec![
MaybeUtf8String("http/1.1".into()),
MaybeUtf8String("h2".into()),
MaybeUtf8String("http/1.1".into()),
])),
}),
secure: None,
@@ -123,20 +126,8 @@ impl Public {
kiosk,
},
package_data: AllPackageData::default(),
ui: {
#[cfg(feature = "startd")]
{
serde_json::from_str(include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/../../web/patchdb-ui-seed.json"
)))
.with_kind(ErrorKind::Deserialization)?
}
#[cfg(not(feature = "startd"))]
{
Value::Null
}
},
ui: serde_json::from_str(*DB_UI_SEED_CELL.get().unwrap_or(&"null"))
.with_kind(ErrorKind::Deserialization)?,
})
}
}

View File

@@ -19,6 +19,11 @@ pub enum FileType {
Directory,
Infer,
}
impl Default for FileType {
fn default() -> Self {
FileType::Directory
}
}
pub struct Bind<Src: AsRef<Path>> {
src: Src,

View File

@@ -2,34 +2,86 @@ use std::ffi::OsStr;
use std::fmt::Display;
use std::os::unix::fs::MetadataExt;
use std::path::Path;
use std::str::FromStr;
use clap::Parser;
use clap::builder::ValueParserFactory;
use digest::generic_array::GenericArray;
use digest::{Digest, OutputSizeUser};
use models::FromStrParser;
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use tokio::process::Command;
use ts_rs::TS;
use super::{FileSystem, MountType};
use crate::disk::mount::filesystem::default_mount_command;
use super::FileSystem;
use crate::prelude::*;
use crate::util::Invoke;
#[derive(Clone, Copy, Debug, Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
pub struct IdMap {
pub from_id: u32,
pub to_id: u32,
pub range: u32,
}
impl IdMap {
pub fn stack(a: Vec<IdMap>, b: Vec<IdMap>) -> Vec<IdMap> {
let mut res = Vec::with_capacity(a.len() + b.len());
res.extend_from_slice(&a);
for mut b in b {
for a in &a {
if a.from_id <= b.to_id && a.from_id + a.range > b.to_id {
b.to_id += a.to_id;
}
}
res.push(b);
}
res
}
}
impl FromStr for IdMap {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let split = s.splitn(3, ":").collect::<Vec<_>>();
if let Some([u, k, r]) = split.get(0..3) {
Ok(Self {
from_id: u.parse()?,
to_id: k.parse()?,
range: r.parse()?,
})
} else if let Some([u, k]) = split.get(0..2) {
Ok(Self {
from_id: u.parse()?,
to_id: k.parse()?,
range: 1,
})
} else {
Err(Error::new(
eyre!("{s} is not a valid idmap"),
ErrorKind::ParseNumber,
))
}
}
}
impl ValueParserFactory for IdMap {
type Parser = FromStrParser<IdMap>;
fn value_parser() -> Self::Parser {
<Self::Parser>::new()
}
}
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct IdMapped<Fs: FileSystem> {
filesystem: Fs,
from_id: u32,
to_id: u32,
range: u32,
idmap: Vec<IdMap>,
}
impl<Fs: FileSystem> IdMapped<Fs> {
pub fn new(filesystem: Fs, from_id: u32, to_id: u32, range: u32) -> Self {
Self {
filesystem,
from_id,
to_id,
range,
}
pub fn new(filesystem: Fs, idmap: Vec<IdMap>) -> Self {
Self { filesystem, idmap }
}
}
impl<Fs: FileSystem> FileSystem for IdMapped<Fs> {
@@ -44,12 +96,17 @@ impl<Fs: FileSystem> FileSystem for IdMapped<Fs> {
.mount_options()
.into_iter()
.map(|a| Box::new(a) as Box<dyn Display>)
.chain(std::iter::once(Box::new(lazy_format!(
"X-mount.idmap=b:{}:{}:{}",
self.from_id,
self.to_id,
self.range,
)) as Box<dyn Display>))
.chain(if self.idmap.is_empty() {
None
} else {
use std::fmt::Write;
let mut option = "X-mount.idmap=".to_owned();
for i in &self.idmap {
write!(&mut option, "b:{}:{}:{} ", i.from_id, i.to_id, i.range).unwrap();
}
Some(Box::new(option) as Box<dyn Display>)
})
}
async fn source(&self) -> Result<Option<impl AsRef<Path>>, Error> {
self.filesystem.source().await
@@ -57,26 +114,28 @@ impl<Fs: FileSystem> FileSystem for IdMapped<Fs> {
async fn pre_mount(&self, mountpoint: &Path) -> Result<(), Error> {
self.filesystem.pre_mount(mountpoint).await?;
let info = tokio::fs::metadata(mountpoint).await?;
let uid_in_range = self.from_id <= info.uid() && self.from_id + self.range > info.uid();
let gid_in_range = self.from_id <= info.gid() && self.from_id + self.range > info.gid();
if uid_in_range || gid_in_range {
Command::new("chown")
.arg(format!(
"{uid}:{gid}",
uid = if uid_in_range {
self.to_id + info.uid() - self.from_id
} else {
info.uid()
},
gid = if gid_in_range {
self.to_id + info.gid() - self.from_id
} else {
info.gid()
},
))
.arg(&mountpoint)
.invoke(crate::ErrorKind::Filesystem)
.await?;
for i in &self.idmap {
let uid_in_range = i.from_id <= info.uid() && i.from_id + i.range > info.uid();
let gid_in_range = i.from_id <= info.gid() && i.from_id + i.range > info.gid();
if uid_in_range || gid_in_range {
Command::new("chown")
.arg(format!(
"{uid}:{gid}",
uid = if uid_in_range {
i.to_id + info.uid() - i.from_id
} else {
info.uid()
},
gid = if gid_in_range {
i.to_id + info.gid() - i.from_id
} else {
info.gid()
},
))
.arg(&mountpoint)
.invoke(crate::ErrorKind::Filesystem)
.await?;
}
}
Ok(())
}
@@ -86,9 +145,12 @@ impl<Fs: FileSystem> FileSystem for IdMapped<Fs> {
let mut sha = Sha256::new();
sha.update("IdMapped");
sha.update(self.filesystem.source_hash().await?);
sha.update(u32::to_be_bytes(self.from_id));
sha.update(u32::to_be_bytes(self.to_id));
sha.update(u32::to_be_bytes(self.range));
sha.update(usize::to_be_bytes(self.idmap.len()));
for i in &self.idmap {
sha.update(u32::to_be_bytes(i.from_id));
sha.update(u32::to_be_bytes(i.to_id));
sha.update(u32::to_be_bytes(i.range));
}
Ok(sha.finalize())
}
}

View File

@@ -1,5 +1,6 @@
use std::convert::Infallible;
use std::ops::{Deref, DerefMut};
use std::path::Path;
use std::process::Stdio;
use std::str::FromStr;
use std::time::{Duration, UNIX_EPOCH};
@@ -27,7 +28,6 @@ use tracing::instrument;
use crate::context::{CliContext, RpcContext};
use crate::error::ResultExt;
use crate::lxc::ContainerId;
use crate::prelude::*;
use crate::rpc_continuations::{Guid, RpcContinuation, RpcContinuations};
use crate::util::Invoke;
@@ -223,7 +223,7 @@ fn deserialize_log_message<'de, D: serde::de::Deserializer<'de>>(
pub enum LogSource {
Kernel,
Unit(&'static str),
Container(ContainerId),
Package(PackageId),
}
pub const SYSTEM_UNIT: &str = "startd";
@@ -499,22 +499,10 @@ fn logs_follow<
}
async fn get_package_id(
ctx: &RpcContext,
_: &RpcContext,
PackageIdParams { id }: PackageIdParams,
) -> Result<LogSource, Error> {
let container_id = ctx
.services
.get(&id)
.await
.as_ref()
.map(|x| x.container_id())
.ok_or_else(|| {
Error::new(
eyre!("No service found with id: {}", id),
ErrorKind::NotFound,
)
})??;
Ok(LogSource::Container(container_id))
Ok(LogSource::Package(id))
}
pub fn package_logs() -> ParentHandler<RpcContext, LogsParams<PackageIdParams>> {
@@ -596,16 +584,8 @@ pub async fn journalctl(
}
fn gen_journalctl_command(id: &LogSource) -> Command {
let mut cmd = match id {
LogSource::Container(container_id) => {
let mut cmd = Command::new("lxc-attach");
cmd.arg(format!("{}", container_id))
.arg("--")
.arg("journalctl");
cmd
}
_ => Command::new("journalctl"),
};
let mut cmd = Command::new("journalctl");
cmd.kill_on_drop(true);
cmd.arg("--output=json");
@@ -618,8 +598,11 @@ fn gen_journalctl_command(id: &LogSource) -> Command {
cmd.arg("-u");
cmd.arg(id);
}
LogSource::Container(_container_id) => {
cmd.arg("-u").arg("container-runtime.service");
LogSource::Package(id) => {
cmd.arg("-u")
.arg("container-runtime.service")
.arg("-D")
.arg(Path::new("/media/startos/data/package-data/logs").join(id));
}
};
cmd

View File

@@ -20,7 +20,7 @@ use ts_rs::TS;
use crate::context::RpcContext;
use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::block_dev::BlockDev;
use crate::disk::mount::filesystem::idmapped::IdMapped;
use crate::disk::mount::filesystem::idmapped::{IdMap, IdMapped};
use crate::disk::mount::filesystem::overlayfs::OverlayGuard;
use crate::disk::mount::filesystem::{MountType, ReadOnly, ReadWrite};
use crate::disk::mount::guard::{GenericMountGuard, MountGuard, TmpMountGuard};
@@ -185,9 +185,11 @@ impl LxcContainer {
TmpMountGuard::mount(
&IdMapped::new(
BlockDev::new("/usr/lib/startos/container-runtime/rootfs.squashfs"),
0,
100000,
65536,
vec![IdMap {
from_id: 0,
to_id: 100000,
range: 65536,
}],
),
ReadOnly,
)

View File

@@ -1,7 +0,0 @@
fn main() {
#[cfg(feature = "backtrace-on-stack-overflow")]
unsafe {
backtrace_on_stack_overflow::enable()
};
startos::bins::startbox()
}

View File

@@ -0,0 +1,8 @@
use startos::bins::MultiExecutable;
fn main() {
MultiExecutable::default()
.enable_start_registry()
.enable_start_registryd()
.execute()
}

View File

@@ -0,0 +1,5 @@
use startos::bins::MultiExecutable;
fn main() {
MultiExecutable::default().enable_start_cli().execute()
}

View File

@@ -0,0 +1,7 @@
use startos::bins::MultiExecutable;
fn main() {
MultiExecutable::default()
.enable_start_container()
.execute()
}

View File

@@ -0,0 +1,29 @@
use startos::bins::MultiExecutable;
fn main() {
startos::net::static_server::UI_CELL
.set(include_dir::include_dir!(
"$CARGO_MANIFEST_DIR/../../web/dist/static/ui"
))
.ok();
startos::net::static_server::SETUP_WIZARD_CELL
.set(include_dir::include_dir!(
"$CARGO_MANIFEST_DIR/../../web/dist/static/setup-wizard"
))
.ok();
startos::net::static_server::INSTALL_WIZARD_CELL
.set(include_dir::include_dir!(
"$CARGO_MANIFEST_DIR/../../web/dist/static/install-wizard"
))
.ok();
startos::db::model::public::DB_UI_SEED_CELL
.set(include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/../../web/patchdb-ui-seed.json"
)))
.ok();
MultiExecutable::default()
.enable_startd()
.enable_start_cli()
.execute()
}

View File

@@ -0,0 +1,13 @@
use startos::bins::MultiExecutable;
fn main() {
startos::tunnel::context::TUNNEL_UI_CELL
.set(include_dir::include_dir!(
"$CARGO_MANIFEST_DIR/../../web/dist/static/start-tunnel"
))
.ok();
MultiExecutable::default()
.enable_start_tunnel()
.enable_start_tunneld()
.execute()
}

View File

@@ -408,9 +408,10 @@ impl Resolver {
a => a,
};
self.resolve.peek(|r| {
if r.private_domains
.get(&*name.to_lowercase().to_utf8().trim_end_matches('.'))
.map_or(false, |d| d.strong_count() > 0)
if !src.is_loopback()
&& r.private_domains
.get(&*name.to_lowercase().to_utf8().trim_end_matches('.'))
.map_or(false, |d| d.strong_count() > 0)
{
if let Some(res) = self.net_iface.peek(|i| {
i.values()
@@ -429,8 +430,9 @@ impl Resolver {
}
if STARTOS.zone_of(name) || EMBASSY.zone_of(name) {
let Ok(pkg) = name
.trim_to(2)
.iter()
.rev()
.skip(1)
.next()
.map(std::str::from_utf8)
.transpose()

View File

@@ -190,27 +190,6 @@ impl PortForwardController {
.arg("net.ipv4.ip_forward=1")
.invoke(ErrorKind::Network)
.await?;
if Command::new("iptables")
.arg("-t")
.arg("nat")
.arg("-C")
.arg("POSTROUTING")
.arg("-j")
.arg("MASQUERADE")
.invoke(ErrorKind::Network)
.await
.is_err()
{
Command::new("iptables")
.arg("-t")
.arg("nat")
.arg("-A")
.arg("POSTROUTING")
.arg("-j")
.arg("MASQUERADE")
.invoke(ErrorKind::Network)
.await?;
}
Ok::<_, Error>(())
}
.await
@@ -474,7 +453,7 @@ impl From<&InterfaceForwardState> for ForwardTable {
entry.external,
ForwardTarget {
target: *target,
filter: format!("{:?}", filter),
filter: format!("{:#?}", filter),
},
)
})

View File

@@ -165,8 +165,8 @@ pub struct BindOptions {
#[ts(export)]
pub struct AddSslOptions {
    /// External port to advertise for the TLS endpoint.
    pub preferred_external_port: u16,
    /// When true, proxied HTTP requests get `X-Forwarded-Proto` /
    /// `X-Forwarded-For` headers injected by the reverse proxy.
    /// Absent in serialized input means `false`.
    #[serde(default)]
    pub add_x_forwarded_headers: bool, // TODO
    /// ALPN protocols to offer during the TLS handshake; `None` falls back
    /// to the caller's default strategy.
    pub alpn: Option<AlpnInfo>,
}

View File

@@ -0,0 +1,225 @@
use std::net::IpAddr;
use std::sync::Arc;
use futures::FutureExt;
use http::HeaderValue;
use hyper::service::service_fn;
use hyper_util::rt::{TokioExecutor, TokioIo, TokioTimer};
use tokio::sync::Mutex;
use crate::prelude::*;
use crate::util::io::ReadWriter;
use crate::util::serde::MaybeUtf8String;
/// Serves plain-HTTP requests that arrived on an HTTPS port by answering
/// every request with a 307 redirect to the same URI under the `https`
/// scheme, taking the authority from the request's `Host` header.
///
/// Responds 400 when no `Host` header is present, since the redirect
/// target cannot be constructed without it. Runs until the connection on
/// `stream` closes.
pub async fn handle_http_on_https(stream: impl ReadWriter + Unpin + 'static) -> Result<(), Error> {
    use axum::body::Body;
    use axum::extract::Request;
    use axum::response::Response;
    use http::Uri;

    use crate::net::static_server::server_error;

    // The `auto` builder accepts either HTTP/1.x or HTTP/2 on the raw stream.
    hyper_util::server::conn::auto::Builder::new(hyper_util::rt::TokioExecutor::new())
        .serve_connection(
            hyper_util::rt::TokioIo::new(stream),
            hyper_util::service::TowerToHyperService::new(axum::Router::new().fallback(
                axum::routing::method_routing::any(move |req: Request| async move {
                    match async move {
                        let host = req
                            .headers()
                            .get(http::header::HOST)
                            .and_then(|host| host.to_str().ok());
                        if let Some(host) = host {
                            // Rebuild the request URI with scheme=https and the
                            // client-supplied authority, preserving path/query.
                            let uri = Uri::from_parts({
                                let mut parts = req.uri().to_owned().into_parts();
                                parts.scheme = Some("https".parse()?);
                                parts.authority = Some(host.parse()?);
                                parts
                            })?;
                            // 307 (not 301/302) preserves the request method.
                            Response::builder()
                                .status(http::StatusCode::TEMPORARY_REDIRECT)
                                .header(http::header::LOCATION, uri.to_string())
                                .body(Body::default())
                        } else {
                            Response::builder()
                                .status(http::StatusCode::BAD_REQUEST)
                                .body(Body::from("Host header required"))
                        }
                    }
                    .await
                    {
                        Ok(a) => a,
                        // Failures here are malformed host/uri parts; log and
                        // hand back the generic 5xx error page.
                        Err(e) => {
                            tracing::warn!("Error redirecting http request on ssl port: {e}");
                            tracing::error!("{e:?}");
                            server_error(Error::new(e, ErrorKind::Network))
                        }
                    }
                }),
            )),
        )
        .await
        .map_err(|e| Error::new(color_eyre::eyre::Report::msg(e), ErrorKind::Network))
}
/// Proxies an HTTP connection from `from` to `to`, picking the codec from
/// the negotiated ALPN protocol: exactly `h2` selects HTTP/2; anything
/// else — including no ALPN at all — falls back to HTTP/1.x.
pub async fn run_http_proxy<F, T>(
    from: F,
    to: T,
    alpn: Option<MaybeUtf8String>,
    src_ip: Option<IpAddr>,
) -> Result<(), Error>
where
    F: ReadWriter + Unpin + Send + 'static,
    T: ReadWriter + Unpin + Send + 'static,
{
    let negotiated_h2 = matches!(&alpn, Some(proto) if proto.0.as_slice() == b"h2");
    if negotiated_h2 {
        run_http2_proxy(from, to, src_ip).await
    } else {
        run_http1_proxy(from, to, src_ip).await
    }
}
/// Proxies a single HTTP/2 connection from `from` (served) to `to`
/// (dialed as an h2 client), forwarding each request and response.
///
/// Adds `X-Forwarded-Proto: https` and, when `src_ip` is given,
/// `X-Forwarded-For` to every proxied request (`insert` overwrites any
/// client-supplied value). Extended CONNECT requests (those carrying the
/// `:protocol` pseudo-header, e.g. WebSocket-over-h2) are detected and
/// their upgraded byte streams are spliced in a spawned task.
///
/// Returns when both the server and client sides of the connection finish,
/// or on the first error from either.
pub async fn run_http2_proxy<F, T>(from: F, to: T, src_ip: Option<IpAddr>) -> Result<(), Error>
where
    F: ReadWriter + Unpin + Send + 'static,
    T: ReadWriter + Unpin + Send + 'static,
{
    // Client half: h2 handshake toward the upstream; `to` becomes the
    // connection-driver future that must be polled alongside the server.
    let (client, to) = hyper::client::conn::http2::Builder::new(TokioExecutor::new())
        .timer(TokioTimer::new())
        .handshake(TokioIo::new(to))
        .await?;
    // Server half: accept h2 from the downstream peer.
    // `enable_connect_protocol` advertises RFC 8441 extended CONNECT.
    let from = hyper::server::conn::http2::Builder::new(TokioExecutor::new())
        .timer(TokioTimer::new())
        .enable_connect_protocol()
        .serve_connection(
            TokioIo::new(from),
            service_fn(|mut req| {
                let mut client = client.clone();
                async move {
                    req.headers_mut()
                        .insert("X-Forwarded-Proto", HeaderValue::from_static("https"));
                    if let Some(src_ip) = src_ip
                        .map(|s| s.to_string())
                        .as_deref()
                        .and_then(|s| HeaderValue::from_str(s).ok())
                    {
                        req.headers_mut().insert("X-Forwarded-For", src_ip);
                    }
                    // Extended CONNECT (`:protocol` present) signals an
                    // upgrade; grab the upgrade future before the request
                    // is consumed by send_request.
                    let upgrade = if req.method() == http::method::Method::CONNECT
                        && req.extensions().get::<hyper::ext::Protocol>().is_some()
                    {
                        Some(hyper::upgrade::on(&mut req))
                    } else {
                        None
                    };
                    let mut res = client.send_request(req).await?;
                    if let Some(from) = upgrade {
                        // Pair the downstream upgrade with the upstream one
                        // and splice raw bytes until either side closes.
                        let to = hyper::upgrade::on(&mut res);
                        tokio::task::spawn(async move {
                            if let Some((from, to)) = futures::future::try_join(from, to).await.ok()
                            {
                                tokio::io::copy_bidirectional(
                                    &mut TokioIo::new(from),
                                    &mut TokioIo::new(to),
                                )
                                .await
                                .ok();
                            }
                        });
                    }
                    Ok::<_, hyper::Error>(res)
                }
            }),
        );
    // Drive both connection state machines to completion together.
    futures::future::try_join(from.boxed(), to.boxed()).await?;
    Ok(())
}
/// Proxies a single HTTP/1.x connection from `from` (served) to `to`
/// (dialed as an HTTP/1 client), forwarding each request and response.
///
/// Adds `X-Forwarded-Proto: https` and, when `src_ip` is given,
/// `X-Forwarded-For` to every proxied request (`insert` overwrites any
/// client-supplied value). Requests whose `Connection` header contains the
/// `upgrade` token (e.g. WebSockets) have their upgraded byte streams
/// spliced in a spawned task; header casing is preserved end-to-end.
pub async fn run_http1_proxy<F, T>(from: F, to: T, src_ip: Option<IpAddr>) -> Result<(), Error>
where
    F: ReadWriter + Unpin + Send + 'static,
    T: ReadWriter + Unpin + Send + 'static,
{
    let (client, to) = hyper::client::conn::http1::Builder::new()
        .title_case_headers(true)
        .preserve_header_case(true)
        .handshake(TokioIo::new(to))
        .await?;
    // HTTP/1 SendRequest is not Clone and requests must be sequential;
    // the mutex serializes them across the service_fn invocations.
    let client = Arc::new(Mutex::new(client));
    let from = hyper::server::conn::http1::Builder::new()
        .timer(TokioTimer::new())
        .serve_connection(
            TokioIo::new(from),
            service_fn(|mut req| {
                let client = client.clone();
                async move {
                    req.headers_mut()
                        .insert("X-Forwarded-Proto", HeaderValue::from_static("https"));
                    if let Some(src_ip) = src_ip
                        .map(|s| s.to_string())
                        .as_deref()
                        .and_then(|s| HeaderValue::from_str(s).ok())
                    {
                        req.headers_mut().insert("X-Forwarded-For", src_ip);
                    }
                    // `Connection: ..., upgrade, ...` (case-insensitive,
                    // comma-separated) marks an upgradable request; grab the
                    // upgrade future before send_request consumes `req`.
                    let upgrade =
                        if req
                            .headers()
                            .get(http::header::CONNECTION)
                            .map_or(false, |h| {
                                h.to_str()
                                    .unwrap_or_default()
                                    .split(",")
                                    .any(|s| s.trim().eq_ignore_ascii_case("upgrade"))
                            })
                        {
                            Some(hyper::upgrade::on(&mut req))
                        } else {
                            None
                        };
                    let mut res = client.lock().await.send_request(req).await?;
                    if let Some(from) = upgrade {
                        // Remember the negotiated protocol from the response's
                        // Upgrade header before the response is returned.
                        let kind = res
                            .headers()
                            .get(http::header::UPGRADE)
                            .map(|h| h.to_owned());
                        let to = hyper::upgrade::on(&mut res);
                        tokio::task::spawn(async move {
                            if let Some((from, to)) = futures::future::try_join(from, to).await.ok()
                            {
                                // NOTE(review): upgrades to HTTP/2 are re-proxied
                                // through the h2 codec; confirm peers actually send
                                // "HTTP/2.0" here (h2c upgrades conventionally use
                                // the "h2c" token).
                                if kind.map_or(false, |k| k == "HTTP/2.0") {
                                    run_http2_proxy(TokioIo::new(from), TokioIo::new(to), src_ip)
                                        .await
                                        .ok();
                                } else {
                                    // Any other upgrade (e.g. websocket): splice raw bytes.
                                    tokio::io::copy_bidirectional(
                                        &mut TokioIo::new(from),
                                        &mut TokioIo::new(to),
                                    )
                                    .await
                                    .ok();
                                }
                            }
                        });
                    }
                    Ok::<_, hyper::Error>(res)
                }
            }),
        );
    // `with_upgrades` keeps both connections alive through 101 responses.
    futures::future::try_join(from.with_upgrades().boxed(), to.with_upgrades().boxed()).await?;
    Ok(())
}

View File

@@ -5,6 +5,7 @@ pub mod dns;
pub mod forward;
pub mod gateway;
pub mod host;
pub mod http;
pub mod keys;
pub mod mdns;
pub mod net_controller;

View File

@@ -133,9 +133,10 @@ impl NetController {
preferred_external_port: 80,
add_ssl: Some(AddSslOptions {
preferred_external_port: 443,
add_x_forwarded_headers: false,
alpn: Some(AlpnInfo::Specified(vec![
MaybeUtf8String("http/1.1".into()),
MaybeUtf8String("h2".into()),
MaybeUtf8String("http/1.1".into()),
])),
}),
secure: None,
@@ -283,6 +284,7 @@ impl NetServiceData {
filter: bind.net.clone().into_dyn(),
acme: None,
addr,
add_x_forwarded_headers: ssl.add_x_forwarded_headers,
connect_ssl: connect_ssl
.clone()
.map(|_| ctrl.tls_client_config.clone()),
@@ -306,6 +308,7 @@ impl NetServiceData {
.into_dyn(),
acme: None,
addr,
add_x_forwarded_headers: ssl.add_x_forwarded_headers,
connect_ssl: connect_ssl
.clone()
.map(|_| ctrl.tls_client_config.clone()),
@@ -335,6 +338,8 @@ impl NetServiceData {
.into_dyn(),
acme: public.acme.clone(),
addr,
add_x_forwarded_headers: ssl
.add_x_forwarded_headers,
connect_ssl: connect_ssl
.clone()
.map(|_| ctrl.tls_client_config.clone()),
@@ -362,6 +367,8 @@ impl NetServiceData {
.into_dyn(),
acme: public.acme.clone(),
addr,
add_x_forwarded_headers: ssl
.add_x_forwarded_headers,
connect_ssl: connect_ssl
.clone()
.map(|_| ctrl.tls_client_config.clone()),
@@ -378,6 +385,8 @@ impl NetServiceData {
.into_dyn(),
acme: None,
addr,
add_x_forwarded_headers: ssl
.add_x_forwarded_headers,
connect_ssl: connect_ssl
.clone()
.map(|_| ctrl.tls_client_config.clone()),
@@ -405,6 +414,8 @@ impl NetServiceData {
.into_dyn(),
acme: public.acme.clone(),
addr,
add_x_forwarded_headers: ssl
.add_x_forwarded_headers,
connect_ssl: connect_ssl
.clone()
.map(|_| ctrl.tls_client_config.clone()),
@@ -421,6 +432,8 @@ impl NetServiceData {
.into_dyn(),
acme: None,
addr,
add_x_forwarded_headers: ssl
.add_x_forwarded_headers,
connect_ssl: connect_ssl
.clone()
.map(|_| ctrl.tls_client_config.clone()),
@@ -503,6 +516,9 @@ impl NetServiceData {
private,
} = address
{
if public.is_none() {
private_dns.insert(address.clone());
}
let private = private && !info.public();
let public =
public.as_ref().map_or(false, |p| &p.gateway == gateway_id);
@@ -581,7 +597,6 @@ impl NetServiceData {
}
}
hostname_info.insert(*port, bind_hostname_info);
private_dns.append(&mut hostnames);
}
}

View File

@@ -2,7 +2,7 @@ use std::cmp::min;
use std::future::Future;
use std::io::Cursor;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::{Arc, OnceLock};
use std::time::UNIX_EPOCH;
use async_compression::tokio::bufread::GzipEncoder;
@@ -59,22 +59,8 @@ const PROXY_STRIP_HEADERS: &[&str] = &["cookie", "host", "origin", "referer", "u
pub const EMPTY_DIR: Dir<'_> = Dir::new("", &[]);
#[macro_export]
macro_rules! else_empty_dir {
($cfg:meta => $dir:expr) => {{
#[cfg(all($cfg, not(feature = "test")))]
{
$dir
}
#[cfg(not(all($cfg, not(feature = "test"))))]
{
crate::net::static_server::EMPTY_DIR
}
}};
}
pub trait UiContext: Context + AsRef<RpcContinuations> + Clone + Sized {
const UI_DIR: &'static Dir<'static>;
fn ui_dir() -> &'static Dir<'static>;
fn api() -> ParentHandler<Self>;
fn middleware(server: Server<Self>) -> HttpServer<Self>;
fn extend_router(self, router: Router) -> Router {
@@ -82,11 +68,12 @@ pub trait UiContext: Context + AsRef<RpcContinuations> + Clone + Sized {
}
}
pub static UI_CELL: OnceLock<Dir<'static>> = OnceLock::new();
impl UiContext for RpcContext {
const UI_DIR: &'static Dir<'static> = &else_empty_dir!(
feature = "startd" =>
include_dir::include_dir!("$CARGO_MANIFEST_DIR/../../web/dist/static/ui")
);
fn ui_dir() -> &'static Dir<'static> {
UI_CELL.get().unwrap_or(&EMPTY_DIR)
}
fn api() -> ParentHandler<Self> {
main_api()
}
@@ -149,10 +136,9 @@ impl UiContext for RpcContext {
}
impl UiContext for InitContext {
const UI_DIR: &'static Dir<'static> = &else_empty_dir!(
feature = "startd" =>
include_dir::include_dir!("$CARGO_MANIFEST_DIR/../../web/dist/static/ui")
);
fn ui_dir() -> &'static Dir<'static> {
UI_CELL.get().unwrap_or(&EMPTY_DIR)
}
fn api() -> ParentHandler<Self> {
main_api()
}
@@ -162,10 +148,9 @@ impl UiContext for InitContext {
}
impl UiContext for DiagnosticContext {
const UI_DIR: &'static Dir<'static> = &else_empty_dir!(
feature = "startd" =>
include_dir::include_dir!("$CARGO_MANIFEST_DIR/../../web/dist/static/ui")
);
fn ui_dir() -> &'static Dir<'static> {
UI_CELL.get().unwrap_or(&EMPTY_DIR)
}
fn api() -> ParentHandler<Self> {
main_api()
}
@@ -174,11 +159,12 @@ impl UiContext for DiagnosticContext {
}
}
pub static SETUP_WIZARD_CELL: OnceLock<Dir<'static>> = OnceLock::new();
impl UiContext for SetupContext {
const UI_DIR: &'static Dir<'static> = &else_empty_dir!(
feature = "startd" =>
include_dir::include_dir!("$CARGO_MANIFEST_DIR/../../web/dist/static/setup-wizard")
);
fn ui_dir() -> &'static Dir<'static> {
SETUP_WIZARD_CELL.get().unwrap_or(&EMPTY_DIR)
}
fn api() -> ParentHandler<Self> {
main_api()
}
@@ -187,11 +173,12 @@ impl UiContext for SetupContext {
}
}
pub static INSTALL_WIZARD_CELL: OnceLock<Dir<'static>> = OnceLock::new();
impl UiContext for InstallContext {
const UI_DIR: &'static Dir<'static> = &else_empty_dir!(
feature = "startd" =>
include_dir::include_dir!("$CARGO_MANIFEST_DIR/../../web/dist/static/install-wizard")
);
fn ui_dir() -> &'static Dir<'static> {
INSTALL_WIZARD_CELL.get().unwrap_or(&EMPTY_DIR)
}
fn api() -> ParentHandler<Self> {
main_api()
}
@@ -208,7 +195,7 @@ pub fn rpc_router<C: Context + Clone + AsRef<RpcContinuations>>(
.route("/rpc/{*path}", any(server))
.route(
"/ws/rpc/{guid}",
get({
any({
let ctx = ctx.clone();
move |x::Path(guid): x::Path<Guid>,
ws: axum::extract::ws::WebSocketUpgrade| async move {
@@ -243,12 +230,12 @@ fn serve_ui<C: UiContext>(req: Request) -> Result<Response, Error> {
.strip_prefix('/')
.unwrap_or(request_parts.uri.path());
let file = C::UI_DIR
let file = C::ui_dir()
.get_file(uri_path)
.or_else(|| C::UI_DIR.get_file("index.html"));
.or_else(|| C::ui_dir().get_file("index.html"));
if let Some(file) = file {
FileData::from_embedded(&request_parts, file, C::UI_DIR)?
FileData::from_embedded(&request_parts, file, C::ui_dir())?
.into_response(&request_parts)
} else {
Ok(not_found())

View File

@@ -15,9 +15,10 @@ use tokio_rustls::rustls::sign::CertifiedKey;
use tokio_rustls::rustls::{ClientConfig, RootCertStore, ServerConfig};
use visit_rs::{Visit, VisitFields};
use crate::net::http::handle_http_on_https;
use crate::net::web_server::{Accept, AcceptStream, MetadataVisitor};
use crate::prelude::*;
use crate::util::io::{BackTrackingIO, ReadWriter};
use crate::util::io::BackTrackingIO;
use crate::util::serde::MaybeUtf8String;
use crate::util::sync::SyncMutex;
@@ -35,7 +36,7 @@ impl<V: MetadataVisitor<Result = ()>, M: Visit<V>> Visit<V> for TlsMetadata<M> {
#[derive(Debug, Clone)]
pub struct TlsHandshakeInfo {
pub sni: Option<InternedString>,
pub alpn: Vec<MaybeUtf8String>,
pub alpn: Option<MaybeUtf8String>,
}
impl<V: MetadataVisitor> Visit<V> for TlsHandshakeInfo {
fn visit(&self, visitor: &mut V) -> <V as visit_rs::Visitor>::Result {
@@ -200,32 +201,33 @@ where
};
let hello = mid.client_hello();
if let Some(cfg) = tls_handler.get_config(&hello, &metadata).await {
let metadata = TlsMetadata {
inner: metadata,
tls_info: TlsHandshakeInfo {
sni: hello.server_name().map(InternedString::intern),
alpn: hello
.alpn()
.into_iter()
.flatten()
.map(|a| MaybeUtf8String(a.to_vec()))
.collect(),
},
};
let buffered = mid.io.stop_buffering();
mid.io
.write_all(&buffered)
.await
.with_kind(ErrorKind::Network)?;
let stream = match mid.into_stream(Arc::new(cfg)).await {
Ok(stream) => Box::pin(stream) as AcceptStream,
return Ok(match mid.into_stream(Arc::new(cfg)).await {
Ok(stream) => {
let s = stream.get_ref().1;
Some((
TlsMetadata {
inner: metadata,
tls_info: TlsHandshakeInfo {
sni: s.server_name().map(InternedString::intern),
alpn: s
.alpn_protocol()
.map(|a| MaybeUtf8String(a.to_vec())),
},
},
Box::pin(stream) as AcceptStream,
))
}
Err(e) => {
tracing::trace!("Error completing TLS handshake: {e}");
tracing::trace!("{e:?}");
return Ok(None);
None
}
};
return Ok(Some((metadata, stream)));
});
}
Ok(None)
@@ -251,57 +253,6 @@ where
}
}
async fn handle_http_on_https(stream: impl ReadWriter + Unpin + 'static) -> Result<(), Error> {
use axum::body::Body;
use axum::extract::Request;
use axum::response::Response;
use http::Uri;
use crate::net::static_server::server_error;
hyper_util::server::conn::auto::Builder::new(hyper_util::rt::TokioExecutor::new())
.serve_connection(
hyper_util::rt::TokioIo::new(stream),
hyper_util::service::TowerToHyperService::new(axum::Router::new().fallback(
axum::routing::method_routing::any(move |req: Request| async move {
match async move {
let host = req
.headers()
.get(http::header::HOST)
.and_then(|host| host.to_str().ok());
if let Some(host) = host {
let uri = Uri::from_parts({
let mut parts = req.uri().to_owned().into_parts();
parts.scheme = Some("https".parse()?);
parts.authority = Some(host.parse()?);
parts
})?;
Response::builder()
.status(http::StatusCode::TEMPORARY_REDIRECT)
.header(http::header::LOCATION, uri.to_string())
.body(Body::default())
} else {
Response::builder()
.status(http::StatusCode::BAD_REQUEST)
.body(Body::from("Host header required"))
}
}
.await
{
Ok(a) => a,
Err(e) => {
tracing::warn!("Error redirecting http request on ssl port: {e}");
tracing::error!("{e:?}");
server_error(Error::new(e, ErrorKind::Network))
}
}
}),
)),
)
.await
.map_err(|e| Error::new(color_eyre::eyre::Report::msg(e), ErrorKind::Network))
}
pub fn client_config<'a, I: IntoIterator<Item = &'a X509Ref>>(
crypto_provider: Arc<CryptoProvider>,
root_certs: I,

View File

@@ -57,6 +57,9 @@ impl FromStr for OnionAddress {
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Self(
s.strip_suffix(".onion")
.unwrap_or(s)
.rsplit(".")
.next()
.unwrap_or(s)
.parse::<OnionAddressV3>()
.with_kind(ErrorKind::Tor)?,
@@ -752,6 +755,12 @@ async fn torctl(
)
.await?;
services.send_modify(|s| {
for (_, _, s) in s.values_mut() {
*s = Some(SyncState::Add);
}
});
let handler = async {
loop {
let recv = recv.recv();

View File

@@ -148,7 +148,7 @@ impl VHostController {
JsonKey::new(k.clone()),
v.iter()
.filter(|(_, v)| v.strong_count() > 0)
.map(|(k, _)| format!("{k:?}"))
.map(|(k, _)| format!("{k:#?}"))
.collect(),
)
})
@@ -188,7 +188,13 @@ pub trait VHostTarget<A: Accept>: std::fmt::Debug + Eq {
hello: &'a ClientHello<'a>,
metadata: &'a <A as Accept>::Metadata,
) -> impl Future<Output = Option<(ServerConfig, Self::PreprocessRes)>> + Send + 'a;
fn handle_stream(&self, stream: AcceptStream, prev: Self::PreprocessRes, rc: Weak<()>);
fn handle_stream(
&self,
stream: AcceptStream,
metadata: TlsMetadata<<A as Accept>::Metadata>,
prev: Self::PreprocessRes,
rc: Weak<()>,
);
}
pub trait DynVHostTargetT<A: Accept>: std::fmt::Debug + Any {
@@ -199,8 +205,16 @@ pub trait DynVHostTargetT<A: Accept>: std::fmt::Debug + Any {
prev: ServerConfig,
hello: &'a ClientHello<'a>,
metadata: &'a <A as Accept>::Metadata,
) -> BoxFuture<'a, Option<(ServerConfig, Box<dyn Any + Send>)>>;
fn handle_stream(&self, stream: AcceptStream, prev: Box<dyn Any + Send>, rc: Weak<()>);
) -> BoxFuture<'a, Option<(ServerConfig, Box<dyn Any + Send>)>>
where
<A as Accept>::Metadata: Visit<ExtractVisitor<TcpMetadata>>;
fn handle_stream(
&self,
stream: AcceptStream,
metadata: TlsMetadata<<A as Accept>::Metadata>,
prev: Box<dyn Any + Send>,
rc: Weak<()>,
);
fn eq(&self, other: &dyn DynVHostTargetT<A>) -> bool;
}
impl<A: Accept, T: VHostTarget<A> + 'static> DynVHostTargetT<A> for T {
@@ -220,9 +234,15 @@ impl<A: Accept, T: VHostTarget<A> + 'static> DynVHostTargetT<A> for T {
.map(|o| o.map(|(cfg, res)| (cfg, Box::new(res) as Box<dyn Any + Send>)))
.boxed()
}
fn handle_stream(&self, stream: AcceptStream, prev: Box<dyn Any + Send>, rc: Weak<()>) {
fn handle_stream(
&self,
stream: AcceptStream,
metadata: TlsMetadata<<A as Accept>::Metadata>,
prev: Box<dyn Any + Send>,
rc: Weak<()>,
) {
if let Ok(prev) = prev.downcast() {
VHostTarget::handle_stream(self, stream, *prev, rc);
VHostTarget::handle_stream(self, stream, metadata, *prev, rc);
}
}
fn eq(&self, other: &dyn DynVHostTargetT<A>) -> bool {
@@ -265,22 +285,26 @@ impl<A: Accept + 'static> DynVHostTarget<A> {
prev: ServerConfig,
hello: &ClientHello<'_>,
metadata: &<A as Accept>::Metadata,
) -> Option<(ServerConfig, Preprocessed<A>)> {
) -> Option<(ServerConfig, Preprocessed<A>)>
where
<A as Accept>::Metadata: Visit<ExtractVisitor<TcpMetadata>>,
{
let (cfg, res) = self.0.preprocess(prev, hello, metadata).await?;
Some((cfg, Preprocessed(self, rc, res)))
}
}
impl<A: Accept + 'static> Preprocessed<A> {
fn finish(self, stream: AcceptStream) {
(self.0).0.handle_stream(stream, self.2, self.1);
fn finish(self, stream: AcceptStream, metadata: TlsMetadata<<A as Accept>::Metadata>) {
(self.0).0.handle_stream(stream, metadata, self.2, self.1);
}
}
#[derive(Debug, Clone)]
#[derive(Clone)]
pub struct ProxyTarget {
pub filter: DynInterfaceFilter,
pub acme: Option<AcmeProvider>,
pub addr: SocketAddr,
pub add_x_forwarded_headers: bool,
pub connect_ssl: Result<Arc<ClientConfig>, AlpnInfo>, // Ok: yes, connect using ssl, pass through alpn; Err: connect tcp, use provided strategy for alpn
}
impl PartialEq for ProxyTarget {
@@ -293,11 +317,26 @@ impl PartialEq for ProxyTarget {
}
}
impl Eq for ProxyTarget {}
// Manual Debug impl — presumably because the `Arc<ClientConfig>` inside
// `connect_ssl` does not implement Debug (TODO confirm); its Ok variant is
// rendered as `()` so only the Ok/Err shape is visible.
impl fmt::Debug for ProxyTarget {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("ProxyTarget")
            .field("filter", &self.filter)
            .field("acme", &self.acme)
            .field("addr", &self.addr)
            .field("add_x_forwarded_headers", &self.add_x_forwarded_headers)
            // Erase the TLS client config; keep whether SSL connect is enabled.
            .field("connect_ssl", &self.connect_ssl.as_ref().map(|_| ()))
            .finish()
    }
}
impl<A> VHostTarget<A> for ProxyTarget
where
A: Accept + 'static,
<A as Accept>::Metadata: Visit<ExtractVisitor<GatewayInfo>> + Clone + Send + Sync,
<A as Accept>::Metadata: Visit<ExtractVisitor<GatewayInfo>>
+ Visit<ExtractVisitor<TcpMetadata>>
+ Clone
+ Send
+ Sync,
{
type PreprocessRes = AcceptStream;
fn filter(&self, metadata: &<A as Accept>::Metadata) -> bool {
@@ -356,9 +395,32 @@ where
}
Some((prev, Box::pin(tcp_stream)))
}
fn handle_stream(&self, mut stream: AcceptStream, mut prev: Self::PreprocessRes, rc: Weak<()>) {
fn handle_stream(
&self,
mut stream: AcceptStream,
metadata: TlsMetadata<<A as Accept>::Metadata>,
mut prev: Self::PreprocessRes,
rc: Weak<()>,
) {
let add_x_forwarded_headers = self.add_x_forwarded_headers;
tokio::spawn(async move {
WeakFuture::new(rc, tokio::io::copy_bidirectional(&mut stream, &mut prev)).await
WeakFuture::new(rc, async move {
if add_x_forwarded_headers {
crate::net::http::run_http_proxy(
stream,
prev,
metadata.tls_info.alpn,
extract::<TcpMetadata, _>(&metadata.inner).map(|m| m.peer_addr.ip()),
)
.await
.ok();
} else {
tokio::io::copy_bidirectional(&mut stream, &mut prev)
.await
.ok();
}
})
.await
});
}
}
@@ -429,7 +491,8 @@ impl<A: Accept + 'static> Clone for VHostConnector<A> {
impl<A> WrapTlsHandler<A> for VHostConnector<A>
where
A: Accept + 'static,
<A as Accept>::Metadata: Visit<ExtractVisitor<GatewayInfo>> + Send + Sync,
<A as Accept>::Metadata:
Visit<ExtractVisitor<GatewayInfo>> + Visit<ExtractVisitor<TcpMetadata>> + Send + Sync,
{
async fn wrap<'a>(
&'a mut self,
@@ -559,7 +622,7 @@ where
async fn handle_next(&mut self) -> Result<(), Error> {
let (metadata, stream) = futures::future::poll_fn(|cx| self.poll_accept(cx)).await?;
metadata.preprocessed.finish(stream);
metadata.preprocessed.finish(stream, metadata.inner);
Ok(())
}
@@ -634,8 +697,8 @@ impl<A: Accept> VHostServer<A> {
));
loop {
if let Err(e) = listener.handle_next().await {
tracing::error!("VHostServer: failed to accept connection: {e}");
tracing::debug!("{e:?}");
tracing::trace!("VHostServer: failed to accept connection: {e}");
tracing::trace!("{e:?}");
}
}
})

View File

@@ -2,6 +2,7 @@ use std::collections::BTreeMap;
use std::path::PathBuf;
use clap::Parser;
use exver::VersionRange;
use itertools::Itertools;
use rpc_toolkit::{Context, HandlerArgs, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize};
@@ -162,6 +163,37 @@ pub fn display_signers<T>(
Ok(())
}
/// Displays package signers with their authorized version ranges: as
/// serialized output when an explicit IO format was requested, otherwise
/// as a pretty-printed table with one row per signer.
pub fn display_package_signers<T>(
    params: WithIoFormat<T>,
    signers: BTreeMap<Guid, (SignerInfo, VersionRange)>,
) -> Result<(), Error> {
    use prettytable::*;
    // Machine-readable path: honor the requested format and skip the table.
    if let Some(format) = params.format {
        return display_serializable(format, signers);
    }
    let mut table = Table::new();
    // `bc` is the prettytable cell style spec for the header row.
    table.add_row(row![bc =>
        "ID",
        "NAME",
        "CONTACT",
        "KEYS",
        "AUTHORIZED VERSIONS"
    ]);
    for (id, (info, versions)) in signers {
        // Multi-valued contact/key fields are stacked on separate lines
        // within a single cell.
        table.add_row(row![
            id.as_ref(),
            &info.name,
            &info.contact.into_iter().join("\n"),
            &info.keys.into_iter().join("\n"),
            &versions.to_string()
        ]);
    }
    table.print_tty(false)?;
    Ok(())
}
pub async fn add_signer(ctx: RegistryContext, signer: SignerInfo) -> Result<Guid, Error> {
ctx.db
.mutate(|db| db.as_index_mut().as_signers_mut().add_signer(&signer))

View File

@@ -24,6 +24,7 @@ use crate::middleware::signature::SignatureAuthContext;
use crate::prelude::*;
use crate::registry::RegistryDatabase;
use crate::registry::device_info::{DEVICE_INFO_HEADER, DeviceInfo};
use crate::registry::migrations::run_migrations;
use crate::registry::signer::SignerInfo;
use crate::rpc_continuations::RpcContinuations;
use crate::sign::AnyVerifyingKey;
@@ -98,9 +99,10 @@ impl RegistryContext {
let db_path = datadir.join("registry.db");
let db = TypedPatchDb::<RegistryDatabase>::load_or_init(
PatchDb::open(&db_path).await?,
|| async { Ok(Default::default()) },
|| async { Ok(RegistryDatabase::init()) },
)
.await?;
db.mutate(|db| run_migrations(db)).await.result?;
let tor_proxy_url = config
.tor_proxy
.clone()

View File

@@ -0,0 +1,30 @@
use imbl_value::json;
use super::RegistryMigration;
use crate::prelude::*;
/// Migrates each package's `authorized` field from an array of signer
/// GUIDs to a map of signer GUID -> authorized version range, granting
/// every existing signer the wildcard range `"*"`.
pub struct PackageSignerScopeMigration;

impl RegistryMigration for PackageSignerScopeMigration {
    fn name(&self) -> &'static str {
        "PackageSignerScopeMigration"
    }
    fn action(&self, db: &mut Value) -> Result<(), Error> {
        // If the packages index is missing or not an object (e.g. an empty
        // or freshly initialized database), there is nothing to migrate.
        // The previous `.unwrap()` here would panic on such data.
        let Some(packages) = db["index"]["package"]["packages"].as_object_mut() else {
            return Ok(());
        };
        for (_, info) in packages.iter_mut() {
            let prev = info["authorized"].clone();
            // Only rewrite the old array shape; already-migrated (object)
            // or absent values are left untouched.
            if let Some(prev) = prev.as_array() {
                info["authorized"] = Value::Object(
                    prev.iter()
                        // Non-string entries are silently dropped.
                        .filter_map(|g| g.as_str())
                        .map(|g| (g.into(), json!("*")))
                        .collect(),
                )
            }
        }
        Ok(())
    }
}

View File

@@ -0,0 +1,28 @@
use patch_db::ModelExt;
use crate::prelude::*;
use crate::registry::RegistryDatabase;
mod m_00_package_signer_scope;
/// A one-time, named transformation applied to the raw registry database.
pub trait RegistryMigration {
    /// Unique, stable identifier recorded in the database once the
    /// migration has run, so it is never applied twice.
    fn name(&self) -> &'static str;
    /// Applies the migration to the raw (untyped) database value.
    fn action(&self, db: &mut Value) -> Result<(), Error>;
}

/// All known migrations, in the order they should be applied.
pub const MIGRATIONS: &[&dyn RegistryMigration] =
    &[&m_00_package_signer_scope::PackageSignerScopeMigration];
/// Runs every migration in [`MIGRATIONS`] not yet recorded in the
/// database's `migrations` set, then persists the updated set.
pub fn run_migrations(db: &mut Model<RegistryDatabase>) -> Result<(), Error> {
    let mut migrations = db.as_migrations().de()?;
    for migration in MIGRATIONS {
        // Skip migrations already applied to this database.
        if !migrations.contains(migration.name()) {
            // Mutate the untyped value directly: migrations exist precisely
            // because the stored data may not deserialize into the current
            // typed schema until after they run.
            migration.action(ModelExt::as_value_mut(db))?;
            migrations.insert(migration.name().into());
        }
    }
    // Round-trip through the typed model to record the updated migration
    // set and validate the post-migration shape of the whole database.
    let mut db_deser = db.de()?;
    db_deser.migrations = migrations;
    db.ser(&db_deser)?;
    Ok(())
}

View File

@@ -2,6 +2,7 @@ use std::collections::{BTreeMap, BTreeSet};
use axum::Router;
use futures::future::ready;
use imbl_value::InternedString;
use models::DataUrl;
use rpc_toolkit::{Context, HandlerExt, ParentHandler, Server, from_fn_async};
use serde::{Deserialize, Serialize};
@@ -26,6 +27,7 @@ pub mod context;
pub mod db;
pub mod device_info;
pub mod info;
mod migrations;
pub mod os;
pub mod package;
pub mod signer;
@@ -34,10 +36,23 @@ pub mod signer;
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
pub struct RegistryDatabase {
#[serde(default)]
pub migrations: BTreeSet<InternedString>,
pub admins: BTreeSet<Guid>,
pub index: FullIndex,
}
impl RegistryDatabase {}
impl RegistryDatabase {
    /// Builds a brand-new registry database. Every known migration is
    /// recorded as already applied, since a fresh database is created in
    /// the current schema and needs none of them.
    pub fn init() -> Self {
        let applied = migrations::MIGRATIONS
            .iter()
            .map(|migration| migration.name().into())
            .collect();
        let mut db = Self::default();
        db.migrations = applied;
        db
    }
}
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]

View File

@@ -21,7 +21,6 @@ pub fn asset_api<C: Context>() -> ParentHandler<C> {
.no_display()
.with_about("Sign file and add to registry index"),
)
// TODO: remove signature api
.subcommand(
"get",
get::get_api::<C>().with_about("Commands to download image, iso, or squashfs files"),

View File

@@ -75,7 +75,8 @@ pub async fn add_package(
.or_not_found(&manifest.id)?
.as_authorized()
.de()?
.contains(&uploader_guid)
.get(&uploader_guid)
.map_or(false, |v| manifest.version.satisfies(v))
{
let package = db
.as_index_mut()
@@ -197,7 +198,8 @@ pub async fn remove_package(
.or_not_found(&id)?
.as_authorized()
.de()?
.contains(&signer_guid)
.get(&signer_guid)
.map_or(false, |v| version.satisfies(v))
{
if let Some(package) = db
.as_index_mut()

View File

@@ -34,7 +34,8 @@ pub struct PackageIndex {
#[model = "Model<Self>"]
#[ts(export)]
pub struct PackageInfo {
pub authorized: BTreeSet<Guid>,
#[ts(as = "BTreeMap::<Guid, String>")]
pub authorized: BTreeMap<Guid, VersionRange>,
pub versions: BTreeMap<VersionString, PackageVersionInfo>,
#[ts(type = "string[]")]
pub categories: BTreeSet<InternedString>,

View File

@@ -1,6 +1,7 @@
use std::collections::BTreeMap;
use clap::Parser;
use exver::VersionRange;
use models::PackageId;
use rpc_toolkit::{Context, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize};
@@ -8,7 +9,7 @@ use ts_rs::TS;
use crate::context::CliContext;
use crate::prelude::*;
use crate::registry::admin::display_signers;
use crate::registry::admin::display_package_signers;
use crate::registry::context::RegistryContext;
use crate::registry::signer::SignerInfo;
use crate::rpc_continuations::Guid;
@@ -36,7 +37,9 @@ pub fn signer_api<C: Context>() -> ParentHandler<C> {
"list",
from_fn_async(list_package_signers)
.with_display_serializable()
.with_custom_display_fn(|handle, result| display_signers(handle.params, result))
.with_custom_display_fn(|handle, result| {
display_package_signers(handle.params, result)
})
.with_about("List package signers and related signer info")
.with_call_remote::<CliContext>(),
)
@@ -46,14 +49,21 @@ pub fn signer_api<C: Context>() -> ParentHandler<C> {
#[command(rename_all = "kebab-case")]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct PackageSignerParams {
pub struct AddPackageSignerParams {
pub id: PackageId,
pub signer: Guid,
#[arg(long)]
#[ts(type = "string | null")]
pub versions: Option<VersionRange>,
}
pub async fn add_package_signer(
ctx: RegistryContext,
PackageSignerParams { id, signer }: PackageSignerParams,
AddPackageSignerParams {
id,
signer,
versions,
}: AddPackageSignerParams,
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
@@ -69,7 +79,7 @@ pub async fn add_package_signer(
.as_idx_mut(&id)
.or_not_found(&id)?
.as_authorized_mut()
.mutate(|s| Ok(s.insert(signer)))?;
.insert(&signer, &versions.unwrap_or_default())?;
Ok(())
})
@@ -77,20 +87,30 @@ pub async fn add_package_signer(
.result
}
#[derive(Debug, Deserialize, Serialize, Parser, TS)]
#[command(rename_all = "kebab-case")]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct RemovePackageSignerParams {
pub id: PackageId,
pub signer: Guid,
}
pub async fn remove_package_signer(
ctx: RegistryContext,
PackageSignerParams { id, signer }: PackageSignerParams,
RemovePackageSignerParams { id, signer }: RemovePackageSignerParams,
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
if !db
if db
.as_index_mut()
.as_package_mut()
.as_packages_mut()
.as_idx_mut(&id)
.or_not_found(&id)?
.as_authorized_mut()
.mutate(|s| Ok(s.remove(&signer)))?
.remove(&signer)?
.is_some()
{
return Err(Error::new(
eyre!("signer {signer} is not authorized to sign for {id}"),
@@ -115,7 +135,7 @@ pub struct ListPackageSignersParams {
pub async fn list_package_signers(
ctx: RegistryContext,
ListPackageSignersParams { id }: ListPackageSignersParams,
) -> Result<BTreeMap<Guid, SignerInfo>, Error> {
) -> Result<BTreeMap<Guid, (SignerInfo, VersionRange)>, Error> {
let db = ctx.db.peek().await;
db.as_index()
.as_package()
@@ -125,11 +145,11 @@ pub async fn list_package_signers(
.as_authorized()
.de()?
.into_iter()
.filter_map(|guid| {
.filter_map(|(guid, versions)| {
db.as_index()
.as_signers()
.as_idx(&guid)
.map(|s| s.de().map(|s| (guid, s)))
.map(|s| s.de().map(|s| (guid, (s, versions))))
})
.collect()
}

View File

@@ -151,6 +151,8 @@ pub struct PackParams {
pub assets: Option<PathBuf>,
#[arg(long, conflicts_with = "assets")]
pub no_assets: bool,
#[arg(long, help = "Architecture Mask")]
pub arch: Vec<InternedString>,
}
impl PackParams {
fn path(&self) -> &Path {
@@ -416,8 +418,6 @@ impl ImageSource {
"--platform=linux/amd64".to_owned()
} else if arch == "aarch64" {
"--platform=linux/arm64".to_owned()
} else if arch == "riscv64" {
"--platform=linux/riscv64".to_owned()
} else {
format!("--platform=linux/{arch}")
};
@@ -480,43 +480,29 @@ impl ImageSource {
"--platform=linux/amd64".to_owned()
} else if arch == "aarch64" {
"--platform=linux/arm64".to_owned()
} else if arch == "riscv64" {
"--platform=linux/riscv64".to_owned()
} else {
format!("--platform=linux/{arch}")
};
let mut inspect_cmd = Command::new(CONTAINER_TOOL);
inspect_cmd
.arg("image")
.arg("inspect")
.arg("--format")
.arg("{{json .Config}}")
.arg(&tag);
let inspect_res = match inspect_cmd.invoke(ErrorKind::Docker).await {
Ok(a) => a,
Err(e)
if {
let msg = e.source.to_string();
#[cfg(feature = "docker")]
let matches = msg.contains("No such image:");
#[cfg(not(feature = "docker"))]
let matches = msg.contains(": image not known");
matches
} =>
{
Command::new(CONTAINER_TOOL)
.arg("pull")
.arg(&docker_platform)
.arg(tag)
.capture(false)
.invoke(ErrorKind::Docker)
.await?;
inspect_cmd.invoke(ErrorKind::Docker).await?
}
Err(e) => return Err(e),
};
let config = serde_json::from_slice::<DockerImageConfig>(&inspect_res)
.with_kind(ErrorKind::Deserialization)?;
let container = String::from_utf8(
Command::new(CONTAINER_TOOL)
.arg("create")
.arg(&docker_platform)
.arg(&tag)
.invoke(ErrorKind::Docker)
.await?,
)?;
let container = container.trim();
let config = serde_json::from_slice::<DockerImageConfig>(
&Command::new(CONTAINER_TOOL)
.arg("container")
.arg("inspect")
.arg("--format")
.arg("{{json .Config}}")
.arg(container)
.invoke(ErrorKind::Docker)
.await?,
)
.with_kind(ErrorKind::Deserialization)?;
let base_path = Path::new("images").join(arch).join(image_id);
into.insert_path(
base_path.with_extension("json"),
@@ -558,25 +544,17 @@ impl ImageSource {
let dest = tmp_dir
.join(Guid::new().as_ref())
.with_extension("squashfs");
let container = String::from_utf8(
Command::new(CONTAINER_TOOL)
.arg("create")
.arg(&docker_platform)
.arg("--entrypoint=/bin/sh")
.arg(&tag)
.invoke(ErrorKind::Docker)
.await?,
)?;
Command::new(CONTAINER_TOOL)
.arg("export")
.arg(container.trim())
.arg(container)
.pipe(&mut tar2sqfs(&dest)?)
.capture(false)
.invoke(ErrorKind::Docker)
.await?;
Command::new(CONTAINER_TOOL)
.arg("rm")
.arg(container.trim())
.arg(container)
.invoke(ErrorKind::Docker)
.await?;
into.insert_path(
@@ -686,7 +664,24 @@ pub async fn pack(ctx: CliContext, params: PackParams) -> Result<(), Error> {
)
.await?;
s9pk.as_manifest_mut().git_hash = Some(GitHash::from_path(params.path()).await?);
let manifest = s9pk.as_manifest_mut();
manifest.git_hash = Some(GitHash::from_path(params.path()).await?);
if !params.arch.is_empty() {
let arches = match manifest.hardware_requirements.arch.take() {
Some(a) => params
.arch
.iter()
.filter(|x| a.contains(*x))
.cloned()
.collect(),
None => params.arch.iter().cloned().collect(),
};
manifest
.images
.values_mut()
.for_each(|c| c.arch = c.arch.intersection(&arches).cloned().collect());
manifest.hardware_requirements.arch = Some(arches);
}
if !params.no_assets {
let assets_dir = params.assets();

View File

@@ -13,7 +13,7 @@ use crate::db::model::package::{
TaskEntry,
};
use crate::disk::mount::filesystem::bind::{Bind, FileType};
use crate::disk::mount::filesystem::idmapped::IdMapped;
use crate::disk::mount::filesystem::idmapped::{IdMap, IdMapped};
use crate::disk::mount::filesystem::{FileSystem, MountType};
use crate::disk::mount::util::{is_mountpoint, unmount};
use crate::service::effects::prelude::*;
@@ -28,8 +28,13 @@ pub struct MountTarget {
volume_id: VolumeId,
subpath: Option<PathBuf>,
readonly: bool,
#[serde(skip_deserializing)]
#[ts(skip)]
filetype: FileType,
#[serde(default)]
idmap: Vec<IdMap>,
}
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
@@ -48,6 +53,7 @@ pub async fn mount(
subpath,
readonly,
filetype,
idmap,
},
}: MountParams,
) -> Result<(), Error> {
@@ -68,16 +74,27 @@ pub async fn mount(
if is_mountpoint(&mountpoint).await? {
unmount(&mountpoint, true).await?;
}
IdMapped::new(Bind::new(source).with_type(filetype), 0, 100000, 65536)
.mount(
mountpoint,
if readonly {
MountType::ReadOnly
} else {
MountType::ReadWrite
},
)
.await?;
IdMapped::new(
Bind::new(source).with_type(filetype),
IdMap::stack(
vec![IdMap {
from_id: 0,
to_id: 100000,
range: 65536,
}],
idmap,
),
)
.mount(
mountpoint,
if readonly {
MountType::ReadOnly
} else {
MountType::ReadWrite
},
)
.await?;
Ok(())
}

View File

@@ -11,14 +11,14 @@ use crate::service::effects::prelude::*;
use crate::service::persistent_container::Subcontainer;
use crate::util::Invoke;
#[cfg(any(feature = "cli-container", feature = "startd"))]
#[cfg(all(feature = "pty-process", feature = "procfs"))]
mod sync;
#[cfg(not(any(feature = "cli-container", feature = "startd")))]
#[cfg(not(all(feature = "pty-process", feature = "procfs")))]
mod sync_dummy;
pub use sync::*;
#[cfg(not(any(feature = "cli-container", feature = "startd")))]
#[cfg(not(all(feature = "pty-process", feature = "procfs")))]
use sync_dummy as sync;
#[derive(Debug, Deserialize, Serialize, Parser, TS)]
@@ -41,7 +41,7 @@ pub async fn destroy_subcontainer_fs(
.await
.remove(&guid)
{
#[cfg(feature = "startd")]
#[cfg(all(feature = "pty-process", feature = "procfs"))]
if tokio::fs::metadata(overlay.overlay.path().join("proc/1"))
.await
.is_ok()

View File

@@ -7,6 +7,7 @@ use std::path::{Path, PathBuf};
use std::process::{Command as StdCommand, Stdio};
use std::sync::Arc;
use nix::errno::Errno;
use nix::sched::CloneFlags;
use nix::unistd::Pid;
use signal_hook::consts::signal::*;
@@ -134,6 +135,80 @@ impl ExecParams {
ErrorKind::InvalidRequest,
));
};
let mut cmd = StdCommand::new(command);
let passwd = std::fs::read_to_string(chroot.join("etc/passwd"))
.with_ctx(|_| (ErrorKind::Filesystem, "read /etc/passwd"))
.log_err()
.unwrap_or_default();
let mut home = None;
if let Some((uid, gid)) =
if let Some(uid) = user.as_deref().and_then(|u| u.parse::<u32>().ok()) {
Some((uid, uid))
} else if let Some((uid, gid)) = user
.as_deref()
.and_then(|u| u.split_once(":"))
.and_then(|(u, g)| Some((u.parse::<u32>().ok()?, g.parse::<u32>().ok()?)))
{
Some((uid, gid))
} else if let Some(user) = user {
Some(
if let Some((uid, gid)) = passwd.lines().find_map(|l| {
let l = l.trim();
let mut split = l.split(":");
if user != split.next()? {
return None;
}
split.next(); // throw away x
let uid = split.next()?.parse().ok()?;
let gid = split.next()?.parse().ok()?;
split.next(); // throw away group name
home = split.next();
Some((uid, gid))
// uid gid
}) {
(uid, gid)
} else if user == "root" {
(0, 0)
} else {
None.or_not_found(lazy_format!("{user} in /etc/passwd"))?
},
)
} else {
None
}
{
if home.is_none() {
home = passwd.lines().find_map(|l| {
let l = l.trim();
let mut split = l.split(":");
split.next(); // throw away user name
split.next(); // throw away x
if split.next()?.parse::<u32>().ok()? != uid {
return None;
}
split.next(); // throw away gid
split.next(); // throw away group name
split.next()
})
};
std::os::unix::fs::chown("/proc/self/fd/0", Some(uid), Some(gid)).log_err();
std::os::unix::fs::chown("/proc/self/fd/1", Some(uid), Some(gid)).log_err();
std::os::unix::fs::chown("/proc/self/fd/2", Some(uid), Some(gid)).log_err();
cmd.uid(uid);
cmd.gid(gid);
} else {
home = Some("/root");
}
cmd.env("HOME", home.unwrap_or("/"));
let env_string = if let Some(env_file) = &env_file {
std::fs::read_to_string(env_file)
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("read {env:?}")))?
@@ -148,45 +223,11 @@ impl ExecParams {
.collect::<BTreeMap<_, _>>();
std::os::unix::fs::chroot(chroot)
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("chroot {chroot:?}")))?;
let mut cmd = StdCommand::new(command);
cmd.args(args);
for (k, v) in env {
cmd.env(k, v);
}
if let Some((uid, gid)) =
if let Some(uid) = user.as_deref().and_then(|u| u.parse::<u32>().ok()) {
Some((uid, uid))
} else if let Some(user) = user {
let passwd = std::fs::read_to_string("/etc/passwd")
.with_ctx(|_| (ErrorKind::Filesystem, "read /etc/passwd"));
Some(if passwd.is_err() && user == "root" {
(0, 0)
} else {
let (uid, gid) = passwd?
.lines()
.find_map(|l| {
let mut split = l.trim().split(":");
if user != split.next()? {
return None;
}
split.next(); // throw away x
Some((split.next()?.parse().ok()?, split.next()?.parse().ok()?))
// uid gid
})
.or_not_found(lazy_format!("{user} in /etc/passwd"))?;
(uid, gid)
})
} else {
None
}
{
std::os::unix::fs::chown("/proc/self/fd/0", Some(uid), Some(gid)).log_err();
std::os::unix::fs::chown("/proc/self/fd/1", Some(uid), Some(gid)).log_err();
std::os::unix::fs::chown("/proc/self/fd/2", Some(uid), Some(gid)).log_err();
cmd.uid(uid);
cmd.gid(gid);
}
if let Some(workdir) = workdir {
cmd.current_dir(workdir);
} else {
@@ -218,11 +259,14 @@ pub fn launch(
std::thread::spawn(move || {
if let Ok(pid) = recv_pid.blocking_recv() {
for sig in sig.forever() {
nix::sys::signal::kill(
match nix::sys::signal::kill(
Pid::from_raw(pid),
Some(nix::sys::signal::Signal::try_from(sig).unwrap()),
)
.unwrap();
) {
Err(Errno::ESRCH) => Ok(()),
a => a,
}
.unwrap()
}
}
});
@@ -322,9 +366,9 @@ pub fn launch(
send_pid.send(child.id() as i32).unwrap_or_default();
if let Some(pty_size) = pty_size {
let size = if let Some((x, y)) = pty_size.pixels {
::pty_process::Size::new_with_pixel(pty_size.size.0, pty_size.size.1, x, y)
::pty_process::Size::new_with_pixel(pty_size.rows, pty_size.cols, x, y)
} else {
::pty_process::Size::new(pty_size.size.0, pty_size.size.1)
::pty_process::Size::new(pty_size.rows, pty_size.cols)
};
pty.resize(size).with_kind(ErrorKind::Filesystem)?;
}
@@ -579,9 +623,9 @@ pub fn exec(
send_pid.send(child.id() as i32).unwrap_or_default();
if let Some(pty_size) = pty_size {
let size = if let Some((x, y)) = pty_size.pixels {
::pty_process::Size::new_with_pixel(pty_size.size.0, pty_size.size.1, x, y)
::pty_process::Size::new_with_pixel(pty_size.rows, pty_size.cols, x, y)
} else {
::pty_process::Size::new(pty_size.size.0, pty_size.size.1)
::pty_process::Size::new(pty_size.rows, pty_size.cols)
};
pty.resize(size).with_kind(ErrorKind::Filesystem)?;
}

View File

@@ -73,11 +73,6 @@ pub const SYNC_RETRY_COOLDOWN_SECONDS: u64 = 10;
pub type Task<'a> = BoxFuture<'a, Result<(), Error>>;
/// TODO
pub enum BackupReturn {
TODO,
}
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum LoadDisposition {
Retry,
@@ -224,6 +219,17 @@ impl Service {
recovery_source: Option<impl GenericMountGuard>,
) -> Result<ServiceRef, Error> {
let id = s9pk.as_manifest().id.clone();
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&id)
.or_not_found(&id)?
.as_status_info_mut()
.init()
})
.await
.result?;
let persistent_container = PersistentContainer::new(&ctx, s9pk).await?;
let seed = Arc::new(ServiceActorSeed {
id,
@@ -532,8 +538,16 @@ impl Service {
.or_not_found(&manifest.id)?;
let actions = entry.as_actions().keys()?;
if entry.as_tasks_mut().mutate(|t| {
t.retain(|_, v| {
v.task.package_id != manifest.id || actions.contains(&v.task.action_id)
t.retain(|id, v| {
v.task.package_id != manifest.id
|| if actions.contains(&v.task.action_id) {
true
} else {
tracing::warn!(
"Deleting task {id} because action no longer exists"
);
false
}
});
Ok(t.iter()
.any(|(_, t)| t.active && t.task.severity == TaskSeverity::Critical))
@@ -570,6 +584,15 @@ impl Service {
.await?;
file.save().await.with_kind(ErrorKind::Filesystem)?;
// TODO: reverify?
let backup = self
.actor
.send(
Guid::new(),
transition::backup::Backup {
path: guard.path().to_owned(),
},
)
.await??;
self.seed
.ctx
.db
@@ -584,7 +607,8 @@ impl Service {
})
.await
.result?;
Ok(())
backup.await
}
pub fn container_id(&self) -> Result<ContainerId, Error> {

View File

@@ -1,5 +1,4 @@
use std::collections::{BTreeMap, BTreeSet};
use std::ops::Deref;
use std::path::Path;
use std::sync::{Arc, Weak};
use std::time::Duration;
@@ -18,7 +17,7 @@ use tracing::instrument;
use crate::context::RpcContext;
use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::idmapped::IdMapped;
use crate::disk::mount::filesystem::idmapped::{IdMap, IdMapped};
use crate::disk::mount::filesystem::loop_dev::LoopDev;
use crate::disk::mount::filesystem::overlayfs::OverlayGuard;
use crate::disk::mount::filesystem::{MountType, ReadOnly};
@@ -135,9 +134,11 @@ impl PersistentContainer {
let mount = MountGuard::mount(
&IdMapped::new(
Bind::new(data_dir(DATA_DIR, &s9pk.as_manifest().id, volume)),
0,
100000,
65536,
vec![IdMap {
from_id: 0,
to_id: 100000,
range: 65536,
}],
),
mountpoint,
MountType::ReadWrite,
@@ -155,7 +156,14 @@ impl PersistentContainer {
{
vec![
MountGuard::mount(
&IdMapped::new(LoopDev::from(&**sqfs), 0, 100000, 65536),
&IdMapped::new(
LoopDev::from(&**sqfs),
vec![IdMap {
from_id: 0,
to_id: 100000,
range: 65536,
}],
),
mountpoint,
MountType::ReadWrite,
)
@@ -179,7 +187,14 @@ impl PersistentContainer {
};
assets.push(
MountGuard::mount(
&IdMapped::new(LoopDev::from(&**sqfs), 0, 100000, 65536),
&IdMapped::new(
LoopDev::from(&**sqfs),
vec![IdMap {
from_id: 0,
to_id: 100000,
range: 65536,
}],
),
mountpoint,
MountType::ReadWrite,
)
@@ -228,7 +243,14 @@ impl PersistentContainer {
image.clone(),
Arc::new(
MountGuard::mount(
&IdMapped::new(LoopDev::from(&**sqfs), 0, 100000, 65536),
&IdMapped::new(
LoopDev::from(&**sqfs),
vec![IdMap {
from_id: 0,
to_id: 100000,
range: 65536,
}],
),
&mountpoint,
ReadOnly,
)
@@ -396,7 +418,6 @@ impl PersistentContainer {
#[instrument(skip_all)]
fn destroy(
&mut self,
error: bool,
uninit: Option<ExitParams>,
) -> Option<impl Future<Output = Result<(), Error>> + 'static> {
if self.destroyed {
@@ -414,24 +435,6 @@ impl PersistentContainer {
self.destroyed = true;
Some(async move {
let mut errs = ErrorCollection::new();
if error {
if let Some(lxc_container) = &lxc_container {
if let Some(logs) = errs.handle(
crate::logs::fetch_logs(
crate::logs::LogSource::Container(lxc_container.guid.deref().clone()),
Some(50),
None,
None,
false,
)
.await,
) {
for log in logs.entries.iter() {
eprintln!("{log}");
}
}
}
}
if let Some((hdl, shutdown)) = rpc_server {
errs.handle(
rpc_client
@@ -466,7 +469,7 @@ impl PersistentContainer {
#[instrument(skip_all)]
pub async fn exit(mut self, uninit: Option<ExitParams>) -> Result<(), Error> {
if let Some(destroy) = self.destroy(false, uninit) {
if let Some(destroy) = self.destroy(uninit) {
destroy.await?;
}
tracing::info!("Service for {} exited", self.s9pk.as_manifest().id);
@@ -584,7 +587,7 @@ impl PersistentContainer {
impl Drop for PersistentContainer {
fn drop(&mut self) {
if let Some(destroy) = self.destroy(true, None) {
if let Some(destroy) = self.destroy(None) {
tokio::spawn(async move { destroy.await.log_err() });
}
}

View File

@@ -36,7 +36,26 @@ impl ServiceActorSeed {
pub fn stop(&self) -> Transition<'_> {
Transition {
kind: TransitionKind::Stopping,
future: self.persistent_container.stop().boxed(),
future: async {
self.persistent_container.stop().await?;
let id = &self.id;
self.ctx
.db
.mutate(|db| {
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(id)
.or_not_found(id)?
.as_status_info_mut()
.as_started_mut()
.ser(&None)
})
.await
.result?;
Ok(())
}
.boxed(),
}
}
}

View File

@@ -3,6 +3,7 @@ use std::path::Path;
use models::PackageId;
use crate::context::RpcContext;
use crate::db::model::package::{InstalledState, InstallingInfo, InstallingState, PackageState};
use crate::prelude::*;
use crate::volume::PKG_VOLUME_DIR;
use crate::{DATA_DIR, PACKAGE_DATA};
@@ -43,18 +44,37 @@ pub async fn cleanup(ctx: &RpcContext, id: &PackageId, soft: bool) -> Result<(),
.await
.result?
{
let state = pde.state_info.expect_removing()?;
let manifest = match pde.state_info {
PackageState::Installing(InstallingState {
installing_info:
InstallingInfo {
new_manifest: manifest,
..
},
})
| PackageState::Restoring(InstallingState {
installing_info:
InstallingInfo {
new_manifest: manifest,
..
},
})
| PackageState::Removing(InstalledState { manifest }) => manifest,
s => {
return Err(Error::new(
eyre!("Invalid package state for cleanup: {s:?}"),
ErrorKind::InvalidRequest,
));
}
};
if !soft {
let path = Path::new(DATA_DIR)
.join(PKG_VOLUME_DIR)
.join(&state.manifest.id);
let path = Path::new(DATA_DIR).join(PKG_VOLUME_DIR).join(&manifest.id);
if tokio::fs::metadata(&path).await.is_ok() {
tokio::fs::remove_dir_all(&path).await?;
}
let logs_dir = Path::new(PACKAGE_DATA)
.join("logs")
.join(&state.manifest.id);
let logs_dir = Path::new(PACKAGE_DATA).join("logs").join(&manifest.id);
if tokio::fs::metadata(&logs_dir).await.is_ok() {
#[cfg(not(feature = "dev"))]
tokio::fs::remove_dir_all(&logs_dir).await?;
}
}

View File

@@ -33,6 +33,23 @@ impl Model<StatusInfo> {
self.as_health_mut().ser(&Default::default())?;
Ok(())
}
pub fn init(&mut self) -> Result<(), Error> {
self.as_started_mut().ser(&None)?;
self.as_desired_mut().map_mutate(|s| {
Ok(match s {
DesiredStatus::BackingUp {
on_complete: StartStop::Start,
} => DesiredStatus::Running,
DesiredStatus::BackingUp {
on_complete: StartStop::Stop,
} => DesiredStatus::Stopped,
DesiredStatus::Restarting => DesiredStatus::Running,
x => x,
})
})?;
Ok(())
}
}
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, TS)]

View File

@@ -1,7 +1,7 @@
# StartTunnel config for {name}
[Interface]
Address = {addr}/24
Address = {addr}
PrivateKey = {privkey}
[Peer]
@@ -9,4 +9,4 @@ PublicKey = {server_pubkey}
PresharedKey = {psk}
AllowedIPs = {subnet}
Endpoint = {server_addr}
PersistentKeepalive = 25
PersistentKeepalive = 25

View File

@@ -2,13 +2,14 @@ use std::collections::{BTreeMap, BTreeSet};
use std::net::{IpAddr, SocketAddr, SocketAddrV4};
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::sync::{Arc, OnceLock};
use clap::Parser;
use cookie::{Cookie, Expiration, SameSite};
use http::HeaderMap;
use imbl::OrdMap;
use imbl_value::InternedString;
use include_dir::Dir;
use models::GatewayId;
use patch_db::PatchDb;
use rpc_toolkit::yajrc::RpcError;
@@ -23,11 +24,10 @@ use crate::auth::Sessions;
use crate::context::config::ContextConfig;
use crate::context::{CliContext, RpcContext};
use crate::db::model::public::{NetworkInterfaceInfo, NetworkInterfaceType};
use crate::else_empty_dir;
use crate::middleware::auth::{Auth, AuthContext};
use crate::middleware::cors::Cors;
use crate::net::forward::PortForwardController;
use crate::net::static_server::UiContext;
use crate::net::static_server::{EMPTY_DIR, UiContext};
use crate::prelude::*;
use crate::rpc_continuations::{OpenAuthedContinuations, RpcContinuations};
use crate::tunnel::TUNNEL_DEFAULT_LISTEN;
@@ -321,11 +321,12 @@ impl CallRemote<TunnelContext, TunnelUrlParams> for RpcContext {
}
}
pub static TUNNEL_UI_CELL: OnceLock<Dir<'static>> = OnceLock::new();
impl UiContext for TunnelContext {
const UI_DIR: &'static include_dir::Dir<'static> = &else_empty_dir!(
feature = "tunnel" =>
include_dir::include_dir!("$CARGO_MANIFEST_DIR/../../web/dist/static/start-tunnel")
);
fn ui_dir() -> &'static Dir<'static> {
TUNNEL_UI_CELL.get().unwrap_or(&EMPTY_DIR)
}
fn api() -> ParentHandler<Self> {
tracing::info!("loading tunnel api...");
tunnel_api()

View File

@@ -27,7 +27,7 @@ use crate::tunnel::auth::SetPasswordParams;
use crate::tunnel::context::TunnelContext;
use crate::tunnel::db::TunnelDatabase;
use crate::util::serde::{HandlerExtSerde, Pem, display_serializable};
use crate::util::tui::{choose, choose_custom_display, parse_as, prompt, prompt_multiline};
use crate::util::tui::{choose, parse_as, prompt, prompt_multiline};
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]

View File

@@ -228,8 +228,11 @@ impl std::fmt::Display for ClientConfig {
name = self.client_config.name,
privkey = self.client_config.key.to_padded_string(),
psk = self.client_config.psk.to_padded_string(),
addr = self.client_addr,
subnet = self.subnet,
addr = Ipv4Net::new_assert(
self.client_addr,
self.subnet.prefix_len()
),
subnet = self.subnet.trunc(),
server_pubkey = self.server_pubkey.to_padded_string(),
server_addr = self.server_addr,
)

View File

@@ -92,7 +92,6 @@ impl<A: Actor + Clone> Future for ConcurrentRunner<A> {
#[allow(clippy::let_underscore_future)]
let (_, _, f, reply, _) = this.handlers.swap_remove(idx);
reply.send(res).ok();
// TODO: replace with Vec::extract_if once stable
if this.shutdown.is_some() {
let mut i = 0;
while i < this.waiting.len() {

View File

@@ -1477,14 +1477,16 @@ impl<T: std::io::Read> std::io::Read for SharedIO<T> {
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
pub struct TermSize {
pub size: (u16, u16),
pub pixels: Option<(u16, u16)>,
pub rows: u16,
pub cols: u16,
pub pixels: Option<(u16, u16)>, // x, y
}
impl TermSize {
pub fn get_current() -> Option<Self> {
if let Some(size) = termion::terminal_size().ok() {
if let Some((cols, rows)) = termion::terminal_size().log_err() {
Some(Self {
size,
rows,
cols,
pixels: termion::terminal_size_pixels().ok(),
})
} else {
@@ -1497,9 +1499,8 @@ impl FromStr for TermSize {
fn from_str(s: &str) -> Result<Self, Self::Err> {
(|| {
let mut split = s.split(":");
let row: u16 = split.next()?.parse().ok()?;
let col: u16 = split.next()?.parse().ok()?;
let size = (row, col);
let rows: u16 = split.next()?.parse().ok()?;
let cols: u16 = split.next()?.parse().ok()?;
let pixels = if let Some(x) = split.next() {
let x: u16 = x.parse().ok()?;
let y: u16 = split.next()?.parse().ok()?;
@@ -1508,14 +1509,14 @@ impl FromStr for TermSize {
None
};
Some(Self { size, pixels }).filter(|_| split.next().is_none())
Some(Self { rows, cols, pixels }).filter(|_| split.next().is_none())
})()
.ok_or_else(|| Error::new(eyre!("invalid pty size"), ErrorKind::ParseNumber))
}
}
impl std::fmt::Display for TermSize {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}:{}", self.size.0, self.size.1)?;
write!(f, "{}:{}", self.rows, self.cols)?;
if let Some(pixels) = self.pixels {
write!(f, ":{}:{}", pixels.0, pixels.1)?;
}

View File

@@ -28,7 +28,7 @@ impl VersionT for Version {
&V0_3_0_COMPAT
}
#[instrument(skip_all)]
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {

View File

@@ -3,7 +3,7 @@ Description=StartOS Registry
[Service]
Type=simple
Environment=RUST_LOG=startos=debug,patch_db=warn
Environment=RUST_LOG=startos=debug,patch_db=warn,models=debug
ExecStart=/usr/bin/start-registryd
Restart=always
RestartSec=3

View File

@@ -3,7 +3,7 @@ Description=StartTunnel
[Service]
Type=simple
Environment=RUST_LOG=startos=debug,patch_db=warn
Environment=RUST_LOG=startos=debug,patch_db=warn,models=debug
ExecStart=/usr/bin/start-tunneld
Restart=always
RestartSec=3

View File

@@ -3,7 +3,7 @@ Description=StartOS Daemon
[Service]
Type=simple
Environment=RUST_LOG=startos=debug,patch_db=warn
Environment=RUST_LOG=startos=debug,patch_db=warn,models=debug
ExecStart=/usr/bin/startd
Restart=always
RestartSec=3