with todos

This commit is contained in:
Aiden McClelland
2025-08-15 16:07:23 -06:00
parent bfc88a2225
commit 8f573386c6
67 changed files with 4084 additions and 9988 deletions

3
.gitignore vendored
View File

@@ -1,8 +1,5 @@
.DS_Store
.idea
system-images/binfmt/binfmt.tar
system-images/compat/compat.tar
system-images/util/util.tar
/*.img
/*.img.gz
/*.img.xz

View File

@@ -1,3 +1,5 @@
ls-files = $(shell git ls-files --cached --others --exclude-standard $1)
PLATFORM_FILE := $(shell ./check-platform.sh)
ENVIRONMENT_FILE := $(shell ./check-environment.sh)
GIT_HASH_FILE := $(shell ./check-git-hash.sh)
@@ -9,22 +11,19 @@ IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo
WEB_UIS := web/dist/raw/ui/index.html web/dist/raw/setup-wizard/index.html web/dist/raw/install-wizard/index.html
COMPRESSED_WEB_UIS := web/dist/static/ui/index.html web/dist/static/setup-wizard/index.html web/dist/static/install-wizard/index.html
FIRMWARE_ROMS := ./firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json)
BUILD_SRC := $(shell git ls-files build) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS)
DEBIAN_SRC := $(shell git ls-files debian/)
IMAGE_RECIPE_SRC := $(shell git ls-files image-recipe/)
BUILD_SRC := $(call ls-files, build) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS)
DEBIAN_SRC := $(call ls-files, debian/)
IMAGE_RECIPE_SRC := $(call ls-files, image-recipe/)
STARTD_SRC := core/startos/startd.service $(BUILD_SRC)
COMPAT_SRC := $(shell git ls-files system-images/compat/)
UTILS_SRC := $(shell git ls-files system-images/utils/)
BINFMT_SRC := $(shell git ls-files system-images/binfmt/)
CORE_SRC := $(shell git ls-files core) $(shell git ls-files --recurse-submodules patch-db) $(GIT_HASH_FILE)
WEB_SHARED_SRC := $(shell git ls-files web/projects/shared) $(shell git ls-files web/projects/marketplace) $(shell ls -p web/ | grep -v / | sed 's/^/web\//g') web/node_modules/.package-lock.json web/config.json patch-db/client/dist/index.js sdk/baseDist/package.json web/patchdb-ui-seed.json sdk/dist/package.json
WEB_UI_SRC := $(shell git ls-files web/projects/ui)
WEB_SETUP_WIZARD_SRC := $(shell git ls-files web/projects/setup-wizard)
WEB_INSTALL_WIZARD_SRC := $(shell git ls-files web/projects/install-wizard)
CORE_SRC := $(call ls-files, core) $(shell git ls-files --recurse-submodules patch-db) $(GIT_HASH_FILE)
WEB_SHARED_SRC := $(call ls-files, web/projects/shared) $(call ls-files, web/projects/marketplace) $(shell ls -p web/ | grep -v / | sed 's/^/web\//g') web/node_modules/.package-lock.json web/config.json patch-db/client/dist/index.js sdk/baseDist/package.json web/patchdb-ui-seed.json sdk/dist/package.json
WEB_UI_SRC := $(call ls-files, web/projects/ui)
WEB_SETUP_WIZARD_SRC := $(call ls-files, web/projects/setup-wizard)
WEB_INSTALL_WIZARD_SRC := $(call ls-files, web/projects/install-wizard)
PATCH_DB_CLIENT_SRC := $(shell git ls-files --recurse-submodules patch-db/client)
GZIP_BIN := $(shell which pigz || which gzip)
TAR_BIN := $(shell which gtar || which tar)
COMPILED_TARGETS := core/target/$(ARCH)-unknown-linux-musl/release/startbox core/target/$(ARCH)-unknown-linux-musl/release/containerbox system-images/compat/docker-images/$(ARCH).tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar container-runtime/rootfs.$(ARCH).squashfs
COMPILED_TARGETS := core/target/$(ARCH)-unknown-linux-musl/release/startbox core/target/$(ARCH)-unknown-linux-musl/release/containerbox container-runtime/rootfs.$(ARCH).squashfs
ALL_TARGETS := $(STARTD_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) cargo-deps/$(ARCH)-unknown-linux-musl/release/startos-backup-fs $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo cargo-deps/aarch64-unknown-linux-musl/release/pi-beep; fi) $(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]; then echo cargo-deps/$(ARCH)-unknown-linux-musl/release/tokio-console; fi') $(PLATFORM_FILE)
REBUILD_TYPES = 1
@@ -59,8 +58,6 @@ touch:
metadata: $(VERSION_FILE) $(PLATFORM_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE)
clean:
rm -f system-images/**/*.tar
rm -rf system-images/compat/target
rm -rf core/target
rm -rf core/startos/bindings
rm -rf web/.angular
@@ -95,10 +92,10 @@ test: | test-core test-sdk test-container-runtime
test-core: $(CORE_SRC) $(ENVIRONMENT_FILE)
./core/run-tests.sh
test-sdk: $(shell git ls-files sdk) sdk/base/lib/osBindings/index.ts
test-sdk: $(call ls-files, sdk) sdk/base/lib/osBindings/index.ts
cd sdk && make test
test-container-runtime: container-runtime/node_modules/.package-lock.json $(shell git ls-files container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json
test-container-runtime: container-runtime/node_modules/.package-lock.json $(call ls-files, container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json
cd container-runtime && npm test
cli:
@@ -151,10 +148,6 @@ install: $(ALL_TARGETS)
$(call cp,GIT_HASH.txt,$(DESTDIR)/usr/lib/startos/GIT_HASH.txt)
$(call cp,VERSION.txt,$(DESTDIR)/usr/lib/startos/VERSION.txt)
$(call mkdir,$(DESTDIR)/usr/lib/startos/system-images)
$(call cp,system-images/compat/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/compat.tar)
$(call cp,system-images/utils/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/utils.tar)
$(call cp,firmware/$(PLATFORM),$(DESTDIR)/usr/lib/startos/firmware)
update-overlay: $(ALL_TARGETS)
@@ -237,20 +230,20 @@ sdk/base/lib/osBindings/index.ts: $(shell if [ "$(REBUILD_TYPES)" -ne 0 ]; then
rsync -ac --delete core/startos/bindings/ sdk/base/lib/osBindings/
touch sdk/base/lib/osBindings/index.ts
core/startos/bindings/index.ts: $(shell git ls-files core) $(ENVIRONMENT_FILE)
core/startos/bindings/index.ts: $(call ls-files, core) $(ENVIRONMENT_FILE)
rm -rf core/startos/bindings
./core/build-ts.sh
ls core/startos/bindings/*.ts | sed 's/core\/startos\/bindings\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' | grep -v '"./index"' | tee core/startos/bindings/index.ts
npm --prefix sdk exec -- prettier --config ./sdk/base/package.json -w ./core/startos/bindings/*.ts
touch core/startos/bindings/index.ts
sdk/dist/package.json sdk/baseDist/package.json: $(shell git ls-files sdk) sdk/base/lib/osBindings/index.ts
sdk/dist/package.json sdk/baseDist/package.json: $(call ls-files, sdk) sdk/base/lib/osBindings/index.ts
(cd sdk && make bundle)
touch sdk/dist/package.json
touch sdk/baseDist/package.json
# TODO: make container-runtime its own makefile?
container-runtime/dist/index.js: container-runtime/node_modules/.package-lock.json $(shell git ls-files container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json
container-runtime/dist/index.js: container-runtime/node_modules/.package-lock.json $(call ls-files, container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json
npm --prefix container-runtime run build
container-runtime/dist/node_modules/.package-lock.json container-runtime/dist/package.json container-runtime/dist/package-lock.json: container-runtime/package.json container-runtime/package-lock.json sdk/dist/package.json container-runtime/install-dist-deps.sh
@@ -266,15 +259,6 @@ build/lib/depends build/lib/conflicts: build/dpkg-deps/*
$(FIRMWARE_ROMS): build/lib/firmware.json download-firmware.sh $(PLATFORM_FILE)
./download-firmware.sh $(PLATFORM)
system-images/compat/docker-images/$(ARCH).tar: $(COMPAT_SRC)
cd system-images/compat && make docker-images/$(ARCH).tar && touch docker-images/$(ARCH).tar
system-images/utils/docker-images/$(ARCH).tar: $(UTILS_SRC)
cd system-images/utils && make docker-images/$(ARCH).tar && touch docker-images/$(ARCH).tar
system-images/binfmt/docker-images/$(ARCH).tar: $(BINFMT_SRC)
cd system-images/binfmt && make docker-images/$(ARCH).tar && touch docker-images/$(ARCH).tar
core/target/$(ARCH)-unknown-linux-musl/release/startbox: $(CORE_SRC) $(COMPRESSED_WEB_UIS) web/patchdb-ui-seed.json $(ENVIRONMENT_FILE)
ARCH=$(ARCH) ./core/build-startbox.sh
touch core/target/$(ARCH)-unknown-linux-musl/release/startbox

3218
core/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -6,6 +6,7 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
arti-client = { version = "0.33", features = ["full"] }
axum = "0.8.4"
base64 = "0.22.1"
color-eyre = "0.6.2"
@@ -29,16 +30,10 @@ rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git", branch =
rustls = "0.23"
serde = { version = "1.0", features = ["derive", "rc"] }
serde_json = "1.0"
sqlx = { version = "0.8.6", features = [
"chrono",
"runtime-tokio-rustls",
"postgres",
] }
ssh-key = "0.6.2"
ts-rs = { git = "https://github.com/dr-bonez/ts-rs.git", branch = "feature/top-level-as" } # "8"
thiserror = "2.0"
tokio = { version = "1", features = ["full"] }
torut = { git = "https://github.com/Start9Labs/torut.git", branch = "update/dependencies" }
tracing = "0.1.39"
yasi = "0.1.5"
zbus = "5"

View File

@@ -288,11 +288,6 @@ impl From<patch_db::Error> for Error {
Error::new(e, ErrorKind::Database)
}
}
impl From<sqlx::Error> for Error {
fn from(e: sqlx::Error) -> Self {
Error::new(e, ErrorKind::Database)
}
}
impl From<ed25519_dalek::SignatureError> for Error {
fn from(e: ed25519_dalek::SignatureError) -> Self {
Error::new(e, ErrorKind::InvalidSignature)
@@ -303,11 +298,6 @@ impl From<std::net::AddrParseError> for Error {
Error::new(e, ErrorKind::ParseNetAddress)
}
}
impl From<torut::control::ConnError> for Error {
fn from(e: torut::control::ConnError) -> Self {
Error::new(e, ErrorKind::Tor)
}
}
impl From<ipnet::AddrParseError> for Error {
fn from(e: ipnet::AddrParseError) -> Self {
Error::new(e, ErrorKind::ParseNetAddress)
@@ -353,8 +343,8 @@ impl From<reqwest::Error> for Error {
Error::new(e, kind)
}
}
impl From<torut::onion::OnionAddressParseError> for Error {
fn from(e: torut::onion::OnionAddressParseError) -> Self {
impl From<arti_client::Error> for Error {
fn from(e: arti_client::Error) -> Self {
Error::new(e, ErrorKind::Tor)
}
}

View File

@@ -60,20 +60,3 @@ impl AsRef<Path> for HostId {
self.0.as_ref().as_ref()
}
}
impl<'q> sqlx::Encode<'q, sqlx::Postgres> for HostId {
fn encode_by_ref(
&self,
buf: &mut <sqlx::Postgres as sqlx::Database>::ArgumentBuffer<'q>,
) -> Result<sqlx::encode::IsNull, sqlx::error::BoxDynError> {
<&str as sqlx::Encode<'q, sqlx::Postgres>>::encode_by_ref(&&**self, buf)
}
}
impl sqlx::Type<sqlx::Postgres> for HostId {
fn type_info() -> sqlx::postgres::PgTypeInfo {
<&str as sqlx::Type<sqlx::Postgres>>::type_info()
}
fn compatible(ty: &sqlx::postgres::PgTypeInfo) -> bool {
<&str as sqlx::Type<sqlx::Postgres>>::compatible(ty)
}
}

View File

@@ -118,20 +118,3 @@ impl Serialize for Id {
serializer.serialize_str(self)
}
}
impl<'q> sqlx::Encode<'q, sqlx::Postgres> for Id {
fn encode_by_ref(
&self,
buf: &mut <sqlx::Postgres as sqlx::Database>::ArgumentBuffer<'q>,
) -> Result<sqlx::encode::IsNull, sqlx::error::BoxDynError> {
<&str as sqlx::Encode<'q, sqlx::Postgres>>::encode_by_ref(&&**self, buf)
}
}
impl sqlx::Type<sqlx::Postgres> for Id {
fn type_info() -> sqlx::postgres::PgTypeInfo {
<&str as sqlx::Type<sqlx::Postgres>>::type_info()
}
fn compatible(ty: &sqlx::postgres::PgTypeInfo) -> bool {
<&str as sqlx::Type<sqlx::Postgres>>::compatible(ty)
}
}

View File

@@ -87,20 +87,3 @@ impl Serialize for PackageId {
Serialize::serialize(&self.0, serializer)
}
}
impl<'q> sqlx::Encode<'q, sqlx::Postgres> for PackageId {
fn encode_by_ref(
&self,
buf: &mut <sqlx::Postgres as sqlx::Database>::ArgumentBuffer<'q>,
) -> Result<sqlx::encode::IsNull, sqlx::error::BoxDynError> {
<&str as sqlx::Encode<'q, sqlx::Postgres>>::encode_by_ref(&&**self, buf)
}
}
impl sqlx::Type<sqlx::Postgres> for PackageId {
fn type_info() -> sqlx::postgres::PgTypeInfo {
<&str as sqlx::Type<sqlx::Postgres>>::type_info()
}
fn compatible(ty: &sqlx::postgres::PgTypeInfo) -> bool {
<&str as sqlx::Type<sqlx::Postgres>>::compatible(ty)
}
}

View File

@@ -44,23 +44,6 @@ impl AsRef<Path> for ServiceInterfaceId {
self.0.as_ref().as_ref()
}
}
impl<'q> sqlx::Encode<'q, sqlx::Postgres> for ServiceInterfaceId {
fn encode_by_ref(
&self,
buf: &mut <sqlx::Postgres as sqlx::Database>::ArgumentBuffer<'q>,
) -> Result<sqlx::encode::IsNull, sqlx::error::BoxDynError> {
<&str as sqlx::Encode<'q, sqlx::Postgres>>::encode_by_ref(&&**self, buf)
}
}
impl sqlx::Type<sqlx::Postgres> for ServiceInterfaceId {
fn type_info() -> sqlx::postgres::PgTypeInfo {
<&str as sqlx::Type<sqlx::Postgres>>::type_info()
}
fn compatible(ty: &sqlx::postgres::PgTypeInfo) -> bool {
<&str as sqlx::Type<sqlx::Postgres>>::compatible(ty)
}
}
impl FromStr for ServiceInterfaceId {
type Err = <Id as FromStr>::Err;
fn from_str(s: &str) -> Result<Self, Self::Err> {

View File

@@ -2,7 +2,7 @@
authors = ["Aiden McClelland <me@drbonez.dev>"]
description = "The core of StartOS"
documentation = "https://docs.rs/start-os"
edition = "2021"
edition = "2024"
keywords = [
"self-hosted",
"raspberry-pi",
@@ -57,6 +57,13 @@ tunnel = []
unstable = ["console-subscriber", "tokio/tracing"]
[dependencies]
arti-client = { version = "0.33", features = [
"compression",
"experimental-api",
"rustls",
"static",
"tokio",
], default-features = false }
aes = { version = "0.7.5", features = ["ctr"] }
async-acme = { version = "0.6.0", git = "https://github.com/dr-bonez/async-acme.git", features = [
"use_rustls",
@@ -89,6 +96,7 @@ cookie_store = "0.21.0"
der = { version = "0.7.9", features = ["derive", "pem"] }
digest = "0.10.7"
divrem = "1.0.0"
dns-lookup = "2.1.0"
ed25519 = { version = "2.2.3", features = ["pkcs8", "pem", "alloc"] }
ed25519-dalek = { version = "2.2.0", features = [
"serde",
@@ -107,6 +115,8 @@ futures = "0.3.28"
gpt = "4.1.0"
helpers = { path = "../helpers" }
hex = "0.4.3"
hickory-client = "0.25.2"
hickory-server = "0.25.2"
hmac = "0.12.1"
http = "1.0.0"
http-body-util = "0.1"
@@ -124,10 +134,7 @@ id-pool = { version = "0.2.2", default-features = false, features = [
"serde",
"u16",
] }
imbl = { version = "6", git = "https://github.com/dr-bonez/imbl.git", branch = "bugfix/ordmap-lifetimes", features = [
"serde",
"small-chunks",
] }
imbl = { version = "6", features = ["serde", "small-chunks"] }
imbl-value = "0.4.0"
include_dir = { version = "0.7.3", features = ["metadata"] }
indexmap = { version = "2.0.2", features = ["serde"] }
@@ -186,6 +193,7 @@ rpassword = "7.2.0"
rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git", branch = "master" }
rust-argon2 = "2.0.0"
rustyline-async = "0.4.1"
safelog = "0.4.8"
semver = { version = "1.0.20", features = ["serde"] }
serde = { version = "1.0", features = ["derive", "rc"] }
serde_cbor = { package = "ciborium", version = "0.2.1" }
@@ -199,12 +207,11 @@ sha2 = "0.10.2"
shell-words = "1"
signal-hook = "0.3.17"
simple-logging = "2.0.2"
socket2 = "0.5.7"
socket2 = { version = "0.6.0", features = ["all"] }
sqlx = { version = "0.8.6", features = [
"chrono",
"runtime-tokio-rustls",
"postgres",
] }
], default-features = false }
sscanf = "0.4.1"
ssh-key = { version = "0.6.2", features = ["ed25519"] }
tar = "0.4.40"
@@ -218,17 +225,17 @@ tokio-stream = { version = "0.1.14", features = ["io-util", "sync", "net"] }
tokio-tar = { git = "https://github.com/dr-bonez/tokio-tar.git" }
tokio-tungstenite = { version = "0.26.2", features = ["native-tls", "url"] }
tokio-util = { version = "0.7.9", features = ["io"] }
torut = { git = "https://github.com/Start9Labs/torut.git", branch = "update/dependencies", features = [
"serialize",
] }
tor-hscrypto = { version = "0.33", features = ["full"] }
tor-hsservice = { version = "0.33" }
tor-keymgr = { version = "0.33", features = ["ephemeral-keystore"] }
tor-llcrypto = { version = "0.33", features = ["full"] }
tor-rtcompat = { version = "0.33", features = ["tokio", "rustls"] }
tower-service = "0.3.3"
tracing = "0.1.39"
tracing-error = "0.2.0"
tracing-futures = "0.2.5"
tracing-journald = "0.3.0"
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
trust-dns-server = "0.23.2"
trust-dns-client = "0.23.2"
ts-rs = { git = "https://github.com/dr-bonez/ts-rs.git", branch = "feature/top-level-as" } # "8.1.0"
typed-builder = "0.21.0"
unix-named-pipe = "0.2.0"

View File

@@ -3,11 +3,11 @@ use std::time::SystemTime;
use imbl_value::InternedString;
use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
use torut::onion::TorSecretKeyV3;
use crate::db::model::DatabaseModel;
use crate::hostname::{generate_hostname, generate_id, Hostname};
use crate::net::ssl::{generate_key, make_root_cert};
use crate::net::tor::TorSecretKey;
use crate::prelude::*;
use crate::util::serde::Pem;
@@ -20,12 +20,12 @@ fn hash_password(password: &str) -> Result<String, Error> {
.with_kind(crate::ErrorKind::PasswordHashGeneration)
}
#[derive(Debug, Clone)]
#[derive(Clone)]
pub struct AccountInfo {
pub server_id: String,
pub hostname: Hostname,
pub password: String,
pub tor_keys: Vec<TorSecretKeyV3>,
pub tor_keys: Vec<TorSecretKey>,
pub root_ca_key: PKey<Private>,
pub root_ca_cert: X509,
pub ssh_key: ssh_key::PrivateKey,
@@ -35,7 +35,7 @@ impl AccountInfo {
pub fn new(password: &str, start_time: SystemTime) -> Result<Self, Error> {
let server_id = generate_id();
let hostname = generate_hostname();
let tor_key = vec![TorSecretKeyV3::generate()];
let tor_key = vec![TorSecretKey::generate()];
let root_ca_key = generate_key()?;
let root_ca_cert = make_root_cert(&root_ca_key, &hostname, start_time)?;
let ssh_key = ssh_key::PrivateKey::from(ssh_key::private::Ed25519Keypair::random(
@@ -104,7 +104,7 @@ impl AccountInfo {
&self
.tor_keys
.iter()
.map(|tor_key| tor_key.public().get_onion_address())
.map(|tor_key| tor_key.onion_address())
.collect(),
)?;
db.as_private_mut().as_password_mut().ser(&self.password)?;
@@ -142,7 +142,7 @@ impl AccountInfo {
.chain(
self.tor_keys
.iter()
.map(|k| InternedString::from_display(&k.public().get_onion_address())),
.map(|k| InternedString::from_display(&k.onion_address())),
)
}
}

View File

@@ -4,10 +4,10 @@ use openssl::x509::X509;
use patch_db::Value;
use serde::{Deserialize, Serialize};
use ssh_key::private::Ed25519Keypair;
use torut::onion::TorSecretKeyV3;
use crate::account::AccountInfo;
use crate::hostname::{generate_hostname, generate_id, Hostname};
use crate::net::tor::TorSecretKey;
use crate::prelude::*;
use crate::util::crypto::ed25519_expand_key;
use crate::util::serde::{Base32, Base64, Pem};
@@ -85,7 +85,10 @@ impl OsBackupV0 {
&mut ssh_key::rand_core::OsRng::default(),
ssh_key::Algorithm::Ed25519,
)?,
tor_keys: vec![TorSecretKeyV3::from(self.tor_key.0)],
tor_keys: TorSecretKey::from_bytes(self.tor_key.0)
.ok()
.into_iter()
.collect(),
developer_key: ed25519_dalek::SigningKey::generate(
&mut ssh_key::rand_core::OsRng::default(),
),
@@ -116,7 +119,10 @@ impl OsBackupV1 {
root_ca_key: self.root_ca_key.0,
root_ca_cert: self.root_ca_cert.0,
ssh_key: ssh_key::PrivateKey::from(Ed25519Keypair::from_seed(&self.net_key.0)),
tor_keys: vec![TorSecretKeyV3::from(ed25519_expand_key(&self.net_key.0))],
tor_keys: TorSecretKey::from_bytes(ed25519_expand_key(&self.net_key.0))
.ok()
.into_iter()
.collect(),
developer_key: ed25519_dalek::SigningKey::from_bytes(&self.net_key),
},
ui: self.ui,
@@ -134,7 +140,7 @@ struct OsBackupV2 {
root_ca_key: Pem<PKey<Private>>, // PEM Encoded OpenSSL Key
root_ca_cert: Pem<X509>, // PEM Encoded OpenSSL X509 Certificate
ssh_key: Pem<ssh_key::PrivateKey>, // PEM Encoded OpenSSH Key
tor_keys: Vec<TorSecretKeyV3>, // Base64 Encoded Ed25519 Expanded Secret Key
tor_keys: Vec<TorSecretKey>, // Base64 Encoded Ed25519 Expanded Secret Key
compat_s9pk_key: Pem<ed25519_dalek::SigningKey>, // PEM Encoded ED25519 Key
ui: Value, // JSON Value
}

View File

@@ -131,7 +131,11 @@ impl CliContext {
tunnel_listen: config.tunnel_listen,
client: {
let mut builder = Client::builder().cookie_provider(cookie_store.clone());
if let Some(proxy) = config.proxy {
if let Some(proxy) = config.proxy.or_else(|| {
config
.socks_listen
.and_then(|socks| format!("socks5h://{socks}").parse::<Url>().log_err())
}) {
builder =
builder.proxy(Proxy::all(proxy).with_kind(crate::ErrorKind::ParseUrl)?)
}

View File

@@ -74,6 +74,8 @@ pub struct ClientConfig {
pub tunnel_listen: Option<SocketAddr>,
#[arg(short = 'p', long)]
pub proxy: Option<Url>,
#[arg(skip)]
pub socks_listen: Option<SocketAddr>,
#[arg(long)]
pub cookie_path: Option<PathBuf>,
#[arg(long)]
@@ -114,9 +116,7 @@ pub struct ServerConfig {
#[arg(skip)]
pub os_partitions: Option<OsPartitionInfo>,
#[arg(long)]
pub tor_control: Option<SocketAddr>,
#[arg(long)]
pub tor_socks: Option<SocketAddr>,
pub socks_listen: Option<SocketAddr>,
#[arg(long)]
pub revision_cache_size: Option<usize>,
#[arg(long)]
@@ -133,8 +133,7 @@ impl ContextConfig for ServerConfig {
fn merge_with(&mut self, other: Self) {
self.ethernet_interface = self.ethernet_interface.take().or(other.ethernet_interface);
self.os_partitions = self.os_partitions.take().or(other.os_partitions);
self.tor_control = self.tor_control.take().or(other.tor_control);
self.tor_socks = self.tor_socks.take().or(other.tor_socks);
self.socks_listen = self.socks_listen.take().or(other.socks_listen);
self.revision_cache_size = self
.revision_cache_size
.take()

View File

@@ -65,7 +65,6 @@ pub struct RpcContextSeed {
pub cancellable_installs: SyncMutex<BTreeMap<PackageId, oneshot::Sender<()>>>,
pub metrics_cache: Watch<Option<crate::system::Metrics>>,
pub shutdown: broadcast::Sender<Option<Shutdown>>,
pub tor_socks: SocketAddr,
pub lxc_manager: Arc<LxcManager>,
pub open_authed_continuations: OpenAuthedContinuations<Option<InternedString>>,
pub rpc_continuations: RpcContinuations,
@@ -138,7 +137,9 @@ impl RpcContext {
run_migrations,
}: InitRpcContextPhases,
) -> Result<Self, Error> {
let tor_proxy = config.tor_socks.unwrap_or(SocketAddr::V4(SocketAddrV4::new(
let socks_proxy = config
.socks_listen
.unwrap_or(SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(127, 0, 0, 1),
9050,
)));
@@ -163,17 +164,7 @@ impl RpcContext {
{
(net_ctrl, os_net_service)
} else {
let net_ctrl = Arc::new(
NetController::init(
db.clone(),
config
.tor_control
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
tor_proxy,
&account.hostname,
)
.await?,
);
let net_ctrl = Arc::new(NetController::init(db.clone(), &account.hostname).await?);
webserver.try_upgrade(|a| net_ctrl.net_iface.watcher.upgrade_listener(a))?;
let os_net_service = net_ctrl.os_bindings().await?;
(net_ctrl, os_net_service)
@@ -183,7 +174,7 @@ impl RpcContext {
let services = ServiceMap::default();
let metrics_cache = Watch::<Option<crate::system::Metrics>>::new(None);
let tor_proxy_url = format!("socks5h://{tor_proxy}");
let socks_proxy_url = format!("socks5h://{socks_proxy}");
let crons = SyncMutex::new(BTreeMap::new());
@@ -251,7 +242,6 @@ impl RpcContext {
cancellable_installs: SyncMutex::new(BTreeMap::new()),
metrics_cache,
shutdown,
tor_socks: tor_proxy,
lxc_manager: Arc::new(LxcManager::new()),
open_authed_continuations: OpenAuthedContinuations::new(),
rpc_continuations: RpcContinuations::new(),
@@ -267,13 +257,7 @@ impl RpcContext {
})?,
),
client: Client::builder()
.proxy(Proxy::custom(move |url| {
if url.host_str().map_or(false, |h| h.ends_with(".onion")) {
Some(tor_proxy_url.clone())
} else {
None
}
}))
.proxy(Proxy::all(socks_proxy_url)?)
.build()
.with_kind(crate::ErrorKind::ParseUrl)?,
start_time: Instant::now(),

View File

@@ -55,7 +55,7 @@ impl TryFrom<&AccountInfo> for SetupResult {
tor_addresses: value
.tor_keys
.iter()
.map(|tor_key| format!("https://{}", tor_key.public().get_onion_address()))
.map(|tor_key| format!("https://{}", tor_key.onion_address()))
.collect(),
hostname: value.hostname.clone(),
lan_address: value.hostname.lan_address(),

View File

@@ -18,8 +18,9 @@ use ts_rs::TS;
use crate::account::AccountInfo;
use crate::db::model::package::AllPackageData;
use crate::net::acme::AcmeProvider;
use crate::net::forward::START9_BRIDGE_IFACE;
use crate::net::host::binding::{AddSslOptions, BindInfo, BindOptions, NetInfo};
use crate::net::host::Host;
use crate::net::host::{Domains, Host};
use crate::net::utils::ipv6_is_local;
use crate::net::vhost::AlpnInfo;
use crate::prelude::*;
@@ -29,7 +30,7 @@ use crate::util::cpupower::Governor;
use crate::util::lshw::LshwDevice;
use crate::util::serde::MaybeUtf8String;
use crate::version::{Current, VersionT};
use crate::{ARCH, PLATFORM};
use crate::{ARCH, HOST_IP, PLATFORM};
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
@@ -80,12 +81,8 @@ impl Public {
)]
.into_iter()
.collect(),
onions: account
.tor_keys
.iter()
.map(|k| k.public().get_onion_address())
.collect(),
domains: BTreeMap::new(),
onions: account.tor_keys.iter().map(|k| k.onion_address()).collect(),
domains: Domains::default(),
hostname_info: BTreeMap::new(),
},
wifi: WifiInfo {
@@ -95,6 +92,7 @@ impl Public {
gateways: OrdMap::new(),
acme: BTreeMap::new(),
domains: BTreeMap::new(),
dns: Default::default(),
},
status_info: ServerStatus {
backup_progress: None,
@@ -198,6 +196,18 @@ pub struct NetworkInfo {
#[serde(default)]
#[ts(as = "BTreeMap::<String, DomainSettings>")]
pub domains: BTreeMap<InternedString, DomainSettings>,
#[serde(default)]
pub dns: DnsSettings,
}
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct DnsSettings {
pub dhcp: Vec<IpAddr>,
#[serde(rename = "static")]
pub static_servers: Option<Vec<IpAddr>>,
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel, TS)]
@@ -226,13 +236,42 @@ impl NetworkInterfaceInfo {
]
.into_iter()
.collect(),
lan_ip: [
IpAddr::from(Ipv4Addr::LOCALHOST),
IpAddr::from(Ipv6Addr::LOCALHOST)
]
.into_iter()
.collect(),
wan_ip: None,
ntp_servers: Default::default(),
dns_servers: Default::default(),
}),
};
}
(&*LO, &*LOOPBACK)
}
pub fn lxc_bridge() -> (&'static GatewayId, &'static Self) {
lazy_static! {
static ref LXCBR0: GatewayId = GatewayId::from(START9_BRIDGE_IFACE);
static ref LXC_BRIDGE: NetworkInterfaceInfo = NetworkInterfaceInfo {
public: Some(false),
secure: Some(true),
ip_info: Some(IpInfo {
name: START9_BRIDGE_IFACE.into(),
scope_id: 0,
device_type: None,
subnets: [IpNet::new(HOST_IP.into(), 24).unwrap()]
.into_iter()
.collect(),
lan_ip: [IpAddr::from(HOST_IP)].into_iter().collect(),
wan_ip: None,
ntp_servers: Default::default(),
dns_servers: Default::default(),
}),
};
}
(&*LXCBR0, &*LXC_BRIDGE)
}
pub fn public(&self) -> bool {
self.public.unwrap_or_else(|| {
!self.ip_info.as_ref().map_or(true, |ip_info| {
@@ -285,9 +324,13 @@ pub struct IpInfo {
pub device_type: Option<NetworkInterfaceType>,
#[ts(type = "string[]")]
pub subnets: OrdSet<IpNet>,
#[ts(type = "string[]")]
pub lan_ip: OrdSet<IpAddr>,
pub wan_ip: Option<Ipv4Addr>,
#[ts(type = "string[]")]
pub ntp_servers: OrdSet<InternedString>,
#[ts(type = "string[]")]
pub dns_servers: OrdSet<IpAddr>,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize, TS)]

View File

@@ -96,14 +96,13 @@ fn display_disk_info(params: WithIoFormat<Empty>, args: Vec<DiskInfo>) -> Result
"N/A"
},
part.capacity,
if let Some(used) = part
&if let Some(used) = part
.used
.map(|u| format!("{:.2} GiB", u as f64 / 1024.0 / 1024.0 / 1024.0))
.as_ref()
{
used
} else {
"N/A"
"N/A".to_owned()
},
&if part.start_os.is_empty() {
"N/A".to_owned()

View File

@@ -1,5 +1,4 @@
use std::io::Cursor;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::path::Path;
use std::sync::Arc;
use std::time::{Duration, SystemTime};
@@ -203,19 +202,7 @@ pub async fn init(
let account = AccountInfo::load(&peek)?;
start_net.start();
let net_ctrl = Arc::new(
NetController::init(
db.clone(),
cfg.tor_control
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
cfg.tor_socks.unwrap_or(SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(127, 0, 0, 1),
9050,
))),
&account.hostname,
)
.await?,
);
let net_ctrl = Arc::new(NetController::init(db.clone(), &account.hostname).await?);
webserver.try_upgrade(|a| net_ctrl.net_iface.watcher.upgrade_listener(a))?;
let os_net_service = net_ctrl.os_bindings().await?;
start_net.complete();

View File

@@ -89,10 +89,22 @@ impl SignatureAuthContext for RpcContext {
.as_network()
.as_host()
.as_domains()
.as_public()
.keys()
.map(|k| k.into_iter())
.transpose(),
)
.chain(
peek.as_public()
.as_server_info()
.as_network()
.as_host()
.as_domains()
.as_private()
.de()
.map(|k| k.into_iter())
.transpose(),
)
.collect::<Vec<_>>()
}
fn check_pubkey(

View File

@@ -1,42 +1,150 @@
use std::borrow::Borrow;
use std::collections::BTreeMap;
use std::future::Future;
use std::net::Ipv4Addr;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr};
use std::sync::{Arc, Weak};
use std::time::Duration;
use clap::Parser;
use color_eyre::eyre::eyre;
use futures::{FutureExt, TryStreamExt};
use helpers::NonDetachingJoinHandle;
use models::PackageId;
use hickory_client::client::Client;
use hickory_client::proto::runtime::TokioRuntimeProvider;
use hickory_client::proto::udp::UdpClientStream;
use hickory_client::proto::xfer::DnsRequestOptions;
use hickory_client::proto::DnsHandle;
use hickory_server::authority::MessageResponseBuilder;
use hickory_server::proto::op::{Header, ResponseCode};
use hickory_server::proto::rr::{Name, Record, RecordType};
use hickory_server::server::{Request, RequestHandler, ResponseHandler, ResponseInfo};
use hickory_server::ServerFuture;
use imbl::OrdMap;
use imbl_value::InternedString;
use models::{GatewayId, PackageId};
use rpc_toolkit::{from_fn_blocking, Context, HandlerArgs, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use tokio::net::{TcpListener, UdpSocket};
use tokio::process::Command;
use tokio::sync::RwLock;
use tracing::instrument;
use trust_dns_server::authority::MessageResponseBuilder;
use trust_dns_server::proto::op::{Header, ResponseCode};
use trust_dns_server::proto::rr::{Name, Record, RecordType};
use trust_dns_server::server::{Request, RequestHandler, ResponseHandler, ResponseInfo};
use trust_dns_server::ServerFuture;
use crate::net::forward::START9_BRIDGE_IFACE;
use crate::util::Invoke;
use crate::db::model::public::NetworkInterfaceInfo;
use crate::net::gateway::NetworkInterfaceWatcher;
use crate::util::serde::{display_serializable, HandlerExtSerde};
use crate::util::sync::{SyncRwLock, Watch};
use crate::{Error, ErrorKind, ResultExt};
pub fn dns_api<C: Context>() -> ParentHandler<C> {
ParentHandler::new().subcommand(
"query",
from_fn_blocking(query_dns::<C>)
.with_display_serializable()
.with_custom_display_fn(|HandlerArgs { params, .. }, res| {
if let Some(format) = params.format {
return display_serializable(format, res);
}
if let Some(ip) = res {
println!("{}", ip)
}
Ok(())
})
.with_about("Test the DNS configuration for a domain"),
)
}
#[derive(Deserialize, Serialize, Parser)]
pub struct QueryDnsParams {
pub fqdn: InternedString,
}
pub fn query_dns<C: Context>(
_: C,
QueryDnsParams { fqdn }: QueryDnsParams,
) -> Result<Option<Ipv4Addr>, Error> {
let hints = dns_lookup::AddrInfoHints {
flags: 0,
address: libc::AF_INET,
socktype: 0,
protocol: 0,
};
dns_lookup::getaddrinfo(Some(&*fqdn), None, Some(hints))
.map(Some)
.or_else(|e| {
if matches!(
e.kind(),
dns_lookup::LookupErrorKind::NoName | dns_lookup::LookupErrorKind::NoData
) {
Ok(None)
} else {
Err(std::io::Error::from(e))
}
})
.with_kind(ErrorKind::Network)?
.into_iter()
.flatten()
.find_map(|a| match a.map(|a| a.sockaddr.ip()) {
Ok(IpAddr::V4(a)) => Some(Ok(a)),
Err(e) => Some(Err(e)),
_ => None,
})
.transpose()
.map_err(Error::from)
}
// #[test]
// fn test_dns() {
// assert!(query_dns(
// (),
// QueryDnsParams {
// fqdn: "fakedomain-definitely-not-real.com"
// }
// )
// .unwrap()
// .is_none())
// }
#[derive(Default)]
struct ResolveMap {
private_domains: BTreeMap<InternedString, Weak<()>>,
services: BTreeMap<Option<PackageId>, BTreeMap<Ipv4Addr, Weak<()>>>,
}
pub struct DnsController {
services: Weak<RwLock<BTreeMap<Option<PackageId>, BTreeMap<Ipv4Addr, Weak<()>>>>>,
resolve: Weak<SyncRwLock<ResolveMap>>,
#[allow(dead_code)]
dns_server: NonDetachingJoinHandle<Result<(), Error>>,
dns_server: NonDetachingJoinHandle<()>,
}
struct Resolver {
services: Arc<RwLock<BTreeMap<Option<PackageId>, BTreeMap<Ipv4Addr, Weak<()>>>>>,
client: hickory_client::client::Client,
net_iface: Watch<OrdMap<GatewayId, NetworkInterfaceInfo>>,
resolve: Arc<SyncRwLock<ResolveMap>>,
}
impl Resolver {
async fn resolve(&self, name: &Name) -> Option<Vec<Ipv4Addr>> {
fn resolve(&self, name: &Name, src: IpAddr) -> Option<Vec<IpAddr>> {
self.resolve.peek(|r| {
if r.private_domains
.get(&*name.to_lowercase().to_ascii())
.map_or(false, |d| d.strong_count() > 0)
{
if let Some(res) = self.net_iface.peek(|i| {
i.values()
.chain([NetworkInterfaceInfo::lxc_bridge().1])
.flat_map(|i| i.ip_info.as_ref())
.find(|i| i.subnets.iter().any(|s| s.contains(&src)))
.map(|ip_info| {
let mut res = ip_info.subnets.iter().collect::<Vec<_>>();
res.sort_by_cached_key(|a| !a.contains(&src));
res.into_iter().map(|s| s.addr()).collect()
})
}) {
return Some(res);
}
}
match name.iter().next_back() {
Some(b"embassy") | Some(b"startos") => {
if let Some(pkg) = name.iter().rev().skip(1).next() {
if let Some(ip) = self.services.read().await.get(&Some(
if let Some(ip) = r.services.get(&Some(
std::str::from_utf8(pkg)
.unwrap_or_default()
.parse()
@@ -45,17 +153,17 @@ impl Resolver {
Some(
ip.iter()
.filter(|(_, rc)| rc.strong_count() > 0)
.map(|(ip, _)| *ip)
.map(|(ip, _)| (*ip).into())
.collect(),
)
} else {
None
}
} else if let Some(ip) = self.services.read().await.get(&None) {
} else if let Some(ip) = r.services.get(&None) {
Some(
ip.iter()
.filter(|(_, rc)| rc.strong_count() > 0)
.map(|(ip, _)| *ip)
.map(|(ip, _)| (*ip).into())
.collect(),
)
} else {
@@ -64,6 +172,7 @@ impl Resolver {
}
_ => None,
}
})
}
}
@@ -74,8 +183,10 @@ impl RequestHandler for Resolver {
request: &Request,
mut response_handle: R,
) -> ResponseInfo {
let query = request.request_info().query;
if let Some(ip) = self.resolve(query.name().borrow()).await {
async {
let req = request.request_info()?;
let query = req.query;
if let Some(ip) = self.resolve(query.name().borrow(), req.src.ip()) {
match query.query_type() {
RecordType::A => {
response_handle
@@ -83,11 +194,46 @@ impl RequestHandler for Resolver {
MessageResponseBuilder::from_message_request(&*request).build(
Header::response_from_request(request.header()),
&ip.into_iter()
.filter_map(|a| {
if let IpAddr::V4(a) = a {
Some(a)
} else {
None
}
})
.map(|ip| {
Record::from_rdata(
request.request_info().query.name().to_owned().into(),
query.name().to_owned().into(),
0,
trust_dns_server::proto::rr::RData::A(ip.into()),
hickory_server::proto::rr::RData::A(ip.into()),
)
})
.collect::<Vec<_>>(),
[],
[],
[],
),
)
.await
}
RecordType::AAAA => {
response_handle
.send_response(
MessageResponseBuilder::from_message_request(&*request).build(
Header::response_from_request(request.header()),
&ip.into_iter()
.filter_map(|a| {
if let IpAddr::V6(a) = a {
Some(a)
} else {
None
}
})
.map(|ip| {
Record::from_rdata(
query.name().to_owned().into(),
0,
hickory_server::proto::rr::RData::AAAA(ip.into()),
)
})
.collect::<Vec<_>>(),
@@ -114,20 +260,33 @@ impl RequestHandler for Resolver {
}
}
} else {
let mut res = Header::response_from_request(request.header());
res.set_response_code(ResponseCode::NXDomain);
let query = query.original().clone();
let mut stream = self.client.lookup(query, DnsRequestOptions::default());
let mut res = None;
while let Some(msg) = stream.try_next().await? {
res = Some(
response_handle
.send_response(
MessageResponseBuilder::from_message_request(&*request).build(
res.into(),
[],
[],
[],
[],
msg.header().clone(),
msg.answers(),
msg.name_servers(),
&msg.soa().map(|s| s.to_owned().into_record_of_rdata()),
msg.additionals(),
),
)
.await
.await?,
);
}
res.ok_or_else(|| {
std::io::Error::new(
std::io::ErrorKind::NotFound,
eyre!("no response from server"),
)
})
}
}
.await
.unwrap_or_else(|e| {
tracing::error!("{}", e);
tracing::debug!("{:?}", e);
@@ -140,68 +299,73 @@ impl RequestHandler for Resolver {
impl DnsController {
#[instrument(skip_all)]
pub async fn init(
bridge_activated: impl Future<Output = ()> + Send + Sync + 'static,
) -> Result<Self, Error> {
let services = Arc::new(RwLock::new(BTreeMap::new()));
pub async fn init(watcher: &NetworkInterfaceWatcher) -> Result<Self, Error> {
let resolve = Arc::new(SyncRwLock::new(ResolveMap::default()));
let stream =
UdpClientStream::builder(([127, 0, 0, 53], 5355).into(), TokioRuntimeProvider::new())
.build();
let (client, bg) = Client::connect(stream)
.await
.with_kind(ErrorKind::Network)?;
let mut server = ServerFuture::new(Resolver {
services: services.clone(),
client,
net_iface: watcher.subscribe(),
resolve: resolve.clone(),
});
let dns_server = tokio::spawn(async move {
let dns_server = tokio::spawn(
futures::future::join(
async move {
server.register_listener(
TcpListener::bind((Ipv4Addr::LOCALHOST, 53))
TcpListener::bind((Ipv6Addr::UNSPECIFIED, 53))
.await
.with_kind(ErrorKind::Network)?,
Duration::from_secs(30),
);
server.register_socket(
UdpSocket::bind((Ipv4Addr::LOCALHOST, 53))
UdpSocket::bind((Ipv6Addr::UNSPECIFIED, 53))
.await
.with_kind(ErrorKind::Network)?,
);
bridge_activated.await;
Command::new("resolvectl")
.arg("dns")
.arg(START9_BRIDGE_IFACE)
.arg("127.0.0.1")
.invoke(ErrorKind::Network)
.await?;
Command::new("resolvectl")
.arg("domain")
.arg(START9_BRIDGE_IFACE)
.arg("embassy")
.invoke(ErrorKind::Network)
.await?;
server
.block_until_done()
.await
.map_err(|e| Error::new(e, ErrorKind::Network))
})
.with_kind(ErrorKind::Network)
}
.map(|r| {
r.log_err();
}),
bg.map(|r| {
r.log_err();
}),
)
.map(|_| ()),
)
.into();
Ok(Self {
services: Arc::downgrade(&services),
resolve: Arc::downgrade(&resolve),
dns_server,
})
}
pub async fn add(&self, pkg_id: Option<PackageId>, ip: Ipv4Addr) -> Result<Arc<()>, Error> {
if let Some(services) = Weak::upgrade(&self.services) {
let mut writable = services.write().await;
let mut ips = writable.remove(&pkg_id).unwrap_or_default();
let rc = if let Some(rc) = Weak::upgrade(&ips.remove(&ip).unwrap_or_default()) {
pub fn add_service(&self, pkg_id: Option<PackageId>, ip: Ipv4Addr) -> Result<Arc<()>, Error> {
if let Some(resolve) = Weak::upgrade(&self.resolve) {
resolve.mutate(|writable| {
let ips = writable.services.entry(pkg_id).or_default();
let weak = ips.entry(ip).or_default();
let rc = if let Some(rc) = Weak::upgrade(&*weak) {
rc
} else {
Arc::new(())
let new = Arc::new(());
*weak = Arc::downgrade(&new);
new
};
ips.insert(ip, Arc::downgrade(&rc));
writable.insert(pkg_id, ips);
Ok(rc)
})
} else {
Err(Error::new(
eyre!("DNS Server Thread has exited"),
@@ -210,17 +374,65 @@ impl DnsController {
}
}
pub async fn gc(&self, pkg_id: Option<PackageId>, ip: Ipv4Addr) -> Result<(), Error> {
if let Some(services) = Weak::upgrade(&self.services) {
let mut writable = services.write().await;
let mut ips = writable.remove(&pkg_id).unwrap_or_default();
pub fn gc_service(&self, pkg_id: Option<PackageId>, ip: Ipv4Addr) -> Result<(), Error> {
if let Some(resolve) = Weak::upgrade(&self.resolve) {
resolve.mutate(|writable| {
let mut ips = writable.services.remove(&pkg_id).unwrap_or_default();
if let Some(rc) = Weak::upgrade(&ips.remove(&ip).unwrap_or_default()) {
ips.insert(ip, Arc::downgrade(&rc));
}
if !ips.is_empty() {
writable.insert(pkg_id, ips);
writable.services.insert(pkg_id, ips);
}
Ok(())
})
} else {
Err(Error::new(
eyre!("DNS Server Thread has exited"),
crate::ErrorKind::Network,
))
}
}
pub fn add_private_domain(&self, fqdn: InternedString) -> Result<Arc<()>, Error> {
if let Some(resolve) = Weak::upgrade(&self.resolve) {
resolve.mutate(|writable| {
let weak = writable.private_domains.entry(fqdn).or_default();
let rc = if let Some(rc) = Weak::upgrade(&*weak) {
rc
} else {
let new = Arc::new(());
*weak = Arc::downgrade(&new);
new
};
Ok(rc)
})
} else {
Err(Error::new(
eyre!("DNS Server Thread has exited"),
crate::ErrorKind::Network,
))
}
}
pub fn gc_private_domains<'a, BK: Ord + 'a>(
&self,
domains: impl IntoIterator<Item = &'a BK> + 'a,
) -> Result<(), Error>
where
InternedString: Borrow<BK>,
{
if let Some(resolve) = Weak::upgrade(&self.resolve) {
resolve.mutate(|writable| {
for domain in domains {
if let Some((k, v)) = writable.private_domains.remove_entry(domain) {
if v.strong_count() > 0 {
writable.private_domains.insert(k, v);
}
}
}
Ok(())
})
} else {
Err(Error::new(
eyre!("DNS Server Thread has exited"),

View File

@@ -1,204 +0,0 @@
use std::collections::BTreeMap;
use clap::Parser;
use futures::TryFutureExt;
use helpers::NonDetachingJoinHandle;
use imbl_value::InternedString;
use models::GatewayId;
use rpc_toolkit::{from_fn_async, Context, HandlerArgs, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use crate::context::{CliContext, RpcContext};
use crate::db::model::public::DomainSettings;
use crate::prelude::*;
use crate::util::new_guid;
use crate::util::serde::{display_serializable, HandlerExtSerde};
pub fn domain_api<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"list",
from_fn_async(list)
.with_display_serializable()
.with_custom_display_fn(|HandlerArgs { params, .. }, res| {
use prettytable::*;
if let Some(format) = params.format {
return display_serializable(format, res);
}
let mut table = Table::new();
table.add_row(row![bc => "DOMAIN", "GATEWAY"]);
for (domain, info) in res {
table.add_row(row![domain, info.gateway]);
}
table.print_tty(false)?;
Ok(())
})
.with_about("List domains available to StartOS")
.with_call_remote::<CliContext>(),
)
.subcommand(
"add",
from_fn_async(add)
.with_metadata("sync_db", Value::Bool(true))
.no_display()
.with_about("Add a domain for use with StartOS")
.with_call_remote::<CliContext>(),
)
.subcommand(
"remove",
from_fn_async(remove)
.with_metadata("sync_db", Value::Bool(true))
.no_display()
.with_about("Remove a domain for use with StartOS")
.with_call_remote::<CliContext>(),
)
.subcommand(
"test-dns",
from_fn_async(test_dns)
.with_display_serializable()
.with_custom_display_fn(|HandlerArgs { params, .. }, res| {
use prettytable::*;
if let Some(format) = params.format {
return display_serializable(format, res);
}
let mut table = Table::new();
table.add_row(row![bc -> "ROOT", if res.root { "✅️" } else { "❌️" }]);
table.add_row(row![bc -> "WILDCARD", if res.wildcard { "✅️" } else { "❌️" }]);
table.print_tty(false)?;
Ok(())
})
.with_about("Test the DNS configuration for a domain"),
)
}
pub async fn list(ctx: RpcContext) -> Result<BTreeMap<InternedString, DomainSettings>, Error> {
ctx.db
.peek()
.await
.into_public()
.into_server_info()
.into_network()
.into_domains()
.de()
}
#[derive(Deserialize, Serialize, Parser)]
pub struct AddDomainParams {
pub fqdn: InternedString,
pub gateway: GatewayId,
}
pub async fn add(
ctx: RpcContext,
AddDomainParams { fqdn, gateway }: AddDomainParams,
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_server_info_mut()
.as_network_mut()
.as_domains_mut()
.insert(&fqdn, &DomainSettings { gateway })
})
.await
.result?;
Ok(())
}
#[derive(Deserialize, Serialize, Parser)]
pub struct RemoveDomainParams {
pub fqdn: InternedString,
}
pub async fn remove(
ctx: RpcContext,
RemoveDomainParams { fqdn }: RemoveDomainParams,
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_server_info_mut()
.as_network_mut()
.as_domains_mut()
.remove(&fqdn)
})
.await
.result?;
Ok(())
}
#[derive(Deserialize, Serialize)]
pub struct TestDnsResult {
pub root: bool,
pub wildcard: bool,
}
pub async fn test_dns(
ctx: RpcContext,
AddDomainParams { fqdn, ref gateway }: AddDomainParams,
) -> Result<TestDnsResult, Error> {
use tokio::net::UdpSocket;
use trust_dns_client::client::{AsyncClient, ClientHandle};
use trust_dns_client::op::DnsResponse;
use trust_dns_client::proto::error::ProtoError;
use trust_dns_client::rr::{DNSClass, Name, RecordType};
use trust_dns_client::udp::UdpClientStream;
let wan_ip = ctx
.db
.peek()
.await
.into_public()
.into_server_info()
.into_network()
.into_gateways()
.into_idx(&gateway)
.or_not_found(&gateway)?
.into_ip_info()
.transpose()
.and_then(|i| i.into_wan_ip().transpose())
.or_not_found(lazy_format!("WAN IP for {gateway}"))?
.de()?;
let stream = UdpClientStream::<UdpSocket>::new(([127, 0, 0, 53], 53).into());
let (mut client, bg) = AsyncClient::connect(stream.map_err(ProtoError::from))
.await
.with_kind(ErrorKind::Network)?;
let bg: NonDetachingJoinHandle<_> = tokio::spawn(bg).into();
let root = fqdn.parse::<Name>().with_kind(ErrorKind::Network)?;
let wildcard = new_guid()
.parse::<Name>()
.with_kind(ErrorKind::Network)?
.append_domain(&root)
.with_kind(ErrorKind::Network)?;
let q_root = client
.query(root, DNSClass::IN, RecordType::A)
.await
.with_kind(ErrorKind::Network)?;
let q_wildcard = client
.query(wildcard, DNSClass::IN, RecordType::A)
.await
.with_kind(ErrorKind::Network)?;
bg.abort();
let check_q = |q: DnsResponse| {
q.answers().iter().any(|a| {
a.data()
.and_then(|d| d.as_a())
.map_or(false, |d| d.0 == wan_ip)
})
};
Ok(TestDnsResult {
root: check_q(q_root),
wildcard: check_q(q_wildcard),
})
}

View File

@@ -261,6 +261,12 @@ trait ConnectionSettings {
trait Ip4Config {
#[zbus(property)]
fn address_data(&self) -> Result<Vec<AddressData>, Error>;
#[zbus(property)]
fn gateway(&self) -> Result<String, Error>;
#[zbus(property)]
fn nameserver_data(&self) -> Result<Vec<NameserverData>, Error>;
}
#[proxy(
@@ -270,6 +276,12 @@ trait Ip4Config {
trait Ip6Config {
#[zbus(property)]
fn address_data(&self) -> Result<Vec<AddressData>, Error>;
#[zbus(property)]
fn gateway(&self) -> Result<String, Error>;
#[zbus(property)]
fn nameserver_data(&self) -> Result<Vec<NameserverData>, Error>;
}
#[derive(Clone, Debug, DeserializeDict, ZValue, ZType)]
@@ -285,6 +297,12 @@ impl TryFrom<AddressData> for IpNet {
}
}
#[derive(Clone, Debug, DeserializeDict, ZValue, ZType)]
#[zvariant(signature = "dict")]
struct NameserverData {
address: String,
}
#[proxy(
interface = "org.freedesktop.NetworkManager.DHCP4Config",
default_service = "org.freedesktop.NetworkManager"
@@ -573,7 +591,15 @@ async fn watch_ip(
Ip6ConfigProxy::new(&connection, ip6_config.clone()).await?;
let mut until = Until::new()
.with_stream(ip4_proxy.receive_address_data_changed().await.stub())
.with_stream(ip6_proxy.receive_address_data_changed().await.stub());
.with_stream(ip4_proxy.receive_gateway_changed().await.stub())
.with_stream(
ip4_proxy.receive_nameserver_data_changed().await.stub(),
)
.with_stream(ip6_proxy.receive_address_data_changed().await.stub())
.with_stream(ip6_proxy.receive_gateway_changed().await.stub())
.with_stream(
ip6_proxy.receive_nameserver_data_changed().await.stub(),
);
let dhcp4_proxy = if &*dhcp4_config != "/" {
let dhcp4_proxy =
@@ -595,6 +621,12 @@ async fn watch_ip(
.into_iter()
.chain(ip6_proxy.address_data().await?)
.collect_vec();
let lan_ip = [
ip4_proxy.gateway().await?.parse::<IpAddr>()?,
ip6_proxy.gateway().await?.parse::<IpAddr>()?,
]
.into_iter()
.collect();
let mut ntp_servers = OrdSet::new();
if let Some(dhcp4_proxy) = &dhcp4_proxy {
let dhcp = dhcp4_proxy.options().await?;
@@ -605,14 +637,22 @@ async fn watch_ip(
);
}
}
let dns_servers = []
.into_iter()
.chain(ip4_proxy.nameserver_data().await?)
.chain(ip6_proxy.nameserver_data().await?)
.map(|NameserverData { address }| {
address.parse::<IpAddr>()
})
.collect::<Result<_, _>>()?;
let scope_id = if_nametoindex(iface.as_str())
.with_kind(ErrorKind::Network)?;
let subnets: OrdSet<IpNet> = addresses
.into_iter()
.map(IpNet::try_from)
.try_collect()?;
let ip_info = if !subnets.is_empty() {
let wan_ip = match get_wan_ipv4(iface.as_str()).await {
let wan_ip = if !subnets.is_empty() {
match get_wan_ipv4(iface.as_str()).await {
Ok(a) => a,
Err(e) => {
tracing::error!(
@@ -621,18 +661,20 @@ async fn watch_ip(
tracing::debug!("{e:?}");
None
}
}
} else {
None
};
Some(IpInfo {
let ip_info = Some(IpInfo {
name: name.clone(),
scope_id,
device_type,
subnets,
lan_ip,
wan_ip,
ntp_servers,
})
} else {
None
};
dns_servers,
});
write_to.send_if_modified(
|m: &mut OrdMap<GatewayId, NetworkInterfaceInfo>| {
@@ -810,12 +852,12 @@ impl NetworkInterfaceController {
) -> Result<(), Error> {
tracing::debug!("syncronizing {info:?} to db");
let dns = todo!();
db.mutate(|db| {
db.as_public_mut()
.as_server_info_mut()
.as_network_mut()
.as_gateways_mut()
.ser(info)
let net = db.as_public_mut().as_server_info_mut().as_network_mut();
net.as_dns_mut().as_dhcp_mut().ser(&dns)?;
net.as_gateways_mut().ser(info)
})
.await
.result?;
@@ -1170,15 +1212,11 @@ impl InterfaceFilter for bool {
}
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct LoopbackFilter;
impl InterfaceFilter for LoopbackFilter {
fn filter(&self, _: &GatewayId, info: &NetworkInterfaceInfo) -> bool {
info.ip_info.as_ref().map_or(false, |i| {
i.subnets
.iter()
.any(|i| i.contains(&IpAddr::V4(Ipv4Addr::LOCALHOST)))
})
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct IdFilter(pub GatewayId);
impl InterfaceFilter for IdFilter {
fn filter(&self, id: &GatewayId, _: &NetworkInterfaceInfo) -> bool {
id == &self.0
}
}
@@ -1428,7 +1466,10 @@ impl ListenerMap {
let mut keep = BTreeSet::<SocketAddr>::new();
for (_, info) in ip_info
.iter()
.chain([NetworkInterfaceInfo::loopback()])
.chain([
NetworkInterfaceInfo::loopback(),
NetworkInterfaceInfo::lxc_bridge(),
])
.filter(|(id, info)| filter.filter(*id, *info))
{
if let Some(ip_info) = &info.ip_info {
@@ -1503,7 +1544,10 @@ pub fn lookup_info_by_addr(
) -> Option<(&GatewayId, &NetworkInterfaceInfo)> {
ip_info
.iter()
.chain([NetworkInterfaceInfo::loopback()])
.chain([
NetworkInterfaceInfo::loopback(),
NetworkInterfaceInfo::lxc_bridge(),
])
.find(|(_, i)| {
i.ip_info
.as_ref()

View File

@@ -2,48 +2,43 @@ use std::collections::BTreeSet;
use clap::Parser;
use imbl_value::InternedString;
use models::GatewayId;
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerArgs, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use torut::onion::OnionAddressV3;
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::db::model::DatabaseModel;
use crate::net::acme::AcmeProvider;
use crate::net::host::{all_hosts, HostApiKind};
use crate::net::tor::OnionAddress;
use crate::prelude::*;
use crate::util::serde::{display_serializable, HandlerExtSerde};
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
#[serde(rename_all_fields = "camelCase")]
#[serde(tag = "kind")]
#[ts(export)]
pub enum HostAddress {
Onion {
#[ts(type = "string")]
address: OnionAddressV3,
address: OnionAddress,
},
Domain {
#[ts(type = "string")]
address: InternedString,
public: bool,
acme: Option<AcmeProvider>,
public: Option<PublicDomainConfig>,
},
}
#[derive(Debug, Deserialize, Serialize, TS)]
pub struct DomainConfig {
#[ts(type = "string")]
pub root: InternedString,
pub public: bool,
#[derive(Debug, Clone, Deserialize, Serialize, TS)]
pub struct PublicDomainConfig {
pub gateway: GatewayId,
pub acme: Option<AcmeProvider>,
}
fn check_duplicates(db: &DatabaseModel) -> Result<(), Error> {
let mut onions = BTreeSet::<OnionAddressV3>::new();
let mut onions = BTreeSet::<OnionAddress>::new();
let mut domains = BTreeSet::<InternedString>::new();
let mut check_onion = |onion: OnionAddressV3| {
let mut check_onion = |onion: OnionAddress| {
if onions.contains(&onion) {
return Err(Error::new(
eyre!("onion address {onion} is already in use"),
@@ -68,7 +63,10 @@ fn check_duplicates(db: &DatabaseModel) -> Result<(), Error> {
for onion in host.as_onions().de()? {
check_onion(onion)?;
}
for domain in host.as_domains().keys()? {
for domain in host.as_domains().as_public().keys()? {
check_domain(domain)?;
}
for domain in host.as_domains().as_private().de()? {
check_domain(domain)?;
}
}
@@ -80,25 +78,53 @@ pub fn address_api<C: Context, Kind: HostApiKind>(
ParentHandler::<C, Kind::Params, Kind::InheritedParams>::new()
.subcommand(
"domain",
ParentHandler::<C, Empty, Kind::Inheritance>::new()
.subcommand(
"public",
ParentHandler::<C, Empty, Kind::Inheritance>::new()
.subcommand(
"add",
from_fn_async(add_domain::<Kind>)
from_fn_async(add_public_domain::<Kind>)
.with_metadata("sync_db", Value::Bool(true))
.with_inherited(|_, a| a)
.no_display()
.with_about("Add an address to this host")
.with_about("Add a public domain to this host")
.with_call_remote::<CliContext>(),
)
.subcommand(
"remove",
from_fn_async(remove_domain::<Kind>)
from_fn_async(remove_public_domain::<Kind>)
.with_metadata("sync_db", Value::Bool(true))
.with_inherited(|_, a| a)
.no_display()
.with_about("Remove an address from this host")
.with_about("Remove a public domain from this host")
.with_call_remote::<CliContext>(),
)
.with_inherited(|_, a| a),
)
.subcommand(
"private",
ParentHandler::<C, Empty, Kind::Inheritance>::new()
.subcommand(
"add",
from_fn_async(add_private_domain::<Kind>)
.with_metadata("sync_db", Value::Bool(true))
.with_inherited(|_, a| a)
.no_display()
.with_about("Add a private domain to this host")
.with_call_remote::<CliContext>(),
)
.subcommand(
"remove",
from_fn_async(remove_private_domain::<Kind>)
.with_metadata("sync_db", Value::Bool(true))
.with_inherited(|_, a| a)
.no_display()
.with_about("Remove a private domain from this host")
.with_call_remote::<CliContext>(),
)
.with_inherited(|_, a| a),
)
.with_inherited(Kind::inheritance),
)
.subcommand(
@@ -146,15 +172,20 @@ pub fn address_api<C: Context, Kind: HostApiKind>(
}
HostAddress::Domain {
address,
public,
acme,
public: Some(PublicDomainConfig { gateway, acme }),
} => {
table.add_row(row![
address,
*public,
&format!("YES ({gateway})"),
acme.as_ref().map(|a| a.0.as_str()).unwrap_or("NONE")
]);
}
HostAddress::Domain {
address,
public: None,
} => {
table.add_row(row![address, &format!("NO"), "N/A"]);
}
}
}
@@ -168,40 +199,24 @@ pub fn address_api<C: Context, Kind: HostApiKind>(
}
#[derive(Deserialize, Serialize, Parser)]
pub struct AddDomainParams {
pub struct AddPublicDomainParams {
pub domain: InternedString,
#[arg(long)]
pub private: bool,
#[arg(long)]
pub acme: Option<AcmeProvider>,
pub gateway: GatewayId,
}
pub async fn add_domain<Kind: HostApiKind>(
pub async fn add_public_domain<Kind: HostApiKind>(
ctx: RpcContext,
AddDomainParams {
AddPublicDomainParams {
ref domain,
private,
acme,
}: AddDomainParams,
gateway,
}: AddPublicDomainParams,
inheritance: Kind::Inheritance,
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
let root = db
.as_public()
.as_server_info()
.as_network()
.as_domains()
.keys()?
.into_iter()
.find(|root| {
domain == root
|| domain
.strip_suffix(&**root)
.map_or(false, |d| d.ends_with("."))
})
.or_not_found(lazy_format!("root domain for {domain}"))?;
if let Some(acme) = &acme {
if !db
.as_public()
@@ -214,14 +229,10 @@ pub async fn add_domain<Kind: HostApiKind>(
}
}
Kind::host_for(&inheritance, db)?.as_domains_mut().insert(
domain,
&DomainConfig {
root,
public: !private,
acme,
},
)?;
Kind::host_for(&inheritance, db)?
.as_domains_mut()
.as_public_mut()
.insert(domain, &PublicDomainConfig { acme, gateway })?;
check_duplicates(db)
})
.await
@@ -236,7 +247,7 @@ pub struct RemoveDomainParams {
pub domain: InternedString,
}
pub async fn remove_domain<Kind: HostApiKind>(
pub async fn remove_public_domain<Kind: HostApiKind>(
ctx: RpcContext,
RemoveDomainParams { domain }: RemoveDomainParams,
inheritance: Kind::Inheritance,
@@ -245,6 +256,7 @@ pub async fn remove_domain<Kind: HostApiKind>(
.mutate(|db| {
Kind::host_for(&inheritance, db)?
.as_domains_mut()
.as_public_mut()
.remove(&domain)
})
.await
@@ -254,6 +266,50 @@ pub async fn remove_domain<Kind: HostApiKind>(
Ok(())
}
#[derive(Deserialize, Serialize, Parser)]
pub struct AddPrivateDomainParams {
pub domain: InternedString,
}
pub async fn add_private_domain<Kind: HostApiKind>(
ctx: RpcContext,
AddPrivateDomainParams { domain }: AddPrivateDomainParams,
inheritance: Kind::Inheritance,
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
Kind::host_for(&inheritance, db)?
.as_domains_mut()
.as_private_mut()
.mutate(|d| Ok(d.insert(domain)))?;
check_duplicates(db)
})
.await
.result?;
Kind::sync_host(&ctx, inheritance).await?;
Ok(())
}
pub async fn remove_private_domain<Kind: HostApiKind>(
ctx: RpcContext,
RemoveDomainParams { domain }: RemoveDomainParams,
inheritance: Kind::Inheritance,
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
Kind::host_for(&inheritance, db)?
.as_domains_mut()
.as_private_mut()
.mutate(|d| Ok(d.remove(&domain)))
})
.await
.result?;
Kind::sync_host(&ctx, inheritance).await?;
Ok(())
}
#[derive(Deserialize, Serialize, Parser)]
pub struct OnionParams {
pub onion: String,
@@ -272,7 +328,7 @@ pub async fn add_onion<Kind: HostApiKind>(
ErrorKind::InvalidOnionAddress,
)
})?
.parse::<OnionAddressV3>()?;
.parse::<OnionAddress>()?;
ctx.db
.mutate(|db| {
db.as_private().as_key_store().as_onion().get_key(&onion)?;
@@ -303,7 +359,7 @@ pub async fn remove_onion<Kind: HostApiKind>(
ErrorKind::InvalidOnionAddress,
)
})?
.parse::<OnionAddressV3>()?;
.parse::<OnionAddress>()?;
ctx.db
.mutate(|db| {
Kind::host_for(&inheritance, db)?

View File

@@ -8,15 +8,15 @@ use itertools::Itertools;
use models::{HostId, PackageId};
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerExt, OrEmpty, ParentHandler};
use serde::{Deserialize, Serialize};
use torut::onion::OnionAddressV3;
use ts_rs::TS;
use crate::context::RpcContext;
use crate::db::model::DatabaseModel;
use crate::net::forward::AvailablePorts;
use crate::net::host::address::{address_api, DomainConfig, HostAddress};
use crate::net::host::address::{address_api, HostAddress, PublicDomainConfig};
use crate::net::host::binding::{binding, BindInfo, BindOptions};
use crate::net::service_interface::HostnameInfo;
use crate::net::tor::OnionAddress;
use crate::prelude::*;
pub mod address;
@@ -29,12 +29,23 @@ pub mod binding;
pub struct Host {
pub bindings: BTreeMap<u16, BindInfo>,
#[ts(type = "string[]")]
pub onions: BTreeSet<OnionAddressV3>,
#[ts(as = "BTreeMap::<String, DomainConfig>")]
pub domains: BTreeMap<InternedString, DomainConfig>,
pub onions: BTreeSet<OnionAddress>,
pub domains: Domains,
/// COMPUTED: NetService::update
pub hostname_info: BTreeMap<u16, Vec<HostnameInfo>>, // internal port -> Hostnames
}
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct Domains {
#[ts(as = "BTreeMap::<String, PublicDomainConfig>")]
pub public: BTreeMap<InternedString, PublicDomainConfig>,
#[ts(as = "BTreeSet::<String>")]
pub private: BTreeSet<InternedString>,
}
impl AsRef<Host> for Host {
fn as_ref(&self) -> &Host {
self
@@ -51,14 +62,21 @@ impl Host {
.map(|address| HostAddress::Onion { address })
.chain(
self.domains
.public
.iter()
.map(
|(address, DomainConfig { public, acme, .. })| HostAddress::Domain {
.map(|(address, config)| HostAddress::Domain {
address: address.clone(),
public: *public,
acme: acme.clone(),
},
),
public: Some(config.clone()),
}),
)
.chain(
self.domains
.private
.iter()
.map(|address| HostAddress::Domain {
address: address.clone(),
public: None,
}),
)
}
}
@@ -115,12 +133,8 @@ pub fn host_for<'a>(
};
host_info(db, package_id)?.upsert(host_id, || {
let mut h = Host::new();
h.onions.insert(
tor_key
.or_not_found("generated tor key")?
.public()
.get_onion_address(),
);
h.onions
.insert(tor_key.or_not_found("generated tor key")?.onion_address());
Ok(h)
})
}

View File

@@ -2,7 +2,6 @@ use rpc_toolkit::{Context, HandlerExt, ParentHandler};
pub mod acme;
pub mod dns;
pub mod domain;
pub mod forward;
pub mod gateway;
pub mod host;
@@ -30,8 +29,8 @@ pub fn net_api<C: Context>() -> ParentHandler<C> {
acme::acme_api::<C>().with_about("Setup automatic clearnet certificate acquisition"),
)
.subcommand(
"domain",
domain::domain_api::<C>().with_about("Setup clearnet domains"),
"dns",
dns::dns_api::<C>().with_about("Manage and query DNS"),
)
.subcommand(
"gateway",

View File

@@ -9,23 +9,23 @@ use ipnet::IpNet;
use models::{HostId, OptionExt, PackageId};
use tokio::sync::Mutex;
use tokio::task::JoinHandle;
use torut::onion::{OnionAddressV3, TorSecretKeyV3};
use tracing::instrument;
use crate::db::model::public::NetworkInterfaceInfo;
use crate::db::model::Database;
use crate::error::ErrorCollection;
use crate::hostname::Hostname;
use crate::net::dns::DnsController;
use crate::net::forward::{PortForwardController, START9_BRIDGE_IFACE};
use crate::net::forward::PortForwardController;
use crate::net::gateway::{
AndFilter, DynInterfaceFilter, InterfaceFilter, LoopbackFilter, NetworkInterfaceController,
SecureFilter,
AndFilter, DynInterfaceFilter, IdFilter, InterfaceFilter, NetworkInterfaceController, OrFilter,
PublicFilter, SecureFilter,
};
use crate::net::host::address::HostAddress;
use crate::net::host::binding::{AddSslOptions, BindId, BindOptions};
use crate::net::host::{host_for, Host, Hosts};
use crate::net::service_interface::{HostnameInfo, IpHostname, OnionHostname};
use crate::net::tor::TorController;
use crate::net::tor::{OnionAddress, TorController, TorSecretKey};
use crate::net::utils::ipv6_is_local;
use crate::net::vhost::{AlpnInfo, TargetInfo, VHostController};
use crate::prelude::*;
@@ -45,23 +45,13 @@ pub struct NetController {
}
impl NetController {
pub async fn init(
db: TypedPatchDb<Database>,
tor_control: SocketAddr,
tor_socks: SocketAddr,
hostname: &Hostname,
) -> Result<Self, Error> {
pub async fn init(db: TypedPatchDb<Database>, hostname: &Hostname) -> Result<Self, Error> {
let net_iface = Arc::new(NetworkInterfaceController::new(db.clone()));
Ok(Self {
db: db.clone(),
tor: TorController::new(tor_control, tor_socks),
tor: TorController::new().await?,
vhost: VHostController::new(db, net_iface.clone()),
dns: DnsController::init(
net_iface
.watcher
.wait_for_activated(START9_BRIDGE_IFACE.into()),
)
.await?,
dns: DnsController::init(&net_iface.watcher).await?,
forward: PortForwardController::new(net_iface.watcher.subscribe()),
net_iface,
server_hostnames: vec![
@@ -86,7 +76,7 @@ impl NetController {
package: PackageId,
ip: Ipv4Addr,
) -> Result<NetService, Error> {
let dns = self.dns.add(Some(package.clone()), ip).await?;
let dns = self.dns.add_service(Some(package.clone()), ip)?;
let res = NetService::new(NetServiceData {
id: Some(package),
@@ -100,7 +90,7 @@ impl NetController {
}
pub async fn os_bindings(self: &Arc<Self>) -> Result<NetService, Error> {
let dns = self.dns.add(None, HOST_IP.into()).await?;
let dns = self.dns.add_service(None, HOST_IP.into())?;
let service = NetService::new(NetServiceData {
id: None,
@@ -136,7 +126,8 @@ impl NetController {
struct HostBinds {
forwards: BTreeMap<u16, (SocketAddr, DynInterfaceFilter, Arc<()>)>,
vhosts: BTreeMap<(Option<InternedString>, u16), (TargetInfo, Arc<()>)>,
tor: BTreeMap<OnionAddressV3, (OrdMap<u16, SocketAddr>, Vec<Arc<()>>)>,
private_dns: BTreeMap<InternedString, Arc<()>>,
tor: BTreeMap<OnionAddress, (OrdMap<u16, SocketAddr>, Vec<Arc<()>>)>,
}
pub struct NetServiceData {
@@ -227,7 +218,8 @@ impl NetServiceData {
async fn update(&mut self, ctrl: &NetController, id: HostId, host: Host) -> Result<(), Error> {
let mut forwards: BTreeMap<u16, (SocketAddr, DynInterfaceFilter)> = BTreeMap::new();
let mut vhosts: BTreeMap<(Option<InternedString>, u16), TargetInfo> = BTreeMap::new();
let mut tor: BTreeMap<OnionAddressV3, (TorSecretKeyV3, OrdMap<u16, SocketAddr>)> =
let mut private_dns: BTreeSet<InternedString> = BTreeSet::new();
let mut tor: BTreeMap<OnionAddress, (TorSecretKey, OrdMap<u16, SocketAddr>)> =
BTreeMap::new();
let mut hostname_info: BTreeMap<u16, Vec<HostnameInfo>> = BTreeMap::new();
let binds = self.binds.entry(id.clone()).or_default();
@@ -278,38 +270,85 @@ impl NetServiceData {
vhosts.insert(
(Some(hostname), external),
TargetInfo {
filter: LoopbackFilter.into_dyn(),
filter: OrFilter(
IdFilter(
NetworkInterfaceInfo::loopback().0.clone(),
),
IdFilter(
NetworkInterfaceInfo::lxc_bridge().0.clone(),
),
)
.into_dyn(),
acme: None,
addr,
connect_ssl: connect_ssl.clone(),
},
); // TODO: wrap onion ssl stream directly in tor ctrl
}
}
HostAddress::Domain { address, public } => {
if hostnames.insert(address.clone()) {
let address = Some(address.clone());
if ssl.preferred_external_port == 443 {
if let Some(public) = &public {
vhosts.insert(
(address.clone(), 5443),
TargetInfo {
filter: AndFilter(
bind.net.clone(),
AndFilter(
IdFilter(public.gateway.clone()),
PublicFilter { public: false },
),
)
.into_dyn(),
acme: public.acme.clone(),
addr,
connect_ssl: connect_ssl.clone(),
},
);
vhosts.insert(
(address.clone(), 443),
TargetInfo {
filter: AndFilter(
bind.net.clone(),
OrFilter(
IdFilter(public.gateway.clone()),
PublicFilter { public: false },
),
)
.into_dyn(),
acme: public.acme.clone(),
addr,
connect_ssl: connect_ssl.clone(),
},
);
} else {
vhosts.insert(
(address.clone(), 443),
TargetInfo {
filter: AndFilter(
bind.net.clone(),
PublicFilter { public: false },
)
.into_dyn(),
acme: None,
addr,
connect_ssl: connect_ssl.clone(),
},
);
}
}
HostAddress::Domain {
address,
public,
acme,
} => {
if hostnames.insert(address.clone()) {
let address = Some(address.clone());
if ssl.preferred_external_port == 443 {
if public {
} else {
if let Some(public) = public {
vhosts.insert(
(address.clone(), 5443),
(address.clone(), external),
TargetInfo {
filter: bind.net.clone().into_dyn(),
acme: acme.clone(),
addr,
connect_ssl: connect_ssl.clone(),
},
);
}
vhosts.insert(
(address.clone(), 443),
TargetInfo {
filter: bind.net.clone().into_dyn(),
acme,
filter: AndFilter(
bind.net.clone(),
IdFilter(public.gateway.clone()),
)
.into_dyn(),
acme: public.acme.clone(),
addr,
connect_ssl: connect_ssl.clone(),
},
@@ -319,7 +358,7 @@ impl NetServiceData {
(address.clone(), external),
TargetInfo {
filter: bind.net.clone().into_dyn(),
acme,
acme: None,
addr,
connect_ssl: connect_ssl.clone(),
},
@@ -330,6 +369,7 @@ impl NetServiceData {
}
}
}
}
if bind
.options
.secure
@@ -383,7 +423,7 @@ impl NetServiceData {
{
bind_hostname_info.push(HostnameInfo::Ip {
gateway_id: interface.clone(),
public, // TODO: check if port forward is active
public: public.is_some(),
hostname: IpHostname::Domain {
value: address.clone(),
port: None,
@@ -393,7 +433,7 @@ impl NetServiceData {
} else {
bind_hostname_info.push(HostnameInfo::Ip {
gateway_id: interface.clone(),
public,
public: public.is_some(),
hostname: IpHostname::Domain {
value: address.clone(),
port: bind.net.assigned_port,
@@ -448,6 +488,7 @@ impl NetServiceData {
}
}
hostname_info.insert(*port, bind_hostname_info);
private_dns.append(&mut hostnames);
}
}
@@ -497,7 +538,7 @@ impl NetServiceData {
.as_key_store()
.as_onion()
.get_key(tor_addr)?;
tor.insert(key.public().get_onion_address(), (key, tor_binds.clone()));
tor.insert(key.onion_address(), (key, tor_binds.clone()));
for (internal, ports) in &tor_hostname_ports {
let mut bind_hostname_info = hostname_info.remove(internal).unwrap_or_default();
bind_hostname_info.push(HostnameInfo::Onion {
@@ -563,6 +604,22 @@ impl NetServiceData {
}
}
let mut rm = BTreeSet::new();
binds.private_dns.retain(|fqdn, _| {
if private_dns.remove(fqdn) {
true
} else {
rm.insert(fqdn.clone());
false
}
});
for fqdn in private_dns {
binds
.private_dns
.insert(fqdn.clone(), ctrl.dns.add_private_domain(fqdn)?);
}
ctrl.dns.gc_private_domains(&rm)?;
let all = binds
.tor
.keys()
@@ -578,17 +635,15 @@ impl NetServiceData {
if let Some(prev) = prev {
prev
} else {
let rcs = ctrl
.tor
.add(key, tor_binds.iter().map(|(k, v)| (*k, *v)).collect())
.await?;
let service = ctrl.tor.service(key)?;
let rcs = service.proxy_all(tor_binds.iter().map(|(k, v)| (*k, *v)));
(tor_binds, rcs)
},
);
} else {
if let Some((_, rc)) = prev {
drop(rc);
ctrl.tor.gc(Some(onion), None).await?;
ctrl.tor.gc(Some(onion)).await?;
}
}
}

View File

@@ -1,7 +1,6 @@
use std::net::{Ipv4Addr, Ipv6Addr};
use imbl_value::InternedString;
use lazy_format::lazy_format;
use models::{GatewayId, HostId, ServiceInterfaceId};
use serde::{Deserialize, Serialize};
use ts_rs::TS;

File diff suppressed because it is too large Load Diff

View File

@@ -1,13 +1,12 @@
use std::collections::BTreeMap;
use chrono::Utc;
use chrono::{DateTime, NaiveDate, NaiveDateTime, Utc};
use clap::Parser;
use exver::{Version, VersionRange};
use imbl_value::InternedString;
use itertools::Itertools;
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use sqlx::query;
use ts_rs::TS;
use crate::context::CliContext;
@@ -151,6 +150,33 @@ pub struct GetOsVersionParams {
pub device_info: Option<DeviceInfo>,
}
struct PgDateTime(DateTime<Utc>);
impl sqlx::Type<sqlx::Postgres> for PgDateTime {
fn type_info() -> <sqlx::Postgres as sqlx::Database>::TypeInfo {
sqlx::postgres::PgTypeInfo::with_oid(sqlx::postgres::types::Oid(1184))
}
}
impl sqlx::Encode<'_, sqlx::Postgres> for PgDateTime {
fn encode_by_ref(
&self,
buf: &mut <sqlx::Postgres as sqlx::Database>::ArgumentBuffer<'_>,
) -> Result<sqlx::encode::IsNull, sqlx::error::BoxDynError> {
fn postgres_epoch_datetime() -> NaiveDateTime {
NaiveDate::from_ymd_opt(2000, 1, 1)
.expect("expected 2000-01-01 to be a valid NaiveDate")
.and_hms_opt(0, 0, 0)
.expect("expected 2000-01-01T00:00:00 to be a valid NaiveDateTime")
}
let micros = (self.0.naive_utc() - postgres_epoch_datetime())
.num_microseconds()
.ok_or_else(|| format!("NaiveDateTime out of range for Postgres: {:?}", self.0))?;
micros.encode(buf)
}
fn size_hint(&self) -> usize {
std::mem::size_of::<i64>()
}
}
pub async fn get_version(
ctx: RegistryContext,
GetOsVersionParams {
@@ -166,14 +192,13 @@ pub async fn get_version(
if let (Some(pool), Some(server_id), Some(arch)) = (&ctx.pool, server_id, &platform) {
let created_at = Utc::now();
query!(
"INSERT INTO user_activity (created_at, server_id, arch) VALUES ($1, $2, $3)",
created_at,
server_id,
&**arch
)
sqlx::query("INSERT INTO user_activity (created_at, server_id, arch) VALUES ($1, $2, $3)")
.bind(PgDateTime(created_at))
.bind(server_id)
.bind(&**arch)
.execute(pool)
.await?;
.await
.with_kind(ErrorKind::Database)?;
}
let target = target.unwrap_or(VersionRange::Any);
ctx.db

View File

@@ -31,8 +31,7 @@ pub async fn set_health(
.as_status_mut()
.mutate(|main| {
match main {
MainStatus::Running { ref mut health, .. }
| MainStatus::Starting { ref mut health } => {
MainStatus::Running { health, .. } | MainStatus::Starting { health } => {
health.insert(id, result);
}
_ => (),

View File

@@ -59,7 +59,8 @@ pub async fn get_ssl_certificate(
.de()?
.iter()
.map(InternedString::from_display)
.chain(m.as_domains().keys()?)
.chain(m.as_domains().as_public().keys()?)
.chain(m.as_domains().as_private().de()?)
.chain(
m.as_hostname_info()
.de()?
@@ -184,7 +185,8 @@ pub async fn get_ssl_key(
.de()?
.iter()
.map(InternedString::from_display)
.chain(m.as_domains().keys()?)
.chain(m.as_domains().as_public().keys()?)
.chain(m.as_domains().as_private().de()?)
.chain(
m.as_hostname_info()
.de()?

View File

@@ -23,13 +23,13 @@ use crate::install::PKG_ARCHIVE_DIR;
use crate::notifications::{notify, NotificationLevel};
use crate::prelude::*;
use crate::progress::{FullProgressTracker, PhaseProgressTrackerHandle, ProgressTrackerWriter};
use crate::sign::commitment::merkle_archive::MerkleArchiveCommitment;
use crate::s9pk::manifest::PackageId;
use crate::s9pk::merkle_archive::source::FileSource;
use crate::s9pk::S9pk;
use crate::service::rpc::ExitParams;
use crate::service::start_stop::StartStop;
use crate::service::{LoadDisposition, Service, ServiceRef};
use crate::sign::commitment::merkle_archive::MerkleArchiveCommitment;
use crate::status::MainStatus;
use crate::util::serde::{Base32, Pem};
use crate::util::sync::SyncMutex;
@@ -382,7 +382,7 @@ impl ServiceMap {
id: PackageId,
soft: bool,
force: bool,
) -> Result<impl Future<Output = Result<(), Error>> + Send, Error> {
) -> Result<impl Future<Output = Result<(), Error>> + Send + 'static, Error> {
let mut guard = self.get_mut(&id).await;
ctx.db
.mutate(|db| {

View File

@@ -21,6 +21,20 @@ impl<T> SyncMutex<T> {
}
}
#[derive(Debug, Default)]
pub struct SyncRwLock<T>(std::sync::RwLock<T>);
impl<T> SyncRwLock<T> {
pub fn new(t: T) -> Self {
Self(std::sync::RwLock::new(t))
}
pub fn mutate<F: FnOnce(&mut T) -> U, U>(&self, f: F) -> U {
f(&mut *self.0.write().unwrap())
}
pub fn peek<F: FnOnce(&T) -> U, U>(&self, f: F) -> U {
f(&*self.0.read().unwrap())
}
}
struct WatchShared<T> {
version: u64,
data: T,

View File

@@ -13,7 +13,6 @@ use openssl::x509::X509;
use sqlx::postgres::PgConnectOptions;
use sqlx::{PgPool, Row};
use tokio::process::Command;
use torut::onion::TorSecretKeyV3;
use super::v0_3_5::V0_3_0_COMPAT;
use super::{v0_3_5_2, VersionT};
@@ -26,6 +25,7 @@ use crate::disk::mount::util::unmount;
use crate::hostname::Hostname;
use crate::net::forward::AvailablePorts;
use crate::net::keys::KeyStore;
use crate::net::tor::TorSecretKey;
use crate::notifications::Notifications;
use crate::prelude::*;
use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile;
@@ -198,12 +198,9 @@ async fn init_postgres(datadir: impl AsRef<Path>) -> Result<PgPool, Error> {
.port(5433)
.socket("/var/run/postgresql"),
)
.await?
};
sqlx::migrate!()
.run(&secret_store)
.await
.with_kind(crate::ErrorKind::Database)?;
.with_kind(ErrorKind::Database)?
};
Ok(secret_store)
}
@@ -422,10 +419,11 @@ impl VersionT for Version {
async fn previous_cifs(pg: &sqlx::Pool<sqlx::Postgres>) -> Result<CifsTargets, Error> {
let cifs = sqlx::query(r#"SELECT * FROM cifs_shares"#)
.fetch_all(pg)
.await?
.await
.with_kind(ErrorKind::Database)?
.into_iter()
.map(|row| {
let id: i32 = row.try_get("id")?;
let id: i32 = row.try_get("id").with_kind(ErrorKind::Database)?;
Ok::<_, Error>((
id,
Cifs {
@@ -458,13 +456,14 @@ async fn previous_cifs(pg: &sqlx::Pool<sqlx::Postgres>) -> Result<CifsTargets, E
async fn previous_account_info(pg: &sqlx::Pool<sqlx::Postgres>) -> Result<AccountInfo, Error> {
let account_query = sqlx::query(r#"SELECT * FROM account"#)
.fetch_one(pg)
.await?;
.await
.with_kind(ErrorKind::Database)?;
let account = {
AccountInfo {
password: account_query
.try_get("password")
.with_ctx(|_| (ErrorKind::Database, "password"))?,
tor_keys: vec![TorSecretKeyV3::try_from(
tor_keys: vec![TorSecretKey::from_bytes(
if let Some(bytes) = account_query
.try_get::<Option<Vec<u8>>, _>("tor_key")
.with_ctx(|_| (ErrorKind::Database, "tor_key"))?
@@ -479,7 +478,11 @@ async fn previous_account_info(pg: &sqlx::Pool<sqlx::Postgres>) -> Result<Accoun
.with_ctx(|_| (ErrorKind::Database, "password.u8 64"))?
} else {
ed25519_expand_key(
&<[u8; 32]>::try_from(account_query.try_get::<Vec<u8>, _>("network_key")?)
&<[u8; 32]>::try_from(
account_query
.try_get::<Vec<u8>, _>("network_key")
.with_kind(ErrorKind::Database)?,
)
.map_err(|e| {
Error::new(
eyre!("expected vec of len 32, got len {}", e.len()),
@@ -527,7 +530,8 @@ async fn previous_account_info(pg: &sqlx::Pool<sqlx::Postgres>) -> Result<Accoun
async fn previous_ssh_keys(pg: &sqlx::Pool<sqlx::Postgres>) -> Result<SshKeys, Error> {
let ssh_query = sqlx::query(r#"SELECT * FROM ssh_keys"#)
.fetch_all(pg)
.await?;
.await
.with_kind(ErrorKind::Database)?;
let ssh_keys: SshKeys = {
let keys = ssh_query.into_iter().fold(
Ok::<_, Error>(BTreeMap::<InternedString, WithTimeData<SshPubKey>>::new()),
@@ -535,12 +539,12 @@ async fn previous_ssh_keys(pg: &sqlx::Pool<sqlx::Postgres>) -> Result<SshKeys, E
let mut ssh_keys = ssh_keys?;
let time = row
.try_get::<String, _>("created_at")
.map_err(Error::from)
.with_kind(ErrorKind::Database)
.and_then(|x| x.parse::<DateTime<Utc>>().with_kind(ErrorKind::Database))
.with_ctx(|_| (ErrorKind::Database, "openssh_pubkey::created_at"))?;
let value: SshPubKey = row
.try_get::<String, _>("openssh_pubkey")
.map_err(Error::from)
.with_kind(ErrorKind::Database)
.and_then(|x| x.parse().map(SshPubKey).with_kind(ErrorKind::Database))
.with_ctx(|_| (ErrorKind::Database, "openssh_pubkey"))?;
let data = WithTimeData {

View File

@@ -2,12 +2,13 @@ use std::collections::{BTreeMap, BTreeSet};
use exver::{PreReleaseSegment, VersionRange};
use imbl_value::InternedString;
use models::GatewayId;
use serde::{Deserialize, Serialize};
use torut::onion::OnionAddressV3;
use super::v0_3_5::V0_3_0_COMPAT;
use super::{v0_3_6_alpha_9, VersionT};
use crate::net::host::address::DomainConfig;
use crate::net::host::address::PublicDomainConfig;
use crate::net::tor::OnionAddress;
use crate::prelude::*;
lazy_static::lazy_static! {
@@ -21,7 +22,7 @@ lazy_static::lazy_static! {
#[serde(rename_all = "camelCase")]
#[serde(tag = "kind")]
enum HostAddress {
Onion { address: OnionAddressV3 },
Onion { address: OnionAddress },
Domain { address: InternedString },
}
@@ -73,9 +74,8 @@ impl VersionT for Version {
HostAddress::Domain { address } => {
domains.insert(
address.clone(),
DomainConfig {
root: address,
public: true,
PublicDomainConfig {
gateway: GatewayId::from("lo"),
acme: None,
},
);

View File

@@ -1,4 +1,4 @@
use std::collections::BTreeSet;
use std::collections::{BTreeMap, BTreeSet};
use std::sync::Arc;
use exver::{PreReleaseSegment, VersionRange};
@@ -39,7 +39,6 @@ impl VersionT for Version {
.flatten()
.find(|(_, i)| i["ipInfo"]["wanIp"].is_string())
.map(|(g, _)| g.clone());
let mut roots = BTreeSet::new();
for (_, package) in db["public"]["packageData"]
.as_object_mut()
.ok_or_else(|| {
@@ -60,10 +59,8 @@ impl VersionT for Version {
})?
.iter_mut()
{
if default_gateway.is_none() {
host["domains"] = json!({});
continue;
}
let mut public = BTreeMap::new();
let mut private = BTreeSet::new();
for (domain, info) in host["domains"]
.as_object_mut()
.ok_or_else(|| {
@@ -79,26 +76,25 @@ impl VersionT for Version {
let Some(info) = info.as_object_mut() else {
continue;
};
let root = domain.clone();
info.insert("root".into(), Value::String(Arc::new((&*root).to_owned())));
roots.insert(root);
if info["public"].as_bool().unwrap_or_default() && let Some(gateway) = &default_gateway {
info.insert(
"gateway".into(),
Value::String(Arc::new((&**gateway).to_owned())),
);
public.insert(domain.clone(), info.clone());
} else {
private.insert(domain.clone());
}
}
host["domains"] = json!({ "public": &public, "private": &private });
}
}
}
let network = db["public"]["serverInfo"]["network"]
.as_object_mut()
.ok_or_else(|| {
Error::new(
eyre!("expected public.serverInfo.network to be an object"),
ErrorKind::Database,
)
})?;
let network = &mut db["public"]["serverInfo"]["network"];
network["gateways"] = network["networkInterfaces"].clone();
if let Some(gateway) = default_gateway {
for root in roots {
network["domains"][&*root] = json!({ "gateway": gateway });
}
}
network["dns"] = json!({
"dhcp": [],
});
Ok(Value::Null)
}

3
debian/postinst vendored
View File

@@ -83,7 +83,6 @@ $SYSTEMCTL enable ssh.service
$SYSTEMCTL disable wpa_supplicant.service
$SYSTEMCTL mask systemd-networkd-wait-online.service # currently use `NetworkManager-wait-online.service`
$SYSTEMCTL disable docker.service
$SYSTEMCTL disable postgresql.service
$SYSTEMCTL disable tor.service
$SYSTEMCTL disable bluetooth.service
@@ -111,6 +110,8 @@ sed -i '/^\s*#\?\s*issue_discards\s*=\s*/c\issue_discards = 1' /etc/lvm/lvm.conf
sed -i '/\(^\|#\)\s*unqualified-search-registries\s*=\s*/c\unqualified-search-registries = ["docker.io"]' /etc/containers/registries.conf
sed -i 's/\(#\|\^\)\s*\([^=]\+\)=\(suspend\|hibernate\)\s*$/\2=ignore/g' /etc/systemd/logind.conf
sed -i '/\(^\|#\)MulticastDNS=/c\MulticastDNS=no' /etc/systemd/resolved.conf
sed -i '/\(^\|#\)DNS=/c\DNS=127.0.0.1:5355' /etc/systemd/resolved.conf
sed -i '/\(^\|#\)DNSStubListenerExtra=/c\DNSStubListenerExtra=0.0.0.0:53' /etc/systemd/resolved.conf
sed -i 's/\[Service\]/[Service]\nEnvironment=SYSTEMD_LOG_LEVEL=debug/' /lib/systemd/system/systemd-timesyncd.service
sed -i "s/\.debian\./\./g;s/#FallbackNTP=/FallbackNTP=/" /etc/systemd/timesyncd.conf
sed -i '/\(^\|#\)RootDistanceMaxSec=/c\RootDistanceMaxSec=10' /etc/systemd/timesyncd.conf

View File

@@ -161,12 +161,6 @@ if [ "${IB_TARGET_PLATFORM}" = "rockchip64" ]; then
echo "deb https://apt.armbian.com/ ${IB_SUITE} main" > config/archives/armbian.list
fi
curl -fsSL https://deb.torproject.org/torproject.org/A3C4F0F979CAA22CDBA8F512EE8CBC9E886DDD89.asc > config/archives/tor.key
echo "deb [arch=${IB_TARGET_ARCH} signed-by=/etc/apt/trusted.gpg.d/tor.key.gpg] https://deb.torproject.org/torproject.org ${IB_SUITE} main" > config/archives/tor.list
curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o config/archives/docker.key
echo "deb [arch=${IB_TARGET_ARCH} signed-by=/etc/apt/trusted.gpg.d/docker.key.gpg] https://download.docker.com/linux/debian ${IB_SUITE} stable" > config/archives/docker.list
# Dependencies
## Base dependencies

View File

@@ -0,0 +1,3 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type DnsSettings = { dhcp: Array<string>; static: Array<string> | null }

View File

@@ -0,0 +1,7 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { PublicDomainConfig } from "./PublicDomainConfig"
export type Domains = {
public: { [key: string]: PublicDomainConfig }
private: Array<string>
}

View File

@@ -1,12 +1,12 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { BindInfo } from "./BindInfo"
import type { DomainConfig } from "./DomainConfig"
import type { Domains } from "./Domains"
import type { HostnameInfo } from "./HostnameInfo"
export type Host = {
bindings: { [key: number]: BindInfo }
onions: string[]
domains: { [key: string]: DomainConfig }
domains: Domains
/**
* COMPUTED: NetService::update
*/

View File

@@ -1,11 +0,0 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { AcmeProvider } from "./AcmeProvider"
export type HostAddress =
| { kind: "onion"; address: string }
| {
kind: "domain"
address: string
public: boolean
acme: AcmeProvider | null
}

View File

@@ -6,6 +6,8 @@ export type IpInfo = {
scopeId: number
deviceType: NetworkInterfaceType | null
subnets: string[]
lanIp: string[]
wanIp: string | null
ntpServers: string[]
dnsServers: string[]
}

View File

@@ -1,6 +1,7 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { AcmeProvider } from "./AcmeProvider"
import type { AcmeSettings } from "./AcmeSettings"
import type { DnsSettings } from "./DnsSettings"
import type { DomainSettings } from "./DomainSettings"
import type { GatewayId } from "./GatewayId"
import type { Host } from "./Host"
@@ -13,4 +14,5 @@ export type NetworkInfo = {
gateways: { [key: GatewayId]: NetworkInterfaceInfo }
acme: { [key: AcmeProvider]: AcmeSettings }
domains: { [key: string]: DomainSettings }
dns: DnsSettings
}

View File

@@ -1,8 +1,8 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { AcmeProvider } from "./AcmeProvider"
import type { GatewayId } from "./GatewayId"
export type DomainConfig = {
root: string
public: boolean
export type PublicDomainConfig = {
gateway: GatewayId
acme: AcmeProvider | null
}

View File

@@ -66,8 +66,9 @@ export { DepInfo } from "./DepInfo"
export { Description } from "./Description"
export { DestroySubcontainerFsParams } from "./DestroySubcontainerFsParams"
export { DeviceFilter } from "./DeviceFilter"
export { DomainConfig } from "./DomainConfig"
export { DnsSettings } from "./DnsSettings"
export { DomainSettings } from "./DomainSettings"
export { Domains } from "./Domains"
export { Duration } from "./Duration"
export { EchoParams } from "./EchoParams"
export { EditSignerParams } from "./EditSignerParams"
@@ -100,7 +101,6 @@ export { Governor } from "./Governor"
export { Guid } from "./Guid"
export { HardwareRequirements } from "./HardwareRequirements"
export { HealthCheckId } from "./HealthCheckId"
export { HostAddress } from "./HostAddress"
export { HostId } from "./HostId"
export { HostnameInfo } from "./HostnameInfo"
export { Hosts } from "./Hosts"
@@ -161,6 +161,7 @@ export { PathOrUrl } from "./PathOrUrl"
export { Percentage } from "./Percentage"
export { Progress } from "./Progress"
export { ProgressUnits } from "./ProgressUnits"
export { PublicDomainConfig } from "./PublicDomainConfig"
export { Public } from "./Public"
export { RecoverySource } from "./RecoverySource"
export { RegistryAsset } from "./RegistryAsset"

View File

@@ -1,3 +0,0 @@
# System Images
There once was a time that I served to provide backwards compatibility for service configuration and properties rendering. Utilizing me for this functionality has since been deprecated in favor of [JS procedures](https://start9.com/latest/developer-docs/specification/js-procedure). Now I simply serve to provide service backup functionality. This will soon be converted to using JS procedure scripts as well.

View File

@@ -1 +0,0 @@
/docker-images

View File

@@ -1 +0,0 @@
FROM tonistiigi/binfmt

View File

@@ -1,15 +0,0 @@
.DELETE_ON_ERROR:
all: docker-images/aarch64.tar docker-images/x86_64.tar
clean:
rm -rf docker-images
docker-images:
mkdir docker-images
docker-images/aarch64.tar: Dockerfile docker-images
docker buildx build --tag start9/x_system/binfmt --platform=linux/arm64 -o type=docker,dest=docker-images/aarch64.tar .
docker-images/x86_64.tar: Dockerfile docker-images
docker buildx build --tag start9/x_system/binfmt --platform=linux/amd64 -o type=docker,dest=docker-images/x86_64.tar .

View File

@@ -1 +0,0 @@
[{"Config":"3f7fd8db05afd7a5fa804ebfb0e40038068af52b45d8f9a04c1c9fa8d316e511.json","RepoTags":["multitest/binfmt:latest"],"Layers":["753827173463aa2b7471a5be8ac2323a1cf78ccedfdfcc7bb21831d9e0141732/layer.tar","e0747212aee318099c729893bb4cca2ff164f6bd154841d5777636f82b7f18d4/layer.tar"]},{"Config":"143329dfbcc6abb472070a2f0bae5cf9865f178e5391a15592e75b349c803c69.json","RepoTags":["multitest/binfmt:latest"],"Layers":["de6e72d83667be34f782cf1f0a376e0507d189872142e1934f14d3be4813715a/layer.tar","12670876b666af35f388c3d2160436257b390a163a4c7178863badd05394577a/layer.tar"]}]

View File

@@ -1,5 +0,0 @@
/target
**/*.rs.bk
.DS_Store
.vscode
/docker-images

File diff suppressed because it is too large Load Diff

View File

@@ -1,33 +0,0 @@
[package]
authors = ["Aiden McClelland <me@drbonez.dev>"]
edition = "2018"
name = "compat"
version = "0.1.0"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
anyhow = { version = "1.0.40", features = ["backtrace"] }
beau_collector = "0.2.1"
clap = "2.33.3"
dashmap = "5.3.2"
start-os = { path = "../../core/startos", default-features = false }
emver = { version = "0.1.7", git = "https://github.com/Start9Labs/emver-rs.git", features = [
"serde",
] }
failure = "0.1.8"
indexmap = { version = "1.6.2", features = ["serde"] }
imbl-value = "0.1.2"
itertools = "0.10.0"
lazy_static = "1.4"
linear-map = { version = "1.2", features = ["serde_impl"] }
log = "0.4.11"
nix = "0.25.0"
pest = "2.1"
pest_derive = "2.1"
rand = "0.8.5"
regex = "1.4.2"
rust-argon2 = "1.0.0"
serde = { version = "1.0.118", features = ["derive", "rc"] }
serde_json = "1.0.67"
serde_yaml = "0.8.17"

View File

@@ -1 +0,0 @@
FROM start9/compat

View File

@@ -1,24 +0,0 @@
COMPAT_SRC := $(shell find ./src) Cargo.toml Cargo.lock
.DELETE_ON_ERROR:
all: docker-images/aarch64.tar docker-images/x86_64.tar
clean:
cargo clean
rm -rf docker-images
docker-images:
mkdir docker-images
docker-images/aarch64.tar: Dockerfile docker-images
docker buildx build --build-arg ARCH=aarch64 --tag start9/x_system/compat --platform=linux/arm64 -o type=docker,dest=docker-images/aarch64.tar .
docker-images/x86_64.tar: Dockerfile docker-images
docker buildx build --build-arg ARCH=x86_64 --tag start9/x_system/compat --platform=linux/amd64 -o type=docker,dest=docker-images/x86_64.tar .
target/aarch64-unknown-linux-musl/release/compat: $(COMPAT_SRC) ../../core/Cargo.lock
ARCH=aarch64 ./build.sh
target/x86_64-unknown-linux-musl/release/compat: $(COMPAT_SRC) ../../core/Cargo.lock
ARCH=x86_64 ./build.sh

View File

@@ -1,27 +0,0 @@
#!/bin/bash
set -e
shopt -s expand_aliases
if [ -z "$ARCH" ]; then
ARCH=$(uname -m)
fi
if [ "$0" != "./build.sh" ]; then
>&2 echo "Must be run from compat directory"
exit 1
fi
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
alias 'rust-musl-builder'='docker run $USE_TTY --rm -v "$HOME"/.cargo/registry:/root/.cargo/registry -v "$(pwd)":/home/rust/src messense/rust-musl-cross:${ARCH}-musl'
cd ../..
rust-musl-builder sh -c "(git config --global --add safe.directory '*'; cd system-images/compat && cargo build --release --target=${ARCH}-unknown-linux-musl --no-default-features)"
cd system-images/compat
sudo chown -R $USER target
sudo chown -R $USER ~/.cargo

View File

@@ -1,79 +0,0 @@
use std::{path::Path, process::Stdio};
use startos::disk::main::DEFAULT_PASSWORD;
pub fn create_backup(
mountpoint: impl AsRef<Path>,
data_path: impl AsRef<Path>,
) -> Result<(), anyhow::Error> {
let mountpoint = std::fs::canonicalize(mountpoint)?;
let data_path = std::fs::canonicalize(data_path)?;
let ignore_path = data_path.join(".backupignore");
let exclude = if ignore_path.is_file() {
std::fs::read_to_string(ignore_path)?
} else {
String::new()
};
let mut data_cmd = std::process::Command::new("duplicity");
for exclude in exclude.lines().map(|s| s.trim()).filter(|s| !s.is_empty()) {
if exclude.to_string().starts_with('!') {
data_cmd
.arg(format!(
"--include={}",
data_path
.join(exclude.to_string().trim_start_matches('!'))
.display()
))
.arg("--allow-source-mismatch");
} else {
data_cmd
.arg(format!(
"--exclude={}",
data_path.join(exclude.to_string()).display()
))
.arg("--allow-source-mismatch");
}
}
let data_output = data_cmd
.env("PASSPHRASE", DEFAULT_PASSWORD)
.arg(data_path)
.arg(format!("file://{}", mountpoint.display().to_string()))
.arg("--allow-source-mismatch")
.stderr(Stdio::piped())
.output()?;
if !data_output.status.success() {
return Err(anyhow::anyhow!(
"duplicity error: {}",
String::from_utf8(data_output.stderr).unwrap()
));
}
Ok(())
}
pub fn restore_backup(
mountpoint: impl AsRef<Path>,
data_path: impl AsRef<Path>,
) -> Result<(), anyhow::Error> {
let mountpoint = std::fs::canonicalize(mountpoint)?;
let data_path = std::fs::canonicalize(data_path)?;
let data_output = std::process::Command::new("duplicity")
.arg("--allow-source-mismatch")
.env("PASSPHRASE", DEFAULT_PASSWORD)
.arg("--force")
.arg(format!("file://{}", mountpoint.display().to_string()))
.arg(&data_path)
.stderr(Stdio::piped())
.output()?;
if !data_output.status.success() {
return Err(anyhow::anyhow!(
"duplicity error: {}",
String::from_utf8(data_output.stderr).unwrap()
));
}
Ok(())
}

View File

@@ -1,117 +0,0 @@
use std::borrow::Cow;
use std::collections::{BTreeMap, BTreeSet, HashMap};
use std::path::Path;
use beau_collector::BeauCollector;
use linear_map::LinearMap;
use startos::config::action::SetResult;
use startos::config::{spec, Config};
use startos::s9pk::manifest::PackageId;
use startos::status::health_check::HealthCheckId;
pub mod rules;
use anyhow::anyhow;
pub use rules::{ConfigRuleEntry, ConfigRuleEntryWithSuggestions};
use self::rules::ConfigRule;
pub type DepInfo = HashMap<PackageId, DepRuleInfo>;
#[derive(Clone, Debug, serde::Deserialize)]
pub struct DepRuleInfo {
condition: ConfigRule,
health_checks: BTreeSet<HealthCheckId>,
}
pub fn validate_configuration(
name: &str,
config: Config,
rules_path: &Path,
config_path: &Path,
maybe_deps_path: Option<&str>,
) -> Result<SetResult, anyhow::Error> {
let rules: Vec<ConfigRuleEntry> = serde_yaml::from_reader(std::fs::File::open(rules_path)?)?;
let mut cfgs = LinearMap::new();
cfgs.insert(name, Cow::Borrowed(&config));
let mut depends_on = BTreeMap::new();
if let Some(deps_path) = maybe_deps_path.map(Path::new) {
if deps_path.exists() {
let deps: DepInfo = serde_yaml::from_reader(std::fs::File::open(deps_path)?)?;
// check if new config is set to depend on any optional dependencies
depends_on.extend(
deps.into_iter()
.filter(|(_, data)| (data.condition.compiled)(&config, &cfgs))
.map(|(pkg_id, data)| (pkg_id, data.health_checks)),
);
};
}
// check that all configuration rules
let rule_check = rules
.into_iter()
.map(|r| r.check(&config, &cfgs))
.bcollect::<Vec<_>>();
match rule_check {
Ok(_) => {
// create temp config file
serde_yaml::to_writer(
std::fs::create_file(config_path.with_extension("tmp"))?,
&config,
)?;
std::fs::rename(config_path.with_extension("tmp"), config_path)?;
// return set result
Ok(SetResult { depends_on })
}
Err(e) => Err(anyhow!("{}", e)),
}
}
pub fn validate_dependency_configuration(
name: &str,
config: &Option<Config>,
parent_name: &str,
parent_config: Config,
rules_path: &Path,
) -> Result<(), anyhow::Error> {
let rules: Vec<ConfigRuleEntry> = serde_yaml::from_reader(std::fs::File::open(rules_path)?)?;
let mut cfgs = LinearMap::new();
cfgs.insert(parent_name, Cow::Borrowed(&parent_config));
if let Some(config) = config {
cfgs.insert(name, Cow::Borrowed(&config))
} else {
cfgs.insert(name, Cow::Owned(imbl_value::InOMap::new()))
};
let rule_check = rules
.into_iter()
.map(|r| r.check(&parent_config, &cfgs))
.bcollect::<Vec<_>>();
match rule_check {
Ok(_) => Ok(()),
Err(e) => Err(anyhow!("{}", e)),
}
}
pub fn apply_dependency_configuration(
package_id: &str,
config: Option<Config>,
dependency_id: &str,
mut dep_config: Config,
rules_path: &Path,
) -> Result<Config, anyhow::Error> {
let rules: Vec<ConfigRuleEntryWithSuggestions> =
serde_yaml::from_reader(std::fs::File::open(rules_path)?)?;
let mut cfgs = LinearMap::new();
cfgs.insert(dependency_id, Cow::Owned(dep_config.clone()));
match config {
Some(config) => cfgs.insert(package_id, Cow::Owned(config.clone())),
None => cfgs.insert(package_id, Cow::Owned(imbl_value::InOMap::new())),
};
let rule_check = rules
.into_iter()
.map(|r| r.apply(dependency_id, &mut dep_config, &mut cfgs))
.bcollect::<Vec<_>>();
match rule_check {
Ok(_) => Ok(dep_config),
Err(e) => Err(anyhow!("{}", e)),
}
}

View File

@@ -1,76 +0,0 @@
num = @{ int ~ ("." ~ ASCII_DIGIT*)? ~ (^"e" ~ int)? }
int = @{ ("+" | "-")? ~ ASCII_DIGIT+ }
raw_string = @{ (!("\\" | "\"") ~ ANY)+ }
predefined = @{ "n" | "r" | "t" | "\\" | "0" | "\"" | "'" }
escape = @{ "\\" ~ predefined }
str = @{ "\"" ~ (raw_string | escape)* ~ "\"" }
ident_char = @{ ASCII_ALPHANUMERIC | "-" }
sub_ident = _{ sub_ident_regular | sub_ident_index | sub_ident_any | sub_ident_all | sub_ident_fn }
sub_ident_regular = { sub_ident_regular_base | sub_ident_regular_expr }
sub_ident_regular_base = @{ ASCII_ALPHA ~ ident_char* }
sub_ident_regular_expr = ${ "[" ~ str_expr ~ "]" }
sub_ident_index = { sub_ident_index_base | sub_ident_index_expr }
sub_ident_index_base = @{ ASCII_DIGIT+ }
sub_ident_index_expr = ${ "[" ~ num_expr ~ "]" }
sub_ident_any = @{ "*" }
sub_ident_all = @{ "&" }
sub_ident_fn = ${ "[" ~ list_access_function ~ "]"}
list_access_function = _{ list_access_function_first | list_access_function_last | list_access_function_any | list_access_function_all }
list_access_function_first = !{ "first" ~ "(" ~ sub_ident_regular ~ "=>" ~ bool_expr ~ ")" }
list_access_function_last = !{ "last" ~ "(" ~ sub_ident_regular ~ "=>" ~ bool_expr ~ ")" }
list_access_function_any = !{ "any" ~ "(" ~ sub_ident_regular ~ "=>" ~ bool_expr ~ ")" }
list_access_function_all = !{ "all" ~ "(" ~ sub_ident_regular ~ "=>" ~ bool_expr ~ ")" }
app_id = ${ "[" ~ sub_ident_regular ~ "]" }
ident = _{ (app_id ~ ".")? ~ sub_ident_regular ~ ("." ~ sub_ident)* }
bool_var = ${ ident ~ "?" }
num_var = ${ "#" ~ ident }
str_var = ${ "'" ~ ident }
any_var = ${ ident }
bool_op = _{ and | or | xor }
and = { "AND" }
or = { "OR" }
xor = { "XOR" }
num_cmp_op = _{ lt | lte | eq | neq | gt | gte }
str_cmp_op = _{ lt | lte | eq | neq | gt | gte }
lt = { "<" }
lte = { "<=" }
eq = { "=" }
neq = { "!=" }
gt = { ">" }
gte = { ">=" }
num_op = _{ add | sub | mul | div | pow }
str_op = _{ add }
add = { "+" }
sub = { "-" }
mul = { "*" }
div = { "/" }
pow = { "^" }
num_expr = !{ num_term ~ (num_op ~ num_term)* }
num_term = _{ num | num_var | "(" ~ num_expr ~ ")" }
str_expr = !{ str_term ~ (str_op ~ str_term)* }
str_term = _{ str | str_var | "(" ~ str_expr ~ ")" }
num_cmp_expr = { num_expr ~ num_cmp_op ~ num_expr }
str_cmp_expr = { str_expr ~ str_cmp_op ~ str_expr }
bool_expr = !{ bool_term ~ (bool_op ~ bool_term)* }
inv_bool_expr = { "!(" ~ bool_expr ~ ")" }
bool_term = _{ bool_var | "(" ~ bool_expr ~ ")" | inv_bool_expr | num_cmp_expr | str_cmp_expr }
val_expr = _{ any_var | str_expr | num_expr | bool_expr }
rule = _{ SOI ~ bool_expr ~ EOI }
reference = _{ SOI ~ any_var ~ EOI }
value = _{ SOI ~ val_expr ~ EOI }
del_action = _{ SOI ~ "FROM" ~ any_var ~ "AS" ~ sub_ident_regular ~ "WHERE" ~ bool_expr ~ EOI }
obj_key = _{ SOI ~ sub_ident_regular ~ EOI }
WHITESPACE = _{ " " | "\t" }

File diff suppressed because it is too large Load Diff

View File

@@ -1,337 +0,0 @@
use std::{
env,
fs::File,
io::{stdin, stdout},
path::Path,
};
#[macro_use]
extern crate failure;
extern crate pest;
#[macro_use]
extern crate pest_derive;
mod backup;
mod config;
use anyhow::anyhow;
use backup::{create_backup, restore_backup};
use clap::{App, Arg, SubCommand};
use config::{
apply_dependency_configuration, validate_configuration, validate_dependency_configuration,
};
use serde_json::json;
use startos::config::action::ConfigRes;
/// Message emitted by the `properties` subcommand when `start9/stats.yaml`
/// does not exist yet and no `fallbackMessage` argument was supplied.
const PROPERTIES_FALLBACK_MESSAGE: &str =
    "Could not find properties. The service might still be starting";
/// Result discriminant for compat operations.
// NOTE(review): not referenced anywhere in this file — possibly consumed by
// another module, or dead code; verify before removing.
pub enum CompatRes {
    SetResult,
    ConfigRes,
}
/// Binary entry point: run [`inner_main`] and, on failure, print the error
/// (with its backtrace at debug log level) and exit with status 1.
fn main() {
    if let Err(err) = inner_main() {
        eprintln!("{}", err);
        log::debug!("{:?}", err.backtrace());
        drop(err);
        std::process::exit(1)
    }
}
/// Parse the CLI and dispatch to the requested compat action.
///
/// Subcommands:
/// * `config get` — print the service's current config (if any) plus its spec as YAML.
/// * `config set` — validate a config read from stdin against the rules file
///   and persist it to `<mountpoint>/start9/config.yaml`.
/// * `dependency check` — validate a dependent's on-disk config against a
///   dependency's rules (dependency config arrives on stdin).
/// * `dependency auto-configure` — rewrite a dependent's config to satisfy a dependency.
/// * `duplicity create` / `duplicity restore` — back up / restore a data directory.
/// * `properties` — emit `start9/stats.yaml` as JSON, or a fallback object when
///   the file does not exist yet.
///
/// Returns an error (printed by `main`, exit status 1) on any failure. An
/// unrecognized subcommand panics: that indicates a bug in the CLI definition,
/// not bad user input (clap rejects unknown user input earlier).
fn inner_main() -> Result<(), anyhow::Error> {
    let app = App::new("compat")
        .subcommand(
            SubCommand::with_name("config")
                .subcommand(
                    SubCommand::with_name("get")
                        .arg(
                            Arg::with_name("mountpoint")
                                .help("Path to the data mountpoint")
                                .required(true),
                        )
                        .arg(
                            Arg::with_name("spec")
                                .help("The path to the config spec in the container")
                                .required(true),
                        ),
                )
                .subcommand(
                    SubCommand::with_name("set")
                        .arg(
                            Arg::with_name("package_id")
                                .help("The `id` field from the manifest file")
                                .required(true),
                        )
                        .arg(
                            Arg::with_name("mountpoint")
                                .help("Path to the data mountpoint")
                                .required(true),
                        )
                        .arg(
                            Arg::with_name("assets")
                                .help("Path to the rules file")
                                .required(true),
                        )
                        .arg(
                            Arg::with_name("dependencies")
                                .help("Path to rules for optional dependency config"),
                        ),
                ),
        )
        .subcommand(
            SubCommand::with_name("dependency")
                .subcommand(
                    SubCommand::with_name("check")
                        .arg(
                            Arg::with_name("dependent_package_id")
                                // typo fixed: was "(the child/depdendent)"
                                .help("Package identifier of this package (the child/dependent)")
                                .required(true),
                        )
                        .arg(
                            Arg::with_name("dependency_package_id")
                                .help("Identifier of the dependency")
                                .required(true),
                        )
                        .arg(
                            Arg::with_name("mountpoint")
                                // typo fixed: was " ountpoint for the dependent's config file"
                                .help("Mountpoint for the dependent's config file")
                                .required(true),
                        )
                        .arg(
                            Arg::with_name("assets")
                                .help("Path to the dependency's config rules file")
                                .required(true),
                        ),
                )
                .subcommand(
                    SubCommand::with_name("auto-configure")
                        .arg(
                            Arg::with_name("dependent_package_id")
                                // typo fixed: was "(the child/depdendent)"
                                .help("Package identifier of this package (the child/dependent)")
                                .required(true),
                        )
                        .arg(
                            Arg::with_name("dependency_package_id")
                                .help("Package identifier of the parent/dependency")
                                .required(true),
                        )
                        .arg(
                            Arg::with_name("mountpoint")
                                .help("Mountpoint for the dependent's config file")
                                .required(true),
                        )
                        .arg(
                            Arg::with_name("assets")
                                .help("Path to the dependency's config rules file")
                                .required(true),
                        ),
                ),
        )
        .subcommand(
            SubCommand::with_name("duplicity")
                .subcommand(
                    SubCommand::with_name("create")
                        .arg(
                            Arg::with_name("mountpoint")
                                .help("The backups mount point")
                                .required(true),
                        )
                        .arg(
                            Arg::with_name("datapath")
                                .help("The path to the data to be backed up in the container")
                                .required(true),
                        ),
                )
                .subcommand(
                    SubCommand::with_name("restore")
                        .arg(
                            Arg::with_name("mountpoint")
                                .help("The backups mount point")
                                .required(true),
                        )
                        .arg(
                            Arg::with_name("datapath")
                                .help("The path to the data to be restored to the container")
                                .required(true),
                        ),
                ),
        )
        .subcommand(
            SubCommand::with_name("properties").arg(
                Arg::with_name("mountpoint")
                    .help("The data directory of the service to mount to.")
                    .required(true),
            ).arg(
                Arg::with_name("fallbackMessage")
                    .help("The message to indicate that the startup is still working, or stats.yaml couldn't be found")
                    .required(false),
            ),
        );
    let matches = app.get_matches();
    // `value_of(...).unwrap()` below is safe for args declared `required(true)`:
    // clap has already rejected the invocation if they are missing.
    // File/YAML failures are propagated with `?` (previously `.unwrap()`, which
    // panicked instead of producing the clean error path through `main`).
    match matches.subcommand() {
        ("config", Some(sub_m)) => match sub_m.subcommand() {
            ("get", Some(sub_m)) => {
                let cfg_path =
                    Path::new(sub_m.value_of("mountpoint").unwrap()).join("start9/config.yaml");
                // A missing config file is not an error: the service may simply
                // never have been configured yet.
                let cfg = if cfg_path.exists() {
                    Some(serde_yaml::from_reader(File::open(cfg_path)?)?)
                } else {
                    None
                };
                let spec_path = Path::new(sub_m.value_of("spec").unwrap());
                let spec = serde_yaml::from_reader(File::open(spec_path)?)?;
                serde_yaml::to_writer(stdout(), &ConfigRes { config: cfg, spec })?;
                Ok(())
            }
            ("set", Some(sub_m)) => {
                // The new config arrives on stdin as YAML.
                let config = serde_yaml::from_reader(stdin())?;
                let cfg_path = Path::new(sub_m.value_of("mountpoint").unwrap()).join("start9");
                if !cfg_path.exists() {
                    std::fs::create_dir_all(&cfg_path)?;
                };
                let rules_path = Path::new(sub_m.value_of("assets").unwrap());
                let name = sub_m.value_of("package_id").unwrap();
                let deps_path = sub_m.value_of("dependencies");
                let res = validate_configuration(
                    &name,
                    config,
                    rules_path,
                    &cfg_path.join("config.yaml"),
                    deps_path,
                )?;
                serde_yaml::to_writer(stdout(), &res)?;
                Ok(())
            }
            (subcmd, _) => {
                panic!("Unknown subcommand: {}", subcmd);
            }
        },
        ("dependency", Some(sub_m)) => match sub_m.subcommand() {
            ("check", Some(sub_m)) => {
                // The dependency's (parent's) config arrives on stdin as YAML.
                let parent_config = serde_yaml::from_reader(stdin())?;
                let cfg_path =
                    Path::new(sub_m.value_of("mountpoint").unwrap()).join("start9/config.yaml");
                let config = if cfg_path.exists() {
                    Some(serde_yaml::from_reader(File::open(cfg_path)?)?)
                } else {
                    None
                };
                let rules_path = Path::new(sub_m.value_of("assets").unwrap());
                let name = sub_m.value_of("dependent_package_id").unwrap();
                let parent_name = sub_m.value_of("dependency_package_id").unwrap();
                // On failure, the error string is the config rule's failure
                // description and is surfaced to the caller verbatim.
                let res = validate_dependency_configuration(
                    name,
                    &config,
                    parent_name,
                    parent_config,
                    rules_path,
                )?;
                serde_yaml::to_writer(stdout(), &res)?;
                Ok(())
            }
            ("auto-configure", Some(sub_m)) => {
                // The dependency's config arrives on stdin as YAML.
                let dep_config = serde_yaml::from_reader(stdin())?;
                let cfg_path =
                    Path::new(sub_m.value_of("mountpoint").unwrap()).join("start9/config.yaml");
                let config = if cfg_path.exists() {
                    Some(serde_yaml::from_reader(File::open(cfg_path)?)?)
                } else {
                    None
                };
                let rules_path = Path::new(sub_m.value_of("assets").unwrap());
                let package_id = sub_m.value_of("dependent_package_id").unwrap();
                let dependency_id = sub_m.value_of("dependency_package_id").unwrap();
                let res = apply_dependency_configuration(
                    package_id,
                    config,
                    dependency_id,
                    dep_config,
                    rules_path,
                )?;
                serde_yaml::to_writer(stdout(), &res)?;
                Ok(())
            }
            (subcmd, _) => {
                panic!("Unknown subcommand: {}", subcmd);
            }
        },
        ("duplicity", Some(sub_m)) => match sub_m.subcommand() {
            ("create", Some(sub_m)) => {
                let res = create_backup(
                    sub_m.value_of("mountpoint").unwrap(),
                    sub_m.value_of("datapath").unwrap(),
                )
                .map_err(|e| anyhow!("Could not create backup: {}", e))?;
                serde_yaml::to_writer(stdout(), &res)?;
                Ok(())
            }
            ("restore", Some(sub_m)) => {
                let res = restore_backup(
                    sub_m.value_of("mountpoint").unwrap(),
                    sub_m.value_of("datapath").unwrap(),
                )
                .map_err(|e| anyhow!("Could not restore backup: {}", e))?;
                serde_yaml::to_writer(stdout(), &res)?;
                Ok(())
            }
            (subcmd, _) => {
                panic!("Unknown subcommand: {}", subcmd);
            }
        },
        ("properties", Some(sub_m)) => {
            let stats_path =
                Path::new(sub_m.value_of("mountpoint").unwrap()).join("start9/stats.yaml");
            let stats: serde_json::Value = if stats_path.exists() {
                serde_yaml::from_reader(File::open(stats_path)?)?
            } else {
                // stats.yaml may not exist yet (e.g. the service is still
                // starting); emit a placeholder property instead of failing.
                let fallback_message: &str = sub_m
                    .value_of("fallbackMessage")
                    .unwrap_or(PROPERTIES_FALLBACK_MESSAGE);
                json!({
                    "version": 2i64,
                    "data": {
                        "Not Ready": {
                            "type": "string",
                            "value": fallback_message,
                            "qr": false,
                            "copyable": false,
                            "masked": false,
                            "description":"Fallback Message When Properties could not be found"
                        }
                    }
                })
            };
            serde_json::to_writer(stdout(), &stats)?;
            Ok(())
        }
        (subcmd, _) => {
            panic!("Unknown subcommand: {}", subcmd);
        }
    }
}

View File

@@ -1 +0,0 @@
/docker-images

View File

@@ -1,3 +0,0 @@
# Minimal utility image: bash shell plus JSON/YAML/HTTP tooling.
FROM alpine:latest
# `apk add --no-cache` fetches a fresh package index and leaves nothing under
# /var/cache/apk, so the previous separate `apk update` was redundant and only
# bloated the layer with the cached index.
RUN apk add --no-cache yq jq curl bash

View File

@@ -1,15 +0,0 @@
# Build the utils system image for both supported architectures as
# docker-archive tarballs under docker-images/.

# Remove a half-written tarball if its recipe fails, so an aborted build
# can't leave a corrupt target that looks up to date.
.DELETE_ON_ERROR:

# `all` and `clean` are commands, not files — declare them phony so a stray
# file with either name can't shadow them.
.PHONY: all clean

all: docker-images/aarch64.tar docker-images/x86_64.tar

clean:
	rm -rf docker-images

# Output directory. It is an order-only prerequisite (`|`) below: writing a
# tarball updates the directory's mtime, so using it as a normal prerequisite
# would force a rebuild of the sibling target on every run.
docker-images:
	mkdir -p $@

docker-images/aarch64.tar: Dockerfile | docker-images
	docker buildx build --tag start9/x_system/utils --platform=linux/arm64 -o type=docker,dest=$@ .

docker-images/x86_64.tar: Dockerfile | docker-images
	docker buildx build --tag start9/x_system/utils --platform=linux/amd64 -o type=docker,dest=$@ .