enabling support for wireguard and firewall (#2713)

* wip: enabling support for wireguard and firewall

* wip

* wip

* wip

* wip

* wip

* implement some things

* fix warning

* wip

* alpha.23

* misc fixes

* remove ufw since no longer required

* remove debug info

* add cli bindings

* debugging

* fixes

* individualized acme and privacy settings for domains and bindings

* sdk version bump

* migration

* misc fixes

* refactor Host::update

* debug info

* refactor webserver

* misc fixes

* misc fixes

* refactor port forwarding

* recheck interfaces every 5 min if no dbus event

* misc fixes and cleanup

* misc fixes
This commit is contained in:
Aiden McClelland
2025-01-09 16:34:34 -07:00
committed by GitHub
parent 45ca9405d3
commit 29e8210782
144 changed files with 4878 additions and 2398 deletions

View File

@@ -1,40 +0,0 @@
# Setting up clearnet for a service interface
NOTE: this guide is for HTTPS only! Other configurations may require a more bespoke setup depending on the service. Please consult the service documentation or the Start9 Community for help with non-HTTPS applications
## Initialize ACME certificate generation
The following command will register your device with an ACME certificate provider, such as Let's Encrypt.
This only needs to be done once.
```
start-cli net acme init --provider=letsencrypt --contact="mailto:me@drbonez.dev"
```
- `provider` can be `letsencrypt`, `letsencrypt-staging` (useful if you're doing a lot of testing and want to avoid being rate limited), or the URL of any provider that supports the [RFC8555](https://datatracker.ietf.org/doc/html/rfc8555) ACME API
- `contact` can be any valid contact URL, typically a `mailto:` URL. It can be specified multiple times to set multiple contacts
## Whitelist a domain for ACME certificate acquisition
The following command will tell the OS to use ACME certificates instead of system-signed ones for the provided domain. In this example, `testing.drbonez.dev`.
This must be done for every domain you wish to host on clearnet.
```
start-cli net acme domain add "testing.drbonez.dev"
```
## Forward clearnet port
Go into your router settings, and map port 443 on your router to port 5443 on your StartOS device. This one port should cover most use cases.
## Add domain to service host
The following command will tell the OS to route HTTPS requests arriving from the WAN for the provided hostname to the specified service. In this example, we are adding `testing.drbonez.dev` to the host `ui-multi` on the package `hello-world`. To see a list of available host IDs for a given package, run `start-cli package host <PACKAGE> list`.
This must be done for every domain you wish to host on clearnet.
```
start-cli package host hello-world address ui-multi add testing.drbonez.dev
```

View File

@@ -26,6 +26,7 @@ GZIP_BIN := $(shell which pigz || which gzip)
TAR_BIN := $(shell which gtar || which tar) TAR_BIN := $(shell which gtar || which tar)
COMPILED_TARGETS := core/target/$(ARCH)-unknown-linux-musl/release/startbox core/target/$(ARCH)-unknown-linux-musl/release/containerbox system-images/compat/docker-images/$(ARCH).tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar container-runtime/rootfs.$(ARCH).squashfs COMPILED_TARGETS := core/target/$(ARCH)-unknown-linux-musl/release/startbox core/target/$(ARCH)-unknown-linux-musl/release/containerbox system-images/compat/docker-images/$(ARCH).tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar container-runtime/rootfs.$(ARCH).squashfs
ALL_TARGETS := $(STARTD_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) cargo-deps/$(ARCH)-unknown-linux-musl/release/startos-backup-fs $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo cargo-deps/aarch64-unknown-linux-musl/release/pi-beep; fi) $(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]; then echo cargo-deps/$(ARCH)-unknown-linux-musl/release/tokio-console; fi') $(PLATFORM_FILE) ALL_TARGETS := $(STARTD_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) cargo-deps/$(ARCH)-unknown-linux-musl/release/startos-backup-fs $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo cargo-deps/aarch64-unknown-linux-musl/release/pi-beep; fi) $(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]; then echo cargo-deps/$(ARCH)-unknown-linux-musl/release/tokio-console; fi') $(PLATFORM_FILE)
REBUILD_TYPES = 1
ifeq ($(REMOTE),) ifeq ($(REMOTE),)
mkdir = mkdir -p $1 mkdir = mkdir -p $1
@@ -226,7 +227,7 @@ container-runtime/node_modules/.package-lock.json: container-runtime/package.jso
npm --prefix container-runtime ci npm --prefix container-runtime ci
touch container-runtime/node_modules/.package-lock.json touch container-runtime/node_modules/.package-lock.json
sdk/base/lib/osBindings/index.ts: core/startos/bindings/index.ts sdk/base/lib/osBindings/index.ts: $(shell if [ "$(REBUILD_TYPES)" -ne 0 ]; then echo core/startos/bindings/index.ts; fi)
mkdir -p sdk/base/lib/osBindings mkdir -p sdk/base/lib/osBindings
rsync -ac --delete core/startos/bindings/ sdk/base/lib/osBindings/ rsync -ac --delete core/startos/bindings/ sdk/base/lib/osBindings/
touch sdk/base/lib/osBindings/index.ts touch sdk/base/lib/osBindings/index.ts

View File

@@ -11,6 +11,7 @@ cryptsetup
curl curl
dnsutils dnsutils
dmidecode dmidecode
dnsutils
dosfstools dosfstools
e2fsprogs e2fsprogs
ecryptfs-utils ecryptfs-utils
@@ -57,4 +58,5 @@ systemd-timesyncd
tor tor
util-linux util-linux
vim vim
wireguard-tools
wireless-tools wireless-tools

View File

@@ -1 +0,0 @@
start-cli net dhcp update $interface

View File

@@ -4,7 +4,7 @@ set -e
# install dependencies # install dependencies
/usr/bin/apt update /usr/bin/apt update
/usr/bin/apt install --no-install-recommends -y xserver-xorg x11-xserver-utils xinit firefox-esr matchbox-window-manager libnss3-tools /usr/bin/apt install --no-install-recommends -y xserver-xorg x11-xserver-utils xinit firefox-esr matchbox-window-manager libnss3-tools p11-kit-modules
#Change a default preference set by stock debian firefox-esr #Change a default preference set by stock debian firefox-esr
sed -i 's|^pref("extensions.update.enabled", true);$|pref("extensions.update.enabled", false);|' /etc/firefox-esr/firefox-esr.js sed -i 's|^pref("extensions.update.enabled", true);$|pref("extensions.update.enabled", false);|' /etc/firefox-esr/firefox-esr.js
@@ -83,6 +83,8 @@ user_pref("toolkit.telemetry.updatePing.enabled", false);
user_pref("toolkit.telemetry.cachedClientID", ""); user_pref("toolkit.telemetry.cachedClientID", "");
EOF EOF
ln -sf /usr/lib/$(uname -m)-linux-gnu/pkcs11/p11-kit-trust.so /usr/lib/firefox-esr/libnssckbi.so
# create kiosk script # create kiosk script
cat > /home/kiosk/kiosk.sh << 'EOF' cat > /home/kiosk/kiosk.sh << 'EOF'
#!/bin/sh #!/bin/sh

View File

@@ -216,12 +216,6 @@ export function makeEffects(context: EffectContext): Effects {
}) as ReturnType<T.Effects["getServiceInterface"]> }) as ReturnType<T.Effects["getServiceInterface"]>
}, },
getPrimaryUrl(...[options]: Parameters<T.Effects["getPrimaryUrl"]>) {
return rpcRound("get-primary-url", {
...options,
callback: context.callbacks?.addCallback(options.callback) || null,
}) as ReturnType<T.Effects["getPrimaryUrl"]>
},
getServicePortForward( getServicePortForward(
...[options]: Parameters<T.Effects["getServicePortForward"]> ...[options]: Parameters<T.Effects["getServicePortForward"]>
) { ) {

View File

@@ -212,16 +212,22 @@ export class RpcListener {
s.on("data", (a) => s.on("data", (a) =>
Promise.resolve(a) Promise.resolve(a)
.then((b) => b.toString()) .then((b) => b.toString())
.then(logData("dataIn")) .then((buf) => {
.then(jsonParse) for (let s of buf.split("\n")) {
.then(captureId) if (s)
.then((x) => this.dealWithInput(x)) Promise.resolve(s)
.catch(mapError) .then(logData("dataIn"))
.then(logData("response")) .then(jsonParse)
.then(writeDataToSocket) .then(captureId)
.catch((e) => { .then((x) => this.dealWithInput(x))
console.error(`Major error in socket handling: ${e}`) .catch(mapError)
console.debug(`Data in: ${a.toString()}`) .then(logData("response"))
.then(writeDataToSocket)
.catch((e) => {
console.error(`Major error in socket handling: ${e}`)
console.debug(`Data in: ${a.toString()}`)
})
}
}), }),
) )
}) })
@@ -390,7 +396,7 @@ export class RpcListener {
.defaultToLazy(() => { .defaultToLazy(() => {
console.warn( console.warn(
`Coudln't parse the following input ${JSON.stringify(input)}`, `Couldn't parse the following input ${JSON.stringify(input)}`,
) )
return { return {
jsonrpc, jsonrpc,

View File

@@ -425,7 +425,6 @@ export class SystemForEmbassy implements System {
name: interfaceValue.name, name: interfaceValue.name,
id: `${id}-${internal}`, id: `${id}-${internal}`,
description: interfaceValue.description, description: interfaceValue.description,
hasPrimary: false,
type: type:
interfaceValue.ui && interfaceValue.ui &&
(origin.scheme === "http" || origin.sslScheme === "https") (origin.scheme === "http" || origin.sslScheme === "https")

View File

@@ -74,8 +74,8 @@ export class SystemForStartOs implements System {
async exit(): Promise<void> {} async exit(): Promise<void> {}
async start(effects: Effects): Promise<void> { async start(effects: Effects): Promise<void> {
if (this.runningMain) return
effects.constRetry = utils.once(() => effects.restart()) effects.constRetry = utils.once(() => effects.restart())
if (this.runningMain) await this.stop()
let mainOnTerm: () => Promise<void> | undefined let mainOnTerm: () => Promise<void> | undefined
const started = async (onTerm: () => Promise<void>) => { const started = async (onTerm: () => Promise<void>) => {
await effects.setMainStatus({ status: "running" }) await effects.setMainStatus({ status: "running" })
@@ -98,8 +98,11 @@ export class SystemForStartOs implements System {
async stop(): Promise<void> { async stop(): Promise<void> {
if (this.runningMain) { if (this.runningMain) {
await this.runningMain.stop() try {
this.runningMain = undefined await this.runningMain.stop()
} finally {
this.runningMain = undefined
}
} }
} }
} }

1149
core/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -40,3 +40,4 @@ tokio = { version = "1", features = ["full"] }
torut = { git = "https://github.com/Start9Labs/torut.git", branch = "update/dependencies" } torut = { git = "https://github.com/Start9Labs/torut.git", branch = "update/dependencies" }
tracing = "0.1.39" tracing = "0.1.39"
yasi = "0.1.5" yasi = "0.1.5"
zbus = "5"

View File

@@ -90,6 +90,7 @@ pub enum ErrorKind {
Lxc = 72, Lxc = 72,
Cancelled = 73, Cancelled = 73,
Git = 74, Git = 74,
DBus = 75,
} }
impl ErrorKind { impl ErrorKind {
pub fn as_str(&self) -> &'static str { pub fn as_str(&self) -> &'static str {
@@ -169,6 +170,7 @@ impl ErrorKind {
Lxc => "LXC Error", Lxc => "LXC Error",
Cancelled => "Cancelled", Cancelled => "Cancelled",
Git => "Git Error", Git => "Git Error",
DBus => "DBus Error",
} }
} }
} }
@@ -327,6 +329,11 @@ impl From<torut::onion::OnionAddressParseError> for Error {
Error::new(e, ErrorKind::Tor) Error::new(e, ErrorKind::Tor)
} }
} }
impl From<zbus::Error> for Error {
fn from(e: zbus::Error) -> Self {
Error::new(e, ErrorKind::DBus)
}
}
impl From<rustls::Error> for Error { impl From<rustls::Error> for Error {
fn from(e: rustls::Error) -> Self { fn from(e: rustls::Error) -> Self {
Error::new(e, ErrorKind::OpenSsl) Error::new(e, ErrorKind::OpenSsl)

View File

@@ -14,7 +14,7 @@ keywords = [
name = "start-os" name = "start-os"
readme = "README.md" readme = "README.md"
repository = "https://github.com/Start9Labs/start-os" repository = "https://github.com/Start9Labs/start-os"
version = "0.3.6-alpha.9" version = "0.3.6-alpha.10"
license = "MIT" license = "MIT"
[lib] [lib]
@@ -50,7 +50,7 @@ test = []
[dependencies] [dependencies]
aes = { version = "0.7.5", features = ["ctr"] } aes = { version = "0.7.5", features = ["ctr"] }
async-acme = { version = "0.5.0", git = "https://github.com/dr-bonez/async-acme.git", features = [ async-acme = { version = "0.6.0", git = "https://github.com/dr-bonez/async-acme.git", features = [
"use_rustls", "use_rustls",
"use_tokio", "use_tokio",
] } ] }
@@ -62,7 +62,6 @@ async-compression = { version = "0.4.4", features = [
async-stream = "0.3.5" async-stream = "0.3.5"
async-trait = "0.1.74" async-trait = "0.1.74"
axum = { version = "0.7.3", features = ["ws"] } axum = { version = "0.7.3", features = ["ws"] }
axum-server = "0.6.0"
barrage = "0.2.3" barrage = "0.2.3"
backhand = "0.18.0" backhand = "0.18.0"
base32 = "0.5.0" base32 = "0.5.0"
@@ -76,6 +75,7 @@ clap = "4.4.12"
color-eyre = "0.6.2" color-eyre = "0.6.2"
console = "0.15.7" console = "0.15.7"
console-subscriber = { version = "0.3.0", optional = true } console-subscriber = { version = "0.3.0", optional = true }
const_format = "0.2.34"
cookie = "0.18.0" cookie = "0.18.0"
cookie_store = "0.21.0" cookie_store = "0.21.0"
der = { version = "0.7.9", features = ["derive", "pem"] } der = { version = "0.7.9", features = ["derive", "pem"] }
@@ -102,11 +102,15 @@ hex = "0.4.3"
hmac = "0.12.1" hmac = "0.12.1"
http = "1.0.0" http = "1.0.0"
http-body-util = "0.1" http-body-util = "0.1"
hyper-util = { version = "0.1.5", features = [ hyper = { version = "1.5", features = ["server", "http1", "http2"] }
"tokio", hyper-util = { version = "0.1.10", features = [
"server",
"server-auto",
"server-graceful",
"service", "service",
"http1", "http1",
"http2", "http2",
"tokio",
] } ] }
id-pool = { version = "0.2.2", default-features = false, features = [ id-pool = { version = "0.2.2", default-features = false, features = [
"serde", "serde",
@@ -131,12 +135,14 @@ lazy_format = "2.0"
lazy_static = "1.4.0" lazy_static = "1.4.0"
libc = "0.2.149" libc = "0.2.149"
log = "0.4.20" log = "0.4.20"
mio = "1"
mbrman = "0.5.2" mbrman = "0.5.2"
models = { version = "*", path = "../models" } models = { version = "*", path = "../models" }
new_mime_guess = "4" new_mime_guess = "4"
nix = { version = "0.29.0", features = [ nix = { version = "0.29.0", features = [
"fs", "fs",
"mount", "mount",
"net",
"process", "process",
"sched", "sched",
"signal", "signal",
@@ -216,6 +222,7 @@ unix-named-pipe = "0.2.0"
url = { version = "2.4.1", features = ["serde"] } url = { version = "2.4.1", features = ["serde"] }
urlencoding = "2.1.3" urlencoding = "2.1.3"
uuid = { version = "1.4.1", features = ["v4"] } uuid = { version = "1.4.1", features = ["v4"] }
zbus = "5.1.1"
zeroize = "1.6.0" zeroize = "1.6.0"
mail-send = { git = "https://github.com/dr-bonez/mail-send.git", branch = "main" } mail-send = { git = "https://github.com/dr-bonez/mail-send.git", branch = "main" }
rustls = "0.23.20" rustls = "0.23.20"

View File

@@ -1,4 +1,3 @@
use std::collections::BTreeMap;
use std::fmt; use std::fmt;
use clap::{CommandFactory, FromArgMatches, Parser}; use clap::{CommandFactory, FromArgMatches, Parser};

View File

@@ -187,9 +187,8 @@ pub fn check_password_against_db(db: &DatabaseModel, password: &str) -> Result<(
Ok(()) Ok(())
} }
#[derive(Deserialize, Serialize, Parser, TS)] #[derive(Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
#[ts(export)] #[ts(export)]
pub struct LoginParams { pub struct LoginParams {
password: Option<PasswordType>, password: Option<PasswordType>,

View File

@@ -109,9 +109,10 @@ pub async fn recover_full_embassy(
db.put(&ROOT, &Database::init(&os_backup.account)?).await?; db.put(&ROOT, &Database::init(&os_backup.account)?).await?;
drop(db); drop(db);
let InitResult { net_ctrl } = init(&ctx.config, init_phases).await?; let InitResult { net_ctrl } = init(&ctx.webserver, &ctx.config, init_phases).await?;
let rpc_ctx = RpcContext::init( let rpc_ctx = RpcContext::init(
&ctx.webserver,
&ctx.config, &ctx.config,
disk_guid.clone(), disk_guid.clone(),
Some(net_ctrl), Some(net_ctrl),

View File

@@ -4,7 +4,7 @@ use rpc_toolkit::CliApp;
use serde_json::Value; use serde_json::Value;
use crate::service::cli::{ContainerCliContext, ContainerClientConfig}; use crate::service::cli::{ContainerCliContext, ContainerClientConfig};
use crate::util::logger::EmbassyLogger; use crate::util::logger::LOGGER;
use crate::version::{Current, VersionT}; use crate::version::{Current, VersionT};
lazy_static::lazy_static! { lazy_static::lazy_static! {
@@ -12,7 +12,7 @@ lazy_static::lazy_static! {
} }
pub fn main(args: impl IntoIterator<Item = OsString>) { pub fn main(args: impl IntoIterator<Item = OsString>) {
EmbassyLogger::init(); LOGGER.enable();
if let Err(e) = CliApp::new( if let Err(e) = CliApp::new(
|cfg: ContainerClientConfig| Ok(ContainerCliContext::init(cfg)), |cfg: ContainerClientConfig| Ok(ContainerCliContext::init(cfg)),
crate::service::effects::handler(), crate::service::effects::handler(),

View File

@@ -1,20 +1,20 @@
use std::ffi::OsString; use std::ffi::OsString;
use clap::Parser; use clap::Parser;
use futures::FutureExt; use futures::{FutureExt};
use tokio::signal::unix::signal; use tokio::signal::unix::signal;
use tracing::instrument; use tracing::instrument;
use crate::net::web_server::WebServer; use crate::net::web_server::{Acceptor, WebServer};
use crate::prelude::*; use crate::prelude::*;
use crate::registry::context::{RegistryConfig, RegistryContext}; use crate::registry::context::{RegistryConfig, RegistryContext};
use crate::util::logger::EmbassyLogger; use crate::util::logger::LOGGER;
#[instrument(skip_all)] #[instrument(skip_all)]
async fn inner_main(config: &RegistryConfig) -> Result<(), Error> { async fn inner_main(config: &RegistryConfig) -> Result<(), Error> {
let server = async { let server = async {
let ctx = RegistryContext::init(config).await?; let ctx = RegistryContext::init(config).await?;
let mut server = WebServer::new(ctx.listen); let mut server = WebServer::new(Acceptor::bind([ctx.listen]).await?);
server.serve_registry(ctx.clone()); server.serve_registry(ctx.clone());
let mut shutdown_recv = ctx.shutdown.subscribe(); let mut shutdown_recv = ctx.shutdown.subscribe();
@@ -63,7 +63,7 @@ async fn inner_main(config: &RegistryConfig) -> Result<(), Error> {
} }
pub fn main(args: impl IntoIterator<Item = OsString>) { pub fn main(args: impl IntoIterator<Item = OsString>) {
EmbassyLogger::init(); LOGGER.enable();
let config = RegistryConfig::parse_from(args).load().unwrap(); let config = RegistryConfig::parse_from(args).load().unwrap();

View File

@@ -5,7 +5,7 @@ use serde_json::Value;
use crate::context::config::ClientConfig; use crate::context::config::ClientConfig;
use crate::context::CliContext; use crate::context::CliContext;
use crate::util::logger::EmbassyLogger; use crate::util::logger::LOGGER;
use crate::version::{Current, VersionT}; use crate::version::{Current, VersionT};
lazy_static::lazy_static! { lazy_static::lazy_static! {
@@ -13,7 +13,8 @@ lazy_static::lazy_static! {
} }
pub fn main(args: impl IntoIterator<Item = OsString>) { pub fn main(args: impl IntoIterator<Item = OsString>) {
EmbassyLogger::init(); LOGGER.enable();
if let Err(e) = CliApp::new( if let Err(e) = CliApp::new(
|cfg: ClientConfig| Ok(CliContext::init(cfg.load()?)?), |cfg: ClientConfig| Ok(CliContext::init(cfg.load()?)?),
crate::expanded_api(), crate::expanded_api(),

View File

@@ -1,3 +1,4 @@
use std::path::Path;
use std::sync::Arc; use std::sync::Arc;
use tokio::process::Command; use tokio::process::Command;
@@ -11,16 +12,16 @@ use crate::disk::main::DEFAULT_PASSWORD;
use crate::disk::REPAIR_DISK_PATH; use crate::disk::REPAIR_DISK_PATH;
use crate::firmware::{check_for_firmware_update, update_firmware}; use crate::firmware::{check_for_firmware_update, update_firmware};
use crate::init::{InitPhases, InitResult, STANDBY_MODE_PATH}; use crate::init::{InitPhases, InitResult, STANDBY_MODE_PATH};
use crate::net::web_server::WebServer; use crate::net::web_server::{UpgradableListener, WebServer};
use crate::prelude::*; use crate::prelude::*;
use crate::progress::FullProgressTracker; use crate::progress::FullProgressTracker;
use crate::shutdown::Shutdown; use crate::shutdown::Shutdown;
use crate::util::Invoke; use crate::util::Invoke;
use crate::PLATFORM; use crate::{DATA_DIR, PLATFORM};
#[instrument(skip_all)] #[instrument(skip_all)]
async fn setup_or_init( async fn setup_or_init(
server: &mut WebServer, server: &mut WebServer<UpgradableListener>,
config: &ServerConfig, config: &ServerConfig,
) -> Result<Result<(RpcContext, FullProgressTracker), Shutdown>, Error> { ) -> Result<Result<(RpcContext, FullProgressTracker), Shutdown>, Error> {
if let Some(firmware) = check_for_firmware_update() if let Some(firmware) = check_for_firmware_update()
@@ -111,7 +112,7 @@ async fn setup_or_init(
.await .await
.is_err() .is_err()
{ {
let ctx = SetupContext::init(config)?; let ctx = SetupContext::init(server, config)?;
server.serve_setup(ctx.clone()); server.serve_setup(ctx.clone());
@@ -156,7 +157,7 @@ async fn setup_or_init(
let disk_guid = Arc::new(String::from(guid_string.trim())); let disk_guid = Arc::new(String::from(guid_string.trim()));
let requires_reboot = crate::disk::main::import( let requires_reboot = crate::disk::main::import(
&**disk_guid, &**disk_guid,
config.datadir(), DATA_DIR,
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() { if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
RepairStrategy::Aggressive RepairStrategy::Aggressive
} else { } else {
@@ -178,18 +179,26 @@ async fn setup_or_init(
tracing::info!("Loaded Disk"); tracing::info!("Loaded Disk");
if requires_reboot.0 { if requires_reboot.0 {
tracing::info!("Rebooting...");
let mut reboot_phase = handle.add_phase("Rebooting".into(), Some(1)); let mut reboot_phase = handle.add_phase("Rebooting".into(), Some(1));
reboot_phase.start(); reboot_phase.start();
return Ok(Err(Shutdown { return Ok(Err(Shutdown {
export_args: Some((disk_guid, config.datadir().to_owned())), export_args: Some((disk_guid, Path::new(DATA_DIR).to_owned())),
restart: true, restart: true,
})); }));
} }
let InitResult { net_ctrl } = crate::init::init(config, init_phases).await?; let InitResult { net_ctrl } =
crate::init::init(&server.acceptor_setter(), config, init_phases).await?;
let rpc_ctx = let rpc_ctx = RpcContext::init(
RpcContext::init(config, disk_guid, Some(net_ctrl), rpc_ctx_phases).await?; &server.acceptor_setter(),
config,
disk_guid,
Some(net_ctrl),
rpc_ctx_phases,
)
.await?;
Ok::<_, Error>(Ok((rpc_ctx, handle))) Ok::<_, Error>(Ok((rpc_ctx, handle)))
} }
@@ -203,7 +212,7 @@ async fn setup_or_init(
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn main( pub async fn main(
server: &mut WebServer, server: &mut WebServer<UpgradableListener>,
config: &ServerConfig, config: &ServerConfig,
) -> Result<Result<(RpcContext, FullProgressTracker), Shutdown>, Error> { ) -> Result<Result<(RpcContext, FullProgressTracker), Shutdown>, Error> {
if &*PLATFORM == "raspberrypi" && tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() { if &*PLATFORM == "raspberrypi" && tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() {

View File

@@ -1,6 +1,6 @@
use std::cmp::max; use std::cmp::max;
use std::ffi::OsString; use std::ffi::OsString;
use std::net::{Ipv6Addr, SocketAddr}; use std::net::IpAddr;
use std::sync::Arc; use std::sync::Arc;
use clap::Parser; use clap::Parser;
@@ -12,21 +12,26 @@ use tracing::instrument;
use crate::context::config::ServerConfig; use crate::context::config::ServerConfig;
use crate::context::rpc::InitRpcContextPhases; use crate::context::rpc::InitRpcContextPhases;
use crate::context::{DiagnosticContext, InitContext, RpcContext}; use crate::context::{DiagnosticContext, InitContext, RpcContext};
use crate::net::web_server::WebServer; use crate::net::utils::ipv6_is_local;
use crate::net::web_server::{Acceptor, UpgradableListener, WebServer};
use crate::shutdown::Shutdown; use crate::shutdown::Shutdown;
use crate::system::launch_metrics_task; use crate::system::launch_metrics_task;
use crate::util::logger::EmbassyLogger; use crate::util::io::append_file;
use crate::util::logger::LOGGER;
use crate::{Error, ErrorKind, ResultExt}; use crate::{Error, ErrorKind, ResultExt};
#[instrument(skip_all)] #[instrument(skip_all)]
async fn inner_main( async fn inner_main(
server: &mut WebServer, server: &mut WebServer<UpgradableListener>,
config: &ServerConfig, config: &ServerConfig,
) -> Result<Option<Shutdown>, Error> { ) -> Result<Option<Shutdown>, Error> {
let rpc_ctx = if !tokio::fs::metadata("/run/startos/initialized") let rpc_ctx = if !tokio::fs::metadata("/run/startos/initialized")
.await .await
.is_ok() .is_ok()
{ {
LOGGER.set_logfile(Some(
append_file("/run/startos/init.log").await?.into_std().await,
));
let (ctx, handle) = match super::start_init::main(server, &config).await? { let (ctx, handle) = match super::start_init::main(server, &config).await? {
Err(s) => return Ok(Some(s)), Err(s) => return Ok(Some(s)),
Ok(ctx) => ctx, Ok(ctx) => ctx,
@@ -34,6 +39,7 @@ async fn inner_main(
tokio::fs::write("/run/startos/initialized", "").await?; tokio::fs::write("/run/startos/initialized", "").await?;
server.serve_main(ctx.clone()); server.serve_main(ctx.clone());
LOGGER.set_logfile(None);
handle.complete(); handle.complete();
ctx ctx
@@ -44,6 +50,7 @@ async fn inner_main(
server.serve_init(init_ctx); server.serve_init(init_ctx);
let ctx = RpcContext::init( let ctx = RpcContext::init(
&server.acceptor_setter(),
config, config,
Arc::new( Arc::new(
tokio::fs::read_to_string("/media/startos/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy tokio::fs::read_to_string("/media/startos/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
@@ -131,7 +138,7 @@ async fn inner_main(
} }
pub fn main(args: impl IntoIterator<Item = OsString>) { pub fn main(args: impl IntoIterator<Item = OsString>) {
EmbassyLogger::init(); LOGGER.enable();
let config = ServerConfig::parse_from(args).load().unwrap(); let config = ServerConfig::parse_from(args).load().unwrap();
@@ -142,7 +149,18 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
.build() .build()
.expect("failed to initialize runtime"); .expect("failed to initialize runtime");
rt.block_on(async { rt.block_on(async {
let mut server = WebServer::new(SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80)); let addrs = crate::net::utils::all_socket_addrs_for(80).await?;
let mut server = WebServer::new(
Acceptor::bind_upgradable(addrs.into_iter().filter(|addr| match addr.ip() {
IpAddr::V4(ip4) => {
ip4.is_loopback()
|| (ip4.is_private() && !ip4.octets().starts_with(&[10, 59])) // reserving 10.59 for public wireguard configurations
|| ip4.is_link_local()
}
IpAddr::V6(ip6) => ipv6_is_local(ip6),
}))
.await?,
);
match inner_main(&mut server, &config).await { match inner_main(&mut server, &config).await {
Ok(a) => { Ok(a) => {
server.shutdown().await; server.shutdown().await;

View File

@@ -13,6 +13,7 @@ use crate::disk::OsPartitionInfo;
use crate::init::init_postgres; use crate::init::init_postgres;
use crate::prelude::*; use crate::prelude::*;
use crate::util::serde::IoFormat; use crate::util::serde::IoFormat;
use crate::MAIN_DATA;
pub const DEVICE_CONFIG_PATH: &str = "/media/startos/config/config.yaml"; // "/media/startos/config/config.yaml"; pub const DEVICE_CONFIG_PATH: &str = "/media/startos/config/config.yaml"; // "/media/startos/config/config.yaml";
pub const CONFIG_PATH: &str = "/etc/startos/config.yaml"; pub const CONFIG_PATH: &str = "/etc/startos/config.yaml";
@@ -103,8 +104,6 @@ pub struct ServerConfig {
#[arg(skip)] #[arg(skip)]
pub os_partitions: Option<OsPartitionInfo>, pub os_partitions: Option<OsPartitionInfo>,
#[arg(long)] #[arg(long)]
pub bind_rpc: Option<SocketAddr>,
#[arg(long)]
pub tor_control: Option<SocketAddr>, pub tor_control: Option<SocketAddr>,
#[arg(long)] #[arg(long)]
pub tor_socks: Option<SocketAddr>, pub tor_socks: Option<SocketAddr>,
@@ -112,8 +111,6 @@ pub struct ServerConfig {
pub dns_bind: Option<Vec<SocketAddr>>, pub dns_bind: Option<Vec<SocketAddr>>,
#[arg(long)] #[arg(long)]
pub revision_cache_size: Option<usize>, pub revision_cache_size: Option<usize>,
#[arg(short, long)]
pub datadir: Option<PathBuf>,
#[arg(long)] #[arg(long)]
pub disable_encryption: Option<bool>, pub disable_encryption: Option<bool>,
#[arg(long)] #[arg(long)]
@@ -126,7 +123,6 @@ impl ContextConfig for ServerConfig {
fn merge_with(&mut self, other: Self) { fn merge_with(&mut self, other: Self) {
self.ethernet_interface = self.ethernet_interface.take().or(other.ethernet_interface); self.ethernet_interface = self.ethernet_interface.take().or(other.ethernet_interface);
self.os_partitions = self.os_partitions.take().or(other.os_partitions); self.os_partitions = self.os_partitions.take().or(other.os_partitions);
self.bind_rpc = self.bind_rpc.take().or(other.bind_rpc);
self.tor_control = self.tor_control.take().or(other.tor_control); self.tor_control = self.tor_control.take().or(other.tor_control);
self.tor_socks = self.tor_socks.take().or(other.tor_socks); self.tor_socks = self.tor_socks.take().or(other.tor_socks);
self.dns_bind = self.dns_bind.take().or(other.dns_bind); self.dns_bind = self.dns_bind.take().or(other.dns_bind);
@@ -134,7 +130,6 @@ impl ContextConfig for ServerConfig {
.revision_cache_size .revision_cache_size
.take() .take()
.or(other.revision_cache_size); .or(other.revision_cache_size);
self.datadir = self.datadir.take().or(other.datadir);
self.disable_encryption = self.disable_encryption.take().or(other.disable_encryption); self.disable_encryption = self.disable_encryption.take().or(other.disable_encryption);
self.multi_arch_s9pks = self.multi_arch_s9pks.take().or(other.multi_arch_s9pks); self.multi_arch_s9pks = self.multi_arch_s9pks.take().or(other.multi_arch_s9pks);
} }
@@ -148,13 +143,8 @@ impl ServerConfig {
self.load_path_rec(Some(CONFIG_PATH))?; self.load_path_rec(Some(CONFIG_PATH))?;
Ok(self) Ok(self)
} }
pub fn datadir(&self) -> &Path {
self.datadir
.as_deref()
.unwrap_or_else(|| Path::new("/embassy-data"))
}
pub async fn db(&self) -> Result<PatchDb, Error> { pub async fn db(&self) -> Result<PatchDb, Error> {
let db_path = self.datadir().join("main").join("embassy.db"); let db_path = Path::new(MAIN_DATA).join("embassy.db");
let db = PatchDb::open(&db_path) let db = PatchDb::open(&db_path)
.await .await
.with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?; .with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
@@ -163,7 +153,7 @@ impl ServerConfig {
} }
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn secret_store(&self) -> Result<PgPool, Error> { pub async fn secret_store(&self) -> Result<PgPool, Error> {
init_postgres(self.datadir()).await?; init_postgres("/media/startos/data").await?;
let secret_store = let secret_store =
PgPool::connect_with(PgConnectOptions::new().database("secrets").username("root")) PgPool::connect_with(PgConnectOptions::new().database("secrets").username("root"))
.await?; .await?;

View File

@@ -1,5 +1,4 @@
use std::ops::Deref; use std::ops::Deref;
use std::path::PathBuf;
use std::sync::Arc; use std::sync::Arc;
use rpc_toolkit::yajrc::RpcError; use rpc_toolkit::yajrc::RpcError;
@@ -13,7 +12,6 @@ use crate::shutdown::Shutdown;
use crate::Error; use crate::Error;
pub struct DiagnosticContextSeed { pub struct DiagnosticContextSeed {
pub datadir: PathBuf,
pub shutdown: Sender<Shutdown>, pub shutdown: Sender<Shutdown>,
pub error: Arc<RpcError>, pub error: Arc<RpcError>,
pub disk_guid: Option<Arc<String>>, pub disk_guid: Option<Arc<String>>,
@@ -25,7 +23,7 @@ pub struct DiagnosticContext(Arc<DiagnosticContextSeed>);
impl DiagnosticContext { impl DiagnosticContext {
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn init( pub fn init(
config: &ServerConfig, _config: &ServerConfig,
disk_guid: Option<Arc<String>>, disk_guid: Option<Arc<String>>,
error: Error, error: Error,
) -> Result<Self, Error> { ) -> Result<Self, Error> {
@@ -35,7 +33,6 @@ impl DiagnosticContext {
let (shutdown, _) = tokio::sync::broadcast::channel(1); let (shutdown, _) = tokio::sync::broadcast::channel(1);
Ok(Self(Arc::new(DiagnosticContextSeed { Ok(Self(Arc::new(DiagnosticContextSeed {
datadir: config.datadir().to_owned(),
shutdown, shutdown,
disk_guid, disk_guid,
error: Arc::new(error.into()), error: Arc::new(error.into()),

View File

@@ -2,7 +2,6 @@ use std::collections::{BTreeMap, BTreeSet};
use std::future::Future; use std::future::Future;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::ops::Deref; use std::ops::Deref;
use std::path::PathBuf;
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
@@ -31,6 +30,7 @@ use crate::init::check_time_is_synchronized;
use crate::lxc::{ContainerId, LxcContainer, LxcManager}; use crate::lxc::{ContainerId, LxcContainer, LxcManager};
use crate::net::net_controller::{NetController, PreInitNetController}; use crate::net::net_controller::{NetController, PreInitNetController};
use crate::net::utils::{find_eth_iface, find_wifi_iface}; use crate::net::utils::{find_eth_iface, find_wifi_iface};
use crate::net::web_server::{UpgradableListener, WebServerAcceptorSetter};
use crate::net::wifi::WpaCli; use crate::net::wifi::WpaCli;
use crate::prelude::*; use crate::prelude::*;
use crate::progress::{FullProgressTracker, PhaseProgressTrackerHandle}; use crate::progress::{FullProgressTracker, PhaseProgressTrackerHandle};
@@ -47,7 +47,6 @@ pub struct RpcContextSeed {
pub os_partitions: OsPartitionInfo, pub os_partitions: OsPartitionInfo,
pub wifi_interface: Option<String>, pub wifi_interface: Option<String>,
pub ethernet_interface: String, pub ethernet_interface: String,
pub datadir: PathBuf,
pub disk_guid: Arc<String>, pub disk_guid: Arc<String>,
pub ephemeral_sessions: SyncMutex<Sessions>, pub ephemeral_sessions: SyncMutex<Sessions>,
pub db: TypedPatchDb<Database>, pub db: TypedPatchDb<Database>,
@@ -117,6 +116,7 @@ pub struct RpcContext(Arc<RpcContextSeed>);
impl RpcContext { impl RpcContext {
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn init( pub async fn init(
webserver: &WebServerAcceptorSetter<UpgradableListener>,
config: &ServerConfig, config: &ServerConfig,
disk_guid: Arc<String>, disk_guid: Arc<String>,
net_ctrl: Option<PreInitNetController>, net_ctrl: Option<PreInitNetController>,
@@ -149,7 +149,7 @@ impl RpcContext {
if let Some(net_ctrl) = net_ctrl { if let Some(net_ctrl) = net_ctrl {
net_ctrl net_ctrl
} else { } else {
PreInitNetController::init( let net_ctrl = PreInitNetController::init(
db.clone(), db.clone(),
config config
.tor_control .tor_control
@@ -158,7 +158,9 @@ impl RpcContext {
&account.hostname, &account.hostname,
account.tor_key.clone(), account.tor_key.clone(),
) )
.await? .await?;
webserver.try_upgrade(|a| net_ctrl.net_iface.upgrade_listener(a))?;
net_ctrl
}, },
config config
.dns_bind .dns_bind
@@ -210,7 +212,6 @@ impl RpcContext {
let seed = Arc::new(RpcContextSeed { let seed = Arc::new(RpcContextSeed {
is_closed: AtomicBool::new(false), is_closed: AtomicBool::new(false),
datadir: config.datadir().to_path_buf(),
os_partitions: config.os_partitions.clone().ok_or_else(|| { os_partitions: config.os_partitions.clone().ok_or_else(|| {
Error::new( Error::new(
eyre!("OS Partition Information Missing"), eyre!("OS Partition Information Missing"),

View File

@@ -1,5 +1,5 @@
use std::ops::Deref; use std::ops::Deref;
use std::path::PathBuf; use std::path::{Path};
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
@@ -10,8 +10,6 @@ use josekit::jwk::Jwk;
use patch_db::PatchDb; use patch_db::PatchDb;
use rpc_toolkit::Context; use rpc_toolkit::Context;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use sqlx::postgres::PgConnectOptions;
use sqlx::PgPool;
use tokio::sync::broadcast::Sender; use tokio::sync::broadcast::Sender;
use tokio::sync::OnceCell; use tokio::sync::OnceCell;
use tracing::instrument; use tracing::instrument;
@@ -22,12 +20,13 @@ use crate::context::config::ServerConfig;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::disk::OsPartitionInfo; use crate::disk::OsPartitionInfo;
use crate::hostname::Hostname; use crate::hostname::Hostname;
use crate::init::init_postgres; use crate::net::web_server::{UpgradableListener, WebServer, WebServerAcceptorSetter};
use crate::prelude::*; use crate::prelude::*;
use crate::progress::FullProgressTracker; use crate::progress::FullProgressTracker;
use crate::rpc_continuations::{Guid, RpcContinuation, RpcContinuations}; use crate::rpc_continuations::{Guid, RpcContinuation, RpcContinuations};
use crate::setup::SetupProgress; use crate::setup::SetupProgress;
use crate::util::net::WebSocketExt; use crate::util::net::WebSocketExt;
use crate::MAIN_DATA;
lazy_static::lazy_static! { lazy_static::lazy_static! {
pub static ref CURRENT_SECRET: Jwk = Jwk::generate_ec_key(josekit::jwk::alg::ec::EcCurve::P256).unwrap_or_else(|e| { pub static ref CURRENT_SECRET: Jwk = Jwk::generate_ec_key(josekit::jwk::alg::ec::EcCurve::P256).unwrap_or_else(|e| {
@@ -61,6 +60,7 @@ impl TryFrom<&AccountInfo> for SetupResult {
} }
pub struct SetupContextSeed { pub struct SetupContextSeed {
pub webserver: WebServerAcceptorSetter<UpgradableListener>,
pub config: ServerConfig, pub config: ServerConfig,
pub os_partitions: OsPartitionInfo, pub os_partitions: OsPartitionInfo,
pub disable_encryption: bool, pub disable_encryption: bool,
@@ -68,7 +68,6 @@ pub struct SetupContextSeed {
pub task: OnceCell<NonDetachingJoinHandle<()>>, pub task: OnceCell<NonDetachingJoinHandle<()>>,
pub result: OnceCell<Result<(SetupResult, RpcContext), Error>>, pub result: OnceCell<Result<(SetupResult, RpcContext), Error>>,
pub shutdown: Sender<()>, pub shutdown: Sender<()>,
pub datadir: PathBuf,
pub rpc_continuations: RpcContinuations, pub rpc_continuations: RpcContinuations,
} }
@@ -76,10 +75,13 @@ pub struct SetupContextSeed {
pub struct SetupContext(Arc<SetupContextSeed>); pub struct SetupContext(Arc<SetupContextSeed>);
impl SetupContext { impl SetupContext {
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn init(config: &ServerConfig) -> Result<Self, Error> { pub fn init(
webserver: &WebServer<UpgradableListener>,
config: &ServerConfig,
) -> Result<Self, Error> {
let (shutdown, _) = tokio::sync::broadcast::channel(1); let (shutdown, _) = tokio::sync::broadcast::channel(1);
let datadir = config.datadir().to_owned();
Ok(Self(Arc::new(SetupContextSeed { Ok(Self(Arc::new(SetupContextSeed {
webserver: webserver.acceptor_setter(),
config: config.clone(), config: config.clone(),
os_partitions: config.os_partitions.clone().ok_or_else(|| { os_partitions: config.os_partitions.clone().ok_or_else(|| {
Error::new( Error::new(
@@ -92,13 +94,12 @@ impl SetupContext {
task: OnceCell::new(), task: OnceCell::new(),
result: OnceCell::new(), result: OnceCell::new(),
shutdown, shutdown,
datadir,
rpc_continuations: RpcContinuations::new(), rpc_continuations: RpcContinuations::new(),
}))) })))
} }
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn db(&self) -> Result<PatchDb, Error> { pub async fn db(&self) -> Result<PatchDb, Error> {
let db_path = self.datadir.join("main").join("embassy.db"); let db_path = Path::new(MAIN_DATA).join("embassy.db");
let db = PatchDb::open(&db_path) let db = PatchDb::open(&db_path)
.await .await
.with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?; .with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;

View File

@@ -1,10 +1,10 @@
use std::collections::{BTreeMap, BTreeSet}; use std::collections::{BTreeMap, BTreeSet};
use std::net::{Ipv4Addr, Ipv6Addr}; use std::net::{IpAddr, Ipv4Addr};
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
use exver::{Version, VersionRange}; use exver::{Version, VersionRange};
use imbl_value::InternedString; use imbl_value::InternedString;
use ipnet::{Ipv4Net, Ipv6Net}; use ipnet::IpNet;
use isocountry::CountryCode; use isocountry::CountryCode;
use itertools::Itertools; use itertools::Itertools;
use models::PackageId; use models::PackageId;
@@ -17,7 +17,7 @@ use ts_rs::TS;
use crate::account::AccountInfo; use crate::account::AccountInfo;
use crate::db::model::package::AllPackageData; use crate::db::model::package::AllPackageData;
use crate::net::utils::{get_iface_ipv4_addr, get_iface_ipv6_addr}; use crate::net::acme::AcmeProvider;
use crate::prelude::*; use crate::prelude::*;
use crate::progress::FullProgress; use crate::progress::FullProgress;
use crate::system::SmtpValue; use crate::system::SmtpValue;
@@ -54,8 +54,8 @@ impl Public {
tor_address: format!("https://{}", account.tor_key.public().get_onion_address()) tor_address: format!("https://{}", account.tor_key.public().get_onion_address())
.parse() .parse()
.unwrap(), .unwrap(),
ip_info: BTreeMap::new(), network_interfaces: BTreeMap::new(),
acme: None, acme: BTreeMap::new(),
status_info: ServerStatus { status_info: ServerStatus {
backup_progress: None, backup_progress: None,
updated: false, updated: false,
@@ -130,8 +130,11 @@ pub struct ServerInfo {
/// for backwards compatibility /// for backwards compatibility
#[ts(type = "string")] #[ts(type = "string")]
pub tor_address: Url, pub tor_address: Url,
pub ip_info: BTreeMap<String, IpInfo>, #[ts(as = "BTreeMap::<String, NetworkInterfaceInfo>")]
pub acme: Option<AcmeSettings>, #[serde(default)]
pub network_interfaces: BTreeMap<InternedString, NetworkInterfaceInfo>,
#[serde(default)]
pub acme: BTreeMap<AcmeProvider, AcmeSettings>,
#[serde(default)] #[serde(default)]
pub status_info: ServerStatus, pub status_info: ServerStatus,
pub wifi: WifiInfo, pub wifi: WifiInfo,
@@ -151,43 +154,61 @@ pub struct ServerInfo {
pub devices: Vec<LshwDevice>, pub devices: Vec<LshwDevice>,
} }
#[derive(Debug, Deserialize, Serialize, HasModel, TS)] #[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
#[model = "Model<Self>"] #[model = "Model<Self>"]
#[ts(export)] #[ts(export)]
pub struct IpInfo { pub struct NetworkInterfaceInfo {
#[ts(type = "string | null")] pub public: Option<bool>,
pub ipv4_range: Option<Ipv4Net>, pub ip_info: Option<IpInfo>,
pub ipv4: Option<Ipv4Addr>,
#[ts(type = "string | null")]
pub ipv6_range: Option<Ipv6Net>,
pub ipv6: Option<Ipv6Addr>,
} }
impl IpInfo { impl NetworkInterfaceInfo {
pub async fn for_interface(iface: &str) -> Result<Self, Error> { pub fn public(&self) -> bool {
let (ipv4, ipv4_range) = get_iface_ipv4_addr(iface).await?.unzip(); self.public.unwrap_or_else(|| {
let (ipv6, ipv6_range) = get_iface_ipv6_addr(iface).await?.unzip(); !self.ip_info.as_ref().map_or(true, |ip_info| {
Ok(Self { ip_info.subnets.iter().all(|ipnet| {
ipv4_range, match ipnet.addr() {
ipv4, IpAddr::V4(ip4) => {
ipv6_range, ip4.is_loopback()
ipv6, || (ip4.is_private() && !ip4.octets().starts_with(&[10, 59])) // reserving 10.59 for public wireguard configurations
|| ip4.is_link_local()
}
IpAddr::V6(_) => true,
}
})
})
}) })
} }
} }
#[derive(Clone, Debug, Default, PartialEq, Eq, Deserialize, Serialize, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct IpInfo {
pub scope_id: u32,
pub device_type: Option<NetworkInterfaceType>,
#[ts(type = "string[]")]
pub subnets: BTreeSet<IpNet>,
pub wan_ip: Option<Ipv4Addr>,
#[ts(type = "string[]")]
pub ntp_servers: BTreeSet<InternedString>,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize, TS)]
#[ts(export)]
#[serde(rename_all = "kebab-case")]
pub enum NetworkInterfaceType {
Ethernet,
Wireless,
Wireguard,
}
#[derive(Debug, Deserialize, Serialize, HasModel, TS)] #[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
#[model = "Model<Self>"] #[model = "Model<Self>"]
#[ts(export)] #[ts(export)]
pub struct AcmeSettings { pub struct AcmeSettings {
#[ts(type = "string")]
pub provider: Url,
/// email addresses for letsencrypt
pub contact: Vec<String>, pub contact: Vec<String>,
#[ts(type = "string[]")]
/// domains to get letsencrypt certs for
pub domains: BTreeSet<InternedString>,
} }
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)] #[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]

View File

@@ -10,7 +10,7 @@ use crate::context::{CliContext, DiagnosticContext, RpcContext};
use crate::init::SYSTEM_REBUILD_PATH; use crate::init::SYSTEM_REBUILD_PATH;
use crate::shutdown::Shutdown; use crate::shutdown::Shutdown;
use crate::util::io::delete_file; use crate::util::io::delete_file;
use crate::Error; use crate::{Error, DATA_DIR};
pub fn diagnostic<C: Context>() -> ParentHandler<C> { pub fn diagnostic<C: Context>() -> ParentHandler<C> {
ParentHandler::new() ParentHandler::new()
@@ -71,7 +71,7 @@ pub fn restart(ctx: DiagnosticContext) -> Result<(), Error> {
export_args: ctx export_args: ctx
.disk_guid .disk_guid
.clone() .clone()
.map(|guid| (guid, ctx.datadir.clone())), .map(|guid| (guid, Path::new(DATA_DIR).to_owned())),
restart: true, restart: true,
}) })
.expect("receiver dropped"); .expect("receiver dropped");

View File

@@ -7,7 +7,6 @@ use models::PackageId;
use tokio::io::AsyncWriteExt; use tokio::io::AsyncWriteExt;
use tracing::instrument; use tracing::instrument;
use super::filesystem::ecryptfs::EcryptFS;
use super::guard::{GenericMountGuard, TmpMountGuard}; use super::guard::{GenericMountGuard, TmpMountGuard};
use crate::auth::check_password; use crate::auth::check_password;
use crate::backup::target::BackupInfo; use crate::backup::target::BackupInfo;

View File

@@ -1,7 +1,6 @@
use std::ffi::OsStr; use std::ffi::OsStr;
use std::fmt::{Display, Write}; use std::fmt::{Display, Write};
use std::path::Path; use std::path::Path;
use std::time::Duration;
use digest::generic_array::GenericArray; use digest::generic_array::GenericArray;
use digest::OutputSizeUser; use digest::OutputSizeUser;

View File

@@ -7,6 +7,7 @@ use std::time::{Duration, SystemTime};
use axum::extract::ws::{self}; use axum::extract::ws::{self};
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use const_format::formatcp;
use futures::{StreamExt, TryStreamExt}; use futures::{StreamExt, TryStreamExt};
use itertools::Itertools; use itertools::Itertools;
use models::ResultExt; use models::ResultExt;
@@ -25,6 +26,7 @@ use crate::db::model::Database;
use crate::disk::mount::util::unmount; use crate::disk::mount::util::unmount;
use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH; use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH;
use crate::net::net_controller::PreInitNetController; use crate::net::net_controller::PreInitNetController;
use crate::net::web_server::{UpgradableListener, WebServerAcceptorSetter};
use crate::prelude::*; use crate::prelude::*;
use crate::progress::{ use crate::progress::{
FullProgress, FullProgressTracker, PhaseProgressTrackerHandle, PhasedProgressBar, FullProgress, FullProgressTracker, PhaseProgressTrackerHandle, PhasedProgressBar,
@@ -37,7 +39,7 @@ use crate::util::io::{create_file, IOHook};
use crate::util::lshw::lshw; use crate::util::lshw::lshw;
use crate::util::net::WebSocketExt; use crate::util::net::WebSocketExt;
use crate::util::{cpupower, Invoke}; use crate::util::{cpupower, Invoke};
use crate::Error; use crate::{Error, MAIN_DATA, PACKAGE_DATA};
pub const SYSTEM_REBUILD_PATH: &str = "/media/startos/config/system-rebuild"; pub const SYSTEM_REBUILD_PATH: &str = "/media/startos/config/system-rebuild";
pub const STANDBY_MODE_PATH: &str = "/media/startos/config/standby"; pub const STANDBY_MODE_PATH: &str = "/media/startos/config/standby";
@@ -274,6 +276,7 @@ pub async fn run_script<P: AsRef<Path>>(path: P, mut progress: PhaseProgressTrac
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn init( pub async fn init(
webserver: &WebServerAcceptorSetter<UpgradableListener>,
cfg: &ServerConfig, cfg: &ServerConfig,
InitPhases { InitPhases {
preinit, preinit,
@@ -317,7 +320,7 @@ pub async fn init(
})?; })?;
tokio::fs::set_permissions(LOCAL_AUTH_COOKIE_PATH, Permissions::from_mode(0o046)).await?; tokio::fs::set_permissions(LOCAL_AUTH_COOKIE_PATH, Permissions::from_mode(0o046)).await?;
Command::new("chown") Command::new("chown")
.arg("root:embassy") .arg("root:startos")
.arg(LOCAL_AUTH_COOKIE_PATH) .arg(LOCAL_AUTH_COOKIE_PATH)
.invoke(crate::ErrorKind::Filesystem) .invoke(crate::ErrorKind::Filesystem)
.await?; .await?;
@@ -356,10 +359,11 @@ pub async fn init(
account.tor_key, account.tor_key,
) )
.await?; .await?;
webserver.try_upgrade(|a| net_ctrl.net_iface.upgrade_listener(a))?;
start_net.complete(); start_net.complete();
mount_logs.start(); mount_logs.start();
let log_dir = cfg.datadir().join("main/logs"); let log_dir = Path::new(MAIN_DATA).join("logs");
if tokio::fs::metadata(&log_dir).await.is_err() { if tokio::fs::metadata(&log_dir).await.is_err() {
tokio::fs::create_dir_all(&log_dir).await?; tokio::fs::create_dir_all(&log_dir).await?;
} }
@@ -419,36 +423,28 @@ pub async fn init(
load_ca_cert.complete(); load_ca_cert.complete();
load_wifi.start(); load_wifi.start();
crate::net::wifi::synchronize_wpa_supplicant_conf( crate::net::wifi::synchronize_network_manager(MAIN_DATA, &mut server_info.wifi).await?;
&cfg.datadir().join("main"),
&mut server_info.wifi,
)
.await?;
load_wifi.complete(); load_wifi.complete();
tracing::info!("Synchronized WiFi"); tracing::info!("Synchronized WiFi");
init_tmp.start(); init_tmp.start();
let tmp_dir = cfg.datadir().join("package-data/tmp"); let tmp_dir = Path::new(PACKAGE_DATA).join("tmp");
if tokio::fs::metadata(&tmp_dir).await.is_ok() { if tokio::fs::metadata(&tmp_dir).await.is_ok() {
tokio::fs::remove_dir_all(&tmp_dir).await?; tokio::fs::remove_dir_all(&tmp_dir).await?;
} }
if tokio::fs::metadata(&tmp_dir).await.is_err() { if tokio::fs::metadata(&tmp_dir).await.is_err() {
tokio::fs::create_dir_all(&tmp_dir).await?; tokio::fs::create_dir_all(&tmp_dir).await?;
} }
let tmp_var = cfg.datadir().join(format!("package-data/tmp/var")); let tmp_var = Path::new(PACKAGE_DATA).join("tmp/var");
if tokio::fs::metadata(&tmp_var).await.is_ok() { if tokio::fs::metadata(&tmp_var).await.is_ok() {
tokio::fs::remove_dir_all(&tmp_var).await?; tokio::fs::remove_dir_all(&tmp_var).await?;
} }
crate::disk::mount::util::bind(&tmp_var, "/var/tmp", false).await?; crate::disk::mount::util::bind(&tmp_var, "/var/tmp", false).await?;
let downloading = cfg let downloading = Path::new(PACKAGE_DATA).join("archive/downloading");
.datadir()
.join(format!("package-data/archive/downloading"));
if tokio::fs::metadata(&downloading).await.is_ok() { if tokio::fs::metadata(&downloading).await.is_ok() {
tokio::fs::remove_dir_all(&downloading).await?; tokio::fs::remove_dir_all(&downloading).await?;
} }
let tmp_docker = cfg let tmp_docker = Path::new(PACKAGE_DATA).join(formatcp!("tmp/{CONTAINER_TOOL}"));
.datadir()
.join(format!("package-data/tmp/{CONTAINER_TOOL}"));
crate::disk::mount::util::bind(&tmp_docker, CONTAINER_DATADIR, false).await?; crate::disk::mount::util::bind(&tmp_docker, CONTAINER_DATADIR, false).await?;
init_tmp.complete(); init_tmp.complete();
@@ -509,7 +505,6 @@ pub async fn init(
enable_zram.complete(); enable_zram.complete();
update_server_info.start(); update_server_info.start();
server_info.ip_info = crate::net::dhcp::init_ips().await?;
server_info.ram = get_mem_info().await?.total.0 as u64 * 1024 * 1024; server_info.ram = get_mem_info().await?.total.0 as u64 * 1024 * 1024;
server_info.devices = lshw().await?; server_info.devices = lshw().await?;
server_info.status_info = ServerStatus { server_info.status_info = ServerStatus {

View File

@@ -202,9 +202,6 @@ pub async fn sideload(
use axum::extract::ws::Message; use axum::extract::ws::Message;
async move { async move {
if let Err(e) = async { if let Err(e) = async {
type RpcResponse = rpc_toolkit::yajrc::RpcResponse<
GenericRpcMethod<&'static str, (), FullProgress>,
>;
tokio::select! { tokio::select! {
res = async { res = async {
while let Some(progress) = progress_listener.next().await { while let Some(progress) = progress_listener.next().await {

View File

@@ -1,6 +1,11 @@
use const_format::formatcp;
pub const DATA_DIR: &str = "/media/startos/data";
pub const MAIN_DATA: &str = formatcp!("{DATA_DIR}/main");
pub const PACKAGE_DATA: &str = formatcp!("{DATA_DIR}/package-data");
pub const DEFAULT_REGISTRY: &str = "https://registry.start9.com"; pub const DEFAULT_REGISTRY: &str = "https://registry.start9.com";
// pub const COMMUNITY_MARKETPLACE: &str = "https://community-registry.start9.com"; // pub const COMMUNITY_MARKETPLACE: &str = "https://community-registry.start9.com";
pub const HOST_IP: [u8; 4] = [172, 18, 0, 1]; pub const HOST_IP: [u8; 4] = [10, 0, 3, 1];
pub use std::env::consts::ARCH; pub use std::env::consts::ARCH;
lazy_static::lazy_static! { lazy_static::lazy_static! {
pub static ref PLATFORM: String = { pub static ref PLATFORM: String = {

View File

@@ -1,5 +1,4 @@
use std::collections::BTreeSet; use std::collections::BTreeSet;
use std::ffi::OsString;
use std::net::Ipv4Addr; use std::net::Ipv4Addr;
use std::path::Path; use std::path::Path;
use std::sync::{Arc, Weak}; use std::sync::{Arc, Weak};

View File

@@ -1,6 +1,7 @@
use std::collections::{BTreeMap, BTreeSet}; use std::collections::{BTreeMap, BTreeSet};
use std::str::FromStr; use std::str::FromStr;
use async_acme::acme::Identifier;
use clap::builder::ValueParserFactory; use clap::builder::ValueParserFactory;
use clap::Parser; use clap::Parser;
use imbl_value::InternedString; use imbl_value::InternedString;
@@ -10,6 +11,7 @@ use openssl::pkey::{PKey, Private};
use openssl::x509::X509; use openssl::x509::X509;
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler}; use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use ts_rs::TS;
use url::Url; use url::Url;
use crate::context::{CliContext, RpcContext}; use crate::context::{CliContext, RpcContext};
@@ -78,10 +80,18 @@ impl<'a> async_acme::cache::AcmeCache for AcmeCertCache<'a> {
async fn read_certificate( async fn read_certificate(
&self, &self,
domains: &[String], identifiers: &[Identifier],
directory_url: &str, directory_url: &str,
) -> Result<Option<(String, String)>, Self::Error> { ) -> Result<Option<(String, String)>, Self::Error> {
let domains = JsonKey::new(domains.into_iter().map(InternedString::intern).collect()); let identifiers = JsonKey::new(
identifiers
.into_iter()
.map(|d| match d {
Identifier::Dns(d) => d.into(),
Identifier::Ip(ip) => InternedString::from_display(ip),
})
.collect(),
);
let directory_url = directory_url let directory_url = directory_url
.parse::<Url>() .parse::<Url>()
.with_kind(ErrorKind::ParseUrl)?; .with_kind(ErrorKind::ParseUrl)?;
@@ -94,7 +104,7 @@ impl<'a> async_acme::cache::AcmeCache for AcmeCertCache<'a> {
.into_acme() .into_acme()
.into_certs() .into_certs()
.into_idx(&directory_url) .into_idx(&directory_url)
.and_then(|a| a.into_idx(&domains)) .and_then(|a| a.into_idx(&identifiers))
else { else {
return Ok(None); return Ok(None);
}; };
@@ -120,13 +130,21 @@ impl<'a> async_acme::cache::AcmeCache for AcmeCertCache<'a> {
async fn write_certificate( async fn write_certificate(
&self, &self,
domains: &[String], identifiers: &[Identifier],
directory_url: &str, directory_url: &str,
key_pem: &str, key_pem: &str,
certificate_pem: &str, certificate_pem: &str,
) -> Result<(), Self::Error> { ) -> Result<(), Self::Error> {
tracing::info!("Saving new certificate for {domains:?}"); tracing::info!("Saving new certificate for {identifiers:?}");
let domains = JsonKey::new(domains.into_iter().map(InternedString::intern).collect()); let identifiers = JsonKey::new(
identifiers
.into_iter()
.map(|d| match d {
Identifier::Dns(d) => d.into(),
Identifier::Ip(ip) => InternedString::from_display(ip),
})
.collect(),
);
let directory_url = directory_url let directory_url = directory_url
.parse::<Url>() .parse::<Url>()
.with_kind(ErrorKind::ParseUrl)?; .with_kind(ErrorKind::ParseUrl)?;
@@ -146,7 +164,7 @@ impl<'a> async_acme::cache::AcmeCache for AcmeCertCache<'a> {
.as_acme_mut() .as_acme_mut()
.as_certs_mut() .as_certs_mut()
.upsert(&directory_url, || Ok(BTreeMap::new()))? .upsert(&directory_url, || Ok(BTreeMap::new()))?
.insert(&domains, &cert) .insert(&identifiers, &cert)
}) })
.await?; .await?;
@@ -155,22 +173,17 @@ impl<'a> async_acme::cache::AcmeCache for AcmeCertCache<'a> {
} }
pub fn acme<C: Context>() -> ParentHandler<C> { pub fn acme<C: Context>() -> ParentHandler<C> {
ParentHandler::new() ParentHandler::new().subcommand(
.subcommand( "init",
"init", from_fn_async(init)
from_fn_async(init) .no_display()
.no_display() .with_about("Setup ACME certificate acquisition")
.with_about("Setup ACME certificate acquisition") .with_call_remote::<CliContext>(),
.with_call_remote::<CliContext>(), )
)
.subcommand(
"domain",
domain::<C>()
.with_about("Add, remove, or view domains for which to acquire ACME certificates"),
)
} }
#[derive(Clone, Deserialize, Serialize)] #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize, TS)]
#[ts(type = "string")]
pub struct AcmeProvider(pub Url); pub struct AcmeProvider(pub Url);
impl FromStr for AcmeProvider { impl FromStr for AcmeProvider {
type Err = <Url as FromStr>::Err; type Err = <Url as FromStr>::Err;
@@ -183,6 +196,11 @@ impl FromStr for AcmeProvider {
.map(Self) .map(Self)
} }
} }
impl AsRef<str> for AcmeProvider {
fn as_ref(&self) -> &str {
self.0.as_str()
}
}
impl ValueParserFactory for AcmeProvider { impl ValueParserFactory for AcmeProvider {
type Parser = FromStrParser<Self>; type Parser = FromStrParser<Self>;
fn value_parser() -> Self::Parser { fn value_parser() -> Self::Parser {
@@ -200,125 +218,15 @@ pub struct InitAcmeParams {
pub async fn init( pub async fn init(
ctx: RpcContext, ctx: RpcContext,
InitAcmeParams { InitAcmeParams { provider, contact }: InitAcmeParams,
provider: AcmeProvider(provider),
contact,
}: InitAcmeParams,
) -> Result<(), Error> { ) -> Result<(), Error> {
ctx.db ctx.db
.mutate(|db| { .mutate(|db| {
db.as_public_mut() db.as_public_mut()
.as_server_info_mut() .as_server_info_mut()
.as_acme_mut() .as_acme_mut()
.map_mutate(|acme| { .insert(&provider, &AcmeSettings { contact })
Ok(Some(AcmeSettings {
provider,
contact,
domains: acme.map(|acme| acme.domains).unwrap_or_default(),
}))
})
}) })
.await?; .await?;
Ok(()) Ok(())
} }
pub fn domain<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"add",
from_fn_async(add_domain)
.no_display()
.with_about("Add a domain for which to acquire ACME certificates")
.with_call_remote::<CliContext>(),
)
.subcommand(
"remove",
from_fn_async(remove_domain)
.no_display()
.with_about("Remove a domain for which to acquire ACME certificates")
.with_call_remote::<CliContext>(),
)
.subcommand(
"list",
from_fn_async(list_domains)
.with_custom_display_fn(|_, res| {
for domain in res {
println!("{domain}")
}
Ok(())
})
.with_about("List domains for which to acquire ACME certificates")
.with_call_remote::<CliContext>(),
)
}
#[derive(Deserialize, Serialize, Parser)]
pub struct DomainParams {
pub domain: InternedString,
}
pub async fn add_domain(
ctx: RpcContext,
DomainParams { domain }: DomainParams,
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_server_info_mut()
.as_acme_mut()
.transpose_mut()
.ok_or_else(|| {
Error::new(
eyre!("Please call `start-cli net acme init` before adding a domain"),
ErrorKind::InvalidRequest,
)
})?
.as_domains_mut()
.mutate(|domains| {
domains.insert(domain);
Ok(())
})
})
.await?;
Ok(())
}
pub async fn remove_domain(
ctx: RpcContext,
DomainParams { domain }: DomainParams,
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
if let Some(acme) = db
.as_public_mut()
.as_server_info_mut()
.as_acme_mut()
.transpose_mut()
{
acme.as_domains_mut().mutate(|domains| {
domains.remove(&domain);
Ok(())
})
} else {
Ok(())
}
})
.await?;
Ok(())
}
pub async fn list_domains(ctx: RpcContext) -> Result<BTreeSet<InternedString>, Error> {
if let Some(acme) = ctx
.db
.peek()
.await
.into_public()
.into_server_info()
.into_acme()
.transpose()
{
acme.into_domains().de()
} else {
Ok(BTreeSet::new())
}
}

View File

@@ -1,99 +0,0 @@
use std::collections::{BTreeMap, BTreeSet};
use std::net::IpAddr;
use clap::Parser;
use futures::TryStreamExt;
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use tokio::sync::RwLock;
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::db::model::public::IpInfo;
use crate::net::utils::{iface_is_physical, list_interfaces};
use crate::prelude::*;
use crate::Error;
lazy_static::lazy_static! {
static ref CACHED_IPS: RwLock<BTreeSet<IpAddr>> = RwLock::new(BTreeSet::new());
}
async fn _ips() -> Result<BTreeSet<IpAddr>, Error> {
Ok(init_ips()
.await?
.values()
.flat_map(|i| {
std::iter::empty()
.chain(i.ipv4.map(IpAddr::from))
.chain(i.ipv6.map(IpAddr::from))
})
.collect())
}
pub async fn ips() -> Result<BTreeSet<IpAddr>, Error> {
let ips = CACHED_IPS.read().await.clone();
if !ips.is_empty() {
return Ok(ips);
}
let ips = _ips().await?;
*CACHED_IPS.write().await = ips.clone();
Ok(ips)
}
pub async fn init_ips() -> Result<BTreeMap<String, IpInfo>, Error> {
let mut res = BTreeMap::new();
let mut ifaces = list_interfaces();
while let Some(iface) = ifaces.try_next().await? {
if iface_is_physical(&iface).await {
let ip_info = IpInfo::for_interface(&iface).await?;
res.insert(iface, ip_info);
}
}
Ok(res)
}
// #[command(subcommands(update))]
pub fn dhcp<C: Context>() -> ParentHandler<C> {
ParentHandler::new().subcommand(
"update",
from_fn_async::<_, _, (), Error, (RpcContext, UpdateParams)>(update)
.no_display()
.with_about("Update IP assigned by dhcp")
.with_call_remote::<CliContext>(),
)
}
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct UpdateParams {
interface: String,
}
pub async fn update(
ctx: RpcContext,
UpdateParams { interface }: UpdateParams,
) -> Result<(), Error> {
if iface_is_physical(&interface).await {
let ip_info = IpInfo::for_interface(&interface).await?;
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_server_info_mut()
.as_ip_info_mut()
.insert(&interface, &ip_info)
})
.await?;
let mut cached = CACHED_IPS.write().await;
if cached.is_empty() {
*cached = _ips().await?;
} else {
cached.extend(
std::iter::empty()
.chain(ip_info.ipv4.map(IpAddr::from))
.chain(ip_info.ipv6.map(IpAddr::from)),
);
}
}
Ok(())
}

View File

@@ -1,12 +1,16 @@
use std::collections::BTreeMap; use std::collections::{BTreeMap, BTreeSet};
use std::net::SocketAddr; use std::net::SocketAddr;
use std::sync::{Arc, Weak}; use std::sync::{Arc, Weak};
use futures::channel::oneshot;
use helpers::NonDetachingJoinHandle;
use id_pool::IdPool; use id_pool::IdPool;
use imbl_value::InternedString;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use tokio::process::Command; use tokio::process::Command;
use tokio::sync::Mutex; use tokio::sync::{mpsc, watch};
use crate::db::model::public::NetworkInterfaceInfo;
use crate::prelude::*; use crate::prelude::*;
use crate::util::Invoke; use crate::util::Invoke;
@@ -34,144 +38,269 @@ impl AvailablePorts {
} }
} }
#[derive(Debug)]
struct ForwardRequest {
public: bool,
target: SocketAddr,
rc: Weak<()>,
}
#[derive(Debug, Default)]
struct ForwardState {
requested: BTreeMap<u16, ForwardRequest>,
current: BTreeMap<u16, BTreeMap<InternedString, SocketAddr>>,
}
impl ForwardState {
/// Reconcile installed iptables forwards with what is currently requested.
///
/// `interfaces` maps interface name -> whether that interface is public.
/// Public forwards are expected on every interface; non-public forwards
/// only on private interfaces. Dead requests (requester dropped its
/// `Arc<()>`) are pruned first; then, for each external port, per-interface
/// rules are added, removed, or re-targeted to match the surviving request.
async fn sync(&mut self, interfaces: &BTreeMap<InternedString, bool>) -> Result<(), Error> {
let private_interfaces = interfaces
.iter()
.filter(|(_, public)| !*public)
.map(|(i, _)| i)
.collect::<BTreeSet<_>>();
let all_interfaces = interfaces.keys().collect::<BTreeSet<_>>();
// Drop requests whose owners are gone.
self.requested.retain(|_, req| req.rc.strong_count() > 0);
// Visit every external port that is either requested or installed.
for external in self
.requested
.keys()
.chain(self.current.keys())
.copied()
.collect::<BTreeSet<_>>()
{
match (
self.requested.get(&external),
self.current.get_mut(&external),
) {
// Requested and (partially) installed: diff expected vs actual.
(Some(req), Some(cur)) => {
let expected = if req.public {
&all_interfaces
} else {
&private_interfaces
};
let actual = cur.keys().collect::<BTreeSet<_>>();
let mut to_rm = actual
.difference(expected)
.copied()
.cloned()
.collect::<BTreeSet<_>>();
let mut to_add = expected
.difference(&actual)
.copied()
.cloned()
.collect::<BTreeSet<_>>();
// Re-install rules whose target address changed.
for interface in actual.intersection(expected).copied() {
if cur[interface] != req.target {
to_rm.insert(interface.clone());
to_add.insert(interface.clone());
}
}
for interface in to_rm {
unforward(external, &*interface, cur[&interface]).await?;
cur.remove(&interface);
}
for interface in to_add {
forward(external, &*interface, req.target).await?;
cur.insert(interface, req.target);
}
}
// Requested but nothing installed yet: install everywhere expected.
(Some(req), None) => {
let cur = self.current.entry(external).or_default();
for interface in if req.public {
&all_interfaces
} else {
&private_interfaces
}
.into_iter()
.copied()
.cloned()
{
forward(external, &*interface, req.target).await?;
cur.insert(interface, req.target);
}
}
// Installed but no longer requested: tear everything down.
(None, Some(cur)) => {
let to_rm = cur.keys().cloned().collect::<BTreeSet<_>>();
for interface in to_rm {
unforward(external, &*interface, cur[&interface]).await?;
cur.remove(&interface);
}
self.current.remove(&external);
}
_ => (),
}
}
Ok(())
}
}
/// Error returned when the forwarding thread's channel is closed — i.e. the
/// background task has exited and requests can no longer be serviced. Takes
/// (and discards) any value so it can be used directly in `map_err`.
fn err_has_exited<T>(_: T) -> Error {
Error::new(
eyre!("PortForwardController thread has exited"),
ErrorKind::Unknown,
)
}
pub struct LanPortForwardController { pub struct LanPortForwardController {
forwards: Mutex<BTreeMap<u16, BTreeMap<SocketAddr, Weak<()>>>>, req: mpsc::UnboundedSender<(
Option<(u16, ForwardRequest)>,
oneshot::Sender<Result<(), Error>>,
)>,
_thread: NonDetachingJoinHandle<()>,
} }
impl LanPortForwardController { impl LanPortForwardController {
pub fn new() -> Self { pub fn new(
mut net_iface: watch::Receiver<BTreeMap<InternedString, NetworkInterfaceInfo>>,
) -> Self {
let (req_send, mut req_recv) = mpsc::unbounded_channel();
let thread = NonDetachingJoinHandle::from(tokio::spawn(async move {
let mut state = ForwardState::default();
let mut interfaces = net_iface
.borrow_and_update()
.iter()
.map(|(iface, info)| (iface.clone(), info.public()))
.collect();
let mut reply: Option<oneshot::Sender<Result<(), Error>>> = None;
loop {
tokio::select! {
msg = req_recv.recv() => {
if let Some((msg, re)) = msg {
if let Some((external, req)) = msg {
state.requested.insert(external, req);
}
reply = Some(re);
} else {
break;
}
}
_ = net_iface.changed() => {
interfaces = net_iface
.borrow()
.iter()
.map(|(iface, info)| (iface.clone(), info.public()))
.collect();
}
}
let res = state.sync(&interfaces).await;
if let Err(e) = &res {
tracing::error!("Error in PortForwardController: {e}");
tracing::debug!("{e:?}");
}
if let Some(re) = reply.take() {
let _ = re.send(res);
}
}
}));
Self { Self {
forwards: Mutex::new(BTreeMap::new()), req: req_send,
_thread: thread,
} }
} }
pub async fn add(&self, port: u16, addr: SocketAddr) -> Result<Arc<()>, Error> { pub async fn add(&self, port: u16, public: bool, target: SocketAddr) -> Result<Arc<()>, Error> {
let mut writable = self.forwards.lock().await;
let (prev, mut forward) = if let Some(forward) = writable.remove(&port) {
(
forward.keys().next().cloned(),
forward
.into_iter()
.filter(|(_, rc)| rc.strong_count() > 0)
.collect(),
)
} else {
(None, BTreeMap::new())
};
let rc = Arc::new(()); let rc = Arc::new(());
forward.insert(addr, Arc::downgrade(&rc)); let (send, recv) = oneshot::channel();
let next = forward.keys().next().cloned(); self.req
if !forward.is_empty() { .send((
writable.insert(port, forward); Some((
} port,
ForwardRequest {
public,
target,
rc: Arc::downgrade(&rc),
},
)),
send,
))
.map_err(err_has_exited)?;
update_forward(port, prev, next).await?; recv.await.map_err(err_has_exited)?.map(|_| rc)
Ok(rc)
} }
pub async fn gc(&self, external: u16) -> Result<(), Error> { pub async fn gc(&self) -> Result<(), Error> {
let mut writable = self.forwards.lock().await; let (send, recv) = oneshot::channel();
let (prev, forward) = if let Some(forward) = writable.remove(&external) { self.req.send((None, send)).map_err(err_has_exited)?;
(
forward.keys().next().cloned(),
forward
.into_iter()
.filter(|(_, rc)| rc.strong_count() > 0)
.collect(),
)
} else {
(None, BTreeMap::new())
};
let next = forward.keys().next().cloned();
if !forward.is_empty() {
writable.insert(external, forward);
}
update_forward(external, prev, next).await recv.await.map_err(err_has_exited)?
} }
} }
async fn update_forward(
external: u16,
prev: Option<SocketAddr>,
next: Option<SocketAddr>,
) -> Result<(), Error> {
if prev != next {
if let Some(prev) = prev {
unforward(START9_BRIDGE_IFACE, external, prev).await?;
}
if let Some(next) = next {
forward(START9_BRIDGE_IFACE, external, next).await?;
}
}
Ok(())
}
// iptables -I FORWARD -o br-start9 -p tcp -d 172.18.0.2 --dport 8333 -j ACCEPT // iptables -I FORWARD -o br-start9 -p tcp -d 172.18.0.2 --dport 8333 -j ACCEPT
// iptables -t nat -I PREROUTING -p tcp --dport 32768 -j DNAT --to 172.18.0.2:8333 // iptables -t nat -I PREROUTING -p tcp --dport 32768 -j DNAT --to 172.18.0.2:8333
async fn forward(iface: &str, external: u16, addr: SocketAddr) -> Result<(), Error> { async fn forward(external: u16, interface: &str, target: SocketAddr) -> Result<(), Error> {
Command::new("iptables") for proto in ["tcp", "udp"] {
.arg("-I") Command::new("iptables")
.arg("FORWARD") .arg("-I")
.arg("-o") .arg("FORWARD")
.arg(iface) .arg("-i")
.arg("-p") .arg(interface)
.arg("tcp") .arg("-o")
.arg("-d") .arg(START9_BRIDGE_IFACE)
.arg(addr.ip().to_string()) .arg("-p")
.arg("--dport") .arg(proto)
.arg(addr.port().to_string()) .arg("-d")
.arg("-j") .arg(target.ip().to_string())
.arg("ACCEPT") .arg("--dport")
.invoke(crate::ErrorKind::Network) .arg(target.port().to_string())
.await?; .arg("-j")
Command::new("iptables") .arg("ACCEPT")
.arg("-t") .invoke(crate::ErrorKind::Network)
.arg("nat") .await?;
.arg("-I") Command::new("iptables")
.arg("PREROUTING") .arg("-t")
.arg("-p") .arg("nat")
.arg("tcp") .arg("-I")
.arg("--dport") .arg("PREROUTING")
.arg(external.to_string()) .arg("-i")
.arg("-j") .arg(interface)
.arg("DNAT") .arg("-p")
.arg("--to") .arg(proto)
.arg(addr.to_string()) .arg("--dport")
.invoke(crate::ErrorKind::Network) .arg(external.to_string())
.await?; .arg("-j")
.arg("DNAT")
.arg("--to")
.arg(target.to_string())
.invoke(crate::ErrorKind::Network)
.await?;
}
Ok(()) Ok(())
} }
// iptables -D FORWARD -o br-start9 -p tcp -d 172.18.0.2 --dport 8333 -j ACCEPT // iptables -D FORWARD -o br-start9 -p tcp -d 172.18.0.2 --dport 8333 -j ACCEPT
// iptables -t nat -D PREROUTING -p tcp --dport 32768 -j DNAT --to 172.18.0.2:8333 // iptables -t nat -D PREROUTING -p tcp --dport 32768 -j DNAT --to 172.18.0.2:8333
async fn unforward(iface: &str, external: u16, addr: SocketAddr) -> Result<(), Error> { async fn unforward(external: u16, interface: &str, target: SocketAddr) -> Result<(), Error> {
Command::new("iptables") for proto in ["tcp", "udp"] {
.arg("-D") Command::new("iptables")
.arg("FORWARD") .arg("-D")
.arg("-o") .arg("FORWARD")
.arg(iface) .arg("-i")
.arg("-p") .arg(interface)
.arg("tcp") .arg("-o")
.arg("-d") .arg(START9_BRIDGE_IFACE)
.arg(addr.ip().to_string()) .arg("-p")
.arg("--dport") .arg(proto)
.arg(addr.port().to_string()) .arg("-d")
.arg("-j") .arg(target.ip().to_string())
.arg("ACCEPT") .arg("--dport")
.invoke(crate::ErrorKind::Network) .arg(target.port().to_string())
.await?; .arg("-j")
Command::new("iptables") .arg("ACCEPT")
.arg("-t") .invoke(crate::ErrorKind::Network)
.arg("nat") .await?;
.arg("-D") Command::new("iptables")
.arg("PREROUTING") .arg("-t")
.arg("-p") .arg("nat")
.arg("tcp") .arg("-D")
.arg("--dport") .arg("PREROUTING")
.arg(external.to_string()) .arg("-i")
.arg("-j") .arg(interface)
.arg("DNAT") .arg("-p")
.arg("--to") .arg(proto)
.arg(addr.to_string()) .arg("--dport")
.invoke(crate::ErrorKind::Network) .arg(external.to_string())
.await?; .arg("-j")
.arg("DNAT")
.arg("--to")
.arg(target.to_string())
.invoke(crate::ErrorKind::Network)
.await?;
}
Ok(()) Ok(())
} }

View File

@@ -1,57 +1,298 @@
use std::fmt; use clap::Parser;
use std::str::FromStr;
use clap::builder::ValueParserFactory;
use imbl_value::InternedString; use imbl_value::InternedString;
use models::FromStrParser; use models::{HostId, PackageId};
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerArgs, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use torut::onion::OnionAddressV3; use torut::onion::OnionAddressV3;
use ts_rs::TS; use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::net::acme::AcmeProvider;
use crate::prelude::*; use crate::prelude::*;
use crate::util::serde::{display_serializable, HandlerExtSerde};
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord, TS)] #[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
#[serde(tag = "kind")]
#[ts(export)]
pub enum HostAddress { pub enum HostAddress {
Onion { Onion {
#[ts(type = "string")]
address: OnionAddressV3, address: OnionAddressV3,
}, },
Domain { Domain {
#[ts(type = "string")]
address: InternedString, address: InternedString,
public: bool,
acme: Option<AcmeProvider>,
}, },
} }
impl FromStr for HostAddress { #[derive(Debug, Deserialize, Serialize, TS)]
type Err = Error; pub struct DomainConfig {
fn from_str(s: &str) -> Result<Self, Self::Err> { pub public: bool,
if let Some(addr) = s.strip_suffix(".onion") { pub acme: Option<AcmeProvider>,
Ok(HostAddress::Onion {
address: addr
.parse::<OnionAddressV3>()
.with_kind(ErrorKind::ParseUrl)?,
})
} else {
Ok(HostAddress::Domain { address: s.into() })
}
}
} }
impl fmt::Display for HostAddress { #[derive(Deserialize, Serialize, Parser)]
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { pub struct AddressApiParams {
match self { host: HostId,
Self::Onion { address } => write!(f, "{address}"),
Self::Domain { address } => write!(f, "{address}"),
}
}
} }
impl ValueParserFactory for HostAddress { pub fn address<C: Context>() -> ParentHandler<C, AddressApiParams, PackageId> {
type Parser = FromStrParser<Self>; ParentHandler::<C, AddressApiParams, PackageId>::new()
fn value_parser() -> Self::Parser { .subcommand(
Self::Parser::new() "domain",
} ParentHandler::<C, Empty, (PackageId, HostId)>::new()
.subcommand(
"add",
from_fn_async(add_domain)
.with_inherited(|_, a| a)
.no_display()
.with_about("Add an address to this host")
.with_call_remote::<CliContext>(),
)
.subcommand(
"remove",
from_fn_async(remove_domain)
.with_inherited(|_, a| a)
.no_display()
.with_about("Remove an address from this host")
.with_call_remote::<CliContext>(),
)
.with_inherited(|AddressApiParams { host }, package| (package, host)),
)
.subcommand(
"onion",
ParentHandler::<C, Empty, (PackageId, HostId)>::new()
.subcommand(
"add",
from_fn_async(add_onion)
.with_inherited(|_, a| a)
.no_display()
.with_about("Add an address to this host")
.with_call_remote::<CliContext>(),
)
.subcommand(
"remove",
from_fn_async(remove_onion)
.with_inherited(|_, a| a)
.no_display()
.with_about("Remove an address from this host")
.with_call_remote::<CliContext>(),
)
.with_inherited(|AddressApiParams { host }, package| (package, host)),
)
.subcommand(
"list",
from_fn_async(list_addresses)
.with_inherited(|AddressApiParams { host }, package| (package, host))
.with_display_serializable()
.with_custom_display_fn(|HandlerArgs { params, .. }, res| {
use prettytable::*;
if let Some(format) = params.format {
display_serializable(format, res);
return Ok(());
}
let mut table = Table::new();
table.add_row(row![bc => "ADDRESS", "PUBLIC", "ACME PROVIDER"]);
for address in &res {
match address {
HostAddress::Onion { address } => {
table.add_row(row![address, true, "N/A"]);
}
HostAddress::Domain {
address,
public,
acme,
} => {
table.add_row(row![
address,
*public,
acme.as_ref().map(|a| a.0.as_str()).unwrap_or("NONE")
]);
}
}
}
table.print_tty(false)?;
Ok(())
})
.with_about("List addresses for this host")
.with_call_remote::<CliContext>(),
)
}
// CLI/RPC parameters for `host address domain add`.
#[derive(Deserialize, Serialize, Parser)]
pub struct AddDomainParams {
pub domain: InternedString,
// When set, the domain is marked non-public (public = !private).
#[arg(long)]
pub private: bool,
// ACME provider for this domain's certificates; must already have been
// registered via `net acme init`.
#[arg(long)]
pub acme: Option<AcmeProvider>,
}
/// Handler for `host address domain add`: attach a clearnet domain to a
/// service host.
///
/// Validates that any requested ACME provider is already initialized, records
/// the domain (with its public/acme settings) on the host in the database,
/// then tells the running service to re-sync its host configuration.
pub async fn add_domain(
ctx: RpcContext,
AddDomainParams {
domain,
private,
acme,
}: AddDomainParams,
(package, host): (PackageId, HostId),
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
if let Some(acme) = &acme {
// Reject providers the user never ran `acme.init` for.
if !db.as_public().as_server_info().as_acme().contains_key(&acme)? {
return Err(Error::new(eyre!("unknown acme provider {}, please run acme.init for this provider first", acme.0), ErrorKind::InvalidRequest));
}
}
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package)
.or_not_found(&package)?
.as_hosts_mut()
.as_idx_mut(&host)
.or_not_found(&host)?
.as_domains_mut()
.insert(
&domain,
&DomainConfig {
public: !private,
acme,
},
)
})
.await?;
let service = ctx.services.get(&package).await;
let service_ref = service.as_ref().or_not_found(&package)?;
// Apply the change to the live network configuration.
service_ref.update_host(host).await?;
Ok(())
}
// CLI/RPC parameters for `host address domain remove`.
#[derive(Deserialize, Serialize, Parser)]
pub struct RemoveDomainParams {
pub domain: InternedString,
}
/// Handler for `host address domain remove`: detach a clearnet domain from a
/// service host, then re-sync the service's host configuration.
pub async fn remove_domain(
ctx: RpcContext,
RemoveDomainParams { domain }: RemoveDomainParams,
(package, host): (PackageId, HostId),
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package)
.or_not_found(&package)?
.as_hosts_mut()
.as_idx_mut(&host)
.or_not_found(&host)?
.as_domains_mut()
.remove(&domain)
})
.await?;
let service = ctx.services.get(&package).await;
let service_ref = service.as_ref().or_not_found(&package)?;
// Apply the change to the live network configuration.
service_ref.update_host(host).await?;
Ok(())
}
// CLI/RPC parameters for the `host address onion add`/`remove` subcommands:
// a full onion hostname ending in ".onion".
#[derive(Deserialize, Serialize, Parser)]
pub struct OnionParams {
pub onion: String,
}
/// Handler for `host address onion add`: attach a .onion address to a service
/// host.
///
/// The address must end in `.onion` and parse as a v3 onion address; the
/// key-store lookup inside the mutation validates that we actually hold the
/// private key for it (the returned key is discarded).
pub async fn add_onion(
ctx: RpcContext,
OnionParams { onion }: OnionParams,
(package, host): (PackageId, HostId),
) -> Result<(), Error> {
let onion = onion
.strip_suffix(".onion")
.ok_or_else(|| {
Error::new(
eyre!("onion hostname must end in .onion"),
ErrorKind::InvalidOnionAddress,
)
})?
.parse::<OnionAddressV3>()?;
ctx.db
.mutate(|db| {
// Validation only: errors if the key store has no key for this address.
db.as_private().as_key_store().as_onion().get_key(&onion)?;
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package)
.or_not_found(&package)?
.as_hosts_mut()
.as_idx_mut(&host)
.or_not_found(&host)?
.as_onions_mut()
.mutate(|a| Ok(a.insert(onion)))
})
.await?;
let service = ctx.services.get(&package).await;
let service_ref = service.as_ref().or_not_found(&package)?;
// Apply the change to the live network configuration.
service_ref.update_host(host).await?;
Ok(())
}
/// Handler for `host address onion remove`: detach a .onion address from a
/// service host, then re-sync the service's host configuration.
///
/// The address must end in `.onion` and parse as a v3 onion address.
pub async fn remove_onion(
ctx: RpcContext,
OnionParams { onion }: OnionParams,
(package, host): (PackageId, HostId),
) -> Result<(), Error> {
let onion = onion
.strip_suffix(".onion")
.ok_or_else(|| {
Error::new(
eyre!("onion hostname must end in .onion"),
ErrorKind::InvalidOnionAddress,
)
})?
.parse::<OnionAddressV3>()?;
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package)
.or_not_found(&package)?
.as_hosts_mut()
.as_idx_mut(&host)
.or_not_found(&host)?
.as_onions_mut()
.mutate(|a| Ok(a.remove(&onion)))
})
.await?;
let service = ctx.services.get(&package).await;
let service_ref = service.as_ref().or_not_found(&package)?;
// Apply the change to the live network configuration.
service_ref.update_host(host).await?;
Ok(())
}
/// Handler for `host address list`: return every address (onion and domain)
/// configured on the given host, via `Host::addresses`.
pub async fn list_addresses(
ctx: RpcContext,
_: Empty,
(package, host): (PackageId, HostId),
) -> Result<Vec<HostAddress>, Error> {
Ok(ctx
.db
.peek()
.await
.into_public()
.into_package_data()
.into_idx(&package)
.or_not_found(&package)?
.into_hosts()
.into_idx(&host)
.or_not_found(&host)?
.de()?
.addresses()
.collect())
} }

View File

@@ -1,13 +1,18 @@
use std::collections::BTreeMap;
use std::str::FromStr; use std::str::FromStr;
use clap::builder::ValueParserFactory; use clap::builder::ValueParserFactory;
use models::{FromStrParser, HostId}; use clap::Parser;
use models::{FromStrParser, HostId, PackageId};
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerArgs, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use ts_rs::TS; use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::net::forward::AvailablePorts; use crate::net::forward::AvailablePorts;
use crate::net::vhost::AlpnInfo; use crate::net::vhost::AlpnInfo;
use crate::prelude::*; use crate::prelude::*;
use crate::util::serde::{display_serializable, HandlerExtSerde};
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, TS)] #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, TS)]
#[ts(export)] #[ts(export)]
@@ -41,12 +46,14 @@ impl FromStr for BindId {
pub struct BindInfo { pub struct BindInfo {
pub enabled: bool, pub enabled: bool,
pub options: BindOptions, pub options: BindOptions,
pub lan: LanInfo, pub net: NetInfo,
} }
#[derive(Clone, Copy, Debug, Deserialize, Serialize, TS, PartialEq, Eq, PartialOrd, Ord)] #[derive(Clone, Copy, Debug, Deserialize, Serialize, TS, PartialEq, Eq, PartialOrd, Ord)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
#[ts(export)] #[ts(export)]
pub struct LanInfo { pub struct NetInfo {
pub public: bool,
pub assigned_port: Option<u16>, pub assigned_port: Option<u16>,
pub assigned_ssl_port: Option<u16>, pub assigned_ssl_port: Option<u16>,
} }
@@ -63,7 +70,8 @@ impl BindInfo {
Ok(Self { Ok(Self {
enabled: true, enabled: true,
options, options,
lan: LanInfo { net: NetInfo {
public: false,
assigned_port, assigned_port,
assigned_ssl_port, assigned_ssl_port,
}, },
@@ -74,7 +82,7 @@ impl BindInfo {
available_ports: &mut AvailablePorts, available_ports: &mut AvailablePorts,
options: BindOptions, options: BindOptions,
) -> Result<Self, Error> { ) -> Result<Self, Error> {
let Self { mut lan, .. } = self; let Self { net: mut lan, .. } = self;
if options if options
.secure .secure
.map_or(false, |s| !(s.ssl && options.add_ssl.is_some())) .map_or(false, |s| !(s.ssl && options.add_ssl.is_some()))
@@ -104,7 +112,7 @@ impl BindInfo {
Ok(Self { Ok(Self {
enabled: true, enabled: true,
options, options,
lan, net: lan,
}) })
} }
pub fn disable(&mut self) { pub fn disable(&mut self) {
@@ -137,3 +145,122 @@ pub struct AddSslOptions {
// pub add_x_forwarded_headers: bool, // TODO // pub add_x_forwarded_headers: bool, // TODO
pub alpn: Option<AlpnInfo>, pub alpn: Option<AlpnInfo>,
} }
// CLI/RPC parameters shared by the `host binding` subcommands.
#[derive(Deserialize, Serialize, Parser)]
pub struct BindingApiParams {
host: HostId,
}
/// Subcommand tree for `... host binding`: list a host's port bindings and
/// toggle whether a binding is exposed on public interfaces.
///
/// Fixes over the previous revision: help-string typos ("bindinges", and the
/// set-public about text was a copy-paste of the add-address text), and
/// `print_tty` errors are now propagated with `?` instead of `.unwrap()`,
/// matching the `address list` display handler in this module.
pub fn binding<C: Context>() -> ParentHandler<C, BindingApiParams, PackageId> {
ParentHandler::<C, BindingApiParams, PackageId>::new()
.subcommand(
"list",
from_fn_async(list_bindings)
.with_inherited(|BindingApiParams { host }, package| (package, host))
.with_display_serializable()
.with_custom_display_fn(|HandlerArgs { params, .. }, res| {
use prettytable::*;
// Structured output (json/yaml/...) requested: skip the table.
if let Some(format) = params.format {
return Ok(display_serializable(format, res));
}
let mut table = Table::new();
table.add_row(row![bc => "INTERNAL PORT", "ENABLED", "PUBLIC", "EXTERNAL PORT", "EXTERNAL SSL PORT"]);
for (internal, info) in res {
table.add_row(row![
internal,
info.enabled,
info.net.public,
if let Some(port) = info.net.assigned_port {
port.to_string()
} else {
"N/A".to_owned()
},
if let Some(port) = info.net.assigned_ssl_port {
port.to_string()
} else {
"N/A".to_owned()
},
]);
}
// Propagate I/O errors instead of panicking.
table.print_tty(false)?;
Ok(())
})
.with_about("List bindings for this host")
.with_call_remote::<CliContext>(),
)
.subcommand(
"set-public",
from_fn_async(set_public)
.with_inherited(|BindingApiParams { host }, package| (package, host))
.no_display()
.with_about("Set whether a binding on this host is exposed on public interfaces")
.with_call_remote::<CliContext>(),
)
}
/// Handler for `host binding list`: return the host's bindings keyed by
/// internal port.
pub async fn list_bindings(
ctx: RpcContext,
_: Empty,
(package, host): (PackageId, HostId),
) -> Result<BTreeMap<u16, BindInfo>, Error> {
ctx.db
.peek()
.await
.into_public()
.into_package_data()
.into_idx(&package)
.or_not_found(&package)?
.into_hosts()
.into_idx(&host)
.or_not_found(&host)?
.into_bindings()
.de()
}
// CLI/RPC parameters for `host binding set-public`.
#[derive(Deserialize, Serialize, Parser)]
#[serde(rename_all = "camelCase")]
pub struct SetPublicParams {
internal_port: u16,
// Omitting --public defaults to true (expose the binding publicly).
#[arg(long)]
public: Option<bool>,
}
/// Handler for `host binding set-public`: set whether the binding on
/// `internal_port` is exposed on public network interfaces, then re-sync the
/// service's host configuration so the change takes effect.
pub async fn set_public(
ctx: RpcContext,
SetPublicParams {
internal_port,
public,
}: SetPublicParams,
(package, host): (PackageId, HostId),
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package)
.or_not_found(&package)?
.as_hosts_mut()
.as_idx_mut(&host)
.or_not_found(&host)?
.as_bindings_mut()
.mutate(|b| {
b.get_mut(&internal_port)
.or_not_found(internal_port)?
.net
.public = public.unwrap_or(true);
Ok(())
})
})
.await?;
ctx.services
.get(&package)
.await
.as_ref()
.or_not_found(&package)?
.update_host(host)
.await
}

View File

@@ -5,13 +5,14 @@ use imbl_value::InternedString;
use models::{HostId, PackageId}; use models::{HostId, PackageId};
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerExt, ParentHandler}; use rpc_toolkit::{from_fn_async, Context, Empty, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use torut::onion::OnionAddressV3;
use ts_rs::TS; use ts_rs::TS;
use crate::context::{CliContext, RpcContext}; use crate::context::RpcContext;
use crate::db::model::DatabaseModel; use crate::db::model::DatabaseModel;
use crate::net::forward::AvailablePorts; use crate::net::forward::AvailablePorts;
use crate::net::host::address::HostAddress; use crate::net::host::address::{address, DomainConfig, HostAddress};
use crate::net::host::binding::{BindInfo, BindOptions}; use crate::net::host::binding::{binding, BindInfo, BindOptions};
use crate::net::service_interface::HostnameInfo; use crate::net::service_interface::HostnameInfo;
use crate::prelude::*; use crate::prelude::*;
@@ -25,7 +26,10 @@ pub mod binding;
pub struct Host { pub struct Host {
pub kind: HostKind, pub kind: HostKind,
pub bindings: BTreeMap<u16, BindInfo>, pub bindings: BTreeMap<u16, BindInfo>,
pub addresses: BTreeSet<HostAddress>, #[ts(type = "string[]")]
pub onions: BTreeSet<OnionAddressV3>,
#[ts(as = "BTreeMap::<String, DomainConfig>")]
pub domains: BTreeMap<InternedString, DomainConfig>,
/// COMPUTED: NetService::update /// COMPUTED: NetService::update
pub hostname_info: BTreeMap<u16, Vec<HostnameInfo>>, // internal port -> Hostnames pub hostname_info: BTreeMap<u16, Vec<HostnameInfo>>, // internal port -> Hostnames
} }
@@ -39,13 +43,27 @@ impl Host {
Self { Self {
kind, kind,
bindings: BTreeMap::new(), bindings: BTreeMap::new(),
addresses: BTreeSet::new(), onions: BTreeSet::new(),
domains: BTreeMap::new(),
hostname_info: BTreeMap::new(), hostname_info: BTreeMap::new(),
} }
} }
pub fn addresses(&self) -> impl Iterator<Item = &HostAddress> { pub fn addresses<'a>(&'a self) -> impl Iterator<Item = HostAddress> + 'a {
// TODO: handle primary self.onions
self.addresses.iter() .iter()
.cloned()
.map(|address| HostAddress::Onion { address })
.chain(
self.domains
.iter()
.map(
|(address, DomainConfig { public, acme })| HostAddress::Domain {
address: address.clone(),
public: *public,
acme: acme.clone(),
},
),
)
} }
} }
@@ -104,12 +122,12 @@ pub fn host_for<'a>(
}; };
host_info(db, package_id)?.upsert(host_id, || { host_info(db, package_id)?.upsert(host_id, || {
let mut h = Host::new(host_kind); let mut h = Host::new(host_kind);
h.addresses.insert(HostAddress::Onion { h.onions.insert(
address: tor_key tor_key
.or_not_found("generated tor key")? .or_not_found("generated tor key")?
.public() .public()
.get_onion_address(), .get_onion_address(),
}); );
Ok(h) Ok(h)
}) })
} }
@@ -161,6 +179,10 @@ pub fn host<C: Context>() -> ParentHandler<C, HostParams> {
"address", "address",
address::<C>().with_inherited(|HostParams { package }, _| package), address::<C>().with_inherited(|HostParams { package }, _| package),
) )
.subcommand(
"binding",
binding::<C>().with_inherited(|HostParams { package }, _| package),
)
} }
pub async fn list_hosts( pub async fn list_hosts(
@@ -178,122 +200,3 @@ pub async fn list_hosts(
.into_hosts() .into_hosts()
.keys() .keys()
} }
#[derive(Deserialize, Serialize, Parser)]
pub struct AddressApiParams {
host: HostId,
}
pub fn address<C: Context>() -> ParentHandler<C, AddressApiParams, PackageId> {
ParentHandler::<C, AddressApiParams, PackageId>::new()
.subcommand(
"add",
from_fn_async(add_address)
.with_inherited(|AddressApiParams { host }, package| (package, host))
.no_display()
.with_about("Add an address to this host")
.with_call_remote::<CliContext>(),
)
.subcommand(
"remove",
from_fn_async(remove_address)
.with_inherited(|AddressApiParams { host }, package| (package, host))
.no_display()
.with_about("Remove an address from this host")
.with_call_remote::<CliContext>(),
)
.subcommand(
"list",
from_fn_async(list_addresses)
.with_inherited(|AddressApiParams { host }, package| (package, host))
.with_custom_display_fn(|_, res| {
for address in res {
println!("{address}")
}
Ok(())
})
.with_about("List addresses for this host")
.with_call_remote::<CliContext>(),
)
}
#[derive(Deserialize, Serialize, Parser)]
pub struct AddressParams {
pub address: HostAddress,
}
pub async fn add_address(
ctx: RpcContext,
AddressParams { address }: AddressParams,
(package, host): (PackageId, HostId),
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
if let HostAddress::Onion { address } = address {
db.as_private()
.as_key_store()
.as_onion()
.get_key(&address)?;
}
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package)
.or_not_found(&package)?
.as_hosts_mut()
.as_idx_mut(&host)
.or_not_found(&host)?
.as_addresses_mut()
.mutate(|a| Ok(a.insert(address)))
})
.await?;
let service = ctx.services.get(&package).await;
let service_ref = service.as_ref().or_not_found(&package)?;
service_ref.update_host(host).await?;
Ok(())
}
pub async fn remove_address(
ctx: RpcContext,
AddressParams { address }: AddressParams,
(package, host): (PackageId, HostId),
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package)
.or_not_found(&package)?
.as_hosts_mut()
.as_idx_mut(&host)
.or_not_found(&host)?
.as_addresses_mut()
.mutate(|a| Ok(a.remove(&address)))
})
.await?;
let service = ctx.services.get(&package).await;
let service_ref = service.as_ref().or_not_found(&package)?;
service_ref.update_host(host).await?;
Ok(())
}
pub async fn list_addresses(
ctx: RpcContext,
_: Empty,
(package, host): (PackageId, HostId),
) -> Result<BTreeSet<HostAddress>, Error> {
ctx.db
.peek()
.await
.into_public()
.into_package_data()
.into_idx(&package)
.or_not_found(&package)?
.into_hosts()
.into_idx(&host)
.or_not_found(&host)?
.into_addresses()
.de()
}

View File

@@ -1,13 +1,13 @@
use rpc_toolkit::{Context, HandlerExt, ParentHandler}; use rpc_toolkit::{Context, HandlerExt, ParentHandler};
pub mod acme; pub mod acme;
pub mod dhcp;
pub mod dns; pub mod dns;
pub mod forward; pub mod forward;
pub mod host; pub mod host;
pub mod keys; pub mod keys;
pub mod mdns; pub mod mdns;
pub mod net_controller; pub mod net_controller;
pub mod network_interface;
pub mod service_interface; pub mod service_interface;
pub mod ssl; pub mod ssl;
pub mod static_server; pub mod static_server;
@@ -17,20 +17,23 @@ pub mod vhost;
pub mod web_server; pub mod web_server;
pub mod wifi; pub mod wifi;
pub const PACKAGE_CERT_PATH: &str = "/var/lib/embassy/ssl";
pub fn net<C: Context>() -> ParentHandler<C> { pub fn net<C: Context>() -> ParentHandler<C> {
ParentHandler::new() ParentHandler::new()
.subcommand( .subcommand(
"tor", "tor",
tor::tor::<C>().with_about("Tor commands such as list-services, logs, and reset"), tor::tor::<C>().with_about("Tor commands such as list-services, logs, and reset"),
) )
.subcommand(
"dhcp",
dhcp::dhcp::<C>().with_about("Command to update IP assigned from dhcp"),
)
.subcommand( .subcommand(
"acme", "acme",
acme::acme::<C>().with_about("Setup automatic clearnet certificate acquisition"), acme::acme::<C>().with_about("Setup automatic clearnet certificate acquisition"),
) )
.subcommand(
"network-interface",
network_interface::network_interface_api::<C>()
.with_about("View and edit network interface configurations"),
)
.subcommand(
"vhost",
vhost::vhost_api::<C>().with_about("Manage ssl virtual host proxy"),
)
} }

View File

@@ -5,6 +5,7 @@ use std::sync::{Arc, Weak};
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use imbl::OrdMap; use imbl::OrdMap;
use imbl_value::InternedString; use imbl_value::InternedString;
use ipnet::IpNet;
use models::{HostId, OptionExt, PackageId}; use models::{HostId, OptionExt, PackageId};
use torut::onion::{OnionAddressV3, TorSecretKeyV3}; use torut::onion::{OnionAddressV3, TorSecretKeyV3};
use tracing::instrument; use tracing::instrument;
@@ -15,11 +16,13 @@ use crate::hostname::Hostname;
use crate::net::dns::DnsController; use crate::net::dns::DnsController;
use crate::net::forward::LanPortForwardController; use crate::net::forward::LanPortForwardController;
use crate::net::host::address::HostAddress; use crate::net::host::address::HostAddress;
use crate::net::host::binding::{AddSslOptions, BindId, BindOptions, LanInfo}; use crate::net::host::binding::{BindId, BindOptions};
use crate::net::host::{host_for, Host, HostKind, Hosts}; use crate::net::host::{host_for, Host, HostKind, Hosts};
use crate::net::network_interface::NetworkInterfaceController;
use crate::net::service_interface::{HostnameInfo, IpHostname, OnionHostname}; use crate::net::service_interface::{HostnameInfo, IpHostname, OnionHostname};
use crate::net::tor::TorController; use crate::net::tor::TorController;
use crate::net::vhost::{AlpnInfo, VHostController}; use crate::net::utils::ipv6_is_local;
use crate::net::vhost::{AlpnInfo, TargetInfo, VHostController};
use crate::prelude::*; use crate::prelude::*;
use crate::util::serde::MaybeUtf8String; use crate::util::serde::MaybeUtf8String;
use crate::HOST_IP; use crate::HOST_IP;
@@ -28,6 +31,7 @@ pub struct PreInitNetController {
pub db: TypedPatchDb<Database>, pub db: TypedPatchDb<Database>,
tor: TorController, tor: TorController,
vhost: VHostController, vhost: VHostController,
pub net_iface: Arc<NetworkInterfaceController>,
os_bindings: Vec<Arc<()>>, os_bindings: Vec<Arc<()>>,
server_hostnames: Vec<Option<InternedString>>, server_hostnames: Vec<Option<InternedString>>,
} }
@@ -40,10 +44,12 @@ impl PreInitNetController {
hostname: &Hostname, hostname: &Hostname,
os_tor_key: TorSecretKeyV3, os_tor_key: TorSecretKeyV3,
) -> Result<Self, Error> { ) -> Result<Self, Error> {
let net_iface = Arc::new(NetworkInterfaceController::new(db.clone()));
let mut res = Self { let mut res = Self {
db: db.clone(), db: db.clone(),
tor: TorController::new(tor_control, tor_socks), tor: TorController::new(tor_control, tor_socks),
vhost: VHostController::new(db), vhost: VHostController::new(db, net_iface.clone()),
net_iface,
os_bindings: Vec::new(), os_bindings: Vec::new(),
server_hostnames: Vec::new(), server_hostnames: Vec::new(),
}; };
@@ -56,11 +62,6 @@ impl PreInitNetController {
hostname: &Hostname, hostname: &Hostname,
tor_key: TorSecretKeyV3, tor_key: TorSecretKeyV3,
) -> Result<(), Error> { ) -> Result<(), Error> {
let alpn = Err(AlpnInfo::Specified(vec![
MaybeUtf8String("http/1.1".into()),
MaybeUtf8String("h2".into()),
]));
self.server_hostnames = vec![ self.server_hostnames = vec![
// LAN IP // LAN IP
None, None,
@@ -74,27 +75,29 @@ impl PreInitNetController {
Some(hostname.local_domain_name()), Some(hostname.local_domain_name()),
]; ];
let vhost_target = TargetInfo {
public: false,
acme: None,
addr: ([127, 0, 0, 1], 80).into(),
connect_ssl: Err(AlpnInfo::Specified(vec![
MaybeUtf8String("http/1.1".into()),
MaybeUtf8String("h2".into()),
])),
};
for hostname in self.server_hostnames.iter().cloned() { for hostname in self.server_hostnames.iter().cloned() {
self.os_bindings.push( self.os_bindings
self.vhost .push(self.vhost.add(hostname, 443, vhost_target.clone())?);
.add(hostname, 443, ([127, 0, 0, 1], 80).into(), alpn.clone())
.await?,
);
} }
// Tor // Tor
self.os_bindings.push( self.os_bindings.push(self.vhost.add(
self.vhost Some(InternedString::from_display(
.add( &tor_key.public().get_onion_address(),
Some(InternedString::from_display( )),
&tor_key.public().get_onion_address(), 443,
)), vhost_target,
443, )?);
([127, 0, 0, 1], 80).into(),
alpn.clone(),
)
.await?,
);
self.os_bindings.extend( self.os_bindings.extend(
self.tor self.tor
.add( .add(
@@ -115,6 +118,7 @@ pub struct NetController {
db: TypedPatchDb<Database>, db: TypedPatchDb<Database>,
pub(super) tor: TorController, pub(super) tor: TorController,
pub(super) vhost: VHostController, pub(super) vhost: VHostController,
pub net_iface: Arc<NetworkInterfaceController>,
pub(super) dns: DnsController, pub(super) dns: DnsController,
pub(super) forward: LanPortForwardController, pub(super) forward: LanPortForwardController,
pub(super) os_bindings: Vec<Arc<()>>, pub(super) os_bindings: Vec<Arc<()>>,
@@ -127,6 +131,7 @@ impl NetController {
db, db,
tor, tor,
vhost, vhost,
net_iface,
os_bindings, os_bindings,
server_hostnames, server_hostnames,
}: PreInitNetController, }: PreInitNetController,
@@ -137,7 +142,8 @@ impl NetController {
tor, tor,
vhost, vhost,
dns: DnsController::init(dns_bind).await?, dns: DnsController::init(dns_bind).await?,
forward: LanPortForwardController::new(), forward: LanPortForwardController::new(net_iface.subscribe()),
net_iface,
os_bindings, os_bindings,
server_hostnames, server_hostnames,
}; };
@@ -169,15 +175,8 @@ impl NetController {
#[derive(Default, Debug)] #[derive(Default, Debug)]
struct HostBinds { struct HostBinds {
lan: BTreeMap< forwards: BTreeMap<u16, (SocketAddr, bool, Arc<()>)>,
u16, vhosts: BTreeMap<(Option<InternedString>, u16), (TargetInfo, Arc<()>)>,
(
LanInfo,
Option<AddSslOptions>,
BTreeSet<InternedString>,
Vec<Arc<()>>,
),
>,
tor: BTreeMap<OnionAddressV3, (OrdMap<u16, SocketAddr>, Vec<Arc<()>>)>, tor: BTreeMap<OnionAddressV3, (OrdMap<u16, SocketAddr>, Vec<Arc<()>>)>,
} }
@@ -206,7 +205,7 @@ impl NetService {
internal_port: u16, internal_port: u16,
options: BindOptions, options: BindOptions,
) -> Result<(), Error> { ) -> Result<(), Error> {
dbg!("bind", &kind, &id, internal_port, &options); crate::dbg!("bind", &kind, &id, internal_port, &options);
let pkg_id = &self.id; let pkg_id = &self.id;
let host = self let host = self
.net_controller()? .net_controller()?
@@ -263,134 +262,161 @@ impl NetService {
pub async fn update(&mut self, id: HostId, host: Host) -> Result<(), Error> { pub async fn update(&mut self, id: HostId, host: Host) -> Result<(), Error> {
let ctrl = self.net_controller()?; let ctrl = self.net_controller()?;
let mut hostname_info = BTreeMap::new(); let mut forwards: BTreeMap<u16, (SocketAddr, bool)> = BTreeMap::new();
let mut vhosts: BTreeMap<(Option<InternedString>, u16), TargetInfo> = BTreeMap::new();
let mut tor: BTreeMap<OnionAddressV3, (TorSecretKeyV3, OrdMap<u16, SocketAddr>)> =
BTreeMap::new();
let mut hostname_info: BTreeMap<u16, Vec<HostnameInfo>> = BTreeMap::new();
let binds = self.binds.entry(id.clone()).or_default(); let binds = self.binds.entry(id.clone()).or_default();
let peek = ctrl.db.peek().await; let peek = ctrl.db.peek().await;
// LAN // LAN
let server_info = peek.as_public().as_server_info(); let server_info = peek.as_public().as_server_info();
let ip_info = server_info.as_ip_info().de()?; let net_ifaces = server_info.as_network_interfaces().de()?;
let hostname = server_info.as_hostname().de()?; let hostname = server_info.as_hostname().de()?;
for (port, bind) in &host.bindings { for (port, bind) in &host.bindings {
if !bind.enabled { if !bind.enabled {
continue; continue;
} }
let old_lan_bind = binds.lan.remove(port); if bind.net.assigned_port.is_some() || bind.net.assigned_ssl_port.is_some() {
let lan_bind = old_lan_bind let mut hostnames = BTreeSet::new();
.as_ref() if let Some(ssl) = &bind.options.add_ssl {
.filter(|(external, ssl, _, _)| { let external = bind
ssl == &bind.options.add_ssl && bind.lan == *external .net
}) .assigned_ssl_port
.cloned(); // only keep existing binding if relevant details match .or_not_found("assigned ssl port")?;
if bind.lan.assigned_port.is_some() || bind.lan.assigned_ssl_port.is_some() { let addr = (self.ip, *port).into();
let new_lan_bind = if let Some(b) = lan_bind { let connect_ssl = if let Some(alpn) = ssl.alpn.clone() {
b Err(alpn)
} else { } else {
let mut rcs = Vec::with_capacity(2 + host.addresses.len()); if bind.options.secure.as_ref().map_or(false, |s| s.ssl) {
let mut hostnames = BTreeSet::new(); Ok(())
if let Some(ssl) = &bind.options.add_ssl {
let external = bind
.lan
.assigned_ssl_port
.or_not_found("assigned ssl port")?;
let target = (self.ip, *port).into();
let connect_ssl = if let Some(alpn) = ssl.alpn.clone() {
Err(alpn)
} else { } else {
if bind.options.secure.as_ref().map_or(false, |s| s.ssl) { Err(AlpnInfo::Reflect)
Ok(())
} else {
Err(AlpnInfo::Reflect)
}
};
for hostname in ctrl.server_hostnames.iter().cloned() {
rcs.push(
ctrl.vhost
.add(hostname, external, target, connect_ssl.clone())
.await?,
);
} }
for address in host.addresses() { };
match address { for hostname in ctrl.server_hostnames.iter().cloned() {
HostAddress::Onion { address } => { vhosts.insert(
let hostname = InternedString::from_display(address); (hostname, external),
if hostnames.insert(hostname.clone()) { TargetInfo {
rcs.push( public: bind.net.public,
ctrl.vhost acme: None,
.add( addr,
Some(hostname), connect_ssl: connect_ssl.clone(),
external, },
target, );
connect_ssl.clone(), }
) for address in host.addresses() {
.await?, match address {
); HostAddress::Onion { address } => {
} let hostname = InternedString::from_display(&address);
if hostnames.insert(hostname.clone()) {
vhosts.insert(
(Some(hostname), external),
TargetInfo {
public: false,
acme: None,
addr,
connect_ssl: connect_ssl.clone(),
},
);
} }
HostAddress::Domain { address } => { }
if hostnames.insert(address.clone()) { HostAddress::Domain {
let address = Some(address.clone()); address,
rcs.push( public,
ctrl.vhost acme,
.add( } => {
address.clone(), if hostnames.insert(address.clone()) {
external, let address = Some(address.clone());
target, if ssl.preferred_external_port == 443 {
connect_ssl.clone(), if public && bind.net.public {
) vhosts.insert(
.await?, (address.clone(), 5443),
); TargetInfo {
if ssl.preferred_external_port == 443 { public: false,
rcs.push( acme: acme.clone(),
ctrl.vhost addr,
.add( connect_ssl: connect_ssl.clone(),
address.clone(), },
5443,
target,
connect_ssl.clone(),
)
.await?,
); );
} }
vhosts.insert(
(address.clone(), 443),
TargetInfo {
public: public && bind.net.public,
acme,
addr,
connect_ssl: connect_ssl.clone(),
},
);
} else {
vhosts.insert(
(address.clone(), external),
TargetInfo {
public: public && bind.net.public,
acme,
addr,
connect_ssl: connect_ssl.clone(),
},
);
} }
} }
} }
} }
} }
if let Some(security) = bind.options.secure { }
if bind.options.add_ssl.is_some() && security.ssl { if let Some(security) = bind.options.secure {
// doesn't make sense to have 2 listening ports, both with ssl if bind.options.add_ssl.is_some() && security.ssl {
} else { // doesn't make sense to have 2 listening ports, both with ssl
let external = } else {
bind.lan.assigned_port.or_not_found("assigned lan port")?; let external = bind.net.assigned_port.or_not_found("assigned lan port")?;
rcs.push(ctrl.forward.add(external, (self.ip, *port).into()).await?); forwards.insert(external, ((self.ip, *port).into(), bind.net.public));
}
} }
(bind.lan, bind.options.add_ssl.clone(), hostnames, rcs) }
};
let mut bind_hostname_info: Vec<HostnameInfo> = let mut bind_hostname_info: Vec<HostnameInfo> =
hostname_info.remove(port).unwrap_or_default(); hostname_info.remove(port).unwrap_or_default();
for (interface, ip_info) in &ip_info { for (interface, public, ip_info) in
bind_hostname_info.push(HostnameInfo::Ip { net_ifaces.iter().filter_map(|(interface, info)| {
network_interface_id: interface.clone(), if let Some(ip_info) = &info.ip_info {
public: false, Some((interface, info.public(), ip_info))
hostname: IpHostname::Local { } else {
value: InternedString::from_display(&{ None
let hostname = &hostname; }
lazy_format!("{hostname}.local") })
}), {
port: new_lan_bind.0.assigned_port, if !public {
ssl_port: new_lan_bind.0.assigned_ssl_port, bind_hostname_info.push(HostnameInfo::Ip {
}, network_interface_id: interface.clone(),
}); public: false,
hostname: IpHostname::Local {
value: InternedString::from_display(&{
let hostname = &hostname;
lazy_format!("{hostname}.local")
}),
port: bind.net.assigned_port,
ssl_port: bind.net.assigned_ssl_port,
},
});
}
for address in host.addresses() { for address in host.addresses() {
if let HostAddress::Domain { address } = address { if let HostAddress::Domain {
if let Some(ssl) = &new_lan_bind.1 { address,
if ssl.preferred_external_port == 443 { public: domain_public,
..
} = address
{
if !public || (domain_public && bind.net.public) {
if bind
.options
.add_ssl
.as_ref()
.map_or(false, |ssl| ssl.preferred_external_port == 443)
{
bind_hostname_info.push(HostnameInfo::Ip { bind_hostname_info.push(HostnameInfo::Ip {
network_interface_id: interface.clone(), network_interface_id: interface.clone(),
public: false, public: public && domain_public && bind.net.public, // TODO: check if port forward is active
hostname: IpHostname::Domain { hostname: IpHostname::Domain {
domain: address.clone(), domain: address.clone(),
subdomain: None, subdomain: None,
@@ -398,71 +424,65 @@ impl NetService {
ssl_port: Some(443), ssl_port: Some(443),
}, },
}); });
} else {
bind_hostname_info.push(HostnameInfo::Ip {
network_interface_id: interface.clone(),
public,
hostname: IpHostname::Domain {
domain: address.clone(),
subdomain: None,
port: bind.net.assigned_port,
ssl_port: bind.net.assigned_ssl_port,
},
});
} }
} }
} }
} }
if let Some(ipv4) = ip_info.ipv4 { if !public || bind.net.public {
bind_hostname_info.push(HostnameInfo::Ip { if let Some(wan_ip) = ip_info.wan_ip.filter(|_| public) {
network_interface_id: interface.clone(), bind_hostname_info.push(HostnameInfo::Ip {
public: false, network_interface_id: interface.clone(),
hostname: IpHostname::Ipv4 { public,
value: ipv4, hostname: IpHostname::Ipv4 {
port: new_lan_bind.0.assigned_port, value: wan_ip,
ssl_port: new_lan_bind.0.assigned_ssl_port, port: bind.net.assigned_port,
}, ssl_port: bind.net.assigned_ssl_port,
}); },
} });
if let Some(ipv6) = ip_info.ipv6 { }
bind_hostname_info.push(HostnameInfo::Ip { for ipnet in &ip_info.subnets {
network_interface_id: interface.clone(), match ipnet {
public: false, IpNet::V4(net) => {
hostname: IpHostname::Ipv6 { if !public {
value: ipv6, bind_hostname_info.push(HostnameInfo::Ip {
port: new_lan_bind.0.assigned_port, network_interface_id: interface.clone(),
ssl_port: new_lan_bind.0.assigned_ssl_port, public,
}, hostname: IpHostname::Ipv4 {
}); value: net.addr(),
port: bind.net.assigned_port,
ssl_port: bind.net.assigned_ssl_port,
},
});
}
}
IpNet::V6(net) => {
bind_hostname_info.push(HostnameInfo::Ip {
network_interface_id: interface.clone(),
public: public && !ipv6_is_local(net.addr()),
hostname: IpHostname::Ipv6 {
value: net.addr(),
scope_id: ip_info.scope_id,
port: bind.net.assigned_port,
ssl_port: bind.net.assigned_ssl_port,
},
});
}
}
}
} }
} }
hostname_info.insert(*port, bind_hostname_info); hostname_info.insert(*port, bind_hostname_info);
binds.lan.insert(*port, new_lan_bind);
}
if let Some((lan, _, hostnames, _)) = old_lan_bind {
if let Some(external) = lan.assigned_ssl_port {
for hostname in ctrl.server_hostnames.iter().cloned() {
ctrl.vhost.gc(hostname, external).await?;
}
for hostname in hostnames {
ctrl.vhost.gc(Some(hostname), external).await?;
}
}
if let Some(external) = lan.assigned_port {
ctrl.forward.gc(external).await?;
}
}
}
let mut removed = BTreeSet::new();
binds.lan.retain(|internal, (external, _, hostnames, _)| {
if host.bindings.get(internal).map_or(false, |b| b.enabled) {
true
} else {
removed.insert((*external, std::mem::take(hostnames)));
false
}
});
for (lan, hostnames) in removed {
if let Some(external) = lan.assigned_ssl_port {
for hostname in ctrl.server_hostnames.iter().cloned() {
ctrl.vhost.gc(hostname, external).await?;
}
for hostname in hostnames {
ctrl.vhost.gc(Some(hostname), external).await?;
}
}
if let Some(external) = lan.assigned_port {
ctrl.forward.gc(external).await?;
} }
} }
@@ -481,7 +501,7 @@ impl NetService {
SocketAddr::from((self.ip, *internal)), SocketAddr::from((self.ip, *internal)),
); );
if let (Some(ssl), Some(ssl_internal)) = if let (Some(ssl), Some(ssl_internal)) =
(&info.options.add_ssl, info.lan.assigned_ssl_port) (&info.options.add_ssl, info.net.assigned_ssl_port)
{ {
tor_binds.insert( tor_binds.insert(
ssl.preferred_external_port, ssl.preferred_external_port,
@@ -506,31 +526,13 @@ impl NetService {
} }
} }
let mut keep_tor_addrs = BTreeSet::new(); for tor_addr in host.onions.iter() {
for tor_addr in host.addresses().filter_map(|a| { let key = peek
if let HostAddress::Onion { address } = a { .as_private()
Some(address) .as_key_store()
} else { .as_onion()
None .get_key(tor_addr)?;
} tor.insert(key.public().get_onion_address(), (key, tor_binds.clone()));
}) {
keep_tor_addrs.insert(tor_addr);
let old_tor_bind = binds.tor.remove(tor_addr);
let tor_bind = old_tor_bind.filter(|(ports, _)| ports == &tor_binds);
let new_tor_bind = if let Some(tor_bind) = tor_bind {
tor_bind
} else {
let key = peek
.as_private()
.as_key_store()
.as_onion()
.get_key(tor_addr)?;
let rcs = ctrl
.tor
.add(key, tor_binds.clone().into_iter().collect())
.await?;
(tor_binds.clone(), rcs)
};
for (internal, ports) in &tor_hostname_ports { for (internal, ports) in &tor_hostname_ports {
let mut bind_hostname_info = hostname_info.remove(internal).unwrap_or_default(); let mut bind_hostname_info = hostname_info.remove(internal).unwrap_or_default();
bind_hostname_info.push(HostnameInfo::Onion { bind_hostname_info.push(HostnameInfo::Onion {
@@ -542,16 +544,91 @@ impl NetService {
}); });
hostname_info.insert(*internal, bind_hostname_info); hostname_info.insert(*internal, bind_hostname_info);
} }
binds.tor.insert(tor_addr.clone(), new_tor_bind);
} }
for addr in binds.tor.keys() {
if !keep_tor_addrs.contains(addr) { let all = binds
ctrl.tor.gc(Some(addr.clone()), None).await?; .forwards
.keys()
.chain(forwards.keys())
.copied()
.collect::<BTreeSet<_>>();
for external in all {
let mut prev = binds.forwards.remove(&external);
if let Some((internal, public)) = forwards.remove(&external) {
prev = prev.filter(|(i, p, _)| i == &internal && *p == public);
binds.forwards.insert(
external,
if let Some(prev) = prev {
prev
} else {
(
internal,
public,
ctrl.forward.add(external, public, internal).await?,
)
},
);
}
}
ctrl.forward.gc().await?;
let all = binds
.vhosts
.keys()
.chain(vhosts.keys())
.cloned()
.collect::<BTreeSet<_>>();
for key in all {
let mut prev = binds.vhosts.remove(&key);
if let Some(target) = vhosts.remove(&key) {
prev = prev.filter(|(t, _)| t == &target);
binds.vhosts.insert(
key.clone(),
if let Some(prev) = prev {
prev
} else {
(target.clone(), ctrl.vhost.add(key.0, key.1, target)?)
},
);
} else {
if let Some((_, rc)) = prev {
drop(rc);
ctrl.vhost.gc(key.0, key.1);
}
} }
} }
self.net_controller()? let all = binds
.db .tor
.keys()
.chain(tor.keys())
.cloned()
.collect::<BTreeSet<_>>();
for onion in all {
let mut prev = binds.tor.remove(&onion);
if let Some((key, tor_binds)) = tor.remove(&onion) {
prev = prev.filter(|(b, _)| b == &tor_binds);
binds.tor.insert(
onion,
if let Some(prev) = prev {
prev
} else {
let rcs = ctrl
.tor
.add(key, tor_binds.iter().map(|(k, v)| (*k, *v)).collect())
.await?;
(tor_binds, rcs)
},
);
} else {
if let Some((_, rc)) = prev {
drop(rc);
ctrl.tor.gc(Some(onion), None).await?;
}
}
}
ctrl.db
.mutate(|db| { .mutate(|db| {
host_for(db, &self.id, &id, host.kind)? host_for(db, &self.id, &id, host.kind)?
.as_hostname_info_mut() .as_hostname_info_mut()
@@ -579,29 +656,6 @@ impl NetService {
pub fn get_ip(&self) -> Ipv4Addr { pub fn get_ip(&self) -> Ipv4Addr {
self.ip self.ip
} }
pub fn get_lan_port(&self, host_id: HostId, internal_port: u16) -> Result<LanInfo, Error> {
let host_id_binds = self.binds.get_key_value(&host_id);
match host_id_binds {
Some((_, binds)) => {
if let Some((lan, _, _, _)) = binds.lan.get(&internal_port) {
Ok(*lan)
} else {
Err(Error::new(
eyre!(
"Internal Port {} not found in NetService binds",
internal_port
),
crate::ErrorKind::NotFound,
))
}
}
None => Err(Error::new(
eyre!("HostID {} not found in NetService binds", host_id),
crate::ErrorKind::NotFound,
)),
}
}
} }
impl Drop for NetService { impl Drop for NetService {

File diff suppressed because it is too large Load Diff

View File

@@ -12,7 +12,8 @@ use ts_rs::TS;
#[serde(tag = "kind")] #[serde(tag = "kind")]
pub enum HostnameInfo { pub enum HostnameInfo {
Ip { Ip {
network_interface_id: String, #[ts(type = "string")]
network_interface_id: InternedString,
public: bool, public: bool,
hostname: IpHostname, hostname: IpHostname,
}, },
@@ -43,6 +44,8 @@ pub enum IpHostname {
}, },
Ipv6 { Ipv6 {
value: Ipv6Addr, value: Ipv6Addr,
#[serde(default)]
scope_id: u32,
port: Option<u16>, port: Option<u16>,
ssl_port: Option<u16>, ssl_port: Option<u16>,
}, },
@@ -69,7 +72,6 @@ pub struct ServiceInterface {
pub id: ServiceInterfaceId, pub id: ServiceInterfaceId,
pub name: String, pub name: String,
pub description: String, pub description: String,
pub has_primary: bool,
pub masked: bool, pub masked: bool,
pub address_info: AddressInfo, pub address_info: AddressInfo,
#[serde(rename = "type")] #[serde(rename = "type")]

View File

@@ -17,7 +17,6 @@ use openssl::x509::{X509Builder, X509Extension, X509NameBuilder, X509};
use openssl::*; use openssl::*;
use patch_db::HasModel; use patch_db::HasModel;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use tokio::time::Instant;
use tracing::instrument; use tracing::instrument;
use crate::account::AccountInfo; use crate::account::AccountInfo;

View File

@@ -8,15 +8,15 @@ use std::time::UNIX_EPOCH;
use async_compression::tokio::bufread::GzipEncoder; use async_compression::tokio::bufread::GzipEncoder;
use axum::body::Body; use axum::body::Body;
use axum::extract::{self as x, Request}; use axum::extract::{self as x, Request};
use axum::response::Response; use axum::response::{Redirect, Response};
use axum::routing::{any, get, post}; use axum::routing::{any, get};
use axum::Router; use axum::Router;
use base64::display::Base64Display; use base64::display::Base64Display;
use digest::Digest; use digest::Digest;
use futures::future::ready; use futures::future::ready;
use http::header::{ use http::header::{
ACCEPT_ENCODING, ACCEPT_RANGES, CACHE_CONTROL, CONNECTION, CONTENT_ENCODING, CONTENT_LENGTH, ACCEPT_ENCODING, ACCEPT_RANGES, CACHE_CONTROL, CONNECTION, CONTENT_ENCODING, CONTENT_LENGTH,
CONTENT_RANGE, CONTENT_TYPE, ETAG, RANGE, CONTENT_RANGE, CONTENT_TYPE, ETAG, HOST, RANGE,
}; };
use http::request::Parts as RequestParts; use http::request::Parts as RequestParts;
use http::{HeaderValue, Method, StatusCode}; use http::{HeaderValue, Method, StatusCode};
@@ -26,7 +26,6 @@ use new_mime_guess::MimeGuess;
use openssl::hash::MessageDigest; use openssl::hash::MessageDigest;
use openssl::x509::X509; use openssl::x509::X509;
use rpc_toolkit::{Context, HttpServer, Server}; use rpc_toolkit::{Context, HttpServer, Server};
use sqlx::query;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeekExt, BufReader}; use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeekExt, BufReader};
use tokio_util::io::ReaderStream; use tokio_util::io::ReaderStream;
use url::Url; use url::Url;
@@ -47,7 +46,7 @@ use crate::s9pk::S9pk;
use crate::util::io::open_file; use crate::util::io::open_file;
use crate::util::net::SyncBody; use crate::util::net::SyncBody;
use crate::util::serde::BASE64; use crate::util::serde::BASE64;
use crate::{diagnostic_api, init_api, install_api, main_api, setup_api}; use crate::{diagnostic_api, init_api, install_api, main_api, setup_api, DATA_DIR};
const NOT_FOUND: &[u8] = b"Not Found"; const NOT_FOUND: &[u8] = b"Not Found";
const METHOD_NOT_ALLOWED: &[u8] = b"Method Not Allowed"; const METHOD_NOT_ALLOWED: &[u8] = b"Method Not Allowed";
@@ -230,6 +229,20 @@ pub fn refresher() -> Router {
})) }))
} }
pub fn redirecter() -> Router {
Router::new().fallback(get(|request: Request| async move {
Redirect::temporary(&format!(
"https://{}{}",
request
.headers()
.get(HOST)
.and_then(|s| s.to_str().ok())
.unwrap_or("localhost"),
request.uri()
))
}))
}
async fn proxy_request(ctx: RpcContext, request: Request, url: String) -> Result<Response, Error> { async fn proxy_request(ctx: RpcContext, request: Request, url: String) -> Result<Response, Error> {
if_authorized(&ctx, request, |mut request| async { if_authorized(&ctx, request, |mut request| async {
for header in PROXY_STRIP_HEADERS { for header in PROXY_STRIP_HEADERS {
@@ -253,7 +266,7 @@ fn s9pk_router(ctx: RpcContext) -> Router {
let (parts, _) = request.into_parts(); let (parts, _) = request.into_parts();
match FileData::from_path( match FileData::from_path(
&parts, &parts,
&ctx.datadir &Path::new(DATA_DIR)
.join(PKG_ARCHIVE_DIR) .join(PKG_ARCHIVE_DIR)
.join("installed") .join("installed")
.join(s9pk), .join(s9pk),
@@ -279,7 +292,7 @@ fn s9pk_router(ctx: RpcContext) -> Router {
let s9pk = S9pk::deserialize( let s9pk = S9pk::deserialize(
&MultiCursorFile::from( &MultiCursorFile::from(
open_file( open_file(
ctx.datadir Path::new(DATA_DIR)
.join(PKG_ARCHIVE_DIR) .join(PKG_ARCHIVE_DIR)
.join("installed") .join("installed")
.join(s9pk), .join(s9pk),

View File

@@ -1,16 +1,25 @@
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV6};
use std::path::Path; use std::path::Path;
use async_stream::try_stream; use async_stream::try_stream;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use futures::stream::BoxStream; use futures::stream::BoxStream;
use futures::{StreamExt, TryStreamExt}; use futures::{StreamExt, TryStreamExt};
use ipnet::{Ipv4Net, Ipv6Net}; use ipnet::{IpNet, Ipv4Net, Ipv6Net};
use nix::net::if_::if_nametoindex;
use tokio::net::{TcpListener, TcpStream}; use tokio::net::{TcpListener, TcpStream};
use tokio::process::Command; use tokio::process::Command;
use crate::prelude::*;
use crate::util::Invoke; use crate::util::Invoke;
use crate::Error;
pub fn ipv6_is_link_local(addr: Ipv6Addr) -> bool {
(addr.segments()[0] & 0xffc0) == 0xfe80
}
pub fn ipv6_is_local(addr: Ipv6Addr) -> bool {
addr.is_loopback() || (addr.segments()[0] & 0xfe00) == 0xfc00 || ipv6_is_link_local(addr)
}
fn parse_iface_ip(output: &str) -> Result<Vec<&str>, Error> { fn parse_iface_ip(output: &str) -> Result<Vec<&str>, Error> {
let output = output.trim(); let output = output.trim();
@@ -112,6 +121,52 @@ pub async fn find_eth_iface() -> Result<String, Error> {
)) ))
} }
pub async fn all_socket_addrs_for(port: u16) -> Result<Vec<SocketAddr>, Error> {
let mut res = Vec::new();
let raw = String::from_utf8(
Command::new("ip")
.arg("-o")
.arg("addr")
.arg("show")
.invoke(ErrorKind::ParseSysInfo)
.await?,
)?;
let err = |item: &str, lineno: usize, line: &str| {
Error::new(
eyre!("failed to parse ip info ({item}[line:{lineno}]) from {line:?}"),
ErrorKind::ParseSysInfo,
)
};
for (idx, line) in raw
.lines()
.map(|l| l.trim())
.enumerate()
.filter(|(_, l)| !l.is_empty())
{
let mut split = line.split_whitespace();
let _num = split.next();
let ifname = split.next().ok_or_else(|| err("ifname", idx, line))?;
let _kind = split.next();
let ipnet_str = split.next().ok_or_else(|| err("ipnet", idx, line))?;
let ipnet = ipnet_str
.parse::<IpNet>()
.with_ctx(|_| (ErrorKind::ParseSysInfo, err("ipnet", idx, ipnet_str)))?;
match ipnet.addr() {
IpAddr::V4(ip4) => res.push(SocketAddr::new(ip4.into(), port)),
IpAddr::V6(ip6) => res.push(SocketAddr::V6(SocketAddrV6::new(
ip6,
port,
0,
if_nametoindex(ifname)
.with_ctx(|_| (ErrorKind::ParseSysInfo, "reading scope_id"))?,
))),
}
}
Ok(res)
}
pub struct TcpListeners { pub struct TcpListeners {
listeners: Vec<TcpListener>, listeners: Vec<TcpListener>,
} }

File diff suppressed because it is too large Load Diff

View File

@@ -1,134 +1,299 @@
use std::convert::Infallible; use std::future::Future;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::ops::Deref;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, RwLock};
use std::task::Poll; use std::task::Poll;
use std::time::Duration; use std::time::Duration;
use axum::extract::Request;
use axum::Router; use axum::Router;
use axum_server::Handle; use futures::future::{BoxFuture, Either};
use bytes::Bytes;
use futures::future::{ready, BoxFuture};
use futures::FutureExt; use futures::FutureExt;
use helpers::NonDetachingJoinHandle; use helpers::NonDetachingJoinHandle;
use hyper_util::rt::{TokioIo, TokioTimer};
use hyper_util::service::TowerToHyperService;
use tokio::net::{TcpListener, TcpStream};
use tokio::sync::{oneshot, watch}; use tokio::sync::{oneshot, watch};
use crate::context::{DiagnosticContext, InitContext, InstallContext, RpcContext, SetupContext}; use crate::context::{DiagnosticContext, InitContext, InstallContext, RpcContext, SetupContext};
use crate::net::network_interface::NetworkInterfaceListener;
use crate::net::static_server::{ use crate::net::static_server::{
diagnostic_ui_router, init_ui_router, install_ui_router, main_ui_router, refresher, diagnostic_ui_router, init_ui_router, install_ui_router, main_ui_router, redirecter, refresher,
setup_ui_router, setup_ui_router,
}; };
use crate::prelude::*; use crate::prelude::*;
use crate::util::actor::background::BackgroundJobQueue;
#[derive(Clone)] pub struct Accepted {
pub struct SwappableRouter(watch::Sender<Router>); pub https_redirect: bool,
impl SwappableRouter { pub stream: TcpStream,
pub fn new(router: Router) -> Self {
Self(watch::channel(router).0)
}
pub fn swap(&self, router: Router) {
let _ = self.0.send_replace(router);
}
} }
pub struct SwappableRouterService { pub trait Accept {
router: watch::Receiver<Router>, fn poll_accept(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<Accepted, Error>>;
changed: Option<BoxFuture<'static, ()>>,
} }
impl SwappableRouterService {
fn router(&self) -> Router { impl Accept for Vec<TcpListener> {
self.router.borrow().clone() fn poll_accept(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<Accepted, Error>> {
} for listener in &*self {
fn changed(&mut self, cx: &mut std::task::Context<'_>) -> Poll<()> { if let Poll::Ready((stream, _)) = listener.poll_accept(cx)? {
let mut changed = if let Some(changed) = self.changed.take() { return Poll::Ready(Ok(Accepted {
changed https_redirect: false,
} else { stream,
let mut router = self.router.clone(); }));
async move {
router.changed().await;
} }
.boxed()
};
if changed.poll_unpin(cx).is_ready() {
return Poll::Ready(());
} }
self.changed = Some(changed);
Poll::Pending Poll::Pending
} }
} }
impl Clone for SwappableRouterService { impl Accept for NetworkInterfaceListener {
fn clone(&self) -> Self { fn poll_accept(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<Accepted, Error>> {
NetworkInterfaceListener::poll_accept(self, cx, true).map(|res| {
res.map(|a| Accepted {
https_redirect: a.is_public,
stream: a.stream,
})
})
}
}
impl<A: Accept, B: Accept> Accept for Either<A, B> {
fn poll_accept(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<Accepted, Error>> {
match self {
Either::Left(a) => a.poll_accept(cx),
Either::Right(b) => b.poll_accept(cx),
}
}
}
impl<A: Accept> Accept for Option<A> {
fn poll_accept(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<Accepted, Error>> {
match self {
None => Poll::Pending,
Some(a) => a.poll_accept(cx),
}
}
}
#[pin_project::pin_project]
pub struct Acceptor<A: Accept> {
acceptor: (watch::Sender<A>, watch::Receiver<A>),
changed: Option<BoxFuture<'static, ()>>,
}
impl<A: Accept + Send + Sync + 'static> Acceptor<A> {
pub fn new(acceptor: A) -> Self {
Self { Self {
router: self.router.clone(), acceptor: watch::channel(acceptor),
changed: None, changed: None,
} }
} }
}
impl<B> tower_service::Service<Request<B>> for SwappableRouterService fn poll_changed(&mut self, cx: &mut std::task::Context<'_>) -> Poll<()> {
where let mut changed = if let Some(changed) = self.changed.take() {
B: axum::body::HttpBody<Data = Bytes> + Send + 'static, changed
B::Error: Into<axum::BoxError>, } else {
{ let mut recv = self.acceptor.1.clone();
type Response = <Router as tower_service::Service<Request<B>>>::Response; async move {
type Error = <Router as tower_service::Service<Request<B>>>::Error; let _ = recv.changed().await;
type Future = <Router as tower_service::Service<Request<B>>>::Future; }
#[inline] .boxed()
fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<(), Self::Error>> { };
if self.changed(cx).is_ready() { let res = changed.poll_unpin(cx);
return Poll::Ready(Ok(())); if res.is_pending() {
self.changed = Some(changed);
} }
tower_service::Service::<Request<B>>::poll_ready(&mut self.router(), cx) res
} }
fn call(&mut self, req: Request<B>) -> Self::Future {
self.router().call(req) fn poll_accept(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<Accepted, Error>> {
let _ = self.poll_changed(cx);
let mut res = Poll::Pending;
self.acceptor.0.send_if_modified(|a| {
res = a.poll_accept(cx);
false
});
res
}
async fn accept(&mut self) -> Result<Accepted, Error> {
std::future::poll_fn(|cx| self.poll_accept(cx)).await
}
}
impl Acceptor<Vec<TcpListener>> {
pub async fn bind(listen: impl IntoIterator<Item = SocketAddr>) -> Result<Self, Error> {
Ok(Self::new(
futures::future::try_join_all(listen.into_iter().map(TcpListener::bind)).await?,
))
} }
} }
impl<T> tower_service::Service<T> for SwappableRouter { pub type UpgradableListener = Option<Either<Vec<TcpListener>, NetworkInterfaceListener>>;
type Response = SwappableRouterService;
type Error = Infallible; impl Acceptor<UpgradableListener> {
type Future = futures::future::Ready<Result<Self::Response, Self::Error>>; pub async fn bind_upgradable(
#[inline] listen: impl IntoIterator<Item = SocketAddr>,
fn poll_ready( ) -> Result<Self, Error> {
&mut self, Ok(Self::new(Some(Either::Left(
_: &mut std::task::Context<'_>, futures::future::try_join_all(listen.into_iter().map(TcpListener::bind)).await?,
) -> std::task::Poll<Result<(), Self::Error>> { ))))
Poll::Ready(Ok(()))
}
fn call(&mut self, _: T) -> Self::Future {
ready(Ok(SwappableRouterService {
router: self.0.subscribe(),
changed: None,
}))
} }
} }
pub struct WebServer { pub struct WebServerAcceptorSetter<A: Accept> {
acceptor: watch::Sender<A>,
}
impl<A: Accept, B: Accept> WebServerAcceptorSetter<Option<Either<A, B>>> {
pub fn try_upgrade<F: FnOnce(A) -> Result<B, Error>>(&self, f: F) -> Result<(), Error> {
let mut res = Ok(());
self.acceptor.send_modify(|a| {
*a = match a.take() {
Some(Either::Left(a)) => match f(a) {
Ok(b) => Some(Either::Right(b)),
Err(e) => {
res = Err(e);
None
}
},
x => x,
}
});
res
}
}
impl<A: Accept> Deref for WebServerAcceptorSetter<A> {
type Target = watch::Sender<A>;
fn deref(&self) -> &Self::Target {
&self.acceptor
}
}
pub struct WebServer<A: Accept> {
shutdown: oneshot::Sender<()>, shutdown: oneshot::Sender<()>,
router: SwappableRouter, router: watch::Sender<Option<Router>>,
acceptor: watch::Sender<A>,
thread: NonDetachingJoinHandle<()>, thread: NonDetachingJoinHandle<()>,
} }
impl WebServer { impl<A: Accept + Send + Sync + 'static> WebServer<A> {
pub fn new(bind: SocketAddr) -> Self { pub fn acceptor_setter(&self) -> WebServerAcceptorSetter<A> {
let router = SwappableRouter::new(refresher()); WebServerAcceptorSetter {
let thread_router = router.clone(); acceptor: self.acceptor.clone(),
}
}
pub fn new(mut acceptor: Acceptor<A>) -> Self {
let acceptor_send = acceptor.acceptor.0.clone();
let (router, service) = watch::channel::<Option<Router>>(None);
let (shutdown, shutdown_recv) = oneshot::channel(); let (shutdown, shutdown_recv) = oneshot::channel();
let thread = NonDetachingJoinHandle::from(tokio::spawn(async move { let thread = NonDetachingJoinHandle::from(tokio::spawn(async move {
let handle = Handle::new(); #[derive(Clone)]
let mut server = axum_server::bind(bind).handle(handle.clone()); struct QueueRunner {
server.http_builder().http1().preserve_header_case(true); queue: Arc<RwLock<Option<BackgroundJobQueue>>>,
server.http_builder().http1().title_case_headers(true); }
impl<Fut> hyper::rt::Executor<Fut> for QueueRunner
where
Fut: Future + Send + 'static,
{
fn execute(&self, fut: Fut) {
if let Some(q) = &*self.queue.read().unwrap() {
q.add_job(fut);
} else {
tracing::warn!("job queued after shutdown");
}
}
}
if let (Err(e), _) = tokio::join!(server.serve(thread_router), async { let accept = AtomicBool::new(true);
let _ = shutdown_recv.await; let queue_cell = Arc::new(RwLock::new(None));
handle.graceful_shutdown(Some(Duration::from_secs(0))); let graceful = hyper_util::server::graceful::GracefulShutdown::new();
}) { let mut server = hyper_util::server::conn::auto::Builder::new(QueueRunner {
tracing::error!("Spawning hyper server error: {}", e); queue: queue_cell.clone(),
});
server
.http1()
.timer(TokioTimer::new())
.title_case_headers(true)
.preserve_header_case(true)
.http2()
.timer(TokioTimer::new())
.enable_connect_protocol()
.keep_alive_interval(Duration::from_secs(60))
.keep_alive_timeout(Duration::from_secs(300));
let (queue, mut runner) = BackgroundJobQueue::new();
*queue_cell.write().unwrap() = Some(queue.clone());
let handler = async {
loop {
if let Err(e) = async {
let accepted = acceptor.accept().await?;
if accepted.https_redirect {
queue.add_job(
graceful.watch(
server
.serve_connection_with_upgrades(
TokioIo::new(accepted.stream),
TowerToHyperService::new(redirecter().into_service()),
)
.into_owned(),
),
);
} else {
let service = { service.borrow().clone() };
if let Some(service) = service {
queue.add_job(
graceful.watch(
server
.serve_connection_with_upgrades(
TokioIo::new(accepted.stream),
TowerToHyperService::new(service.into_service()),
)
.into_owned(),
),
);
} else {
queue.add_job(
graceful.watch(
server
.serve_connection_with_upgrades(
TokioIo::new(accepted.stream),
TowerToHyperService::new(
refresher().into_service(),
),
)
.into_owned(),
),
);
}
}
Ok::<_, Error>(())
}
.await
{
tracing::error!("Error accepting HTTP connection: {e}");
tracing::debug!("{e:?}");
}
}
}
.boxed();
tokio::select! {
_ = shutdown_recv => (),
_ = handler => (),
_ = &mut runner => (),
}
accept.store(false, std::sync::atomic::Ordering::SeqCst);
drop(queue);
drop(queue_cell.write().unwrap().take());
if !runner.is_empty() {
runner.await;
} }
})); }));
Self { Self {
shutdown, shutdown,
router, router,
thread, thread,
acceptor: acceptor_send,
} }
} }
@@ -138,7 +303,7 @@ impl WebServer {
} }
pub fn serve_router(&mut self, router: Router) { pub fn serve_router(&mut self, router: Router) {
self.router.swap(router) self.router.send_replace(Some(router));
} }
pub fn serve_main(&mut self, ctx: RpcContext) { pub fn serve_main(&mut self, ctx: RpcContext) {

View File

@@ -298,7 +298,7 @@ fn display_wifi_info(params: WithIoFormat<Empty>, info: WifiListInfo) {
let mut table_global = Table::new(); let mut table_global = Table::new();
table_global.add_row(row![bc => table_global.add_row(row![bc =>
"CONNECTED", "CONNECTED",
"SIGNAL_STRENGTH", "SIGNAL STRENGTH",
"COUNTRY", "COUNTRY",
"ETHERNET", "ETHERNET",
]); ]);
@@ -306,12 +306,12 @@ fn display_wifi_info(params: WithIoFormat<Empty>, info: WifiListInfo) {
&info &info
.connected .connected
.as_ref() .as_ref()
.map_or("[N/A]".to_owned(), |c| c.0.clone()), .map_or("N/A".to_owned(), |c| c.0.clone()),
&info &info
.connected .connected
.as_ref() .as_ref()
.and_then(|x| info.ssids.get(x)) .and_then(|x| info.ssids.get(x))
.map_or("[N/A]".to_owned(), |ss| format!("{}", ss.0)), .map_or("N/A".to_owned(), |ss| format!("{}", ss.0)),
info.country.as_ref().map(|c| c.alpha2()).unwrap_or("00"), info.country.as_ref().map(|c| c.alpha2()).unwrap_or("00"),
&format!("{}", info.ethernet) &format!("{}", info.ethernet)
]); ]);
@@ -897,32 +897,29 @@ impl TypedValueParser for CountryCodeParser {
} }
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn synchronize_wpa_supplicant_conf<P: AsRef<Path>>( pub async fn synchronize_network_manager<P: AsRef<Path>>(
main_datadir: P, main_datadir: P,
wifi: &mut WifiInfo, wifi: &mut WifiInfo,
) -> Result<(), Error> { ) -> Result<(), Error> {
wifi.interface = find_wifi_iface().await?; wifi.interface = find_wifi_iface().await?;
let Some(wifi_iface) = &wifi.interface else {
return Ok(());
};
let persistent = main_datadir.as_ref().join("system-connections"); let persistent = main_datadir.as_ref().join("system-connections");
tracing::debug!("persistent: {:?}", persistent);
// let supplicant = Path::new("/etc/wpa_supplicant.conf");
if tokio::fs::metadata(&persistent).await.is_err() { if tokio::fs::metadata(&persistent).await.is_err() {
tokio::fs::create_dir_all(&persistent).await?; tokio::fs::create_dir_all(&persistent).await?;
} }
crate::disk::mount::util::bind(&persistent, "/etc/NetworkManager/system-connections", false) crate::disk::mount::util::bind(&persistent, "/etc/NetworkManager/system-connections", false)
.await?; .await?;
// if tokio::fs::metadata(&supplicant).await.is_err() {
// tokio::fs::write(&supplicant, include_str!("wpa_supplicant.conf.base")).await?;
// }
Command::new("systemctl") Command::new("systemctl")
.arg("restart") .arg("restart")
.arg("NetworkManager") .arg("NetworkManager")
.invoke(ErrorKind::Wifi) .invoke(ErrorKind::Wifi)
.await?; .await?;
let Some(wifi_iface) = &wifi.interface else {
return Ok(());
};
Command::new("ifconfig") Command::new("ifconfig")
.arg(wifi_iface) .arg(wifi_iface)
.arg("up") .arg("up")

View File

@@ -50,7 +50,7 @@ pub async fn partition(disk: &DiskInfo, overwrite: bool) -> Result<OsPartitionIn
if part_info.guid.is_some() { if part_info.guid.is_some() {
if entry.first_lba < if use_efi { 33759266 } else { 33570850 } { if entry.first_lba < if use_efi { 33759266 } else { 33570850 } {
return Err(Error::new( return Err(Error::new(
eyre!("Not enough space before embassy data"), eyre!("Not enough space before StartOS data"),
crate::ErrorKind::InvalidRequest, crate::ErrorKind::InvalidRequest,
)); ));
} }

View File

@@ -6,3 +6,20 @@ pub use tracing::instrument;
pub use crate::db::prelude::*; pub use crate::db::prelude::*;
pub use crate::ensure_code; pub use crate::ensure_code;
pub use crate::error::{Error, ErrorCollection, ErrorKind, ResultExt}; pub use crate::error::{Error, ErrorCollection, ErrorKind, ResultExt};
#[macro_export]
macro_rules! dbg {
() => {{
tracing::debug!("[{}:{}:{}]", file!(), line!(), column!());
}};
($e:expr) => {{
let e = $e;
tracing::debug!("[{}:{}:{}] {} = {e:?}", file!(), line!(), column!(), stringify!($e));
e
}};
($($e:expr),+) => {
($(
crate::dbg!($e)
),+)
}
}

View File

@@ -19,7 +19,6 @@ use crate::context::config::{ContextConfig, CONFIG_PATH};
use crate::context::{CliContext, RpcContext}; use crate::context::{CliContext, RpcContext};
use crate::prelude::*; use crate::prelude::*;
use crate::registry::auth::{SignatureHeader, AUTH_SIG_HEADER}; use crate::registry::auth::{SignatureHeader, AUTH_SIG_HEADER};
use crate::registry::device_info::{DeviceInfo, DEVICE_INFO_HEADER};
use crate::registry::signer::sign::AnySigningKey; use crate::registry::signer::sign::AnySigningKey;
use crate::registry::RegistryDatabase; use crate::registry::RegistryDatabase;
use crate::rpc_continuations::RpcContinuations; use crate::rpc_continuations::RpcContinuations;

View File

@@ -2,7 +2,6 @@ use std::collections::{BTreeMap, BTreeSet};
use axum::Router; use axum::Router;
use futures::future::ready; use futures::future::ready;
use imbl_value::InternedString;
use models::DataUrl; use models::DataUrl;
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler, Server}; use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler, Server};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@@ -11,13 +10,13 @@ use ts_rs::TS;
use crate::context::CliContext; use crate::context::CliContext;
use crate::middleware::cors::Cors; use crate::middleware::cors::Cors;
use crate::net::static_server::{bad_request, not_found, server_error}; use crate::net::static_server::{bad_request, not_found, server_error};
use crate::net::web_server::WebServer; use crate::net::web_server::{Accept, WebServer};
use crate::prelude::*; use crate::prelude::*;
use crate::registry::auth::Auth; use crate::registry::auth::Auth;
use crate::registry::context::RegistryContext; use crate::registry::context::RegistryContext;
use crate::registry::device_info::DeviceInfoMiddleware; use crate::registry::device_info::DeviceInfoMiddleware;
use crate::registry::os::index::OsIndex; use crate::registry::os::index::OsIndex;
use crate::registry::package::index::{Category, PackageIndex}; use crate::registry::package::index::PackageIndex;
use crate::registry::signer::SignerInfo; use crate::registry::signer::SignerInfo;
use crate::rpc_continuations::Guid; use crate::rpc_continuations::Guid;
use crate::util::serde::HandlerExtSerde; use crate::util::serde::HandlerExtSerde;
@@ -144,7 +143,7 @@ pub fn registry_router(ctx: RegistryContext) -> Router {
) )
} }
impl WebServer { impl<A: Accept + Send + Sync + 'static> WebServer<A> {
pub fn serve_registry(&mut self, ctx: RegistryContext) { pub fn serve_registry(&mut self, ctx: RegistryContext) {
self.serve_router(registry_router(ctx)) self.serve_router(registry_router(ctx))
} }

View File

@@ -72,7 +72,6 @@ pub struct PackageVersionInfo {
pub icon: DataUrl<'static>, pub icon: DataUrl<'static>,
pub description: Description, pub description: Description,
pub release_notes: String, pub release_notes: String,
#[ts(type = "string")]
pub git_hash: GitHash, pub git_hash: GitHash,
#[ts(type = "string")] #[ts(type = "string")]
pub license: InternedString, pub license: InternedString,

View File

@@ -24,10 +24,10 @@ impl MerkleArchiveCommitment {
pub fn from_query(query: &str) -> Result<Option<Self>, Error> { pub fn from_query(query: &str) -> Result<Option<Self>, Error> {
let mut root_sighash = None; let mut root_sighash = None;
let mut root_maxsize = None; let mut root_maxsize = None;
for (k, v) in form_urlencoded::parse(dbg!(query).as_bytes()) { for (k, v) in form_urlencoded::parse(query.as_bytes()) {
match &*k { match &*k {
"rootSighash" => { "rootSighash" => {
root_sighash = Some(dbg!(v).parse()?); root_sighash = Some(v.parse()?);
} }
"rootMaxsize" => { "rootMaxsize" => {
root_maxsize = Some(v.parse()?); root_maxsize = Some(v.parse()?);

View File

@@ -1,11 +1,13 @@
use std::path::Path; use std::path::Path;
use tokio::process::Command; use tokio::process::Command;
use ts_rs::TS;
use crate::prelude::*; use crate::prelude::*;
use crate::util::Invoke; use crate::util::Invoke;
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)] #[derive(Clone, Debug, serde::Serialize, serde::Deserialize, TS)]
#[ts(type = "string")]
pub struct GitHash(String); pub struct GitHash(String);
impl GitHash { impl GitHash {
@@ -31,6 +33,31 @@ impl GitHash {
} }
Ok(GitHash(hash)) Ok(GitHash(hash))
} }
pub fn load_sync() -> Option<GitHash> {
let mut hash = String::from_utf8(
std::process::Command::new("git")
.arg("rev-parse")
.arg("HEAD")
.output()
.ok()?
.stdout,
)
.ok()?;
if !std::process::Command::new("git")
.arg("diff-index")
.arg("--quiet")
.arg("HEAD")
.arg("--")
.output()
.ok()?
.status
.success()
{
hash += "-modified";
}
Some(GitHash(hash))
}
} }
impl AsRef<str> for GitHash { impl AsRef<str> for GitHash {

View File

@@ -3,7 +3,6 @@ use std::path::Path;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use exver::{Version, VersionRange}; use exver::{Version, VersionRange};
use helpers::const_true;
use imbl_value::InternedString; use imbl_value::InternedString;
pub use models::PackageId; pub use models::PackageId;
use models::{mime, ImageId, VolumeId}; use models::{mime, ImageId, VolumeId};
@@ -62,8 +61,8 @@ pub struct Manifest {
pub dependencies: Dependencies, pub dependencies: Dependencies,
#[serde(default)] #[serde(default)]
pub hardware_requirements: HardwareRequirements, pub hardware_requirements: HardwareRequirements,
#[serde(default)] #[ts(optional)]
#[ts(type = "string | null")] #[serde(default = "GitHash::load_sync")]
pub git_hash: Option<GitHash>, pub git_hash: Option<GitHash>,
#[serde(default = "current_version")] #[serde(default = "current_version")]
#[ts(type = "string")] #[ts(type = "string")]

View File

@@ -294,7 +294,7 @@ impl CallbackHandler {
} }
} }
pub async fn call(mut self, args: Vector<Value>) -> Result<(), Error> { pub async fn call(mut self, args: Vector<Value>) -> Result<(), Error> {
dbg!(eyre!("callback fired: {}", self.handle.is_active())); crate::dbg!(eyre!("callback fired: {}", self.handle.is_active()));
if let Some(seed) = self.seed.upgrade() { if let Some(seed) = self.seed.upgrade() {
seed.persistent_container seed.persistent_container
.callback(self.handle.take(), args) .callback(self.handle.take(), args)

View File

@@ -17,11 +17,11 @@ use crate::db::model::package::{
use crate::disk::mount::filesystem::bind::Bind; use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::idmapped::IdMapped; use crate::disk::mount::filesystem::idmapped::IdMapped;
use crate::disk::mount::filesystem::{FileSystem, MountType}; use crate::disk::mount::filesystem::{FileSystem, MountType};
use crate::rpc_continuations::Guid;
use crate::service::effects::prelude::*; use crate::service::effects::prelude::*;
use crate::status::health_check::NamedHealthCheckResult; use crate::status::health_check::NamedHealthCheckResult;
use crate::util::Invoke; use crate::util::Invoke;
use crate::volume::data_dir; use crate::volume::data_dir;
use crate::DATA_DIR;
#[derive(Debug, Clone, Serialize, Deserialize, TS)] #[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[ts(export)] #[ts(export)]
@@ -55,7 +55,7 @@ pub async fn mount(
let context = context.deref()?; let context = context.deref()?;
let subpath = subpath.unwrap_or_default(); let subpath = subpath.unwrap_or_default();
let subpath = subpath.strip_prefix("/").unwrap_or(&subpath); let subpath = subpath.strip_prefix("/").unwrap_or(&subpath);
let source = data_dir(&context.seed.ctx.datadir, &package_id, &volume_id).join(subpath); let source = data_dir(DATA_DIR, &package_id, &volume_id).join(subpath);
if tokio::fs::metadata(&source).await.is_err() { if tokio::fs::metadata(&source).await.is_err() {
tokio::fs::create_dir_all(&source).await?; tokio::fs::create_dir_all(&source).await?;
} }

View File

@@ -130,10 +130,6 @@ pub fn handler<C: Context>() -> ParentHandler<C> {
"get-host-info", "get-host-info",
from_fn_async(net::host::get_host_info).no_cli(), from_fn_async(net::host::get_host_info).no_cli(),
) )
.subcommand(
"get-primary-url",
from_fn_async(net::host::get_primary_url).no_cli(),
)
.subcommand( .subcommand(
"get-container-ip", "get-container-ip",
from_fn_async(net::info::get_container_ip).no_cli(), from_fn_async(net::info::get_container_ip).no_cli(),

View File

@@ -1,6 +1,6 @@
use models::{HostId, PackageId}; use models::{HostId, PackageId};
use crate::net::host::binding::{BindId, BindOptions, LanInfo}; use crate::net::host::binding::{BindId, BindOptions, NetInfo};
use crate::net::host::HostKind; use crate::net::host::HostKind;
use crate::service::effects::prelude::*; use crate::service::effects::prelude::*;
@@ -53,15 +53,36 @@ pub struct GetServicePortForwardParams {
#[ts(optional)] #[ts(optional)]
package_id: Option<PackageId>, package_id: Option<PackageId>,
host_id: HostId, host_id: HostId,
internal_port: u32, internal_port: u16,
} }
pub async fn get_service_port_forward( pub async fn get_service_port_forward(
context: EffectContext, context: EffectContext,
data: GetServicePortForwardParams, GetServicePortForwardParams {
) -> Result<LanInfo, Error> { package_id,
let internal_port = data.internal_port as u16; host_id,
internal_port,
}: GetServicePortForwardParams,
) -> Result<NetInfo, Error> {
let context = context.deref()?; let context = context.deref()?;
let net_service = context.seed.persistent_container.net_service.lock().await;
net_service.get_lan_port(data.host_id, internal_port) let package_id = package_id.unwrap_or_else(|| context.seed.id.clone());
Ok(context
.seed
.ctx
.db
.peek()
.await
.as_public()
.as_package_data()
.as_idx(&package_id)
.or_not_found(&package_id)?
.as_hosts()
.as_idx(&host_id)
.or_not_found(&host_id)?
.as_bindings()
.de()?
.get(&internal_port)
.or_not_found(lazy_format!("binding for port {internal_port}"))?
.net)
} }

View File

@@ -1,35 +1,10 @@
use models::{HostId, PackageId}; use models::{HostId, PackageId};
use crate::net::host::address::HostAddress;
use crate::net::host::Host; use crate::net::host::Host;
use crate::service::effects::callbacks::CallbackHandler; use crate::service::effects::callbacks::CallbackHandler;
use crate::service::effects::prelude::*; use crate::service::effects::prelude::*;
use crate::service::rpc::CallbackId; use crate::service::rpc::CallbackId;
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct GetPrimaryUrlParams {
#[ts(optional)]
package_id: Option<PackageId>,
host_id: HostId,
#[ts(optional)]
callback: Option<CallbackId>,
}
pub async fn get_primary_url(
context: EffectContext,
GetPrimaryUrlParams {
package_id,
host_id,
callback,
}: GetPrimaryUrlParams,
) -> Result<Option<HostAddress>, Error> {
let context = context.deref()?;
let package_id = package_id.unwrap_or_else(|| context.seed.id.clone());
Ok(None) // TODO
}
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, TS)] #[derive(Debug, Clone, serde::Serialize, serde::Deserialize, TS)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
#[ts(export)] #[ts(export)]

View File

@@ -15,7 +15,6 @@ pub struct ExportServiceInterfaceParams {
id: ServiceInterfaceId, id: ServiceInterfaceId,
name: String, name: String,
description: String, description: String,
has_primary: bool,
masked: bool, masked: bool,
address_info: AddressInfo, address_info: AddressInfo,
r#type: ServiceInterfaceType, r#type: ServiceInterfaceType,
@@ -26,7 +25,6 @@ pub async fn export_service_interface(
id, id,
name, name,
description, description,
has_primary,
masked, masked,
address_info, address_info,
r#type, r#type,
@@ -39,7 +37,6 @@ pub async fn export_service_interface(
id: id.clone(), id: id.clone(),
name, name,
description, description,
has_primary,
masked, masked,
address_info, address_info,
interface_type: r#type, interface_type: r#type,

View File

@@ -51,10 +51,16 @@ pub async fn get_ssl_certificate(
.iter() .iter()
.map(|(_, m)| m.as_hosts().as_entries()) .map(|(_, m)| m.as_hosts().as_entries())
.flatten_ok() .flatten_ok()
.map_ok(|(_, m)| m.as_addresses().de()) .map_ok(|(_, m)| {
Ok(m.as_onions()
.de()?
.iter()
.map(InternedString::from_display)
.chain(m.as_domains().keys()?)
.collect::<Vec<_>>())
})
.map(|a| a.and_then(|a| a)) .map(|a| a.and_then(|a| a))
.flatten_ok() .flatten_ok()
.map_ok(|a| InternedString::from_display(&a))
.try_collect::<_, BTreeSet<_>, _>()?; .try_collect::<_, BTreeSet<_>, _>()?;
for hostname in &hostnames { for hostname in &hostnames {
if let Some(internal) = hostname if let Some(internal) = hostname
@@ -135,10 +141,16 @@ pub async fn get_ssl_key(
.into_iter() .into_iter()
.map(|m| m.as_hosts().as_entries()) .map(|m| m.as_hosts().as_entries())
.flatten_ok() .flatten_ok()
.map_ok(|(_, m)| m.as_addresses().de()) .map_ok(|(_, m)| {
Ok(m.as_onions()
.de()?
.iter()
.map(InternedString::from_display)
.chain(m.as_domains().keys()?)
.collect::<Vec<_>>())
})
.map(|a| a.and_then(|a| a)) .map(|a| a.and_then(|a| a))
.flatten_ok() .flatten_ok()
.map_ok(|a| InternedString::from_display(&a))
.try_collect::<_, BTreeSet<_>, _>()?; .try_collect::<_, BTreeSet<_>, _>()?;
for hostname in &hostnames { for hostname in &hostnames {
if let Some(internal) = hostname if let Some(internal) = hostname

View File

@@ -26,7 +26,7 @@ pub async fn get_store(
callback, callback,
}: GetStoreParams, }: GetStoreParams,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
dbg!(&callback); crate::dbg!(&callback);
let context = context.deref()?; let context = context.deref()?;
let peeked = context.seed.ctx.db.peek().await; let peeked = context.seed.ctx.db.peek().await;
let package_id = package_id.unwrap_or(context.seed.id.clone()); let package_id = package_id.unwrap_or(context.seed.id.clone());

View File

@@ -48,7 +48,7 @@ use crate::util::net::WebSocketExt;
use crate::util::serde::{NoOutput, Pem}; use crate::util::serde::{NoOutput, Pem};
use crate::util::Never; use crate::util::Never;
use crate::volume::data_dir; use crate::volume::data_dir;
use crate::CAP_1_KiB; use crate::{CAP_1_KiB, DATA_DIR, PACKAGE_DATA};
pub mod action; pub mod action;
pub mod cli; pub mod cli;
@@ -149,10 +149,10 @@ impl ServiceRef {
.values() .values()
.flat_map(|h| h.bindings.values()) .flat_map(|h| h.bindings.values())
.flat_map(|b| { .flat_map(|b| {
b.lan b.net
.assigned_port .assigned_port
.into_iter() .into_iter()
.chain(b.lan.assigned_ssl_port) .chain(b.net.assigned_ssl_port)
}), }),
); );
Ok(()) Ok(())
@@ -167,17 +167,18 @@ impl ServiceRef {
{ {
let state = pde.state_info.expect_removing()?; let state = pde.state_info.expect_removing()?;
for volume_id in &state.manifest.volumes { for volume_id in &state.manifest.volumes {
let path = data_dir(&ctx.datadir, &state.manifest.id, volume_id); let path = data_dir(DATA_DIR, &state.manifest.id, volume_id);
if tokio::fs::metadata(&path).await.is_ok() { if tokio::fs::metadata(&path).await.is_ok() {
tokio::fs::remove_dir_all(&path).await?; tokio::fs::remove_dir_all(&path).await?;
} }
} }
let logs_dir = ctx.datadir.join("logs").join(&state.manifest.id); let logs_dir = Path::new(PACKAGE_DATA)
.join("logs")
.join(&state.manifest.id);
if tokio::fs::metadata(&logs_dir).await.is_ok() { if tokio::fs::metadata(&logs_dir).await.is_ok() {
tokio::fs::remove_dir_all(&logs_dir).await?; tokio::fs::remove_dir_all(&logs_dir).await?;
} }
let archive_path = ctx let archive_path = Path::new(PACKAGE_DATA)
.datadir
.join("archive") .join("archive")
.join("installed") .join("installed")
.join(&state.manifest.id); .join(&state.manifest.id);
@@ -278,7 +279,7 @@ impl Service {
let ctx = ctx.clone(); let ctx = ctx.clone();
move |s9pk: S9pk, i: Model<PackageDataEntry>| async move { move |s9pk: S9pk, i: Model<PackageDataEntry>| async move {
for volume_id in &s9pk.as_manifest().volumes { for volume_id in &s9pk.as_manifest().volumes {
let path = data_dir(&ctx.datadir, &s9pk.as_manifest().id, volume_id); let path = data_dir(DATA_DIR, &s9pk.as_manifest().id, volume_id);
if tokio::fs::metadata(&path).await.is_err() { if tokio::fs::metadata(&path).await.is_err() {
tokio::fs::create_dir_all(&path).await?; tokio::fs::create_dir_all(&path).await?;
} }
@@ -291,7 +292,7 @@ impl Service {
Self::new(ctx, s9pk, start_stop).await.map(Some) Self::new(ctx, s9pk, start_stop).await.map(Some)
} }
}; };
let s9pk_dir = ctx.datadir.join(PKG_ARCHIVE_DIR).join("installed"); // TODO: make this based on hash let s9pk_dir = Path::new(DATA_DIR).join(PKG_ARCHIVE_DIR).join("installed"); // TODO: make this based on hash
let s9pk_path = s9pk_dir.join(id).with_extension("s9pk"); let s9pk_path = s9pk_dir.join(id).with_extension("s9pk");
let Some(entry) = ctx let Some(entry) = ctx
.db .db
@@ -605,6 +606,7 @@ impl Service {
} }
pub async fn update_host(&self, host_id: HostId) -> Result<(), Error> { pub async fn update_host(&self, host_id: HostId) -> Result<(), Error> {
let mut service = self.seed.persistent_container.net_service.lock().await;
let host = self let host = self
.seed .seed
.ctx .ctx
@@ -619,13 +621,7 @@ impl Service {
.as_idx(&host_id) .as_idx(&host_id)
.or_not_found(&host_id)? .or_not_found(&host_id)?
.de()?; .de()?;
self.seed service.update(host_id, host).await
.persistent_container
.net_service
.lock()
.await
.update(host_id, host)
.await
} }
} }
@@ -934,7 +930,6 @@ pub async fn attach(
.with_kind(ErrorKind::Network)?; .with_kind(ErrorKind::Network)?;
current_out = "stdout"; current_out = "stdout";
} }
dbg!(&current_out);
ws.send(Message::Binary(out)) ws.send(Message::Binary(out))
.await .await
.with_kind(ErrorKind::Network)?; .with_kind(ErrorKind::Network)?;
@@ -948,7 +943,6 @@ pub async fn attach(
.with_kind(ErrorKind::Network)?; .with_kind(ErrorKind::Network)?;
current_out = "stderr"; current_out = "stderr";
} }
dbg!(&current_out);
ws.send(Message::Binary(err)) ws.send(Message::Binary(err))
.await .await
.with_kind(ErrorKind::Network)?; .with_kind(ErrorKind::Network)?;

View File

@@ -39,7 +39,7 @@ use crate::util::io::create_file;
use crate::util::rpc_client::UnixRpcClient; use crate::util::rpc_client::UnixRpcClient;
use crate::util::Invoke; use crate::util::Invoke;
use crate::volume::data_dir; use crate::volume::data_dir;
use crate::ARCH; use crate::{ARCH, DATA_DIR, PACKAGE_DATA};
const RPC_CONNECT_TIMEOUT: Duration = Duration::from_secs(10); const RPC_CONNECT_TIMEOUT: Duration = Duration::from_secs(10);
@@ -121,8 +121,8 @@ impl PersistentContainer {
.lxc_manager .lxc_manager
.create( .create(
Some( Some(
&ctx.datadir &Path::new(PACKAGE_DATA)
.join("package-data/logs") .join("logs")
.join(&s9pk.as_manifest().id), .join(&s9pk.as_manifest().id),
), ),
LxcConfig::default(), LxcConfig::default(),
@@ -157,7 +157,7 @@ impl PersistentContainer {
.await?; .await?;
let mount = MountGuard::mount( let mount = MountGuard::mount(
&IdMapped::new( &IdMapped::new(
Bind::new(data_dir(&ctx.datadir, &s9pk.as_manifest().id, volume)), Bind::new(data_dir(DATA_DIR, &s9pk.as_manifest().id, volume)),
0, 0,
100000, 100000,
65536, 65536,
@@ -452,7 +452,7 @@ impl PersistentContainer {
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn exit(mut self) -> Result<(), Error> { pub async fn exit(mut self) -> Result<(), Error> {
if let Some(destroy) = self.destroy(false) { if let Some(destroy) = self.destroy(false) {
dbg!(destroy.await)?; destroy.await?;
} }
tracing::info!("Service for {} exited", self.s9pk.as_manifest().id); tracing::info!("Service for {} exited", self.s9pk.as_manifest().id);

View File

@@ -155,7 +155,7 @@ impl serde::Serialize for Sandbox {
pub struct CallbackId(u64); pub struct CallbackId(u64);
impl CallbackId { impl CallbackId {
pub fn register(self, container: &PersistentContainer) -> CallbackHandle { pub fn register(self, container: &PersistentContainer) -> CallbackHandle {
dbg!(eyre!( crate::dbg!(eyre!(
"callback {} registered for {}", "callback {} registered for {}",
self.0, self.0,
container.s9pk.as_manifest().id container.s9pk.as_manifest().id

View File

@@ -36,7 +36,41 @@ impl Actor for ServiceActor {
ServiceActorLoopNext::DontWait => (), ServiceActorLoopNext::DontWait => (),
} }
} }
}) });
let seed = self.0.clone();
let mut ip_info = seed.ctx.net_controller.net_iface.subscribe();
jobs.add_job(async move {
loop {
if let Err(e) = async {
let mut service = seed.persistent_container.net_service.lock().await;
let hosts = seed
.ctx
.db
.peek()
.await
.as_public()
.as_package_data()
.as_idx(&seed.id)
.or_not_found(&seed.id)?
.as_hosts()
.de()?;
for (host_id, host) in hosts.0 {
service.update(host_id, host).await?;
}
Ok::<_, Error>(())
}
.await
{
tracing::error!("Error syncronizing net host after network change: {e}");
tracing::debug!("{e:?}");
}
if ip_info.changed().await.is_err() {
break;
};
}
});
} }
} }
@@ -92,7 +126,6 @@ async fn service_actor_loop(
.. ..
} => MainStatus::Stopped, } => MainStatus::Stopped,
}; };
let previous = i.as_status().de()?;
i.as_status_mut().ser(&main_status)?; i.as_status_mut().ser(&main_status)?;
return Ok(previous return Ok(previous
.major_changes(&main_status) .major_changes(&main_status)

View File

@@ -1,3 +1,4 @@
use std::path::Path;
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
@@ -27,6 +28,7 @@ use crate::service::start_stop::StartStop;
use crate::service::{LoadDisposition, Service, ServiceRef}; use crate::service::{LoadDisposition, Service, ServiceRef};
use crate::status::MainStatus; use crate::status::MainStatus;
use crate::util::serde::Pem; use crate::util::serde::Pem;
use crate::DATA_DIR;
pub type DownloadInstallFuture = BoxFuture<'static, Result<InstallFuture, Error>>; pub type DownloadInstallFuture = BoxFuture<'static, Result<InstallFuture, Error>>;
pub type InstallFuture = BoxFuture<'static, Result<(), Error>>; pub type InstallFuture = BoxFuture<'static, Result<(), Error>>;
@@ -220,8 +222,7 @@ impl ServiceMap {
Ok(async move { Ok(async move {
let (installed_path, sync_progress_task) = reload_guard let (installed_path, sync_progress_task) = reload_guard
.handle(async { .handle(async {
let download_path = ctx let download_path = Path::new(DATA_DIR)
.datadir
.join(PKG_ARCHIVE_DIR) .join(PKG_ARCHIVE_DIR)
.join("downloading") .join("downloading")
.join(&id) .join(&id)
@@ -251,8 +252,7 @@ impl ServiceMap {
file.sync_all().await?; file.sync_all().await?;
download_progress.complete(); download_progress.complete();
let installed_path = ctx let installed_path = Path::new(DATA_DIR)
.datadir
.join(PKG_ARCHIVE_DIR) .join(PKG_ARCHIVE_DIR)
.join("installed") .join("installed")
.join(&id) .join(&id)

View File

@@ -15,6 +15,7 @@ use crate::service::ServiceActor;
use crate::util::actor::background::BackgroundJobQueue; use crate::util::actor::background::BackgroundJobQueue;
use crate::util::actor::{ConflictBuilder, Handler}; use crate::util::actor::{ConflictBuilder, Handler};
use crate::util::future::RemoteCancellable; use crate::util::future::RemoteCancellable;
use crate::util::serde::NoOutput;
pub(in crate::service) struct Backup { pub(in crate::service) struct Backup {
pub path: PathBuf, pub path: PathBuf,
@@ -48,7 +49,7 @@ impl Handler<Backup> for ServiceActor {
.mount_backup(path, ReadWrite) .mount_backup(path, ReadWrite)
.await?; .await?;
seed.persistent_container seed.persistent_container
.execute(id, ProcedureName::CreateBackup, Value::Null, None) .execute::<NoOutput>(id, ProcedureName::CreateBackup, Value::Null, None)
.await?; .await?;
backup_guard.unmount(true).await?; backup_guard.unmount(true).await?;

View File

@@ -11,6 +11,7 @@ use crate::service::ServiceActor;
use crate::util::actor::background::BackgroundJobQueue; use crate::util::actor::background::BackgroundJobQueue;
use crate::util::actor::{ConflictBuilder, Handler}; use crate::util::actor::{ConflictBuilder, Handler};
use crate::util::future::RemoteCancellable; use crate::util::future::RemoteCancellable;
use crate::util::serde::NoOutput;
pub(in crate::service) struct Restore { pub(in crate::service) struct Restore {
pub path: PathBuf, pub path: PathBuf,
@@ -38,7 +39,7 @@ impl Handler<Restore> for ServiceActor {
.mount_backup(path, ReadOnly) .mount_backup(path, ReadOnly)
.await?; .await?;
seed.persistent_container seed.persistent_container
.execute(id, ProcedureName::RestoreBackup, Value::Null, None) .execute::<NoOutput>(id, ProcedureName::RestoreBackup, Value::Null, None)
.await?; .await?;
backup_guard.unmount(true).await?; backup_guard.unmount(true).await?;
@@ -48,7 +49,7 @@ impl Handler<Restore> for ServiceActor {
Ok::<_, Error>(()) Ok::<_, Error>(())
} }
.map(|x| { .map(|x| {
if let Err(err) = dbg!(x) { if let Err(err) = x {
tracing::debug!("{:?}", err); tracing::debug!("{:?}", err);
tracing::warn!("{}", err); tracing::warn!("{}", err);
} }

View File

@@ -4,6 +4,7 @@ use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use const_format::formatcp;
use josekit::jwk::Jwk; use josekit::jwk::Jwk;
use patch_db::json_ptr::ROOT; use patch_db::json_ptr::ROOT;
use rpc_toolkit::yajrc::RpcError; use rpc_toolkit::yajrc::RpcError;
@@ -38,7 +39,7 @@ use crate::rpc_continuations::Guid;
use crate::util::crypto::EncryptedWire; use crate::util::crypto::EncryptedWire;
use crate::util::io::{create_file, dir_copy, dir_size, Counter}; use crate::util::io::{create_file, dir_copy, dir_size, Counter};
use crate::util::Invoke; use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt}; use crate::{Error, ErrorKind, ResultExt, DATA_DIR, MAIN_DATA, PACKAGE_DATA};
pub fn setup<C: Context>() -> ParentHandler<C> { pub fn setup<C: Context>() -> ParentHandler<C> {
ParentHandler::new() ParentHandler::new()
@@ -80,7 +81,7 @@ async fn setup_init(
password: Option<String>, password: Option<String>,
init_phases: InitPhases, init_phases: InitPhases,
) -> Result<(AccountInfo, PreInitNetController), Error> { ) -> Result<(AccountInfo, PreInitNetController), Error> {
let InitResult { net_ctrl } = init(&ctx.config, init_phases).await?; let InitResult { net_ctrl } = init(&ctx.webserver, &ctx.config, init_phases).await?;
let account = net_ctrl let account = net_ctrl
.db .db
@@ -140,7 +141,7 @@ pub async fn attach(
disk_phase.start(); disk_phase.start();
let requires_reboot = crate::disk::main::import( let requires_reboot = crate::disk::main::import(
&*disk_guid, &*disk_guid,
&setup_ctx.datadir, DATA_DIR,
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() { if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
RepairStrategy::Aggressive RepairStrategy::Aggressive
} else { } else {
@@ -155,7 +156,7 @@ pub async fn attach(
.with_ctx(|_| (ErrorKind::Filesystem, REPAIR_DISK_PATH))?; .with_ctx(|_| (ErrorKind::Filesystem, REPAIR_DISK_PATH))?;
} }
if requires_reboot.0 { if requires_reboot.0 {
crate::disk::main::export(&*disk_guid, &setup_ctx.datadir).await?; crate::disk::main::export(&*disk_guid, DATA_DIR).await?;
return Err(Error::new( return Err(Error::new(
eyre!( eyre!(
"Errors were corrected with your disk, but the server must be restarted in order to proceed" "Errors were corrected with your disk, but the server must be restarted in order to proceed"
@@ -167,7 +168,7 @@ pub async fn attach(
let (account, net_ctrl) = setup_init(&setup_ctx, password, init_phases).await?; let (account, net_ctrl) = setup_init(&setup_ctx, password, init_phases).await?;
let rpc_ctx = RpcContext::init(&setup_ctx.config, disk_guid, Some(net_ctrl), rpc_ctx_phases).await?; let rpc_ctx = RpcContext::init(&setup_ctx.webserver, &setup_ctx.config, disk_guid, Some(net_ctrl), rpc_ctx_phases).await?;
Ok(((&account).try_into()?, rpc_ctx)) Ok(((&account).try_into()?, rpc_ctx))
})?; })?;
@@ -391,18 +392,13 @@ pub async fn execute_inner(
crate::disk::main::create( crate::disk::main::create(
&[start_os_logicalname], &[start_os_logicalname],
&pvscan().await?, &pvscan().await?,
&ctx.datadir, DATA_DIR,
encryption_password, encryption_password,
) )
.await?, .await?,
); );
let _ = crate::disk::main::import( let _ = crate::disk::main::import(&*guid, DATA_DIR, RepairStrategy::Preen, encryption_password)
&*guid, .await?;
&ctx.datadir,
RepairStrategy::Preen,
encryption_password,
)
.await?;
disk_phase.complete(); disk_phase.complete();
let progress = SetupExecuteProgress { let progress = SetupExecuteProgress {
@@ -456,9 +452,16 @@ async fn fresh_setup(
db.put(&ROOT, &Database::init(&account)?).await?; db.put(&ROOT, &Database::init(&account)?).await?;
drop(db); drop(db);
let InitResult { net_ctrl } = init(&ctx.config, init_phases).await?; let InitResult { net_ctrl } = init(&ctx.webserver, &ctx.config, init_phases).await?;
let rpc_ctx = RpcContext::init(&ctx.config, guid, Some(net_ctrl), rpc_ctx_phases).await?; let rpc_ctx = RpcContext::init(
&ctx.webserver,
&ctx.config,
guid,
Some(net_ctrl),
rpc_ctx_phases,
)
.await?;
Ok(((&account).try_into()?, rpc_ctx)) Ok(((&account).try_into()?, rpc_ctx))
} }
@@ -513,10 +516,10 @@ async fn migrate(
) )
.await?; .await?;
let main_transfer_args = ("/media/startos/migrate/main/", "/embassy-data/main/"); let main_transfer_args = ("/media/startos/migrate/main/", formatcp!("{MAIN_DATA}/"));
let package_data_transfer_args = ( let package_data_transfer_args = (
"/media/startos/migrate/package-data/", "/media/startos/migrate/package-data/",
"/embassy-data/package-data/", formatcp!("{PACKAGE_DATA}/"),
); );
let tmpdir = Path::new(package_data_transfer_args.0).join("tmp"); let tmpdir = Path::new(package_data_transfer_args.0).join("tmp");
@@ -571,7 +574,14 @@ async fn migrate(
let (account, net_ctrl) = setup_init(&ctx, Some(start_os_password), init_phases).await?; let (account, net_ctrl) = setup_init(&ctx, Some(start_os_password), init_phases).await?;
let rpc_ctx = RpcContext::init(&ctx.config, guid, Some(net_ctrl), rpc_ctx_phases).await?; let rpc_ctx = RpcContext::init(
&ctx.webserver,
&ctx.config,
guid,
Some(net_ctrl),
rpc_ctx_phases,
)
.await?;
Ok(((&account).try_into()?, rpc_ctx)) Ok(((&account).try_into()?, rpc_ctx))
} }

View File

@@ -1,4 +1,4 @@
use std::path::PathBuf; use std::path::{Path, PathBuf};
use std::sync::Arc; use std::sync::Arc;
use crate::context::RpcContext; use crate::context::RpcContext;
@@ -7,7 +7,7 @@ use crate::init::{STANDBY_MODE_PATH, SYSTEM_REBUILD_PATH};
use crate::prelude::*; use crate::prelude::*;
use crate::sound::SHUTDOWN; use crate::sound::SHUTDOWN;
use crate::util::Invoke; use crate::util::Invoke;
use crate::PLATFORM; use crate::{DATA_DIR, PLATFORM};
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct Shutdown { pub struct Shutdown {
@@ -87,7 +87,7 @@ pub async fn shutdown(ctx: RpcContext) -> Result<(), Error> {
.await?; .await?;
ctx.shutdown ctx.shutdown
.send(Some(Shutdown { .send(Some(Shutdown {
export_args: Some((ctx.disk_guid.clone(), ctx.datadir.clone())), export_args: Some((ctx.disk_guid.clone(), Path::new(DATA_DIR).to_owned())),
restart: false, restart: false,
})) }))
.map_err(|_| ()) .map_err(|_| ())
@@ -107,7 +107,7 @@ pub async fn restart(ctx: RpcContext) -> Result<(), Error> {
.await?; .await?;
ctx.shutdown ctx.shutdown
.send(Some(Shutdown { .send(Some(Shutdown {
export_args: Some((ctx.disk_guid.clone(), ctx.datadir.clone())), export_args: Some((ctx.disk_guid.clone(), Path::new(DATA_DIR).to_owned())),
restart: true, restart: true,
})) }))
.map_err(|_| ()) .map_err(|_| ())

View File

@@ -80,7 +80,7 @@ impl MainStatus {
} }
} }
pub fn backing_up(self) -> Self { pub fn backing_up(&self) -> Self {
MainStatus::BackingUp { MainStatus::BackingUp {
on_complete: if self.running() { on_complete: if self.running() {
StartStop::Start StartStop::Start

View File

@@ -30,6 +30,7 @@ use crate::util::cpupower::{get_available_governors, set_governor, Governor};
use crate::util::io::open_file; use crate::util::io::open_file;
use crate::util::serde::{display_serializable, HandlerExtSerde, WithIoFormat}; use crate::util::serde::{display_serializable, HandlerExtSerde, WithIoFormat};
use crate::util::Invoke; use crate::util::Invoke;
use crate::{MAIN_DATA, PACKAGE_DATA};
pub fn experimental<C: Context>() -> ParentHandler<C> { pub fn experimental<C: Context>() -> ParentHandler<C> {
ParentHandler::new() ParentHandler::new()
@@ -808,10 +809,10 @@ pub async fn get_mem_info() -> Result<MetricsMemory, Error> {
#[instrument(skip_all)] #[instrument(skip_all)]
async fn get_disk_info() -> Result<MetricsDisk, Error> { async fn get_disk_info() -> Result<MetricsDisk, Error> {
let package_used_task = get_used("/embassy-data/package-data"); let package_used_task = get_used(PACKAGE_DATA);
let package_available_task = get_available("/embassy-data/package-data"); let package_available_task = get_available(PACKAGE_DATA);
let os_used_task = get_used("/embassy-data/main"); let os_used_task = get_used(MAIN_DATA);
let os_available_task = get_available("/embassy-data/main"); let os_available_task = get_available(MAIN_DATA);
let (package_used, package_available, os_used, os_available) = futures::try_join!( let (package_used, package_available, os_used, os_available) = futures::try_join!(
package_used_task, package_used_task,

View File

@@ -20,7 +20,7 @@ use ts_rs::TS;
use crate::context::{CliContext, RpcContext}; use crate::context::{CliContext, RpcContext};
use crate::disk::mount::filesystem::bind::Bind; use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::block_dev::BlockDev; use crate::disk::mount::filesystem::block_dev::BlockDev;
use crate::disk::mount::filesystem::efivarfs::{self, EfiVarFs}; use crate::disk::mount::filesystem::efivarfs::{ EfiVarFs};
use crate::disk::mount::filesystem::overlayfs::OverlayGuard; use crate::disk::mount::filesystem::overlayfs::OverlayGuard;
use crate::disk::mount::filesystem::MountType; use crate::disk::mount::filesystem::MountType;
use crate::disk::mount::guard::{GenericMountGuard, MountGuard, TmpMountGuard}; use crate::disk::mount::guard::{GenericMountGuard, MountGuard, TmpMountGuard};

View File

@@ -15,8 +15,13 @@ impl BackgroundJobQueue {
}, },
) )
} }
pub fn add_job(&self, fut: impl Future<Output = ()> + Send + 'static) { pub fn add_job(&self, fut: impl Future + Send + 'static) {
let _ = self.0.send(fut.boxed()); let _ = self.0.send(
async {
fut.await;
}
.boxed(),
);
} }
} }

View File

@@ -1,11 +1,13 @@
use std::pin::Pin; use std::pin::Pin;
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use futures::future::abortable; use futures::future::{abortable, pending, BoxFuture, FusedFuture};
use futures::stream::{AbortHandle, Abortable}; use futures::stream::{AbortHandle, Abortable, BoxStream};
use futures::Future; use futures::{Future, FutureExt, Stream, StreamExt};
use tokio::sync::watch; use tokio::sync::watch;
use crate::prelude::*;
#[pin_project::pin_project(PinnedDrop)] #[pin_project::pin_project(PinnedDrop)]
pub struct DropSignaling<F> { pub struct DropSignaling<F> {
#[pin] #[pin]
@@ -102,6 +104,60 @@ impl CancellationHandle {
} }
} }
#[derive(Default)]
pub struct Until<'a> {
streams: Vec<BoxStream<'a, Result<(), Error>>>,
async_fns: Vec<Box<dyn FnMut() -> BoxFuture<'a, Result<(), Error>> + Send + 'a>>,
}
impl<'a> Until<'a> {
pub fn new() -> Self {
Self::default()
}
pub fn with_stream(
mut self,
stream: impl Stream<Item = Result<(), Error>> + Send + 'a,
) -> Self {
self.streams.push(stream.boxed());
self
}
pub fn with_async_fn<F, Fut>(mut self, mut f: F) -> Self
where
F: FnMut() -> Fut + Send + 'a,
Fut: Future<Output = Result<(), Error>> + FusedFuture + Send + 'a,
{
self.async_fns.push(Box::new(move || f().boxed()));
self
}
pub async fn run<Fut: Future<Output = Result<(), Error>> + Send>(
&mut self,
fut: Fut,
) -> Result<(), Error> {
let (res, _, _) = futures::future::select_all(
self.streams
.iter_mut()
.map(|s| {
async {
s.next().await.transpose()?.ok_or_else(|| {
Error::new(eyre!("stream is empty"), ErrorKind::Cancelled)
})
}
.boxed()
})
.chain(self.async_fns.iter_mut().map(|f| f()))
.chain([async {
fut.await?;
pending().await
}
.boxed()]),
)
.await;
res
}
}
#[tokio::test] #[tokio::test]
async fn test_cancellable() { async fn test_cancellable() {
use std::sync::Arc; use std::sync::Arc;

View File

@@ -15,7 +15,7 @@ use futures::future::{BoxFuture, Fuse};
use futures::{AsyncSeek, FutureExt, Stream, TryStreamExt}; use futures::{AsyncSeek, FutureExt, Stream, TryStreamExt};
use helpers::NonDetachingJoinHandle; use helpers::NonDetachingJoinHandle;
use nix::unistd::{Gid, Uid}; use nix::unistd::{Gid, Uid};
use tokio::fs::File; use tokio::fs::{File, OpenOptions};
use tokio::io::{ use tokio::io::{
duplex, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, DuplexStream, ReadBuf, WriteHalf, duplex, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, DuplexStream, ReadBuf, WriteHalf,
}; };
@@ -460,18 +460,30 @@ impl<T> BackTrackingIO<T> {
} }
} }
} }
pub fn rewind(&mut self) -> Vec<u8> { pub fn rewind<'a>(&'a mut self) -> (Vec<u8>, &'a [u8]) {
match std::mem::take(&mut self.buffer) { match std::mem::take(&mut self.buffer) {
BTBuffer::Buffering { read, write } => { BTBuffer::Buffering { read, write } => {
self.buffer = BTBuffer::Rewound { self.buffer = BTBuffer::Rewound {
read: Cursor::new(read), read: Cursor::new(read),
}; };
write (
write,
match &self.buffer {
BTBuffer::Rewound { read } => read.get_ref(),
_ => unreachable!(),
},
)
} }
BTBuffer::NotBuffering => Vec::new(), BTBuffer::NotBuffering => (Vec::new(), &[]),
BTBuffer::Rewound { read } => { BTBuffer::Rewound { read } => {
self.buffer = BTBuffer::Rewound { read }; self.buffer = BTBuffer::Rewound { read };
Vec::new() (
Vec::new(),
match &self.buffer {
BTBuffer::Rewound { read } => read.get_ref(),
_ => unreachable!(),
},
)
} }
} }
} }
@@ -529,7 +541,6 @@ impl<T: std::io::Read> std::io::Read for BackTrackingIO<T> {
} }
BTBuffer::NotBuffering => self.io.read(buf), BTBuffer::NotBuffering => self.io.read(buf),
BTBuffer::Rewound { read } => { BTBuffer::Rewound { read } => {
let mut ready = false;
if (read.position() as usize) < read.get_ref().len() { if (read.position() as usize) < read.get_ref().len() {
let n = std::io::Read::read(read, buf)?; let n = std::io::Read::read(read, buf)?;
if n != 0 { if n != 0 {
@@ -923,6 +934,21 @@ pub async fn create_file(path: impl AsRef<Path>) -> Result<File, Error> {
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("create {path:?}"))) .with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("create {path:?}")))
} }
pub async fn append_file(path: impl AsRef<Path>) -> Result<File, Error> {
let path = path.as_ref();
if let Some(parent) = path.parent() {
tokio::fs::create_dir_all(parent)
.await
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("mkdir -p {parent:?}")))?;
}
OpenOptions::new()
.create(true)
.append(true)
.open(path)
.await
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("create {path:?}")))
}
pub async fn delete_file(path: impl AsRef<Path>) -> Result<(), Error> { pub async fn delete_file(path: impl AsRef<Path>) -> Result<(), Error> {
let path = path.as_ref(); let path = path.as_ref();
tokio::fs::remove_file(path) tokio::fs::remove_file(path)

View File

@@ -1,13 +1,62 @@
use std::io; use std::fs::File;
use std::io::{self, Write};
use std::sync::{Arc, Mutex, MutexGuard};
use lazy_static::lazy_static;
use tracing::Subscriber; use tracing::Subscriber;
use tracing_subscriber::fmt::MakeWriter;
use tracing_subscriber::util::SubscriberInitExt; use tracing_subscriber::util::SubscriberInitExt;
#[derive(Clone)] lazy_static! {
pub struct EmbassyLogger {} pub static ref LOGGER: StartOSLogger = StartOSLogger::init();
}
impl EmbassyLogger { #[derive(Clone)]
fn base_subscriber() -> impl Subscriber { pub struct StartOSLogger {
logfile: LogFile,
}
#[derive(Clone, Default)]
struct LogFile(Arc<Mutex<Option<File>>>);
impl<'a> MakeWriter<'a> for LogFile {
type Writer = Box<dyn Write + 'a>;
fn make_writer(&'a self) -> Self::Writer {
let f = self.0.lock().unwrap();
if f.is_some() {
struct TeeWriter<'a>(MutexGuard<'a, Option<File>>);
impl<'a> Write for TeeWriter<'a> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
let n = if let Some(f) = &mut *self.0 {
f.write(buf)?
} else {
buf.len()
};
io::stderr().write_all(&buf[..n])?;
Ok(n)
}
fn flush(&mut self) -> io::Result<()> {
if let Some(f) = &mut *self.0 {
f.flush()?;
}
Ok(())
}
}
Box::new(TeeWriter(f))
} else {
drop(f);
Box::new(io::stderr())
}
}
}
impl StartOSLogger {
pub fn enable(&self) {}
pub fn set_logfile(&self, logfile: Option<File>) {
*self.logfile.0.lock().unwrap() = logfile;
}
fn base_subscriber(logfile: LogFile) -> impl Subscriber {
use tracing_error::ErrorLayer; use tracing_error::ErrorLayer;
use tracing_subscriber::prelude::*; use tracing_subscriber::prelude::*;
use tracing_subscriber::{fmt, EnvFilter}; use tracing_subscriber::{fmt, EnvFilter};
@@ -24,7 +73,7 @@ impl EmbassyLogger {
.add_directive("tokio=trace".parse().unwrap()) .add_directive("tokio=trace".parse().unwrap())
.add_directive("runtime=trace".parse().unwrap()); .add_directive("runtime=trace".parse().unwrap());
let fmt_layer = fmt::layer() let fmt_layer = fmt::layer()
.with_writer(io::stderr) .with_writer(logfile)
.with_line_number(true) .with_line_number(true)
.with_file(true) .with_file(true)
.with_target(true); .with_target(true);
@@ -39,11 +88,12 @@ impl EmbassyLogger {
sub sub
} }
pub fn init() -> Self { fn init() -> Self {
Self::base_subscriber().init(); let logfile = LogFile::default();
Self::base_subscriber(logfile.clone()).init();
color_eyre::install().unwrap_or_else(|_| tracing::warn!("tracing too many times")); color_eyre::install().unwrap_or_else(|_| tracing::warn!("tracing too many times"));
EmbassyLogger {} StartOSLogger { logfile }
} }
} }

View File

@@ -3,7 +3,6 @@ use std::path::Path;
use clap::Parser; use clap::Parser;
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler}; use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use url::Url;
use crate::context::CliContext; use crate::context::CliContext;
use crate::prelude::*; use crate::prelude::*;

View File

@@ -47,7 +47,7 @@ impl RpcClient {
let mut lines = BufReader::new(reader).lines(); let mut lines = BufReader::new(reader).lines();
while let Some(line) = lines.next_line().await.transpose() { while let Some(line) = lines.next_line().await.transpose() {
match line.map_err(Error::from).and_then(|l| { match line.map_err(Error::from).and_then(|l| {
serde_json::from_str::<RpcResponse>(dbg!(&l)) serde_json::from_str::<RpcResponse>(crate::dbg!(&l))
.with_kind(ErrorKind::Deserialization) .with_kind(ErrorKind::Deserialization)
}) { }) {
Ok(l) => { Ok(l) => {
@@ -114,7 +114,7 @@ impl RpcClient {
let (send, recv) = oneshot::channel(); let (send, recv) = oneshot::channel();
w.lock().await.insert(id.clone(), send); w.lock().await.insert(id.clone(), send);
self.writer self.writer
.write_all((dbg!(serde_json::to_string(&request))? + "\n").as_bytes()) .write_all((crate::dbg!(serde_json::to_string(&request))? + "\n").as_bytes())
.await .await
.map_err(|e| { .map_err(|e| {
let mut err = rpc_toolkit::yajrc::INTERNAL_ERROR.clone(); let mut err = rpc_toolkit::yajrc::INTERNAL_ERROR.clone();
@@ -154,7 +154,7 @@ impl RpcClient {
params, params,
}; };
self.writer self.writer
.write_all((dbg!(serde_json::to_string(&request))? + "\n").as_bytes()) .write_all((crate::dbg!(serde_json::to_string(&request))? + "\n").as_bytes())
.await .await
.map_err(|e| { .map_err(|e| {
let mut err = rpc_toolkit::yajrc::INTERNAL_ERROR.clone(); let mut err = rpc_toolkit::yajrc::INTERNAL_ERROR.clone();

View File

@@ -1,3 +1,4 @@
#[derive(Debug, Default)]
pub struct SyncMutex<T>(std::sync::Mutex<T>); pub struct SyncMutex<T>(std::sync::Mutex<T>);
impl<T> SyncMutex<T> { impl<T> SyncMutex<T> {
pub fn new(t: T) -> Self { pub fn new(t: T) -> Self {

View File

@@ -7,12 +7,10 @@ use futures::future::BoxFuture;
use futures::{Future, FutureExt}; use futures::{Future, FutureExt};
use imbl::Vector; use imbl::Vector;
use imbl_value::{to_value, InternedString}; use imbl_value::{to_value, InternedString};
use patch_db::json_ptr::{JsonPointer, ROOT}; use patch_db::json_ptr::{ ROOT};
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::db::model::Database;
use crate::prelude::*; use crate::prelude::*;
use crate::progress::PhaseProgressTrackerHandle;
use crate::Error; use crate::Error;
mod v0_3_5; mod v0_3_5;
@@ -29,7 +27,9 @@ mod v0_3_6_alpha_7;
mod v0_3_6_alpha_8; mod v0_3_6_alpha_8;
mod v0_3_6_alpha_9; mod v0_3_6_alpha_9;
pub type Current = v0_3_6_alpha_9::Version; // VERSION_BUMP mod v0_3_6_alpha_10;
pub type Current = v0_3_6_alpha_10::Version; // VERSION_BUMP
impl Current { impl Current {
#[instrument(skip(self, db))] #[instrument(skip(self, db))]
@@ -108,6 +108,7 @@ enum Version {
V0_3_6_alpha_7(Wrapper<v0_3_6_alpha_7::Version>), V0_3_6_alpha_7(Wrapper<v0_3_6_alpha_7::Version>),
V0_3_6_alpha_8(Wrapper<v0_3_6_alpha_8::Version>), V0_3_6_alpha_8(Wrapper<v0_3_6_alpha_8::Version>),
V0_3_6_alpha_9(Wrapper<v0_3_6_alpha_9::Version>), V0_3_6_alpha_9(Wrapper<v0_3_6_alpha_9::Version>),
V0_3_6_alpha_10(Wrapper<v0_3_6_alpha_10::Version>),
Other(exver::Version), Other(exver::Version),
} }
@@ -141,6 +142,7 @@ impl Version {
Self::V0_3_6_alpha_7(v) => DynVersion(Box::new(v.0)), Self::V0_3_6_alpha_7(v) => DynVersion(Box::new(v.0)),
Self::V0_3_6_alpha_8(v) => DynVersion(Box::new(v.0)), Self::V0_3_6_alpha_8(v) => DynVersion(Box::new(v.0)),
Self::V0_3_6_alpha_9(v) => DynVersion(Box::new(v.0)), Self::V0_3_6_alpha_9(v) => DynVersion(Box::new(v.0)),
Self::V0_3_6_alpha_10(v) => DynVersion(Box::new(v.0)),
Self::Other(v) => { Self::Other(v) => {
return Err(Error::new( return Err(Error::new(
eyre!("unknown version {v}"), eyre!("unknown version {v}"),
@@ -166,6 +168,7 @@ impl Version {
Version::V0_3_6_alpha_7(Wrapper(x)) => x.semver(), Version::V0_3_6_alpha_7(Wrapper(x)) => x.semver(),
Version::V0_3_6_alpha_8(Wrapper(x)) => x.semver(), Version::V0_3_6_alpha_8(Wrapper(x)) => x.semver(),
Version::V0_3_6_alpha_9(Wrapper(x)) => x.semver(), Version::V0_3_6_alpha_9(Wrapper(x)) => x.semver(),
Version::V0_3_6_alpha_10(Wrapper(x)) => x.semver(),
Version::Other(x) => x.clone(), Version::Other(x) => x.clone(),
} }
} }

View File

@@ -1,19 +1,16 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::future::Future;
use std::path::Path; use std::path::Path;
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
use const_format::formatcp;
use ed25519_dalek::SigningKey; use ed25519_dalek::SigningKey;
use exver::{PreReleaseSegment, VersionRange}; use exver::{PreReleaseSegment, VersionRange};
use imbl_value::{json, InternedString}; use imbl_value::{json, InternedString};
use itertools::Itertools;
use models::PackageId; use models::PackageId;
use openssl::pkey::{PKey, Private}; use openssl::pkey::PKey;
use openssl::x509::X509; use openssl::x509::X509;
use patch_db::ModelExt;
use sqlx::postgres::PgConnectOptions; use sqlx::postgres::PgConnectOptions;
use sqlx::{PgPool, Row}; use sqlx::{PgPool, Row};
use ssh_key::Fingerprint;
use tokio::process::Command; use tokio::process::Command;
use torut::onion::TorSecretKeyV3; use torut::onion::TorSecretKeyV3;
@@ -23,15 +20,11 @@ use crate::account::AccountInfo;
use crate::auth::Sessions; use crate::auth::Sessions;
use crate::backup::target::cifs::CifsTargets; use crate::backup::target::cifs::CifsTargets;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::db::model::Database;
use crate::disk::mount::filesystem::cifs::Cifs; use crate::disk::mount::filesystem::cifs::Cifs;
use crate::disk::mount::util::unmount; use crate::disk::mount::util::unmount;
use crate::hostname::Hostname; use crate::hostname::Hostname;
use crate::net::forward::AvailablePorts; use crate::net::forward::AvailablePorts;
use crate::net::keys::KeyStore; use crate::net::keys::KeyStore;
use crate::net::ssl::CertStore;
use crate::net::tor;
use crate::net::tor::OnionStore;
use crate::notifications::{Notification, Notifications}; use crate::notifications::{Notification, Notifications};
use crate::prelude::*; use crate::prelude::*;
use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile; use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile;
@@ -39,6 +32,7 @@ use crate::ssh::{SshKeys, SshPubKey};
use crate::util::crypto::ed25519_expand_key; use crate::util::crypto::ed25519_expand_key;
use crate::util::serde::{Pem, PemEncoding}; use crate::util::serde::{Pem, PemEncoding};
use crate::util::Invoke; use crate::util::Invoke;
use crate::{DATA_DIR, PACKAGE_DATA};
lazy_static::lazy_static! { lazy_static::lazy_static! {
static ref V0_3_6_alpha_0: exver::Version = exver::Version::new( static ref V0_3_6_alpha_0: exver::Version = exver::Version::new(
@@ -191,7 +185,6 @@ async fn init_postgres(datadir: impl AsRef<Path>) -> Result<PgPool, Error> {
.run(&secret_store) .run(&secret_store)
.await .await
.with_kind(crate::ErrorKind::Database)?; .with_kind(crate::ErrorKind::Database)?;
dbg!("Init Postgres Done");
Ok(secret_store) Ok(secret_store)
} }
@@ -208,7 +201,7 @@ impl VersionT for Version {
&V0_3_0_COMPAT &V0_3_0_COMPAT
} }
async fn pre_up(self) -> Result<Self::PreUpRes, Error> { async fn pre_up(self) -> Result<Self::PreUpRes, Error> {
let pg = init_postgres("/embassy-data").await?; let pg = init_postgres(DATA_DIR).await?;
let account = previous_account_info(&pg).await?; let account = previous_account_info(&pg).await?;
let ssh_keys = previous_ssh_keys(&pg).await?; let ssh_keys = previous_ssh_keys(&pg).await?;
@@ -315,7 +308,6 @@ impl VersionT for Version {
"private": private, "private": private,
}); });
dbg!("Should be done with the up");
*db = next; *db = next;
Ok(()) Ok(())
} }
@@ -329,7 +321,7 @@ impl VersionT for Version {
#[instrument(skip(self, ctx))] #[instrument(skip(self, ctx))]
/// MUST be idempotent, and is run after *all* db migrations /// MUST be idempotent, and is run after *all* db migrations
async fn post_up(self, ctx: &RpcContext) -> Result<(), Error> { async fn post_up(self, ctx: &RpcContext) -> Result<(), Error> {
let path = Path::new("/embassy-data/package-data/archive/"); let path = Path::new(formatcp!("{PACKAGE_DATA}/archive/"));
if !path.is_dir() { if !path.is_dir() {
return Err(Error::new( return Err(Error::new(
eyre!( eyre!(

View File

@@ -0,0 +1,94 @@
use std::collections::{BTreeMap, BTreeSet};
use exver::{PreReleaseSegment, VersionRange};
use imbl_value::InternedString;
use serde::{Deserialize, Serialize};
use torut::onion::OnionAddressV3;
use super::v0_3_5::V0_3_0_COMPAT;
use super::{v0_3_6_alpha_9, VersionT};
use crate::net::host::address::DomainConfig;
use crate::prelude::*;
lazy_static::lazy_static! {
static ref V0_3_6_alpha_10: exver::Version = exver::Version::new(
[0, 3, 6],
[PreReleaseSegment::String("alpha".into()), 10.into()]
);
}
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord)]
#[serde(rename_all = "camelCase")]
#[serde(tag = "kind")]
enum HostAddress {
Onion { address: OnionAddressV3 },
Domain { address: InternedString },
}
#[derive(Clone, Copy, Debug, Default)]
pub struct Version;
impl VersionT for Version {
type Previous = v0_3_6_alpha_9::Version;
type PreUpRes = ();
async fn pre_up(self) -> Result<Self::PreUpRes, Error> {
Ok(())
}
fn semver(self) -> exver::Version {
V0_3_6_alpha_10.clone()
}
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
for (_, package) in db["public"]["packageData"]
.as_object_mut()
.ok_or_else(|| {
Error::new(
eyre!("expected public.packageData to be an object"),
ErrorKind::Database,
)
})?
.iter_mut()
{
for (_, host) in package["hosts"]
.as_object_mut()
.ok_or_else(|| {
Error::new(
eyre!("expected public.packageData[id].hosts to be an object"),
ErrorKind::Database,
)
})?
.iter_mut()
{
let mut onions = BTreeSet::new();
let mut domains = BTreeMap::new();
let addresses = from_value::<BTreeSet<HostAddress>>(host["addresses"].clone())?;
for address in addresses {
match address {
HostAddress::Onion { address } => {
onions.insert(address);
}
HostAddress::Domain { address } => {
domains.insert(
address,
DomainConfig {
public: true,
acme: None,
},
);
}
}
}
host["onions"] = to_value(&onions)?;
host["domains"] = to_value(&domains)?;
}
}
Ok(())
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())
}
}

View File

@@ -27,7 +27,7 @@ impl VersionT for Version {
async fn pre_up(self) -> Result<Self::PreUpRes, Error> { async fn pre_up(self) -> Result<Self::PreUpRes, Error> {
Ok(()) Ok(())
} }
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> { fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(()) Ok(())
} }
async fn post_up<'a>(self, ctx: &'a crate::context::RpcContext) -> Result<(), Error> { async fn post_up<'a>(self, ctx: &'a crate::context::RpcContext) -> Result<(), Error> {

View File

@@ -1,5 +1,5 @@
use exver::{PreReleaseSegment, VersionRange}; use exver::{PreReleaseSegment, VersionRange};
use imbl_value::{json, InOMap}; use imbl_value::json;
use tokio::process::Command; use tokio::process::Command;
use super::v0_3_5::V0_3_0_COMPAT; use super::v0_3_5::V0_3_0_COMPAT;

View File

@@ -1,3 +1,5 @@
use std::path::Path;
use exver::{PreReleaseSegment, VersionRange}; use exver::{PreReleaseSegment, VersionRange};
use tokio::fs::File; use tokio::fs::File;
@@ -12,6 +14,7 @@ use crate::s9pk::v2::SIG_CONTEXT;
use crate::s9pk::S9pk; use crate::s9pk::S9pk;
use crate::service::LoadDisposition; use crate::service::LoadDisposition;
use crate::util::io::create_file; use crate::util::io::create_file;
use crate::DATA_DIR;
lazy_static::lazy_static! { lazy_static::lazy_static! {
static ref V0_3_6_alpha_8: exver::Version = exver::Version::new( static ref V0_3_6_alpha_8: exver::Version = exver::Version::new(
@@ -40,7 +43,7 @@ impl VersionT for Version {
Ok(()) Ok(())
} }
async fn post_up(self, ctx: &crate::context::RpcContext) -> Result<(), Error> { async fn post_up(self, ctx: &crate::context::RpcContext) -> Result<(), Error> {
let s9pk_dir = ctx.datadir.join(PKG_ARCHIVE_DIR).join("installed"); let s9pk_dir = Path::new(DATA_DIR).join(PKG_ARCHIVE_DIR).join("installed");
if tokio::fs::metadata(&s9pk_dir).await.is_ok() { if tokio::fs::metadata(&s9pk_dir).await.is_ok() {
let mut read_dir = tokio::fs::read_dir(&s9pk_dir).await?; let mut read_dir = tokio::fs::read_dir(&s9pk_dir).await?;

View File

@@ -1,10 +1,9 @@
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
pub use helpers::script_dir; pub use helpers::script_dir;
use models::PackageId;
pub use models::VolumeId; pub use models::VolumeId;
use models::{HostId, PackageId};
use crate::net::PACKAGE_CERT_PATH;
use crate::prelude::*; use crate::prelude::*;
use crate::util::VersionString; use crate::util::VersionString;
@@ -36,7 +35,3 @@ pub fn asset_dir<P: AsRef<Path>>(
pub fn backup_dir(pkg_id: &PackageId) -> PathBuf { pub fn backup_dir(pkg_id: &PackageId) -> PathBuf {
Path::new(BACKUP_DIR).join(pkg_id).join("data") Path::new(BACKUP_DIR).join(pkg_id).join("data")
} }
pub fn cert_dir(pkg_id: &PackageId, host_id: &HostId) -> PathBuf {
Path::new(PACKAGE_CERT_PATH).join(pkg_id).join(host_id)
}

14
debian/postinst vendored
View File

@@ -86,6 +86,8 @@ sed -i '/^\s*#\?\s*issue_discards\s*=\s*/c\issue_discards = 1' /etc/lvm/lvm.conf
sed -i '/\(^\|#\)\s*unqualified-search-registries\s*=\s*/c\unqualified-search-registries = ["docker.io"]' /etc/containers/registries.conf sed -i '/\(^\|#\)\s*unqualified-search-registries\s*=\s*/c\unqualified-search-registries = ["docker.io"]' /etc/containers/registries.conf
sed -i 's/\(#\|\^\)\s*\([^=]\+\)=\(suspend\|hibernate\)\s*$/\2=ignore/g' /etc/systemd/logind.conf sed -i 's/\(#\|\^\)\s*\([^=]\+\)=\(suspend\|hibernate\)\s*$/\2=ignore/g' /etc/systemd/logind.conf
sed -i '/\(^\|#\)MulticastDNS=/c\MulticastDNS=no' /etc/systemd/resolved.conf sed -i '/\(^\|#\)MulticastDNS=/c\MulticastDNS=no' /etc/systemd/resolved.conf
sed -i 's/\[Service\]/[Service]\nEnvironment=SYSTEMD_LOG_LEVEL=debug/' /lib/systemd/system/systemd-timesyncd.service
sed -i '/\(^\|#\)RootDistanceMaxSec=/c\RootDistanceMaxSec=10' /etc/systemd/timesyncd.conf
mkdir -p /etc/nginx/ssl mkdir -p /etc/nginx/ssl
@@ -103,7 +105,7 @@ rm -rf /var/lib/tor/*
ln -sf /usr/lib/startos/scripts/tor-check.sh /usr/bin/tor-check ln -sf /usr/lib/startos/scripts/tor-check.sh /usr/bin/tor-check
ln -sf /usr/lib/startos/scripts/gather_debug_info.sh /usr/bin/gather-debug ln -sf /usr/lib/startos/scripts/gather_debug_info.sh /usr/bin/gather-debug
echo "fs.inotify.max_user_watches=1048576" > /etc/sysctl.d/97-embassy.conf echo "fs.inotify.max_user_watches=1048576" > /etc/sysctl.d/97-startos.conf
# Old pi was set with this locale, because of pg we are now stuck with including that locale # Old pi was set with this locale, because of pg we are now stuck with including that locale
locale-gen en_GB en_GB.UTF-8 locale-gen en_GB en_GB.UTF-8
@@ -112,16 +114,14 @@ update-locale LANGUAGE
rm -f "/etc/locale.gen" rm -f "/etc/locale.gen"
dpkg-reconfigure --frontend noninteractive locales dpkg-reconfigure --frontend noninteractive locales
if ! getent group | grep '^embassy:'; then if ! getent group | grep '^startos:'; then
groupadd embassy groupadd startos
fi fi
ln -sf /usr/lib/startos/scripts/dhclient-exit-hook /etc/dhcp/dhclient-exit-hooks.d/embassy
rm -f /etc/motd rm -f /etc/motd
ln -sf /usr/lib/startos/motd /etc/update-motd.d/00-embassy ln -sf /usr/lib/startos/motd /etc/update-motd.d/00-startos
chmod -x /etc/update-motd.d/* chmod -x /etc/update-motd.d/*
chmod +x /etc/update-motd.d/00-embassy chmod +x /etc/update-motd.d/00-startos
# LXC # LXC
cat /etc/subuid | grep -v '^root:' > /etc/subuid.tmp || true cat /etc/subuid | grep -v '^root:' > /etc/subuid.tmp || true

View File

@@ -166,7 +166,7 @@ echo "deb [arch=${IB_TARGET_ARCH} signed-by=/etc/apt/trusted.gpg.d/docker.key.gp
# Dependencies # Dependencies
## Base dependencies ## Base dependencies
dpkg-deb --fsys-tarfile $base_dir/deb/${IMAGE_BASENAME}.deb | tar --to-stdout -xvf - ./usr/lib/startos/depends > config/package-lists/embassy-depends.list.chroot dpkg-deb --fsys-tarfile $base_dir/deb/${IMAGE_BASENAME}.deb | tar --to-stdout -xvf - ./usr/lib/startos/depends > config/package-lists/startos-depends.list.chroot
## Firmware ## Firmware
if [ "$NON_FREE" = 1 ]; then if [ "$NON_FREE" = 1 ]; then
@@ -210,7 +210,7 @@ if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
mkinitramfs -c gzip -o /boot/initramfs_2712 6.6.62-v8-16k+ mkinitramfs -c gzip -o /boot/initramfs_2712 6.6.62-v8-16k+
fi fi
useradd --shell /bin/bash -G embassy -m start9 useradd --shell /bin/bash -G startos -m start9
echo start9:embassy | chpasswd echo start9:embassy | chpasswd
usermod -aG sudo start9 usermod -aG sudo start9

View File

@@ -8,7 +8,7 @@ import {
SetHealth, SetHealth,
BindParams, BindParams,
HostId, HostId,
LanInfo, NetInfo,
Host, Host,
ExportServiceInterfaceParams, ExportServiceInterfaceParams,
ServiceInterface, ServiceInterface,
@@ -117,7 +117,7 @@ export type Effects = {
packageId?: PackageId packageId?: PackageId
hostId: HostId hostId: HostId
internalPort: number internalPort: number
}): Promise<LanInfo> }): Promise<NetInfo>
/** Removes all network bindings, called in the setupInputSpec */ /** Removes all network bindings, called in the setupInputSpec */
clearBindings(options: { clearBindings(options: {
except: { id: HostId; internalPort: number }[] except: { id: HostId; internalPort: number }[]
@@ -129,12 +129,6 @@ export type Effects = {
hostId: HostId hostId: HostId
callback?: () => void callback?: () => void
}): Promise<Host | null> }): Promise<Host | null>
/** Returns the primary url that a user has selected for a host, if it exists */
getPrimaryUrl(options: {
packageId?: PackageId
hostId: HostId
callback?: () => void
}): Promise<UrlString | null>
/** Returns the IP address of the container */ /** Returns the IP address of the container */
getContainerIp(): Promise<string> getContainerIp(): Promise<string>
// interface // interface

View File

@@ -94,8 +94,8 @@ export class InputSpec<Type extends Record<string, any>, Store = never> {
}, },
public validator: Parser<unknown, Type>, public validator: Parser<unknown, Type>,
) {} ) {}
_TYPE: Type = null as any as Type public _TYPE: Type = null as any as Type
_PARTIAL: DeepPartial<Type> = null as any as DeepPartial<Type> public _PARTIAL: DeepPartial<Type> = null as any as DeepPartial<Type>
async build(options: LazyBuildOptions<Store>) { async build(options: LazyBuildOptions<Store>) {
const answer = {} as { const answer = {} as {
[K in keyof Type]: ValueSpec [K in keyof Type]: ValueSpec

View File

@@ -49,6 +49,9 @@ export class Value<Type, Store> {
public build: LazyBuild<Store, ValueSpec>, public build: LazyBuild<Store, ValueSpec>,
public validator: Parser<unknown, Type>, public validator: Parser<unknown, Type>,
) {} ) {}
public _TYPE: Type = null as any as Type
public _PARTIAL: DeepPartial<Type> = null as any as DeepPartial<Type>
static toggle(a: { static toggle(a: {
name: string name: string
description?: string | null description?: string | null

View File

@@ -31,7 +31,7 @@ export type CurrentDependenciesResult<Manifest extends T.SDKManifest> = {
[K in RequiredDependenciesOf<Manifest>]: DependencyRequirement [K in RequiredDependenciesOf<Manifest>]: DependencyRequirement
} & { } & {
[K in OptionalDependenciesOf<Manifest>]?: DependencyRequirement [K in OptionalDependenciesOf<Manifest>]?: DependencyRequirement
} & Record<string, DependencyRequirement> }
export function setupDependencies<Manifest extends T.SDKManifest>( export function setupDependencies<Manifest extends T.SDKManifest>(
fn: (options: { fn: (options: {
@@ -48,14 +48,16 @@ export function setupDependencies<Manifest extends T.SDKManifest>(
} }
const dependencyType = await fn(options) const dependencyType = await fn(options)
return await options.effects.setDependencies({ return await options.effects.setDependencies({
dependencies: Object.entries(dependencyType).map( dependencies: Object.entries(dependencyType)
([id, { versionRange, ...x }, ,]) => .map(([k, v]) => [k, v as DependencyRequirement] as const)
({ .map(
// id, ([id, { versionRange, ...x }]) =>
...x, ({
versionRange: versionRange.toString(), id,
}) as T.DependencyRequirement, ...x,
), versionRange: versionRange.toString(),
}) as T.DependencyRequirement,
),
}) })
} }
return cell.updater return cell.updater

View File

@@ -46,7 +46,6 @@ export class Origin<T extends Host> {
const { const {
name, name,
description, description,
hasPrimary,
id, id,
type, type,
username, username,
@@ -67,7 +66,6 @@ export class Origin<T extends Host> {
id, id,
name, name,
description, description,
hasPrimary,
addressInfo, addressInfo,
type, type,
masked, masked,

View File

@@ -20,7 +20,6 @@ export class ServiceInterfaceBuilder {
name: string name: string
id: string id: string
description: string description: string
hasPrimary: boolean
type: ServiceInterfaceType type: ServiceInterfaceType
username: string | null username: string | null
path: string path: string

Some files were not shown because too many files have changed in this diff Show More