Feature/lxc container runtime (#2562)

* wip(fix): Dependencies

* wip: context

* wip(fix) Sorta auth

* wip: warnings

* wip(fix): registry/admin

* wip(fix) marketplace

* wip(fix) Some more converted and fixed with the linter and config

* wip: Working on the static server

* wip(fix)static server

* wip: Remove some async

* wip: Something about the request and regular rpc

* wip: gut install

Co-authored-by: J H <Blu-J@users.noreply.github.com>

* wip: Convert the static server into the new system

* wip delete file

* test

* wip(fix) vhost does not need the with safe defaults

* wip: Adding in the wifi

* wip: Fix the developer and the verify

* wip: new install flow

Co-authored-by: J H <Blu-J@users.noreply.github.com>

* fix middleware

* wip

* wip: Fix the auth

* wip

* continue service refactor

* feature: Service get_config

* feat: Action

* wip: Fighting the great fight against the borrow checker

* wip: Remove an error in a file that I just need to deal with later

* chore: Add in some more lifetime stuff to the services

* wip: Install fix on lifetime

* cleanup

* wip: Deal with the borrow later

* more cleanup

* resolve borrowchecker errors

* wip(feat): add in the handler for the socket, for now

* wip(feat): Update the service_effect_handler::action

* chore: Add in the changes to make sure the from_service goes to context

* chore: Change the

* refactor service map

* fix references to service map

* fill out restore

* wip: Before I work on the store stuff

* fix backup module

* handle some warnings

* feat: add in the ui components on the rust side

* feature: Update the procedures

* chore: Update the js side of the main and a few of the others

* chore: Update the rpc listener to match the persistent container

* wip: Working on updating some things to have a better name

* wip(feat): Try and get the rpc to return the correct shape?

* lxc wip

* wip(feat): Try and get the rpc to return the correct shape?

* build for container runtime wip

* remove container-init

* fix build

* fix error

* chore: Update to work I suppose

* lxc wip

* remove docker module and feature

* download alpine squashfs automatically

* overlays effect

Co-authored-by: Jade <Blu-J@users.noreply.github.com>

* chore: Add the overlay effect

* feat: Add the mounter in the main

* chore: Convert to use the mounts, still need to work with the sandbox

* install fixes

* fix ssl

* fixes from testing

* implement tmpfile for upload

* wip

* misc fixes

* cleanup

* cleanup

* better progress reporting

* progress for sideload

* return real guid

* add devmode script

* fix lxc rootfs path

* fix percentage bar

* fix progress bar styling

* fix build for unstable

* tweaks

* label progress

* tweaks

* update progress more often

* make symlink in rpc_client

* make socket dir

* fix parent path

* add start-cli to container

* add echo and gitInfo commands

* wip: Add the init + errors

* chore: Add in the exit effect for the system

* chore: Change the type to null for failure to parse

* move sigterm timeout to stopping status

* update order

* chore: Update the return type

* remove dbg

* change the map error

* chore: Update the thing to capture id

* chore add some life changes

* chore: Update the logging

* chore: Update the package to run module

* use From for RpcError

* chore: Update to use import instead

* chore: update

* chore: Use require for the backup

* fix a default

* update the type that is wrong

* chore: Update the type of the manifest

* chore: Update to make null

* only symlink if not exists

* get rid of double result

* better debug info for ErrorCollection

* chore: Update effects

* chore: fix

* mount assets and volumes

* add exec instead of spawn

* fix mounting in image

* fix overlay mounts

Co-authored-by: Jade <Blu-J@users.noreply.github.com>

* misc fixes

* feat: Fix two

* fix: systemForEmbassy main

* chore: Fix small part of main loop

* chore: Modify the bundle

* merge

* fix main loop

* move tsc to makefile

* chore: Update the return types of the health check

* fix client

* chore: Convert the todo to use tsmatches

* add in the fixes for the seen and create the hack to allow demo

* chore: Update to include the systemForStartOs

* chore: Update to the latest types from the expected output

* fixes

* fix typo

* Don't emit if failure on tsc

* wip

Co-authored-by: Jade <Blu-J@users.noreply.github.com>

* add s9pk api

* add inspection

* add inspect manifest

* newline after display serializable

* fix squashfs in image name

* edit manifest

Co-authored-by: Jade <Blu-J@users.noreply.github.com>

* wait for response on repl

* ignore sig for now

* ignore sig for now

* re-enable sig verification

* fix

* wip

* env and chroot

* add profiling logs

* set uid & gid in squashfs to 100000

* set uid of sqfs to 100000

* fix mksquashfs args

* add env to compat

* fix

* re-add docker feature flag

* fix docker output format being stupid

* here be dragons

* chore: Add in the cross compiling for something

* fix npm link

* extract logs from container on exit

* chore: Update for testing

* add log capture to drop trait

* chore: add in the modifications that I make

* chore: Update small things for no updates

* chore: Update the types of something

* chore: Make main not complain

* idmapped mounts

* idmapped volumes

* re-enable kiosk

* chore: Add in some logging for the new system

* bring in start-sdk

* remove avahi

* chore: Update the deps

* switch to musl

* chore: Update the version of prettier

* chore: Organize

* chore: Update some of the headers back to the standard of fetch

* fix musl build

* fix idmapped mounts

* fix cross build

* use cross compiler for correct arch

* feat: Add in the faked ssl stuff for the effects

* @dr_bonez Did a solution here

* chore: Something that DrBonez

* chore: up

* wip: We have a working server!!!

* wip

* uninstall

* wip

* test

* misc fixes

* fix cli

* replace interface with host

* chore: Fix the types in some ts files

* chore: quick update for the system for embassy to update the types

* replace br-start9 with lxcbr0

* split patchdb into public/private

* chore: Add changes for config set

* Feat: Adding some debugging for the errors

* wip: Working on getting the set config to work

* chore: Update and fix the small issue with the deserialization

* lightning, masked, schemeOverride, invert host-iface relationship

* feat: Add in the changes for just the sdk

* feat: Add in the changes for the new effects I suppose for now

---------

Co-authored-by: J H <2364004+Blu-J@users.noreply.github.com>
Co-authored-by: J H <Blu-J@users.noreply.github.com>
Co-authored-by: J H <dragondef@gmail.com>
Co-authored-by: Matt Hill <mattnine@protonmail.com>
This commit is contained in:
Aiden McClelland
2024-02-22 14:00:49 -07:00
committed by GitHub
parent d7bc7a2d38
commit 089199e7c2
58 changed files with 1058 additions and 3058 deletions

View File

@@ -38,7 +38,10 @@ export class HostSystemStartOs implements Effects {
constructor(readonly callbackHolder: CallbackHolder) {}
id = 0
rpcRound(method: string, params: unknown) {
rpcRound<K extends keyof Effects | "getStore" | "setStore">(
method: K,
params: unknown,
) {
const id = this.id++
const client = net.createConnection({ path: SOCKET_PATH }, () => {
client.write(
@@ -74,7 +77,7 @@ export class HostSystemStartOs implements Effects {
console.error("Debug: " + res.error.data.debug)
}
}
reject(new Error(message))
reject(new Error(`${message}@${method}`))
} else if (testRpcResult(res)) {
resolve(res.result)
} else {
@@ -91,13 +94,7 @@ export class HostSystemStartOs implements Effects {
})
})
}
started =
// @ts-ignore
this.method !== MAIN
? null
: () => {
return this.rpcRound("started", null)
}
bind(...[options]: Parameters<T.Effects["bind"]>) {
return this.rpcRound("bind", options) as ReturnType<T.Effects["bind"]>
}
@@ -131,9 +128,9 @@ export class HostSystemStartOs implements Effects {
T.Effects["exportAction"]
>
}
exportServiceInterface(
...[options]: Parameters<T.Effects["exportServiceInterface"]>
) {
exportServiceInterface: Effects["exportServiceInterface"] = (
...[options]: Parameters<Effects["exportServiceInterface"]>
) => {
return this.rpcRound("exportServiceInterface", options) as ReturnType<
T.Effects["exportServiceInterface"]
>
@@ -158,31 +155,24 @@ export class HostSystemStartOs implements Effects {
T.Effects["getContainerIp"]
>
}
getHostnames: any = (...[allOptions]: any[]) => {
getHostInfo: Effects["getHostInfo"] = (...[allOptions]: any[]) => {
const options = {
...allOptions,
callback: this.callbackHolder.addCallback(allOptions.callback),
}
return this.rpcRound("getHostnames", options) as ReturnType<
T.Effects["getHostnames"]
>
return this.rpcRound("getHostInfo", options) as ReturnType<
T.Effects["getHostInfo"]
> as any
}
getInterface(...[options]: Parameters<T.Effects["getInterface"]>) {
return this.rpcRound("getInterface", {
getServiceInterface(
...[options]: Parameters<T.Effects["getServiceInterface"]>
) {
return this.rpcRound("getServiceInterface", {
...options,
callback: this.callbackHolder.addCallback(options.callback),
}) as ReturnType<T.Effects["getInterface"]>
}
getIPHostname(...[]: Parameters<T.Effects["getIPHostname"]>) {
return this.rpcRound("getIPHostname", null) as ReturnType<
T.Effects["getIPHostname"]
>
}
getLocalHostname(...[]: Parameters<T.Effects["getLocalHostname"]>) {
return this.rpcRound("getLocalHostname", null) as ReturnType<
T.Effects["getLocalHostname"]
>
}) as ReturnType<T.Effects["getServiceInterface"]>
}
getPrimaryUrl(...[options]: Parameters<T.Effects["getPrimaryUrl"]>) {
return this.rpcRound("getPrimaryUrl", {
...options,
@@ -196,14 +186,6 @@ export class HostSystemStartOs implements Effects {
T.Effects["getServicePortForward"]
>
}
getServiceTorHostname(
...[interfaceId, packageId]: Parameters<T.Effects["getServiceTorHostname"]>
) {
return this.rpcRound("getServiceTorHostname", {
interfaceId,
packageId,
}) as ReturnType<T.Effects["getServiceTorHostname"]>
}
getSslCertificate(
...[packageId, algorithm]: Parameters<T.Effects["getSslCertificate"]>
) {
@@ -223,11 +205,13 @@ export class HostSystemStartOs implements Effects {
callback: this.callbackHolder.addCallback(options.callback),
}) as ReturnType<T.Effects["getSystemSmtp"]>
}
listInterface(...[options]: Parameters<T.Effects["listInterface"]>) {
return this.rpcRound("listInterface", {
listServiceInterfaces(
...[options]: Parameters<T.Effects["listServiceInterfaces"]>
) {
return this.rpcRound("listServiceInterfaces", {
...options,
callback: this.callbackHolder.addCallback(options.callback),
}) as ReturnType<T.Effects["listInterface"]>
}) as ReturnType<T.Effects["listServiceInterfaces"]>
}
mount(...[options]: Parameters<T.Effects["mount"]>) {
return this.rpcRound("mount", options) as ReturnType<T.Effects["mount"]>
@@ -304,17 +288,4 @@ export class HostSystemStartOs implements Effects {
T.Effects["store"]["set"]
>,
}
/**
* So, this is created
* @param options
* @returns
*/
embassyGetInterface(options: {
target: "tor-key" | "tor-address" | "lan-address"
packageId: string
interface: string
}) {
return this.rpcRound("embassyGetInterface", options) as Promise<string>
}
}

View File

@@ -11,6 +11,7 @@ import {
matches,
any,
shape,
anyOf,
} from "ts-matches"
import { types as T } from "@start9labs/start-sdk"
@@ -24,16 +25,28 @@ import { HostSystem } from "../Interfaces/HostSystem"
import { jsonPath } from "../Models/JsonPath"
import { System } from "../Interfaces/System"
type MaybePromise<T> = T | Promise<T>
type SocketResponse = { jsonrpc: "2.0"; id: IdType } & (
| { result: unknown }
| {
error: {
code: number
message: string
data: { details: string; debug?: string }
}
}
export const matchRpcResult = anyOf(
object({ result: any }),
object({
error: object(
{
code: number,
message: string,
data: object(
{
details: string,
debug: any,
},
["details", "debug"],
),
},
["data"],
),
}),
)
export type RpcResult = typeof matchRpcResult._TYPE
type SocketResponse = { jsonrpc: "2.0"; id: IdType } & RpcResult
const SOCKET_PARENT = "/media/startos/rpc"
const SOCKET_PATH = "/media/startos/rpc/service.sock"
const jsonrpc = "2.0" as const
@@ -186,23 +199,11 @@ export class RpcListener {
input: params.input,
timeout: params.timeout,
})
.then((result) =>
"ok" in result
? {
jsonrpc,
id,
result: result.ok === undefined ? null : result.ok,
}
: {
jsonrpc,
id,
error: {
code: result.err.code,
message: "Package Root Error",
data: { details: result.err.message },
},
},
)
.then((result) => ({
jsonrpc,
id,
...result,
}))
.catch((error) => ({
jsonrpc,
id,

View File

@@ -33,8 +33,11 @@ export class DockerProcedureContainer {
await overlay.mount({ type: "assets", id: mount }, mounts[mount])
} else if (volumeMount.type === "certificate") {
volumeMount
const certChain = await effects.getSslCertificate()
const key = await effects.getSslKey()
const certChain = await effects.getSslCertificate(
null,
volumeMount["interface-id"],
)
const key = await effects.getSslKey(null, volumeMount["interface-id"])
await fs.writeFile(
`${path}/${volumeMount["interface-id"]}.cert.pem`,
certChain.join("\n"),

View File

@@ -28,6 +28,9 @@ import {
import { HostSystemStartOs } from "../../HostSystemStartOs"
import { JsonPath, unNestPath } from "../../../Models/JsonPath"
import { HostSystem } from "../../../Interfaces/HostSystem"
import { RpcResult, matchRpcResult } from "../../RpcListener"
import { ServiceInterface } from "../../../../../sdk/dist/cjs/lib/types"
import { createUtils } from "../../../../../sdk/dist/cjs/lib/util"
type Optional<A> = A | undefined | null
function todo(): never {
@@ -68,7 +71,7 @@ export class SystemForEmbassy implements System {
input: unknown
timeout?: number | undefined
},
): Promise<ExecuteResult> {
): Promise<RpcResult> {
return this._execute(effects, options)
.then((x) =>
matches(x)
@@ -76,16 +79,14 @@ export class SystemForEmbassy implements System {
object({
result: any,
}),
(x) => ({
ok: x.result,
}),
(x) => x,
)
.when(
object({
error: string,
}),
(x) => ({
err: {
error: {
code: 0,
message: x.error,
},
@@ -96,20 +97,34 @@ export class SystemForEmbassy implements System {
"error-code": tuple(number, string),
}),
({ "error-code": [code, message] }) => ({
err: {
error: {
code,
message,
},
}),
)
.defaultTo({ ok: x }),
.defaultTo({ result: x }),
)
.catch((error) => ({
err: {
code: 0,
message: "" + error,
},
}))
.catch((error: unknown) => {
if (error instanceof Error)
return {
error: {
code: 0,
message: error.name,
data: {
details: error.message,
debug: `${error?.cause ?? "[noCause]"}:${error?.stack ?? "[noStack]"}`,
},
},
}
if (matchRpcResult.test(error)) return error
return {
error: {
code: 0,
message: String(error),
},
}
})
}
async exit(effects: HostSystemStartOs): Promise<void> {
if (this.currentRunning) await this.currentRunning.clean()
@@ -157,6 +172,7 @@ export class SystemForEmbassy implements System {
return this.dependenciesAutoconfig(effects, procedures[2], input)
}
}
throw new Error(`Could not find the path for ${options.procedure}`)
}
private async init(
effects: HostSystemStartOs,
@@ -864,6 +880,7 @@ async function updateConfig(
) {
if (!dictionary([string, unknown]).test(spec)) return
if (!dictionary([string, unknown]).test(mutConfigValue)) return
const utils = createUtils(effects)
for (const key in spec) {
const specValue = spec[key]
@@ -890,11 +907,18 @@ async function updateConfig(
mutConfigValue[key] = configValue
}
if (matchPointerPackage.test(specValue)) {
mutConfigValue[key] = await effects.embassyGetInterface({
target: specValue.target,
packageId: specValue["package-id"],
interface: specValue["interface"],
})
const filled = await utils.serviceInterface
.get({
packageId: specValue["package-id"],
id: specValue.interface,
})
.once()
if (specValue.target === "tor-key")
throw new Error("This service uses an unsupported target TorKey")
mutConfigValue[key] =
specValue.target === "lan-address"
? filled.addressInfo.localHostnames[0]
: filled.addressInfo.onionHostnames[0]
}
}
}

View File

@@ -3,6 +3,7 @@ import { unNestPath } from "../../Models/JsonPath"
import { string } from "ts-matches"
import { HostSystemStartOs } from "../HostSystemStartOs"
import { Effects } from "../../Models/Effects"
import { RpcResult } from "../RpcListener"
const LOCATION = "/usr/lib/startos/package/startos"
export class SystemForStartOs implements System {
private onTerm: (() => Promise<void>) | undefined
@@ -30,8 +31,8 @@ export class SystemForStartOs implements System {
input: unknown
timeout?: number | undefined
},
): Promise<ExecuteResult> {
return { ok: await this._execute(effects, options) }
): Promise<RpcResult> {
return { result: await this._execute(effects, options) }
}
async _execute(
effects: Effects,

View File

@@ -1,6 +1,7 @@
import { types as T } from "@start9labs/start-sdk"
import { JsonPath } from "../Models/JsonPath"
import { HostSystemStartOs } from "../Adapters/HostSystemStartOs"
import { RpcResult } from "../Adapters/RpcListener"
export type ExecuteResult =
| { ok: unknown }
| { err: { code: number; message: string } }
@@ -17,7 +18,7 @@ export interface System {
input: unknown
timeout?: number
},
): Promise<ExecuteResult>
): Promise<RpcResult>
// sandbox(
// effects: Effects,
// options: {

View File

@@ -7,7 +7,9 @@ export class CallbackHolder {
return this.root + (this.inc++).toString(36)
}
addCallback(callback: Function) {
return this.callbacks.set(this.newId(), callback)
const id = this.newId()
this.callbacks.set(id, callback)
return id
}
callCallback(index: string, args: any[]): Promise<unknown> {
const callback = this.callbacks.get(index)

497
core/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -22,7 +22,7 @@ if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
alias 'rust-musl-builder'='docker run $USE_TTY --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src -w /home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
alias 'rust-musl-builder'='docker run $USE_TTY --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$HOME/.cargo/git":/root/.cargo/git -v "$(pwd)":/home/rust/src -w /home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
set +e
fail=

View File

@@ -6,48 +6,48 @@ use serde::{Deserialize, Deserializer, Serialize};
use crate::{Id, InvalidId};
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)]
pub struct InterfaceId(Id);
impl FromStr for InterfaceId {
pub struct HostId(Id);
impl FromStr for HostId {
type Err = InvalidId;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Self(Id::try_from(s.to_owned())?))
}
}
impl From<Id> for InterfaceId {
impl From<Id> for HostId {
fn from(id: Id) -> Self {
Self(id)
}
}
impl std::fmt::Display for InterfaceId {
impl std::fmt::Display for HostId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", &self.0)
}
}
impl std::ops::Deref for InterfaceId {
impl std::ops::Deref for HostId {
type Target = str;
fn deref(&self) -> &Self::Target {
&*self.0
}
}
impl AsRef<str> for InterfaceId {
impl AsRef<str> for HostId {
fn as_ref(&self) -> &str {
self.0.as_ref()
}
}
impl<'de> Deserialize<'de> for InterfaceId {
impl<'de> Deserialize<'de> for HostId {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Ok(InterfaceId(Deserialize::deserialize(deserializer)?))
Ok(HostId(Deserialize::deserialize(deserializer)?))
}
}
impl AsRef<Path> for InterfaceId {
impl AsRef<Path> for HostId {
fn as_ref(&self) -> &Path {
self.0.as_ref().as_ref()
}
}
impl<'q> sqlx::Encode<'q, sqlx::Postgres> for InterfaceId {
impl<'q> sqlx::Encode<'q, sqlx::Postgres> for HostId {
fn encode_by_ref(
&self,
buf: &mut <sqlx::Postgres as sqlx::database::HasArguments<'q>>::ArgumentBuffer,
@@ -55,7 +55,7 @@ impl<'q> sqlx::Encode<'q, sqlx::Postgres> for InterfaceId {
<&str as sqlx::Encode<'q, sqlx::Postgres>>::encode_by_ref(&&**self, buf)
}
}
impl sqlx::Type<sqlx::Postgres> for InterfaceId {
impl sqlx::Type<sqlx::Postgres> for HostId {
fn type_info() -> sqlx::postgres::PgTypeInfo {
<&str as sqlx::Type<sqlx::Postgres>>::type_info()
}

View File

@@ -7,8 +7,8 @@ use yasi::InternedString;
mod action;
mod address;
mod health_check;
mod host;
mod image;
mod interface;
mod invalid_id;
mod package;
mod volume;
@@ -16,8 +16,8 @@ mod volume;
pub use action::ActionId;
pub use address::AddressId;
pub use health_check::HealthCheckId;
pub use host::HostId;
pub use image::ImageId;
pub use interface::InterfaceId;
pub use invalid_id::InvalidId;
pub use package::{PackageId, SYSTEM_PACKAGE_ID};
pub use volume::VolumeId;

View File

@@ -437,7 +437,8 @@ pub async fn reset_password_impl(
let account_password = &account.password;
ctx.db
.mutate(|d| {
d.as_server_info_mut()
d.as_public_mut()
.as_server_info_mut()
.as_password_hash_mut()
.ser(account_password)
})

View File

@@ -141,7 +141,8 @@ pub async fn backup_all(
}
ctx.db
.mutate(|v| {
v.as_server_info_mut()
v.as_public_mut()
.as_server_info_mut()
.as_status_info_mut()
.as_backup_progress_mut()
.ser(&None)
@@ -159,6 +160,7 @@ async fn assure_backing_up(
) -> Result<(), Error> {
db.mutate(|v| {
let backing_up = v
.as_public_mut()
.as_server_info_mut()
.as_status_info_mut()
.as_backup_progress_mut();
@@ -221,7 +223,7 @@ async fn perform_backup(
)
})?;
let ui = ctx.db.peek().await.into_ui().de()?;
let ui = ctx.db.peek().await.into_public().into_ui().de()?;
let mut os_backup_file =
AtomicFile::new(backup_guard.path().join("os-backup.cbor"), None::<PathBuf>)
@@ -261,7 +263,12 @@ async fn perform_backup(
backup_guard.save_and_unmount().await?;
ctx.db
.mutate(|v| v.as_server_info_mut().as_last_backup_mut().ser(&timestamp))
.mutate(|v| {
v.as_public_mut()
.as_server_info_mut()
.as_last_backup_mut()
.ser(&timestamp)
})
.await?;
Ok(backup_report)

View File

@@ -1,13 +1,12 @@
use std::collections::BTreeMap;
use chrono::{DateTime, Utc};
use models::PackageId;
use models::{HostId, PackageId};
use reqwest::Url;
use rpc_toolkit::{from_fn_async, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use crate::context::CliContext;
use crate::net::interface::InterfaceId;
#[allow(unused_imports)]
use crate::prelude::*;
use crate::util::serde::{Base32, Base64};
@@ -50,8 +49,8 @@ pub fn backup() -> ParentHandler {
struct BackupMetadata {
pub timestamp: DateTime<Utc>,
#[serde(default)]
pub network_keys: BTreeMap<InterfaceId, Base64<[u8; 32]>>,
pub network_keys: BTreeMap<HostId, Base64<[u8; 32]>>,
#[serde(default)]
pub tor_keys: BTreeMap<InterfaceId, Base32<[u8; 64]>>, // DEPRECATED
pub tor_keys: BTreeMap<HostId, Base32<[u8; 64]>>, // DEPRECATED
pub marketplace_url: Option<Url>,
}

View File

@@ -1,10 +1,9 @@
use std::collections::BTreeMap;
use std::sync::Arc;
use std::time::Duration;
use clap::Parser;
use color_eyre::eyre::eyre;
use indexmap::IndexSet;
use indexmap::{IndexMap, IndexSet};
use itertools::Itertools;
use models::{ErrorKind, OptionExt, PackageId};
use patch_db::value::InternedString;
@@ -18,15 +17,15 @@ use crate::context::{CliContext, RpcContext};
use crate::prelude::*;
use crate::util::serde::{HandlerExtSerde, StdinDeserializable};
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct ConfigSpec(pub IndexMap<InternedString, Value>);
pub mod action;
pub mod spec;
pub mod util;
pub use spec::{ConfigSpec, Defaultable};
use util::NumRange;
use self::action::ConfigRes;
use self::spec::ValueSpecPointer;
pub type Config = patch_db::value::InOMap<InternedString, Value>;
pub trait TypeOf {
@@ -53,8 +52,6 @@ pub enum ConfigurationError {
NoMatch(#[from] NoMatchWithPath),
#[error("System Error: {0}")]
SystemError(Error),
#[error("Permission Denied: {0}")]
PermissionDenied(ValueSpecPointer),
}
impl From<ConfigurationError> for Error {
fn from(err: ConfigurationError) -> Self {
@@ -122,8 +119,6 @@ pub enum MatchError {
PropertyMatchesUnionTag(InternedString, String),
#[error("Name of Property {0:?} Conflicts With Map Tag Name")]
PropertyNameMatchesMapTag(String),
#[error("Pointer Is Invalid: {0}")]
InvalidPointer(spec::ValueSpecPointer),
#[error("Object Key Is Invalid: {0}")]
InvalidKey(String),
#[error("Value In List Is Not Unique")]
@@ -178,65 +173,19 @@ pub struct SetParams {
// )]
#[instrument(skip_all)]
pub fn set() -> ParentHandler<SetParams, PackageId> {
ParentHandler::new()
.root_handler(
from_fn_async(set_impl)
.with_metadata("sync_db", Value::Bool(true))
.with_inherited(|set_params, id| (id, set_params))
.no_display()
.with_remote_cli::<CliContext>(),
)
.subcommand(
"dry",
from_fn_async(set_dry)
.with_inherited(|set_params, id| (id, set_params))
.with_display_serializable()
.with_remote_cli::<CliContext>(),
)
}
pub async fn set_dry(
ctx: RpcContext,
_: Empty,
(
id,
SetParams {
timeout,
config: StdinDeserializable(config),
},
): (PackageId, SetParams),
) -> Result<BTreeMap<PackageId, String>, Error> {
let breakages = BTreeMap::new();
let overrides = Default::default();
let configure_context = ConfigureContext {
breakages,
timeout: timeout.map(|t| *t),
config,
dry_run: true,
overrides,
};
ctx.services
.get(&id)
.await
.as_ref()
.ok_or_else(|| {
Error::new(
eyre!("There is no manager running for {id}"),
ErrorKind::Unknown,
)
})?
.configure(configure_context)
.await
ParentHandler::new().root_handler(
from_fn_async(set_impl)
.with_metadata("sync_db", Value::Bool(true))
.with_inherited(|set_params, id| (id, set_params))
.no_display()
.with_remote_cli::<CliContext>(),
)
}
#[derive(Default)]
pub struct ConfigureContext {
pub breakages: BTreeMap<PackageId, String>,
pub timeout: Option<Duration>,
pub config: Option<Config>,
pub overrides: BTreeMap<PackageId, Config>,
pub dry_run: bool,
}
#[instrument(skip_all)]
@@ -251,15 +200,9 @@ pub async fn set_impl(
},
): (PackageId, SetParams),
) -> Result<(), Error> {
let breakages = BTreeMap::new();
let overrides = Default::default();
let configure_context = ConfigureContext {
breakages,
timeout: timeout.map(|t| *t),
config,
dry_run: false,
overrides,
};
ctx.services
.get(&id)

File diff suppressed because it is too large Load Diff

View File

@@ -116,15 +116,27 @@ impl RpcContext {
let devices = lshw().await?;
let ram = get_mem_info().await?.total.0 as u64 * 1024 * 1024;
if !db.peek().await.as_server_info().as_ntp_synced().de()? {
if !db
.peek()
.await
.as_public()
.as_server_info()
.as_ntp_synced()
.de()?
{
let db = db.clone();
tokio::spawn(async move {
while !check_time_is_synchronized().await.unwrap() {
tokio::time::sleep(Duration::from_secs(30)).await;
}
db.mutate(|v| v.as_server_info_mut().as_ntp_synced_mut().ser(&true))
.await
.unwrap()
db.mutate(|v| {
v.as_public_mut()
.as_server_info_mut()
.as_ntp_synced_mut()
.ser(&true)
})
.await
.unwrap()
});
}
@@ -208,12 +220,15 @@ impl RpcContext {
self.db
.mutate(|f| {
let mut current_dependents = f
.as_public_mut()
.as_package_data()
.keys()?
.into_iter()
.map(|k| (k.clone(), BTreeMap::new()))
.collect::<BTreeMap<_, _>>();
for (package_id, package) in f.as_package_data_mut().as_entries_mut()? {
for (package_id, package) in
f.as_public_mut().as_package_data_mut().as_entries_mut()?
{
for (k, v) in package
.as_installed_mut()
.into_iter()
@@ -228,6 +243,7 @@ impl RpcContext {
}
for (package_id, current_dependents) in current_dependents {
if let Some(deps) = f
.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package_id)
.and_then(|pde| pde.expect_as_installed_mut().ok())
@@ -235,6 +251,7 @@ impl RpcContext {
{
deps.ser(&CurrentDependents(current_dependents))?;
} else if let Some(deps) = f
.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package_id)
.and_then(|pde| pde.expect_as_removing_mut().ok())
@@ -252,7 +269,7 @@ impl RpcContext {
let mut all_dependency_config_errs = BTreeMap::new();
let peek = self.db.peek().await;
for (package_id, package) in peek.as_package_data().as_entries()?.into_iter() {
for (package_id, package) in peek.as_public().as_package_data().as_entries()?.into_iter() {
let package = package.clone();
if let Some(current_dependencies) = package
.as_installed()
@@ -276,6 +293,7 @@ impl RpcContext {
.mutate(|v| {
for (package_id, errs) in all_dependency_config_errs {
if let Some(config_errors) = v
.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package_id)
.and_then(|pde| pde.as_installed_mut())

View File

@@ -11,7 +11,7 @@ use clap::Parser;
use futures::{FutureExt, StreamExt};
use http::header::COOKIE;
use http::HeaderMap;
use patch_db::json_ptr::JsonPointer;
use patch_db::json_ptr::{JsonPointer, ROOT};
use patch_db::{Dump, Revision};
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{command, from_fn_async, CallRemote, HandlerExt, ParentHandler};
@@ -25,13 +25,17 @@ use crate::middleware::auth::{HasValidSession, HashSessionToken};
use crate::prelude::*;
use crate::util::serde::{apply_expr, HandlerExtSerde};
lazy_static::lazy_static! {
static ref PUBLIC: JsonPointer = "/public".parse().unwrap();
}
#[instrument(skip_all)]
async fn ws_handler(
ctx: RpcContext,
session: Option<(HasValidSession, HashSessionToken)>,
mut stream: WebSocket,
) -> Result<(), Error> {
let (dump, sub) = ctx.db.dump_and_sub().await;
let (dump, sub) = ctx.db.dump_and_sub(PUBLIC.clone()).await;
if let Some((session, token)) = session {
let kill = subscribe_to_session_kill(&ctx, token).await;
@@ -181,7 +185,7 @@ pub enum RevisionsRes {
#[instrument(skip_all)]
async fn cli_dump(ctx: CliContext, DumpParams { path }: DumpParams) -> Result<Dump, RpcError> {
let dump = if let Some(path) = path {
PatchDb::open(path).await?.dump().await
PatchDb::open(path).await?.dump(&ROOT).await
} else {
from_value::<Dump>(ctx.call_remote("db.dump", imbl_value::json!({})).await?)?
};
@@ -201,7 +205,7 @@ pub struct DumpParams {
// display(display_serializable)
// )]
pub async fn dump(ctx: RpcContext, _: DumpParams) -> Result<Dump, Error> {
Ok(ctx.db.dump().await)
Ok(ctx.db.dump(&*PUBLIC).await)
}
#[instrument(skip_all)]

View File

@@ -7,7 +7,7 @@ use imbl_value::InternedString;
use ipnet::{Ipv4Net, Ipv6Net};
use isocountry::CountryCode;
use itertools::Itertools;
use models::{DataUrl, HealthCheckId, InterfaceId, PackageId};
use models::{DataUrl, HealthCheckId, HostId, PackageId};
use openssl::hash::MessageDigest;
use patch_db::json_ptr::JsonPointer;
use patch_db::{HasModel, Value};
@@ -16,7 +16,6 @@ use serde::{Deserialize, Serialize};
use ssh_key::public::Ed25519PublicKey;
use crate::account::AccountInfo;
use crate::config::spec::PackagePointerSpec;
use crate::net::utils::{get_iface_ipv4_addr, get_iface_ipv6_addr};
use crate::prelude::*;
use crate::progress::FullProgress;
@@ -30,72 +29,85 @@ use crate::{ARCH, PLATFORM};
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
// #[macro_debug]
pub struct Database {
pub server_info: ServerInfo,
pub package_data: AllPackageData,
pub ui: Value,
pub public: Public,
pub private: (), // TODO
}
impl Database {
pub fn init(account: &AccountInfo) -> Self {
let lan_address = account.hostname.lan_address().parse().unwrap();
Database {
server_info: ServerInfo {
arch: get_arch(),
platform: get_platform(),
id: account.server_id.clone(),
version: Current::new().semver().into(),
hostname: account.hostname.no_dot_host_name(),
last_backup: None,
last_wifi_region: None,
eos_version_compat: Current::new().compat().clone(),
lan_address,
tor_address: format!("https://{}", account.key.tor_address())
.parse()
.unwrap(),
ip_info: BTreeMap::new(),
status_info: ServerStatus {
backup_progress: None,
updated: false,
update_progress: None,
shutting_down: false,
restarting: false,
},
wifi: WifiInfo {
ssids: Vec::new(),
connected: None,
selected: None,
},
unread_notification_count: 0,
connection_addresses: ConnectionAddresses {
tor: Vec::new(),
clearnet: Vec::new(),
},
password_hash: account.password.clone(),
pubkey: ssh_key::PublicKey::from(Ed25519PublicKey::from(&account.key.ssh_key()))
public: Public {
server_info: ServerInfo {
arch: get_arch(),
platform: get_platform(),
id: account.server_id.clone(),
version: Current::new().semver().into(),
hostname: account.hostname.no_dot_host_name(),
last_backup: None,
last_wifi_region: None,
eos_version_compat: Current::new().compat().clone(),
lan_address,
tor_address: format!("https://{}", account.key.tor_address())
.parse()
.unwrap(),
ip_info: BTreeMap::new(),
status_info: ServerStatus {
backup_progress: None,
updated: false,
update_progress: None,
shutting_down: false,
restarting: false,
},
wifi: WifiInfo {
ssids: Vec::new(),
connected: None,
selected: None,
},
unread_notification_count: 0,
connection_addresses: ConnectionAddresses {
tor: Vec::new(),
clearnet: Vec::new(),
},
password_hash: account.password.clone(),
pubkey: ssh_key::PublicKey::from(Ed25519PublicKey::from(
&account.key.ssh_key(),
))
.to_openssh()
.unwrap(),
ca_fingerprint: account
.root_ca_cert
.digest(MessageDigest::sha256())
.unwrap()
.iter()
.map(|x| format!("{x:X}"))
.join(":"),
ntp_synced: false,
zram: true,
governor: None,
ca_fingerprint: account
.root_ca_cert
.digest(MessageDigest::sha256())
.unwrap()
.iter()
.map(|x| format!("{x:X}"))
.join(":"),
ntp_synced: false,
zram: true,
governor: None,
},
package_data: AllPackageData::default(),
ui: serde_json::from_str(include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/../../web/patchdb-ui-seed.json"
)))
.unwrap(),
},
package_data: AllPackageData::default(),
ui: serde_json::from_str(include_str!(concat!(
env!("CARGO_MANIFEST_DIR"),
"/../../web/patchdb-ui-seed.json"
)))
.unwrap(),
private: (), // TODO
}
}
}
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
// #[macro_debug]
pub struct Public {
pub server_info: ServerInfo,
pub package_data: AllPackageData,
pub ui: Value,
}
pub type DatabaseModel = Model<Database>;
fn get_arch() -> InternedString {
@@ -532,14 +544,13 @@ pub struct StaticDependencyInfo {
#[model = "Model<Self>"]
pub struct CurrentDependencyInfo {
#[serde(default)]
pub pointers: BTreeSet<PackagePointerSpec>,
pub health_checks: BTreeSet<HealthCheckId>,
}
#[derive(Debug, Default, Deserialize, Serialize)]
pub struct InterfaceAddressMap(pub BTreeMap<InterfaceId, InterfaceAddresses>);
pub struct InterfaceAddressMap(pub BTreeMap<HostId, InterfaceAddresses>);
impl Map for InterfaceAddressMap {
type Key = InterfaceId;
type Key = HostId;
type Value = InterfaceAddresses;
}

View File

@@ -3,6 +3,7 @@ use std::marker::PhantomData;
use std::panic::UnwindSafe;
pub use imbl_value::Value;
use patch_db::json_ptr::ROOT;
use patch_db::value::InternedString;
pub use patch_db::{HasModel, PatchDb};
use serde::de::DeserializeOwned;
@@ -42,7 +43,7 @@ pub trait PatchDbExt {
#[async_trait::async_trait]
impl PatchDbExt for PatchDb {
async fn peek(&self) -> DatabaseModel {
DatabaseModel::from(self.dump().await.value)
DatabaseModel::from(self.dump(&ROOT).await.value)
}
async fn mutate<U: UnwindSafe + Send>(
&self,

View File

@@ -8,7 +8,6 @@ use rpc_toolkit::{command, from_fn_async, Empty, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use tracing::instrument;
use crate::config::spec::PackagePointerSpec;
use crate::config::{Config, ConfigSpec, ConfigureContext};
use crate::context::{CliContext, RpcContext};
use crate::db::model::{CurrentDependencies, Database};
@@ -66,19 +65,11 @@ pub struct ConfigureParams {
dependency_id: PackageId,
}
pub fn configure() -> ParentHandler<ConfigureParams> {
ParentHandler::new()
.root_handler(
from_fn_async(configure_impl)
.with_inherited(|params, _| params)
.no_cli(),
)
.subcommand(
"dry",
from_fn_async(configure_dry)
.with_inherited(|params, _| params)
.with_display_serializable()
.with_remote_cli::<CliContext>(),
)
ParentHandler::new().root_handler(
from_fn_async(configure_impl)
.with_inherited(|params, _| params)
.no_cli(),
)
}
pub async fn configure_impl(
@@ -89,8 +80,6 @@ pub async fn configure_impl(
dependency_id,
}: ConfigureParams,
) -> Result<(), Error> {
let breakages = BTreeMap::new();
let overrides = Default::default();
let ConfigDryRes {
old_config: _,
new_config,
@@ -98,11 +87,8 @@ pub async fn configure_impl(
} = configure_logic(ctx.clone(), (dependent_id, dependency_id.clone())).await?;
let configure_context = ConfigureContext {
breakages,
timeout: Some(Duration::from_secs(3).into()),
config: Some(new_config),
dry_run: false,
overrides,
};
ctx.services
.get(&dependency_id)
@@ -127,19 +113,6 @@ pub struct ConfigDryRes {
pub spec: ConfigSpec,
}
// #[command(rename = "dry", display(display_serializable))]
#[instrument(skip_all)]
pub async fn configure_dry(
ctx: RpcContext,
_: Empty,
ConfigureParams {
dependent_id,
dependency_id,
}: ConfigureParams,
) -> Result<ConfigDryRes, Error> {
configure_logic(ctx, (dependent_id, dependency_id)).await
}
pub async fn configure_logic(
ctx: RpcContext,
(dependent_id, dependency_id): (PackageId, PackageId),
@@ -226,6 +199,7 @@ pub fn add_dependent_to_current_dependents_lists(
) -> Result<(), Error> {
for (dependency, dep_info) in &current_dependencies.0 {
if let Some(dependency_dependents) = db
.as_public_mut()
.as_package_data_mut()
.as_idx_mut(dependency)
.and_then(|pde| pde.as_installed_mut())
@@ -237,46 +211,6 @@ pub fn add_dependent_to_current_dependents_lists(
Ok(())
}
pub fn set_dependents_with_live_pointers_to_needs_config(
db: &mut Peeked,
id: &PackageId,
) -> Result<Vec<(PackageId, Version)>, Error> {
let mut res = Vec::new();
for (dep, info) in db
.as_package_data()
.as_idx(id)
.or_not_found(id)?
.as_installed()
.or_not_found(id)?
.as_current_dependents()
.de()?
.0
{
if info.pointers.iter().any(|ptr| match ptr {
// dependency id matches the package being uninstalled
PackagePointerSpec::TorAddress(ptr) => &ptr.package_id == id && &dep != id,
PackagePointerSpec::LanAddress(ptr) => &ptr.package_id == id && &dep != id,
// we never need to retarget these
PackagePointerSpec::TorKey(_) => false,
PackagePointerSpec::Config(_) => false,
}) {
let installed = db
.as_package_data_mut()
.as_idx_mut(&dep)
.or_not_found(&dep)?
.as_installed_mut()
.or_not_found(&dep)?;
let version = installed.as_manifest().as_version().de()?;
let configured = installed.as_status_mut().as_configured_mut();
if configured.de()? {
configured.ser(&false)?;
res.push((dep, version));
}
}
}
Ok(res)
}
#[instrument(skip_all)]
pub async fn compute_dependency_config_errs(
ctx: &RpcContext,

View File

@@ -218,7 +218,7 @@ pub async fn init(cfg: &ServerConfig) -> Result<InitResult, Error> {
let db = cfg.db(&account).await?;
tracing::info!("Opened PatchDB");
let peek = db.peek().await;
let mut server_info = peek.as_server_info().de()?;
let mut server_info = peek.as_public().as_server_info().de()?;
// write to ca cert store
tokio::fs::write(
@@ -343,7 +343,7 @@ pub async fn init(cfg: &ServerConfig) -> Result<InitResult, Error> {
};
db.mutate(|v| {
v.as_server_info_mut().ser(&server_info)?;
v.as_public_mut().as_server_info_mut().ser(&server_info)?;
Ok(())
})
.await?;

View File

@@ -41,7 +41,7 @@ pub const PKG_WASM_DIR: &str = "package-data/wasm";
// #[command(display(display_serializable))]
pub async fn list(ctx: RpcContext) -> Result<Value, Error> {
Ok(ctx.db.peek().await.as_package_data().as_entries()?
Ok(ctx.db.peek().await.as_public().as_package_data().as_entries()?
.iter()
.filter_map(|(id, pde)| {
let status = match pde.as_match() {
@@ -185,7 +185,13 @@ pub async fn sideload(ctx: RpcContext) -> Result<SideloadResponse, Error> {
let (err_send, err_recv) = oneshot::channel();
let progress = RequestGuid::new();
let db = ctx.db.clone();
let mut sub = db.subscribe().await;
let mut sub = db
.subscribe(
"/package-data/{id}/install-progress"
.parse::<JsonPointer>()
.with_kind(ErrorKind::Database)?,
)
.await;
ctx.add_continuation(
progress.clone(),
RpcContinuation::ws(
@@ -199,17 +205,15 @@ pub async fn sideload(ctx: RpcContext) -> Result<SideloadResponse, Error> {
ErrorKind::Cancelled,
)
})?;
let progress_path =
JsonPointer::parse(format!("/package-data/{id}/install-progress"))
.with_kind(ErrorKind::Database)?;
tokio::select! {
res = async {
while let Some(rev) = sub.recv().await {
if rev.patch.affects_path(&progress_path) {
if !rev.patch.0.is_empty() { // TODO: don't send empty patches?
ws.send(Message::Text(
serde_json::to_string(&if let Some(p) = db
.peek()
.await
.as_public()
.as_package_data()
.as_idx(&id)
.and_then(|e| e.as_install_progress())
@@ -230,16 +234,18 @@ pub async fn sideload(ctx: RpcContext) -> Result<SideloadResponse, Error> {
} => res?,
err = err_recv => {
if let Ok(e) = err {
ws.send(Message::Text(
serde_json::to_string(&Err::<(), _>(e))
.with_kind(ErrorKind::Serialization)?,
))
.await
.with_kind(ErrorKind::Network)?;
ws.send(Message::Text(
serde_json::to_string(&Err::<(), _>(e))
.with_kind(ErrorKind::Serialization)?,
))
.await
.with_kind(ErrorKind::Network)?;
}
}
}
ws.close().await.with_kind(ErrorKind::Network)?;
Ok::<_, Error>(())
}
.await
@@ -250,7 +256,7 @@ pub async fn sideload(ctx: RpcContext) -> Result<SideloadResponse, Error> {
}
.boxed()
}),
Duration::from_secs(30),
Duration::from_secs(600),
),
)
.await;
@@ -405,26 +411,31 @@ pub async fn uninstall(
) -> Result<PackageId, Error> {
ctx.db
.mutate(|db| {
let (manifest, static_files, installed) =
match db.as_package_data().as_idx(&id).or_not_found(&id)?.de()? {
PackageDataEntry::Installed(PackageDataEntryInstalled {
manifest,
static_files,
installed,
}) => (manifest, static_files, installed),
_ => {
return Err(Error::new(
eyre!("Package is not installed."),
crate::ErrorKind::NotFound,
));
}
};
let (manifest, static_files, installed) = match db
.as_public()
.as_package_data()
.as_idx(&id)
.or_not_found(&id)?
.de()?
{
PackageDataEntry::Installed(PackageDataEntryInstalled {
manifest,
static_files,
installed,
}) => (manifest, static_files, installed),
_ => {
return Err(Error::new(
eyre!("Package is not installed."),
crate::ErrorKind::NotFound,
));
}
};
let pde = PackageDataEntry::Removing(PackageDataEntryRemoving {
manifest,
static_files,
removing: installed,
});
db.as_package_data_mut().insert(&id, &pde)
db.as_public_mut().as_package_data_mut().insert(&id, &pde)
})
.await?;

View File

@@ -75,7 +75,8 @@ pub async fn update(
let ip_info = IpInfo::for_interface(&interface).await?;
ctx.db
.mutate(|db| {
db.as_server_info_mut()
db.as_public_mut()
.as_server_info_mut()
.as_ip_info_mut()
.insert(&interface, &ip_info)
})

View File

@@ -163,13 +163,13 @@ impl DnsController {
Command::new("resolvectl")
.arg("dns")
.arg("br-start9")
.arg("lxcbr0")
.arg("127.0.0.1")
.invoke(ErrorKind::Network)
.await?;
Command::new("resolvectl")
.arg("domain")
.arg("br-start9")
.arg("lxcbr0")
.arg("embassy")
.invoke(ErrorKind::Network)
.await?;

View File

@@ -0,0 +1,29 @@
use imbl_value::InternedString;
use serde::{Deserialize, Serialize};
use crate::net::host::multi::MultiHost;
pub mod multi;
pub enum Host {
Multi(MultiHost),
// Single(SingleHost),
// Static(StaticHost),
}
#[derive(Deserialize, Serialize)]
pub struct BindOptions {
scheme: InternedString,
preferred_external_port: u16,
add_ssl: Option<AddSslOptions>,
secure: bool,
ssl: bool,
}
#[derive(Deserialize, Serialize)]
pub struct AddSslOptions {
scheme: InternedString,
preferred_external_port: u16,
#[serde(default)]
add_x_forwarded_headers: bool,
}

View File

@@ -0,0 +1,13 @@
use std::collections::BTreeMap;
use imbl_value::InternedString;
use serde::{Deserialize, Serialize};
use crate::net::host::BindOptions;
use crate::net::keys::Key;
pub struct MultiHost {
id: InternedString,
key: Key,
binds: BTreeMap<u16, BindOptions>,
}

View File

@@ -1,122 +0,0 @@
use std::collections::BTreeMap;
use indexmap::IndexSet;
pub use models::InterfaceId;
use models::PackageId;
use serde::{Deserialize, Deserializer, Serialize};
use sqlx::{Executor, Postgres};
use tracing::instrument;
use crate::db::model::{InterfaceAddressMap, InterfaceAddresses};
use crate::net::keys::Key;
use crate::util::serde::Port;
use crate::{Error, ResultExt};
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct Interfaces(pub BTreeMap<InterfaceId, Interface>); // TODO
impl Interfaces {
#[instrument(skip_all)]
pub fn validate(&self) -> Result<(), Error> {
for (_, interface) in &self.0 {
interface.validate().with_ctx(|_| {
(
crate::ErrorKind::ValidateS9pk,
format!("Interface {}", interface.name),
)
})?;
}
Ok(())
}
#[instrument(skip_all)]
pub async fn install<Ex>(
&self,
secrets: &mut Ex,
package_id: &PackageId,
) -> Result<InterfaceAddressMap, Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
let mut interface_addresses = InterfaceAddressMap(BTreeMap::new());
for (id, iface) in &self.0 {
let mut addrs = InterfaceAddresses {
tor_address: None,
lan_address: None,
};
if iface.tor_config.is_some() || iface.lan_config.is_some() {
let key =
Key::for_interface(secrets, Some((package_id.clone(), id.clone()))).await?;
if iface.tor_config.is_some() {
addrs.tor_address = Some(key.tor_address().to_string());
}
if iface.lan_config.is_some() {
addrs.lan_address = Some(key.local_address());
}
}
interface_addresses.0.insert(id.clone(), addrs);
}
Ok(interface_addresses)
}
}
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct Interface {
pub name: String,
pub description: String,
pub tor_config: Option<TorConfig>,
pub lan_config: Option<BTreeMap<Port, LanPortConfig>>,
pub ui: bool,
pub protocols: IndexSet<String>,
}
impl Interface {
#[instrument(skip_all)]
pub fn validate(&self) -> Result<(), color_eyre::eyre::Report> {
if self.tor_config.is_some() && !self.protocols.contains("tcp") {
color_eyre::eyre::bail!("must support tcp to set up a tor hidden service");
}
if self.lan_config.is_some() && !self.protocols.contains("http") {
color_eyre::eyre::bail!("must support http to set up a lan service");
}
if self.ui && !(self.protocols.contains("http") || self.protocols.contains("https")) {
color_eyre::eyre::bail!("must support http or https to serve a ui");
}
Ok(())
}
}
#[derive(Clone, Debug, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct TorConfig {
pub port_mapping: BTreeMap<Port, Port>,
}
#[derive(Clone, Debug, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct LanPortConfig {
pub ssl: bool,
pub internal: u16,
}
impl<'de> Deserialize<'de> for LanPortConfig {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
#[derive(Deserialize)]
#[serde(rename_all = "kebab-case")]
struct PermissiveLanPortConfig {
ssl: bool,
internal: Option<u16>,
mapping: Option<u16>,
}
let config = PermissiveLanPortConfig::deserialize(deserializer)?;
Ok(LanPortConfig {
ssl: config.ssl,
internal: config
.internal
.or(config.mapping)
.ok_or_else(|| serde::de::Error::missing_field("internal"))?,
})
}
}

View File

@@ -1,6 +1,6 @@
use clap::Parser;
use color_eyre::eyre::eyre;
use models::{Id, InterfaceId, PackageId};
use models::{HostId, Id, PackageId};
use openssl::pkey::{PKey, Private};
use openssl::sha::Sha256;
use openssl::x509::X509;
@@ -22,13 +22,13 @@ use crate::util::crypto::ed25519_expand_key;
// TODO: delete once we may change tor addresses
async fn compat(
secrets: impl PgExecutor<'_>,
interface: &Option<(PackageId, InterfaceId)>,
host: &Option<(PackageId, HostId)>,
) -> Result<Option<[u8; 64]>, Error> {
if let Some((package, interface)) = interface {
if let Some((package, host)) = host {
if let Some(r) = sqlx::query!(
"SELECT key FROM tor WHERE package = $1 AND interface = $2",
package,
interface
host
)
.fetch_optional(secrets)
.await?
@@ -60,19 +60,19 @@ async fn compat(
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Key {
interface: Option<(PackageId, InterfaceId)>,
host: Option<(PackageId, HostId)>,
base: [u8; 32],
tor_key: [u8; 64], // Does NOT necessarily match base
}
impl Key {
pub fn interface(&self) -> Option<(PackageId, InterfaceId)> {
self.interface.clone()
pub fn host(&self) -> Option<(PackageId, HostId)> {
self.host.clone()
}
pub fn as_bytes(&self) -> [u8; 32] {
self.base
}
pub fn internal_address(&self) -> String {
self.interface
self.host
.as_ref()
.map(|(pkg_id, _)| format!("{}.embassy", pkg_id))
.unwrap_or_else(|| "embassy".to_owned())
@@ -111,21 +111,21 @@ impl Key {
Ed25519PrivateKey::from_bytes(&self.base)
}
pub(crate) fn from_pair(
interface: Option<(PackageId, InterfaceId)>,
host: Option<(PackageId, HostId)>,
bytes: [u8; 32],
tor_key: [u8; 64],
) -> Self {
Self {
interface,
host,
tor_key,
base: bytes,
}
}
pub fn from_bytes(interface: Option<(PackageId, InterfaceId)>, bytes: [u8; 32]) -> Self {
Self::from_pair(interface, bytes, ed25519_expand_key(&bytes))
pub fn from_bytes(host: Option<(PackageId, HostId)>, bytes: [u8; 32]) -> Self {
Self::from_pair(host, bytes, ed25519_expand_key(&bytes))
}
pub fn new(interface: Option<(PackageId, InterfaceId)>) -> Self {
Self::from_bytes(interface, rand::random())
pub fn new(host: Option<(PackageId, HostId)>) -> Self {
Self::from_bytes(host, rand::random())
}
pub(super) fn with_certs(self, certs: CertPair, int: X509, root: X509) -> KeyInfo {
KeyInfo {
@@ -163,10 +163,7 @@ impl Key {
.await?
.into_iter()
.map(|row| {
let interface = Some((
package.clone(),
InterfaceId::from(Id::try_from(row.interface)?),
));
let host = Some((package.clone(), HostId::from(Id::try_from(row.interface)?)));
let bytes = row.key.try_into().map_err(|e: Vec<u8>| {
Error::new(
eyre!("Invalid length for network key {} expected 32", e.len()),
@@ -175,7 +172,7 @@ impl Key {
})?;
Ok(match row.tor_key {
Some(tor_key) => Key::from_pair(
interface,
host,
bytes,
tor_key.try_into().map_err(|e: Vec<u8>| {
Error::new(
@@ -184,20 +181,20 @@ impl Key {
)
})?,
),
None => Key::from_bytes(interface, bytes),
None => Key::from_bytes(host, bytes),
})
})
.collect()
}
pub async fn for_interface<Ex>(
pub async fn for_host<Ex>(
secrets: &mut Ex,
interface: Option<(PackageId, InterfaceId)>,
host: Option<(PackageId, HostId)>,
) -> Result<Self, Error>
where
for<'a> &'a mut Ex: PgExecutor<'a>,
{
let tentative = rand::random::<[u8; 32]>();
let actual = if let Some((pkg, iface)) = &interface {
let actual = if let Some((pkg, iface)) = &host {
let k = tentative.as_slice();
let actual = sqlx::query!(
"INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO UPDATE SET package = EXCLUDED.package RETURNING key",
@@ -229,8 +226,8 @@ impl Key {
})?);
bytes
};
let mut res = Self::from_bytes(interface, actual);
if let Some(tor_key) = compat(secrets, &res.interface).await? {
let mut res = Self::from_bytes(host, actual);
if let Some(tor_key) = compat(secrets, &res.host).await? {
res.tor_key = tor_key;
}
Ok(res)
@@ -288,43 +285,43 @@ pub fn display_requires_reboot(_: RotateKeysParams, args: RequiresReboot) {
#[command(rename_all = "kebab-case")]
pub struct RotateKeysParams {
package: Option<PackageId>,
interface: Option<InterfaceId>,
host: Option<HostId>,
}
// #[command(display(display_requires_reboot))]
pub async fn rotate_key(
ctx: RpcContext,
RotateKeysParams { package, interface }: RotateKeysParams,
RotateKeysParams { package, host }: RotateKeysParams,
) -> Result<RequiresReboot, Error> {
let mut pgcon = ctx.secret_store.acquire().await?;
let mut tx = pgcon.begin().await?;
if let Some(package) = package {
let Some(interface) = interface else {
let Some(host) = host else {
return Err(Error::new(
eyre!("Must specify interface"),
eyre!("Must specify host"),
ErrorKind::InvalidRequest,
));
};
sqlx::query!(
"DELETE FROM tor WHERE package = $1 AND interface = $2",
&package,
&interface,
&host,
)
.execute(&mut *tx)
.await?;
sqlx::query!(
"DELETE FROM network_keys WHERE package = $1 AND interface = $2",
&package,
&interface,
&host,
)
.execute(&mut *tx)
.await?;
let new_key =
Key::for_interface(&mut *tx, Some((package.clone(), interface.clone()))).await?;
let new_key = Key::for_host(&mut *tx, Some((package.clone(), host.clone()))).await?;
let needs_config = ctx
.db
.mutate(|v| {
let installed = v
.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package)
.or_not_found(&package)?
@@ -332,8 +329,8 @@ pub async fn rotate_key(
.or_not_found("installed")?;
let addrs = installed
.as_interface_addresses_mut()
.as_idx_mut(&interface)
.or_not_found(&interface)?;
.as_idx_mut(&host)
.or_not_found(&host)?;
if let Some(lan) = addrs.as_lan_address_mut().transpose_mut() {
lan.ser(&new_key.local_address())?;
}
@@ -380,10 +377,15 @@ pub async fn rotate_key(
sqlx::query!("UPDATE account SET tor_key = NULL, network_key = gen_random_bytes(32)")
.execute(&mut *tx)
.await?;
let new_key = Key::for_interface(&mut *tx, None).await?;
let new_key = Key::for_host(&mut *tx, None).await?;
let url = format!("https://{}", new_key.tor_address()).parse()?;
ctx.db
.mutate(|v| v.as_server_info_mut().as_tor_address_mut().ser(&url))
.mutate(|v| {
v.as_public_mut()
.as_server_info_mut()
.as_tor_address_mut()
.ser(&url)
})
.await?;
tx.commit().await?;
Ok(RequiresReboot(true))

View File

@@ -4,7 +4,7 @@ use crate::context::CliContext;
pub mod dhcp;
pub mod dns;
pub mod interface;
pub mod host;
pub mod keys;
pub mod mdns;
pub mod net_controller;

View File

@@ -3,7 +3,7 @@ use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::{Arc, Weak};
use color_eyre::eyre::eyre;
use models::{InterfaceId, PackageId};
use models::{HostId, PackageId};
use sqlx::PgExecutor;
use tracing::instrument;
@@ -20,7 +20,7 @@ use crate::{Error, HOST_IP};
pub struct NetController {
pub(super) tor: TorController,
pub(super) vhost: VHostController,
// pub(super) dns: DnsController,
pub(super) dns: DnsController,
pub(super) ssl: Arc<SslManager>,
pub(super) os_bindings: Vec<Arc<()>>,
}
@@ -39,7 +39,7 @@ impl NetController {
let mut res = Self {
tor: TorController::new(tor_control, tor_socks),
vhost: VHostController::new(ssl.clone()),
// dns: DnsController::init(dns_bind).await?,
dns: DnsController::init(dns_bind).await?,
ssl,
os_bindings: Vec::new(),
};
@@ -60,8 +60,8 @@ impl NetController {
alpn.clone(),
)
.await?;
// self.os_bindings
// .push(self.dns.add(None, HOST_IP.into()).await?);
self.os_bindings
.push(self.dns.add(None, HOST_IP.into()).await?);
// LAN IP
self.os_bindings.push(
@@ -147,13 +147,13 @@ impl NetController {
package: PackageId,
ip: Ipv4Addr,
) -> Result<NetService, Error> {
// let dns = self.dns.add(Some(package.clone()), ip).await?;
let dns = self.dns.add(Some(package.clone()), ip).await?;
Ok(NetService {
shutdown: false,
id: package,
ip,
// dns,
dns,
controller: Arc::downgrade(self),
tor: BTreeMap::new(),
lan: BTreeMap::new(),
@@ -212,10 +212,10 @@ pub struct NetService {
shutdown: bool,
id: PackageId,
ip: Ipv4Addr,
// dns: Arc<()>,
dns: Arc<()>,
controller: Weak<NetController>,
tor: BTreeMap<(InterfaceId, u16), (Key, Vec<Arc<()>>)>,
lan: BTreeMap<(InterfaceId, u16), (Key, Vec<Arc<()>>)>,
tor: BTreeMap<(HostId, u16), (Key, Vec<Arc<()>>)>,
lan: BTreeMap<(HostId, u16), (Key, Vec<Arc<()>>)>,
}
impl NetService {
fn net_controller(&self) -> Result<Arc<NetController>, Error> {
@@ -229,14 +229,14 @@ impl NetService {
pub async fn add_tor<Ex>(
&mut self,
secrets: &mut Ex,
id: InterfaceId,
id: HostId,
external: u16,
internal: u16,
) -> Result<(), Error>
where
for<'a> &'a mut Ex: PgExecutor<'a>,
{
let key = Key::for_interface(secrets, Some((self.id.clone(), id.clone()))).await?;
let key = Key::for_host(secrets, Some((self.id.clone(), id.clone()))).await?;
let ctrl = self.net_controller()?;
let tor_idx = (id, external);
let mut tor = self
@@ -251,7 +251,7 @@ impl NetService {
self.tor.insert(tor_idx, tor);
Ok(())
}
pub async fn remove_tor(&mut self, id: InterfaceId, external: u16) -> Result<(), Error> {
pub async fn remove_tor(&mut self, id: HostId, external: u16) -> Result<(), Error> {
let ctrl = self.net_controller()?;
if let Some((key, rcs)) = self.tor.remove(&(id, external)) {
ctrl.remove_tor(&key, external, rcs).await?;
@@ -261,7 +261,7 @@ impl NetService {
pub async fn add_lan<Ex>(
&mut self,
secrets: &mut Ex,
id: InterfaceId,
id: HostId,
external: u16,
internal: u16,
connect_ssl: Result<(), AlpnInfo>,
@@ -269,7 +269,7 @@ impl NetService {
where
for<'a> &'a mut Ex: PgExecutor<'a>,
{
let key = Key::for_interface(secrets, Some((self.id.clone(), id.clone()))).await?;
let key = Key::for_host(secrets, Some((self.id.clone(), id.clone()))).await?;
let ctrl = self.net_controller()?;
let lan_idx = (id, external);
let mut lan = self
@@ -289,7 +289,7 @@ impl NetService {
self.lan.insert(lan_idx, lan);
Ok(())
}
pub async fn remove_lan(&mut self, id: InterfaceId, external: u16) -> Result<(), Error> {
pub async fn remove_lan(&mut self, id: HostId, external: u16) -> Result<(), Error> {
let ctrl = self.net_controller()?;
if let Some((key, rcs)) = self.lan.remove(&(id, external)) {
ctrl.remove_lan(&key, external, rcs).await?;
@@ -299,13 +299,13 @@ impl NetService {
pub async fn export_cert<Ex>(
&self,
secrets: &mut Ex,
id: &InterfaceId,
id: &HostId,
ip: IpAddr,
) -> Result<(), Error>
where
for<'a> &'a mut Ex: PgExecutor<'a>,
{
let key = Key::for_interface(secrets, Some((self.id.clone(), id.clone()))).await?;
let key = Key::for_host(secrets, Some((self.id.clone(), id.clone()))).await?;
let ctrl = self.net_controller()?;
let cert = ctrl.ssl.with_certs(key, ip).await?;
let cert_dir = cert_dir(&self.id, id);
@@ -332,8 +332,8 @@ impl NetService {
for ((_, external), (key, rcs)) in std::mem::take(&mut self.tor) {
errors.handle(ctrl.remove_tor(&key, external, rcs).await);
}
// std::mem::take(&mut self.dns);
// errors.handle(ctrl.dns.gc(Some(self.id.clone()), self.ip).await);
std::mem::take(&mut self.dns);
errors.handle(ctrl.dns.gc(Some(self.id.clone()), self.ip).await);
errors.into_result()
} else {
tracing::warn!("NetService dropped after NetController is shutdown");
@@ -355,7 +355,7 @@ impl Drop for NetService {
shutdown: true,
id: Default::default(),
ip: Ipv4Addr::new(0, 0, 0, 0),
// dns: Default::default(),
dns: Default::default(),
controller: Default::default(),
tor: Default::default(),
lan: Default::default(),

View File

@@ -336,7 +336,7 @@ pub struct SANInfo {
impl SANInfo {
pub fn new(key: &Key, hostname: &Hostname, ips: BTreeSet<IpAddr>) -> Self {
let mut dns = BTreeSet::new();
if let Some((id, _)) = key.interface() {
if let Some((id, _)) = key.host() {
dns.insert(MaybeWildcard::WithWildcard(format!("{id}.embassy")));
dns.insert(MaybeWildcard::WithWildcard(key.local_address().to_string()));
} else {

View File

@@ -682,7 +682,8 @@ impl WpaCli {
pub async fn save_config(&mut self, db: PatchDb) -> Result<(), Error> {
let new_country = self.get_country_low().await?;
db.mutate(|d| {
d.as_server_info_mut()
d.as_public_mut()
.as_server_info_mut()
.as_last_wifi_region_mut()
.ser(&new_country)
})

View File

@@ -102,7 +102,8 @@ pub async fn list(
ctx.db
.mutate(|d| {
d.as_server_info_mut()
d.as_public_mut()
.as_server_info_mut()
.as_unread_notification_count_mut()
.ser(&0)
})
@@ -308,7 +309,11 @@ impl NotificationManager {
{
return Ok(());
}
let mut count = peek.as_server_info().as_unread_notification_count().de()?;
let mut count = peek
.as_public()
.as_server_info()
.as_unread_notification_count()
.de()?;
let sql_package_id = package_id.as_ref().map(|p| &**p);
let sql_code = T::CODE;
let sql_level = format!("{}", level);
@@ -325,7 +330,8 @@ impl NotificationManager {
).execute(&self.sqlite).await?;
count += 1;
db.mutate(|db| {
db.as_server_info_mut()
db.as_public_mut()
.as_server_info_mut()
.as_unread_notification_count_mut()
.ser(&count)
})

View File

@@ -1,6 +1,6 @@
use std::collections::BTreeMap;
use models::PackageId;
use models::{ActionId, PackageId, ProcedureName};
use crate::config::ConfigureContext;
use crate::prelude::*;
@@ -9,14 +9,13 @@ use crate::service::Service;
impl Service {
pub async fn configure(
&self,
ConfigureContext {
breakages,
timeout,
config,
overrides,
dry_run,
}: ConfigureContext,
) -> Result<BTreeMap<PackageId, String>, Error> {
todo!()
ConfigureContext { timeout, config }: ConfigureContext,
) -> Result<(), Error> {
let container = &self.seed.persistent_container;
container
.execute::<Value>(ProcedureName::SetConfig, to_value(&config)?, timeout)
.await
.with_kind(ErrorKind::Action)?;
Ok(())
}
}

View File

@@ -7,7 +7,7 @@ use futures::future::BoxFuture;
use imbl::OrdMap;
use models::{ActionId, HealthCheckId, PackageId, ProcedureName};
use persistent_container::PersistentContainer;
use rpc_toolkit::{from_fn_async, CallRemoteHandler, Handler, HandlerArgs};
use rpc_toolkit::{from_fn_async, CallRemoteHandler, Empty, Handler, HandlerArgs};
use serde::{Deserialize, Serialize};
use start_stop::StartStop;
use tokio::sync::{watch, Notify};
@@ -128,6 +128,7 @@ impl Service {
.db
.peek()
.await
.into_public()
.into_package_data()
.into_idx(id)
.map(|pde| pde.into_match())
@@ -151,7 +152,7 @@ impl Service {
}
// TODO: delete s9pk?
ctx.db
.mutate(|v| v.as_package_data_mut().remove(id))
.mutate(|v| v.as_public_mut().as_package_data_mut().remove(id))
.await?;
Ok(None)
}
@@ -188,7 +189,8 @@ impl Service {
.mutate({
let manifest = s9pk.as_manifest().clone();
|db| {
db.as_package_data_mut()
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&manifest.id)
.or_not_found(&manifest.id)?
.ser(&PackageDataEntry::Installed(PackageDataEntryInstalled {
@@ -229,7 +231,7 @@ impl Service {
}
ctx.db
.mutate(|v| v.as_package_data_mut().remove(id))
.mutate(|v| v.as_public_mut().as_package_data_mut().remove(id))
.await?;
Ok(None)
@@ -274,7 +276,8 @@ impl Service {
}
ctx.db
.mutate(|d| {
d.as_package_data_mut()
d.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&manifest.id)
.or_not_found(&manifest.id)?
.ser(&PackageDataEntry::Installed(PackageDataEntryInstalled {
@@ -346,6 +349,11 @@ impl Service {
.await;
if let Some((hdl, shutdown)) = self.seed.persistent_container.rpc_server.send_replace(None)
{
self.seed
.persistent_container
.rpc_client
.request(rpc::Exit, Empty {})
.await?;
shutdown.shutdown();
hdl.await.with_kind(ErrorKind::Cancelled)?;
}
@@ -367,6 +375,12 @@ impl Service {
.persistent_container
.execute(ProcedureName::Uninit, to_value(&target_version)?, None) // TODO timeout
.await?;
let id = self.seed.persistent_container.s9pk.as_manifest().id.clone();
self.seed
.ctx
.db
.mutate(|d| d.as_public_mut().as_package_data_mut().remove(&id))
.await?;
self.shutdown().await
}
pub async fn backup(&self, guard: impl GenericMountGuard) -> Result<BackupReturn, Error> {
@@ -416,6 +430,7 @@ impl Actor for ServiceActor {
.db
.mutate(|d| {
if let Some(i) = d
.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&id)
.and_then(|p| p.as_installed_mut())

View File

@@ -46,7 +46,7 @@ struct ProcedureId(u64);
pub struct PersistentContainer {
pub(super) s9pk: S9pk,
pub(super) lxc_container: OnceCell<LxcContainer>,
rpc_client: UnixRpcClient,
pub(super) rpc_client: UnixRpcClient,
pub(super) rpc_server: watch::Sender<Option<(NonDetachingJoinHandle<()>, ShutdownHandle)>>,
// procedures: Mutex<Vec<(ProcedureName, ProcedureId)>>,
js_mount: MountGuard,
@@ -239,8 +239,8 @@ impl PersistentContainer {
let lxc_container = self.lxc_container.take();
async move {
let mut errs = ErrorCollection::new();
errs.handle(dbg!(rpc_client.request(rpc::Exit, Empty {}).await));
if let Some((hdl, shutdown)) = rpc_server {
errs.handle(rpc_client.request(rpc::Exit, Empty {}).await);
shutdown.shutdown();
errs.handle(hdl.await.with_kind(ErrorKind::Cancelled));
}

View File

@@ -12,6 +12,7 @@ use patch_db::json_ptr::JsonPointer;
use rpc_toolkit::{from_fn, from_fn_async, AnyContext, Context, Empty, HandlerExt, ParentHandler};
use tokio::process::Command;
use crate::db::model::ExposedUI;
use crate::disk::mount::filesystem::idmapped::IdMapped;
use crate::disk::mount::filesystem::loop_dev::LoopDev;
use crate::disk::mount::filesystem::overlayfs::OverlayGuard;
@@ -23,8 +24,7 @@ use crate::service::ServiceActorSeed;
use crate::status::health_check::HealthCheckResult;
use crate::status::MainStatus;
use crate::util::clap::FromStrParser;
use crate::util::new_guid;
use crate::{db::model::ExposedUI, util::Invoke};
use crate::util::{new_guid, Invoke};
use crate::{echo, ARCH};
#[derive(Clone)]
@@ -120,6 +120,10 @@ pub fn service_effect_handler() -> ParentHandler {
from_fn_async(get_ssl_certificate).no_cli(),
)
.subcommand("getSslKey", from_fn_async(get_ssl_key).no_cli())
.subcommand(
"getServiceInterface",
from_fn_async(get_service_interface).no_cli(),
)
// TODO @DrBonez when we get the new api for 4.0
// .subcommand("setDependencies",from_fn(set_dependencies))
// .subcommand("embassyGetInterface",from_fn(embassy_get_interface))
@@ -144,6 +148,43 @@ pub fn service_effect_handler() -> ParentHandler {
// .subcommand("reverseProxy",from_fn(reverse_pro)xy)
// TODO Callbacks
}
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, Parser)]
#[serde(rename_all = "camelCase")]
struct GetServiceInterfaceParams {
package_id: Option<PackageId>,
service_interface_id: String,
callback: String,
}
async fn get_service_interface(
_: AnyContext,
GetServiceInterfaceParams {
callback,
package_id,
service_interface_id,
}: GetServiceInterfaceParams,
) -> Result<Value, Error> {
// TODO @Dr_Bonez
Ok(json!({
"id": service_interface_id,
"name": service_interface_id,
"description": "This is a fake",
"hasPrimary": false,
"disabled": false,
"addressInfo": json!({
"username": Value::Null,
"hostId": "HostId?",
"options": json!({
"scheme": Value::Null,
"preferredExternalPort": 80,
"addSsl":Value::Null,
"secure": false,
"ssl": false
}),
"suffix": "http"
}),
"type": "ui"
}))
}
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, Parser)]
#[serde(rename_all = "camelCase")]
@@ -255,6 +296,7 @@ async fn get_store(
let peeked = context.ctx.db.peek().await;
let package_id = package_id.unwrap_or(context.id.clone());
let value = peeked
.as_public()
.as_package_data()
.as_idx(&package_id)
.or_not_found(&package_id)?
@@ -286,6 +328,7 @@ async fn set_store(
.db
.mutate(|db| {
let model = db
.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package_id)
.or_not_found(&package_id)?
@@ -317,7 +360,8 @@ async fn expose_for_dependents(
.ctx
.db
.mutate(|db| {
db.as_package_data_mut()
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package_id)
.or_not_found(&package_id)?
.as_installed_mut()
@@ -344,7 +388,8 @@ async fn expose_ui(
.ctx
.db
.mutate(|db| {
db.as_package_data_mut()
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package_id)
.or_not_found(&package_id)?
.as_installed_mut()
@@ -369,7 +414,11 @@ struct ParamsMaybePackageId {
async fn exists(context: EffectContext, params: ParamsPackageId) -> Result<Value, Error> {
let context = context.deref()?;
let peeked = context.ctx.db.peek().await;
let package = peeked.as_package_data().as_idx(&params.package).is_some();
let package = peeked
.as_public()
.as_package_data()
.as_idx(&params.package)
.is_some();
Ok(json!(package))
}
@@ -408,6 +457,7 @@ async fn get_configured(context: EffectContext, _: Empty) -> Result<Value, Error
let peeked = context.ctx.db.peek().await;
let package_id = &context.id;
let package = peeked
.as_public()
.as_package_data()
.as_idx(&package_id)
.or_not_found(&package_id)?
@@ -424,6 +474,7 @@ async fn stopped(context: EffectContext, params: ParamsMaybePackageId) -> Result
let peeked = context.ctx.db.peek().await;
let package_id = params.package_id.unwrap_or_else(|| context.id.clone());
let package = peeked
.as_public()
.as_package_data()
.as_idx(&package_id)
.or_not_found(&package_id)?
@@ -439,6 +490,7 @@ async fn running(context: EffectContext, params: ParamsMaybePackageId) -> Result
let peeked = context.ctx.db.peek().await;
let package_id = params.package_id.unwrap_or_else(|| context.id.clone());
let package = peeked
.as_public()
.as_package_data()
.as_idx(&package_id)
.or_not_found(&package_id)?
@@ -489,7 +541,8 @@ async fn set_configured(context: EffectContext, params: SetConfigured) -> Result
.ctx
.db
.mutate(|db| {
db.as_package_data_mut()
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(package_id)
.or_not_found(package_id)?
.as_installed_mut()
@@ -578,6 +631,7 @@ async fn set_health(context: EffectContext, params: SetHealth) -> Result<Value,
.db
.mutate(move |db| {
let mut main = db
.as_public()
.as_package_data()
.as_idx(package_id)
.or_not_found(package_id)?
@@ -600,7 +654,8 @@ async fn set_health(context: EffectContext, params: SetHealth) -> Result<Value,
}
_ => return Ok(()),
};
db.as_package_data_mut()
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(package_id)
.or_not_found(package_id)?
.as_installed_mut()

View File

@@ -61,7 +61,7 @@ impl ServiceMap {
#[instrument(skip_all)]
pub async fn init(&self, ctx: &RpcContext) -> Result<(), Error> {
for id in ctx.db.peek().await.as_package_data().keys()? {
for id in ctx.db.peek().await.as_public().as_package_data().keys()? {
if let Err(e) = self.load(ctx, &id, LoadDisposition::Retry).await {
tracing::error!("Error loading installed package as service: {e}");
tracing::debug!("{e:?}");
@@ -136,6 +136,7 @@ impl ServiceMap {
let install_progress = progress.snapshot();
move |db| {
let pde = match db
.as_public()
.as_package_data()
.as_idx(&id)
.map(|x| x.de())
@@ -174,7 +175,9 @@ impl ServiceMap {
))
}
};
db.as_package_data_mut().insert(&manifest.id, &pde)
db.as_public_mut()
.as_package_data_mut()
.insert(&manifest.id, &pde)
}
}))
.await?;
@@ -194,7 +197,8 @@ impl ServiceMap {
NonDetachingJoinHandle::from(tokio::spawn(progress.sync_to_db(
ctx.db.clone(),
move |v| {
v.as_package_data_mut()
v.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&deref_id)
.and_then(|e| e.as_install_progress_mut())
},

View File

@@ -84,7 +84,8 @@ async fn setup_init(
account.set_password(&password)?;
account.save(secrets_tx.as_mut()).await?;
db.mutate(|m| {
m.as_server_info_mut()
m.as_public_mut()
.as_server_info_mut()
.as_password_hash_mut()
.ser(&account.password)
})
@@ -310,11 +311,6 @@ pub async fn execute(
tokio::task::spawn({
async move {
let ctx = ctx.clone();
let recovery_source = recovery_source;
let embassy_password = embassy_password;
let recovery_source = recovery_source;
let recovery_password = recovery_password;
match execute_inner(
ctx.clone(),
embassy_logicalname,

View File

@@ -78,7 +78,8 @@ impl Shutdown {
pub async fn shutdown(ctx: RpcContext) -> Result<(), Error> {
ctx.db
.mutate(|db| {
db.as_server_info_mut()
db.as_public_mut()
.as_server_info_mut()
.as_status_info_mut()
.as_shutting_down_mut()
.ser(&true)
@@ -97,7 +98,8 @@ pub async fn shutdown(ctx: RpcContext) -> Result<(), Error> {
pub async fn restart(ctx: RpcContext) -> Result<(), Error> {
ctx.db
.mutate(|db| {
db.as_server_info_mut()
db.as_public_mut()
.as_server_info_mut()
.as_status_info_mut()
.as_restarting_mut()
.ser(&true)

View File

@@ -83,7 +83,7 @@ pub struct ZramParams {
pub async fn zram(ctx: RpcContext, ZramParams { enable }: ZramParams) -> Result<(), Error> {
let db = ctx.db.peek().await;
let zram = db.as_server_info().as_zram().de()?;
let zram = db.as_public().as_server_info().as_zram().de()?;
if enable == zram {
return Ok(());
}
@@ -100,7 +100,10 @@ pub async fn zram(ctx: RpcContext, ZramParams { enable }: ZramParams) -> Result<
}
ctx.db
.mutate(|v| {
v.as_server_info_mut().as_zram_mut().ser(&enable)?;
v.as_public_mut()
.as_server_info_mut()
.as_zram_mut()
.ser(&enable)?;
Ok(())
})
.await?;
@@ -153,10 +156,22 @@ pub async fn governor(
}
set_governor(&set).await?;
ctx.db
.mutate(|d| d.as_server_info_mut().as_governor_mut().ser(&Some(set)))
.mutate(|d| {
d.as_public_mut()
.as_server_info_mut()
.as_governor_mut()
.ser(&Some(set))
})
.await?;
}
let current = ctx.db.peek().await.as_server_info().as_governor().de()?;
let current = ctx
.db
.peek()
.await
.as_public()
.as_server_info()
.as_governor()
.de()?;
Ok(GovernorInfo { current, available })
}

View File

@@ -93,7 +93,7 @@ async fn maybe_do_update(ctx: RpcContext, marketplace_url: Url) -> Result<Option
.await
.with_kind(ErrorKind::Network)?
.version;
let current_version = peeked.as_server_info().as_version().de()?;
let current_version = peeked.as_public().as_server_info().as_version().de()?;
if latest_version < *current_version {
return Ok(None);
}
@@ -105,7 +105,7 @@ async fn maybe_do_update(ctx: RpcContext, marketplace_url: Url) -> Result<Option
let status = ctx
.db
.mutate(|db| {
let mut status = peeked.as_server_info().as_status_info().de()?;
let mut status = peeked.as_public().as_server_info().as_status_info().de()?;
if status.update_progress.is_some() {
return Err(Error::new(
eyre!("Server is already updating!"),
@@ -117,7 +117,10 @@ async fn maybe_do_update(ctx: RpcContext, marketplace_url: Url) -> Result<Option
size: None,
downloaded: 0,
});
db.as_server_info_mut().as_status_info_mut().ser(&status)?;
db.as_public_mut()
.as_server_info_mut()
.as_status_info_mut()
.ser(&status)?;
Ok(status)
})
.await?;
@@ -130,7 +133,8 @@ async fn maybe_do_update(ctx: RpcContext, marketplace_url: Url) -> Result<Option
let res = do_update(ctx.clone(), eos_url).await;
ctx.db
.mutate(|db| {
db.as_server_info_mut()
db.as_public_mut()
.as_server_info_mut()
.as_status_info_mut()
.as_update_progress_mut()
.ser(&None)
@@ -140,7 +144,8 @@ async fn maybe_do_update(ctx: RpcContext, marketplace_url: Url) -> Result<Option
Ok(()) => {
ctx.db
.mutate(|db| {
db.as_server_info_mut()
db.as_public_mut()
.as_server_info_mut()
.as_status_info_mut()
.as_updated_mut()
.ser(&true)
@@ -199,7 +204,8 @@ async fn do_update(ctx: RpcContext, eos_url: EosUrl) -> Result<(), Error> {
while let Some(progress) = rsync.progress.next().await {
ctx.db
.mutate(|db| {
db.as_server_info_mut()
db.as_public_mut()
.as_server_info_mut()
.as_status_info_mut()
.as_update_progress_mut()
.ser(&Some(UpdateProgress {

View File

@@ -97,7 +97,7 @@ impl<A: Actor> SimpleActor<A> {
Some((msg, reply)) if shutdown_recv.try_recv() == Err(TryRecvError::Empty) => {
let mut new_bg = BackgroundJobs::default();
tokio::select! {
res = msg.handle_with(&mut actor, &mut new_bg) => { reply.send(res); },
res = msg.handle_with(&mut actor, &mut new_bg) => { let _ = reply.send(res); },
_ = &mut bg => (),
}
bg.jobs.append(&mut new_bg.jobs);
@@ -129,7 +129,9 @@ impl<A: Actor> SimpleActor<A> {
))));
}
let (reply_send, reply_recv) = oneshot::channel();
self.messenger.send((Box::new(message), reply_send));
self.messenger
.send((Box::new(message), reply_send))
.unwrap();
futures::future::Either::Right(
reply_recv
.map_err(|_| Error::new(eyre!("actor runtime has exited"), ErrorKind::Unknown))
@@ -159,11 +161,11 @@ impl<A: Actor> SimpleActor<A> {
drop(self.messenger);
let timeout = match strategy {
PendingMessageStrategy::CancelAll => {
self.shutdown.send(());
self.shutdown.send(()).unwrap();
Some(Duration::from_secs(0))
}
PendingMessageStrategy::FinishCurrentCancelPending { timeout } => {
self.shutdown.send(());
self.shutdown.send(()).unwrap();
timeout
}
PendingMessageStrategy::FinishAll { timeout } => timeout,

View File

@@ -70,8 +70,12 @@ where
let semver = self.semver().into();
let compat = self.compat().clone();
db.mutate(|d| {
d.as_server_info_mut().as_version_mut().ser(&semver)?;
d.as_server_info_mut()
d.as_public_mut()
.as_server_info_mut()
.as_version_mut()
.ser(&semver)?;
d.as_public_mut()
.as_server_info_mut()
.as_eos_version_compat_mut()
.ser(&compat)?;
Ok(())
@@ -166,7 +170,14 @@ where
}
pub async fn init(db: &PatchDb, secrets: &PgPool) -> Result<(), Error> {
let version = Version::from_util_version(db.peek().await.as_server_info().as_version().de()?);
let version = Version::from_util_version(
db.peek()
.await
.as_public()
.as_server_info()
.as_version()
.de()?,
);
match version {
Version::V0_3_4(v) => v.0.migrate_to(&Current::new(), db.clone(), secrets).await?,

View File

@@ -56,20 +56,23 @@ impl VersionT for Version {
let mut account = AccountInfo::load(secrets).await?;
let account = db
.mutate(|d| {
d.as_server_info_mut().as_pubkey_mut().ser(
d.as_public_mut().as_server_info_mut().as_pubkey_mut().ser(
&ssh_key::PublicKey::from(Ed25519PublicKey::from(&account.key.ssh_key()))
.to_openssh()?,
)?;
d.as_server_info_mut().as_ca_fingerprint_mut().ser(
&account
.root_ca_cert
.digest(MessageDigest::sha256())
.unwrap()
.iter()
.map(|x| format!("{x:X}"))
.join(":"),
)?;
let server_info = d.as_server_info();
d.as_public_mut()
.as_server_info_mut()
.as_ca_fingerprint_mut()
.ser(
&account
.root_ca_cert
.digest(MessageDigest::sha256())
.unwrap()
.iter()
.map(|x| format!("{x:X}"))
.join(":"),
)?;
let server_info = d.as_public_mut().as_server_info();
account.hostname = server_info.as_hostname().de().map(Hostname)?;
account.server_id = server_info.as_id().de()?;
@@ -81,15 +84,16 @@ impl VersionT for Version {
let parsed_url = Some(COMMUNITY_URL.parse().unwrap());
db.mutate(|d| {
let mut ui = d.as_ui().de()?;
let mut ui = d.as_public().as_ui().de()?;
use imbl_value::json;
ui["marketplace"]["known-hosts"][COMMUNITY_URL] = json!({});
ui["marketplace"]["known-hosts"][MAIN_REGISTRY] = json!({});
for package_id in d.as_package_data().keys()? {
for package_id in d.as_public().as_package_data().keys()? {
if !COMMUNITY_SERVICES.contains(&&*package_id.to_string()) {
continue;
}
d.as_package_data_mut()
d.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package_id)
.or_not_found(&package_id)?
.as_installed_mut()
@@ -100,19 +104,20 @@ impl VersionT for Version {
ui["theme"] = json!("Dark".to_string());
ui["widgets"] = json!([]);
d.as_ui_mut().ser(&ui)
d.as_public_mut().as_ui_mut().ser(&ui)
})
.await
}
async fn down(&self, db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
db.mutate(|d| {
let mut ui = d.as_ui().de()?;
let mut ui = d.as_public().as_ui().de()?;
let parsed_url = Some(MAIN_REGISTRY.parse().unwrap());
for package_id in d.as_package_data().keys()? {
for package_id in d.as_public().as_package_data().keys()? {
if !COMMUNITY_SERVICES.contains(&&*package_id.to_string()) {
continue;
}
d.as_package_data_mut()
d.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package_id)
.or_not_found(&package_id)?
.as_installed_mut()
@@ -128,7 +133,7 @@ impl VersionT for Version {
ui["marketplace"]["known-hosts"][COMMUNITY_URL].take();
ui["marketplace"]["known-hosts"][MAIN_REGISTRY].take();
d.as_ui_mut().ser(&ui)
d.as_public_mut().as_ui_mut().ser(&ui)
})
.await
}

View File

@@ -26,7 +26,7 @@ impl VersionT for Version {
}
async fn up(&self, db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
db.mutate(|v| {
let tor_address_lens = v.as_server_info_mut().as_tor_address_mut();
let tor_address_lens = v.as_public_mut().as_server_info_mut().as_tor_address_mut();
let mut tor_addr = tor_address_lens.de()?;
tor_addr
.set_scheme("https")

View File

@@ -30,7 +30,7 @@ impl VersionT for Version {
async fn up(&self, db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
let peek = db.peek().await;
let mut url_replacements = BTreeMap::new();
for (_, pde) in peek.as_package_data().as_entries()? {
for (_, pde) in peek.as_public().as_package_data().as_entries()? {
for (dependency, info) in pde
.as_installed()
.map(|i| i.as_dependency_info().as_entries())
@@ -63,7 +63,7 @@ impl VersionT for Version {
}
let prev_zram = db
.mutate(|v| {
for (_, pde) in v.as_package_data_mut().as_entries_mut()? {
for (_, pde) in v.as_public_mut().as_package_data_mut().as_entries_mut()? {
for (dependency, info) in pde
.as_installed_mut()
.map(|i| i.as_dependency_info_mut().as_entries_mut())
@@ -95,7 +95,10 @@ impl VersionT for Version {
}
}
}
v.as_server_info_mut().as_zram_mut().replace(&true)
v.as_public_mut()
.as_server_info_mut()
.as_zram_mut()
.replace(&true)
})
.await?;
if !prev_zram {

View File

@@ -3,17 +3,15 @@ use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};
pub use helpers::script_dir;
use models::PackageId;
pub use models::VolumeId;
use models::{HostId, PackageId};
use serde::{Deserialize, Serialize};
use tracing::instrument;
use crate::context::RpcContext;
use crate::net::interface::{InterfaceId, Interfaces};
use crate::net::PACKAGE_CERT_PATH;
use crate::prelude::*;
use crate::util::Version;
use crate::{Error, ResultExt};
pub const PKG_VOLUME_DIR: &str = "package-data/volumes";
pub const BACKUP_DIR: &str = "/media/embassy/backups";
@@ -21,21 +19,6 @@ pub const BACKUP_DIR: &str = "/media/embassy/backups";
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct Volumes(BTreeMap<VolumeId, Volume>);
impl Volumes {
#[instrument(skip_all)]
pub fn validate(&self, interfaces: &Interfaces) -> Result<(), Error> {
for (id, volume) in &self.0 {
volume
.validate(interfaces)
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, format!("Volume {}", id)))?;
if let Volume::Backup { .. } = volume {
return Err(Error::new(
eyre!("Invalid volume type \"backup\""),
ErrorKind::ParseS9pk,
)); // Volume::Backup is for internal use and shouldn't be declared in manifest
}
}
Ok(())
}
#[instrument(skip_all)]
pub async fn install(
&self,
@@ -112,8 +95,8 @@ pub fn backup_dir(pkg_id: &PackageId) -> PathBuf {
Path::new(BACKUP_DIR).join(pkg_id).join("data")
}
pub fn cert_dir(pkg_id: &PackageId, interface_id: &InterfaceId) -> PathBuf {
Path::new(PACKAGE_CERT_PATH).join(pkg_id).join(interface_id)
pub fn cert_dir(pkg_id: &PackageId, host_id: &HostId) -> PathBuf {
Path::new(PACKAGE_CERT_PATH).join(pkg_id).join(host_id)
}
#[derive(Clone, Debug, Deserialize, Serialize)]
@@ -135,23 +118,11 @@ pub enum Volume {
readonly: bool,
},
#[serde(rename_all = "kebab-case")]
Certificate { interface_id: InterfaceId },
Certificate { interface_id: HostId },
#[serde(rename_all = "kebab-case")]
Backup { readonly: bool },
}
impl Volume {
#[instrument(skip_all)]
pub fn validate(&self, interfaces: &Interfaces) -> Result<(), color_eyre::eyre::Report> {
match self {
Volume::Certificate { interface_id } => {
if !interfaces.0.contains_key(interface_id) {
color_eyre::eyre::bail!("unknown interface: {}", interface_id);
}
}
_ => (),
}
Ok(())
}
pub async fn install(
&self,
path: &PathBuf,

View File

@@ -35,6 +35,11 @@ const knownProtocols = {
ssl: false,
defaultPort: 8333,
},
lightning: {
secure: true,
ssl: true,
defaultPort: 9735,
},
grpc: {
secure: true,
ssl: true,
@@ -47,11 +52,11 @@ const knownProtocols = {
},
} as const
type Scheme = string | null
export type Scheme = string | null
type AddSslOptions = {
preferredExternalPort: number
scheme: Scheme
preferredExternalPort: number
addXForwardedHeaders?: boolean /** default: false */
}
type Security = { secure: false; ssl: false } | { secure: true; ssl: boolean }
@@ -73,7 +78,7 @@ type NotProtocolsWithSslVariants = Exclude<
ProtocolsWithSslVariants
>
type PortOptionsByKnownProtocol =
type BindOptionsByKnownProtocol =
| ({
protocol: ProtocolsWithSslVariants
preferredExternalPort?: number
@@ -85,7 +90,7 @@ type PortOptionsByKnownProtocol =
scheme?: Scheme
addSsl?: AddSslOptions | null
}
type PortOptionsByProtocol = PortOptionsByKnownProtocol | BindOptions
type BindOptionsByProtocol = BindOptionsByKnownProtocol | BindOptions
export type HostKind = "static" | "single" | "multi"
@@ -104,7 +109,7 @@ export class Host {
async bindPort(
internalPort: number,
options: PortOptionsByProtocol,
options: BindOptionsByProtocol,
): Promise<Origin<this>> {
if (hasStringProtocol(options)) {
return await this.bindPortForKnown(options, internalPort)
@@ -138,7 +143,7 @@ export class Host {
}
private async bindPortForKnown(
options: PortOptionsByKnownProtocol,
options: BindOptionsByKnownProtocol,
internalPort: number,
) {
const scheme =
@@ -174,7 +179,7 @@ export class Host {
}
private getAddSsl(
options: PortOptionsByKnownProtocol,
options: BindOptionsByKnownProtocol,
protoInfo: KnownProtocols[keyof KnownProtocols],
): AddSslOptions | null {
if ("noAddSsl" in options && options.noAddSsl) return null

View File

@@ -1,5 +1,7 @@
import { AddressInfo } from "../types"
import { Host, BindOptions } from "./Host"
import { AddressReceipt } from "./AddressReceipt"
import { Host, BindOptions, Scheme } from "./Host"
import { ServiceInterfaceBuilder } from "./ServiceInterfaceBuilder"
export class Origin<T extends Host> {
constructor(
@@ -7,7 +9,7 @@ export class Origin<T extends Host> {
readonly options: BindOptions,
) {}
build({ username, path, search }: BuildOptions): AddressInfo {
build({ username, path, search, schemeOverride }: BuildOptions): AddressInfo {
const qpEntries = Object.entries(search)
.map(
([key, val]) => `${encodeURIComponent(key)}=${encodeURIComponent(val)}`,
@@ -18,15 +20,77 @@ export class Origin<T extends Host> {
return {
hostId: this.host.options.id,
options: this.options,
bindOptions: {
...this.options,
scheme: schemeOverride ? schemeOverride.noSsl : this.options.scheme,
addSsl: this.options.addSsl
? {
...this.options.addSsl,
scheme: schemeOverride
? schemeOverride.ssl
: this.options.addSsl.scheme,
}
: null,
},
suffix: `${path}${qp}`,
username,
}
}
/**
* A function to register a group of origins (<PROTOCOL> :// <HOSTNAME> : <PORT>) with StartOS
*
* The returned addressReceipt serves as proof that the addresses were registered
*
* @param addressInfo
* @returns
*/
async export(
serviceInterfaces: ServiceInterfaceBuilder[],
): Promise<AddressInfo[] & AddressReceipt> {
const addressesInfo = []
for (let serviceInterface of serviceInterfaces) {
const {
name,
description,
hasPrimary,
disabled,
id,
type,
username,
path,
search,
schemeOverride,
masked,
} = serviceInterface.options
const addressInfo = this.build({
username,
path,
search,
schemeOverride,
})
await serviceInterface.options.effects.exportServiceInterface({
id,
name,
description,
hasPrimary,
disabled,
addressInfo,
type,
masked,
})
addressesInfo.push(addressInfo)
}
return addressesInfo as AddressInfo[] & AddressReceipt
}
}
type BuildOptions = {
scheme: string | null
schemeOverride: { ssl: Scheme; noSsl: Scheme } | null
username: string | null
path: string
search: Record<string, string>

View File

@@ -1,8 +1,6 @@
import { AddressInfo, Effects } from "../types"
import { Effects } from "../types"
import { ServiceInterfaceType } from "../util/utils"
import { AddressReceipt } from "./AddressReceipt"
import { Host } from "./Host"
import { Origin } from "./Origin"
import { Scheme } from "./Host"
/**
* A helper class for creating a Network Interface
@@ -25,47 +23,11 @@ export class ServiceInterfaceBuilder {
hasPrimary: boolean
disabled: boolean
type: ServiceInterfaceType
username: null | string
username: string | null
path: string
search: Record<string, string>
schemeOverride: { ssl: Scheme; noSsl: Scheme } | null
masked: boolean
},
) {}
/**
* A function to register a group of origins (<PROTOCOL> :// <HOSTNAME> : <PORT>) with StartOS
*
* The returned addressReceipt serves as proof that the addresses were registered
*
* @param addressInfo
* @returns
*/
async export<OriginForHost extends Origin<Host>>(
origin: OriginForHost,
): Promise<AddressInfo & AddressReceipt> {
const {
name,
description,
hasPrimary,
disabled,
id,
type,
username,
path,
search,
} = this.options
const addressInfo = origin.build({ username, path, search, scheme: null })
await this.options.effects.exportServiceInterface({
id,
name,
description,
hasPrimary,
disabled,
addressInfo,
type,
})
return addressInfo as AddressInfo & AddressReceipt
}
}

View File

@@ -19,9 +19,11 @@ describe("host", () => {
username: "bar",
path: "/baz",
search: { qux: "yes" },
schemeOverride: null,
masked: false,
})
await fooInterface.export([fooOrigin])
await fooOrigin.export([fooInterface])
}
})
})

View File

@@ -1,7 +1,7 @@
export * as configTypes from "./config/configTypes"
import { InputSpec } from "./config/configTypes"
import { DependenciesReceipt } from "./config/setupConfig"
import { HostKind, BindOptions } from "./interfaces/Host"
import { BindOptions } from "./interfaces/Host"
import { Daemons } from "./mainFn/Daemons"
import { UrlString } from "./util/getServiceInterface"
import { ServiceInterfaceType, Signals } from "./util/utils"
@@ -10,7 +10,7 @@ export type ExportedAction = (options: {
effects: Effects
input?: Record<string, unknown>
}) => Promise<ActionResult>
export type MaybePromise<A> = A | Promise<A>
export type MaybePromise<A> = Promise<A> | A
export namespace ExpectedExports {
version: 1
/** Set configuration is called after we have modified and saved the configuration in the start9 ui. Use this to make a file for the docker to read from for configuration. */
@@ -164,19 +164,21 @@ export type ActionMetadata = {
group?: string
}
export declare const hostName: unique symbol
// asdflkjadsf.onion | 1.2.3.4
export type Hostname = string & { [hostName]: never }
/** ${scheme}://${username}@${host}:${externalPort}${suffix} */
export type AddressInfo = {
username: string | null
hostId: string
options: BindOptions
bindOptions: BindOptions
suffix: string
}
export type HostnameInfoIp = {
kind: "ip"
networkInterfaceId: string
public: boolean
hostname:
| {
kind: "ipv4" | "ipv6" | "local"
@@ -201,11 +203,13 @@ export type HostnameInfoOnion = {
export type HostnameInfo = HostnameInfoIp | HostnameInfoOnion
export type SingleHost = {
id: string
kind: "single" | "static"
hostname: HostnameInfo | null
}
export type MultiHost = {
id: string
kind: "multi"
hostnames: HostnameInfo[]
}
@@ -224,11 +228,18 @@ export type ServiceInterface = {
hasPrimary: boolean
/** Disabled interfaces do not serve, but they retain their metadata and addresses */
disabled: boolean
/** Whether or not to mask the URIs for this interface. Useful if the URIs contain sensitive information, such as a password, macaroon, or API key */
masked: boolean
/** URI Information */
addressInfo: AddressInfo
/** The network interface could be several types, something like ui, p2p, or network */
type: ServiceInterfaceType
}
export type ServiceInterfaceWithHostInfo = ServiceInterface & {
hostInfo: HostInfo
}
// prettier-ignore
export type ExposeAllServicePaths<Store, PreviousPath extends string = ""> =
Store extends Record<string, unknown> ? {[K in keyof Store & string]: ExposeAllServicePaths<Store[K], `${PreviousPath}/${K & string}`>}[keyof Store & string] :
@@ -278,18 +289,18 @@ export type Effects = {
} & BindOptions,
): Promise<void>
/** Retrieves the current hostname(s) associated with a host id */
getHostnames(options: {
getHostInfo(options: {
kind: "static" | "single"
hostId: string
serviceInterfaceId: string
packageId?: string
callback: () => void
}): Promise<[] | [Hostname]>
getHostnames(options: {
}): Promise<SingleHost>
getHostInfo(options: {
kind?: "multi"
serviceInterfaceId: string
packageId?: string
hostId: string
callback: () => void
}): Promise<Hostname[]>
}): Promise<MultiHost>
// /**
// * Run rsync between two volumes. This is used to backup data between volumes.
@@ -329,13 +340,6 @@ export type Effects = {
callback: (config: unknown, previousConfig: unknown) => void
}): Promise<SmtpValue>
getLocalHostname(): Promise<string>
getIPHostname(): Promise<string[]>
/** Get the address for another service for tor interfaces */
getServiceTorHostname(
serviceInterfaceId: ServiceInterfaceId,
packageId?: string,
): Promise<string>
/** Get the IP address of the container */
getContainerIp(): Promise<string>
/**
@@ -419,14 +423,16 @@ export type Effects = {
* @returns PEM encoded fullchain (ecdsa)
*/
getSslCertificate: (
packageId?: string,
packageId: string | null,
hostId: string,
algorithm?: "ecdsa" | "ed25519",
) => Promise<[string, string, string]>
/**
* @returns PEM encoded ssl key (ecdsa)
*/
getSslKey: (
packageId?: string,
packageId: string | null,
hostId: string,
algorithm?: "ecdsa" | "ed25519",
) => Promise<string>

View File

@@ -1,4 +1,11 @@
import { AddressInfo, Effects, Hostname, ServiceInterface } from "../types"
import {
AddressInfo,
Effects,
HostInfo,
Hostname,
HostnameInfo,
ServiceInterface,
} from "../types"
import * as regexes from "./regexes"
import { ServiceInterfaceType } from "./utils"
@@ -22,7 +29,6 @@ export type Filled = {
ipv4Hostnames: Hostname[]
ipv6Hostnames: Hostname[]
nonIpHostnames: Hostname[]
allHostnames: Hostname[]
urls: UrlString[]
onionUrls: UrlString[]
@@ -31,7 +37,6 @@ export type Filled = {
ipv4Urls: UrlString[]
ipv6Urls: UrlString[]
nonIpUrls: UrlString[]
allUrls: UrlString[]
}
export type FilledAddressInfo = AddressInfo & Filled
export type ServiceInterfaceFilled = {
@@ -44,6 +49,10 @@ export type ServiceInterfaceFilled = {
hasPrimary: boolean
/** Whether or not the interface disabled */
disabled: boolean
/** Whether or not to mask the URIs for this interface. Useful if the URIs contain sensitive information, such as a password, macaroon, or API key */
masked: boolean
/** Information about the host for this binding */
hostInfo: HostInfo
/** URI information */
addressInfo: FilledAddressInfo
/** Indicates if we are a ui/p2p/api for the kind of interface that this is representing */
@@ -62,75 +71,110 @@ const negate =
(a: A) =>
!fn(a)
const unique = <A>(values: A[]) => Array.from(new Set(values))
function stringifyHostname(info: HostnameInfo): Hostname {
let base: string
if ("kind" in info.hostname && info.hostname.kind === "domain") {
base = info.hostname.subdomain
? `${info.hostname.subdomain}.${info.hostname.domain}`
: info.hostname.domain
} else {
base = info.hostname.value
}
if (info.hostname.port && info.hostname.sslPort) {
return `${base}:${info.hostname.port}` as Hostname
} else if (info.hostname.sslPort) {
return `${base}:${info.hostname.sslPort}` as Hostname
} else if (info.hostname.port) {
return `${base}:${info.hostname.port}` as Hostname
}
return base as Hostname
}
const addressHostToUrl = (
{ options, username, suffix }: AddressInfo,
{ bindOptions, username, suffix }: AddressInfo,
host: Hostname,
): UrlString => {
const scheme = host.endsWith(".onion")
? options.scheme
: options.addSsl
? options.addSsl.scheme
: options.scheme // TODO: encode whether hostname transport is "secure"?
? bindOptions.scheme
: bindOptions.addSsl
? bindOptions.addSsl.scheme
: bindOptions.scheme // TODO: encode whether hostname transport is "secure"?
return `${scheme ? `${scheme}//` : ""}${
username ? `${username}@` : ""
}${host}${suffix}`
}
export const filledAddress = (
hostnames: Hostname[],
hostInfo: HostInfo,
addressInfo: AddressInfo,
): FilledAddressInfo => {
const toUrl = addressHostToUrl.bind(null, addressInfo)
const hostnameInfo =
hostInfo.kind == "multi"
? hostInfo.hostnames
: hostInfo.hostname
? [hostInfo.hostname]
: []
return {
...addressInfo,
hostnames,
hostnames: hostnameInfo.flatMap((h) => stringifyHostname(h)),
get onionHostnames() {
return hostnames.filter(regexes.torHostname.test)
return hostnameInfo
.filter((h) => h.kind === "onion")
.map((h) => stringifyHostname(h))
},
get localHostnames() {
return hostnames.filter(regexes.localHostname.test)
return hostnameInfo
.filter((h) => h.kind === "ip" && h.hostname.kind === "local")
.map((h) => stringifyHostname(h))
},
get ipHostnames() {
return hostnames.filter(either(regexes.ipv4.test, regexes.ipv6.test))
return hostnameInfo
.filter(
(h) =>
h.kind === "ip" &&
(h.hostname.kind === "ipv4" || h.hostname.kind === "ipv6"),
)
.map((h) => stringifyHostname(h))
},
get ipv4Hostnames() {
return hostnames.filter(regexes.ipv4.test)
return hostnameInfo
.filter((h) => h.kind === "ip" && h.hostname.kind === "ipv4")
.map((h) => stringifyHostname(h))
},
get ipv6Hostnames() {
return hostnames.filter(regexes.ipv6.test)
return hostnameInfo
.filter((h) => h.kind === "ip" && h.hostname.kind === "ipv6")
.map((h) => stringifyHostname(h))
},
get nonIpHostnames() {
return hostnames.filter(
negate(either(regexes.ipv4.test, regexes.ipv6.test)),
)
return hostnameInfo
.filter(
(h) =>
h.kind === "ip" &&
h.hostname.kind !== "ipv4" &&
h.hostname.kind !== "ipv6",
)
.map((h) => stringifyHostname(h))
},
allHostnames: hostnames,
get urls() {
return hostnames.map(toUrl)
return this.hostnames.map(toUrl)
},
get onionUrls() {
return hostnames.filter(regexes.torHostname.test).map(toUrl)
return this.onionHostnames.map(toUrl)
},
get localUrls() {
return hostnames.filter(regexes.localHostname.test).map(toUrl)
return this.localHostnames.map(toUrl)
},
get ipUrls() {
return hostnames
.filter(either(regexes.ipv4.test, regexes.ipv6.test))
.map(toUrl)
return this.ipHostnames.map(toUrl)
},
get ipv4Urls() {
return hostnames.filter(regexes.ipv4.test).map(toUrl)
return this.ipv4Hostnames.map(toUrl)
},
get ipv6Urls() {
return hostnames.filter(regexes.ipv6.test).map(toUrl)
return this.ipv6Hostnames.map(toUrl)
},
get nonIpUrls() {
return hostnames
.filter(negate(either(regexes.ipv4.test, regexes.ipv6.test)))
.map(toUrl)
},
get allUrls() {
return hostnames.map(toUrl)
return this.nonIpHostnames.map(toUrl)
},
}
}
@@ -151,9 +195,9 @@ const makeInterfaceFilled = async ({
packageId,
callback,
})
const hostIdRecord = await effects.getHostnames({
const hostInfo = await effects.getHostInfo({
packageId,
hostId: serviceInterfaceValue.addressInfo.hostId,
serviceInterfaceId: serviceInterfaceValue.id,
callback,
})
const primaryUrl = await effects.getPrimaryUrl({
@@ -165,7 +209,8 @@ const makeInterfaceFilled = async ({
const interfaceFilled: ServiceInterfaceFilled = {
...serviceInterfaceValue,
primaryUrl: primaryUrl,
addressInfo: filledAddress(hostIdRecord, serviceInterfaceValue.addressInfo),
hostInfo,
addressInfo: filledAddress(hostInfo, serviceInterfaceValue.addressInfo),
get primaryHostname() {
if (primaryUrl == null) return null
return getHostname(primaryUrl)

View File

@@ -20,19 +20,13 @@ const makeManyInterfaceFilled = async ({
})
const hostIdsRecord = Object.fromEntries(
await Promise.all(
Array.from(
new Set(
serviceInterfaceValues
.flatMap((x) => x.addressInfo)
.map((x) => x.hostId),
),
).map(
async (hostId) =>
Array.from(new Set(serviceInterfaceValues.map((x) => x.id))).map(
async (id) =>
[
hostId,
await effects.getHostnames({
id,
await effects.getHostInfo({
packageId,
hostId,
serviceInterfaceId: id,
callback,
}),
] as const,
@@ -42,9 +36,9 @@ const makeManyInterfaceFilled = async ({
const serviceInterfacesFilled: ServiceInterfaceFilled[] = await Promise.all(
serviceInterfaceValues.map(async (serviceInterfaceValue) => {
const hostIdRecord = await effects.getHostnames({
const hostInfo = await effects.getHostInfo({
packageId,
hostId: serviceInterfaceValue.addressInfo.hostId,
serviceInterfaceId: serviceInterfaceValue.id,
callback,
})
const primaryUrl = await effects.getPrimaryUrl({
@@ -55,10 +49,8 @@ const makeManyInterfaceFilled = async ({
return {
...serviceInterfaceValue,
primaryUrl: primaryUrl,
addressInfo: filledAddress(
hostIdRecord,
serviceInterfaceValue.addressInfo,
),
hostInfo,
addressInfo: filledAddress(hostInfo, serviceInterfaceValue.addressInfo),
get primaryHostname() {
if (primaryUrl == null) return null
return getHostname(primaryUrl)

View File

@@ -25,7 +25,7 @@ import {
NamedPath,
Path,
} from "../dependency/setupDependencyMounts"
import { MultiHost, SingleHost, StaticHost } from "../interfaces/Host"
import { MultiHost, Scheme, SingleHost, StaticHost } from "../interfaces/Host"
import { ServiceInterfaceBuilder } from "../interfaces/ServiceInterfaceBuilder"
import { GetServiceInterface, getServiceInterface } from "./getServiceInterface"
import {
@@ -83,6 +83,8 @@ export type Utils<
username: null | string
path: string
search: Record<string, string>
schemeOverride: { ssl: Scheme; noSsl: Scheme } | null
masked: boolean
}) => ServiceInterfaceBuilder
getSystemSmtp: () => GetSystemSmtp & WrapperOverWrite
host: {
@@ -158,6 +160,8 @@ export const createUtils = <
username: null | string
path: string
search: Record<string, string>
schemeOverride: { ssl: Scheme; noSsl: Scheme } | null
masked: boolean
}) => new ServiceInterfaceBuilder({ ...options, effects }),
childProcess,
getSystemSmtp: () =>