Refactor/project structure (#3085)

* refactor project structure

* environment-based default registry

* fix tests

* update build container

* use docker platform for iso build emulation

* simplify compat

* Fix docker platform spec in run-compat.sh

* handle riscv compat

* fix bug with dep error exists attr

* undo removal of sorting

* use qemu for iso stage

---------

Co-authored-by: Mariusz Kogen <k0gen@pm.me>
Co-authored-by: Matt Hill <mattnine@protonmail.com>
This commit is contained in:
Aiden McClelland
2025-12-22 13:39:38 -07:00
committed by GitHub
parent eda08d5b0f
commit 96ae532879
389 changed files with 744 additions and 4005 deletions

209
core/src/service/action.rs Normal file
View File

@@ -0,0 +1,209 @@
use std::collections::BTreeMap;
use std::time::Duration;
use imbl_value::json;
use crate::action::{ActionInput, ActionResult};
use crate::db::model::package::{
ActionVisibility, AllowedStatuses, TaskCondition, TaskEntry, TaskInput, TaskSeverity,
};
use crate::prelude::*;
use crate::rpc_continuations::Guid;
use crate::service::{ProcedureName, Service, ServiceActor};
use crate::util::actor::background::BackgroundJobQueue;
use crate::util::actor::{ConflictBuilder, Handler};
use crate::util::serde::is_partial_of;
use crate::{ActionId, PackageId, ReplayId};
/// Actor message: fetch the input definition/current values for one action.
pub(super) struct GetActionInput {
    id: ActionId,
}
impl Handler<GetActionInput> for ServiceActor {
    type Response = Result<Option<ActionInput>, Error>;
    // Read-only query: may run concurrently with any other actor message.
    fn conflicts_with(_: &GetActionInput) -> ConflictBuilder<Self> {
        ConflictBuilder::nothing()
    }
    async fn handle(
        &mut self,
        id: Guid,
        GetActionInput { id: action_id }: GetActionInput,
        _: &BackgroundJobQueue,
    ) -> Self::Response {
        let container = &self.0.persistent_container;
        // Ask the service's container for the action input, with a 30s timeout
        // so a hung container cannot stall the caller indefinitely.
        container
            .execute::<Option<ActionInput>>(
                id,
                ProcedureName::GetActionInput(action_id),
                Value::Null,
                Some(Duration::from_secs(30)),
            )
            .await
            .with_kind(ErrorKind::Action)
    }
}
impl Service {
    /// Fetch the input definition for `action_id`, or `Ok(None)` if the db
    /// records the action as taking no input (`has_input == false`), in which
    /// case the container is never consulted.
    pub async fn get_action_input(
        &self,
        id: Guid,
        action_id: ActionId,
    ) -> Result<Option<ActionInput>, Error> {
        // Check the declared `has_input` flag in the db snapshot first.
        if !self
            .seed
            .ctx
            .db
            .peek()
            .await
            .as_public()
            .as_package_data()
            .as_idx(&self.seed.id)
            .or_not_found(&self.seed.id)?
            .as_actions()
            .as_idx(&action_id)
            .or_not_found(&action_id)?
            .as_has_input()
            .de()?
        {
            return Ok(None);
        }
        // Delegate to the actor; outer `?` unwraps the send error, the inner
        // Result (the handler's response) is returned as-is.
        self.actor
            .send(id, GetActionInput { id: action_id })
            .await?
    }
}
pub fn update_tasks(
tasks: &mut BTreeMap<ReplayId, TaskEntry>,
package_id: &PackageId,
action_id: &ActionId,
input: &Value,
was_run: bool,
) -> bool {
let mut critical_activated = false;
tasks.retain(|_, v| {
if &v.task.package_id != package_id || &v.task.action_id != action_id {
return true;
}
if let Some(when) = &v.task.when {
match &when.condition {
TaskCondition::InputNotMatches => match &v.task.input {
Some(TaskInput::Partial { value }) => {
if is_partial_of(value, input) {
if when.once {
return !was_run;
} else {
v.active = false;
}
} else {
v.active = true;
if v.task.severity == TaskSeverity::Critical {
critical_activated = true;
}
}
}
None => {
tracing::error!("action request exists in an invalid state {:?}", v.task);
}
},
}
true
} else {
!was_run
}
});
critical_activated
}
/// Actor message: execute an action inside the service container.
pub(super) struct RunAction {
    action_id: ActionId,
    input: Value,
}
impl Handler<RunAction> for ServiceActor {
    type Response = Result<Option<ActionResult>, Error>;
    // Running an action conflicts with everything except concurrent input reads.
    fn conflicts_with(_: &RunAction) -> ConflictBuilder<Self> {
        ConflictBuilder::everything().except::<GetActionInput>()
    }
    async fn handle(
        &mut self,
        id: Guid,
        RunAction {
            ref action_id,
            input,
        }: RunAction,
        _: &BackgroundJobQueue,
    ) -> Self::Response {
        let container = &self.0.persistent_container;
        let package_id = &self.0.id;
        let pde = self
            .0
            .ctx
            .db
            .peek()
            .await
            .into_public()
            .into_package_data()
            .into_idx(package_id)
            .or_not_found(package_id)?;
        let action = pde
            .as_actions()
            .as_idx(action_id)
            .or_not_found(lazy_format!("{package_id} action {action_id}"))?
            .de()?;
        // Disabled actions may never be executed.
        if matches!(&action.visibility, ActionVisibility::Disabled(_)) {
            return Err(Error::new(
                eyre!("action {action_id} is disabled"),
                ErrorKind::Action,
            ));
        }
        // Enforce the action's declared run-state requirement against the
        // service's current started/stopped state.
        let running = pde.as_status_info().as_started().transpose_ref().is_some();
        if match action.allowed_statuses {
            AllowedStatuses::OnlyRunning => !running,
            AllowedStatuses::OnlyStopped => running,
            _ => false,
        } {
            return Err(Error::new(
                eyre!("service is not in allowed status for {action_id}"),
                ErrorKind::Action,
            ));
        }
        let result = container
            .execute::<Option<ActionResult>>(
                id.clone(),
                ProcedureName::RunAction(action_id.clone()),
                json!({
                    "input": input,
                }),
                Some(Duration::from_secs(30)),
            )
            .await
            .with_kind(ErrorKind::Action)?;
        // After a successful run, re-evaluate every installed package's tasks
        // that reference this action; if a critical task became active for a
        // package, that package is stopped.
        let package_id = package_id.clone();
        self.0
            .ctx
            .db
            .mutate(|db| {
                for (_, pde) in db.as_public_mut().as_package_data_mut().as_entries_mut()? {
                    if pde.as_tasks_mut().mutate(|tasks| {
                        Ok(update_tasks(tasks, &package_id, action_id, &input, true))
                    })? {
                        pde.as_status_info_mut().stop()?;
                    }
                }
                Ok(())
            })
            .await
            .result?;
        Ok(result)
    }
}
impl Service {
    /// Execute `action_id` with the given `input` via the service actor.
    /// Returns the action's result, or `None` if the action produced none.
    pub async fn run_action(
        &self,
        id: Guid,
        action_id: ActionId,
        input: Value,
    ) -> Result<Option<ActionResult>, Error> {
        let message = RunAction { action_id, input };
        // Outer `?` unwraps the actor send; the handler's own Result is returned.
        self.actor.send(id, message).await?
    }
}

69
core/src/service/cli.rs Normal file
View File

@@ -0,0 +1,69 @@
use std::path::{Path, PathBuf};
use std::sync::Arc;
use clap::Parser;
use imbl_value::Value;
use once_cell::sync::OnceCell;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{CallRemote, Context, Empty, call_remote_socket, yajrc};
use tokio::runtime::Runtime;
use crate::lxc::HOST_RPC_SERVER_SOCKET;
use crate::service::effects::context::EffectContext;
/// CLI flags for the in-container client.
#[derive(Debug, Default, Parser)]
pub struct ContainerClientConfig {
    /// Path to the host RPC unix socket; defaults to the standard mount point.
    #[arg(long = "socket")]
    pub socket: Option<PathBuf>,
}
/// Shared state behind `ContainerCliContext`.
pub struct ContainerCliSeed {
    socket: PathBuf,
    // Lazily-created tokio runtime, built on first use.
    runtime: OnceCell<Arc<Runtime>>,
}
/// Cheaply-clonable rpc-toolkit context for CLI calls made from inside a
/// service container back to the host.
#[derive(Clone)]
pub struct ContainerCliContext(Arc<ContainerCliSeed>);
impl ContainerCliContext {
    /// Build a context from CLI config, falling back to the default host RPC
    /// socket path when `--socket` was not supplied.
    pub fn init(cfg: ContainerClientConfig) -> Self {
        let socket = match cfg.socket {
            Some(path) => path,
            None => Path::new("/media/startos/rpc").join(HOST_RPC_SERVER_SOCKET),
        };
        Self(Arc::new(ContainerCliSeed {
            socket,
            runtime: OnceCell::new(),
        }))
    }
}
impl Context for ContainerCliContext {
    /// Lazily construct (exactly once) and hand out a shared multi-threaded
    /// tokio runtime for this context.
    fn runtime(&self) -> Option<Arc<Runtime>> {
        let rt = self.0.runtime.get_or_init(|| {
            let runtime = tokio::runtime::Builder::new_multi_thread()
                .enable_all()
                .build()
                .unwrap();
            Arc::new(runtime)
        });
        Some(Arc::clone(rt))
    }
}
impl CallRemote<EffectContext> for ContainerCliContext {
    /// Forward an RPC call over the configured unix socket to the host's
    /// effect server. Connection failures surface as INTERNAL_ERROR with the
    /// io error text attached as data.
    async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
        call_remote_socket(
            tokio::net::UnixStream::connect(&self.0.socket)
                .await
                .map_err(|e| RpcError {
                    data: Some(e.to_string().into()),
                    ..yajrc::INTERNAL_ERROR
                })?,
            method,
            params,
        )
        .await
    }
}

View File

@@ -0,0 +1,323 @@
use std::collections::BTreeSet;
use rpc_toolkit::{Context, HandlerExt, ParentHandler, from_fn_async};
use crate::action::{ActionInput, ActionResult, display_action_result};
use crate::db::model::package::{
ActionMetadata, Task, TaskCondition, TaskEntry, TaskSeverity, TaskTrigger,
};
use crate::rpc_continuations::Guid;
use crate::service::cli::ContainerCliContext;
use crate::service::effects::prelude::*;
use crate::util::serde::HandlerExtSerde;
use crate::{ActionId, PackageId, ReplayId};
/// RPC subcommand tree for action-related effects exposed to service containers.
/// `export`/`create-task` are internal-only (no CLI); the rest are reachable
/// from the container CLI via `with_call_remote`.
pub fn action_api<C: Context>() -> ParentHandler<C> {
    ParentHandler::new()
        .subcommand("export", from_fn_async(export_action).no_cli())
        .subcommand(
            "clear",
            from_fn_async(clear_actions)
                .no_display()
                .with_call_remote::<ContainerCliContext>(),
        )
        .subcommand(
            "get-input",
            from_fn_async(get_action_input)
                .with_display_serializable()
                .with_call_remote::<ContainerCliContext>(),
        )
        .subcommand(
            "run",
            from_fn_async(run_action)
                .with_display_serializable()
                .with_custom_display_fn(|args, res| display_action_result(args.params, res))
                .with_call_remote::<ContainerCliContext>(),
        )
        .subcommand("create-task", from_fn_async(create_task).no_cli())
        .subcommand(
            "clear-tasks",
            from_fn_async(clear_tasks)
                .no_display()
                .with_call_remote::<ContainerCliContext>(),
        )
}
/// Parameters for registering (or re-registering) an action's metadata.
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct ExportActionParams {
    id: ActionId,
    metadata: ActionMetadata,
}
/// Register `metadata` under `id` in the calling package's action map,
/// overwriting any previous entry for the same id.
pub async fn export_action(
    context: EffectContext,
    ExportActionParams { id, metadata }: ExportActionParams,
) -> Result<(), Error> {
    let context = context.deref()?;
    let package_id = context.seed.id.clone();
    context
        .seed
        .ctx
        .db
        .mutate(|db| {
            let actions = db
                .as_public_mut()
                .as_package_data_mut()
                .as_idx_mut(&package_id)
                .or_not_found(&package_id)?
                .as_actions_mut();
            // Read-modify-write the whole action map.
            let mut current = actions.de()?;
            current.insert(id, metadata);
            actions.ser(&current)
        })
        .await
        .result?;
    Ok(())
}
/// Parameters for `clear`: every action NOT listed in `except` is removed.
#[derive(Debug, Clone, Serialize, Deserialize, TS, Parser)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct ClearActionsParams {
    #[arg(long)]
    pub except: Vec<ActionId>,
}
/// Remove all of the calling package's registered actions except those listed.
async fn clear_actions(
    context: EffectContext,
    ClearActionsParams { except }: ClearActionsParams,
) -> Result<(), Error> {
    let keep: BTreeSet<_> = except.into_iter().collect();
    let context = context.deref()?;
    let package_id = context.seed.id.clone();
    context
        .seed
        .ctx
        .db
        .mutate(|db| {
            db.as_public_mut()
                .as_package_data_mut()
                .as_idx_mut(&package_id)
                .or_not_found(&package_id)?
                .as_actions_mut()
                .mutate(|actions| {
                    actions.retain(|action_id, _| keep.contains(action_id));
                    Ok(())
                })
        })
        .await
        .result?;
    Ok(())
}
/// Parameters for `get-input`. `package_id` defaults to the calling package.
#[derive(Debug, Clone, Serialize, Deserialize, TS, Parser)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct GetActionInputParams {
    // Internal correlation id; not exposed to TS bindings or the CLI.
    #[serde(default)]
    #[ts(skip)]
    #[arg(skip)]
    procedure_id: Guid,
    #[ts(optional)]
    package_id: Option<PackageId>,
    action_id: ActionId,
}
/// Fetch an action's input definition, either from the calling service or —
/// when `package_id` names another installed package — from that service.
async fn get_action_input(
    context: EffectContext,
    GetActionInputParams {
        procedure_id,
        package_id,
        action_id,
    }: GetActionInputParams,
) -> Result<Option<ActionInput>, Error> {
    let context = context.deref()?;
    match package_id {
        None => context.get_action_input(procedure_id, action_id).await,
        Some(package_id) => {
            let service = context.seed.ctx.services.get(&package_id).await;
            service
                .as_ref()
                .or_not_found(&package_id)?
                .get_action_input(procedure_id, action_id)
                .await
        }
    }
}
/// Parameters for `run`. `package_id` defaults to the calling package.
#[derive(Debug, Clone, Serialize, Deserialize, TS, Parser)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct RunActionParams {
    // Internal correlation id; not exposed to TS bindings or the CLI.
    #[serde(default)]
    #[ts(skip)]
    #[arg(skip)]
    procedure_id: Guid,
    #[ts(optional)]
    package_id: Option<PackageId>,
    action_id: ActionId,
    // Arbitrary JSON payload passed through to the action.
    #[ts(type = "any")]
    input: Value,
}
async fn run_action(
context: EffectContext,
RunActionParams {
procedure_id,
package_id,
action_id,
input,
}: RunActionParams,
) -> Result<Option<ActionResult>, Error> {
let context = context.deref()?;
let package_id = package_id.as_ref().unwrap_or(&context.seed.id);
if package_id != &context.seed.id {
return Err(Error::new(
eyre!("calling actions on other packages is unsupported at this time"),
ErrorKind::InvalidRequest,
));
context
.seed
.ctx
.services
.get(&package_id)
.await
.as_ref()
.or_not_found(&package_id)?
.run_action(procedure_id, action_id, input)
.await
} else {
context.run_action(procedure_id, action_id, input).await
}
}
/// Parameters for `create-task`: register a task (keyed by `replay_id`) that
/// asks the user to run an action on some package.
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct CreateTaskParams {
    // Internal correlation id; not exposed to TS bindings.
    #[serde(default)]
    #[ts(skip)]
    procedure_id: Guid,
    replay_id: ReplayId,
    // Task fields are flattened into the params object.
    #[serde(flatten)]
    task: Task,
}
/// Create (or replace) a task for the calling package. The task's initial
/// `active` state is computed from its trigger condition; an active
/// critical-severity task immediately stops the creating service.
async fn create_task(
    context: EffectContext,
    CreateTaskParams {
        procedure_id,
        replay_id,
        task,
    }: CreateTaskParams,
) -> Result<(), Error> {
    let context = context.deref()?;
    let src_id = &context.seed.id;
    // Evaluate the trigger to decide whether the task starts out active.
    let active = match &task.when {
        Some(TaskTrigger { once, condition }) => match condition {
            TaskCondition::InputNotMatches => {
                let Some(input) = task.input.as_ref() else {
                    return Err(Error::new(
                        eyre!("input-not-matches trigger requires input to be specified"),
                        ErrorKind::InvalidRequest,
                    ));
                };
                if let Some(service) = context
                    .seed
                    .ctx
                    .services
                    .get(&task.package_id)
                    .await
                    .as_ref()
                {
                    // Compare the requested input against the target action's
                    // current input values.
                    let Some(prev) = service
                        .get_action_input(procedure_id.clone(), task.action_id.clone())
                        .await?
                    else {
                        return Err(Error::new(
                            eyre!(
                                "action {} of {} has no input",
                                task.action_id,
                                task.package_id
                            ),
                            ErrorKind::InvalidRequest,
                        ));
                    };
                    if input.matches(prev.value.as_ref()) {
                        // Already satisfied: a one-shot task is pointless, so
                        // skip creating it entirely; otherwise create inactive.
                        if *once {
                            return Ok(());
                        } else {
                            false
                        }
                    } else {
                        true
                    }
                } else {
                    true // update when service is installed
                }
            }
        },
        None => true,
    };
    context
        .seed
        .ctx
        .db
        .mutate(|db| {
            let pde = db
                .as_public_mut()
                .as_package_data_mut()
                .as_idx_mut(src_id)
                .or_not_found(src_id)?;
            // A critical task that starts active stops the creating service.
            if active && task.severity == TaskSeverity::Critical {
                pde.as_status_info_mut().stop()?;
            }
            pde.as_tasks_mut()
                .insert(&replay_id, &TaskEntry { active, task })
        })
        .await
        .result?;
    Ok(())
}
/// Parameters for `clear-tasks`: remove either exactly the `only` set, or
/// everything not in the `except` set. The two flags are mutually exclusive.
#[derive(Debug, Clone, Serialize, Deserialize, TS, Parser)]
#[ts(type = "{ only: string[] } | { except: string[] }")]
#[ts(export)]
pub struct ClearTasksParams {
    #[arg(long, conflicts_with = "except")]
    pub only: Option<Vec<ReplayId>>,
    #[arg(long, conflicts_with = "only")]
    pub except: Option<Vec<ReplayId>>,
}
/// Remove tasks belonging to the calling package, filtered by the
/// `only`/`except` id sets (an unset filter imposes no restriction).
async fn clear_tasks(
    context: EffectContext,
    ClearTasksParams { only, except }: ClearTasksParams,
) -> Result<(), Error> {
    let context = context.deref()?;
    let package_id = context.seed.id.clone();
    let only = only.map(|ids| ids.into_iter().collect::<BTreeSet<_>>());
    let except = except.map(|ids| ids.into_iter().collect::<BTreeSet<_>>());
    context
        .seed
        .ctx
        .db
        .mutate(|db| {
            db.as_public_mut()
                .as_package_data_mut()
                .as_idx_mut(&package_id)
                .or_not_found(&package_id)?
                .as_tasks_mut()
                .mutate(|tasks| {
                    tasks.retain(|task_id, _| {
                        // Keep entries that are outside `only` and inside `except`.
                        let targeted = only.as_ref().map_or(false, |o| o.contains(task_id));
                        let spared = except.as_ref().map_or(true, |e| e.contains(task_id));
                        !targeted && spared
                    });
                    Ok(())
                })
        })
        .await
        .result?;
    Ok(())
}

View File

@@ -0,0 +1,319 @@
use std::cmp::min;
use std::collections::{BTreeMap, BTreeSet};
use std::sync::{Arc, Mutex, Weak};
use std::time::{Duration, SystemTime};
use clap::Parser;
use futures::future::join_all;
use imbl::{Vector, vector};
use imbl_value::InternedString;
use serde::{Deserialize, Serialize};
use tracing::warn;
use ts_rs::TS;
use crate::net::ssl::FullchainCertData;
use crate::prelude::*;
use crate::service::effects::context::EffectContext;
use crate::service::effects::net::ssl::Algorithm;
use crate::service::rpc::{CallbackHandle, CallbackId};
use crate::service::{Service, ServiceActorSeed};
use crate::util::collections::EqMap;
use crate::util::future::NonDetachingJoinHandle;
use crate::{HostId, PackageId, ServiceInterfaceId};
/// Registry of pending container callbacks, keyed by the event they watch.
/// All access goes through the interior Mutex (see `ServiceCallbacks::mutate`).
#[derive(Default)]
pub struct ServiceCallbacks(Mutex<ServiceCallbackMap>);
#[derive(Default)]
struct ServiceCallbackMap {
    get_service_interface: BTreeMap<(PackageId, ServiceInterfaceId), Vec<CallbackHandler>>,
    list_service_interfaces: BTreeMap<PackageId, Vec<CallbackHandler>>,
    get_system_smtp: Vec<CallbackHandler>,
    get_host_info: BTreeMap<(PackageId, HostId), Vec<CallbackHandler>>,
    // Keyed by the watched (hostnames, cert, algorithm) triple; the join handle
    // is the expiry-watcher task spawned in `add_get_ssl_certificate`.
    get_ssl_certificate: EqMap<
        (BTreeSet<InternedString>, FullchainCertData, Algorithm),
        (NonDetachingJoinHandle<()>, Vec<CallbackHandler>),
    >,
    get_status: BTreeMap<PackageId, Vec<CallbackHandler>>,
    get_container_ip: BTreeMap<PackageId, Vec<CallbackHandler>>,
}
impl ServiceCallbacks {
fn mutate<T>(&self, f: impl FnOnce(&mut ServiceCallbackMap) -> T) -> T {
let mut this = self.0.lock().unwrap();
f(&mut *this)
}
pub fn gc(&self) {
self.mutate(|this| {
this.get_service_interface.retain(|_, v| {
v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
!v.is_empty()
});
this.list_service_interfaces.retain(|_, v| {
v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
!v.is_empty()
});
this.get_system_smtp
.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
this.get_host_info.retain(|_, v| {
v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
!v.is_empty()
});
this.get_ssl_certificate.retain(|_, (_, v)| {
v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
!v.is_empty()
});
this.get_status.retain(|_, v| {
v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
!v.is_empty()
});
})
}
pub(super) fn add_get_service_interface(
&self,
package_id: PackageId,
service_interface_id: ServiceInterfaceId,
handler: CallbackHandler,
) {
self.mutate(|this| {
this.get_service_interface
.entry((package_id, service_interface_id))
.or_default()
.push(handler);
})
}
#[must_use]
pub fn get_service_interface(
&self,
id: &(PackageId, ServiceInterfaceId),
) -> Option<CallbackHandlers> {
self.mutate(|this| {
Some(CallbackHandlers(
this.get_service_interface.remove(id).unwrap_or_default(),
))
.filter(|cb| !cb.0.is_empty())
})
}
pub(super) fn add_list_service_interfaces(
&self,
package_id: PackageId,
handler: CallbackHandler,
) {
self.mutate(|this| {
this.list_service_interfaces
.entry(package_id)
.or_default()
.push(handler);
})
}
#[must_use]
pub fn list_service_interfaces(&self, id: &PackageId) -> Option<CallbackHandlers> {
self.mutate(|this| {
Some(CallbackHandlers(
this.list_service_interfaces.remove(id).unwrap_or_default(),
))
.filter(|cb| !cb.0.is_empty())
})
}
pub(super) fn add_get_system_smtp(&self, handler: CallbackHandler) {
self.mutate(|this| {
this.get_system_smtp.push(handler);
})
}
#[must_use]
pub fn get_system_smtp(&self) -> Option<CallbackHandlers> {
self.mutate(|this| {
Some(CallbackHandlers(std::mem::take(&mut this.get_system_smtp)))
.filter(|cb| !cb.0.is_empty())
})
}
pub(super) fn add_get_host_info(
&self,
package_id: PackageId,
host_id: HostId,
handler: CallbackHandler,
) {
self.mutate(|this| {
this.get_host_info
.entry((package_id, host_id))
.or_default()
.push(handler);
})
}
#[must_use]
pub fn get_host_info(&self, id: &(PackageId, HostId)) -> Option<CallbackHandlers> {
self.mutate(|this| {
Some(CallbackHandlers(
this.get_host_info.remove(id).unwrap_or_default(),
))
.filter(|cb| !cb.0.is_empty())
})
}
pub(super) fn add_get_ssl_certificate(
&self,
ctx: EffectContext,
hostnames: BTreeSet<InternedString>,
cert: FullchainCertData,
algorithm: Algorithm,
handler: CallbackHandler,
) {
self.mutate(|this| {
this.get_ssl_certificate
.entry((hostnames.clone(), cert.clone(), algorithm))
.or_insert_with(|| {
(
tokio::spawn(async move {
if let Err(e) = async {
loop {
match cert
.expiration()
.ok()
.and_then(|e| e.duration_since(SystemTime::now()).ok())
{
Some(d) => {
tokio::time::sleep(min(Duration::from_secs(86400), d))
.await
}
_ => break,
}
}
let Ok(ctx) = ctx.deref() else {
return Ok(());
};
if let Some((_, callbacks)) =
ctx.seed.ctx.callbacks.mutate(|this| {
this.get_ssl_certificate
.remove(&(hostnames, cert, algorithm))
})
{
CallbackHandlers(callbacks).call(vector![]).await?;
}
Ok::<_, Error>(())
}
.await
{
tracing::error!(
"Error in callback handler for getSslCertificate: {e}"
);
tracing::debug!("{e:?}");
}
})
.into(),
Vec::new(),
)
})
.1
.push(handler);
})
}
pub(super) fn add_get_status(&self, package_id: PackageId, handler: CallbackHandler) {
self.mutate(|this| this.get_status.entry(package_id).or_default().push(handler))
}
#[must_use]
pub fn get_status(&self, package_id: &PackageId) -> Option<CallbackHandlers> {
self.mutate(|this| {
if let Some(watched) = this.get_status.remove(package_id) {
Some(CallbackHandlers(watched))
} else {
None
}
.filter(|cb| !cb.0.is_empty())
})
}
pub(super) fn add_get_container_ip(&self, package_id: PackageId, handler: CallbackHandler) {
self.mutate(|this| {
this.get_container_ip
.entry(package_id)
.or_default()
.push(handler)
})
}
#[must_use]
pub fn get_container_ip(&self, package_id: &PackageId) -> Option<CallbackHandlers> {
self.mutate(|this| {
this.get_container_ip
.remove(package_id)
.map(CallbackHandlers)
.filter(|cb| !cb.0.is_empty())
})
}
}
/// A registered container callback plus a weak handle to its owning service.
pub struct CallbackHandler {
    handle: CallbackHandle,
    // Weak so a pending callback does not keep a destroyed service alive.
    seed: Weak<ServiceActorSeed>,
}
impl CallbackHandler {
    pub fn new(service: &Service, handle: CallbackHandle) -> Self {
        Self {
            handle,
            seed: Arc::downgrade(&service.seed),
        }
    }
    /// Fire the callback with `args` if the service still exists; a dropped
    /// service makes this a no-op. `take()` deactivates the handle so Drop
    /// does not warn afterwards.
    pub async fn call(mut self, args: Vector<Value>) -> Result<(), Error> {
        if let Some(seed) = self.seed.upgrade() {
            seed.persistent_container
                .callback(self.handle.take(), args)
                .await?;
        }
        Ok(())
    }
}
impl Drop for CallbackHandler {
    // Dropping an un-fired, still-active handler indicates a registry leak.
    fn drop(&mut self) {
        if self.handle.is_active() {
            warn!("Callback handler dropped while still active!");
        }
    }
}
/// A batch of callback handlers fired together with the same arguments.
pub struct CallbackHandlers(Vec<CallbackHandler>);
impl CallbackHandlers {
    /// Invoke every handler concurrently, collecting all failures into one
    /// aggregate error instead of stopping at the first.
    pub async fn call(self, args: Vector<Value>) -> Result<(), Error> {
        let mut err = ErrorCollection::new();
        for res in join_all(self.0.into_iter().map(|cb| cb.call(args.clone()))).await {
            err.handle(res);
        }
        err.into_result()
    }
}
/// Parameters for clearing callbacks: remove exactly the `only` set, or
/// everything not in the `except` set. The two flags are mutually exclusive.
#[derive(Debug, Clone, Serialize, Deserialize, TS, Parser)]
#[ts(type = "{ only: number[] } | { except: number[] }")]
#[ts(export)]
pub struct ClearCallbacksParams {
    #[arg(long, conflicts_with = "except")]
    pub only: Option<Vec<CallbackId>>,
    #[arg(long, conflicts_with = "only")]
    pub except: Option<Vec<CallbackId>>,
}
/// Deregister the calling service's callbacks, filtered by the `only`/`except`
/// id sets, then garbage-collect the global registry.
pub(super) fn clear_callbacks(
    context: EffectContext,
    ClearCallbacksParams { only, except }: ClearCallbacksParams,
) -> Result<(), Error> {
    let context = context.deref()?;
    let only = only.map(|ids| ids.into_iter().collect::<BTreeSet<_>>());
    let except = except.map(|ids| ids.into_iter().collect::<BTreeSet<_>>());
    context.seed.persistent_container.state.send_modify(|state| {
        state.callbacks.retain(|cb| {
            // Keep callbacks outside `only` and inside `except`.
            let targeted = only.as_ref().map_or(false, |o| o.contains(cb));
            let spared = except.as_ref().map_or(true, |e| e.contains(cb));
            !targeted && spared
        })
    });
    context.seed.ctx.callbacks.gc();
    Ok(())
}

View File

@@ -0,0 +1,27 @@
use std::sync::{Arc, Weak};
use rpc_toolkit::Context;
use crate::prelude::*;
use crate::service::Service;
/// RPC context handed to effect handlers: a weak reference to the service so
/// pending effects do not keep a destroyed service alive.
#[derive(Clone)]
pub(in crate::service) struct EffectContext(Weak<Service>);
impl EffectContext {
    pub fn new(service: Weak<Service>) -> Self {
        Self(service)
    }
}
// Marker impl: relies entirely on rpc_toolkit's default Context behavior.
impl Context for EffectContext {}
impl EffectContext {
pub(super) fn deref(&self) -> Result<Arc<Service>, Error> {
if let Some(seed) = Weak::upgrade(&self.0) {
Ok(seed)
} else {
Err(Error::new(
eyre!("Service has already been destroyed"),
ErrorKind::InvalidRequest,
))
}
}
}

View File

@@ -0,0 +1,173 @@
use std::str::FromStr;
use chrono::Utc;
use clap::builder::ValueParserFactory;
use crate::PackageId;
use crate::service::RebuildParams;
use crate::service::effects::prelude::*;
use crate::service::rpc::CallbackId;
use crate::status::{DesiredStatus, StatusInfo};
use crate::util::FromStrParser;
/// Trigger an asynchronous rebuild of the calling service's container.
/// Returns immediately; the rebuild runs (and logs its own errors) in a
/// detached task.
pub async fn rebuild(context: EffectContext) -> Result<(), Error> {
    let seed = context.deref()?.seed.clone();
    let ctx = seed.ctx.clone();
    let id = seed.id.clone();
    // Drop the seed before spawning so the background task does not hold the
    // service alive through the rebuild.
    drop(seed);
    tokio::spawn(async move {
        super::super::rebuild(ctx, RebuildParams { id })
            .await
            .log_err()
    });
    Ok(())
}
/// Request a restart of the calling service by updating its desired status
/// in the db; the actual restart is driven by whatever reconciles that state.
pub async fn restart(context: EffectContext) -> Result<(), Error> {
    let context = context.deref()?;
    let id = &context.seed.id;
    context
        .seed
        .ctx
        .db
        .mutate(|db| {
            db.as_public_mut()
                .as_package_data_mut()
                .as_idx_mut(id)
                .or_not_found(id)?
                .as_status_info_mut()
                .as_desired_mut()
                .map_mutate(|s| Ok(s.restart()))
        })
        .await
        .result?;
    Ok(())
}
/// Request a stop of the calling service by updating its status info in the
/// db; the actual stop is driven by whatever reconciles that state.
pub async fn shutdown(context: EffectContext) -> Result<(), Error> {
    let context = context.deref()?;
    let id = &context.seed.id;
    context
        .seed
        .ctx
        .db
        .mutate(|db| {
            db.as_public_mut()
                .as_package_data_mut()
                .as_idx_mut(id)
                .or_not_found(id)?
                .as_status_info_mut()
                .stop()
        })
        .await
        .result?;
    Ok(())
}
/// Parameters for `get_status`. `package_id` defaults to the calling package;
/// an optional callback is fired when the status later changes.
#[derive(Debug, Clone, Serialize, Deserialize, TS, Parser)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct GetStatusParams {
    #[ts(optional)]
    pub package_id: Option<PackageId>,
    #[ts(optional)]
    #[arg(skip)]
    pub callback: Option<CallbackId>,
}
/// Return the current status of a package, optionally registering a callback
/// to be notified of future status changes. The db snapshot is taken before
/// the callback is registered.
pub async fn get_status(
    context: EffectContext,
    GetStatusParams {
        package_id,
        callback,
    }: GetStatusParams,
) -> Result<StatusInfo, Error> {
    let context = context.deref()?;
    let id = package_id.unwrap_or_else(|| context.seed.id.clone());
    let db = context.seed.ctx.db.peek().await;
    if let Some(callback) = callback {
        // Register the watcher before deserializing so no change is missed
        // between read and registration. NOTE(review): assumes callbacks fire
        // on any later mutation of this package's status — confirm in the
        // status-change path.
        let callback = callback.register(&context.seed.persistent_container);
        context.seed.ctx.callbacks.add_get_status(
            id.clone(),
            super::callbacks::CallbackHandler::new(&context, callback),
        );
    }
    let status = db
        .as_public()
        .as_package_data()
        .as_idx(&id)
        .or_not_found(&id)?
        .as_status_info()
        .de()?;
    Ok(status)
}
/// Main-status value a service can self-report: running or stopped.
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub enum SetMainStatusStatus {
    Running,
    Stopped,
}
impl FromStr for SetMainStatusStatus {
    type Err = color_eyre::eyre::Report;
    /// Parse the lowercase CLI spelling: "running" or "stopped".
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        if s == "running" {
            Ok(Self::Running)
        } else if s == "stopped" {
            Ok(Self::Stopped)
        } else {
            Err(eyre!("unknown status {s}"))
        }
    }
}
// Let clap parse this enum on the CLI via its FromStr impl.
impl ValueParserFactory for SetMainStatusStatus {
    type Parser = FromStrParser<Self>;
    fn value_parser() -> Self::Parser {
        FromStrParser::new()
    }
}
/// Parameters for a service self-reporting its main status.
#[derive(Debug, Clone, Serialize, Deserialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct SetMainStatus {
    status: SetMainStatusStatus,
}
/// Record the service's self-reported main status. Sets/clears the `started`
/// timestamp; on a stopped→running transition, a desired status of
/// `Restarting` is promoted back to `Running` (the restart has completed).
pub async fn set_main_status(
    context: EffectContext,
    SetMainStatus { status }: SetMainStatus,
) -> Result<(), Error> {
    let context = context.deref()?;
    let id = &context.seed.id;
    context
        .seed
        .ctx
        .db
        .mutate(|db| {
            let s = db
                .as_public_mut()
                .as_package_data_mut()
                .as_idx_mut(id)
                .or_not_found(id)?
                .as_status_info_mut();
            // `replace` returns the previous `started` value so we can detect
            // the stopped→running edge below.
            let prev = s.as_started_mut().replace(&match status {
                SetMainStatusStatus::Running => Some(Utc::now()),
                SetMainStatusStatus::Stopped => None,
            })?;
            if prev.is_none() && status == SetMainStatusStatus::Running {
                s.as_desired_mut().map_mutate(|s| {
                    Ok(match s {
                        DesiredStatus::Restarting => DesiredStatus::Running,
                        x => x,
                    })
                })?;
            }
            Ok(())
        })
        .await
        .result?;
    Ok(())
}

View File

@@ -0,0 +1,369 @@
use std::collections::{BTreeMap, BTreeSet};
use std::path::PathBuf;
use std::str::FromStr;
use clap::builder::ValueParserFactory;
use exver::VersionRange;
use imbl_value::InternedString;
use crate::db::model::package::{
CurrentDependencies, CurrentDependencyInfo, CurrentDependencyKind, ManifestPreference,
TaskEntry,
};
use crate::disk::mount::filesystem::bind::{Bind, FileType};
use crate::disk::mount::filesystem::idmapped::{IdMap, IdMapped};
use crate::disk::mount::filesystem::{FileSystem, MountType};
use crate::disk::mount::util::{is_mountpoint, unmount};
use crate::service::effects::prelude::*;
use crate::status::health_check::NamedHealthCheckResult;
use crate::util::{FromStrParser, VersionString};
use crate::volume::data_dir;
use crate::{DATA_DIR, HealthCheckId, PackageId, ReplayId, VolumeId};
/// Source of a bind mount: a subpath of another package's volume.
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct MountTarget {
    package_id: PackageId,
    volume_id: VolumeId,
    subpath: Option<PathBuf>,
    readonly: bool,
    // Server-side only: never accepted from the wire (skip_deserializing).
    #[serde(skip_deserializing)]
    #[ts(skip)]
    filetype: FileType,
    #[serde(default)]
    idmap: Vec<IdMap>,
}
/// Parameters for `mount`: bind `target` at `location` inside the container.
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct MountParams {
    location: PathBuf,
    target: MountTarget,
}
/// Bind-mount a (possibly foreign) volume subpath into the calling service's
/// container rootfs, replacing any mount already present at the location.
pub async fn mount(
    context: EffectContext,
    MountParams {
        location,
        target:
            MountTarget {
                package_id,
                volume_id,
                subpath,
                readonly,
                filetype,
                idmap,
            },
    }: MountParams,
) -> Result<(), Error> {
    let context = context.deref()?;
    let subpath = subpath.unwrap_or_default();
    // Strip leading "/" so the join below stays inside the volume dir.
    let subpath = subpath.strip_prefix("/").unwrap_or(&subpath);
    let source = data_dir(DATA_DIR, &package_id, &volume_id).join(subpath);
    let location = location.strip_prefix("/").unwrap_or(&location);
    let mountpoint = context
        .seed
        .persistent_container
        .lxc_container
        .get()
        .or_not_found("lxc container")?
        .rootfs_dir()
        .join(location);
    // Replace rather than stack: unmount anything already mounted there.
    if is_mountpoint(&mountpoint).await? {
        unmount(&mountpoint, true).await?;
    }
    // Base id mapping 0->100000 (range 65536), with the caller's idmap stacked
    // on top. NOTE(review): assumes 100000/65536 matches the container's
    // unprivileged uid range — confirm against the lxc config.
    IdMapped::new(
        Bind::new(source).with_type(filetype),
        IdMap::stack(
            vec![IdMap {
                from_id: 0,
                to_id: 100000,
                range: 65536,
            }],
            idmap,
        ),
    )
    .mount(
        mountpoint,
        if readonly {
            MountType::ReadOnly
        } else {
            MountType::ReadWrite
        },
    )
    .await?;
    Ok(())
}
/// List the ids of all packages currently recorded in the db.
pub async fn get_installed_packages(context: EffectContext) -> Result<BTreeSet<PackageId>, Error> {
    let db = context.deref()?.seed.ctx.db.peek().await;
    db.into_public().into_package_data().keys()
}
/// A dependency a service declares on another package: either merely
/// installed (`Exists`) or installed and running with the given health checks
/// passing (`Running`).
#[derive(Debug, Clone, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase", tag = "kind")]
#[serde(rename_all_fields = "camelCase")]
#[ts(export)]
pub enum DependencyRequirement {
    Running {
        id: PackageId,
        health_checks: BTreeSet<HealthCheckId>,
        #[ts(type = "string")]
        version_range: VersionRange,
    },
    Exists {
        id: PackageId,
        #[ts(type = "string")]
        version_range: VersionRange,
    },
}
// filebrowser:exists,bitcoind:running:foo+bar+baz
impl FromStr for DependencyRequirement {
    type Err = Error;
    /// Parse the CLI shorthand `id[:kind[:check+check+...]]`, where kind is
    /// `e`/`exists` or `r`/`running`. A bare `id` means running with no
    /// health checks.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let Some((id, rest)) = s.split_once(':') else {
            return Ok(Self::Running {
                id: s.parse()?,
                health_checks: BTreeSet::new(),
                version_range: "*".parse()?, // TODO
            });
        };
        if rest == "e" || rest == "exists" {
            return Ok(Self::Exists {
                id: id.parse()?,
                version_range: "*".parse()?, // TODO
            });
        }
        let health_checks = match rest.split_once(':') {
            Some(("r", checks)) | Some(("running", checks)) => checks
                .split('+')
                .map(|id| id.parse().map_err(Error::from))
                .collect(),
            Some((kind, _)) => Err(Error::new(
                eyre!("unknown dependency kind {kind}"),
                ErrorKind::InvalidRequest,
            )),
            None => match rest {
                "r" | "running" => Ok(BTreeSet::new()),
                kind => Err(Error::new(
                    eyre!("unknown dependency kind {kind}"),
                    ErrorKind::InvalidRequest,
                )),
            },
        }?;
        Ok(Self::Running {
            id: id.parse()?,
            health_checks,
            version_range: "*".parse()?, // TODO
        })
    }
}
// Let clap parse the dependency shorthand on the CLI via its FromStr impl.
impl ValueParserFactory for DependencyRequirement {
    type Parser = FromStrParser<Self>;
    fn value_parser() -> Self::Parser {
        FromStrParser::new()
    }
}
/// Parameters for `set_dependencies`: the complete replacement dependency list.
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "camelCase")]
#[ts(export)]
pub struct SetDependenciesParams {
    dependencies: Vec<DependencyRequirement>,
}
/// Replace the calling package's current-dependency map wholesale, enriching
/// each entry with the title/icon declared in the s9pk's dependency metadata.
pub async fn set_dependencies(
    context: EffectContext,
    SetDependenciesParams { dependencies }: SetDependenciesParams,
) -> Result<(), Error> {
    let context = context.deref()?;
    let id = &context.seed.id;
    let mut deps = BTreeMap::new();
    for dependency in dependencies {
        let (dep_id, kind, version_range) = match dependency {
            DependencyRequirement::Exists { id, version_range } => {
                (id, CurrentDependencyKind::Exists, version_range)
            }
            DependencyRequirement::Running {
                id,
                health_checks,
                version_range,
            } => (
                id,
                CurrentDependencyKind::Running { health_checks },
                version_range,
            ),
        };
        // Pull display metadata (title, icon) from the package's own s9pk.
        let info = CurrentDependencyInfo {
            title: context
                .seed
                .persistent_container
                .s9pk
                .dependency_metadata(&dep_id)
                .await?
                .map(|m| m.title),
            icon: context
                .seed
                .persistent_container
                .s9pk
                .dependency_icon_data_url(&dep_id)
                .await?,
            kind,
            version_range,
        };
        deps.insert(dep_id, info);
    }
    context
        .seed
        .ctx
        .db
        .mutate(|db| {
            db.as_public_mut()
                .as_package_data_mut()
                .as_idx_mut(id)
                .or_not_found(id)?
                .as_current_dependencies_mut()
                .ser(&CurrentDependencies(deps))
        })
        .await
        .result
}
/// Read back the calling package's current dependencies as requirements
/// (dropping the stored display metadata).
pub async fn get_dependencies(context: EffectContext) -> Result<Vec<DependencyRequirement>, Error> {
    let context = context.deref()?;
    let id = &context.seed.id;
    let db = context.seed.ctx.db.peek().await;
    let current = db
        .as_public()
        .as_package_data()
        .as_idx(id)
        .or_not_found(id)?
        .as_current_dependencies()
        .de()?;
    let mut requirements = Vec::with_capacity(current.0.len());
    for (dep_id, info) in current.0 {
        let CurrentDependencyInfo {
            version_range,
            kind,
            ..
        } = info;
        let requirement = match kind {
            CurrentDependencyKind::Exists => DependencyRequirement::Exists {
                id: dep_id,
                version_range,
            },
            CurrentDependencyKind::Running { health_checks } => DependencyRequirement::Running {
                id: dep_id,
                health_checks,
                version_range,
            },
        };
        requirements.push(requirement);
    }
    Ok(requirements)
}
/// Parameters for `check_dependencies`; `package_ids` defaults to every
/// currently-declared dependency of the caller.
#[derive(Debug, Clone, Serialize, Deserialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct CheckDependenciesParam {
    #[ts(optional)]
    package_ids: Option<Vec<PackageId>>,
}
/// Snapshot of one dependency's state as seen by the caller.
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct CheckDependenciesResult {
    package_id: PackageId,
    #[ts(type = "string | null")]
    title: Option<InternedString>,
    // None when the dependency is not installed.
    installed_version: Option<VersionString>,
    satisfies: BTreeSet<VersionString>,
    is_running: bool,
    // The caller's tasks that target this dependency.
    tasks: BTreeMap<ReplayId, TaskEntry>,
    health_checks: BTreeMap<HealthCheckId, NamedHealthCheckResult>,
}
/// Report the installed/running/health state of the caller's dependencies.
/// Ids not present in the caller's current-dependency map are silently
/// skipped; uninstalled dependencies yield a result with no version/health.
pub async fn check_dependencies(
    context: EffectContext,
    CheckDependenciesParam { package_ids }: CheckDependenciesParam,
) -> Result<Vec<CheckDependenciesResult>, Error> {
    let context = context.deref()?;
    let db = context.seed.ctx.db.peek().await;
    let pde = db
        .as_public()
        .as_package_data()
        .as_idx(&context.seed.id)
        .or_not_found(&context.seed.id)?;
    let current_dependencies = pde.as_current_dependencies().de()?;
    let tasks = pde.as_tasks().de()?;
    // Restrict to ids the caller actually declares as dependencies.
    let package_dependency_info: Vec<_> = package_ids
        .unwrap_or_else(|| current_dependencies.0.keys().cloned().collect())
        .into_iter()
        .filter_map(|x| {
            let info = current_dependencies.0.get(&x)?;
            Some((x, info))
        })
        .collect();
    let mut results = Vec::with_capacity(package_dependency_info.len());
    for (package_id, dependency_info) in package_dependency_info {
        let title = dependency_info.title.clone();
        // Dependency not installed: report an empty snapshot (plus any of the
        // caller's tasks targeting it) and move on.
        let Some(package) = db.as_public().as_package_data().as_idx(&package_id) else {
            let tasks = tasks
                .iter()
                .filter(|(_, v)| v.task.package_id == package_id)
                .map(|(k, v)| (k.clone(), v.clone()))
                .collect();
            results.push(CheckDependenciesResult {
                package_id,
                title,
                installed_version: None,
                satisfies: BTreeSet::new(),
                is_running: false,
                tasks,
                health_checks: Default::default(),
            });
            continue;
        };
        let manifest = package.as_state_info().as_manifest(ManifestPreference::New);
        let installed_version = manifest.as_version().de()?.into_version();
        let satisfies = manifest.as_satisfies().de()?;
        let installed_version = Some(installed_version.clone().into());
        let is_running = package
            .as_status_info()
            .as_started()
            .transpose_ref()
            .is_some();
        let health_checks = package.as_status_info().as_health().de()?;
        let tasks = tasks
            .iter()
            .filter(|(_, v)| v.task.package_id == package_id)
            .map(|(k, v)| (k.clone(), v.clone()))
            .collect();
        results.push(CheckDependenciesResult {
            package_id,
            title,
            installed_version,
            satisfies,
            is_running,
            tasks,
            health_checks,
        });
    }
    Ok(results)
}

View File

@@ -0,0 +1,36 @@
use crate::HealthCheckId;
use crate::service::effects::prelude::*;
use crate::status::health_check::NamedHealthCheckResult;
/// Parameters for the `set-health` effect.
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct SetHealth {
    // Which of the service's health checks this result belongs to.
    id: HealthCheckId,
    // Result fields are flattened into the same JSON object as `id`.
    #[serde(flatten)]
    result: NamedHealthCheckResult,
}
/// Effect: record the result of one of this service's health checks in the
/// db, keyed by health-check id.
pub async fn set_health(
    context: EffectContext,
    SetHealth { id, result }: SetHealth,
) -> Result<(), Error> {
    let context = context.deref()?;
    let package_id = &context.seed.id;
    let res = context
        .seed
        .ctx
        .db
        .mutate(move |db| {
            let pkg = db
                .as_public_mut()
                .as_package_data_mut()
                .as_idx_mut(package_id)
                .or_not_found(package_id)?;
            pkg.as_status_info_mut()
                .as_health_mut()
                .insert(&id, &result)
        })
        .await;
    res.result?;
    Ok(())
}

View File

@@ -0,0 +1,190 @@
use std::net::Ipv4Addr;
use rpc_toolkit::{Context, HandlerExt, ParentHandler, from_fn, from_fn_async, from_fn_blocking};
use crate::prelude::*;
use crate::service::cli::ContainerCliContext;
use crate::service::effects::context::EffectContext;
use crate::{HOST_IP, echo};
mod action;
pub mod callbacks;
pub mod context;
mod control;
mod dependency;
mod health;
mod net;
mod prelude;
mod subcontainer;
mod system;
mod version;
/// Builds the RPC handler tree exposed to service containers ("effects").
///
/// Subcommands are grouped by module: action, callbacks, control,
/// dependency, health, subcontainer, net, version, and system. Handlers
/// marked `.no_cli()` are reachable only over RPC; those with
/// `.with_call_remote::<ContainerCliContext>()` can also be invoked from the
/// in-container CLI.
pub fn handler<C: Context>() -> ParentHandler<C> {
    ParentHandler::new()
        .subcommand("git-info", from_fn(|_: C| crate::version::git_info()))
        .subcommand(
            "echo",
            from_fn(echo::<EffectContext>).with_call_remote::<ContainerCliContext>(),
        )
        // action
        .subcommand("action", action::action_api::<C>())
        // callbacks
        .subcommand(
            "clear-callbacks",
            from_fn(callbacks::clear_callbacks).no_cli(),
        )
        // control
        .subcommand(
            "rebuild",
            from_fn_async(control::rebuild)
                .no_display()
                .with_call_remote::<ContainerCliContext>(),
        )
        .subcommand(
            "restart",
            from_fn_async(control::restart)
                .no_display()
                .with_call_remote::<ContainerCliContext>(),
        )
        .subcommand(
            "shutdown",
            from_fn_async(control::shutdown)
                .no_display()
                .with_call_remote::<ContainerCliContext>(),
        )
        .subcommand(
            "set-main-status",
            from_fn_async(control::set_main_status)
                .no_display()
                .with_call_remote::<ContainerCliContext>(),
        )
        .subcommand(
            "get-status",
            from_fn_async(control::get_status)
                .no_display()
                .with_call_remote::<ContainerCliContext>(),
        )
        // dependency
        .subcommand(
            "set-dependencies",
            from_fn_async(dependency::set_dependencies)
                .no_display()
                .with_call_remote::<ContainerCliContext>(),
        )
        .subcommand(
            "get-dependencies",
            from_fn_async(dependency::get_dependencies)
                .no_display()
                .with_call_remote::<ContainerCliContext>(),
        )
        .subcommand(
            "check-dependencies",
            from_fn_async(dependency::check_dependencies)
                .no_display()
                .with_call_remote::<ContainerCliContext>(),
        )
        .subcommand("mount", from_fn_async(dependency::mount).no_cli())
        .subcommand(
            "get-installed-packages",
            from_fn_async(dependency::get_installed_packages).no_cli(),
        )
        // health
        .subcommand("set-health", from_fn_async(health::set_health).no_cli())
        // subcontainer
        .subcommand(
            "subcontainer",
            ParentHandler::<C>::new()
                .subcommand(
                    "launch",
                    from_fn_blocking(subcontainer::launch).no_display(),
                )
                .subcommand(
                    "launch-init",
                    from_fn_blocking(subcontainer::launch_init).no_display(),
                )
                .subcommand("exec", from_fn_blocking(subcontainer::exec).no_display())
                .subcommand(
                    "exec-command",
                    from_fn_blocking(subcontainer::exec_command).no_display(),
                )
                .subcommand(
                    "create-fs",
                    from_fn_async(subcontainer::create_subcontainer_fs)
                        .with_custom_display_fn(|_, (path, _)| Ok(println!("{}", path.display())))
                        .with_call_remote::<ContainerCliContext>(),
                )
                .subcommand(
                    "destroy-fs",
                    from_fn_async(subcontainer::destroy_subcontainer_fs)
                        .no_display()
                        .with_call_remote::<ContainerCliContext>(),
                ),
        )
        // net
        .subcommand("bind", from_fn_async(net::bind::bind).no_cli())
        .subcommand(
            "get-service-port-forward",
            from_fn_async(net::bind::get_service_port_forward).no_cli(),
        )
        .subcommand(
            "clear-bindings",
            from_fn_async(net::bind::clear_bindings).no_cli(),
        )
        .subcommand(
            "get-host-info",
            from_fn_async(net::host::get_host_info).no_cli(),
        )
        .subcommand(
            "get-container-ip",
            from_fn_async(net::info::get_container_ip).no_cli(),
        )
        .subcommand(
            "get-os-ip",
            from_fn(|_: C| Ok::<_, Error>(Ipv4Addr::from(HOST_IP))),
        )
        .subcommand(
            "export-service-interface",
            from_fn_async(net::interface::export_service_interface).no_cli(),
        )
        .subcommand(
            "get-service-interface",
            from_fn_async(net::interface::get_service_interface).no_cli(),
        )
        .subcommand(
            "list-service-interfaces",
            from_fn_async(net::interface::list_service_interfaces).no_cli(),
        )
        .subcommand(
            "clear-service-interfaces",
            from_fn_async(net::interface::clear_service_interfaces).no_cli(),
        )
        .subcommand(
            "get-ssl-certificate",
            from_fn_async(net::ssl::get_ssl_certificate).no_cli(),
        )
        .subcommand("get-ssl-key", from_fn_async(net::ssl::get_ssl_key).no_cli())
        .subcommand(
            "set-data-version",
            from_fn_async(version::set_data_version)
                .no_display()
                .with_call_remote::<ContainerCliContext>(),
        )
        .subcommand(
            "get-data-version",
            from_fn_async(version::get_data_version)
                .with_custom_display_fn(|_, v| {
                    if let Some(v) = v {
                        println!("{v}")
                    } else {
                        println!("N/A")
                    }
                    Ok(())
                })
                .with_call_remote::<ContainerCliContext>(),
        )
        // system
        .subcommand(
            "get-system-smtp",
            from_fn_async(system::get_system_smtp).no_cli(),
        )
}

View File

@@ -0,0 +1,93 @@
use crate::net::host::binding::{BindId, BindOptions, NetInfo};
use crate::service::effects::prelude::*;
use crate::{HostId, PackageId};
/// Parameters for the `bind` effect.
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct BindParams {
    // Host of this service to bind on.
    id: HostId,
    // Port the service listens on inside its container.
    internal_port: u16,
    // Options are flattened into the same JSON object as the fields above.
    #[serde(flatten)]
    options: BindOptions,
}
/// Effect: bind `internal_port` on host `id` for this service, according to
/// `options`.
pub async fn bind(
    context: EffectContext,
    BindParams {
        id,
        internal_port,
        options,
    }: BindParams,
) -> Result<(), Error> {
    let context = context.deref()?;
    let net_service = &context.seed.persistent_container.net_service;
    net_service.bind(id, internal_port, options).await
}
/// Parameters for the `clear-bindings` effect.
#[derive(Debug, Clone, Serialize, Deserialize, TS, Parser)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct ClearBindingsParams {
    // Bindings to keep; everything else is cleared.
    #[serde(default)]
    pub except: Vec<BindId>,
}
/// Effect: drop all of this service's port bindings except those listed in
/// `except`.
pub async fn clear_bindings(
    context: EffectContext,
    ClearBindingsParams { except }: ClearBindingsParams,
) -> Result<(), Error> {
    let context = context.deref()?;
    let keep = except.into_iter().collect();
    context
        .seed
        .persistent_container
        .net_service
        .clear_bindings(keep)
        .await
        .map(|_| ())
}
/// Parameters for the `get-service-port-forward` effect.
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct GetServicePortForwardParams {
    // Defaults to the calling service's own package when omitted.
    #[ts(optional)]
    package_id: Option<PackageId>,
    host_id: HostId,
    internal_port: u16,
}
/// Effect: look up the network info for an existing binding of
/// `internal_port` on `host_id` of `package_id` (default: self).
///
/// Fails with `NotFound` if the package, host, or binding does not exist.
pub async fn get_service_port_forward(
    context: EffectContext,
    GetServicePortForwardParams {
        package_id,
        host_id,
        internal_port,
    }: GetServicePortForwardParams,
) -> Result<NetInfo, Error> {
    let context = context.deref()?;
    let package_id = package_id.unwrap_or_else(|| context.seed.id.clone());
    let db = context.seed.ctx.db.peek().await;
    let host = db
        .as_public()
        .as_package_data()
        .as_idx(&package_id)
        .or_not_found(&package_id)?
        .as_hosts()
        .as_idx(&host_id)
        .or_not_found(&host_id)?;
    let bindings = host.as_bindings().de()?;
    let binding = bindings
        .get(&internal_port)
        .or_not_found(lazy_format!("binding for port {internal_port}"))?;
    Ok(binding.net.clone())
}

View File

@@ -0,0 +1,47 @@
use crate::net::host::Host;
use crate::service::effects::callbacks::CallbackHandler;
use crate::service::effects::prelude::*;
use crate::service::rpc::CallbackId;
use crate::{HostId, PackageId};
/// Parameters for the `get-host-info` effect.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct GetHostInfoParams {
    host_id: HostId,
    // Defaults to the calling service's own package when omitted.
    #[ts(optional)]
    package_id: Option<PackageId>,
    // When present, registered to fire on future host-info changes.
    #[ts(optional)]
    callback: Option<CallbackId>,
}
/// Effect: fetch a host record of `package_id` (default: self) by id,
/// optionally registering a callback for future updates.
///
/// Returns `Ok(None)` when the package or host does not exist.
pub async fn get_host_info(
    context: EffectContext,
    GetHostInfoParams {
        host_id,
        package_id,
        callback,
    }: GetHostInfoParams,
) -> Result<Option<Host>, Error> {
    let context = context.deref()?;
    let db = context.seed.ctx.db.peek().await;
    let package_id = package_id.unwrap_or_else(|| context.seed.id.clone());
    if let Some(callback) = callback {
        let registered = callback.register(&context.seed.persistent_container);
        context.seed.ctx.callbacks.add_get_host_info(
            package_id.clone(),
            host_id.clone(),
            CallbackHandler::new(&context, registered),
        );
    }
    match db
        .as_public()
        .as_package_data()
        .as_idx(&package_id)
        .and_then(|m| m.as_hosts().as_idx(&host_id))
    {
        Some(host) => host.de().map(Some),
        None => Ok(None),
    }
}

View File

@@ -0,0 +1,53 @@
use std::net::Ipv4Addr;
use crate::PackageId;
use crate::service::effects::callbacks::CallbackHandler;
use crate::service::effects::prelude::*;
use crate::service::rpc::CallbackId;
/// Parameters for the `get-container-ip` effect.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct GetContainerIpParams {
    // Defaults to the calling service's own container when omitted.
    #[ts(optional)]
    package_id: Option<PackageId>,
    // Only registered when querying another package's ip (own ip is static).
    #[ts(optional)]
    callback: Option<CallbackId>,
}
/// Effect: look up the LXC container IPv4 address of `package_id`
/// (default: the calling service itself).
///
/// Returns `Ok(None)` when the target service is not registered or its LXC
/// container has not been created.
pub async fn get_container_ip(
    context: EffectContext,
    GetContainerIpParams {
        package_id,
        callback,
    }: GetContainerIpParams,
) -> Result<Option<Ipv4Addr>, Error> {
    let context = context.deref()?;
    // `filter` drops a package_id equal to self, so self-queries take the
    // cheaper branch below.
    if let Some(package_id) = package_id.filter(|id| id != &context.seed.id) {
        if let Some(callback) = callback {
            // ip is static for the lifetime of the container, so callback unnecessary for self
            let callback = callback.register(&context.seed.persistent_container);
            context
                .seed
                .ctx
                .callbacks
                .add_get_container_ip(package_id.clone(), CallbackHandler::new(&context, callback));
        }
        let Some(svc) = &*context.seed.ctx.services.get(&package_id).await else {
            return Ok(None);
        };
        let Some(lxc) = svc.seed.persistent_container.lxc_container.get() else {
            return Ok(None);
        };
        let res = lxc.ip().await?;
        Ok(Some(res))
    } else {
        // Own container: read the ip directly, if the container exists.
        let Some(lxc) = context.seed.persistent_container.lxc_container.get() else {
            return Ok(None);
        };
        lxc.ip().await.map(Some)
    }
}

View File

@@ -0,0 +1,231 @@
use std::collections::BTreeMap;
use imbl::vector;
use crate::net::service_interface::{AddressInfo, ServiceInterface, ServiceInterfaceType};
use crate::service::effects::callbacks::CallbackHandler;
use crate::service::effects::prelude::*;
use crate::service::rpc::CallbackId;
use crate::{PackageId, ServiceInterfaceId};
/// Parameters for the `export-service-interface` effect.
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct ExportServiceInterfaceParams {
    id: ServiceInterfaceId,
    name: String,
    description: String,
    masked: bool,
    address_info: AddressInfo,
    r#type: ServiceInterfaceType,
}
/// Effect: publish (insert or replace) one of this service's interfaces in
/// the db.
///
/// When the write actually changes the db (a new revision is produced), any
/// registered `get-service-interface` / `list-service-interfaces` callbacks
/// for this package are fired.
pub async fn export_service_interface(
    context: EffectContext,
    ExportServiceInterfaceParams {
        id,
        name,
        description,
        masked,
        address_info,
        r#type,
    }: ExportServiceInterfaceParams,
) -> Result<(), Error> {
    let context = context.deref()?;
    let package_id = context.seed.id.clone();
    let service_interface = ServiceInterface {
        id: id.clone(),
        name,
        description,
        masked,
        address_info,
        interface_type: r#type,
    };
    let res = context
        .seed
        .ctx
        .db
        .mutate(|db| {
            let ifaces = db
                .as_public_mut()
                .as_package_data_mut()
                .as_idx_mut(&package_id)
                .or_not_found(&package_id)?
                .as_service_interfaces_mut();
            ifaces.insert(&id, &service_interface)?;
            Ok(())
        })
        .await;
    res.result?;
    // Only notify listeners if the mutation changed the db.
    if res.revision.is_some() {
        if let Some(callbacks) = context
            .seed
            .ctx
            .callbacks
            .get_service_interface(&(package_id.clone(), id))
        {
            callbacks.call(vector![]).await?;
        }
        if let Some(callbacks) = context
            .seed
            .ctx
            .callbacks
            .list_service_interfaces(&package_id)
        {
            callbacks.call(vector![]).await?;
        }
    }
    Ok(())
}
/// Parameters for the `get-service-interface` effect.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct GetServiceInterfaceParams {
    // Defaults to the calling service's own package when omitted.
    #[ts(optional)]
    package_id: Option<PackageId>,
    service_interface_id: ServiceInterfaceId,
    // When present, registered to fire on future changes to this interface.
    #[ts(optional)]
    callback: Option<CallbackId>,
}
/// Effect: fetch one service interface of `package_id` (default: self) by
/// id, optionally registering a callback for future updates.
///
/// Returns `Ok(None)` when the package or interface does not exist.
pub async fn get_service_interface(
    context: EffectContext,
    GetServiceInterfaceParams {
        package_id,
        service_interface_id,
        callback,
    }: GetServiceInterfaceParams,
) -> Result<Option<ServiceInterface>, Error> {
    let context = context.deref()?;
    let package_id = package_id.unwrap_or_else(|| context.seed.id.clone());
    let db = context.seed.ctx.db.peek().await;
    if let Some(callback) = callback {
        let registered = callback.register(&context.seed.persistent_container);
        context.seed.ctx.callbacks.add_get_service_interface(
            package_id.clone(),
            service_interface_id.clone(),
            CallbackHandler::new(&context, registered),
        );
    }
    match db
        .as_public()
        .as_package_data()
        .as_idx(&package_id)
        .and_then(|m| m.as_service_interfaces().as_idx(&service_interface_id))
    {
        Some(iface) => iface.de().map(Some),
        None => Ok(None),
    }
}
/// Parameters for the `list-service-interfaces` effect.
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct ListServiceInterfacesParams {
    // Defaults to the calling service's own package when omitted.
    #[ts(optional)]
    package_id: Option<PackageId>,
    // When present, registered to fire when the interface list changes.
    #[ts(optional)]
    callback: Option<CallbackId>,
}
/// Effect: list all service interfaces of `package_id` (default: self),
/// optionally registering a callback for future changes.
///
/// Returns an empty map when the package is not installed.
pub async fn list_service_interfaces(
    context: EffectContext,
    ListServiceInterfacesParams {
        package_id,
        callback,
    }: ListServiceInterfacesParams,
) -> Result<BTreeMap<ServiceInterfaceId, ServiceInterface>, Error> {
    let context = context.deref()?;
    let package_id = package_id.unwrap_or_else(|| context.seed.id.clone());
    if let Some(callback) = callback {
        let registered = callback.register(&context.seed.persistent_container);
        context.seed.ctx.callbacks.add_list_service_interfaces(
            package_id.clone(),
            CallbackHandler::new(&context, registered),
        );
    }
    let db = context.seed.ctx.db.peek().await;
    match db
        .into_public()
        .into_package_data()
        .into_idx(&package_id)
    {
        Some(pkg) => pkg.into_service_interfaces().de(),
        None => Ok(BTreeMap::new()),
    }
}
/// Parameters for the `clear-service-interfaces` effect.
#[derive(Debug, Clone, Serialize, Deserialize, TS, Parser)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct ClearServiceInterfacesParams {
    // Interface ids to keep; everything else is removed.
    pub except: Vec<ServiceInterfaceId>,
}
/// Effect: remove this service's interfaces from the db, keeping only those
/// listed in `except`.
///
/// If the mutation actually changed the db (a new revision was produced),
/// fires the `get-service-interface` callbacks for each removed id plus the
/// package's `list-service-interfaces` callbacks.
pub async fn clear_service_interfaces(
    context: EffectContext,
    ClearServiceInterfacesParams { except }: ClearServiceInterfacesParams,
) -> Result<(), Error> {
    let context = context.deref()?;
    let package_id = context.seed.id.clone();
    let res = context
        .seed
        .ctx
        .db
        .mutate(|db| {
            // Collect the removed ids so the matching callbacks can be fired
            // after the transaction commits.
            let mut removed = Vec::new();
            db.as_public_mut()
                .as_package_data_mut()
                .as_idx_mut(&package_id)
                .or_not_found(&package_id)?
                .as_service_interfaces_mut()
                .mutate(|s| {
                    Ok(s.retain(|id, _| {
                        if except.contains(id) {
                            true
                        } else {
                            removed.push(id.clone());
                            false
                        }
                    }))
                })?;
            Ok(removed)
        })
        .await;
    let removed = res.result?;
    if res.revision.is_some() {
        for id in removed {
            if let Some(callbacks) = context
                .seed
                .ctx
                .callbacks
                .get_service_interface(&(package_id.clone(), id))
            {
                callbacks.call(vector![]).await?;
            }
        }
        if let Some(callbacks) = context
            .seed
            .ctx
            .callbacks
            .list_service_interfaces(&package_id)
        {
            callbacks.call(vector![]).await?;
        }
    }
    Ok(())
}

View File

@@ -0,0 +1,5 @@
pub mod bind;
pub mod host;
pub mod info;
pub mod interface;
pub mod ssl;

View File

@@ -0,0 +1,230 @@
use std::collections::BTreeSet;
use std::net::IpAddr;
use imbl_value::InternedString;
use ipnet::IpNet;
use itertools::Itertools;
use openssl::pkey::{PKey, Private};
use crate::HOST_IP;
use crate::service::effects::callbacks::CallbackHandler;
use crate::service::effects::prelude::*;
use crate::service::rpc::CallbackId;
use crate::util::serde::Pem;
/// Key algorithm for TLS material; serialized as "ecdsa" / "ed25519".
/// `Ecdsa` selects the NIST-P256 key/chain in `get_ssl_certificate` and
/// `get_ssl_key`.
#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize, TS, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub enum Algorithm {
    Ecdsa,
    Ed25519,
}
/// Parameters for the `get-ssl-certificate` effect.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct GetSslCertificateParams {
    // Hostnames the certificate must cover; each is validated against what
    // the system knows about before issuance.
    #[ts(type = "string[]")]
    hostnames: BTreeSet<InternedString>,
    // Defaults to ECDSA when omitted.
    #[ts(optional)]
    algorithm: Option<Algorithm>, //"ecdsa" | "ed25519"
    #[ts(optional)]
    callback: Option<CallbackId>,
}
/// Effect: obtain a TLS certificate chain covering `hostnames`, as PEM
/// strings, using the requested key algorithm (default ECDSA/NIST-P256).
///
/// Every requested hostname must be one the system can vouch for: a
/// `<package>.embassy` / `<package>.startos` name of an installed package,
/// an IP on the container /24 or on a gateway subnet, or a hostname /
/// onion / domain configured on some installed package's hosts. Anything
/// else is rejected with `NotFound`.
///
/// If `callback` is provided it is registered against the issued
/// certificate so the caller can be notified of changes.
pub async fn get_ssl_certificate(
    ctx: EffectContext,
    GetSslCertificateParams {
        hostnames,
        algorithm,
        callback,
    }: GetSslCertificateParams,
) -> Result<Vec<String>, Error> {
    let context = ctx.deref()?;
    let algorithm = algorithm.unwrap_or(Algorithm::Ecdsa);
    let cert = context
        .seed
        .ctx
        .db
        .mutate(|db| {
            let errfn = |h: &str| Error::new(eyre!("unknown hostname: {h}"), ErrorKind::NotFound);
            let entries = db.as_public().as_package_data().as_entries()?;
            let packages = entries.iter().map(|(k, _)| k).collect::<BTreeSet<_>>();
            // Every name any installed package is known by: onions,
            // public/private domains, and reported hostname info.
            let allowed_hostnames = entries
                .iter()
                .map(|(_, m)| m.as_hosts().as_entries())
                .flatten_ok()
                .map_ok(|(_, m)| {
                    Ok(m.as_onions()
                        .de()?
                        .iter()
                        .map(InternedString::from_display)
                        .chain(m.as_public_domains().keys()?)
                        .chain(m.as_private_domains().de()?)
                        .chain(
                            m.as_hostname_info()
                                .de()?
                                .values()
                                .flatten()
                                .map(|h| h.to_san_hostname()),
                        )
                        .collect::<Vec<_>>())
                })
                .map(|a| a.and_then(|a| a))
                .flatten_ok()
                .try_collect::<_, BTreeSet<_>, _>()?;
            for hostname in &hostnames {
                if let Some(internal) = hostname
                    .strip_suffix(".embassy")
                    .or_else(|| hostname.strip_suffix(".startos"))
                {
                    // Internal names are only valid for installed packages.
                    if !packages.contains(internal) {
                        return Err(errfn(&*hostname));
                    }
                } else if let Ok(ip) = hostname.parse::<IpAddr>() {
                    // IPs must be on the container /24 or a gateway subnet.
                    if IpNet::new(HOST_IP.into(), 24)
                        .with_kind(ErrorKind::ParseNetAddress)?
                        .contains(&ip)
                    {
                        Ok(())
                    } else if db
                        .as_public()
                        .as_server_info()
                        .as_network()
                        .as_gateways()
                        .as_entries()?
                        .into_iter()
                        .flat_map(|(_, net)| net.as_ip_info().transpose_ref())
                        .flat_map(|net| net.as_deref().as_subnets().de().log_err())
                        .flatten()
                        .any(|s| s.addr() == ip)
                    {
                        Ok(())
                    } else {
                        Err(errfn(&*hostname))
                    }?;
                } else if !allowed_hostnames.contains(hostname) {
                    // FIX: collapsed `else { if … }` into `else if`
                    // (clippy::collapsible_else_if); behavior unchanged.
                    return Err(errfn(&*hostname));
                }
            }
            db.as_private_mut()
                .as_key_store_mut()
                .as_local_certs_mut()
                .cert_for(&hostnames)
        })
        .await
        .result?;
    let fullchain = match algorithm {
        Algorithm::Ecdsa => cert.fullchain_nistp256(),
        Algorithm::Ed25519 => cert.fullchain_ed25519(),
    };
    let res = fullchain
        .into_iter()
        .map(|c| c.to_pem())
        .map_ok(String::from_utf8)
        .map(|a| Ok::<_, Error>(a??))
        .try_collect()?;
    if let Some(callback) = callback {
        let callback = callback.register(&context.seed.persistent_container);
        context.seed.ctx.callbacks.add_get_ssl_certificate(
            ctx,
            hostnames,
            cert,
            algorithm,
            CallbackHandler::new(&context, callback),
        );
    }
    Ok(res)
}
/// Parameters for the `get-ssl-key` effect.
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct GetSslKeyParams {
    // Hostnames the key's certificate must cover; must belong to the
    // calling package (or be its container ip).
    #[ts(type = "string[]")]
    hostnames: BTreeSet<InternedString>,
    // Defaults to ECDSA when omitted.
    #[ts(optional)]
    algorithm: Option<Algorithm>, //"ecdsa" | "ed25519"
}
/// Effect: obtain the private key matching the certificate for `hostnames`.
///
/// Validation is stricter than for `get_ssl_certificate`: `.embassy` /
/// `.startos` names must refer to the calling package itself, and any other
/// name must belong to one of this package's hosts or be the container's
/// own IP. Unknown names are rejected with `NotFound`.
pub async fn get_ssl_key(
    context: EffectContext,
    GetSslKeyParams {
        hostnames,
        algorithm,
    }: GetSslKeyParams,
) -> Result<Pem<PKey<Private>>, Error> {
    let context = context.deref()?;
    let package_id = &context.seed.id;
    let algorithm = algorithm.unwrap_or(Algorithm::Ecdsa);
    let container_ip = if let Some(lxc) = context.seed.persistent_container.lxc_container.get() {
        Some(lxc.ip().await?)
    } else {
        None
    };
    let cert = context
        .seed
        .ctx
        .db
        .mutate(|db| {
            let errfn = |h: &str| Error::new(eyre!("unknown hostname: {h}"), ErrorKind::NotFound);
            // Every name this package's hosts are known by: onions,
            // public/private domains, and reported hostname info.
            let mut allowed_hostnames = db
                .as_public()
                .as_package_data()
                .as_idx(package_id)
                .into_iter()
                .map(|m| m.as_hosts().as_entries())
                .flatten_ok()
                .map_ok(|(_, m)| {
                    Ok(m.as_onions()
                        .de()?
                        .iter()
                        .map(InternedString::from_display)
                        .chain(m.as_public_domains().keys()?)
                        .chain(m.as_private_domains().de()?)
                        .chain(
                            m.as_hostname_info()
                                .de()?
                                .values()
                                .flatten()
                                .map(|h| h.to_san_hostname()),
                        )
                        .collect::<Vec<_>>())
                })
                .map(|a| a.and_then(|a| a))
                .flatten_ok()
                .try_collect::<_, BTreeSet<_>, _>()?;
            // The container's own ip is always acceptable.
            allowed_hostnames.extend(container_ip.as_ref().map(InternedString::from_display));
            for hostname in &hostnames {
                if let Some(internal) = hostname
                    .strip_suffix(".embassy")
                    .or_else(|| hostname.strip_suffix(".startos"))
                {
                    // Internal names may only refer to the calling package.
                    if internal != &**package_id {
                        return Err(errfn(&*hostname));
                    }
                } else if !allowed_hostnames.contains(hostname) {
                    // FIX: collapsed `else { if … }` into `else if`
                    // (clippy::collapsible_else_if); behavior unchanged.
                    return Err(errfn(&*hostname));
                }
            }
            db.as_private_mut()
                .as_key_store_mut()
                .as_local_certs_mut()
                .cert_for(&hostnames)
        })
        .await
        .result?;
    let key = match algorithm {
        Algorithm::Ecdsa => cert.leaf.keys.nistp256,
        Algorithm::Ed25519 => cert.leaf.keys.ed25519,
    };
    Ok(Pem(key))
}

View File

@@ -0,0 +1,16 @@
pub use clap::Parser;
pub use serde::{Deserialize, Serialize};
pub use ts_rs::TS;
pub use crate::prelude::*;
use crate::rpc_continuations::Guid;
pub(super) use crate::service::effects::context::EffectContext;
/// Common parameter wrapper carrying a `Guid` event id.
/// Defaults to `Guid::default()` when omitted (serde `default` / clap
/// `default_value_t`).
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct EventId {
    #[serde(default)]
    #[arg(default_value_t, long)]
    pub event_id: Guid,
}

View File

@@ -0,0 +1,142 @@
use std::path::{Path, PathBuf};
use imbl_value::InternedString;
use tokio::process::Command;
use crate::ImageId;
use crate::disk::mount::filesystem::overlayfs::OverlayGuard;
use crate::disk::mount::guard::GenericMountGuard;
use crate::rpc_continuations::Guid;
use crate::service::effects::prelude::*;
use crate::service::persistent_container::Subcontainer;
use crate::util::Invoke;
#[cfg(target_os = "linux")]
mod sync;
#[cfg(not(target_os = "linux"))]
mod sync_dummy;
pub use sync::*;
#[cfg(not(target_os = "linux"))]
use sync_dummy as sync;
/// Parameters for `subcontainer destroy-fs`: the guid previously returned by
/// `create-fs`.
#[derive(Debug, Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct DestroySubcontainerFsParams {
    guid: Guid,
}
/// Effect: tear down a subcontainer filesystem previously created by
/// `create_subcontainer_fs`.
///
/// On Linux, if the subcontainer still has a live init (`proc/1` exists in
/// its overlay), its pid namespace is killed first via `sync::kill_init`,
/// then the overlay is unmounted. Destroying an unknown guid is not an
/// error — it is logged and skipped, keeping the call idempotent.
#[instrument(skip_all)]
pub async fn destroy_subcontainer_fs(
    context: EffectContext,
    DestroySubcontainerFsParams { guid }: DestroySubcontainerFsParams,
) -> Result<(), Error> {
    let context = context.deref()?;
    if let Some(overlay) = context
        .seed
        .persistent_container
        .subcontainers
        .lock()
        .await
        .remove(&guid)
    {
        #[cfg(target_os = "linux")]
        if tokio::fs::metadata(overlay.overlay.path().join("proc/1"))
            .await
            .is_ok()
        {
            let procfs = context
                .seed
                .persistent_container
                .lxc_container
                .get()
                .or_not_found("lxc container")?
                .rootfs_dir()
                .join("proc");
            let overlay_path = overlay.overlay.path().to_owned();
            // kill_init does blocking procfs scans; keep it off the async runtime.
            tokio::task::spawn_blocking(move || sync::kill_init(&procfs, &overlay_path))
                .await
                .with_kind(ErrorKind::Unknown)??;
        }
        overlay.overlay.unmount(true).await?;
    } else {
        // FIX: corrected typo/grammar in the warning message
        // ("assumming that it already is destroyed and will be skipping").
        tracing::warn!(
            "Could not find a subcontainer fs to destroy; assuming it has already been destroyed and skipping"
        );
    }
    Ok(())
}
/// Parameters for `subcontainer create-fs`.
#[derive(Debug, Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct CreateSubcontainerFsParams {
    // Image from the service's s9pk to mount as an overlay.
    image_id: ImageId,
    // Optional label; defaults to "subcontainer-<image_id>".
    #[ts(type = "string | null")]
    name: Option<InternedString>,
}
#[instrument(skip_all)]
pub async fn create_subcontainer_fs(
context: EffectContext,
CreateSubcontainerFsParams { image_id, name }: CreateSubcontainerFsParams,
) -> Result<(PathBuf, Guid), Error> {
let context = context.deref()?;
if let Some(image) = context
.seed
.persistent_container
.images
.get(&image_id)
.cloned()
{
let guid = Guid::new();
let rootfs_dir = context
.seed
.persistent_container
.lxc_container
.get()
.ok_or_else(|| {
Error::new(
eyre!("PersistentContainer has been destroyed"),
ErrorKind::Incoherent,
)
})?
.rootfs_dir();
let mountpoint = rootfs_dir
.join("media/startos/subcontainers")
.join(guid.as_ref());
tokio::fs::create_dir_all(&mountpoint).await?;
let container_mountpoint = Path::new("/").join(
mountpoint
.strip_prefix(rootfs_dir)
.with_kind(ErrorKind::Incoherent)?,
);
tracing::info!("Mounting overlay {guid} for {image_id}");
let subcontainer_wrapper = Subcontainer {
overlay: OverlayGuard::mount(image, &mountpoint).await?,
name: name
.unwrap_or_else(|| InternedString::intern(format!("subcontainer-{}", image_id))),
image_id: image_id.clone(),
};
Command::new("chown")
.arg("100000:100000")
.arg(&mountpoint)
.invoke(ErrorKind::Filesystem)
.await?;
tracing::info!("Mounted overlay {guid} for {image_id}");
context
.seed
.persistent_container
.subcontainers
.lock()
.await
.insert(guid.clone(), subcontainer_wrapper);
Ok((container_mountpoint, guid))
} else {
Err(Error::new(
eyre!("image {image_id} not found in s9pk"),
ErrorKind::NotFound,
))
}
}

View File

@@ -0,0 +1,718 @@
use std::collections::BTreeMap;
use std::ffi::{OsStr, OsString, c_int};
use std::fs::File;
use std::io::{IsTerminal, Read};
use std::os::unix::process::{CommandExt, ExitStatusExt};
use std::path::{Path, PathBuf};
use std::process::{Command as StdCommand, Stdio};
use std::sync::Arc;
use nix::errno::Errno;
use nix::sched::CloneFlags;
use nix::unistd::Pid;
use signal_hook::consts::signal::*;
use termion::raw::IntoRawMode;
use tokio::sync::oneshot;
use crate::CAP_1_KiB;
use crate::service::effects::ContainerCliContext;
use crate::service::effects::prelude::*;
use crate::util::io::TermSize;
/// Signals forwarded from the wrapper process to the spawned child (see the
/// forwarding thread in `launch`). SIGKILL/SIGSTOP are absent since they
/// cannot be caught.
// NOTE(review): other omissions (e.g. SIGCHLD, SIGWINCH) appear intentional — confirm.
const FWD_SIGNALS: &[c_int] = &[
    SIGABRT, SIGALRM, SIGCONT, SIGHUP, SIGINT, SIGIO, SIGPIPE, SIGPROF, SIGQUIT, SIGTERM, SIGTRAP,
    SIGTSTP, SIGTTIN, SIGTTOU, SIGURG, SIGUSR1, SIGUSR2, SIGVTALRM,
];
/// Kill the init process (pid 1) of the subcontainer rooted at `chroot`,
/// then unmount the subcontainer's procfs.
///
/// `procfs` is the path to the parent container's /proc; `chroot` is the
/// subcontainer rootfs. No-op when `chroot/proc/1` does not exist.
pub fn kill_init(procfs: &Path, chroot: &Path) -> Result<(), Error> {
    if chroot.join("proc/1").exists() {
        // Identify the subcontainer's pid namespace via its init process.
        let ns_id = procfs::process::Process::new_with_root(chroot.join("proc/1"))
            .with_ctx(|_| (ErrorKind::Filesystem, "open subcontainer procfs"))?
            .namespaces()
            .with_ctx(|_| (ErrorKind::Filesystem, "read subcontainer pid 1 ns"))?
            .0
            .get(OsStr::new("pid"))
            .or_not_found("pid namespace")?
            .identifier;
        // Scan all processes visible in the parent's procfs for members of
        // that namespace.
        for proc in procfs::process::all_processes_with_root(procfs)
            .with_ctx(|_| (ErrorKind::Filesystem, "open procfs"))?
        {
            let proc = proc.with_ctx(|_| (ErrorKind::Filesystem, "read single process details"))?;
            let pid = proc.pid();
            if proc
                .namespaces()
                .with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("read pid {} ns", pid)))?
                .0
                .get(OsStr::new("pid"))
                .map_or(false, |ns| ns.identifier == ns_id)
            {
                let pids = proc.read::<_, NSPid>("status").with_ctx(|_| {
                    (
                        ErrorKind::Filesystem,
                        lazy_format!("read pid {} NSpid", pid),
                    )
                })?;
                // An NSpid of exactly two entries ending in 1 marks the
                // process that is pid 1 inside the subcontainer's namespace.
                if pids.0.len() == 2 && pids.0[1] == 1 {
                    match nix::sys::signal::kill(
                        Pid::from_raw(pid),
                        Some(nix::sys::signal::SIGKILL),
                    ) {
                        // Already gone — treat as success.
                        Err(Errno::ESRCH) => Ok(()),
                        a => a,
                    }
                    .with_ctx(|_| {
                        (
                            ErrorKind::Filesystem,
                            lazy_format!(
                                "kill pid {} (determined to be pid 1 in subcontainer)",
                                pid
                            ),
                        )
                    })?;
                }
            }
        }
        nix::mount::umount(&chroot.join("proc"))
            .with_ctx(|_| (ErrorKind::Filesystem, "unmounting subcontainer procfs"))?;
    }
    Ok(())
}
/// The `NSpid:` row of `/proc/<pid>/status`: the process's pid in each pid
/// namespace it belongs to (one entry per nesting level).
struct NSPid(Vec<i32>);
impl procfs::FromBufRead for NSPid {
    fn from_buf_read<R: std::io::BufRead>(r: R) -> procfs::ProcResult<Self> {
        for line in r.lines() {
            let line = line?;
            let Some(row) = line.trim().strip_prefix("NSpid:") else {
                continue;
            };
            let pids = row
                .split_ascii_whitespace()
                .map(|pid| pid.parse::<i32>())
                .collect::<Result<Vec<_>, _>>()?;
            return Ok(Self(pids));
        }
        // Status file ended without an NSpid row.
        Err(procfs::ProcError::Incomplete(None))
    }
}
fn open_file_read(path: impl AsRef<Path>) -> Result<File, Error> {
File::open(&path).with_ctx(|_| {
(
ErrorKind::Filesystem,
lazy_format!("open r {}", path.as_ref().display()),
)
})
}
/// CLI/RPC parameters for running a command inside a subcontainer chroot.
#[derive(Debug, Clone, Serialize, Deserialize, Parser)]
pub struct ExecParams {
    // Treat stdio as a tty even when stdin/stdout are not terminals.
    #[arg(long)]
    force_tty: bool,
    #[arg(long)]
    force_stderr_tty: bool,
    // Initial pty dimensions.
    #[arg(long)]
    pty_size: Option<TermSize>,
    // KEY=VALUE pairs; repeatable.
    #[arg(short, long)]
    env: Vec<String>,
    // File of KEY=VALUE lines; `--env` flags override duplicate keys.
    #[arg(long)]
    env_file: Option<PathBuf>,
    #[arg(short, long)]
    workdir: Option<PathBuf>,
    // Uid, "uid:gid", or a passwd user name inside the chroot.
    #[arg(short, long)]
    user: Option<String>,
    // Root directory of the subcontainer filesystem.
    chroot: PathBuf,
    // Program followed by its arguments; must be non-empty.
    #[arg(trailing_var_arg = true)]
    command: Vec<OsString>,
}
impl ExecParams {
    /// Resolve user, environment, and working directory, chroot into the
    /// subcontainer rootfs, and replace the current process with
    /// `self.command` via `exec`.
    ///
    /// On success this never returns; `Err` is only produced for setup
    /// failures (or when `exec` itself fails).
    fn exec(&self) -> Result<(), Error> {
        let ExecParams {
            env,
            env_file,
            workdir,
            user,
            chroot,
            command,
            ..
        } = self;
        let Some(([command], args)) = command.split_at_checked(1) else {
            return Err(Error::new(
                eyre!("command cannot be empty"),
                ErrorKind::InvalidRequest,
            ));
        };
        let mut cmd = StdCommand::new(command);
        // Best effort: a chroot without /etc/passwd just yields no user db.
        let passwd = std::fs::read_to_string(chroot.join("etc/passwd"))
            .with_ctx(|_| (ErrorKind::Filesystem, "read /etc/passwd"))
            .log_err()
            .unwrap_or_default();
        let mut home = None;
        // `--user` may be "<uid>", "<uid>:<gid>", or a passwd user name.
        if let Some((uid, gid)) =
            if let Some(uid) = user.as_deref().and_then(|u| u.parse::<u32>().ok()) {
                Some((uid, uid))
            } else if let Some((uid, gid)) = user
                .as_deref()
                .and_then(|u| u.split_once(":"))
                .and_then(|(u, g)| Some((u.parse::<u32>().ok()?, g.parse::<u32>().ok()?)))
            {
                Some((uid, gid))
            } else if let Some(user) = user {
                Some(
                    if let Some((uid, gid)) = passwd.lines().find_map(|l| {
                        let l = l.trim();
                        let mut split = l.split(":");
                        if user != split.next()? {
                            return None;
                        }
                        split.next(); // throw away x
                        let uid = split.next()?.parse().ok()?;
                        let gid = split.next()?.parse().ok()?;
                        split.next(); // throw away group name
                        home = split.next();
                        Some((uid, gid))
                        // uid gid
                    }) {
                        (uid, gid)
                    } else if user == "root" {
                        // "root" works even when absent from /etc/passwd.
                        (0, 0)
                    } else {
                        None.or_not_found(lazy_format!("{user} in /etc/passwd"))?
                    },
                )
            } else {
                None
            }
        {
            if home.is_none() {
                // Fall back to finding the home directory by uid.
                home = passwd.lines().find_map(|l| {
                    let l = l.trim();
                    let mut split = l.split(":");
                    split.next(); // throw away user name
                    split.next(); // throw away x
                    if split.next()?.parse::<u32>().ok()? != uid {
                        return None;
                    }
                    split.next(); // throw away gid
                    split.next(); // throw away group name
                    split.next()
                })
            };
            // Hand the stdio fds to the target user; failures are ignored.
            std::os::unix::fs::chown("/proc/self/fd/0", Some(uid), Some(gid)).ok();
            std::os::unix::fs::chown("/proc/self/fd/1", Some(uid), Some(gid)).ok();
            std::os::unix::fs::chown("/proc/self/fd/2", Some(uid), Some(gid)).ok();
            cmd.uid(uid);
            cmd.gid(gid);
        } else {
            home = Some("/root");
        }
        cmd.env("HOME", home.unwrap_or("/"));
        let env_string = if let Some(env_file) = &env_file {
            std::fs::read_to_string(env_file)
                // FIX: the error context previously interpolated `env` (the
                // --env flags) instead of the path of the file being read.
                .with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("read {env_file:?}")))?
        } else {
            Default::default()
        };
        // File entries first, then --env flags, so flags win on duplicates.
        let env = env_string
            .lines()
            .chain(env.iter().map(|l| l.as_str()))
            .map(|l| l.trim())
            .filter_map(|l| l.split_once("="))
            .collect::<BTreeMap<_, _>>();
        std::os::unix::fs::chroot(chroot)
            .with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("chroot {chroot:?}")))?;
        cmd.args(args);
        for (k, v) in env {
            cmd.env(k, v);
        }
        if let Some(workdir) = workdir {
            cmd.current_dir(workdir);
        } else {
            cmd.current_dir("/");
        }
        // exec replaces the process image; reaching this line means it failed.
        Err(cmd.exec().into())
    }
}
/// CLI entrypoint: launch the container init process inside `chroot` in fresh
/// pid/cgroup/ipc namespaces, forwarding stdio and signals to the child.
///
/// The child is re-invoked as `start-container subcontainer launch-init`,
/// which mounts procfs in the chroot and then execs (or idles on) `command`.
pub fn launch(
    _: ContainerCliContext,
    ExecParams {
        force_tty,
        force_stderr_tty,
        pty_size,
        env,
        env_file,
        workdir,
        user,
        chroot,
        command,
    }: ExecParams,
) -> Result<(), Error> {
    use std::io::Write;
    // Kill any init process left over from a previous launch of this chroot.
    kill_init(Path::new("/proc"), &chroot)?;
    // Forward FWD_SIGNALS to the child once we learn its pid.
    let mut sig = signal_hook::iterator::Signals::new(FWD_SIGNALS)?;
    let (send_pid, recv_pid) = oneshot::channel();
    std::thread::spawn(move || {
        if let Ok(pid) = recv_pid.blocking_recv() {
            for sig in sig.forever() {
                match nix::sys::signal::kill(
                    Pid::from_raw(pid),
                    Some(nix::sys::signal::Signal::try_from(sig).unwrap()),
                ) {
                    // ESRCH: child already exited; ignore.
                    Err(Errno::ESRCH) => Ok(()),
                    a => a,
                }
                .unwrap()
            }
        }
    });
    let mut stdin = std::io::stdin();
    let stdout = std::io::stdout();
    let stderr = std::io::stderr();
    let stderr_tty = force_stderr_tty || stderr.is_terminal();
    let tty = force_tty || (stdin.is_terminal() && stdout.is_terminal());
    // Put the host terminal in raw mode while the child owns the pty;
    // the guard restores it on drop.
    let raw = if stdin.is_terminal() && stdout.is_terminal() {
        Some(termion::get_tty()?.into_raw_mode()?)
    } else {
        None
    };
    // stdin pump: in tty mode flush per chunk for interactivity; otherwise a
    // plain byte copy.
    let (stdin_send, stdin_recv) = oneshot::channel::<Box<dyn Write + Send>>();
    std::thread::spawn(move || {
        if let Ok(mut cstdin) = stdin_recv.blocking_recv() {
            if tty {
                let mut buf = [0_u8; CAP_1_KiB];
                while let Ok(n) = stdin.read(&mut buf) {
                    if n == 0 {
                        break;
                    }
                    cstdin.write_all(&buf[..n]).ok();
                    cstdin.flush().ok();
                }
            } else {
                std::io::copy(&mut stdin, &mut cstdin).unwrap();
            }
        }
    });
    // stdout pump (joined before exit so buffered output is not lost).
    let (stdout_send, stdout_recv) = oneshot::channel::<Box<dyn std::io::Read + Send>>();
    let stdout_thread = std::thread::spawn(move || {
        if let Ok(mut cstdout) = stdout_recv.blocking_recv() {
            if tty {
                let mut stdout = stdout.lock();
                let mut buf = [0_u8; CAP_1_KiB];
                while let Ok(n) = cstdout.read(&mut buf) {
                    if n == 0 {
                        break;
                    }
                    stdout.write_all(&buf[..n]).ok();
                    stdout.flush().ok();
                }
            } else {
                std::io::copy(&mut cstdout, &mut stdout.lock()).unwrap();
            }
        }
    });
    // stderr is only piped when it is not a terminal; otherwise the child
    // inherits/shares the pty.
    let (stderr_send, stderr_recv) = oneshot::channel::<Box<dyn std::io::Read + Send>>();
    let stderr_thread = if !stderr_tty {
        Some(std::thread::spawn(move || {
            if let Ok(mut cstderr) = stderr_recv.blocking_recv() {
                std::io::copy(&mut cstderr, &mut stderr.lock()).unwrap();
            }
        }))
    } else {
        None
    };
    // New namespaces: the child spawned below becomes pid 1 of the new pid ns.
    nix::sched::unshare(CloneFlags::CLONE_NEWPID)
        .with_ctx(|_| (ErrorKind::Filesystem, "unshare pid ns"))?;
    nix::sched::unshare(CloneFlags::CLONE_NEWCGROUP)
        .with_ctx(|_| (ErrorKind::Filesystem, "unshare cgroup ns"))?;
    nix::sched::unshare(CloneFlags::CLONE_NEWIPC)
        .with_ctx(|_| (ErrorKind::Filesystem, "unshare ipc ns"))?;
    if tty {
        use pty_process::blocking as pty_process;
        let (pty, pts) = pty_process::open().with_kind(ErrorKind::Filesystem)?;
        let mut cmd = pty_process::Command::new("/usr/bin/start-container");
        cmd = cmd.arg("subcontainer").arg("launch-init");
        for env in env {
            cmd = cmd.arg("-e").arg(env)
        }
        if let Some(env_file) = env_file {
            cmd = cmd.arg("--env-file").arg(env_file);
        }
        if let Some(workdir) = workdir {
            cmd = cmd.arg("--workdir").arg(workdir);
        }
        if let Some(user) = user {
            cmd = cmd.arg("--user").arg(user);
        }
        cmd = cmd.arg(&chroot).args(&command);
        if !stderr_tty {
            cmd = cmd.stderr(Stdio::piped());
        }
        let mut child = cmd
            .spawn(pts)
            .map_err(color_eyre::eyre::Report::msg)
            .with_ctx(|_| (ErrorKind::Filesystem, "spawning child process"))?;
        send_pid.send(child.id() as i32).unwrap_or_default();
        // Propagate the caller's terminal dimensions to the pty.
        if let Some(pty_size) = pty_size.or_else(|| TermSize::get_current()) {
            let size = if let Some((x, y)) = pty_size.pixels {
                ::pty_process::Size::new_with_pixel(pty_size.rows, pty_size.cols, x, y)
            } else {
                ::pty_process::Size::new(pty_size.rows, pty_size.cols)
            };
            pty.resize(size).with_kind(ErrorKind::Filesystem)?;
        }
        // The pty is shared between the stdin and stdout pump threads.
        let shared = ArcPty(Arc::new(pty));
        stdin_send
            .send(Box::new(shared.clone()))
            .unwrap_or_default();
        stdout_send
            .send(Box::new(shared.clone()))
            .unwrap_or_default();
        if let Some(stderr) = child.stderr.take() {
            stderr_send.send(Box::new(stderr)).unwrap_or_default();
        }
        let exit = child
            .wait()
            .with_ctx(|_| (ErrorKind::Filesystem, "waiting on child process"))?;
        stdout_thread.join().unwrap();
        stderr_thread.map(|t| t.join().unwrap());
        if let Some(code) = exit.code() {
            // Restore the terminal before exiting (process::exit skips Drop).
            drop(raw);
            std::process::exit(code);
        } else if exit.success() || exit.signal() == Some(15) {
            // Exit via SIGTERM (15) is treated as a clean shutdown.
            // NOTE(review): unlike the non-tty branch below, this branch does
            // not unmount <chroot>/proc mounted by launch-init — confirm
            // whether that is intentional.
            Ok(())
        } else {
            Err(Error::new(
                color_eyre::eyre::Report::msg(exit),
                ErrorKind::Unknown,
            ))
        }
    } else {
        let mut cmd = StdCommand::new("/usr/bin/start-container");
        cmd.arg("subcontainer").arg("launch-init");
        for env in env {
            cmd.arg("-e").arg(env);
        }
        if let Some(env_file) = env_file {
            cmd.arg("--env-file").arg(env_file);
        }
        if let Some(workdir) = workdir {
            cmd.arg("--workdir").arg(workdir);
        }
        if let Some(user) = user {
            cmd.arg("--user").arg(user);
        }
        cmd.arg(&chroot);
        cmd.args(&command);
        cmd.stdin(Stdio::piped());
        cmd.stdout(Stdio::piped());
        cmd.stderr(Stdio::piped());
        let mut child = cmd
            .spawn()
            .map_err(color_eyre::eyre::Report::msg)
            .with_ctx(|_| (ErrorKind::Filesystem, "spawning child process"))?;
        send_pid.send(child.id() as i32).unwrap_or_default();
        stdin_send
            .send(Box::new(child.stdin.take().unwrap()))
            .unwrap_or_default();
        stdout_send
            .send(Box::new(child.stdout.take().unwrap()))
            .unwrap_or_default();
        stderr_send
            .send(Box::new(child.stderr.take().unwrap()))
            .unwrap_or_default();
        // TODO: subreaping, signal handling
        let exit = child
            .wait()
            .with_ctx(|_| (ErrorKind::Filesystem, "waiting on child process"))?;
        stdout_thread.join().unwrap();
        stderr_thread.map(|t| t.join().unwrap());
        if let Some(code) = exit.code() {
            // Tear down the procfs mount created by launch-init.
            nix::mount::umount(&chroot.join("proc"))
                .with_ctx(|_| (ErrorKind::Filesystem, "umount procfs"))?;
            std::process::exit(code);
        } else if exit.success() || exit.signal() == Some(15) {
            Ok(())
        } else {
            Err(Error::new(
                color_eyre::eyre::Report::msg(exit),
                ErrorKind::Unknown,
            ))
        }
    }
}
/// Child half of `launch` (runs inside the new namespaces): mounts procfs in
/// the chroot, then either idles until a TERM-class signal (empty command) or
/// replaces itself with the requested command via `params.exec()`.
pub fn launch_init(_: ContainerCliContext, params: ExecParams) -> Result<(), Error> {
    nix::mount::mount(
        Some("proc"),
        &params.chroot.join("proc"),
        Some("proc"),
        nix::mount::MsFlags::empty(),
        None::<&str>,
    )
    .with_ctx(|_| (ErrorKind::Filesystem, "mount procfs"))?;
    if params.command.is_empty() {
        // No command: act as a bare init and block until asked to terminate.
        signal_hook::iterator::Signals::new(signal_hook::consts::TERM_SIGNALS)?
            .forever()
            .next();
        std::process::exit(0)
    } else {
        // exec() does not return on success.
        params.exec()
    }
}
/// Cheaply-clonable handle to a blocking pty so the stdin and stdout pump
/// threads can share one pty. Read/Write delegate through `&Pty`
/// (pty_process implements the I/O traits for shared references).
#[derive(Clone)]
struct ArcPty(Arc<pty_process::blocking::Pty>);
impl std::io::Write for ArcPty {
    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
        (&*self.0).write(buf)
    }
    fn flush(&mut self) -> std::io::Result<()> {
        (&*self.0).flush()
    }
}
impl std::io::Read for ArcPty {
    fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> {
        (&*self.0).read(buf)
    }
}
/// CLI entrypoint: run a command inside an already-launched container by
/// joining the namespaces of the chroot's pid 1, forwarding stdio/signals.
///
/// Mirrors `launch`, but uses `setns` (join existing namespaces) instead of
/// `unshare`, and re-invokes itself as `subcontainer exec-command`.
pub fn exec(
    _: ContainerCliContext,
    ExecParams {
        force_tty,
        force_stderr_tty,
        pty_size,
        env,
        env_file,
        workdir,
        user,
        chroot,
        command,
    }: ExecParams,
) -> Result<(), Error> {
    use std::io::Write;
    // Forward FWD_SIGNALS to the child once we learn its pid.
    let mut sig = signal_hook::iterator::Signals::new(FWD_SIGNALS)?;
    let (send_pid, recv_pid) = oneshot::channel();
    std::thread::spawn(move || {
        if let Ok(pid) = recv_pid.blocking_recv() {
            for sig in sig.forever() {
                match nix::sys::signal::kill(
                    Pid::from_raw(pid),
                    Some(nix::sys::signal::Signal::try_from(sig).unwrap()),
                ) {
                    // ESRCH: child already exited; ignore.
                    Err(Errno::ESRCH) => Ok(()),
                    a => a,
                }
                .unwrap();
            }
        }
    });
    let mut stdin = std::io::stdin();
    let stdout = std::io::stdout();
    let stderr = std::io::stderr();
    let stderr_tty = force_stderr_tty || stderr.is_terminal();
    let tty = force_tty || (stdin.is_terminal() && stdout.is_terminal());
    // Raw-mode guard; restored on drop.
    let raw = if stdin.is_terminal() && stdout.is_terminal() {
        Some(termion::get_tty()?.into_raw_mode()?)
    } else {
        None
    };
    // stdin pump thread (chunked+flushed when interactive, plain copy otherwise).
    let (stdin_send, stdin_recv) = oneshot::channel::<Box<dyn Write + Send>>();
    std::thread::spawn(move || {
        if let Ok(mut cstdin) = stdin_recv.blocking_recv() {
            if tty {
                let mut buf = [0_u8; CAP_1_KiB];
                while let Ok(n) = stdin.read(&mut buf) {
                    if n == 0 {
                        break;
                    }
                    cstdin.write_all(&buf[..n]).ok();
                    cstdin.flush().ok();
                }
            } else {
                std::io::copy(&mut stdin, &mut cstdin).unwrap();
            }
        }
    });
    // stdout pump thread, joined before exiting.
    let (stdout_send, stdout_recv) = oneshot::channel::<Box<dyn std::io::Read + Send>>();
    let stdout_thread = std::thread::spawn(move || {
        if let Ok(mut cstdout) = stdout_recv.blocking_recv() {
            if tty {
                let mut stdout = stdout.lock();
                let mut buf = [0_u8; CAP_1_KiB];
                while let Ok(n) = cstdout.read(&mut buf) {
                    if n == 0 {
                        break;
                    }
                    stdout.write_all(&buf[..n]).ok();
                    stdout.flush().ok();
                }
            } else {
                std::io::copy(&mut cstdout, &mut stdout.lock()).unwrap();
            }
        }
    });
    // stderr is only piped when not a terminal.
    let (stderr_send, stderr_recv) = oneshot::channel::<Box<dyn std::io::Read + Send>>();
    let stderr_thread = if !stderr_tty {
        Some(std::thread::spawn(move || {
            if let Ok(mut cstderr) = stderr_recv.blocking_recv() {
                std::io::copy(&mut cstderr, &mut stderr.lock()).unwrap();
            }
        }))
    } else {
        None
    };
    // Join the pid/cgroup/ipc namespaces of the container's init (pid 1 as
    // seen through the chroot's procfs).
    nix::sched::setns(
        open_file_read(chroot.join("proc/1/ns/pid"))?,
        CloneFlags::CLONE_NEWPID,
    )
    .with_ctx(|_| (ErrorKind::Filesystem, "set pid ns"))?;
    nix::sched::setns(
        open_file_read(chroot.join("proc/1/ns/cgroup"))?,
        CloneFlags::CLONE_NEWCGROUP,
    )
    .with_ctx(|_| (ErrorKind::Filesystem, "set cgroup ns"))?;
    nix::sched::setns(
        open_file_read(chroot.join("proc/1/ns/ipc"))?,
        CloneFlags::CLONE_NEWIPC,
    )
    .with_ctx(|_| (ErrorKind::Filesystem, "set ipc ns"))?;
    if tty {
        use pty_process::blocking as pty_process;
        let (pty, pts) = pty_process::open().with_kind(ErrorKind::Filesystem)?;
        let mut cmd = pty_process::Command::new("/usr/bin/start-container");
        cmd = cmd.arg("subcontainer").arg("exec-command");
        for env in env {
            cmd = cmd.arg("-e").arg(env);
        }
        if let Some(env_file) = env_file {
            cmd = cmd.arg("--env-file").arg(env_file);
        }
        if let Some(workdir) = workdir {
            cmd = cmd.arg("--workdir").arg(workdir);
        }
        if let Some(user) = user {
            cmd = cmd.arg("--user").arg(user);
        }
        cmd = cmd.arg(&chroot).args(&command);
        if !stderr_tty {
            cmd = cmd.stderr(Stdio::piped());
        }
        let mut child = cmd
            .spawn(pts)
            .map_err(color_eyre::eyre::Report::msg)
            .with_ctx(|_| (ErrorKind::Filesystem, "spawning child process"))?;
        send_pid.send(child.id() as i32).unwrap_or_default();
        // Propagate the caller's terminal dimensions to the pty.
        if let Some(pty_size) = pty_size.or_else(|| TermSize::get_current()) {
            let size = if let Some((x, y)) = pty_size.pixels {
                ::pty_process::Size::new_with_pixel(pty_size.rows, pty_size.cols, x, y)
            } else {
                ::pty_process::Size::new(pty_size.rows, pty_size.cols)
            };
            pty.resize(size).with_kind(ErrorKind::Filesystem)?;
        }
        let shared = ArcPty(Arc::new(pty));
        stdin_send
            .send(Box::new(shared.clone()))
            .unwrap_or_default();
        stdout_send
            .send(Box::new(shared.clone()))
            .unwrap_or_default();
        if let Some(stderr) = child.stderr.take() {
            stderr_send.send(Box::new(stderr)).unwrap_or_default();
        }
        let exit = child
            .wait()
            .with_ctx(|_| (ErrorKind::Filesystem, "waiting on child process"))?;
        stdout_thread.join().unwrap();
        stderr_thread.map(|t| t.join().unwrap());
        if let Some(code) = exit.code() {
            // Restore the terminal before exiting (process::exit skips Drop).
            drop(raw);
            std::process::exit(code);
        } else if exit.success() {
            Ok(())
        } else {
            Err(Error::new(
                color_eyre::eyre::Report::msg(exit),
                ErrorKind::Unknown,
            ))
        }
    } else {
        let mut cmd = StdCommand::new("/usr/bin/start-container");
        cmd.arg("subcontainer").arg("exec-command");
        for env in env {
            cmd.arg("-e").arg(env);
        }
        if let Some(env_file) = env_file {
            cmd.arg("--env-file").arg(env_file);
        }
        if let Some(workdir) = workdir {
            cmd.arg("--workdir").arg(workdir);
        }
        if let Some(user) = user {
            cmd.arg("--user").arg(user);
        }
        cmd.arg(&chroot);
        cmd.args(&command);
        cmd.stdin(Stdio::piped());
        cmd.stdout(Stdio::piped());
        cmd.stderr(Stdio::piped());
        let mut child = cmd
            .spawn()
            .map_err(color_eyre::eyre::Report::msg)
            .with_ctx(|_| (ErrorKind::Filesystem, "spawning child process"))?;
        send_pid.send(child.id() as i32).unwrap_or_default();
        stdin_send
            .send(Box::new(child.stdin.take().unwrap()))
            .unwrap_or_default();
        stdout_send
            .send(Box::new(child.stdout.take().unwrap()))
            .unwrap_or_default();
        stderr_send
            .send(Box::new(child.stderr.take().unwrap()))
            .unwrap_or_default();
        let exit = child
            .wait()
            .with_ctx(|_| (ErrorKind::Filesystem, "waiting on child process"))?;
        stdout_thread.join().unwrap();
        stderr_thread.map(|t| t.join().unwrap());
        if let Some(code) = exit.code() {
            std::process::exit(code);
        } else if exit.success() {
            Ok(())
        } else {
            Err(Error::new(
                color_eyre::eyre::Report::msg(exit),
                ErrorKind::Unknown,
            ))
        }
    }
}
pub fn exec_command(_: ContainerCliContext, params: ExecParams) -> Result<(), Error> {
params.exec()
}

View File

@@ -0,0 +1,30 @@
use crate::service::effects::ContainerCliContext;
use crate::service::effects::prelude::*;
pub fn launch(_: ContainerCliContext) -> Result<(), Error> {
Err(Error::new(
eyre!("requires feature container-runtime"),
ErrorKind::InvalidRequest,
))
}
pub fn launch_init(_: ContainerCliContext) -> Result<(), Error> {
Err(Error::new(
eyre!("requires feature container-runtime"),
ErrorKind::InvalidRequest,
))
}
pub fn exec(_: ContainerCliContext) -> Result<(), Error> {
Err(Error::new(
eyre!("requires feature container-runtime"),
ErrorKind::InvalidRequest,
))
}
pub fn exec_command(_: ContainerCliContext) -> Result<(), Error> {
Err(Error::new(
eyre!("requires feature container-runtime"),
ErrorKind::InvalidRequest,
))
}

View File

@@ -0,0 +1,40 @@
use crate::service::effects::callbacks::CallbackHandler;
use crate::service::effects::prelude::*;
use crate::service::rpc::CallbackId;
use crate::system::SmtpValue;
/// Parameters for the `getSystemSmtp` effect.
#[derive(Debug, Clone, Serialize, Deserialize, TS, Parser)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct GetSystemSmtpParams {
    /// Optional callback to register so the service is re-notified when the
    /// system SMTP configuration changes. Not settable from the CLI.
    #[arg(skip)]
    callback: Option<CallbackId>,
}
/// Effect handler: return the server's SMTP configuration (if any),
/// optionally registering a change callback for the calling service.
pub async fn get_system_smtp(
    context: EffectContext,
    GetSystemSmtpParams { callback }: GetSystemSmtpParams,
) -> Result<Option<SmtpValue>, Error> {
    let context = context.deref()?;
    // Register the callback before reading so a concurrent change is not
    // missed between the read and the registration.
    if let Some(callback) = callback {
        let callback = callback.register(&context.seed.persistent_container);
        context
            .seed
            .ctx
            .callbacks
            .add_get_system_smtp(CallbackHandler::new(&context, callback));
    }
    // Read server-info.smtp from a point-in-time snapshot of the db.
    let res = context
        .seed
        .ctx
        .db
        .peek()
        .await
        .into_public()
        .into_server_info()
        .into_smtp()
        .de()?;
    Ok(res)
}

View File

@@ -0,0 +1,46 @@
use std::path::Path;
use crate::DATA_DIR;
use crate::service::effects::prelude::*;
use crate::util::io::{delete_file, maybe_read_file_to_string, write_file_atomic};
use crate::volume::PKG_VOLUME_DIR;
/// Parameters for the `setDataVersion` effect.
#[derive(Debug, Clone, Serialize, Deserialize, TS, Parser)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct SetDataVersionParams {
    /// New version string to record; `None` clears the recorded version.
    #[ts(type = "string")]
    version: Option<String>,
}
/// Effect handler: record (or clear) the on-disk data version marker for the
/// calling package at `<DATA_DIR>/<PKG_VOLUME_DIR>/<id>/data/.version`.
#[instrument(skip(context))]
pub async fn set_data_version(
    context: EffectContext,
    SetDataVersionParams { version }: SetDataVersionParams,
) -> Result<(), Error> {
    let context = context.deref()?;
    let marker = Path::new(DATA_DIR)
        .join(PKG_VOLUME_DIR)
        .join(&context.seed.id)
        .join("data")
        .join(".version");
    match version {
        // Atomic write so readers never observe a partially-written version.
        Some(v) => write_file_atomic(marker, v.as_bytes()).await?,
        None => delete_file(marker).await?,
    }
    Ok(())
}
/// Effect handler: read the package's recorded data version marker, returning
/// `None` if the marker file does not exist.
#[instrument(skip_all)]
pub async fn get_data_version(context: EffectContext) -> Result<Option<String>, Error> {
    let context = context.deref()?;
    let marker = Path::new(DATA_DIR)
        .join(PKG_VOLUME_DIR)
        .join(&context.seed.id)
        .join("data")
        .join(".version");
    maybe_read_file_to_string(marker).await
}

1310
core/src/service/mod.rs Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,593 @@
use std::collections::{BTreeMap, BTreeSet};
use std::path::Path;
use std::sync::{Arc, Weak};
use std::time::Duration;
use futures::Future;
use futures::future::ready;
use imbl::{Vector, vector};
use imbl_value::InternedString;
use rpc_toolkit::{Empty, Server, ShutdownHandle};
use serde::de::DeserializeOwned;
use tokio::process::Command;
use tokio::sync::{Mutex, OnceCell, oneshot, watch};
use tracing::instrument;
use crate::context::RpcContext;
use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::idmapped::{IdMap, IdMapped};
use crate::disk::mount::filesystem::loop_dev::LoopDev;
use crate::disk::mount::filesystem::overlayfs::OverlayGuard;
use crate::disk::mount::filesystem::{MountType, ReadOnly};
use crate::disk::mount::guard::{GenericMountGuard, MountGuard};
use crate::lxc::{HOST_RPC_SERVER_SOCKET, LxcConfig, LxcContainer};
use crate::net::net_controller::NetService;
use crate::prelude::*;
use crate::rpc_continuations::Guid;
use crate::s9pk::S9pk;
use crate::s9pk::merkle_archive::source::FileSource;
use crate::service::effects::context::EffectContext;
use crate::service::effects::handler;
use crate::service::rpc::{
CallbackHandle, CallbackId, CallbackParams, ExitParams, InitKind, InitParams,
};
use crate::service::{ProcedureName, Service, rpc};
use crate::util::Invoke;
use crate::util::future::NonDetachingJoinHandle;
use crate::util::io::create_file;
use crate::util::rpc_client::UnixRpcClient;
use crate::volume::data_dir;
use crate::{ARCH, DATA_DIR, ImageId, PACKAGE_DATA, VolumeId};
// How long to wait for the container's RPC socket to become connectable.
const RPC_CONNECT_TIMEOUT: Duration = Duration::from_secs(30);
/// Mutable state of a running service, shared via a `watch` channel.
#[derive(Debug)]
pub struct ServiceState {
    // indicates whether the service container runtime has been initialized yet
    pub(super) rt_initialized: bool,
    // This tracks references to callbacks registered by the running service:
    pub(super) callbacks: BTreeSet<Arc<CallbackId>>,
}
impl ServiceState {
    /// Fresh state: runtime not yet initialized, no registered callbacks.
    pub fn new() -> Self {
        Self {
            rt_initialized: false,
            callbacks: Default::default(),
        }
    }
}
/// A mounted subcontainer rootfs (overlay over a base image) plus the
/// metadata used to find and filter it (e.g. for `inject`); `image_id` is
/// also used for things like env lookup.
pub struct Subcontainer {
    pub(super) name: InternedString,
    pub(super) image_id: ImageId,
    // Overlay keeps the base image's mount guard alive via the Arc.
    pub(super) overlay: OverlayGuard<Arc<MountGuard>>,
}
// @DRB On top of this we need to also have the procedures to have the effects and get the results back for them, maybe lock them to the running instance?
/// This contains the LXC container running the javascript init system
/// that can be used via a JSON RPC Client connected to a unix domain
/// socket served by the container
pub struct PersistentContainer {
    pub(super) s9pk: S9pk,
    // Taken (emptied) by `destroy`; empty thereafter.
    pub(super) lxc_container: OnceCell<LxcContainer>,
    pub(super) rpc_client: UnixRpcClient,
    // Host-side RPC server handle + shutdown; Some once `init` has run.
    pub(super) rpc_server: watch::Sender<Option<(NonDetachingJoinHandle<()>, ShutdownHandle)>>,
    // Mount of the package's javascript bundle inside the container rootfs.
    js_mount: MountGuard,
    volumes: BTreeMap<VolumeId, MountGuard>,
    assets: Vec<MountGuard>,
    images: is keyed by image id; Arc so subcontainer overlays can hold them.
    pub(super) images: BTreeMap<ImageId, Arc<MountGuard>>,
    pub(super) subcontainers: Arc<Mutex<BTreeMap<Guid, Subcontainer>>>,
    pub(super) state: Arc<watch::Sender<ServiceState>>,
    pub(super) net_service: NetService,
    // Set by `destroy` to make teardown idempotent (Drop may run after exit).
    destroyed: bool,
}
impl PersistentContainer {
    #[instrument(skip_all)]
    // Build the container for a package: create the LXC container, mount the
    // JS bundle, volumes, assets and images into its rootfs (id-shifted to the
    // container's unprivileged uid range), and register its network service.
    pub async fn new(ctx: &RpcContext, s9pk: S9pk) -> Result<Self, Error> {
        let lxc_container = ctx
            .lxc_manager
            .create(
                Some(
                    &Path::new(PACKAGE_DATA)
                        .join("logs")
                        .join(&s9pk.as_manifest().id),
                ),
                LxcConfig::default(),
            )
            .await?;
        let rpc_client = lxc_container.connect_rpc(Some(RPC_CONNECT_TIMEOUT)).await?;
        // Mount the package's javascript bundle read-only.
        let js_mount = MountGuard::mount(
            &LoopDev::from(
                &**s9pk
                    .as_archive()
                    .contents()
                    .get_path("javascript.squashfs")
                    .and_then(|f| f.as_file())
                    .or_not_found("javascript")?,
            ),
            lxc_container.rootfs_dir().join("usr/lib/startos/package"),
            ReadOnly,
        )
        .await?;
        // Legacy ("compat") packages ship an embassy.js entrypoint and get an
        // implicit "embassy" volume.
        let is_compat = tokio::fs::metadata(js_mount.path().join("embassy.js"))
            .await
            .is_ok();
        let mut volumes = BTreeMap::new();
        // TODO: remove once packages are reconverted
        let added = if is_compat {
            ["embassy".parse().unwrap()].into_iter().collect()
        } else {
            BTreeSet::default()
        };
        for volume in s9pk.as_manifest().volumes.union(&added) {
            let mountpoint = lxc_container
                .rootfs_dir()
                .join("media/startos/volumes")
                .join(volume);
            // Bind-mount with host uid 0 mapped to the container's
            // unprivileged range (100000..100000+65536).
            let mount = MountGuard::mount(
                &IdMapped::new(
                    Bind::new(data_dir(DATA_DIR, &s9pk.as_manifest().id, volume)),
                    vec![IdMap {
                        from_id: 0,
                        to_id: 100000,
                        range: 65536,
                    }],
                ),
                mountpoint,
                MountType::ReadWrite,
            )
            .await?;
            volumes.insert(volume.clone(), mount);
        }
        let mountpoint = lxc_container.rootfs_dir().join("media/startos/assets");
        let assets = if let Some(sqfs) = s9pk
            .as_archive()
            .contents()
            .get_path("assets.squashfs")
            .and_then(|e| e.as_file())
        {
            vec![
                MountGuard::mount(
                    &IdMapped::new(
                        LoopDev::from(&**sqfs),
                        vec![IdMap {
                            from_id: 0,
                            to_id: 100000,
                            range: 65536,
                        }],
                    ),
                    mountpoint,
                    MountType::ReadWrite,
                )
                .await?,
            ]
        } else if let Some(dir) = s9pk
            .as_archive()
            .contents()
            .get_path("assets")
            .and_then(|e| e.as_directory())
        {
            // backwards compatibility for alpha s9pks - remove eventually
            let mut assets = Vec::new();
            for (asset, entry) in &**dir {
                let mountpoint = lxc_container
                    .rootfs_dir()
                    .join("media/startos/assets")
                    .join(Path::new(asset).with_extension(""));
                let Some(sqfs) = entry.as_file() else {
                    continue;
                };
                assets.push(
                    MountGuard::mount(
                        &IdMapped::new(
                            LoopDev::from(&**sqfs),
                            vec![IdMap {
                                from_id: 0,
                                to_id: 100000,
                                range: 65536,
                            }],
                        ),
                        mountpoint,
                        MountType::ReadWrite,
                    )
                    .await?,
                );
            }
            assets
        } else {
            Vec::new()
        };
        let mut images = BTreeMap::new();
        let image_path = lxc_container.rootfs_dir().join("media/startos/images");
        tokio::fs::create_dir_all(&image_path).await?;
        for (image, config) in &s9pk.as_manifest().images {
            // Prefer the native-arch squashfs; fall back to the configured
            // emulation arch when the native one is absent.
            let mut arch = ARCH;
            let mut sqfs_path = Path::new("images")
                .join(arch)
                .join(image)
                .with_extension("squashfs");
            if !s9pk
                .as_archive()
                .contents()
                .get_path(&sqfs_path)
                .and_then(|e| e.as_file())
                .is_some()
            {
                arch = if let Some(arch) = config.emulate_missing_as.as_deref() {
                    arch
                } else {
                    continue;
                };
                sqfs_path = Path::new("images")
                    .join(arch)
                    .join(image)
                    .with_extension("squashfs");
            }
            let sqfs = s9pk
                .as_archive()
                .contents()
                .get_path(&sqfs_path)
                .and_then(|e| e.as_file())
                .or_not_found(sqfs_path.display())?;
            let mountpoint = image_path.join(image);
            images.insert(
                image.clone(),
                Arc::new(
                    MountGuard::mount(
                        &IdMapped::new(
                            LoopDev::from(&**sqfs),
                            vec![IdMap {
                                from_id: 0,
                                to_id: 100000,
                                range: 65536,
                            }],
                        ),
                        &mountpoint,
                        ReadOnly,
                    )
                    .await?,
                ),
            );
            // Copy the image's sidecar .env / .json files next to the mount.
            let env_filename = Path::new(image.as_ref()).with_extension("env");
            if let Some(env) = s9pk
                .as_archive()
                .contents()
                .get_path(Path::new("images").join(arch).join(&env_filename))
                .and_then(|e| e.as_file())
            {
                env.copy(&mut create_file(image_path.join(&env_filename)).await?)
                    .await?;
            }
            let json_filename = Path::new(image.as_ref()).with_extension("json");
            if let Some(json) = s9pk
                .as_archive()
                .contents()
                .get_path(Path::new("images").join(arch).join(&json_filename))
                .and_then(|e| e.as_file())
            {
                json.copy(&mut create_file(image_path.join(&json_filename)).await?)
                    .await?;
            }
        }
        // Register networking for the container and notify any interested
        // container-ip callbacks of the new address.
        let ip = lxc_container.ip().await?;
        let net_service = ctx
            .net_controller
            .create_service(s9pk.as_manifest().id.clone(), ip)
            .await?;
        if let Some(callbacks) = ctx.callbacks.get_container_ip(&s9pk.as_manifest().id) {
            callbacks
                .call(vector![Value::String(Arc::new(ip.to_string()))])
                .await?;
        }
        Ok(Self {
            s9pk,
            lxc_container: OnceCell::new_with(Some(lxc_container)),
            rpc_client,
            rpc_server: watch::channel(None).0,
            // procedures: Default::default(),
            js_mount,
            volumes,
            assets,
            images,
            subcontainers: Arc::new(Mutex::new(BTreeMap::new())),
            state: Arc::new(watch::channel(ServiceState::new()).0),
            net_service,
            destroyed: false,
        })
    }
    #[instrument(skip_all)]
    // Bind-mount `backup_path` at <rootfs>/media/startos/backup, chowning both
    // ends to the container's root (100000) so the service can access them.
    // Errors with Incoherent if the container was already destroyed.
    pub async fn mount_backup(
        &self,
        backup_path: impl AsRef<Path>,
        mount_type: MountType,
    ) -> Result<MountGuard, Error> {
        let backup_path = backup_path.as_ref();
        let mountpoint = self
            .lxc_container
            .get()
            .ok_or_else(|| {
                Error::new(
                    eyre!("PersistentContainer has been destroyed"),
                    ErrorKind::Incoherent,
                )
            })?
            .rootfs_dir()
            .join("media/startos/backup");
        tokio::fs::create_dir_all(&mountpoint).await?;
        Command::new("chown")
            .arg("100000:100000")
            .arg(mountpoint.as_os_str())
            .invoke(ErrorKind::Filesystem)
            .await?;
        tokio::fs::create_dir_all(backup_path).await?;
        Command::new("chown")
            .arg("100000:100000")
            .arg(backup_path)
            .invoke(ErrorKind::Filesystem)
            .await?;
        let bind = Bind::new(backup_path);
        MountGuard::mount(&bind, &mountpoint, mount_type).await
    }
    #[instrument(skip_all)]
    // Start the host-side effects RPC server on the container's unix socket,
    // then send `init` to the container runtime. Fails with InvalidRequest if
    // the container was already initialized.
    pub async fn init(
        &self,
        seed: Weak<Service>,
        procedure_id: Guid,
        kind: Option<InitKind>,
    ) -> Result<(), Error> {
        let socket_server_context = EffectContext::new(seed);
        let server = Server::new(move || ready(Ok(socket_server_context.clone())), handler());
        let path = self
            .lxc_container
            .get()
            .ok_or_else(|| {
                Error::new(
                    eyre!("PersistentContainer has been destroyed"),
                    ErrorKind::Incoherent,
                )
            })?
            .rpc_dir()
            .join(HOST_RPC_SERVER_SOCKET);
        // The server future runs in its own task; the shutdown handle (or the
        // startup error) is sent back over `send`.
        let (send, recv) = oneshot::channel();
        let handle = NonDetachingJoinHandle::from(tokio::spawn(async move {
            let chown_status = async {
                let res = server.run_unix(&path, |err| {
                    tracing::error!("error on unix socket {}: {err}", path.display())
                })?;
                // The socket must be owned by the container's root (100000)
                // for the runtime inside to connect.
                Command::new("chown")
                    .arg("100000:100000")
                    .arg(&path)
                    .invoke(ErrorKind::Filesystem)
                    .await?;
                Ok::<_, Error>(res)
            };
            let (shutdown, fut) = match chown_status.await {
                Ok((shutdown, fut)) => (Ok(shutdown), Some(fut)),
                Err(e) => (Err(e), None),
            };
            if send.send(shutdown).is_err() {
                panic!("failed to send shutdown handle");
            }
            if let Some(fut) = fut {
                fut.await;
            }
        }));
        let shutdown = recv.await.map_err(|_| {
            Error::new(
                eyre!("unix socket server thread panicked"),
                ErrorKind::Unknown,
            )
        })??;
        if self
            .rpc_server
            .send_replace(Some((handle, shutdown)))
            .is_some()
        {
            return Err(Error::new(
                eyre!("PersistentContainer already initialized"),
                ErrorKind::InvalidRequest,
            ));
        }
        self.rpc_client
            .request(
                rpc::Init,
                InitParams {
                    id: procedure_id,
                    kind,
                },
            )
            .await?;
        self.state.send_modify(|s| s.rt_initialized = true);
        Ok(())
    }
    #[instrument(skip_all)]
    // Take ownership of all resources and return a future that tears them
    // down (exit RPC, server shutdown, unmounts, LXC exit), collecting rather
    // than short-circuiting on errors. Returns None if already destroyed;
    // sync so it is callable from Drop.
    fn destroy(
        &mut self,
        uninit: Option<ExitParams>,
    ) -> Option<impl Future<Output = Result<(), Error>> + 'static> {
        if self.destroyed {
            return None;
        }
        let version = self.s9pk.as_manifest().version.clone();
        let rpc_client = self.rpc_client.clone();
        let rpc_server = self.rpc_server.send_replace(None);
        let js_mount = self.js_mount.take();
        let volumes = std::mem::take(&mut self.volumes);
        let assets = std::mem::take(&mut self.assets);
        let images = std::mem::take(&mut self.images);
        let subcontainers = self.subcontainers.clone();
        let lxc_container = self.lxc_container.take();
        self.destroyed = true;
        Some(async move {
            let mut errs = ErrorCollection::new();
            if let Some((hdl, shutdown)) = rpc_server {
                // Ask the runtime to exit cleanly before shutting down the
                // effects server; default exit targets the current version.
                errs.handle(
                    rpc_client
                        .request(
                            rpc::Exit,
                            uninit.unwrap_or_else(|| ExitParams::target_version(&*version)),
                        )
                        .await,
                );
                shutdown.shutdown();
                errs.handle(hdl.await.with_kind(ErrorKind::Cancelled));
            }
            // Unmount everything; subcontainer overlays must go before the
            // base image mounts they reference.
            for (_, volume) in volumes {
                errs.handle(volume.unmount(true).await);
            }
            for assets in assets {
                errs.handle(assets.unmount(true).await);
            }
            for (_, overlay) in std::mem::take(&mut *subcontainers.lock().await) {
                errs.handle(overlay.overlay.unmount(true).await);
            }
            for (_, images) in images {
                errs.handle(images.unmount().await);
            }
            errs.handle(js_mount.unmount(true).await);
            if let Some(lxc_container) = lxc_container {
                errs.handle(lxc_container.exit().await);
            }
            errs.into_result()
        })
    }
#[instrument(skip_all)]
pub async fn exit(mut self, uninit: Option<ExitParams>) -> Result<(), Error> {
if let Some(destroy) = self.destroy(uninit) {
destroy.await?;
}
tracing::info!("Service for {} exited", self.s9pk.as_manifest().id);
Ok(())
}
#[instrument(skip_all)]
pub async fn start(&self) -> Result<(), Error> {
self.rpc_client.request(rpc::Start, Empty {}).await?;
Ok(())
}
#[instrument(skip_all)]
pub async fn stop(&self) -> Result<(), Error> {
self.rpc_client.request(rpc::Stop, Empty {}).await?;
Ok(())
}
#[instrument(skip_all)]
pub async fn execute<O>(
&self,
id: Guid,
name: ProcedureName,
input: Value,
timeout: Option<Duration>,
) -> Result<O, Error>
where
O: DeserializeOwned,
{
self._execute(id, name, input, timeout)
.await
.and_then(from_value)
}
#[instrument(skip_all)]
pub async fn sanboxed<O>(
&self,
id: Guid,
name: ProcedureName,
input: Value,
timeout: Option<Duration>,
) -> Result<O, Error>
where
O: DeserializeOwned,
{
self._sandboxed(id, name, input, timeout)
.await
.and_then(from_value)
}
    #[instrument(skip_all)]
    // Fire a registered callback into the runtime. The registration is
    // consumed atomically inside the state lock (a callback fires at most
    // once); if the handle is stale, this is a no-op.
    pub async fn callback(&self, handle: CallbackHandle, args: Vector<Value>) -> Result<(), Error> {
        let mut params = None;
        self.state.send_if_modified(|s| {
            params = handle.params(&mut s.callbacks, args);
            // Only notify watchers if a registration was actually removed.
            params.is_some()
        });
        if let Some(params) = params {
            self._callback(params).await?;
        }
        Ok(())
    }
#[instrument(skip_all)]
async fn _execute(
&self,
id: Guid,
name: ProcedureName,
input: Value,
timeout: Option<Duration>,
) -> Result<Value, Error> {
let fut = self.rpc_client.request(
rpc::Execute,
rpc::ExecuteParams::new(id, name, input, timeout),
);
Ok(if let Some(timeout) = timeout {
tokio::time::timeout(timeout, fut)
.await
.with_kind(ErrorKind::Timeout)??
} else {
fut.await?
})
}
#[instrument(skip_all)]
async fn _sandboxed(
&self,
id: Guid,
name: ProcedureName,
input: Value,
timeout: Option<Duration>,
) -> Result<Value, Error> {
let fut = self.rpc_client.request(
rpc::Sandbox,
rpc::ExecuteParams::new(id, name, input, timeout),
);
Ok(if let Some(timeout) = timeout {
tokio::time::timeout(timeout, fut)
.await
.with_kind(ErrorKind::Timeout)??
} else {
fut.await?
})
}
#[instrument(skip_all)]
async fn _callback(&self, params: CallbackParams) -> Result<(), Error> {
self.rpc_client.notify(rpc::Callback, params).await?;
Ok(())
}
}
impl Drop for PersistentContainer {
    // Best-effort teardown if `exit` was never called; errors are logged
    // since Drop cannot fail.
    // NOTE(review): tokio::spawn panics if no runtime is active — presumably
    // this type is only dropped inside the runtime; confirm.
    fn drop(&mut self) {
        if let Some(destroy) = self.destroy(None) {
            tokio::spawn(async move { destroy.await.log_err() });
        }
    }
}

View File

@@ -0,0 +1,20 @@
use serde::{Deserialize, Serialize};
use crate::ActionId;
/// A procedure exposed by the package's javascript runtime, addressed by the
/// path returned from [`ProcedureName::js_function_name`].
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ProcedureName {
    CreateBackup,
    GetActionInput(ActionId),
    RunAction(ActionId),
}
impl ProcedureName {
    /// Path of the JS function implementing this procedure.
    pub fn js_function_name(&self) -> String {
        match self {
            Self::CreateBackup => "/backup/create".to_string(),
            Self::GetActionInput(id) => format!("/actions/{id}/getInput"),
            Self::RunAction(id) => format!("/actions/{id}/run"),
        }
    }
}

280
core/src/service/rpc.rs Normal file
View File

@@ -0,0 +1,280 @@
use std::collections::BTreeSet;
use std::str::FromStr;
use std::sync::{Arc, Weak};
use std::time::Duration;
use clap::builder::ValueParserFactory;
use exver::{ExtendedVersion, VersionRange};
use imbl::Vector;
use imbl_value::{InternedString, Value};
use rpc_toolkit::Empty;
use rpc_toolkit::yajrc::RpcMethod;
use ts_rs::TS;
use crate::prelude::*;
use crate::rpc_continuations::Guid;
use crate::service::ProcedureName;
use crate::service::persistent_container::PersistentContainer;
use crate::util::{FromStrParser, Never};
/// Why the service is being initialized, from the runtime's perspective.
#[derive(Clone, serde::Deserialize, serde::Serialize, TS)]
#[serde(rename_all = "kebab-case")]
pub enum InitKind {
    Install,
    Update,
    Restore,
}
/// Parameters for the `init` RPC sent to the container runtime.
#[derive(Clone, serde::Deserialize, serde::Serialize, TS)]
#[serde(rename_all = "camelCase")]
pub struct InitParams {
    /// Id of this invocation.
    pub id: Guid,
    /// `None` on an ordinary startup (not an install/update/restore).
    pub kind: Option<InitKind>,
}
#[derive(Clone)]
pub struct Init;
impl RpcMethod for Init {
type Params = InitParams;
type Response = ();
fn as_str<'a>(&'a self) -> &'a str {
"init"
}
}
impl serde::Serialize for Init {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_str(self.as_str())
}
}
#[derive(Clone)]
pub struct Start;
impl RpcMethod for Start {
type Params = Empty;
type Response = ();
fn as_str<'a>(&'a self) -> &'a str {
"start"
}
}
impl serde::Serialize for Start {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_str(self.as_str())
}
}
#[derive(Clone)]
pub struct Stop;
impl RpcMethod for Stop {
type Params = Empty;
type Response = ();
fn as_str<'a>(&'a self) -> &'a str {
"stop"
}
}
impl serde::Serialize for Stop {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_str(self.as_str())
}
}
/// Parameters for the `exit` RPC sent to the container runtime.
#[derive(Clone, serde::Deserialize, serde::Serialize, TS)]
#[serde(rename_all = "camelCase")]
pub struct ExitParams {
    id: Guid,
    /// VersionRange or ExtendedVersion
    // `None` signals an uninstall rather than a shutdown/update.
    #[ts(type = "string | null")]
    target: Option<InternedString>,
}
impl ExitParams {
    /// Exit targeting a specific version (shutdown/restart at that version).
    pub fn target_version(version: &ExtendedVersion) -> Self {
        Self {
            id: Guid::new(),
            target: Some(InternedString::from_display(version)),
        }
    }
    /// Exit targeting a version range (e.g. ahead of an update).
    pub fn target_range(range: &VersionRange) -> Self {
        Self {
            id: Guid::new(),
            target: Some(InternedString::from_display(range)),
        }
    }
    /// Exit because the package is being uninstalled (no target).
    pub fn uninstall() -> Self {
        Self {
            id: Guid::new(),
            target: None,
        }
    }
    /// True iff this exit represents an uninstall.
    pub fn is_uninstall(&self) -> bool {
        self.target.is_none()
    }
}
#[derive(Clone)]
pub struct Exit;
impl RpcMethod for Exit {
type Params = ExitParams;
type Response = ();
fn as_str<'a>(&'a self) -> &'a str {
"exit"
}
}
impl serde::Serialize for Exit {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_str(self.as_str())
}
}
/// Parameters shared by the `execute` and `sandbox` RPCs.
#[derive(Clone, serde::Deserialize, serde::Serialize, TS)]
pub struct ExecuteParams {
    id: Guid,
    // JS function path, e.g. "/actions/<id>/run".
    procedure: String,
    #[ts(type = "any")]
    input: Value,
    // Timeout in milliseconds; the host also enforces it on its side.
    timeout: Option<u128>,
}
impl ExecuteParams {
    /// Build params from a [`ProcedureName`], converting the timeout to ms.
    pub fn new(
        id: Guid,
        procedure: ProcedureName,
        input: Value,
        timeout: Option<Duration>,
    ) -> Self {
        Self {
            id,
            procedure: procedure.js_function_name(),
            input,
            timeout: timeout.map(|d| d.as_millis()),
        }
    }
}
#[derive(Clone)]
pub struct Execute;
impl RpcMethod for Execute {
type Params = ExecuteParams;
type Response = Value;
fn as_str<'a>(&'a self) -> &'a str {
"execute"
}
}
impl serde::Serialize for Execute {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_str(self.as_str())
}
}
/// The `sandbox` RPC method: same parameters and response shape as
/// [`Execute`], but the runtime runs the procedure in its sandbox mode.
#[derive(Clone)]
pub struct Sandbox;
impl RpcMethod for Sandbox {
    type Params = ExecuteParams;
    type Response = Value;
    fn as_str<'a>(&'a self) -> &'a str {
        "sandbox"
    }
}
// Serializes as the bare method-name string, as the RPC wire format requires.
impl serde::Serialize for Sandbox {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serializer.serialize_str(self.as_str())
    }
}
/// Identifier for a callback registered by the service runtime; serialized
/// over the wire as a bare number.
#[derive(
    Clone, Copy, Debug, serde::Deserialize, serde::Serialize, TS, PartialEq, Eq, PartialOrd, Ord,
)]
#[ts(type = "number")]
pub struct CallbackId(u64);
impl CallbackId {
    /// Register this callback id with the container, storing the strong `Arc`
    /// in the container's state and returning a weak handle. The handle stays
    /// "active" only while the container retains that strong reference.
    pub fn register(self, container: &PersistentContainer) -> CallbackHandle {
        let strong = Arc::new(self);
        let weak = Arc::downgrade(&strong);
        let state = &container.state;
        state.send_if_modified(move |watched| watched.callbacks.insert(strong));
        CallbackHandle(weak)
    }
}
impl FromStr for CallbackId {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
u64::from_str(s).map_err(Error::from).map(Self)
}
}
// Lets `CallbackId` be used directly as a clap argument, parsed via `FromStr`.
impl ValueParserFactory for CallbackId {
    type Parser = FromStrParser<Self>;
    fn value_parser() -> Self::Parser {
        FromStrParser::new()
    }
}
/// Weak handle to a registered [`CallbackId`]; becomes inert once the
/// container drops its strong reference.
pub struct CallbackHandle(Weak<CallbackId>);
impl CallbackHandle {
    /// `true` while the container still holds the strong side of this handle.
    pub fn is_active(&self) -> bool {
        self.0.strong_count() > 0
    }
    /// Consume the handle, producing RPC params iff the callback is still
    /// registered. On success the id is removed from `registered`, so each
    /// registration fires at most once.
    pub fn params(
        self,
        registered: &mut BTreeSet<Arc<CallbackId>>,
        args: Vector<Value>,
    ) -> Option<CallbackParams> {
        if let Some(id) = self.0.upgrade() {
            if let Some(strong) = registered.get(&id) {
                // Compare by pointer identity, not just value: a numerically
                // equal id from a different registration must not match.
                if Arc::ptr_eq(strong, &id) {
                    registered.remove(&id);
                    return Some(CallbackParams::new(&*id, args));
                }
            }
        }
        None
    }
    /// Move the handle out, leaving behind a dangling `Weak` (never active).
    pub fn take(&mut self) -> Self {
        Self(std::mem::take(&mut self.0))
    }
}
/// Wire parameters for invoking a registered callback: the numeric callback
/// id plus its positional arguments.
#[derive(Clone, serde::Deserialize, serde::Serialize, TS)]
pub struct CallbackParams {
    id: u64,
    #[ts(type = "any[]")]
    args: Vector<Value>,
}
impl CallbackParams {
    /// Pair a callback id with the arguments it should be invoked with.
    fn new(id: &CallbackId, args: Vector<Value>) -> Self {
        Self { id: id.0, args }
    }
}
/// The `callback` RPC method: invoke a previously registered callback. Its
/// response type is `Never` — no reply is expected for this call.
#[derive(Clone)]
pub struct Callback;
impl RpcMethod for Callback {
    type Params = CallbackParams;
    type Response = Never;
    fn as_str<'a>(&'a self) -> &'a str {
        "callback"
    }
}
// Serializes as the bare method-name string, as the RPC wire format requires.
impl serde::Serialize for Callback {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serializer.serialize_str(self.as_str())
    }
}

View File

@@ -0,0 +1,138 @@
use std::sync::Arc;
use std::time::Duration;
use imbl::vector;
use patch_db::TypedDbWatch;
use super::ServiceActorSeed;
use crate::prelude::*;
use crate::service::SYNC_RETRY_COOLDOWN_SECONDS;
use crate::service::transition::{Transition, TransitionKind};
use crate::status::{DesiredStatus, StatusInfo};
use crate::util::actor::Actor;
use crate::util::actor::background::BackgroundJobQueue;
/// Actor that drives a single service toward its desired status; a cheaply
/// cloneable handle around the shared [`ServiceActorSeed`].
#[derive(Clone)]
pub(super) struct ServiceActor(pub(super) Arc<ServiceActorSeed>);
impl Actor for ServiceActor {
    /// Spawn the background synchronization loop for this service: wait for
    /// the container runtime to initialize, then watch the service's
    /// `statusInfo` in the db and reconcile actual state with desired state.
    fn init(&mut self, jobs: &BackgroundJobQueue) {
        let seed = self.0.clone();
        let mut state = seed.persistent_container.state.subscribe();
        // Resolves once the runtime reports initialized; errs only if the
        // state channel closes first.
        let initialized = async move { state.wait_for(|s| s.rt_initialized).await.map(|_| ()) };
        jobs.add_job(async move {
            if initialized.await.is_err() {
                return;
            }
            let mut watch = seed
                .ctx
                .db
                .watch(
                    format!("/public/packageData/{}/statusInfo", seed.id)
                        .parse()
                        .unwrap(),
                ) // TODO: typed pointers
                .await
                .typed::<StatusInfo>();
            // The transition (start/stop/backup) currently in flight, if any.
            let mut transition: Option<Transition> = None;
            loop {
                let res = service_actor_loop(&mut watch, &seed, &mut transition).await;
                // Waits for the next db change; on a sync error, logs it and
                // rate-limits the retry to once per cooldown period.
                let wait = async {
                    if let Err(e) = async {
                        res?;
                        watch.changed().await?;
                        Ok::<_, Error>(())
                    }
                    .await
                    {
                        tracing::error!("error synchronizing state of service: {e}");
                        tracing::debug!("{e:?}");
                        tracing::error!("Retrying in {}s...", SYNC_RETRY_COOLDOWN_SECONDS);
                        tokio::time::timeout(
                            Duration::from_secs(SYNC_RETRY_COOLDOWN_SECONDS),
                            async {
                                watch.changed().await.log_err();
                            },
                        )
                        .await
                        .ok();
                    }
                };
                tokio::pin!(wait);
                // Drives the in-flight transition (if any) to completion;
                // pends forever when there is nothing to drive.
                let transition_handler = async {
                    match &mut transition {
                        Some(Transition { future, .. }) => {
                            // `err` is true when the transition failed.
                            let err = future.await.log_err().is_none(); // TODO: ideally this error should be sent to service logs
                            transition.take();
                            if err {
                                // Back off before the next reconciliation.
                                tokio::time::sleep(Duration::from_secs(
                                    SYNC_RETRY_COOLDOWN_SECONDS,
                                ))
                                .await;
                            } else {
                                futures::future::pending().await
                            }
                        }
                        _ => futures::future::pending().await,
                    }
                };
                tokio::pin!(transition_handler);
                // Race the two: loop again as soon as either the db changes
                // (or errors out) or the transition finishes.
                futures::future::select(wait, transition_handler).await;
            }
        });
    }
}
/// One reconciliation pass: read the service's current [`StatusInfo`], notify
/// any registered status callbacks, and (re)launch the transition needed to
/// move actual state toward desired state. An in-flight transition of the
/// right kind is kept; any other in-flight transition is dropped (cancelled)
/// and replaced.
async fn service_actor_loop<'a>(
    watch: &mut TypedDbWatch<StatusInfo>,
    seed: &'a Arc<ServiceActorSeed>,
    transition: &mut Option<Transition<'a>>,
) -> Result<(), Error> {
    let id = &seed.id;
    let status_model = watch.peek_and_mark_seen()?;
    let status = status_model.de()?;
    // Fan the new status out to any callbacks registered for this package.
    if let Some(callbacks) = seed.ctx.callbacks.get_status(id) {
        callbacks
            .call(vector![patch_db::ModelExt::into_value(status_model)])
            .await?;
    }
    match status {
        // Should be running (or restarting) but is not started => start it.
        StatusInfo {
            desired: DesiredStatus::Running | DesiredStatus::Restarting,
            started: None,
            ..
        } => {
            let task = transition
                .take()
                .filter(|task| task.kind == TransitionKind::Starting);
            *transition = task.or_else(|| Some(seed.start()));
        }
        // Should be stopped (including the stop half of a restart, or
        // quiescing before a backup) but is currently started => stop it.
        StatusInfo {
            desired:
                DesiredStatus::Stopped | DesiredStatus::Restarting | DesiredStatus::BackingUp { .. },
            started: Some(_),
            ..
        } => {
            let task = transition
                .take()
                .filter(|task| task.kind == TransitionKind::Stopping);
            *transition = task.or_else(|| Some(seed.stop()));
        }
        // Stopped and a backup is requested => run the backup transition.
        StatusInfo {
            desired: DesiredStatus::BackingUp { .. },
            started: None,
            ..
        } => {
            let task = transition
                .take()
                .filter(|task| task.kind == TransitionKind::BackingUp);
            *transition = task.or_else(|| Some(seed.backup()));
        }
        // Actual state already matches desired state: nothing to do.
        _ => (),
    };
    Ok(())
}

View File

@@ -0,0 +1,555 @@
use std::collections::BTreeMap;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use color_eyre::eyre::eyre;
use exver::VersionRange;
use futures::future::{BoxFuture, Fuse};
use futures::stream::FuturesUnordered;
use futures::{Future, FutureExt, StreamExt, TryFutureExt};
use imbl::OrdMap;
use tokio::sync::{OwnedRwLockReadGuard, OwnedRwLockWriteGuard, RwLock, oneshot};
use tracing::instrument;
use url::Url;
use crate::DATA_DIR;
use crate::context::RpcContext;
use crate::db::model::package::{
InstallingInfo, InstallingState, PackageDataEntry, PackageState, UpdatingState,
};
use crate::disk::mount::guard::GenericMountGuard;
use crate::error::ErrorData;
use crate::install::PKG_ARCHIVE_DIR;
use crate::notifications::{NotificationLevel, notify};
use crate::prelude::*;
use crate::progress::{FullProgressTracker, PhaseProgressTrackerHandle, ProgressTrackerWriter};
use crate::s9pk::S9pk;
use crate::s9pk::manifest::PackageId;
use crate::s9pk::merkle_archive::source::FileSource;
use crate::service::rpc::{ExitParams, InitKind};
use crate::service::{LoadDisposition, Service, ServiceRef};
use crate::sign::commitment::merkle_archive::MerkleArchiveCommitment;
use crate::status::{DesiredStatus, StatusInfo};
use crate::util::future::NonDetachingJoinHandle;
use crate::util::serde::{Base32, Pem};
use crate::util::sync::SyncMutex;
/// Future performing the download phase of an install; resolves to the future
/// that performs the finalization phase.
pub type DownloadInstallFuture = BoxFuture<'static, Result<InstallFuture, Error>>;
/// Future performing the finalization phase of an install.
pub type InstallFuture = BoxFuture<'static, Result<(), Error>>;
/// Progress handles threaded into `Service::install` so installation can
/// report phase progress back to the db.
pub struct InstallProgressHandles {
    pub finalization_progress: PhaseProgressTrackerHandle,
    pub progress: FullProgressTracker,
}
/// Temporary location for an s9pk while it is still being written, named by
/// the lowercase base32 of the archive's root sighash.
fn s9pk_download_path(commitment: &MerkleArchiveCommitment) -> PathBuf {
    let name = Base32(commitment.root_sighash.0).to_lower_string();
    let dir = Path::new(DATA_DIR).join(PKG_ARCHIVE_DIR).join("downloading");
    dir.join(name).with_extension("s9pk")
}
/// Final location for a fully-installed s9pk, named by the lowercase base32
/// of the archive's root sighash.
fn s9pk_installed_path(commitment: &MerkleArchiveCommitment) -> PathBuf {
    let name = Base32(commitment.root_sighash.0).to_lower_string();
    let dir = Path::new(DATA_DIR).join(PKG_ARCHIVE_DIR).join("installed");
    dir.join(name).with_extension("s9pk")
}
/// This is the structure to contain all the services
// Maps each package id to a slot holding that package's running service (if
// any); the per-slot `RwLock` serializes load/install/uninstall against readers.
#[derive(Default)]
pub struct ServiceMap(SyncMutex<OrdMap<PackageId, Arc<RwLock<Option<ServiceRef>>>>>);
impl ServiceMap {
    /// Fetch (creating on first use) the lock slot for `id`.
    fn entry(&self, id: &PackageId) -> Arc<RwLock<Option<ServiceRef>>> {
        self.0.mutate(|lock| {
            lock.entry(id.clone())
                .or_insert_with(|| Arc::new(RwLock::new(None)))
                .clone()
        })
    }
    /// Non-blocking read access to the service slot; `None` if it is
    /// currently write-locked (e.g. mid-install).
    #[instrument(skip_all)]
    pub fn try_get(&self, id: &PackageId) -> Option<OwnedRwLockReadGuard<Option<ServiceRef>>> {
        self.entry(id).try_read_owned().ok()
    }
    /// Read access to the service slot, waiting for any writer to finish.
    #[instrument(skip_all)]
    pub async fn get(&self, id: &PackageId) -> OwnedRwLockReadGuard<Option<ServiceRef>> {
        self.entry(id).read_owned().await
    }
    /// Exclusive access to the service slot.
    #[instrument(skip_all)]
    pub async fn get_mut(&self, id: &PackageId) -> OwnedRwLockWriteGuard<Option<ServiceRef>> {
        self.entry(id).write_owned().await
    }
    /// Load every package recorded in the db as a service, in parallel.
    /// Individual load failures are logged but do not abort startup.
    #[instrument(skip_all)]
    pub async fn init(&self, ctx: &RpcContext) -> Result<(), Error> {
        let ids = ctx.db.peek().await.as_public().as_package_data().keys()?;
        let mut jobs = FuturesUnordered::new();
        for id in &ids {
            jobs.push(self.load(ctx, id, LoadDisposition::Retry));
        }
        while let Some(res) = jobs.next().await {
            if let Err(e) = res {
                tracing::error!("Error loading installed package as service: {e}");
                tracing::debug!("{e:?}");
            }
        }
        Ok(())
    }
    /// (Re)load the service for `id`: shut down any existing instance, then
    /// construct a fresh [`Service`] and record success (clearing any prior
    /// error) or the load failure in the package's `statusInfo`.
    #[instrument(skip_all)]
    pub async fn load(
        &self,
        ctx: &RpcContext,
        id: &PackageId,
        disposition: LoadDisposition,
    ) -> Result<(), Error> {
        let mut shutdown_err = Ok(());
        let mut service = self.get_mut(id).await;
        if let Some(service) = service.take() {
            shutdown_err = service.shutdown(None).await;
        }
        match Service::load(ctx, id, disposition).await {
            Ok(s) => {
                // Clear any previously recorded load error.
                ctx.db
                    .mutate(|db| {
                        if let Some(pde) = db.as_public_mut().as_package_data_mut().as_idx_mut(id) {
                            pde.as_status_info_mut().as_error_mut().ser(&None)?;
                        }
                        Ok(())
                    })
                    .await
                    .result?;
                *service = s.into();
            }
            Err(e) => {
                tracing::error!("Error loading service: {e}");
                tracing::debug!("{e:?}");
                let e = ErrorData::from(e);
                ctx.db
                    .mutate(|db| {
                        if let Some(pde) = db.as_public_mut().as_package_data_mut().as_idx_mut(id) {
                            pde.as_status_info_mut().as_error_mut().ser(&Some(e))?;
                        }
                        Ok(())
                    })
                    .await
                    .result?;
            }
        }
        // Surface any shutdown failure only after the reload attempt.
        shutdown_err?;
        Ok(())
    }
    /// Install, update, or restore a package from an s9pk.
    ///
    /// Returns a two-stage future: awaiting the outer future performs the
    /// download/unpack phase; awaiting the inner future it yields performs
    /// finalization (migrating/retiring any previous version, initializing
    /// the new [`Service`], and publishing it in the map). `recovery_source`
    /// is `Some` when restoring from a backup.
    #[instrument(skip_all)]
    pub async fn install<F, Fut, S: FileSource + Clone>(
        &self,
        ctx: RpcContext,
        s9pk: F,
        registry: Option<Url>,
        recovery_source: Option<impl GenericMountGuard>,
        progress: Option<FullProgressTracker>,
    ) -> Result<DownloadInstallFuture, Error>
    where
        F: FnOnce() -> Fut,
        Fut: Future<Output = Result<S9pk<S>, Error>>,
        S: FileSource + Clone,
    {
        let progress = progress.unwrap_or_else(|| FullProgressTracker::new());
        let mut validate_progress = progress.add_phase("Validating Headers".into(), Some(1));
        let mut unpack_progress = progress.add_phase("Unpacking".into(), Some(100));
        let mut s9pk = s9pk().await?;
        validate_progress.start();
        s9pk.validate_and_filter(ctx.s9pk_arch)?;
        validate_progress.complete();
        let commitment = s9pk.as_archive().commitment().await?;
        let mut installed_path = s9pk_installed_path(&commitment);
        while tokio::fs::metadata(&installed_path).await.is_ok() {
            let prev = installed_path.file_stem().unwrap_or_default();
            installed_path.set_file_name(prev.to_string_lossy().into_owned() + "x.s9pk");
            // append an x if already exists to avoid reference counting when reinstalling same s9pk
        }
        let manifest = s9pk.as_manifest().clone();
        let id = manifest.id.clone();
        let icon = s9pk.icon_data_url().await?;
        let developer_key = s9pk.as_archive().signer();
        // Hold the write lock on this package's slot for the entire install.
        let mut service = self.get_mut(&id).await;
        let size = s9pk.size();
        if let Some(size) = size {
            unpack_progress.set_total(size);
        }
        let op_name = if recovery_source.is_none() {
            if service.is_none() {
                "Installing"
            } else {
                "Updating"
            }
        } else {
            "Restoring"
        };
        let mut finalization_progress = progress.add_phase(op_name.into(), Some(50));
        let restoring = recovery_source.is_some();
        // Register a cancellation channel so the install can be aborted.
        let (cancel_send, cancel_recv) = oneshot::channel();
        ctx.cancellable_installs
            .mutate(|c| c.insert(id.clone(), cancel_send));
        // On failure or early drop, this guard reloads the service from disk
        // and posts an error notification.
        let mut reload_guard =
            ServiceRefReloadCancelGuard::new(ctx.clone(), id.clone(), op_name, Some(cancel_recv));
        // Record the pending install/update/restore state in the db.
        reload_guard
            .handle(async {
                ctx.db
                    .mutate({
                        let installed_path = installed_path.clone();
                        let manifest = manifest.clone();
                        let id = id.clone();
                        let install_progress = progress.snapshot();
                        let registry = registry.clone();
                        move |db| {
                            if let Some(pde) =
                                db.as_public_mut().as_package_data_mut().as_idx_mut(&id)
                            {
                                // Existing entry => this is an update.
                                let prev = pde.as_state_info().expect_installed()?.de()?;
                                pde.as_state_info_mut().ser(&PackageState::Updating(
                                    UpdatingState {
                                        manifest: prev.manifest,
                                        s9pk: installed_path,
                                        installing_info: InstallingInfo {
                                            new_manifest: manifest,
                                            progress: install_progress,
                                        },
                                    },
                                ))?;
                            } else {
                                // Fresh entry => install or restore.
                                let installing = InstallingState {
                                    installing_info: InstallingInfo {
                                        new_manifest: manifest,
                                        progress: install_progress,
                                    },
                                };
                                db.as_public_mut().as_package_data_mut().insert(
                                    &id,
                                    &PackageDataEntry {
                                        state_info: if restoring {
                                            PackageState::Restoring(installing)
                                        } else {
                                            PackageState::Installing(installing)
                                        },
                                        s9pk: installed_path,
                                        status_info: StatusInfo {
                                            error: None,
                                            health: BTreeMap::new(),
                                            started: None,
                                            desired: DesiredStatus::Stopped,
                                        },
                                        registry,
                                        developer_key: Pem::new(developer_key),
                                        icon,
                                        last_backup: None,
                                        current_dependencies: Default::default(),
                                        actions: Default::default(),
                                        tasks: Default::default(),
                                        service_interfaces: Default::default(),
                                        hosts: Default::default(),
                                        store_exposed_dependents: Default::default(),
                                    },
                                )?;
                            };
                            Ok(())
                        }
                    })
                    .await
                    .result
            })
            .await?;
        // Stage 1 (returned future): write the s9pk to disk while mirroring
        // progress into the db.
        Ok(async move {
            let sync_progress_task = reload_guard
                .handle(async {
                    let download_path = s9pk_download_path(&commitment);
                    let deref_id = id.clone();
                    // Periodically mirror the progress tracker into the db.
                    let sync_progress_task =
                        NonDetachingJoinHandle::from(tokio::spawn(progress.clone().sync_to_db(
                            ctx.db.clone(),
                            move |v| {
                                v.as_public_mut()
                                    .as_package_data_mut()
                                    .as_idx_mut(&deref_id)
                                    .and_then(|e| e.as_state_info_mut().as_installing_info_mut())
                                    .map(|i| i.as_progress_mut())
                            },
                            Some(Duration::from_millis(100)),
                        )));
                    unpack_progress.start();
                    let mut progress_writer = ProgressTrackerWriter::new(
                        crate::util::io::create_file(&download_path).await?,
                        unpack_progress,
                    );
                    s9pk.serialize(&mut progress_writer, true).await?;
                    let (file, mut unpack_progress) = progress_writer.into_inner();
                    file.sync_all().await?;
                    unpack_progress.complete();
                    // Promote the fully-written file into its final location.
                    crate::util::io::rename(&download_path, &installed_path).await?;
                    Ok::<_, Error>(sync_progress_task)
                })
                .await?;
            // Stage 2 (returned future): finalize — migrate/retire the old
            // version, initialize the new service, publish it in the map.
            Ok(reload_guard
                .handle_last(async move {
                    finalization_progress.start();
                    let s9pk = S9pk::open(&installed_path, Some(&id)).await?;
                    let prev = if let Some(service) = service.take() {
                        ensure_code!(
                            recovery_source.is_none(),
                            ErrorKind::InvalidRequest,
                            "cannot restore over existing package"
                        );
                        let prev_version = service
                            .seed
                            .persistent_container
                            .s9pk
                            .as_manifest()
                            .version
                            .clone();
                        let prev_can_migrate_to = &service
                            .seed
                            .persistent_container
                            .s9pk
                            .as_manifest()
                            .can_migrate_to;
                        let next_version = &s9pk.as_manifest().version;
                        let next_can_migrate_from = &s9pk.as_manifest().can_migrate_from;
                        // Pick the migration target the old service's uninit
                        // hook is told about, based on the declared migration
                        // compatibility of both versions.
                        let uninit = if prev_version.satisfies(next_can_migrate_from) {
                            ExitParams::target_version(&*prev_version)
                        } else if next_version.satisfies(prev_can_migrate_to) {
                            ExitParams::target_version(&s9pk.as_manifest().version)
                        } else {
                            ExitParams::target_range(&VersionRange::and(
                                prev_can_migrate_to.clone(),
                                next_can_migrate_from.clone(),
                            ))
                        };
                        let cleanup = service.uninstall(uninit, false, false).await?;
                        progress.complete();
                        Some(cleanup)
                    } else {
                        None
                    };
                    let new_service = Service::install(
                        ctx,
                        s9pk,
                        &installed_path,
                        &registry,
                        if recovery_source.is_some() {
                            InitKind::Restore
                        } else if prev.is_some() {
                            InitKind::Update
                        } else {
                            InitKind::Install
                        },
                        recovery_source,
                        Some(InstallProgressHandles {
                            finalization_progress,
                            progress,
                        }),
                    )
                    .await?;
                    *service = Some(new_service.into());
                    // Finish retiring the previous version only after the new
                    // one is in place.
                    if let Some(cleanup) = prev {
                        cleanup.await?;
                    }
                    drop(service);
                    sync_progress_task.await.map_err(|_| {
                        Error::new(eyre!("progress sync task panicked"), ErrorKind::Unknown)
                    })??;
                    Ok(())
                })
                .boxed())
        }
        .boxed())
    }
    /// Marks the package `Removing` in the db and returns a future that
    /// performs the actual teardown (service uninit plus on-disk cleanup).
    /// `force` allows removal even when the service never initialized.
    #[instrument(skip_all)]
    pub async fn uninstall(
        &self,
        ctx: RpcContext,
        id: PackageId,
        soft: bool,
        force: bool,
    ) -> Result<impl Future<Output = Result<(), Error>> + Send + 'static, Error> {
        let mut guard = self.get_mut(&id).await;
        ctx.db
            .mutate(|db| {
                let entry = db
                    .as_public_mut()
                    .as_package_data_mut()
                    .as_idx_mut(&id)
                    .or_not_found(&id)?;
                entry.as_state_info_mut().map_mutate(|s| match s {
                    PackageState::Installed(s) => Ok(PackageState::Removing(s)),
                    _ => Err(Error::new(
                        eyre!("Package {id} is not installed."),
                        crate::ErrorKind::NotFound,
                    )),
                })
            })
            .await
            .result?;
        Ok(async move {
            ServiceRefReloadCancelGuard::new(ctx.clone(), id.clone(), "Uninstall", None)
                .handle_last(async move {
                    if let Some(service) = guard.take() {
                        let res = service
                            .uninstall(ExitParams::uninstall(), soft, force)
                            .await;
                        drop(guard);
                        res?.await
                    } else {
                        // No running service (load failed): only proceed when forced.
                        if force {
                            super::uninstall::cleanup(&ctx, &id, soft).await?;
                            Ok(())
                        } else {
                            Err(Error::new(
                                eyre!(
                                    "service {id} failed to initialize - cannot remove gracefully"
                                ),
                                ErrorKind::Uninitialized,
                            ))
                        }
                    }
                })
                .await?;
            Ok(())
        }
        .or_else(|e: Error| e.wait().map(Err)))
    }
    /// Shut down every service in the map, in parallel, collecting all errors.
    pub async fn shutdown_all(&self) -> Result<(), Error> {
        let futs = self.0.mutate(|lock| {
            let mut futs = Vec::with_capacity(lock.len());
            for service in lock.values().cloned() {
                futs.push(async move {
                    if let Some(service) = service.write_owned().await.take() {
                        service.shutdown(None).await?
                    }
                    Ok::<_, Error>(())
                });
            }
            futs
        });
        let mut errors = ErrorCollection::new();
        for res in futures::future::join_all(futs).await {
            errors.handle(res);
        }
        errors.into_result()
    }
}
/// Guard that reloads a service's `ServiceRef` if an operation fails or the
/// guard is dropped midway, with an optional cancellation channel.
pub struct ServiceRefReloadCancelGuard(
    Option<ServiceRefReloadInfo>,
    Option<Fuse<oneshot::Receiver<()>>>,
);
impl Drop for ServiceRefReloadCancelGuard {
    fn drop(&mut self) {
        // If the guard was never disarmed (via `handle_last` or a handled
        // error), reload the service in the background without a notification.
        if let Some(info) = self.0.take() {
            tokio::spawn(info.reload(None));
        }
    }
}
impl ServiceRefReloadCancelGuard {
    /// Create an armed guard for `operation` on package `id`; firing `cancel`
    /// (if provided) aborts the guarded operation.
    pub fn new(
        ctx: RpcContext,
        id: PackageId,
        operation: &'static str,
        cancel: Option<oneshot::Receiver<()>>,
    ) -> Self {
        Self(
            Some(ServiceRefReloadInfo { ctx, id, operation }),
            cancel.map(|c| c.fuse()),
        )
    }
    /// Run `operation`, racing it against the cancel channel. On failure the
    /// guard is disarmed and a background reload-and-notify task is attached
    /// to the returned error.
    pub async fn handle<T>(
        &mut self,
        operation: impl Future<Output = Result<T, Error>>,
    ) -> Result<T, Error> {
        let res = async {
            if let Some(cancel) = self.1.as_mut() {
                tokio::select! {
                    res = operation => res,
                    _ = cancel => Err(Error::new(eyre!("Operation Cancelled"), ErrorKind::Cancelled)),
                }
            } else {
                operation.await
            }
        }.await;
        match res {
            Ok(a) => Ok(a),
            Err(e) => {
                if let Some(info) = self.0.take() {
                    let task_e = e.clone_output();
                    Err(e.with_task(tokio::spawn(async move {
                        info.reload(Some(task_e)).await.log_err();
                    })))
                } else {
                    Err(e)
                }
            }
        }
    }
    /// Like [`Self::handle`], but disarms the guard afterwards so a later
    /// drop does not trigger a reload.
    pub async fn handle_last<T>(
        mut self,
        operation: impl Future<Output = Result<T, Error>>,
    ) -> Result<T, Error> {
        let res = self.handle(operation).await;
        self.0.take();
        res
    }
}
/// Everything needed to reload a service after a failed or aborted operation.
struct ServiceRefReloadInfo {
    ctx: RpcContext,
    id: PackageId,
    // Human-readable operation name, used in the failure notification title.
    operation: &'static str,
}
impl ServiceRefReloadInfo {
    /// Reload the service from its persisted state (undoing the partial
    /// operation) and, if `error` is given, post an error notification.
    async fn reload(self, error: Option<Error>) -> Result<(), Error> {
        self.ctx
            .services
            .load(&self.ctx, &self.id, LoadDisposition::Undo)
            .await?;
        if let Some(error) = error {
            let error_string = error.to_string();
            self.ctx
                .db
                .mutate(|db| {
                    notify(
                        db,
                        Some(self.id.clone()),
                        NotificationLevel::Error,
                        format!("{} Failed", self.operation),
                        error_string,
                        (),
                    )
                })
                .await
                .result?;
        }
        Ok(())
    }
}

View File

@@ -0,0 +1,15 @@
use serde::{Deserialize, Serialize};
use ts_rs::TS;
/// The status a service should land in once a transition (e.g. a backup)
/// completes: running (`Start`) or stopped (`Stop`).
#[derive(Clone, Copy, Debug, Eq, PartialEq, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
pub enum StartStop {
    Start,
    Stop,
}
impl StartStop {
    /// `true` iff this is the [`StartStop::Start`] variant.
    pub fn is_start(&self) -> bool {
        match self {
            StartStop::Start => true,
            StartStop::Stop => false,
        }
    }
}

View File

@@ -0,0 +1,101 @@
use std::path::PathBuf;
use futures::future::BoxFuture;
use futures::{FutureExt, TryFutureExt};
use rpc_toolkit::yajrc::RpcError;
use crate::disk::mount::filesystem::ReadWrite;
use crate::prelude::*;
use crate::rpc_continuations::Guid;
use crate::service::action::GetActionInput;
use crate::service::start_stop::StartStop;
use crate::service::transition::{Transition, TransitionKind};
use crate::service::{ProcedureName, ServiceActor, ServiceActorSeed};
use crate::status::DesiredStatus;
use crate::util::actor::background::BackgroundJobQueue;
use crate::util::actor::{ConflictBuilder, Handler};
use crate::util::serde::NoOutput;
impl ServiceActorSeed {
    /// Transition that resumes the backup future previously stashed by the
    /// [`Backup`] handler, then restores the desired status (`Running` or
    /// `Stopped`) that was recorded in `DesiredStatus::BackingUp`.
    pub fn backup(&self) -> Transition<'_> {
        Transition {
            kind: TransitionKind::BackingUp,
            future: async {
                // Take the stashed backup future; if there is none, this
                // transition was triggered without a pending backup.
                let res = if let Some(fut) = self.backup.replace(None) {
                    fut.await.map_err(Error::from)
                } else {
                    Err(Error::new(
                        eyre!("No backup to resume"),
                        ErrorKind::Cancelled,
                    ))
                };
                let id = &self.id;
                // Regardless of the backup's outcome, restore the post-backup
                // desired status that was recorded when the backup began.
                self.ctx
                    .db
                    .mutate(|db| {
                        db.as_public_mut()
                            .as_package_data_mut()
                            .as_idx_mut(id)
                            .or_not_found(id)?
                            .as_status_info_mut()
                            .as_desired_mut()
                            .map_mutate(|s| {
                                Ok(match s {
                                    DesiredStatus::BackingUp {
                                        on_complete: StartStop::Start,
                                    } => DesiredStatus::Running,
                                    DesiredStatus::BackingUp {
                                        on_complete: StartStop::Stop,
                                    } => DesiredStatus::Stopped,
                                    x => x,
                                })
                            })
                    })
                    .await
                    .result?;
                res
            }
            .boxed(),
        }
    }
}
/// Actor message: create a backup of this service at `path`.
pub(in crate::service) struct Backup {
    pub path: PathBuf,
}
impl Handler<Backup> for ServiceActor {
    /// Resolves to a future that completes when the backup itself finishes.
    type Response = Result<BoxFuture<'static, Result<(), Error>>, Error>;
    // Backups conflict with every other actor message except fetching action
    // input, which is read-only.
    fn conflicts_with(_: &Backup) -> ConflictBuilder<Self> {
        ConflictBuilder::everything().except::<GetActionInput>()
    }
    async fn handle(
        &mut self,
        id: Guid,
        Backup { path }: Backup,
        _: &BackgroundJobQueue,
    ) -> Self::Response {
        let seed = self.0.clone();
        // Mount the backup target, run the service's `createBackup`
        // procedure, then unmount.
        // NOTE(review): if `execute` fails, `backup_guard` is not explicitly
        // unmounted here — presumably cleaned up on drop; verify.
        let transition = async move {
            async {
                let backup_guard = seed
                    .persistent_container
                    .mount_backup(path, ReadWrite)
                    .await?;
                seed.persistent_container
                    .execute::<NoOutput>(id, ProcedureName::CreateBackup, Value::Null, None)
                    .await?;
                backup_guard.unmount(true).await?;
                Ok::<_, Error>(())
            }
            .await
            .map_err(RpcError::from)
        }
        .shared();
        // Stash one clone of the shared future so the `BackingUp` transition
        // can await it; hand the other clone back to the caller.
        self.0.backup.replace(Some(transition.clone().boxed()));
        Ok(transition.map_err(Error::from).boxed())
    }
}

View File

@@ -0,0 +1,78 @@
use futures::FutureExt;
use futures::future::BoxFuture;
use crate::prelude::*;
use crate::service::ServiceActorSeed;
pub mod backup;
/// The kind of lifecycle transition a service can be undergoing; used to
/// decide whether an in-flight transition may be kept when reconciling.
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum TransitionKind {
    BackingUp,
    Starting,
    Stopping,
}
/// An in-flight lifecycle transition: its kind plus the future driving it to
/// completion. Dropping the future cancels the transition.
pub struct Transition<'a> {
    pub kind: TransitionKind,
    pub future: BoxFuture<'a, Result<(), Error>>,
}
// Manual impl because the boxed future is not `Debug`.
impl<'a> ::std::fmt::Debug for Transition<'a> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("Transition")
            .field("kind", &self.kind)
            .finish_non_exhaustive()
    }
}
impl ServiceActorSeed {
    /// Transition that starts the container and then records the started
    /// timestamp in the package's `statusInfo`.
    pub fn start(&self) -> Transition<'_> {
        Transition {
            kind: TransitionKind::Starting,
            future: async {
                self.persistent_container.start().await?;
                let id = &self.id;
                self.ctx
                    .db
                    .mutate(|db| {
                        db.as_public_mut()
                            .as_package_data_mut()
                            .as_idx_mut(id)
                            .or_not_found(id)?
                            .as_status_info_mut()
                            .started()
                    })
                    .await
                    .result?;
                Ok(())
            }
            .boxed(),
        }
    }
    /// Transition that stops the container and then clears the started
    /// timestamp in the package's `statusInfo`.
    pub fn stop(&self) -> Transition<'_> {
        Transition {
            kind: TransitionKind::Stopping,
            future: async {
                self.persistent_container.stop().await?;
                let id = &self.id;
                self.ctx
                    .db
                    .mutate(|db| {
                        db.as_public_mut()
                            .as_package_data_mut()
                            .as_idx_mut(id)
                            .or_not_found(id)?
                            .as_status_info_mut()
                            .stopped()
                    })
                    .await
                    .result?;
                Ok(())
            }
            .boxed(),
        }
    }
}

View File

@@ -0,0 +1,81 @@
use std::path::Path;
use crate::context::RpcContext;
use crate::db::model::package::{InstalledState, InstallingInfo, InstallingState, PackageState};
use crate::prelude::*;
use crate::volume::PKG_VOLUME_DIR;
use crate::{DATA_DIR, PACKAGE_DATA, PackageId};
/// Remove all db records for `id` (public package entry, reserved host ports,
/// private package store) and, unless `soft`, delete its data volumes and
/// logs from disk. Expects the package to be in an
/// installing/restoring/removing state.
pub async fn cleanup(ctx: &RpcContext, id: &PackageId, soft: bool) -> Result<(), Error> {
    Ok(
        if let Some(pde) = ctx
            .db
            .mutate(|d| {
                if let Some(pde) = d
                    .as_public_mut()
                    .as_package_data_mut()
                    .remove(&id)?
                    .map(|d| d.de())
                    .transpose()?
                {
                    // Release every host port this package had bound.
                    d.as_private_mut().as_available_ports_mut().mutate(|p| {
                        p.free(
                            pde.hosts
                                .0
                                .values()
                                .flat_map(|h| h.bindings.values())
                                .flat_map(|b| {
                                    b.net
                                        .assigned_port
                                        .into_iter()
                                        .chain(b.net.assigned_ssl_port)
                                }),
                        );
                        Ok(())
                    })?;
                    d.as_private_mut().as_package_stores_mut().remove(&id)?;
                    Ok(Some(pde))
                } else {
                    Ok(None)
                }
            })
            .await
            .result?
        {
            // The removed entry must be mid-install/restore/removal; any
            // other state means cleanup was invoked at the wrong time.
            let manifest = match pde.state_info {
                PackageState::Installing(InstallingState {
                    installing_info:
                        InstallingInfo {
                            new_manifest: manifest,
                            ..
                        },
                })
                | PackageState::Restoring(InstallingState {
                    installing_info:
                        InstallingInfo {
                            new_manifest: manifest,
                            ..
                        },
                })
                | PackageState::Removing(InstalledState { manifest }) => manifest,
                s => {
                    return Err(Error::new(
                        eyre!("Invalid package state for cleanup: {s:?}"),
                        ErrorKind::InvalidRequest,
                    ));
                }
            };
            if !soft {
                // Delete the package's data volumes.
                let path = Path::new(DATA_DIR).join(PKG_VOLUME_DIR).join(&manifest.id);
                if tokio::fs::metadata(&path).await.is_ok() {
                    tokio::fs::remove_dir_all(&path).await?;
                }
                // Delete the package's logs; kept in dev builds for debugging.
                let logs_dir = Path::new(PACKAGE_DATA).join("logs").join(&manifest.id);
                if tokio::fs::metadata(&logs_dir).await.is_ok() {
                    #[cfg(not(feature = "dev"))]
                    tokio::fs::remove_dir_all(&logs_dir).await?;
                }
            }
        },
    )
}