Refactor/sdk init (#2947)

* fixes for main

* refactor package initialization

* fixes from testing

* more fixes

* beta.21

* do not use instanceof

* closes #2921

* beta22

* allow disabling kiosk

* migration

* fix /etc/shadow

* actionRequest -> task

* beta.23
This commit is contained in:
Aiden McClelland
2025-05-21 10:24:37 -06:00
committed by GitHub
parent 46fd01c264
commit 44560c8da8
237 changed files with 1827 additions and 98800 deletions

View File

@@ -2,7 +2,7 @@ use std::fmt;
use clap::{CommandFactory, FromArgMatches, Parser};
pub use models::ActionId;
use models::PackageId;
use models::{PackageId, ReplayId};
use qrcode::QrCode;
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
@@ -10,6 +10,7 @@ use tracing::instrument;
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::db::model::package::TaskSeverity;
use crate::prelude::*;
use crate::rpc_continuations::Guid;
use crate::util::serde::{
@@ -38,6 +39,13 @@ pub fn action_api<C: Context>() -> ParentHandler<C> {
.with_about("Run service action")
.with_call_remote::<CliContext>(),
)
.subcommand(
"clear-task",
from_fn_async(clear_task)
.no_display()
.with_about("Clear a service task")
.with_call_remote::<CliContext>(),
)
}
#[derive(Debug, Clone, Deserialize, Serialize, TS)]
@@ -332,3 +340,45 @@ pub async fn run_action(
.await
.map(|res| res.map(ActionResult::upcast))
}
/// Parameters for the `clear-task` RPC/CLI subcommand: identifies a single
/// pending task on a package and whether removal may bypass the severity check.
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct ClearTaskParams {
    // Package whose task list is being modified.
    pub package_id: PackageId,
    // Identifies the specific task entry to remove from the package's task map.
    pub replay_id: ReplayId,
    /// When set (`--force`), allows clearing a task even if its severity is
    /// `Critical`; otherwise critical tasks are protected (see `clear_task`).
    #[arg(long)]
    pub force: bool,
}
#[instrument(skip_all)]
pub async fn clear_task(
ctx: RpcContext,
ClearTaskParams {
package_id,
replay_id,
force,
}: ClearTaskParams,
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
if let Some(task) = db
.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package_id)
.or_not_found(&package_id)?
.as_tasks_mut()
.remove(&replay_id)?
{
if !force && task.as_task().as_severity().de()? == TaskSeverity::Critical {
return Err(Error::new(
eyre!("Cannot clear critical task"),
ErrorKind::InvalidRequest,
));
}
}
Ok(())
})
.await
.result
}

View File

@@ -44,22 +44,25 @@ impl Map for Sessions {
}
pub async fn write_shadow(password: &str) -> Result<(), Error> {
let hash: String = sha_crypt::sha512_simple(password, &sha_crypt::Sha512Params::default())
.map_err(|e| Error::new(eyre!("{e:?}"), ErrorKind::Serialization))?;
let shadow_contents = tokio::fs::read_to_string("/etc/shadow").await?;
let mut shadow_file =
create_file_mod("/media/startos/config/overlay/etc/shadow", 0o640).await?;
for line in shadow_contents.lines() {
if line.starts_with("start9:") {
let rest = line.splitn(3, ":").nth(2).ok_or_else(|| {
Error::new(eyre!("malformed /etc/shadow"), ErrorKind::ParseSysInfo)
})?;
let pw = sha_crypt::sha512_simple(password, &sha_crypt::Sha512Params::default())
.map_err(|e| Error::new(eyre!("{e:?}"), ErrorKind::Serialization))?;
shadow_file
.write_all(format!("start9:{pw}:{rest}\n").as_bytes())
.await?;
} else {
shadow_file.write_all(line.as_bytes()).await?;
shadow_file.write_all(b"\n").await?;
match line.split_once(":") {
Some((user, rest)) if user == "start9" || user == "kiosk" => {
let (_, rest) = rest.split_once(":").ok_or_else(|| {
Error::new(eyre!("malformed /etc/shadow"), ErrorKind::ParseSysInfo)
})?;
shadow_file
.write_all(format!("{user}:{hash}:{rest}\n").as_bytes())
.await?;
}
_ => {
shadow_file.write_all(line.as_bytes()).await?;
shadow_file.write_all(b"\n").await?;
}
}
}
shadow_file.sync_all().await?;

View File

@@ -23,7 +23,9 @@ use crate::prelude::*;
use crate::s9pk::S9pk;
use crate::service::service_map::DownloadInstallFuture;
use crate::setup::SetupExecuteProgress;
use crate::system::sync_kiosk;
use crate::util::serde::IoFormat;
use crate::PLATFORM;
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
@@ -80,6 +82,7 @@ pub async fn recover_full_embassy(
recovery_source: TmpMountGuard,
server_id: &str,
recovery_password: &str,
kiosk: Option<bool>,
SetupExecuteProgress {
init_phases,
restore_phase,
@@ -105,8 +108,12 @@ pub async fn recover_full_embassy(
)
.with_kind(ErrorKind::PasswordHashGeneration)?;
let kiosk = Some(kiosk.unwrap_or(true)).filter(|_| &*PLATFORM != "raspberrypi");
sync_kiosk(kiosk).await?;
let db = ctx.db().await?;
db.put(&ROOT, &Database::init(&os_backup.account)?).await?;
db.put(&ROOT, &Database::init(&os_backup.account, kiosk)?)
.await?;
drop(db);
let init_result = init(&ctx.webserver, &ctx.config, init_phases).await?;
@@ -166,6 +173,7 @@ async fn restore_packages(
.install(
ctx.clone(),
|| S9pk::open(s9pk_path, Some(&id)),
None, // TODO: pull from metadata?
Some(backup_dir),
None,
)

View File

@@ -35,7 +35,7 @@ use crate::net::wifi::WpaCli;
use crate::prelude::*;
use crate::progress::{FullProgressTracker, PhaseProgressTrackerHandle};
use crate::rpc_continuations::{Guid, OpenAuthedContinuations, RpcContinuations};
use crate::service::action::update_requested_actions;
use crate::service::action::update_tasks;
use crate::service::effects::callbacks::ServiceCallbacks;
use crate::service::ServiceMap;
use crate::shutdown::Shutdown;
@@ -102,14 +102,14 @@ impl InitRpcContextPhases {
pub struct CleanupInitPhases {
cleanup_sessions: PhaseProgressTrackerHandle,
init_services: PhaseProgressTrackerHandle,
check_requested_actions: PhaseProgressTrackerHandle,
check_tasks: PhaseProgressTrackerHandle,
}
impl CleanupInitPhases {
pub fn new(handle: &FullProgressTracker) -> Self {
Self {
cleanup_sessions: handle.add_phase("Cleaning up sessions".into(), Some(1)),
init_services: handle.add_phase("Initializing services".into(), Some(10)),
check_requested_actions: handle.add_phase("Checking action requests".into(), Some(1)),
check_tasks: handle.add_phase("Checking action requests".into(), Some(1)),
}
}
}
@@ -307,7 +307,7 @@ impl RpcContext {
CleanupInitPhases {
mut cleanup_sessions,
init_services,
mut check_requested_actions,
mut check_tasks,
}: CleanupInitPhases,
) -> Result<(), Error> {
cleanup_sessions.start();
@@ -369,31 +369,27 @@ impl RpcContext {
tracing::info!("Initialized Services");
// TODO
check_requested_actions.start();
check_tasks.start();
let peek = self.db.peek().await;
let mut action_input: OrdMap<PackageId, BTreeMap<ActionId, Value>> = OrdMap::new();
let requested_actions: BTreeSet<_> = peek
let tasks: BTreeSet<_> = peek
.as_public()
.as_package_data()
.as_entries()?
.into_iter()
.map(|(_, pde)| {
Ok(pde
.as_requested_actions()
.as_entries()?
.into_iter()
.map(|(_, r)| {
Ok::<_, Error>((
r.as_request().as_package_id().de()?,
r.as_request().as_action_id().de()?,
))
}))
Ok(pde.as_tasks().as_entries()?.into_iter().map(|(_, r)| {
Ok::<_, Error>((
r.as_task().as_package_id().de()?,
r.as_task().as_action_id().de()?,
))
}))
})
.flatten_ok()
.map(|a| a.and_then(|a| a))
.try_collect()?;
let procedure_id = Guid::new();
for (package_id, action_id) in requested_actions {
for (package_id, action_id) in tasks {
if let Some(service) = self.services.get(&package_id).await.as_ref() {
if let Some(input) = service
.get_action_input(procedure_id.clone(), action_id.clone())
@@ -412,14 +408,8 @@ impl RpcContext {
for (package_id, action_input) in &action_input {
for (action_id, input) in action_input {
for (_, pde) in db.as_public_mut().as_package_data_mut().as_entries_mut()? {
pde.as_requested_actions_mut().mutate(|requested_actions| {
Ok(update_requested_actions(
requested_actions,
package_id,
action_id,
input,
false,
))
pde.as_tasks_mut().mutate(|tasks| {
Ok(update_tasks(tasks, package_id, action_id, input, false))
})?;
}
}
@@ -428,7 +418,7 @@ impl RpcContext {
})
.await
.result?;
check_requested_actions.complete();
check_tasks.complete();
Ok(())
}

View File

@@ -27,9 +27,9 @@ pub struct Database {
pub private: Private,
}
impl Database {
pub fn init(account: &AccountInfo) -> Result<Self, Error> {
pub fn init(account: &AccountInfo, kiosk: Option<bool>) -> Result<Self, Error> {
Ok(Self {
public: Public::init(account)?,
public: Public::init(account, kiosk)?,
private: Private {
key_store: KeyStore::new(account)?,
password: account.password.clone(),

View File

@@ -3,10 +3,7 @@ use std::collections::{BTreeMap, BTreeSet};
use chrono::{DateTime, Utc};
use exver::VersionRange;
use imbl_value::InternedString;
use models::{
ActionId, DataUrl, HealthCheckId, HostId, PackageId, ReplayId, ServiceInterfaceId,
VersionString,
};
use models::{ActionId, DataUrl, HealthCheckId, HostId, PackageId, ReplayId, ServiceInterfaceId};
use patch_db::json_ptr::JsonPointer;
use patch_db::HasModel;
use reqwest::Url;
@@ -365,7 +362,7 @@ impl Default for ActionVisibility {
#[ts(export)]
pub struct PackageDataEntry {
pub state_info: PackageState,
pub data_version: Option<VersionString>,
pub data_version: Option<String>,
pub status: MainStatus,
#[ts(type = "string | null")]
pub registry: Option<Url>,
@@ -376,8 +373,8 @@ pub struct PackageDataEntry {
pub last_backup: Option<DateTime<Utc>>,
pub current_dependencies: CurrentDependencies,
pub actions: BTreeMap<ActionId, ActionMetadata>,
#[ts(as = "BTreeMap::<String, ActionRequestEntry>")]
pub requested_actions: BTreeMap<ReplayId, ActionRequestEntry>,
#[ts(as = "BTreeMap::<String, TaskEntry>")]
pub tasks: BTreeMap<ReplayId, TaskEntry>,
pub service_interfaces: BTreeMap<ServiceInterfaceId, ServiceInterface>,
pub hosts: Hosts,
#[ts(type = "string[]")]
@@ -444,8 +441,8 @@ pub enum CurrentDependencyKind {
#[serde(rename_all = "camelCase")]
#[ts(export)]
#[model = "Model<Self>"]
pub struct ActionRequestEntry {
pub request: ActionRequest,
pub struct TaskEntry {
pub task: Task,
pub active: bool,
}
@@ -453,58 +450,59 @@ pub struct ActionRequestEntry {
#[serde(rename_all = "camelCase")]
#[ts(export)]
#[model = "Model<Self>"]
pub struct ActionRequest {
pub struct Task {
pub package_id: PackageId,
pub action_id: ActionId,
#[serde(default)]
pub severity: ActionSeverity,
pub severity: TaskSeverity,
#[ts(optional)]
pub reason: Option<String>,
#[ts(optional)]
pub when: Option<ActionRequestTrigger>,
pub when: Option<TaskTrigger>,
#[ts(optional)]
pub input: Option<ActionRequestInput>,
pub input: Option<TaskInput>,
}
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[derive(Clone, Debug, Deserialize, Serialize, TS, PartialEq, Eq, PartialOrd, Ord)]
#[serde(rename_all = "kebab-case")]
#[ts(export)]
pub enum ActionSeverity {
Critical,
pub enum TaskSeverity {
Optional,
Important,
Critical,
}
impl Default for ActionSeverity {
impl Default for TaskSeverity {
fn default() -> Self {
ActionSeverity::Important
TaskSeverity::Important
}
}
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct ActionRequestTrigger {
pub struct TaskTrigger {
#[serde(default)]
pub once: bool,
pub condition: ActionRequestCondition,
pub condition: TaskCondition,
}
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "kebab-case")]
#[ts(export)]
pub enum ActionRequestCondition {
pub enum TaskCondition {
InputNotMatches,
}
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "kebab-case")]
#[serde(tag = "kind")]
pub enum ActionRequestInput {
pub enum TaskInput {
Partial {
#[ts(type = "Record<string, unknown>")]
value: Value,
},
}
impl ActionRequestInput {
impl TaskInput {
pub fn matches(&self, input: Option<&Value>) -> bool {
match self {
Self::Partial { value } => match input {

View File

@@ -40,7 +40,7 @@ pub struct Public {
pub ui: Value,
}
impl Public {
pub fn init(account: &AccountInfo) -> Result<Self, Error> {
pub fn init(account: &AccountInfo, kiosk: Option<bool>) -> Result<Self, Error> {
Ok(Self {
server_info: ServerInfo {
arch: get_arch(),
@@ -117,6 +117,7 @@ impl Public {
smtp: None,
ram: 0,
devices: Vec::new(),
kiosk,
},
package_data: AllPackageData::default(),
ui: serde_json::from_str(include_str!(concat!(
@@ -175,6 +176,7 @@ pub struct ServerInfo {
#[ts(type = "number")]
pub ram: u64,
pub devices: Vec<LshwDevice>,
pub kiosk: Option<bool>,
}
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]

View File

@@ -37,7 +37,7 @@ use crate::progress::{
use crate::rpc_continuations::{Guid, RpcContinuation};
use crate::s9pk::v2::pack::{CONTAINER_DATADIR, CONTAINER_TOOL};
use crate::ssh::SSH_DIR;
use crate::system::get_mem_info;
use crate::system::{get_mem_info, sync_kiosk};
use crate::util::io::{create_file, IOHook};
use crate::util::lshw::lshw;
use crate::util::net::WebSocketExt;
@@ -510,6 +510,7 @@ pub async fn init(
enable_zram.complete();
update_server_info.start();
sync_kiosk(server_info.as_kiosk().de()?).await?;
let ram = get_mem_info().await?.total.0 as u64 * 1024 * 1024;
let devices = lshw().await?;
let status_info = ServerStatus {

View File

@@ -161,6 +161,7 @@ pub async fn install(
.install(
ctx.clone(),
|| asset.deserialize_s9pk_buffered(ctx.client.clone(), download_progress),
Some(registry),
None::<Never>,
Some(progress_tracker),
)
@@ -257,6 +258,7 @@ pub async fn sideload(
.install(
ctx.clone(),
|| crate::s9pk::load(file.clone(), || Ok(key.de()?.0), Some(&progress_tracker)),
None,
None::<Never>,
Some(progress_tracker.clone()),
)

View File

@@ -89,6 +89,7 @@ use crate::context::{
use crate::disk::fsck::RequiresReboot;
use crate::net::net;
use crate::registry::context::{RegistryContext, RegistryUrlParams};
use crate::system::kiosk;
use crate::util::serde::{HandlerExtSerde, WithIoFormat};
#[derive(Deserialize, Serialize, Parser, TS)]
@@ -118,7 +119,7 @@ impl std::fmt::Display for ApiState {
}
pub fn main_api<C: Context>() -> ParentHandler<C> {
let api = ParentHandler::new()
let mut api = ParentHandler::new()
.subcommand(
"git-info",
from_fn(|_: C| version::git_info()).with_about("Display the githash of StartOS CLI"),
@@ -198,12 +199,18 @@ pub fn main_api<C: Context>() -> ParentHandler<C> {
"util",
util::rpc::util::<C>().with_about("Command for calculating the blake3 hash of a file"),
);
if &*PLATFORM != "raspberrypi" {
api = api.subcommand("kiosk", kiosk::<C>());
}
#[cfg(feature = "dev")]
let api = api.subcommand(
"lxc",
lxc::dev::lxc::<C>()
.with_about("Commands related to lxc containers i.e. create, list, remove, connect"),
);
{
api = api.subcommand(
"lxc",
lxc::dev::lxc::<C>().with_about(
"Commands related to lxc containers i.e. create, list, remove, connect",
),
);
}
api
}

View File

@@ -222,8 +222,9 @@ impl LanPortForwardController {
}
}
// iptables -I FORWARD -o br-start9 -p tcp -d 172.18.0.2 --dport 8333 -j ACCEPT
// iptables -t nat -I PREROUTING -p tcp --dport 32768 -j DNAT --to 172.18.0.2:8333
// iptables -t nat -A POSTROUTING -s 10.59.0.0/24 ! -d 10.59.0.0/24 -j SNAT --to $ip
// iptables -I INPUT -p udp --dport $port -j ACCEPT
// iptables -I FORWARD -s 10.59.0.0/24 -j ACCEPT
async fn forward(external: u16, interface: &str, target: SocketAddr) -> Result<(), Error> {
for proto in ["tcp", "udp"] {
Command::new("iptables")

View File

@@ -6,8 +6,7 @@ use models::{ActionId, PackageId, ProcedureName, ReplayId};
use crate::action::{ActionInput, ActionResult};
use crate::db::model::package::{
ActionRequestCondition, ActionRequestEntry, ActionRequestInput, ActionVisibility,
AllowedStatuses,
ActionVisibility, AllowedStatuses, TaskCondition, TaskEntry, TaskInput,
};
use crate::prelude::*;
use crate::rpc_continuations::Guid;
@@ -73,21 +72,21 @@ impl Service {
}
}
pub fn update_requested_actions(
requested_actions: &mut BTreeMap<ReplayId, ActionRequestEntry>,
pub fn update_tasks(
tasks: &mut BTreeMap<ReplayId, TaskEntry>,
package_id: &PackageId,
action_id: &ActionId,
input: &Value,
was_run: bool,
) {
requested_actions.retain(|_, v| {
if &v.request.package_id != package_id || &v.request.action_id != action_id {
tasks.retain(|_, v| {
if &v.task.package_id != package_id || &v.task.action_id != action_id {
return true;
}
if let Some(when) = &v.request.when {
if let Some(when) = &v.task.when {
match &when.condition {
ActionRequestCondition::InputNotMatches => match &v.request.input {
Some(ActionRequestInput::Partial { value }) => {
TaskCondition::InputNotMatches => match &v.task.input {
Some(TaskInput::Partial { value }) => {
if is_partial_of(value, input) {
if when.once {
return !was_run;
@@ -99,10 +98,7 @@ pub fn update_requested_actions(
}
}
None => {
tracing::error!(
"action request exists in an invalid state {:?}",
v.request
);
tracing::error!("action request exists in an invalid state {:?}", v.task);
}
},
}
@@ -180,14 +176,8 @@ impl Handler<RunAction> for ServiceActor {
.db
.mutate(|db| {
for (_, pde) in db.as_public_mut().as_package_data_mut().as_entries_mut()? {
pde.as_requested_actions_mut().mutate(|requested_actions| {
Ok(update_requested_actions(
requested_actions,
package_id,
action_id,
&input,
true,
))
pde.as_tasks_mut().mutate(|tasks| {
Ok(update_tasks(tasks, package_id, action_id, &input, true))
})?;
}
Ok(())

View File

@@ -5,7 +5,7 @@ use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use crate::action::{display_action_result, ActionInput, ActionResult};
use crate::db::model::package::{
ActionMetadata, ActionRequest, ActionRequestCondition, ActionRequestEntry, ActionRequestTrigger,
ActionMetadata, Task, TaskCondition, TaskEntry, TaskSeverity, TaskTrigger,
};
use crate::rpc_continuations::Guid;
use crate::service::cli::ContainerCliContext;
@@ -34,10 +34,10 @@ pub fn action_api<C: Context>() -> ParentHandler<C> {
.with_custom_display_fn(|args, res| Ok(display_action_result(args.params, res)))
.with_call_remote::<ContainerCliContext>(),
)
.subcommand("request", from_fn_async(request_action).no_cli())
.subcommand("create-task", from_fn_async(create_task).no_cli())
.subcommand(
"clear-requests",
from_fn_async(clear_action_requests)
"clear-tasks",
from_fn_async(clear_tasks)
.no_display()
.with_call_remote::<ContainerCliContext>(),
)
@@ -196,29 +196,29 @@ async fn run_action(
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct RequestActionParams {
pub struct CreateTaskParams {
#[serde(default)]
#[ts(skip)]
procedure_id: Guid,
replay_id: ReplayId,
#[serde(flatten)]
request: ActionRequest,
task: Task,
}
async fn request_action(
async fn create_task(
context: EffectContext,
RequestActionParams {
CreateTaskParams {
procedure_id,
replay_id,
request,
}: RequestActionParams,
task,
}: CreateTaskParams,
) -> Result<(), Error> {
let context = context.deref()?;
let src_id = &context.seed.id;
let active = match &request.when {
Some(ActionRequestTrigger { once, condition }) => match condition {
ActionRequestCondition::InputNotMatches => {
let Some(input) = request.input.as_ref() else {
let active = match &task.when {
Some(TaskTrigger { once, condition }) => match condition {
TaskCondition::InputNotMatches => {
let Some(input) = task.input.as_ref() else {
return Err(Error::new(
eyre!("input-not-matches trigger requires input to be specified"),
ErrorKind::InvalidRequest,
@@ -228,19 +228,19 @@ async fn request_action(
.seed
.ctx
.services
.get(&request.package_id)
.get(&task.package_id)
.await
.as_ref()
{
let Some(prev) = service
.get_action_input(procedure_id, request.action_id.clone())
.get_action_input(procedure_id.clone(), task.action_id.clone())
.await?
else {
return Err(Error::new(
eyre!(
"action {} of {} has no input",
request.action_id,
request.package_id
task.action_id,
task.package_id
),
ErrorKind::InvalidRequest,
));
@@ -261,6 +261,9 @@ async fn request_action(
},
None => true,
};
if active && task.severity == TaskSeverity::Critical {
context.stop(procedure_id).await?;
}
context
.seed
.ctx
@@ -270,8 +273,8 @@ async fn request_action(
.as_package_data_mut()
.as_idx_mut(src_id)
.or_not_found(src_id)?
.as_requested_actions_mut()
.insert(&replay_id, &ActionRequestEntry { active, request })
.as_tasks_mut()
.insert(&replay_id, &TaskEntry { active, task })
})
.await
.result?;
@@ -281,16 +284,16 @@ async fn request_action(
#[derive(Debug, Clone, Serialize, Deserialize, TS, Parser)]
#[ts(type = "{ only: string[] } | { except: string[] }")]
#[ts(export)]
pub struct ClearActionRequestsParams {
pub struct ClearTasksParams {
#[arg(long, conflicts_with = "except")]
pub only: Option<Vec<ReplayId>>,
#[arg(long, conflicts_with = "only")]
pub except: Option<Vec<ReplayId>>,
}
async fn clear_action_requests(
async fn clear_tasks(
context: EffectContext,
ClearActionRequestsParams { only, except }: ClearActionRequestsParams,
ClearTasksParams { only, except }: ClearTasksParams,
) -> Result<(), Error> {
let context = context.deref()?;
let package_id = context.seed.id.clone();
@@ -305,7 +308,7 @@ async fn clear_action_requests(
.as_package_data_mut()
.as_idx_mut(&package_id)
.or_not_found(&package_id)?
.as_requested_actions_mut()
.as_tasks_mut()
.mutate(|a| {
Ok(a.retain(|e, _| {
only.as_ref().map_or(true, |only| !only.contains(e))

View File

@@ -10,7 +10,7 @@ use models::{FromStrParser, HealthCheckId, PackageId, ReplayId, VersionString, V
use tokio::process::Command;
use crate::db::model::package::{
ActionRequestEntry, CurrentDependencies, CurrentDependencyInfo, CurrentDependencyKind,
TaskEntry, CurrentDependencies, CurrentDependencyInfo, CurrentDependencyKind,
ManifestPreference,
};
use crate::disk::mount::filesystem::bind::Bind;
@@ -335,7 +335,7 @@ pub struct CheckDependenciesResult {
installed_version: Option<VersionString>,
satisfies: BTreeSet<VersionString>,
is_running: bool,
requested_actions: BTreeMap<ReplayId, ActionRequestEntry>,
tasks: BTreeMap<ReplayId, TaskEntry>,
#[ts(as = "BTreeMap::<HealthCheckId, NamedHealthCheckResult>")]
health_checks: OrdMap<HealthCheckId, NamedHealthCheckResult>,
}
@@ -351,7 +351,7 @@ pub async fn check_dependencies(
.as_idx(&context.seed.id)
.or_not_found(&context.seed.id)?;
let current_dependencies = pde.as_current_dependencies().de()?;
let requested_actions = pde.as_requested_actions().de()?;
let tasks = pde.as_tasks().de()?;
let package_dependency_info: Vec<_> = package_ids
.unwrap_or_else(|| current_dependencies.0.keys().cloned().collect())
.into_iter()
@@ -365,9 +365,9 @@ pub async fn check_dependencies(
for (package_id, dependency_info) in package_dependency_info {
let title = dependency_info.title.clone();
let Some(package) = db.as_public().as_package_data().as_idx(&package_id) else {
let requested_actions = requested_actions
let tasks = tasks
.iter()
.filter(|(_, v)| v.request.package_id == package_id)
.filter(|(_, v)| v.task.package_id == package_id)
.map(|(k, v)| (k.clone(), v.clone()))
.collect();
results.push(CheckDependenciesResult {
@@ -376,7 +376,7 @@ pub async fn check_dependencies(
installed_version: None,
satisfies: BTreeSet::new(),
is_running: false,
requested_actions,
tasks,
health_checks: Default::default(),
});
continue;
@@ -393,9 +393,9 @@ pub async fn check_dependencies(
false
};
let health_checks = status.health().cloned().unwrap_or_default();
let requested_actions = requested_actions
let tasks = tasks
.iter()
.filter(|(_, v)| v.request.package_id == package_id)
.filter(|(_, v)| v.task.package_id == package_id)
.map(|(k, v)| (k.clone(), v.clone()))
.collect();
results.push(CheckDependenciesResult {
@@ -404,7 +404,7 @@ pub async fn check_dependencies(
installed_version,
satisfies,
is_running,
requested_actions,
tasks,
health_checks,
});
}

View File

@@ -1,5 +1,3 @@
use models::VersionString;
use crate::service::effects::prelude::*;
#[derive(Debug, Clone, Serialize, Deserialize, TS, Parser)]
@@ -7,7 +5,7 @@ use crate::service::effects::prelude::*;
#[ts(export)]
pub struct SetDataVersionParams {
#[ts(type = "string")]
version: VersionString,
version: Option<String>,
}
pub async fn set_data_version(
context: EffectContext,
@@ -25,7 +23,7 @@ pub async fn set_data_version(
.as_idx_mut(package_id)
.or_not_found(package_id)?
.as_data_version_mut()
.ser(&Some(version))
.ser(&version)
})
.await
.result?;
@@ -33,7 +31,7 @@ pub async fn set_data_version(
Ok(())
}
pub async fn get_data_version(context: EffectContext) -> Result<Option<VersionString>, Error> {
pub async fn get_data_version(context: EffectContext) -> Result<Option<String>, Error> {
let context = context.deref()?;
let package_id = &context.seed.id;
context

View File

@@ -17,7 +17,7 @@ use futures::{FutureExt, SinkExt, StreamExt, TryStreamExt};
use helpers::NonDetachingJoinHandle;
use imbl_value::{json, InternedString};
use itertools::Itertools;
use models::{ActionId, HostId, ImageId, PackageId, ProcedureName};
use models::{ActionId, HostId, ImageId, PackageId};
use nix::sys::signal::Signal;
use persistent_container::{PersistentContainer, Subcontainer};
use rpc_toolkit::{from_fn_async, CallRemoteHandler, Empty, HandlerArgs, HandlerFor};
@@ -30,27 +30,31 @@ use tokio::process::Command;
use tokio::sync::Notify;
use tokio_tungstenite::tungstenite::protocol::frame::coding::CloseCode;
use ts_rs::TS;
use url::Url;
use crate::context::{CliContext, RpcContext};
use crate::db::model::package::{
InstalledState, PackageDataEntry, PackageState, PackageStateMatchModelRef, UpdatingState,
InstalledState, ManifestPreference, PackageDataEntry, PackageState, PackageStateMatchModelRef,
UpdatingState,
};
use crate::disk::mount::guard::GenericMountGuard;
use crate::disk::mount::filesystem::ReadOnly;
use crate::disk::mount::guard::{GenericMountGuard, MountGuard};
use crate::install::PKG_ARCHIVE_DIR;
use crate::lxc::ContainerId;
use crate::prelude::*;
use crate::progress::{NamedProgress, Progress};
use crate::rpc_continuations::{Guid, RpcContinuation};
use crate::s9pk::S9pk;
use crate::service::action::update_requested_actions;
use crate::service::action::update_tasks;
use crate::service::rpc::{ExitParams, InitKind};
use crate::service::service_map::InstallProgressHandles;
use crate::util::actor::concurrent::ConcurrentActor;
use crate::util::io::{create_file, AsyncReadStream, TermSize};
use crate::util::net::WebSocketExt;
use crate::util::serde::{NoOutput, Pem};
use crate::util::serde::Pem;
use crate::util::Never;
use crate::volume::data_dir;
use crate::{CAP_1_KiB, DATA_DIR, PACKAGE_DATA};
use crate::{CAP_1_KiB, DATA_DIR};
pub mod action;
pub mod cli;
@@ -62,6 +66,7 @@ mod service_actor;
pub mod service_map;
pub mod start_stop;
mod transition;
pub mod uninstall;
mod util;
pub use service_map::ServiceMap;
@@ -116,99 +121,35 @@ impl ServiceRef {
pub fn weak(&self) -> Weak<Service> {
Arc::downgrade(&self.0)
}
pub async fn uninstall(
self,
target_version: Option<models::VersionString>,
soft: bool,
force: bool,
) -> Result<(), Error> {
let uninit_res = self
.seed
.persistent_container
.execute::<NoOutput>(
Guid::new(),
ProcedureName::PackageUninit,
to_value(&target_version)?,
None,
) // TODO timeout
.await;
pub async fn uninstall(self, uninit: ExitParams, soft: bool, force: bool) -> Result<(), Error> {
let id = self.seed.persistent_container.s9pk.as_manifest().id.clone();
let ctx = self.seed.ctx.clone();
let uninit_res = self.shutdown(Some(uninit.clone())).await;
if force {
uninit_res.log_err();
} else {
uninit_res?;
}
let id = self.seed.persistent_container.s9pk.as_manifest().id.clone();
let ctx = self.seed.ctx.clone();
self.shutdown().await?;
if target_version.is_none() {
if let Some(pde) = ctx
.db
.mutate(|d| {
if let Some(pde) = d
.as_public_mut()
.as_package_data_mut()
.remove(&id)?
.map(|d| d.de())
.transpose()?
{
d.as_private_mut().as_available_ports_mut().mutate(|p| {
p.free(
pde.hosts
.0
.values()
.flat_map(|h| h.bindings.values())
.flat_map(|b| {
b.net
.assigned_port
.into_iter()
.chain(b.net.assigned_ssl_port)
}),
);
Ok(())
})?;
d.as_private_mut().as_package_stores_mut().remove(&id)?;
Ok(Some(pde))
} else {
Ok(None)
}
})
.await
.result?
{
let state = pde.state_info.expect_removing()?;
if !soft {
for volume_id in &state.manifest.volumes {
let path = data_dir(DATA_DIR, &state.manifest.id, volume_id);
if tokio::fs::metadata(&path).await.is_ok() {
tokio::fs::remove_dir_all(&path).await?;
}
}
let logs_dir = Path::new(PACKAGE_DATA)
.join("logs")
.join(&state.manifest.id);
if tokio::fs::metadata(&logs_dir).await.is_ok() {
tokio::fs::remove_dir_all(&logs_dir).await?;
}
let archive_path = Path::new(PACKAGE_DATA)
.join("archive")
.join("installed")
.join(&state.manifest.id);
if tokio::fs::metadata(&archive_path).await.is_ok() {
tokio::fs::remove_file(&archive_path).await?;
}
}
}
if uninit.is_uninstall() {
uninstall::cleanup(&ctx, &id, soft).await?;
}
Ok(())
}
pub async fn shutdown(self) -> Result<(), Error> {
pub async fn shutdown(self, uninit: Option<ExitParams>) -> Result<(), Error> {
if let Some((hdl, shutdown)) = self.seed.persistent_container.rpc_server.send_replace(None)
{
self.seed
.persistent_container
.rpc_client
.request(rpc::Exit, Empty {})
.request(
rpc::Exit,
uninit.clone().unwrap_or_else(|| {
ExitParams::target_version(
&*self.seed.persistent_container.s9pk.as_manifest().version,
)
}),
)
.await?;
shutdown.shutdown();
tokio::time::timeout(Duration::from_secs(30), hdl)
@@ -234,11 +175,12 @@ impl ServiceRef {
)
})?
.persistent_container
.exit()
.exit(uninit)
.await?;
Ok(())
}
}
impl Deref for ServiceRef {
type Target = Service;
fn deref(&self) -> &Self::Target {
@@ -257,7 +199,14 @@ pub struct Service {
}
impl Service {
#[instrument(skip_all)]
async fn new(ctx: RpcContext, s9pk: S9pk, start: StartStop) -> Result<ServiceRef, Error> {
async fn new(
ctx: RpcContext,
s9pk: S9pk,
start: StartStop,
procedure_id: Guid,
init_kind: Option<InitKind>,
recovery_source: Option<impl GenericMountGuard>,
) -> Result<ServiceRef, Error> {
let id = s9pk.as_manifest().id.clone();
let persistent_container = PersistentContainer::new(
&ctx, s9pk,
@@ -277,11 +226,28 @@ impl Service {
seed,
}
.into();
let recovery_guard = if let Some(recovery_source) = &recovery_source {
Some(
service
.seed
.persistent_container
.mount_backup(recovery_source.path().join("data"), ReadOnly)
.await?,
)
} else {
None
};
service
.seed
.persistent_container
.init(service.weak())
.init(service.weak(), procedure_id, init_kind)
.await?;
if let Some(recovery_guard) = recovery_guard {
recovery_guard.unmount(true).await?;
}
if let Some(recovery_source) = recovery_source {
recovery_source.unmount().await?;
}
Ok(service)
}
@@ -305,7 +271,9 @@ impl Service {
} else {
StartStop::Stop
};
Self::new(ctx, s9pk, start_stop).await.map(Some)
Self::new(ctx, s9pk, start_stop, Guid::new(), None, None::<MountGuard>)
.await
.map(Some)
}
};
let s9pk_dir = Path::new(DATA_DIR).join(PKG_ARCHIVE_DIR).join("installed"); // TODO: make this based on hash
@@ -328,7 +296,7 @@ impl Service {
tracing::debug!("{e:?}")
}) {
if let Ok(service) =
Self::install(ctx.clone(), s9pk, None, None::<Never>, None)
Self::install(ctx.clone(), s9pk, &None, None, None::<Never>, None)
.await
.map_err(|e| {
tracing::error!("Error installing service: {e}");
@@ -365,7 +333,8 @@ impl Service {
if let Ok(service) = Self::install(
ctx.clone(),
s9pk,
Some(s.as_manifest().as_version().de()?),
&None,
Some(entry.as_status().de()?.run_state()),
None::<Never>,
None,
)
@@ -407,27 +376,66 @@ impl Service {
tracing::error!("Error opening s9pk for removal: {e}");
tracing::debug!("{e:?}")
}) {
if let Ok(service) = Self::new(ctx.clone(), s9pk, StartStop::Stop)
.await
.map_err(|e| {
tracing::error!("Error loading service for removal: {e}");
tracing::debug!("{e:?}")
})
let err_state = |e: Error| async move {
let state = crate::status::MainStatus::Error {
on_rebuild: StartStop::Stop,
message: e.to_string(),
debug: Some(format!("{e:?}")),
};
ctx.db
.mutate(move |db| {
if let Some(pde) =
db.as_public_mut().as_package_data_mut().as_idx_mut(&id)
{
pde.as_state_info_mut().map_mutate(|s| {
Ok(PackageState::Installed(InstalledState {
manifest: s
.as_manifest(ManifestPreference::Old)
.clone(),
}))
})?;
pde.as_status_mut().ser(&state)?;
}
Ok(())
})
.await
.result
};
match Self::new(
ctx.clone(),
s9pk,
StartStop::Stop,
Guid::new(),
None,
None::<MountGuard>,
)
.await
{
match service.uninstall(None, false, false).await {
Ok(service) => match service
.uninstall(ExitParams::uninstall(), false, false)
.await
{
Err(e) => {
tracing::error!("Error uninstalling service: {e}");
tracing::debug!("{e:?}")
tracing::debug!("{e:?}");
err_state(e).await?;
}
Ok(()) => return Ok(None),
},
Err(e) => {
tracing::error!("Error loading service for removal: {e}");
tracing::debug!("{e:?}");
err_state(e).await?;
}
}
}
ctx.db
.mutate(|v| v.as_public_mut().as_package_data_mut().remove(id))
.await
.result?;
if disposition == LoadDisposition::Retry {
ctx.db
.mutate(|v| v.as_public_mut().as_package_data_mut().remove(id))
.await
.result?;
}
Ok(None)
}
@@ -445,53 +453,30 @@ impl Service {
pub async fn install(
ctx: RpcContext,
s9pk: S9pk,
mut src_version: Option<models::VersionString>,
registry: &Option<Url>,
prev_state: Option<StartStop>,
recovery_source: Option<impl GenericMountGuard>,
progress: Option<InstallProgressHandles>,
) -> Result<ServiceRef, Error> {
let manifest = s9pk.as_manifest().clone();
let developer_key = s9pk.as_archive().signer();
let icon = s9pk.icon_data_url().await?;
let service = Self::new(ctx.clone(), s9pk, StartStop::Stop).await?;
if let Some(recovery_source) = recovery_source {
service
.actor
.send(
Guid::new(),
transition::restore::Restore {
path: recovery_source.path().to_path_buf(),
},
)
.await??;
recovery_source.unmount().await?;
src_version = Some(
service
.seed
.persistent_container
.s9pk
.as_manifest()
.version
.clone(),
);
}
let procedure_id = Guid::new();
service
.seed
.persistent_container
.execute::<NoOutput>(
procedure_id.clone(),
ProcedureName::PackageInit,
to_value(&src_version)?,
None,
) // TODO timeout
.await
.with_kind(if src_version.is_some() {
ErrorKind::UpdateFailed
let service = Self::new(
ctx.clone(),
s9pk,
StartStop::Stop,
procedure_id.clone(),
Some(if recovery_source.is_some() {
InitKind::Restore
} else if prev_state.is_some() {
InitKind::Update
} else {
ErrorKind::InstallFailed
})?; // TODO: handle cancellation
InitKind::Install
}),
recovery_source,
)
.await?;
if let Some(mut progress) = progress {
progress.finalization_progress.complete();
@@ -501,19 +486,19 @@ impl Service {
let peek = ctx.db.peek().await;
let mut action_input: BTreeMap<ActionId, Value> = BTreeMap::new();
let requested_actions: BTreeSet<_> = peek
let tasks: BTreeSet<_> = peek
.as_public()
.as_package_data()
.as_entries()?
.into_iter()
.map(|(_, pde)| {
Ok(pde
.as_requested_actions()
.as_tasks()
.as_entries()?
.into_iter()
.map(|(_, r)| {
Ok::<_, Error>(if r.as_request().as_package_id().de()? == manifest.id {
Some(r.as_request().as_action_id().de()?)
Ok::<_, Error>(if r.as_task().as_package_id().de()? == manifest.id {
Some(r.as_task().as_action_id().de()?)
} else {
None
})
@@ -523,27 +508,30 @@ impl Service {
.flatten_ok()
.map(|a| a.and_then(|a| a))
.try_collect()?;
for action_id in requested_actions {
if let Some(input) = service
.get_action_input(procedure_id.clone(), action_id.clone())
.await?
.and_then(|i| i.value)
for action_id in tasks {
if peek
.as_public()
.as_package_data()
.as_idx(&manifest.id)
.or_not_found(&manifest.id)?
.as_actions()
.contains_key(&action_id)?
{
action_input.insert(action_id, input);
if let Some(input) = service
.get_action_input(procedure_id.clone(), action_id.clone())
.await?
.and_then(|i| i.value)
{
action_input.insert(action_id, input);
}
}
}
ctx.db
.mutate(|db| {
for (action_id, input) in &action_input {
for (_, pde) in db.as_public_mut().as_package_data_mut().as_entries_mut()? {
pde.as_requested_actions_mut().mutate(|requested_actions| {
Ok(update_requested_actions(
requested_actions,
&manifest.id,
action_id,
input,
false,
))
pde.as_tasks_mut().mutate(|tasks| {
Ok(update_tasks(tasks, &manifest.id, action_id, input, false))
})?;
}
}
@@ -552,13 +540,18 @@ impl Service {
.as_package_data_mut()
.as_idx_mut(&manifest.id)
.or_not_found(&manifest.id)?;
let actions = entry.as_actions().keys()?;
entry.as_tasks_mut().mutate(|t| {
Ok(t.retain(|_, v| {
v.task.package_id != manifest.id || actions.contains(&v.task.action_id)
}))
})?;
entry
.as_state_info_mut()
.ser(&PackageState::Installed(InstalledState { manifest }))?;
entry.as_developer_key_mut().ser(&Pem::new(developer_key))?;
entry.as_icon_mut().ser(&icon)?;
// TODO: marketplace url
// TODO: dependency info
entry.as_registry_mut().ser(registry)?;
Ok(())
})
@@ -583,7 +576,7 @@ impl Service {
.send(
Guid::new(),
transition::backup::Backup {
path: guard.path().to_path_buf(),
path: guard.path().join("data"),
},
)
.await??

View File

@@ -31,7 +31,9 @@ use crate::s9pk::merkle_archive::source::FileSource;
use crate::s9pk::S9pk;
use crate::service::effects::context::EffectContext;
use crate::service::effects::handler;
use crate::service::rpc::{CallbackHandle, CallbackId, CallbackParams};
use crate::service::rpc::{
CallbackHandle, CallbackId, CallbackParams, ExitParams, InitKind, InitParams,
};
use crate::service::start_stop::StartStop;
use crate::service::transition::{TransitionKind, TransitionState};
use crate::service::{rpc, RunningStatus, Service};
@@ -369,7 +371,12 @@ impl PersistentContainer {
}
#[instrument(skip_all)]
pub async fn init(&self, seed: Weak<Service>) -> Result<(), Error> {
pub async fn init(
&self,
seed: Weak<Service>,
procedure_id: Guid,
kind: Option<InitKind>,
) -> Result<(), Error> {
let socket_server_context = EffectContext::new(seed);
let server = Server::new(move || ready(Ok(socket_server_context.clone())), handler());
let path = self
@@ -424,7 +431,15 @@ impl PersistentContainer {
));
}
self.rpc_client.request(rpc::Init, Empty {}).await?;
self.rpc_client
.request(
rpc::Init,
InitParams {
id: procedure_id,
kind,
},
)
.await?;
self.state.send_modify(|s| s.rt_initialized = true);
@@ -435,10 +450,12 @@ impl PersistentContainer {
fn destroy(
&mut self,
error: bool,
uninit: Option<ExitParams>,
) -> Option<impl Future<Output = Result<(), Error>> + 'static> {
if self.destroyed {
return None;
}
let version = self.s9pk.as_manifest().version.clone();
let rpc_client = self.rpc_client.clone();
let rpc_server = self.rpc_server.send_replace(None);
let js_mount = self.js_mount.take();
@@ -469,7 +486,14 @@ impl PersistentContainer {
}
}
if let Some((hdl, shutdown)) = rpc_server {
errs.handle(rpc_client.request(rpc::Exit, Empty {}).await);
errs.handle(
rpc_client
.request(
rpc::Exit,
uninit.unwrap_or_else(|| ExitParams::target_version(&*version)),
)
.await,
);
shutdown.shutdown();
errs.handle(hdl.await.with_kind(ErrorKind::Cancelled));
}
@@ -494,8 +518,8 @@ impl PersistentContainer {
}
#[instrument(skip_all)]
pub async fn exit(mut self) -> Result<(), Error> {
if let Some(destroy) = self.destroy(false) {
pub async fn exit(mut self, uninit: Option<ExitParams>) -> Result<(), Error> {
if let Some(destroy) = self.destroy(false, uninit) {
destroy.await?;
}
tracing::info!("Service for {} exited", self.s9pk.as_manifest().id);
@@ -613,7 +637,7 @@ impl PersistentContainer {
impl Drop for PersistentContainer {
fn drop(&mut self) {
if let Some(destroy) = self.destroy(true) {
if let Some(destroy) = self.destroy(true, None) {
tokio::spawn(async move { destroy.await.log_err() });
}
}

View File

@@ -4,8 +4,9 @@ use std::sync::{Arc, Weak};
use std::time::Duration;
use clap::builder::ValueParserFactory;
use exver::{ExtendedVersion, VersionRange};
use imbl::Vector;
use imbl_value::Value;
use imbl_value::{InternedString, Value};
use models::{FromStrParser, ProcedureName};
use rpc_toolkit::yajrc::RpcMethod;
use rpc_toolkit::Empty;
@@ -16,10 +17,25 @@ use crate::rpc_continuations::Guid;
use crate::service::persistent_container::PersistentContainer;
use crate::util::Never;
/// Why the container's `init` procedure is being run, forwarded to the
/// service runtime so the package can distinguish a first install from an
/// update or a backup restore. Carried as `Option<InitKind>` in
/// [`InitParams`]; `None` means an ordinary startup with no state change.
#[derive(Clone, serde::Deserialize, serde::Serialize, TS)]
#[serde(rename_all = "kebab-case")]
pub enum InitKind {
    /// First-time installation of the package (no prior state).
    Install,
    /// Re-initialization after replacing a previously installed version.
    Update,
    /// Initialization after the package's data was restored from a backup.
    Restore,
}
/// Parameters for the container runtime's `init` RPC (see [`Init`]).
#[derive(Clone, serde::Deserialize, serde::Serialize, TS)]
#[serde(rename_all = "camelCase")]
pub struct InitParams {
    /// Identifier of the procedure invocation that triggered this init.
    pub id: Guid,
    /// Reason for the init when it accompanies an install/update/restore;
    /// `None` for a plain startup — TODO confirm against runtime contract.
    pub kind: Option<InitKind>,
}
#[derive(Clone)]
pub struct Init;
impl RpcMethod for Init {
type Params = Empty;
type Params = InitParams;
type Response = ();
fn as_str<'a>(&'a self) -> &'a str {
"init"
@@ -70,10 +86,42 @@ impl serde::Serialize for Stop {
}
}
/// Parameters for the container runtime's `exit` RPC (see [`Exit`]),
/// telling the package why it is shutting down.
#[derive(Clone, serde::Deserialize, serde::Serialize, TS)]
#[serde(rename_all = "camelCase")]
pub struct ExitParams {
    // Fresh procedure id minted by each constructor below.
    id: Guid,
    /// VersionRange or ExtendedVersion
    ///
    /// The version (or range of versions) expected to take over this
    /// package's data next; `None` means the package is being uninstalled
    /// outright (see [`ExitParams::uninstall`]).
    #[ts(type = "string | null")]
    target: Option<InternedString>,
}
impl ExitParams {
pub fn target_version(version: &ExtendedVersion) -> Self {
Self {
id: Guid::new(),
target: Some(InternedString::from_display(version)),
}
}
pub fn target_range(range: &VersionRange) -> Self {
Self {
id: Guid::new(),
target: Some(InternedString::from_display(range)),
}
}
pub fn uninstall() -> Self {
Self {
id: Guid::new(),
target: None,
}
}
pub fn is_uninstall(&self) -> bool {
self.target.is_none()
}
}
#[derive(Clone)]
pub struct Exit;
impl RpcMethod for Exit {
type Params = Empty;
type Params = ExitParams;
type Response = ();
fn as_str<'a>(&'a self) -> &'a str {
"exit"

View File

@@ -58,10 +58,6 @@ async fn service_actor_loop(
transition_state: Some(TransitionKind::Restarting),
..
} => MainStatus::Restarting,
ServiceStateKinds {
transition_state: Some(TransitionKind::Restoring),
..
} => MainStatus::Restoring,
ServiceStateKinds {
transition_state: Some(TransitionKind::BackingUp),
..

View File

@@ -3,15 +3,16 @@ use std::sync::Arc;
use std::time::Duration;
use color_eyre::eyre::eyre;
use exver::VersionRange;
use futures::future::{BoxFuture, Fuse};
use futures::stream::FuturesUnordered;
use futures::{Future, FutureExt, StreamExt, TryFutureExt};
use helpers::NonDetachingJoinHandle;
use imbl::OrdMap;
use imbl_value::InternedString;
use models::ErrorData;
use tokio::sync::{oneshot, Mutex, OwnedRwLockReadGuard, OwnedRwLockWriteGuard, RwLock};
use tracing::instrument;
use url::Url;
use crate::context::RpcContext;
use crate::db::model::package::{
@@ -22,9 +23,11 @@ use crate::install::PKG_ARCHIVE_DIR;
use crate::notifications::{notify, NotificationLevel};
use crate::prelude::*;
use crate::progress::{FullProgressTracker, PhaseProgressTrackerHandle, ProgressTrackerWriter};
use crate::rpc_continuations::Guid;
use crate::s9pk::manifest::PackageId;
use crate::s9pk::merkle_archive::source::FileSource;
use crate::s9pk::S9pk;
use crate::service::rpc::ExitParams;
use crate::service::start_stop::StartStop;
use crate::service::{LoadDisposition, Service, ServiceRef};
use crate::status::MainStatus;
@@ -94,7 +97,7 @@ impl ServiceMap {
let mut shutdown_err = Ok(());
let mut service = self.get_mut(id).await;
if let Some(service) = service.take() {
shutdown_err = service.shutdown().await;
shutdown_err = service.shutdown(None).await;
}
match Service::load(ctx, id, disposition).await {
Ok(s) => *service = s.into(),
@@ -130,6 +133,7 @@ impl ServiceMap {
&self,
ctx: RpcContext,
s9pk: F,
registry: Option<Url>,
recovery_source: Option<impl GenericMountGuard>,
progress: Option<FullProgressTracker>,
) -> Result<DownloadInstallFuture, Error>
@@ -181,6 +185,7 @@ impl ServiceMap {
let manifest = manifest.clone();
let id = id.clone();
let install_progress = progress.snapshot();
let registry = registry.clone();
move |db| {
if let Some(pde) =
db.as_public_mut().as_package_data_mut().as_idx_mut(&id)
@@ -212,13 +217,13 @@ impl ServiceMap {
},
data_version: None,
status: MainStatus::Stopped,
registry: None,
registry,
developer_key: Pem::new(developer_key),
icon,
last_backup: None,
current_dependencies: Default::default(),
actions: Default::default(),
requested_actions: Default::default(),
tasks: Default::default(),
service_interfaces: Default::default(),
hosts: Default::default(),
store_exposed_dependents: Default::default(),
@@ -287,35 +292,59 @@ impl ServiceMap {
ErrorKind::InvalidRequest,
"cannot restore over existing package"
);
let version = service
let prev_version = service
.seed
.persistent_container
.s9pk
.as_manifest()
.version
.clone();
service
.uninstall(Some(s9pk.as_manifest().version.clone()), false, false)
.await?;
let prev_can_migrate_to = &service
.seed
.persistent_container
.s9pk
.as_manifest()
.can_migrate_to;
let next_version = &s9pk.as_manifest().version;
let next_can_migrate_from = &s9pk.as_manifest().can_migrate_from;
let uninit = if prev_version.satisfies(next_can_migrate_from) {
ExitParams::target_version(&*prev_version)
} else if next_version.satisfies(prev_can_migrate_to) {
ExitParams::target_version(&s9pk.as_manifest().version)
} else {
ExitParams::target_range(&VersionRange::and(
prev_can_migrate_to.clone(),
next_can_migrate_from.clone(),
))
};
let run_state = service
.seed
.persistent_container
.state
.borrow()
.desired_state;
service.uninstall(uninit, false, false).await?;
progress.complete();
Some(version)
Some(run_state)
} else {
None
};
*service = Some(
Service::install(
ctx,
s9pk,
prev,
recovery_source,
Some(InstallProgressHandles {
finalization_progress,
progress,
}),
)
.await?
.into(),
);
let new_service = Service::install(
ctx,
s9pk,
&registry,
prev,
recovery_source,
Some(InstallProgressHandles {
finalization_progress,
progress,
}),
)
.await?;
if prev == Some(StartStop::Start) {
new_service.start(Guid::new()).await?;
}
*service = Some(new_service.into());
drop(service);
sync_progress_task.await.map_err(|_| {
@@ -359,14 +388,23 @@ impl ServiceMap {
ServiceRefReloadCancelGuard::new(ctx.clone(), id.clone(), "Uninstall", None)
.handle_last(async move {
if let Some(service) = guard.take() {
let res = service.uninstall(None, soft, force).await;
let res = service
.uninstall(ExitParams::uninstall(), soft, force)
.await;
drop(guard);
res
} else {
Err(Error::new(
eyre!("service {id} failed to initialize - cannot remove gracefully"),
ErrorKind::Uninitialized,
))
if force {
super::uninstall::cleanup(&ctx, &id, soft).await?;
Ok(())
} else {
Err(Error::new(
eyre!(
"service {id} failed to initialize - cannot remove gracefully"
),
ErrorKind::Uninitialized,
))
}
}
})
.await?;
@@ -382,7 +420,7 @@ impl ServiceMap {
for service in lock.values().cloned() {
futs.push(async move {
if let Some(service) = service.write_owned().await.take() {
service.shutdown().await?
service.shutdown(None).await?
}
Ok::<_, Error>(())
});

View File

@@ -10,13 +10,11 @@ use crate::util::future::{CancellationHandle, RemoteCancellable};
pub mod backup;
pub mod restart;
pub mod restore;
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum TransitionKind {
BackingUp,
Restarting,
Restoring,
}
/// Used only in the manager/mod and is used to keep track of the state of the manager during the

View File

@@ -1,76 +0,0 @@
use std::path::PathBuf;
use futures::channel::oneshot;
use futures::FutureExt;
use models::ProcedureName;
use crate::disk::mount::filesystem::ReadOnly;
use crate::prelude::*;
use crate::rpc_continuations::Guid;
use crate::service::transition::{TransitionKind, TransitionState};
use crate::service::ServiceActor;
use crate::util::actor::background::BackgroundJobQueue;
use crate::util::actor::{ConflictBuilder, Handler};
use crate::util::future::RemoteCancellable;
use crate::util::serde::NoOutput;
/// Actor message asking a service to restore its data from a backup
/// mounted at `path`.
pub(in crate::service) struct Restore {
    pub path: PathBuf,
}
/// Runs a backup restore as a cancellable background transition on the
/// service actor: mounts the backup, invokes the package's `RestoreBackup`
/// procedure, unmounts, and clears the transition state when done.
impl Handler<Restore> for ServiceActor {
    type Response = Result<(), Error>;
    fn conflicts_with(_: &Restore) -> ConflictBuilder<Self> {
        // A restore must not run concurrently with any other actor operation.
        ConflictBuilder::everything()
    }
    async fn handle(
        &mut self,
        id: Guid,
        restore: Restore,
        jobs: &BackgroundJobQueue,
    ) -> Self::Response {
        // Clone only what the background job needs: the service seed and the
        // state watch channel (so the job can clear its own transition entry).
        let path = restore.path;
        let seed = self.0.clone();
        let state = self.0.persistent_container.state.clone();
        // Oneshot carries the job's result back to this handler; if the job
        // is cancelled the sender is dropped and `recv_res` yields Err.
        let (send_res, recv_res) = oneshot::channel();
        let transition = RemoteCancellable::new(
            async move {
                // Mount the backup read-only, let the package restore from it,
                // then unmount before reporting success.
                let backup_guard = seed
                    .persistent_container
                    .mount_backup(path, ReadOnly)
                    .await?;
                seed.persistent_container
                    .execute::<NoOutput>(id, ProcedureName::RestoreBackup, Value::Null, None)
                    .await?;
                backup_guard.unmount(true).await?;
                // On success, remove this transition from the service state.
                state.send_modify(|s| {
                    s.transition_state.take();
                });
                Ok::<_, Error>(())
            }
            .map(|res| send_res.send(res)),
        );
        let cancel_handle = transition.cancellation_handle();
        jobs.add_job(transition.map(|_| ()));
        // Install the Restoring transition, swapping out (and aborting) any
        // transition that was already in progress.
        let mut old = None;
        self.0.persistent_container.state.send_modify(|s| {
            old = std::mem::replace(
                &mut s.transition_state,
                Some(TransitionState {
                    kind: TransitionKind::Restoring,
                    cancel_handle,
                }),
            )
        });
        if let Some(t) = old {
            t.abort().await;
        }
        match recv_res.await {
            // Sender dropped without a result => the restore was cancelled.
            Err(_) => Err(Error::new(eyre!("Restoring canceled"), ErrorKind::Unknown)),
            Ok(res) => res,
        }
    }
}

View File

@@ -0,0 +1,70 @@
use std::path::Path;
use models::PackageId;
use crate::context::RpcContext;
use crate::prelude::*;
use crate::volume::data_dir;
use crate::{DATA_DIR, PACKAGE_DATA};
/// Remove all traces of an (un)installed package: its db entry (freeing any
/// ports assigned to its hosts and dropping its private store), and — unless
/// `soft` — its volume data, log directory, and installed s9pk archive.
pub async fn cleanup(ctx: &RpcContext, id: &PackageId, soft: bool) -> Result<(), Error> {
    // Atomically detach the package from the db, returning the removed entry
    // (deserialized) so we know what to delete on disk afterwards.
    let removed = ctx
        .db
        .mutate(|d| {
            let Some(pde) = d
                .as_public_mut()
                .as_package_data_mut()
                .remove(&id)?
                .map(|d| d.de())
                .transpose()?
            else {
                return Ok(None);
            };
            // Release every port (plain and SSL) bound by any of the
            // package's hosts back to the available-ports pool.
            d.as_private_mut().as_available_ports_mut().mutate(|p| {
                p.free(
                    pde.hosts
                        .0
                        .values()
                        .flat_map(|h| h.bindings.values())
                        .flat_map(|b| {
                            b.net
                                .assigned_port
                                .into_iter()
                                .chain(b.net.assigned_ssl_port)
                        }),
                );
                Ok(())
            })?;
            d.as_private_mut().as_package_stores_mut().remove(&id)?;
            Ok(Some(pde))
        })
        .await
        .result?;
    let Some(pde) = removed else {
        // Nothing was installed under this id; nothing to clean up.
        return Ok(());
    };
    // The entry is expected to be in the Removing state at this point.
    let state = pde.state_info.expect_removing()?;
    if soft {
        // Soft cleanup keeps all on-disk data.
        return Ok(());
    }
    // Delete on-disk state: each declared volume, the log directory, and the
    // installed archive entry.
    for volume_id in &state.manifest.volumes {
        let path = data_dir(DATA_DIR, &state.manifest.id, volume_id);
        if tokio::fs::metadata(&path).await.is_ok() {
            tokio::fs::remove_dir_all(&path).await?;
        }
    }
    let logs_dir = Path::new(PACKAGE_DATA)
        .join("logs")
        .join(&state.manifest.id);
    if tokio::fs::metadata(&logs_dir).await.is_ok() {
        tokio::fs::remove_dir_all(&logs_dir).await?;
    }
    // NOTE(review): this uses remove_file, so the "installed" archive entry
    // is presumably a file or symlink rather than a directory — confirm.
    let archive_path = Path::new(PACKAGE_DATA)
        .join("archive")
        .join("installed")
        .join(&state.manifest.id);
    if tokio::fs::metadata(&archive_path).await.is_ok() {
        tokio::fs::remove_file(&archive_path).await?;
    }
    Ok(())
}

View File

@@ -8,7 +8,7 @@ use const_format::formatcp;
use josekit::jwk::Jwk;
use patch_db::json_ptr::ROOT;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use tokio::io::AsyncWriteExt;
use tokio::process::Command;
@@ -32,15 +32,15 @@ use crate::disk::mount::guard::{GenericMountGuard, TmpMountGuard};
use crate::disk::util::{pvscan, recovery_info, DiskInfo, StartOsRecoveryInfo};
use crate::disk::REPAIR_DISK_PATH;
use crate::init::{init, InitPhases, InitResult};
use crate::net::net_controller::NetController;
use crate::net::ssl::root_ca_start_time;
use crate::prelude::*;
use crate::progress::{FullProgress, PhaseProgressTrackerHandle};
use crate::rpc_continuations::Guid;
use crate::system::sync_kiosk;
use crate::util::crypto::EncryptedWire;
use crate::util::io::{create_file, dir_copy, dir_size, Counter};
use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt, DATA_DIR, MAIN_DATA, PACKAGE_DATA};
use crate::{Error, ErrorKind, ResultExt, DATA_DIR, MAIN_DATA, PACKAGE_DATA, PLATFORM};
pub fn setup<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
@@ -62,6 +62,11 @@ pub fn setup<C: Context>() -> ParentHandler<C> {
.no_cli(),
)
.subcommand("exit", from_fn_async(exit).no_cli())
.subcommand("logs", crate::system::logs::<SetupContext>())
.subcommand(
"logs",
from_fn_async(crate::logs::cli_logs::<SetupContext, Empty>).no_display(),
)
}
pub fn disk<C: Context>() -> ParentHandler<C> {
@@ -80,6 +85,7 @@ pub async fn list_disks(ctx: SetupContext) -> Result<Vec<DiskInfo>, Error> {
async fn setup_init(
ctx: &SetupContext,
password: Option<String>,
kiosk: Option<bool>,
init_phases: InitPhases,
) -> Result<(AccountInfo, InitResult), Error> {
let init_result = init(&ctx.webserver, &ctx.config, init_phases).await?;
@@ -93,15 +99,19 @@ async fn setup_init(
account.set_password(password)?;
}
account.save(m)?;
m.as_public_mut()
.as_server_info_mut()
.as_password_hash_mut()
.ser(&account.password)?;
let info = m.as_public_mut().as_server_info_mut();
info.as_password_hash_mut().ser(&account.password)?;
if let Some(kiosk) = kiosk {
info.as_kiosk_mut().ser(&Some(kiosk))?;
}
Ok(account)
})
.await
.result?;
sync_kiosk(kiosk).await?;
if let Some(password) = &password {
write_shadow(&password).await?;
}
@@ -116,6 +126,8 @@ pub struct AttachParams {
#[serde(rename = "startOsPassword")]
password: Option<EncryptedWire>,
guid: Arc<String>,
#[ts(optional)]
kiosk: Option<bool>,
}
pub async fn attach(
@@ -123,10 +135,11 @@ pub async fn attach(
AttachParams {
password,
guid: disk_guid,
kiosk,
}: AttachParams,
) -> Result<SetupProgress, Error> {
let setup_ctx = ctx.clone();
ctx.run_setup(|| async move {
ctx.run_setup(move || async move {
let progress = &setup_ctx.progress;
let mut disk_phase = progress.add_phase("Opening data drive".into(), Some(10));
let init_phases = InitPhases::new(&progress);
@@ -173,7 +186,7 @@ pub async fn attach(
}
disk_phase.complete();
let (account, net_ctrl) = setup_init(&setup_ctx, password, init_phases).await?;
let (account, net_ctrl) = setup_init(&setup_ctx, password, kiosk, init_phases).await?;
let rpc_ctx = RpcContext::init(&setup_ctx.webserver, &setup_ctx.config, disk_guid, Some(net_ctrl), rpc_ctx_phases).await?;
@@ -293,6 +306,8 @@ pub struct SetupExecuteParams {
start_os_logicalname: PathBuf,
start_os_password: EncryptedWire,
recovery_source: Option<RecoverySource<EncryptedWire>>,
#[ts(optional)]
kiosk: Option<bool>,
}
// #[command(rpc_only)]
@@ -302,6 +317,7 @@ pub async fn execute(
start_os_logicalname,
start_os_password,
recovery_source,
kiosk,
}: SetupExecuteParams,
) -> Result<SetupProgress, Error> {
let start_os_password = match start_os_password.decrypt(&ctx) {
@@ -333,7 +349,15 @@ pub async fn execute(
};
let setup_ctx = ctx.clone();
ctx.run_setup(|| execute_inner(setup_ctx, start_os_logicalname, start_os_password, recovery))?;
ctx.run_setup(move || {
execute_inner(
setup_ctx,
start_os_logicalname,
start_os_password,
recovery,
kiosk,
)
})?;
Ok(ctx.progress().await)
}
@@ -376,6 +400,7 @@ pub async fn execute_inner(
start_os_logicalname: PathBuf,
start_os_password: String,
recovery_source: Option<RecoverySource<String>>,
kiosk: Option<bool>,
) -> Result<(SetupResult, RpcContext), Error> {
let progress = &ctx.progress;
let mut disk_phase = progress.add_phase("Formatting data drive".into(), Some(10));
@@ -429,14 +454,15 @@ pub async fn execute_inner(
target,
server_id,
password,
kiosk,
progress,
)
.await
}
Some(RecoverySource::Migrate { guid: old_guid }) => {
migrate(&ctx, guid, &old_guid, start_os_password, progress).await
migrate(&ctx, guid, &old_guid, start_os_password, kiosk, progress).await
}
None => fresh_setup(&ctx, guid, &start_os_password, progress).await,
None => fresh_setup(&ctx, guid, &start_os_password, kiosk, progress).await,
}
}
@@ -450,6 +476,7 @@ async fn fresh_setup(
ctx: &SetupContext,
guid: Arc<String>,
start_os_password: &str,
kiosk: Option<bool>,
SetupExecuteProgress {
init_phases,
rpc_ctx_phases,
@@ -458,7 +485,9 @@ async fn fresh_setup(
) -> Result<(SetupResult, RpcContext), Error> {
let account = AccountInfo::new(start_os_password, root_ca_start_time().await?)?;
let db = ctx.db().await?;
db.put(&ROOT, &Database::init(&account)?).await?;
let kiosk = Some(kiosk.unwrap_or(true)).filter(|_| &*PLATFORM != "raspberrypi");
sync_kiosk(kiosk).await?;
db.put(&ROOT, &Database::init(&account, kiosk)?).await?;
drop(db);
let init_result = init(&ctx.webserver, &ctx.config, init_phases).await?;
@@ -485,6 +514,7 @@ async fn recover(
recovery_source: BackupTargetFS,
server_id: String,
recovery_password: String,
kiosk: Option<bool>,
progress: SetupExecuteProgress,
) -> Result<(SetupResult, RpcContext), Error> {
let recovery_source = TmpMountGuard::mount(&recovery_source, ReadWrite).await?;
@@ -495,6 +525,7 @@ async fn recover(
recovery_source,
&server_id,
&recovery_password,
kiosk,
progress,
)
.await
@@ -506,6 +537,7 @@ async fn migrate(
guid: Arc<String>,
old_guid: &str,
start_os_password: String,
kiosk: Option<bool>,
SetupExecuteProgress {
init_phases,
restore_phase,
@@ -583,7 +615,7 @@ async fn migrate(
crate::disk::main::export(&old_guid, "/media/startos/migrate").await?;
restore_phase.complete();
let (account, net_ctrl) = setup_init(&ctx, Some(start_os_password), init_phases).await?;
let (account, net_ctrl) = setup_init(&ctx, Some(start_os_password), kiosk, init_phases).await?;
let rpc_ctx = RpcContext::init(
&ctx.webserver,

View File

@@ -24,7 +24,6 @@ pub enum MainStatus {
},
Stopped,
Restarting,
Restoring,
Stopping,
Starting {
#[ts(as = "BTreeMap<HealthCheckId, NamedHealthCheckResult>")]
@@ -54,7 +53,6 @@ impl MainStatus {
..
} => true,
MainStatus::Stopped
| MainStatus::Restoring
| MainStatus::Stopping { .. }
| MainStatus::BackingUp {
on_complete: StartStop::Stop,
@@ -65,6 +63,13 @@ impl MainStatus {
} => false,
}
}
pub fn run_state(&self) -> StartStop {
if self.running() {
StartStop::Start
} else {
StartStop::Stop
}
}
pub fn major_changes(&self, other: &Self) -> bool {
match (self, other) {
@@ -73,7 +78,6 @@ impl MainStatus {
(MainStatus::Stopping, MainStatus::Stopping) => false,
(MainStatus::Stopped, MainStatus::Stopped) => false,
(MainStatus::Restarting, MainStatus::Restarting) => false,
(MainStatus::Restoring, MainStatus::Restoring) => false,
(MainStatus::BackingUp { .. }, MainStatus::BackingUp { .. }) => false,
(MainStatus::Error { .. }, MainStatus::Error { .. }) => false,
_ => true,
@@ -95,7 +99,6 @@ impl MainStatus {
MainStatus::Running { health, .. } | MainStatus::Starting { health } => Some(health),
MainStatus::BackingUp { .. }
| MainStatus::Stopped
| MainStatus::Restoring
| MainStatus::Stopping { .. }
| MainStatus::Restarting
| MainStatus::Error { .. } => None,

View File

@@ -248,6 +248,75 @@ pub fn kernel_logs<C: Context + AsRef<RpcContinuations>>() -> ParentHandler<C, L
crate::logs::logs(|_: &C, _| async { Ok(LogSource::Kernel) })
}
const DISABLE_KIOSK_PATH: &str =
"/media/startos/config/overlay/etc/systemd/system/getty@tty1.service.d/autologin.conf";
/// Apply a desired kiosk setting to the host, if one was specified.
/// `None` leaves the current on-disk kiosk configuration untouched.
pub async fn sync_kiosk(kiosk: Option<bool>) -> Result<(), Error> {
    match kiosk {
        Some(true) => enable_kiosk().await?,
        Some(false) => disable_kiosk().await?,
        None => (),
    }
    Ok(())
}
/// Enable kiosk mode by removing the autologin override file that disables
/// it; a no-op if the override is not present.
pub async fn enable_kiosk() -> Result<(), Error> {
    let currently_disabled = tokio::fs::metadata(DISABLE_KIOSK_PATH).await.is_ok();
    if currently_disabled {
        crate::util::io::delete_file(DISABLE_KIOSK_PATH).await?;
    }
    Ok(())
}
/// Disable kiosk mode by creating the autologin override file, syncing it
/// to disk so the setting survives an immediate reboot.
pub async fn disable_kiosk() -> Result<(), Error> {
    let marker = crate::util::io::create_file(DISABLE_KIOSK_PATH).await?;
    marker.sync_all().await?;
    Ok(())
}
pub fn kiosk<C: Context>() -> ParentHandler<C> {
ParentHandler::<C>::new()
.subcommand(
"enable",
from_fn_async(|ctx: RpcContext| async move {
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_server_info_mut()
.as_kiosk_mut()
.ser(&Some(true))
})
.await
.result?;
enable_kiosk().await
})
.no_display()
.with_about("Enable kiosk mode")
.with_call_remote::<CliContext>(),
)
.subcommand(
"disable",
from_fn_async(|ctx: RpcContext| async move {
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_server_info_mut()
.as_kiosk_mut()
.ser(&Some(false))
})
.await
.result?;
disable_kiosk().await
})
.no_display()
.with_about("Disable kiosk mode")
.with_call_remote::<CliContext>(),
)
}
#[derive(Serialize, Deserialize)]
pub struct MetricLeaf<T> {
value: T,

View File

@@ -17,7 +17,7 @@ v0.4.0 is a complete rewrite of StartOS, almost nothing survived. After nearly s
## Changelog
- [Improve user interface](#user-interface)
- [Add transaltions](#translations)
- [Add translations](#translations)
- [Switch to lxc-based container runtime](#lxc)
- [Update s9pk archive format](#s9pk-archive-format)
- [Improve Actions](#actions)

View File

@@ -355,6 +355,7 @@ impl VersionT for Version {
.install(
ctx.clone(),
|| crate::s9pk::load(file.clone(), || Ok(key.de()?.0), None),
None,
None::<crate::util::Never>,
None,
)

View File

@@ -27,7 +27,7 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
}
fn down(self, _db: &mut Value) -> Result<(), Error> {

View File

@@ -27,7 +27,7 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
}
fn down(self, _db: &mut Value) -> Result<(), Error> {

View File

@@ -2,7 +2,9 @@ use exver::{PreReleaseSegment, VersionRange};
use super::v0_3_5::V0_3_0_COMPAT;
use super::{v0_4_0_alpha_3, VersionT};
use crate::context::RpcContext;
use crate::prelude::*;
use crate::util::io::create_file_mod;
lazy_static::lazy_static! {
static ref V0_4_0_alpha_4: exver::Version = exver::Version::new(
@@ -27,7 +29,76 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
    #[instrument]
    /// Migrate the db for 0.4.0-alpha.4: add the `serverInfo.kiosk` flag
    /// (defaulting to disabled) and rename each package's `requestedActions`
    /// map to `tasks`, renaming the inner `request` field to `task`.
    fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
        // New field: kiosk mode flag, off by default for migrated systems.
        db["public"]["serverInfo"]
            .as_object_mut()
            .or_not_found("public.serverInfo")?
            .insert("kiosk".into(), Value::Bool(false));
        for (_, pde) in db["public"]["packageData"]
            .as_object_mut()
            .into_iter()
            .flat_map(|m| m.iter_mut())
        {
            // Skip malformed (non-object) package entries rather than fail.
            let Some(pde) = pde.as_object_mut() else {
                continue;
            };
            // Pull out the old `requestedActions` map; entries without one
            // (or with a non-object value) are left untouched.
            let Some(mut tasks) = pde.remove("requestedActions").and_then(|ar| {
                if let Value::Object(ar) = ar {
                    Some(ar)
                } else {
                    None
                }
            }) else {
                continue;
            };
            // Within each entry, rename the `request` field to `task`.
            for (_, task_entry) in tasks.iter_mut() {
                let Some(task_entry) = task_entry.as_object_mut() else {
                    continue;
                };
                let Some(task) = task_entry.remove("request") else {
                    continue;
                };
                task_entry.insert("task".into(), task);
            }
            // Re-insert the map under its new name.
            pde.insert("tasks".into(), Value::Object(tasks));
        }
        Ok(())
    }
    /// Post-migration fixup for the persistent `/etc/shadow` overlay: rewrite
    /// it from the live `/etc/shadow`, giving the `kiosk` user the same
    /// password hash as `start9`, then copy the result back over the live
    /// file. No-op if the overlay shadow file does not exist.
    async fn post_up(self, _ctx: &RpcContext) -> Result<(), Error> {
        use tokio::io::AsyncWriteExt;
        if tokio::fs::metadata("/media/startos/config/overlay/etc/shadow")
            .await
            .is_ok()
        {
            // Holds start9's hash once seen, so it can be applied to `kiosk`.
            let mut hash = None;
            let shadow_contents = tokio::fs::read_to_string("/etc/shadow").await?;
            // 0o640: root-writable, shadow-group readable.
            let mut shadow_file =
                create_file_mod("/media/startos/config/overlay/etc/shadow", 0o640).await?;
            for line in shadow_contents.lines() {
                match line.split_once(":") {
                    Some((user, rest)) if user == "start9" || user == "kiosk" => {
                        // shadow format: user:hash:rest-of-fields
                        let (h, rest) = rest.split_once(":").ok_or_else(|| {
                            Error::new(eyre!("malformed /etc/shadow"), ErrorKind::ParseSysInfo)
                        })?;
                        if user == "start9" {
                            hash = Some(h.to_owned());
                        }
                        // NOTE(review): `kiosk` only inherits start9's hash if
                        // the start9 line precedes it in /etc/shadow — confirm
                        // that ordering is guaranteed.
                        let h = hash.as_deref().unwrap_or(h);
                        shadow_file
                            .write_all(format!("{user}:{h}:{rest}\n").as_bytes())
                            .await?;
                    }
                    // All other users pass through unchanged.
                    _ => {
                        shadow_file.write_all(line.as_bytes()).await?;
                        shadow_file.write_all(b"\n").await?;
                    }
                }
            }
            // Sync, then make the live shadow match the overlay exactly.
            shadow_file.sync_all().await?;
            tokio::fs::copy("/media/startos/config/overlay/etc/shadow", "/etc/shadow").await?;
        }
        Ok(())
    }
fn down(self, _db: &mut Value) -> Result<(), Error> {