update/alpha.9 (#2988)

* import marketplace preview for sideload

* fix: improve state service (#2977)

* fix: fix sideload DI

* fix: update Angular

* fix: cleanup

* fix: fix version selection

* Bump node version to fix build for Angular

* misc fixes
- update node to v22
- fix chroot-and-upgrade access to prune-images
- don't self-migrate legacy packages
- #2985
- move dataVersion to volume folder
- remove "instructions.md" from s9pk
- add "docsUrl" to manifest

* version bump

* include flavor when clicking view listing from updates tab

* closes #2980

* fix: fix select button

* bring back ssh keys

* fix: drop 'portal' from all routes

* fix: implement longtap action to select table rows

* fix description for ssh page

* replace instructions with docsLink and refactor marketplace preview

* delete unused translations

* fix patchdb diffing algorithm

* continue refactor of marketplace lib show components

* Booting StartOS instead of Setting up your server on init

* misc fixes
- closes #2990
- closes #2987

* fix build

* docsUrl and clickable service headers

* don't cleanup after update until new service install succeeds

* update types

* misc fixes

* beta.35

* sdkversion, githash for sideload, correct logs for init, startos pubkey display

* bring back reboot button on install

* misc fixes

* beta.36

* better handling of setup and init for websocket errors

* reopen init and setup logs even on graceful closure

* better logging, misc fixes

* fix build

* don't let package stats hang

* don't show docsUrl in marketplace if no docsUrl

* re-add needs-config

* show error if init fails, shorten hover state on header icons

* fix operator precedence

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
Co-authored-by: Alex Inkin <alexander@inkin.ru>
Co-authored-by: Mariusz Kogen <k0gen@pm.me>
This commit is contained in:
Aiden McClelland
2025-07-18 18:31:12 +00:00
committed by GitHub
parent ba2906a42e
commit 377b7b12ce
237 changed files with 5953 additions and 4777 deletions

663
core/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -9,6 +9,14 @@ use yasi::InternedString;
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, TS)]
#[ts(type = "string")]
pub struct ReplayId(InternedString);
// Blanket conversion into `ReplayId` from anything that can become an
// `InternedString` (e.g. `&str`, `String`), so call sites can write
// `"foo".into()` instead of constructing the newtype by hand.
impl<T> From<T> for ReplayId
where
T: Into<InternedString>,
{
// Interns the value and wraps it in the `ReplayId` newtype.
fn from(value: T) -> Self {
Self(value.into())
}
}
impl FromStr for ReplayId {
type Err = Infallible;
fn from_str(s: &str) -> Result<Self, Self::Err> {

View File

@@ -14,7 +14,7 @@ keywords = [
name = "start-os"
readme = "README.md"
repository = "https://github.com/Start9Labs/start-os"
version = "0.4.0-alpha.8" # VERSION_BUMP
version = "0.4.0-alpha.9" # VERSION_BUMP
license = "MIT"
[lib]
@@ -117,7 +117,7 @@ id-pool = { version = "0.2.2", default-features = false, features = [
"u16",
] }
imbl = "4.0.1"
imbl-value = "0.2.0"
imbl-value = "0.3.2"
include_dir = { version = "0.7.3", features = ["metadata"] }
indexmap = { version = "2.0.2", features = ["serde"] }
indicatif = { version = "0.17.7", features = ["tokio"] }

View File

@@ -6,11 +6,8 @@ use clap::Parser;
use reqwest::Url;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use sqlx::postgres::PgConnectOptions;
use sqlx::PgPool;
use crate::disk::OsPartitionInfo;
use crate::init::init_postgres;
use crate::prelude::*;
use crate::util::serde::IoFormat;
use crate::version::VersionT;
@@ -151,16 +148,4 @@ impl ServerConfig {
Ok(db)
}
#[instrument(skip_all)]
pub async fn secret_store(&self) -> Result<PgPool, Error> {
init_postgres("/media/startos/data").await?;
let secret_store =
PgPool::connect_with(PgConnectOptions::new().database("secrets").username("root"))
.await?;
sqlx::migrate!()
.run(&secret_store)
.await
.with_kind(crate::ErrorKind::Database)?;
Ok(secret_store)
}
}

View File

@@ -1,7 +1,9 @@
use std::collections::{BTreeMap, BTreeSet};
use std::ffi::OsStr;
use std::future::Future;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;
@@ -28,6 +30,7 @@ use crate::db::model::package::TaskSeverity;
use crate::db::model::Database;
use crate::disk::OsPartitionInfo;
use crate::init::{check_time_is_synchronized, InitResult};
use crate::install::PKG_ARCHIVE_DIR;
use crate::lxc::{ContainerId, LxcContainer, LxcManager};
use crate::net::net_controller::{NetController, NetService};
use crate::net::utils::{find_eth_iface, find_wifi_iface};
@@ -40,8 +43,10 @@ use crate::service::action::update_tasks;
use crate::service::effects::callbacks::ServiceCallbacks;
use crate::service::ServiceMap;
use crate::shutdown::Shutdown;
use crate::util::io::delete_file;
use crate::util::lshw::LshwDevice;
use crate::util::sync::{SyncMutex, Watch};
use crate::DATA_DIR;
pub struct RpcContextSeed {
is_closed: AtomicBool,
@@ -103,6 +108,7 @@ impl InitRpcContextPhases {
pub struct CleanupInitPhases {
cleanup_sessions: PhaseProgressTrackerHandle,
init_services: PhaseProgressTrackerHandle,
prune_s9pks: PhaseProgressTrackerHandle,
check_tasks: PhaseProgressTrackerHandle,
}
impl CleanupInitPhases {
@@ -110,6 +116,7 @@ impl CleanupInitPhases {
Self {
cleanup_sessions: handle.add_phase("Cleaning up sessions".into(), Some(1)),
init_services: handle.add_phase("Initializing services".into(), Some(10)),
prune_s9pks: handle.add_phase("Pruning S9PKs".into(), Some(1)),
check_tasks: handle.add_phase("Checking action requests".into(), Some(1)),
}
}
@@ -307,7 +314,8 @@ impl RpcContext {
&self,
CleanupInitPhases {
mut cleanup_sessions,
init_services,
mut init_services,
mut prune_s9pks,
mut check_tasks,
}: CleanupInitPhases,
) -> Result<(), Error> {
@@ -366,12 +374,38 @@ impl RpcContext {
});
cleanup_sessions.complete();
self.services.init(&self, init_services).await?;
tracing::info!("Initialized Services");
init_services.start();
self.services.init(&self).await?;
init_services.complete();
// TODO
check_tasks.start();
prune_s9pks.start();
let peek = self.db.peek().await;
let keep = peek
.as_public()
.as_package_data()
.as_entries()?
.into_iter()
.map(|(_, pde)| pde.as_s9pk().de())
.collect::<Result<BTreeSet<PathBuf>, Error>>()?;
let installed_dir = &Path::new(DATA_DIR).join(PKG_ARCHIVE_DIR).join("installed");
if tokio::fs::metadata(&installed_dir).await.is_ok() {
let mut dir = tokio::fs::read_dir(&installed_dir)
.await
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("dir {installed_dir:?}")))?;
while let Some(file) = dir
.next_entry()
.await
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("dir {installed_dir:?}")))?
{
let path = file.path();
if path.extension() == Some(OsStr::new("s9pk")) && !keep.contains(&path) {
delete_file(path).await?;
}
}
}
prune_s9pks.complete();
check_tasks.start();
let mut action_input: OrdMap<PackageId, BTreeMap<ActionId, Value>> = OrdMap::new();
let tasks: BTreeSet<_> = peek
.as_public()

View File

@@ -1,4 +1,5 @@
use std::collections::{BTreeMap, BTreeSet};
use std::path::PathBuf;
use chrono::{DateTime, Utc};
use exver::VersionRange;
@@ -287,6 +288,7 @@ pub struct InstallingState {
#[ts(export)]
pub struct UpdatingState {
pub manifest: Manifest,
pub s9pk: PathBuf,
pub installing_info: InstallingInfo,
}
@@ -362,7 +364,7 @@ impl Default for ActionVisibility {
#[ts(export)]
pub struct PackageDataEntry {
pub state_info: PackageState,
pub data_version: Option<String>,
pub s9pk: PathBuf,
pub status: MainStatus,
#[ts(type = "string | null")]
pub registry: Option<Url>,

View File

@@ -50,7 +50,7 @@ impl Public {
hostname: account.hostname.no_dot_host_name(),
last_backup: None,
package_version_compat: Current::default().compat().clone(),
post_init_migration_todos: BTreeSet::new(),
post_init_migration_todos: BTreeMap::new(),
network: NetworkInfo {
host: Host {
bindings: [(
@@ -155,8 +155,8 @@ pub struct ServerInfo {
pub version: Version,
#[ts(type = "string")]
pub package_version_compat: VersionRange,
#[ts(type = "string[]")]
pub post_init_migration_todos: BTreeSet<Version>,
#[ts(type = "Record<string, unknown>")]
pub post_init_migration_todos: BTreeMap<Version, Value>,
#[ts(type = "string | null")]
pub last_backup: Option<DateTime<Utc>>,
pub network: NetworkInfo,

View File

@@ -60,145 +60,6 @@ pub async fn check_time_is_synchronized() -> Result<bool, Error> {
== "NTPSynchronized=yes")
}
// must be idempotent
#[tracing::instrument(skip_all)]
pub async fn init_postgres(datadir: impl AsRef<Path>) -> Result<(), Error> {
let db_dir = datadir.as_ref().join("main/postgresql");
if tokio::process::Command::new("mountpoint")
.arg("/var/lib/postgresql")
.stdout(std::process::Stdio::null())
.stderr(std::process::Stdio::null())
.status()
.await?
.success()
{
unmount("/var/lib/postgresql", true).await?;
}
let exists = tokio::fs::metadata(&db_dir).await.is_ok();
if !exists {
Command::new("cp")
.arg("-ra")
.arg("/var/lib/postgresql")
.arg(&db_dir)
.invoke(crate::ErrorKind::Filesystem)
.await?;
}
Command::new("chown")
.arg("-R")
.arg("postgres:postgres")
.arg(&db_dir)
.invoke(crate::ErrorKind::Database)
.await?;
let mut pg_paths = tokio::fs::read_dir("/usr/lib/postgresql").await?;
let mut pg_version = None;
while let Some(pg_path) = pg_paths.next_entry().await? {
let pg_path_version = pg_path
.file_name()
.to_str()
.map(|v| v.parse())
.transpose()?
.unwrap_or(0);
if pg_path_version > pg_version.unwrap_or(0) {
pg_version = Some(pg_path_version)
}
}
let pg_version = pg_version.ok_or_else(|| {
Error::new(
eyre!("could not determine postgresql version"),
crate::ErrorKind::Database,
)
})?;
crate::disk::mount::util::bind(&db_dir, "/var/lib/postgresql", false).await?;
let pg_version_string = pg_version.to_string();
let pg_version_path = db_dir.join(&pg_version_string);
if exists
// maybe migrate
{
let incomplete_path = db_dir.join(format!("{pg_version}.migration.incomplete"));
if tokio::fs::metadata(&incomplete_path).await.is_ok() // previous migration was incomplete
&& tokio::fs::metadata(&pg_version_path).await.is_ok()
{
tokio::fs::remove_dir_all(&pg_version_path).await?;
}
if tokio::fs::metadata(&pg_version_path).await.is_err()
// need to migrate
{
let conf_dir = Path::new("/etc/postgresql").join(pg_version.to_string());
let conf_dir_tmp = {
let mut tmp = conf_dir.clone();
tmp.set_extension("tmp");
tmp
};
if tokio::fs::metadata(&conf_dir).await.is_ok() {
Command::new("mv")
.arg(&conf_dir)
.arg(&conf_dir_tmp)
.invoke(ErrorKind::Filesystem)
.await?;
}
let mut old_version = pg_version;
while old_version > 13
/* oldest pg version included in startos */
{
old_version -= 1;
let old_datadir = db_dir.join(old_version.to_string());
if tokio::fs::metadata(&old_datadir).await.is_ok() {
create_file(&incomplete_path).await?.sync_all().await?;
Command::new("pg_upgradecluster")
.arg(old_version.to_string())
.arg("main")
.invoke(crate::ErrorKind::Database)
.await?;
break;
}
}
if tokio::fs::metadata(&conf_dir).await.is_ok() {
if tokio::fs::metadata(&conf_dir).await.is_ok() {
tokio::fs::remove_dir_all(&conf_dir).await?;
}
Command::new("mv")
.arg(&conf_dir_tmp)
.arg(&conf_dir)
.invoke(ErrorKind::Filesystem)
.await?;
}
tokio::fs::remove_file(&incomplete_path).await?;
}
if tokio::fs::metadata(&incomplete_path).await.is_ok() {
unreachable!() // paranoia
}
}
Command::new("systemctl")
.arg("start")
.arg(format!("postgresql@{pg_version}-main.service"))
.invoke(crate::ErrorKind::Database)
.await?;
if !exists {
Command::new("sudo")
.arg("-u")
.arg("postgres")
.arg("createuser")
.arg("root")
.invoke(crate::ErrorKind::Database)
.await?;
Command::new("sudo")
.arg("-u")
.arg("postgres")
.arg("createdb")
.arg("secrets")
.arg("-O")
.arg("root")
.invoke(crate::ErrorKind::Database)
.await?;
}
Ok(())
}
pub struct InitResult {
pub net_ctrl: Arc<NetController>,
pub os_net_service: NetService,
@@ -336,7 +197,6 @@ pub async fn init(
let db = TypedPatchDb::<Database>::load_unchecked(db);
let peek = db.peek().await;
load_database.complete();
tracing::info!("Opened PatchDB");
load_ssh_keys.start();
crate::ssh::sync_keys(
@@ -347,7 +207,6 @@ pub async fn init(
)
.await?;
load_ssh_keys.complete();
tracing::info!("Synced SSH Keys");
let account = AccountInfo::load(&peek)?;
@@ -398,13 +257,16 @@ pub async fn init(
.arg("systemd-journald")
.invoke(crate::ErrorKind::Journald)
.await?;
Command::new("killall")
.arg("journalctl")
.invoke(crate::ErrorKind::Journald)
.await?;
mount_logs.complete();
tokio::io::copy(
&mut open_file("/run/startos/init.log").await?,
&mut tokio::io::stderr(),
)
.await?;
tracing::info!("Mounted Logs");
load_ca_cert.start();
// write to ca cert store
@@ -434,7 +296,6 @@ pub async fn init(
.result?;
crate::net::wifi::synchronize_network_manager(MAIN_DATA, &wifi).await?;
load_wifi.complete();
tracing::info!("Synchronized WiFi");
init_tmp.start();
let tmp_dir = Path::new(PACKAGE_DATA).join("tmp");
@@ -476,7 +337,6 @@ pub async fn init(
if let Some(governor) = governor {
tracing::info!("Setting CPU Governor to \"{governor}\"");
cpupower::set_governor(governor).await?;
tracing::info!("Set CPU Governor");
}
set_governor.complete();
@@ -504,8 +364,6 @@ pub async fn init(
}
if !ntp_synced {
tracing::warn!("Timed out waiting for system time to synchronize");
} else {
tracing::info!("Syncronized system clock");
}
sync_clock.complete();
@@ -537,7 +395,6 @@ pub async fn init(
})
.await
.result?;
tracing::info!("Updated server info");
update_server_info.complete();
launch_service_network.start();
@@ -546,7 +403,6 @@ pub async fn init(
.arg("lxc-net.service")
.invoke(ErrorKind::Lxc)
.await?;
tracing::info!("Launched service intranet");
launch_service_network.complete();
validate_db.start();
@@ -556,7 +412,6 @@ pub async fn init(
})
.await
.result?;
tracing::info!("Validated database");
validate_db.complete();
if let Some(progress) = postinit {

View File

@@ -134,7 +134,7 @@ pub async fn install(
"package.get",
json!({
"id": id,
"version": VersionRange::exactly(version.deref().clone()),
"targetVersion": VersionRange::exactly(version.deref().clone()),
}),
RegistryUrlParams {
registry: registry.clone(),
@@ -476,7 +476,7 @@ pub async fn cli_install(
let mut packages: GetPackageResponse = from_value(
ctx.call_remote::<RegistryContext>(
"package.get",
json!({ "id": &id, "version": version, "sourceVersion": source_version }),
json!({ "id": &id, "targetVersion": version, "sourceVersion": source_version }),
)
.await?,
)?;

View File

@@ -1,18 +0,0 @@
use std::collections::BTreeMap;
use models::PackageId;
use rpc_toolkit::command;
use tracing::instrument;
use crate::config::not_found;
use crate::context::RpcContext;
use crate::db::model::CurrentDependents;
use crate::prelude::*;
use crate::util::serde::display_serializable;
use crate::util::Version;
use crate::Error;
#[command(subcommands(dry))]
pub async fn update() -> Result<(), Error> {
Ok(())
}

View File

@@ -584,8 +584,10 @@ pub async fn stats(ctx: RpcContext) -> Result<BTreeMap<PackageId, Option<Service
let mut stats = BTreeMap::new();
for id in ids {
let service: tokio::sync::OwnedRwLockReadGuard<Option<crate::service::ServiceRef>> =
ctx.services.get(&id).await;
let Some(service) = ctx.services.try_get(&id) else {
stats.insert(id, None);
continue;
};
let Some(service_ref) = service.as_ref() else {
stats.insert(id, None);

View File

@@ -410,6 +410,7 @@ async fn watcher(
if let Err(e) = res {
tracing::error!("{e}");
tracing::debug!("{e:?}");
tokio::time::sleep(Duration::from_secs(1)).await;
}
}
}
@@ -420,7 +421,7 @@ async fn get_wan_ipv4(iface: &str) -> Result<Option<Ipv4Addr>, Error> {
let client = client.interface(iface);
Ok(client
.build()?
.get("http://ip4only.me/api/")
.get("https://ip4only.me/api/")
.timeout(Duration::from_secs(10))
.send()
.await?

View File

@@ -22,6 +22,7 @@ use http::request::Parts as RequestParts;
use http::{HeaderValue, Method, StatusCode};
use imbl_value::InternedString;
use include_dir::Dir;
use models::PackageId;
use new_mime_guess::MimeGuess;
use openssl::hash::MessageDigest;
use openssl::x509::X509;
@@ -32,7 +33,6 @@ use url::Url;
use crate::context::{DiagnosticContext, InitContext, InstallContext, RpcContext, SetupContext};
use crate::hostname::Hostname;
use crate::install::PKG_ARCHIVE_DIR;
use crate::middleware::auth::{Auth, HasValidSession};
use crate::middleware::cors::Cors;
use crate::middleware::db::SyncDb;
@@ -46,7 +46,7 @@ use crate::s9pk::S9pk;
use crate::util::io::open_file;
use crate::util::net::SyncBody;
use crate::util::serde::BASE64;
use crate::{diagnostic_api, init_api, install_api, main_api, setup_api, DATA_DIR};
use crate::{diagnostic_api, init_api, install_api, main_api, setup_api};
const NOT_FOUND: &[u8] = b"Not Found";
const METHOD_NOT_ALLOWED: &[u8] = b"Method Not Allowed";
@@ -263,13 +263,22 @@ fn s9pk_router(ctx: RpcContext) -> Router {
any(
|x::Path(s9pk): x::Path<String>, request: Request| async move {
if_authorized(&ctx, request, |request| async {
let id = s9pk
.strip_suffix(".s9pk")
.unwrap_or(&s9pk)
.parse::<PackageId>()?;
let (parts, _) = request.into_parts();
match FileData::from_path(
&parts,
&Path::new(DATA_DIR)
.join(PKG_ARCHIVE_DIR)
.join("installed")
.join(s9pk),
&ctx.db
.peek()
.await
.into_public()
.into_package_data()
.into_idx(&id)
.or_not_found(&id)?
.into_s9pk()
.de()?,
)
.await?
{
@@ -289,13 +298,22 @@ fn s9pk_router(ctx: RpcContext) -> Router {
x::RawQuery(query): x::RawQuery,
request: Request| async move {
if_authorized(&ctx, request, |request| async {
let id = s9pk
.strip_suffix(".s9pk")
.unwrap_or(&s9pk)
.parse::<PackageId>()?;
let s9pk = S9pk::deserialize(
&MultiCursorFile::from(
open_file(
Path::new(DATA_DIR)
.join(PKG_ARCHIVE_DIR)
.join("installed")
.join(s9pk),
ctx.db
.peek()
.await
.into_public()
.into_package_data()
.into_idx(&id)
.or_not_found(&id)?
.into_s9pk()
.de()?,
)
.await?,
),

View File

@@ -24,7 +24,7 @@ use crate::net::static_server::{
};
use crate::prelude::*;
use crate::util::actor::background::BackgroundJobQueue;
use crate::util::sync::Watch;
use crate::util::sync::{SyncMutex, Watch};
pub struct Accepted {
pub https_redirect: bool,
@@ -166,18 +166,20 @@ impl<A: Accept + Send + Sync + 'static> WebServer<A> {
let thread = NonDetachingJoinHandle::from(tokio::spawn(async move {
#[derive(Clone)]
struct QueueRunner {
queue: Arc<RwLock<Option<BackgroundJobQueue>>>,
queue: Arc<SyncMutex<Option<BackgroundJobQueue>>>,
}
impl<Fut> hyper::rt::Executor<Fut> for QueueRunner
where
Fut: Future + Send + 'static,
{
fn execute(&self, fut: Fut) {
if let Some(q) = &*self.queue.read().unwrap() {
q.add_job(fut);
} else {
tracing::warn!("job queued after shutdown");
}
self.queue.peek(|q| {
if let Some(q) = q {
q.add_job(fut);
} else {
tracing::warn!("job queued after shutdown");
}
})
}
}
@@ -209,8 +211,7 @@ impl<A: Accept + Send + Sync + 'static> WebServer<A> {
}
}
let accept = AtomicBool::new(true);
let queue_cell = Arc::new(RwLock::new(None));
let queue_cell = Arc::new(SyncMutex::new(None));
let graceful = hyper_util::server::graceful::GracefulShutdown::new();
let mut server = hyper_util::server::conn::auto::Builder::new(QueueRunner {
queue: queue_cell.clone(),
@@ -226,7 +227,7 @@ impl<A: Accept + Send + Sync + 'static> WebServer<A> {
.keep_alive_interval(Duration::from_secs(60))
.keep_alive_timeout(Duration::from_secs(300));
let (queue, mut runner) = BackgroundJobQueue::new();
*queue_cell.write().unwrap() = Some(queue.clone());
queue_cell.mutate(|q| *q = Some(queue.clone()));
let handler = async {
loop {
@@ -260,9 +261,8 @@ impl<A: Accept + Send + Sync + 'static> WebServer<A> {
_ = &mut runner => (),
}
accept.store(false, std::sync::atomic::Ordering::SeqCst);
drop(queue);
drop(queue_cell.write().unwrap().take());
drop(queue_cell.mutate(|q| q.take()));
if !runner.is_empty() {
tokio::time::timeout(Duration::from_secs(60), runner)

View File

@@ -5,6 +5,7 @@ use futures::future::pending;
use futures::stream::BoxStream;
use futures::{Future, FutureExt, StreamExt, TryFutureExt};
use helpers::NonDetachingJoinHandle;
use imbl::Vector;
use imbl_value::{InOMap, InternedString};
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use itertools::Itertools;
@@ -219,14 +220,22 @@ impl FullProgress {
#[derive(Clone)]
pub struct FullProgressTracker {
log: bool,
overall: watch::Sender<Progress>,
phases: watch::Sender<InOMap<InternedString, watch::Receiver<Progress>>>,
phases: watch::Sender<Vector<(InternedString, watch::Receiver<Progress>)>>,
}
impl FullProgressTracker {
pub fn new() -> Self {
let (overall, _) = watch::channel(Progress::new());
let (phases, _) = watch::channel(InOMap::new());
Self { overall, phases }
let (phases, _) = watch::channel(Vector::new());
Self {
log: false,
overall,
phases,
}
}
pub fn enable_logging(&mut self, log: bool) {
self.log = log;
}
pub fn snapshot(&self) -> FullProgress {
FullProgress {
@@ -245,8 +254,8 @@ impl FullProgressTracker {
pub fn stream(&self, min_interval: Option<Duration>) -> BoxStream<'static, FullProgress> {
struct StreamState {
overall: watch::Receiver<Progress>,
phases_recv: watch::Receiver<InOMap<InternedString, watch::Receiver<Progress>>>,
phases: InOMap<InternedString, watch::Receiver<Progress>>,
phases_recv: watch::Receiver<Vector<(InternedString, watch::Receiver<Progress>)>>,
phases: Vector<(InternedString, watch::Receiver<Progress>)>,
}
let mut overall = self.overall.subscribe();
overall.mark_changed(); // make sure stream starts with a value
@@ -280,11 +289,14 @@ impl FullProgressTracker {
futures::future::select_all(changed).await;
}
for (name, phase) in &*phases_recv.borrow_and_update() {
if !phases.contains_key(name) {
phases.insert(name.clone(), phase.clone());
let phases_ref = phases_recv.borrow_and_update();
for (idx, (name, recv)) in phases_ref.iter().enumerate() {
if phases.get(idx).map(|(n, _)| n) != Some(name) {
phases.insert(idx, (name.clone(), recv.clone()));
}
}
phases.truncate(phases_ref.len());
drop(phases_ref);
let o = *overall.borrow_and_update();
@@ -363,10 +375,12 @@ impl FullProgressTracker {
.send_modify(|o| o.add_total(overall_contribution));
}
let (send, recv) = watch::channel(Progress::new());
let log = self.log.then(|| name.clone());
self.phases.send_modify(|p| {
p.insert(name, recv);
p.push_back((name, recv));
});
PhaseProgressTrackerHandle {
log,
overall: self.overall.clone(),
overall_contribution,
contributed: 0,
@@ -379,6 +393,7 @@ impl FullProgressTracker {
}
pub struct PhaseProgressTrackerHandle {
log: Option<InternedString>,
overall: watch::Sender<Progress>,
overall_contribution: Option<u64>,
contributed: u64,
@@ -404,6 +419,9 @@ impl PhaseProgressTrackerHandle {
}
}
pub fn start(&mut self) {
if let Some(name) = &self.log {
tracing::info!("{}...", name)
}
self.progress.send_modify(|p| p.start());
}
pub fn set_done(&mut self, done: u64) {
@@ -424,6 +442,9 @@ impl PhaseProgressTrackerHandle {
pub fn complete(&mut self) {
self.progress.send_modify(|p| p.set_complete());
self.update_overall();
if let Some(name) = &self.log {
tracing::info!("{}: complete", name)
}
}
pub fn writer<W>(self, writer: W) -> ProgressTrackerWriter<W> {
ProgressTrackerWriter {

View File

@@ -181,24 +181,20 @@ impl CallRemote<RegistryContext> for CliContext {
};
let body = serde_json::to_vec(&rpc_req)?;
let host = url.host().or_not_found("registry hostname")?.to_string();
let res = self
let mut req = self
.client
.request(Method::POST, url)
.header(CONTENT_TYPE, "application/json")
.header(ACCEPT, "application/json")
.header(CONTENT_LENGTH, body.len())
.header(
.header(CONTENT_LENGTH, body.len());
if let Ok(key) = self.developer_key() {
req = req.header(
AUTH_SIG_HEADER,
SignatureHeader::sign(
&AnySigningKey::Ed25519(self.developer_key()?.clone()),
&body,
&host,
)?
.to_header(),
)
.body(body)
.send()
.await?;
SignatureHeader::sign(&AnySigningKey::Ed25519(key.clone()), &body, &host)?
.to_header(),
);
}
let res = req.body(body).send().await?;
if !res.status().is_success() {
let status = res.status();

View File

@@ -84,10 +84,14 @@ pub struct PackageVersionInfo {
pub marketing_site: Url,
#[ts(type = "string | null")]
pub donation_url: Option<Url>,
#[ts(type = "string | null")]
pub docs_url: Option<Url>,
pub alerts: Alerts,
pub dependency_metadata: BTreeMap<PackageId, DependencyMetadata>,
#[ts(type = "string")]
pub os_version: Version,
#[ts(type = "string | null")]
pub sdk_version: Option<Version>,
pub hardware_requirements: HardwareRequirements,
#[ts(type = "string | null")]
pub source_version: Option<VersionRange>,
@@ -121,9 +125,11 @@ impl PackageVersionInfo {
support_site: manifest.support_site.clone(),
marketing_site: manifest.marketing_site.clone(),
donation_url: manifest.donation_url.clone(),
docs_url: manifest.docs_url.clone(),
alerts: manifest.alerts.clone(),
dependency_metadata,
os_version: manifest.os_version.clone(),
sdk_version: manifest.sdk_version.clone(),
hardware_requirements: manifest.hardware_requirements.clone(),
source_version: None, // TODO
s9pk: RegistryAsset {

View File

@@ -21,6 +21,9 @@ impl GitHash {
.invoke(ErrorKind::Git)
.await?,
)?;
while hash.ends_with(|c: char| c.is_whitespace()) {
hash.pop();
}
if Command::new("git")
.arg("diff-index")
.arg("--quiet")
@@ -44,6 +47,9 @@ impl GitHash {
.stdout,
)
.ok()?;
while hash.ends_with(|c: char| c.is_whitespace()) {
hash.pop();
}
if !std::process::Command::new("git")
.arg("diff-index")
.arg("--quiet")

View File

@@ -310,6 +310,18 @@ pub struct Section<S> {
position: u64,
size: u64,
}
impl<S> Section<S> {
// Borrow the underlying archive source backing this section.
pub fn source(&self) -> &S {
&self.source
}
// Construct an empty (zero-length) section over `source`.
// NOTE(review): position/size are byte offsets into the source —
// presumably consistent with `FileSource for Section<S>` below; confirm.
pub fn null(source: S) -> Self {
Self {
source,
position: 0,
size: 0,
}
}
}
impl<S: ArchiveSource> FileSource for Section<S> {
type Reader = S::FetchReader;
type SliceReader = S::FetchReader;

View File

@@ -39,7 +39,7 @@ pub struct MultiCursorFile {
file: Arc<Mutex<File>>,
}
impl MultiCursorFile {
fn path(&self) -> Result<PathBuf, Error> {
pub fn path(&self) -> Result<PathBuf, Error> {
path_from_fd(self.fd)
}
pub async fn open(fd: &impl AsRawFd) -> Result<Self, Error> {

View File

@@ -1,6 +1,6 @@
use std::collections::BTreeSet;
use std::io::SeekFrom;
use std::path::Path;
use std::path::{Path, PathBuf};
use color_eyre::eyre::eyre;
use futures::{FutureExt, TryStreamExt};
@@ -8,7 +8,7 @@ use serde::{Deserialize, Serialize};
use tokio::io::{AsyncRead, AsyncSeek, AsyncSeekExt};
use tokio_tar::{Archive, Entry};
use crate::util::io::from_cbor_async_reader;
use crate::util::io::{from_cbor_async_reader, from_json_async_reader};
use crate::{Error, ErrorKind};
#[derive(Default, Deserialize, Serialize)]
@@ -26,7 +26,7 @@ pub enum DockerReader<R: AsyncRead + Unpin> {
}
impl<R: AsyncRead + AsyncSeek + Unpin + Send + Sync> DockerReader<R> {
pub async fn list_arches(rdr: &mut R) -> Result<BTreeSet<String>, Error> {
if let Some(multiarch) = tokio_tar::Archive::new(rdr)
if let Some(multiarch) = tokio_tar::Archive::new(&mut *rdr)
.entries()?
.try_filter_map(|e| {
async move {
@@ -42,21 +42,14 @@ impl<R: AsyncRead + AsyncSeek + Unpin + Send + Sync> DockerReader<R> {
.await?
{
let multiarch: DockerMultiArch = from_cbor_async_reader(multiarch).await?;
Ok(multiarch.available)
} else {
Err(Error::new(
eyre!("Single arch legacy s9pks not supported"),
ErrorKind::ParseS9pk,
))
return Ok(multiarch.available);
}
}
pub async fn new(mut rdr: R, arch: &str) -> Result<Self, Error> {
rdr.seek(SeekFrom::Start(0)).await?;
if let Some(image) = tokio_tar::Archive::new(rdr)
let Some(manifest) = tokio_tar::Archive::new(&mut *rdr)
.entries()?
.try_filter_map(|e| {
async move {
Ok(if &*e.path()? == Path::new(&format!("{}.tar", arch)) {
Ok(if &*e.path()? == Path::new("manifest.json") {
Some(e)
} else {
None
@@ -66,13 +59,89 @@ impl<R: AsyncRead + AsyncSeek + Unpin + Send + Sync> DockerReader<R> {
})
.try_next()
.await?
{
Ok(Self::MultiArch(image))
} else {
Err(Error::new(
eyre!("Docker image section does not contain tarball for architecture"),
else {
return Err(Error::new(
eyre!("Single arch legacy s9pk is malformed"),
ErrorKind::ParseS9pk,
))
));
};
#[derive(Deserialize)]
#[serde(rename_all = "PascalCase")]
struct Manifest {
config: PathBuf,
}
let Manifest { config } = from_json_async_reader(manifest).await?;
rdr.seek(SeekFrom::Start(0)).await?;
let Some(config) = tokio_tar::Archive::new(rdr)
.entries()?
.try_filter_map(|e| {
let config = config.clone();
async move { Ok(if &*e.path()? == config { Some(e) } else { None }) }.boxed()
})
.try_next()
.await?
else {
return Err(Error::new(
eyre!("Single arch legacy s9pk is malformed"),
ErrorKind::ParseS9pk,
));
};
#[derive(Deserialize)]
struct Config {
architecture: String,
}
let Config { architecture } = from_json_async_reader(config).await?;
let mut arches = BTreeSet::new();
arches.insert(architecture);
Ok(arches)
}
pub async fn new(mut rdr: R, arch: &str) -> Result<Self, Error> {
rdr.seek(SeekFrom::Start(0)).await?;
if tokio_tar::Archive::new(&mut rdr)
.entries()?
.try_filter_map(|e| {
async move {
Ok(if &*e.path()? == Path::new("multiarch.cbor") {
Some(e)
} else {
None
})
}
.boxed()
})
.try_next()
.await?
.is_some()
{
rdr.seek(SeekFrom::Start(0)).await?;
if let Some(image) = tokio_tar::Archive::new(rdr)
.entries()?
.try_filter_map(|e| {
async move {
Ok(if &*e.path()? == Path::new(&format!("{}.tar", arch)) {
Some(e)
} else {
None
})
}
.boxed()
})
.try_next()
.await?
{
Ok(Self::MultiArch(image))
} else {
Err(Error::new(
eyre!("Docker image section does not contain tarball for architecture"),
ErrorKind::ParseS9pk,
))
}
} else {
Ok(Self::SingleArch(rdr))
}
}
}

View File

@@ -9,7 +9,6 @@ use std::task::{Context, Poll};
use color_eyre::eyre::eyre;
use digest::Output;
use ed25519_dalek::VerifyingKey;
use futures::TryStreamExt;
use models::{ImageId, PackageId};
use sha2::{Digest, Sha512};
use tokio::fs::File;
@@ -157,34 +156,6 @@ impl S9pkReader {
}
}
impl<R: AsyncRead + AsyncSeek + Unpin + Send + Sync> S9pkReader<R> {
#[instrument(skip_all)]
pub async fn image_tags(&mut self, arch: &str) -> Result<Vec<ImageTag>, Error> {
let mut tar = tokio_tar::Archive::new(self.docker_images(arch).await?);
let mut entries = tar.entries()?;
while let Some(mut entry) = entries.try_next().await? {
if &*entry.path()? != Path::new("manifest.json") {
continue;
}
let mut buf = Vec::with_capacity(entry.header().size()? as usize);
entry.read_to_end(&mut buf).await?;
#[derive(serde::Deserialize)]
struct ManEntry {
#[serde(rename = "RepoTags")]
tags: Vec<String>,
}
let man_entries = serde_json::from_slice::<Vec<ManEntry>>(&buf)
.with_ctx(|_| (crate::ErrorKind::Deserialization, "manifest.json"))?;
return man_entries
.iter()
.flat_map(|e| &e.tags)
.map(|t| t.parse())
.collect();
}
Err(Error::new(
eyre!("image.tar missing manifest.json"),
crate::ErrorKind::ParseS9pk,
))
}
#[instrument(skip_all)]
pub async fn from_reader(mut rdr: R, check_sig: bool) -> Result<Self, Error> {
let header = Header::deserialize(&mut rdr).await?;

View File

@@ -68,17 +68,7 @@ impl S9pk<TmpSource<PackSource>> {
)),
)?;
// instructions.md
let instructions: Arc<[u8]> = reader.instructions().await?.to_vec().await?.into();
archive.insert_path(
"instructions.md",
Entry::file(TmpSource::new(
tmp_dir.clone(),
PackSource::Buffered(instructions.into()),
)),
)?;
// icon.md
// icon.*
let icon: Arc<[u8]> = reader.icon().await?.to_vec().await?.into();
archive.insert_path(
format!("icon.{}", manifest.assets.icon_type()),
@@ -214,6 +204,7 @@ impl TryFrom<ManifestV1> for Manifest {
support_site: value.support_site.unwrap_or_else(|| default_url.clone()),
marketing_site: value.marketing_site.unwrap_or_else(|| default_url.clone()),
donation_url: value.donation_url,
docs_url: None,
description: value.description,
images: BTreeMap::new(),
volumes: value

View File

@@ -51,6 +51,8 @@ pub struct Manifest {
pub marketing_site: Url,
#[ts(type = "string | null")]
pub donation_url: Option<Url>,
#[ts(type = "string | null")]
pub docs_url: Option<Url>,
pub description: Description,
pub images: BTreeMap<ImageId, ImageConfig>,
pub volumes: BTreeSet<VolumeId>,
@@ -60,8 +62,6 @@ pub struct Manifest {
pub dependencies: Dependencies,
#[serde(default)]
pub hardware_requirements: HardwareRequirements,
#[ts(optional)]
#[serde(default = "GitHash::load_sync")]
pub git_hash: Option<GitHash>,
#[serde(default = "current_version")]
#[ts(type = "string")]
@@ -83,7 +83,6 @@ impl Manifest {
.map_or(false, |mime| mime.starts_with("image/"))
})?;
expected.check_file("LICENSE.md")?;
expected.check_file("instructions.md")?;
expected.check_file("javascript.squashfs")?;
for (dependency, _) in &self.dependencies.0 {
let dep_path = Path::new("dependencies").join(dependency);

View File

@@ -57,11 +57,10 @@ fn priority(s: &str) -> Option<usize> {
"manifest.json" => Some(0),
a if Path::new(a).file_stem() == Some(OsStr::new("icon")) => Some(1),
"LICENSE.md" => Some(2),
"instructions.md" => Some(3),
"dependencies" => Some(4),
"javascript.squashfs" => Some(5),
"assets.squashfs" => Some(6),
"images" => Some(7),
"dependencies" => Some(3),
"javascript.squashfs" => Some(4),
"assets.squashfs" => Some(5),
"images" => Some(6),
_ => None,
}
}

View File

@@ -18,6 +18,7 @@ use crate::context::CliContext;
use crate::dependencies::DependencyMetadata;
use crate::prelude::*;
use crate::rpc_continuations::Guid;
use crate::s9pk::git_hash::GitHash;
use crate::s9pk::manifest::Manifest;
use crate::s9pk::merkle_archive::directory_contents::DirectoryContents;
use crate::s9pk::merkle_archive::source::http::HttpSource;
@@ -673,13 +674,6 @@ pub async fn pack(ctx: CliContext, params: PackParams) -> Result<(), Error> {
PackSource::File(params.license().await?),
)),
);
files.insert(
"instructions.md".into(),
Entry::file(TmpSource::new(
tmp_dir.clone(),
PackSource::File(params.instructions()),
)),
);
files.insert(
"javascript.squashfs".into(),
Entry::file(TmpSource::new(
@@ -694,6 +688,8 @@ pub async fn pack(ctx: CliContext, params: PackParams) -> Result<(), Error> {
)
.await?;
s9pk.as_manifest_mut().git_hash = Some(GitHash::from_path(params.path()).await?);
if !params.no_assets {
let assets_dir = params.assets();
s9pk.as_archive_mut().contents_mut().insert_path(

View File

@@ -1,4 +1,9 @@
use std::path::Path;
use crate::service::effects::prelude::*;
use crate::util::io::{delete_file, maybe_read_file_to_string, write_file_atomic};
use crate::volume::PKG_VOLUME_DIR;
use crate::DATA_DIR;
#[derive(Debug, Clone, Serialize, Deserialize, TS, Parser)]
#[serde(rename_all = "camelCase")]
@@ -7,43 +12,35 @@ pub struct SetDataVersionParams {
#[ts(type = "string")]
version: Option<String>,
}
#[instrument(skip(context))]
pub async fn set_data_version(
context: EffectContext,
SetDataVersionParams { version }: SetDataVersionParams,
) -> Result<(), Error> {
let context = context.deref()?;
let package_id = &context.seed.id;
context
.seed
.ctx
.db
.mutate(|db| {
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(package_id)
.or_not_found(package_id)?
.as_data_version_mut()
.ser(&version)
})
.await
.result?;
let path = Path::new(DATA_DIR)
.join(PKG_VOLUME_DIR)
.join(package_id)
.join("data")
.join(".version");
if let Some(version) = version {
write_file_atomic(path, version.as_bytes()).await?;
} else {
delete_file(path).await?;
}
Ok(())
}
#[instrument(skip_all)]
pub async fn get_data_version(context: EffectContext) -> Result<Option<String>, Error> {
let context = context.deref()?;
let package_id = &context.seed.id;
context
.seed
.ctx
.db
.peek()
.await
.as_public()
.as_package_data()
.as_idx(package_id)
.or_not_found(package_id)?
.as_data_version()
.de()
let path = Path::new(DATA_DIR)
.join(PKG_VOLUME_DIR)
.join(package_id)
.join("data")
.join(".version");
maybe_read_file_to_string(path).await
}

View File

@@ -39,10 +39,8 @@ use crate::db::model::package::{
};
use crate::disk::mount::filesystem::ReadOnly;
use crate::disk::mount::guard::{GenericMountGuard, MountGuard};
use crate::install::PKG_ARCHIVE_DIR;
use crate::lxc::ContainerId;
use crate::prelude::*;
use crate::progress::{NamedProgress, Progress};
use crate::rpc_continuations::{Guid, RpcContinuation};
use crate::s9pk::S9pk;
use crate::service::action::update_tasks;
@@ -50,7 +48,7 @@ use crate::service::rpc::{ExitParams, InitKind};
use crate::service::service_map::InstallProgressHandles;
use crate::service::uninstall::cleanup;
use crate::util::actor::concurrent::ConcurrentActor;
use crate::util::io::{create_file, AsyncReadStream, TermSize};
use crate::util::io::{create_file, delete_file, AsyncReadStream, TermSize};
use crate::util::net::WebSocketExt;
use crate::util::serde::Pem;
use crate::util::Never;
@@ -95,9 +93,10 @@ struct RootCommand(pub String);
pub struct MiB(pub u64);
impl MiB {
fn new(value: u64) -> Self {
pub fn new(value: u64) -> Self {
Self(value / 1024 / 1024)
}
#[allow(non_snake_case)]
fn from_MiB(value: u64) -> Self {
Self(value)
}
@@ -121,7 +120,12 @@ impl ServiceRef {
pub fn weak(&self) -> Weak<Service> {
Arc::downgrade(&self.0)
}
pub async fn uninstall(self, uninit: ExitParams, soft: bool, force: bool) -> Result<(), Error> {
pub async fn uninstall(
self,
uninit: ExitParams,
soft: bool,
force: bool,
) -> Result<BoxFuture<'static, Result<(), Error>>, Error> {
let id = self.seed.persistent_container.s9pk.as_manifest().id.clone();
let ctx = self.seed.ctx.clone();
let uninit_res = self.shutdown(Some(uninit.clone())).await;
@@ -131,10 +135,26 @@ impl ServiceRef {
uninit_res?;
}
if uninit.is_uninstall() {
uninstall::cleanup(&ctx, &id, soft).await?;
let s9pk_path = ctx
.db
.peek()
.await
.into_public()
.into_package_data()
.into_idx(&id)
.map(|pde| pde.into_s9pk());
Ok(async move {
if let Some(s9pk_path) = s9pk_path {
delete_file(s9pk_path.de()?).await?;
}
if uninit.is_uninstall() {
uninstall::cleanup(&ctx, &id, soft).await?;
}
Ok(())
}
Ok(())
.boxed())
}
pub async fn shutdown(self, uninit: Option<ExitParams>) -> Result<(), Error> {
if let Some((hdl, shutdown)) = self.seed.persistent_container.rpc_server.send_replace(None)
@@ -276,8 +296,6 @@ impl Service {
.map(Some)
}
};
let s9pk_dir = Path::new(DATA_DIR).join(PKG_ARCHIVE_DIR).join("installed"); // TODO: make this based on hash
let s9pk_path = s9pk_dir.join(id).with_extension("s9pk");
let Some(entry) = ctx
.db
.peek()
@@ -288,21 +306,28 @@ impl Service {
else {
return Ok(None);
};
let s9pk_path = entry.as_s9pk().de()?;
match entry.as_state_info().as_match() {
PackageStateMatchModelRef::Installing(_) => {
if disposition == LoadDisposition::Retry {
if let Ok(s9pk) = S9pk::open(s9pk_path, Some(id)).await.map_err(|e| {
if let Ok(s9pk) = S9pk::open(&s9pk_path, Some(id)).await.map_err(|e| {
tracing::error!("Error opening s9pk for install: {e}");
tracing::debug!("{e:?}")
}) {
if let Ok(service) =
Self::install(ctx.clone(), s9pk, &None, None, None::<Never>, None)
.await
.map_err(|e| {
tracing::error!("Error installing service: {e}");
tracing::debug!("{e:?}")
})
{
if let Ok(service) = Self::install(
ctx.clone(),
s9pk,
&s9pk_path,
&None,
None,
None::<Never>,
None,
)
.await
.map_err(|e| {
tracing::error!("Error installing service: {e}");
tracing::debug!("{e:?}")
}) {
return Ok(Some(service));
}
}
@@ -315,24 +340,18 @@ impl Service {
Ok(None)
}
PackageStateMatchModelRef::Updating(s) => {
let new_s9pk = s.as_s9pk().de()?;
if disposition == LoadDisposition::Retry
&& s.as_installing_info()
.as_progress()
.de()?
.phases
.iter()
.any(|NamedProgress { name, progress }| {
name.eq_ignore_ascii_case("download")
&& progress == &Progress::Complete(true)
})
&& tokio::fs::metadata(&new_s9pk).await.is_ok()
{
if let Ok(s9pk) = S9pk::open(&s9pk_path, Some(id)).await.map_err(|e| {
if let Ok(s9pk) = S9pk::open(&new_s9pk, Some(id)).await.map_err(|e| {
tracing::error!("Error opening s9pk for update: {e}");
tracing::debug!("{e:?}")
}) {
if let Ok(service) = Self::install(
ctx.clone(),
s9pk,
&s9pk_path,
&None,
Some(entry.as_status().de()?.run_state()),
None::<Never>,
@@ -411,9 +430,13 @@ impl Service {
)
.await
{
Ok(service) => match service
.uninstall(ExitParams::uninstall(), false, false)
.await
Ok(service) => match async {
service
.uninstall(ExitParams::uninstall(), false, false)
.await?
.await
}
.await
{
Err(e) => {
tracing::error!("Error uninstalling service: {e}");
@@ -453,6 +476,7 @@ impl Service {
pub async fn install(
ctx: RpcContext,
s9pk: S9pk,
s9pk_path: &PathBuf,
registry: &Option<Url>,
prev_state: Option<StartStop>,
recovery_source: Option<impl GenericMountGuard>,
@@ -552,6 +576,7 @@ impl Service {
entry
.as_state_info_mut()
.ser(&PackageState::Installed(InstalledState { manifest }))?;
entry.as_s9pk_mut().ser(s9pk_path)?;
entry.as_developer_key_mut().ser(&Pem::new(developer_key))?;
entry.as_icon_mut().ser(&icon)?;
entry.as_registry_mut().ser(registry)?;

View File

@@ -1,4 +1,4 @@
use std::path::Path;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
@@ -10,7 +10,7 @@ use futures::{Future, FutureExt, StreamExt, TryFutureExt};
use helpers::NonDetachingJoinHandle;
use imbl::OrdMap;
use models::ErrorData;
use tokio::sync::{oneshot, Mutex, OwnedRwLockReadGuard, OwnedRwLockWriteGuard, RwLock};
use tokio::sync::{oneshot, OwnedRwLockReadGuard, OwnedRwLockWriteGuard, RwLock};
use tracing::instrument;
use url::Url;
@@ -22,9 +22,8 @@ use crate::disk::mount::guard::GenericMountGuard;
use crate::install::PKG_ARCHIVE_DIR;
use crate::notifications::{notify, NotificationLevel};
use crate::prelude::*;
use crate::progress::{
FullProgressTracker, PhaseProgressTrackerHandle, ProgressTrackerWriter, ProgressUnits,
};
use crate::progress::{FullProgressTracker, PhaseProgressTrackerHandle, ProgressTrackerWriter};
use crate::registry::signer::commitment::merkle_archive::MerkleArchiveCommitment;
use crate::s9pk::manifest::PackageId;
use crate::s9pk::merkle_archive::source::FileSource;
use crate::s9pk::S9pk;
@@ -32,7 +31,8 @@ use crate::service::rpc::ExitParams;
use crate::service::start_stop::StartStop;
use crate::service::{LoadDisposition, Service, ServiceRef};
use crate::status::MainStatus;
use crate::util::serde::Pem;
use crate::util::serde::{Base32, Pem};
use crate::util::sync::SyncMutex;
use crate::DATA_DIR;
pub type DownloadInstallFuture = BoxFuture<'static, Result<InstallFuture, Error>>;
@@ -43,37 +43,52 @@ pub struct InstallProgressHandles {
pub progress: FullProgressTracker,
}
fn s9pk_download_path(commitment: &MerkleArchiveCommitment) -> PathBuf {
Path::new(DATA_DIR)
.join(PKG_ARCHIVE_DIR)
.join("downloading")
.join(Base32(commitment.root_sighash.0).to_lower_string())
.with_extension("s9pk")
}
fn s9pk_installed_path(commitment: &MerkleArchiveCommitment) -> PathBuf {
Path::new(DATA_DIR)
.join(PKG_ARCHIVE_DIR)
.join("installed")
.join(Base32(commitment.root_sighash.0).to_lower_string())
.with_extension("s9pk")
}
/// This is the structure to contain all the services
#[derive(Default)]
pub struct ServiceMap(Mutex<OrdMap<PackageId, Arc<RwLock<Option<ServiceRef>>>>>);
pub struct ServiceMap(SyncMutex<OrdMap<PackageId, Arc<RwLock<Option<ServiceRef>>>>>);
impl ServiceMap {
async fn entry(&self, id: &PackageId) -> Arc<RwLock<Option<ServiceRef>>> {
let mut lock = self.0.lock().await;
lock.entry(id.clone())
.or_insert_with(|| Arc::new(RwLock::new(None)))
.clone()
fn entry(&self, id: &PackageId) -> Arc<RwLock<Option<ServiceRef>>> {
self.0.mutate(|lock| {
lock.entry(id.clone())
.or_insert_with(|| Arc::new(RwLock::new(None)))
.clone()
})
}
#[instrument(skip_all)]
pub fn try_get(&self, id: &PackageId) -> Option<OwnedRwLockReadGuard<Option<ServiceRef>>> {
self.entry(id).try_read_owned().ok()
}
#[instrument(skip_all)]
pub async fn get(&self, id: &PackageId) -> OwnedRwLockReadGuard<Option<ServiceRef>> {
self.entry(id).await.read_owned().await
self.entry(id).read_owned().await
}
#[instrument(skip_all)]
pub async fn get_mut(&self, id: &PackageId) -> OwnedRwLockWriteGuard<Option<ServiceRef>> {
self.entry(id).await.write_owned().await
self.entry(id).write_owned().await
}
#[instrument(skip_all)]
pub async fn init(
&self,
ctx: &RpcContext,
mut progress: PhaseProgressTrackerHandle,
) -> Result<(), Error> {
progress.start();
pub async fn init(&self, ctx: &RpcContext) -> Result<(), Error> {
let ids = ctx.db.peek().await.as_public().as_package_data().keys()?;
progress.set_total(ids.len() as u64);
progress.set_units(Some(ProgressUnits::Steps));
let mut jobs = FuturesUnordered::new();
for id in &ids {
jobs.push(self.load(ctx, id, LoadDisposition::Retry));
@@ -83,9 +98,7 @@ impl ServiceMap {
tracing::error!("Error loading installed package as service: {e}");
tracing::debug!("{e:?}");
}
progress += 1;
}
progress.complete();
Ok(())
}
@@ -152,6 +165,13 @@ impl ServiceMap {
validate_progress.start();
s9pk.validate_and_filter(ctx.s9pk_arch)?;
validate_progress.complete();
let commitment = s9pk.as_archive().commitment().await?;
let mut installed_path = s9pk_installed_path(&commitment);
while tokio::fs::metadata(&installed_path).await.is_ok() {
let prev = installed_path.file_stem().unwrap_or_default();
installed_path.set_file_name(prev.to_string_lossy().into_owned() + "x.s9pk");
// append an x if already exists to avoid reference counting when reinstalling same s9pk
}
let manifest = s9pk.as_manifest().clone();
let id = manifest.id.clone();
let icon = s9pk.icon_data_url().await?;
@@ -184,6 +204,7 @@ impl ServiceMap {
.handle(async {
ctx.db
.mutate({
let installed_path = installed_path.clone();
let manifest = manifest.clone();
let id = id.clone();
let install_progress = progress.snapshot();
@@ -196,6 +217,7 @@ impl ServiceMap {
pde.as_state_info_mut().ser(&PackageState::Updating(
UpdatingState {
manifest: prev.manifest,
s9pk: installed_path,
installing_info: InstallingInfo {
new_manifest: manifest,
progress: install_progress,
@@ -217,7 +239,7 @@ impl ServiceMap {
} else {
PackageState::Installing(installing)
},
data_version: None,
s9pk: installed_path,
status: MainStatus::Stopped,
registry,
developer_key: Pem::new(developer_key),
@@ -241,13 +263,9 @@ impl ServiceMap {
.await?;
Ok(async move {
let (installed_path, sync_progress_task) = reload_guard
let sync_progress_task = reload_guard
.handle(async {
let download_path = Path::new(DATA_DIR)
.join(PKG_ARCHIVE_DIR)
.join("downloading")
.join(&id)
.with_extension("s9pk");
let download_path = s9pk_download_path(&commitment);
let deref_id = id.clone();
let sync_progress_task =
@@ -273,15 +291,9 @@ impl ServiceMap {
file.sync_all().await?;
unpack_progress.complete();
let installed_path = Path::new(DATA_DIR)
.join(PKG_ARCHIVE_DIR)
.join("installed")
.join(&id)
.with_extension("s9pk");
crate::util::io::rename(&download_path, &installed_path).await?;
Ok::<_, Error>((installed_path, sync_progress_task))
Ok::<_, Error>(sync_progress_task)
})
.await?;
Ok(reload_guard
@@ -325,17 +337,18 @@ impl ServiceMap {
.state
.borrow()
.desired_state;
service.uninstall(uninit, false, false).await?;
let cleanup = service.uninstall(uninit, false, false).await?;
progress.complete();
Some(run_state)
Some((run_state, cleanup))
} else {
None
};
let new_service = Service::install(
ctx,
s9pk,
&installed_path,
&registry,
prev,
prev.as_ref().map(|(s, _)| *s),
recovery_source,
Some(InstallProgressHandles {
finalization_progress,
@@ -344,6 +357,11 @@ impl ServiceMap {
)
.await?;
*service = Some(new_service.into());
if let Some((_, cleanup)) = prev {
cleanup.await?;
}
drop(service);
sync_progress_task.await.map_err(|_| {
@@ -391,7 +409,7 @@ impl ServiceMap {
.uninstall(ExitParams::uninstall(), soft, force)
.await;
drop(guard);
res
res?.await
} else {
if force {
super::uninstall::cleanup(&ctx, &id, soft).await?;
@@ -414,17 +432,18 @@ impl ServiceMap {
}
pub async fn shutdown_all(&self) -> Result<(), Error> {
let lock = self.0.lock().await;
let mut futs = Vec::with_capacity(lock.len());
for service in lock.values().cloned() {
futs.push(async move {
if let Some(service) = service.write_owned().await.take() {
service.shutdown(None).await?
}
Ok::<_, Error>(())
});
}
drop(lock);
let futs = self.0.mutate(|lock| {
let mut futs = Vec::with_capacity(lock.len());
for service in lock.values().cloned() {
futs.push(async move {
if let Some(service) = service.write_owned().await.take() {
service.shutdown(None).await?
}
Ok::<_, Error>(())
});
}
futs
});
let mut errors = ErrorCollection::new();
for res in futures::future::join_all(futs).await {
errors.handle(res);

View File

@@ -57,13 +57,6 @@ pub async fn cleanup(ctx: &RpcContext, id: &PackageId, soft: bool) -> Result<(),
if tokio::fs::metadata(&logs_dir).await.is_ok() {
tokio::fs::remove_dir_all(&logs_dir).await?;
}
let archive_path = Path::new(PACKAGE_DATA)
.join("archive")
.join("installed")
.join(&state.manifest.id);
if tokio::fs::metadata(&archive_path).await.is_ok() {
tokio::fs::remove_file(&archive_path).await?;
}
}
},
)

View File

@@ -82,6 +82,7 @@ pub async fn list_disks(ctx: SetupContext) -> Result<Vec<DiskInfo>, Error> {
crate::disk::util::list(&ctx.os_partitions).await
}
#[instrument(skip_all)]
async fn setup_init(
ctx: &SetupContext,
password: Option<String>,
@@ -130,6 +131,7 @@ pub struct AttachParams {
kiosk: Option<bool>,
}
#[instrument(skip_all)]
pub async fn attach(
ctx: SetupContext,
AttachParams {

View File

@@ -302,7 +302,7 @@ mod test {
async fn test_conflicts() {
let actor = super::ConcurrentActor::new(CActor);
let guid = Guid::new();
actor.queue(guid.clone(), Pending);
let pending = actor.queue(guid.clone(), Pending);
assert!(
tokio::time::timeout(Duration::from_secs(1), actor.send(Guid::new(), Conflicts))
.await
@@ -318,5 +318,8 @@ mod test {
.await
.is_ok()
);
assert!(tokio::time::timeout(Duration::from_secs(1), pending)
.await
.is_err());
}
}

View File

@@ -15,7 +15,7 @@ use bytes::{Buf, BytesMut};
use clap::builder::ValueParserFactory;
use futures::future::{BoxFuture, Fuse};
use futures::{AsyncSeek, FutureExt, Stream, TryStreamExt};
use helpers::NonDetachingJoinHandle;
use helpers::{AtomicFile, NonDetachingJoinHandle};
use models::FromStrParser;
use nix::unistd::{Gid, Uid};
use serde::{Deserialize, Serialize};
@@ -920,6 +920,7 @@ impl Drop for TmpDir {
}
}
#[instrument(skip_all)]
pub async fn maybe_open_file(path: impl AsRef<Path>) -> Result<Option<File>, Error> {
let path = path.as_ref();
match File::open(path).await {
@@ -930,13 +931,32 @@ pub async fn maybe_open_file(path: impl AsRef<Path>) -> Result<Option<File>, Err
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("open {path:?}")))
}
#[instrument(skip_all)]
pub async fn open_file(path: impl AsRef<Path>) -> Result<File, Error> {
let path = path.as_ref();
File::open(path)
.await
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("open {path:?}")))
maybe_open_file(path.as_ref())
.await?
.or_not_found(path.as_ref().display())
}
#[instrument(skip_all)]
pub async fn maybe_read_file_to_string(path: impl AsRef<Path>) -> Result<Option<String>, Error> {
let Some(mut file) = maybe_open_file(path).await? else {
return Ok(None);
};
let meta = file.metadata().await?;
let mut res = String::with_capacity(meta.len() as usize);
file.read_to_string(&mut res).await?;
Ok(Some(res))
}
#[instrument(skip_all)]
pub async fn read_file_to_string(path: impl AsRef<Path>) -> Result<String, Error> {
maybe_read_file_to_string(path.as_ref())
.await?
.or_not_found(path.as_ref().display())
}
#[instrument(skip_all)]
pub async fn create_file(path: impl AsRef<Path>) -> Result<File, Error> {
let path = path.as_ref();
if let Some(parent) = path.parent() {
@@ -949,6 +969,7 @@ pub async fn create_file(path: impl AsRef<Path>) -> Result<File, Error> {
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("create {path:?}")))
}
#[instrument(skip_all)]
pub async fn create_file_mod(path: impl AsRef<Path>, mode: u32) -> Result<File, Error> {
let path = path.as_ref();
if let Some(parent) = path.parent() {
@@ -966,6 +987,7 @@ pub async fn create_file_mod(path: impl AsRef<Path>, mode: u32) -> Result<File,
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("create {path:?}")))
}
#[instrument(skip_all)]
pub async fn append_file(path: impl AsRef<Path>) -> Result<File, Error> {
let path = path.as_ref();
if let Some(parent) = path.parent() {
@@ -981,6 +1003,7 @@ pub async fn append_file(path: impl AsRef<Path>) -> Result<File, Error> {
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("create {path:?}")))
}
#[instrument(skip_all)]
pub async fn delete_file(path: impl AsRef<Path>) -> Result<(), Error> {
let path = path.as_ref();
tokio::fs::remove_file(path)
@@ -995,6 +1018,7 @@ pub async fn delete_file(path: impl AsRef<Path>) -> Result<(), Error> {
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("delete {path:?}")))
}
#[instrument(skip_all)]
pub async fn rename(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> Result<(), Error> {
let src = src.as_ref();
let dst = dst.as_ref();
@@ -1008,6 +1032,29 @@ pub async fn rename(src: impl AsRef<Path>, dst: impl AsRef<Path>) -> Result<(),
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("mv {src:?} -> {dst:?}")))
}
#[instrument(skip_all)]
pub async fn write_file_atomic(
path: impl AsRef<Path>,
contents: impl AsRef<[u8]>,
) -> Result<(), Error> {
let path = path.as_ref();
if let Some(parent) = path.parent() {
tokio::fs::create_dir_all(parent)
.await
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("mkdir -p {parent:?}")))?;
}
let mut file = AtomicFile::new(path, None::<&Path>)
.await
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("create {path:?}")))?;
file.write_all(contents.as_ref())
.await
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("write {path:?}")))?;
file.save()
.await
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("save {path:?}")))?;
Ok(())
}
fn poll_flush_prefix<W: AsyncWrite>(
mut writer: Pin<&mut W>,
cx: &mut std::task::Context<'_>,

View File

@@ -968,9 +968,31 @@ impl<T: AsRef<[u8]>> std::fmt::Display for Base16<T> {
#[derive(TS)]
#[ts(type = "string", concrete(T = Vec<u8>))]
pub struct Base32<T>(pub T);
impl<T: AsRef<[u8]>> Base32<T> {
pub fn to_lower_string(&self) -> String {
base32::encode(
base32::Alphabet::Rfc4648Lower { padding: false },
self.0.as_ref(),
)
}
}
impl<T: AsRef<[u8]>> std::fmt::Display for Base32<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
base32::encode(base32::Alphabet::Rfc4648 { padding: true }, self.0.as_ref()).fmt(f)
base32::encode(
base32::Alphabet::Rfc4648 { padding: false },
self.0.as_ref(),
)
.fmt(f)
}
}
impl<T: TryFrom<Vec<u8>>> FromStr for Base32<T> {
type Err = color_eyre::eyre::Report;
fn from_str(s: &str) -> Result<Self, Self::Err> {
base32::decode(base32::Alphabet::Rfc4648 { padding: false }, &s)
.ok_or_else(|| eyre!("{s} is not a valid base32 string"))?
.try_into()
.map_err(|_| eyre!("base32 string is an invalid length"))
.map(Self)
}
}
impl<'de, T: TryFrom<Vec<u8>>> Deserialize<'de> for Base32<T> {
@@ -978,17 +1000,7 @@ impl<'de, T: TryFrom<Vec<u8>>> Deserialize<'de> for Base32<T> {
where
D: Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
base32::decode(base32::Alphabet::Rfc4648 { padding: true }, &s)
.ok_or_else(|| {
serde::de::Error::invalid_value(
serde::de::Unexpected::Str(&s),
&"a valid base32 string",
)
})?
.try_into()
.map_err(|_| serde::de::Error::custom("invalid length"))
.map(Self)
deserialize_from_str(deserializer)
}
}
impl<T: AsRef<[u8]>> Serialize for Base32<T> {
@@ -1000,10 +1012,13 @@ impl<T: AsRef<[u8]>> Serialize for Base32<T> {
}
}
pub const BASE64: base64::engine::GeneralPurpose = base64::engine::GeneralPurpose::new(
&base64::alphabet::STANDARD,
base64::engine::GeneralPurposeConfig::new(),
);
pub const BASE64: base64::engine::GeneralPurpose =
base64::engine::general_purpose::GeneralPurpose::new(
&base64::alphabet::STANDARD,
base64::engine::GeneralPurposeConfig::new()
.with_encode_padding(false)
.with_decode_padding_mode(base64::engine::DecodePaddingMode::Indifferent),
);
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, TS)]
#[ts(type = "string", concrete(T = Vec<u8>))]

View File

@@ -5,7 +5,6 @@ use std::panic::{RefUnwindSafe, UnwindSafe};
use color_eyre::eyre::eyre;
use futures::future::BoxFuture;
use futures::{Future, FutureExt};
use imbl::Vector;
use imbl_value::{to_value, InternedString};
use patch_db::json_ptr::ROOT;
@@ -48,8 +47,9 @@ mod v0_4_0_alpha_5;
mod v0_4_0_alpha_6;
mod v0_4_0_alpha_7;
mod v0_4_0_alpha_8;
mod v0_4_0_alpha_9;
pub type Current = v0_4_0_alpha_8::Version; // VERSION_BUMP
pub type Current = v0_4_0_alpha_9::Version; // VERSION_BUMP
impl Current {
#[instrument(skip(self, db))]
@@ -96,20 +96,19 @@ pub async fn post_init(
.de()?;
if !todos.is_empty() {
progress.set_total(todos.len() as u64);
while let Some(version) = {
while let Some((version, input)) = {
peek = ctx.db.peek().await;
peek.as_public()
.as_server_info()
.as_post_init_migration_todos()
.de()?
.first()
.cloned()
.map(Version::from_exver_version)
.as_ref()
.map(Version::as_version_t)
.pop_first()
.map(|(version, input)| {
Ok::<_, Error>((Version::from_exver_version(version).as_version_t()?, input))
})
.transpose()?
} {
version.0.post_up(ctx).await?;
version.0.post_up(ctx, input).await?;
ctx.db
.mutate(|db| {
db.as_public_mut()
@@ -161,7 +160,8 @@ enum Version {
V0_4_0_alpha_5(Wrapper<v0_4_0_alpha_5::Version>),
V0_4_0_alpha_6(Wrapper<v0_4_0_alpha_6::Version>),
V0_4_0_alpha_7(Wrapper<v0_4_0_alpha_7::Version>),
V0_4_0_alpha_8(Wrapper<v0_4_0_alpha_8::Version>), // VERSION_BUMP
V0_4_0_alpha_8(Wrapper<v0_4_0_alpha_8::Version>),
V0_4_0_alpha_9(Wrapper<v0_4_0_alpha_9::Version>), // VERSION_BUMP
Other(exver::Version),
}
@@ -212,7 +212,8 @@ impl Version {
Self::V0_4_0_alpha_5(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_6(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_7(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_8(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP
Self::V0_4_0_alpha_8(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_9(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP
Self::Other(v) => {
return Err(Error::new(
eyre!("unknown version {v}"),
@@ -255,12 +256,14 @@ impl Version {
Version::V0_4_0_alpha_5(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_6(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_7(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_8(Wrapper(x)) => x.semver(), // VERSION_BUMP
Version::V0_4_0_alpha_8(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_9(Wrapper(x)) => x.semver(), // VERSION_BUMP
Version::Other(x) => x.clone(),
}
}
}
#[instrument(skip_all)]
fn version_accessor(db: &mut Value) -> Option<&mut Value> {
if db.get("public").is_some() {
db.get_mut("public")?
@@ -271,6 +274,7 @@ fn version_accessor(db: &mut Value) -> Option<&mut Value> {
}
}
#[instrument(skip_all)]
fn version_compat_accessor(db: &mut Value) -> Option<&mut Value> {
if db.get("public").is_some() {
let server_info = db.get_mut("public")?.get_mut("serverInfo")?;
@@ -293,6 +297,7 @@ fn version_compat_accessor(db: &mut Value) -> Option<&mut Value> {
}
}
#[instrument(skip_all)]
fn post_init_migration_todos_accessor(db: &mut Value) -> Option<&mut Value> {
let server_info = if db.get("public").is_some() {
db.get_mut("public")?.get_mut("serverInfo")?
@@ -300,9 +305,10 @@ fn post_init_migration_todos_accessor(db: &mut Value) -> Option<&mut Value> {
db.get_mut("server-info")?
};
if server_info.get("postInitMigrationTodos").is_none() {
server_info
.as_object_mut()?
.insert("postInitMigrationTodos".into(), Value::Array(Vector::new()));
server_info.as_object_mut()?.insert(
"postInitMigrationTodos".into(),
Value::Object(Default::default()),
);
}
server_info.get_mut("postInitMigrationTodos")
}
@@ -363,8 +369,8 @@ fn migrate_from_unchecked<VFrom: DynVersionT + ?Sized, VTo: DynVersionT + ?Sized
}
_ => (),
};
to.up(db, pre_ups.value)?;
to.commit(db)?;
let res = to.up(db, pre_ups.value)?;
to.commit(db, res)?;
Ok(())
}
@@ -375,7 +381,7 @@ fn rollback_to_unchecked<VFrom: DynVersionT + ?Sized, VTo: DynVersionT + ?Sized>
) -> Result<(), Error> {
let previous = from.previous();
from.down(db)?;
previous.commit(db)?;
previous.commit(db, Value::Null)?;
if to.semver() < previous.semver() {
rollback_to_unchecked(&previous, to, db)?
} else if to.semver() > previous.semver() {
@@ -399,15 +405,16 @@ where
type PreUpRes: Send + UnwindSafe;
fn semver(self) -> exver::Version;
fn compat(self) -> &'static exver::VersionRange;
/// MUST NOT change system state. Intended for async I/O reads
/// MUST be idempotent, and is run before *all* db migrations
fn pre_up(self) -> impl Future<Output = Result<Self::PreUpRes, Error>> + Send + 'static;
fn up(self, db: &mut Value, input: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, db: &mut Value, input: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
/// MUST be idempotent, and is run after *all* db migrations
fn post_up<'a>(
self,
ctx: &'a RpcContext,
input: Value,
) -> impl Future<Output = Result<(), Error>> + Send + 'a {
async { Ok(()) }
}
@@ -417,20 +424,28 @@ where
ErrorKind::InvalidRequest,
))
}
fn commit(self, db: &mut Value) -> Result<(), Error> {
*version_accessor(db).or_not_found("`version` in db")? = to_value(&self.semver())?;
*version_compat_accessor(db).or_not_found("`versionCompat` in db")? =
#[instrument(skip_all)]
fn commit(self, db: &mut Value, res: Value) -> Result<(), Error> {
*version_accessor(db).or_not_found("`public.serverInfo.version` in db")? =
to_value(&self.semver())?;
*version_compat_accessor(db).or_not_found("`public.serverInfo.versionCompat` in db")? =
to_value(self.compat())?;
post_init_migration_todos_accessor(db)
.or_not_found("`serverInfo` in db")?
if let Some(arr) = post_init_migration_todos_accessor(db)
.or_not_found("`public.serverInfo.postInitMigrationTodos` in db")?
.as_array_mut()
.ok_or_else(|| {
Error::new(
eyre!("postInitMigrationTodos is not an array"),
ErrorKind::Database,
)
})?
.push_back(to_value(&self.semver())?);
{
arr.push_back(to_value(&self.semver())?);
} else if let Some(obj) = post_init_migration_todos_accessor(db)
.or_not_found("`public.serverInfo.postInitMigrationTodos` in db")?
.as_object_mut()
{
obj.insert(InternedString::from_display(&self.semver()), res);
} else {
return Err(Error::new(
eyre!("postInitMigrationTodos is not an array or object"),
ErrorKind::Database,
));
}
Ok(())
}
}
@@ -443,10 +458,10 @@ trait DynVersionT: RefUnwindSafe + Send + Sync {
fn semver(&self) -> exver::Version;
fn compat(&self) -> &'static exver::VersionRange;
fn pre_up(&self) -> BoxFuture<'static, Result<Box<dyn Any + UnwindSafe + Send>, Error>>;
fn up(&self, db: &mut Value, input: Box<dyn Any + Send>) -> Result<(), Error>;
fn post_up<'a>(&self, ctx: &'a RpcContext) -> BoxFuture<'a, Result<(), Error>>;
fn up(&self, db: &mut Value, input: Box<dyn Any + Send>) -> Result<Value, Error>;
fn post_up<'a>(&self, ctx: &'a RpcContext, input: Value) -> BoxFuture<'a, Result<(), Error>>;
fn down(&self, db: &mut Value) -> Result<(), Error>;
fn commit(&self, db: &mut Value) -> Result<(), Error>;
fn commit(&self, db: &mut Value, res: Value) -> Result<(), Error>;
}
impl<T> DynVersionT for T
where
@@ -466,7 +481,7 @@ where
async move { Ok(Box::new(VersionT::pre_up(v).await?) as Box<dyn Any + UnwindSafe + Send>) }
.boxed()
}
fn up(&self, db: &mut Value, input: Box<dyn Any + Send>) -> Result<(), Error> {
fn up(&self, db: &mut Value, input: Box<dyn Any + Send>) -> Result<Value, Error> {
VersionT::up(
*self,
db,
@@ -478,14 +493,14 @@ where
})?,
)
}
fn post_up<'a>(&self, ctx: &'a RpcContext) -> BoxFuture<'a, Result<(), Error>> {
VersionT::post_up(*self, ctx).boxed()
fn post_up<'a>(&self, ctx: &'a RpcContext, input: Value) -> BoxFuture<'a, Result<(), Error>> {
VersionT::post_up(*self, ctx, input).boxed()
}
fn down(&self, db: &mut Value) -> Result<(), Error> {
VersionT::down(*self, db)
}
fn commit(&self, db: &mut Value) -> Result<(), Error> {
VersionT::commit(*self, db)
fn commit(&self, db: &mut Value, res: Value) -> Result<(), Error> {
VersionT::commit(*self, db, res)
}
}
impl DynVersionT for DynVersion {
@@ -501,17 +516,17 @@ impl DynVersionT for DynVersion {
fn pre_up(&self) -> BoxFuture<'static, Result<Box<dyn Any + UnwindSafe + Send>, Error>> {
self.0.pre_up()
}
fn up(&self, db: &mut Value, input: Box<dyn Any + Send>) -> Result<(), Error> {
fn up(&self, db: &mut Value, input: Box<dyn Any + Send>) -> Result<Value, Error> {
self.0.up(db, input)
}
fn post_up<'a>(&self, ctx: &'a RpcContext) -> BoxFuture<'a, Result<(), Error>> {
self.0.post_up(ctx)
fn post_up<'a>(&self, ctx: &'a RpcContext, input: Value) -> BoxFuture<'a, Result<(), Error>> {
self.0.post_up(ctx, input)
}
fn down(&self, db: &mut Value) -> Result<(), Error> {
self.0.down(db)
}
fn commit(&self, db: &mut Value) -> Result<(), Error> {
self.0.commit(db)
fn commit(&self, db: &mut Value, res: Value) -> Result<(), Error> {
self.0.commit(db, res)
}
}

View File

@@ -40,8 +40,8 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -24,8 +24,8 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -24,8 +24,8 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -1,4 +1,5 @@
use std::collections::BTreeMap;
use std::ffi::OsStr;
use std::path::Path;
use chrono::{DateTime, Utc};
@@ -6,6 +7,7 @@ use const_format::formatcp;
use ed25519_dalek::SigningKey;
use exver::{PreReleaseSegment, VersionRange};
use imbl_value::{json, InternedString};
use models::{PackageId, ReplayId};
use openssl::pkey::PKey;
use openssl::x509::X509;
use sqlx::postgres::PgConnectOptions;
@@ -178,8 +180,26 @@ async fn init_postgres(datadir: impl AsRef<Path>) -> Result<PgPool, Error> {
.await?;
}
let secret_store =
PgPool::connect_with(PgConnectOptions::new().database("secrets").username("root")).await?;
let secret_store = if let Ok(s) = PgPool::connect_with(
PgConnectOptions::new()
.database("secrets")
.username("root")
.port(5432)
.socket("/var/run/postgresql"),
)
.await
{
s
} else {
PgPool::connect_with(
PgConnectOptions::new()
.database("secrets")
.username("root")
.port(5433)
.socket("/var/run/postgresql"),
)
.await?
};
sqlx::migrate!()
.run(&secret_store)
.await
@@ -209,7 +229,9 @@ impl VersionT for Version {
Ok((account, ssh_keys, cifs))
}
fn up(self, db: &mut Value, (account, ssh_keys, cifs): Self::PreUpRes) -> Result<(), Error> {
fn up(self, db: &mut Value, (account, ssh_keys, cifs): Self::PreUpRes) -> Result<Value, Error> {
let prev_package_data = db["package-data"].clone();
let wifi = json!({
"interface": db["server-info"]["wifi"]["interface"],
"ssids": db["server-info"]["wifi"]["ssids"],
@@ -236,7 +258,7 @@ impl VersionT for Version {
"lanAddress": db["server-info"]["lan-address"],
});
server_info["postInitMigrationTodos"] = json!([]);
server_info["postInitMigrationTodos"] = json!({});
let tor_address: String = from_value(db["server-info"]["tor-address"].clone())?;
// Maybe we do this like the Public::init does
server_info["torAddress"] = json!(tor_address);
@@ -287,7 +309,8 @@ impl VersionT for Version {
});
*db = next;
Ok(())
Ok(prev_package_data)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Err(Error::new(
@@ -298,7 +321,7 @@ impl VersionT for Version {
#[instrument(skip(self, ctx))]
/// MUST be idempotent, and is run after *all* db migrations
async fn post_up(self, ctx: &RpcContext) -> Result<(), Error> {
async fn post_up(self, ctx: &RpcContext, input: Value) -> Result<(), Error> {
let path = Path::new(formatcp!("{PACKAGE_DATA}/archive/"));
if !path.is_dir() {
return Err(Error::new(
@@ -312,6 +335,9 @@ impl VersionT for Version {
// Should be the name of the package
let mut paths = tokio::fs::read_dir(path).await?;
while let Some(path) = paths.next_entry().await? {
let Ok(id) = path.file_name().to_string_lossy().parse::<PackageId>() else {
continue;
};
let path = path.path();
if !path.is_dir() {
continue;
@@ -328,25 +354,62 @@ impl VersionT for Version {
let mut paths = tokio::fs::read_dir(path).await?;
while let Some(path) = paths.next_entry().await? {
let path = path.path();
if path.is_dir() {
if path.extension() != Some(OsStr::new("s9pk")) {
continue;
}
let package_s9pk = tokio::fs::File::open(path).await?;
let file = MultiCursorFile::open(&package_s9pk).await?;
let configured = if !input.is_null() {
let Some(configured) = input
.get(&*id)
.and_then(|pde| pde.get("installed"))
.and_then(|i| i.get("status"))
.and_then(|s| s.get("configured"))
.and_then(|c| c.as_bool())
else {
continue;
};
configured
} else {
false
};
let key = ctx.db.peek().await.into_private().into_compat_s9pk_key();
ctx.services
.install(
ctx.clone(),
|| crate::s9pk::load(file.clone(), || Ok(key.de()?.0), None),
None,
None::<crate::util::Never>,
None,
)
.await?
.await?
.await?;
if let Err(e) = async {
let package_s9pk = tokio::fs::File::open(path).await?;
let file = MultiCursorFile::open(&package_s9pk).await?;
let key = ctx.db.peek().await.into_private().into_compat_s9pk_key();
ctx.services
.install(
ctx.clone(),
|| crate::s9pk::load(file.clone(), || Ok(key.de()?.0), None),
None,
None::<crate::util::Never>,
None,
)
.await?
.await?
.await?;
if configured {
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&id)
.or_not_found(&id)?
.as_tasks_mut()
.remove(&ReplayId::from("needs-config"))
})
.await
.result?;
}
Ok::<_, Error>(())
}
.await
{
tracing::error!("Error reinstalling {id}: {e}");
tracing::debug!("{e:?}");
}
}
}
}

View File

@@ -27,8 +27,8 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -41,7 +41,7 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
for (_, package) in db["public"]["packageData"]
.as_object_mut()
.ok_or_else(|| {
@@ -86,7 +86,7 @@ impl VersionT for Version {
}
}
Ok(())
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -28,7 +28,7 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
let acme = std::mem::replace(
&mut db["public"]["serverInfo"]["acme"],
Value::Object(Default::default()),
@@ -75,7 +75,7 @@ impl VersionT for Version {
}
}
Ok(())
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -30,7 +30,7 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
let bindings: BTreeMap<u16, Value> = [(
80,
json!({
@@ -60,7 +60,7 @@ impl VersionT for Version {
"hostnameInfo": {},
});
Ok(())
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -27,8 +27,8 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -27,8 +27,8 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -27,8 +27,8 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -27,8 +27,8 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -27,8 +27,8 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -27,8 +27,8 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -27,8 +27,8 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -27,8 +27,8 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -27,8 +27,8 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -27,8 +27,8 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -26,8 +26,8 @@ impl VersionT for Version {
async fn pre_up(self) -> Result<Self::PreUpRes, Error> {
Ok(())
}
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -4,6 +4,7 @@ use tokio::process::Command;
use super::v0_3_5::V0_3_0_COMPAT;
use super::{v0_3_6_alpha_6, VersionT};
use crate::context::RpcContext;
use crate::prelude::*;
use crate::util::Invoke;
@@ -30,7 +31,7 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
let server_info = db["public"]["serverInfo"]
.as_object_mut()
.or_not_found("public.serverInfo")?;
@@ -44,9 +45,9 @@ impl VersionT for Version {
manifest["hardwareRequirements"]["device"] = json!([]);
}
}
Ok(())
Ok(Value::Null)
}
async fn post_up(self, ctx: &crate::context::RpcContext) -> Result<(), Error> {
async fn post_up(self, ctx: &RpcContext, _input: Value) -> Result<(), Error> {
Command::new("systemd-firstboot")
.arg("--root=/media/startos/config/overlay/")
.arg(format!(

View File

@@ -5,6 +5,7 @@ use tokio::fs::File;
use super::v0_3_5::V0_3_0_COMPAT;
use super::{v0_3_6_alpha_7, VersionT};
use crate::context::RpcContext;
use crate::install::PKG_ARCHIVE_DIR;
use crate::prelude::*;
use crate::s9pk::manifest::{DeviceFilter, Manifest};
@@ -39,10 +40,10 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, _: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
async fn post_up(self, ctx: &crate::context::RpcContext) -> Result<(), Error> {
async fn post_up(self, ctx: &RpcContext, _input: Value) -> Result<(), Error> {
let s9pk_dir = Path::new(DATA_DIR).join(PKG_ARCHIVE_DIR).join("installed");
if tokio::fs::metadata(&s9pk_dir).await.is_ok() {

View File

@@ -27,8 +27,8 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, _: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -3,6 +3,7 @@ use imbl_value::json;
use super::v0_3_5::V0_3_0_COMPAT;
use super::{v0_3_6_alpha_18, VersionT};
use crate::context::RpcContext;
use crate::notifications::{notify, NotificationLevel};
use crate::prelude::*;
@@ -29,7 +30,7 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
let host = db["public"]["serverInfo"]["host"].clone();
let mut wifi = db["public"]["serverInfo"]["wifi"].clone();
wifi["enabled"] = Value::Bool(!wifi["selected"].is_null());
@@ -51,9 +52,9 @@ impl VersionT for Version {
"networkInterfaces": network_interfaces,
"acme": acme,
});
Ok(())
Ok(Value::Null)
}
async fn post_up<'a>(self, ctx: &'a crate::context::RpcContext) -> Result<(), Error> {
async fn post_up(self, ctx: &RpcContext, _input: Value) -> Result<(), Error> {
let message_update = include_str!("update_details/v0_4_0.md").to_string();
ctx.db

View File

@@ -28,7 +28,7 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
let Some(ui) = db["public"]["ui"].as_object_mut() else {
return Err(Error::new(
eyre!("db.public.ui is not an object"),
@@ -64,7 +64,7 @@ impl VersionT for Version {
ui.remove("gaming");
ui.remove("theme");
Ok(())
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -27,8 +27,8 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -27,8 +27,8 @@ impl VersionT for Version {
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -30,7 +30,7 @@ impl VersionT for Version {
&V0_3_0_COMPAT
}
#[instrument]
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
db["public"]["serverInfo"]
.as_object_mut()
.or_not_found("public.serverInfo")?
@@ -63,9 +63,9 @@ impl VersionT for Version {
}
pde.insert("tasks".into(), Value::Object(tasks));
}
Ok(())
Ok(Value::Null)
}
async fn post_up(self, _ctx: &RpcContext) -> Result<(), Error> {
async fn post_up(self, _ctx: &RpcContext, _input: Value) -> Result<(), Error> {
use tokio::io::AsyncWriteExt;
if tokio::fs::metadata("/media/startos/config/overlay/etc/shadow")

View File

@@ -28,8 +28,8 @@ impl VersionT for Version {
&V0_3_0_COMPAT
}
#[instrument]
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -28,7 +28,7 @@ impl VersionT for Version {
&V0_3_0_COMPAT
}
#[instrument]
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
let ui = db["public"]["ui"]
.as_object_mut()
.or_not_found("public.ui")?;
@@ -38,7 +38,7 @@ impl VersionT for Version {
}
}
ui.remove("ackWelcome");
Ok(())
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -28,8 +28,8 @@ impl VersionT for Version {
&V0_3_0_COMPAT
}
#[instrument]
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -28,8 +28,8 @@ impl VersionT for Version {
&V0_3_0_COMPAT
}
#[instrument]
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<(), Error> {
Ok(())
fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())

View File

@@ -0,0 +1,112 @@
use std::collections::BTreeMap;
use std::path::Path;
use std::sync::Arc;
use exver::{PreReleaseSegment, VersionRange};
use imbl_value::{InOMap, InternedString};
use models::PackageId;
use super::v0_3_5::V0_3_0_COMPAT;
use super::{v0_4_0_alpha_8, VersionT};
use crate::context::RpcContext;
use crate::install::PKG_ARCHIVE_DIR;
use crate::prelude::*;
use crate::util::io::write_file_atomic;
use crate::volume::PKG_VOLUME_DIR;
use crate::DATA_DIR;
lazy_static::lazy_static! {
    // Semantic version literal for this migration step: 0.4.0-alpha.9.
    // Built once at first access; exver::Version is not const-constructible,
    // hence the lazy_static (file-wide convention for these version statics).
    static ref V0_4_0_alpha_9: exver::Version = exver::Version::new(
        [0, 4, 0],
        [PreReleaseSegment::String("alpha".into()), 9.into()]
    );
}
/// Marker type implementing [`VersionT`] for the 0.4.0-alpha.9 migration.
/// Zero-sized; all behavior lives in the trait impl below.
#[derive(Clone, Copy, Debug, Default)]
pub struct Version;
/// Migration 0.4.0-alpha.8 -> 0.4.0-alpha.9.
///
/// Three db changes in `up`:
///   1. `public.serverInfo.postInitMigrationTodos` is converted from an array
///      of version strings to an object keyed by those strings (values Null).
///   2. Each entry of `public.packageData` has its `dataVersion` field removed
///      (collected and returned so `post_up` can persist it to disk) and gains
///      an `s9pk` field pointing at the installed s9pk path.
///   3. `public.ui.ackInstructions` is dropped.
///
/// The `Value` returned by `up` is handed back to `post_up` by the migration
/// driver (a map of package id -> removed dataVersion).
impl VersionT for Version {
    type Previous = v0_4_0_alpha_8::Version;
    // No state needs to be captured before the db mutation runs.
    type PreUpRes = ();
    async fn pre_up(self) -> Result<Self::PreUpRes, Error> {
        Ok(())
    }
    fn semver(self) -> exver::Version {
        V0_4_0_alpha_9.clone()
    }
    fn compat(self) -> &'static VersionRange {
        &V0_3_0_COMPAT
    }
    #[instrument]
    fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
        // Accumulates package id -> removed `dataVersion` value; returned to
        // the driver and consumed by `post_up`. InOMap preserves insertion order.
        let mut res = InOMap::new();
        let todos = db
            .get_mut("public")
            .and_then(|p| p.get_mut("serverInfo"))
            .and_then(|si| si.get_mut("postInitMigrationTodos"))
            .or_not_found("`public.serverInfo.postInitMigrationTodos` in db")?;
        // Legacy shape was an array of version strings; convert to an object
        // keyed by version (value Null). Non-string entries are silently
        // dropped by the filter_map. If it is already an object, leave it alone.
        if let Some(prev) = todos.as_array().cloned() {
            *todos = Value::Object(
                prev.iter()
                    .filter_map(|version| version.as_str())
                    .map(InternedString::intern)
                    .map(|v| (v, Value::Null))
                    .collect(),
            );
        }
        // Walk every package-data entry (no-op if `public.packageData` is
        // missing or not an object, courtesy of into_iter/flat_map on Option).
        for (id, pde) in db
            .get_mut("public")
            .and_then(|si| si.get_mut("packageData"))
            .and_then(|pd| pd.as_object_mut())
            .into_iter()
            .flat_map(|m| m.iter_mut())
        {
            let Some(pde) = pde.as_object_mut() else {
                continue;
            };
            // Move `dataVersion` out of the db and into the result map; a
            // missing field records Value's default (presumably Null — TODO
            // confirm imbl_value's Default), which `post_up` treats as None.
            res.insert(id.clone(), pde.remove("dataVersion").unwrap_or_default());
            // Record where the installed s9pk lives on disk:
            // {DATA_DIR}/{PKG_ARCHIVE_DIR}/installed/<id>.s9pk
            pde.insert(
                "s9pk".into(),
                Value::String(Arc::new(
                    Path::new(DATA_DIR)
                        .join(PKG_ARCHIVE_DIR)
                        .join("installed")
                        .join(id)
                        .with_extension("s9pk")
                        .into_os_string()
                        .into_string()
                        // into_string fails only on non-UTF-8 paths; fall back
                        // to a lossy conversion rather than erroring out.
                        .map_or_else(|o| o.to_string_lossy().into_owned(), |a| a),
                )),
            );
        }
        // The UI no longer tracks per-package instruction acknowledgement.
        db.get_mut("public")
            .and_then(|p| p.get_mut("ui"))
            .and_then(|u| u.as_object_mut())
            .or_not_found("`public.ui` in db")?
            .remove("ackInstructions");
        Ok(Value::Object(res))
    }
    // Persists each package's removed dataVersion to its volume at
    // {DATA_DIR}/{PKG_VOLUME_DIR}/<id>/data/.version. `input` is the Value
    // returned by `up` above. NOTE(review): appears to assume the volume's
    // `data` directory already exists for every listed package — confirm
    // write_file_atomic does not create parent directories.
    async fn post_up(self, _ctx: &RpcContext, input: Value) -> Result<(), Error> {
        for (id, data_version) in from_value::<BTreeMap<PackageId, Option<String>>>(input)? {
            if let Some(data_version) = data_version {
                write_file_atomic(
                    Path::new(DATA_DIR)
                        .join(PKG_VOLUME_DIR)
                        .join(&id)
                        .join("data")
                        .join(".version"),
                    data_version.as_bytes(),
                )
                .await?;
            }
        }
        Ok(())
    }
    // This migration is not reversible at the db level; down is a no-op
    // (matching the other no-op `down` impls in this module family).
    fn down(self, _db: &mut Value) -> Result<(), Error> {
        Ok(())
    }
}