Fix/backups (#2659)

* fix master build (#2639)

* feat: Change ts to use rsync
Chore: Update the ts to use types over interface

* feat: Get the rust and the js to do a backup

* Wip: Got the backup working?

* fix permissions

* remove trixie list

* update tokio to fix timer bug

* fix error handling on backup

* wip

* remove idmap

* run restore before init, and init with own version on restore

---------

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
Co-authored-by: Aiden McClelland <me@drbonez.dev>
This commit is contained in:
Jade
2024-07-17 15:46:27 -06:00
committed by GitHub
parent 95611e9c4b
commit 8f0bdcd172
23 changed files with 445 additions and 380 deletions

View File

@@ -7,6 +7,7 @@ import { RpcResult, matchRpcResult } from "../RpcListener"
import { duration } from "../../Models/Duration" import { duration } from "../../Models/Duration"
import { T } from "@start9labs/start-sdk" import { T } from "@start9labs/start-sdk"
import { MainEffects } from "@start9labs/start-sdk/cjs/lib/StartSdk" import { MainEffects } from "@start9labs/start-sdk/cjs/lib/StartSdk"
import { Volume } from "../../Models/Volume"
export const STARTOS_JS_LOCATION = "/usr/lib/startos/package/index.js" export const STARTOS_JS_LOCATION = "/usr/lib/startos/package/index.js"
export class SystemForStartOs implements System { export class SystemForStartOs implements System {
private onTerm: (() => Promise<void>) | undefined private onTerm: (() => Promise<void>) | undefined
@@ -151,8 +152,17 @@ export class SystemForStartOs implements System {
return this.abi.getConfig({ effects }) return this.abi.getConfig({ effects })
} }
case "/backup/create": case "/backup/create":
return this.abi.createBackup({
effects,
pathMaker: ((options) =>
new Volume(options.volume, options.path).path) as T.PathMaker,
})
case "/backup/restore": case "/backup/restore":
throw new Error("this should be called with the init/unit") return this.abi.restoreBackup({
effects,
pathMaker: ((options) =>
new Volume(options.volume, options.path).path) as T.PathMaker,
})
case "/actions/metadata": { case "/actions/metadata": {
return this.abi.actionsMetadata({ effects }) return this.abi.actionsMetadata({ effects })
} }

View File

@@ -5,7 +5,7 @@ import { hostSystemStartOs } from "../Adapters/HostSystemStartOs"
export type ExecuteResult = export type ExecuteResult =
| { ok: unknown } | { ok: unknown }
| { err: { code: number; message: string } } | { err: { code: number; message: string } }
export interface System { export type System = {
// init(effects: Effects): Promise<void> // init(effects: Effects): Promise<void>
// exit(effects: Effects): Promise<void> // exit(effects: Effects): Promise<void>
// start(effects: Effects): Promise<void> // start(effects: Effects): Promise<void>

354
core/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -128,6 +128,7 @@ nix = { version = "0.29.0", features = ["user", "process", "signal", "fs"] }
nom = "7.1.3" nom = "7.1.3"
num = "0.4.1" num = "0.4.1"
num_enum = "0.7.0" num_enum = "0.7.0"
num_cpus = "1.16.0"
once_cell = "1.19.0" once_cell = "1.19.0"
openssh-keys = "0.6.2" openssh-keys = "0.6.2"
openssl = { version = "0.10.57", features = ["vendored"] } openssl = { version = "0.10.57", features = ["vendored"] }
@@ -170,7 +171,7 @@ sscanf = "0.4.1"
ssh-key = { version = "0.6.2", features = ["ed25519"] } ssh-key = { version = "0.6.2", features = ["ed25519"] }
tar = "0.4.40" tar = "0.4.40"
thiserror = "1.0.49" thiserror = "1.0.49"
tokio = { version = "1.38.0", features = ["full"] } tokio = { version = "1.38.1", features = ["full"] }
tokio-rustls = "0.26.0" tokio-rustls = "0.26.0"
tokio-socks = "0.5.1" tokio-socks = "0.5.1"
tokio-stream = { version = "0.1.14", features = ["io-util", "sync", "net"] } tokio-stream = { version = "0.1.14", features = ["io-util", "sync", "net"] }

View File

@@ -260,7 +260,7 @@ async fn perform_backup(
for id in package_ids { for id in package_ids {
if let Some(service) = &*ctx.services.get(id).await { if let Some(service) = &*ctx.services.get(id).await {
let backup_result = service let backup_result = service
.backup(backup_guard.package_backup(id)) .backup(backup_guard.package_backup(id).await?)
.await .await
.err() .err()
.map(|e| e.to_string()); .map(|e| e.to_string());

View File

@@ -158,7 +158,7 @@ async fn restore_packages(
let backup_guard = Arc::new(backup_guard); let backup_guard = Arc::new(backup_guard);
let mut tasks = BTreeMap::new(); let mut tasks = BTreeMap::new();
for id in ids { for id in ids {
let backup_dir = backup_guard.clone().package_backup(&id); let backup_dir = backup_guard.clone().package_backup(&id).await?;
let s9pk_path = backup_dir.path().join(&id).with_extension("s9pk"); let s9pk_path = backup_dir.path().join(&id).with_extension("s9pk");
let task = ctx let task = ctx
.services .services

View File

@@ -1,3 +1,4 @@
use std::cmp::max;
use std::ffi::OsString; use std::ffi::OsString;
use std::net::{Ipv6Addr, SocketAddr}; use std::net::{Ipv6Addr, SocketAddr};
use std::sync::Arc; use std::sync::Arc;
@@ -136,6 +137,7 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
let res = { let res = {
let rt = tokio::runtime::Builder::new_multi_thread() let rt = tokio::runtime::Builder::new_multi_thread()
.worker_threads(max(4, num_cpus::get()))
.enable_all() .enable_all()
.build() .build()
.expect("failed to initialize runtime"); .expect("failed to initialize runtime");

View File

@@ -106,8 +106,11 @@ impl<G: GenericMountGuard> BackupMountGuard<G> {
) )
})?; })?;
} }
let encrypted_guard = let encrypted_guard = TmpMountGuard::mount(
TmpMountGuard::mount(&BackupFS::new(&crypt_path, &enc_key), ReadWrite).await?; &BackupFS::new(&crypt_path, &enc_key, vec![(100000, 65536)]),
ReadWrite,
)
.await?;
let metadata_path = encrypted_guard.path().join("metadata.json"); let metadata_path = encrypted_guard.path().join("metadata.json");
let metadata: BackupInfo = if tokio::fs::metadata(&metadata_path).await.is_ok() { let metadata: BackupInfo = if tokio::fs::metadata(&metadata_path).await.is_ok() {
@@ -148,8 +151,23 @@ impl<G: GenericMountGuard> BackupMountGuard<G> {
} }
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn package_backup(self: &Arc<Self>, id: &PackageId) -> SubPath<Arc<Self>> { pub async fn package_backup(
SubPath::new(self.clone(), id) self: &Arc<Self>,
id: &PackageId,
) -> Result<SubPath<Arc<Self>>, Error> {
let package_guard = SubPath::new(self.clone(), id);
let package_path = package_guard.path();
if tokio::fs::metadata(&package_path).await.is_err() {
tokio::fs::create_dir_all(&package_path)
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
package_path.display().to_string(),
)
})?;
}
Ok(package_guard)
} }
#[instrument(skip_all)] #[instrument(skip_all)]

View File

@@ -1,3 +1,4 @@
use std::borrow::Cow;
use std::fmt::{self, Display}; use std::fmt::{self, Display};
use std::os::unix::ffi::OsStrExt; use std::os::unix::ffi::OsStrExt;
use std::path::Path; use std::path::Path;
@@ -12,10 +13,15 @@ use crate::prelude::*;
pub struct BackupFS<DataDir: AsRef<Path>, Password: fmt::Display> { pub struct BackupFS<DataDir: AsRef<Path>, Password: fmt::Display> {
data_dir: DataDir, data_dir: DataDir,
password: Password, password: Password,
idmapped_root: Vec<(u32, u32)>,
} }
impl<DataDir: AsRef<Path>, Password: fmt::Display> BackupFS<DataDir, Password> { impl<DataDir: AsRef<Path>, Password: fmt::Display> BackupFS<DataDir, Password> {
pub fn new(data_dir: DataDir, password: Password) -> Self { pub fn new(data_dir: DataDir, password: Password, idmapped_root: Vec<(u32, u32)>) -> Self {
BackupFS { data_dir, password } BackupFS {
data_dir,
password,
idmapped_root,
}
} }
} }
impl<DataDir: AsRef<Path> + Send + Sync, Password: fmt::Display + Send + Sync> FileSystem impl<DataDir: AsRef<Path> + Send + Sync, Password: fmt::Display + Send + Sync> FileSystem
@@ -26,9 +32,16 @@ impl<DataDir: AsRef<Path> + Send + Sync, Password: fmt::Display + Send + Sync> F
} }
fn mount_options(&self) -> impl IntoIterator<Item = impl Display> { fn mount_options(&self) -> impl IntoIterator<Item = impl Display> {
[ [
format!("password={}", self.password), Cow::Owned(format!("password={}", self.password)),
format!("file-size-padding=0.05"), Cow::Borrowed("file-size-padding=0.05"),
Cow::Borrowed("allow_other"),
] ]
.into_iter()
.chain(
self.idmapped_root
.iter()
.map(|(root, range)| Cow::Owned(format!("idmapped-root={root}:{range}"))),
)
} }
async fn source(&self) -> Result<Option<impl AsRef<Path>>, Error> { async fn source(&self) -> Result<Option<impl AsRef<Path>>, Error> {
Ok(Some(&self.data_dir)) Ok(Some(&self.data_dir))

View File

@@ -34,6 +34,7 @@ use crate::util::actor::concurrent::ConcurrentActor;
use crate::util::actor::Actor; use crate::util::actor::Actor;
use crate::util::io::create_file; use crate::util::io::create_file;
use crate::util::serde::Pem; use crate::util::serde::Pem;
use crate::util::Never;
use crate::volume::data_dir; use crate::volume::data_dir;
mod action; mod action;
@@ -220,12 +221,13 @@ impl Service {
tracing::error!("Error opening s9pk for install: {e}"); tracing::error!("Error opening s9pk for install: {e}");
tracing::debug!("{e:?}") tracing::debug!("{e:?}")
}) { }) {
if let Ok(service) = Self::install(ctx.clone(), s9pk, None, None) if let Ok(service) =
.await Self::install(ctx.clone(), s9pk, None, None::<Never>, None)
.map_err(|e| { .await
tracing::error!("Error installing service: {e}"); .map_err(|e| {
tracing::debug!("{e:?}") tracing::error!("Error installing service: {e}");
}) tracing::debug!("{e:?}")
})
{ {
return Ok(Some(service)); return Ok(Some(service));
} }
@@ -257,6 +259,7 @@ impl Service {
ctx.clone(), ctx.clone(),
s9pk, s9pk,
Some(s.as_manifest().as_version().de()?), Some(s.as_manifest().as_version().de()?),
None::<Never>,
None, None,
) )
.await .await
@@ -334,13 +337,35 @@ impl Service {
pub async fn install( pub async fn install(
ctx: RpcContext, ctx: RpcContext,
s9pk: S9pk, s9pk: S9pk,
src_version: Option<models::VersionString>, mut src_version: Option<models::VersionString>,
recovery_source: Option<impl GenericMountGuard>,
progress: Option<InstallProgressHandles>, progress: Option<InstallProgressHandles>,
) -> Result<ServiceRef, Error> { ) -> Result<ServiceRef, Error> {
let manifest = s9pk.as_manifest().clone(); let manifest = s9pk.as_manifest().clone();
let developer_key = s9pk.as_archive().signer(); let developer_key = s9pk.as_archive().signer();
let icon = s9pk.icon_data_url().await?; let icon = s9pk.icon_data_url().await?;
let service = Self::new(ctx.clone(), s9pk, StartStop::Stop).await?; let service = Self::new(ctx.clone(), s9pk, StartStop::Stop).await?;
if let Some(recovery_source) = recovery_source {
service
.actor
.send(
Guid::new(),
transition::restore::Restore {
path: recovery_source.path().to_path_buf(),
},
)
.await??;
recovery_source.unmount().await?;
src_version = Some(
service
.seed
.persistent_container
.s9pk
.as_manifest()
.version
.clone(),
);
}
service service
.seed .seed
.persistent_container .persistent_container
@@ -382,26 +407,6 @@ impl Service {
Ok(service) Ok(service)
} }
pub async fn restore(
ctx: RpcContext,
s9pk: S9pk,
backup_source: impl GenericMountGuard,
progress: Option<InstallProgressHandles>,
) -> Result<ServiceRef, Error> {
let service = Service::install(ctx.clone(), s9pk, None, progress).await?;
service
.actor
.send(
Guid::new(),
transition::restore::Restore {
path: backup_source.path().to_path_buf(),
},
)
.await??;
Ok(service)
}
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn backup(&self, guard: impl GenericMountGuard) -> Result<(), Error> { pub async fn backup(&self, guard: impl GenericMountGuard) -> Result<(), Error> {
let id = &self.seed.id; let id = &self.seed.id;
@@ -417,10 +422,11 @@ impl Service {
.send( .send(
Guid::new(), Guid::new(),
transition::backup::Backup { transition::backup::Backup {
path: guard.path().to_path_buf(), path: guard.path().join("data"),
}, },
) )
.await??; .await??
.await?;
Ok(()) Ok(())
} }
@@ -505,13 +511,21 @@ impl Actor for ServiceActor {
} }
(Some(TransitionKind::Restarting), _, _) => MainStatus::Restarting, (Some(TransitionKind::Restarting), _, _) => MainStatus::Restarting,
(Some(TransitionKind::Restoring), _, _) => MainStatus::Restoring, (Some(TransitionKind::Restoring), _, _) => MainStatus::Restoring,
(Some(TransitionKind::BackingUp), _, Some(status)) => { (Some(TransitionKind::BackingUp), StartStop::Stop, Some(status)) => {
seed.persistent_container.stop().await?;
MainStatus::BackingUp { MainStatus::BackingUp {
started: Some(status.started), started: Some(status.started),
health: status.health.clone(), health: status.health.clone(),
} }
} }
(Some(TransitionKind::BackingUp), _, None) => MainStatus::BackingUp { (Some(TransitionKind::BackingUp), StartStop::Start, _) => {
seed.persistent_container.start().await?;
MainStatus::BackingUp {
started: None,
health: OrdMap::new(),
}
}
(Some(TransitionKind::BackingUp), _, _) => MainStatus::BackingUp {
started: None, started: None,
health: OrdMap::new(), health: OrdMap::new(),
}, },

View File

@@ -1,5 +1,5 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::path::{Path, PathBuf}; use std::path::Path;
use std::sync::{Arc, Weak}; use std::sync::{Arc, Weak};
use std::time::Duration; use std::time::Duration;
@@ -277,7 +277,7 @@ impl PersistentContainer {
backup_path: impl AsRef<Path>, backup_path: impl AsRef<Path>,
mount_type: MountType, mount_type: MountType,
) -> Result<MountGuard, Error> { ) -> Result<MountGuard, Error> {
let backup_path: PathBuf = backup_path.as_ref().to_path_buf(); let backup_path = backup_path.as_ref();
let mountpoint = self let mountpoint = self
.lxc_container .lxc_container
.get() .get()
@@ -295,14 +295,14 @@ impl PersistentContainer {
.arg(mountpoint.as_os_str()) .arg(mountpoint.as_os_str())
.invoke(ErrorKind::Filesystem) .invoke(ErrorKind::Filesystem)
.await?; .await?;
let bind = Bind::new(&backup_path); tokio::fs::create_dir_all(backup_path).await?;
let mount_guard = MountGuard::mount(&bind, &mountpoint, mount_type).await;
Command::new("chown") Command::new("chown")
.arg("100000:100000") .arg("100000:100000")
.arg(backup_path.as_os_str()) .arg(backup_path)
.invoke(ErrorKind::Filesystem) .invoke(ErrorKind::Filesystem)
.await?; .await?;
mount_guard let bind = Bind::new(backup_path);
MountGuard::mount(&bind, &mountpoint, mount_type).await
} }
#[instrument(skip_all)] #[instrument(skip_all)]

View File

@@ -265,35 +265,20 @@ impl ServiceMap {
} else { } else {
None None
}; };
if let Some(recovery_source) = recovery_source { *service = Some(
*service = Some( Service::install(
Service::restore( ctx,
ctx, s9pk,
s9pk, prev,
recovery_source, recovery_source,
Some(InstallProgressHandles { Some(InstallProgressHandles {
finalization_progress, finalization_progress,
progress, progress,
}), }),
) )
.await? .await?
.into(), .into(),
); );
} else {
*service = Some(
Service::install(
ctx,
s9pk,
prev,
Some(InstallProgressHandles {
finalization_progress,
progress,
}),
)
.await?
.into(),
);
}
drop(service); drop(service);
sync_progress_task.await.map_err(|_| { sync_progress_task.await.map_err(|_| {

View File

@@ -1,5 +1,7 @@
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::Arc;
use futures::future::BoxFuture;
use futures::FutureExt; use futures::FutureExt;
use models::ProcedureName; use models::ProcedureName;
@@ -19,7 +21,7 @@ pub(in crate::service) struct Backup {
pub path: PathBuf, pub path: PathBuf,
} }
impl Handler<Backup> for ServiceActor { impl Handler<Backup> for ServiceActor {
type Response = Result<(), Error>; type Response = Result<BoxFuture<'static, Result<(), Error>>, Error>;
fn conflicts_with(_: &Backup) -> ConflictBuilder<Self> { fn conflicts_with(_: &Backup) -> ConflictBuilder<Self> {
ConflictBuilder::everything() ConflictBuilder::everything()
.except::<GetConfig>() .except::<GetConfig>()
@@ -37,43 +39,31 @@ impl Handler<Backup> for ServiceActor {
let path = backup.path.clone(); let path = backup.path.clone();
let seed = self.0.clone(); let seed = self.0.clone();
let state = self.0.persistent_container.state.clone(); let transition = RemoteCancellable::new(async move {
let transition = RemoteCancellable::new( temp.stop();
async move { current
temp.stop(); .wait_for(|s| s.running_status.is_none())
.await
.with_kind(ErrorKind::Unknown)?;
let backup_guard = seed
.persistent_container
.mount_backup(path, ReadWrite)
.await?;
seed.persistent_container
.execute(id, ProcedureName::CreateBackup, Value::Null, None)
.await?;
backup_guard.unmount(true).await?;
if temp.restore().is_start() {
current current
.wait_for(|s| s.running_status.is_none()) .wait_for(|s| s.running_status.is_some())
.await .await
.with_kind(ErrorKind::Unknown)?; .with_kind(ErrorKind::Unknown)?;
let backup_guard = seed
.persistent_container
.mount_backup(path, ReadWrite)
.await?;
seed.persistent_container
.execute(id, ProcedureName::CreateBackup, Value::Null, None)
.await?;
backup_guard.unmount(true).await?;
if temp.restore().is_start() {
current
.wait_for(|s| s.running_status.is_some())
.await
.with_kind(ErrorKind::Unknown)?;
}
drop(temp);
state.send_modify(|s| {
s.transition_state.take();
});
Ok::<_, Error>(())
} }
.map(|x| { drop(temp);
if let Err(err) = dbg!(x) { Ok::<_, Arc<Error>>(())
tracing::debug!("{:?}", err); });
tracing::warn!("{}", err);
}
}),
);
let cancel_handle = transition.cancellation_handle(); let cancel_handle = transition.cancellation_handle();
let transition = transition.shared(); let transition = transition.shared();
let job_transition = transition.clone(); let job_transition = transition.clone();
@@ -92,9 +82,11 @@ impl Handler<Backup> for ServiceActor {
if let Some(t) = old { if let Some(t) = old {
t.abort().await; t.abort().await;
} }
match transition.await { Ok(transition
None => Err(Error::new(eyre!("Backup canceled"), ErrorKind::Unknown)), .map(|r| {
Some(x) => Ok(x), r.ok_or_else(|| Error::new(eyre!("Backup canceled"), ErrorKind::Cancelled))?
} .map_err(|e| e.clone_output())
})
.boxed())
} }
} }

View File

@@ -79,7 +79,10 @@ impl TempDesiredRestore {
} }
impl Drop for TempDesiredRestore { impl Drop for TempDesiredRestore {
fn drop(&mut self) { fn drop(&mut self) {
self.0.send_modify(|s| s.temp_desired_state = None); self.0.send_modify(|s| {
s.temp_desired_state.take();
s.transition_state.take();
});
} }
} }
// impl Deref for TempDesiredState { // impl Deref for TempDesiredState {

View File

@@ -10,12 +10,12 @@ use crate::util::{FileLock, Invoke};
use crate::{Error, ErrorKind}; use crate::{Error, ErrorKind};
lazy_static::lazy_static! { lazy_static::lazy_static! {
static ref SEMITONE_K: f64 = 2f64.powf(1f64 / 12f64); static ref SEMITONE_K: f64 = 2f64.powf(1.0 / 12.0);
static ref A_4: f64 = 440f64; static ref A_4: f64 = 440.0;
static ref C_0: f64 = *A_4 / SEMITONE_K.powf(9f64) / 2f64.powf(4f64); static ref C_0: f64 = *A_4 / SEMITONE_K.powf(9.0) / 2_f64.powf(4.0);
} }
pub const SOUND_LOCK_FILE: &str = "/etc/embassy/sound.lock"; pub const SOUND_LOCK_FILE: &str = "/run/startos/sound.lock";
struct SoundInterface { struct SoundInterface {
guard: Option<FileLock>, guard: Option<FileLock>,

View File

@@ -555,7 +555,7 @@ impl<F: FnOnce() -> T, T> Drop for GeneralGuard<F, T> {
} }
} }
pub struct FileLock(OwnedMutexGuard<()>, Option<FdLock<File>>); pub struct FileLock(#[allow(unused)] OwnedMutexGuard<()>, Option<FdLock<File>>);
impl Drop for FileLock { impl Drop for FileLock {
fn drop(&mut self) { fn drop(&mut self) {
if let Some(fd_lock) = self.1.take() { if let Some(fd_lock) = self.1.take() {

View File

@@ -166,6 +166,9 @@ fi
curl -fsSL https://deb.torproject.org/torproject.org/A3C4F0F979CAA22CDBA8F512EE8CBC9E886DDD89.asc > config/archives/tor.key curl -fsSL https://deb.torproject.org/torproject.org/A3C4F0F979CAA22CDBA8F512EE8CBC9E886DDD89.asc > config/archives/tor.key
echo "deb [arch=${IB_TARGET_ARCH} signed-by=/etc/apt/trusted.gpg.d/tor.key.gpg] https://deb.torproject.org/torproject.org ${IB_SUITE} main" > config/archives/tor.list echo "deb [arch=${IB_TARGET_ARCH} signed-by=/etc/apt/trusted.gpg.d/tor.key.gpg] https://deb.torproject.org/torproject.org ${IB_SUITE} main" > config/archives/tor.list
curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o config/archives/docker.key
echo "deb [arch=${IB_TARGET_ARCH} signed-by=/etc/apt/trusted.gpg.d/docker.key.gpg] https://download.docker.com/linux/debian ${IB_SUITE} stable" > config/archives/docker.list
# Dependencies # Dependencies
## Base dependencies ## Base dependencies

View File

@@ -1,6 +1,10 @@
import { recursive } from "ts-matches"
import { SDKManifest } from "../manifest/ManifestTypes" import { SDKManifest } from "../manifest/ManifestTypes"
import * as T from "../types" import * as T from "../types"
import * as child_process from "child_process"
import { promises as fsPromises } from "fs"
export type BACKUP = "BACKUP" export type BACKUP = "BACKUP"
export const DEFAULT_OPTIONS: T.BackupOptions = { export const DEFAULT_OPTIONS: T.BackupOptions = {
delete: true, delete: true,
@@ -91,58 +95,22 @@ export class Backups<M extends SDKManifest> {
) )
return this return this
} }
build() { build(pathMaker: T.PathMaker) {
const createBackup: T.ExpectedExports.createBackup = async ({ const createBackup: T.ExpectedExports.createBackup = async ({
effects, effects,
}) => { }) => {
// const previousItems = (
// await effects
// .readDir({
// volumeId: Backups.BACKUP,
// path: ".",
// })
// .catch(() => [])
// ).map((x) => `${x}`)
// const backupPaths = this.backupSet
// .filter((x) => x.dstVolume === Backups.BACKUP)
// .map((x) => x.dstPath)
// .map((x) => x.replace(/\.\/([^]*)\//, "$1"))
// const filteredItems = previousItems.filter(
// (x) => backupPaths.indexOf(x) === -1,
// )
// for (const itemToRemove of filteredItems) {
// effects.console.error(`Trying to remove ${itemToRemove}`)
// await effects
// .removeDir({
// volumeId: Backups.BACKUP,
// path: itemToRemove,
// })
// .catch(() =>
// effects.removeFile({
// volumeId: Backups.BACKUP,
// path: itemToRemove,
// }),
// )
// .catch(() => {
// console.warn(`Failed to remove ${itemToRemove} from backup volume`)
// })
// }
for (const item of this.backupSet) { for (const item of this.backupSet) {
// if (notEmptyPath(item.dstPath)) { const rsyncResults = await runRsync(
// await effects.createDir({ {
// volumeId: item.dstVolume, dstPath: item.dstPath,
// path: item.dstPath, dstVolume: item.dstVolume,
// }) options: { ...this.options, ...item.options },
// } srcPath: item.srcPath,
// await effects srcVolume: item.srcVolume,
// .runRsync({ },
// ...item, pathMaker,
// options: { )
// ...this.options, await rsyncResults.wait()
// ...item.options,
// },
// })
// .wait()
} }
return return
} }
@@ -150,26 +118,17 @@ export class Backups<M extends SDKManifest> {
effects, effects,
}) => { }) => {
for (const item of this.backupSet) { for (const item of this.backupSet) {
// if (notEmptyPath(item.srcPath)) { const rsyncResults = await runRsync(
// await new Promise((resolve, reject) => fs.mkdir(items.src)).createDir( {
// { dstPath: item.dstPath,
// volumeId: item.srcVolume, dstVolume: item.dstVolume,
// path: item.srcPath, options: { ...this.options, ...item.options },
// }, srcPath: item.srcPath,
// ) srcVolume: item.srcVolume,
// } },
// await effects pathMaker,
// .runRsync({ )
// options: { await rsyncResults.wait()
// ...this.options,
// ...item.options,
// },
// srcVolume: item.dstVolume,
// dstVolume: item.srcVolume,
// srcPath: item.dstPath,
// dstPath: item.srcPath,
// })
// .wait()
} }
return return
} }
@@ -179,3 +138,73 @@ export class Backups<M extends SDKManifest> {
function notEmptyPath(file: string) { function notEmptyPath(file: string) {
return ["", ".", "./"].indexOf(file) === -1 return ["", ".", "./"].indexOf(file) === -1
} }
/**
 * Spawns an `rsync` child process to copy one backup mapping (src volume/path
 * -> dst volume/path), translating volume ids to host paths via `pathMaker`.
 *
 * Returns a handle with:
 *  - `id()`: the child pid as a string (rejects if spawn produced no pid)
 *  - `wait()`: resolves `null` on exit code 0, rejects on non-zero exit or
 *    spawn failure (e.g. rsync binary missing)
 *  - `progress()`: last percentage parsed from `--info=progress2` output
 */
async function runRsync(
  rsyncOptions: {
    srcVolume: string
    dstVolume: string
    srcPath: string
    dstPath: string
    options: T.BackupOptions
  },
  pathMaker: T.PathMaker,
): Promise<{
  id: () => Promise<string>
  wait: () => Promise<null>
  progress: () => Promise<number>
}> {
  const { srcVolume, dstVolume, srcPath, dstPath, options } = rsyncOptions
  const command = "rsync"
  const args: string[] = []
  if (options.delete) {
    args.push("--delete")
  }
  if (options.force) {
    args.push("--force")
  }
  if (options.ignoreExisting) {
    args.push("--ignore-existing")
  }
  for (const exclude of options.exclude) {
    args.push(`--exclude=${exclude}`)
  }
  args.push("-actAXH")
  args.push("--info=progress2")
  args.push("--no-inc-recursive")
  args.push(pathMaker({ volume: srcVolume, path: srcPath }))
  args.push(pathMaker({ volume: dstVolume, path: dstPath }))
  const spawned = child_process.spawn(command, args, { detached: true })
  let percentage = 0.0
  spawned.stdout.on("data", (data: unknown) => {
    // rsync --info=progress2 rewrites its status line with carriage returns,
    // so one chunk may carry many updates. String.replace with a string
    // pattern only touches the FIRST occurrence; split on every \r and \n.
    const lines = String(data).split(/[\r\n]/)
    for (const line of lines) {
      // NOTE: previously /$([0-9.]+)%/ — a leading `$` end-anchor meant the
      // pattern could never match, leaving progress stuck at 0.
      const parsed = /([0-9.]+)%/.exec(line)?.[1]
      if (!parsed) continue
      percentage = Number.parseFloat(parsed)
    }
  })
  spawned.stderr.on("data", (data: unknown) => {
    console.error(String(data))
  })
  const id = async () => {
    const pid = spawned.pid
    if (pid === undefined) {
      throw new Error("rsync process has no pid")
    }
    return String(pid)
  }
  // Created eagerly so the exit/error listeners are attached before the
  // process can finish, even if wait() is called late.
  const waitPromise = new Promise<null>((resolve, reject) => {
    // Without this, a missing rsync binary raises an unhandled 'error'
    // event instead of rejecting wait().
    spawned.on("error", (e: Error) => {
      reject(new Error(`rsync failed to start: ${e.message}`))
    })
    spawned.on("exit", (code: any) => {
      if (code === 0) {
        resolve(null)
      } else {
        reject(new Error(`rsync exited with code ${code}`))
      }
    })
  })
  const wait = () => waitPromise
  const progress = () => Promise.resolve(percentage)
  return { id, wait, progress }
}

View File

@@ -1,6 +1,6 @@
import { Backups } from "./Backups" import { Backups } from "./Backups"
import { SDKManifest } from "../manifest/ManifestTypes" import { SDKManifest } from "../manifest/ManifestTypes"
import { ExpectedExports } from "../types" import { ExpectedExports, PathMaker } from "../types"
import { _ } from "../util" import { _ } from "../util"
export type SetupBackupsParams<M extends SDKManifest> = Array< export type SetupBackupsParams<M extends SDKManifest> = Array<
@@ -27,14 +27,14 @@ export function setupBackups<M extends SDKManifest>(
get createBackup() { get createBackup() {
return (async (options) => { return (async (options) => {
for (const backup of backups) { for (const backup of backups) {
await backup.build().createBackup(options) await backup.build(options.pathMaker).createBackup(options)
} }
}) as ExpectedExports.createBackup }) as ExpectedExports.createBackup
}, },
get restoreBackup() { get restoreBackup() {
return (async (options) => { return (async (options) => {
for (const backup of backups) { for (const backup of backups) {
await backup.build().restoreBackup(options) await backup.build(options.pathMaker).restoreBackup(options)
} }
}) as ExpectedExports.restoreBackup }) as ExpectedExports.restoreBackup
}, },

View File

@@ -240,7 +240,6 @@ export type ListValueSpecText = {
inputmode: "text" | "email" | "tel" | "url" inputmode: "text" | "email" | "tel" | "url"
placeholder: string | null placeholder: string | null
} }
export type ListValueSpecObject = { export type ListValueSpecObject = {
type: "object" type: "object"
/** this is a mapped type of the config object at this level, replacing the object's values with specs on those values */ /** this is a mapped type of the config object at this level, replacing the object's values with specs on those values */

View File

@@ -1,7 +1,7 @@
import { ValidEmVer } from "../emverLite/mod" import { ValidEmVer } from "../emverLite/mod"
import { ActionMetadata, ImageConfig, ImageId } from "../types" import { ActionMetadata, ImageConfig, ImageId } from "../types"
export interface Container { export type Container = {
/** This should be pointing to a docker container name */ /** This should be pointing to a docker container name */
image: string image: string
/** These should match the manifest data volumes */ /** These should match the manifest data volumes */
@@ -72,7 +72,7 @@ export type SDKManifest = {
readonly dependencies: Readonly<Record<string, ManifestDependency>> readonly dependencies: Readonly<Record<string, ManifestDependency>>
} }
export interface ManifestDependency { export type ManifestDependency = {
/** /**
* A human readable explanation on what the dependency is used for * A human readable explanation on what the dependency is used for
*/ */

View File

@@ -26,6 +26,7 @@ export * from "./osBindings"
export { SDKManifest } from "./manifest/ManifestTypes" export { SDKManifest } from "./manifest/ManifestTypes"
export { HealthReceipt } from "./health/HealthReceipt" export { HealthReceipt } from "./health/HealthReceipt"
export type PathMaker = (options: { volume: string; path: string }) => string
export type ExportedAction = (options: { export type ExportedAction = (options: {
effects: Effects effects: Effects
input?: Record<string, unknown> input?: Record<string, unknown>
@@ -43,10 +44,14 @@ export namespace ExpectedExports {
// /** These are how we make sure the our dependency configurations are valid and if not how to fix them. */ // /** These are how we make sure the our dependency configurations are valid and if not how to fix them. */
// export type dependencies = Dependencies; // export type dependencies = Dependencies;
/** For backing up service data though the startOS UI */ /** For backing up service data though the startOS UI */
export type createBackup = (options: { effects: Effects }) => Promise<unknown> export type createBackup = (options: {
effects: Effects
pathMaker: PathMaker
}) => Promise<unknown>
/** For restoring service data that was previously backed up using the startOS UI create backup flow. Backup restores are also triggered via the startOS UI, or doing a system restore flow during setup. */ /** For restoring service data that was previously backed up using the startOS UI create backup flow. Backup restores are also triggered via the startOS UI, or doing a system restore flow during setup. */
export type restoreBackup = (options: { export type restoreBackup = (options: {
effects: Effects effects: Effects
pathMaker: PathMaker
}) => Promise<unknown> }) => Promise<unknown>
// /** Health checks are used to determine if the service is working properly after starting // /** Health checks are used to determine if the service is working properly after starting

View File

@@ -22,6 +22,7 @@ export module Mock {
headline: 'Our biggest release ever.', headline: 'Our biggest release ever.',
releaseNotes: { releaseNotes: {
'0.3.6': 'Some **Markdown** release _notes_ for 0.3.6', '0.3.6': 'Some **Markdown** release _notes_ for 0.3.6',
'0.3.5.2': 'Some **Markdown** release _notes_ for 0.3.5.2',
'0.3.5.1': 'Some **Markdown** release _notes_ for 0.3.5.1', '0.3.5.1': 'Some **Markdown** release _notes_ for 0.3.5.1',
'0.3.4.4': 'Some **Markdown** release _notes_ for 0.3.4.4', '0.3.4.4': 'Some **Markdown** release _notes_ for 0.3.4.4',
'0.3.4.3': 'Some **Markdown** release _notes_ for 0.3.4.3', '0.3.4.3': 'Some **Markdown** release _notes_ for 0.3.4.3',