fix migration to support portable fatties (#1935)

* load docker images directly from s9pk to ensure fatties can be loaded across platforms

* don't migrate tmpdir

* init after package data transfer

* set default rsync options
This commit is contained in:
Aiden McClelland
2022-11-10 10:20:26 -07:00
parent ca71c88744
commit 22b273b145
7 changed files with 44 additions and 56 deletions

View File

@@ -10,7 +10,7 @@ use tokio::process::Command;
use crate::context::rpc::RpcContextConfig; use crate::context::rpc::RpcContextConfig;
use crate::db::model::ServerStatus; use crate::db::model::ServerStatus;
use crate::install::PKG_DOCKER_DIR; use crate::install::PKG_ARCHIVE_DIR;
use crate::sound::CIRCLE_OF_5THS_SHORT; use crate::sound::CIRCLE_OF_5THS_SHORT;
use crate::util::Invoke; use crate::util::Invoke;
use crate::Error; use crate::Error;
@@ -292,7 +292,7 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
tracing::info!("Loaded System Docker Images"); tracing::info!("Loaded System Docker Images");
tracing::info!("Loading Package Docker Images"); tracing::info!("Loading Package Docker Images");
crate::install::load_images(cfg.datadir().join(PKG_DOCKER_DIR)).await?; crate::install::load_images(cfg.datadir().join(PKG_ARCHIVE_DIR)).await?;
tracing::info!("Loaded Package Docker Images"); tracing::info!("Loaded Package Docker Images");
} }

View File

@@ -5,7 +5,7 @@ use patch_db::{DbHandle, LockReceipt, LockTargetId, LockType, PatchDbHandle, Ver
use sqlx::{Executor, Postgres}; use sqlx::{Executor, Postgres};
use tracing::instrument; use tracing::instrument;
use super::{PKG_ARCHIVE_DIR, PKG_DOCKER_DIR}; use super::PKG_ARCHIVE_DIR;
use crate::config::{not_found, ConfigReceipts}; use crate::config::{not_found, ConfigReceipts};
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::db::model::{ use crate::db::model::{
@@ -145,16 +145,6 @@ pub async fn cleanup(ctx: &RpcContext, id: &PackageId, version: &Version) -> Res
.await .await
.apply(|res| errors.handle(res)); .apply(|res| errors.handle(res));
} }
let docker_path = ctx
.datadir
.join(PKG_DOCKER_DIR)
.join(id)
.join(version.as_str());
if tokio::fs::metadata(&docker_path).await.is_ok() {
tokio::fs::remove_dir_all(&docker_path)
.await
.apply(|res| errors.handle(res));
}
let assets_path = asset_dir(&ctx.datadir, id, version); let assets_path = asset_dir(&ctx.datadir, id, version);
if tokio::fs::metadata(&assets_path).await.is_ok() { if tokio::fs::metadata(&assets_path).await.is_ok() {
tokio::fs::remove_dir_all(&assets_path) tokio::fs::remove_dir_all(&assets_path)

View File

@@ -56,7 +56,6 @@ pub mod update;
pub const PKG_ARCHIVE_DIR: &str = "package-data/archive"; pub const PKG_ARCHIVE_DIR: &str = "package-data/archive";
pub const PKG_PUBLIC_DIR: &str = "package-data/public"; pub const PKG_PUBLIC_DIR: &str = "package-data/public";
pub const PKG_DOCKER_DIR: &str = "package-data/docker";
pub const PKG_WASM_DIR: &str = "package-data/wasm"; pub const PKG_WASM_DIR: &str = "package-data/wasm";
#[command(display(display_serializable))] #[command(display(display_serializable))]
@@ -1014,44 +1013,11 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
tracing::info!("Install {}@{}: Unpacking Docker Images", pkg_id, version); tracing::info!("Install {}@{}: Unpacking Docker Images", pkg_id, version);
progress progress
.track_read_during(progress_model.clone(), &ctx.db, || async { .track_read_during(progress_model.clone(), &ctx.db, || async {
let image_tar_dir = ctx
.datadir
.join(PKG_DOCKER_DIR)
.join(pkg_id)
.join(version.as_str());
if tokio::fs::metadata(&image_tar_dir).await.is_err() {
tokio::fs::create_dir_all(&image_tar_dir)
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
image_tar_dir.display().to_string(),
)
})?;
}
let image_tar_path = image_tar_dir.join("image.tar");
let mut tee = Command::new("tee")
.arg(&image_tar_path)
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.spawn()?;
let mut load = Command::new("docker") let mut load = Command::new("docker")
.arg("load") .arg("load")
.stdin(Stdio::piped()) .stdin(Stdio::piped())
.stderr(Stdio::piped()) .stderr(Stdio::piped())
.spawn()?; .spawn()?;
let tee_in = tee.stdin.take().ok_or_else(|| {
Error::new(
eyre!("Could not write to stdin of tee"),
crate::ErrorKind::Docker,
)
})?;
let mut tee_out = tee.stdout.take().ok_or_else(|| {
Error::new(
eyre!("Could not read from stdout of tee"),
crate::ErrorKind::Docker,
)
})?;
let load_in = load.stdin.take().ok_or_else(|| { let load_in = load.stdin.take().ok_or_else(|| {
Error::new( Error::new(
eyre!("Could not write to stdin of docker load"), eyre!("Could not write to stdin of docker load"),
@@ -1059,10 +1025,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
) )
})?; })?;
let mut docker_rdr = rdr.docker_images().await?; let mut docker_rdr = rdr.docker_images().await?;
tokio::try_join!( copy_and_shutdown(&mut docker_rdr, load_in).await?;
copy_and_shutdown(&mut docker_rdr, tee_in),
copy_and_shutdown(&mut tee_out, load_in),
)?;
let res = load.wait_with_output().await?; let res = load.wait_with_output().await?;
if !res.status.success() { if !res.status.success() {
Err(Error::new( Err(Error::new(
@@ -1435,7 +1398,9 @@ pub fn load_images<'a, P: AsRef<Path> + 'a + Send + Sync>(
.try_for_each(|entry| async move { .try_for_each(|entry| async move {
let m = entry.metadata().await?; let m = entry.metadata().await?;
if m.is_file() { if m.is_file() {
if entry.path().extension().and_then(|ext| ext.to_str()) == Some("tar") { let path = entry.path();
let ext = path.extension().and_then(|ext| ext.to_str());
if ext == Some("tar") || ext == Some("s9pk") {
let mut load = Command::new("docker") let mut load = Command::new("docker")
.arg("load") .arg("load")
.stdin(Stdio::piped()) .stdin(Stdio::piped())
@@ -1447,8 +1412,24 @@ pub fn load_images<'a, P: AsRef<Path> + 'a + Send + Sync>(
crate::ErrorKind::Docker, crate::ErrorKind::Docker,
) )
})?; })?;
let mut docker_rdr = File::open(&entry.path()).await?; match ext {
copy_and_shutdown(&mut docker_rdr, load_in).await?; Some("tar") => {
copy_and_shutdown(&mut File::open(&path).await?, load_in)
.await?
}
Some("s9pk") => {
copy_and_shutdown(
&mut S9pkReader::open(&path, false)
.await?
.docker_images()
.await?,
load_in,
)
.await?
}
_ => unreachable!(),
};
let res = load.wait_with_output().await?; let res = load.wait_with_output().await?;
if !res.status.success() { if !res.status.success() {
Err(Error::new( Err(Error::new(

View File

@@ -81,7 +81,6 @@ async fn setup_init(
ctx: &SetupContext, ctx: &SetupContext,
password: Option<String>, password: Option<String>,
) -> Result<(Hostname, OnionAddressV3, X509), Error> { ) -> Result<(Hostname, OnionAddressV3, X509), Error> {
init(&RpcContextConfig::load(ctx.config_path.clone()).await?).await?;
let secrets = ctx.secret_store().await?; let secrets = ctx.secret_store().await?;
let db = ctx.db(&secrets).await?; let db = ctx.db(&secrets).await?;
let mut secrets_handle = secrets.acquire().await?; let mut secrets_handle = secrets.acquire().await?;
@@ -159,6 +158,7 @@ pub async fn attach(
)); ));
} }
let (hostname, tor_addr, root_ca) = setup_init(&ctx, password).await?; let (hostname, tor_addr, root_ca) = setup_init(&ctx, password).await?;
init(&RpcContextConfig::load(ctx.config_path.clone()).await?).await?;
let setup_result = SetupResult { let setup_result = SetupResult {
tor_address: format!("http://{}", tor_addr), tor_address: format!("http://{}", tor_addr),
lan_address: hostname.lan_address(), lan_address: hostname.lan_address(),
@@ -410,6 +410,7 @@ pub async fn execute_inner(
delete: true, delete: true,
force: true, force: true,
ignore_existing: false, ignore_existing: false,
exclude: Vec::new(),
}, },
)? )?
.wait() .wait()
@@ -429,6 +430,7 @@ pub async fn execute_inner(
delete: true, delete: true,
force: true, force: true,
ignore_existing: false, ignore_existing: false,
exclude: vec!["tmp".to_owned()],
}, },
)?; )?;
*ctx.recovery_status.write().await = Some(Ok(RecoveryStatus { *ctx.recovery_status.write().await = Some(Ok(RecoveryStatus {
@@ -448,6 +450,7 @@ pub async fn execute_inner(
})); }));
} }
package_data_transfer.wait().await?; package_data_transfer.wait().await?;
init(&RpcContextConfig::load(ctx.config_path.clone()).await?).await?;
Ok::<_, Error>(()) Ok::<_, Error>(())
} }
.and_then(|_| async { .and_then(|_| async {

View File

@@ -320,6 +320,7 @@ async fn sync_boot() -> Result<(), Error> {
delete: false, delete: false,
force: false, force: false,
ignore_existing: true, ignore_existing: true,
exclude: Vec::new(),
}, },
)? )?
.wait() .wait()

View File

@@ -16,6 +16,10 @@ pub use byte_replacement_reader::*;
pub use rsync::*; pub use rsync::*;
pub use script_dir::*; pub use script_dir::*;
pub fn const_true() -> bool {
true
}
pub fn to_tmp_path(path: impl AsRef<Path>) -> Result<PathBuf, Error> { pub fn to_tmp_path(path: impl AsRef<Path>) -> Result<PathBuf, Error> {
let path = path.as_ref(); let path = path.as_ref();
if let (Some(parent), Some(file_name)) = if let (Some(parent), Some(file_name)) =

View File

@@ -1,7 +1,7 @@
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use std::path::Path; use std::path::Path;
use crate::{ByteReplacementReader, NonDetachingJoinHandle}; use crate::{const_true, ByteReplacementReader, NonDetachingJoinHandle};
use models::{Error, ErrorKind}; use models::{Error, ErrorKind};
use tokio::io::{AsyncBufReadExt, AsyncReadExt, BufReader}; use tokio::io::{AsyncBufReadExt, AsyncReadExt, BufReader};
use tokio::process::{Child, Command}; use tokio::process::{Child, Command};
@@ -11,9 +11,14 @@ use tokio_stream::wrappers::WatchStream;
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)] #[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub struct RsyncOptions { pub struct RsyncOptions {
#[serde(default = "const_true")]
pub delete: bool, pub delete: bool,
#[serde(default = "const_true")]
pub force: bool, pub force: bool,
#[serde(default)]
pub ignore_existing: bool, pub ignore_existing: bool,
#[serde(default)]
pub exclude: Vec<String>,
} }
impl Default for RsyncOptions { impl Default for RsyncOptions {
fn default() -> Self { fn default() -> Self {
@@ -21,6 +26,7 @@ impl Default for RsyncOptions {
delete: true, delete: true,
force: true, force: true,
ignore_existing: false, ignore_existing: false,
exclude: Vec::new(),
} }
} }
} }
@@ -47,6 +53,9 @@ impl Rsync {
if options.ignore_existing { if options.ignore_existing {
cmd.arg("--ignore-existing"); cmd.arg("--ignore-existing");
} }
for exclude in options.exclude {
cmd.arg(format!("--exclude={}", exclude));
}
let mut command = cmd let mut command = cmd
.arg("-ac") .arg("-ac")
.arg("--info=progress2") .arg("--info=progress2")