appmgr 0.3.0 rewrite pt 1

appmgr: split bins

update cargo.toml and .gitignore

context

appmgr: refactor error module

appmgr: context

begin new s9pk format

appmgr: add fields to manifest

appmgr: start action abstraction

appmgr: volume abstraction

appmgr: improved volumes

appmgr: install wip

appmgr: health daemon

appmgr: health checks

appmgr: wip

config get

appmgr: secret store

wip

appmgr: config rewritten

appmgr: delete non-reusable code

appmgr: wip

appmgr: please the borrow-checker

appmgr: technically runs now

appmgr: cli

appmgr: clean up cli

appmgr: rpc-toolkit in action

appmgr: wrap up config

appmgr: account for updates during install

appmgr: fix: #308

appmgr: impl Display for Version

appmgr: cleanup

appmgr: set dependents on install

appmgr: dependency health checks
This commit is contained in:
Aiden McClelland
2021-04-08 11:16:25 -06:00
parent fd354b6cbe
commit ad12bf395c
84 changed files with 7510 additions and 9942 deletions

3
appmgr/.gitignore vendored
View File

@@ -1,3 +1,4 @@
/target
**/*.rs.bk
.DS_Store
.DS_Store
.vscode

1907
appmgr/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,16 +1,28 @@
[package]
authors = ["Aiden McClelland <me@drbonez.dev>"]
edition = "2018"
name = "appmgr"
version = "0.2.14"
name = "embassy"
version = "0.3.0"
[lib]
name = "appmgrlib"
name = "embassy"
path = "src/lib.rs"
[[bin]]
name = "appmgr"
path = "src/main.rs"
name = "embassyd"
path = "src/bin/embassyd.rs"
[[bin]]
name = "embassy-init"
path = "src/bin/embassy-init.rs"
[[bin]]
name = "embassy-sdk"
path = "src/bin/embassy-sdk.rs"
[[bin]]
name = "embassy-cli"
path = "src/bin/embassy-cli.rs"
[features]
avahi = ["avahi-sys"]
@@ -19,42 +31,53 @@ portable = []
production = []
[dependencies]
anyhow = "1.0.40"
async-trait = "0.1.42"
avahi-sys = { git = "https://github.com/Start9Labs/avahi-sys", branch = "feature/dynamic-linking", features = [
"dynamic",
], optional = true }
base32 = "0.4.0"
bollard = "0.10.1"
chrono = { version = "0.4.19", features = ["serde"] }
clap = "2.33"
ctrlc = "3.1.7"
ed25519-dalek = "1.0.1"
emver = { version = "0.1.0", features = ["serde"] }
failure = "0.1.8"
file-lock = "1.1"
digest = "0.9.0"
ed25519-dalek = { version = "1.0.1", features = ["serde"] }
emver = { version = "0.1.2", features = ["serde"] }
futures = "0.3.8"
git-version = "0.3.4"
http = "0.2.3"
itertools = "0.9.0"
id-pool = { version = "0.2.1", features = ["u16", "serde"], default-features = false }
indexmap = { version = "1.6.2", features = ["serde"] }
itertools = "0.10.0"
jsonpath_lib = "0.2.6"
lazy_static = "1.4"
libc = "0.2.86"
linear-map = { version = "1.2", features = ["serde_impl"] }
log = "0.4.11"
nix = "0.19.1"
nix = "0.20.0"
openssl = "0.10.30"
pest = "2.1"
pest_derive = "2.1"
patch-db = { path = "../../patch-db/patch-db" }
pin-project = "1.0.6"
prettytable-rs = "0.8.0"
rand = "0.7.3"
rand = "0.8.3"
regex = "1.4.2"
reqwest = { version = "0.10.9", features = ["stream", "json"] }
reqwest = { version = "0.11.2", features = ["stream", "json"] }
rpassword = "5.0.0"
rpc-toolkit = { version = "*", path = "../../rpc-toolkit/rpc-toolkit" }
rust-argon2 = "0.8.3"
scopeguard = "1.1" # workaround: guards cleanup around avahi-sys misbehavior
serde = { version = "1.0.118", features = ["derive", "rc"] }
serde_cbor = "0.11.1"
serde_json = "1.0.59"
serde_toml = { package = "toml", version = "0.5.8" }
serde_yaml = "0.8.14"
sha2 = "0.9.3"
simple-logging = "2.0"
tokio = { version = "0.3.5", features = ["full"] }
tokio-compat-02 = "0.1.2"
tokio-tar = { version = "0.3.0", git = "https://github.com/dr-bonez/tokio-tar.git", rev = "1ba710f3" }
yajrc = { version = "0.1.0", git = "https://github.com/dr-bonez/yajrc", rev = "c2952a4a21c50f7be6f8003afa37ee77deb66d56" }
sqlx = { version = "0.5", features = ["runtime-tokio-rustls", "sqlite"] }
thiserror = "1.0.24"
tokio = { version = "1.5.0", features = ["full"] }
tokio-compat-02 = "0.2.0"
tokio-stream = { version = "0.1.5", features = ["io-util"] }
tokio-tar = "0.3.0"
tokio-util = { version = "0.6.6", features = ["io"] }
typed-builder = "0.9.0"
url = { version = "2.2.1", features = ["serde"] }

2
appmgr/rustfmt.toml Normal file
View File

@@ -0,0 +1,2 @@
group_imports = "StdExternalCrate"
imports_granularity = "Module"

234
appmgr/src/action/docker.rs Normal file
View File

@@ -0,0 +1,234 @@
use std::borrow::Cow;
use std::ffi::{OsStr, OsString};
use std::net::Ipv4Addr;
use std::path::PathBuf;
use indexmap::IndexMap;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use crate::id::ImageId;
use crate::net::host::Hosts;
use crate::s9pk::manifest::{PackageId, SYSTEM_PACKAGE_ID};
use crate::util::{Invoke, IoFormat, Version};
use crate::volume::{VolumeId, Volumes};
use crate::{Error, ResultExt};
/// Configuration for an action implemented as a Docker container run/exec.
///
/// Deserialized from a package manifest (kebab-case keys).
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct DockerAction {
    /// Image the container is created from.
    pub image: ImageId,
    /// If true, the image is resolved against the system package
    /// (SYSTEM_PACKAGE_ID) instead of the service's own package.
    #[serde(default)]
    pub system: bool,
    /// Binary to run inside the container (passed as `--entrypoint`).
    pub entrypoint: String,
    /// Arguments passed to the entrypoint.
    #[serde(default)]
    pub args: Vec<String>,
    /// Volume-id -> in-container path bind mounts.
    #[serde(default)]
    pub mounts: IndexMap<VolumeId, PathBuf>,
    /// Serialization format used for stdin/stdout; None disables structured IO.
    #[serde(default)]
    pub io_format: Option<IoFormat>,
    /// If true (and allowed by the caller), run via `docker exec` in the
    /// service's main container instead of spawning a new one.
    #[serde(default)]
    pub inject: bool,
    /// Size of /dev/shm in megabytes (`--shm-size`).
    #[serde(default)]
    pub shm_size_mb: Option<usize>, // TODO: use postfix sizing? like 1k vs 1m vs 1g
}
impl DockerAction {
pub async fn create(
&self,
pkg_id: &PackageId,
pkg_version: &Version,
volumes: &Volumes,
ip: Ipv4Addr,
) -> Result<(), Error> {
tokio::process::Command::new("docker")
.arg("create")
.arg("--net")
.arg("start9")
.arg("--ip")
.arg(format!("{}", ip))
.arg("--name")
.arg(Self::container_name(pkg_id, pkg_version))
.args(self.docker_args(pkg_id, pkg_version, volumes, false))
.invoke(crate::ErrorKind::Docker)
.await?;
Ok(())
}
pub async fn execute<I: Serialize, O: for<'de> Deserialize<'de>>(
&self,
pkg_id: &PackageId,
pkg_version: &Version,
volumes: &Volumes,
hosts: &Hosts,
input: Option<I>,
allow_inject: bool,
) -> Result<Result<O, (i32, String)>, Error> {
let mut cmd = tokio::process::Command::new("docker");
if self.inject && allow_inject {
cmd.arg("exec");
} else {
cmd.arg("run").arg("--rm");
cmd.args(hosts.docker_args());
}
cmd.args(self.docker_args(pkg_id, pkg_version, volumes, allow_inject));
let input_buf = if let (Some(input), Some(format)) = (&input, &self.io_format) {
cmd.stdin(std::process::Stdio::piped());
Some(format.to_vec(input)?)
} else {
None
};
let mut handle = cmd.spawn().with_kind(crate::ErrorKind::Docker)?;
if let (Some(input), Some(stdin)) = (&input_buf, &mut handle.stdin) {
use tokio::io::AsyncWriteExt;
stdin
.write_all(input)
.await
.with_kind(crate::ErrorKind::Docker)?;
}
let res = handle
.wait_with_output()
.await
.with_kind(crate::ErrorKind::Docker)?;
Ok(if res.status.success() {
Ok(if let Some(format) = &self.io_format {
match format.from_slice(&res.stdout) {
Ok(a) => a,
Err(e) => {
log::warn!(
"Failed to deserialize stdout from {}: {}, falling back to UTF-8 string.",
format,
e
);
serde_json::from_value(String::from_utf8(res.stdout)?.into())
.with_kind(crate::ErrorKind::Deserialization)?
}
}
} else if res.stdout.is_empty() {
serde_json::from_value(Value::Null).with_kind(crate::ErrorKind::Deserialization)?
} else {
serde_json::from_value(String::from_utf8(res.stdout)?.into())
.with_kind(crate::ErrorKind::Deserialization)?
})
} else {
Err((
res.status.code().unwrap_or_default(),
String::from_utf8(res.stderr)?,
))
})
}
pub async fn sandboxed<I: Serialize, O: for<'de> Deserialize<'de>>(
&self,
pkg_id: &PackageId,
pkg_version: &Version,
input: Option<I>,
) -> Result<Result<O, (i32, String)>, Error> {
let mut cmd = tokio::process::Command::new("docker");
cmd.arg("run").arg("--rm");
cmd.arg("--network=none");
cmd.args(self.docker_args(pkg_id, pkg_version, &Volumes::default(), false));
let input_buf = if let (Some(input), Some(format)) = (&input, &self.io_format) {
cmd.stdin(std::process::Stdio::piped());
Some(format.to_vec(input)?)
} else {
None
};
let mut handle = cmd.spawn().with_kind(crate::ErrorKind::Docker)?;
if let (Some(input), Some(stdin)) = (&input_buf, &mut handle.stdin) {
use tokio::io::AsyncWriteExt;
stdin
.write_all(input)
.await
.with_kind(crate::ErrorKind::Docker)?;
}
let res = handle
.wait_with_output()
.await
.with_kind(crate::ErrorKind::Docker)?;
Ok(if res.status.success() {
Ok(if let Some(format) = &self.io_format {
match format.from_slice(&res.stdout) {
Ok(a) => a,
Err(e) => {
log::warn!(
"Failed to deserialize stdout from {}: {}, falling back to UTF-8 string.",
format,
e
);
serde_json::from_value(String::from_utf8(res.stdout)?.into())
.with_kind(crate::ErrorKind::Deserialization)?
}
}
} else if res.stdout.is_empty() {
serde_json::from_value(Value::Null).with_kind(crate::ErrorKind::Deserialization)?
} else {
serde_json::from_value(String::from_utf8(res.stdout)?.into())
.with_kind(crate::ErrorKind::Deserialization)?
})
} else {
Err((
res.status.code().unwrap_or_default(),
String::from_utf8(res.stderr)?,
))
})
}
pub fn container_name(pkg_id: &PackageId, version: &Version) -> String {
format!("service_{}_{}", pkg_id, version)
}
pub fn uncontainer_name(name: &str) -> Option<&str> {
name.strip_prefix("service_")
.and_then(|name| name.split("_").next())
}
fn docker_args<'a>(
&'a self,
pkg_id: &PackageId,
pkg_version: &Version,
volumes: &Volumes,
allow_inject: bool,
) -> Vec<Cow<'a, OsStr>> {
let mut res = Vec::with_capacity(
(2 * self.mounts.len()) // --mount <MOUNT_ARG>
+ (2 * self.shm_size_mb.is_some() as usize) // --shm-size <SHM_SIZE>
+ 3 // --entrypoint <ENTRYPOINT> <IMAGE>
+ self.args.len(), // [ARG...]
);
for (volume_id, dst) in &self.mounts {
let src = if let Some(path) = volumes.get_path_for(pkg_id, volume_id) {
path
} else {
continue;
};
res.push(OsStr::new("--mount").into());
res.push(
OsString::from(format!(
"type=bind,src={},dst={}",
src.display(),
dst.display()
))
.into(),
);
}
if let Some(shm_size_mb) = self.shm_size_mb {
res.push(OsStr::new("--shm-size").into());
res.push(OsString::from(format!("{}m", shm_size_mb)).into());
}
if self.inject && allow_inject {
res.push(OsString::from(Self::container_name(pkg_id, pkg_version)).into());
res.push(OsStr::new(&self.entrypoint).into());
} else {
res.push(OsStr::new("--entrypoint").into());
res.push(OsStr::new(&self.entrypoint).into());
if self.system {
res.push(OsString::from(self.image.for_package(SYSTEM_PACKAGE_ID, None)).into());
} else {
res.push(OsString::from(self.image.for_package(pkg_id, Some(pkg_version))).into());
}
}
res.extend(self.args.iter().map(|s| OsStr::new(s).into()));
res
}
}

160
appmgr/src/action/mod.rs Normal file
View File

@@ -0,0 +1,160 @@
use std::net::Ipv4Addr;
use std::path::Path;
use anyhow::anyhow;
use indexmap::{IndexMap, IndexSet};
use patch_db::HasModel;
use serde::{Deserialize, Serialize};
use self::docker::DockerAction;
use crate::config::{Config, ConfigSpec};
use crate::id::Id;
use crate::net::host::Hosts;
use crate::s9pk::manifest::PackageId;
use crate::util::{ValuePrimative, Version};
use crate::volume::Volumes;
use crate::{Error, ResultExt};
pub mod docker;
/// Identifier of an action within a package manifest.
///
/// Thin newtype over [`Id`]; generic over the backing string type so it can
/// either borrow or own its contents.
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)]
pub struct ActionId<S: AsRef<str> = String>(Id<S>);
impl<S: AsRef<str>> AsRef<ActionId<S>> for ActionId<S> {
    fn as_ref(&self) -> &ActionId<S> {
        self
    }
}
impl<S: AsRef<str>> std::fmt::Display for ActionId<S> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", &self.0)
    }
}
impl<S: AsRef<str>> AsRef<str> for ActionId<S> {
    fn as_ref(&self) -> &str {
        self.0.as_ref()
    }
}
impl<S: AsRef<str>> AsRef<Path> for ActionId<S> {
    fn as_ref(&self) -> &Path {
        self.0.as_ref().as_ref()
    }
}
// Manual impl (rather than derive) so the Deserialize bound lands on `Id<S>`
// instead of requiring `S: Deserialize`.
impl<'de, S> Deserialize<'de> for ActionId<S>
where
    S: AsRef<str>,
    Id<S>: Deserialize<'de>,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        Ok(ActionId(Deserialize::deserialize(deserializer)?))
    }
}
/// All actions a package exposes, keyed by action id.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct Actions(pub IndexMap<ActionId, Action>);
/// Versioned result payload produced by running an action.
#[derive(Debug, Deserialize)]
#[serde(tag = "version")]
pub enum ActionResult {
    #[serde(rename = "0")]
    V0(ActionResultV0),
}
/// Version 0 of the action result schema.
#[derive(Debug, Deserialize)]
pub struct ActionResultV0 {
    /// Human-readable message describing the outcome.
    pub message: String,
    /// Primitive value produced by the action, if any.
    pub value: ValuePrimative,
    // presumably UI hints for presenting `value` (copy button / QR render) —
    // confirm against the frontend.
    pub copyable: bool,
    pub qr: bool,
}
/// Coarse container state used to gate when an action may run.
#[derive(Clone, Copy, Debug, Hash, PartialEq, Eq, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub enum DockerStatus {
    Running,
    Stopped,
}
/// A user-invokable action declared in a package manifest.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct Action {
    pub name: String,
    pub description: String,
    #[serde(default)]
    pub warning: Option<String>,
    /// How the action is actually run (currently always Docker).
    pub implementation: ActionImplementation,
    /// Container states in which this action is permitted to run.
    pub allowed_statuses: IndexSet<DockerStatus>,
    /// Spec that user-supplied input must satisfy before execution.
    #[serde(default)]
    pub input_spec: ConfigSpec,
}
impl Action {
pub async fn execute(
&self,
pkg_id: &PackageId,
pkg_version: &Version,
volumes: &Volumes,
hosts: &Hosts,
input: Config,
) -> Result<ActionResult, Error> {
self.input_spec
.matches(&input)
.with_kind(crate::ErrorKind::ConfigSpecViolation)?;
self.implementation
.execute(pkg_id, pkg_version, volumes, hosts, Some(input), true)
.await?
.map_err(|e| Error::new(anyhow!("{}", e.1), crate::ErrorKind::Action))
}
}
/// Dispatch for how an action is implemented.
///
/// NOTE: this previously used `#[serde(rename = "kebab-case")]`, which renames
/// the *type* (a no-op here), not its variants; `rename_all` matches the
/// kebab-case convention used by every other serialized type in this module,
/// so the tag value is `docker` rather than `Docker`.
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[serde(tag = "type")]
pub enum ActionImplementation {
    Docker(DockerAction),
}
impl ActionImplementation {
    /// One-time setup for the implementation: builds the service's container
    /// at the given IP.
    pub async fn install(
        &self,
        pkg_id: &PackageId,
        pkg_version: &Version,
        volumes: &Volumes,
        ip: Ipv4Addr,
    ) -> Result<(), Error> {
        match self {
            Self::Docker(inner) => inner.create(pkg_id, pkg_version, volumes, ip).await,
        }
    }

    /// Runs the action with full access to volumes and hosts.
    pub async fn execute<I: Serialize, O: for<'de> Deserialize<'de>>(
        &self,
        pkg_id: &PackageId,
        pkg_version: &Version,
        volumes: &Volumes,
        hosts: &Hosts,
        input: Option<I>,
        allow_inject: bool,
    ) -> Result<Result<O, (i32, String)>, Error> {
        match self {
            Self::Docker(inner) => {
                inner
                    .execute(pkg_id, pkg_version, volumes, hosts, input, allow_inject)
                    .await
            }
        }
    }

    /// Runs the action in an isolated (no network, no volumes) container.
    pub async fn sandboxed<I: Serialize, O: for<'de> Deserialize<'de>>(
        &self,
        pkg_id: &PackageId,
        pkg_version: &Version,
        input: Option<I>,
    ) -> Result<Result<O, (i32, String)>, Error> {
        match self {
            Self::Docker(inner) => inner.sandboxed(pkg_id, pkg_version, input).await,
        }
    }
}

View File

@@ -1,116 +0,0 @@
use std::os::unix::process::ExitStatusExt;
use std::process::Stdio;
use linear_map::set::LinearSet;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, Error as IoError};
use yajrc::RpcError;
use crate::apps::DockerStatus;
/// RPC error code: app's current status is not in the action's `allowed_statuses`.
pub const STATUS_NOT_ALLOWED: i32 = -2;
/// RPC error code: the action's `command` list is empty.
pub const INVALID_COMMAND: i32 = -3;
/// (0.2.x) A user-invokable action declared in an app manifest.
#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct Action {
    pub id: String,
    pub name: String,
    pub description: String,
    #[serde(default)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub warning: Option<String>,
    /// Docker states in which the action may run.
    pub allowed_statuses: LinearSet<DockerStatus>,
    /// argv to run: first element is the entrypoint, the rest are arguments.
    pub command: Vec<String>,
}
/// Copies everything from `r` into `w` while also collecting the bytes read,
/// returning the collected buffer once `r` reaches EOF. `w` is flushed before
/// returning.
async fn tee<R: AsyncRead + Unpin, W: AsyncWrite + Unpin>(
    mut r: R,
    mut w: W,
) -> Result<Vec<u8>, IoError> {
    let mut collected = Vec::new();
    let mut chunk = vec![0; 2048];
    loop {
        let n = r.read(&mut chunk).await?;
        if n == 0 {
            break;
        }
        collected.extend_from_slice(&chunk[..n]);
        w.write_all(&chunk[..n]).await?;
    }
    w.flush().await?;
    Ok(collected)
}
impl Action {
    /// Runs this action against the installed app `app_id`.
    ///
    /// If the app's container is running, the command is `docker exec`ed in
    /// it; otherwise a one-shot `docker run --rm` container is spawned with
    /// the app's volume mounted. Returns the process's stdout on success, or
    /// an `RpcError` carrying its exit code and stderr on failure.
    pub async fn perform(&self, app_id: &str) -> Result<String, RpcError> {
        let man = crate::apps::manifest(app_id)
            .await
            .map_err(failure::Error::from)
            .map_err(failure::Error::compat)?;
        let status = crate::apps::status(app_id, true)
            .await
            .map_err(failure::Error::from)
            .map_err(failure::Error::compat)?
            .status;
        // Gate on the manifest-declared set of statuses this action permits.
        if !self.allowed_statuses.contains(&status) {
            return Err(RpcError {
                code: STATUS_NOT_ALLOWED,
                message: format!(
                    "{} is in status {:?} which is not allowed by {}",
                    app_id, status, self.id
                ),
                data: None,
            });
        }
        let mut cmd = if status == DockerStatus::Running {
            // Running container: exec in place.
            let mut cmd = tokio::process::Command::new("docker");
            cmd.arg("exec").arg(&app_id).args(&self.command);
            cmd
        } else {
            // Stopped: spin up a throwaway container with the app's volume.
            let mut cmd = tokio::process::Command::new("docker");
            // First element of `command` is the entrypoint; rest are args.
            let entrypoint = self.command.get(0).ok_or_else(|| RpcError {
                code: INVALID_COMMAND,
                message: "Command Cannot Be Empty".to_owned(),
                data: None,
            })?;
            cmd.arg("run")
                .arg("--rm")
                .arg("--name")
                .arg(format!("{}_{}", app_id, self.id))
                .arg("--mount")
                .arg(format!(
                    "type=bind,src={}/{},dst={}",
                    crate::VOLUMES,
                    app_id,
                    man.mount.display()
                ))
                .arg("--entrypoint")
                .arg(entrypoint)
                .arg(format!("start9/{}", app_id))
                .args(&self.command[1..]);
            // TODO: 0.3.0: net, tor, shm
            cmd
        };
        cmd.stdout(Stdio::piped());
        cmd.stderr(Stdio::piped());
        let mut child = cmd.spawn()?;
        // Drain both pipes concurrently so neither fills and deadlocks.
        let (stdout, stderr) = futures::try_join!(
            tee(child.stdout.take().unwrap(), tokio::io::sink()),
            tee(child.stderr.take().unwrap(), tokio::io::sink())
        )?;
        let status = child.wait().await?;
        if status.success() {
            String::from_utf8(stdout).map_err(From::from)
        } else {
            Err(RpcError {
                // Unix convention: signal terminations map to 128 + signal.
                code: status
                    .code()
                    .unwrap_or_else(|| status.signal().unwrap_or(0) + 128),
                message: String::from_utf8(stderr)?,
                data: None,
            })
        }
    }
}

View File

@@ -1,457 +0,0 @@
use failure::ResultExt as _;
use futures::future::{BoxFuture, FutureExt, OptionFuture};
use linear_map::{set::LinearSet, LinearMap};
use rand::SeedableRng;
use crate::dependencies::AppDependencies;
use crate::manifest::{Manifest, ManifestLatest};
use crate::util::Apply;
use crate::util::{from_yaml_async_reader, PersistencePath, YamlUpdateHandle};
use crate::Error;
use crate::ResultExt as _;
/// (0.2.x) Docker container state as mapped from `docker inspect` output
/// (serialized SCREAMING_SNAKE_CASE).
#[derive(Clone, Copy, Debug, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum DockerStatus {
    Running,
    Stopped, // created || exited
    Paused,
    Restarting,
    Removing,
    Dead,
}
/// Logical negation by reference, for `#[serde(skip_serializing_if = "not")]`.
fn not(b: &bool) -> bool {
    !*b
}
/// Summary entry for an installed app, persisted in apps.yaml.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct AppInfo {
    pub title: String,
    pub version: emver::Version,
    pub tor_address: Option<String>,
    /// Whether the app has a saved configuration.
    pub configured: bool,
    /// Skipped in YAML when false (see `not`).
    #[serde(default)]
    #[serde(skip_serializing_if = "not")]
    pub recoverable: bool,
    /// Set when a restart is pending; skipped in YAML when false.
    #[serde(default)]
    #[serde(skip_serializing_if = "not")]
    pub needs_restart: bool,
}
/// Current container status of an app.
#[derive(Clone, Debug, serde::Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct AppStatus {
    pub status: DockerStatus,
}
/// An app's config spec, rules, and (optionally) its saved config.
#[derive(Debug, serde::Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct AppConfig {
    pub spec: crate::config::ConfigSpec,
    pub rules: Vec<crate::config::ConfigRuleEntry>,
    pub config: Option<crate::config::Config>,
}
/// [`AppInfo`] plus the optional sections callers may request.
#[derive(Debug, serde::Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct AppInfoFull {
    #[serde(flatten)]
    pub info: AppInfo,
    #[serde(flatten)]
    #[serde(skip_serializing_if = "Option::is_none")]
    pub status: Option<AppStatus>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub manifest: Option<ManifestLatest>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub config: Option<AppConfig>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub dependencies: Option<AppDependencies>,
}
/// Reads the full apps.yaml registry; an absent file yields an empty map.
pub async fn list_info() -> Result<LinearMap<String, AppInfo>, Error> {
    let apps_path = PersistencePath::from_ref("apps.yaml");
    let mut f = match apps_path.maybe_read(false).await.transpose()? {
        Some(a) => a,
        None => return Ok(LinearMap::new()),
    };
    from_yaml_async_reader(&mut *f).await
}
/// Opens apps.yaml for read-modify-write; call `.commit()` to persist changes.
pub async fn list_info_mut() -> Result<YamlUpdateHandle<LinearMap<String, AppInfo>>, Error> {
    let apps_path = PersistencePath::from_ref("apps.yaml");
    YamlUpdateHandle::new_or_default(apps_path).await
}
/// Registers (or replaces) an app's info entry in apps.yaml.
pub async fn add(id: &str, info: AppInfo) -> Result<(), failure::Error> {
    let mut registry = list_info_mut().await?;
    registry.insert(id.to_owned(), info);
    registry.commit().await?;
    Ok(())
}
/// Sets the `configured` flag for an installed app in apps.yaml.
///
/// Errors with NOT_FOUND if the app is not installed.
pub async fn set_configured(id: &str, configured: bool) -> Result<(), Error> {
    let mut apps = list_info_mut().await?;
    // `get_mut` already yields `&mut AppInfo`; the binding itself does not
    // need to be `mut` (the original `let mut app` triggered unused_mut).
    let app = apps
        .get_mut(id)
        .ok_or_else(|| failure::format_err!("App Not Installed: {}", id))
        .with_code(crate::error::NOT_FOUND)?;
    app.configured = configured;
    apps.commit().await?;
    Ok(())
}
/// Sets the `needs_restart` flag for an installed app in apps.yaml.
///
/// Errors with NOT_FOUND if the app is not installed.
pub async fn set_needs_restart(id: &str, needs_restart: bool) -> Result<(), Error> {
    let mut apps = list_info_mut().await?;
    // `get_mut` already yields `&mut AppInfo`; the binding itself does not
    // need to be `mut` (the original `let mut app` triggered unused_mut).
    let app = apps
        .get_mut(id)
        .ok_or_else(|| failure::format_err!("App Not Installed: {}", id))
        .with_code(crate::error::NOT_FOUND)?;
    app.needs_restart = needs_restart;
    apps.commit().await?;
    Ok(())
}
/// Sets the `recoverable` flag for an installed app in apps.yaml.
///
/// Errors with NOT_FOUND if the app is not installed.
pub async fn set_recoverable(id: &str, recoverable: bool) -> Result<(), Error> {
    let mut apps = list_info_mut().await?;
    // `get_mut` already yields `&mut AppInfo`; the binding itself does not
    // need to be `mut` (the original `let mut app` triggered unused_mut).
    let app = apps
        .get_mut(id)
        .ok_or_else(|| failure::format_err!("App Not Installed: {}", id))
        .with_code(crate::error::NOT_FOUND)?;
    app.recoverable = recoverable;
    apps.commit().await?;
    Ok(())
}
/// Deletes an app's entry from apps.yaml (no-op if absent).
pub async fn remove(id: &str) -> Result<(), failure::Error> {
    let mut registry = list_info_mut().await?;
    registry.remove(id);
    registry.commit().await?;
    Ok(())
}
/// Queries docker for the container's state and maps it to [`DockerStatus`].
///
/// When `remap_crashed` is set, an `exited` container still listed in
/// running.yaml (i.e. one that *should* be up) is reported as `Restarting`
/// instead of `Stopped`.
///
/// NOTE(review): uses blocking `std::process` inside an async fn — consider
/// `tokio::process`.
pub async fn status(id: &str, remap_crashed: bool) -> Result<AppStatus, Error> {
    let output = std::process::Command::new("docker")
        .args(&["inspect", id, "--format", "{{.State.Status}}"])
        .stdout(std::process::Stdio::piped())
        .stderr(match log::max_level() {
            // Suppress docker's stderr noise at the quietest log level.
            log::LevelFilter::Error => std::process::Stdio::null(),
            _ => std::process::Stdio::inherit(),
        })
        .spawn()?
        .wait_with_output()?;
    crate::ensure_code!(
        output.status.success(),
        crate::error::DOCKER_ERROR,
        "{}: Docker Error: {}",
        id,
        std::str::from_utf8(&output.stderr).no_code()?
    );
    let status = std::str::from_utf8(&output.stdout).no_code()?;
    Ok(AppStatus {
        status: match status.trim() {
            "running" => DockerStatus::Running,
            "restarting" => DockerStatus::Restarting,
            "removing" => DockerStatus::Removing,
            "dead" => DockerStatus::Dead,
            // NOTE(review): this guard contains `.await`, which Rust does not
            // normally permit inside match guards — confirm this compiles as
            // written.
            "exited"
                if remap_crashed && {
                    let path = PersistencePath::from_ref("running.yaml");
                    if let Some(mut f) = path.maybe_read(false).await.transpose()? {
                        let running: Vec<String> = from_yaml_async_reader(&mut *f).await?;
                        running.iter().filter(|a| a.as_str() == id).next().is_some()
                    } else {
                        false
                    }
                } =>
            {
                DockerStatus::Restarting
            }
            "created" | "exited" => DockerStatus::Stopped,
            "paused" => DockerStatus::Paused,
            _ => Err(format_err!("unknown status: {}", status))?,
        },
    })
}
/// Loads an app's manifest from persistence and upgrades it to the latest
/// manifest schema.
pub async fn manifest(id: &str) -> Result<ManifestLatest, Error> {
    let path = PersistencePath::from_ref("apps")
        .join(id)
        .join("manifest.yaml");
    let mut file = path.read(false).await?;
    let manifest: Manifest = from_yaml_async_reader(&mut *file).await?;
    Ok(manifest.into_latest())
}
/// Loads an app's config spec, rules, and (optionally) its current config.
///
/// If no saved config exists under persistence, falls back to a
/// `start9/config.yaml` the service wrote into its volume, copying it into
/// persistence for next time. In production builds an unreadable config is
/// treated as absent rather than an error.
pub async fn config(id: &str) -> Result<AppConfig, Error> {
    let spec = PersistencePath::from_ref("apps")
        .join(id)
        .join("config_spec.yaml");
    let spec: crate::config::ConfigSpec =
        crate::util::from_yaml_async_reader(&mut *spec.read(false).await?)
            .await
            .no_code()?;
    let rules = PersistencePath::from_ref("apps")
        .join(id)
        .join("config_rules.yaml");
    let rules: Vec<crate::config::ConfigRuleEntry> =
        crate::util::from_yaml_async_reader(&mut *rules.read(false).await?)
            .await
            .no_code()?;
    let config = PersistencePath::from_ref("apps")
        .join(id)
        .join("config.yaml");
    let config: Option<crate::config::Config> = match config
        .maybe_read(false)
        .await
        .transpose()?
        .map(|mut f| async move { from_yaml_async_reader(&mut *f).await })
        .apply(OptionFuture::from)
        .await
    {
        Some(Ok(cfg)) => Some(cfg),
        // Dev builds surface parse errors instead of masking them.
        #[cfg(not(feature = "production"))]
        Some(Err(e)) => return Err(e),
        _ => {
            // No saved config (or, in production, an unreadable one): check
            // whether the service produced one in its own volume.
            let volume_config = std::path::Path::new(crate::VOLUMES)
                .join(id)
                .join("start9")
                .join("config.yaml");
            if volume_config.exists() {
                // Cache the volume config into persistence for next time.
                let cfg_path = config.path();
                tokio::fs::copy(&volume_config, &cfg_path)
                    .await
                    .with_context(|e| {
                        format!(
                            "{}: {} -> {}",
                            e,
                            volume_config.display(),
                            cfg_path.display()
                        )
                    })
                    .with_code(crate::error::FILESYSTEM_ERROR)?;
                let mut f = tokio::fs::File::open(&volume_config)
                    .await
                    .with_context(|e| format!("{}: {}", e, volume_config.display()))
                    .with_code(crate::error::FILESYSTEM_ERROR)?;
                match from_yaml_async_reader(&mut f).await {
                    Ok(a) => Some(a),
                    #[cfg(not(feature = "production"))]
                    Err(e) => return Err(e),
                    #[cfg(feature = "production")]
                    _ => None,
                }
            } else {
                None
            }
        }
    };
    Ok(AppConfig {
        spec,
        rules,
        config,
    })
}
pub async fn config_or_default(id: &str) -> Result<crate::config::Config, Error> {
let config = config(id).await?;
Ok(if let Some(config) = config.config {
config
} else {
config
.spec
.gen(&mut rand::rngs::StdRng::from_entropy(), &None)
.with_code(crate::error::CFG_SPEC_VIOLATION)?
})
}
pub async fn info(id: &str) -> Result<AppInfo, Error> {
list_info()
.await
.map_err(Error::from)?
.get(id)
.ok_or_else(|| Error::new(failure::format_err!("{} is not installed", id), Some(6)))
.map(Clone::clone)
}
/// Assembles an [`AppInfoFull`] for `id`, fetching each optional section
/// (status, manifest, config, dependencies) only when requested.
pub async fn info_full(
    id: &str,
    with_status: bool,
    with_manifest: bool,
    with_config: bool,
    with_dependencies: bool,
) -> Result<AppInfoFull, Error> {
    Ok(AppInfoFull {
        info: info(id).await?,
        status: if with_status {
            Some(status(id, true).await?)
        } else {
            None
        },
        manifest: if with_manifest {
            Some(manifest(id).await?)
        } else {
            None
        },
        config: if with_config {
            Some(config(id).await?)
        } else {
            None
        },
        dependencies: if with_dependencies {
            Some(dependencies(id, true).await?)
        } else {
            None
        },
    })
}
/// Checks dependency satisfaction for `id_version` (`"<id>"` or `"<id>@<range>"`).
///
/// Uses the locally installed manifest/config when the installed version
/// satisfies the range; otherwise (unless `local_only`) falls back to the
/// registry. The app's current config — or a generated default — is used to
/// evaluate conditional dependencies.
pub async fn dependencies(id_version: &str, local_only: bool) -> Result<AppDependencies, Error> {
    let mut id_version_iter = id_version.split("@");
    let id = id_version_iter.next().unwrap();
    // A missing "@range" suffix means any version.
    let version_range = id_version_iter
        .next()
        .map(|a| a.parse::<emver::VersionRange>())
        .transpose()
        .with_context(|e| format!("Failed to Parse Version Requirement: {}", e))
        .no_code()?
        .unwrap_or_else(emver::VersionRange::any);
    let (manifest, config_info) = match list_info().await?.get(id) {
        Some(info) if info.version.satisfies(&version_range) => {
            futures::try_join!(manifest(id), config(id))?
        }
        _ if !local_only => futures::try_join!(
            crate::registry::manifest(id, &version_range),
            crate::registry::config(id, &version_range)
        )?,
        _ => {
            return Err(failure::format_err!("App Not Installed: {}", id))
                .with_code(crate::error::NOT_FOUND)
        }
    };
    let config = if let Some(cfg) = config_info.config {
        cfg
    } else {
        config_info
            .spec
            .gen(&mut rand::rngs::StdRng::from_entropy(), &None)
            .unwrap_or_default()
    };
    crate::dependencies::check_dependencies(manifest, &config, &config_info.spec).await
}
/// Returns the set of installed apps that depend on `id`, optionally
/// following the dependency graph transitively.
pub async fn dependents(id: &str, transitive: bool) -> Result<LinearSet<String>, Error> {
    // Recursive walk; boxed because async fns cannot be directly recursive.
    pub fn dependents_rec<'a>(
        id: &'a str,
        transitive: bool,
        res: &'a mut LinearSet<String>,
    ) -> BoxFuture<'a, Result<(), Error>> {
        async move {
            for (app_id, _) in list_info().await? {
                let manifest = manifest(&app_id).await?;
                match manifest.dependencies.0.get(id) {
                    // `res` doubles as the visited set, preventing cycles.
                    Some(info) if !res.contains(&app_id) => {
                        let config_info = config(&app_id).await?;
                        let config = if let Some(cfg) = config_info.config {
                            cfg
                        } else {
                            config_info
                                .spec
                                .gen(&mut rand::rngs::StdRng::from_entropy(), &None)
                                .unwrap_or_default()
                        };
                        // Optional deps only count when the current config
                        // actually requires them.
                        if info.optional.is_none() || config_info.spec.requires(&id, &config) {
                            res.insert(app_id.clone());
                            if transitive {
                                dependents_rec(&app_id, true, res).await?;
                            }
                        }
                    }
                    _ => (),
                }
            }
            Ok(())
        }
        .boxed()
    }
    let mut res = LinearSet::new();
    dependents_rec(id, transitive, &mut res).await?;
    Ok(res)
}
/// Lists every installed app as an [`AppInfoFull`], fetching the optional
/// sections concurrently for each app.
pub async fn list(
    with_status: bool,
    with_manifest: bool,
    with_config: bool,
    with_dependencies: bool,
) -> Result<LinearMap<String, AppInfoFull>, Error> {
    let info = list_info().await?;
    futures::future::join_all(info.into_iter().map(move |(id, info)| async move {
        // Each section is an OptionFuture so unrequested ones resolve to None
        // without doing any work.
        let (status, manifest, config, dependencies) = futures::try_join!(
            OptionFuture::from(if with_status {
                Some(status(&id, true))
            } else {
                None
            })
            .map(Option::transpose),
            OptionFuture::from(if with_manifest {
                Some(manifest(&id))
            } else {
                None
            })
            .map(Option::transpose),
            OptionFuture::from(if with_config { Some(config(&id)) } else { None })
                .map(Option::transpose),
            OptionFuture::from(if with_dependencies {
                Some(dependencies(&id, true))
            } else {
                None
            })
            .map(Option::transpose)
        )?;
        Ok((
            id,
            AppInfoFull {
                info,
                status,
                manifest,
                config,
                dependencies,
            },
        ))
    }))
    .await
    .into_iter()
    .collect()
}
/// Streams an app's instructions.md to stdout.
///
/// Errors with NOT_FOUND when the app has no instructions file.
pub async fn print_instructions(id: &str) -> Result<(), Error> {
    if let Some(file) = PersistencePath::from_ref("apps")
        .join(id)
        .join("instructions.md")
        .maybe_read(false)
        .await
    {
        use tokio::io::AsyncWriteExt;
        let mut stdout = tokio::io::stdout();
        tokio::io::copy(&mut *file?, &mut stdout)
            .await
            .with_code(crate::error::FILESYSTEM_ERROR)?;
        stdout
            .flush()
            .await
            .with_code(crate::error::FILESYSTEM_ERROR)?;
        stdout
            .shutdown()
            .await
            .with_code(crate::error::FILESYSTEM_ERROR)?;
        Ok(())
    } else {
        Err(failure::format_err!("No Instructions: {}", id)).with_code(crate::error::NOT_FOUND)
    }
}

View File

@@ -1,286 +0,0 @@
use std::os::unix::process::ExitStatusExt;
use std::path::Path;
use argon2::Config;
use emver::Version;
use futures::try_join;
use futures::TryStreamExt;
use rand::Rng;
use serde::Serialize;
use crate::util::from_yaml_async_reader;
use crate::util::to_yaml_async_writer;
use crate::util::Invoke;
use crate::util::PersistencePath;
use crate::version::VersionT;
use crate::Error;
use crate::ResultExt;
/// Metadata written alongside a backup (metadata.yaml): the app version that
/// was backed up and the OS version that produced it.
#[derive(Debug, Clone, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct Metadata {
    pub app_version: Version,
    pub os_version: &'static Version,
}
/// Backs up an app's volume and tor hidden-service directory into `path`
/// using duplicity, encrypted with `password`.
///
/// Layout under `path` (must be an existing directory): metadata.yaml,
/// password (argon2 hash), data/ (volume), tor/ (hidden service). If a
/// password hash already exists, `password` must verify against it. A running
/// app is paused for the duration and resumed (or restarted if flagged
/// needs_restart) afterwards.
pub async fn create_backup<P: AsRef<Path>>(
    path: P,
    app_id: &str,
    password: &str,
) -> Result<(), Error> {
    let path = tokio::fs::canonicalize(path).await?;
    crate::ensure_code!(
        path.is_dir(),
        crate::error::FILESYSTEM_ERROR,
        "Backup Path Must Be Directory"
    );
    let metadata_path = path.join("metadata.yaml");
    let pw_path = path.join("password");
    let data_path = path.join("data");
    let tor_path = path.join("tor");
    let volume_path = Path::new(crate::VOLUMES).join(app_id);
    let hidden_service_path =
        Path::new(crate::tor::HIDDEN_SERVICE_DIR_ROOT).join(format!("app-{}", app_id));
    // Verify against any existing password hash so an old backup cannot be
    // overwritten with a different password.
    if pw_path.exists() {
        use tokio::io::AsyncReadExt;
        let mut f = tokio::fs::File::open(&pw_path).await?;
        let mut hash = String::new();
        f.read_to_string(&mut hash).await?;
        crate::ensure_code!(
            argon2::verify_encoded(&hash, password.as_bytes())
                .with_code(crate::error::INVALID_BACKUP_PASSWORD)?,
            crate::error::INVALID_BACKUP_PASSWORD,
            "Invalid Backup Decryption Password"
        );
    }
    {
        // save password
        use tokio::io::AsyncWriteExt;
        let salt = rand::thread_rng().gen::<[u8; 32]>();
        let hash = argon2::hash_encoded(password.as_bytes(), &salt, &Config::default()).unwrap(); // this is safe because apparently the API was poorly designed
        let mut f = tokio::fs::File::create(pw_path).await?;
        f.write_all(hash.as_bytes()).await?;
        f.flush().await?;
    }
    let info = crate::apps::info(app_id).await?;
    to_yaml_async_writer(
        tokio::fs::File::create(metadata_path).await?,
        &Metadata {
            app_version: info.version,
            os_version: crate::version::Current::new().semver(),
        },
    )
    .await?;
    let status = crate::apps::status(app_id, false).await?;
    // Honor the app's .backupignore; "!"-prefixed lines are re-includes.
    let exclude = if volume_path.is_dir() {
        let ignore_path = volume_path.join(".backupignore");
        if ignore_path.is_file() {
            use tokio::io::AsyncBufReadExt;
            tokio::io::BufReader::new(tokio::fs::File::open(ignore_path).await?)
                .lines()
                .try_filter(|l| futures::future::ready(!l.is_empty()))
                .try_collect()
                .await?
        } else {
            Vec::new()
        }
    } else {
        return Err(format_err!("Volume For {} Does Not Exist", app_id))
            .with_code(crate::error::NOT_FOUND);
    };
    let running = status.status == crate::apps::DockerStatus::Running;
    // Pause while copying to get a consistent snapshot.
    if running {
        crate::control::pause_app(&app_id).await?;
    }
    let mut data_cmd = tokio::process::Command::new("duplicity");
    for exclude in exclude {
        if exclude.starts_with('!') {
            data_cmd.arg(format!(
                "--include={}",
                volume_path.join(exclude.trim_start_matches('!')).display()
            ));
        } else {
            data_cmd.arg(format!("--exclude={}", volume_path.join(exclude).display()));
        }
    }
    let data_res = data_cmd
        .env("PASSPHRASE", password)
        .arg(volume_path)
        .arg(format!("file://{}", data_path.display()))
        .invoke("Duplicity")
        .await;
    let tor_res = tokio::process::Command::new("duplicity")
        .env("PASSPHRASE", password)
        .arg(hidden_service_path)
        .arg(format!("file://{}", tor_path.display()))
        .invoke("Duplicity")
        .await;
    // Resume the app before propagating any duplicity error.
    if running {
        if crate::apps::info(&app_id).await?.needs_restart {
            crate::control::restart_app(&app_id).await?;
        } else {
            crate::control::resume_app(&app_id).await?;
        }
    }
    data_res?;
    tor_res?;
    Ok(())
}
/// Restores an app's data volume and tor hidden-service state from a
/// duplicity backup directory previously produced by `create_backup`.
///
/// Steps: verify the backup password (if a hash is present), stop the app
/// if it is running, restore the data volume and hidden-service dir
/// concurrently, fix the recorded tor address, best-effort re-apply the
/// restored config, then restart tor and reload nginx.
///
/// NOTE(review): the app is not restarted after a successful restore —
/// presumably the caller (or the user) is expected to start it; confirm.
pub async fn restore_backup<P: AsRef<Path>>(
    path: P,
    app_id: &str,
    password: &str,
) -> Result<(), Error> {
    let path = tokio::fs::canonicalize(path).await?;
    crate::ensure_code!(
        path.is_dir(),
        crate::error::FILESYSTEM_ERROR,
        "Backup Path Must Be Directory"
    );
    // Expected layout of the backup directory.
    let metadata_path = path.join("metadata.yaml");
    let pw_path = path.join("password");
    let data_path = path.join("data");
    let tor_path = path.join("tor");
    let volume_path = Path::new(crate::VOLUMES).join(app_id);
    let hidden_service_path =
        Path::new(crate::tor::HIDDEN_SERVICE_DIR_ROOT).join(format!("app-{}", app_id));
    // If the backup recorded an argon2 password hash, the supplied password
    // must verify against it before anything is touched.
    if pw_path.exists() {
        use tokio::io::AsyncReadExt;
        let mut f = tokio::fs::File::open(&pw_path).await?;
        let mut hash = String::new();
        f.read_to_string(&mut hash).await?;
        crate::ensure_code!(
            argon2::verify_encoded(&hash, password.as_bytes())
                .with_code(crate::error::INVALID_BACKUP_PASSWORD)?,
            crate::error::INVALID_BACKUP_PASSWORD,
            "Invalid Backup Decryption Password"
        );
    }
    // Stop the app so its volume is not written to mid-restore.
    let status = crate::apps::status(app_id, false).await?;
    let running = status.status == crate::apps::DockerStatus::Running;
    if running {
        crate::control::stop_app(app_id, true, false).await?;
    }
    // Restore data volume and hidden-service dir concurrently with
    // duplicity; PASSPHRASE decrypts the archives, --force allows
    // overwriting existing files.
    let mut data_cmd = tokio::process::Command::new("duplicity");
    data_cmd
        .env("PASSPHRASE", password)
        .arg("--force")
        .arg(format!("file://{}", data_path.display()))
        .arg(&volume_path);
    let mut tor_cmd = tokio::process::Command::new("duplicity");
    tor_cmd
        .env("PASSPHRASE", password)
        .arg("--force")
        .arg(format!("file://{}", tor_path.display()))
        .arg(&hidden_service_path);
    let (data_output, tor_output) = try_join!(data_cmd.status(), tor_cmd.status())?;
    crate::ensure_code!(
        data_output.success(),
        crate::error::GENERAL_ERROR,
        "Duplicity Error"
    );
    crate::ensure_code!(
        tor_output.success(),
        crate::error::GENERAL_ERROR,
        "Duplicity Error"
    );
    // Fix the tor address in apps.yaml
    let mut yhdl = crate::apps::list_info_mut().await?;
    if let Some(app_info) = yhdl.get_mut(app_id) {
        app_info.tor_address = Some(crate::tor::read_tor_address(app_id, None).await?);
    }
    yhdl.commit().await?;
    // Expose the backup's metadata to the app as start9/restore.yaml.
    tokio::fs::copy(
        metadata_path,
        Path::new(crate::VOLUMES)
            .join(app_id)
            .join("start9")
            .join("restore.yaml"),
    )
    .await?;
    // Attempt to configure the service with the config coming from restoration
    let cfg_path = Path::new(crate::VOLUMES)
        .join(app_id)
        .join("start9")
        .join("config.yaml");
    if cfg_path.exists() {
        let cfg = from_yaml_async_reader(tokio::fs::File::open(cfg_path).await?).await?;
        // Best effort: a config failure should not fail the whole restore.
        if let Err(e) = crate::config::configure(app_id, cfg, None, false).await {
            log::warn!("Could not restore backup configuration: {}", e);
        }
    }
    crate::tor::restart().await?;
    // Delete the fullchain certificate, so it can be regenerated with the restored tor pubkey address
    PersistencePath::from_ref("apps")
        .join(&app_id)
        .join("cert-local.fullchain.crt.pem")
        .delete()
        .await?;
    crate::tor::write_lan_services(
        &crate::tor::services_map(&PersistencePath::from_ref(crate::SERVICES_YAML)).await?,
    )
    .await?;
    // NOTE(review): blocking std::process::Command inside an async fn —
    // presumably acceptable for an infrequent restore path; confirm.
    let svc_exit = std::process::Command::new("service")
        .args(&["nginx", "reload"])
        .status()?;
    crate::ensure_code!(
        svc_exit.success(),
        crate::error::GENERAL_ERROR,
        "Failed to Reload Nginx: {}",
        svc_exit
            .code()
            // Convention: signal termination reported as 128 + signal number.
            .or_else(|| { svc_exit.signal().map(|a| 128 + a) })
            .unwrap_or(0)
    );
    Ok(())
}
pub async fn backup_to_partition(
logicalname: &str,
app_id: &str,
password: &str,
) -> Result<(), Error> {
let backup_mount_path = Path::new(crate::BACKUP_MOUNT_POINT);
let guard = crate::disks::MountGuard::new(logicalname, &backup_mount_path).await?;
let backup_dir_path = backup_mount_path.join(crate::BACKUP_DIR).join(app_id);
tokio::fs::create_dir_all(&backup_dir_path).await?;
let res = create_backup(backup_dir_path, app_id, password).await;
guard.unmount().await?;
res
}
pub async fn restore_from_partition(
logicalname: &str,
app_id: &str,
password: &str,
) -> Result<(), Error> {
let backup_mount_path = Path::new(crate::BACKUP_MOUNT_POINT);
let guard = crate::disks::MountGuard::new(logicalname, &backup_mount_path).await?;
let backup_dir_path = backup_mount_path.join(crate::BACKUP_DIR).join(app_id);
let res = restore_backup(backup_dir_path, app_id, password).await;
guard.unmount().await?;
res
}

51
appmgr/src/backup/mod.rs Normal file
View File

@@ -0,0 +1,51 @@
use anyhow::anyhow;
use patch_db::HasModel;
use serde::{Deserialize, Serialize};
use crate::action::ActionImplementation;
use crate::net::host::Hosts;
use crate::s9pk::manifest::PackageId;
use crate::util::Version;
use crate::volume::{Volume, VolumeId, Volumes};
use crate::{Error, ResultExt};
/// The pair of actions a package declares for backup support.
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
pub struct BackupActions {
    // Action invoked to write the package's state into the backup volume.
    pub create: ActionImplementation,
    // Action invoked to repopulate the package's volumes from a backup.
    pub restore: ActionImplementation,
}
impl BackupActions {
    /// Runs the package's `create` backup action with the backup volume
    /// mounted read-write and every other volume mounted read-only.
    /// An in-action failure surfaces as `ErrorKind::Backup`.
    pub async fn backup(
        &self,
        pkg_id: &PackageId,
        pkg_version: &Version,
        volumes: &Volumes,
        hosts: &Hosts,
    ) -> Result<(), Error> {
        let mut backup_volumes = volumes.to_readonly();
        backup_volumes.insert(VolumeId::Backup, Volume::Backup { readonly: false });
        let action_res = self
            .create
            .execute(pkg_id, pkg_version, &backup_volumes, hosts, None::<()>, false)
            .await?;
        action_res
            .map_err(|e| anyhow!("{}", e.1))
            .with_kind(crate::ErrorKind::Backup)?;
        Ok(())
    }

    /// Runs the package's `restore` action with the backup volume mounted
    /// read-only alongside the package's normal (writable) volumes.
    /// An in-action failure surfaces as `ErrorKind::Restore`.
    pub async fn restore(
        &self,
        pkg_id: &PackageId,
        pkg_version: &Version,
        volumes: &Volumes,
        hosts: &Hosts,
    ) -> Result<(), Error> {
        let mut restore_volumes = volumes.clone();
        restore_volumes.insert(VolumeId::Backup, Volume::Backup { readonly: true });
        let action_res = self
            .restore
            .execute(pkg_id, pkg_version, &restore_volumes, hosts, None::<()>, false)
            .await?;
        action_res
            .map_err(|e| anyhow!("{}", e.1))
            .with_kind(crate::ErrorKind::Restore)?;
        Ok(())
    }
}

View File

@@ -0,0 +1,28 @@
use clap::Arg;
use embassy::context::{CliContext, EitherContext};
use embassy::Error;
use rpc_toolkit::run_cli;
/// Builds and runs the CLI against `embassy::main_api`, adding global
/// --host/-h and --port/-p options used to address the RPC server.
fn inner_main() -> Result<(), Error> {
    simple_logging::log_to_stderr(log::LevelFilter::Info);
    run_cli!(
        embassy::main_api,
        app => app
            .arg(Arg::with_name("host").long("host").short("h").takes_value(true))
            .arg(Arg::with_name("port").long("port").short("p").takes_value(true)),
        matches => EitherContext::Cli(CliContext::init(matches)?),
        // Negative RPC codes are not valid process exit codes; collapse to 1.
        |code| if code < 0 { 1 } else { code }
    )
}
fn main() {
    // On failure: print the error, then exit with the error kind as the
    // status code. `source` is dropped explicitly because `exit` does not
    // run destructors.
    if let Err(e) = inner_main() {
        eprintln!("{}", e.source);
        log::debug!("{:?}", e.source);
        drop(e.source);
        std::process::exit(e.kind as i32)
    }
}

View File

@@ -0,0 +1,22 @@
use embassy::Error;
/// Early-boot initialization: mounts the encrypted OS data partition.
async fn inner_main() -> Result<(), Error> {
    // os sync
    // NOTE(review): device path is hard-coded to /dev/sda — presumably the
    // Embassy's data drive; confirm this holds on all target hardware.
    embassy::volume::disk::mount("/dev/sda", "/mnt/embassy-os-crypt").await?;
    Ok(())
}
fn main() {
    let rt = tokio::runtime::Runtime::new().expect("failed to initialize runtime");
    if let Err(e) = rt.block_on(inner_main()) {
        // Tear the runtime down before exiting so background tasks stop.
        drop(rt);
        eprintln!("{}", e.source);
        log::debug!("{:?}", e.source);
        // `exit` skips destructors, so release the error source explicitly.
        drop(e.source);
        std::process::exit(e.kind as i32)
    }
}

View File

@@ -0,0 +1,28 @@
use clap::Arg;
use embassy::context::{CliContext, EitherContext};
use embassy::Error;
use rpc_toolkit::run_cli;
/// Builds and runs the SDK CLI against `embassy::portable_api`, adding
/// global --host/-h and --port/-p options used to address the RPC server.
fn inner_main() -> Result<(), Error> {
    simple_logging::log_to_stderr(log::LevelFilter::Info);
    run_cli!(
        embassy::portable_api,
        app => app
            .arg(Arg::with_name("host").long("host").short("h").takes_value(true))
            .arg(Arg::with_name("port").long("port").short("p").takes_value(true)),
        matches => EitherContext::Cli(CliContext::init(matches)?),
        // Negative RPC codes are not valid process exit codes; collapse to 1.
        |code| if code < 0 { 1 } else { code }
    )
}
fn main() {
    // On failure: print the error, then exit with the error kind as the
    // status code. `source` is dropped explicitly because `exit` does not
    // run destructors.
    if let Err(e) = inner_main() {
        eprintln!("{}", e.source);
        log::debug!("{:?}", e.source);
        drop(e.source);
        std::process::exit(e.kind as i32)
    }
}

View File

@@ -0,0 +1,84 @@
use std::time::Duration;
use embassy::context::{EitherContext, RpcContext};
use embassy::db::model::Database;
use embassy::status::{check_all, synchronize_all};
use embassy::util::daemon;
use embassy::{Error, ErrorKind};
use futures::TryFutureExt;
use patch_db::json_ptr::JsonPointer;
use rpc_toolkit::hyper::StatusCode;
use rpc_toolkit::rpc_server;
/// Maps every JSON-RPC result code to HTTP 200: errors travel in the
/// JSON-RPC response body, not in the HTTP status line.
fn status_fn(_code: i32) -> StatusCode {
    StatusCode::OK
}
/// Main embassyd entry: seeds the database on first boot, serves the
/// JSON-RPC API, and runs the status-sync and health-check daemons,
/// returning the first error any of them produces.
async fn inner_main() -> Result<(), Error> {
    simple_logging::log_to_stderr(log::LevelFilter::Info);
    let rpc_ctx = RpcContext::init().await?;
    // First boot: the patch-db root is empty, so seed the initial layout.
    if !rpc_ctx.db.exists(&<JsonPointer>::default()).await? {
        rpc_ctx
            .db
            .put(&<JsonPointer>::default(), &Database::init(), None)
            .await?;
    }
    let ctx = EitherContext::Rpc(rpc_ctx.clone());
    let server = rpc_server!(embassy::main_api, ctx, status_fn);
    // Daemon: periodically synchronize the recorded status of all packages.
    let status_ctx = rpc_ctx.clone();
    let status_daemon = daemon(
        move || {
            let ctx = status_ctx.clone();
            async move {
                if let Err(e) = synchronize_all(&ctx).await {
                    log::error!("Error in Status Sync daemon: {}", e);
                    log::debug!("{:?}", e);
                } else {
                    log::info!("Status Sync completed successfully");
                }
            }
        },
        Duration::from_millis(500),
    );
    // Daemon: periodically run every package's health checks.
    let health_ctx = rpc_ctx.clone();
    let health_daemon = daemon(
        move || {
            let ctx = health_ctx.clone();
            async move {
                if let Err(e) = check_all(&ctx).await {
                    log::error!("Error in Health Check daemon: {}", e);
                    log::debug!("{:?}", e);
                } else {
                    log::info!("Health Check completed successfully");
                }
            }
        },
        Duration::from_millis(500),
    );
    // Run server and both daemons until the first failure; the daemon
    // futures only resolve if their task panics.
    futures::try_join!(
        server.map_err(|e| Error::new(e, ErrorKind::Network)),
        status_daemon.map_err(|e| Error::new(
            e.context("Status Sync daemon panicked!"),
            ErrorKind::Unknown
        )),
        health_daemon.map_err(|e| Error::new(
            e.context("Health Check daemon panicked!"),
            ErrorKind::Unknown
        )),
    )?;
    Ok(())
}
fn main() {
    let rt = tokio::runtime::Runtime::new().expect("failed to initialize runtime");
    if let Err(e) = rt.block_on(inner_main()) {
        // Tear the runtime down before exiting so background tasks stop.
        drop(rt);
        eprintln!("{}", e.source);
        log::debug!("{:?}", e.source);
        // `exit` skips destructors, so release the error source explicitly.
        drop(e.source);
        std::process::exit(e.kind as i32)
    }
}

View File

@@ -0,0 +1,81 @@
use anyhow::anyhow;
use indexmap::{IndexMap, IndexSet};
use nix::sys::signal::Signal;
use patch_db::HasModel;
use serde::{Deserialize, Serialize};
use super::{Config, ConfigSpec};
use crate::action::ActionImplementation;
use crate::dependencies::Dependencies;
use crate::net::host::Hosts;
use crate::s9pk::manifest::PackageId;
use crate::status::health_check::HealthCheckId;
use crate::util::Version;
use crate::volume::Volumes;
use crate::Error;
/// Result of a package's `config get` action: the current config (if one
/// has ever been set) plus the spec new configs must conform to.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct ConfigRes {
    // `None` when the package has not yet been configured.
    pub config: Option<Config>,
    pub spec: ConfigSpec,
}
/// The pair of actions a package declares for configuration support.
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
pub struct ConfigActions {
    // Action that returns the current config and its spec.
    pub get: ActionImplementation,
    // Action that applies a new config to the package.
    pub set: ActionImplementation,
}
impl ConfigActions {
pub async fn get(
&self,
pkg_id: &PackageId,
pkg_version: &Version,
volumes: &Volumes,
hosts: &Hosts,
) -> Result<ConfigRes, Error> {
self.get
.execute(pkg_id, pkg_version, volumes, hosts, None::<()>, false)
.await
.and_then(|res| {
res.map_err(|e| Error::new(anyhow!("{}", e.1), crate::ErrorKind::ConfigGen))
})
}
pub async fn set(
&self,
pkg_id: &PackageId,
pkg_version: &Version,
dependencies: &Dependencies,
volumes: &Volumes,
hosts: &Hosts,
input: &Config,
) -> Result<SetResult, Error> {
let res: SetResult = self
.set
.execute(pkg_id, pkg_version, volumes, hosts, Some(input), false)
.await
.and_then(|res| {
res.map_err(|e| {
Error::new(anyhow!("{}", e.1), crate::ErrorKind::ConfigRulesViolation)
})
})?;
Ok(SetResult {
signal: res.signal,
depends_on: res
.depends_on
.into_iter()
.filter(|(pkg, _)| dependencies.0.contains_key(pkg))
.collect(),
})
}
}
/// Outcome of a successful `config set` action.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct SetResult {
    // Signal to send the running container so it picks up the new config;
    // `None` means no signal is required. Serialized by name via the custom
    // (de)serializers below.
    #[serde(deserialize_with = "crate::util::deserialize_from_str_opt")]
    #[serde(serialize_with = "crate::util::serialize_display_opt")]
    pub signal: Option<Signal>,
    // Per-dependency health checks this configuration now relies on.
    pub depends_on: IndexMap<PackageId, IndexSet<HealthCheckId>>,
}

View File

@@ -1,56 +1,81 @@
use std::borrow::Cow;
use std::path::Path;
use std::time::Duration;
use failure::ResultExt as _;
use anyhow::anyhow;
use bollard::container::KillContainerOptions;
use bollard::Docker;
use futures::future::{BoxFuture, FutureExt};
use indexmap::{IndexMap, IndexSet};
use itertools::Itertools;
use linear_map::{set::LinearSet, LinearMap};
use patch_db::DbHandle;
use rand::SeedableRng;
use regex::Regex;
use rpc_toolkit::command;
use serde_json::Value;
use crate::dependencies::{DependencyError, TaggedDependencyError};
use crate::util::PersistencePath;
use crate::util::{from_yaml_async_reader, to_yaml_async_writer};
use crate::ResultExt as _;
use crate::action::docker::DockerAction;
use crate::config::spec::PackagePointerSpecVariant;
use crate::context::{EitherContext, ExtendedContext};
use crate::db::model::{CurrentDependencyInfo, InstalledPackageDataEntryModel};
use crate::db::util::WithRevision;
use crate::dependencies::{BreakageRes, DependencyError, TaggedDependencyError};
use crate::net::host::Hosts;
use crate::s9pk::manifest::PackageId;
use crate::util::{
display_none, display_serializable, parse_duration, parse_stdin_deserializable, IoFormat,
};
use crate::{Error, ResultExt as _};
pub mod rules;
pub mod action;
pub mod spec;
pub mod util;
pub mod value;
pub use rules::{ConfigRuleEntry, ConfigRuleEntryWithSuggestions};
pub use spec::{ConfigSpec, Defaultable};
use util::NumRange;
pub use value::Config;
#[derive(Debug, Fail)]
use self::action::ConfigRes;
use self::spec::{PackagePointerSpec, ValueSpecPointer};
pub type Config = serde_json::Map<String, Value>;
/// Human-readable JSON type names, used in config validation messages.
pub trait TypeOf {
    fn type_of(&self) -> &'static str;
}
impl TypeOf for Value {
    /// Names the JSON value's type; arrays are reported as "list" to match
    /// the config-spec vocabulary.
    fn type_of(&self) -> &'static str {
        match self {
            Value::Null => "null",
            Value::Bool(_) => "boolean",
            Value::Number(_) => "number",
            Value::String(_) => "string",
            Value::Array(_) => "list",
            Value::Object(_) => "object",
        }
    }
}
#[derive(Debug, thiserror::Error)]
pub enum ConfigurationError {
#[fail(display = "Timeout Error")]
TimeoutError,
#[fail(display = "No Match: {}", _0)]
NoMatch(NoMatchWithPath),
#[fail(display = "Invalid Variant: {}", _0)]
InvalidVariant(String),
#[fail(display = "System Error: {}", _0)]
SystemError(crate::Error),
#[error("Timeout Error")]
TimeoutError(#[from] TimeoutError),
#[error("No Match: {0}")]
NoMatch(#[from] NoMatchWithPath),
#[error("System Error: {0}")]
SystemError(Error),
}
impl From<TimeoutError> for ConfigurationError {
fn from(_: TimeoutError) -> Self {
ConfigurationError::TimeoutError
}
}
impl From<NoMatchWithPath> for ConfigurationError {
fn from(e: NoMatchWithPath) -> Self {
ConfigurationError::NoMatch(e)
impl From<ConfigurationError> for Error {
    fn from(err: ConfigurationError) -> Self {
        // System errors keep their original kind; everything else is
        // reported as a config generation failure.
        let kind = if let ConfigurationError::SystemError(e) = &err {
            e.kind
        } else {
            crate::ErrorKind::ConfigGen
        };
        crate::Error::new(err, kind)
    }
}
#[derive(Clone, Copy, Debug, Fail)]
#[fail(display = "Timeout Error")]
#[derive(Clone, Copy, Debug, thiserror::Error)]
#[error("Timeout Error")]
pub struct TimeoutError;
#[derive(Clone, Debug, Fail)]
#[derive(Clone, Debug, thiserror::Error)]
pub struct NoMatchWithPath {
pub path: Vec<String>,
pub error: MatchError,
@@ -72,256 +97,504 @@ impl std::fmt::Display for NoMatchWithPath {
write!(f, "{}: {}", self.path.iter().rev().join("."), self.error)
}
}
// Route spec-mismatch errors through ConfigurationError so they pick up
// that conversion's error-kind mapping.
impl From<NoMatchWithPath> for Error {
    fn from(e: NoMatchWithPath) -> Self {
        ConfigurationError::from(e).into()
    }
}
#[derive(Clone, Debug, Fail)]
#[derive(Clone, Debug, thiserror::Error)]
pub enum MatchError {
#[fail(display = "String {:?} Does Not Match Pattern {}", _0, _1)]
#[error("String {0:?} Does Not Match Pattern {1}")]
Pattern(String, Regex),
#[fail(display = "String {:?} Is Not In Enum {:?}", _0, _1)]
Enum(String, LinearSet<String>),
#[fail(display = "Field Is Not Nullable")]
#[error("String {0:?} Is Not In Enum {1:?}")]
Enum(String, IndexSet<String>),
#[error("Field Is Not Nullable")]
NotNullable,
#[fail(display = "Length Mismatch: expected {}, actual: {}", _0, _1)]
#[error("Length Mismatch: expected {0}, actual: {1}")]
LengthMismatch(NumRange<usize>, usize),
#[fail(display = "Invalid Type: expected {}, actual: {}", _0, _1)]
#[error("Invalid Type: expected {0}, actual: {1}")]
InvalidType(&'static str, &'static str),
#[fail(display = "Number Out Of Range: expected {}, actual: {}", _0, _1)]
#[error("Number Out Of Range: expected {0}, actual: {1}")]
OutOfRange(NumRange<f64>, f64),
#[fail(display = "Number Is Not Integral: {}", _0)]
#[error("Number Is Not Integral: {0}")]
NonIntegral(f64),
#[fail(display = "Variant {:?} Is Not In Union {:?}", _0, _1)]
Union(String, LinearSet<String>),
#[fail(display = "Variant Is Missing Tag {:?}", _0)]
#[error("Variant {0:?} Is Not In Union {1:?}")]
Union(String, IndexSet<String>),
#[error("Variant Is Missing Tag {0:?}")]
MissingTag(String),
#[fail(
display = "Property {:?} Of Variant {:?} Conflicts With Union Tag",
_0, _1
)]
#[error("Property {0:?} Of Variant {1:?} Conflicts With Union Tag")]
PropertyMatchesUnionTag(String, String),
#[fail(display = "Name of Property {:?} Conflicts With Map Tag Name", _0)]
#[error("Name of Property {0:?} Conflicts With Map Tag Name")]
PropertyNameMatchesMapTag(String),
#[fail(display = "Pointer Is Invalid: {}", _0)]
#[error("Pointer Is Invalid: {0}")]
InvalidPointer(spec::ValueSpecPointer),
#[fail(display = "Object Key Is Invalid: {}", _0)]
#[error("Object Key Is Invalid: {0}")]
InvalidKey(String),
#[fail(display = "Value In List Is Not Unique")]
#[error("Value In List Is Not Unique")]
ListUniquenessViolation,
}
#[derive(Clone, Debug, Default, serde::Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct ConfigurationRes {
pub changed: LinearMap<String, Config>,
pub needs_restart: LinearSet<String>,
pub stopped: LinearMap<String, TaggedDependencyError>,
#[command(subcommands(get, set))]
pub fn config(
#[context] ctx: EitherContext,
#[arg] id: PackageId,
) -> Result<ExtendedContext<EitherContext, PackageId>, Error> {
Ok(ExtendedContext::from(ctx).map(|_| id))
}
// returns apps with changed configurations
pub async fn configure(
name: &str,
config: Option<Config>,
timeout: Option<Duration>,
dry_run: bool,
) -> Result<ConfigurationRes, crate::Error> {
async fn handle_broken_dependent(
name: &str,
dependent: String,
dry_run: bool,
res: &mut ConfigurationRes,
error: DependencyError,
) -> Result<(), crate::Error> {
crate::control::stop_dependents(
&dependent,
dry_run,
DependencyError::NotRunning,
&mut res.stopped,
)
#[command(display(display_serializable))]
pub async fn get(
#[context] ctx: ExtendedContext<EitherContext, PackageId>,
#[allow(unused_variables)]
#[arg(long = "format")]
format: Option<IoFormat>,
) -> Result<ConfigRes, Error> {
let mut db = ctx.base().as_rpc().unwrap().db.handle();
let pkg_model = crate::db::DatabaseModel::new()
.package_data()
.idx_model(ctx.extension())
.and_then(|m| m.installed())
.expect(&mut db)
.await
.with_kind(crate::ErrorKind::NotFound)?;
let action = pkg_model
.clone()
.manifest()
.config()
.get(&mut db)
.await?
.to_owned()
.ok_or_else(|| {
Error::new(
anyhow!("{} has no config", ctx.extension()),
crate::ErrorKind::NotFound,
)
})?;
let version = pkg_model.clone().manifest().version().get(&mut db).await?;
let volumes = pkg_model.manifest().volumes().get(&mut db).await?;
let hosts = crate::db::DatabaseModel::new()
.network()
.hosts()
.get(&mut db)
.await?;
if crate::apps::status(&dependent, false).await?.status
!= crate::apps::DockerStatus::Stopped
{
crate::control::stop_app(&dependent, false, dry_run).await?;
res.stopped.insert(
// TODO: maybe don't do this if its not running
dependent,
TaggedDependencyError {
dependency: name.to_owned(),
error,
},
);
}
Ok(())
}
fn configure_rec<'a>(
name: &'a str,
config: Option<Config>,
timeout: Option<Duration>,
dry_run: bool,
res: &'a mut ConfigurationRes,
) -> BoxFuture<'a, Result<Config, crate::Error>> {
async move {
let info = crate::apps::list_info()
.await?
.remove(name)
.ok_or_else(|| failure::format_err!("{} is not installed", name))
.with_code(crate::error::NOT_FOUND)?;
let mut rng = rand::rngs::StdRng::from_entropy();
let spec_path = PersistencePath::from_ref("apps")
.join(name)
.join("config_spec.yaml");
let rules_path = PersistencePath::from_ref("apps")
.join(name)
.join("config_rules.yaml");
let config_path = PersistencePath::from_ref("apps")
.join(name)
.join("config.yaml");
let spec: ConfigSpec =
from_yaml_async_reader(&mut *spec_path.read(false).await?).await?;
let rules: Vec<ConfigRuleEntry> =
from_yaml_async_reader(&mut *rules_path.read(false).await?).await?;
let old_config: Option<Config> =
if let Some(mut f) = config_path.maybe_read(false).await.transpose()? {
Some(from_yaml_async_reader(&mut *f).await?)
action
.get(ctx.extension(), &*version, &*volumes, &*hosts)
.await
}
#[command(subcommands(self(set_impl(async)), set_dry), display(display_none))]
pub fn set(
#[context] ctx: ExtendedContext<EitherContext, PackageId>,
#[allow(unused_variables)]
#[arg(long = "format")]
format: Option<IoFormat>,
#[arg(long = "timeout", parse(parse_duration))] timeout: Option<Duration>,
#[arg(stdin, parse(parse_stdin_deserializable))] config: Option<Config>,
#[arg(rename = "expire-id", long = "expire-id")] expire_id: Option<String>,
) -> Result<
ExtendedContext<EitherContext, (PackageId, Option<Config>, Option<Duration>, Option<String>)>,
Error,
> {
Ok(ctx.map(|id| (id, config, timeout, expire_id)))
}
#[command(display(display_serializable))]
pub async fn set_dry(
#[context] ctx: ExtendedContext<
EitherContext,
(PackageId, Option<Config>, Option<Duration>, Option<String>),
>,
) -> Result<BreakageRes, Error> {
let (ctx, (id, config, timeout, _)) = ctx.split();
let rpc_ctx = ctx.as_rpc().unwrap();
let mut db = rpc_ctx.db.handle();
let hosts = crate::db::DatabaseModel::new()
.network()
.hosts()
.get(&mut db)
.await?;
let mut tx = db.begin().await?;
let mut breakages = IndexMap::new();
configure(
&mut tx,
&rpc_ctx.docker,
&*hosts,
&id,
config,
&timeout,
true,
&mut IndexMap::new(),
&mut breakages,
)
.await?;
crate::db::DatabaseModel::new()
.package_data()
.idx_model(&id)
.expect(&mut tx)
.await?
.installed()
.expect(&mut tx)
.await?
.status()
.configured()
.put(&mut tx, &true)
.await?;
Ok(BreakageRes {
patch: tx.abort().await?,
breakages,
})
}
pub async fn set_impl(
ctx: ExtendedContext<
EitherContext,
(PackageId, Option<Config>, Option<Duration>, Option<String>),
>,
) -> Result<WithRevision<()>, Error> {
let (ctx, (id, config, timeout, expire_id)) = ctx.split();
let rpc_ctx = ctx.as_rpc().unwrap();
let mut db = rpc_ctx.db.handle();
let hosts = crate::db::DatabaseModel::new()
.network()
.hosts()
.get(&mut db)
.await?;
let mut tx = db.begin().await?;
let mut breakages = IndexMap::new();
configure(
&mut tx,
&rpc_ctx.docker,
&*hosts,
&id,
config,
&timeout,
false,
&mut IndexMap::new(),
&mut breakages,
)
.await?;
crate::db::DatabaseModel::new()
.package_data()
.idx_model(&id)
.expect(&mut tx)
.await?
.installed()
.expect(&mut tx)
.await?
.status()
.configured()
.put(&mut tx, &true)
.await?;
Ok(WithRevision {
response: (),
revision: tx.commit(expire_id).await?,
})
}
pub fn configure<'a, Db: DbHandle>(
db: &'a mut Db,
docker: &'a Docker,
hosts: &'a Hosts,
id: &'a PackageId,
config: Option<Config>,
timeout: &'a Option<Duration>,
dry_run: bool,
overrides: &'a mut IndexMap<PackageId, Config>,
breakages: &'a mut IndexMap<PackageId, TaggedDependencyError>,
) -> BoxFuture<'a, Result<(), Error>> {
async move {
// fetch data from db
let pkg_model = crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.and_then(|m| m.installed())
.expect(db)
.await
.with_kind(crate::ErrorKind::NotFound)?;
let action = pkg_model
.clone()
.manifest()
.config()
.get(db)
.await?
.to_owned()
.ok_or_else(|| {
Error::new(anyhow!("{} has no config", id), crate::ErrorKind::NotFound)
})?;
let version = pkg_model.clone().manifest().version().get(db).await?;
let dependencies = pkg_model.clone().manifest().dependencies().get(db).await?;
let volumes = pkg_model.clone().manifest().volumes().get(db).await?;
// get current config and current spec
let ConfigRes {
config: old_config,
spec,
} = action.get(id, &*version, &*volumes, &*hosts).await?;
// determine new config to use
let mut config = if let Some(config) = config.or_else(|| old_config.clone()) {
config
} else {
spec.gen(&mut rand::rngs::StdRng::from_entropy(), timeout)?
};
spec.matches(&config)?; // check that new config matches spec
spec.update(db, &*overrides, &mut config).await?; // dereference pointers in the new config
// create backreferences to pointers
let mut sys = pkg_model.clone().system_pointers().get_mut(db).await?;
sys.truncate(0);
let mut current_dependencies: IndexMap<PackageId, CurrentDependencyInfo> = dependencies
.0
.iter()
.filter_map(|(id, info)| {
if info.optional.is_none() {
Some((id.clone(), CurrentDependencyInfo::default()))
} else {
None
};
let mut config = if let Some(cfg) = config {
cfg
} else {
if let Some(old) = &old_config {
old.clone()
} else {
spec.gen(&mut rng, &timeout)
.with_code(crate::error::CFG_SPEC_VIOLATION)?
}
};
spec.matches(&config)
.with_code(crate::error::CFG_SPEC_VIOLATION)?;
spec.update(&mut config)
.await
.with_code(crate::error::CFG_SPEC_VIOLATION)?;
let mut cfgs = LinearMap::new();
cfgs.insert(name, Cow::Borrowed(&config));
for rule in rules {
rule.check(&config, &cfgs)
.with_code(crate::error::CFG_RULES_VIOLATION)?;
})
.collect();
for ptr in spec.pointers(&config)? {
match ptr {
ValueSpecPointer::Package(PackagePointerSpec { package_id, target }) => {
if let Some(current_dependency) = current_dependencies.get_mut(&package_id) {
current_dependency.pointers.push(target);
} else {
current_dependencies.insert(
package_id,
CurrentDependencyInfo {
pointers: vec![target],
health_checks: IndexSet::new(),
},
);
}
}
ValueSpecPointer::System(s) => sys.push(s),
}
match old_config {
Some(old) if &old == &config && info.configured && !info.recoverable => {
return Ok(config)
}
sys.save(db).await?;
let signal = if !dry_run {
// run config action
let res = action
.set(id, &*version, &*dependencies, &*volumes, hosts, &config)
.await?;
// track dependencies with no pointers
for (package_id, health_checks) in res.depends_on.into_iter() {
if let Some(current_dependency) = current_dependencies.get_mut(&package_id) {
current_dependency.health_checks.extend(health_checks);
} else {
current_dependencies.insert(
package_id,
CurrentDependencyInfo {
pointers: Vec::new(),
health_checks,
},
);
}
_ => (),
};
res.changed.insert(name.to_owned(), config.clone());
for dependent in crate::apps::dependents(name, false).await? {
match configure_rec(&dependent, None, timeout, dry_run, res).await {
Ok(dependent_config) => {
let man = crate::apps::manifest(&dependent).await?;
if let Some(dep_info) = man.dependencies.0.get(name) {
match dep_info
.satisfied(
name,
Some(config.clone()),
&dependent,
&dependent_config,
)
}
// track dependency health checks
let mut deps = pkg_model.clone().current_dependencies().get_mut(db).await?;
*deps = current_dependencies.clone();
deps.save(db).await?;
res.signal
} else {
None
};
// update dependencies
for (dependency, dep_info) in current_dependencies {
if let Some(dependency_model) = crate::db::DatabaseModel::new()
.package_data()
.idx_model(&dependency)
.and_then(|pkg| pkg.installed())
.check(db)
.await?
{
dependency_model
.current_dependents()
.idx_model(id)
.put(db, &dep_info)
.await?;
}
}
// cache current config for dependents
overrides.insert(id.clone(), config.clone());
// handle dependents
let dependents = pkg_model.clone().current_dependents().get(db).await?;
let prev = old_config.map(Value::Object).unwrap_or_default();
let next = Value::Object(config.clone());
for (dependent, dep_info) in &*dependents {
fn handle_broken_dependents<'a, Db: DbHandle>(
db: &'a mut Db,
id: &'a PackageId,
dependency: &'a PackageId,
model: InstalledPackageDataEntryModel,
error: DependencyError,
breakages: &'a mut IndexMap<PackageId, TaggedDependencyError>,
) -> BoxFuture<'a, Result<(), Error>> {
async move {
let mut status = model.clone().status().get_mut(db).await?;
let old = status.dependency_errors.0.remove(id);
let newly_broken = old.is_none();
status.dependency_errors.0.insert(
id.clone(),
if let Some(old) = old {
old.merge_with(error.clone())
} else {
error.clone()
},
);
if newly_broken {
breakages.insert(
id.clone(),
TaggedDependencyError {
dependency: dependency.clone(),
error: error.clone(),
},
);
if status.main.running() {
if model
.clone()
.manifest()
.dependencies()
.idx_model(dependency)
.expect(db)
.await?
.get(db)
.await?
.critical
{
Ok(_) => (),
Err(e) => {
handle_broken_dependent(name, dependent, dry_run, res, e)
status.main.stop();
let dependents = model.current_dependents().get(db).await?;
for (dependent, _) in &*dependents {
let dependent_model = crate::db::DatabaseModel::new()
.package_data()
.idx_model(dependent)
.and_then(|pkg| pkg.installed())
.expect(db)
.await?;
handle_broken_dependents(
db,
dependent,
id,
dependent_model,
DependencyError::NotRunning,
breakages,
)
.await?;
}
}
}
}
Err(e) => {
if e.code == Some(crate::error::CFG_RULES_VIOLATION)
|| e.code == Some(crate::error::CFG_SPEC_VIOLATION)
{
if !dry_run {
crate::apps::set_configured(&dependent, false).await?;
status.save(db).await?;
Ok(())
}
.boxed()
}
// check if config passes dependent check
let dependent_model = crate::db::DatabaseModel::new()
.package_data()
.idx_model(dependent)
.and_then(|pkg| pkg.installed())
.expect(db)
.await?;
if let Some(cfg) = &*dependent_model
.clone()
.manifest()
.dependencies()
.idx_model(id)
.expect(db)
.await?
.config()
.get(db)
.await?
{
let version = dependent_model.clone().manifest().version().get(db).await?;
if let Err(error) = cfg.check(dependent, &*version, &config).await? {
let dep_err = DependencyError::ConfigUnsatisfied { error };
handle_broken_dependents(
db,
dependent,
id,
dependent_model,
dep_err,
breakages,
)
.await?;
}
// handle backreferences
for ptr in &dep_info.pointers {
if let PackagePointerSpecVariant::Config { selector, multi } = ptr {
if selector.select(*multi, &next) != selector.select(*multi, &prev) {
if let Err(e) = configure(
db, docker, hosts, dependent, None, timeout, dry_run, overrides,
breakages,
)
.await
{
if e.kind == crate::ErrorKind::ConfigRulesViolation {
let dependent_model = crate::db::DatabaseModel::new()
.package_data()
.idx_model(dependent)
.and_then(|pkg| pkg.installed())
.expect(db)
.await?;
handle_broken_dependents(
db,
dependent,
id,
dependent_model,
DependencyError::ConfigUnsatisfied {
error: format!("{}", e),
},
breakages,
)
.await?;
} else {
return Err(e);
}
}
handle_broken_dependent(
name,
dependent,
dry_run,
res,
DependencyError::PointerUpdateError(format!("{}", e)),
)
.await?;
} else {
handle_broken_dependent(
name,
dependent,
dry_run,
res,
DependencyError::Other(format!("{}", e)),
)
.await?;
}
}
}
}
if !dry_run {
let mut file = config_path.write(None).await?;
to_yaml_async_writer(file.as_mut(), &config).await?;
file.commit().await?;
let volume_config = Path::new(crate::VOLUMES)
.join(name)
.join("start9")
.join("config.yaml");
tokio::fs::copy(config_path.path(), &volume_config)
.await
.with_context(|e| {
format!(
"{}: {} -> {}",
e,
config_path.path().display(),
volume_config.display()
)
})
.with_code(crate::error::FILESYSTEM_ERROR)?;
crate::apps::set_configured(name, true).await?;
crate::apps::set_recoverable(name, false).await?;
}
if crate::apps::status(name, false).await?.status != crate::apps::DockerStatus::Stopped
{
if !dry_run {
crate::apps::set_needs_restart(name, true).await?;
}
res.needs_restart.insert(name.to_string());
}
Ok(config)
}
.boxed()
}
let mut res = ConfigurationRes::default();
configure_rec(name, config, timeout, dry_run, &mut res).await?;
Ok(res)
}
pub async fn remove(name: &str) -> Result<(), crate::Error> {
let config_path = PersistencePath::from_ref("apps")
.join(name)
.join("config.yaml")
.path();
if config_path.exists() {
tokio::fs::remove_file(&config_path)
.await
.with_context(|e| format!("{}: {}", e, config_path.display()))
.with_code(crate::error::FILESYSTEM_ERROR)?;
if let Some(signal) = signal {
docker
.kill_container(
&DockerAction::container_name(id, &*version),
Some(KillContainerOptions {
signal: signal.to_string(),
}),
)
.await
// ignore container is not running https://docs.docker.com/engine/api/v1.41/#operation/ContainerKill
.or_else(|e| {
if matches!(
e,
bollard::errors::Error::DockerResponseConflictError { .. }
) {
Ok(())
} else {
Err(e)
}
})?;
}
Ok(())
}
let volume_config = Path::new(crate::VOLUMES)
.join(name)
.join("start9")
.join("config.yaml");
if volume_config.exists() {
tokio::fs::remove_file(&volume_config)
.await
.with_context(|e| format!("{}: {}", e, volume_config.display()))
.with_code(crate::error::FILESYSTEM_ERROR)?;
}
crate::apps::set_configured(name, false).await?;
Ok(())
.boxed()
}

View File

@@ -1,76 +0,0 @@
// Grammar for the config rule language: boolean/numeric/string expressions
// over config values addressed by dotted identifiers.

// Numbers: optional fractional part and optional exponent; ints may be signed.
num = @{ int ~ ("." ~ ASCII_DIGIT*)? ~ (^"e" ~ int)? }
int = @{ ("+" | "-")? ~ ASCII_DIGIT+ }

// Double-quoted strings with backslash escapes.
raw_string = @{ (!("\\" | "\"") ~ ANY)+ }
predefined = @{ "n" | "r" | "t" | "\\" | "0" | "\"" | "'" }
escape = @{ "\\" ~ predefined }
str = @{ "\"" ~ (raw_string | escape)* ~ "\"" }

// Identifier paths: `a.b[0].*` style accessors into the config tree.
ident_char = @{ ASCII_ALPHANUMERIC | "-" }
sub_ident = _{ sub_ident_regular | sub_ident_index | sub_ident_any | sub_ident_all | sub_ident_fn }
sub_ident_regular = { sub_ident_regular_base | sub_ident_regular_expr }
sub_ident_regular_base = @{ ASCII_ALPHA ~ ident_char* }
sub_ident_regular_expr = ${ "[" ~ str_expr ~ "]" }
sub_ident_index = { sub_ident_index_base | sub_ident_index_expr }
sub_ident_index_base = @{ ASCII_DIGIT+ }
sub_ident_index_expr = ${ "[" ~ num_expr ~ "]" }
sub_ident_any = @{ "*" }
sub_ident_all = @{ "&" }
sub_ident_fn = ${ "[" ~ list_access_function ~ "]"}

// List accessors: first/last/any/all with a per-element predicate.
list_access_function = _{ list_access_function_first | list_access_function_last | list_access_function_any | list_access_function_all }
list_access_function_first = !{ "first" ~ "(" ~ sub_ident_regular ~ "=>" ~ bool_expr ~ ")" }
list_access_function_last = !{ "last" ~ "(" ~ sub_ident_regular ~ "=>" ~ bool_expr ~ ")" }
list_access_function_any = !{ "any" ~ "(" ~ sub_ident_regular ~ "=>" ~ bool_expr ~ ")" }
list_access_function_all = !{ "all" ~ "(" ~ sub_ident_regular ~ "=>" ~ bool_expr ~ ")" }
app_id = ${ "[" ~ sub_ident_regular ~ "]" }
ident = _{ (app_id ~ ".")? ~ sub_ident_regular ~ ("." ~ sub_ident)* }

// Typed variable references: `x?` (bool), `#x` (number), `'x` (string).
bool_var = ${ ident ~ "?" }
num_var = ${ "#" ~ ident }
str_var = ${ "'" ~ ident }
any_var = ${ ident }

bool_op = _{ and | or | xor }
and = { "AND" }
or = { "OR" }
xor = { "XOR" }

// NOTE: in PEG ordered choice, the first matching alternative wins and is not
// revisited when the rest of the enclosing sequence fails. The longer
// operators therefore MUST precede their one-character prefixes ("<=" before
// "<", ">=" before ">"); with the previous `lt | lte | ... | gt | gte` order,
// `<=` and `>=` could never match.
num_cmp_op = _{ lte | lt | eq | neq | gte | gt }
str_cmp_op = _{ lte | lt | eq | neq | gte | gt }
lt = { "<" }
lte = { "<=" }
eq = { "=" }
neq = { "!=" }
gt = { ">" }
gte = { ">=" }

num_op = _{ add | sub | mul | div | pow }
str_op = _{ add }
add = { "+" }
sub = { "-" }
mul = { "*" }
div = { "/" }
pow = { "^" }

// Expressions: flat operator lists; precedence is presumably handled by the
// consuming Rust code (e.g. a precedence climber) — confirm in the parser.
num_expr = !{ num_term ~ (num_op ~ num_term)* }
num_term = _{ num | num_var | "(" ~ num_expr ~ ")" }
str_expr = !{ str_term ~ (str_op ~ str_term)* }
str_term = _{ str | str_var | "(" ~ str_expr ~ ")" }
num_cmp_expr = { num_expr ~ num_cmp_op ~ num_expr }
str_cmp_expr = { str_expr ~ str_cmp_op ~ str_expr }
bool_expr = !{ bool_term ~ (bool_op ~ bool_term)* }
inv_bool_expr = { "!(" ~ bool_expr ~ ")" }
bool_term = _{ bool_var | "(" ~ bool_expr ~ ")" | inv_bool_expr | num_cmp_expr | str_cmp_expr }
val_expr = _{ any_var | str_expr | num_expr | bool_expr }

// Entry points (SOI/EOI anchor a full-input parse).
rule = _{ SOI ~ bool_expr ~ EOI }
reference = _{ SOI ~ any_var ~ EOI }
value = _{ SOI ~ val_expr ~ EOI }
del_action = _{ SOI ~ "FROM" ~ any_var ~ "AS" ~ sub_ident_regular ~ "WHERE" ~ bool_expr ~ EOI }
obj_key = _{ SOI ~ sub_ident_regular ~ EOI }
WHITESPACE = _{ " " | "\t" }

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@@ -1,12 +1,12 @@
use std::ops::Bound;
use std::ops::RangeBounds;
use std::ops::RangeInclusive;
use std::ops::{Bound, RangeBounds, RangeInclusive};
use rand::{distributions::Distribution, Rng};
use rand::distributions::Distribution;
use rand::Rng;
use serde_json::Value;
use super::value::Config;
use super::Config;
pub const STATIC_NULL: super::value::Value = super::value::Value::Null;
pub const STATIC_NULL: Value = Value::Null;
#[derive(Clone, Debug)]
pub struct CharSet(pub Vec<(RangeInclusive<char>, usize)>, usize);
@@ -15,7 +15,7 @@ impl CharSet {
self.0.iter().any(|r| r.0.contains(c))
}
pub fn gen<R: Rng>(&self, rng: &mut R) -> char {
let mut idx = rng.gen_range(0, self.1);
let mut idx = rng.gen_range(0..self.1);
for r in &self.0 {
if idx < r.1 {
return std::convert::TryFrom::try_from(
@@ -282,7 +282,7 @@ impl UniqueBy {
match self {
UniqueBy::Any(any) => any.iter().any(|u| u.eq(lhs, rhs)),
UniqueBy::All(all) => all.iter().all(|u| u.eq(lhs, rhs)),
UniqueBy::Exactly(key) => lhs.0.get(key) == rhs.0.get(key),
UniqueBy::Exactly(key) => lhs.get(key) == rhs.get(key),
UniqueBy::NotUnique => false,
}
}

View File

@@ -1,66 +0,0 @@
use linear_map::LinearMap;
/// An app configuration document: an insertion-ordered map of keys to
/// dynamically typed values.
#[derive(Clone, Debug, Default, PartialEq, serde::Serialize, serde::Deserialize)]
pub struct Config(pub LinearMap<String, Value>);
impl Config {
    /// Recursively merges `other` into `self`: nested objects merge
    /// key-by-key, lists are concatenated, and any other collision (missing
    /// key or mismatched types) is overwritten wholesale by `other`'s value.
    pub fn merge_with(&mut self, other: Config) {
        for (key, val) in other.0.into_iter() {
            // The outer match only *borrows* `val` (so `self.0` can stay
            // mutably borrowed); the inner matches re-extract ownership.
            match (self.0.get_mut(&key), &val) {
                (Some(Value::Object(l_obj)), Value::Object(_)) => {
                    // gross, I know. https://github.com/rust-lang/rust/issues/45600
                    let r_obj = match val {
                        Value::Object(r_obj) => r_obj,
                        _ => unreachable!(),
                    };
                    l_obj.merge_with(r_obj)
                }
                (Some(Value::List(l_vec)), Value::List(_)) => {
                    let mut r_vec = match val {
                        Value::List(r_vec) => r_vec,
                        _ => unreachable!(),
                    };
                    l_vec.append(&mut r_vec);
                }
                _ => {
                    // Key absent or type mismatch: take `other`'s value as-is.
                    self.0.insert(key, val);
                }
            }
        }
    }
}
/// Serializes a float as an integer when it is a whole number that is exactly
/// representable in both `f64` and `i64`; otherwise emits it as a float.
fn serialize_num<S: serde::Serializer>(num: &f64, serializer: S) -> Result<S::Ok, S::Error> {
    // Largest magnitude below which every integer is exact in an f64.
    let limit = (1_i64 << f64::MANTISSA_DIGITS) as f64;
    let n = *num;
    let is_whole = n.trunc() == n;
    if is_whole && n < limit && n > -limit {
        serializer.serialize_i64(n as i64)
    } else {
        // Covers fractional values, out-of-range values, NaN, and infinities.
        serializer.serialize_f64(n)
    }
}
/// A dynamically typed configuration value (YAML/JSON-like).
///
/// `untagged`: variants are distinguished purely by their serialized shape.
#[derive(Clone, Debug, PartialEq, serde::Serialize, serde::Deserialize)]
#[serde(untagged)]
pub enum Value {
    String(String),
    // Whole numbers round-trip as integers via `serialize_num`.
    #[serde(serialize_with = "serialize_num")]
    Number(f64),
    Bool(bool),
    List(Vec<Value>),
    Object(Config),
    Null,
}
impl Value {
pub fn type_of(&self) -> &'static str {
match self {
Value::String(_) => "string",
Value::Number(_) => "number",
Value::Bool(_) => "boolean",
Value::List(_) => "list",
Value::Object(_) => "object",
Value::Null => "null",
}
}
}

135
appmgr/src/context/cli.rs Normal file
View File

@@ -0,0 +1,135 @@
use std::fs::File;
use std::net::IpAddr;
use std::path::Path;
use std::sync::Arc;
use clap::ArgMatches;
use reqwest::Proxy;
use rpc_toolkit::reqwest::{Client, Url};
use rpc_toolkit::url::Host;
use rpc_toolkit::Context;
use serde::Deserialize;
use super::rpc::RpcContextConfig;
use crate::ResultExt;
/// CLI-side configuration as read from the config file; every field is
/// optional and may be overridden by command-line flags in `CliContext::init`.
#[derive(Debug, Default, Deserialize)]
pub struct CliContextConfig {
    // Custom deserializer: `url::Host` has no `Deserialize` impl.
    #[serde(deserialize_with = "deserialize_host")]
    pub host: Option<Host>,
    pub port: Option<u16>,
    #[serde(deserialize_with = "crate::util::deserialize_from_str_opt")]
    pub proxy: Option<Url>,
    // The server config shares the same file; `flatten` reads its keys
    // from the same top level.
    #[serde(flatten)]
    pub server_config: RpcContextConfig,
}
/// Resolved CLI state shared behind an `Arc`.
#[derive(Debug)]
pub struct CliContextSeed {
    pub host: Host,
    pub port: u16,
    pub client: Client,
}
/// Cheaply clonable rpc-toolkit context for the command-line client.
#[derive(Debug, Clone)]
pub struct CliContext(Arc<CliContextSeed>);
impl CliContext {
    /// Builds the CLI context from, in increasing precedence: the on-disk
    /// config file, the server `bind` address embedded in that config, and
    /// the `host`/`port`/`proxy` command-line flags.
    ///
    /// Falls back to 127.0.0.1:5959 when nothing specifies a target.
    pub fn init(matches: &ArgMatches) -> Result<Self, crate::Error> {
        let cfg_path = Path::new(crate::CONFIG_PATH);
        // Layer 1: config file, if present.
        let mut base = if cfg_path.exists() {
            serde_yaml::from_reader(
                File::open(cfg_path)
                    .with_ctx(|_| (crate::ErrorKind::Filesystem, cfg_path.display().to_string()))?,
            )
            .with_kind(crate::ErrorKind::Deserialization)?
        } else {
            CliContextConfig::default()
        };
        // Layer 2: derive host/port from the server's bind address, but only
        // where the config file did not set them explicitly.
        if let Some(bind) = base.server_config.bind {
            if base.host.is_none() {
                base.host = Some(match bind.ip() {
                    IpAddr::V4(a) => Host::Ipv4(a),
                    IpAddr::V6(a) => Host::Ipv6(a),
                });
            }
            if base.port.is_none() {
                base.port = Some(bind.port())
            }
        }
        // Layer 3: command-line flags override everything.
        if let Some(host) = matches.value_of("host") {
            base.host = Some(Host::parse(host).with_kind(crate::ErrorKind::ParseUrl)?);
        }
        if let Some(port) = matches.value_of("port") {
            base.port = Some(port.parse()?);
        }
        if let Some(proxy) = matches.value_of("proxy") {
            base.proxy = Some(proxy.parse()?);
        }
        Ok(CliContext(Arc::new(CliContextSeed {
            host: base.host.unwrap_or(Host::Ipv4([127, 0, 0, 1].into())),
            port: base.port.unwrap_or(5959),
            // Route all requests through the configured proxy when given.
            client: if let Some(proxy) = base.proxy {
                Client::builder()
                    .proxy(Proxy::all(proxy).with_kind(crate::ErrorKind::ParseUrl)?)
                    .build()
                    .expect("cannot fail")
            } else {
                Client::new()
            },
        })))
    }
}
/// rpc-toolkit `Context`: tells the client where to send requests.
impl Context for CliContext {
    fn client(&self) -> &Client {
        &self.0.client
    }
    fn port(&self) -> u16 {
        self.0.port
    }
    fn host(&self) -> Host<&str> {
        // Re-borrow the owned host as the `Host<&str>` the trait expects.
        let seed = &*self.0;
        match &seed.host {
            Host::Domain(name) => Host::Domain(name.as_str()),
            Host::Ipv4(ip) => Host::Ipv4(*ip),
            Host::Ipv6(ip) => Host::Ipv6(*ip),
        }
    }
}
/// Deserializes an optional `Host` from a string field.
///
/// Needed because `url::Host` does not implement `Deserialize`; a missing,
/// null, or unit value yields `None`, and anything string-like is parsed.
fn deserialize_host<'de, D: serde::de::Deserializer<'de>>(
    deserializer: D,
) -> Result<Option<Host>, D::Error> {
    struct Visitor;
    impl<'de> serde::de::Visitor<'de> for Visitor {
        type Value = Option<Host>;
        fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
            write!(formatter, "a parsable string")
        }
        fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
        where
            E: serde::de::Error,
        {
            Host::parse(v)
                .map(Some)
                .map_err(|e| serde::de::Error::custom(e))
        }
        // A `Some(...)` wrapper delegates back to the string case...
        fn visit_some<D>(self, deserializer: D) -> Result<Self::Value, D::Error>
        where
            D: serde::de::Deserializer<'de>,
        {
            deserializer.deserialize_str(Visitor)
        }
        // ...while explicit absence (none/unit) maps to `None`.
        fn visit_none<E>(self) -> Result<Self::Value, E>
        where
            E: serde::de::Error,
        {
            Ok(None)
        }
        fn visit_unit<E>(self) -> Result<Self::Value, E>
        where
            E: serde::de::Error,
        {
            Ok(None)
        }
    }
    // deserialize_any: the self-describing format (YAML here) picks which
    // visit_* hook fires.
    deserializer.deserialize_any(Visitor)
}

109
appmgr/src/context/mod.rs Normal file
View File

@@ -0,0 +1,109 @@
use rpc_toolkit::reqwest::Client;
use rpc_toolkit::url::{Host, Url};
use rpc_toolkit::Context;
mod cli;
mod rpc;
pub use cli::CliContext;
pub use rpc::RpcContext;
/// A context `T` paired with an arbitrary extension value `U`; forwards all
/// `Context` behavior to `T` while carrying extra request-scoped data.
#[derive(Debug, Clone)]
pub struct ExtendedContext<T, U> {
    base: T,
    extension: U,
}
impl<T, U> ExtendedContext<T, U> {
    /// Borrow the wrapped base context.
    pub fn base(&self) -> &T {
        &self.base
    }
    /// Borrow the attached extension value.
    pub fn extension(&self) -> &U {
        &self.extension
    }
    /// Tear the pair apart into its two components.
    pub fn split(self) -> (T, U) {
        (self.base, self.extension)
    }
    /// Transform the extension with `f`, leaving the base untouched.
    pub fn map<F: FnOnce(U) -> V, V>(self, f: F) -> ExtendedContext<T, V> {
        let ExtendedContext { base, extension } = self;
        ExtendedContext {
            base,
            extension: f(extension),
        }
    }
}
impl<T> From<T> for ExtendedContext<T, ()> {
fn from(base: T) -> Self {
ExtendedContext {
base,
extension: (),
}
}
}
/// Pure delegation: the extension plays no part in `Context` behavior.
impl<T: Context, U> Context for ExtendedContext<T, U> {
    fn client(&self) -> &Client {
        self.base.client()
    }
    fn url(&self) -> Url {
        self.base.url()
    }
    fn protocol(&self) -> &str {
        self.base.protocol()
    }
    fn port(&self) -> u16 {
        self.base.port()
    }
    fn host(&self) -> Host<&str> {
        self.base.host()
    }
}
/// Either of the two concrete contexts, so a handler can be registered once
/// and serve both the CLI client and the RPC server.
#[derive(Clone)]
pub enum EitherContext {
    Cli(CliContext),
    Rpc(RpcContext),
}
impl EitherContext {
    /// Returns the CLI context if that is what this holds.
    pub fn as_cli(&self) -> Option<&CliContext> {
        if let EitherContext::Cli(ctx) = self {
            Some(ctx)
        } else {
            None
        }
    }
    /// Returns the RPC context if that is what this holds.
    pub fn as_rpc(&self) -> Option<&RpcContext> {
        if let EitherContext::Rpc(ctx) = self {
            Some(ctx)
        } else {
            None
        }
    }
}
/// Delegates every `Context` method to whichever variant is populated.
impl Context for EitherContext {
    fn host(&self) -> Host<&str> {
        match self {
            EitherContext::Cli(ctx) => ctx.host(),
            EitherContext::Rpc(ctx) => ctx.host(),
        }
    }
    fn port(&self) -> u16 {
        match self {
            EitherContext::Cli(ctx) => ctx.port(),
            EitherContext::Rpc(ctx) => ctx.port(),
        }
    }
    fn protocol(&self) -> &str {
        match self {
            EitherContext::Cli(ctx) => ctx.protocol(),
            EitherContext::Rpc(ctx) => ctx.protocol(),
        }
    }
    fn url(&self) -> Url {
        match self {
            EitherContext::Cli(ctx) => ctx.url(),
            EitherContext::Rpc(ctx) => ctx.url(),
        }
    }
    fn client(&self) -> &Client {
        match self {
            EitherContext::Cli(ctx) => ctx.client(),
            EitherContext::Rpc(ctx) => ctx.client(),
        }
    }
}

79
appmgr/src/context/rpc.rs Normal file
View File

@@ -0,0 +1,79 @@
use std::net::{IpAddr, SocketAddr};
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use bollard::Docker;
use patch_db::PatchDb;
use rpc_toolkit::url::Host;
use rpc_toolkit::Context;
use serde::Deserialize;
use sqlx::SqlitePool;
use tokio::fs::File;
use crate::util::{from_yaml_async_reader, AsyncFileExt};
use crate::{Error, ResultExt};
/// On-disk configuration for the RPC server; all fields optional, with
/// hard-coded defaults applied in `RpcContext::init`.
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct RpcContextConfig {
    // Socket address the JSON-RPC server listens on (default 127.0.0.1:5959).
    pub bind: Option<SocketAddr>,
    // Path to the patch-db database file.
    pub db: Option<PathBuf>,
    // Path to the sqlite secret store.
    pub secret_store: Option<PathBuf>,
}
/// Shared server state; held behind an `Arc` so clones are cheap.
pub struct RpcContextSeed {
    pub bind: SocketAddr,
    pub db: PatchDb,
    pub secret_store: SqlitePool,
    pub docker: Docker,
}
/// Cheaply clonable handle to the server's shared state.
#[derive(Clone)]
pub struct RpcContext(Arc<RpcContextSeed>);
impl RpcContext {
    /// Reads the config file (if any), then opens the patch-db, the sqlite
    /// secret store, and a docker socket connection, bundling them into the
    /// shared seed.
    pub async fn init() -> Result<Self, Error> {
        let cfg_path = Path::new(crate::CONFIG_PATH);
        let base = if let Some(f) = File::maybe_open(cfg_path)
            .await
            .with_ctx(|_| (crate::ErrorKind::Filesystem, cfg_path.display().to_string()))?
        {
            from_yaml_async_reader(f).await?
        } else {
            // No config file: use the hard-coded defaults below.
            RpcContextConfig::default()
        };
        let seed = Arc::new(RpcContextSeed {
            bind: base.bind.unwrap_or(([127, 0, 0, 1], 5959).into()),
            db: PatchDb::open(
                base.db
                    .unwrap_or_else(|| Path::new("/mnt/embassy-os/embassy.db").to_owned()),
            )
            .await?,
            secret_store: SqlitePool::connect(&format!(
                "sqlite://{}",
                base.secret_store
                    .unwrap_or_else(|| Path::new("/mnt/embassy-os/secrets.db").to_owned())
                    .display()
            ))
            .await?,
            docker: Docker::connect_with_unix_defaults()?,
        });
        Ok(Self(seed))
    }
}
/// rpc-toolkit `Context`: the server advertises its own bind address.
impl Context for RpcContext {
    fn port(&self) -> u16 {
        self.0.bind.port()
    }
    fn host(&self) -> Host<&str> {
        // Surface the bound IP (v4 or v6) as a url `Host`.
        let ip = self.0.bind.ip();
        match ip {
            IpAddr::V4(v4) => Host::Ipv4(v4),
            IpAddr::V6(v6) => Host::Ipv6(v6),
        }
    }
}
/// Lets callers reach the seed's fields directly through the handle.
impl Deref for RpcContext {
    type Target = RpcContextSeed;
    fn deref(&self) -> &Self::Target {
        self.0.as_ref()
    }
}

View File

@@ -1,237 +0,0 @@
use std::path::Path;
use futures::future::{BoxFuture, FutureExt};
use linear_map::{set::LinearSet, LinearMap};
use crate::dependencies::{DependencyError, TaggedDependencyError};
use crate::util::{from_yaml_async_reader, PersistencePath, YamlUpdateHandle};
use crate::Error;
/// Starts a stopped (or unpauses a paused) app's docker container.
///
/// When `update_metadata` is true, re-runs configuration and bind updates
/// first. Records the app in `running.yaml` so `repair_app_status` can
/// restart it after a crash or reboot.
pub async fn start_app(name: &str, update_metadata: bool) -> Result<(), Error> {
    // Per-app lock file serializing concurrent control operations.
    let lock = crate::util::lock_file(
        format!(
            "{}",
            Path::new(crate::PERSISTENCE_DIR)
                .join("apps")
                .join(name)
                .join("control.lock")
                .display()
        ),
        true,
    )
    .await?;
    // NOTE(review): any `?` below returns without reaching `unlock` — confirm
    // whether the lock releases itself on drop, otherwise errors leak it.
    let status = crate::apps::status(name, false).await?.status;
    if status == crate::apps::DockerStatus::Stopped {
        if update_metadata {
            crate::config::configure(name, None, None, false).await?;
            crate::dependencies::update_binds(name).await?;
        }
        crate::apps::set_needs_restart(name, false).await?;
        let mut running = YamlUpdateHandle::<LinearSet<String>>::new_or_default(
            PersistencePath::from_ref("running.yaml"),
        )
        .await?;
        let output = tokio::process::Command::new("docker")
            .args(&["start", name])
            .stdout(std::process::Stdio::null())
            .output()
            .await?;
        crate::ensure_code!(
            output.status.success(),
            crate::error::DOCKER_ERROR,
            "Failed to Start Application: {}",
            std::str::from_utf8(&output.stderr).unwrap_or("Unknown Error")
        );
        running.insert(name.to_owned());
        running.commit().await?;
    } else if status == crate::apps::DockerStatus::Paused {
        // A paused container only needs unpausing.
        resume_app(name).await?;
    }
    crate::util::unlock(lock).await?;
    Ok(())
}
/// Stops a running app's docker container.
///
/// With `cascade`, dependents are stopped first and reported in the returned
/// map. With `dry_run`, nothing is actually stopped; only the would-be
/// breakages are collected and returned.
pub async fn stop_app(
    name: &str,
    cascade: bool,
    dry_run: bool,
) -> Result<LinearMap<String, TaggedDependencyError>, Error> {
    let mut res = LinearMap::new();
    if cascade {
        stop_dependents(name, dry_run, DependencyError::NotRunning, &mut res).await?;
    }
    if !dry_run {
        // Serialize with other control operations on this app.
        let lock = crate::util::lock_file(
            format!(
                "{}",
                Path::new(crate::PERSISTENCE_DIR)
                    .join("apps")
                    .join(name)
                    .join("control.lock")
                    .display()
            ),
            true,
        )
        .await?;
        let mut running = YamlUpdateHandle::<LinearSet<String>>::new_or_default(
            PersistencePath::from_ref("running.yaml"),
        )
        .await?;
        log::info!("Stopping {}", name);
        // Give the container 25 seconds to shut down gracefully.
        let output = tokio::process::Command::new("docker")
            .args(&["stop", "-t", "25", name])
            .stdout(std::process::Stdio::null())
            .output()
            .await?;
        crate::ensure_code!(
            output.status.success(),
            crate::error::DOCKER_ERROR,
            "Failed to Stop Application: {}",
            std::str::from_utf8(&output.stderr).unwrap_or("Unknown Error")
        );
        running.remove(name);
        running.commit().await?;
        // NOTE(review): error paths above return without unlocking — confirm
        // the lock self-releases on drop.
        crate::util::unlock(lock).await?;
    }
    Ok(res)
}
/// Recursively stops everything that (transitively) depends on `name`,
/// tagging each stopped app in `res` with `err` as the reason.
pub async fn stop_dependents(
    name: &str,
    dry_run: bool,
    err: DependencyError,
    res: &mut LinearMap<String, TaggedDependencyError>,
) -> Result<(), Error> {
    // Inner fn returning a boxed future: async recursion requires the
    // indirection to give the future a fixed size.
    fn stop_dependents_rec<'a>(
        name: &'a str,
        dry_run: bool,
        err: DependencyError,
        res: &'a mut LinearMap<String, TaggedDependencyError>,
    ) -> BoxFuture<'a, Result<(), Error>> {
        async move {
            for dependent in crate::apps::dependents(name, false).await? {
                if crate::apps::status(&dependent, false).await?.status
                    != crate::apps::DockerStatus::Stopped
                {
                    // Depth-first: take down the dependent's own dependents
                    // before the dependent itself.
                    stop_dependents_rec(&dependent, dry_run, DependencyError::NotRunning, res)
                        .await?;
                    stop_app(&dependent, false, dry_run).await?;
                    res.insert(
                        dependent,
                        TaggedDependencyError {
                            dependency: name.to_owned(),
                            error: err.clone(),
                        },
                    );
                }
            }
            Ok(())
        }
        .boxed()
    }
    stop_dependents_rec(name, dry_run, err, res).await
}
/// Stops and restarts an app; if the restart fails, its dependents are
/// stopped so they are not left running against a dead dependency.
pub async fn restart_app(name: &str) -> Result<(), Error> {
    stop_app(name, false, false).await?;
    match start_app(name, true).await {
        Ok(()) => Ok(()),
        Err(e) => {
            log::warn!("Stopping dependents");
            stop_dependents(
                name,
                false,
                crate::dependencies::DependencyError::NotRunning,
                &mut linear_map::LinearMap::new(),
            )
            .await?;
            // Surface the original startup failure to the caller.
            Err(e)
        }
    }
}
pub async fn pause_app(name: &str) -> Result<(), Error> {
let lock = crate::util::lock_file(
format!(
"{}",
Path::new(crate::PERSISTENCE_DIR)
.join("apps")
.join(name)
.join("control.lock")
.display()
),
true,
)
.await?;
let output = tokio::process::Command::new("docker")
.args(&["pause", name])
.stdout(std::process::Stdio::null())
.output()
.await?;
crate::ensure_code!(
output.status.success(),
crate::error::DOCKER_ERROR,
"Failed to Pause Application: {}",
std::str::from_utf8(&output.stderr).unwrap_or("Unknown Error")
);
crate::util::unlock(lock).await?;
Ok(())
}
pub async fn resume_app(name: &str) -> Result<(), Error> {
let lock = crate::util::lock_file(
format!(
"{}",
Path::new(crate::PERSISTENCE_DIR)
.join("apps")
.join(name)
.join("control.lock")
.display()
),
true,
)
.await?;
let output = tokio::process::Command::new("docker")
.args(&["unpause", name])
.stdout(std::process::Stdio::null())
.output()
.await?;
crate::ensure_code!(
output.status.success(),
crate::error::DOCKER_ERROR,
"Failed to Resume Application: {}",
std::str::from_utf8(&output.stderr).unwrap_or("Unknown Error")
);
crate::util::unlock(lock).await?;
Ok(())
}
/// Reconciles reality with `running.yaml` after a crash or reboot: any app
/// recorded as running but found stopped is started again.
pub async fn repair_app_status() -> Result<(), Error> {
    let mut running_file = PersistencePath::from_ref("running.yaml")
        .maybe_read(false)
        .await
        .transpose()?;
    let running: Vec<String> = if let Some(f) = running_file.as_mut() {
        from_yaml_async_reader::<_, &mut tokio::fs::File>(f).await?
    } else {
        // No record on disk: nothing to repair.
        Vec::new()
    };
    for name in running {
        // Take the per-app control lock before inspecting/starting.
        let lock = crate::util::lock_file(
            format!(
                "{}",
                Path::new(crate::PERSISTENCE_DIR)
                    .join("apps")
                    .join(&name)
                    .join("control.lock")
                    .display()
            ),
            true,
        )
        .await?;
        if crate::apps::status(&name, false).await?.status == crate::apps::DockerStatus::Stopped {
            start_app(&name, true).await?;
        }
        crate::util::unlock(lock).await?;
    }
    Ok(())
}

4
appmgr/src/db/mod.rs Normal file
View File

@@ -0,0 +1,4 @@
pub mod model;
pub mod util;
pub use model::DatabaseModel;

199
appmgr/src/db/model.rs Normal file
View File

@@ -0,0 +1,199 @@
use std::net::Ipv4Addr;
use std::sync::Arc;
use indexmap::{IndexMap, IndexSet};
use patch_db::json_ptr::JsonPointer;
use patch_db::{HasModel, Map, MapModel, OptionModel};
use reqwest::Url;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use crate::config::spec::{PackagePointerSpecVariant, SystemPointerSpec};
use crate::id::InterfaceId;
use crate::install::progress::InstallProgress;
use crate::net::Network;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::status::health_check::HealthCheckId;
use crate::status::Status;
use crate::util::Version;
use crate::Error;
/// Root document of the patch-db: everything clients can subscribe to.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct Database {
    #[model]
    pub server_info: ServerInfo,
    #[model]
    pub package_data: AllPackageData,
    // NOTE(review): presumably packages in an unusable/corrupt state — confirm
    // semantics against the code that populates this list.
    pub broken_packages: Vec<PackageId>,
    #[model]
    pub network: Network,
    // Free-form state owned by the front-end.
    pub ui: Value,
}
impl Database {
    /// Builds the initial database contents for a fresh install.
    pub fn init() -> Self {
        // TODO
        let server_info = ServerInfo {
            id: "c3ad21d8".to_owned(),
            version: emver::Version::new(0, 3, 0, 0).into(),
            lan_address: "https://start9-c3ad21d8.local".parse().unwrap(),
            tor_address: "http://privacy34kn4ez3y3nijweec6w4g54i3g54sdv7r5mr6soma3w4begyd.onion"
                .parse()
                .unwrap(),
            updating: false,
            registry: "https://registry.start9.com".parse().unwrap(),
            unread_notification_count: 0,
        };
        Database {
            server_info,
            package_data: AllPackageData::default(),
            broken_packages: Vec::new(),
            network: Network::default(),
            ui: Value::Object(Default::default()),
        }
    }
}
impl DatabaseModel {
    /// Model rooted at the top of the database (the empty JSON pointer).
    pub fn new() -> Self {
        Self::from(JsonPointer::default())
    }
}
/// Device-level status block.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct ServerInfo {
    id: String,
    version: Version,
    lan_address: Url,
    tor_address: Url,
    updating: bool,
    registry: Url,
    unread_notification_count: u64,
}
/// Map of every known package to its install-state-dependent data.
#[derive(Debug, Default, Deserialize, Serialize)]
pub struct AllPackageData(pub IndexMap<PackageId, PackageDataEntry>);
impl Map for AllPackageData {
    type Key = PackageId;
    type Value = PackageDataEntry;
    fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
        self.0.get(key)
    }
}
// Expose this map through patch-db's keyed `MapModel` accessor.
impl HasModel for AllPackageData {
    type Model = MapModel<Self>;
}
/// URLs of a package's static assets (license, instructions, icon).
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct StaticFiles {
    license: Url,
    instructions: Url,
    icon: Url,
}
impl StaticFiles {
pub fn local(id: &PackageId, version: &Version, icon_type: &str) -> Result<Self, Error> {
Ok(StaticFiles {
license: format!("/public/package-data/{}/{}/LICENSE.md", id, version).parse()?,
instructions: format!("/public/package-data/{}/{}/INSTRUCTIONS.md", id, version)
.parse()?,
icon: format!("/public/package-data/{}/{}/icon.{}", id, version, icon_type).parse()?,
})
}
pub fn remote(id: &PackageId, version: &Version, icon_type: &str) -> Result<Self, Error> {
Ok(StaticFiles {
license: format!("/registry/packages/{}/{}/LICENSE.md", id, version).parse()?,
instructions: format!("/registry/packages/{}/{}/INSTRUCTIONS.md", id, version)
.parse()?,
icon: format!("/registry/packages/{}/{}/icon.{}", id, version, icon_type).parse()?,
})
}
}
/// Lifecycle state machine for a package; the `state` tag tells clients
/// which fields are present.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(tag = "state")]
#[serde(rename_all = "kebab-case")]
pub enum PackageDataEntry {
    #[serde(rename_all = "kebab-case")]
    Installing {
        static_files: StaticFiles,
        temp_manifest: Manifest,
        install_progress: Arc<InstallProgress>,
    }, // { state: "installing", 'install-progress': InstallProgress }
    // Updating keeps the currently installed entry alongside the incoming one.
    #[serde(rename_all = "kebab-case")]
    Updating {
        static_files: StaticFiles,
        temp_manifest: Manifest,
        installed: InstalledPackageDataEntry,
        install_progress: Arc<InstallProgress>,
    },
    #[serde(rename_all = "kebab-case")]
    Removing {
        static_files: StaticFiles,
        temp_manifest: Manifest,
    },
    #[serde(rename_all = "kebab-case")]
    Installed {
        static_files: StaticFiles,
        installed: InstalledPackageDataEntry,
    },
}
impl PackageDataEntryModel {
    /// Model accessor for the `installed` entry; absent unless the state
    /// carries one (see the `Installed`/`Updating` variants).
    pub fn installed(self) -> OptionModel<InstalledPackageDataEntry> {
        let child = self.0.child("installed");
        child.into()
    }
    /// Model accessor for the kebab-cased `install-progress` entry.
    pub fn install_progress(self) -> OptionModel<InstallProgress> {
        let child = self.0.child("install-progress");
        child.into()
    }
}
/// Data kept for a fully installed package.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct InstalledPackageDataEntry {
    #[model]
    pub manifest: Manifest,
    #[model]
    pub status: Status,
    pub system_pointers: Vec<SystemPointerSpec>,
    // Packages that depend on this one, and why/how.
    #[model]
    pub current_dependents: IndexMap<PackageId, CurrentDependencyInfo>,
    // Packages this one depends on, and why/how.
    #[model]
    pub current_dependencies: IndexMap<PackageId, CurrentDependencyInfo>,
    #[model]
    pub interface_info: InterfaceInfo,
}
/// Why a dependency edge exists: config pointers into the other package and
/// the health checks gated on it.
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct CurrentDependencyInfo {
    pub pointers: Vec<PackagePointerSpecVariant>,
    pub health_checks: IndexSet<HealthCheckId>,
}
/// A package's container IP plus per-interface addresses.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct InterfaceInfo {
    pub ip: Ipv4Addr,
    #[model]
    pub addresses: InterfaceAddressMap,
}
/// Per-interface address records, keyed by interface id.
#[derive(Debug, Deserialize, Serialize)]
pub struct InterfaceAddressMap(pub IndexMap<InterfaceId, InterfaceAddresses>);
impl Map for InterfaceAddressMap {
    type Key = InterfaceId;
    type Value = InterfaceAddresses;
    fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
        self.0.get(key)
    }
}
// Expose this map through patch-db's keyed `MapModel` accessor.
impl HasModel for InterfaceAddressMap {
    type Model = MapModel<Self>;
}
/// Addresses an interface is reachable at; either may be unassigned.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct InterfaceAddresses {
    pub tor_address: Option<String>,
    pub lan_address: Option<String>,
}

10
appmgr/src/db/util.rs Normal file
View File

@@ -0,0 +1,10 @@
use std::sync::Arc;
use patch_db::Revision;
use serde::{Deserialize, Serialize};
/// RPC envelope pairing a response with the patch-db revision it reflects,
/// so clients can keep their cached copy of the database in sync.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct WithRevision<T> {
    pub response: T,
    pub revision: Arc<Revision>,
}

View File

@@ -1,260 +1,245 @@
use std::borrow::Cow;
use std::path::Path;
use std::collections::HashMap;
use emver::{Version, VersionRange};
use linear_map::LinearMap;
use rand::SeedableRng;
use anyhow::anyhow;
use emver::VersionRange;
use indexmap::{IndexMap, IndexSet};
use patch_db::{DbHandle, DiffPatch, HasModel, Map, MapModel};
use serde::{Deserialize, Serialize};
use crate::config::{Config, ConfigRuleEntryWithSuggestions, ConfigSpec};
use crate::manifest::ManifestLatest;
use crate::Error;
use crate::ResultExt as _;
use crate::action::ActionImplementation;
use crate::config::{Config, ConfigSpec};
use crate::id::InterfaceId;
use crate::net::host::Hosts;
use crate::s9pk::manifest::PackageId;
use crate::status::health_check::{HealthCheckId, HealthCheckResult, HealthCheckResultVariant};
use crate::status::{DependencyErrors, MainStatus, Status};
use crate::util::Version;
use crate::{Error, ResultExt as _};
#[derive(Clone, Debug, Fail, serde::Serialize)]
#[derive(Clone, Debug, thiserror::Error, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
#[serde(tag = "type")]
pub enum DependencyError {
NotInstalled, // "not-installed"
NotRunning, // "not-running"
NotInstalled, // { "type": "not-installed" }
IncorrectVersion {
expected: VersionRange,
received: Version,
}, // { "incorrect-version": { "expected": "0.1.0", "received": "^0.2.0" } }
ConfigUnsatisfied(Vec<String>), // { "config-unsatisfied": ["Bitcoin Core must have pruning set to manual."] }
PointerUpdateError(String), // { "pointer-update-error": "Bitcoin Core RPC Port must not be 18332" }
Other(String), // { "other": "Well fuck." }
}, // { "type": "incorrect-version", "expected": "0.1.0", "received": "^0.2.0" }
ConfigUnsatisfied {
error: String,
}, // { "type": "config-unsatisfied", "error": "Bitcoin Core must have pruning set to manual." }
NotRunning, // { "type": "not-running" }
HealthChecksFailed {
failures: IndexMap<HealthCheckId, HealthCheckResult>,
}, // { "type": "health-checks-failed", "checks": { "rpc": { "time": "2021-05-11T18:21:29Z", "result": "warming-up" } } }
}
impl DependencyError {
pub fn merge_with(self, other: DependencyError) -> DependencyError {
use DependencyError::*;
match (self, other) {
(NotInstalled, _) => NotInstalled,
(_, NotInstalled) => NotInstalled,
(IncorrectVersion { expected, received }, _) => IncorrectVersion { expected, received },
(_, IncorrectVersion { expected, received }) => IncorrectVersion { expected, received },
(ConfigUnsatisfied { error: e0 }, ConfigUnsatisfied { error: e1 }) => {
ConfigUnsatisfied {
error: e0 + "\n" + &e1,
}
}
(ConfigUnsatisfied { error }, _) => ConfigUnsatisfied { error },
(_, ConfigUnsatisfied { error }) => ConfigUnsatisfied { error },
(NotRunning, _) => NotRunning,
(_, NotRunning) => NotRunning,
(HealthChecksFailed { failures: f0 }, HealthChecksFailed { failures: f1 }) => {
HealthChecksFailed {
failures: f0.into_iter().chain(f1.into_iter()).collect(),
}
}
}
}
}
impl std::fmt::Display for DependencyError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use DependencyError::*;
match self {
NotInstalled => write!(f, "Not Installed"),
NotRunning => write!(f, "Not Running"),
IncorrectVersion { expected, received } => write!(
f,
"Incorrect Version: Expected {}, Received {}",
expected, received
expected,
received.as_str()
),
ConfigUnsatisfied(rules) => {
write!(f, "Configuration Rule(s) Violated: {}", rules.join(", "))
ConfigUnsatisfied { error } => {
write!(f, "Configuration Requirements Not Satisfied: {}", error)
}
NotRunning => write!(f, "Not Running"),
HealthChecksFailed { failures } => {
write!(f, "Failed Health Check(s): ")?;
let mut comma = false;
for (check, res) in failures {
if !comma {
comma = true;
} else {
write!(f, ", ");
}
write!(f, "{} @ {} {}", check, res.time, res.result)?;
}
Ok(())
}
PointerUpdateError(e) => write!(f, "Pointer Update Caused {}", e),
Other(e) => write!(f, "System Error: {}", e),
}
}
}
#[derive(Clone, Debug, serde::Serialize)]
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct TaggedDependencyError {
pub dependency: String,
pub dependency: PackageId,
pub error: DependencyError,
}
impl std::fmt::Display for TaggedDependencyError {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}: {}", self.dependency, self.error)
}
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct BreakageRes {
pub patch: DiffPatch,
pub breakages: IndexMap<PackageId, TaggedDependencyError>,
}
#[derive(Clone, Debug, Default, serde::Deserialize, serde::Serialize)]
pub struct Dependencies(pub LinearMap<String, DepInfo>);
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct Dependencies(pub IndexMap<PackageId, DepInfo>);
impl Map for Dependencies {
type Key = PackageId;
type Value = DepInfo;
fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
self.0.get(key)
}
}
impl HasModel for Dependencies {
type Model = MapModel<Self>;
}
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct DepInfo {
pub version: VersionRange,
pub optional: Option<String>,
pub description: Option<String>,
pub critical: bool,
#[serde(default)]
pub mount_public: bool,
#[serde(default)]
pub mount_shared: bool,
#[serde(default)]
pub config: Vec<ConfigRuleEntryWithSuggestions>,
#[model]
pub config: Option<DependencyConfig>,
}
impl DepInfo {
pub async fn satisfied(
pub async fn satisfied<Db: DbHandle>(
&self,
dependency_id: &str,
db: &mut Db,
dependency_id: &PackageId,
dependency_config: Option<Config>, // fetch if none
dependent_id: &str,
dependent_id: &PackageId,
dependent_version: &Version,
dependent_config: &Config,
) -> Result<Result<(), DependencyError>, Error> {
let info = if let Some(info) = crate::apps::list_info().await?.remove(dependency_id) {
let dependency = crate::db::DatabaseModel::new()
.package_data()
.idx_model(dependency_id)
.and_then(|pde| pde.installed())
.get(db)
.await?;
let info = if let Some(info) = &*dependency {
info
} else {
return Ok(Err(DependencyError::NotInstalled));
};
if !&info.version.satisfies(&self.version) {
if !&info.manifest.version.satisfies(&self.version) {
return Ok(Err(DependencyError::IncorrectVersion {
expected: self.version.clone(),
received: info.version.clone(),
received: info.manifest.version.clone(),
}));
}
let hosts = crate::db::DatabaseModel::new()
.network()
.hosts()
.get(db)
.await?;
let dependency_config = if let Some(cfg) = dependency_config {
cfg
} else if let Some(cfg_info) = &info.manifest.config {
cfg_info
.get(
dependency_id,
&info.manifest.version,
&info.manifest.volumes,
&hosts,
)
.await?
.config
.unwrap_or_default()
} else {
let app_config = crate::apps::config(dependency_id).await?;
if let Some(cfg) = app_config.config {
cfg
} else {
app_config
.spec
.gen(&mut rand::rngs::StdRng::from_entropy(), &None)
.unwrap_or_default()
}
Config::default()
};
let mut errors = Vec::new();
let mut cfgs = LinearMap::with_capacity(2);
cfgs.insert(dependency_id, Cow::Borrowed(&dependency_config));
cfgs.insert(dependent_id, Cow::Borrowed(dependent_config));
for rule in self.config.iter() {
if !(rule.entry.rule.compiled)(&dependency_config, &cfgs) {
errors.push(rule.entry.description.clone());
if let Some(cfg_req) = &self.config {
if let Err(e) = cfg_req
.check(dependent_id, dependent_version, dependent_config)
.await
{
if e.kind == crate::ErrorKind::ConfigRulesViolation {
return Ok(Err(DependencyError::ConfigUnsatisfied {
error: format!("{}", e),
}));
} else {
return Err(e);
}
}
}
if !errors.is_empty() {
return Ok(Err(DependencyError::ConfigUnsatisfied(errors)));
}
if crate::apps::status(dependency_id, false).await?.status
!= crate::apps::DockerStatus::Running
{
return Ok(Err(DependencyError::NotRunning));
match &info.status.main {
MainStatus::BackingUp {
started: Some(_),
health,
}
| MainStatus::Running { health, .. } => {
let mut failures = IndexMap::with_capacity(health.len());
for (check, res) in health {
if !matches!(res.result, HealthCheckResultVariant::Success) {
failures.insert(check.clone(), res.clone());
}
}
if !failures.is_empty() {
return Ok(Err(DependencyError::HealthChecksFailed { failures }));
}
}
_ => return Ok(Err(DependencyError::NotRunning)),
}
Ok(Ok(()))
}
}
#[derive(Debug, serde::Serialize)]
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct AppDepInfo {
#[serde(flatten)]
pub info: DepInfo,
pub required: bool,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<DependencyError>,
pub struct DependencyConfig {
check: ActionImplementation,
auto_configure: ActionImplementation,
}
#[derive(Debug, Default, serde::Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct AppDependencies(pub LinearMap<String, AppDepInfo>);
pub async fn check_dependencies(
manifest: ManifestLatest,
dependent_config: &Config,
dependent_config_spec: &ConfigSpec,
) -> Result<AppDependencies, Error> {
let mut deps = AppDependencies::default();
for (dependency_id, dependency_info) in manifest.dependencies.0.into_iter() {
let required = dependency_info.optional.is_none()
|| dependent_config_spec.requires(&dependency_id, dependent_config);
let error = dependency_info
.satisfied(&dependency_id, None, &manifest.id, dependent_config)
impl DependencyConfig {
pub async fn check(
&self,
dependent_id: &PackageId,
dependent_version: &Version,
dependency_config: &Config,
) -> Result<Result<(), String>, Error> {
Ok(self
.check
.sandboxed(dependent_id, dependent_version, Some(dependency_config))
.await?
.err();
let app_dep_info = AppDepInfo {
error,
required,
info: dependency_info,
};
deps.0.insert(dependency_id, app_dep_info);
.map_err(|(_, e)| e))
}
Ok(deps)
}
pub async fn auto_configure(
dependent: &str,
dependency: &str,
dry_run: bool,
) -> Result<crate::config::ConfigurationRes, Error> {
let (dependent_config, mut dependency_config, manifest) = futures::try_join!(
crate::apps::config_or_default(dependent),
crate::apps::config_or_default(dependency),
crate::apps::manifest(dependent)
)?;
let mut cfgs = LinearMap::new();
cfgs.insert(dependent, Cow::Borrowed(&dependent_config));
cfgs.insert(dependency, Cow::Owned(dependency_config.clone()));
let dep_info = manifest
.dependencies
.0
.get(dependency)
.ok_or_else(|| failure::format_err!("{} Does Not Depend On {}", dependent, dependency))
.no_code()?;
for rule in &dep_info.config {
if let Err(e) = rule.apply(dependency, &mut dependency_config, &mut cfgs) {
log::warn!("Rule Unsatisfied After Applying Suggestions: {}", e);
}
pub async fn auto_configure(
&self,
dependent_id: &PackageId,
dependent_version: &Version,
old: &Config,
) -> Result<Config, Error> {
self.auto_configure
.sandboxed(dependent_id, dependent_version, Some(old))
.await?
.map_err(|e| Error::new(anyhow!("{}", e.1), crate::ErrorKind::AutoConfigure))
}
crate::config::configure(dependency, Some(dependency_config), None, dry_run).await
}
/// Refreshes the bind mounts exposing a dependency's `public`/`shared`
/// directories inside `dependent_id`'s volume tree.
///
/// For each dependency flagged `mount_public`/`mount_shared` in the
/// dependent's manifest that is currently installed, the exported directory
/// is bind-mounted under `<VOLUMES>/<dependent>/start9/{public,shared}/<dependency>`.
/// Public mounts are read-only; shared mounts are writable and are namespaced
/// by the dependent's id inside the dependency's shared directory.
pub async fn update_binds(dependent_id: &str) -> Result<(), Error> {
    let dependent_manifest = crate::apps::manifest(dependent_id).await?;
    // Concurrently fetch the manifest of every mountable dependency that is
    // actually installed; uninstalled dependencies yield None and are skipped.
    let dependency_manifests = futures::future::try_join_all(
        dependent_manifest
            .dependencies
            .0
            .into_iter()
            .filter(|(_, info)| info.mount_public || info.mount_shared)
            .map(|(id, info)| async {
                Ok::<_, Error>(if crate::apps::list_info().await?.contains_key(&id) {
                    let man = crate::apps::manifest(&id).await?;
                    Some((id, info, man))
                } else {
                    None
                })
            }),
    )
    .await?;
    // i just have a gut feeling this shouldn't be concurrent
    // (sequential mounting also keeps the kernel mount-table updates ordered)
    for (dependency_id, info, dependency_manifest) in
        dependency_manifests.into_iter().filter_map(|a| a)
    {
        match (dependency_manifest.public, info.mount_public) {
            (Some(public), true) => {
                let public_path = Path::new(crate::VOLUMES).join(&dependency_id).join(public);
                // Only bind when the exported path exists and is a directory.
                if let Ok(metadata) = tokio::fs::metadata(&public_path).await {
                    if metadata.is_dir() {
                        crate::disks::bind(
                            public_path,
                            Path::new(crate::VOLUMES)
                                .join(&dependent_id)
                                .join("start9")
                                .join("public")
                                .join(&dependency_id),
                            true, // read-only
                        )
                        .await?
                    }
                }
            }
            _ => (),
        }
        match (dependency_manifest.shared, info.mount_shared) {
            (Some(shared), true) => {
                let shared_path = Path::new(crate::VOLUMES)
                    .join(&dependency_id)
                    .join(shared)
                    .join(dependent_id); // namespaced by dependent
                tokio::fs::create_dir_all(&shared_path).await?;
                if let Ok(metadata) = tokio::fs::metadata(&shared_path).await {
                    if metadata.is_dir() {
                        crate::disks::bind(
                            shared_path,
                            Path::new(crate::VOLUMES)
                                .join(&dependent_id)
                                .join("start9")
                                .join("shared")
                                .join(&dependency_id),
                            false, // writable
                        )
                        .await?
                    }
                }
            }
            _ => (),
        }
    }
    Ok(())
}

View File

@@ -1,241 +0,0 @@
use std::path::Path;
use failure::ResultExt as _;
use futures::future::try_join_all;
use crate::util::Invoke;
use crate::Error;
use crate::ResultExt as _;
/// Path to the system mount table.
pub const FSTAB: &'static str = "/etc/fstab";

/// Summary of a block device, as parsed from `parted -lm`.
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct DiskInfo {
    // Device path, e.g. "/dev/sda".
    pub logicalname: String,
    // Human-readable size string as reported by parted.
    pub size: String,
    // Model/description field from parted; None when parted reports empty.
    pub description: Option<String>,
}

/// Per-partition details; `is_mounted` and `label` are filled in by a second
/// pass (findmnt / blkid) after parted parsing.
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct PartitionInfo {
    // Partition device path, e.g. "/dev/sda1" or "/dev/nvme0n1p1".
    pub logicalname: String,
    pub is_mounted: bool,
    // Only present when parted printed byte sizes ("BYT;" header).
    pub size: Option<String>,
    // Filesystem label from blkid, if any.
    pub label: Option<String>,
}

/// A disk together with its partitions; `info` is flattened into the same
/// serialized object.
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct Disk {
    #[serde(flatten)]
    pub info: DiskInfo,
    pub partitions: Vec<PartitionInfo>,
}
/// Enumerates block devices and their partitions.
///
/// Parses the machine-readable output of `parted -lm` (records separated by
/// blank lines, fields separated by `:`), then augments every partition with
/// its filesystem label (`blkid`) and current mount state (`findmnt`).
pub async fn list() -> Result<Vec<Disk>, Error> {
    let output = tokio::process::Command::new("parted")
        .arg("-lm")
        .invoke("GNU Parted")
        .await?;
    let output_str = std::str::from_utf8(&output).no_code()?;
    let disks = output_str.split("\n\n").filter_map(|s| -> Option<Disk> {
        let mut lines = s.split("\n");
        // A "BYT;" header means sizes are reported in bytes; without it the
        // begin/end/size columns are absent from the partition rows.
        let has_size = lines.next()? == "BYT;";
        let disk_info_line = lines.next()?;
        let mut disk_info_iter = disk_info_line.split(":");
        let logicalname = disk_info_iter.next()?.to_owned();
        // Devices whose name ends in a digit (nvme0n1, mmcblk0, ...) separate
        // the partition number with a "p": nvme0n1p1 vs sda1.
        let partition_prefix = if logicalname.ends_with(|c: char| c.is_digit(10)) {
            logicalname.clone() + "p"
        } else {
            logicalname.clone()
        };
        let size = disk_info_iter.next()?.to_owned();
        disk_info_iter.next()?; // transport-type
        disk_info_iter.next()?; // logical-sector-size
        disk_info_iter.next()?; // physical-sector-size
        disk_info_iter.next()?; // partition-table-type
        let description = disk_info_iter.next()?;
        let description = if description.is_empty() {
            None
        } else {
            Some(description.to_owned())
        };
        let info = DiskInfo {
            logicalname,
            size,
            description,
        };
        let partitions = lines
            .filter_map(|partition_info_line| -> Option<PartitionInfo> {
                let mut partition_info_iter = partition_info_line.split(":");
                let partition_idx = partition_info_iter.next()?;
                let logicalname = partition_prefix.clone() + partition_idx;
                let size = if has_size {
                    partition_info_iter.next()?; // begin
                    partition_info_iter.next()?; // end
                    Some(partition_info_iter.next()?.to_owned())
                } else {
                    None
                };
                Some(PartitionInfo {
                    logicalname,
                    is_mounted: false, // filled in below via findmnt
                    size,
                    label: None, // filled in below via blkid
                })
            })
            .collect();
        Some(Disk { info, partitions })
    });
    // Second pass: query blkid (label) and findmnt (mount state) for each
    // partition; the two probes per partition run concurrently.
    try_join_all(disks.map(|disk| async move {
        Ok(Disk {
            info: disk.info,
            partitions: try_join_all(disk.partitions.into_iter().map(|mut partition| async move {
                let mut blkid_command = tokio::process::Command::new("blkid");
                let (blkid_res, findmnt_status) = futures::join!(
                    blkid_command
                        .arg(&partition.logicalname)
                        .arg("-s")
                        .arg("LABEL")
                        .arg("-o")
                        .arg("value")
                        .invoke("BLKID"),
                    tokio::process::Command::new("findmnt")
                        .arg(&partition.logicalname)
                        .stdout(std::process::Stdio::null())
                        .stderr(std::process::Stdio::null())
                        .status()
                );
                let blkid_output = blkid_res?;
                let label = std::str::from_utf8(&blkid_output).no_code()?.trim();
                if !label.is_empty() {
                    partition.label = Some(label.to_owned());
                }
                // findmnt exits 0 iff the device is mounted somewhere.
                if findmnt_status?.success() {
                    partition.is_mounted = true;
                }
                Ok::<_, Error>(partition)
            }))
            .await?,
        })
    }))
    .await
}
/// Mounts the block device `logicalname` at `mount_point`.
///
/// If the target is already a mount point, whatever is mounted there is
/// unmounted first; the target directory is created if missing. Fails with
/// `FILESYSTEM_ERROR` when the `mount` command reports a non-zero status.
pub async fn mount<P: AsRef<Path>>(logicalname: &str, mount_point: P) -> Result<(), Error> {
    let target = mount_point.as_ref();
    // `mountpoint` exits 0 iff the path is currently a mount point.
    let already_mounted = tokio::process::Command::new("mountpoint")
        .arg(target)
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::null())
        .status()
        .await?
        .success();
    if already_mounted {
        unmount(target).await?;
    }
    tokio::fs::create_dir_all(target).await?;
    let result = tokio::process::Command::new("mount")
        .arg(logicalname)
        .arg(target)
        .output()
        .await?;
    crate::ensure_code!(
        result.status.success(),
        crate::error::FILESYSTEM_ERROR,
        "Error Mounting Drive: {}",
        std::str::from_utf8(&result.stderr).unwrap_or("Unknown Error")
    );
    Ok(())
}
/// Bind-mounts `src` onto `dst`, optionally read-only.
///
/// Any existing mount at `dst` is unmounted first and the target directory is
/// created if missing. Fails with `FILESYSTEM_ERROR` when `mount --bind`
/// reports a non-zero status.
pub async fn bind<P0: AsRef<Path>, P1: AsRef<Path>>(
    src: P0,
    dst: P1,
    read_only: bool,
) -> Result<(), Error> {
    let source = src.as_ref();
    let target = dst.as_ref();
    log::info!(
        "Binding {} to {}",
        source.display(),
        target.display()
    );
    // `mountpoint` exits 0 iff the path is currently a mount point.
    let already_mounted = tokio::process::Command::new("mountpoint")
        .arg(target)
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::null())
        .status()
        .await?
        .success();
    if already_mounted {
        unmount(target).await?;
    }
    tokio::fs::create_dir_all(target).await?;
    let mut cmd = tokio::process::Command::new("mount");
    cmd.arg("--bind");
    if read_only {
        cmd.arg("-o").arg("ro");
    }
    let result = cmd.arg(source).arg(target).output().await?;
    crate::ensure_code!(
        result.status.success(),
        crate::error::FILESYSTEM_ERROR,
        "Error Binding {} to {}: {}",
        source.display(),
        target.display(),
        std::str::from_utf8(&result.stderr).unwrap_or("Unknown Error")
    );
    Ok(())
}
/// Unmounts `mount_point` and removes the (now empty of mounts) directory.
///
/// Fails with `FILESYSTEM_ERROR` when `umount` reports a non-zero status or
/// when removing the directory afterwards fails.
pub async fn unmount<P: AsRef<Path>>(mount_point: P) -> Result<(), Error> {
    let target = mount_point.as_ref();
    log::info!("Unmounting {}.", target.display());
    let result = tokio::process::Command::new("umount")
        .arg(target)
        .output()
        .await?;
    crate::ensure_code!(
        result.status.success(),
        crate::error::FILESYSTEM_ERROR,
        "Error Unmounting Drive: {}: {}",
        target.display(),
        std::str::from_utf8(&result.stderr).unwrap_or("Unknown Error")
    );
    // The mount point directory is owned by this module; clean it up.
    tokio::fs::remove_dir_all(target)
        .await
        .with_context(|e| format!("rm {}: {}", target.display(), e))
        .with_code(crate::error::FILESYSTEM_ERROR)?;
    Ok(())
}
// RAII-style guard for a mount: created by mounting, cleaned up either by the
// explicit async `unmount()` (preferred) or, as a fallback, in Drop.
#[must_use]
pub struct MountGuard<P: AsRef<Path>> {
    // None once the mount has been released, so Drop becomes a no-op.
    path: Option<P>,
}
impl<P: AsRef<Path>> MountGuard<P> {
    /// Mounts `logicalname` at `mount_point` and returns a guard that will
    /// unmount it when released.
    pub async fn new(logicalname: &str, mount_point: P) -> Result<Self, Error> {
        mount(logicalname, mount_point.as_ref()).await?;
        Ok(Self {
            path: Some(mount_point),
        })
    }
    /// Explicitly unmounts, consuming the guard.
    ///
    /// On success the path is cleared so Drop does nothing; on failure the
    /// path is retained, so Drop will retry the unmount.
    pub async fn unmount(mut self) -> Result<(), Error> {
        if let Some(ref path) = self.path {
            unmount(path).await?;
            self.path = None;
        }
        Ok(())
    }
}
impl<P: AsRef<Path>> Drop for MountGuard<P> {
fn drop(&mut self) {
if let Some(ref path) = self.path {
tokio::runtime::Runtime::new()
.unwrap()
.block_on(unmount(path))
.unwrap()
}
}
}

View File

@@ -1,107 +1,245 @@
use std::fmt::Display;
pub const GENERAL_ERROR: i32 = 1;
pub const FILESYSTEM_ERROR: i32 = 2;
pub const DOCKER_ERROR: i32 = 3;
pub const CFG_SPEC_VIOLATION: i32 = 4;
pub const CFG_RULES_VIOLATION: i32 = 5;
pub const NOT_FOUND: i32 = 6;
pub const INVALID_BACKUP_PASSWORD: i32 = 7;
pub const VERSION_INCOMPATIBLE: i32 = 8;
pub const NETWORK_ERROR: i32 = 9;
pub const REGISTRY_ERROR: i32 = 10;
pub const SERDE_ERROR: i32 = 11;
use anyhow::anyhow;
use patch_db::Revision;
use rpc_toolkit::yajrc::RpcError;
#[derive(Debug, Fail)]
#[fail(display = "{}", _0)]
/// Category of every error the daemon can produce.
///
/// The numeric discriminants are stable identifiers: they are exposed as the
/// JSON-RPC error `code` (`e.kind as i32` in `From<Error> for RpcError`), so
/// existing values must never be renumbered — only appended to.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ErrorKind {
    Unknown = 1,
    Filesystem = 2,
    Docker = 3,
    ConfigSpecViolation = 4,
    ConfigRulesViolation = 5,
    NotFound = 6,
    InvalidPassword = 7,
    VersionIncompatible = 8,
    Network = 9,
    Registry = 10,
    Serialization = 11,
    Deserialization = 12,
    Utf8 = 13,
    ParseVersion = 14,
    Duplicity = 15,
    Nginx = 16,
    Dependency = 17,
    ParseS9pk = 18,
    ParseUrl = 19,
    GParted = 20,
    Blkid = 21,
    InvalidOnionAddress = 22,
    Pack = 23,
    ValidateS9pk = 24,
    OpenSSL = 25,
    Tor = 26,
    ConfigGen = 27,
    ParseNumber = 28,
    Database = 29,
    InvalidPackageId = 30,
    InvalidSignature = 31,
    Backup = 32,
    Restore = 33,
    Authorization = 34,
    AutoConfigure = 35,
    Action = 36,
    RateLimited = 37,
    InvalidRequest = 38,
    MigrationFailed = 39,
}
impl ErrorKind {
    /// Human-readable name for the error category; also used as the JSON-RPC
    /// error `message` (see `From<Error> for RpcError`).
    pub fn as_str(&self) -> &'static str {
        use ErrorKind::*;
        match self {
            Unknown => "Unknown Error",
            Filesystem => "Filesystem I/O Error",
            Docker => "Docker Error",
            ConfigSpecViolation => "Config Spec Violation",
            ConfigRulesViolation => "Config Rules Violation",
            NotFound => "Not Found",
            InvalidPassword => "Invalid Password",
            VersionIncompatible => "Version Incompatible",
            Network => "Network Error",
            Registry => "Registry Error",
            Serialization => "Serialization Error",
            Deserialization => "Deserialization Error",
            Utf8 => "UTF-8 Parse Error",
            ParseVersion => "Version Parsing Error",
            Duplicity => "Duplicity Error",
            Nginx => "Nginx Error",
            Dependency => "Dependency Error",
            ParseS9pk => "S9PK Parsing Error",
            ParseUrl => "URL Parsing Error",
            GParted => "GNU Parted Error",
            Blkid => "BLKID Error",
            InvalidOnionAddress => "Invalid Onion Address",
            Pack => "Pack Error",
            ValidateS9pk => "S9PK Validation Error",
            OpenSSL => "OpenSSL Error",
            Tor => "Tor Daemon Error",
            ConfigGen => "Config Generation Error",
            ParseNumber => "Number Parsing Error",
            Database => "Database Error",
            InvalidPackageId => "Invalid Package ID",
            InvalidSignature => "Invalid Signature",
            Backup => "Backup Error",
            Restore => "Restore Error",
            Authorization => "Unauthorized",
            AutoConfigure => "Auto-Configure Error",
            Action => "Action Failed",
            RateLimited => "Rate Limited",
            InvalidRequest => "Invalid Request",
            MigrationFailed => "Migration Failed",
        }
    }
}
// Display delegates to the static category name.
impl Display for ErrorKind {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.as_str())
    }
}
#[derive(Debug)]
pub struct Error {
pub failure: failure::Error,
pub code: Option<i32>,
pub source: anyhow::Error,
pub kind: ErrorKind,
pub revision: Option<Revision>,
}
// Renders an error as "<category>: <underlying cause>".
impl Display for Error {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.kind.as_str())?;
        f.write_str(": ")?;
        Display::fmt(&self.source, f)
    }
}
impl Error {
pub fn new<E: Into<failure::Error>>(e: E, code: Option<i32>) -> Self {
pub fn new<E: Into<anyhow::Error>>(source: E, kind: ErrorKind) -> Self {
Error {
failure: e.into(),
code,
}
}
pub fn from<E: Into<failure::Error>>(e: E) -> Self {
Error {
failure: e.into(),
code: None,
}
}
}
impl From<failure::Error> for Error {
fn from(e: failure::Error) -> Self {
Error {
failure: e,
code: None,
source: source.into(),
kind,
revision: None,
}
}
}
impl From<std::io::Error> for Error {
fn from(e: std::io::Error) -> Self {
Error {
failure: e.into(),
code: Some(2),
Error::new(e, ErrorKind::Filesystem)
}
}
// Blanket conversions mapping common library errors onto their ErrorKind, so
// `?` works directly on results from std, emver, sqlx, patch_db, etc.
impl From<std::str::Utf8Error> for Error {
    fn from(e: std::str::Utf8Error) -> Self {
        Error::new(e, ErrorKind::Utf8)
    }
}
impl From<std::string::FromUtf8Error> for Error {
    fn from(e: std::string::FromUtf8Error) -> Self {
        Error::new(e, ErrorKind::Utf8)
    }
}
impl From<emver::ParseError> for Error {
    fn from(e: emver::ParseError) -> Self {
        Error::new(e, ErrorKind::ParseVersion)
    }
}
impl From<rpc_toolkit::url::ParseError> for Error {
    fn from(e: rpc_toolkit::url::ParseError) -> Self {
        Error::new(e, ErrorKind::ParseUrl)
    }
}
impl From<std::num::ParseIntError> for Error {
    fn from(e: std::num::ParseIntError) -> Self {
        Error::new(e, ErrorKind::ParseNumber)
    }
}
impl From<std::num::ParseFloatError> for Error {
    fn from(e: std::num::ParseFloatError) -> Self {
        Error::new(e, ErrorKind::ParseNumber)
    }
}
impl From<patch_db::Error> for Error {
    fn from(e: patch_db::Error) -> Self {
        Error::new(e, ErrorKind::Database)
    }
}
impl From<sqlx::Error> for Error {
    fn from(e: sqlx::Error) -> Self {
        Error::new(e, ErrorKind::Database)
    }
}
impl From<ed25519_dalek::SignatureError> for Error {
    fn from(e: ed25519_dalek::SignatureError) -> Self {
        Error::new(e, ErrorKind::InvalidSignature)
    }
}
impl From<bollard::errors::Error> for Error {
    fn from(e: bollard::errors::Error) -> Self {
        Error::new(e, ErrorKind::Docker)
    }
}
// Maps an internal Error onto the wire: the ErrorKind discriminant becomes
// the JSON-RPC error code, its name the message, and the full display string
// plus any pending patch-db revision go into the `data` object.
impl From<Error> for RpcError {
    fn from(e: Error) -> Self {
        let mut data_object = serde_json::Map::with_capacity(2);
        data_object.insert("message".to_owned(), format!("{}", e).into());
        data_object.insert(
            "revision".to_owned(),
            // Serialization of the revision is best-effort; a failure is
            // logged and replaced with null rather than dropping the error.
            match serde_json::to_value(&e.revision) {
                Ok(a) => a,
                Err(e) => {
                    log::warn!("Error serializing revision for Error object: {}", e);
                    serde_json::Value::Null
                }
            },
        );
        RpcError {
            code: e.kind as i32,
            message: e.kind.as_str().into(),
            data: Some(data_object.into()),
        }
    }
}
pub trait ResultExt<T, E>
where
Self: Sized,
{
fn with_code(self, code: i32) -> Result<T, Error>;
fn with_ctx<F: FnOnce(&E) -> (Option<i32>, D), D: Display + Send + Sync + 'static>(
fn with_kind(self, kind: ErrorKind) -> Result<T, Error>;
fn with_ctx<F: FnOnce(&E) -> (ErrorKind, D), D: Display + Send + Sync + 'static>(
self,
f: F,
) -> Result<T, Error>;
fn no_code(self) -> Result<T, Error>;
}
impl<T, E> ResultExt<T, E> for Result<T, E>
where
failure::Error: From<E>,
anyhow::Error: From<E>,
{
fn with_code(self, code: i32) -> Result<T, Error> {
#[cfg(not(feature = "production"))]
assert!(code != 0);
fn with_kind(self, kind: ErrorKind) -> Result<T, Error> {
self.map_err(|e| Error {
failure: e.into(),
code: Some(code),
source: e.into(),
kind,
revision: None,
})
}
fn with_ctx<F: FnOnce(&E) -> (Option<i32>, D), D: Display + Send + Sync + 'static>(
fn with_ctx<F: FnOnce(&E) -> (ErrorKind, D), D: Display + Send + Sync + 'static>(
self,
f: F,
) -> Result<T, Error> {
self.map_err(|e| {
let (code, ctx) = f(&e);
let failure = failure::Error::from(e).context(ctx);
let (kind, ctx) = f(&e);
let source = anyhow::Error::from(e);
let ctx = format!("{}: {}", ctx, source);
let source = source.context(ctx);
Error {
code,
failure: failure.into(),
kind,
source: source.into(),
revision: None,
}
})
}
fn no_code(self) -> Result<T, Error> {
self.map_err(|e| Error {
failure: e.into(),
code: None,
})
}
}
#[macro_export]
macro_rules! ensure_code {
($x:expr, $c:expr, $fmt:expr $(, $arg:expr)*) => {
if !($x) {
return Err(crate::Error {
failure: format_err!($fmt, $($arg, )*),
code: Some($c),
});
return Err(crate::Error::new(anyhow!($fmt, $($arg, )*), $c));
}
};
}

192
appmgr/src/id.rs Normal file
View File

@@ -0,0 +1,192 @@
use std::borrow::{Borrow, Cow};
use std::fmt::Debug;
use std::path::Path;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::util::Version;
use crate::Error;
pub const SYSTEM_ID: Id<&'static str> = Id("SYSTEM");
/// Error returned when a string fails `Id` validation.
#[derive(Debug, thiserror::Error)]
#[error("Invalid ID")]
pub struct InvalidId;
// Allows `?` on Id parsing inside functions returning crate::Error.
impl From<InvalidId> for Error {
    fn from(err: InvalidId) -> Self {
        Error::new(err, crate::error::ErrorKind::InvalidPackageId)
    }
}
/// A raw, not-yet-validated ID string; the deserialization stepping stone for
/// [`Id`], which applies the character-set check afterwards.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct IdUnchecked<S: AsRef<str>>(pub S);
// Cow deserialization uses a manual visitor so that borrowed input
// (visit_borrowed_str) stays zero-copy while owned input still works.
impl<'de> Deserialize<'de> for IdUnchecked<Cow<'de, str>> {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        struct Visitor;
        impl<'de> serde::de::Visitor<'de> for Visitor {
            type Value = IdUnchecked<Cow<'de, str>>;
            fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
                write!(formatter, "a valid ID")
            }
            fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                Ok(IdUnchecked(Cow::Owned(v.to_owned())))
            }
            fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                Ok(IdUnchecked(Cow::Owned(v)))
            }
            fn visit_borrowed_str<E>(self, v: &'de str) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                Ok(IdUnchecked(Cow::Borrowed(v)))
            }
        }
        deserializer.deserialize_any(Visitor)
    }
}
// Owned-string and borrowed-str forms just delegate to the inner type.
impl<'de> Deserialize<'de> for IdUnchecked<String> {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Ok(IdUnchecked(String::deserialize(deserializer)?))
    }
}
impl<'de> Deserialize<'de> for IdUnchecked<&'de str> {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Ok(IdUnchecked(<&'de str>::deserialize(deserializer)?))
    }
}
/// A validated identifier; generic over the string storage so it can wrap
/// `String`, `&str`, or `Cow<str>`.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct Id<S: AsRef<str> = String>(S);
impl<S: AsRef<str>> Id<S> {
    /// Validates the string: only lowercase ASCII letters and '-' pass.
    /// NOTE(review): digits are rejected by this check — confirm that package
    /// ids containing digits are intentionally disallowed here.
    pub fn try_from(value: S) -> Result<Self, InvalidId> {
        if value
            .as_ref()
            .chars()
            .all(|c| c.is_ascii_lowercase() || c == '-')
        {
            Ok(Id(value))
        } else {
            Err(InvalidId)
        }
    }
}
impl<'a> Id<&'a str> {
    /// Converts a borrowed Id into an owned (String-backed) one; no
    /// revalidation is needed since the content is unchanged.
    pub fn owned(&self) -> Id {
        Id(self.0.to_owned())
    }
}
// Display/AsRef/Borrow all expose the raw string content unchanged.
impl<S: AsRef<str>> std::fmt::Display for Id<S> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", self.0.as_ref())
    }
}
impl<S: AsRef<str>> AsRef<str> for Id<S> {
    fn as_ref(&self) -> &str {
        self.0.as_ref()
    }
}
// Borrow<str> allows map lookups keyed by Id using plain &str.
impl<S: AsRef<str>> Borrow<str> for Id<S> {
    fn borrow(&self) -> &str {
        self.0.as_ref()
    }
}
// Deserialization goes through IdUnchecked first, then applies Id::try_from
// so invalid IDs are rejected at the serde boundary.
impl<'de, S> Deserialize<'de> for Id<S>
where
    S: AsRef<str>,
    IdUnchecked<S>: Deserialize<'de>,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let unchecked: IdUnchecked<S> = Deserialize::deserialize(deserializer)?;
        Id::try_from(unchecked.0).map_err(serde::de::Error::custom)
    }
}
impl<S: AsRef<str>> Serialize for Id<S> {
    fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
    where
        Ser: Serializer,
    {
        serializer.serialize_str(self.as_ref())
    }
}
/// Identifier of a docker image declared by a package; a validated [`Id`]
/// newtype.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize)]
pub struct ImageId<S: AsRef<str> = String>(Id<S>);
impl<S: AsRef<str>> std::fmt::Display for ImageId<S> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", &self.0)
    }
}
impl<S: AsRef<str>> ImageId<S> {
    /// Full docker tag for this image within a package:
    /// `start9/<package>/<image>:<version>`, with "latest" when no version
    /// is given.
    pub fn for_package<PkgId: AsRef<crate::s9pk::manifest::PackageId<S0>>, S0: AsRef<str>>(
        &self,
        pkg_id: PkgId,
        pkg_version: Option<&Version>,
    ) -> String {
        format!(
            "start9/{}/{}:{}",
            pkg_id.as_ref(),
            self.0,
            pkg_version.map(|v| { v.as_str() }).unwrap_or("latest")
        )
    }
}
// Inherits Id's validating deserialization.
impl<'de, S> Deserialize<'de> for ImageId<S>
where
    S: AsRef<str>,
    Id<S>: Deserialize<'de>,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Ok(ImageId(Deserialize::deserialize(deserializer)?))
    }
}
/// Identifier of a network interface declared by a package; a validated
/// [`Id`] newtype.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize)]
pub struct InterfaceId<S: AsRef<str> = String>(Id<S>);
impl<S: AsRef<str>> std::fmt::Display for InterfaceId<S> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", &self.0)
    }
}
impl<S: AsRef<str>> AsRef<str> for InterfaceId<S> {
    fn as_ref(&self) -> &str {
        self.0.as_ref()
    }
}
// Inherits Id's validating deserialization.
impl<'de, S> Deserialize<'de> for InterfaceId<S>
where
    S: AsRef<str>,
    Id<S>: Deserialize<'de>,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Ok(InterfaceId(Deserialize::deserialize(deserializer)?))
    }
}
// Interface ids are used directly as path components (they are validated to
// be safe lowercase-ascii/hyphen strings).
impl<S: AsRef<str>> AsRef<Path> for InterfaceId<S> {
    fn as_ref(&self) -> &Path {
        self.0.as_ref().as_ref()
    }
}

View File

@@ -1,133 +0,0 @@
use std::cmp::Ord;
use std::ffi::OsStr;
use std::iter::FromIterator;
use std::path::Path;
use emver::{Version, VersionRange};
use futures::future::{BoxFuture, FutureExt};
use linear_map::LinearMap;
use crate::inspect::info_full;
use crate::manifest::{Description, ManifestLatest};
use crate::{Error, ResultExt};
/// Registry index: app id -> aggregate info across all published versions.
#[derive(Clone, Debug, Default, serde::Deserialize, serde::Serialize)]
pub struct AppIndex(pub LinearMap<String, IndexInfo>);
/// Aggregate entry for one app; title/description track the newest version.
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct IndexInfo {
    pub title: String,
    pub description: Description,
    // Sorted newest-first and deduplicated by version (see AppIndex::add).
    pub version_info: Vec<VersionInfo>,
    pub icon_type: String,
}
/// Per-version metadata published alongside a package.
#[derive(Clone, Debug, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct VersionInfo {
    pub version: Version,
    pub release_notes: String,
    pub os_version_required: VersionRange,
    pub os_version_recommended: VersionRange,
    pub install_alert: Option<String>,
}
// Sentinel used as "no versions yet" when comparing against an entry's
// newest version in AppIndex::add.
const NULL_VERSION: Version = Version::new(0, 0, 0, 0);
impl AppIndex {
    /// Merges one manifest into the index.
    ///
    /// For an existing entry, title/description are overwritten only when the
    /// incoming version is >= the entry's current newest version, then the
    /// version is inserted and the list re-sorted (newest first) and
    /// deduplicated by version. For a new app, a fresh entry is created.
    fn add(&mut self, manifest: ManifestLatest) {
        if let Some(ref mut entry) = self.0.get_mut(&manifest.id) {
            // version_info is sorted newest-first, so index 0 is the current
            // newest; an empty list compares as NULL_VERSION (0.0.0.0).
            if entry
                .version_info
                .get(0)
                .map(|i| &i.version)
                .unwrap_or(&NULL_VERSION)
                <= &manifest.version
            {
                entry.title = manifest.title;
                entry.description = manifest.description;
            }
            entry.version_info.push(VersionInfo {
                version: manifest.version,
                release_notes: manifest.release_notes,
                os_version_required: manifest.os_version_required,
                os_version_recommended: manifest.os_version_recommended,
                install_alert: manifest.install_alert,
            });
            entry
                .version_info
                .sort_unstable_by(|a, b| b.version.cmp(&a.version));
            entry.version_info.dedup_by(|a, b| a.version == b.version);
        } else {
            self.0.insert(
                manifest.id,
                IndexInfo {
                    title: manifest.title,
                    description: manifest.description,
                    version_info: vec![VersionInfo {
                        version: manifest.version,
                        release_notes: manifest.release_notes,
                        os_version_required: manifest.os_version_required,
                        os_version_recommended: manifest.os_version_recommended,
                        install_alert: manifest.install_alert,
                    }],
                    icon_type: "png".to_owned(), // TODO
                },
            );
        }
    }
}
// Feeds every manifest through AppIndex::add.
impl Extend<ManifestLatest> for AppIndex {
    fn extend<I: IntoIterator<Item = ManifestLatest>>(&mut self, iter: I) {
        iter.into_iter().for_each(|manifest| self.add(manifest));
    }
}
// An index built from scratch is just an empty index extended with the items.
impl FromIterator<ManifestLatest> for AppIndex {
    fn from_iter<I: IntoIterator<Item = ManifestLatest>>(iter: I) -> Self {
        let mut index = Self::default();
        index.extend(iter);
        index
    }
}
/// Builds an [`AppIndex`] by recursively scanning `dir` for `.s9pk` files.
///
/// Directories containing a `.ignore` file are skipped entirely. Recursion
/// uses an inner fn returning a `BoxFuture` because async fns cannot
/// directly recurse (their future type would be infinitely sized).
pub async fn index<P: AsRef<Path>>(dir: P) -> Result<AppIndex, Error> {
    let dir_path = dir.as_ref();
    let mut idx = AppIndex::default();
    fn index_rec<'a, P: AsRef<Path> + Send + Sync + 'a>(
        idx: &'a mut AppIndex,
        dir: P,
    ) -> BoxFuture<'a, Result<(), Error>> {
        async move {
            let dir_path = dir.as_ref();
            // A ".ignore" marker file excludes this directory from indexing.
            if let Ok(_) = tokio::fs::metadata(dir_path.join(".ignore")).await {
                log::info!("Skipping {}", dir_path.display());
                return Ok(());
            }
            let mut entry_stream = tokio::fs::read_dir(dir_path).await?;
            while let Some(entry) = entry_stream.next_entry().await? {
                let path = entry.path();
                let metadata = entry.metadata().await?;
                if metadata.is_file() {
                    let ext = path.extension();
                    if ext == Some(OsStr::new("s9pk")) {
                        // with_manifest=true so .unwrap() below is safe;
                        // errors are annotated with the offending path.
                        let info = info_full(&path, true, false)
                            .await
                            .with_ctx(|e| (e.code.clone(), format!("{}: {}", path.display(), e)))?;
                        idx.add(info.manifest.unwrap());
                    }
                } else if metadata.is_dir() {
                    index_rec(idx, &path).await?;
                }
            }
            Ok(())
        }
        .boxed()
    }
    index_rec(&mut idx, dir_path).await?;
    Ok(idx)
}

View File

@@ -1,195 +0,0 @@
use std::path::Path;
use failure::ResultExt as _;
use futures::stream::StreamExt;
use tokio_tar as tar;
use crate::config::{ConfigRuleEntry, ConfigSpec};
use crate::manifest::{Manifest, ManifestLatest};
use crate::util::from_cbor_async_reader;
use crate::version::VersionT;
use crate::Error;
use crate::ResultExt as _;
/// Full inspection result for an s9pk: basic info plus the manifest and
/// config sections when they were requested.
#[derive(Debug, serde::Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct AppInfoFull {
    #[serde(flatten)]
    pub info: AppInfo,
    // Present only when info_full was called with with_manifest = true.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub manifest: Option<ManifestLatest>,
    // Present only when info_full was called with with_config = true.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub config: Option<AppConfig>,
}
/// Minimal identifying info extracted from a package manifest.
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct AppInfo {
    pub title: String,
    pub version: emver::Version,
}
/// Config spec and rules bundled in a package.
#[derive(Debug, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct AppConfig {
    pub spec: ConfigSpec,
    pub rules: Vec<ConfigRuleEntry>,
}
/// Reads identifying info (and optionally manifest / config spec+rules) from
/// an s9pk archive at `path`.
///
/// The s9pk format is a tar archive whose entry ORDER is the contract:
/// `manifest.cbor`, then `config_spec.cbor`, then `config_rules.cbor` — the
/// entries are consumed strictly in sequence below, so do not reorder these
/// reads. Fails with VERSION_INCOMPATIBLE when the current OS version does
/// not satisfy the manifest's `os_version_required`.
pub async fn info_full<P: AsRef<Path>>(
    path: P,
    with_manifest: bool,
    with_config: bool,
) -> Result<AppInfoFull, Error> {
    let p = path.as_ref();
    log::info!("Opening file.");
    let r = tokio::fs::File::open(p)
        .await
        .with_context(|e| format!("{}: {}", p.display(), e))
        .with_code(crate::error::FILESYSTEM_ERROR)?;
    log::info!("Extracting archive.");
    let mut pkg = tar::Archive::new(r);
    let mut entries = pkg.entries()?;
    log::info!("Opening manifest from archive.");
    let manifest = entries
        .next()
        .await
        .ok_or(crate::install::Error::CorruptedPkgFile("missing manifest"))
        .no_code()??;
    crate::ensure_code!(
        manifest.path()?.to_str() == Some("manifest.cbor"),
        crate::error::GENERAL_ERROR,
        "Package File Invalid or Corrupted"
    );
    log::trace!("Deserializing manifest.");
    let manifest: Manifest = from_cbor_async_reader(manifest).await?;
    // Normalize any older manifest schema to the latest one.
    let manifest = manifest.into_latest();
    crate::ensure_code!(
        crate::version::Current::new()
            .semver()
            .satisfies(&manifest.os_version_required),
        crate::error::VERSION_INCOMPATIBLE,
        "AppMgr Version Not Compatible: needs {}",
        manifest.os_version_required
    );
    Ok(AppInfoFull {
        info: AppInfo {
            title: manifest.title.clone(),
            version: manifest.version.clone(),
        },
        manifest: if with_manifest { Some(manifest) } else { None },
        config: if with_config {
            log::info!("Opening config spec from archive.");
            let spec = entries
                .next()
                .await
                .ok_or(crate::install::Error::CorruptedPkgFile(
                    "missing config spec",
                ))
                .no_code()??;
            crate::ensure_code!(
                spec.path()?.to_str() == Some("config_spec.cbor"),
                crate::error::GENERAL_ERROR,
                "Package File Invalid or Corrupted"
            );
            log::trace!("Deserializing config spec.");
            let spec = from_cbor_async_reader(spec).await?;
            log::info!("Opening config rules from archive.");
            let rules = entries
                .next()
                .await
                .ok_or(crate::install::Error::CorruptedPkgFile(
                    "missing config rules",
                ))
                .no_code()??;
            crate::ensure_code!(
                rules.path()?.to_str() == Some("config_rules.cbor"),
                crate::error::GENERAL_ERROR,
                "Package File Invalid or Corrupted"
            );
            log::trace!("Deserializing config rules.");
            let rules = from_cbor_async_reader(rules).await?;
            Some(AppConfig { spec, rules })
        } else {
            None
        },
    })
}
/// Streams the instructions entry of an s9pk archive to stdout.
///
/// The s9pk entry order is fixed (manifest, config spec, config rules,
/// instructions), so the first three entries must be consumed — and are
/// validated/skipped — before the instructions entry can be reached.
/// Fails with NOT_FOUND when the manifest says the package ships no
/// instructions.
pub async fn print_instructions<P: AsRef<Path>>(path: P) -> Result<(), Error> {
    let p = path.as_ref();
    log::info!("Opening file.");
    let r = tokio::fs::File::open(p)
        .await
        .with_context(|e| format!("{}: {}", p.display(), e))
        .with_code(crate::error::FILESYSTEM_ERROR)?;
    log::info!("Extracting archive.");
    let mut pkg = tar::Archive::new(r);
    let mut entries = pkg.entries()?;
    log::info!("Opening manifest from archive.");
    let manifest = entries
        .next()
        .await
        .ok_or(crate::install::Error::CorruptedPkgFile("missing manifest"))
        .no_code()??;
    crate::ensure_code!(
        manifest.path()?.to_str() == Some("manifest.cbor"),
        crate::error::GENERAL_ERROR,
        "Package File Invalid or Corrupted"
    );
    log::trace!("Deserializing manifest.");
    let manifest: Manifest = from_cbor_async_reader(manifest).await?;
    let manifest = manifest.into_latest();
    crate::ensure_code!(
        crate::version::Current::new()
            .semver()
            .satisfies(&manifest.os_version_required),
        crate::error::VERSION_INCOMPATIBLE,
        "AppMgr Version Not Compatible: needs {}",
        manifest.os_version_required
    );
    // Skip the config spec and rules entries to reach the instructions.
    entries
        .next()
        .await
        .ok_or(crate::install::Error::CorruptedPkgFile(
            "missing config spec",
        ))
        .no_code()??;
    entries
        .next()
        .await
        .ok_or(crate::install::Error::CorruptedPkgFile(
            "missing config rules",
        ))
        .no_code()??;
    if manifest.has_instructions {
        use tokio::io::AsyncWriteExt;
        let mut instructions = entries
            .next()
            .await
            .ok_or(crate::install::Error::CorruptedPkgFile(
                "missing instructions",
            ))
            .no_code()??;
        let mut stdout = tokio::io::stdout();
        tokio::io::copy(&mut instructions, &mut stdout)
            .await
            .with_code(crate::error::FILESYSTEM_ERROR)?;
        stdout
            .flush()
            .await
            .with_code(crate::error::FILESYSTEM_ERROR)?;
        stdout
            .shutdown()
            .await
            .with_code(crate::error::FILESYSTEM_ERROR)?;
    } else {
        return Err(failure::format_err!("No instructions for {}", p.display()))
            .with_code(crate::error::NOT_FOUND);
    }
    Ok(())
}

View File

@@ -1,579 +0,0 @@
use std::borrow::Cow;
use std::ffi::{OsStr, OsString};
use std::marker::Unpin;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::sync::{
atomic::{self, AtomicBool, AtomicU64},
Arc,
};
use std::task::Context;
use std::task::Poll;
use std::time::Duration;
use failure::ResultExt as _;
use futures::stream::StreamExt;
use futures::stream::TryStreamExt;
use tokio::io::AsyncWriteExt;
use tokio::io::{AsyncRead, ReadBuf};
use tokio_compat_02::FutureExt;
use tokio_tar as tar;
use crate::config::{ConfigRuleEntry, ConfigSpec};
use crate::manifest::{ImageConfig, Manifest, ManifestV0};
use crate::util::{from_cbor_async_reader, to_yaml_async_writer, AsyncCompat, PersistencePath};
use crate::version::VersionT;
use crate::ResultExt as _;
/// Install-specific failure cases, layered on top of crate::Error via the
/// `Fail` derive.
#[derive(Fail, Debug, Clone)]
pub enum Error {
    // The static str names which expected archive entry was missing/invalid.
    #[fail(display = "Package File Invalid or Corrupted: {}", _0)]
    CorruptedPkgFile(&'static str),
    #[fail(display = "Invalid File Name")]
    InvalidFileName,
}
/// Installs an app given a `name[@version-range]` spec.
///
/// Downloads the s9pk into TMP_DIR (skipped when `use_cache` is set and a
/// cached copy exists), installs it from that path, then removes the
/// temporary file.
pub async fn install_name(name_version: &str, use_cache: bool) -> Result<(), crate::Error> {
    // Everything before '@' is the app name; split always yields >= 1 item.
    let name = name_version.split("@").next().unwrap();
    let tmp_path = Path::new(crate::TMP_DIR).join(format!("{}.s9pk", name));
    if !use_cache || !tmp_path.exists() {
        download_name(name_version).await?;
    }
    install_path(
        &tmp_path
            .as_os_str()
            .to_str()
            .ok_or(Error::InvalidFileName)
            .with_code(crate::error::FILESYSTEM_ERROR)?,
        Some(name),
    )
    .await?;
    // Clean up the downloaded package after a successful install.
    tokio::fs::remove_file(&tmp_path)
        .await
        .with_context(|e| format!("{}: {}", tmp_path.display(), e))
        .with_code(crate::error::FILESYSTEM_ERROR)?;
    Ok(())
}
/// `AsyncRead` adapter that counts bytes flowing through it: `.0` is the
/// inner reader, `.1` is a shared counter used for progress reporting.
struct CountingReader<R: AsyncRead>(pub R, pub Arc<AtomicU64>);
impl<R> AsyncRead for CountingReader<R>
where
    R: AsyncRead,
{
    /// Delegates to the inner reader and adds the number of bytes produced
    /// by *this* poll to the shared counter.
    ///
    /// The previous implementation added `buf.filled().len()` — the buffer's
    /// total fill level — which over-counts whenever a caller polls
    /// repeatedly with the same accumulating `ReadBuf` (e.g. `read_exact`).
    /// Record the fill level before the poll and count only the delta (the
    /// same approach used by `InstallProgressTracker::poll_read`). This also
    /// drops the per-poll `Arc` clone the original flagged as inefficient.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf,
    ) -> Poll<std::io::Result<()>> {
        // SAFETY: we only borrow the fields in place and never move them out
        // of the pinned struct.
        let this = unsafe { self.get_unchecked_mut() };
        let before = buf.filled().len();
        // SAFETY: `this.0` is structurally pinned because `this` was pinned.
        match unsafe { Pin::new_unchecked(&mut this.0) }.poll_read(cx, buf) {
            Poll::Ready(Ok(())) => {
                let read = (buf.filled().len() - before) as u64;
                this.1.fetch_add(read, atomic::Ordering::SeqCst);
                Poll::Ready(Ok(()))
            }
            other => other,
        }
    }
}
/// Resolves `name[@version-range]` into a registry URL and downloads the
/// corresponding s9pk, returning the path of the downloaded file.
pub async fn download_name(name_version: &str) -> Result<PathBuf, crate::Error> {
    let mut parts = name_version.split("@");
    // `split` always yields at least one element
    let name = parts.next().unwrap();
    let version_req: Option<emver::VersionRange> =
        parts.next().map(|raw| raw.parse()).transpose().no_code()?;
    let url = match version_req {
        Some(req) => format!("{}/{}.s9pk?spec={}", &*crate::APP_REGISTRY_URL, name, req),
        None => format!("{}/{}.s9pk", &*crate::APP_REGISTRY_URL, name),
    };
    download(&url, Some(name)).await
}
/// Downloads `url` into the tmp dir as `<name>.s9pk` (or `download.s9pk`),
/// printing live progress to stdout unless QUIET, and returns the file path.
pub async fn download(url: &str, name: Option<&str>) -> Result<PathBuf, crate::Error> {
    let url = reqwest::Url::parse(url).no_code()?;
    log::info!("Downloading {}.", url.as_str());
    let response = reqwest::get(url)
        .compat() // bridge reqwest's older runtime to the current tokio
        .await
        .with_code(crate::error::NETWORK_ERROR)?
        .error_for_status()
        .with_code(crate::error::REGISTRY_ERROR)?;
    tokio::fs::create_dir_all(crate::TMP_DIR).await?;
    let tmp_file_path =
        Path::new(crate::TMP_DIR).join(&format!("{}.s9pk", name.unwrap_or("download")));
    let mut f = tokio::fs::File::create(&tmp_file_path).await?;
    // Content-Length is optional; when present we can print a percentage.
    let len: Option<u64> = response.content_length().map(|a| {
        log::info!("{}KiB to download.", a / 1024);
        a
    });
    let done = Arc::new(AtomicBool::new(false));
    let counter = Arc::new(AtomicU64::new(0));
    let mut reader = CountingReader(
        AsyncCompat(
            response
                .bytes_stream()
                .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))
                .into_async_read(),
        ),
        counter.clone(),
    );
    let done_handle = done.clone();
    // Copy task: streams the response body to disk, flags completion.
    let download_handle = tokio::spawn(async move {
        let res = tokio::io::copy(&mut reader, &mut f).await;
        done_handle.store(true, atomic::Ordering::SeqCst);
        res
    });
    // Progress task: samples the shared counter every 10ms and redraws.
    let poll_handle = tokio::spawn(async move {
        loop {
            let is_done = done.load(atomic::Ordering::SeqCst);
            let downloaded_bytes = counter.load(atomic::Ordering::SeqCst);
            if !*crate::QUIET.read().await {
                if let Some(len) = len {
                    print!("\rDownloading... {}%", downloaded_bytes * 100 / len);
                } else {
                    print!("\rDownloading... {}KiB", downloaded_bytes / 1024);
                }
            }
            if is_done {
                break;
            }
            tokio::time::sleep(Duration::from_millis(10)).await;
        }
        if !*crate::QUIET.read().await {
            println!("\rDownloading... 100%");
        }
    });
    download_handle.await.unwrap()?;
    poll_handle.await.unwrap();
    Ok(tmp_file_path)
}
/// Downloads an s9pk from `url`, installs it, and removes the temp file.
pub async fn install_url(url: &str, name: Option<&str>) -> Result<(), crate::Error> {
    let downloaded = download(url, name).await?;
    install_path(&downloaded, name).await?;
    let cleanup = tokio::fs::remove_file(&downloaded).await;
    cleanup
        .with_context(|e| format!("{}: {}", downloaded.display(), e))
        .with_code(crate::error::FILESYSTEM_ERROR)?;
    Ok(())
}
/// Installs an s9pk from a local file, printing install progress to stdout
/// unless QUIET.
pub async fn install_path<P: AsRef<Path>>(p: P, name: Option<&str>) -> Result<(), crate::Error> {
    let path = p.as_ref();
    log::info!(
        "Starting install of {}.",
        path.file_name()
            .and_then(|a| a.to_str())
            .ok_or(Error::InvalidFileName)
            .no_code()?
    );
    let file = tokio::fs::File::open(&path)
        .await
        .with_context(|e| format!("{}: {}", path.display(), e))
        .with_code(crate::error::FILESYSTEM_ERROR)?;
    // file length is known up front, so progress is always a percentage
    let len = file.metadata().await?.len();
    let done = Arc::new(AtomicBool::new(false));
    let counter = Arc::new(AtomicU64::new(0));
    let done_handle = done.clone();
    let name_clone = name.map(|a| a.to_owned());
    let counter_clone = counter.clone();
    // progress task mirroring the one in `download`: 10ms refresh
    let poll_handle = tokio::spawn(async move {
        loop {
            let is_done = done.load(atomic::Ordering::SeqCst);
            let installed_bytes = counter.load(atomic::Ordering::SeqCst);
            if !*crate::QUIET.read().await {
                print!("\rInstalling... {}%", installed_bytes * 100 / len);
            }
            if is_done {
                break;
            }
            tokio::time::sleep(Duration::from_millis(10)).await;
        }
        if !*crate::QUIET.read().await {
            println!("\rInstalling... 100%");
        }
    });
    let reader = CountingReader(file, counter_clone);
    let res = install(reader, name_clone.as_ref().map(|a| a.as_str())).await;
    // flag completion BEFORE propagating an error so the progress task
    // terminates even when the install failed
    done_handle.store(true, atomic::Ordering::SeqCst);
    res?;
    poll_handle.await.unwrap();
    if !*crate::QUIET.read().await {
        println!("Complete.");
    }
    Ok(())
}
/// Installs an s9pk from any async byte stream: reads the leading
/// `manifest.cbor` tar entry, then dispatches on the manifest version.
pub async fn install<R: AsyncRead + Unpin + Send + Sync>(
    r: R,
    name: Option<&str>,
) -> Result<(), crate::Error> {
    log::info!("Extracting archive.");
    let mut pkg = tar::Archive::new(r);
    let mut entries = pkg.entries()?;
    log::info!("Opening manifest from archive.");
    // the manifest must be the first entry of the archive
    let manifest = entries
        .next()
        .await
        .ok_or(Error::CorruptedPkgFile("missing manifest"))
        .no_code()??;
    crate::ensure_code!(
        manifest.path()?.to_str() == Some("manifest.cbor"),
        crate::error::GENERAL_ERROR,
        "Package File Invalid or Corrupted"
    );
    log::trace!("Deserializing manifest.");
    let manifest: Manifest = from_cbor_async_reader(manifest).await.no_code()?;
    match manifest {
        Manifest::V0(m) => install_v0(m, entries, name).await?,
    };
    Ok(())
}
/// Installs a v0-manifest package: verifies OS compatibility, writes app
/// metadata, registers the Tor hidden service, unpacks config spec/rules,
/// instructions, and assets from the archive (in their fixed order), loads
/// the docker image, creates the container, and updates the app registry
/// plus dependents that mount our shared directory.
pub async fn install_v0<R: AsyncRead + Unpin + Send + Sync>(
    manifest: ManifestV0,
    mut entries: tar::Entries<R>,
    name: Option<&str>,
) -> Result<(), crate::Error> {
    // refuse packages built for an incompatible OS version
    crate::ensure_code!(
        crate::version::Current::new()
            .semver()
            .satisfies(&manifest.os_version_required),
        crate::error::VERSION_INCOMPATIBLE,
        "OS Version Not Compatible: need {}",
        manifest.os_version_required
    );
    if let Some(name) = name {
        crate::ensure_code!(
            manifest.id == name,
            crate::error::GENERAL_ERROR,
            "Package Name Does Not Match Expected"
        );
    }
    log::info!(
        "Creating metadata directory: {}/apps/{}",
        crate::PERSISTENCE_DIR,
        manifest.id
    );
    let app_dir = PersistencePath::from_ref("apps").join(&manifest.id);
    let app_dir_path = app_dir.path();
    if app_dir_path.exists() {
        tokio::fs::remove_dir_all(&app_dir_path).await?;
    }
    tokio::fs::create_dir_all(&app_dir_path).await?;
    // register the hidden service and obtain the container's IP address
    let (ip, tor_addr, tor_key) = crate::tor::set_svc(
        &manifest.id,
        crate::tor::NewService {
            ports: manifest.ports.clone(),
            hidden_service_version: manifest.hidden_service_version,
        },
    )
    .await?;
    // a pre-existing volume means user data survives from a prior install
    let recoverable = Path::new(crate::VOLUMES).join(&manifest.id).exists();
    log::info!("Creating volume {}/{}.", crate::VOLUMES, manifest.id);
    tokio::fs::create_dir_all(Path::new(crate::VOLUMES).join(&manifest.id)).await?;
    let _lock = app_dir.lock(true).await?;
    log::info!("Saving manifest.");
    let mut manifest_out = app_dir.join("manifest.yaml").write(None).await?;
    to_yaml_async_writer(&mut *manifest_out, &Manifest::V0(manifest.clone())).await?;
    manifest_out.commit().await?;
    log::info!("Opening config spec from archive.");
    let config_spec = entries
        .next()
        .await
        .ok_or(Error::CorruptedPkgFile("missing config spec"))
        .no_code()??;
    crate::ensure_code!(
        config_spec.path()?.to_str() == Some("config_spec.cbor"),
        crate::error::GENERAL_ERROR,
        "Package File Invalid or Corrupted"
    );
    log::trace!("Deserializing config spec.");
    let config_spec: ConfigSpec = from_cbor_async_reader(config_spec).await?;
    log::info!("Saving config spec.");
    let mut config_spec_out = app_dir.join("config_spec.yaml").write(None).await?;
    to_yaml_async_writer(&mut *config_spec_out, &config_spec).await?;
    config_spec_out.commit().await?;
    log::info!("Opening config rules from archive.");
    let config_rules = entries
        .next()
        .await
        .ok_or(Error::CorruptedPkgFile("missing config rules"))
        .no_code()??;
    crate::ensure_code!(
        config_rules.path()?.to_str() == Some("config_rules.cbor"),
        crate::error::GENERAL_ERROR,
        "Package File Invalid or Corrupted"
    );
    log::trace!("Deserializing config rules.");
    let config_rules: Vec<ConfigRuleEntry> = from_cbor_async_reader(config_rules).await?;
    log::info!("Saving config rules.");
    let mut config_rules_out = app_dir.join("config_rules.yaml").write(None).await?;
    to_yaml_async_writer(&mut *config_rules_out, &config_rules).await?;
    config_rules_out.commit().await?;
    if manifest.has_instructions {
        log::info!("Opening instructions from archive.");
        let mut instructions = entries
            .next()
            .await
            // FIX: this error previously read "missing config rules"
            // (copy-paste from the block above), misreporting which archive
            // entry was absent.
            .ok_or(Error::CorruptedPkgFile("missing instructions"))
            .no_code()??;
        crate::ensure_code!(
            instructions.path()?.to_str() == Some("instructions.md"),
            crate::error::GENERAL_ERROR,
            "Package File Invalid or Corrupted"
        );
        log::info!("Saving instructions.");
        let mut instructions_out = app_dir.join("instructions.md").write(None).await?;
        tokio::io::copy(&mut instructions, &mut *instructions_out)
            .await
            .with_code(crate::error::FILESYSTEM_ERROR)?;
        instructions_out.commit().await?;
    }
    log::info!("Copying over assets.");
    for asset in manifest.assets.iter() {
        let dst_path = Path::new(crate::VOLUMES)
            .join(&manifest.id)
            .join(&asset.dst);
        log::info!("Copying {} to {}", asset.src.display(), dst_path.display());
        let src_path = Path::new(&asset.src);
        log::info!("Opening {} from archive.", src_path.display());
        let mut src = entries
            .next()
            .await
            .ok_or(Error::CorruptedPkgFile("missing asset"))
            .no_code()??;
        crate::ensure_code!(
            src.path()? == src_path,
            crate::error::GENERAL_ERROR,
            "Package File Invalid or Corrupted"
        );
        let dst_path_file = dst_path.join(src_path);
        if dst_path_file.exists() && !asset.overwrite {
            log::info!("{} already exists, skipping.", dst_path_file.display());
        } else {
            if dst_path_file.exists() {
                if dst_path_file.is_dir() {
                    tokio::fs::remove_dir_all(&dst_path_file)
                        .await
                        .with_context(|e| format!("{}: {}", dst_path_file.display(), e))
                        .with_code(crate::error::FILESYSTEM_ERROR)?;
                } else {
                    tokio::fs::remove_file(&dst_path_file)
                        .await
                        .with_context(|e| format!("{}: {}", dst_path_file.display(), e))
                        .with_code(crate::error::FILESYSTEM_ERROR)?;
                }
            }
            src.unpack_in(&dst_path).await?;
            if src.header().entry_type().is_dir() {
                // directory assets are terminated by an APPMGR_DIR_END
                // sentinel entry in the archive
                loop {
                    let mut file = entries
                        .next()
                        .await
                        .ok_or(Error::CorruptedPkgFile("missing asset"))
                        .no_code()??;
                    if file
                        .path()?
                        .starts_with(format!("APPMGR_DIR_END:{}", asset.src.display()))
                    {
                        break;
                    } else {
                        file.unpack_in(&dst_path).await?;
                    }
                }
            }
        }
    }
    let tag = match &manifest.image {
        ImageConfig::Tar => {
            let image_name = format!("start9/{}", manifest.id);
            let tag = format!("{}:latest", image_name);
            // if an image already exists, tear down its container and image
            // before loading the replacement
            if tokio::process::Command::new("docker")
                .arg("images")
                .arg("-q")
                .arg(&image_name)
                .output()
                .await?
                .stdout
                .len()
                > 0
            {
                tokio::process::Command::new("docker")
                    .arg("stop")
                    .arg(&manifest.id)
                    .spawn()?
                    .wait()
                    .await?;
                tokio::process::Command::new("docker")
                    .arg("rm")
                    .arg(&manifest.id)
                    .spawn()?
                    .wait()
                    .await?;
                crate::ensure_code!(
                    tokio::process::Command::new("docker")
                        .arg("rmi")
                        .arg(&image_name)
                        .output()
                        .await?
                        .status
                        .success(),
                    crate::error::DOCKER_ERROR,
                    "Failed to Remove Existing Image"
                )
            }
            log::info!("Opening image.tar from archive.");
            let mut image = entries
                .next()
                .await
                .ok_or(Error::CorruptedPkgFile("missing image.tar"))
                .no_code()??;
            let image_path = image.path()?;
            if image_path != Path::new("image.tar") {
                return Err(crate::Error::from(format_err!(
                    "Package File Invalid or Corrupted: expected image.tar, got {}",
                    image_path.display()
                )));
            }
            log::info!(
                "Loading docker image start9/{} from image.tar.",
                manifest.id
            );
            // stream the tar straight into `docker load`'s stdin
            let mut child = tokio::process::Command::new("docker")
                .arg("load")
                .stdin(std::process::Stdio::piped())
                .stdout(std::process::Stdio::inherit())
                .stderr(match log::max_level() {
                    log::LevelFilter::Error => std::process::Stdio::null(),
                    _ => std::process::Stdio::inherit(),
                })
                .spawn()?;
            let mut child_in = child.stdin.take().unwrap();
            tokio::io::copy(&mut image, &mut child_in).await?;
            child_in.flush().await?;
            child_in.shutdown().await?;
            drop(child_in); // close stdin so docker sees EOF
            crate::ensure_code!(
                child.wait().await?.success(),
                crate::error::DOCKER_ERROR,
                "Failed to Load Docker Image From Tar"
            );
            tag
        }
    };
    log::info!("Creating docker container: {} from {}.", manifest.id, tag);
    let volume_arg = format!(
        "type=bind,src={}/{},dst={}",
        crate::VOLUMES,
        manifest.id,
        manifest.mount.display()
    );
    let mut args = vec![
        Cow::Borrowed(OsStr::new("create")),
        Cow::Borrowed(OsStr::new("--restart")),
        Cow::Borrowed(OsStr::new("no")),
        Cow::Borrowed(OsStr::new("--name")),
        Cow::Borrowed(OsStr::new(&manifest.id)),
        Cow::Borrowed(OsStr::new("--mount")),
        Cow::Borrowed(OsStr::new(&volume_arg)),
        Cow::Borrowed(OsStr::new("--net")),
        Cow::Borrowed(OsStr::new("start9")),
        Cow::Borrowed(OsStr::new("--ip")),
        Cow::Owned(OsString::from(format!("{}", ip))),
    ];
    if let (Some(ref tor_addr), Some(ref tor_key)) = (&tor_addr, &tor_key) {
        args.extend(
            std::iter::empty()
                .chain(std::iter::once(Cow::Borrowed(OsStr::new("--env"))))
                .chain(std::iter::once(Cow::Owned(OsString::from(format!(
                    "TOR_ADDRESS={}",
                    tor_addr
                )))))
                .chain(std::iter::once(Cow::Borrowed(OsStr::new("--env"))))
                .chain(std::iter::once(Cow::Owned(OsString::from(format!(
                    "TOR_KEY={}",
                    tor_key
                ))))),
        );
    }
    if let Some(shm_size_mb) = manifest.shm_size_mb {
        args.push(Cow::Borrowed(OsStr::new("--shm-size")));
        args.push(Cow::Owned(OsString::from(format!("{}m", shm_size_mb))));
    }
    args.push(Cow::Borrowed(OsStr::new(&tag)));
    crate::ensure_code!(
        std::process::Command::new("docker")
            .args(&args)
            .stdout(std::process::Stdio::null())
            .stderr(match log::max_level() {
                log::LevelFilter::Error => std::process::Stdio::null(),
                _ => std::process::Stdio::inherit(),
            })
            .status()?
            .success(),
        crate::error::DOCKER_ERROR,
        "Failed to Create Docker Container"
    );
    tokio::fs::create_dir_all(Path::new(crate::VOLUMES).join(&manifest.id).join("start9")).await?;
    if let Some(public) = manifest.public {
        tokio::fs::create_dir_all(Path::new(crate::VOLUMES).join(&manifest.id).join(public))
            .await?;
    }
    if let Some(shared) = manifest.shared {
        tokio::fs::create_dir_all(Path::new(crate::VOLUMES).join(&manifest.id).join(shared))
            .await?;
    }
    log::info!("Updating app list.");
    crate::apps::add(
        &manifest.id,
        crate::apps::AppInfo {
            title: manifest.title.clone(),
            version: manifest.version.clone(),
            tor_address: tor_addr.clone(),
            configured: false,
            recoverable,
            needs_restart: false,
        },
    )
    .await?;
    // auto-mark configured if an existing (or empty) config satisfies the spec
    let config = crate::apps::config(&manifest.id).await?;
    if let Some(cfg) = config.config {
        if config.spec.matches(&cfg).is_ok() {
            crate::apps::set_configured(&manifest.id, true).await?;
        }
    } else {
        let empty_config = crate::config::Config::default();
        if config.spec.matches(&empty_config).is_ok() {
            crate::config::configure(&manifest.id, Some(empty_config), None, false).await?;
        }
    }
    crate::dependencies::update_binds(&manifest.id).await?;
    // restart (or mark for restart) dependencies that mount our shared dir
    for (dep_id, dep_info) in manifest.dependencies.0 {
        if dep_info.mount_shared
            && crate::apps::list_info().await?.get(&dep_id).is_some()
            && crate::apps::manifest(&dep_id).await?.shared.is_some()
        {
            match crate::apps::status(&dep_id, false).await?.status {
                crate::apps::DockerStatus::Stopped => (),
                crate::apps::DockerStatus::Running => crate::control::restart_app(&dep_id).await?,
                _ => crate::apps::set_needs_restart(&dep_id, true).await?,
            }
        }
    }
    Ok(())
}

465
appmgr/src/install/mod.rs Normal file
View File

@@ -0,0 +1,465 @@
use std::fmt::Display;
use std::io::SeekFrom;
use std::path::Path;
use std::pin::Pin;
use std::process::Stdio;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration;
use anyhow::anyhow;
use futures::TryStreamExt;
use http::HeaderMap;
use indexmap::{IndexMap, IndexSet};
use patch_db::json_ptr::JsonPointer;
use patch_db::{
DbHandle, HasModel, MapModel, Model, ModelData, OptionModel, PatchDbHandle, Revision,
};
use reqwest::Response;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use sha2::{Digest, Sha256};
use tokio::fs::{File, OpenOptions};
use tokio::io::{AsyncRead, AsyncSeek, AsyncSeekExt, AsyncWrite, AsyncWriteExt};
use self::progress::{InstallProgress, InstallProgressTracker};
use crate::context::RpcContext;
use crate::db::model::{
CurrentDependencyInfo, InstalledPackageDataEntry, PackageDataEntry, StaticFiles,
};
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::s9pk::reader::S9pkReader;
use crate::status::{DependencyErrors, MainStatus, Status};
use crate::util::{AsyncFileExt, Version};
use crate::Error;
pub mod progress;
/// On-disk cache of downloaded s9pk archives, organized by package/version.
/// (`'static` is implied on `const` items — clippy::redundant_static_lifetimes.)
pub const PKG_CACHE: &str = "/mnt/embassy-os/cache/packages";
/// Publicly served static assets (license, instructions, icon) per package.
pub const PKG_PUBLIC_DIR: &str = "/mnt/embassy-os/public/package-data";
/// Downloads an s9pk (or reuses a hash-matching cached copy) and installs
/// it, tracking progress in the db; on failure the package id is appended
/// to `broken-packages` before the error is propagated.
pub async fn download_install_s9pk(
    ctx: RpcContext,
    temp_manifest: &Manifest,
    s9pk: Response,
) -> Result<(), Error> {
    let pkg_id = &temp_manifest.id;
    let version = &temp_manifest.version;
    let mut db = ctx.db.handle();
    let pkg_cache_dir = Path::new(PKG_CACHE).join(pkg_id).join(version.as_str());
    tokio::fs::create_dir_all(&pkg_cache_dir).await?;
    // BUG FIX: this was `AsRef::<Path>::as_ref(pkg_id).with_extension("s9pk")`,
    // which resolved relative to the process cwd and left the freshly created
    // cache directory unused. Anchor the cache file inside the version dir.
    let pkg_cache = pkg_cache_dir.join(AsRef::<Path>::as_ref(pkg_id).with_extension("s9pk"));
    let pkg_data_entry = crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(pkg_id);
    // run the fallible body in a closure so failures can be recorded below
    let res = (|| async {
        let progress = InstallProgress::new(s9pk.content_length());
        let static_files = StaticFiles::remote(pkg_id, version, temp_manifest.assets.icon_type())?;
        let mut pde = pkg_data_entry.get_mut(&mut db).await?;
        // transition the db entry into Installing (fresh) or Updating (over
        // an installed package); any other transient state is rejected
        match pde.take() {
            Some(PackageDataEntry::Installed { installed, .. }) => {
                *pde = Some(PackageDataEntry::Updating {
                    install_progress: progress.clone(),
                    static_files,
                    installed,
                    temp_manifest: temp_manifest.clone(),
                })
            }
            None => {
                *pde = Some(PackageDataEntry::Installing {
                    install_progress: progress.clone(),
                    static_files,
                    temp_manifest: temp_manifest.clone(),
                })
            }
            _ => {
                return Err(Error::new(
                    anyhow!("Cannot install over an app in a transient state"),
                    crate::ErrorKind::InvalidRequest,
                ))
            }
        }
        pde.save(&mut db).await?;
        let progress_model = pkg_data_entry.and_then(|pde| pde.install_progress());
        /// Returns a reader over the cached s9pk iff it exists and its hash
        /// matches the `x-s9pk-hash` response header; cache problems are
        /// logged and treated as a miss.
        async fn check_cache(
            pkg_id: &PackageId,
            version: &Version,
            pkg_cache: &Path,
            headers: &HeaderMap,
            progress: &Arc<InstallProgress>,
            model: OptionModel<InstallProgress>,
            ctx: &RpcContext,
            db: &mut PatchDbHandle,
        ) -> Option<S9pkReader<InstallProgressTracker<File>>> {
            // demote cache errors to warnings — a bad cache means re-download
            fn warn_ok<T, E: Display>(
                pkg_id: &PackageId,
                version: &Version,
                res: Result<T, E>,
            ) -> Option<T> {
                match res {
                    Ok(a) => Some(a),
                    Err(e) => {
                        log::warn!(
                            "Install {}@{}: Could not open cache: {}",
                            pkg_id,
                            version,
                            e
                        );
                        None
                    }
                }
            }
            let hash = headers.get("x-s9pk-hash")?;
            let file = warn_ok(pkg_id, version, File::maybe_open(&pkg_cache).await)??;
            let progress_reader = InstallProgressTracker::new(file, progress.clone());
            let rdr = warn_ok(
                pkg_id,
                version,
                progress
                    .track_read_during(model, &ctx.db, db, || {
                        S9pkReader::from_reader(progress_reader)
                    })
                    .await,
            )?;
            if hash.as_bytes() == rdr.hash_str().as_bytes() {
                Some(rdr)
            } else {
                None
            }
        }
        let cached = check_cache(
            pkg_id,
            version,
            &pkg_cache,
            s9pk.headers(),
            &progress,
            progress_model.clone(),
            &ctx,
            &mut db,
        )
        .await;
        let mut s9pk_reader = if let Some(cached) = cached {
            cached
        } else {
            // cache miss: stream the response body into the cache file
            File::delete(&pkg_cache).await?;
            let mut dst = OpenOptions::new()
                .create(true)
                .write(true)
                .read(true)
                .open(&pkg_cache)
                .await?;
            progress
                .track_download_during(progress_model.clone(), &ctx.db, &mut db, || async {
                    let mut progress_writer =
                        InstallProgressTracker::new(&mut dst, progress.clone());
                    tokio::io::copy(
                        &mut tokio_util::io::StreamReader::new(s9pk.bytes_stream().map_err(|e| {
                            // map reqwest errors onto std::io error kinds
                            std::io::Error::new(
                                if e.is_connect() {
                                    std::io::ErrorKind::ConnectionRefused
                                } else if e.is_timeout() {
                                    std::io::ErrorKind::TimedOut
                                } else {
                                    std::io::ErrorKind::Other
                                },
                                e,
                            )
                        })),
                        &mut progress_writer,
                    )
                    .await?;
                    progress.download_complete();
                    Ok(())
                })
                .await?;
            // rewind and read the freshly written file back for installation
            dst.seek(SeekFrom::Start(0)).await?;
            let progress_reader = InstallProgressTracker::new(dst, progress.clone());
            let rdr = progress
                .track_read_during(progress_model.clone(), &ctx.db, &mut db, || {
                    S9pkReader::from_reader(progress_reader)
                })
                .await?;
            rdr
        };
        install_s9pk(&ctx, &mut db, pkg_id, version, &mut s9pk_reader, progress).await?;
        Ok(())
    })()
    .await;
    // record the package as broken so the UI can surface the failure
    if let Err(e) = res {
        let mut broken = crate::db::DatabaseModel::new()
            .broken_packages()
            .get_mut(&mut db)
            .await?;
        broken.push(pkg_id.clone());
        broken.save(&mut db).await?;
        Err(e)
    } else {
        Ok(())
    }
}
/// Installs from an already-local, hash-validated s9pk reader: unpacks
/// static files and docker images, registers networking and interfaces,
/// records the InstalledPackageDataEntry, and (on update) runs migrations.
pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
    ctx: &RpcContext,
    mut db: &mut PatchDbHandle,
    pkg_id: &PackageId,
    version: &Version,
    rdr: &mut S9pkReader<InstallProgressTracker<R>>,
    progress: Arc<InstallProgress>,
) -> Result<(), Error> {
    rdr.validate().await?;
    // from here on the tracker counts bytes toward `read`, not `validated`
    rdr.validated();
    let model = crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(pkg_id)
        .check(db)
        .await?
        .ok_or_else(|| {
            Error::new(
                anyhow!("PackageDataEntry does not exist"),
                crate::ErrorKind::Database,
            )
        })?;
    let progress_model = model.clone().install_progress();
    log::info!("Install {}@{}: Unpacking Manifest", pkg_id, version);
    let manifest = progress
        .track_read_during(progress_model.clone(), &ctx.db, db, || rdr.manifest())
        .await?;
    log::info!("Install {}@{}: Unpacked Manifest", pkg_id, version);
    let public_dir_path = Path::new(PKG_PUBLIC_DIR)
        .join(pkg_id)
        .join(version.as_str());
    tokio::fs::create_dir_all(&public_dir_path).await?;
    log::info!("Install {}@{}: Unpacking LICENSE.md", pkg_id, version);
    progress
        .track_read_during(progress_model.clone(), &ctx.db, db, || async {
            let license_path = public_dir_path.join("LICENSE.md");
            let mut dst = File::create(&license_path).await?;
            tokio::io::copy(&mut rdr.license().await?, &mut dst).await?;
            dst.sync_all().await?;
            Ok(())
        })
        .await?;
    log::info!("Install {}@{}: Unpacked LICENSE.md", pkg_id, version);
    log::info!("Install {}@{}: Unpacking INSTRUCTIONS.md", pkg_id, version);
    progress
        .track_read_during(progress_model.clone(), &ctx.db, db, || async {
            let instructions_path = public_dir_path.join("INSTRUCTIONS.md");
            let mut dst = File::create(&instructions_path).await?;
            tokio::io::copy(&mut rdr.instructions().await?, &mut dst).await?;
            dst.sync_all().await?;
            Ok(())
        })
        .await?;
    log::info!("Install {}@{}: Unpacked INSTRUCTIONS.md", pkg_id, version);
    let icon_path = Path::new("icon").with_extension(&manifest.assets.icon_type());
    log::info!(
        "Install {}@{}: Unpacking {}",
        pkg_id,
        version,
        icon_path.display()
    );
    progress
        .track_read_during(progress_model.clone(), &ctx.db, db, || async {
            let icon_path = public_dir_path.join(&icon_path);
            let mut dst = File::create(&icon_path).await?;
            tokio::io::copy(&mut rdr.icon().await?, &mut dst).await?;
            dst.sync_all().await?;
            Ok(())
        })
        .await?;
    log::info!(
        "Install {}@{}: Unpacked {}",
        pkg_id,
        version,
        icon_path.display()
    );
    log::info!("Install {}@{}: Unpacking Docker Images", pkg_id, version);
    progress
        .track_read_during(progress_model.clone(), &ctx.db, db, || async {
            // stream the embedded image tar straight into `docker load`
            let mut load = tokio::process::Command::new("docker")
                .arg("load")
                .stdin(Stdio::piped())
                .stderr(Stdio::piped())
                .spawn()?;
            let mut dst = load.stdin.take().ok_or_else(|| {
                Error::new(
                    anyhow!("Could not write to stdin of docker load"),
                    crate::ErrorKind::Docker,
                )
            })?;
            tokio::io::copy(&mut rdr.docker_images().await?, &mut dst).await?;
            dst.flush().await?;
            dst.shutdown().await?;
            drop(dst); // close stdin so docker sees EOF
            let res = load.wait_with_output().await?;
            if !res.status.success() {
                Err(Error::new(
                    anyhow!(
                        "{}",
                        String::from_utf8(res.stderr)
                            .unwrap_or_else(|e| format!("Could not parse stderr: {}", e))
                    ),
                    crate::ErrorKind::Docker,
                ))
            } else {
                Ok(())
            }
        })
        .await?;
    log::info!("Install {}@{}: Unpacked Docker Images", pkg_id, version,);
    progress.read_complete.store(true, Ordering::SeqCst);
    progress_model.put(&mut db, &progress).await?;
    // remaining db mutations happen inside a single transaction
    let mut tx = db.begin().await?;
    let mut network = crate::db::DatabaseModel::new()
        .network()
        .get_mut(&mut tx)
        .await?;
    log::info!("Install {}@{}: Installing main", pkg_id, version);
    let ip = network.register_host(&manifest.id)?;
    manifest
        .main
        .install(pkg_id, version, &manifest.volumes, ip)
        .await?;
    let hosts = network.hosts.clone();
    network.save(&mut tx).await?;
    log::info!("Install {}@{}: Installed main", pkg_id, version);
    log::info!("Install {}@{}: Installing interfaces", pkg_id, version);
    let interface_info = manifest.interfaces.install(ip).await?;
    log::info!("Install {}@{}: Installed interfaces", pkg_id, version);
    log::info!("Install {}@{}: Complete", pkg_id, version);
    let static_files = StaticFiles::local(pkg_id, version, manifest.assets.icon_type())?;
    let installed = InstalledPackageDataEntry {
        manifest: manifest.clone(),
        status: Status {
            configured: manifest.config.is_none(),
            main: MainStatus::Stopped,
            dependency_errors: todo!(), // NOTE(review): unimplemented — panics when reached
        },
        system_pointers: Vec::new(),
        current_dependents: {
            // search required dependencies
            let mut deps = IndexMap::new();
            for package in crate::db::DatabaseModel::new()
                .package_data()
                .keys(&mut tx)
                .await?
            {
                if let Some(dep) = crate::db::DatabaseModel::new()
                    .package_data()
                    .idx_model(&package)
                    .expect(&mut tx)
                    .await?
                    .installed()
                    .and_then(|i| i.current_dependencies().idx_model(pkg_id))
                    .get(&mut tx)
                    .await?
                    .to_owned()
                {
                    deps.insert(package, dep);
                }
            }
            deps
        },
        current_dependencies: manifest
            .dependencies
            .0
            .iter()
            .filter_map(|(id, info)| {
                // non-optional dependencies start with default (empty) info
                if info.optional.is_none() {
                    Some((id.clone(), CurrentDependencyInfo::default()))
                } else {
                    None
                }
            })
            .collect(),
        interface_info,
    };
    let mut pde = model.get_mut(&mut tx).await?;
    let prev = std::mem::replace(
        &mut *pde,
        PackageDataEntry::Installed {
            installed,
            static_files,
        },
    );
    pde.save(&mut tx).await?;
    // if this was an update, run both packages' migrations and re-configure
    if let PackageDataEntry::Updating {
        installed: prev, ..
    } = prev
    {
        let mut configured = prev.status.configured;
        if let Some(res) = prev
            .manifest
            .migrations
            .to(
                version,
                pkg_id,
                &prev.manifest.version,
                &prev.manifest.volumes,
                &hosts,
            )
            .await?
        {
            configured &= res.configured;
        }
        // cleanup(pkg_id, Some(prev)).await?;
        if let Some(res) = manifest
            .migrations
            .from(
                &prev.manifest.version,
                pkg_id,
                version,
                &manifest.volumes,
                &hosts,
            )
            .await?
        {
            configured &= res.configured;
        }
        if configured {
            crate::config::configure(
                &mut tx,
                &ctx.docker,
                &hosts,
                pkg_id,
                None,
                &None,
                false,
                &mut IndexMap::new(),
                &mut IndexMap::new(),
            )
            .await?;
            todo!("set as running if viable");
        }
    }
    tx.commit(None).await?;
    Ok(())
}

View File

@@ -0,0 +1,227 @@
use std::future::Future;
use std::io::SeekFrom;
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration;
use patch_db::{DbHandle, HasModel, OptionModel, PatchDb, PatchDbHandle};
use serde::{Deserialize, Serialize};
use tokio::io::{AsyncRead, AsyncSeek, AsyncWrite};
use crate::db::model::PackageDataEntry;
use crate::Error;
/// Shared, atomically-updated install progress, periodically serialized into
/// the db so clients can poll it. Tracks three phases: download, validation
/// (hashing), and read (unpacking).
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct InstallProgress {
    // total s9pk size in bytes, if the server sent Content-Length
    pub size: Option<u64>,
    // bytes written to the cache file so far
    pub downloaded: AtomicU64,
    pub download_complete: AtomicBool,
    // bytes consumed while validating (hashing) the package
    pub validated: AtomicU64,
    pub validation_complete: AtomicBool,
    // bytes consumed while unpacking the package
    pub read: AtomicU64,
    pub read_complete: AtomicBool,
}
impl InstallProgress {
    /// New zeroed progress record; `size` is the Content-Length if known.
    pub fn new(size: Option<u64>) -> Arc<Self> {
        Arc::new(InstallProgress {
            size,
            downloaded: AtomicU64::new(0),
            download_complete: AtomicBool::new(false),
            validated: AtomicU64::new(0),
            validation_complete: AtomicBool::new(false),
            read: AtomicU64::new(0),
            read_complete: AtomicBool::new(false),
        })
    }
    /// Flags the download phase as finished.
    pub fn download_complete(&self) {
        self.download_complete.store(true, Ordering::SeqCst)
    }
    /// Persists this record into `model` once per second until
    /// `download_complete` is set, then writes one final snapshot.
    /// Always hands the db handle back so the caller can reclaim it.
    pub async fn track_download<Db: DbHandle>(
        self: Arc<Self>,
        model: OptionModel<InstallProgress>,
        mut db: Db,
    ) -> (Db, Result<(), Error>) {
        while !self.download_complete.load(Ordering::SeqCst) {
            if let Err(e) = model.put(&mut db, &self).await {
                return (db, Err(e.into()));
            }
            tokio::time::sleep(Duration::from_secs(1)).await;
        }
        if let Err(e) = model.put(&mut db, &self).await {
            (db, Err(e.into()))
        } else {
            (db, Ok(()))
        }
    }
    /// Runs `f` while a spawned task periodically persists download progress.
    pub async fn track_download_during<
        F: FnOnce() -> Fut,
        Fut: Future<Output = Result<T, Error>>,
        T,
    >(
        self: &Arc<Self>,
        model: OptionModel<InstallProgress>,
        db: &PatchDb,
        handle: &mut PatchDbHandle,
        f: F,
    ) -> Result<T, Error> {
        // lend the caller's handle to the tracker task, replacing it with a
        // fresh one; the original is swapped back once the task finishes
        let local_db = std::mem::replace(handle, db.handle());
        let tracker = tokio::spawn(self.clone().track_download(model.clone(), local_db));
        let res = f().await;
        self.download_complete.store(true, Ordering::SeqCst);
        let (local_db, tracker_res) = tracker.await.unwrap();
        let _ = std::mem::replace(handle, local_db);
        tracker_res?;
        res
    }
    /// Same persistence loop as `track_download`, but terminated by the
    /// caller-supplied `complete` flag (and with no final snapshot).
    pub async fn track_read<Db: DbHandle>(
        self: Arc<Self>,
        model: OptionModel<InstallProgress>,
        mut db: Db,
        complete: Arc<AtomicBool>,
    ) -> (Db, Result<(), Error>) {
        while !complete.load(Ordering::SeqCst) {
            if let Err(e) = model.put(&mut db, &self).await {
                return (db, Err(e.into()));
            }
            tokio::time::sleep(Duration::from_secs(1)).await;
        }
        (db, Ok(()))
    }
    /// Runs `f` while a spawned task periodically persists read progress,
    /// using the same db-handle-swap technique as `track_download_during`.
    pub async fn track_read_during<
        F: FnOnce() -> Fut,
        Fut: Future<Output = Result<T, Error>>,
        T,
    >(
        self: &Arc<Self>,
        model: OptionModel<InstallProgress>,
        db: &PatchDb,
        handle: &mut PatchDbHandle,
        f: F,
    ) -> Result<T, Error> {
        let local_db = std::mem::replace(handle, db.handle());
        let complete = Arc::new(AtomicBool::new(false));
        let tracker = tokio::spawn(self.clone().track_read(
            model.clone(),
            local_db,
            complete.clone(),
        ));
        let res = f().await;
        complete.store(true, Ordering::SeqCst);
        let (local_db, tracker_res) = tracker.await.unwrap();
        let _ = std::mem::replace(handle, local_db);
        tracker_res?;
        res
    }
}
/// `AsyncRead`/`AsyncWrite`/`AsyncSeek` wrapper that feeds byte counts into
/// an [`InstallProgress`]; `validating` selects whether reads count toward
/// the validation phase or the unpack (read) phase.
#[pin_project::pin_project]
pub struct InstallProgressTracker<RW> {
    #[pin]
    inner: RW,
    validating: bool,
    progress: Arc<InstallProgress>,
}
impl<RW> InstallProgressTracker<RW> {
    /// Wraps `inner`, starting in the validation phase.
    pub fn new(inner: RW, progress: Arc<InstallProgress>) -> Self {
        InstallProgressTracker {
            progress,
            validating: true,
            inner,
        }
    }

    /// Switches byte accounting from `validated` over to `read`.
    pub fn validated(&mut self) {
        self.validating = false;
    }
}
impl<W: AsyncWrite> AsyncWrite for InstallProgressTracker<W> {
    /// Delegates to the inner writer, adding written bytes to `downloaded`.
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, std::io::Error>> {
        let this = self.project();
        match this.inner.poll_write(cx, buf) {
            Poll::Ready(Ok(n)) => {
                this.progress
                    .downloaded
                    .fetch_add(n as u64, Ordering::SeqCst);
                Poll::Ready(Ok(n))
            }
            a => a,
        }
    }
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
        let this = self.project();
        this.inner.poll_flush(cx)
    }
    fn poll_shutdown(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        let this = self.project();
        this.inner.poll_shutdown(cx)
    }
    /// Vectored variant; counts bytes the same way as `poll_write`.
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[std::io::IoSlice<'_>],
    ) -> Poll<Result<usize, std::io::Error>> {
        let this = self.project();
        match this.inner.poll_write_vectored(cx, bufs) {
            Poll::Ready(Ok(n)) => {
                this.progress
                    .downloaded
                    .fetch_add(n as u64, Ordering::SeqCst);
                Poll::Ready(Ok(n))
            }
            a => a,
        }
    }
}
impl<R: AsyncRead> AsyncRead for InstallProgressTracker<R> {
    /// Delegates to the inner reader, adding the bytes produced by this poll
    /// (the fill-level delta) to either `validated` or `read`, depending on
    /// which phase the tracker is in.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut tokio::io::ReadBuf<'_>,
    ) -> Poll<std::io::Result<()>> {
        let this = self.project();
        // fill level before the poll — counting only the delta keeps the
        // total correct even when the caller reuses an accumulating ReadBuf
        let prev = buf.filled().len() as u64;
        match this.inner.poll_read(cx, buf) {
            Poll::Ready(Ok(())) => {
                if *this.validating {
                    &this.progress.validated
                } else {
                    &this.progress.read
                }
                .fetch_add(buf.filled().len() as u64 - prev, Ordering::SeqCst);
                Poll::Ready(Ok(()))
            }
            a => a,
        }
    }
}
impl<R: AsyncSeek> AsyncSeek for InstallProgressTracker<R> {
    fn start_seek(self: Pin<&mut Self>, position: SeekFrom) -> std::io::Result<()> {
        let this = self.project();
        this.inner.start_seek(position)
    }
    /// On seek completion, resets the active phase counter to the new
    /// absolute stream position (store, not add).
    fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<u64>> {
        let this = self.project();
        match this.inner.poll_complete(cx) {
            Poll::Ready(Ok(n)) => {
                if *this.validating {
                    &this.progress.validated
                } else {
                    &this.progress.read
                }
                .store(n, Ordering::SeqCst);
                Poll::Ready(Ok(n))
            }
            a => a,
        }
    }
}

View File

@@ -1,93 +0,0 @@
use crate::Error;
use avahi_sys;
use futures::future::pending;
/// Newtype wrapper around an application identifier string.
#[derive(Clone, Debug, Eq, PartialEq, Hash)]
pub struct AppId {
    // the raw id string — NOTE(review): the `un_` prefix presumably follows
    // a Haskell-style "unwrap" accessor convention; confirm before renaming
    pub un_app_id: String,
}
/// Publishes a `.local` mDNS CNAME (via avahi) for every installed app that
/// exposes at least one LAN port, aliasing `<tor-base>.local` to this
/// machine's avahi hostname. On success this never returns: it parks on
/// `pending().await` after installing a Ctrl-C handler that exits the
/// process.
pub async fn enable_lan() -> Result<(), Error> {
    unsafe {
        let app_list = crate::apps::list_info().await?;
        // Lifecycle: the poll, client, and group are deliberately never
        // freed — the function lives for the rest of the process and the
        // Ctrl-C handler exits without cleanup (see commented code below).
        let simple_poll = avahi_sys::avahi_simple_poll_new();
        let poll = avahi_sys::avahi_simple_poll_get(simple_poll);
        let mut stack_err = 0;
        let err_c: *mut i32 = &mut stack_err;
        // NOTE(review): the avahi error code written through `err_c` is
        // never inspected, and `avahi_client_new` may return null on
        // failure — confirm the subsequent calls tolerate that.
        let avahi_client = avahi_sys::avahi_client_new(
            poll,
            avahi_sys::AvahiClientFlags::AVAHI_CLIENT_NO_FAIL,
            None,
            std::ptr::null_mut(),
            err_c,
        );
        let group =
            avahi_sys::avahi_entry_group_new(avahi_client, Some(noop), std::ptr::null_mut());
        let hostname_raw = avahi_sys::avahi_client_get_host_name_fqdn(avahi_client);
        let hostname_bytes = std::ffi::CStr::from_ptr(hostname_raw).to_bytes_with_nul();
        // Build the hostname in DNS wire format (length-prefixed labels).
        // The fqdn is assumed to be exactly "<15-char-name>.local".
        const HOSTNAME_LEN: usize = 1 + 15 + 1 + 5; // leading byte, main address, dot, "local"
        debug_assert_eq!(hostname_bytes.len(), HOSTNAME_LEN);
        let mut hostname_buf = [0; HOSTNAME_LEN + 1];
        hostname_buf[1..].copy_from_slice(hostname_bytes);
        // assume fixed length prefix on hostname due to local address
        hostname_buf[0] = 15; // set the prefix length to 15 for the main address
        hostname_buf[16] = 5; // set the prefix length to 5 for "local"
        for (app_id, app_info) in app_list {
            let man = crate::apps::manifest(&app_id).await?;
            // Skip apps that expose no LAN ports.
            if man
                .ports
                .iter()
                .filter(|p| p.lan.is_some())
                .next()
                .is_none()
            {
                continue;
            }
            // Skip apps without a Tor address to derive the LAN name from.
            let tor_address = if let Some(addr) = app_info.tor_address {
                addr
            } else {
                continue;
            };
            // "<base>.onion" -> "<base>.local"
            let lan_address = tor_address
                .strip_suffix(".onion")
                .ok_or_else(|| failure::format_err!("Invalid Tor Address: {:?}", tor_address))?
                .to_owned()
                + ".local";
            let lan_address_ptr = std::ffi::CString::new(lan_address)
                .expect("Could not cast lan address to c string");
            // Publish the CNAME. NOTE(review): the return code is
            // discarded, so failures to add a record are silently ignored.
            let _ = avahi_sys::avahi_entry_group_add_record(
                group,
                avahi_sys::AVAHI_IF_UNSPEC,
                avahi_sys::AVAHI_PROTO_UNSPEC,
                avahi_sys::AvahiPublishFlags_AVAHI_PUBLISH_USE_MULTICAST
                    | avahi_sys::AvahiPublishFlags_AVAHI_PUBLISH_ALLOW_MULTIPLE,
                lan_address_ptr.as_ptr(),
                avahi_sys::AVAHI_DNS_CLASS_IN as u16,
                avahi_sys::AVAHI_DNS_TYPE_CNAME as u16,
                avahi_sys::AVAHI_DEFAULT_TTL,
                hostname_buf.as_ptr().cast(),
                hostname_buf.len(),
            );
            log::info!("Published {:?}", lan_address_ptr);
        }
        avahi_sys::avahi_entry_group_commit(group);
        ctrlc::set_handler(move || {
            // please the borrow checker with the below semantics
            // avahi_sys::avahi_entry_group_free(group);
            // avahi_sys::avahi_client_free(avahi_client);
            // drop(Box::from_raw(err_c));
            std::process::exit(0);
        })
        .expect("Error setting signal handler");
    }
    // Park forever; the records stay published until the process exits.
    pending().await
}
/// No-op avahi entry-group callback; group state changes are ignored.
unsafe extern "C" fn noop(
    _group: *mut avahi_sys::AvahiEntryGroup,
    _state: avahi_sys::AvahiEntryGroupState,
    _userdata: *mut core::ffi::c_void,
) {
}

View File

@@ -1,8 +1,4 @@
#[macro_use]
extern crate failure;
#[macro_use]
extern crate pest_derive;
pub const CONFIG_PATH: &'static str = "/etc/embassy/config.toml";
pub const TOR_RC: &'static str = "/root/appmgr/tor/torrc";
pub const SERVICES_YAML: &'static str = "tor/services.yaml";
pub const VOLUMES: &'static str = "/root/volumes";
@@ -20,35 +16,37 @@ lazy_static::lazy_static! {
pub static ref QUIET: tokio::sync::RwLock<bool> = tokio::sync::RwLock::new(!std::env::var("APPMGR_QUIET").map(|a| a == "0").unwrap_or(true));
}
pub mod actions;
pub mod apps;
pub mod action;
pub mod backup;
pub mod config;
pub mod control;
pub mod context;
pub mod db;
pub mod dependencies;
pub mod disks;
pub mod error;
pub mod index;
pub mod inspect;
pub mod id;
pub mod install;
#[cfg(feature = "avahi")]
pub mod lan;
pub mod logs;
pub mod manifest;
pub mod pack;
pub mod migration;
pub mod net;
pub mod registry;
pub mod remove;
pub mod tor;
pub mod update;
pub mod s9pk;
pub mod status;
pub mod util;
pub mod version;
pub mod volume;
pub use config::{configure, Config};
pub use control::{restart_app, start_app, stop_app, stop_dependents};
pub use error::{Error, ResultExt};
pub use install::{install_name, install_path, install_url};
pub use logs::{logs, notifications, stats, LogOptions};
pub use pack::{pack, verify};
pub use remove::remove;
pub use update::update;
pub use config::Config;
use context::{CliContext, EitherContext};
pub use error::{Error, ErrorKind, ResultExt};
use rpc_toolkit::command;
use rpc_toolkit::yajrc::RpcError;
pub use version::{init, self_update};
/// Root of the RPC command tree for the full binary; threads the context
/// through to the `config::config` and `version::git_info` subcommands.
#[command(subcommands(config::config, version::git_info))]
pub fn main_api(#[context] ctx: EitherContext) -> Result<EitherContext, RpcError> {
    Ok(ctx)
}
/// Root RPC command tree for portable builds: only `version::git_info` is
/// exposed.
#[command(subcommands(version::git_info))]
pub fn portable_api(#[context] ctx: EitherContext) -> Result<EitherContext, RpcError> {
    Ok(ctx)
}

View File

@@ -1,199 +0,0 @@
use std::borrow::Cow;
use std::ffi::{OsStr, OsString};
use std::path::Path;
use failure::ResultExt as _;
use futures::stream::StreamExt;
use futures::stream::TryStreamExt;
use itertools::Itertools;
use crate::util::PersistencePath;
use crate::Error;
use crate::ResultExt as _;
#[derive(Clone, Copy, Debug, serde::Serialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum Level {
Error,
Warn,
Success,
Info,
}
impl std::fmt::Display for Level {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
Level::Error => write!(f, "ERROR"),
Level::Warn => write!(f, "WARN"),
Level::Success => write!(f, "SUCCESS"),
Level::Info => write!(f, "INFO"),
}
}
}
impl std::str::FromStr for Level {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"ERROR" => Ok(Level::Error),
"WARN" => Ok(Level::Warn),
"SUCCESS" => Ok(Level::Success),
"INFO" => Ok(Level::Info),
_ => Err(Error::from(format_err!("Unknown Notification Level"))),
}
}
}
/// A single user-facing notification emitted by an app.
#[derive(Clone, Debug, serde::Serialize)]
pub struct Notification {
    /// Creation time; parsed from a possibly-fractional number and
    /// truncated to whole seconds (presumably a Unix timestamp — TODO
    /// confirm against the writer).
    pub time: i64,
    /// Severity of the notification.
    pub level: Level,
    /// App-defined numeric code identifying the notification type.
    pub code: usize,
    pub title: String,
    pub message: String,
}
impl std::fmt::Display for Notification {
    /// Serializes as `LEVEL:code:title:message`, escaping `:` in the title
    /// as U+A789 and newlines in the message as U+2026 so the record stays
    /// a single colon-delimited line.
    ///
    /// NOTE(review): `time` is not written here, yet `FromStr` expects it
    /// as the first field — presumably the log writer prepends the
    /// timestamp; confirm before relying on `Display`/`FromStr`
    /// round-tripping.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{}:{}:{}:{}",
            self.level,
            self.code,
            self.title.replace(":", "\u{A789}"),
            self.message.replace("\n", "\u{2026}")
        )
    }
}
impl std::str::FromStr for Notification {
    type Err = Error;
    /// Parses `time:LEVEL:code:title:message…` as found in an app's
    /// notification log. The message may itself contain `:`: all remaining
    /// segments are re-joined with `:` via `Itertools::intersperse`, and
    /// the escapes applied by `Display` are reversed.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let mut split = s.split(":");
        Ok(Notification {
            // Accepts fractional timestamps but truncates toward zero.
            time: split
                .next()
                .ok_or_else(|| format_err!("missing time"))?
                .parse::<f64>()
                .map(|a| a as i64)
                .no_code()?,
            level: split
                .next()
                .ok_or_else(|| format_err!("missing level"))?
                .parse()?,
            code: split
                .next()
                .ok_or_else(|| format_err!("missing code"))?
                .parse()
                .no_code()?,
            // Undo the `Display` escaping of ':' in the title.
            title: split
                .next()
                .ok_or_else(|| format_err!("missing title"))?
                .replace("\u{A789}", ":"),
            // Everything after the title is the message; restore ':' and
            // escaped newlines. A missing message yields "".
            message: split
                .intersperse(":")
                .collect::<String>()
                .replace("\u{2026}", "\n"),
        })
    }
}
/// Flags forwarded to `docker logs` by [`logs`].
pub struct LogOptions<A: AsRef<str>, B: AsRef<str>> {
    /// Pass `--details`.
    pub details: bool,
    /// Pass `-f` (follow: `docker logs` runs until interrupted).
    pub follow: bool,
    /// Pass `--since <value>` when set.
    pub since: Option<A>,
    /// Pass `--until <value>` when set.
    pub until: Option<B>,
    /// Pass `--tail <n>` when set.
    pub tail: Option<usize>,
    /// Pass `-t` (timestamps).
    pub timestamps: bool,
}
/// Streams `docker logs` for the container `name`, forwarding the selected
/// [`LogOptions`] flags, and waits for the `docker` process to exit
/// (indefinitely when `follow` is set). Fails with `DOCKER_ERROR` when
/// docker reports a non-zero status.
pub async fn logs<A: AsRef<str>, B: AsRef<str>>(
    name: &str,
    options: LogOptions<A, B>,
) -> Result<(), Error> {
    let mut args: Vec<OsString> = vec![OsString::from("logs")];
    if options.details {
        args.push(OsString::from("--details"));
    }
    if options.follow {
        args.push(OsString::from("-f"));
    }
    if let Some(since) = options.since.as_ref() {
        args.push(OsString::from("--since"));
        args.push(OsString::from(since.as_ref()));
    }
    if let Some(until) = options.until.as_ref() {
        args.push(OsString::from("--until"));
        args.push(OsString::from(until.as_ref()));
    }
    if let Some(tail) = options.tail {
        args.push(OsString::from("--tail"));
        args.push(OsString::from(tail.to_string()));
    }
    if options.timestamps {
        args.push(OsString::from("-t"));
    }
    args.push(OsString::from(name));
    let status = std::process::Command::new("docker").args(&args).status()?;
    crate::ensure_code!(
        status.success(),
        crate::error::DOCKER_ERROR,
        "Failed to Collect Logs from Docker"
    );
    Ok(())
}
/// Drains and parses the notification log for app `id`.
///
/// The app writes `notifications.log` inside its volume; this *moves* the
/// file into a scratch persistence path (so each call consumes the log) and
/// then parses one [`Notification`] per line. A missing log file simply
/// means "no new notifications" and yields an empty `Vec`.
pub async fn notifications(id: &str) -> Result<Vec<Notification>, Error> {
    let p = PersistencePath::from_ref("notifications").join(id).tmp();
    if let Some(parent) = p.parent() {
        if !parent.exists() {
            tokio::fs::create_dir_all(parent).await?;
        }
    }
    match tokio::fs::rename(
        Path::new(crate::VOLUMES)
            .join(id)
            .join("start9")
            .join("notifications.log"),
        &p,
    )
    .await
    {
        // No log file: nothing to report.
        Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => return Ok(Vec::new()),
        a => a,
    }?;
    let f = tokio::fs::File::open(&p)
        .await
        .with_context(|e| format!("{}: {}", p.display(), e))
        .with_code(crate::error::FILESYSTEM_ERROR)?;
    // Parse line-by-line; the first malformed line aborts with an error.
    tokio::io::AsyncBufReadExt::lines(tokio::io::BufReader::new(f))
        .map(|a| a.map_err(From::from).and_then(|a| a.parse()))
        .try_collect()
        .await
}
/// Reads the app's self-reported stats from `stats.yaml` in its volume.
///
/// The file is *copied* (unlike [`notifications`], which moves its log)
/// into a scratch persistence path before parsing; a missing file yields
/// `serde_yaml::Value::Null` rather than an error.
pub async fn stats(id: &str) -> Result<serde_yaml::Value, Error> {
    let p = PersistencePath::from_ref("stats").join(id).tmp();
    if let Some(parent) = p.parent() {
        if !parent.exists() {
            tokio::fs::create_dir_all(parent).await?;
        }
    }
    match tokio::fs::copy(
        Path::new(crate::VOLUMES)
            .join(id)
            .join("start9")
            .join("stats.yaml"),
        &p,
    )
    .await
    {
        // No stats file: report "no data" as YAML null.
        Err(ref e) if e.kind() == std::io::ErrorKind::NotFound => {
            return Ok(serde_yaml::Value::Null)
        }
        a => a,
    }?;
    let f = tokio::fs::File::open(&p)
        .await
        .with_context(|e| format!("{}: {}", p.display(), e))
        .with_code(crate::error::FILESYSTEM_ERROR)?;
    crate::util::from_yaml_async_reader(f).await.no_code()
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,87 +0,0 @@
use std::path::PathBuf;
use linear_map::LinearMap;
use crate::actions::Action;
use crate::dependencies::Dependencies;
use crate::tor::HiddenServiceVersion;
use crate::tor::PortMapping;
/// Alias for the newest manifest schema; update when a new version lands.
pub type ManifestLatest = ManifestV0;
/// Short and long human-readable descriptions of an app.
#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
pub struct Description {
    pub short: String,
    pub long: String,
}
/// How the app's docker image is packaged; currently only `tar` exists.
#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
#[serde(tag = "type")]
#[serde(rename_all = "snake_case")]
pub enum ImageConfig {
    Tar,
}
/// A static file shipped with the package (presumably copied into place on
/// install — confirm against the install code).
#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
pub struct Asset {
    /// Source path within the package.
    pub src: PathBuf,
    /// Destination path.
    pub dst: PathBuf,
    /// Whether an existing file at `dst` may be replaced.
    pub overwrite: bool,
}
/// Version 0 of the app manifest schema (`manifest.yaml` in a package).
#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct ManifestV0 {
    /// Unique app identifier.
    pub id: String,
    /// App version (emver semantics).
    pub version: emver::Version,
    /// Human-readable display name.
    pub title: String,
    pub description: Description,
    pub release_notes: String,
    /// Optional alert text shown before install.
    #[serde(default)]
    pub install_alert: Option<String>,
    /// Optional alert text shown before uninstall.
    #[serde(default)]
    pub uninstall_alert: Option<String>,
    /// Optional alert text shown before restore.
    #[serde(default)]
    pub restore_alert: Option<String>,
    /// Optional alert text shown before start.
    #[serde(default)]
    pub start_alert: Option<String>,
    /// Whether the package bundles an instructions document.
    #[serde(default)]
    pub has_instructions: bool,
    /// OS version range the app requires; defaults to "any".
    #[serde(default = "emver::VersionRange::any")]
    pub os_version_required: emver::VersionRange,
    /// OS version range the app recommends; defaults to "any".
    #[serde(default = "emver::VersionRange::any")]
    pub os_version_recommended: emver::VersionRange,
    /// Tor/LAN port mappings exposed by the app.
    pub ports: Vec<PortMapping>,
    pub image: ImageConfig,
    /// Optional shared-memory size for the container, in MB.
    #[serde(default)]
    pub shm_size_mb: Option<usize>,
    /// Mount point of the app's data volume inside the container.
    pub mount: PathBuf,
    // NOTE(review): `public`/`shared` appear to be additional exported
    // volume paths — confirm exact semantics against the install code.
    #[serde(default)]
    pub public: Option<PathBuf>,
    #[serde(default)]
    pub shared: Option<PathBuf>,
    #[serde(default)]
    pub assets: Vec<Asset>,
    #[serde(default)]
    pub hidden_service_version: HiddenServiceVersion,
    #[serde(default)]
    pub dependencies: Dependencies,
    #[serde(default)]
    pub actions: Vec<Action>,
    /// Unrecognized manifest keys are preserved here verbatim.
    #[serde(flatten)]
    pub extra: LinearMap<String, serde_yaml::Value>,
}
/// Versioned manifest wrapper; the `compat` tag selects the schema variant.
#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
#[serde(tag = "compat")]
#[serde(rename_all = "lowercase")]
pub enum Manifest {
    V0(ManifestV0),
}
impl Manifest {
    /// Upgrades any schema version to the latest in-memory representation.
    pub fn into_latest(self) -> ManifestLatest {
        match self {
            Manifest::V0(m) => m,
        }
    }
}

80
appmgr/src/migration.rs Normal file
View File

@@ -0,0 +1,80 @@
use anyhow::anyhow;
use emver::VersionRange;
use indexmap::{IndexMap, IndexSet};
use patch_db::HasModel;
use serde::{Deserialize, Serialize};
use crate::action::ActionImplementation;
use crate::net::host::Hosts;
use crate::s9pk::manifest::PackageId;
use crate::status::health_check::HealthCheckId;
use crate::util::Version;
use crate::volume::Volumes;
use crate::Error;
/// Migration actions declared by a package, keyed by the version range of
/// the other version involved in the migration.
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct Migrations {
    /// Candidates run when migrating *from* a version matching the range.
    pub from: IndexMap<VersionRange, ActionImplementation>,
    /// Candidates run when migrating *to* a version matching the range.
    pub to: IndexMap<VersionRange, ActionImplementation>,
}
impl Migrations {
pub async fn from(
&self,
version: &Version,
pkg_id: &PackageId,
pkg_version: &Version,
volumes: &Volumes,
hosts: &Hosts,
) -> Result<Option<MigrationRes>, Error> {
Ok(
if let Some((_, migration)) = self
.from
.iter()
.find(|(range, _)| version.satisfies(*range))
{
Some(
migration
.execute(pkg_id, pkg_version, volumes, hosts, Some(version), false)
.await?
.map_err(|e| {
Error::new(anyhow!("{}", e.1), crate::ErrorKind::MigrationFailed)
})?,
)
} else {
None
},
)
}
pub async fn to(
&self,
version: &Version,
pkg_id: &PackageId,
pkg_version: &Version,
volumes: &Volumes,
hosts: &Hosts,
) -> Result<Option<MigrationRes>, Error> {
Ok(
if let Some((_, migration)) =
self.to.iter().find(|(range, _)| version.satisfies(*range))
{
Some(
migration
.execute(pkg_id, pkg_version, volumes, hosts, Some(version), false)
.await?
.map_err(|e| {
Error::new(anyhow!("{}", e.1), crate::ErrorKind::MigrationFailed)
})?,
)
} else {
None
},
)
}
}
/// Result reported by a migration action.
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct MigrationRes {
    /// Whether the service counts as configured after the migration ran
    /// (as reported by the action — confirm exact semantics upstream).
    pub configured: bool,
}

24
appmgr/src/net/host.rs Normal file
View File

@@ -0,0 +1,24 @@
use std::ffi::{OsStr, OsString};
use std::net::Ipv4Addr;
use indexmap::IndexMap;
use patch_db::DbHandle;
use serde::{Deserialize, Serialize};
use crate::s9pk::manifest::PackageId;
use crate::{Error, HOST_IP};
/// Pseudo-TLD used for inter-service hostnames (e.g. `<id>.embassy`).
pub const TLD: &'static str = "embassy";
/// Map of installed package id -> the container IP assigned to it.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct Hosts(pub IndexMap<PackageId, Ipv4Addr>);
impl Hosts {
    /// Renders this host table as `--add-host` flags for `docker run`: the
    /// bare TLD pointing at the host IP first, then one entry per
    /// registered package at `<id>.<TLD>`.
    pub fn docker_args(&self) -> Vec<OsString> {
        let host_entry: OsString =
            format!("--add-host={}:{}", TLD, Ipv4Addr::from(HOST_IP)).into();
        std::iter::once(host_entry)
            .chain(
                self.0
                    .iter()
                    .map(|(id, ip)| format!("--add-host={}.{}:{}", id, TLD, ip).into()),
            )
            .collect()
    }
}

109
appmgr/src/net/mdns.rs Normal file
View File

@@ -0,0 +1,109 @@
use avahi_sys;
use futures::future::pending;
use patch_db::{DbHandle, OptionModel};
use crate::db::model::{InterfaceAddressesModel, InterfaceInfoModel};
use crate::util::Apply;
use crate::Error;
/// Publishes a `.local` mDNS CNAME (via avahi) for every installed
/// package interface that has a LAN address recorded in the database,
/// aliasing each address to this machine's avahi hostname. Returns a
/// [`LanHandle`] whose `Drop` withdraws the records.
pub async fn enable_lan<Db: DbHandle>(mut db: Db) -> Result<LanHandle, Error> {
    unsafe {
        // let app_list = crate::apps::list_info().await?;
        // NOTE(review): `simple_poll` and `avahi_client` are never freed —
        // `LanHandle` only releases the entry group; confirm this lifetime
        // is intentional.
        let simple_poll = avahi_sys::avahi_simple_poll_new();
        let poll = avahi_sys::avahi_simple_poll_get(simple_poll);
        let mut stack_err = 0;
        let err_c: *mut i32 = &mut stack_err;
        // NOTE(review): the error code written through `err_c` is never
        // inspected, and `avahi_client_new` can return null on failure.
        let avahi_client = avahi_sys::avahi_client_new(
            poll,
            avahi_sys::AvahiClientFlags::AVAHI_CLIENT_NO_FAIL,
            None,
            std::ptr::null_mut(),
            err_c,
        );
        let group =
            avahi_sys::avahi_entry_group_new(avahi_client, Some(noop), std::ptr::null_mut());
        let hostname_raw = avahi_sys::avahi_client_get_host_name_fqdn(avahi_client);
        let hostname_bytes = std::ffi::CStr::from_ptr(hostname_raw).to_bytes_with_nul();
        // Build the hostname in DNS wire format (length-prefixed labels).
        // The fqdn is assumed to be exactly "<15-char-name>.local".
        const HOSTNAME_LEN: usize = 1 + 15 + 1 + 5; // leading byte, main address, dot, "local"
        debug_assert_eq!(hostname_bytes.len(), HOSTNAME_LEN);
        let mut hostname_buf = [0; HOSTNAME_LEN + 1];
        hostname_buf[1..].copy_from_slice(hostname_bytes);
        // assume fixed length prefix on hostname due to local address
        hostname_buf[0] = 15; // set the prefix length to 15 for the main address
        hostname_buf[16] = 5; // set the prefix length to 5 for "local"
        // Walk every installed package in the database...
        for app_id in crate::db::DatabaseModel::new()
            .package_data()
            .keys(&mut db)
            .await?
        {
            // ...skipping any that are not installed or have no recorded
            // interface addresses.
            let iface_model = if let Some(model) = crate::db::DatabaseModel::new()
                .package_data()
                .idx_model(&app_id)
                .expect(&mut db)
                .await?
                .installed()
                .map(|i| i.interface_info().addresses())
                .apply(OptionModel::from)
                .check(&mut db)
                .await?
            {
                model
            } else {
                continue;
            };
            // Publish one CNAME per interface that has a LAN address.
            for iface in iface_model.keys(&mut db).await? {
                let lan_address = if let Some(addr) = iface_model
                    .clone()
                    .idx_model(&iface)
                    .expect(&mut db)
                    .await?
                    .lan_address()
                    .get(&mut db)
                    .await?
                    .to_owned()
                {
                    addr
                } else {
                    continue;
                };
                let lan_address_ptr = std::ffi::CString::new(lan_address)
                    .expect("Could not cast lan address to c string");
                // NOTE(review): the return code is discarded — failures to
                // add a record are silently ignored.
                let _ = avahi_sys::avahi_entry_group_add_record(
                    group,
                    avahi_sys::AVAHI_IF_UNSPEC,
                    avahi_sys::AVAHI_PROTO_UNSPEC,
                    avahi_sys::AvahiPublishFlags_AVAHI_PUBLISH_USE_MULTICAST
                        | avahi_sys::AvahiPublishFlags_AVAHI_PUBLISH_ALLOW_MULTIPLE,
                    lan_address_ptr.as_ptr(),
                    avahi_sys::AVAHI_DNS_CLASS_IN as u16,
                    avahi_sys::AVAHI_DNS_TYPE_CNAME as u16,
                    avahi_sys::AVAHI_DEFAULT_TTL,
                    hostname_buf.as_ptr().cast(),
                    hostname_buf.len(),
                );
                log::info!("Published {:?}", lan_address_ptr);
            }
        }
        avahi_sys::avahi_entry_group_commit(group);
        Ok(LanHandle(group))
    }
}
/// No-op avahi entry-group callback; group state changes are ignored.
unsafe extern "C" fn noop(
    _group: *mut avahi_sys::AvahiEntryGroup,
    _state: avahi_sys::AvahiEntryGroupState,
    _userdata: *mut core::ffi::c_void,
) {
}
/// RAII handle for the published mDNS records: dropping it resets and
/// frees the avahi entry group, withdrawing every record added in
/// [`enable_lan`].
///
/// NOTE(review): the avahi client and simple poll created alongside the
/// group are *not* freed here — confirm that is intentional. The raw
/// pointer also makes this type `!Send`/`!Sync` by default.
pub struct LanHandle(*mut avahi_sys::AvahiEntryGroup);
impl Drop for LanHandle {
    fn drop(&mut self) {
        unsafe {
            avahi_sys::avahi_entry_group_reset(self.0);
            avahi_sys::avahi_entry_group_free(self.0);
        }
    }
}

62
appmgr/src/net/mod.rs Normal file
View File

@@ -0,0 +1,62 @@
use std::net::Ipv4Addr;
use anyhow::anyhow;
use id_pool::IdPool;
use patch_db::HasModel;
use serde::{Deserialize, Serialize};
use self::host::Hosts;
use crate::s9pk::manifest::PackageId;
use crate::{Error, ResultExt};
pub mod host;
#[cfg(feature = "avahi")]
pub mod mdns;
pub mod tor;
/// Allocator for container IPs: hands out `HOST_IP + id` addresses backed
/// by a reusable pool of `u16` ids.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct IpPool(IdPool);
impl IpPool {
    /// Creates an empty pool backed by an `id_pool::IdPool` of `u16` ids.
    pub fn new() -> Self {
        let pool = IdPool::new();
        IpPool(pool)
    }
    /// Allocates the next free address as `HOST_IP + id`.
    ///
    /// Returns `None` when the underlying id pool is exhausted.
    pub fn get(&mut self) -> Option<Ipv4Addr> {
        let id = self.0.request_id()?;
        // NOTE(review): assumes `HOST_IP + id` stays within the intended
        // subnet — confirm the pool's id range against the subnet size.
        let ip = u32::from_be_bytes(crate::HOST_IP) + id as u32;
        Some(ip.into())
    }
    /// Returns a previously allocated address to the pool.
    ///
    /// Addresses that cannot have come from this pool (below `HOST_IP`, or
    /// offset beyond `u16::MAX`) are ignored; the previous unchecked
    /// subtraction would panic in debug builds (and wrap, corrupting the
    /// pool, in release builds) for such inputs.
    pub fn put(&mut self, ip: Ipv4Addr) {
        let ip = u32::from_be_bytes(ip.octets());
        let base = u32::from_be_bytes(crate::HOST_IP);
        if let Some(offset) = ip.checked_sub(base) {
            if offset <= u16::MAX as u32 {
                // Double-frees are already tolerated: `return_id` errors
                // are discarded, matching the original behavior.
                let _ = self.0.return_id(offset as u16);
            }
        }
    }
}
impl Default for IpPool {
    /// Equivalent to [`IpPool::new`].
    fn default() -> Self {
        Self::new()
    }
}
/// Persistent network state: the IP allocator plus the table of assigned
/// package hosts.
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel)]
pub struct Network {
    /// Allocator for container addresses.
    pub ip_pool: IpPool,
    /// Package id -> assigned IP.
    pub hosts: Hosts,
}
impl Network {
    /// Returns the IP assigned to `id`, allocating (and recording) a fresh
    /// one from the pool on first registration. Idempotent per package id.
    ///
    /// # Errors
    /// `ErrorKind::Network` when the IP pool is exhausted.
    pub fn register_host(&mut self, id: &PackageId) -> Result<Ipv4Addr, Error> {
        if let Some(existing) = self.hosts.0.get(id) {
            return Ok(*existing);
        }
        let ip = self
            .ip_pool
            .get()
            .ok_or_else(|| anyhow!("No available IP addresses"))
            .with_kind(crate::ErrorKind::Network)?;
        self.hosts.0.insert(id.clone(), ip);
        Ok(ip)
    }
}

View File

@@ -1,14 +1,13 @@
use std::collections::{BTreeSet, HashMap};
use std::net::Ipv4Addr;
use std::os::unix::process::ExitStatusExt;
use std::path::Path;
use std::path::{Path, PathBuf};
use std::time::{Duration, Instant};
use failure::ResultExt as _;
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWriteExt;
use anyhow::anyhow;
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use crate::util::{Invoke, PersistencePath, YamlUpdateHandle};
use crate::util::Invoke;
use crate::{Error, ResultExt as _};
#[derive(Debug, Clone, Copy, serde::Deserialize, serde::Serialize)]
@@ -81,17 +80,17 @@ impl From<HiddenServiceVersion> for usize {
}
}
}
impl std::convert::TryFrom<usize> for HiddenServiceVersion {
type Error = failure::Error;
fn try_from(v: usize) -> Result<Self, Self::Error> {
Ok(match v {
1 => HiddenServiceVersion::V1,
2 => HiddenServiceVersion::V2,
3 => HiddenServiceVersion::V3,
n => bail!("Invalid HiddenServiceVersion {}", n),
})
}
}
// impl std::convert::TryFrom<usize> for HiddenServiceVersion {
// type Error = anyhow::Error;
// fn try_from(v: usize) -> Result<Self, Self::Error> {
// Ok(match v {
// 1 => HiddenServiceVersion::V1,
// 2 => HiddenServiceVersion::V2,
// 3 => HiddenServiceVersion::V3,
// n => bail!("Invalid HiddenServiceVersion {}", n),
// })
// }
// }
impl Default for HiddenServiceVersion {
fn default() -> Self {
HiddenServiceVersion::V3
@@ -167,26 +166,15 @@ impl ServicesMap {
}
}
pub async fn services_map(path: &PersistencePath) -> Result<ServicesMap, Error> {
let f = path.maybe_read(false).await.transpose()?;
if let Some(mut f) = f {
crate::util::from_yaml_async_reader(&mut *f).await
} else {
Ok(Default::default())
}
}
pub async fn services_map_mut(
path: PersistencePath,
) -> Result<YamlUpdateHandle<ServicesMap>, Error> {
YamlUpdateHandle::new_or_default(path).await
}
pub async fn write_services(hidden_services: &ServicesMap) -> Result<(), Error> {
tokio::fs::copy(crate::TOR_RC, ETC_TOR_RC)
.await
.with_context(|e| format!("{} -> {}: {}", crate::TOR_RC, ETC_TOR_RC, e))
.with_code(crate::error::FILESYSTEM_ERROR)?;
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
format!("{} -> {}", crate::TOR_RC, ETC_TOR_RC),
)
})?;
let mut f = tokio::fs::OpenOptions::new()
.append(true)
.open(ETC_TOR_RC)
@@ -233,27 +221,29 @@ pub async fn write_lan_services(hidden_services: &ServicesMap) -> Result<(), Err
.join("hostname"),
)
.await
.with_context(|e| format!("{}/app-{}/hostname: {}", HIDDEN_SERVICE_DIR_ROOT, app_id, e))
.with_code(crate::error::FILESYSTEM_ERROR)?;
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
format!("{}/app-{}/hostname", HIDDEN_SERVICE_DIR_ROOT, app_id),
)
})?;
let hostname_str = hostname
.trim()
.strip_suffix(".onion")
.ok_or_else(|| failure::format_err!("invalid tor hostname"))
.no_code()?;
.ok_or_else(|| anyhow!("{}", hostname))
.with_kind(crate::ErrorKind::InvalidOnionAddress)?;
for mapping in &service.ports {
match &mapping.lan {
Some(LanOptions::Standard) => {
log::info!("Writing LAN certificates for {}", app_id);
let base_path = PersistencePath::from_ref("apps").join(&app_id);
let key_path = base_path.join("cert-local.key.pem").path();
let conf_path = base_path.join("cert-local.csr.conf").path();
let req_path = base_path.join("cert-local.csr").path();
let cert_path = base_path.join("cert-local.crt.pem").path();
let base_path: PathBuf = todo!(); //PersistencePath::from_ref("apps").join(&app_id);
let key_path = base_path.join("cert-local.key.pem");
let conf_path = base_path.join("cert-local.csr.conf");
let req_path = base_path.join("cert-local.csr");
let cert_path = base_path.join("cert-local.crt.pem");
let fullchain_path = base_path.join("cert-local.fullchain.crt.pem");
if !fullchain_path.exists().await
|| tokio::fs::metadata(&key_path).await.is_err()
{
let mut fullchain_file = fullchain_path.write(None).await?;
if !fullchain_path.exists() || tokio::fs::metadata(&key_path).await.is_err() {
let mut fullchain_file = tokio::fs::File::create(&fullchain_path).await?;
tokio::process::Command::new("openssl")
.arg("ecparam")
.arg("-genkey")
@@ -262,8 +252,12 @@ pub async fn write_lan_services(hidden_services: &ServicesMap) -> Result<(), Err
.arg("-noout")
.arg("-out")
.arg(&key_path)
.invoke("OpenSSL GenKey")
.await?;
.invoke(crate::ErrorKind::OpenSSL)
.await
.map_err(|e| {
let ctx = format!("GenKey: {}", e);
crate::Error::new(e.source.context(ctx), e.kind)
})?;
tokio::fs::write(
&conf_path,
format!(
@@ -286,8 +280,12 @@ pub async fn write_lan_services(hidden_services: &ServicesMap) -> Result<(), Err
))
.arg("-out")
.arg(&req_path)
.invoke("OpenSSL Req")
.await?;
.invoke(crate::ErrorKind::OpenSSL)
.await
.map_err(|e| {
let ctx = format!("Req: {}", e);
crate::Error::new(e.source.context(ctx), e.kind)
})?;
tokio::process::Command::new("openssl")
.arg("ca")
.arg("-batch")
@@ -307,12 +305,16 @@ pub async fn write_lan_services(hidden_services: &ServicesMap) -> Result<(), Err
.arg(&req_path)
.arg("-out")
.arg(&cert_path)
.invoke("OpenSSL CA")
.await?;
log::info!("Writing fullchain to: {}", fullchain_path.path().display());
.invoke(crate::ErrorKind::OpenSSL)
.await
.map_err(|e| {
let ctx = format!("CA: {}", e);
crate::Error::new(e.source.context(ctx), e.kind)
})?;
log::info!("Writing fullchain to: {}", fullchain_path.display());
tokio::io::copy(
&mut tokio::fs::File::open(&cert_path).await?,
&mut *fullchain_file,
&mut fullchain_file,
)
.await?;
tokio::io::copy(
@@ -320,14 +322,13 @@ pub async fn write_lan_services(hidden_services: &ServicesMap) -> Result<(), Err
"/root/agent/ca/intermediate/certs/embassy-int-ca.crt.pem",
)
.await
.with_context(|e| {
format!(
"{}: /root/agent/ca/intermediate/certs/embassy-int-ca.crt.pem",
e
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
"/root/agent/ca/intermediate/certs/embassy-int-ca.crt.pem",
)
})
.with_code(crate::error::FILESYSTEM_ERROR)?,
&mut *fullchain_file,
})?,
&mut fullchain_file,
)
.await?;
tokio::io::copy(
@@ -335,15 +336,16 @@ pub async fn write_lan_services(hidden_services: &ServicesMap) -> Result<(), Err
"/root/agent/ca/certs/embassy-root-ca.cert.pem",
)
.await
.with_context(|e| {
format!("{}: /root/agent/ca/certs/embassy-root-ca.cert.pem", e)
})
.with_code(crate::error::FILESYSTEM_ERROR)?,
&mut *fullchain_file,
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
"/root/agent/ca/certs/embassy-root-ca.cert.pem",
)
})?,
&mut fullchain_file,
)
.await?;
fullchain_file.commit().await?;
log::info!("{} written successfully", fullchain_path.path().display());
log::info!("{} written successfully", fullchain_path.display());
}
f.write_all(
format!(
@@ -402,12 +404,15 @@ pub async fn read_tor_address(name: &str, timeout: Option<Duration>) -> Result<S
}
}
let tor_addr = match tokio::fs::read_to_string(&addr_path).await {
Err(e) if e.kind() == std::io::ErrorKind::NotFound => Err(e)
.with_context(|e| format!("{}: {}", addr_path.display(), e))
.with_code(crate::error::NOT_FOUND),
a => a
.with_context(|e| format!("{}: {}", addr_path.display(), e))
.with_code(crate::error::FILESYSTEM_ERROR),
Err(e) if e.kind() == std::io::ErrorKind::NotFound => {
Err(e).with_ctx(|_| (crate::ErrorKind::NotFound, addr_path.display().to_string()))
}
a => a.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
addr_path.display().to_string(),
)
}),
}?;
Ok(tor_addr.trim().to_owned())
}
@@ -443,18 +448,24 @@ pub async fn read_tor_key(
}
let tor_key = match version {
HiddenServiceVersion::V3 => {
let mut f = tokio::fs::File::open(&addr_path)
.await
.with_context(|e| format!("{}: {}", e, addr_path.display()))
.with_code(crate::error::FILESYSTEM_ERROR)?;
let mut f = tokio::fs::File::open(&addr_path).await.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
addr_path.display().to_string(),
)
})?;
let mut buf = [0; 96];
f.read_exact(&mut buf).await?;
base32::encode(base32::Alphabet::RFC4648 { padding: false }, &buf[32..]).to_lowercase()
}
_ => tokio::fs::read_to_string(&addr_path)
.await
.with_context(|e| format!("{}: {}", e, addr_path.display()))
.with_code(crate::error::FILESYSTEM_ERROR)?
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
addr_path.display().to_string(),
)
})?
.trim_end_matches("\u{0}")
.to_string(),
};
@@ -471,8 +482,8 @@ pub async fn set_svc(
crate::SERVICES_YAML
);
let is_listening = !service.ports.is_empty();
let path = PersistencePath::from_ref(crate::SERVICES_YAML);
let mut hidden_services = services_map_mut(path).await?;
// let path = PersistencePath::from_ref(crate::SERVICES_YAML);
let mut hidden_services: ServicesMap = todo!(); //services_map_mut(path).await?;
let ver = service.hidden_service_version;
let ip = hidden_services.add(name.to_owned(), service);
log::info!("Adding Tor hidden service {} to {}.", name, ETC_TOR_RC);
@@ -495,7 +506,7 @@ pub async fn set_svc(
.status()?;
crate::ensure_code!(
svc_exit.success(),
crate::error::GENERAL_ERROR,
crate::ErrorKind::Tor,
"Failed to Reload Tor: {}",
svc_exit
.code()
@@ -519,14 +530,13 @@ pub async fn set_svc(
.status()?;
crate::ensure_code!(
svc_exit.success(),
crate::error::GENERAL_ERROR,
crate::ErrorKind::Nginx,
"Failed to Reload Nginx: {}",
svc_exit
.code()
.or_else(|| { svc_exit.signal().map(|a| 128 + a) })
.unwrap_or(0)
);
hidden_services.commit().await?;
Ok((ip, addr, key))
}
@@ -536,8 +546,8 @@ pub async fn rm_svc(name: &str) -> Result<(), Error> {
name,
crate::SERVICES_YAML
);
let path = PersistencePath::from_ref(crate::SERVICES_YAML);
let mut hidden_services = services_map_mut(path).await?;
// let path = PersistencePath::from_ref(crate::SERVICES_YAML);
let mut hidden_services: ServicesMap = todo!(); //services_map_mut(path).await?;
hidden_services.remove(name);
let hidden_service_path = Path::new(HIDDEN_SERVICE_DIR_ROOT).join(format!("app-{}", name));
log::info!("Removing {}", hidden_service_path.display());
@@ -552,7 +562,7 @@ pub async fn rm_svc(name: &str) -> Result<(), Error> {
.status()?;
crate::ensure_code!(
svc_exit.success(),
crate::error::GENERAL_ERROR,
crate::ErrorKind::Tor,
"Failed to Reload Tor: {}",
svc_exit.code().unwrap_or(0)
);
@@ -563,14 +573,13 @@ pub async fn rm_svc(name: &str) -> Result<(), Error> {
.status()?;
crate::ensure_code!(
svc_exit.success(),
crate::error::GENERAL_ERROR,
crate::ErrorKind::Nginx,
"Failed to Reload Nginx: {}",
svc_exit
.code()
.or_else(|| { svc_exit.signal().map(|a| 128 + a) })
.unwrap_or(0)
);
hidden_services.commit().await?;
Ok(())
}
@@ -583,8 +592,12 @@ pub async fn change_key(
if hidden_service_path.exists() {
tokio::fs::remove_dir_all(&hidden_service_path)
.await
.with_context(|e| format!("{}: {}", hidden_service_path.display(), e))
.with_code(crate::error::FILESYSTEM_ERROR)?;
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
hidden_service_path.display().to_string(),
)
})?;
}
if let Some(key) = key {
tokio::fs::create_dir_all(&hidden_service_path).await?;
@@ -593,8 +606,7 @@ pub async fn change_key(
key_data.extend_from_slice(&key.to_bytes());
tokio::fs::write(&key_path, key_data)
.await
.with_context(|e| format!("{}: {}", key_path.display(), e))
.with_code(crate::error::FILESYSTEM_ERROR)?;
.with_ctx(|_| (crate::ErrorKind::Filesystem, key_path.display().to_string()))?;
}
log::info!("Reloading Tor.");
let svc_exit = std::process::Command::new("service")
@@ -602,22 +614,22 @@ pub async fn change_key(
.status()?;
crate::ensure_code!(
svc_exit.success(),
crate::error::GENERAL_ERROR,
crate::ErrorKind::Tor,
"Failed to Reload Tor: {}",
svc_exit.code().unwrap_or(0)
);
let mut info = crate::apps::list_info_mut().await?;
if let Some(mut i) = info.get_mut(name) {
if i.tor_address.is_some() {
i.tor_address = Some(read_tor_address(name, Some(Duration::from_secs(30))).await?);
}
}
// let mut info = crate::apps::list_info_mut().await?;
// if let Some(mut i) = info.get_mut(name) {
// if i.tor_address.is_some() {
// i.tor_address = Some(read_tor_address(name, Some(Duration::from_secs(30))).await?);
// }
// }
Ok(())
}
pub async fn reload() -> Result<(), Error> {
let path = PersistencePath::from_ref(crate::SERVICES_YAML);
let hidden_services = services_map(&path).await?;
// let path = PersistencePath::from_ref(crate::SERVICES_YAML);
let hidden_services = todo!(); //services_map(&path).await?;
log::info!("Syncing Tor hidden services to {}.", ETC_TOR_RC);
write_services(&hidden_services).await?;
log::info!("Reloading Tor.");
@@ -626,7 +638,7 @@ pub async fn reload() -> Result<(), Error> {
.status()?;
crate::ensure_code!(
svc_exit.success(),
crate::error::GENERAL_ERROR,
crate::ErrorKind::Tor,
"Failed to Reload Tor: {}",
svc_exit.code().unwrap_or(0)
);
@@ -634,8 +646,8 @@ pub async fn reload() -> Result<(), Error> {
}
pub async fn restart() -> Result<(), Error> {
let path = PersistencePath::from_ref(crate::SERVICES_YAML);
let hidden_services = services_map(&path).await?;
// let path = PersistencePath::from_ref(crate::SERVICES_YAML);
let hidden_services = todo!(); //services_map(&path).await?;
log::info!("Syncing Tor hidden services to {}.", ETC_TOR_RC);
write_services(&hidden_services).await?;
log::info!("Restarting Tor.");
@@ -644,7 +656,7 @@ pub async fn restart() -> Result<(), Error> {
.status()?;
crate::ensure_code!(
svc_exit.success(),
crate::error::GENERAL_ERROR,
crate::ErrorKind::Tor,
"Failed to Restart Tor: {}",
svc_exit.code().unwrap_or(0)
);

View File

@@ -1,388 +0,0 @@
use std::borrow::Cow;
use std::path::{Path, PathBuf};
use failure::ResultExt;
use futures::stream::StreamExt;
use linear_map::LinearMap;
use rand::SeedableRng;
use tokio_tar as tar;
use crate::config::{ConfigRuleEntry, ConfigSpec};
use crate::manifest::{ImageConfig, Manifest};
use crate::util::{from_cbor_async_reader, from_json_async_reader, from_yaml_async_reader};
use crate::version::VersionT;
/// Errors specific to building an s9pk package from a source directory.
#[derive(Clone, Debug, Fail)]
pub enum Error {
    /// The source directory name could not be interpreted (e.g. non-UTF-8).
    #[fail(display = "Invalid Directory Name: {}", _0)]
    InvalidDirectoryName(String),
    #[fail(display = "Invalid File Name: {}", _0)]
    InvalidFileName(String),
    #[fail(display = "Invalid Output Path: {}", _0)]
    InvalidOutputPath(String),
}
pub async fn pack(path: &str, output: &str) -> Result<(), failure::Error> {
let path = Path::new(path.trim_end_matches("/"));
let output = Path::new(output);
log::info!(
"Starting pack of {} to {}.",
path.file_name()
.and_then(|a| a.to_str())
.ok_or_else(|| Error::InvalidDirectoryName(format!("{}", path.display())))?,
output.display(),
);
let out_file = tokio::fs::File::create(output).await?;
let mut out = tar::Builder::new(out_file);
log::info!("Reading {}/manifest.yaml.", path.display());
let manifest: Manifest = crate::util::from_yaml_async_reader(
tokio::fs::File::open(path.join("manifest.yaml"))
.await
.with_context(|e| format!("{}: manifest.yaml", e))?,
)
.await?;
log::info!("Writing manifest to archive.");
let bin_manifest = serde_cbor::to_vec(&manifest)?;
let mut manifest_header = tar::Header::new_gnu();
manifest_header.set_size(bin_manifest.len() as u64);
out.append_data(
&mut manifest_header,
"manifest.cbor",
std::io::Cursor::new(bin_manifest),
)
.await?;
let manifest = manifest.into_latest();
ensure!(
crate::version::Current::new()
.semver()
.satisfies(&manifest.os_version_required),
"Unsupported AppMgr version: expected {}",
manifest.os_version_required
);
log::info!("Reading {}/config_spec.yaml.", path.display());
let config_spec: ConfigSpec = from_yaml_async_reader(
tokio::fs::File::open(path.join("config_spec.yaml"))
.await
.with_context(|e| format!("{}: config_spec.yaml", e))?,
)
.await?;
log::info!("Writing config spec to archive.");
let bin_config_spec = serde_cbor::to_vec(&config_spec)?;
let mut config_spec_header = tar::Header::new_gnu();
config_spec_header.set_size(bin_config_spec.len() as u64);
out.append_data(
&mut config_spec_header,
"config_spec.cbor",
std::io::Cursor::new(bin_config_spec),
)
.await?;
log::info!("Reading {}/config_rules.yaml.", path.display());
let config_rules: Vec<ConfigRuleEntry> = from_yaml_async_reader(
tokio::fs::File::open(path.join("config_rules.yaml"))
.await
.with_context(|e| format!("{}: config_rules.yaml", e))?,
)
.await?;
log::info!("Writing config rules to archive.");
let bin_config_rules = serde_cbor::to_vec(&config_rules)?;
let mut config_rules_header = tar::Header::new_gnu();
config_rules_header.set_size(bin_config_rules.len() as u64);
out.append_data(
&mut config_rules_header,
"config_rules.cbor",
std::io::Cursor::new(bin_config_rules),
)
.await?;
if manifest.has_instructions {
log::info!("Packing instructions.md");
out.append_path_with_name(path.join("instructions.md"), "instructions.md")
.await?;
}
log::info!("Copying over assets.");
for asset in &manifest.assets {
let src_path = Path::new("assets").join(&asset.src);
log::info!("Reading {}/{}.", path.display(), src_path.display());
let file_path = path.join(&src_path);
let src = tokio::fs::File::open(&file_path)
.await
.with_context(|e| format!("{}: {}", e, src_path.display()))?;
log::info!("Writing {} to archive.", src_path.display());
if src.metadata().await?.is_dir() {
out.append_dir_all(&asset.src, &file_path).await?;
let mut h = tar::Header::new_gnu();
h.set_size(0);
h.set_path(format!("APPMGR_DIR_END:{}", asset.src.display()))?;
h.set_cksum();
out.append(&h, tokio::io::empty()).await?;
} else {
out.append_path_with_name(&file_path, &asset.src).await?;
}
}
match manifest.image {
ImageConfig::Tar => {
log::info!("Reading {}/image.tar.", path.display());
let image = tokio::fs::File::open(path.join("image.tar"))
.await
.with_context(|e| format!("{}: image.tar", e))?;
log::info!("Writing image.tar to archive.");
let mut header = tar::Header::new_gnu();
header.set_size(image.metadata().await?.len());
out.append_data(&mut header, "image.tar", image).await?;
}
}
out.into_inner().await?;
Ok(())
}
pub fn validate_path<P: AsRef<Path>>(p: P) -> Result<(), Error> {
let path = p.as_ref();
if path.is_absolute() {
return Err(Error::InvalidFileName(format!("{}", path.display())));
}
for seg in path {
if seg == ".." {
return Err(Error::InvalidFileName(format!("{}", path.display())));
}
}
Ok(())
}
pub async fn verify(path: &str) -> Result<(), failure::Error> {
let path = Path::new(path.trim_end_matches("/"));
ensure!(
path.extension()
.and_then(|a| a.to_str())
.ok_or_else(|| Error::InvalidFileName(format!("{}", path.display())))?
== "s9pk",
"Extension Must Be '.s9pk'"
);
let name = path
.file_stem()
.and_then(|a| a.to_str())
.ok_or_else(|| Error::InvalidFileName(format!("{}", path.display())))?;
ensure!(
!name.starts_with("start9")
&& name
.chars()
.filter(|c| !c.is_alphanumeric() && c != &'-')
.next()
.is_none(),
"Invalid Application ID"
);
log::info!(
"Starting verification of {}.",
path.file_name()
.and_then(|a| a.to_str())
.ok_or_else(|| Error::InvalidFileName(format!("{}", path.display())))?,
);
{}
log::info!("Opening file.");
let r = tokio::fs::File::open(&path)
.await
.with_context(|e| format!("{}: {}", path.display(), e))?;
log::info!("Extracting archive.");
let mut pkg = tar::Archive::new(r);
let mut entries = pkg.entries()?;
log::info!("Opening manifest from archive.");
let manifest = entries
.next()
.await
.ok_or_else(|| format_err!("missing manifest"))??;
ensure!(
manifest.path()?.to_str() == Some("manifest.cbor"),
"Package File Invalid or Corrupted: expected manifest.cbor, got {}",
manifest.path()?.display()
);
log::trace!("Deserializing manifest.");
let manifest: Manifest = from_cbor_async_reader(manifest).await?;
let manifest = manifest.into_latest();
ensure!(
crate::version::Current::new()
.semver()
.satisfies(&manifest.os_version_required),
"Unsupported AppMgr Version: expected {}",
manifest.os_version_required
);
ensure!(manifest.id == name, "Package Name Does Not Match Expected",);
if let (Some(public), Some(shared)) = (&manifest.public, &manifest.shared) {
ensure!(
!public.starts_with(shared) && !shared.starts_with(public),
"Public Directory Conflicts With Shared Directory"
)
}
if let Some(public) = &manifest.public {
validate_path(public)?;
}
if let Some(shared) = &manifest.shared {
validate_path(shared)?;
}
for action in &manifest.actions {
ensure!(
!action.command.is_empty(),
"Command Cannot Be Empty: {}",
action.id
);
}
log::info!("Opening config spec from archive.");
let config_spec = entries
.next()
.await
.ok_or_else(|| format_err!("missing config spec"))??;
ensure!(
config_spec.path()?.to_str() == Some("config_spec.cbor"),
"Package File Invalid or Corrupted: expected config_rules.cbor, got {}",
config_spec.path()?.display()
);
log::trace!("Deserializing config spec.");
let config_spec: ConfigSpec = from_cbor_async_reader(config_spec).await?;
log::trace!("Validating config spec.");
config_spec.validate(&manifest)?;
let config = config_spec.gen(&mut rand::rngs::StdRng::from_entropy(), &None)?;
config_spec.matches(&config)?;
log::info!("Opening config rules from archive.");
let config_rules = entries
.next()
.await
.ok_or_else(|| format_err!("missing config rules"))??;
ensure!(
config_rules.path()?.to_str() == Some("config_rules.cbor"),
"Package File Invalid or Corrupted: expected config_rules.cbor, got {}",
config_rules.path()?.display()
);
log::trace!("Deserializing config rules.");
let config_rules: Vec<ConfigRuleEntry> = from_cbor_async_reader(config_rules).await?;
log::trace!("Validating config rules against config spec.");
let mut cfgs = LinearMap::new();
cfgs.insert(name, Cow::Borrowed(&config));
for rule in &config_rules {
rule.check(&config, &cfgs)
.with_context(|e| format!("Default Config does not satisfy: {}", e))?;
}
if manifest.has_instructions {
let instructions = entries
.next()
.await
.ok_or_else(|| format_err!("missing instructions"))??;
ensure!(
instructions.path()?.to_str() == Some("instructions.md"),
"Package File Invalid or Corrupted: expected instructions.md, got {}",
instructions.path()?.display()
);
}
for asset_info in manifest.assets {
validate_path(&asset_info.src)?;
validate_path(&asset_info.dst)?;
let asset = entries
.next()
.await
.ok_or_else(|| format_err!("missing asset: {}", asset_info.src.display()))??;
if asset.header().entry_type().is_file() {
ensure!(
asset.path()?.to_str() == Some(&format!("{}", asset_info.src.display())),
"Package File Invalid or Corrupted: expected {}, got {}",
asset_info.src.display(),
asset.path()?.display()
);
} else if asset.header().entry_type().is_dir() {
ensure!(
asset.path()?.to_str() == Some(&format!("{}/", asset_info.src.display())),
"Package File Invalid or Corrupted: expected {}, got {}",
asset_info.src.display(),
asset.path()?.display()
);
loop {
let file = entries.next().await.ok_or_else(|| {
format_err!(
"missing directory end marker: APPMGR_DIR_END:{}",
asset_info.src.display()
)
})??;
if file
.path()?
.starts_with(format!("APPMGR_DIR_END:{}", asset_info.src.display()))
{
break;
} else {
ensure!(
file.path()?
.to_str()
.map(|p| p.starts_with(&format!("{}/", asset_info.src.display())))
.unwrap_or(false),
"Package File Invalid or Corrupted: expected {}, got {}",
asset_info.src.display(),
asset.path()?.display()
);
}
}
} else {
bail!("Asset Not Regular File: {}", asset_info.src.display());
}
}
match &manifest.image {
ImageConfig::Tar => {
#[derive(Clone, Debug, serde::Deserialize)]
#[serde(rename_all = "PascalCase")]
struct DockerManifest {
config: PathBuf,
repo_tags: Vec<String>,
layers: Vec<PathBuf>,
}
let image_name = format!("start9/{}", manifest.id);
log::debug!("Opening image.tar from archive.");
let image = entries
.next()
.await
.ok_or_else(|| format_err!("missing image.tar"))??;
let image_path = image.path()?;
if image_path != Path::new("image.tar") {
return Err(format_err!(
"Package File Invalid or Corrupted: expected image.tar, got {}",
image_path.display()
));
}
log::info!("Verifying image.tar.");
let mut image_tar = tar::Archive::new(image);
let image_manifest = image_tar
.entries()?
.map(|e| {
let e = e?;
Ok((e.path()?.to_path_buf(), e))
})
.filter_map(|res: Result<(PathBuf, tar::Entry<_>), std::io::Error>| {
futures::future::ready(match res {
Ok((path, e)) => {
if path == Path::new("manifest.json") {
Some(Ok(e))
} else {
None
}
}
Err(e) => Some(Err(e)),
})
})
.next()
.await
.ok_or_else(|| format_err!("image.tar is missing manifest.json"))??;
let image_manifest: Vec<DockerManifest> =
from_json_async_reader(image_manifest).await?;
image_manifest
.into_iter()
.flat_map(|a| a.repo_tags)
.map(|t| {
if t.starts_with("start9/") {
if t.split(":").next().unwrap() != image_name {
Err(format_err!("Contains prohibited image tag: {}", t))
} else {
Ok(())
}
} else {
Ok(())
}
})
.collect::<Result<_, _>>()?;
}
};
Ok(())
}

View File

@@ -1,13 +1,11 @@
use emver::VersionRange;
use tokio_compat_02::FutureExt;
use crate::apps::AppConfig;
use crate::manifest::ManifestLatest;
use crate::Error;
use crate::ResultExt as _;
use crate::s9pk::manifest::Manifest;
use crate::{Error, ResultExt as _};
pub async fn manifest(id: &str, version: &VersionRange) -> Result<ManifestLatest, Error> {
let manifest: ManifestLatest = reqwest::get(&format!(
pub async fn manifest(id: &str, version: &VersionRange) -> Result<Manifest, Error> {
let manifest: Manifest = reqwest::get(&format!(
"{}/manifest/{}?spec={}",
&*crate::APP_REGISTRY_URL,
id,
@@ -15,12 +13,12 @@ pub async fn manifest(id: &str, version: &VersionRange) -> Result<ManifestLatest
))
.compat()
.await
.with_code(crate::error::NETWORK_ERROR)?
.with_kind(crate::ErrorKind::Network)?
.error_for_status()
.with_code(crate::error::REGISTRY_ERROR)?
.with_kind(crate::ErrorKind::Registry)?
.json()
.await
.with_code(crate::error::SERDE_ERROR)?;
.with_kind(crate::ErrorKind::Deserialization)?;
Ok(manifest)
}
@@ -38,33 +36,11 @@ pub async fn version(id: &str, version: &VersionRange) -> Result<emver::Version,
))
.compat()
.await
.with_code(crate::error::NETWORK_ERROR)?
.with_kind(crate::ErrorKind::Network)?
.error_for_status()
.with_code(crate::error::REGISTRY_ERROR)?
.with_kind(crate::ErrorKind::Registry)?
.json()
.await
.with_code(crate::error::SERDE_ERROR)?;
.with_kind(crate::ErrorKind::Deserialization)?;
Ok(version.version)
}
pub async fn config(id: &str, version: &VersionRange) -> Result<AppConfig, Error> {
let config: crate::inspect::AppConfig = reqwest::get(&format!(
"{}/config/{}?spec={}",
&*crate::APP_REGISTRY_URL,
id,
version
))
.compat()
.await
.with_code(crate::error::NETWORK_ERROR)?
.error_for_status()
.with_code(crate::error::REGISTRY_ERROR)?
.json()
.await
.with_code(crate::error::SERDE_ERROR)?;
Ok(AppConfig {
config: None,
spec: config.spec,
rules: config.rules,
})
}

View File

@@ -1,150 +0,0 @@
use crate::failure::ResultExt;
use std::path::Path;
use linear_map::LinearMap;
use crate::dependencies::{DependencyError, TaggedDependencyError};
use crate::Error;
use crate::ResultExt as _;
pub async fn remove(
name: &str,
purge: bool,
dry_run: bool,
) -> Result<LinearMap<String, TaggedDependencyError>, Error> {
let manifest = crate::apps::manifest(name).await?;
let mut res = LinearMap::new();
crate::stop_dependents(name, dry_run, DependencyError::NotInstalled, &mut res).await?;
if dry_run {
return Ok(res);
}
let image_name = format!("start9/{}", name);
log::info!("Removing app from manifest.");
crate::apps::remove(name).await?;
log::info!("Stopping docker container.");
let res = crate::control::stop_app(name, false, false)
.await
.unwrap_or_else(|e| {
log::error!("Error stopping app: {}", e);
LinearMap::new()
});
log::info!("Removing docker container.");
if !std::process::Command::new("docker")
.args(&["rm", name])
.stdout(std::process::Stdio::null())
.stderr(match log::max_level() {
log::LevelFilter::Error => std::process::Stdio::null(),
_ => std::process::Stdio::inherit(),
})
.status()?
.success()
{
log::error!("Failed to Remove Docker Container");
};
if !std::process::Command::new("docker")
.args(&["rmi", &image_name])
.stdout(std::process::Stdio::null())
.stderr(match log::max_level() {
log::LevelFilter::Error => std::process::Stdio::null(),
_ => std::process::Stdio::inherit(),
})
.status()?
.success()
{
log::error!("Failed to Remove Docker Image");
};
if purge {
log::info!("Removing tor hidden service.");
crate::tor::rm_svc(name).await?;
log::info!("Removing app metadata.");
let metadata_path = Path::new(crate::PERSISTENCE_DIR).join("apps").join(name);
tokio::fs::remove_dir_all(&metadata_path)
.await
.with_context(|e| format!("rm {}: {}", metadata_path.display(), e))
.with_code(crate::error::FILESYSTEM_ERROR)?;
log::info!("Unbinding shared filesystem.");
let installed_apps = crate::apps::list_info().await?;
for (dep, _) in manifest.dependencies.0.iter() {
let path = Path::new(crate::VOLUMES)
.join(name)
.join("start9")
.join("public")
.join(&dep);
if path.exists() {
crate::disks::unmount(&path).await?;
} else {
log::warn!("{} does not exist, skipping...", path.display());
}
let path = Path::new(crate::VOLUMES)
.join(name)
.join("start9")
.join("shared")
.join(&dep);
if path.exists() {
crate::disks::unmount(&path).await?;
} else {
log::warn!("{} does not exist, skipping...", path.display());
}
if installed_apps.contains_key(dep) {
let dep_man = crate::apps::manifest(dep).await?;
if let Some(shared) = dep_man.shared {
let path = Path::new(crate::VOLUMES).join(dep).join(&shared).join(name);
if path.exists() {
tokio::fs::remove_dir_all(&path)
.await
.with_context(|e| format!("rm {}: {}", path.display(), e))
.with_code(crate::error::FILESYSTEM_ERROR)?;
}
}
} else {
log::warn!("{} is not installed, skipping...", dep);
}
}
if manifest.public.is_some() || manifest.shared.is_some() {
for dependent in crate::apps::dependents(name, false).await? {
let path = Path::new(crate::VOLUMES)
.join(&dependent)
.join("start9")
.join("public")
.join(name);
if path.exists() {
crate::disks::unmount(&path).await?;
} else {
log::warn!("{} does not exist, skipping...", path.display());
}
let path = Path::new(crate::VOLUMES)
.join(dependent)
.join("start9")
.join("shared")
.join(name);
if path.exists() {
crate::disks::unmount(&path).await?;
} else {
log::warn!("{} does not exist, skipping...", path.display());
}
}
}
log::info!("Destroying mounted volume.");
let volume_path = Path::new(crate::VOLUMES).join(name);
tokio::fs::remove_dir_all(&volume_path)
.await
.with_context(|e| format!("rm {}: {}", volume_path.display(), e))
.with_code(crate::error::FILESYSTEM_ERROR)?;
log::info!("Pruning unused docker images.");
crate::ensure_code!(
std::process::Command::new("docker")
.args(&["image", "prune", "-a", "-f"])
.stdout(std::process::Stdio::null())
.stderr(match log::max_level() {
log::LevelFilter::Error => std::process::Stdio::null(),
_ => std::process::Stdio::inherit(),
})
.status()?
.success(),
crate::error::DOCKER_ERROR,
"Failed to Prune Docker Images"
);
};
Ok(res)
}

106
appmgr/src/s9pk/builder.rs Normal file
View File

@@ -0,0 +1,106 @@
use std::io::{Read, Seek, SeekFrom, Write};
use typed_builder::TypedBuilder;
use super::header::{FileSection, Header};
use super::manifest::Manifest;
use crate::{Error, ResultExt};
/// Builder that assembles a complete `.s9pk` package archive.
///
/// Construct via the generated `S9pkPacker::builder()` (TypedBuilder),
/// supplying a reader for each embedded file, then call `pack`.
#[derive(TypedBuilder)]
pub struct S9pkPacker<
    'a,
    W: Write + Seek,
    RLicense: Read,
    RInstructions: Read,
    RIcon: Read,
    RDockerImages: Read,
> {
    /// Destination of the archive; must be seekable so the fixed-size
    /// header can be rewritten in place after the sections are laid out.
    writer: W,
    /// Manifest to embed (serialized as CBOR by `pack`).
    manifest: &'a Manifest,
    /// License text source.
    license: RLicense,
    /// Instructions (markdown) source.
    instructions: RInstructions,
    /// Icon image source.
    icon: RIcon,
    /// Docker image tarball source.
    docker_images: RDockerImages,
}
impl<
        'a,
        W: Write + Seek,
        RLicense: Read,
        RInstructions: Read,
        RIcon: Read,
        RDockerImages: Read,
    > S9pkPacker<'a, W, RLicense, RInstructions, RIcon, RDockerImages>
{
    /// BLOCKING
    ///
    /// Writes the archive onto `self.writer`: a fixed-size placeholder
    /// header first (to reserve space), then the CBOR manifest, license,
    /// instructions, icon, and docker image tarball back-to-back, recording
    /// each section's position/length. Finally seeks back and overwrites
    /// the placeholder with the populated header, and restores the cursor
    /// to the end of the archive.
    ///
    /// Note: the signature/pubkey in the header are still placeholder
    /// values after this returns — signing happens elsewhere.
    pub fn pack(mut self) -> Result<(), Error> {
        let header_pos = self.writer.stream_position()?;
        if header_pos != 0 {
            // Not fatal, but an s9pk normally starts at offset 0.
            log::warn!("Appending to non-empty file.");
        }
        // Placeholder serializes to the same byte size as the final header,
        // so it can be overwritten in place later.
        let mut header = Header::placeholder();
        header.serialize(&mut self.writer).with_ctx(|_| {
            (
                crate::ErrorKind::Serialization,
                "Writing Placeholder Header",
            )
        })?;
        let mut position = self.writer.stream_position()?;
        // manifest
        serde_cbor::to_writer(&mut self.writer, self.manifest).with_ctx(|_| {
            (
                crate::ErrorKind::Serialization,
                "Serializing Manifest (CBOR)",
            )
        })?;
        let new_pos = self.writer.stream_position()?;
        header.table_of_contents.manifest = FileSection {
            position,
            length: new_pos - position,
        };
        position = new_pos;
        // license
        std::io::copy(&mut self.license, &mut self.writer)
            .with_ctx(|_| (crate::ErrorKind::Filesystem, "Copying License"))?;
        let new_pos = self.writer.stream_position()?;
        header.table_of_contents.license = FileSection {
            position,
            length: new_pos - position,
        };
        position = new_pos;
        // instructions
        std::io::copy(&mut self.instructions, &mut self.writer)
            .with_ctx(|_| (crate::ErrorKind::Filesystem, "Copying Instructions"))?;
        let new_pos = self.writer.stream_position()?;
        header.table_of_contents.instructions = FileSection {
            position,
            length: new_pos - position,
        };
        position = new_pos;
        // icon
        std::io::copy(&mut self.icon, &mut self.writer)
            .with_ctx(|_| (crate::ErrorKind::Filesystem, "Copying Icon"))?;
        let new_pos = self.writer.stream_position()?;
        header.table_of_contents.icon = FileSection {
            position,
            length: new_pos - position,
        };
        position = new_pos;
        // docker_images
        std::io::copy(&mut self.docker_images, &mut self.writer)
            .with_ctx(|_| (crate::ErrorKind::Filesystem, "Copying App Image"))?;
        let new_pos = self.writer.stream_position()?;
        header.table_of_contents.docker_images = FileSection {
            position,
            length: new_pos - position,
        };
        position = new_pos;
        // header: overwrite the placeholder now that all sections are known,
        // then restore the cursor to the end of the archive.
        self.writer.seek(SeekFrom::Start(header_pos))?;
        header
            .serialize(&mut self.writer)
            .with_ctx(|_| (crate::ErrorKind::Serialization, "Writing Header"))?;
        self.writer.seek(SeekFrom::Start(position))?;
        Ok(())
    }
}

169
appmgr/src/s9pk/header.rs Normal file
View File

@@ -0,0 +1,169 @@
use std::collections::HashMap;
use std::io::Write;
use anyhow::anyhow;
use ed25519_dalek::{PublicKey, Signature};
use tokio::io::{AsyncRead, AsyncReadExt};
use crate::Error;
/// File magic identifying the s9pk container format: two ASCII semicolons.
pub const MAGIC: [u8; 2] = *b";;";
/// Current s9pk container format version.
pub const VERSION: u8 = 1;
/// Fixed-size s9pk file header: the signing key, the signature over the
/// content hash, and the table of contents locating each embedded file.
pub struct Header {
    pub pubkey: PublicKey,
    pub signature: Signature,
    pub table_of_contents: TableOfContents,
}
impl Header {
    /// Returns a header with zeroed key/signature and a default table of
    /// contents, used to reserve space in the file before the real values
    /// are known (see `S9pkPacker::pack`).
    pub fn placeholder() -> Self {
        Header {
            pubkey: PublicKey::default(),
            signature: Signature::new([0; 64]),
            table_of_contents: Default::default(),
        }
    }
    // MUST BE SAME SIZE REGARDLESS OF DATA
    /// Writes magic (2 bytes) + version (1) + pubkey (32) + signature (64),
    /// then the table of contents.
    pub fn serialize<W: Write>(&self, mut writer: W) -> std::io::Result<()> {
        writer.write_all(&MAGIC)?;
        writer.write_all(&[VERSION])?;
        writer.write_all(self.pubkey.as_bytes())?;
        writer.write_all(self.signature.as_ref())?;
        self.table_of_contents.serialize(writer)?;
        Ok(())
    }
    /// Parses a header from `reader`, mirroring `serialize`.
    ///
    /// # Errors
    /// `ErrorKind::ParseS9pk` on bad magic, an unknown version, or an
    /// invalid public key; I/O errors are propagated.
    pub async fn deserialize<R: AsyncRead + Unpin>(mut reader: R) -> Result<Self, Error> {
        let mut magic = [0; 2];
        reader.read_exact(&mut magic).await?;
        if magic != MAGIC {
            return Err(Error::new(
                anyhow!("Incorrect Magic"),
                crate::ErrorKind::ParseS9pk,
            ));
        }
        let mut version = [0];
        reader.read_exact(&mut version).await?;
        if version[0] != VERSION {
            return Err(Error::new(
                anyhow!("Unknown Version"),
                crate::ErrorKind::ParseS9pk,
            ));
        }
        let mut pubkey_bytes = [0; 32];
        reader.read_exact(&mut pubkey_bytes).await?;
        let pubkey = PublicKey::from_bytes(&pubkey_bytes)
            .map_err(|e| Error::new(e, crate::ErrorKind::ParseS9pk))?;
        let mut sig_bytes = [0; 64];
        reader.read_exact(&mut sig_bytes).await?;
        let signature = Signature::new(sig_bytes);
        let table_of_contents = TableOfContents::deserialize(reader).await?;
        Ok(Header {
            pubkey,
            signature,
            table_of_contents,
        })
    }
}
/// Byte locations of each file embedded in the s9pk archive.
#[derive(Debug, Default)]
pub struct TableOfContents {
    pub manifest: FileSection,
    pub license: FileSection,
    pub instructions: FileSection,
    pub icon: FileSection,
    pub docker_images: FileSection,
}
impl TableOfContents {
    /// Serializes the table as a u32 length prefix followed by one entry
    /// per section (see `FileSection::serialize_entry`).
    ///
    /// The labels are fixed, so the serialized size is constant regardless
    /// of data — required by `Header::serialize`, which overwrites a
    /// placeholder header in place.
    pub fn serialize<W: Write>(&self, mut writer: W) -> std::io::Result<()> {
        // BUGFIX: the length prefix must cover each entry's 1-byte label
        // length AND the label bytes, not just the 16-byte FileSection
        // payloads (the old `16 * 5` undercounted, so `deserialize`'s
        // `take()` cut the table off mid-entry).
        const LABELS: [&str; 5] = [
            "manifest",
            "license",
            "instructions",
            "icon",
            "docker_images",
        ];
        let len: u32 = LABELS
            .iter()
            .map(|label| 1 + label.len() as u32 + 16)
            .sum();
        writer.write_all(&u32::to_be_bytes(len))?;
        self.manifest.serialize_entry("manifest", &mut writer)?;
        self.license.serialize_entry("license", &mut writer)?;
        self.instructions
            .serialize_entry("instructions", &mut writer)?;
        self.icon.serialize_entry("icon", &mut writer)?;
        self.docker_images
            .serialize_entry("docker_images", &mut writer)?;
        Ok(())
    }
    /// Reads the length-prefixed entry table and resolves the five
    /// required sections by label.
    ///
    /// # Errors
    /// `UnexpectedEof` if a required label is absent; I/O errors are
    /// propagated.
    pub async fn deserialize<R: AsyncRead + Unpin>(mut reader: R) -> std::io::Result<Self> {
        let mut toc_len = [0; 4];
        reader.read_exact(&mut toc_len).await?;
        let toc_len = u32::from_be_bytes(toc_len);
        // Bound reads to the declared table size so trailing archive data
        // is never consumed as table entries.
        let mut reader = reader.take(toc_len as u64);
        let mut table = HashMap::new();
        while let Some((label, section)) = FileSection::deserialize_entry(&mut reader).await? {
            table.insert(label, section);
        }
        // Looks up a required section by label, mapping absence to EOF.
        fn from_table(
            table: &HashMap<Vec<u8>, FileSection>,
            label: &str,
        ) -> std::io::Result<FileSection> {
            table.get(label.as_bytes()).copied().ok_or_else(|| {
                std::io::Error::new(
                    std::io::ErrorKind::UnexpectedEof,
                    format!("Missing Required Label: {}", label),
                )
            })
        }
        // (Removed the unused `as_opt` helper — dead code; reintroduce it
        // if/when optional sections become part of the format.)
        Ok(TableOfContents {
            manifest: from_table(&table, "manifest")?,
            license: from_table(&table, "license")?,
            instructions: from_table(&table, "instructions")?,
            icon: from_table(&table, "icon")?,
            docker_images: from_table(&table, "docker_images")?,
        })
    }
}
/// Location (offset + length, in bytes) of one embedded file within the
/// s9pk archive.
#[derive(Clone, Copy, Debug, Default)]
pub struct FileSection {
    pub position: u64,
    pub length: u64,
}
impl FileSection {
    /// Writes one table-of-contents entry: a 1-byte label length, the
    /// label bytes, then the big-endian position and length.
    pub fn serialize_entry<W: Write>(self, label: &str, mut writer: W) -> std::io::Result<()> {
        writer.write_all(&[label.len() as u8])?;
        writer.write_all(label.as_bytes())?;
        writer.write_all(&u64::to_be_bytes(self.position))?;
        writer.write_all(&u64::to_be_bytes(self.length))?;
        Ok(())
    }
    /// Reads one table-of-contents entry, mirroring `serialize_entry`.
    /// Returns `Ok(None)` on a clean EOF before the next entry (table
    /// exhausted).
    pub async fn deserialize_entry<R: AsyncRead + Unpin>(
        mut reader: R,
    ) -> std::io::Result<Option<(Vec<u8>, Self)>> {
        let mut label_len = [0];
        let read = reader.read(&mut label_len).await?;
        if read == 0 {
            return Ok(None);
        }
        // BUGFIX: actually consume the label bytes from the stream. The
        // buffer was previously allocated but never filled, leaving every
        // label zeroed and the stream misaligned by `label_len` bytes.
        let mut label = vec![0; label_len[0] as usize];
        reader.read_exact(&mut label).await?;
        let mut pos = [0; 8];
        reader.read_exact(&mut pos).await?;
        let mut len = [0; 8];
        reader.read_exact(&mut len).await?;
        Ok(Some((
            label,
            FileSection {
                position: u64::from_be_bytes(pos),
                length: u64::from_be_bytes(len),
            },
        )))
    }
}

222
appmgr/src/s9pk/manifest.rs Normal file
View File

@@ -0,0 +1,222 @@
use std::borrow::Borrow;
use std::net::Ipv4Addr;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use chrono::{DateTime, Utc};
use indexmap::IndexMap;
use patch_db::HasModel;
use serde::{Deserialize, Serialize, Serializer};
use url::Url;
use crate::action::{ActionImplementation, Actions};
use crate::backup::BackupActions;
use crate::config::action::ConfigActions;
use crate::db::model::InterfaceInfo;
use crate::dependencies::Dependencies;
use crate::id::{Id, InterfaceId, InvalidId, SYSTEM_ID};
use crate::migration::Migrations;
use crate::net::host::Hosts;
use crate::net::tor::HiddenServiceVersion;
use crate::status::health_check::{HealthCheckResult, HealthChecks};
use crate::util::Version;
use crate::volume::Volumes;
use crate::Error;
/// Reserved package id representing the OS/system itself.
pub const SYSTEM_PACKAGE_ID: PackageId<&'static str> = PackageId(SYSTEM_ID);
/// Validated package identifier; newtype over [`Id`] so the id character
/// rules are enforced at construction.
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct PackageId<S: AsRef<str> = String>(Id<S>);
impl FromStr for PackageId {
    type Err = InvalidId;
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(PackageId(Id::try_from(s.to_owned())?))
    }
}
impl<S: AsRef<str>> AsRef<PackageId<S>> for PackageId<S> {
    fn as_ref(&self) -> &PackageId<S> {
        self
    }
}
impl<S: AsRef<str>> std::fmt::Display for PackageId<S> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", &self.0)
    }
}
impl<S: AsRef<str>> AsRef<str> for PackageId<S> {
    fn as_ref(&self) -> &str {
        self.0.as_ref()
    }
}
// Lets `PackageId` key string-indexed maps and be looked up by `&str`
// without allocation.
impl<S: AsRef<str>> Borrow<str> for PackageId<S> {
    fn borrow(&self) -> &str {
        self.0.as_ref()
    }
}
// Lets a `PackageId` be used directly as a path segment (e.g. in volume
// directories).
impl<S: AsRef<str>> AsRef<Path> for PackageId<S> {
    fn as_ref(&self) -> &Path {
        self.0.as_ref().as_ref()
    }
}
// Serde passes straight through to the inner `Id`, so the wire format is a
// plain string.
impl<'de, S> Deserialize<'de> for PackageId<S>
where
    S: AsRef<str>,
    Id<S>: Deserialize<'de>,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::de::Deserializer<'de>,
    {
        Ok(PackageId(Deserialize::deserialize(deserializer)?))
    }
}
impl<S> Serialize for PackageId<S>
where
    S: AsRef<str>,
{
    fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
    where
        Ser: Serializer,
    {
        Serialize::serialize(&self.0, serializer)
    }
}
/// Top-level package manifest embedded (as CBOR) in every s9pk.
/// Field names are serialized in kebab-case.
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct Manifest {
    pub id: PackageId,
    // Human-readable display name.
    pub title: String,
    pub version: Version,
    pub description: Description,
    // Paths of bundled auxiliary files; defaults resolve to conventional
    // filenames (see `Assets`).
    #[serde(default)]
    pub assets: Assets,
    #[serde(default)]
    pub build: Option<Vec<String>>,
    pub release_notes: String,
    pub license: String, // type of license
    pub wrapper_repo: Url,
    pub upstream_repo: Url,
    pub support_site: Option<Url>,
    pub marketing_site: Option<Url>,
    #[serde(default)]
    pub alerts: Alerts,
    // Action that runs the service itself.
    #[model]
    pub main: ActionImplementation,
    pub health_checks: HealthChecks,
    // Config get/set actions; `None` means the package is not configurable.
    #[model]
    pub config: Option<ConfigActions>,
    #[model]
    pub volumes: Volumes,
    // #[serde(default = "current_version")]
    pub min_os_version: Version,
    // #[serde(default)]
    pub interfaces: Interfaces,
    // #[serde(default)]
    #[model]
    pub backup: BackupActions,
    #[serde(default)]
    #[model]
    pub migrations: Migrations,
    #[serde(default)]
    pub actions: Actions,
    // #[serde(default)]
    // pub permissions: Permissions,
    #[serde(default)]
    #[model]
    pub dependencies: Dependencies,
}
/// The network interfaces a package exposes, keyed by interface id.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct Interfaces(IndexMap<InterfaceId, Interface>); // TODO
impl Interfaces {
    /// Sets up this package's interfaces for a container at `ip`.
    // NOTE(review): unimplemented stub — panics via `todo!` if called.
    pub async fn install(&self, ip: Ipv4Addr) -> Result<InterfaceInfo, Error> {
        todo!()
    }
}
/// One exposed interface: optional Tor hidden-service and/or LAN exposure.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct Interface {
    tor_config: Option<TorConfig>,
    lan_config: Option<IndexMap<u16, LanPortConfig>>,
    // Whether this interface serves a user-facing UI.
    ui: bool,
    protocols: Vec<String>,
}
/// Tor hidden-service settings for an interface.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct TorConfig {
    #[serde(default)]
    hidden_service_version: HiddenServiceVersion,
    // NOTE(review): assumed to map external (hidden-service) port to
    // internal container port — confirm direction against usage.
    port_mapping: IndexMap<u16, u16>,
}
/// LAN exposure settings for a single port.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct LanPortConfig {
    ssl: bool,
    mapping: u16,
}
/// Paths (relative to the package root) of the auxiliary files bundled
/// into an s9pk. Every field is optional; the accessors fall back to
/// conventional default filenames.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct Assets {
    #[serde(default)]
    license: Option<PathBuf>,
    #[serde(default)]
    icon: Option<PathBuf>,
    #[serde(default)]
    docker_images: Option<PathBuf>,
    #[serde(default)]
    instructions: Option<PathBuf>,
}
impl Assets {
    // Idiom cleanup: `as_deref()` converts `&Option<PathBuf>` to
    // `Option<&Path>` directly, replacing `as_ref().map(|a| a.as_path())`.
    /// License file path, defaulting to `LICENSE.md`.
    pub fn license_path(&self) -> &Path {
        self.license.as_deref().unwrap_or(Path::new("LICENSE.md"))
    }
    /// Icon file path, defaulting to `icon.png`.
    pub fn icon_path(&self) -> &Path {
        self.icon.as_deref().unwrap_or(Path::new("icon.png"))
    }
    /// Icon file extension (e.g. `png`); defaults to `png` when the path
    /// is unset or its extension is missing/non-UTF-8.
    pub fn icon_type(&self) -> &str {
        self.icon
            .as_deref()
            .and_then(Path::extension)
            .and_then(|ext| ext.to_str())
            .unwrap_or("png")
    }
    /// Docker image tarball path, defaulting to `images.tar`.
    pub fn docker_images_path(&self) -> &Path {
        self.docker_images
            .as_deref()
            .unwrap_or(Path::new("images.tar"))
    }
    /// Instructions file path, defaulting to `INSTRUCTIONS.md`.
    pub fn instructions_path(&self) -> &Path {
        self.instructions
            .as_deref()
            .unwrap_or(Path::new("INSTRUCTIONS.md"))
    }
}
/// Short and long human-readable descriptions of a package.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct Description {
    pub short: String,
    pub long: String,
}
/// Optional warning text shown to the user before each lifecycle action.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct Alerts {
    pub install: Option<String>,
    pub uninstall: Option<String>,
    pub restore: Option<String>,
    pub start: Option<String>,
    pub stop: Option<String>,
}

6
appmgr/src/s9pk/mod.rs Normal file
View File

@@ -0,0 +1,6 @@
pub mod builder;
pub mod header;
pub mod manifest;
pub mod reader;
/// Domain-separation context used when signing/verifying the s9pk content
/// hash. (`'static` is implied on consts — clippy: redundant_static_lifetimes.)
pub const SIG_CONTEXT: &[u8] = b"s9pk";

144
appmgr/src/s9pk/reader.rs Normal file
View File

@@ -0,0 +1,144 @@
use std::io::SeekFrom;
use std::path::Path;
use std::pin::Pin;
use std::task::{Context, Poll};
use digest::Output;
use sha2::{Digest, Sha512};
use tokio::fs::File;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, ReadBuf, Take};
use super::header::{FileSection, Header, TableOfContents};
use super::manifest::Manifest;
use super::SIG_CONTEXT;
use crate::install::progress::InstallProgressTracker;
use crate::{Error, ResultExt};
/// Borrowed reader over one file section of an s9pk: a `Take`-limited
/// reader plus a shared cursor that tracks how far the underlying file
/// has been advanced.
#[pin_project::pin_project]
pub struct ReadHandle<'a, R: AsyncRead + AsyncSeek + Unpin = File> {
    // Shared with the owning `S9pkReader` so it knows the real file offset
    // after this handle is consumed.
    pos: &'a mut u64,
    #[pin]
    rdr: Take<&'a mut R>,
}
impl<'a, R: AsyncRead + AsyncSeek + Unpin> ReadHandle<'a, R> {
    /// Reads the remainder of this section into an owned buffer.
    pub async fn to_vec(mut self) -> std::io::Result<Vec<u8>> {
        let mut buf = vec![0; self.rdr.limit() as usize];
        self.rdr.read_exact(&mut buf).await?;
        // BUGFIX: reading via `self.rdr` bypasses `poll_read`'s position
        // accounting, leaving `*pos` stale; a later `read_handle` for a
        // section starting at the stale offset would skip its seek and
        // read the wrong bytes. Keep the shared cursor in sync.
        *self.pos += buf.len() as u64;
        Ok(buf)
    }
}
impl<'a, R: AsyncRead + AsyncSeek + Unpin> AsyncRead for ReadHandle<'a, R> {
    /// Delegates to the inner `Take` reader, adding however many bytes were
    /// newly read to the shared position counter.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<std::io::Result<()>> {
        // Snapshot the fill level first so only the delta from this poll is
        // counted (the buffer may already be partially filled).
        let start = buf.filled().len();
        let this = self.project();
        let pos = this.pos;
        AsyncRead::poll_read(this.rdr, cx, buf).map(|res| {
            **pos += (buf.filled().len() - start) as u64;
            res
        })
    }
}
/// Seekable reader for a `.s9pk` package whose signature has been verified
/// (see `from_reader`).
pub struct S9pkReader<R: AsyncRead + AsyncSeek + Unpin = File> {
    // SHA-512 of everything after the header (the signed content).
    hash: Output<Sha512>,
    // Unpadded RFC 4648 base32 rendering of `hash`.
    hash_string: String,
    toc: TableOfContents,
    // Current byte offset of `rdr` within the file.
    pos: u64,
    rdr: R,
}
impl S9pkReader {
    /// Opens the s9pk at `path` and validates its header and signature
    /// (see `from_reader`).
    // NOTE(review): uses `crate::error::ErrorKind` while sibling code uses
    // `crate::ErrorKind` — presumably re-exports of the same type; confirm.
    pub async fn open<P: AsRef<Path>>(path: P) -> Result<Self, Error> {
        let p = path.as_ref();
        let rdr = File::open(p)
            .await
            .with_ctx(|_| (crate::error::ErrorKind::Filesystem, p.display().to_string()))?;
        Self::from_reader(rdr).await
    }
}
impl<R: AsyncRead + AsyncSeek + Unpin> S9pkReader<InstallProgressTracker<R>> {
    /// Forwards to the wrapped progress tracker to mark the validation
    /// phase as complete.
    pub fn validated(&mut self) {
        self.rdr.validated()
    }
}
impl<R: AsyncRead + AsyncSeek + Unpin> S9pkReader<R> {
    /// Performs full validation of the package contents.
    // NOTE(review): unimplemented stub — panics via `todo!` if called.
    pub async fn validate(&mut self) -> Result<(), Error> {
        todo!()
    }
    /// Parses the header, hashes the rest of the file (everything after
    /// the header), and verifies the ed25519 prehashed signature over that
    /// hash (with `SIG_CONTEXT`) before returning a reader. On return the
    /// underlying cursor is at end-of-file; `pos` records the post-header
    /// offset that was current before hashing.
    ///
    /// # Errors
    /// Fails on I/O errors, a malformed header, or signature mismatch.
    pub async fn from_reader(mut rdr: R) -> Result<Self, Error> {
        let header = Header::deserialize(&mut rdr).await?;
        let pos = rdr.stream_position().await?;
        // Hash the signed content in 1 KiB chunks until EOF.
        let mut hasher = Sha512::new();
        let mut buf = [0; 1024];
        let mut read;
        while {
            read = rdr.read(&mut buf).await?;
            read != 0
        } {
            hasher.update(&buf[0..read]);
        }
        // `finalize` consumes the hasher, but `verify_prehashed` also
        // needs it — so finalize a clone.
        let hash = hasher.clone().finalize();
        header
            .pubkey
            .verify_prehashed(hasher, Some(SIG_CONTEXT), &header.signature)?;
        Ok(S9pkReader {
            hash_string: base32::encode(
                base32::Alphabet::RFC4648 { padding: false },
                hash.as_slice(),
            ),
            hash,
            toc: header.table_of_contents,
            pos,
            rdr,
        })
    }
    /// Raw SHA-512 hash of the signed content.
    pub fn hash(&self) -> &Output<Sha512> {
        &self.hash
    }
    /// Unpadded base32 (RFC 4648) rendering of the content hash.
    pub fn hash_str(&self) -> &str {
        self.hash_string.as_str()
    }
    /// Seeks to `section` (skipping the seek when already positioned
    /// there) and returns a handle limited to the section's length.
    async fn read_handle<'a>(
        &'a mut self,
        section: FileSection,
    ) -> Result<ReadHandle<'a, R>, Error> {
        if self.pos != section.position {
            self.rdr.seek(SeekFrom::Start(section.position)).await?;
            self.pos = section.position;
        }
        Ok(ReadHandle {
            pos: &mut self.pos,
            rdr: (&mut self.rdr).take(section.length),
        })
    }
    /// Reads and deserializes the CBOR manifest section.
    pub async fn manifest(&mut self) -> Result<Manifest, Error> {
        serde_cbor::from_slice(&self.read_handle(self.toc.manifest).await?.to_vec().await?)
            .with_ctx(|_| (crate::ErrorKind::ParseS9pk, "Deserializing Manifest (CBOR)"))
    }
    /// Reader over the license section.
    pub async fn license<'a>(&'a mut self) -> Result<ReadHandle<'a, R>, Error> {
        Ok(self.read_handle(self.toc.license).await?)
    }
    /// Reader over the instructions section.
    pub async fn instructions<'a>(&'a mut self) -> Result<ReadHandle<'a, R>, Error> {
        Ok(self.read_handle(self.toc.instructions).await?)
    }
    /// Reader over the icon section.
    pub async fn icon<'a>(&'a mut self) -> Result<ReadHandle<'a, R>, Error> {
        Ok(self.read_handle(self.toc.icon).await?)
    }
    /// Reader over the docker image tarball section.
    pub async fn docker_images<'a>(&'a mut self) -> Result<ReadHandle<'a, R>, Error> {
        Ok(self.read_handle(self.toc.docker_images).await?)
    }
}

View File

@@ -0,0 +1,135 @@
use std::path::Path;
use chrono::{DateTime, Utc};
use indexmap::IndexMap;
use serde::{Deserialize, Deserializer, Serialize};
use crate::action::ActionImplementation;
use crate::id::Id;
use crate::net::host::Hosts;
use crate::s9pk::manifest::PackageId;
use crate::util::Version;
use crate::volume::Volumes;
use crate::Error;
/// Identifier for a single health check declared in a package manifest.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize)]
pub struct HealthCheckId<S: AsRef<str> = String>(Id<S>);
impl<S: AsRef<str>> std::fmt::Display for HealthCheckId<S> {
    // Renders exactly as the wrapped `Id` renders.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{}", &self.0)
    }
}
impl<S: AsRef<str>> AsRef<str> for HealthCheckId<S> {
    // Borrows the raw string form of the wrapped `Id`.
    fn as_ref(&self) -> &str {
        self.0.as_ref()
    }
}
impl<'de, S> Deserialize<'de> for HealthCheckId<S>
where
    S: AsRef<str>,
    Id<S>: Deserialize<'de>,
{
    // Transparent newtype deserialization: accepts whatever the inner `Id`
    // accepts (including its validation) and wraps the result.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        Ok(HealthCheckId(Deserialize::deserialize(deserializer)?))
    }
}
impl<S: AsRef<str>> AsRef<Path> for HealthCheckId<S> {
    // Lets the id be used directly as a path component.
    fn as_ref(&self) -> &Path {
        self.0.as_ref().as_ref()
    }
}
/// Ordered map of all health checks declared by a package manifest.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct HealthChecks(pub IndexMap<HealthCheckId, HealthCheck>);
impl HealthChecks {
    /// Runs every declared health check concurrently and collects the results
    /// by id.
    ///
    /// `started` is when the service entered the running state; it is passed
    /// through to each check. Fails fast: the first check whose *execution*
    /// errors (not a failed check result) aborts the whole batch.
    pub async fn check_all(
        &self,
        started: &DateTime<Utc>,
        pkg_id: &PackageId,
        pkg_version: &Version,
        volumes: &Volumes,
        hosts: &Hosts,
    ) -> Result<IndexMap<HealthCheckId, HealthCheckResult>, Error> {
        let res = futures::future::try_join_all(self.0.iter().map(|(id, check)| async move {
            Ok::<_, Error>((
                id.clone(),
                check
                    .check(started, pkg_id, pkg_version, volumes, hosts)
                    .await?,
            ))
        }))
        .await?;
        Ok(res.into_iter().collect())
    }
}
/// A single health check: an action to execute plus a criticality flag.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct HealthCheck {
    // The action whose exit status determines the result (see `check`).
    #[serde(flatten)]
    implementation: ActionImplementation,
    // Consulted by status code: a failing critical check escalates.
    pub critical: bool,
}
impl HealthCheck {
    /// Executes the health check action once and classifies its exit status.
    ///
    /// Exit-code convention (established by the match below): success maps to
    /// `Success`, 59 to `Disabled`, 60 to `WarmingUp`, and any other nonzero
    /// code to `Failure` carrying the captured error output.
    pub async fn check(
        &self,
        started: &DateTime<Utc>,
        pkg_id: &PackageId,
        pkg_version: &Version,
        volumes: &Volumes,
        hosts: &Hosts,
    ) -> Result<HealthCheckResult, Error> {
        let res = self
            .implementation
            .execute(pkg_id, pkg_version, volumes, hosts, Some(started), true)
            .await?;
        Ok(HealthCheckResult {
            time: Utc::now(),
            result: match res {
                Ok(()) => HealthCheckResultVariant::Success,
                // 59: the check reports itself as disabled.
                Err((59, _)) => HealthCheckResultVariant::Disabled,
                // 60: the service is still warming up.
                Err((60, _)) => HealthCheckResultVariant::WarmingUp,
                Err((_, error)) => HealthCheckResultVariant::Failure { error },
            },
        })
    }
}
/// Timestamped outcome of a single health check run.
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct HealthCheckResult {
    // When the check produced this result.
    pub time: DateTime<Utc>,
    #[serde(flatten)]
    pub result: HealthCheckResultVariant,
}
impl HealthCheckResult {
    /// Placeholder result for a check that has not reported anything yet.
    /// Represented as a failure so consumers treat the check as unhealthy.
    pub fn not_available() -> Self {
        let result = HealthCheckResultVariant::Failure {
            error: "Health Check Status Not Available".to_owned(),
        };
        HealthCheckResult {
            result,
            time: Utc::now(),
        }
    }
}
/// Outcome classification for a health check run.
///
/// Serialized with a kebab-case `result` tag; `Failure` carries the error
/// output inline.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
#[serde(tag = "result")]
pub enum HealthCheckResultVariant {
    WarmingUp,
    Disabled,
    Success,
    Failure { error: String },
}
impl std::fmt::Display for HealthCheckResultVariant {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
HealthCheckResultVariant::WarmingUp => write!(f, "Warming Up"),
HealthCheckResultVariant::Disabled => write!(f, "Disabled"),
HealthCheckResultVariant::Success => write!(f, "Succeeded"),
HealthCheckResultVariant::Failure { error } => write!(f, "Failed ({})", error),
}
}
}

464
appmgr/src/status/mod.rs Normal file
View File

@@ -0,0 +1,464 @@
use std::collections::HashMap;
use std::sync::Arc;
use anyhow::anyhow;
use bollard::container::{ListContainersOptions, StartContainerOptions, StopContainerOptions};
use bollard::models::{ContainerStateStatusEnum, ContainerSummaryInner};
use bollard::Docker;
use chrono::{DateTime, Utc};
use futures::{StreamExt, TryFutureExt};
use indexmap::IndexMap;
use patch_db::{DbHandle, HasModel, Map, MapModel, Model, ModelData, ModelDataMut};
use serde::{Deserialize, Serialize};
use self::health_check::{HealthCheckId, HealthCheckResult};
use crate::action::docker::DockerAction;
use crate::context::RpcContext;
use crate::db::model::{
CurrentDependencyInfo, InstalledPackageDataEntryModel, PackageDataEntryModel,
};
use crate::dependencies::{Dependencies, DependencyError};
use crate::id::InterfaceId;
use crate::net::host::Hosts;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::status::health_check::HealthCheckResultVariant;
use crate::util::Invoke;
use crate::Error;
pub mod health_check;
// Assume docker for now
/// Reconciles the DB-recorded status of every installed package with the
/// actual state of its docker container, restarting the docker daemon
/// afterwards if any reconciliation detected a container that ignored a stop
/// request (the "docker fuckening").
pub async fn synchronize_all(ctx: &RpcContext) -> Result<(), Error> {
    let mut db = ctx.db.handle();
    let mut pkg_ids = crate::db::DatabaseModel::new()
        .package_data()
        .keys(&mut db)
        .await?;
    // Map each installed package to its expected container name; packages
    // without an `installed` entry are dropped from consideration.
    let mut container_names = Vec::with_capacity(pkg_ids.len());
    for id in pkg_ids.clone().into_iter() {
        if let Some(version) = &*crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(&id)
            .expect(&mut db)
            .await?
            .installed()
            .map(|i| i.manifest().version())
            .get(&mut db)
            .await?
        {
            container_names.push(DockerAction::container_name(id.as_ref(), version));
        } else {
            pkg_ids.remove(&id);
        }
    }
    let mut filters = HashMap::new();
    filters.insert("name".to_owned(), container_names);
    let info = ctx
        .docker
        .list_containers(Some(ListContainersOptions {
            all: true,
            size: false,
            limit: None,
            filters,
        }))
        .await?;
    // Set if any package's sync found a container that refused to stop.
    let mut fuckening = false;
    for summary in info {
        // Match the container back to a package id; `take` also marks it as seen.
        let id = if let Some(id) = summary.names.iter().flatten().find_map(|s| {
            DockerAction::uncontainer_name(s.as_str()).and_then(|id| pkg_ids.take(id))
        }) {
            id
        } else {
            continue;
        };
        // Syncs one package's recorded status against its container summary.
        // Returns true if the docker fuckening was detected for it.
        async fn status<Db: DbHandle>(
            docker: &Docker,
            id: &PackageId,
            db: &mut Db,
            summary: &ContainerSummaryInner,
        ) -> Result<bool, Error> {
            let pkg_data = crate::db::DatabaseModel::new()
                .package_data()
                .idx_model(id)
                .check(db)
                .await?
                .ok_or_else(|| {
                    Error::new(
                        anyhow!("VersionedPackageData does not exist"),
                        crate::ErrorKind::Database,
                    )
                })?;
            let (mut status, manifest) =
                if let Some(installed) = pkg_data.installed().check(db).await? {
                    (
                        installed.clone().status().get_mut(db).await?,
                        installed.manifest().get(db).await?,
                    )
                } else {
                    return Ok(false);
                };
            let res = status.main.synchronize(docker, &*manifest, summary).await?;
            status.save(db).await?;
            Ok(res)
        }
        // A sync failure for one package is logged but does not abort the rest.
        match status(&ctx.docker, &id, &mut db, &summary).await {
            Ok(a) => fuckening |= a,
            Err(e) => log::error!("Error syncronizing status of {}: {}", id, e),
        }
    }
    if fuckening {
        // Last resort: bounce the docker daemon itself.
        tokio::process::Command::new("service")
            .arg("docker")
            .arg("restart")
            .invoke(crate::ErrorKind::Docker)
            .await?;
    }
    // Anything left in pkg_ids had no matching container.
    for id in pkg_ids {
        log::warn!("No container for {}", id);
    }
    Ok(())
}
/// Runs health checks for every installed package, persists the results, then
/// recomputes each package's dependency errors from the fresh statuses.
pub async fn check_all(ctx: &RpcContext) -> Result<(), Error> {
    let mut db = ctx.db.handle();
    let hosts = Arc::new(
        crate::db::DatabaseModel::new()
            .network()
            .hosts()
            .get(&mut db)
            .await?
            .to_owned(),
    );
    let pkg_ids = crate::db::DatabaseModel::new()
        .package_data()
        .keys(&mut db)
        .await?;
    // Collect (status model, manifest) and (status model, current deps) pairs
    // up front so the long-running checks below don't hold this db handle.
    let mut status_manifest = Vec::with_capacity(pkg_ids.len());
    let mut status_deps = Vec::with_capacity(pkg_ids.len());
    for id in &pkg_ids {
        let model = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .check(&mut db)
            .await?
            .ok_or_else(|| {
                Error::new(
                    anyhow!("PackageDataEntry does not exist"),
                    crate::ErrorKind::Database,
                )
            })?;
        if let Some(installed) = model.installed().check(&mut db).await? {
            status_manifest.push((
                installed.clone().status(),
                Arc::new(installed.clone().manifest().get(&mut db).await?),
            ));
            status_deps.push((
                installed.clone().status(),
                Arc::new(installed.current_dependencies().get(&mut db).await?),
            ));
        }
    }
    drop(db);
    // Runs the main-status health checks for one package and persists them.
    async fn main_status<Db: DbHandle>(
        status_model: StatusModel,
        manifest: Arc<ModelData<Manifest>>,
        hosts: Arc<Hosts>,
        mut db: Db,
    ) -> Result<MainStatus, Error> {
        let mut status = status_model.get_mut(&mut db).await?;
        status.main.check(&*manifest, &*hosts).await?;
        let res = status.main.clone();
        status.save(&mut db).await?;
        Ok(res)
    }
    let (status_sender, mut statuses_recv) = tokio::sync::mpsc::channel(status_manifest.len() + 1);
    let mut statuses = HashMap::with_capacity(status_manifest.len());
    futures::stream::iter(
        status_manifest
            .into_iter()
            .zip(pkg_ids.clone())
            .zip(std::iter::repeat(hosts)),
    )
    .for_each_concurrent(None, move |(((status, manifest), id), hosts)| {
        let status_sender = status_sender.clone();
        async move {
            // Spawned so each package's check runs on its own task; errors are
            // logged and that package simply reports no status this round.
            match tokio::spawn(main_status(status, manifest, hosts, ctx.db.handle()))
                .await
                .unwrap()
            {
                Err(e) => {
                    log::error!("Error running main health check for {}: {}", id, e);
                    log::debug!("{:?}", e);
                }
                Ok(status) => {
                    status_sender.send((id, status)).await.expect("unreachable");
                }
            }
        }
    })
    .await;
    // The sender was moved into the closure above and has been dropped by now,
    // so this drains the channel to completion.
    while let Some((id, status)) = statuses_recv.recv().await {
        statuses.insert(id, status);
    }
    let statuses = Arc::new(statuses);
    // Recomputes one package's dependency errors from the fresh statuses.
    async fn dependency_status<Db: DbHandle>(
        statuses: Arc<HashMap<PackageId, MainStatus>>,
        status_model: StatusModel,
        current_deps: Arc<ModelData<IndexMap<PackageId, CurrentDependencyInfo>>>,
        mut db: Db,
    ) -> Result<(), Error> {
        let mut status = status_model.get_mut(&mut db).await?;
        status
            .dependency_errors
            .update_health_based(&current_deps, &*statuses)
            .await?;
        status.save(&mut db).await?;
        Ok(())
    }
    futures::stream::iter(status_deps.into_iter().zip(pkg_ids.clone()))
        .for_each_concurrent(None, |((status, deps), id)| {
            let statuses = statuses.clone();
            async move {
                if let Err(e) =
                    tokio::spawn(dependency_status(statuses, status, deps, ctx.db.handle()))
                        .await
                        .unwrap()
                {
                    log::error!("Error running dependency health check for {}: {}", id, e);
                    log::debug!("{:?}", e);
                }
            }
        })
        .await;
    Ok(())
}
/// Full persisted status of an installed package.
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
pub struct Status {
    // Whether the package has been configured.
    pub configured: bool,
    // Run-state of the package's main container.
    pub main: MainStatus,
    // Per-dependency error state, refreshed by `check_all`.
    pub dependency_errors: DependencyErrors,
}
/// Desired/observed run-state of a package's main container.
///
/// Serialized with a kebab-case `status` tag.
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(tag = "status")]
#[serde(rename_all = "kebab-case")]
pub enum MainStatus {
    Stopped,
    Stopping,
    Running {
        // When the service entered the running state.
        started: DateTime<Utc>,
        // Latest result for each declared health check.
        health: IndexMap<HealthCheckId, HealthCheckResult>,
    },
    BackingUp {
        // `Some` if the service counts as running while the backup is taken
        // (see `running`).
        started: Option<DateTime<Utc>>,
        health: IndexMap<HealthCheckId, HealthCheckResult>,
    },
    Restoring {
        // Whether the service counts as running during the restore.
        running: bool,
    },
}
impl MainStatus {
    /// Drives the actual docker container toward this recorded status, or
    /// updates the record to match reality (e.g. `Stopping` -> `Stopped`).
    ///
    /// Returns `true` if a container ignored a stop request and is still
    /// running ("docker fuckening"), signalling the caller to restart docker.
    pub async fn synchronize(
        &mut self,
        docker: &Docker,
        manifest: &Manifest,
        summary: &ContainerSummaryInner,
    ) -> Result<bool, Error> {
        // true if Docker Fuckening
        async fn check_fuckening(docker: &Docker, manifest: &Manifest) -> Result<bool, Error> {
            Ok(docker
                .inspect_container(
                    &DockerAction::container_name(&manifest.id, &manifest.version),
                    None,
                )
                .await?
                .state
                .as_ref()
                .and_then(|s| s.status)
                == Some(ContainerStateStatusEnum::RUNNING))
        }
        let name = DockerAction::container_name(&manifest.id, &manifest.version);
        let state = summary.state.as_ref().map(|s| s.as_str());
        match state {
            // Container is not running.
            Some("created") | Some("exited") => match self {
                MainStatus::Stopped => (),
                MainStatus::Stopping => {
                    // Stop has completed; record it.
                    *self = MainStatus::Stopped;
                }
                MainStatus::Running { started, .. } => {
                    // Record says running but the container is down: restart
                    // it and reset the start time.
                    *started = Utc::now();
                    docker
                        .start_container(&name, None::<StartContainerOptions<String>>)
                        .await?;
                }
                MainStatus::BackingUp { .. } => (),
                MainStatus::Restoring { .. } => (),
            },
            // Container is running (or about to be).
            Some("running") | Some("restarting") => match self {
                MainStatus::Stopped | MainStatus::Stopping | MainStatus::Restoring { .. } => {
                    docker
                        .stop_container(&name, Some(StopContainerOptions { t: 30 }))
                        .await?;
                    return check_fuckening(docker, manifest).await;
                }
                MainStatus::Running { .. } => (),
                MainStatus::BackingUp { .. } => {
                    // Freeze the container while a backup is in progress.
                    docker.pause_container(&name).await?;
                }
            },
            Some("paused") => match self {
                MainStatus::Stopped | MainStatus::Stopping | MainStatus::Restoring { .. } => {
                    // Unpause first so the subsequent stop takes effect.
                    docker.unpause_container(&name).await?;
                    docker
                        .stop_container(&name, Some(StopContainerOptions { t: 30 }))
                        .await?;
                    return check_fuckening(docker, manifest).await;
                }
                MainStatus::Running { .. } => {
                    docker.unpause_container(&name).await?;
                }
                MainStatus::BackingUp { .. } => (),
            },
            unknown => {
                return Err(Error::new(
                    anyhow!("Unexpected Docker Status: {:?}", unknown),
                    crate::ErrorKind::Docker,
                ));
            }
        }
        Ok(false)
    }
    /// Runs all health checks if the service is `Running`, storing the fresh
    /// results in place; no-op for every other state. A failing *critical*
    /// check escalates (see below).
    pub async fn check(&mut self, manifest: &Manifest, hosts: &Hosts) -> Result<(), Error> {
        match self {
            MainStatus::Running { started, health } => {
                *health = manifest
                    .health_checks
                    .check_all(
                        started,
                        &manifest.id,
                        &manifest.version,
                        &manifest.volumes,
                        hosts,
                    )
                    .await?;
                for (check, res) in health {
                    if matches!(
                        res.result,
                        health_check::HealthCheckResultVariant::Failure { .. }
                    ) && manifest
                        .health_checks
                        .0
                        .get(check)
                        .map(|hc| hc.critical)
                        .unwrap_or_default()
                    {
                        // NOTE(review): `todo!` panics, so the state change on
                        // the next line is currently unreachable — confirm the
                        // intended order once notifications are implemented.
                        todo!("emit notification");
                        *self = MainStatus::Stopping;
                    }
                }
            }
            _ => (),
        }
        Ok(())
    }
    /// Whether the service counts as running: actively running, backing up
    /// while it was running, or restoring toward a running state.
    pub fn running(&self) -> bool {
        match self {
            MainStatus::Running { .. }
            | MainStatus::BackingUp {
                started: Some(_), ..
            }
            | MainStatus::Restoring { running: true } => true,
            _ => false,
        }
    }
    /// Requests a stop appropriate to the current state: transition to
    /// `Stopping`, or clear the running marker of a backup/restore.
    pub fn stop(&mut self) {
        match self {
            MainStatus::Running { .. } => {
                *self = MainStatus::Stopping;
            }
            MainStatus::BackingUp { started, .. } => {
                *started = None;
            }
            MainStatus::Restoring { running } => {
                *running = false;
            }
            _ => (),
        }
    }
}
/// Map from dependency package id to the current error (if any) with it.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct DependencyErrors(pub IndexMap<PackageId, DependencyError>);
impl Map for DependencyErrors {
    type Key = PackageId;
    type Value = DependencyError;
    // Keyed lookup used by patch-db's map-model machinery (and locally below).
    fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
        self.0.get(key)
    }
}
// Expose DependencyErrors to patch-db as a map-shaped model.
impl HasModel for DependencyErrors {
    type Model = MapModel<Self>;
}
impl DependencyErrors {
    /// Refreshes health-derived dependency errors from freshly computed
    /// statuses.
    ///
    /// Only entries that are absent or already health-related (`NotRunning` /
    /// `HealthChecksFailed`) are touched; any other error kind is owned by a
    /// different code path and left alone.
    ///
    /// NOTE(review): a fully healthy dependency still gets a
    /// `HealthChecksFailed` entry with an empty `failures` map rather than
    /// having its error cleared — confirm downstream treats empty failures as
    /// healthy.
    async fn update_health_based(
        &mut self,
        dependencies: &IndexMap<PackageId, CurrentDependencyInfo>,
        statuses: &HashMap<PackageId, MainStatus>,
    ) -> Result<(), Error> {
        for (dep_id, dep_info) in dependencies {
            if matches!(
                self.get(&dep_id),
                Some(&DependencyError::NotRunning)
                    | Some(&DependencyError::HealthChecksFailed { .. })
                    | None
            ) {
                match statuses.get(dep_id) {
                    // Dependency is running (or backing up while running):
                    // judge it by its required health checks.
                    Some(MainStatus::Running { ref health, .. })
                    | Some(MainStatus::BackingUp {
                        started: Some(_),
                        ref health,
                    }) => {
                        // Collect the required checks that did not succeed; a
                        // check with no reported result counts as Disabled.
                        let mut failures = IndexMap::new();
                        for check in &dep_info.health_checks {
                            let res =
                                health
                                    .get(check)
                                    .cloned()
                                    .unwrap_or_else(|| HealthCheckResult {
                                        result: HealthCheckResultVariant::Disabled,
                                        time: Utc::now(),
                                    });
                            if !matches!(res.result, HealthCheckResultVariant::Success) {
                                failures.insert(check.clone(), res);
                            }
                        }
                        self.0.insert(
                            dep_id.clone(),
                            DependencyError::HealthChecksFailed { failures },
                        );
                    }
                    _ => {
                        self.0.insert(dep_id.clone(), DependencyError::NotRunning);
                    }
                }
            }
        }
        Ok(())
    }
}

View File

@@ -1,84 +0,0 @@
use linear_map::LinearMap;
use crate::dependencies::{DependencyError, TaggedDependencyError};
use crate::Error;
use crate::ResultExt as _;
/// Legacy (0.2.x) updater: stops dependents as needed, then removes and
/// reinstalls `name` at the version resolved from the registry.
///
/// `name_version` is `name` or `name@version-range`; an omitted range means
/// "any". Returns the dependents that were (or would be) stopped, tagged with
/// the dependency error recorded for each. With `dry_run`, only this map is
/// computed — nothing is stopped or reinstalled.
pub async fn update(
    name_version: &str,
    dry_run: bool,
) -> Result<LinearMap<String, TaggedDependencyError>, Error> {
    let mut name_version_iter = name_version.split("@");
    let name = name_version_iter.next().unwrap();
    let version_req = name_version_iter
        .next()
        .map(|v| v.parse())
        .transpose()
        .no_code()?
        .unwrap_or_else(emver::VersionRange::any);
    // Resolve the concrete version the registry serves for this range.
    let version = crate::registry::version(name, &version_req).await?;
    let mut res = LinearMap::new();
    for dependent in crate::apps::dependents(name, false).await? {
        if crate::apps::status(&dependent, false).await?.status
            != crate::apps::DockerStatus::Stopped
        {
            let manifest = crate::apps::manifest(&dependent).await?;
            match manifest.dependencies.0.get(name) {
                // The new version will no longer satisfy this dependent.
                Some(dep) if !version.satisfies(&dep.version) => {
                    crate::control::stop_dependents(
                        &dependent,
                        dry_run,
                        DependencyError::NotRunning,
                        &mut res,
                    )
                    .await?;
                    if crate::apps::status(name, false).await?.status
                        != crate::apps::DockerStatus::Stopped
                    {
                        crate::control::stop_app(&dependent, false, dry_run).await?;
                        res.insert(
                            dependent,
                            TaggedDependencyError {
                                dependency: name.to_owned(),
                                error: DependencyError::IncorrectVersion {
                                    expected: version_req.clone(),
                                    received: version.clone(),
                                },
                            },
                        );
                    }
                }
                // Version still satisfied (or no dependency record): stopped
                // only for the duration of the update.
                _ => {
                    crate::control::stop_dependents(
                        &dependent,
                        dry_run,
                        DependencyError::NotRunning,
                        &mut res,
                    )
                    .await?;
                    if crate::apps::status(name, false).await?.status
                        != crate::apps::DockerStatus::Stopped
                    {
                        crate::control::stop_app(&dependent, false, dry_run).await?;
                        res.insert(
                            dependent,
                            TaggedDependencyError {
                                dependency: name.to_owned(),
                                error: DependencyError::NotRunning,
                            },
                        );
                    }
                }
            }
        }
    }
    if dry_run {
        return Ok(res);
    }
    // Download first so a fetch failure leaves the current install untouched.
    let download_path = crate::install::download_name(name_version).await?;
    crate::remove::remove(name, false, false).await?;
    crate::install::install_path(download_path, Some(name)).await?;
    crate::apps::set_recoverable(name, false).await?;
    Ok(res)
}

File diff suppressed because it is too large Load Diff

View File

@@ -1,64 +1,63 @@
use std::cmp::Ordering;
use async_trait::async_trait;
use failure::ResultExt as _;
use futures::stream::TryStreamExt;
use lazy_static::lazy_static;
use patch_db::DbHandle;
use rpc_toolkit::command;
use tokio_compat_02::FutureExt;
use crate::util::{to_yaml_async_writer, AsyncCompat, PersistencePath};
use crate::Error;
use crate::ResultExt as _;
// mod v0_1_0;
// mod v0_1_1;
// mod v0_1_2;
// mod v0_1_3;
// mod v0_1_4;
// mod v0_1_5;
// mod v0_2_0;
// mod v0_2_1;
// mod v0_2_2;
// mod v0_2_3;
// mod v0_2_4;
// mod v0_2_5;
// mod v0_2_6;
// mod v0_2_7;
// mod v0_2_8;
// mod v0_2_9;
mod v0_1_0;
mod v0_1_1;
mod v0_1_2;
mod v0_1_3;
mod v0_1_4;
mod v0_1_5;
mod v0_2_0;
mod v0_2_1;
mod v0_2_2;
mod v0_2_3;
mod v0_2_4;
mod v0_2_5;
mod v0_2_6;
mod v0_2_7;
mod v0_2_8;
mod v0_2_9;
// mod v0_2_10;
// mod v0_2_11;
// mod v0_2_12;
mod v0_2_10;
mod v0_2_11;
mod v0_2_12;
mod v0_2_13;
mod v0_2_14;
// pub use v0_2_12::Version as Current;
pub type Current = ();
pub use v0_2_14::Version as Current;
use crate::context::{CliContext, EitherContext, RpcContext};
use crate::util::{to_yaml_async_writer, AsyncCompat};
use crate::{Error, ResultExt as _};
#[derive(serde::Serialize, serde::Deserialize)]
#[serde(untagged)]
enum Version {
V0_0_0(Wrapper<()>),
V0_1_0(Wrapper<v0_1_0::Version>),
V0_1_1(Wrapper<v0_1_1::Version>),
V0_1_2(Wrapper<v0_1_2::Version>),
V0_1_3(Wrapper<v0_1_3::Version>),
V0_1_4(Wrapper<v0_1_4::Version>),
V0_1_5(Wrapper<v0_1_5::Version>),
V0_2_0(Wrapper<v0_2_0::Version>),
V0_2_1(Wrapper<v0_2_1::Version>),
V0_2_2(Wrapper<v0_2_2::Version>),
V0_2_3(Wrapper<v0_2_3::Version>),
V0_2_4(Wrapper<v0_2_4::Version>),
V0_2_5(Wrapper<v0_2_5::Version>),
V0_2_6(Wrapper<v0_2_6::Version>),
V0_2_7(Wrapper<v0_2_7::Version>),
V0_2_8(Wrapper<v0_2_8::Version>),
V0_2_9(Wrapper<v0_2_9::Version>),
V0_2_10(Wrapper<v0_2_10::Version>),
V0_2_11(Wrapper<v0_2_11::Version>),
V0_2_12(Wrapper<v0_2_12::Version>),
V0_2_13(Wrapper<v0_2_13::Version>),
V0_2_14(Wrapper<v0_2_14::Version>),
// V0_1_0(Wrapper<v0_1_0::Version>),
// V0_1_1(Wrapper<v0_1_1::Version>),
// V0_1_2(Wrapper<v0_1_2::Version>),
// V0_1_3(Wrapper<v0_1_3::Version>),
// V0_1_4(Wrapper<v0_1_4::Version>),
// V0_1_5(Wrapper<v0_1_5::Version>),
// V0_2_0(Wrapper<v0_2_0::Version>),
// V0_2_1(Wrapper<v0_2_1::Version>),
// V0_2_2(Wrapper<v0_2_2::Version>),
// V0_2_3(Wrapper<v0_2_3::Version>),
// V0_2_4(Wrapper<v0_2_4::Version>),
// V0_2_5(Wrapper<v0_2_5::Version>),
// V0_2_6(Wrapper<v0_2_6::Version>),
// V0_2_7(Wrapper<v0_2_7::Version>),
// V0_2_8(Wrapper<v0_2_8::Version>),
// V0_2_9(Wrapper<v0_2_9::Version>),
// V0_2_10(Wrapper<v0_2_10::Version>),
// V0_2_11(Wrapper<v0_2_11::Version>),
// V0_2_12(Wrapper<v0_2_12::Version>),
Other(emver::Version),
}
@@ -69,39 +68,62 @@ where
{
type Previous: VersionT;
fn new() -> Self;
fn semver(&self) -> &'static emver::Version;
async fn up(&self) -> Result<(), Error>;
async fn down(&self) -> Result<(), Error>;
async fn commit(&self) -> Result<(), Error> {
let mut out = PersistencePath::from_ref("version").write(None).await?;
to_yaml_async_writer(out.as_mut(), &self.semver()).await?;
out.commit().await?;
fn semver(&self) -> &'static crate::util::Version;
async fn up<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error>;
async fn down<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error>;
async fn commit<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error> {
crate::db::DatabaseModel::new()
.server_info()
.version()
.put(db, self.semver())
.await?;
Ok(())
}
async fn migrate_to<V: VersionT>(&self, version: &V) -> Result<(), Error> {
async fn migrate_to<V: VersionT, Db: DbHandle>(
&self,
version: &V,
db: &mut Db,
) -> Result<(), Error> {
match self.semver().cmp(version.semver()) {
Ordering::Greater => self.rollback_to_unchecked(version).await,
Ordering::Less => version.migrate_from_unchecked(self).await,
Ordering::Greater => self.rollback_to_unchecked(version, db).await,
Ordering::Less => version.migrate_from_unchecked(self, db).await,
Ordering::Equal => Ok(()),
}
}
async fn migrate_from_unchecked<V: VersionT>(&self, version: &V) -> Result<(), Error> {
async fn migrate_from_unchecked<V: VersionT, Db: DbHandle>(
&self,
version: &V,
db: &mut Db,
) -> Result<(), Error> {
let previous = Self::Previous::new();
if version.semver() != previous.semver() {
previous.migrate_from_unchecked(version).await?;
previous.migrate_from_unchecked(version, db).await?;
}
log::info!("{} -> {}", previous.semver(), self.semver());
self.up().await?;
self.commit().await?;
log::info!(
"{} -> {}",
previous.semver().as_str(),
self.semver().as_str()
);
self.up(db).await?;
self.commit(db).await?;
Ok(())
}
async fn rollback_to_unchecked<V: VersionT>(&self, version: &V) -> Result<(), Error> {
async fn rollback_to_unchecked<V: VersionT, Db: DbHandle>(
&self,
version: &V,
db: &mut Db,
) -> Result<(), Error> {
let previous = Self::Previous::new();
log::info!("{} -> {}", self.semver(), previous.semver());
self.down().await?;
previous.commit().await?;
log::info!(
"{} -> {}",
self.semver().as_str(),
previous.semver().as_str()
);
self.down(db).await?;
previous.commit(db).await?;
if version.semver() != previous.semver() {
previous.rollback_to_unchecked(version).await?;
previous.rollback_to_unchecked(version, db).await?;
}
Ok(())
}
@@ -120,7 +142,7 @@ where
T: VersionT,
{
fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
let v = emver::Version::deserialize(deserializer)?;
let v = crate::util::Version::deserialize(deserializer)?;
let version = T::new();
if &v == version.semver() {
Ok(Wrapper(version))
@@ -129,162 +151,38 @@ where
}
}
}
const V0_0_0: emver::Version = emver::Version::new(0, 0, 0, 0);
lazy_static! {
static ref V0_0_0: crate::util::Version = emver::Version::new(0, 0, 0, 0).into();
}
#[async_trait]
impl VersionT for () {
type Previous = ();
fn new() -> Self {
()
}
fn semver(&self) -> &'static emver::Version {
&V0_0_0
fn semver(&self) -> &'static crate::util::Version {
&*V0_0_0
}
async fn up(&self) -> Result<(), Error> {
async fn up<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error> {
Ok(())
}
async fn down(&self) -> Result<(), Error> {
async fn down<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error> {
Ok(())
}
}
pub async fn init() -> Result<(), failure::Error> {
let _lock = PersistencePath::from_ref("").lock(true).await?;
let vpath = PersistencePath::from_ref("version");
if let Some(mut f) = vpath.maybe_read(false).await.transpose()? {
let v: Version = crate::util::from_yaml_async_reader(&mut *f).await?;
match v {
Version::V0_0_0(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_1_0(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_1_1(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_1_2(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_1_3(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_1_4(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_1_5(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_2_0(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_2_1(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_2_2(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_2_3(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_2_4(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_2_5(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_2_6(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_2_7(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_2_8(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_2_9(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_2_10(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_2_11(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_2_12(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_2_13(v) => v.0.migrate_to(&Current::new()).await?,
Version::V0_2_14(v) => v.0.migrate_to(&Current::new()).await?,
Version::Other(_) => (),
// TODO find some way to automate this?
}
} else {
().migrate_to(&Current::new()).await?;
}
Ok(())
pub async fn init() -> Result<(), Error> {
todo!()
}
pub async fn self_update(requirement: emver::VersionRange) -> Result<(), Error> {
let req_str: String = format!("{}", requirement)
.chars()
.filter(|c| !c.is_whitespace())
.collect();
let url = format!("{}/appmgr?spec={}", &*crate::SYS_REGISTRY_URL, req_str);
log::info!("Fetching new version from {}", url);
let response = reqwest::get(&url)
.compat()
.await
.with_code(crate::error::NETWORK_ERROR)?
.error_for_status()
.with_code(crate::error::REGISTRY_ERROR)?;
let tmp_appmgr_path = PersistencePath::from_ref("appmgr").tmp();
if let Some(parent) = tmp_appmgr_path.parent() {
if !parent.exists() {
tokio::fs::create_dir_all(parent)
.await
.with_code(crate::error::FILESYSTEM_ERROR)?;
}
}
let mut f = tokio::fs::OpenOptions::new()
.create(true)
.write(true)
.open(&tmp_appmgr_path)
.await
.with_context(|e| format!("{}: {}", tmp_appmgr_path.display(), e))
.with_code(crate::error::FILESYSTEM_ERROR)?;
tokio::io::copy(
&mut AsyncCompat(
response
.bytes_stream()
.map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))
.into_async_read(),
),
&mut f,
)
.await
.no_code()?;
drop(f);
crate::ensure_code!(
tokio::process::Command::new("chmod")
.arg("700")
.arg(&tmp_appmgr_path)
.output()
.await?
.status
.success(),
crate::error::FILESYSTEM_ERROR,
"chmod failed"
);
let out = std::process::Command::new(&tmp_appmgr_path)
.arg("semver")
.stdout(std::process::Stdio::piped())
.spawn()?
.wait_with_output()
.with_context(|e| format!("{} semver: {}", tmp_appmgr_path.display(), e))
.no_code()?;
let out_str = std::str::from_utf8(&out.stdout).no_code()?;
log::info!("Migrating to version {}", out_str);
let v: Version = serde_yaml::from_str(out_str)
.with_context(|e| format!("{}: {:?}", e, out_str))
.with_code(crate::error::SERDE_ERROR)?;
match v {
Version::V0_0_0(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_1_0(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_1_1(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_1_2(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_1_3(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_1_4(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_1_5(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_2_0(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_2_1(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_2_2(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_2_3(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_2_4(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_2_5(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_2_6(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_2_7(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_2_8(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_2_9(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_2_10(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_2_11(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_2_12(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_2_13(v) => Current::new().migrate_to(&v.0).await?,
Version::V0_2_14(v) => Current::new().migrate_to(&v.0).await?,
Version::Other(_) => (),
// TODO find some way to automate this?
};
let cur_path = std::path::Path::new("/usr/local/bin/appmgr");
tokio::fs::rename(&tmp_appmgr_path, &cur_path)
.await
.with_context(|e| {
format!(
"{} -> {}: {}",
tmp_appmgr_path.display(),
cur_path.display(),
e
)
})
.with_code(crate::error::FILESYSTEM_ERROR)?;
Ok(())
todo!()
}
/// CLI command `git-info`: reports the git revision this binary was built
/// from (full 40-character hash, `-modified` suffix when the tree was dirty).
#[command(rename = "git-info", local)]
pub fn git_info(#[context] _ctx: EitherContext) -> Result<String, Error> {
    Ok(
        git_version::git_version!(args = ["--always", "--abbrev=40", "--dirty=-modified"])
            .to_owned(),
    )
}

View File

@@ -1,279 +0,0 @@
use std::path::Path;
use super::*;
// Semver constant for the 0.1.0 migration target.
const V0_1_0: emver::Version = emver::Version::new(0, 1, 0, 0);
// Marker type implementing `VersionT` for 0.1.0.
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = ();
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> &'static emver::Version {
        &V0_1_0
    }
    // Forward migration: set up the persistence/tmp directory layout, fetch
    // the stock torrc from the registry, and register the agent's own hidden
    // service.
    async fn up(&self) -> Result<(), Error> {
        tokio::fs::create_dir_all(Path::new(crate::PERSISTENCE_DIR).join("tor")).await?;
        tokio::fs::create_dir_all(Path::new(crate::PERSISTENCE_DIR).join("apps")).await?;
        tokio::fs::create_dir_all(Path::new(crate::TMP_DIR).join("tor")).await?;
        tokio::fs::create_dir_all(Path::new(crate::TMP_DIR).join("apps")).await?;
        let mut outfile = legacy::util::PersistencePath::from_ref("tor/torrc")
            .write()
            .await?;
        // Stream the registry's torrc straight into the persistence file.
        tokio::io::copy(
            &mut AsyncCompat(
                reqwest::get(&format!("{}/torrc?spec==0.0.0", &*crate::SYS_REGISTRY_URL))
                    .compat()
                    .await
                    .with_context(|e| format!("GET {}/torrc: {}", &*crate::SYS_REGISTRY_URL, e))
                    .with_code(crate::error::NETWORK_ERROR)?
                    .error_for_status()
                    .with_context(|e| format!("GET {}/torrc: {}", &*crate::SYS_REGISTRY_URL, e))
                    .with_code(crate::error::REGISTRY_ERROR)?
                    .bytes_stream()
                    .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))
                    .into_async_read(),
            ),
            outfile.as_mut(),
        )
        .await
        .with_code(crate::error::FILESYSTEM_ERROR)?;
        outfile.commit().await?;
        // Expose the agent itself as a hidden service on port 5959.
        legacy::tor::set_svc(
            "start9-agent",
            legacy::tor::Service {
                ports: vec![5959],
                hidden_service_version: Default::default(),
            },
        )
        .await
        .no_code()?;
        Ok(())
    }
    // No rollback work required below 0.1.0.
    async fn down(&self) -> Result<(), Error> {
        Ok(())
    }
}
mod legacy {
    /// Frozen copy of the 0.1.0-era tor service management, kept here so the
    /// migration's behavior is independent of the live modules.
    pub mod tor {
        use failure::{Error, ResultExt};
        use linear_map::LinearMap;
        use tokio::io::AsyncWriteExt;
        use crate::tor::HiddenServiceVersion;
        use super::util::PersistencePath;
        pub const ETC_TOR_RC: &'static str = "/etc/tor/torrc";
        pub const HIDDEN_SERVICE_DIR_ROOT: &'static str = "/var/lib/tor";
        /// A hidden service definition: exposed ports plus protocol version.
        #[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
        pub struct Service {
            pub ports: Vec<u16>,
            pub hidden_service_version: HiddenServiceVersion,
        }
        // Loads the persisted name -> service map, defaulting to empty when
        // the file does not exist yet.
        async fn services_map(path: &PersistencePath) -> Result<LinearMap<String, Service>, Error> {
            use crate::util::Apply;
            Ok(path
                .maybe_read()
                .await
                .transpose()?
                .map(crate::util::from_yaml_async_reader)
                .apply(futures::future::OptionFuture::from)
                .await
                .transpose()?
                .unwrap_or_else(LinearMap::new))
        }
        /// Rewrites /etc/tor/torrc from the stock template plus one
        /// HiddenService stanza per registered service.
        pub async fn write_services(
            hidden_services: &LinearMap<String, Service>,
        ) -> Result<(), Error> {
            tokio::fs::copy(crate::TOR_RC, ETC_TOR_RC)
                .await
                .with_context(|e| format!("{} -> {}: {}", crate::TOR_RC, ETC_TOR_RC, e))?;
            let mut f = tokio::fs::OpenOptions::new()
                .append(true)
                .open(ETC_TOR_RC)
                .await?;
            f.write("\n".as_bytes()).await?;
            for (name, service) in hidden_services {
                f.write("\n".as_bytes()).await?;
                f.write(format!("# HIDDEN SERVICE FOR {}\n", name).as_bytes())
                    .await?;
                f.write(
                    format!(
                        "HiddenServiceDir {}/app-{}/\n",
                        HIDDEN_SERVICE_DIR_ROOT, name
                    )
                    .as_bytes(),
                )
                .await?;
                f.write(format!("{}\n", service.hidden_service_version).as_bytes())
                    .await?;
                // Each service port is forwarded 1:1 to localhost.
                for port in &service.ports {
                    f.write(format!("HiddenServicePort {} 127.0.0.1:{}\n", port, port).as_bytes())
                        .await?;
                }
                f.write("\n".as_bytes()).await?;
            }
            Ok(())
        }
        /// Registers (or replaces) a hidden service, rewrites torrc, and
        /// restarts tor so the change takes effect.
        pub async fn set_svc(name: &str, service: Service) -> Result<(), Error> {
            log::info!(
                "Adding Tor hidden service {} to {}.",
                name,
                crate::SERVICES_YAML
            );
            let path = PersistencePath::from_ref(crate::SERVICES_YAML);
            let mut hidden_services = services_map(&path).await?;
            hidden_services.insert(name.to_owned(), service);
            let mut services_yaml = path.write().await?;
            crate::util::to_yaml_async_writer(services_yaml.as_mut(), &hidden_services).await?;
            services_yaml.write_all("\n".as_bytes()).await?;
            services_yaml.commit().await?;
            log::info!("Adding Tor hidden service {} to {}.", name, ETC_TOR_RC);
            write_services(&hidden_services).await?;
            log::info!("Restarting Tor.");
            let svc_exit = std::process::Command::new("service")
                .args(&["tor", "restart"])
                .status()?;
            ensure!(
                svc_exit.success(),
                "Failed to Restart Tor: {}",
                svc_exit.code().unwrap_or(0)
            );
            Ok(())
        }
    }
pub mod util {
use std::path::{Path, PathBuf};
use tokio::fs::File;
use crate::Error;
use crate::ResultExt as _;
use failure::ResultExt as _;
/// A path relative to the persistence root (`crate::PERSISTENCE_DIR`).
#[derive(Clone, Debug)]
pub struct PersistencePath(PathBuf);
impl PersistencePath {
    /// Builds a persistence-relative path; a leading `/` is stripped so the
    /// path always joins cleanly under the persistence/tmp roots.
    pub fn from_ref<P: AsRef<Path>>(p: P) -> Self {
        let path = p.as_ref();
        PersistencePath(if path.has_root() {
            path.strip_prefix("/").unwrap().to_owned()
        } else {
            path.to_owned()
        })
    }
    /// Staging location used for atomic writes.
    pub fn tmp(&self) -> PathBuf {
        Path::new(crate::TMP_DIR).join(&self.0)
    }
    /// Final location under the persistence root.
    pub fn path(&self) -> PathBuf {
        Path::new(crate::PERSISTENCE_DIR).join(&self.0)
    }
    /// Opens the file for reading, or returns `None` when it does not exist.
    // NOTE(review): `path.exists()` is a blocking syscall and a TOCTOU window
    // before `File::open` — confirm this is acceptable here.
    pub async fn maybe_read(&self) -> Option<Result<File, Error>> {
        let path = self.path();
        if path.exists() {
            Some(
                File::open(&path)
                    .await
                    .with_context(|e| format!("{}: {}", path.display(), e))
                    .with_code(crate::error::FILESYSTEM_ERROR),
            )
        } else {
            None
        }
    }
    /// Opens the file for writing. If the target already exists, writes go to
    /// the tmp staging path and must be `commit()`ed to replace the original;
    /// otherwise the file is created in place (no commit needed).
    pub async fn write(&self) -> Result<PersistenceFile, Error> {
        let path = self.path();
        if let Some(parent) = path.parent() {
            if !parent.exists() {
                tokio::fs::create_dir_all(parent).await?;
            }
        }
        Ok(if path.exists() {
            let path = self.tmp();
            if let Some(parent) = path.parent() {
                if !parent.exists() {
                    tokio::fs::create_dir_all(parent).await?;
                }
            }
            PersistenceFile::new(File::create(path).await?, Some(self.clone()))
        } else {
            PersistenceFile::new(File::create(path).await?, None)
        })
    }
}
/// A file handle that optionally participates in the tmp-then-rename commit
/// protocol set up by `PersistencePath::write`.
#[derive(Debug)]
pub struct PersistenceFile {
    file: File,
    // Set when the data was written to the tmp path and still needs to be
    // renamed into its final location.
    needs_commit: Option<PersistencePath>,
}
impl PersistenceFile {
    pub fn new(file: File, needs_commit: Option<PersistencePath>) -> Self {
        PersistenceFile { file, needs_commit }
    }
    /// Commits the file to the persistence directory.
    /// If this fails, the file was not saved.
    pub async fn commit(mut self) -> Result<(), Error> {
        if let Some(path) = self.needs_commit.take() {
            // Rename is atomic on the same filesystem, so readers never see a
            // half-written file.
            tokio::fs::rename(path.tmp(), path.path())
                .await
                .with_context(|e| {
                    format!(
                        "{} -> {}: {}",
                        path.tmp().display(),
                        path.path().display(),
                        e
                    )
                })
                .with_code(crate::error::FILESYSTEM_ERROR)
        } else {
            Ok(())
        }
    }
}
// Smart-pointer plumbing: expose the inner `File` so a PersistenceFile can be
// used anywhere a tokio File is expected.
impl std::ops::Deref for PersistenceFile {
    type Target = File;
    fn deref(&self) -> &Self::Target {
        &self.file
    }
}
impl std::ops::DerefMut for PersistenceFile {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.file
    }
}
impl AsRef<File> for PersistenceFile {
    fn as_ref(&self) -> &File {
        // Goes through the Deref impl above.
        &*self
    }
}
impl AsMut<File> for PersistenceFile {
    fn as_mut(&mut self) -> &mut File {
        &mut *self
    }
}
impl Drop for PersistenceFile {
    fn drop(&mut self) {
        // An uncommitted tmp file is left behind on disk; warn so the leak
        // (and the lost write) is visible in the logs.
        if let Some(path) = &self.needs_commit {
            log::warn!(
                "{} was dropped without being committed.",
                path.path().display()
            );
        }
    }
}
}
}

View File

@@ -1,204 +0,0 @@
use std::path::Path;
use super::*;
const V0_1_1: emver::Version = emver::Version::new(0, 1, 1, 0);

/// Migration to 0.1.1: swaps the torrc template, (re)creates the `start9`
/// docker bridge network, clears the stale services.yaml, reloads Tor, and
/// reinstalls every app against the new layout.
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_1_0::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> &'static emver::Version {
        &V0_1_1
    }
    async fn up(&self) -> Result<(), Error> {
        log::info!("Update torrc");
        // Download the 0.1.1 torrc template from the registry and write it
        // atomically via PersistencePath.
        let mut outfile = crate::util::PersistencePath::from_ref("tor/torrc")
            .write(None)
            .await?;
        tokio::io::copy(
            &mut AsyncCompat(
                reqwest::get(&format!("{}/torrc?spec==0.1.1", &*crate::SYS_REGISTRY_URL))
                    .compat()
                    .await
                    .with_context(|e| format!("GET {}/torrc: {}", &*crate::SYS_REGISTRY_URL, e))
                    .with_code(crate::error::NETWORK_ERROR)?
                    .error_for_status()
                    .with_context(|e| format!("GET {}/torrc: {}", &*crate::SYS_REGISTRY_URL, e))
                    .with_code(crate::error::REGISTRY_ERROR)?
                    .bytes_stream()
                    .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))
                    .into_async_read(),
            ),
            outfile.as_mut(),
        )
        .await
        .with_code(crate::error::FILESYSTEM_ERROR)?;
        outfile.commit().await?;
        // Best effort: the network may already exist, so only warn on failure.
        if !std::process::Command::new("docker")
            .arg("network")
            .arg("create")
            .arg("-d")
            .arg("bridge")
            .arg("--subnet=172.18.0.0/16")
            .arg("start9")
            .stdout(std::process::Stdio::null())
            .status()?
            .success()
        {
            log::warn!("Failed to Create Network")
        }
        // Delete the old services.yaml; a missing file is not an error.
        match tokio::fs::remove_file(Path::new(crate::PERSISTENCE_DIR).join(crate::SERVICES_YAML))
            .await
        {
            Ok(_) => Ok(()),
            Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(()),
            Err(e) => Err(e),
        }
        .with_context(|e| format!("{}/{}: {}", crate::PERSISTENCE_DIR, crate::SERVICES_YAML, e))
        .with_code(crate::error::FILESYSTEM_ERROR)?;
        crate::tor::reload().await?;
        // Reinstall every app so it picks up the new layout.
        for app in crate::apps::list_info().await? {
            legacy::update::update(&app.0).await?;
        }
        Ok(())
    }
    async fn down(&self) -> Result<(), Error> {
        // Mirror of `up`: restore the 0.1.0 torrc template.
        let mut outfile = crate::util::PersistencePath::from_ref("tor/torrc")
            .write(None)
            .await?;
        tokio::io::copy(
            &mut AsyncCompat(
                reqwest::get(&format!("{}/torrc?spec==0.1.0", &*crate::SYS_REGISTRY_URL))
                    .compat()
                    .await
                    .with_context(|e| format!("GET {}/torrc: {}", &*crate::SYS_REGISTRY_URL, e))
                    .no_code()?
                    .error_for_status()
                    .with_context(|e| format!("GET {}/torrc: {}", &*crate::SYS_REGISTRY_URL, e))
                    .no_code()?
                    .bytes_stream()
                    .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))
                    .into_async_read(),
            ),
            outfile.as_mut(),
        )
        .await
        .with_code(crate::error::FILESYSTEM_ERROR)?;
        outfile.commit().await?;
        // Remove (but do not purge) every installed app.
        for app in crate::apps::list_info().await? {
            legacy::remove::remove(&app.0, false).await?;
        }
        let tor_svcs = crate::util::PersistencePath::from_ref(crate::SERVICES_YAML).path();
        if tor_svcs.exists() {
            tokio::fs::remove_file(&tor_svcs)
                .await
                .with_context(|e| format!("{}: {}", tor_svcs.display(), e))
                .with_code(crate::error::FILESYSTEM_ERROR)?;
        }
        // Best effort: warn rather than fail if the network is already gone.
        if !std::process::Command::new("docker")
            .arg("network")
            .arg("rm")
            .arg("start9")
            .stdout(std::process::Stdio::null())
            .status()?
            .success()
        {
            log::warn!("Failed to Remove Network");
        }
        Ok(())
    }
}
mod legacy {
pub mod remove {
use std::path::Path;
use crate::Error;
/// Removes an installed app: drops it from the manifest, stops and removes
/// its docker container, and — when `purge` is set — also deletes its hidden
/// service, metadata, volume, and prunes unused docker images.
pub async fn remove(name: &str, purge: bool) -> Result<(), Error> {
    log::info!("Removing app from manifest.");
    crate::apps::remove(name).await?;
    log::info!("Stopping docker container.");
    // Stop/remove failures are logged but not fatal: the container may
    // already be stopped or gone.
    if !tokio::process::Command::new("docker")
        .args(&["stop", name])
        .stdout(std::process::Stdio::null())
        .stderr(match log::max_level() {
            log::LevelFilter::Error => std::process::Stdio::null(),
            _ => std::process::Stdio::inherit(),
        })
        .status()
        .await?
        .success()
    {
        log::error!("Failed to Stop Docker Container");
    };
    log::info!("Removing docker container.");
    if !tokio::process::Command::new("docker")
        .args(&["rm", name])
        .stdout(std::process::Stdio::null())
        .stderr(match log::max_level() {
            log::LevelFilter::Error => std::process::Stdio::null(),
            _ => std::process::Stdio::inherit(),
        })
        .status()
        .await?
        .success()
    {
        log::error!("Failed to Remove Docker Container");
    };
    if purge {
        log::info!("Removing tor hidden service.");
        crate::tor::rm_svc(name).await?;
        log::info!("Removing app metadata.");
        // NOTE(review): blocking std::fs calls inside an async fn — legacy
        // code path, presumably tolerated during migrations.
        std::fs::remove_dir_all(Path::new(crate::PERSISTENCE_DIR).join("apps").join(name))?;
        log::info!("Destroying mounted volume.");
        std::fs::remove_dir_all(Path::new(crate::VOLUMES).join(name))?;
        log::info!("Pruning unused docker images.");
        crate::ensure_code!(
            std::process::Command::new("docker")
                .args(&["image", "prune", "-a", "-f"])
                .stdout(std::process::Stdio::null())
                .stderr(match log::max_level() {
                    log::LevelFilter::Error => std::process::Stdio::null(),
                    _ => std::process::Stdio::inherit(),
                })
                .status()?
                .success(),
            3,
            "Failed to Prune Docker Images"
        );
    };
    Ok(())
}
}
pub mod update {
use crate::Error;
/// Re-installs an app at a specific version. `name_version` has the form
/// `"<id>@<version>"`; the app is downloaded, the old install removed
/// (without purging data), and the new version installed. If the previous
/// configuration still satisfies the new config spec, the app is marked
/// configured again.
pub async fn update(name_version: &str) -> Result<(), Error> {
    let name = name_version
        .split("@")
        .next()
        .ok_or_else(|| failure::format_err!("invalid app id"))?;
    crate::install::download_name(name_version).await?;
    super::remove::remove(name, false).await?;
    crate::install::install_name(name_version, true).await?;
    let config = crate::apps::config(name).await?;
    if let Some(cfg) = config.config {
        if config.spec.matches(&cfg).is_ok() {
            crate::apps::set_configured(name, true).await?;
        }
    }
    Ok(())
}
}
}

View File

@@ -1,104 +0,0 @@
use futures::StreamExt;
use futures::TryStreamExt;
use linear_map::LinearMap;
use super::*;
const V0_1_2: emver::Version = emver::Version::new(0, 1, 2, 0);

/// Migration to 0.1.2: rewraps every app manifest in the versioned
/// `Manifest::V0` envelope and extends apps.yaml entries with the new
/// `title`/`recoverable`/`needs_restart` fields.
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_1_1::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> &'static emver::Version {
        &V0_1_2
    }
    async fn up(&self) -> Result<(), Error> {
        let app_info = legacy::apps::list_info().await?;
        // Rewrite each manifest.yaml in the new tagged format.
        for (name, _) in &app_info {
            let p = PersistencePath::from_ref("apps")
                .join(name)
                .join("manifest.yaml");
            let mut f = p.for_update().await?;
            let manifest: crate::manifest::ManifestV0 = crate::util::from_yaml_async_reader(&mut f)
                .await
                .no_code()?;
            let mut f = f.into_writer().await?;
            crate::util::to_yaml_async_writer(&mut f, &crate::manifest::Manifest::V0(manifest))
                .await
                .no_code()?;
            f.commit().await?;
        }
        // Rebuild apps.yaml with the extended AppInfo shape.
        let p = PersistencePath::from_ref("apps.yaml");
        let exists = p.path().exists();
        let mut f = p.for_update().await?;
        let info: LinearMap<String, legacy::apps::AppInfo> = if exists {
            crate::util::from_yaml_async_reader(&mut f)
                .await
                .no_code()?
        } else {
            LinearMap::new()
        };
        let new_info: LinearMap<String, crate::apps::AppInfo> = futures::stream::iter(info)
            .then(|(name, i)| async move {
                // The title is sourced from the (just rewritten) manifest.
                let title = crate::apps::manifest(&name).await?.title;
                Ok::<_, Error>((
                    name,
                    crate::apps::AppInfo {
                        title,
                        version: i.version,
                        tor_address: i.tor_address,
                        configured: i.configured,
                        recoverable: false,
                        needs_restart: false,
                    },
                ))
            })
            .try_collect()
            .await?;
        let mut f = f.into_writer().await?;
        crate::util::to_yaml_async_writer(&mut f, &new_info)
            .await
            .no_code()?;
        f.commit().await?;
        Ok(())
    }
    /// No-op rollback: the old format is still deserializable.
    async fn down(&self) -> Result<(), Error> {
        Ok(())
    }
}
mod legacy {
pub mod apps {
use linear_map::LinearMap;
use crate::util::from_yaml_async_reader;
use crate::util::Apply;
use crate::util::PersistencePath;
use crate::Error;
/// Pre-0.1.2 apps.yaml entry (no title/recoverable/needs_restart fields).
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct AppInfo {
    pub version: emver::Version,
    pub tor_address: Option<String>,
    pub configured: bool,
}
/// Reads the legacy apps.yaml, returning an empty map if it is missing.
pub async fn list_info() -> Result<LinearMap<String, AppInfo>, Error> {
    let apps_path = PersistencePath::from_ref("apps.yaml");
    Ok(apps_path
        .maybe_read(false)
        .await
        .transpose()?
        .map(|mut f| async move { from_yaml_async_reader(&mut *f).await })
        .apply(futures::future::OptionFuture::from)
        .await
        .transpose()?
        .unwrap_or_else(LinearMap::new))
}
}
}

View File

@@ -1,21 +0,0 @@
use super::*;
const V0_1_3: emver::Version = emver::Version::new(0, 1, 3, 0);

/// Marker migration for 0.1.3: no persisted state changed in this release.
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_1_2::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> &'static emver::Version {
        &V0_1_3
    }
    /// No-op: nothing to migrate.
    async fn up(&self) -> Result<(), Error> {
        Ok(())
    }
    /// No-op: nothing to roll back.
    async fn down(&self) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,21 +0,0 @@
use super::*;
const V0_1_4: emver::Version = emver::Version::new(0, 1, 4, 0);

/// Marker migration for 0.1.4: no persisted state changed in this release.
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_1_3::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> &'static emver::Version {
        &V0_1_4
    }
    /// No-op: nothing to migrate.
    async fn up(&self) -> Result<(), Error> {
        Ok(())
    }
    /// No-op: nothing to roll back.
    async fn down(&self) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,21 +0,0 @@
use super::*;
const V0_1_5: emver::Version = emver::Version::new(0, 1, 5, 0);

/// Marker migration for 0.1.5: no persisted state changed in this release.
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_1_4::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> &'static emver::Version {
        &V0_1_5
    }
    /// No-op: nothing to migrate.
    async fn up(&self) -> Result<(), Error> {
        Ok(())
    }
    /// No-op: nothing to roll back.
    async fn down(&self) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,98 +0,0 @@
use linear_map::LinearMap;
use super::*;
use crate::util::{to_yaml_async_writer, PersistencePath};
const V0_2_0: emver::Version = emver::Version::new(0, 2, 0, 0);

/// Migration to 0.2.0: adds the `needs_restart` flag to every apps.yaml
/// entry (and strips it again on rollback).
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_1_5::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> &'static emver::Version {
        &V0_2_0
    }
    async fn up(&self) -> Result<(), Error> {
        // Read the legacy entries and re-emit them with needs_restart: false.
        let app_info: LinearMap<String, crate::apps::AppInfo> = legacy::apps::list_info()
            .await?
            .into_iter()
            .map(|(id, ai)| {
                (
                    id,
                    crate::apps::AppInfo {
                        title: ai.title,
                        version: ai.version,
                        tor_address: ai.tor_address,
                        configured: ai.configured,
                        recoverable: ai.recoverable,
                        needs_restart: false,
                    },
                )
            })
            .collect();
        let mut apps_file = PersistencePath::from_ref("apps.yaml").write(None).await?;
        to_yaml_async_writer(&mut *apps_file, &app_info).await?;
        apps_file.commit().await?;
        Ok(())
    }
    async fn down(&self) -> Result<(), Error> {
        // Inverse mapping: drop needs_restart and write the legacy shape.
        let app_info: LinearMap<String, legacy::apps::AppInfo> = crate::apps::list_info()
            .await?
            .into_iter()
            .map(|(id, ai)| {
                (
                    id,
                    legacy::apps::AppInfo {
                        title: ai.title,
                        version: ai.version,
                        tor_address: ai.tor_address,
                        configured: ai.configured,
                        recoverable: ai.recoverable,
                    },
                )
            })
            .collect();
        let mut apps_file = PersistencePath::from_ref("apps.yaml").write(None).await?;
        to_yaml_async_writer(&mut *apps_file, &app_info).await?;
        apps_file.commit().await?;
        Ok(())
    }
}
mod legacy {
pub mod apps {
use linear_map::LinearMap;
use crate::util::{from_yaml_async_reader, PersistencePath};
use crate::Error;
/// Helper for `#[serde(skip_serializing_if = "not")]`: true when the flag is
/// unset, so default `false` values are omitted from the YAML.
fn not(b: &bool) -> bool {
    !*b
}
/// Pre-0.2.0 apps.yaml entry (no `needs_restart` field).
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct AppInfo {
    pub title: String,
    pub version: emver::Version,
    pub tor_address: Option<String>,
    pub configured: bool,
    // Omitted from YAML when false (the default).
    #[serde(default)]
    #[serde(skip_serializing_if = "not")]
    pub recoverable: bool,
}
/// Reads the legacy apps.yaml, returning an empty map if it is missing.
pub async fn list_info() -> Result<LinearMap<String, AppInfo>, Error> {
    let apps_path = PersistencePath::from_ref("apps.yaml");
    let mut f = match apps_path.maybe_read(false).await.transpose()? {
        Some(a) => a,
        None => return Ok(LinearMap::new()),
    };
    from_yaml_async_reader(&mut *f).await
}
}
}

View File

@@ -1,21 +0,0 @@
use super::*;
const V0_2_1: emver::Version = emver::Version::new(0, 2, 1, 0);

/// Marker migration for 0.2.1: no persisted state changed in this release.
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_2_0::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> &'static emver::Version {
        &V0_2_1
    }
    /// No-op: nothing to migrate.
    async fn up(&self) -> Result<(), Error> {
        Ok(())
    }
    /// No-op: nothing to roll back.
    async fn down(&self) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,21 +0,0 @@
use super::*;
const V0_2_10: emver::Version = emver::Version::new(0, 2, 10, 0);

/// Marker migration for 0.2.10: no persisted state changed in this release.
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_2_9::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> &'static emver::Version {
        &V0_2_10
    }
    /// No-op: nothing to migrate.
    async fn up(&self) -> Result<(), Error> {
        Ok(())
    }
    /// No-op: nothing to roll back.
    async fn down(&self) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,38 +0,0 @@
use super::*;
use std::os::unix::process::ExitStatusExt;
const V0_2_11: emver::Version = emver::Version::new(0, 2, 11, 0);

/// Migration to 0.2.11: regenerates the nginx LAN-services config from the
/// current Tor services map, then reloads nginx.
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_2_10::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> &'static emver::Version {
        &V0_2_11
    }
    async fn up(&self) -> Result<(), Error> {
        crate::tor::write_lan_services(
            &crate::tor::services_map(&PersistencePath::from_ref(crate::SERVICES_YAML)).await?,
        )
        .await?;
        let svc_exit = std::process::Command::new("service")
            .args(&["nginx", "reload"])
            .status()?;
        crate::ensure_code!(
            svc_exit.success(),
            crate::error::GENERAL_ERROR,
            "Failed to Reload Nginx: {}",
            // Report 128+signal when nginx was killed rather than exited.
            svc_exit
                .code()
                .or_else(|| { svc_exit.signal().map(|a| 128 + a) })
                .unwrap_or(0)
        );
        Ok(())
    }
    /// No-op rollback: regenerated config is compatible with 0.2.10.
    async fn down(&self) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,38 +0,0 @@
use super::*;
use std::os::unix::process::ExitStatusExt;
const V0_2_12: emver::Version = emver::Version::new(0, 2, 12, 0);

/// Migration to 0.2.12: identical to the 0.2.11 migration — regenerates the
/// nginx LAN-services config and reloads nginx (run again to pick up fixes
/// in the generator).
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_2_11::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> &'static emver::Version {
        &V0_2_12
    }
    async fn up(&self) -> Result<(), Error> {
        crate::tor::write_lan_services(
            &crate::tor::services_map(&PersistencePath::from_ref(crate::SERVICES_YAML)).await?,
        )
        .await?;
        let svc_exit = std::process::Command::new("service")
            .args(&["nginx", "reload"])
            .status()?;
        crate::ensure_code!(
            svc_exit.success(),
            crate::error::GENERAL_ERROR,
            "Failed to Reload Nginx: {}",
            // Report 128+signal when nginx was killed rather than exited.
            svc_exit
                .code()
                .or_else(|| { svc_exit.signal().map(|a| 128 + a) })
                .unwrap_or(0)
        );
        Ok(())
    }
    /// No-op rollback.
    async fn down(&self) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,21 +0,0 @@
use super::*;
const V0_2_2: emver::Version = emver::Version::new(0, 2, 2, 0);

/// Marker migration for 0.2.2: no persisted state changed in this release.
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_2_1::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> &'static emver::Version {
        &V0_2_2
    }
    /// No-op: nothing to migrate.
    async fn up(&self) -> Result<(), Error> {
        Ok(())
    }
    /// No-op: nothing to roll back.
    async fn down(&self) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,21 +0,0 @@
use super::*;
const V0_2_3: emver::Version = emver::Version::new(0, 2, 3, 0);

/// Marker migration for 0.2.3: no persisted state changed in this release.
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_2_2::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> &'static emver::Version {
        &V0_2_3
    }
    /// No-op: nothing to migrate.
    async fn up(&self) -> Result<(), Error> {
        Ok(())
    }
    /// No-op: nothing to roll back.
    async fn down(&self) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,21 +0,0 @@
use super::*;
const V0_2_4: emver::Version = emver::Version::new(0, 2, 4, 0);

/// Marker migration for 0.2.4: no persisted state changed in this release.
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_2_3::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> &'static emver::Version {
        &V0_2_4
    }
    /// No-op: nothing to migrate.
    async fn up(&self) -> Result<(), Error> {
        Ok(())
    }
    /// No-op: nothing to roll back.
    async fn down(&self) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,21 +0,0 @@
use super::*;
const V0_2_5: emver::Version = emver::Version::new(0, 2, 5, 0);

/// Marker migration for 0.2.5: no persisted state changed in this release.
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_2_4::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> &'static emver::Version {
        &V0_2_5
    }
    /// No-op: nothing to migrate.
    async fn up(&self) -> Result<(), Error> {
        Ok(())
    }
    /// No-op: nothing to roll back.
    async fn down(&self) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,21 +0,0 @@
use super::*;
const V0_2_6: emver::Version = emver::Version::new(0, 2, 6, 0);

/// Marker migration for 0.2.6: no persisted state changed in this release.
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_2_5::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> &'static emver::Version {
        &V0_2_6
    }
    /// No-op: nothing to migrate.
    async fn up(&self) -> Result<(), Error> {
        Ok(())
    }
    /// No-op: nothing to roll back.
    async fn down(&self) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,36 +0,0 @@
use super::*;
use crate::util::Invoke;
const V0_2_7: emver::Version = emver::Version::new(0, 2, 7, 0);

/// Migration to 0.2.7: stops every app container and disables docker's
/// auto-restart policy so appmgr controls restarts itself.
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_2_6::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> &'static emver::Version {
        &V0_2_7
    }
    async fn up(&self) -> Result<(), Error> {
        for (app_id, _) in crate::apps::list_info().await? {
            tokio::process::Command::new("docker")
                .arg("stop")
                .arg(&app_id)
                .invoke("Docker")
                .await?;
            // `--restart no` hands restart responsibility back to appmgr.
            tokio::process::Command::new("docker")
                .arg("update")
                .arg("--restart")
                .arg("no")
                .arg(&app_id)
                .invoke("Docker")
                .await?;
        }
        Ok(())
    }
    /// No-op rollback.
    async fn down(&self) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,36 +0,0 @@
use super::*;
use crate::util::Invoke;
const V0_2_8: emver::Version = emver::Version::new(0, 2, 8, 0);

/// Migration to 0.2.8: identical to 0.2.7 — stops every app container and
/// disables docker's auto-restart policy (re-applied for upgraders).
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_2_7::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> &'static emver::Version {
        &V0_2_8
    }
    async fn up(&self) -> Result<(), Error> {
        for (app_id, _) in crate::apps::list_info().await? {
            tokio::process::Command::new("docker")
                .arg("stop")
                .arg(&app_id)
                .invoke("Docker")
                .await?;
            tokio::process::Command::new("docker")
                .arg("update")
                .arg("--restart")
                .arg("no")
                .arg(&app_id)
                .invoke("Docker")
                .await?;
        }
        Ok(())
    }
    /// No-op rollback.
    async fn down(&self) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,75 +0,0 @@
use std::os::unix::process::ExitStatusExt;
use super::*;
const V0_2_9: emver::Version = emver::Version::new(0, 2, 9, 0);

/// Migration to 0.2.9: generates the nginx LAN-services config, enables it
/// via a symlink into sites-enabled, and reloads nginx. Rollback removes the
/// symlink and config and reloads again.
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_2_8::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> &'static emver::Version {
        &V0_2_9
    }
    async fn up(&self) -> Result<(), Error> {
        crate::tor::write_lan_services(
            &crate::tor::services_map(&PersistencePath::from_ref(crate::SERVICES_YAML)).await?,
        )
        .await?;
        // Enable the generated config; an existing symlink is fine.
        // NOTE(review): `tokio::fs::os::unix::symlink` is the pre-1.0 tokio
        // API path — confirm against the pinned tokio version.
        tokio::fs::os::unix::symlink(
            crate::tor::ETC_NGINX_SERVICES_CONF,
            "/etc/nginx/sites-enabled/start9-services.conf",
        )
        .await
        .or_else(|e| {
            if e.kind() == std::io::ErrorKind::AlreadyExists {
                Ok(())
            } else {
                Err(e)
            }
        })?;
        let svc_exit = std::process::Command::new("service")
            .args(&["nginx", "reload"])
            .status()?;
        crate::ensure_code!(
            svc_exit.success(),
            crate::error::GENERAL_ERROR,
            "Failed to Reload Nginx: {}",
            // Report 128+signal when nginx was killed rather than exited.
            svc_exit
                .code()
                .or_else(|| { svc_exit.signal().map(|a| 128 + a) })
                .unwrap_or(0)
        );
        Ok(())
    }
    async fn down(&self) -> Result<(), Error> {
        // Missing files are tolerated: rollback may run more than once.
        tokio::fs::remove_file("/etc/nginx/sites-enabled/start9-services.conf")
            .await
            .or_else(|e| match e {
                e if e.kind() == std::io::ErrorKind::NotFound => Ok(()),
                e => Err(e),
            })?;
        tokio::fs::remove_file(crate::tor::ETC_NGINX_SERVICES_CONF)
            .await
            .or_else(|e| match e {
                e if e.kind() == std::io::ErrorKind::NotFound => Ok(()),
                e => Err(e),
            })?;
        let svc_exit = std::process::Command::new("service")
            .args(&["nginx", "reload"])
            .status()?;
        crate::ensure_code!(
            svc_exit.success(),
            crate::error::GENERAL_ERROR,
            "Failed to Reload Nginx: {}",
            svc_exit
                .code()
                .or_else(|| { svc_exit.signal().map(|a| 128 + a) })
                .unwrap_or(0)
        );
        Ok(())
    }
}

245
appmgr/src/volume/disk.rs Normal file
View File

@@ -0,0 +1,245 @@
use std::path::Path;
use anyhow::anyhow;
use futures::future::try_join_all;
use indexmap::IndexMap;
use serde::{Deserialize, Serialize};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use crate::util::Invoke;
use crate::{Error, ResultExt as _};
/// Device node of the boot/root disk (Raspberry Pi SD card).
pub const ROOT_DISK: &'static str = "/dev/mmcblk0";
/// Device node of the external data drive.
pub const MAIN_DISK: &'static str = "/dev/sda";

/// All disks reported by `parted -lm`, keyed by logical device name.
pub struct Disks(IndexMap<String, DiskInfo>);

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct DiskInfo {
    pub size: String,
    pub description: Option<String>,
    // Partitions keyed by their device node (e.g. /dev/sda1).
    pub partitions: IndexMap<String, PartitionInfo>,
}

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct PartitionInfo {
    pub is_mounted: bool,
    // None when parted did not report sizes for this disk.
    pub size: Option<String>,
    // Filesystem label from blkid, when one is set.
    pub label: Option<String>,
}
/// Enumerates disks and partitions by parsing `parted -lm` machine-readable
/// output, then enriches each partition with its blkid label and mount state.
pub async fn list() -> Result<Disks, Error> {
    let output = tokio::process::Command::new("parted")
        .arg("-lm")
        .invoke(crate::ErrorKind::GParted)
        .await?;
    let output_str = std::str::from_utf8(&output)?;
    // `parted -lm` separates disks with blank lines; each disk section is a
    // "BYT;" (units) header, a disk-info line, then one line per partition.
    let disks = output_str
        .split("\n\n")
        .filter_map(|s| -> Option<(String, DiskInfo)> {
            let mut lines = s.split("\n");
            let has_size = lines.next()? == "BYT;";
            let disk_info_line = lines.next()?;
            let mut disk_info_iter = disk_info_line.split(":");
            let logicalname = disk_info_iter.next()?.to_owned();
            // Devices ending in a digit (mmcblk0, nvme0n1) name partitions
            // with a "p" separator: mmcblk0p1 vs sda1.
            let partition_prefix = if logicalname.ends_with(|c: char| c.is_digit(10)) {
                logicalname.clone() + "p"
            } else {
                logicalname.clone()
            };
            let size = disk_info_iter.next()?.to_owned();
            disk_info_iter.next()?; // transport-type
            disk_info_iter.next()?; // logical-sector-size
            disk_info_iter.next()?; // physical-sector-size
            disk_info_iter.next()?; // partition-table-type
            let description = disk_info_iter.next()?;
            let description = if description.is_empty() {
                None
            } else {
                Some(description.to_owned())
            };
            Some((
                logicalname,
                DiskInfo {
                    size,
                    description,
                    partitions: lines
                        .filter_map(|partition_info_line| -> Option<(String, PartitionInfo)> {
                            let mut partition_info_iter = partition_info_line.split(":");
                            let partition_idx = partition_info_iter.next()?;
                            let logicalname = partition_prefix.clone() + partition_idx;
                            let size = if has_size {
                                partition_info_iter.next()?; // begin
                                partition_info_iter.next()?; // end
                                Some(partition_info_iter.next()?.to_owned())
                            } else {
                                None
                            };
                            Some((
                                logicalname,
                                PartitionInfo {
                                    // Filled in by blkid/findmnt below.
                                    is_mounted: false,
                                    size,
                                    label: None,
                                },
                            ))
                        })
                        .collect(),
                },
            ))
        });
    // Second pass: for each partition, query blkid (label) and findmnt
    // (mounted?) concurrently.
    Ok(Disks(
        try_join_all(disks.map(|(logicalname, disk)| async move {
            Ok::<_, Error>((
                logicalname,
                DiskInfo {
                    partitions: try_join_all(disk.partitions.into_iter().map(
                        |(logicalname, mut partition)| async move {
                            let mut blkid_command = tokio::process::Command::new("blkid");
                            let (blkid_res, findmnt_status) = futures::join!(
                                blkid_command
                                    .arg(&logicalname)
                                    .arg("-s")
                                    .arg("LABEL")
                                    .arg("-o")
                                    .arg("value")
                                    .invoke(crate::ErrorKind::Blkid),
                                tokio::process::Command::new("findmnt")
                                    .arg(&logicalname)
                                    .stdout(std::process::Stdio::null())
                                    .stderr(std::process::Stdio::null())
                                    .status()
                            );
                            let blkid_output = blkid_res?;
                            let label = std::str::from_utf8(&blkid_output)?.trim();
                            if !label.is_empty() {
                                partition.label = Some(label.to_owned());
                            }
                            // findmnt exits 0 iff the device is mounted.
                            if findmnt_status?.success() {
                                partition.is_mounted = true;
                            }
                            Ok::<_, Error>((logicalname, partition))
                        },
                    ))
                    .await?
                    .into_iter()
                    .collect(),
                    ..disk
                },
            ))
        }))
        .await?
        .into_iter()
        .collect(),
    ))
}
/// Mounts `logicalname` at `mount_point`, first unmounting anything already
/// mounted there and creating the mount-point directory if needed.
pub async fn mount<P: AsRef<Path>>(logicalname: &str, mount_point: P) -> Result<(), Error> {
    // `mountpoint` exits 0 iff the directory is currently a mount point.
    let is_mountpoint = tokio::process::Command::new("mountpoint")
        .arg(mount_point.as_ref())
        .stdout(std::process::Stdio::null())
        .stderr(std::process::Stdio::null())
        .status()
        .await?;
    if is_mountpoint.success() {
        unmount(mount_point.as_ref()).await?;
    }
    tokio::fs::create_dir_all(&mount_point).await?;
    let mount_output = tokio::process::Command::new("mount")
        .arg(logicalname)
        .arg(mount_point.as_ref())
        .output()
        .await?;
    crate::ensure_code!(
        mount_output.status.success(),
        crate::ErrorKind::Filesystem,
        "Error Mounting Drive: {}",
        std::str::from_utf8(&mount_output.stderr).unwrap_or("Unknown Error")
    );
    Ok(())
}
/// Mounts an encfs encrypted view of `src` at `dst`, feeding `password` to
/// encfs over stdin (`-S`). Fails with encfs's stderr output on error.
pub async fn mount_encfs<P0: AsRef<Path>, P1: AsRef<Path>>(
    src: P0,
    dst: P1,
    password: &str,
) -> Result<(), Error> {
    let mut encfs = tokio::process::Command::new("encfs")
        .arg("--standard")
        .arg("--public")
        .arg("-S") // read the password from stdin instead of prompting
        .arg(src.as_ref())
        .arg(dst.as_ref())
        .stdin(std::process::Stdio::piped())
        .stderr(std::process::Stdio::piped())
        .spawn()?;
    let mut stdin = encfs.stdin.take().unwrap();
    let mut stderr = encfs.stderr.take().unwrap();
    stdin.write_all(password.as_bytes()).await?;
    stdin.flush().await?;
    // Close stdin so encfs sees EOF and stops reading the password.
    stdin.shutdown().await?;
    drop(stdin);
    let mut err = String::new();
    stderr.read_to_string(&mut err).await?;
    if !encfs.wait().await?.success() {
        Err(Error::new(anyhow!("{}", err), crate::ErrorKind::Filesystem))
    } else {
        Ok(())
    }
}
/// Unmounts `mount_point` and deletes the (now empty) mount-point directory.
pub async fn unmount<P: AsRef<Path>>(mount_point: P) -> Result<(), Error> {
    log::info!("Unmounting {}.", mount_point.as_ref().display());
    let umount_output = tokio::process::Command::new("umount")
        .arg(mount_point.as_ref())
        .output()
        .await?;
    crate::ensure_code!(
        umount_output.status.success(),
        crate::ErrorKind::Filesystem,
        "Error Unmounting Drive: {}: {}",
        mount_point.as_ref().display(),
        std::str::from_utf8(&umount_output.stderr).unwrap_or("Unknown Error")
    );
    // Only reached after a successful umount, so this removes the empty
    // mount-point directory, not the mounted data.
    tokio::fs::remove_dir_all(mount_point.as_ref())
        .await
        .with_ctx(|_| {
            (
                crate::ErrorKind::Filesystem,
                format!("rm {}", mount_point.as_ref().display()),
            )
        })?;
    Ok(())
}
/// RAII-style guard: mounts on construction and unmounts when `unmount` is
/// called or the guard is dropped.
#[must_use]
pub struct MountGuard<P: AsRef<Path>> {
    // None once the mount has been released.
    path: Option<P>,
}
impl<P: AsRef<Path>> MountGuard<P> {
    pub async fn new(logicalname: &str, mount_point: P) -> Result<Self, Error> {
        mount(logicalname, mount_point.as_ref()).await?;
        Ok(Self {
            path: Some(mount_point),
        })
    }
    /// Preferred way to release the guard: unmounts asynchronously and
    /// disarms the Drop fallback.
    pub async fn unmount(mut self) -> Result<(), Error> {
        if let Some(ref path) = self.path {
            unmount(path).await?;
            self.path = None;
        }
        Ok(())
    }
}
impl<P: AsRef<Path>> Drop for MountGuard<P> {
    fn drop(&mut self) {
        // Fallback when the guard is dropped without `unmount()`.
        // NOTE(review): `Runtime::new()` panics if dropped from within an
        // async context, and the `unwrap`s here will abort the process if
        // this runs during a panic unwind — confirm drop sites are sync-only.
        if let Some(ref path) = self.path {
            tokio::runtime::Runtime::new()
                .unwrap()
                .block_on(unmount(path))
                .unwrap()
        }
    }
}

188
appmgr/src/volume/mod.rs Normal file
View File

@@ -0,0 +1,188 @@
use std::borrow::Borrow;
use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};
use indexmap::IndexMap;
use patch_db::{HasModel, Map, MapModel};
use serde::{Deserialize, Deserializer, Serialize};
use crate::id::{Id, IdUnchecked, InterfaceId};
use crate::s9pk::manifest::PackageId;
pub mod disk;
pub const PKG_VOLUME_DIR: &'static str = "/mnt/embassy-os/volumes/package-data";
pub const BACKUP_DIR: &'static str = "/mnt/embassy-os-backups/EmbassyBackups";
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize)]
#[serde(untagged)]
pub enum VolumeId<S: AsRef<str> = String> {
#[serde(rename = "BACKUP")]
Backup,
Custom(Id<S>),
}
/// Human-readable form: the reserved name for `Backup`, otherwise the raw id.
impl<S: AsRef<str>> std::fmt::Display for VolumeId<S> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            VolumeId::Backup => f.write_str("BACKUP"),
            VolumeId::Custom(id) => std::fmt::Display::fmt(id, f),
        }
    }
}
// Canonical string form; Display, Borrow, and Serialize all agree with this.
impl<S: AsRef<str>> AsRef<str> for VolumeId<S> {
    fn as_ref(&self) -> &str {
        match self {
            VolumeId::Backup => "BACKUP",
            VolumeId::Custom(id) => id.as_ref(),
        }
    }
}
// Lets an IndexMap<VolumeId, _> be queried with a plain &str.
impl<S: AsRef<str>> Borrow<str> for VolumeId<S> {
    fn borrow(&self) -> &str {
        self.as_ref()
    }
}
// Lets a VolumeId be used directly as a path segment in Path::join.
impl<S: AsRef<str>> AsRef<Path> for VolumeId<S> {
    fn as_ref(&self) -> &Path {
        AsRef::<str>::as_ref(self).as_ref()
    }
}
// Manual Deserialize: the reserved string "BACKUP" becomes `Backup`; any
// other input must pass `Id` validation.
impl<'de, S> Deserialize<'de> for VolumeId<S>
where
    S: AsRef<str>,
    IdUnchecked<S>: Deserialize<'de>,
{
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let unchecked: IdUnchecked<S> = Deserialize::deserialize(deserializer)?;
        Ok(match unchecked.0.as_ref() {
            "BACKUP" => VolumeId::Backup,
            _ => VolumeId::Custom(Id::try_from(unchecked.0).map_err(serde::de::Error::custom)?),
        })
    }
}
// Newtype for ids that must NOT be the reserved "BACKUP" value.
// NOTE(review): appears unused in this file — confirm it has callers before
// keeping it.
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize)]
pub struct CustomVolumeId<S: AsRef<str> = String>(Id<S>);
/// The set of volumes declared by a package, keyed by volume id.
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct Volumes(IndexMap<VolumeId, Volume>);
impl Volumes {
    /// Resolves the on-disk path for `volume_id` within `pkg_id`, or `None`
    /// if the package declares no such volume.
    pub fn get_path_for(&self, pkg_id: &PackageId, volume_id: &VolumeId) -> Option<PathBuf> {
        self.0
            .get(volume_id)
            .map(|volume| volume.path_for(pkg_id, volume_id))
    }
    /// Returns a copy of the set with every volume forced read-only.
    pub fn to_readonly(&self) -> Self {
        Volumes(
            self.0
                .iter()
                .map(|(id, volume)| {
                    let mut volume = volume.clone();
                    volume.set_readonly();
                    (id.clone(), volume)
                })
                .collect(),
        )
    }
}
// Transparent access to the underlying map.
impl Deref for Volumes {
    type Target = IndexMap<VolumeId, Volume>;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}
impl DerefMut for Volumes {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
// patch_db integration: expose the volume set as a keyed map model.
impl Map for Volumes {
    type Key = VolumeId;
    type Value = Volume;
    fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
        self.0.get(key)
    }
}
impl HasModel for Volumes {
    type Model = MapModel<Self>;
}
/// A single volume declaration. The serialized form is tagged by `type`.
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
#[serde(tag = "type")]
#[serde(rename_all = "kebab-case")]
pub enum Volume {
    /// Plain persistent data directory owned by the package.
    #[serde(rename_all = "kebab-case")]
    Data {
        // Runtime-only flag; never persisted.
        #[serde(skip)]
        readonly: bool,
    },
    /// A path inside another package's volume.
    #[serde(rename_all = "kebab-case")]
    Pointer {
        package_id: PackageId,
        volume_id: VolumeId,
        path: PathBuf,
        readonly: bool,
    },
    /// TLS certificate material for one of the package's interfaces.
    #[serde(rename_all = "kebab-case")]
    Certificate { interface_id: InterfaceId },
    /// Tor hidden-service keys for one of the package's interfaces.
    #[serde(rename_all = "kebab-case")]
    HiddenService { interface_id: InterfaceId },
    /// Backup staging area. `#[serde(skip)]` on the variant means it is
    /// never read from or written to a manifest — it is constructed only at
    /// runtime.
    #[serde(rename_all = "kebab-case")]
    #[serde(skip)]
    Backup { readonly: bool },
}
impl Volume {
    /// Computes the host filesystem path backing this volume for `pkg_id`.
    pub fn path_for(&self, pkg_id: &PackageId, volume_id: &VolumeId) -> PathBuf {
        match self {
            Volume::Data { .. } => Path::new(PKG_VOLUME_DIR)
                .join(pkg_id)
                .join("volumes")
                .join(volume_id),
            // Pointers resolve into the *target* package's volume tree.
            Volume::Pointer {
                package_id,
                volume_id,
                path,
                ..
            } => Path::new(PKG_VOLUME_DIR)
                .join(package_id)
                .join("volumes")
                .join(volume_id)
                .join(path),
            Volume::Certificate { interface_id } => Path::new(PKG_VOLUME_DIR)
                .join(pkg_id)
                .join("certificates")
                .join(interface_id),
            Volume::HiddenService { interface_id } => Path::new(PKG_VOLUME_DIR)
                .join(pkg_id)
                .join("hidden-services")
                .join(interface_id),
            Volume::Backup { .. } => Path::new(BACKUP_DIR).join(pkg_id),
        }
    }
    /// Forces the volume read-only where the concept applies; certificate and
    /// hidden-service volumes are always read-only and are left untouched.
    pub fn set_readonly(&mut self) {
        match self {
            Volume::Data { readonly } => {
                *readonly = true;
            }
            Volume::Pointer { readonly, .. } => {
                *readonly = true;
            }
            Volume::Backup { readonly } => {
                *readonly = true;
            }
            _ => (),
        }
    }
    /// Whether the volume should be mounted read-only.
    pub fn readonly(&self) -> bool {
        match self {
            Volume::Data { readonly } => *readonly,
            Volume::Pointer { readonly, .. } => *readonly,
            Volume::Certificate { .. } => true,
            Volume::HiddenService { .. } => true,
            Volume::Backup { readonly } => *readonly,
        }
    }
}