js effect for subscribing to config

fix errors

chore: Fix some things in the manager for clippy
This commit is contained in:
Aiden McClelland
2023-02-01 16:53:25 -07:00
parent 93c751f6eb
commit bec307d0e9
18 changed files with 223 additions and 168 deletions

6
backend/Cargo.lock generated
View File

@@ -182,9 +182,9 @@ dependencies = [
[[package]] [[package]]
name = "async-trait" name = "async-trait"
version = "0.1.58" version = "0.1.64"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1e805d94e6b5001b651426cf4cd446b1ab5f319d27bab5c644f61de0a804360c" checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2"
dependencies = [ dependencies = [
"proc-macro2 1.0.51", "proc-macro2 1.0.51",
"quote 1.0.23", "quote 1.0.23",
@@ -1987,6 +1987,7 @@ dependencies = [
name = "helpers" name = "helpers"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"async-trait",
"color-eyre", "color-eyre",
"futures", "futures",
"lazy_async_pool", "lazy_async_pool",
@@ -2441,6 +2442,7 @@ dependencies = [
"helpers", "helpers",
"itertools 0.10.5", "itertools 0.10.5",
"models", "models",
"pin-project",
"reqwest", "reqwest",
"serde", "serde",
"serde_json", "serde_json",

View File

@@ -47,7 +47,7 @@ pub struct ServerBackupReport {
#[derive(Debug, Deserialize, Serialize)] #[derive(Debug, Deserialize, Serialize)]
pub struct PackageBackupReport { pub struct PackageBackupReport {
error: Option<String>, pub error: Option<String>,
} }
#[command(subcommands(backup_bulk::backup_all, target::target))] #[command(subcommands(backup_bulk::backup_all, target::target))]

View File

@@ -28,7 +28,7 @@ impl StartReceipts {
let mut locks = Vec::new(); let mut locks = Vec::new();
let setup = Self::setup(&mut locks, id); let setup = Self::setup(&mut locks, id);
Ok(setup(&db.lock_all(locks).await?)?) setup(&db.lock_all(locks).await?)
} }
pub fn setup( pub fn setup(
@@ -95,7 +95,7 @@ impl StopReceipts {
let mut locks = Vec::new(); let mut locks = Vec::new();
let setup = Self::setup(&mut locks, id); let setup = Self::setup(&mut locks, id);
Ok(setup(&db.lock_all(locks).await?)?) setup(&db.lock_all(locks).await?)
} }
pub fn setup( pub fn setup(

View File

@@ -5,12 +5,10 @@ use color_eyre::eyre::eyre;
use patch_db::DbHandle; use patch_db::DbHandle;
use sqlx::{Executor, Postgres}; use sqlx::{Executor, Postgres};
use tokio::sync::RwLock; use tokio::sync::RwLock;
use torut::onion::TorSecretKeyV3;
use tracing::instrument; use tracing::instrument;
use super::Manager; use super::Manager;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::net::interface::InterfaceId;
use crate::s9pk::manifest::{Manifest, PackageId}; use crate::s9pk::manifest::{Manifest, PackageId};
use crate::util::Version; use crate::util::Version;
use crate::Error; use crate::Error;
@@ -48,10 +46,9 @@ impl ManagerMap {
continue; continue;
}; };
let tor_keys = man.interfaces.tor_keys(secrets, &package).await?;
res.insert( res.insert(
(package, man.version.clone()), (package, man.version.clone()),
Arc::new(Manager::new(ctx.clone(), man, tor_keys).await?), Arc::new(Manager::new(ctx.clone(), man).await?),
); );
} }
*self.0.write().await = res; *self.0.write().await = res;
@@ -59,18 +56,13 @@ impl ManagerMap {
} }
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn add( pub async fn add(&self, ctx: RpcContext, manifest: Manifest) -> Result<(), Error> {
&self,
ctx: RpcContext,
manifest: Manifest,
tor_keys: BTreeMap<InterfaceId, TorSecretKeyV3>,
) -> Result<(), Error> {
let mut lock = self.0.write().await; let mut lock = self.0.write().await;
let id = (manifest.id.clone(), manifest.version.clone()); let id = (manifest.id.clone(), manifest.version.clone());
if let Some(man) = lock.remove(&id) { if let Some(man) = lock.remove(&id) {
man.exit().await; man.exit().await;
} }
lock.insert(id, Arc::new(Manager::new(ctx, manifest, tor_keys).await?)); lock.insert(id, Arc::new(Manager::new(ctx, manifest).await?));
Ok(()) Ok(())
} }

View File

@@ -1,11 +1,6 @@
use std::collections::BTreeMap;
use bollard::container::StopContainerOptions; use bollard::container::StopContainerOptions;
use torut::onion::TorSecretKeyV3;
use super::sigterm_timeout;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::net::interface::InterfaceId;
use crate::s9pk::manifest::Manifest; use crate::s9pk::manifest::Manifest;
use crate::Error; use crate::Error;
@@ -13,7 +8,6 @@ pub struct ManagerSeed {
pub ctx: RpcContext, pub ctx: RpcContext,
pub manifest: Manifest, pub manifest: Manifest,
pub container_name: String, pub container_name: String,
pub tor_keys: BTreeMap<InterfaceId, TorSecretKeyV3>,
} }
impl ManagerSeed { impl ManagerSeed {
@@ -24,7 +18,11 @@ impl ManagerSeed {
.stop_container( .stop_container(
&self.container_name, &self.container_name,
Some(StopContainerOptions { Some(StopContainerOptions {
t: sigterm_timeout(&self.manifest) t: self
.manifest
.containers
.as_ref()
.and_then(|c| c.main.sigterm_timeout)
.map(|d| d.as_secs()) .map(|d| d.as_secs())
.unwrap_or(30) as i64, .unwrap_or(30) as i64,
}), }),

View File

@@ -1,6 +1,5 @@
use std::collections::{BTreeMap, BTreeSet}; use std::collections::{BTreeMap, BTreeSet};
use std::net::Ipv4Addr; use std::net::Ipv4Addr;
use std::sync::atomic::{AtomicBool, AtomicU32, Ordering};
use std::sync::Arc; use std::sync::Arc;
use std::task::Poll; use std::task::Poll;
use std::time::Duration; use std::time::Duration;
@@ -18,12 +17,11 @@ use rand::SeedableRng;
use serde_json::Value; use serde_json::Value;
use sqlx::Connection; use sqlx::Connection;
use start_stop::StartStop; use start_stop::StartStop;
use tokio::sync::{oneshot, Notify}; use tokio::sync::oneshot;
use tokio::sync::{ use tokio::sync::{
watch::{self, Receiver, Sender}, watch::{self, Sender},
Mutex, Mutex,
}; };
use torut::onion::TorSecretKeyV3;
use tracing::instrument; use tracing::instrument;
use transition_state::TransitionState; use transition_state::TransitionState;
@@ -41,10 +39,9 @@ use crate::dependencies::{
use crate::disk::mount::backup::BackupMountGuard; use crate::disk::mount::backup::BackupMountGuard;
use crate::disk::mount::guard::TmpMountGuard; use crate::disk::mount::guard::TmpMountGuard;
use crate::install::cleanup::remove_from_current_dependents_lists; use crate::install::cleanup::remove_from_current_dependents_lists;
use crate::net::interface::InterfaceId;
use crate::net::net_controller::NetService; use crate::net::net_controller::NetService;
use crate::procedure::docker::{DockerContainer, DockerProcedure, LongRunning}; use crate::procedure::docker::{DockerContainer, DockerProcedure, LongRunning};
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName}; use crate::procedure::{NoOutput, ProcedureName};
use crate::s9pk::manifest::Manifest; use crate::s9pk::manifest::Manifest;
use crate::status::MainStatus; use crate::status::MainStatus;
use crate::util::NonDetachingJoinHandle; use crate::util::NonDetachingJoinHandle;
@@ -69,7 +66,6 @@ pub const HEALTH_CHECK_GRACE_PERIOD_SECONDS: u64 = 5;
type ManagerPersistentContainer = Arc<Option<PersistentContainer>>; type ManagerPersistentContainer = Arc<Option<PersistentContainer>>;
type BackupGuard = Arc<Mutex<BackupMountGuard<TmpMountGuard>>>; type BackupGuard = Arc<Mutex<BackupMountGuard<TmpMountGuard>>>;
type BackupTask = Result<PackageBackupInfo, Error>;
pub enum BackupReturn { pub enum BackupReturn {
Error(Error), Error(Error),
AlreadyRunning(PackageBackupReport), AlreadyRunning(PackageBackupReport),
@@ -94,7 +90,7 @@ impl Default for Gid {
} }
impl Gid { impl Gid {
pub fn new_gid(&self) -> ProcessGroupId { pub fn new_gid(&self) -> ProcessGroupId {
let previous; let mut previous = 0;
self.next_gid.send_modify(|x| { self.next_gid.send_modify(|x| {
previous = *x; previous = *x;
*x = previous + 1; *x = previous + 1;
@@ -104,13 +100,13 @@ impl Gid {
pub fn new_main_gid(&self) -> ProcessGroupId { pub fn new_main_gid(&self) -> ProcessGroupId {
let gid = self.new_gid(); let gid = self.new_gid();
self.main_gid.send(gid); self.main_gid.send(gid).unwrap();
gid gid
} }
} }
#[derive(Clone)] #[derive(Clone)]
struct Manager { pub struct Manager {
seed: Arc<ManagerSeed>, seed: Arc<ManagerSeed>,
manage_container: Arc<manager_container::ManageContainer>, manage_container: Arc<manager_container::ManageContainer>,
@@ -120,16 +116,11 @@ struct Manager {
pub gid: Arc<Gid>, pub gid: Arc<Gid>,
} }
impl Manager { impl Manager {
pub async fn new( pub async fn new(ctx: RpcContext, manifest: Manifest) -> Result<Self, Error> {
ctx: RpcContext,
manifest: Manifest,
tor_keys: BTreeMap<InterfaceId, TorSecretKeyV3>,
) -> Result<Self, Error> {
let seed = Arc::new(ManagerSeed { let seed = Arc::new(ManagerSeed {
ctx, ctx,
container_name: DockerProcedure::container_name(&manifest.id, None), container_name: DockerProcedure::container_name(&manifest.id, None),
manifest, manifest,
tor_keys,
}); });
let persistent_container = Arc::new(PersistentContainer::init(&seed).await?); let persistent_container = Arc::new(PersistentContainer::init(&seed).await?);
@@ -148,15 +139,13 @@ impl Manager {
}) })
} }
pub fn start(&self) -> Result<(), Error> { pub fn start(&self) {
self._transition_abort(); self._transition_abort();
self.manage_container.to_desired(StartStop::Start); self.manage_container.to_desired(StartStop::Start);
Ok(())
} }
pub fn stop(&self) -> Result<(), Error> { pub fn stop(&self) {
self._transition_abort(); self._transition_abort();
self.manage_container.to_desired(StartStop::Stop); self.manage_container.to_desired(StartStop::Stop);
Ok(())
} }
pub async fn restart(&self) { pub async fn restart(&self) {
if self._is_transition_restart() { if self._is_transition_restart() {
@@ -176,17 +165,15 @@ impl Manager {
let (transition_state, done) = configure(context, id, configure_context).remote_handle(); let (transition_state, done) = configure(context, id, configure_context).remote_handle();
self._transition_replace({ self._transition_replace({
let seed = self.seed.clone();
let manage_container = self.manage_container.clone(); let manage_container = self.manage_container.clone();
TransitionState::Configuring(tokio::spawn(async move { TransitionState::Configuring(tokio::spawn(async move {
let desired_state = manage_container.desired_state(); let desired_state = manage_container.desired_state();
let state_reverter = DesiredStateReverter::new(manage_container.clone()); let state_reverter = DesiredStateReverter::new(manage_container.clone());
let starting_desired = desired_state.borrow().clone();
let mut current_state = manage_container.current_state(); let mut current_state = manage_container.current_state();
manage_container.to_desired(StartStop::Stop); manage_container.to_desired(StartStop::Stop);
while current_state.borrow().is_start() { while current_state.borrow().is_start() {
current_state.changed().await; current_state.changed().await.unwrap();
} }
transition_state.await; transition_state.await;
@@ -211,7 +198,7 @@ impl Manager {
let mut current_status = self.manage_container.current_state(); let mut current_status = self.manage_container.current_state();
while current_status.borrow().is_start() { while current_status.borrow().is_start() {
current_status.changed().await; current_status.changed().await.unwrap();
} }
} }
@@ -223,7 +210,9 @@ impl Manager {
} }
pub fn rpc_client(&self) -> Option<Arc<UnixRpcClient>> { pub fn rpc_client(&self) -> Option<Arc<UnixRpcClient>> {
self.persistent_container.clone().map(|x| x.rpc_client()) (*self.persistent_container)
.as_ref()
.map(|x| x.rpc_client())
} }
fn _transition_abort(&self) { fn _transition_abort(&self) {
@@ -244,34 +233,30 @@ impl Manager {
let transition = self.transition.clone(); let transition = self.transition.clone();
let manage_container = self.manage_container.clone(); let manage_container = self.manage_container.clone();
TransitionState::Restarting(tokio::spawn(async move { TransitionState::Restarting(tokio::spawn(async move {
let mut desired_state = manage_container.desired_state();
let mut current_state = manage_container.current_state(); let mut current_state = manage_container.current_state();
let _ = manage_container.set_override(Some(MainStatus::Restarting)); let _ = manage_container.set_override(Some(MainStatus::Restarting));
manage_container.to_desired(StartStop::Stop); manage_container.to_desired(StartStop::Stop);
while current_state.borrow().is_start() { while current_state.borrow().is_start() {
current_state.changed().await; current_state.changed().await.unwrap();
} }
manage_container.to_desired(StartStop::Start); manage_container.to_desired(StartStop::Start);
while current_state.borrow().is_stop() { while current_state.borrow().is_stop() {
current_state.changed().await; current_state.changed().await.unwrap();
} }
transition.send_replace(Default::default()); transition.send_replace(Default::default());
})) }))
} }
fn _transition_backup( fn _transition_backup(
&self, &self,
mut backup_guard: BackupGuard, backup_guard: BackupGuard,
) -> (TransitionState, BoxFuture<BackupReturn>) { ) -> (TransitionState, BoxFuture<BackupReturn>) {
let transition = self.transition.clone();
let manage_container = self.manage_container.clone(); let manage_container = self.manage_container.clone();
let seed = self.seed.clone(); let seed = self.seed.clone();
let (send, done) = oneshot::channel(); let (send, done) = oneshot::channel();
( (
TransitionState::BackingUp(tokio::spawn( TransitionState::BackingUp(tokio::spawn(
async move { async move {
let mut desired_state = manage_container.desired_state();
let state_reverter = DesiredStateReverter::new(manage_container.clone()); let state_reverter = DesiredStateReverter::new(manage_container.clone());
let starting_desired = desired_state.borrow().clone();
let mut current_state = manage_container.current_state(); let mut current_state = manage_container.current_state();
let mut tx = seed.ctx.db.handle(); let mut tx = seed.ctx.db.handle();
let _ = manage_container.set_override(Some( let _ = manage_container.set_override(Some(
@@ -279,7 +264,7 @@ impl Manager {
)); ));
manage_container.to_desired(StartStop::Stop); manage_container.to_desired(StartStop::Stop);
while current_state.borrow().is_start() { while current_state.borrow().is_start() {
current_state.changed().await; current_state.changed().await.unwrap();
} }
let backup_guard = backup_guard.lock().await; let backup_guard = backup_guard.lock().await;
@@ -333,8 +318,8 @@ fn configure(
mut configure_context: ConfigureContext, mut configure_context: ConfigureContext,
) -> BoxFuture<'static, Result<BTreeMap<PackageId, TaggedDependencyError>, Error>> { ) -> BoxFuture<'static, Result<BTreeMap<PackageId, TaggedDependencyError>, Error>> {
async move { async move {
let db = ctx.db.handle(); let mut db = ctx.db.handle();
let tx = db.begin().await?; let mut tx = db.begin().await?;
let db = &mut tx; let db = &mut tx;
let receipts = ConfigReceipts::new(db).await?; let receipts = ConfigReceipts::new(db).await?;
@@ -353,11 +338,6 @@ fn configure(
.await? .await?
.ok_or_else(not_found)?; .ok_or_else(not_found)?;
let volumes = receipts.volumes.get(db, id).await?.ok_or_else(not_found)?; let volumes = receipts.volumes.get(db, id).await?.ok_or_else(not_found)?;
let is_needs_config = !receipts
.configured
.get(db, id)
.await?
.ok_or_else(not_found)?;
let version = receipts.version.get(db, id).await?.ok_or_else(not_found)?; let version = receipts.version.get(db, id).await?.ok_or_else(not_found)?;
// get current config and current spec // get current config and current spec
@@ -394,7 +374,7 @@ fn configure(
// create backreferences to pointers // create backreferences to pointers
let mut sys = receipts let mut sys = receipts
.system_pointers .system_pointers
.get(db, &id) .get(db, id)
.await? .await?
.ok_or_else(not_found)?; .ok_or_else(not_found)?;
sys.truncate(0); sys.truncate(0);
@@ -431,7 +411,7 @@ fn configure(
ValueSpecPointer::System(s) => sys.push(s), ValueSpecPointer::System(s) => sys.push(s),
} }
} }
receipts.system_pointers.set(db, sys, &id).await?; receipts.system_pointers.set(db, sys, id).await?;
let signal = if !configure_context.dry_run { let signal = if !configure_context.dry_run {
// run config action // run config action
@@ -475,7 +455,7 @@ fn configure(
// update dependencies // update dependencies
let prev_current_dependencies = receipts let prev_current_dependencies = receipts
.current_dependencies .current_dependencies
.get(db, &id) .get(db, id)
.await? .await?
.unwrap_or_default(); .unwrap_or_default();
remove_from_current_dependents_lists( remove_from_current_dependents_lists(
@@ -495,7 +475,7 @@ fn configure(
current_dependencies.0.remove(id); current_dependencies.0.remove(id);
receipts receipts
.current_dependencies .current_dependencies
.set(db, current_dependencies.clone(), &id) .set(db, current_dependencies.clone(), id)
.await?; .await?;
let errs = receipts let errs = receipts
@@ -512,7 +492,7 @@ fn configure(
&receipts.dependency_receipt.try_heal, &receipts.dependency_receipt.try_heal,
) )
.await?; .await?;
receipts.dependency_errors.set(db, errs, &id).await?; receipts.dependency_errors.set(db, errs, id).await?;
// cache current config for dependents // cache current config for dependents
configure_context configure_context
@@ -525,22 +505,18 @@ fn configure(
.get(db, id) .get(db, id)
.await? .await?
.ok_or_else(not_found)?; .ok_or_else(not_found)?;
let prev = if is_needs_config { None } else { old_config } for (dependent, _dep_info) in dependents.0.iter().filter(|(dep_id, _)| dep_id != &id) {
.map(Value::Object) let dependent_container = receipts.docker_containers.get(db, dependent).await?;
.unwrap_or_default();
let next = Value::Object(config.clone());
for (dependent, dep_info) in dependents.0.iter().filter(|(dep_id, _)| dep_id != &id) {
let dependent_container = receipts.docker_containers.get(db, &dependent).await?;
let dependent_container = &dependent_container; let dependent_container = &dependent_container;
// check if config passes dependent check // check if config passes dependent check
if let Some(cfg) = receipts if let Some(cfg) = receipts
.manifest_dependencies_config .manifest_dependencies_config
.get(db, (&dependent, &id)) .get(db, (dependent, id))
.await? .await?
{ {
let manifest = receipts let manifest = receipts
.manifest .manifest
.get(db, &dependent) .get(db, dependent)
.await? .await?
.ok_or_else(not_found)?; .ok_or_else(not_found)?;
if let Err(error) = cfg if let Err(error) = cfg
@@ -605,7 +581,7 @@ struct DesiredStateReverter {
} }
impl DesiredStateReverter { impl DesiredStateReverter {
fn new(manage_container: Arc<ManageContainer>) -> Self { fn new(manage_container: Arc<ManageContainer>) -> Self {
let starting_state = manage_container.desired_state().borrow().clone(); let starting_state = *manage_container.desired_state().borrow();
let manage_container = Some(manage_container); let manage_container = Some(manage_container);
Self { Self {
starting_state, starting_state,
@@ -614,8 +590,8 @@ impl DesiredStateReverter {
} }
async fn revert(mut self) { async fn revert(mut self) {
if let Some(mut current_state) = self._revert() { if let Some(mut current_state) = self._revert() {
while &*current_state.borrow() != &self.starting_state { while *current_state.borrow() != self.starting_state {
current_state.changed().await; current_state.changed().await.unwrap();
} }
} }
} }
@@ -646,7 +622,8 @@ fn finnish_up_backup_task(
send.send(match result { send.send(match result {
Ok(a) => a, Ok(a) => a,
Err(e) => Err(e), Err(e) => Err(e),
}); })
.unwrap_or_default();
} }
.boxed() .boxed()
} }
@@ -659,7 +636,7 @@ fn response_to_report(response: &Result<PackageBackupInfo, Error>) -> PackageBac
} }
fn flatten_backup_error(input: Result<Result<PackageBackupInfo, Error>, Error>) -> BackupReturn { fn flatten_backup_error(input: Result<Result<PackageBackupInfo, Error>, Error>) -> BackupReturn {
match input { match input {
Ok(a) | Ok(a) => BackupReturn::Ran { Ok(a) => BackupReturn::Ran {
report: response_to_report(&a), report: response_to_report(&a),
res: a, res: a,
}, },
@@ -675,18 +652,6 @@ pub enum Status {
Paused, Paused,
Shutdown, Shutdown,
} }
/// State shared between the manager's concurrently running tasks.
/// NOTE(review): field semantics below are partly inferred from names and
/// types — confirm against the rest of the module before relying on them.
pub struct ManagerSharedState {
// Static inputs for this managed package (RPC context, manifest, container name).
seed: Arc<ManagerSeed>,
// Long-running container, when this package keeps one alive.
persistent_container: Option<PersistentContainer>,
// Watch pair broadcasting the current `Status` to observers.
status: (Sender<Status>, Receiver<Status>),
// Notified to interrupt the main task — presumably a kill request; TODO confirm.
killer: Notify,
// Publishes the `OnStop` policy to apply when the container exits.
on_stop: Sender<OnStop>,
synchronized: Notify,
synchronize_now: Notify,
// When true, health-check results are committed (see `set_commit_health_true`).
commit_health_check_results: AtomicBool,
// Monotonic counter used to mint fresh process-group ids.
next_gid: AtomicU32,
// Watch pair tracking the main process group's current id.
main_gid: (Sender<ProcessGroupId>, Receiver<ProcessGroupId>),
}
#[derive(Debug, Clone, Copy)] #[derive(Debug, Clone, Copy)]
pub enum OnStop { pub enum OnStop {
@@ -703,8 +668,6 @@ async fn run_main(
persistent_container: ManagerPersistentContainer, persistent_container: ManagerPersistentContainer,
started: Arc<impl Fn()>, started: Arc<impl Fn()>,
) -> RunMainResult { ) -> RunMainResult {
let interfaces = main_interfaces(&seed)?;
let mut runtime = NonDetachingJoinHandle::from(tokio::spawn(start_up_image(seed.clone()))); let mut runtime = NonDetachingJoinHandle::from(tokio::spawn(start_up_image(seed.clone())));
let ip = match persistent_container.is_some() { let ip = match persistent_container.is_some() {
false => Some(match get_running_ip(&seed, &mut runtime).await { false => Some(match get_running_ip(&seed, &mut runtime).await {
@@ -749,35 +712,6 @@ async fn start_up_image(seed: Arc<ManagerSeed>) -> Result<Result<NoOutput, (i32,
.await .await
} }
/// Pairs every interface declared in the package manifest with its
/// Tor v3 secret key from `seed.tor_keys`.
///
/// Returns one `(interface id, interface definition, tor key)` triple per
/// manifest interface, preserving manifest iteration order.
///
/// # Errors
/// Returns an `ErrorKind::Tor` error if any declared interface has no
/// corresponding entry in `seed.tor_keys`.
fn main_interfaces(
seed: &ManagerSeed,
) -> Result<
Vec<(
InterfaceId,
&crate::net::interface::Interface,
TorSecretKeyV3,
)>,
Error,
> {
seed.manifest
.interfaces
.0
.iter()
.map(|(id, info)| {
Ok((
id.clone(),
info,
// Missing key is a hard error: every manifest interface must
// have been issued a Tor key before the manager starts.
seed.tor_keys
.get(id)
.ok_or_else(|| {
Error::new(eyre!("interface {} missing key", id), crate::ErrorKind::Tor)
})?
.clone(),
))
})
// Short-circuits on the first missing key.
.collect::<Result<Vec<_>, Error>>()
}
async fn long_running_docker( async fn long_running_docker(
seed: &ManagerSeed, seed: &ManagerSeed,
container: &DockerContainer, container: &DockerContainer,
@@ -896,17 +830,11 @@ async fn main_health_check_daemon(seed: Arc<ManagerSeed>) {
} }
} }
/// Enables committing of health-check results by setting the shared
/// `commit_health_check_results` flag (sequentially-consistent store).
fn set_commit_health_true(state: &Arc<ManagerSharedState>) {
state
.commit_health_check_results
.store(true, Ordering::SeqCst);
}
type RuntimeOfCommand = NonDetachingJoinHandle<Result<Result<NoOutput, (i32, String)>, Error>>; type RuntimeOfCommand = NonDetachingJoinHandle<Result<Result<NoOutput, (i32, String)>, Error>>;
async fn get_running_ip(seed: &ManagerSeed, mut runtime: &mut RuntimeOfCommand) -> GetRunningIp { async fn get_running_ip(seed: &ManagerSeed, mut runtime: &mut RuntimeOfCommand) -> GetRunningIp {
loop { loop {
match container_inspect(&seed).await { match container_inspect(seed).await {
Ok(res) => { Ok(res) => {
match res match res
.network_settings .network_settings
@@ -968,7 +896,7 @@ async fn send_signal(
// .store(false, Ordering::SeqCst); // .store(false, Ordering::SeqCst);
if let Some(rpc_client) = rpc_client { if let Some(rpc_client) = rpc_client {
let main_gid = gid.main_gid.borrow().clone(); let main_gid = *gid.main_gid.borrow();
let next_gid = gid.new_gid(); let next_gid = gid.new_gid();
#[cfg(feature = "js_engine")] #[cfg(feature = "js_engine")]
if let Err(e) = crate::procedure::js_scripts::JsProcedure::default() if let Err(e) = crate::procedure::js_scripts::JsProcedure::default()
@@ -985,6 +913,7 @@ async fn send_signal(
None, // TODO None, // TODO
next_gid, next_gid,
Some(rpc_client), Some(rpc_client),
Arc::new(seed.ctx.clone()),
) )
.await? .await?
{ {

View File

@@ -2,7 +2,6 @@ use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use embassy_container_init::ProcessGroupId;
use helpers::UnixRpcClient; use helpers::UnixRpcClient;
use tokio::sync::oneshot; use tokio::sync::oneshot;
use tokio::sync::watch::{self, Receiver}; use tokio::sync::watch::{self, Receiver};
@@ -11,7 +10,7 @@ use tracing::instrument;
use super::manager_seed::ManagerSeed; use super::manager_seed::ManagerSeed;
use super::{ use super::{
add_network_for_main, generate_certificate, get_long_running_ip, long_running_docker, add_network_for_main, generate_certificate, get_long_running_ip, long_running_docker,
main_interfaces, remove_network_for_main, GetRunningIp, remove_network_for_main, GetRunningIp,
}; };
use crate::procedure::docker::DockerContainer; use crate::procedure::docker::DockerContainer;
use crate::util::NonDetachingJoinHandle; use crate::util::NonDetachingJoinHandle;
@@ -53,8 +52,7 @@ pub async fn spawn_persistent_container(
let mut send_inserter: Option<oneshot::Sender<Receiver<Arc<UnixRpcClient>>>> = Some(send_inserter); let mut send_inserter: Option<oneshot::Sender<Receiver<Arc<UnixRpcClient>>>> = Some(send_inserter);
loop { loop {
if let Err(e) = async { if let Err(e) = async {
let interfaces = main_interfaces(&*seed)?; let generated_certificate = generate_certificate(&*seed).await?;
let generated_certificate = generate_certificate(&*seed, &interfaces).await?;
let (mut runtime, inserter) = let (mut runtime, inserter) =
long_running_docker(&seed, &container).await?; long_running_docker(&seed, &container).await?;
@@ -67,7 +65,7 @@ pub async fn spawn_persistent_container(
return Ok(()); return Ok(());
} }
}; };
add_network_for_main(&*seed, ip, interfaces, generated_certificate).await?; add_network_for_main(&*seed, ip, generated_certificate).await?;
if let Some(inserter_send) = inserter_send.as_mut() { if let Some(inserter_send) = inserter_send.as_mut() {
let _ = inserter_send.send(Arc::new(inserter)); let _ = inserter_send.send(Arc::new(inserter));

View File

@@ -1,7 +1,7 @@
use crate::status::MainStatus; use crate::status::MainStatus;
#[derive(Clone, Copy, Debug, Eq, PartialEq)] #[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) enum StartStop { pub enum StartStop {
Start, Start,
Stop, Stop,
} }

View File

@@ -4,7 +4,7 @@ use std::time::Duration;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use embassy_container_init::{ProcessGroupId, SignalGroup, SignalGroupParams}; use embassy_container_init::{ProcessGroupId, SignalGroup, SignalGroupParams};
use helpers::UnixRpcClient; use helpers::{Callback, OsApi, UnixRpcClient};
pub use js_engine::JsError; pub use js_engine::JsError;
use js_engine::{JsExecutionEnvironment, PathForVolumeId}; use js_engine::{JsExecutionEnvironment, PathForVolumeId};
use models::{ErrorKind, VolumeId}; use models::{ErrorKind, VolumeId};
@@ -19,6 +19,18 @@ use crate::util::{GeneralGuard, Version};
use crate::volume::Volumes; use crate::volume::Volumes;
use crate::Error; use crate::Error;
// OS-facing API surface (`helpers::OsApi`) implemented on the RPC context so
// it can be handed to the JS execution environment.
#[async_trait::async_trait]
impl OsApi for RpcContext {
/// Fetch the config value at `path` for service `id`; `callback` is
/// presumably invoked on subsequent config changes (subscription) —
/// TODO confirm once implemented.
///
/// NOTE(review): unimplemented stub — calling this panics via `todo!()`.
async fn get_service_config(
&self,
id: PackageId,
path: &str,
callback: Callback,
) -> Result<serde_json::Value, Error> {
todo!()
}
}
#[derive(Debug, Serialize, Deserialize, Clone)] #[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
@@ -69,6 +81,7 @@ impl JsProcedure {
timeout: Option<Duration>, timeout: Option<Duration>,
gid: ProcessGroupId, gid: ProcessGroupId,
rpc_client: Option<Arc<UnixRpcClient>>, rpc_client: Option<Arc<UnixRpcClient>>,
os: Arc<dyn OsApi>,
) -> Result<Result<O, (i32, String)>, Error> { ) -> Result<Result<O, (i32, String)>, Error> {
let cleaner_client = rpc_client.clone(); let cleaner_client = rpc_client.clone();
let cleaner = GeneralGuard::new(move || { let cleaner = GeneralGuard::new(move || {
@@ -87,6 +100,7 @@ impl JsProcedure {
}); });
let res = async move { let res = async move {
let running_action = JsExecutionEnvironment::load_from_package( let running_action = JsExecutionEnvironment::load_from_package(
os,
directory, directory,
pkg_id, pkg_id,
pkg_version, pkg_version,
@@ -124,6 +138,7 @@ impl JsProcedure {
) -> Result<Result<O, (i32, String)>, Error> { ) -> Result<Result<O, (i32, String)>, Error> {
Ok(async move { Ok(async move {
let running_action = JsExecutionEnvironment::load_from_package( let running_action = JsExecutionEnvironment::load_from_package(
Arc::new(ctx.clone()),
&ctx.datadir, &ctx.datadir,
pkg_id, pkg_id,
pkg_version, pkg_version,
@@ -212,6 +227,7 @@ async fn js_action_execute() {
timeout, timeout,
ProcessGroupId(0), ProcessGroupId(0),
None, None,
None,
) )
.await .await
.unwrap() .unwrap()
@@ -269,6 +285,7 @@ async fn js_action_execute_error() {
timeout, timeout,
ProcessGroupId(0), ProcessGroupId(0),
None, None,
None,
) )
.await .await
.unwrap(); .unwrap();
@@ -315,6 +332,7 @@ async fn js_action_fetch() {
timeout, timeout,
ProcessGroupId(0), ProcessGroupId(0),
None, None,
None,
) )
.await .await
.unwrap() .unwrap()
@@ -363,6 +381,7 @@ async fn js_test_slow() {
timeout, timeout,
ProcessGroupId(0), ProcessGroupId(0),
None, None,
None,
) => { a.unwrap().unwrap(); }, ) => { a.unwrap().unwrap(); },
_ = tokio::time::sleep(Duration::from_secs(1)) => () _ = tokio::time::sleep(Duration::from_secs(1)) => ()
} }
@@ -412,6 +431,7 @@ async fn js_action_var_arg() {
timeout, timeout,
ProcessGroupId(0), ProcessGroupId(0),
None, None,
None,
) )
.await .await
.unwrap() .unwrap()
@@ -458,6 +478,7 @@ async fn js_action_test_rename() {
timeout, timeout,
ProcessGroupId(0), ProcessGroupId(0),
None, None,
None,
) )
.await .await
.unwrap() .unwrap()
@@ -504,6 +525,7 @@ async fn js_action_test_deep_dir() {
timeout, timeout,
ProcessGroupId(0), ProcessGroupId(0),
None, None,
None,
) )
.await .await
.unwrap() .unwrap()
@@ -549,6 +571,7 @@ async fn js_action_test_deep_dir_escape() {
timeout, timeout,
ProcessGroupId(0), ProcessGroupId(0),
None, None,
None,
) )
.await .await
.unwrap() .unwrap()
@@ -594,6 +617,7 @@ async fn js_action_test_zero_dir() {
timeout, timeout,
ProcessGroupId(0), ProcessGroupId(0),
None, None,
None,
) )
.await .await
.unwrap() .unwrap()
@@ -685,6 +709,7 @@ async fn js_rsync() {
timeout, timeout,
ProcessGroupId(0), ProcessGroupId(0),
None, None,
None,
) )
.await .await
.unwrap() .unwrap()

View File

@@ -1,4 +1,5 @@
use std::collections::BTreeSet; use std::collections::BTreeSet;
use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
@@ -83,26 +84,24 @@ impl PackageProcedure {
} }
#[cfg(feature = "js_engine")] #[cfg(feature = "js_engine")]
PackageProcedure::Script(procedure) => { PackageProcedure::Script(procedure) => {
let (gid, rpc_client) = match ctx let man = ctx
.managers .managers
.get(&(pkg_id.clone(), pkg_version.clone())) .get(&(pkg_id.clone(), pkg_version.clone()))
.await .await
{ .ok_or_else(|| {
None => { Error::new(
return Err(Error::new(
eyre!("No manager found for {}", pkg_id), eyre!("No manager found for {}", pkg_id),
ErrorKind::NotFound, ErrorKind::NotFound,
)) )
} })?;
Some(man) => ( let gid;
if matches!(name, ProcedureName::Main) { let rpc_client = man.rpc_client();
man.gid.new_main_gid() let os = Arc::new(ctx.clone());
} else { if matches!(name, ProcedureName::Main) {
man.gid.new_gid() gid = man.gid.new_main_gid();
}, } else {
man.rpc_client(), gid = man.gid.new_gid();
), }
};
procedure procedure
.execute( .execute(
@@ -115,6 +114,7 @@ impl PackageProcedure {
timeout, timeout,
gid, gid,
rpc_client, rpc_client,
os,
) )
.await .await
} }

View File

@@ -280,8 +280,8 @@ impl<F: FnOnce() -> T, T> Drop for GeneralGuard<F, T> {
pub struct GeneralBoxedGuard(Option<Box<dyn FnOnce() -> ()>>); pub struct GeneralBoxedGuard(Option<Box<dyn FnOnce() -> ()>>);
impl GeneralBoxedGuard { impl GeneralBoxedGuard {
pub fn new(f: impl FnOnce() -> ()) -> Self { pub fn new(f: impl FnOnce() -> () + 'static) -> Self {
GeneralBoxedGuard(Some(f.boxed())) GeneralBoxedGuard(Some(Box::new(f)))
} }
pub fn drop(mut self) -> () { pub fn drop(mut self) -> () {

6
libs/Cargo.lock generated
View File

@@ -110,9 +110,9 @@ dependencies = [
[[package]] [[package]]
name = "async-trait" name = "async-trait"
version = "0.1.61" version = "0.1.64"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "705339e0e4a9690e2908d2b3d049d85682cf19fbd5782494498fbf7003a6a282" checksum = "1cd7fce9ba8c3c042128ce72d8b2ddbf3a05747efb67ea0313c635e10bda47a2"
dependencies = [ dependencies = [
"proc-macro2", "proc-macro2",
"quote", "quote",
@@ -1304,6 +1304,7 @@ dependencies = [
name = "helpers" name = "helpers"
version = "0.1.0" version = "0.1.0"
dependencies = [ dependencies = [
"async-trait",
"color-eyre", "color-eyre",
"futures", "futures",
"lazy_async_pool", "lazy_async_pool",
@@ -1649,6 +1650,7 @@ dependencies = [
"helpers", "helpers",
"itertools 0.10.5", "itertools 0.10.5",
"models", "models",
"pin-project",
"reqwest", "reqwest",
"serde", "serde",
"serde_json", "serde_json",

View File

@@ -6,6 +6,7 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html # See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies] [dependencies]
async-trait = "0.1.64"
color-eyre = "0.6.2" color-eyre = "0.6.2"
futures = "0.3.21" futures = "0.3.21"
lazy_async_pool = "0.3.3" lazy_async_pool = "0.3.3"

View File

@@ -10,10 +10,12 @@ use tokio::sync::oneshot;
use tokio::task::{JoinError, JoinHandle, LocalSet}; use tokio::task::{JoinError, JoinHandle, LocalSet};
mod byte_replacement_reader; mod byte_replacement_reader;
mod os_api;
mod rpc_client; mod rpc_client;
mod rsync; mod rsync;
mod script_dir; mod script_dir;
pub use byte_replacement_reader::*; pub use byte_replacement_reader::*;
pub use os_api::*;
pub use rpc_client::{RpcClient, UnixRpcClient}; pub use rpc_client::{RpcClient, UnixRpcClient};
pub use rsync::*; pub use rsync::*;
pub use script_dir::*; pub use script_dir::*;

View File

@@ -0,0 +1,17 @@
use models::Error;
use models::PackageId;
use serde_json::Value;
/// Marker error returned by a [`Callback`] once the JS runtime that
/// registered it has been dropped and can no longer receive events.
pub struct RuntimeDropped;

/// Change-notification callback registered alongside a config watch.
/// Invoked with the new config [`Value`]; returns `Err(RuntimeDropped)`
/// when the subscribing runtime is gone so the OS side can stop
/// notifying and release the subscription.
pub type Callback = Box<dyn Fn(Value) -> Result<(), RuntimeDropped> + Send + Sync + 'static>;

#[async_trait::async_trait]
pub trait OsApi: Send + Sync + 'static {
    /// Read the config of service `id` at `path` and subscribe
    /// `callback` to subsequent changes. Returns the current value.
    // NOTE(review): path semantics (pointer syntax? dotted?) are not
    // visible from this trait — confirm against the implementor.
    async fn get_service_config(
        &self,
        id: PackageId,
        path: &str,
        callback: Callback,
    ) -> Result<Value, Error>;
}

View File

@@ -43,3 +43,4 @@ serde = { version = "1.0", features = ["derive", "rc"] }
serde_json = "1.0" serde_json = "1.0"
tokio = { version = "1", features = ["full"] } tokio = { version = "1", features = ["full"] }
tracing = "0.1" tracing = "0.1"
pin-project = "1"

View File

@@ -193,6 +193,18 @@ const diskUsage = async ({
return { used, total } return { used, total }
} }
// Registry of live change-callbacks, keyed by a generated uuid so the
// Rust side can invoke a specific callback by id (see runCallback).
// NOTE(review): entries are never removed, so a long-lived runtime
// accumulates callbacks — confirm this is intended.
const callbackMapping = {}
// Store `fn` under a fresh uuid and return that uuid for the Rust op.
const registerCallback = (fn) => {
const uuid = generateUuid(); // TODO: generateUuid is not defined in this file — confirm it is provided by the snapshot environment
callbackMapping[uuid] = fn;
return uuid
}
// Called from Rust (via execute_script) to dispatch an event payload
// to the callback registered under `uuid`.
const runCallback = (uuid, data) => callbackMapping[uuid](data)
// Effect: fetch `configPath` from `serviceId`'s config and subscribe
// `onChange` to future updates (backed by the get_service_config op).
const getServiceConfig = async (serviceId, configPath, onChange) => {
await Deno.core.opAsync("get_service_config", serviceId, configPath, registerCallback(onChange))
}
const currentFunction = Deno.core.opSync("current_function"); const currentFunction = Deno.core.opSync("current_function");
const input = Deno.core.opSync("get_input"); const input = Deno.core.opSync("get_input");
const variable_args = Deno.core.opSync("get_variable_args"); const variable_args = Deno.core.opSync("get_variable_args");
@@ -223,6 +235,7 @@ const effects = {
runRsync, runRsync,
readDir, readDir,
diskUsage, diskUsage,
getServiceConfig,
}; };
const defaults = { const defaults = {

View File

@@ -1,7 +1,9 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::future::Future;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::pin::Pin; use std::pin::Pin;
use std::sync::Arc; use std::sync::Arc;
use std::task::Poll;
use std::time::SystemTime; use std::time::SystemTime;
use deno_core::anyhow::{anyhow, bail}; use deno_core::anyhow::{anyhow, bail};
@@ -11,12 +13,12 @@ use deno_core::{
ModuleSpecifier, ModuleType, OpDecl, RuntimeOptions, Snapshot, ModuleSpecifier, ModuleType, OpDecl, RuntimeOptions, Snapshot,
}; };
use embassy_container_init::ProcessGroupId; use embassy_container_init::ProcessGroupId;
use helpers::{script_dir, spawn_local, Rsync, UnixRpcClient}; use helpers::{script_dir, spawn_local, OsApi, Rsync, UnixRpcClient};
use models::{PackageId, ProcedureName, Version, VolumeId}; use models::{PackageId, ProcedureName, Version, VolumeId};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use serde_json::Value; use serde_json::Value;
use tokio::io::AsyncReadExt; use tokio::io::AsyncReadExt;
use tokio::sync::Mutex; use tokio::sync::{mpsc, Mutex};
pub trait PathForVolumeId: Send + Sync { pub trait PathForVolumeId: Send + Sync {
fn path_for( fn path_for(
@@ -87,6 +89,7 @@ const SNAPSHOT_BYTES: &[u8] = include_bytes!("./artifacts/ARM_JS_SNAPSHOT.bin");
#[derive(Clone)] #[derive(Clone)]
struct JsContext { struct JsContext {
sandboxed: bool, sandboxed: bool,
os: Arc<dyn OsApi>,
datadir: PathBuf, datadir: PathBuf,
run_function: String, run_function: String,
version: Version, version: Version,
@@ -97,6 +100,7 @@ struct JsContext {
container_process_gid: ProcessGroupId, container_process_gid: ProcessGroupId,
container_rpc_client: Option<Arc<UnixRpcClient>>, container_rpc_client: Option<Arc<UnixRpcClient>>,
rsyncs: Arc<Mutex<(usize, BTreeMap<usize, Rsync>)>>, rsyncs: Arc<Mutex<(usize, BTreeMap<usize, Rsync>)>>,
callback_sender: mpsc::UnboundedSender<(String, Value)>,
} }
#[derive(Debug, Clone, serde::Deserialize, serde::Serialize)] #[derive(Debug, Clone, serde::Deserialize, serde::Serialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
@@ -178,6 +182,7 @@ impl ModuleLoader for ModsLoader {
pub struct JsExecutionEnvironment { pub struct JsExecutionEnvironment {
sandboxed: bool, sandboxed: bool,
os: Arc<dyn OsApi>,
base_directory: PathBuf, base_directory: PathBuf,
module_loader: ModsLoader, module_loader: ModsLoader,
package_id: PackageId, package_id: PackageId,
@@ -189,13 +194,14 @@ pub struct JsExecutionEnvironment {
impl JsExecutionEnvironment { impl JsExecutionEnvironment {
pub async fn load_from_package( pub async fn load_from_package(
os: Arc<dyn OsApi>,
data_directory: impl AsRef<std::path::Path>, data_directory: impl AsRef<std::path::Path>,
package_id: &PackageId, package_id: &PackageId,
version: &Version, version: &Version,
volumes: Box<dyn PathForVolumeId>, volumes: Box<dyn PathForVolumeId>,
container_process_gid: ProcessGroupId, container_process_gid: ProcessGroupId,
container_rpc_client: Option<Arc<UnixRpcClient>>, container_rpc_client: Option<Arc<UnixRpcClient>>,
) -> Result<JsExecutionEnvironment, (JsError, String)> { ) -> Result<Self, (JsError, String)> {
let data_dir = data_directory.as_ref(); let data_dir = data_directory.as_ref();
let base_directory = data_dir; let base_directory = data_dir;
let js_code = JsCode({ let js_code = JsCode({
@@ -222,6 +228,7 @@ impl JsExecutionEnvironment {
buffer buffer
}); });
Ok(JsExecutionEnvironment { Ok(JsExecutionEnvironment {
os,
base_directory: base_directory.to_owned(), base_directory: base_directory.to_owned(),
module_loader: ModsLoader { code: js_code }, module_loader: ModsLoader { code: js_code },
package_id: package_id.clone(), package_id: package_id.clone(),
@@ -303,6 +310,7 @@ impl JsExecutionEnvironment {
fns::rsync::decl(), fns::rsync::decl(),
fns::rsync_wait::decl(), fns::rsync_wait::decl(),
fns::rsync_progress::decl(), fns::rsync_progress::decl(),
fns::get_service_config::decl(),
] ]
} }
@@ -315,7 +323,9 @@ impl JsExecutionEnvironment {
let base_directory = self.base_directory.clone(); let base_directory = self.base_directory.clone();
let answer_state = AnswerState::default(); let answer_state = AnswerState::default();
let ext_answer_state = answer_state.clone(); let ext_answer_state = answer_state.clone();
let (callback_sender, callback_receiver) = mpsc::unbounded_channel();
let js_ctx = JsContext { let js_ctx = JsContext {
os: self.os,
datadir: base_directory, datadir: base_directory,
run_function: procedure_name run_function: procedure_name
.js_function_name() .js_function_name()
@@ -334,6 +344,7 @@ impl JsExecutionEnvironment {
variable_args, variable_args,
container_process_gid: self.container_process_gid, container_process_gid: self.container_process_gid,
container_rpc_client: self.container_rpc_client.clone(), container_rpc_client: self.container_rpc_client.clone(),
callback_sender,
rsyncs: Default::default(), rsyncs: Default::default(),
}; };
let ext = Extension::builder() let ext = Extension::builder()
@@ -352,16 +363,14 @@ impl JsExecutionEnvironment {
startup_snapshot: Some(Snapshot::Static(SNAPSHOT_BYTES)), startup_snapshot: Some(Snapshot::Static(SNAPSHOT_BYTES)),
..Default::default() ..Default::default()
}; };
let runtime = Arc::new(Mutex::new(JsRuntime::new(runtime_options))); let mut runtime = JsRuntime::new(runtime_options);
let future = async move { let future = async move {
let mod_id = runtime let mod_id = runtime
.lock()
.await
.load_main_module(&"file:///loadModule.js".parse().unwrap(), None) .load_main_module(&"file:///loadModule.js".parse().unwrap(), None)
.await?; .await?;
let evaluated = runtime.lock().await.mod_evaluate(mod_id); let evaluated = runtime.mod_evaluate(mod_id);
let res = runtime.lock().await.run_event_loop(false).await; let res = run_event_loop(&mut runtime, callback_receiver).await;
res?; res?;
evaluated.await??; evaluated.await??;
Ok::<_, AnyError>(()) Ok::<_, AnyError>(())
@@ -377,6 +386,41 @@ impl JsExecutionEnvironment {
} }
} }
/// Future that pumps the deno [`JsRuntime`] event loop while forwarding
/// queued `(callback-uuid, value)` pairs from the callback channel into
/// the runtime as `runCallback(...)` invocations.
#[pin_project::pin_project]
struct RuntimeEventLoop<'a> {
    runtime: &'a mut JsRuntime,
    callback: mpsc::UnboundedReceiver<(String, Value)>,
}
impl<'a> Future for RuntimeEventLoop<'a> {
    type Output = Result<(), AnyError>;
    fn poll(
        self: Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Self::Output> {
        let this = self.project();
        // Drain every callback that is already queued before pumping the
        // event loop: poll_recv only registers a waker when it returns
        // Pending, so handling at most one message per poll could leave
        // the rest of the queue stranded until something else wakes us.
        while let Poll::Ready(Some((uuid, value))) = this.callback.poll_recv(cx) {
            // Quote the uuid ({uuid:?}) so the generated script is valid
            // JS even for non-numeric ids; `value` serializes as a JSON
            // literal, which is valid JS syntax.
            match this
                .runtime
                .execute_script("callback", &format!("runCallback({uuid:?}, {value})"))
            {
                Ok(_) => (),
                Err(e) => return Poll::Ready(Err(e)),
            }
        }
        this.runtime.poll_event_loop(cx, false)
    }
}
/// Drive `runtime` to completion, dispatching any `(uuid, value)` pairs
/// received on `callback_receiver` into the runtime along the way.
async fn run_event_loop(
    runtime: &mut JsRuntime,
    callback_receiver: mpsc::UnboundedReceiver<(String, Value)>,
) -> Result<(), AnyError> {
    let event_loop = RuntimeEventLoop {
        runtime,
        callback: callback_receiver,
    };
    event_loop.await
}
/// Note: Make sure that we have the assumption that all these methods are callable at any time, and all call restrictions should be in rust /// Note: Make sure that we have the assumption that all these methods are callable at any time, and all call restrictions should be in rust
mod fns { mod fns {
use std::cell::RefCell; use std::cell::RefCell;
@@ -396,9 +440,9 @@ mod fns {
OutputParams, OutputStrategy, ProcessGroupId, ProcessId, RunCommand, RunCommandParams, OutputParams, OutputStrategy, ProcessGroupId, ProcessId, RunCommand, RunCommandParams,
SendSignal, SendSignalParams, SignalGroup, SignalGroupParams, SendSignal, SendSignalParams, SignalGroup, SignalGroupParams,
}; };
use helpers::{to_tmp_path, AtomicFile, Rsync, RsyncOptions}; use helpers::{to_tmp_path, AtomicFile, Rsync, RsyncOptions, RuntimeDropped};
use itertools::Itertools; use itertools::Itertools;
use models::{ErrorKind, VolumeId}; use models::{PackageId, VolumeId};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use serde_json::{json, Value}; use serde_json::{json, Value};
use tokio::io::AsyncWriteExt; use tokio::io::AsyncWriteExt;
@@ -1373,6 +1417,37 @@ mod fns {
tokio::fs::set_permissions(new_file, Permissions::from_mode(mode)).await?; tokio::fs::set_permissions(new_file, Permissions::from_mode(mode)).await?;
Ok(()) Ok(())
} }
#[op]
async fn get_service_config(
state: Rc<RefCell<OpState>>,
service_id: PackageId,
path: String,
callback: String,
) -> Result<ResultType, AnyError> {
let state = state.borrow();
let ctx = state.borrow::<JsContext>();
let sender = ctx.callback_sender.clone();
Ok(
match ctx
.os
.get_service_config(
service_id,
&path,
Box::new(move |value| {
sender
.send((callback.clone(), value))
.map_err(|_| RuntimeDropped)
}),
)
.await
{
Ok(a) => ResultType::Result(a),
Err(e) => ResultType::ErrorCode(e.kind as i32, e.source.to_string()),
},
)
}
/// We need to make sure that during the file accessing, we don't reach beyond our scope of control /// We need to make sure that during the file accessing, we don't reach beyond our scope of control
async fn is_subset( async fn is_subset(
parent: impl AsRef<Path>, parent: impl AsRef<Path>,