add start/stop/restart to effects

fix: always fall back to a default in get_status

chore: Only do updates when the thing is installed.

chore: Make the thing buildable for testing

chore: Add in the debugging

chore: Remove the bluj tracing

chore: Fix the build

chore: Make these fns run on demand instead of always running.

chore: Fix the testing

fix: The stopping/restarting service

fix: Fix the restarting.
This commit is contained in:
Aiden McClelland
2023-02-09 14:54:06 -07:00
parent 10e3e80042
commit 8ee64d22b3
16 changed files with 554 additions and 383 deletions

View File

@@ -1,5 +1,4 @@
use std::collections::BTreeMap;
use std::sync::atomic::{AtomicBool, Ordering};
use itertools::Itertools;
use patch_db::{DbHandle, LockReceipt, LockType};

View File

@@ -147,7 +147,6 @@ impl OsApi for Manager {
.create_service(self.seed.manifest.id.clone(), ip)
.await
.map_err(|e| eyre!("Could not get to net controller: {e:?}"))?;
let mut secrets = self.seed.ctx.secret_store.acquire().await?;
svc.remove_lan(id, external)
.await
@@ -165,7 +164,6 @@ impl OsApi for Manager {
.create_service(self.seed.manifest.id.clone(), ip)
.await
.map_err(|e| eyre!("Could not get to net controller: {e:?}"))?;
let mut secrets = self.seed.ctx.secret_store.acquire().await?;
svc.remove_tor(id, external)
.await
@@ -173,30 +171,26 @@ impl OsApi for Manager {
Ok(())
}
fn set_started(&self) -> Result<(), Report> {
fn set_started(&self) {
self.manage_container
.current_state
.send(StartStop::Start)
.unwrap_or_default();
Ok(())
.unwrap_or_default()
}
async fn restart(&self) -> Result<(), Report> {
self.perform_restart().await;
Ok(())
async fn restart(&self) {
self.perform_restart().await
}
async fn start(&self) -> Result<(), Report> {
async fn start(&self) {
self.manage_container
.wait_for_desired(StartStop::Start)
.await;
Ok(())
.await
}
async fn stop(&self) -> Result<(), Report> {
async fn stop(&self) {
self.manage_container
.wait_for_desired(StartStop::Stop)
.await;
Ok(())
.await
}
}

View File

@@ -1,12 +1,15 @@
use std::sync::Arc;
use std::time::Duration;
use futures::FutureExt;
use patch_db::PatchDbHandle;
use tokio::sync::watch;
use tokio::sync::watch::Sender;
use tracing::instrument;
use super::start_stop::StartStop;
use super::{manager_seed, run_main, ManagerPersistentContainer, RunMainResult};
use crate::procedure::NoOutput;
use crate::s9pk::manifest::Manifest;
use crate::status::MainStatus;
use crate::util::{GeneralBoxedGuard, NonDetachingJoinHandle};
@@ -15,10 +18,10 @@ use crate::Error;
pub type ManageContainerOverride = Arc<watch::Sender<Option<MainStatus>>>;
pub struct ManageContainer {
current_state: Arc<watch::Sender<StartStop>>,
desired_state: Arc<watch::Sender<StartStop>>,
service: NonDetachingJoinHandle<()>,
save_state: NonDetachingJoinHandle<()>,
pub(super) current_state: Arc<watch::Sender<StartStop>>,
pub(super) desired_state: Arc<watch::Sender<StartStop>>,
_service: NonDetachingJoinHandle<()>,
_save_state: NonDetachingJoinHandle<()>,
override_main_status: ManageContainerOverride,
}
@@ -30,7 +33,7 @@ impl ManageContainer {
let mut db = seed.ctx.db.handle();
let current_state = Arc::new(watch::channel(StartStop::Stop).0);
let desired_state = Arc::new(
watch::channel::<StartStop>(get_status(&mut db, &seed.manifest).await?.into()).0,
watch::channel::<StartStop>(get_status(&mut db, &seed.manifest).await.into()).0,
);
let override_main_status: ManageContainerOverride = Arc::new(watch::channel(None).0);
let service = tokio::spawn(create_service_manager(
@@ -50,17 +53,19 @@ impl ManageContainer {
Ok(ManageContainer {
current_state,
desired_state,
service,
_service: service,
override_main_status,
save_state,
_save_state: save_state,
})
}
pub fn set_override(&self, override_status: Option<MainStatus>) -> GeneralBoxedGuard {
self.override_main_status.send(override_status);
self.override_main_status
.send(override_status)
.unwrap_or_default();
let override_main_status = self.override_main_status.clone();
let guard = GeneralBoxedGuard::new(move || {
override_main_status.send(None);
override_main_status.send(None).unwrap_or_default();
});
guard
}
@@ -70,9 +75,9 @@ impl ManageContainer {
}
pub async fn wait_for_desired(&self, new_state: StartStop) {
let mut current_state = self.current_state();
let current_state = self.current_state();
self.to_desired(new_state);
while *current_state.borrow() != new_state {
while current_state.borrow() != new_state {
current_state.changed().await.unwrap_or_default();
}
}
@@ -99,14 +104,14 @@ async fn create_service_manager(
let current: StartStop = current_state.borrow().clone();
let desired: StartStop = desired_state_receiver.borrow().clone();
match (current, desired) {
(StartStop::Start, StartStop::Start) => continue,
(StartStop::Start, StartStop::Start) => (),
(StartStop::Start, StartStop::Stop) => {
if let Err(err) = seed.stop_container().await {
tracing::error!("Could not stop container");
tracing::debug!("{:?}", err)
};
running_service = None;
current_state.send(StartStop::Stop);
current_state.send(StartStop::Stop).unwrap_or_default();
}
(StartStop::Stop, StartStop::Start) => starting_service(
current_state.clone(),
@@ -115,8 +120,9 @@ async fn create_service_manager(
persistent_container.clone(),
&mut running_service,
),
(StartStop::Stop, StartStop::Stop) => continue,
(StartStop::Stop, StartStop::Stop) => (),
}
if let Err(_) = desired_state_receiver.changed().await {
tracing::error!("Desired state error");
break;
@@ -188,7 +194,7 @@ fn starting_service(
let set_running = {
let current_state = current_state.clone();
Arc::new(move || {
current_state.send(StartStop::Start);
current_state.send(StartStop::Start).unwrap_or_default();
})
};
let set_stopped = {
@@ -203,8 +209,8 @@ fn starting_service(
set_running.clone(),
)
.await;
run_main_log_result(result, seed.clone());
set_stopped();
set_stopped().unwrap_or_default();
run_main_log_result(result, seed.clone()).await;
}
};
*running_service = Some(tokio::spawn(running_main_loop).into());
@@ -217,24 +223,23 @@ async fn run_main_log_result(result: RunMainResult, seed: Arc<manager_seed::Mana
#[cfg(feature = "unstable")]
{
use crate::notifications::NotificationLevel;
use crate::status::MainStatus;
let mut db = seed.ctx.db.handle();
let started = crate::db::DatabaseModel::new()
.package_data()
.idx_model(&thread_shared.seed.manifest.id)
.idx_model(&seed.manifest.id)
.and_then(|pde| pde.installed())
.map::<_, MainStatus>(|i| i.status().main())
.get(db, false)
.get(&mut db)
.await;
match started.as_deref() {
Ok(Some(MainStatus::Running { .. })) => {
let res = thread_shared.seed.ctx.notification_manager
let res = seed.ctx.notification_manager
.notify(
db,
Some(thread_shared.seed.manifest.id.clone()),
&mut db,
Some(seed.manifest.id.clone()),
NotificationLevel::Warning,
String::from("Service Crashed"),
format!("The service {} has crashed with the following exit code: {}\nDetails: {}", thread_shared.seed.manifest.id.clone(), e.0, e.1),
format!("The service {} has crashed with the following exit code: {}\nDetails: {}", seed.manifest.id.clone(), e.0, e.1),
(),
Some(3600) // 1 hour
)
@@ -249,6 +254,11 @@ async fn run_main_log_result(result: RunMainResult, seed: Arc<manager_seed::Mana
}
}
}
tracing::error!(
"The service {} has crashed with the following exit code: {}",
seed.manifest.id.clone(),
e.0
);
tokio::time::sleep(Duration::from_secs(15)).await;
}
@@ -259,41 +269,56 @@ async fn run_main_log_result(result: RunMainResult, seed: Arc<manager_seed::Mana
}
}
pub(super) async fn get_status(
db: &mut PatchDbHandle,
manifest: &Manifest,
) -> Result<MainStatus, Error> {
Ok(crate::db::DatabaseModel::new()
.package_data()
.idx_model(&manifest.id)
.expect(db)
.await?
.installed()
.expect(db)
.await?
.status()
.main()
.get(db)
.await?
.clone())
#[instrument(skip(db, manifest))]
pub(super) async fn get_status(db: &mut PatchDbHandle, manifest: &Manifest) -> MainStatus {
async move {
Ok::<_, Error>(
crate::db::DatabaseModel::new()
.package_data()
.idx_model(&manifest.id)
.expect(db)
.await?
.installed()
.expect(db)
.await?
.status()
.main()
.get(db)
.await?
.clone(),
)
}
.map(|x| x.unwrap_or_else(|e| MainStatus::Stopped))
.await
}
#[instrument(skip(db, manifest))]
async fn set_status(
db: &mut PatchDbHandle,
manifest: &Manifest,
main_status: &MainStatus,
) -> Result<(), Error> {
crate::db::DatabaseModel::new()
if crate::db::DatabaseModel::new()
.package_data()
.idx_model(&manifest.id)
.expect(db)
.await?
.installed()
.expect(db)
.exists(db)
.await?
.status()
.main()
.put(db, main_status)
.await?;
{
crate::db::DatabaseModel::new()
.package_data()
.idx_model(&manifest.id)
.expect(db)
.await?
.installed()
.expect(db)
.await?
.status()
.main()
.put(db, main_status)
.await?;
}
Ok(())
}

View File

@@ -7,7 +7,7 @@ use std::time::Duration;
use color_eyre::{eyre::eyre, Report};
use embassy_container_init::ProcessGroupId;
use futures::future::BoxFuture;
use futures::{FutureExt, TryFutureExt};
use futures::{Future, FutureExt, TryFutureExt};
use helpers::UnixRpcClient;
use models::{ErrorKind, PackageId};
use nix::sys::signal::Signal;
@@ -168,19 +168,22 @@ impl Manager {
self._transition_replace({
let manage_container = self.manage_container.clone();
TransitionState::Configuring(tokio::spawn(async move {
let desired_state = manage_container.desired_state();
let state_reverter = DesiredStateReverter::new(manage_container.clone());
let mut current_state = manage_container.current_state();
manage_container.to_desired(StartStop::Stop);
while current_state.borrow().is_start() {
current_state.changed().await.unwrap();
}
TransitionState::Configuring(
tokio::spawn(async move {
let desired_state = manage_container.desired_state();
let state_reverter = DesiredStateReverter::new(manage_container.clone());
let mut current_state = manage_container.current_state();
manage_container.to_desired(StartStop::Stop);
while current_state.borrow().is_start() {
current_state.changed().await.unwrap();
}
transition_state.await;
transition_state.await;
state_reverter.revert().await;
}))
state_reverter.revert().await;
})
.into(),
)
});
done.await
}
@@ -230,69 +233,76 @@ impl Manager {
.send_replace(Arc::new(transition_state))
.abort();
}
pub(super) fn perform_restart(&self) -> impl Future<Output = ()> + 'static {
let manage_container = self.manage_container.clone();
async move {
let _ = manage_container.set_override(Some(MainStatus::Restarting));
manage_container.wait_for_desired(StartStop::Stop).await;
manage_container.wait_for_desired(StartStop::Start).await;
}
}
fn _transition_restart(&self) -> TransitionState {
let transition = self.transition.clone();
let restart = self.perform_restart();
TransitionState::Restarting(
tokio::spawn(async move {
restart.await;
transition.send_replace(Default::default());
})
.into(),
)
}
fn perform_backup(
&self,
backup_guard: BackupGuard,
) -> impl Future<Output = Result<Result<PackageBackupInfo, Error>, Error>> + 'static {
let manage_container = self.manage_container.clone();
TransitionState::Restarting(tokio::spawn(async move {
let mut current_state = manage_container.current_state();
let _ = manage_container.set_override(Some(MainStatus::Restarting));
manage_container.to_desired(StartStop::Stop);
while current_state.borrow().is_start() {
current_state.changed().await.unwrap();
}
manage_container.to_desired(StartStop::Start);
while current_state.borrow().is_stop() {
current_state.changed().await.unwrap();
}
transition.send_replace(Default::default());
}))
let seed = self.seed.clone();
async move {
let state_reverter = DesiredStateReverter::new(manage_container.clone());
let mut tx = seed.ctx.db.handle();
let _ = manage_container
.set_override(Some(get_status(&mut tx, &seed.manifest).await.backing_up()));
manage_container.wait_for_desired(StartStop::Stop).await;
let backup_guard = backup_guard.lock().await;
let guard = backup_guard.mount_package_backup(&seed.manifest.id).await?;
let res = seed
.manifest
.backup
.create(
&seed.ctx,
&mut tx,
&seed.manifest.id,
&seed.manifest.title,
&seed.manifest.version,
&seed.manifest.interfaces,
&seed.manifest.volumes,
)
.await;
guard.unmount().await?;
drop(backup_guard);
let return_value = res;
state_reverter.revert().await;
Ok::<_, Error>(return_value)
}
}
fn _transition_backup(
&self,
backup_guard: BackupGuard,
) -> (TransitionState, BoxFuture<BackupReturn>) {
let manage_container = self.manage_container.clone();
let seed = self.seed.clone();
let (send, done) = oneshot::channel();
(
TransitionState::BackingUp(tokio::spawn(
async move {
let state_reverter = DesiredStateReverter::new(manage_container.clone());
let mut current_state = manage_container.current_state();
let mut tx = seed.ctx.db.handle();
let _ = manage_container.set_override(Some(
get_status(&mut tx, &seed.manifest).await?.backing_up(),
));
manage_container.to_desired(StartStop::Stop);
while current_state.borrow().is_start() {
current_state.changed().await.unwrap();
}
let backup_guard = backup_guard.lock().await;
let guard = backup_guard.mount_package_backup(&seed.manifest.id).await?;
let res = seed
.manifest
.backup
.create(
&seed.ctx,
&mut tx,
&seed.manifest.id,
&seed.manifest.title,
&seed.manifest.version,
&seed.manifest.interfaces,
&seed.manifest.volumes,
)
.await;
guard.unmount().await?;
drop(backup_guard);
let return_value = res;
state_reverter.revert().await;
Ok::<_, Error>(return_value)
}
.then(finnish_up_backup_task(self.transition.clone(), send)), //
)),
TransitionState::BackingUp(
tokio::spawn(
self.perform_backup(backup_guard)
.then(finnish_up_backup_task(self.transition.clone(), send)),
)
.into(),
),
done.map_err(|err| Error::new(eyre!("Oneshot error: {err:?}"), ErrorKind::Unknown))
.map(flatten_backup_error)
.boxed(),
@@ -683,11 +693,13 @@ async fn run_main(
};
let svc = if let Some(ip) = ip {
Some(add_network_for_main(&*seed, ip).await?)
let net = add_network_for_main(&seed, ip).await?;
started();
Some(net)
} else {
None
};
started();
let health = main_health_check_daemon(seed.clone());
let res = tokio::select! {
a = runtime => a.map_err(|_| Error::new(eyre!("Manager runtime panicked!"), crate::ErrorKind::Docker)).and_then(|a| a),
@@ -774,6 +786,7 @@ async fn get_long_running_ip(seed: &ManagerSeed, runtime: &mut LongRunning) -> G
}
}
#[instrument(skip(seed))]
async fn container_inspect(
seed: &ManagerSeed,
) -> Result<bollard::models::ContainerInspectResponse, bollard::errors::Error> {
@@ -783,6 +796,7 @@ async fn container_inspect(
.await
}
#[instrument(skip(seed))]
async fn add_network_for_main(
seed: &ManagerSeed,
ip: std::net::Ipv4Addr,
@@ -814,6 +828,7 @@ async fn add_network_for_main(
Ok(svc)
}
#[instrument(skip(svc))]
async fn remove_network_for_main(svc: NetService) -> Result<(), Error> {
svc.remove_all().await
}
@@ -848,6 +863,7 @@ async fn try_get_running_ip(seed: &ManagerSeed) -> Result<Option<Ipv4Addr>, Repo
.transpose()?)
}
#[instrument(skip(seed, runtime))]
async fn get_running_ip(seed: &ManagerSeed, mut runtime: &mut RuntimeOfCommand) -> GetRunningIp {
loop {
match container_inspect(seed).await {

View File

@@ -1,16 +1,14 @@
use tokio::task::JoinHandle;
use helpers::NonDetachingJoinHandle;
pub(crate) enum TransitionState {
// Starting(JoinHandle<()>),
// Stopping(JoinHandle<()>)
BackingUp(JoinHandle<()>),
Restarting(JoinHandle<()>),
Configuring(JoinHandle<()>),
BackingUp(NonDetachingJoinHandle<()>),
Restarting(NonDetachingJoinHandle<()>),
Configuring(NonDetachingJoinHandle<()>),
None,
}
impl TransitionState {
pub(crate) fn join_handle(&self) -> Option<&JoinHandle<()>> {
pub(crate) fn join_handle(&self) -> Option<&NonDetachingJoinHandle<()>> {
Some(match self {
TransitionState::BackingUp(a) => a,
TransitionState::Restarting(a) => a,