Feat/logging (#2602)

* wip: Working on something to help

* chore: Add in some of the logging now

* chore: fix the type to interned instead of id

* wip

* wip

* chore: fix the logging by moving levels

* Apply suggestions from code review

* mount at machine id for journal

* Persistent

* limit log size

* feat: Actually logging and mounting now

* fix: Get the logs from the previous versions of the boot

* chore: Add the boot id

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
This commit is contained in:
Jade
2024-04-17 15:46:10 -06:00
committed by GitHub
parent 711c82472c
commit 9eff920989
10 changed files with 308 additions and 120 deletions

View File

@@ -3,38 +3,61 @@
## Methods ## Methods
### init ### init
initialize runtime (mount `/proc`, `/sys`, `/dev`, and `/run` to each image in `/media/images`) initialize runtime (mount `/proc`, `/sys`, `/dev`, and `/run` to each image in `/media/images`)
called after os has mounted js and images to the container called after os has mounted js and images to the container
#### args #### args
`[]` `[]`
#### response #### response
`null` `null`
### exit ### exit
shutdown runtime shutdown runtime
#### args #### args
`[]` `[]`
#### response #### response
`null` `null`
### start ### start
run main method if not already running run main method if not already running
#### args #### args
`[]` `[]`
#### response #### response
`null` `null`
### stop ### stop
stop main method by sending SIGTERM to child processes, and SIGKILL after timeout stop main method by sending SIGTERM to child processes, and SIGKILL after timeout
#### args #### args
`{ timeout: millis }` `{ timeout: millis }`
#### response #### response
`null` `null`
### execute ### execute
run a specific package procedure run a specific package procedure
#### args
#### args
```ts ```ts
{ {
procedure: JsonPath, procedure: JsonPath,
@@ -42,12 +65,17 @@ run a specific package procedure
timeout: millis, timeout: millis,
} }
``` ```
#### response #### response
`any` `any`
### sandbox ### sandbox
run a specific package procedure in sandbox mode run a specific package procedure in sandbox mode
#### args
#### args
```ts ```ts
{ {
procedure: JsonPath, procedure: JsonPath,
@@ -55,5 +83,7 @@ run a specific package procedure in sandbox mode
timeout: millis, timeout: millis,
} }
``` ```
#### response #### response
`any` `any`

View File

@@ -11,9 +11,13 @@ apt-get install -y curl rsync
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash
source ~/.bashrc source ~/.bashrc
nvm install 20 nvm install 20
ln -s $(which node) /usr/bin/node ln -s $(which node) /usr/bin/node
sed -i '/\(^\|#\)Storage=/c\Storage=persistent' /etc/systemd/journald.conf
sed -i '/\(^\|#\)Compress=/c\Compress=yes' /etc/systemd/journald.conf
sed -i '/\(^\|#\)SystemMaxUse=/c\SystemMaxUse=1G' /etc/systemd/journald.conf
sed -i '/\(^\|#\)ForwardToSyslog=/c\ForwardToSyslog=no' /etc/systemd/journald.conf
systemctl enable container-runtime.service systemctl enable container-runtime.service
rm -rf /run/systemd rm -rf /run/systemd

View File

@@ -5,7 +5,7 @@
"module": "./index.js", "module": "./index.js",
"scripts": { "scripts": {
"check": "tsc --noEmit", "check": "tsc --noEmit",
"build": "prettier --write '**/*.ts' && rm -rf dist && tsc", "build": "prettier . '!tmp/**' --write && rm -rf dist && tsc",
"tsc": "rm -rf dist; tsc" "tsc": "rm -rf dist; tsc"
}, },
"author": "", "author": "",

View File

@@ -16,7 +16,6 @@ use tokio::time::Instant;
use tracing::instrument; use tracing::instrument;
use super::setup::CURRENT_SECRET; use super::setup::CURRENT_SECRET;
use crate::account::AccountInfo;
use crate::context::config::ServerConfig; use crate::context::config::ServerConfig;
use crate::core::rpc_continuations::{RequestGuid, RestHandler, RpcContinuation, WebSocketHandler}; use crate::core::rpc_continuations::{RequestGuid, RestHandler, RpcContinuation, WebSocketHandler};
use crate::db::prelude::PatchDbExt; use crate::db::prelude::PatchDbExt;
@@ -33,6 +32,7 @@ use crate::service::ServiceMap;
use crate::shutdown::Shutdown; use crate::shutdown::Shutdown;
use crate::system::get_mem_info; use crate::system::get_mem_info;
use crate::util::lshw::{lshw, LshwDevice}; use crate::util::lshw::{lshw, LshwDevice};
use crate::{account::AccountInfo, lxc::ContainerId};
pub struct RpcContextSeed { pub struct RpcContextSeed {
is_closed: AtomicBool, is_closed: AtomicBool,
@@ -60,7 +60,7 @@ pub struct RpcContextSeed {
} }
pub struct Dev { pub struct Dev {
pub lxc: Mutex<BTreeMap<InternedString, LxcContainer>>, pub lxc: Mutex<BTreeMap<ContainerId, LxcContainer>>,
} }
pub struct Hardware { pub struct Hardware {

View File

@@ -18,15 +18,21 @@ use tokio_stream::wrappers::LinesStream;
use tokio_tungstenite::tungstenite::Message; use tokio_tungstenite::tungstenite::Message;
use tracing::instrument; use tracing::instrument;
use crate::context::{CliContext, RpcContext};
use crate::core::rpc_continuations::{RequestGuid, RpcContinuation};
use crate::error::ResultExt; use crate::error::ResultExt;
use crate::prelude::*; use crate::prelude::*;
use crate::util::serde::Reversible; use crate::util::serde::Reversible;
use crate::{
context::{CliContext, RpcContext},
lxc::ContainerId,
};
use crate::{
core::rpc_continuations::{RequestGuid, RpcContinuation},
util::Invoke,
};
#[pin_project::pin_project] #[pin_project::pin_project]
pub struct LogStream { pub struct LogStream {
_child: Child, _child: Option<Child>,
#[pin] #[pin]
entries: BoxStream<'static, Result<JournalctlEntry, Error>>, entries: BoxStream<'static, Result<JournalctlEntry, Error>>,
} }
@@ -116,9 +122,11 @@ pub struct LogFollowResponse {
} }
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)] #[derive(serde::Serialize, serde::Deserialize, Debug, Clone)]
#[serde(rename_all = "camelCase")]
pub struct LogEntry { pub struct LogEntry {
timestamp: DateTime<Utc>, timestamp: DateTime<Utc>,
message: String, message: String,
boot_id: String,
} }
impl std::fmt::Display for LogEntry { impl std::fmt::Display for LogEntry {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
@@ -141,6 +149,8 @@ pub struct JournalctlEntry {
pub message: String, pub message: String,
#[serde(rename = "__CURSOR")] #[serde(rename = "__CURSOR")]
pub cursor: String, pub cursor: String,
#[serde(rename = "_BOOT_ID")]
pub boot_id: String,
} }
impl JournalctlEntry { impl JournalctlEntry {
fn log_entry(self) -> Result<(String, LogEntry), Error> { fn log_entry(self) -> Result<(String, LogEntry), Error> {
@@ -151,6 +161,7 @@ impl JournalctlEntry {
UNIX_EPOCH + Duration::from_micros(self.timestamp.parse::<u64>()?), UNIX_EPOCH + Duration::from_micros(self.timestamp.parse::<u64>()?),
), ),
message: self.message, message: self.message,
boot_id: self.boot_id,
}, },
)) ))
} }
@@ -200,12 +211,12 @@ fn deserialize_log_message<'de, D: serde::de::Deserializer<'de>>(
/// --user-unit=UNIT Show logs from the specified user unit)) /// --user-unit=UNIT Show logs from the specified user unit))
/// System: Unit is startd, but we also filter on the comm /// System: Unit is startd, but we also filter on the comm
/// Container: Filtering containers, like podman/docker is done by filtering on the CONTAINER_NAME /// Container: Filtering containers, like podman/docker is done by filtering on the CONTAINER_NAME
#[derive(Debug)] #[derive(Debug, Clone)]
pub enum LogSource { pub enum LogSource {
Kernel, Kernel,
Unit(&'static str), Unit(&'static str),
System, System,
Container(PackageId), Container(ContainerId),
} }
pub const SYSTEM_UNIT: &str = "startd"; pub const SYSTEM_UNIT: &str = "startd";
@@ -276,7 +287,7 @@ pub async fn cli_logs(
} }
} }
pub async fn logs_nofollow( pub async fn logs_nofollow(
_ctx: (), ctx: RpcContext,
_: Empty, _: Empty,
LogsParam { LogsParam {
id, id,
@@ -286,14 +297,38 @@ pub async fn logs_nofollow(
.. ..
}: LogsParam, }: LogsParam,
) -> Result<LogResponse, Error> { ) -> Result<LogResponse, Error> {
fetch_logs(LogSource::Container(id), limit, cursor, before).await let container_id = ctx
.services
.get(&id)
.await
.as_ref()
.map(|x| x.container_id())
.ok_or_else(|| {
Error::new(
eyre!("No service found with id: {}", id),
ErrorKind::NotFound,
)
})??;
fetch_logs(LogSource::Container(container_id), limit, cursor, before).await
} }
pub async fn logs_follow( pub async fn logs_follow(
ctx: RpcContext, ctx: RpcContext,
_: Empty, _: Empty,
LogsParam { id, limit, .. }: LogsParam, LogsParam { id, limit, .. }: LogsParam,
) -> Result<LogFollowResponse, Error> { ) -> Result<LogFollowResponse, Error> {
follow_logs(ctx, LogSource::Container(id), limit).await let container_id = ctx
.services
.get(&id)
.await
.as_ref()
.map(|x| x.container_id())
.ok_or_else(|| {
Error::new(
eyre!("No service found with id: {}", id),
ErrorKind::NotFound,
)
})??;
follow_logs(ctx, LogSource::Container(container_id), limit).await
} }
pub async fn cli_logs_generic_nofollow( pub async fn cli_logs_generic_nofollow(
@@ -358,7 +393,74 @@ pub async fn journalctl(
before: bool, before: bool,
follow: bool, follow: bool,
) -> Result<LogStream, Error> { ) -> Result<LogStream, Error> {
let mut cmd = Command::new("journalctl"); let mut cmd = gen_journalctl_command(&id, limit);
let cursor_formatted = format!("--after-cursor={}", cursor.unwrap_or(""));
if cursor.is_some() {
cmd.arg(&cursor_formatted);
if before {
cmd.arg("--reverse");
}
}
let deserialized_entries = String::from_utf8(cmd.invoke(ErrorKind::Journald).await?)?
.lines()
.map(serde_json::from_str::<JournalctlEntry>)
.collect::<Result<Vec<_>, _>>()
.with_kind(ErrorKind::Deserialization)?;
if follow {
let mut follow_cmd = gen_journalctl_command(&id, limit);
follow_cmd.arg("-f");
if let Some(last) = deserialized_entries.last() {
cmd.arg(format!("--after-cursor={}", last.cursor));
}
let mut child = cmd.stdout(Stdio::piped()).spawn()?;
let out =
BufReader::new(child.stdout.take().ok_or_else(|| {
Error::new(eyre!("No stdout available"), crate::ErrorKind::Journald)
})?);
let journalctl_entries = LinesStream::new(out.lines());
let follow_deserialized_entries = journalctl_entries
.map_err(|e| Error::new(e, crate::ErrorKind::Journald))
.and_then(|s| {
futures::future::ready(
serde_json::from_str::<JournalctlEntry>(&s)
.with_kind(crate::ErrorKind::Deserialization),
)
});
let entries = futures::stream::iter(deserialized_entries)
.map(Ok)
.chain(follow_deserialized_entries)
.boxed();
Ok(LogStream {
_child: Some(child),
entries,
})
} else {
let entries = futures::stream::iter(deserialized_entries).map(Ok).boxed();
Ok(LogStream {
_child: None,
entries,
})
}
}
fn gen_journalctl_command(id: &LogSource, limit: usize) -> Command {
let mut cmd = match id {
LogSource::Container(container_id) => {
let mut cmd = Command::new("lxc-attach");
cmd.arg(format!("{}", container_id))
.arg("--")
.arg("journalctl");
cmd
}
_ => Command::new("journalctl"),
};
cmd.kill_on_drop(true); cmd.kill_on_drop(true);
cmd.arg("--output=json"); cmd.arg("--output=json");
@@ -377,48 +479,11 @@ pub async fn journalctl(
cmd.arg(SYSTEM_UNIT); cmd.arg(SYSTEM_UNIT);
cmd.arg(format!("_COMM={}", SYSTEM_UNIT)); cmd.arg(format!("_COMM={}", SYSTEM_UNIT));
} }
LogSource::Container(id) => { LogSource::Container(_container_id) => {
#[cfg(not(feature = "docker"))] cmd.arg("-u").arg("container-runtime.service");
cmd.arg(format!("SYSLOG_IDENTIFIER={}.embassy", id));
#[cfg(feature = "docker")]
cmd.arg(format!("CONTAINER_NAME={}.embassy", id));
} }
}; };
cmd
let cursor_formatted = format!("--after-cursor={}", cursor.unwrap_or(""));
if cursor.is_some() {
cmd.arg(&cursor_formatted);
if before {
cmd.arg("--reverse");
}
}
if follow {
cmd.arg("--follow");
}
let mut child = cmd.stdout(Stdio::piped()).spawn()?;
let out = BufReader::new(
child
.stdout
.take()
.ok_or_else(|| Error::new(eyre!("No stdout available"), crate::ErrorKind::Journald))?,
);
let journalctl_entries = LinesStream::new(out.lines());
let deserialized_entries = journalctl_entries
.map_err(|e| Error::new(e, crate::ErrorKind::Journald))
.and_then(|s| {
futures::future::ready(
serde_json::from_str::<JournalctlEntry>(&s)
.with_kind(crate::ErrorKind::Deserialization),
)
});
Ok(LogStream {
_child: child,
entries: deserialized_entries.boxed(),
})
} }
#[instrument(skip_all)] #[instrument(skip_all)]

View File

@@ -5,9 +5,11 @@ use std::path::Path;
use std::sync::{Arc, Weak}; use std::sync::{Arc, Weak};
use std::time::Duration; use std::time::Duration;
use clap::builder::ValueParserFactory;
use clap::Parser; use clap::Parser;
use futures::{AsyncWriteExt, FutureExt, StreamExt}; use futures::{AsyncWriteExt, FutureExt, StreamExt};
use imbl_value::{InOMap, InternedString}; use imbl_value::{InOMap, InternedString};
use models::InvalidId;
use rpc_toolkit::yajrc::{RpcError, RpcResponse}; use rpc_toolkit::yajrc::{RpcError, RpcResponse};
use rpc_toolkit::{ use rpc_toolkit::{
from_fn_async, AnyContext, CallRemoteHandler, GenericRpcMethod, Handler, HandlerArgs, from_fn_async, AnyContext, CallRemoteHandler, GenericRpcMethod, Handler, HandlerArgs,
@@ -28,10 +30,11 @@ use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::block_dev::BlockDev; use crate::disk::mount::filesystem::block_dev::BlockDev;
use crate::disk::mount::filesystem::idmapped::IdMapped; use crate::disk::mount::filesystem::idmapped::IdMapped;
use crate::disk::mount::filesystem::overlayfs::OverlayGuard; use crate::disk::mount::filesystem::overlayfs::OverlayGuard;
use crate::disk::mount::filesystem::ReadWrite; use crate::disk::mount::filesystem::{MountType, ReadWrite};
use crate::disk::mount::guard::{GenericMountGuard, TmpMountGuard}; use crate::disk::mount::guard::{GenericMountGuard, MountGuard, TmpMountGuard};
use crate::disk::mount::util::unmount; use crate::disk::mount::util::unmount;
use crate::prelude::*; use crate::prelude::*;
use crate::util::clap::FromStrParser;
use crate::util::rpc_client::UnixRpcClient; use crate::util::rpc_client::UnixRpcClient;
use crate::util::{new_guid, Invoke}; use crate::util::{new_guid, Invoke};
@@ -41,18 +44,57 @@ pub const CONTAINER_RPC_SERVER_SOCKET: &str = "service.sock"; // must not be abs
pub const HOST_RPC_SERVER_SOCKET: &str = "host.sock"; // must not be absolute path pub const HOST_RPC_SERVER_SOCKET: &str = "host.sock"; // must not be absolute path
const CONTAINER_DHCP_TIMEOUT: Duration = Duration::from_secs(30); const CONTAINER_DHCP_TIMEOUT: Duration = Duration::from_secs(30);
pub struct LxcManager { #[derive(
containers: Mutex<Vec<Weak<InternedString>>>, Clone, Debug, Serialize, Deserialize, Default, PartialEq, Eq, PartialOrd, Ord, Hash, TS,
)]
#[ts(type = "string")]
pub struct ContainerId(InternedString);
impl std::ops::Deref for ContainerId {
type Target = str;
fn deref(&self) -> &Self::Target {
&self.0
}
} }
impl std::fmt::Display for ContainerId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", &*self.0)
}
}
impl TryFrom<&str> for ContainerId {
type Error = InvalidId;
fn try_from(value: &str) -> Result<Self, Self::Error> {
Ok(ContainerId(InternedString::intern(value)))
}
}
impl std::str::FromStr for ContainerId {
type Err = InvalidId;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Self::try_from(s)
}
}
impl ValueParserFactory for ContainerId {
type Parser = FromStrParser<Self>;
fn value_parser() -> Self::Parser {
FromStrParser::new()
}
}
#[derive(Default)]
pub struct LxcManager {
containers: Mutex<Vec<Weak<ContainerId>>>,
}
impl LxcManager { impl LxcManager {
pub fn new() -> Self { pub fn new() -> Self {
Self { Self::default()
containers: Default::default(),
}
} }
pub async fn create(self: &Arc<Self>, config: LxcConfig) -> Result<LxcContainer, Error> { pub async fn create(
let container = LxcContainer::new(self, config).await?; self: &Arc<Self>,
log_mount: Option<&Path>,
config: LxcConfig,
) -> Result<LxcContainer, Error> {
let container = LxcContainer::new(self, log_mount, config).await?;
let mut guard = self.containers.lock().await; let mut guard = self.containers.lock().await;
*guard = std::mem::take(&mut *guard) *guard = std::mem::take(&mut *guard)
.into_iter() .into_iter()
@@ -69,7 +111,7 @@ impl LxcManager {
.await .await
.iter() .iter()
.filter_map(|g| g.upgrade()) .filter_map(|g| g.upgrade())
.map(|g| (&*g).clone()), .map(|g| (*g).clone()),
); );
for container in String::from_utf8( for container in String::from_utf8(
Command::new("lxc-ls") Command::new("lxc-ls")
@@ -80,7 +122,7 @@ impl LxcManager {
.lines() .lines()
.map(|s| s.trim()) .map(|s| s.trim())
{ {
if !expected.contains(container) { if !expected.contains(&ContainerId::try_from(container)?) {
let rootfs_path = Path::new(LXC_CONTAINER_DIR).join(container).join("rootfs"); let rootfs_path = Path::new(LXC_CONTAINER_DIR).join(container).join("rootfs");
if tokio::fs::metadata(&rootfs_path).await.is_ok() { if tokio::fs::metadata(&rootfs_path).await.is_ok() {
unmount(Path::new(LXC_CONTAINER_DIR).join(container).join("rootfs")).await?; unmount(Path::new(LXC_CONTAINER_DIR).join(container).join("rootfs")).await?;
@@ -112,14 +154,20 @@ impl LxcManager {
pub struct LxcContainer { pub struct LxcContainer {
manager: Weak<LxcManager>, manager: Weak<LxcManager>,
rootfs: OverlayGuard, rootfs: OverlayGuard,
guid: Arc<InternedString>, pub guid: Arc<ContainerId>,
rpc_bind: TmpMountGuard, rpc_bind: TmpMountGuard,
log_mount: Option<MountGuard>,
config: LxcConfig, config: LxcConfig,
exited: bool, exited: bool,
} }
impl LxcContainer { impl LxcContainer {
async fn new(manager: &Arc<LxcManager>, config: LxcConfig) -> Result<Self, Error> { async fn new(
manager: &Arc<LxcManager>,
log_mount: Option<&Path>,
config: LxcConfig,
) -> Result<Self, Error> {
let guid = new_guid(); let guid = new_guid();
let machine_id = hex::encode(rand::random::<[u8; 16]>());
let container_dir = Path::new(LXC_CONTAINER_DIR).join(&*guid); let container_dir = Path::new(LXC_CONTAINER_DIR).join(&*guid);
tokio::fs::create_dir_all(&container_dir).await?; tokio::fs::create_dir_all(&container_dir).await?;
tokio::fs::write( tokio::fs::write(
@@ -145,6 +193,7 @@ impl LxcContainer {
&rootfs_dir, &rootfs_dir,
) )
.await?; .await?;
tokio::fs::write(rootfs_dir.join("etc/machine-id"), format!("{machine_id}\n")).await?;
tokio::fs::write(rootfs_dir.join("etc/hostname"), format!("{guid}\n")).await?; tokio::fs::write(rootfs_dir.join("etc/hostname"), format!("{guid}\n")).await?;
Command::new("sed") Command::new("sed")
.arg("-i") .arg("-i")
@@ -166,6 +215,20 @@ impl LxcContainer {
.arg(rpc_bind.path()) .arg(rpc_bind.path())
.invoke(ErrorKind::Filesystem) .invoke(ErrorKind::Filesystem)
.await?; .await?;
let log_mount = if let Some(path) = log_mount {
let log_mount_point = rootfs_dir.join("var/log/journal").join(machine_id);
let log_mount =
MountGuard::mount(&Bind::new(path), &log_mount_point, MountType::ReadWrite).await?;
Command::new("chown")
// The group is set to 100999 because that is the group id of journald
.arg("100000:100999")
.arg(&log_mount_point)
.invoke(crate::ErrorKind::Filesystem)
.await?;
Some(log_mount)
} else {
None
};
Command::new("lxc-start") Command::new("lxc-start")
.arg("-d") .arg("-d")
.arg("--name") .arg("--name")
@@ -175,10 +238,11 @@ impl LxcContainer {
Ok(Self { Ok(Self {
manager: Arc::downgrade(manager), manager: Arc::downgrade(manager),
rootfs, rootfs,
guid: Arc::new(guid), guid: Arc::new(ContainerId::try_from(&*guid)?),
rpc_bind, rpc_bind,
config, config,
exited: false, exited: false,
log_mount,
}) })
} }
@@ -188,11 +252,12 @@ impl LxcContainer {
pub async fn ip(&self) -> Result<Ipv4Addr, Error> { pub async fn ip(&self) -> Result<Ipv4Addr, Error> {
let start = Instant::now(); let start = Instant::now();
let guid: &str = &self.guid;
loop { loop {
let output = String::from_utf8( let output = String::from_utf8(
Command::new("lxc-info") Command::new("lxc-info")
.arg("--name") .arg("--name")
.arg(&*self.guid) .arg(guid)
.arg("-iH") .arg("-iH")
.invoke(ErrorKind::Docker) .invoke(ErrorKind::Docker)
.await?, .await?,
@@ -218,6 +283,9 @@ impl LxcContainer {
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn exit(mut self) -> Result<(), Error> { pub async fn exit(mut self) -> Result<(), Error> {
self.rpc_bind.take().unmount().await?; self.rpc_bind.take().unmount().await?;
if let Some(log_mount) = self.log_mount.take() {
log_mount.unmount(true).await?;
}
self.rootfs.take().unmount(true).await?; self.rootfs.take().unmount(true).await?;
let rootfs_path = self.rootfs_dir(); let rootfs_path = self.rootfs_dir();
let err_path = rootfs_path.join("var/log/containerRuntime.err"); let err_path = rootfs_path.join("var/log/containerRuntime.err");
@@ -228,17 +296,16 @@ impl LxcContainer {
tracing::error!(container, "{}", line); tracing::error!(container, "{}", line);
} }
} }
if tokio::fs::metadata(&rootfs_path).await.is_ok() { if tokio::fs::metadata(&rootfs_path).await.is_ok()
if tokio_stream::wrappers::ReadDirStream::new(tokio::fs::read_dir(&rootfs_path).await?) && tokio_stream::wrappers::ReadDirStream::new(tokio::fs::read_dir(&rootfs_path).await?)
.count() .count()
.await .await
> 0 > 0
{ {
return Err(Error::new( return Err(Error::new(
eyre!("rootfs is not empty, refusing to delete"), eyre!("rootfs is not empty, refusing to delete"),
ErrorKind::InvalidRequest, ErrorKind::InvalidRequest,
)); ));
}
} }
Command::new("lxc-destroy") Command::new("lxc-destroy")
.arg("--force") .arg("--force")
@@ -341,21 +408,21 @@ pub fn lxc() -> ParentHandler {
.subcommand("connect", from_fn_async(connect_rpc_cli).no_display()) .subcommand("connect", from_fn_async(connect_rpc_cli).no_display())
} }
pub async fn create(ctx: RpcContext) -> Result<InternedString, Error> { pub async fn create(ctx: RpcContext) -> Result<ContainerId, Error> {
let container = ctx.lxc_manager.create(LxcConfig::default()).await?; let container = ctx.lxc_manager.create(None, LxcConfig::default()).await?;
let guid = container.guid.deref().clone(); let guid = container.guid.deref().clone();
ctx.dev.lxc.lock().await.insert(guid.clone(), container); ctx.dev.lxc.lock().await.insert(guid.clone(), container);
Ok(guid) Ok(guid)
} }
pub async fn list(ctx: RpcContext) -> Result<Vec<InternedString>, Error> { pub async fn list(ctx: RpcContext) -> Result<Vec<ContainerId>, Error> {
Ok(ctx.dev.lxc.lock().await.keys().cloned().collect()) Ok(ctx.dev.lxc.lock().await.keys().cloned().collect())
} }
#[derive(Deserialize, Serialize, Parser, TS)] #[derive(Deserialize, Serialize, Parser, TS)]
pub struct RemoveParams { pub struct RemoveParams {
#[ts(type = "string")] #[ts(type = "string")]
pub guid: InternedString, pub guid: ContainerId,
} }
pub async fn remove(ctx: RpcContext, RemoveParams { guid }: RemoveParams) -> Result<(), Error> { pub async fn remove(ctx: RpcContext, RemoveParams { guid }: RemoveParams) -> Result<(), Error> {
@@ -368,7 +435,7 @@ pub async fn remove(ctx: RpcContext, RemoveParams { guid }: RemoveParams) -> Res
#[derive(Deserialize, Serialize, Parser, TS)] #[derive(Deserialize, Serialize, Parser, TS)]
pub struct ConnectParams { pub struct ConnectParams {
#[ts(type = "string")] #[ts(type = "string")]
pub guid: InternedString, pub guid: ContainerId,
} }
pub async fn connect_rpc( pub async fn connect_rpc(
@@ -502,7 +569,7 @@ pub async fn connect_cli(ctx: &CliContext, guid: RequestGuid) -> Result<(), Erro
if let Some((method, rest)) = command.split_first() { if let Some((method, rest)) = command.split_first() {
let mut params = InOMap::new(); let mut params = InOMap::new();
for arg in rest { for arg in rest {
if let Some((name, value)) = arg.split_once("=") { if let Some((name, value)) = arg.split_once('=') {
params.insert(InternedString::intern(name), if value.is_empty() { params.insert(InternedString::intern(name), if value.is_empty() {
Value::Null Value::Null
} else if let Ok(v) = serde_json::from_str(value) { } else if let Ok(v) = serde_json::from_str(value) {

View File

@@ -70,7 +70,7 @@ fn filter(p: &Path) -> bool {
#[derive(Clone)] #[derive(Clone)]
pub struct S9pk<S = Section<MultiCursorFile>> { pub struct S9pk<S = Section<MultiCursorFile>> {
manifest: Manifest, pub manifest: Manifest,
manifest_dirty: bool, manifest_dirty: bool,
archive: MerkleArchive<S>, archive: MerkleArchive<S>,
size: Option<u64>, size: Option<u64>,

View File

@@ -13,7 +13,6 @@ use start_stop::StartStop;
use tokio::sync::Notify; use tokio::sync::Notify;
use ts_rs::TS; use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::core::rpc_continuations::RequestGuid; use crate::core::rpc_continuations::RequestGuid;
use crate::db::model::package::{ use crate::db::model::package::{
InstalledState, PackageDataEntry, PackageState, PackageStateMatchModelRef, UpdatingState, InstalledState, PackageDataEntry, PackageState, PackageStateMatchModelRef, UpdatingState,
@@ -32,6 +31,10 @@ use crate::util::actor::concurrent::ConcurrentActor;
use crate::util::actor::Actor; use crate::util::actor::Actor;
use crate::util::serde::Pem; use crate::util::serde::Pem;
use crate::volume::data_dir; use crate::volume::data_dir;
use crate::{
context::{CliContext, RpcContext},
lxc::ContainerId,
};
mod action; mod action;
pub mod cli; pub mod cli;
@@ -193,8 +196,8 @@ impl Service {
|db| { |db| {
db.as_public_mut() db.as_public_mut()
.as_package_data_mut() .as_package_data_mut()
.as_idx_mut(&id) .as_idx_mut(id)
.or_not_found(&id)? .or_not_found(id)?
.as_state_info_mut() .as_state_info_mut()
.map_mutate(|s| { .map_mutate(|s| {
if let PackageState::Updating(UpdatingState { if let PackageState::Updating(UpdatingState {
@@ -223,16 +226,12 @@ impl Service {
tracing::debug!("{e:?}") tracing::debug!("{e:?}")
}) })
{ {
if service match service.uninstall(None).await {
.uninstall(None) Err(e) => {
.await
.map_err(|e| {
tracing::error!("Error uninstalling service: {e}"); tracing::error!("Error uninstalling service: {e}");
tracing::debug!("{e:?}") tracing::debug!("{e:?}")
}) }
.is_ok() Ok(()) => return Ok(None),
{
return Ok(None);
} }
} }
} }
@@ -299,10 +298,10 @@ impl Service {
} }
pub async fn restore( pub async fn restore(
ctx: RpcContext, _ctx: RpcContext,
s9pk: S9pk, _s9pk: S9pk,
guard: impl GenericMountGuard, _guard: impl GenericMountGuard,
progress: Option<InstallProgressHandles>, _progress: Option<InstallProgressHandles>,
) -> Result<Self, Error> { ) -> Result<Self, Error> {
// TODO // TODO
Err(Error::new(eyre!("not yet implemented"), ErrorKind::Unknown)) Err(Error::new(eyre!("not yet implemented"), ErrorKind::Unknown))
@@ -341,21 +340,36 @@ impl Service {
.execute(ProcedureName::Uninit, to_value(&target_version)?, None) // TODO timeout .execute(ProcedureName::Uninit, to_value(&target_version)?, None) // TODO timeout
.await?; .await?;
let id = self.seed.persistent_container.s9pk.as_manifest().id.clone(); let id = self.seed.persistent_container.s9pk.as_manifest().id.clone();
self.seed let ctx = self.seed.ctx.clone();
.ctx self.shutdown().await?;
.db if target_version.is_none() {
.mutate(|d| d.as_public_mut().as_package_data_mut().remove(&id)) ctx.db
.await?; .mutate(|d| d.as_public_mut().as_package_data_mut().remove(&id))
self.shutdown().await .await?;
}
Ok(())
} }
pub async fn backup(&self, _guard: impl GenericMountGuard) -> Result<BackupReturn, Error> { pub async fn backup(&self, _guard: impl GenericMountGuard) -> Result<BackupReturn, Error> {
// TODO // TODO
Err(Error::new(eyre!("not yet implemented"), ErrorKind::Unknown)) Err(Error::new(eyre!("not yet implemented"), ErrorKind::Unknown))
} }
pub fn container_id(&self) -> Result<ContainerId, Error> {
let id = &self.seed.id;
let container_id = (*self
.seed
.persistent_container
.lxc_container
.get()
.or_not_found(format!("container for {id}"))?
.guid)
.clone();
Ok(container_id)
}
} }
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
struct RunningStatus { pub struct RunningStatus {
health: OrdMap<HealthCheckId, HealthCheckResult>, health: OrdMap<HealthCheckId, HealthCheckResult>,
started: DateTime<Utc>, started: DateTime<Utc>,
} }

View File

@@ -10,7 +10,7 @@ use imbl_value::InternedString;
use models::{ProcedureName, VolumeId}; use models::{ProcedureName, VolumeId};
use rpc_toolkit::{Empty, Server, ShutdownHandle}; use rpc_toolkit::{Empty, Server, ShutdownHandle};
use serde::de::DeserializeOwned; use serde::de::DeserializeOwned;
use tokio::fs::File; use tokio::fs::{create_dir_all, File};
use tokio::process::Command; use tokio::process::Command;
use tokio::sync::{oneshot, watch, Mutex, OnceCell}; use tokio::sync::{oneshot, watch, Mutex, OnceCell};
use tracing::instrument; use tracing::instrument;
@@ -39,8 +39,6 @@ use crate::ARCH;
const RPC_CONNECT_TIMEOUT: Duration = Duration::from_secs(10); const RPC_CONNECT_TIMEOUT: Duration = Duration::from_secs(10);
struct ProcedureId(u64);
#[derive(Debug)] #[derive(Debug)]
pub struct ServiceState { pub struct ServiceState {
// This contains the start time and health check information for when the service is running. Note: Will be overwritting to the db, // This contains the start time and health check information for when the service is running. Note: Will be overwritting to the db,
@@ -99,7 +97,17 @@ pub struct PersistentContainer {
impl PersistentContainer { impl PersistentContainer {
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn new(ctx: &RpcContext, s9pk: S9pk, start: StartStop) -> Result<Self, Error> { pub async fn new(ctx: &RpcContext, s9pk: S9pk, start: StartStop) -> Result<Self, Error> {
let lxc_container = ctx.lxc_manager.create(LxcConfig::default()).await?; let lxc_container = ctx
.lxc_manager
.create(
Some(
&ctx.datadir
.join("package-data/logs")
.join(&s9pk.as_manifest().id),
),
LxcConfig::default(),
)
.await?;
let rpc_client = lxc_container.connect_rpc(Some(RPC_CONNECT_TIMEOUT)).await?; let rpc_client = lxc_container.connect_rpc(Some(RPC_CONNECT_TIMEOUT)).await?;
let js_mount = MountGuard::mount( let js_mount = MountGuard::mount(
&LoopDev::from( &LoopDev::from(
@@ -114,6 +122,7 @@ impl PersistentContainer {
ReadOnly, ReadOnly,
) )
.await?; .await?;
let mut volumes = BTreeMap::new(); let mut volumes = BTreeMap::new();
for volume in &s9pk.as_manifest().volumes { for volume in &s9pk.as_manifest().volumes {
let mountpoint = lxc_container let mountpoint = lxc_container
@@ -175,7 +184,7 @@ impl PersistentContainer {
if let Some(env) = s9pk if let Some(env) = s9pk
.as_archive() .as_archive()
.contents() .contents()
.get_path(Path::new("images").join(&*ARCH).join(&env_filename)) .get_path(Path::new("images").join(*ARCH).join(&env_filename))
.and_then(|e| e.as_file()) .and_then(|e| e.as_file())
{ {
env.copy(&mut File::create(image_path.join(&env_filename)).await?) env.copy(&mut File::create(image_path.join(&env_filename)).await?)
@@ -185,7 +194,7 @@ impl PersistentContainer {
if let Some(json) = s9pk if let Some(json) = s9pk
.as_archive() .as_archive()
.contents() .contents()
.get_path(Path::new("images").join(&*ARCH).join(&json_filename)) .get_path(Path::new("images").join(*ARCH).join(&json_filename))
.and_then(|e| e.as_file()) .and_then(|e| e.as_file())
{ {
json.copy(&mut File::create(image_path.join(&json_filename)).await?) json.copy(&mut File::create(image_path.join(&json_filename)).await?)
@@ -231,7 +240,7 @@ impl PersistentContainer {
.join(HOST_RPC_SERVER_SOCKET); .join(HOST_RPC_SERVER_SOCKET);
let (send, recv) = oneshot::channel(); let (send, recv) = oneshot::channel();
let handle = NonDetachingJoinHandle::from(tokio::spawn(async move { let handle = NonDetachingJoinHandle::from(tokio::spawn(async move {
let (shutdown, fut) = match async { let chown_status = async {
let res = server.run_unix(&path, |err| { let res = server.run_unix(&path, |err| {
tracing::error!("error on unix socket {}: {err}", path.display()) tracing::error!("error on unix socket {}: {err}", path.display())
})?; })?;
@@ -241,9 +250,8 @@ impl PersistentContainer {
.invoke(ErrorKind::Filesystem) .invoke(ErrorKind::Filesystem)
.await?; .await?;
Ok::<_, Error>(res) Ok::<_, Error>(res)
} };
.await let (shutdown, fut) = match chown_status.await {
{
Ok((shutdown, fut)) => (Ok(shutdown), Some(fut)), Ok((shutdown, fut)) => (Ok(shutdown), Some(fut)),
Err(e) => (Err(e), None), Err(e) => (Err(e), None),
}; };

View File

@@ -32,9 +32,9 @@ use crate::util::serde::Pem;
pub type DownloadInstallFuture = BoxFuture<'static, Result<InstallFuture, Error>>; pub type DownloadInstallFuture = BoxFuture<'static, Result<InstallFuture, Error>>;
pub type InstallFuture = BoxFuture<'static, Result<(), Error>>; pub type InstallFuture = BoxFuture<'static, Result<(), Error>>;
pub(super) struct InstallProgressHandles { pub struct InstallProgressHandles {
pub(super) finalization_progress: PhaseProgressTrackerHandle, pub finalization_progress: PhaseProgressTrackerHandle,
pub(super) progress_handle: FullProgressTrackerHandle, pub progress_handle: FullProgressTrackerHandle,
} }
/// This is the structure to contain all the services /// This is the structure to contain all the services