chore: Get the tracing in

This commit is contained in:
Justin Miller
2021-10-10 20:41:09 -06:00
committed by Aiden McClelland
parent 81164f974f
commit c3ac27865d
27 changed files with 226 additions and 227 deletions

1
appmgr/Cargo.lock generated
View File

@@ -891,6 +891,7 @@ dependencies = [
 "toml",
 "torut",
 "tracing",
+"tracing-error",
 "tracing-futures",
 "tracing-subscriber",
 "typed-builder",

View File

@@ -118,6 +118,7 @@ tokio-tungstenite = "0.14.0"
 tokio-util = { version = "0.6.8", features = ["io"] }
 torut = "0.2.0"
 tracing = "0.1"
+tracing-error = "0.1"
 tracing-subscriber = "0.2"
 tracing-futures="0.2"
 typed-builder = "0.9.1"
@@ -126,3 +127,6 @@ url = { version = "2.2.2", features = ["serde"] }
 [dependencies.serde_with]
 version = "1.10.0"
 features = [ "macros", "json" ]
+[profile.dev.package.backtrace]
+opt-level = 3

View File

@@ -68,15 +68,13 @@ impl DockerAction {
 };
 cmd.stdout(std::process::Stdio::piped());
 cmd.stderr(std::process::Stdio::piped());
-if log::log_enabled!(log::Level::Trace) {
-    log::trace!(
-        "{}",
-        format!("{:?}", cmd)
-            .split(r#"" ""#)
-            .collect::<Vec<&str>>()
-            .join(" ")
-    );
-}
+tracing::trace!(
+    "{}",
+    format!("{:?}", cmd)
+        .split(r#"" ""#)
+        .collect::<Vec<&str>>()
+        .join(" ")
+);
 let mut handle = cmd.spawn().with_kind(crate::ErrorKind::Docker)?;
if let (Some(input), Some(stdin)) = (&input_buf, &mut handle.stdin) { if let (Some(input), Some(stdin)) = (&input_buf, &mut handle.stdin) {
use tokio::io::AsyncWriteExt; use tokio::io::AsyncWriteExt;
@@ -94,7 +92,7 @@ impl DockerAction {
match format.from_slice(&res.stdout) { match format.from_slice(&res.stdout) {
Ok(a) => a, Ok(a) => a,
Err(e) => { Err(e) => {
log::warn!( tracing::warn!(
"Failed to deserialize stdout from {}: {}, falling back to UTF-8 string.", "Failed to deserialize stdout from {}: {}, falling back to UTF-8 string.",
format, format,
e e
@@ -156,7 +154,7 @@ impl DockerAction {
match format.from_slice(&res.stdout) { match format.from_slice(&res.stdout) {
Ok(a) => a, Ok(a) => a,
Err(e) => { Err(e) => {
log::warn!( tracing::warn!(
"Failed to deserialize stdout from {}: {}, falling back to UTF-8 string.", "Failed to deserialize stdout from {}: {}, falling back to UTF-8 string.",
format, format,
e e

View File

@@ -1,9 +1,11 @@
use clap::Arg; use clap::Arg;
use embassy::context::CliContext; use embassy::context::CliContext;
use embassy::util::logger::EmbassyLogger;
use embassy::Error; use embassy::Error;
use rpc_toolkit::run_cli; use rpc_toolkit::run_cli;
use rpc_toolkit::yajrc::RpcError; use rpc_toolkit::yajrc::RpcError;
use serde_json::Value; use serde_json::Value;
use tracing::metadata::LevelFilter;
fn inner_main() -> Result<(), Error> { fn inner_main() -> Result<(), Error> {
run_cli!({ run_cli!({
@@ -25,14 +27,20 @@ fn inner_main() -> Result<(), Error> {
.arg(Arg::with_name("host").long("host").short("h").takes_value(true)) .arg(Arg::with_name("host").long("host").short("h").takes_value(true))
.arg(Arg::with_name("proxy").long("proxy").short("p").takes_value(true)), .arg(Arg::with_name("proxy").long("proxy").short("p").takes_value(true)),
context: matches => { context: matches => {
simple_logging::log_to_stderr(match matches.occurrences_of("verbosity") { EmbassyLogger::init(
0 => log::LevelFilter::Off, match matches.occurrences_of("verbosity") {
1 => log::LevelFilter::Error, 0 => LevelFilter::OFF,
2 => log::LevelFilter::Warn, 1 => LevelFilter::ERROR,
3 => log::LevelFilter::Info, 2 => LevelFilter::WARN,
4 => log::LevelFilter::Debug, 3 => LevelFilter::INFO,
_ => log::LevelFilter::Trace, 4 => LevelFilter::DEBUG,
}); _ => LevelFilter::TRACE,
},
Default::default(),
None,
false,
Default::default(),
);
CliContext::init(matches)? CliContext::init(matches)?
}, },
exit: |e: RpcError| { exit: |e: RpcError| {
@@ -56,7 +64,7 @@ fn main() {
Ok(_) => (), Ok(_) => (),
Err(e) => { Err(e) => {
eprintln!("{}", e.source); eprintln!("{}", e.source);
log::debug!("{:?}", e.source); tracing::debug!("{:?}", e.source);
drop(e.source); drop(e.source);
std::process::exit(e.kind as i32) std::process::exit(e.kind as i32)
} }

View File

@@ -1,5 +1,6 @@
use std::path::Path; use std::path::Path;
use color_eyre::eyre::eyre;
use embassy::context::rpc::RpcContextConfig; use embassy::context::rpc::RpcContextConfig;
use embassy::context::{DiagnosticContext, SetupContext}; use embassy::context::{DiagnosticContext, SetupContext};
use embassy::db::model::ServerStatus; use embassy::db::model::ServerStatus;
@@ -11,17 +12,20 @@ use embassy::middleware::encrypt::encrypt;
#[cfg(feature = "avahi")] #[cfg(feature = "avahi")]
use embassy::net::mdns::MdnsController; use embassy::net::mdns::MdnsController;
use embassy::sound::MARIO_COIN; use embassy::sound::MARIO_COIN;
use embassy::util::logger::EmbassyLogger;
use embassy::util::{Invoke, Version}; use embassy::util::{Invoke, Version};
use embassy::{Error, ResultExt}; use embassy::{Error, ResultExt};
use http::StatusCode; use http::StatusCode;
use rpc_toolkit::rpc_server; use rpc_toolkit::rpc_server;
use tokio::process::Command; use tokio::process::Command;
use tracing::metadata::LevelFilter;
fn status_fn(_: i32) -> StatusCode { fn status_fn(_: i32) -> StatusCode {
StatusCode::OK StatusCode::OK
} }
async fn init(cfg_path: Option<&str>) -> Result<(), Error> { async fn init(cfg_path: Option<&str>) -> Result<(), Error> {
// return Err(eyre!("Test failure").with_kind(embassy::ErrorKind::Unknown));
let cfg = RpcContextConfig::load(cfg_path).await?; let cfg = RpcContextConfig::load(cfg_path).await?;
embassy::disk::util::mount("LABEL=EMBASSY", "/embassy-os").await?; embassy::disk::util::mount("LABEL=EMBASSY", "/embassy-os").await?;
if tokio::fs::metadata("/embassy-os/disk.guid").await.is_err() { if tokio::fs::metadata("/embassy-os/disk.guid").await.is_err() {
@@ -82,7 +86,7 @@ async fn init(cfg_path: Option<&str>) -> Result<(), Error> {
DEFAULT_PASSWORD, DEFAULT_PASSWORD,
) )
.await?; .await?;
log::info!("Loaded Disk"); tracing::info!("Loaded Disk");
let secret_store = cfg.secret_store().await?; let secret_store = cfg.secret_store().await?;
let log_dir = cfg.datadir().join("main").join("logs"); let log_dir = cfg.datadir().join("main").join("logs");
if tokio::fs::metadata(&log_dir).await.is_err() { if tokio::fs::metadata(&log_dir).await.is_err() {
@@ -94,7 +98,7 @@ async fn init(cfg_path: Option<&str>) -> Result<(), Error> {
.arg("systemd-journald") .arg("systemd-journald")
.invoke(embassy::ErrorKind::Journald) .invoke(embassy::ErrorKind::Journald)
.await?; .await?;
log::info!("Mounted Logs"); tracing::info!("Mounted Logs");
let tmp_docker = cfg.datadir().join("tmp").join("docker"); let tmp_docker = cfg.datadir().join("tmp").join("docker");
if tokio::fs::metadata(&tmp_docker).await.is_ok() { if tokio::fs::metadata(&tmp_docker).await.is_ok() {
tokio::fs::remove_dir_all(&tmp_docker).await?; tokio::fs::remove_dir_all(&tmp_docker).await?;
@@ -121,14 +125,14 @@ async fn init(cfg_path: Option<&str>) -> Result<(), Error> {
.arg("docker") .arg("docker")
.invoke(embassy::ErrorKind::Docker) .invoke(embassy::ErrorKind::Docker)
.await?; .await?;
log::info!("Mounted Docker Data"); tracing::info!("Mounted Docker Data");
embassy::install::load_images(cfg.datadir()).await?; embassy::install::load_images(cfg.datadir()).await?;
log::info!("Loaded Docker Images"); tracing::info!("Loaded Docker Images");
embassy::ssh::sync_keys_from_db(&secret_store, "/root/.ssh/authorized_keys").await?; embassy::ssh::sync_keys_from_db(&secret_store, "/root/.ssh/authorized_keys").await?;
log::info!("Synced SSH Keys"); tracing::info!("Synced SSH Keys");
// todo!("sync wifi"); // todo!("sync wifi");
embassy::hostname::sync_hostname().await?; embassy::hostname::sync_hostname().await?;
log::info!("Synced Hostname"); tracing::info!("Synced Hostname");
if tokio::fs::metadata("/var/www/html/main/public") if tokio::fs::metadata("/var/www/html/main/public")
.await .await
@@ -146,7 +150,7 @@ async fn init(cfg_path: Option<&str>) -> Result<(), Error> {
) )
.await?; .await?;
} }
log::info!("Enabled nginx public dir"); tracing::info!("Enabled nginx public dir");
embassy::net::wifi::synchronize_wpa_supplicant_conf(&cfg.datadir().join("main")).await?; embassy::net::wifi::synchronize_wpa_supplicant_conf(&cfg.datadir().join("main")).await?;
let db = cfg.db(&secret_store).await?; let db = cfg.db(&secret_store).await?;
@@ -177,10 +181,10 @@ async fn run_script_if_exists<P: AsRef<Path>>(path: P) {
match Command::new("/bin/bash").arg(script).spawn() { match Command::new("/bin/bash").arg(script).spawn() {
Ok(mut c) => { Ok(mut c) => {
if let Err(e) = c.wait().await { if let Err(e) = c.wait().await {
log::error!("Error Running {}: {}", script.display(), e) tracing::error!("Error Running {}: {}", script.display(), e)
} }
} }
Err(e) => log::error!("Error Running {}: {}", script.display(), e), Err(e) => tracing::error!("Error Running {}: {}", script.display(), e),
} }
} }
} }
@@ -192,8 +196,8 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<(), Error> {
let res = if let Err(e) = init(cfg_path).await { let res = if let Err(e) = init(cfg_path).await {
(|| async { (|| async {
log::error!("{}", e.source); tracing::error!("{}", e.source);
log::debug!("{}", e.source); tracing::debug!("{}", e.source);
embassy::sound::BEETHOVEN.play().await?; embassy::sound::BEETHOVEN.play().await?;
#[cfg(feature = "avahi")] #[cfg(feature = "avahi")]
let _mdns = MdnsController::init(); let _mdns = MdnsController::init();
@@ -259,14 +263,29 @@ fn main() {
) )
.get_matches(); .get_matches();
simple_logging::log_to_stderr(match matches.occurrences_of("verbosity") { // simple_logging::log_to_stderr(match matches.occurrences_of("verbosity") {
0 => log::LevelFilter::Off, // 0 => LevelFilter::OFF,
1 => log::LevelFilter::Error, // 1 => LevelFilter::ERROR,
2 => log::LevelFilter::Warn, // 2 => LevelFilter::WARN,
3 => log::LevelFilter::Info, // 3 => LevelFilter::INFO,
4 => log::LevelFilter::Debug, // 4 => LevelFilter::DEBUG,
_ => log::LevelFilter::Trace, // _ => LevelFilter::TRACE,
}); // });
EmbassyLogger::init(
match matches.occurrences_of("verbosity") {
0 => LevelFilter::OFF,
1 => LevelFilter::ERROR,
2 => LevelFilter::WARN,
3 => LevelFilter::INFO,
4 => LevelFilter::DEBUG,
_ => LevelFilter::TRACE,
},
Default::default(),
None,
false,
Default::default(),
);
let cfg_path = matches.value_of("config"); let cfg_path = matches.value_of("config");
let res = { let res = {
let rt = tokio::runtime::Builder::new_multi_thread() let rt = tokio::runtime::Builder::new_multi_thread()
@@ -280,7 +299,7 @@ fn main() {
Ok(_) => (), Ok(_) => (),
Err(e) => { Err(e) => {
eprintln!("{}", e.source); eprintln!("{}", e.source);
log::debug!("{:?}", e.source); tracing::debug!("{:?}", e.source);
drop(e.source); drop(e.source);
std::process::exit(e.kind as i32) std::process::exit(e.kind as i32)
} }

View File

@@ -23,12 +23,12 @@ fn inner_main() -> Result<(), Error> {
), ),
context: matches => { context: matches => {
simple_logging::log_to_stderr(match matches.occurrences_of("verbosity") { simple_logging::log_to_stderr(match matches.occurrences_of("verbosity") {
0 => log::LevelFilter::Off, 0 => tracing::LevelFilter::Off,
1 => log::LevelFilter::Error, 1 => tracing::LevelFilter::Error,
2 => log::LevelFilter::Warn, 2 => tracing::LevelFilter::Warn,
3 => log::LevelFilter::Info, 3 => tracing::LevelFilter::Info,
4 => log::LevelFilter::Debug, 4 => tracing::LevelFilter::Debug,
_ => log::LevelFilter::Trace, _ => tracing::LevelFilter::Trace,
}); });
SdkContext::init(matches)? SdkContext::init(matches)?
}, },
@@ -52,7 +52,7 @@ fn main() {
Ok(_) => (), Ok(_) => (),
Err(e) => { Err(e) => {
eprintln!("{}", e.source); eprintln!("{}", e.source);
log::debug!("{:?}", e.source); tracing::debug!("{:?}", e.source);
drop(e.source); drop(e.source);
std::process::exit(e.kind as i32) std::process::exit(e.kind as i32)
} }

View File

@@ -1,7 +1,7 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::time::Duration; use std::time::Duration;
use color_eyre::eyre::eyre; use color_eyre::eyre::{self, eyre};
use embassy::context::{DiagnosticContext, RpcContext}; use embassy::context::{DiagnosticContext, RpcContext};
use embassy::db::subscribe; use embassy::db::subscribe;
use embassy::middleware::auth::auth; use embassy::middleware::auth::auth;
@@ -15,25 +15,27 @@ use embassy::status::{check_all, synchronize_all};
use embassy::util::{daemon, Invoke}; use embassy::util::{daemon, Invoke};
use embassy::{Error, ErrorKind, ResultExt}; use embassy::{Error, ErrorKind, ResultExt};
use futures::{FutureExt, TryFutureExt}; use futures::{FutureExt, TryFutureExt};
use log::LevelFilter;
use reqwest::{Client, Proxy}; use reqwest::{Client, Proxy};
use rpc_toolkit::hyper::{Body, Response, Server, StatusCode}; use rpc_toolkit::hyper::{Body, Response, Server, StatusCode};
use rpc_toolkit::rpc_server; use rpc_toolkit::rpc_server;
use tokio::process::Command; use tokio::process::Command;
use tokio::signal::unix::signal; use tokio::signal::unix::signal;
use tracing::instrument;
use tracing::metadata::LevelFilter;
fn status_fn(_: i32) -> StatusCode { fn status_fn(_: i32) -> StatusCode {
StatusCode::OK StatusCode::OK
} }
fn err_to_500(e: Error) -> Response<Body> { fn err_to_500(e: Error) -> Response<Body> {
log::error!("{}", e); tracing::error!("{}", e);
Response::builder() Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR) .status(StatusCode::INTERNAL_SERVER_ERROR)
.body(Body::empty()) .body(Body::empty())
.unwrap() .unwrap()
} }
#[instrument]
async fn inner_main( async fn inner_main(
cfg_path: Option<&str>, cfg_path: Option<&str>,
log_level: LevelFilter, log_level: LevelFilter,
@@ -165,10 +167,10 @@ async fn inner_main(
let ctx = status_ctx.clone(); let ctx = status_ctx.clone();
async move { async move {
if let Err(e) = synchronize_all(&ctx).await { if let Err(e) = synchronize_all(&ctx).await {
log::error!("Error in Status Sync daemon: {}", e); tracing::error!("Error in Status Sync daemon: {}", e);
log::debug!("{:?}", e); tracing::debug!("{:?}", e);
} else { } else {
log::trace!("Status Sync completed successfully"); tracing::trace!("Status Sync completed successfully");
} }
} }
}, },
@@ -181,10 +183,10 @@ async fn inner_main(
let ctx = health_ctx.clone(); let ctx = health_ctx.clone();
async move { async move {
if let Err(e) = check_all(&ctx).await { if let Err(e) = check_all(&ctx).await {
log::error!("Error in Health Check daemon: {}", e); tracing::error!("Error in Health Check daemon: {}", e);
log::debug!("{:?}", e); tracing::debug!("{:?}", e);
} else { } else {
log::trace!("Health Check completed successfully"); tracing::trace!("Health Check completed successfully");
} }
} }
}, },
@@ -270,11 +272,11 @@ fn main() {
// initializes the bootstrap logger, this will be replaced with the EmbassyLogger later // initializes the bootstrap logger, this will be replaced with the EmbassyLogger later
let filter = match matches.occurrences_of("verbosity") { let filter = match matches.occurrences_of("verbosity") {
0 => log::LevelFilter::Error, 0 => LevelFilter::ERROR,
1 => log::LevelFilter::Warn, 1 => LevelFilter::WARN,
2 => log::LevelFilter::Info, 2 => LevelFilter::INFO,
3 => log::LevelFilter::Debug, 3 => LevelFilter::DEBUG,
_ => log::LevelFilter::Trace, _ => LevelFilter::TRACE,
}; };
let module_logging = matches let module_logging = matches
.values_of("log-module") .values_of("log-module")
@@ -301,8 +303,8 @@ fn main() {
Ok(a) => Ok(a), Ok(a) => Ok(a),
Err(e) => { Err(e) => {
(|| async { (|| async {
log::error!("{}", e.source); tracing::error!("{:?}", e.source);
log::debug!("{}", e.source); tracing::debug!("{:?}", e.source);
embassy::sound::BEETHOVEN.play().await?; embassy::sound::BEETHOVEN.play().await?;
#[cfg(feature = "avahi")] #[cfg(feature = "avahi")]
let _mdns = MdnsController::init(); let _mdns = MdnsController::init();
@@ -353,7 +355,7 @@ fn main() {
Ok(Some(s)) => s.execute(), Ok(Some(s)) => s.execute(),
Err(e) => { Err(e) => {
eprintln!("{}", e.source); eprintln!("{}", e.source);
log::debug!("{:?}", e.source); tracing::debug!("{:?}", e.source);
drop(e.source); drop(e.source);
std::process::exit(e.kind as i32) std::process::exit(e.kind as i32)
} }

View File

@@ -403,7 +403,7 @@ pub fn configure<'a, Db: DbHandle>(
.into_iter() .into_iter()
.filter(|(dep_id, _)| { .filter(|(dep_id, _)| {
if dep_id != id && !manifest.dependencies.0.contains_key(dep_id) { if dep_id != id && !manifest.dependencies.0.contains_key(dep_id) {
log::warn!("Illegal dependency specified: {}", dep_id); tracing::warn!("Illegal dependency specified: {}", dep_id);
false false
} else { } else {
true true

View File

@@ -8,7 +8,6 @@ use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use bollard::Docker; use bollard::Docker;
use log::LevelFilter;
use patch_db::json_ptr::JsonPointer; use patch_db::json_ptr::JsonPointer;
use patch_db::{PatchDb, Revision}; use patch_db::{PatchDb, Revision};
use reqwest::Url; use reqwest::Url;
@@ -20,6 +19,8 @@ use sqlx::SqlitePool;
use tokio::fs::File; use tokio::fs::File;
use tokio::sync::broadcast::Sender; use tokio::sync::broadcast::Sender;
use tokio::sync::RwLock; use tokio::sync::RwLock;
use tracing::instrument;
use tracing::metadata::LevelFilter;
use crate::db::model::Database; use crate::db::model::Database;
use crate::hostname::{get_hostname, get_id}; use crate::hostname::{get_hostname, get_id};
@@ -92,6 +93,7 @@ impl RpcContextConfig {
} }
Ok(db) Ok(db)
} }
#[instrument]
pub async fn secret_store(&self) -> Result<SqlitePool, Error> { pub async fn secret_store(&self) -> Result<SqlitePool, Error> {
let secret_store = SqlitePool::connect_with( let secret_store = SqlitePool::connect_with(
SqliteConnectOptions::new() SqliteConnectOptions::new()

View File

@@ -126,7 +126,7 @@ async fn ws_handler<
} }
Some(Message::Close(frame)) => { Some(Message::Close(frame)) => {
if let Some(reason) = frame.as_ref() { if let Some(reason) = frame.as_ref() {
log::info!("Closing WebSocket: Reason: {} {}", reason.code, reason.reason); tracing::info!("Closing WebSocket: Reason: {} {}", reason.code, reason.reason);
} }
stream stream
.send(Message::Close(frame)) .send(Message::Close(frame))
@@ -155,7 +155,7 @@ pub async fn subscribe(ctx: RpcContext, req: Request<Body>) -> Result<Response<B
tokio::task::spawn(async move { tokio::task::spawn(async move {
match ws_handler(ctx, ws_fut).await { match ws_handler(ctx, ws_fut).await {
Ok(()) => (), Ok(()) => (),
Err(e) => log::error!("WebSocket Closed: {}", e), Err(e) => tracing::error!("WebSocket Closed: {}", e),
} }
}); });
} }

View File

@@ -17,9 +17,9 @@ pub fn init(#[context] ctx: SdkContext) -> Result<(), Error> {
std::fs::create_dir_all(parent) std::fs::create_dir_all(parent)
.with_ctx(|_| (crate::ErrorKind::Filesystem, parent.display().to_string()))?; .with_ctx(|_| (crate::ErrorKind::Filesystem, parent.display().to_string()))?;
} }
log::info!("Generating new developer key..."); tracing::info!("Generating new developer key...");
let keypair = Keypair::generate(&mut rand::thread_rng()); let keypair = Keypair::generate(&mut rand::thread_rng());
log::info!("Writing key to {}", ctx.developer_key_path.display()); tracing::info!("Writing key to {}", ctx.developer_key_path.display());
let mut dev_key_file = File::create(&ctx.developer_key_path)?; let mut dev_key_file = File::create(&ctx.developer_key_path)?;
dev_key_file.write_all(&keypair.to_bytes())?; dev_key_file.write_all(&keypair.to_bytes())?;
dev_key_file.sync_all()?; dev_key_file.sync_all()?;

View File

@@ -188,15 +188,17 @@ pub async fn list() -> Result<Vec<DiskInfo>, Error> {
let mut partitions = Vec::with_capacity(parts.len()); let mut partitions = Vec::with_capacity(parts.len());
let vendor = get_vendor(&disk) let vendor = get_vendor(&disk)
.await .await
.map_err(|e| log::warn!("Could not get vendor of {}: {}", disk.display(), e.source)) .map_err(|e| tracing::warn!("Could not get vendor of {}: {}", disk.display(), e.source))
.unwrap_or_default(); .unwrap_or_default();
let model = get_model(&disk) let model = get_model(&disk)
.await .await
.map_err(|e| log::warn!("Could not get model of {}: {}", disk.display(), e.source)) .map_err(|e| tracing::warn!("Could not get model of {}: {}", disk.display(), e.source))
.unwrap_or_default(); .unwrap_or_default();
let capacity = get_capacity(&disk) let capacity = get_capacity(&disk)
.await .await
.map_err(|e| log::warn!("Could not get capacity of {}: {}", disk.display(), e.source)) .map_err(|e| {
tracing::warn!("Could not get capacity of {}: {}", disk.display(), e.source)
})
.unwrap_or_default(); .unwrap_or_default();
let mut embassy_os = None; let mut embassy_os = None;
for part in parts { for part in parts {
@@ -204,7 +206,7 @@ pub async fn list() -> Result<Vec<DiskInfo>, Error> {
let capacity = get_capacity(&part) let capacity = get_capacity(&part)
.await .await
.map_err(|e| { .map_err(|e| {
log::warn!("Could not get capacity of {}: {}", part.display(), e.source) tracing::warn!("Could not get capacity of {}: {}", part.display(), e.source)
}) })
.unwrap_or_default(); .unwrap_or_default();
let mut used = None; let mut used = None;
@@ -212,7 +214,7 @@ pub async fn list() -> Result<Vec<DiskInfo>, Error> {
let tmp_mountpoint = let tmp_mountpoint =
Path::new(TMP_MOUNTPOINT).join(&part.strip_prefix("/").unwrap_or(&part)); Path::new(TMP_MOUNTPOINT).join(&part.strip_prefix("/").unwrap_or(&part));
if let Err(e) = mount(&part, &tmp_mountpoint).await { if let Err(e) = mount(&part, &tmp_mountpoint).await {
log::warn!("Could not collect usage information: {}", e.source) tracing::warn!("Could not collect usage information: {}", e.source)
} else { } else {
let mount_guard = GeneralGuard::new(|| { let mount_guard = GeneralGuard::new(|| {
let path = tmp_mountpoint.clone(); let path = tmp_mountpoint.clone();
@@ -221,7 +223,7 @@ pub async fn list() -> Result<Vec<DiskInfo>, Error> {
used = get_used(&tmp_mountpoint) used = get_used(&tmp_mountpoint)
.await .await
.map_err(|e| { .map_err(|e| {
log::warn!("Could not get usage of {}: {}", part.display(), e.source) tracing::warn!("Could not get usage of {}: {}", part.display(), e.source)
}) })
.ok(); .ok();
if label.as_deref() == Some("rootfs") { if label.as_deref() == Some("rootfs") {
@@ -323,7 +325,7 @@ pub async fn bind<P0: AsRef<Path>, P1: AsRef<Path>>(
dst: P1, dst: P1,
read_only: bool, read_only: bool,
) -> Result<(), Error> { ) -> Result<(), Error> {
log::info!( tracing::info!(
"Binding {} to {}", "Binding {} to {}",
src.as_ref().display(), src.as_ref().display(),
dst.as_ref().display() dst.as_ref().display()
@@ -362,7 +364,7 @@ pub async fn bind<P0: AsRef<Path>, P1: AsRef<Path>>(
} }
pub async fn unmount<P: AsRef<Path>>(mount_point: P) -> Result<(), Error> { pub async fn unmount<P: AsRef<Path>>(mount_point: P) -> Result<(), Error> {
log::info!("Unmounting {}.", mount_point.as_ref().display()); tracing::info!("Unmounting {}.", mount_point.as_ref().display());
let umount_output = tokio::process::Command::new("umount") let umount_output = tokio::process::Command::new("umount")
.arg(mount_point.as_ref()) .arg(mount_point.as_ref())
.output() .output()

View File

@@ -1,6 +1,6 @@
use std::fmt::Display; use std::fmt::Display;
use color_eyre::eyre::eyre; use color_eyre::eyre::{eyre, ErrReport};
use patch_db::Revision; use patch_db::Revision;
use rpc_toolkit::yajrc::RpcError; use rpc_toolkit::yajrc::RpcError;
@@ -225,7 +225,7 @@ impl From<Error> for RpcError {
match serde_json::to_value(&e.revision) { match serde_json::to_value(&e.revision) {
Ok(a) => a, Ok(a) => a,
Err(e) => { Err(e) => {
log::warn!("Error serializing revision for Error object: {}", e); tracing::warn!("Error serializing revision for Error object: {}", e);
serde_json::Value::Null serde_json::Value::Null
} }
}, },

View File

@@ -127,7 +127,7 @@ pub async fn cleanup_failed<Db: DbHandle>(
} }
} }
_ => { _ => {
log::warn!("{}: Nothing to clean up!", id); tracing::warn!("{}: Nothing to clean up!", id);
false false
} }
} { } {

View File

@@ -113,7 +113,7 @@ pub async fn install(
tokio::spawn(async move { tokio::spawn(async move {
if let Err(e) = download_install_s9pk(&ctx, &man, s9pk).await { if let Err(e) = download_install_s9pk(&ctx, &man, s9pk).await {
log::error!("Install of {}@{} Failed: {}", man.id, man.version, e); tracing::error!("Install of {}@{} Failed: {}", man.id, man.version, e);
} }
}); });
@@ -180,7 +180,7 @@ pub async fn uninstall_impl(ctx: RpcContext, id: PackageId) -> Result<WithRevisi
tokio::spawn(async move { tokio::spawn(async move {
if let Err(e) = cleanup::uninstall(&ctx, &mut ctx.db.handle(), &installed).await { if let Err(e) = cleanup::uninstall(&ctx, &mut ctx.db.handle(), &installed).await {
log::error!("Uninstall of {} Failed: {}", id, e); tracing::error!("Uninstall of {} Failed: {}", id, e);
} }
}); });
@@ -267,7 +267,7 @@ pub async fn download_install_s9pk(
if let Err(e) = cleanup_failed(&ctx, &mut tx, pkg_id, version).await { if let Err(e) = cleanup_failed(&ctx, &mut tx, pkg_id, version).await {
let mut tx = handle.begin().await?; let mut tx = handle.begin().await?;
log::error!( tracing::error!(
"Failed to clean up {}@{}: {}: Adding to broken packages", "Failed to clean up {}@{}: {}: Adding to broken packages",
pkg_id, pkg_id,
version, version,
@@ -303,13 +303,13 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
.idx_model(pkg_id); .idx_model(pkg_id);
let progress_model = model.clone().and_then(|m| m.install_progress()); let progress_model = model.clone().and_then(|m| m.install_progress());
log::info!("Install {}@{}: Unpacking Manifest", pkg_id, version); tracing::info!("Install {}@{}: Unpacking Manifest", pkg_id, version);
let manifest = progress let manifest = progress
.track_read_during(progress_model.clone(), &ctx.db, || rdr.manifest()) .track_read_during(progress_model.clone(), &ctx.db, || rdr.manifest())
.await?; .await?;
log::info!("Install {}@{}: Unpacked Manifest", pkg_id, version); tracing::info!("Install {}@{}: Unpacked Manifest", pkg_id, version);
log::info!("Install {}@{}: Fetching Dependency Info", pkg_id, version); tracing::info!("Install {}@{}: Fetching Dependency Info", pkg_id, version);
let mut dependency_info = BTreeMap::new(); let mut dependency_info = BTreeMap::new();
let reg_url = ctx.package_registry_url().await?; let reg_url = ctx.package_registry_url().await?;
for (dep, info) in &manifest.dependencies.0 { for (dep, info) in &manifest.dependencies.0 {
@@ -379,7 +379,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
); );
} }
} }
log::info!("Install {}@{}: Fetched Dependency Info", pkg_id, version); tracing::info!("Install {}@{}: Fetched Dependency Info", pkg_id, version);
let public_dir_path = ctx let public_dir_path = ctx
.datadir .datadir
@@ -388,7 +388,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
.join(version.as_str()); .join(version.as_str());
tokio::fs::create_dir_all(&public_dir_path).await?; tokio::fs::create_dir_all(&public_dir_path).await?;
log::info!("Install {}@{}: Unpacking LICENSE.md", pkg_id, version); tracing::info!("Install {}@{}: Unpacking LICENSE.md", pkg_id, version);
progress progress
.track_read_during(progress_model.clone(), &ctx.db, || async { .track_read_during(progress_model.clone(), &ctx.db, || async {
let license_path = public_dir_path.join("LICENSE.md"); let license_path = public_dir_path.join("LICENSE.md");
@@ -398,9 +398,9 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
Ok(()) Ok(())
}) })
.await?; .await?;
log::info!("Install {}@{}: Unpacked LICENSE.md", pkg_id, version); tracing::info!("Install {}@{}: Unpacked LICENSE.md", pkg_id, version);
log::info!("Install {}@{}: Unpacking INSTRUCTIONS.md", pkg_id, version); tracing::info!("Install {}@{}: Unpacking INSTRUCTIONS.md", pkg_id, version);
progress progress
.track_read_during(progress_model.clone(), &ctx.db, || async { .track_read_during(progress_model.clone(), &ctx.db, || async {
let instructions_path = public_dir_path.join("INSTRUCTIONS.md"); let instructions_path = public_dir_path.join("INSTRUCTIONS.md");
@@ -410,10 +410,10 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
Ok(()) Ok(())
}) })
.await?; .await?;
log::info!("Install {}@{}: Unpacked INSTRUCTIONS.md", pkg_id, version); tracing::info!("Install {}@{}: Unpacked INSTRUCTIONS.md", pkg_id, version);
let icon_path = Path::new("icon").with_extension(&manifest.assets.icon_type()); let icon_path = Path::new("icon").with_extension(&manifest.assets.icon_type());
log::info!( tracing::info!(
"Install {}@{}: Unpacking {}", "Install {}@{}: Unpacking {}",
pkg_id, pkg_id,
version, version,
@@ -428,14 +428,14 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
Ok(()) Ok(())
}) })
.await?; .await?;
log::info!( tracing::info!(
"Install {}@{}: Unpacked {}", "Install {}@{}: Unpacked {}",
pkg_id, pkg_id,
version, version,
icon_path.display() icon_path.display()
); );
log::info!("Install {}@{}: Unpacking Docker Images", pkg_id, version); tracing::info!("Install {}@{}: Unpacking Docker Images", pkg_id, version);
progress progress
.track_read_during(progress_model.clone(), &ctx.db, || async { .track_read_during(progress_model.clone(), &ctx.db, || async {
let image_tar_dir = ctx let image_tar_dir = ctx
@@ -502,9 +502,9 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
} }
}) })
.await?; .await?;
log::info!("Install {}@{}: Unpacked Docker Images", pkg_id, version,); tracing::info!("Install {}@{}: Unpacked Docker Images", pkg_id, version,);
log::info!("Install {}@{}: Unpacking Assets", pkg_id, version); tracing::info!("Install {}@{}: Unpacking Assets", pkg_id, version);
progress progress
.track_read_during(progress_model.clone(), &ctx.db, || async { .track_read_during(progress_model.clone(), &ctx.db, || async {
let asset_dir = asset_dir(&ctx.datadir, pkg_id, version); let asset_dir = asset_dir(&ctx.datadir, pkg_id, version);
@@ -517,7 +517,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
Ok(()) Ok(())
}) })
.await?; .await?;
log::info!("Install {}@{}: Unpacked Assets", pkg_id, version); tracing::info!("Install {}@{}: Unpacked Assets", pkg_id, version);
progress.unpack_complete.store(true, Ordering::SeqCst); progress.unpack_complete.store(true, Ordering::SeqCst);
@@ -531,15 +531,15 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
.lock(&mut tx, true) .lock(&mut tx, true)
.await; .await;
log::info!("Install {}@{}: Creating volumes", pkg_id, version); tracing::info!("Install {}@{}: Creating volumes", pkg_id, version);
manifest.volumes.install(ctx, pkg_id, version).await?; manifest.volumes.install(ctx, pkg_id, version).await?;
log::info!("Install {}@{}: Created volumes", pkg_id, version); tracing::info!("Install {}@{}: Created volumes", pkg_id, version);
log::info!("Install {}@{}: Installing interfaces", pkg_id, version); tracing::info!("Install {}@{}: Installing interfaces", pkg_id, version);
let interface_addresses = manifest.interfaces.install(&mut sql_tx, pkg_id).await?; let interface_addresses = manifest.interfaces.install(&mut sql_tx, pkg_id).await?;
log::info!("Install {}@{}: Installed interfaces", pkg_id, version); tracing::info!("Install {}@{}: Installed interfaces", pkg_id, version);
log::info!("Install {}@{}: Creating manager", pkg_id, version); tracing::info!("Install {}@{}: Creating manager", pkg_id, version);
ctx.managers ctx.managers
.add( .add(
ctx.clone(), ctx.clone(),
@@ -547,7 +547,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
manifest.interfaces.tor_keys(&mut sql_tx, pkg_id).await?, manifest.interfaces.tor_keys(&mut sql_tx, pkg_id).await?,
) )
.await?; .await?;
log::info!("Install {}@{}: Created manager", pkg_id, version); tracing::info!("Install {}@{}: Created manager", pkg_id, version);
let static_files = StaticFiles::local(pkg_id, version, manifest.assets.icon_type()); let static_files = StaticFiles::local(pkg_id, version, manifest.assets.icon_type());
let current_dependencies: BTreeMap<_, _> = manifest let current_dependencies: BTreeMap<_, _> = manifest
@@ -715,7 +715,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
sql_tx.commit().await?; sql_tx.commit().await?;
tx.commit(None).await?; tx.commit(None).await?;
log::info!("Install {}@{}: Complete", pkg_id, version); tracing::info!("Install {}@{}: Complete", pkg_id, version);
Ok(()) Ok(())
} }

View File

@@ -85,7 +85,7 @@ impl ManagerMap {
pub async fn remove(&self, id: &(PackageId, Version)) { pub async fn remove(&self, id: &(PackageId, Version)) {
if let Some(man) = self.0.write().await.remove(id) { if let Some(man) = self.0.write().await.remove(id) {
if let Err(e) = man.exit().await { if let Err(e) = man.exit().await {
log::error!("Error shutting down manager: {}", e); tracing::error!("Error shutting down manager: {}", e);
} }
} }
} }
@@ -312,14 +312,14 @@ impl Manager {
match res { match res {
Err(e) => { Err(e) => {
// TODO for code review: Do we return this error or just log it? // TODO for code review: Do we return this error or just log it?
log::error!("Failed to issue notification: {}", e); tracing::error!("Failed to issue notification: {}", e);
} }
Ok(()) => {} Ok(()) => {}
} }
log::error!("service crashed: {}: {}", e.0, e.1) tracing::error!("service crashed: {}: {}", e.0, e.1)
} }
Err(e) => { Err(e) => {
log::error!("failed to start service: {}", e) tracing::error!("failed to start service: {}", e)
} }
} }
} }

View File

@@ -46,7 +46,7 @@ unsafe impl Sync for MdnsControllerInner {}
impl MdnsControllerInner { impl MdnsControllerInner {
fn load_services(&mut self) { fn load_services(&mut self) {
unsafe { unsafe {
log::debug!("Loading services for mDNS"); tracing::debug!("Loading services for mDNS");
let mut res; let mut res;
let http_tcp_cstr = std::ffi::CString::new("_http._tcp") let http_tcp_cstr = std::ffi::CString::new("_http._tcp")
.expect("Could not cast _http._tcp to c string"); .expect("Could not cast _http._tcp to c string");
@@ -70,14 +70,14 @@ impl MdnsControllerInner {
); );
if res < avahi_sys::AVAHI_OK { if res < avahi_sys::AVAHI_OK {
let e_str = avahi_strerror(res); let e_str = avahi_strerror(res);
log::error!( tracing::error!(
"Could not add service to Avahi entry group: {:?}", "Could not add service to Avahi entry group: {:?}",
std::ffi::CStr::from_ptr(e_str) std::ffi::CStr::from_ptr(e_str)
); );
avahi_free(e_str as *mut c_void); avahi_free(e_str as *mut c_void);
panic!("Failed to load Avahi services"); panic!("Failed to load Avahi services");
} }
log::info!( tracing::info!(
"Published {:?}", "Published {:?}",
std::ffi::CStr::from_ptr(self.hostname_raw) std::ffi::CStr::from_ptr(self.hostname_raw)
); );
@@ -87,7 +87,7 @@ impl MdnsControllerInner {
.get_onion_address() .get_onion_address()
.get_address_without_dot_onion() .get_address_without_dot_onion()
+ ".local"; + ".local";
log::debug!("Adding mdns CNAME entry for {}", &lan_address); tracing::debug!("Adding mdns CNAME entry for {}", &lan_address);
let lan_address_ptr = std::ffi::CString::new(lan_address) let lan_address_ptr = std::ffi::CString::new(lan_address)
.expect("Could not cast lan address to c string"); .expect("Could not cast lan address to c string");
res = avahi_sys::avahi_entry_group_add_record( res = avahi_sys::avahi_entry_group_add_record(
@@ -105,7 +105,7 @@ impl MdnsControllerInner {
); );
if res < avahi_sys::AVAHI_OK { if res < avahi_sys::AVAHI_OK {
let e_str = avahi_strerror(res); let e_str = avahi_strerror(res);
log::error!( tracing::error!(
"Could not add record for {:?} to Avahi entry group: {:?}", "Could not add record for {:?} to Avahi entry group: {:?}",
lan_address_ptr, lan_address_ptr,
std::ffi::CStr::from_ptr(e_str) std::ffi::CStr::from_ptr(e_str)
@@ -113,13 +113,13 @@ impl MdnsControllerInner {
avahi_free(e_str as *mut c_void); avahi_free(e_str as *mut c_void);
panic!("Failed to load Avahi services"); panic!("Failed to load Avahi services");
} }
log::info!("Published {:?}", lan_address_ptr); tracing::info!("Published {:?}", lan_address_ptr);
} }
} }
} }
fn init() -> Self { fn init() -> Self {
unsafe { unsafe {
log::debug!("Initializing mDNS controller"); tracing::debug!("Initializing mDNS controller");
let simple_poll = avahi_sys::avahi_simple_poll_new(); let simple_poll = avahi_sys::avahi_simple_poll_new();
let poll = avahi_sys::avahi_simple_poll_get(simple_poll); let poll = avahi_sys::avahi_simple_poll_get(simple_poll);
let mut box_err = Box::pin(0 as i32); let mut box_err = Box::pin(0 as i32);
@@ -133,7 +133,7 @@ impl MdnsControllerInner {
); );
if avahi_client == std::ptr::null_mut::<AvahiClient>() { if avahi_client == std::ptr::null_mut::<AvahiClient>() {
let e_str = avahi_strerror(*box_err); let e_str = avahi_strerror(*box_err);
log::error!( tracing::error!(
"Could not create avahi client: {:?}", "Could not create avahi client: {:?}",
std::ffi::CStr::from_ptr(e_str) std::ffi::CStr::from_ptr(e_str)
); );
@@ -144,7 +144,7 @@ impl MdnsControllerInner {
avahi_sys::avahi_entry_group_new(avahi_client, Some(noop), std::ptr::null_mut()); avahi_sys::avahi_entry_group_new(avahi_client, Some(noop), std::ptr::null_mut());
if group == std::ptr::null_mut() { if group == std::ptr::null_mut() {
let e_str = avahi_strerror(avahi_client_errno(avahi_client)); let e_str = avahi_strerror(avahi_client_errno(avahi_client));
log::error!( tracing::error!(
"Could not create avahi entry group: {:?}", "Could not create avahi entry group: {:?}",
std::ffi::CStr::from_ptr(e_str) std::ffi::CStr::from_ptr(e_str)
); );

View File

@@ -233,7 +233,7 @@ impl TorControllerInner {
} }
async fn add_embassyd_onion(&mut self) -> Result<(), Error> { async fn add_embassyd_onion(&mut self) -> Result<(), Error> {
log::info!( tracing::info!(
"Registering Main Tor Service: {}", "Registering Main Tor Service: {}",
self.embassyd_tor_key.public().get_onion_address() self.embassyd_tor_key.public().get_onion_address()
); );
@@ -249,7 +249,7 @@ impl TorControllerInner {
&mut std::iter::once(&(self.embassyd_addr.port(), self.embassyd_addr)), &mut std::iter::once(&(self.embassyd_addr.port(), self.embassyd_addr)),
) )
.await?; .await?;
log::info!( tracing::info!(
"Registered Main Tor Service: {}", "Registered Main Tor Service: {}",
self.embassyd_tor_key.public().get_onion_address() self.embassyd_tor_key.public().get_onion_address()
); );
@@ -301,7 +301,7 @@ impl TorControllerInner {
} }
} }
Err(e) => { Err(e) => {
log::info!("Failed to reconnect to tor control socket: {}", e); tracing::info!("Failed to reconnect to tor control socket: {}", e);
} }
} }
tokio::time::sleep(Duration::from_secs(1)).await; tokio::time::sleep(Duration::from_secs(1)).await;
@@ -345,7 +345,7 @@ impl TorControllerInner {
} }
pub async fn tor_health_check(client: &Client, tor_controller: &TorController) { pub async fn tor_health_check(client: &Client, tor_controller: &TorController) {
log::debug!("Attempting to self-check tor address"); tracing::debug!("Attempting to self-check tor address");
let onion = tor_controller.embassyd_onion().await; let onion = tor_controller.embassyd_onion().await;
let result = client let result = client
.post(format!("http://{}/rpc/v1", onion)) .post(format!("http://{}/rpc/v1", onion))
@@ -363,24 +363,24 @@ pub async fn tor_health_check(client: &Client, tor_controller: &TorController) {
match result { match result {
// if success, do nothing // if success, do nothing
Ok(_) => { Ok(_) => {
log::debug!( tracing::debug!(
"Successfully verified main tor address liveness at {}", "Successfully verified main tor address liveness at {}",
onion onion
) )
} }
// if failure, disconnect tor control port, and restart tor controller // if failure, disconnect tor control port, and restart tor controller
Err(e) => { Err(e) => {
log::error!("Unable to reach self over tor: {}", e); tracing::error!("Unable to reach self over tor: {}", e);
loop { loop {
match tor_controller.replace().await { match tor_controller.replace().await {
Ok(restarted) => { Ok(restarted) => {
if restarted { if restarted {
log::error!("Tor has been recently restarted, refusing to restart"); tracing::error!("Tor has been recently restarted, refusing to restart");
} }
break; break;
} }
Err(e) => { Err(e) => {
log::error!("Unable to restart tor: {}", e); tracing::error!("Unable to restart tor: {}", e);
} }
} }
} }

View File

@@ -42,13 +42,13 @@ pub async fn add(
priority: isize, priority: isize,
connect: bool, connect: bool,
) -> Result<(), Error> { ) -> Result<(), Error> {
log::info!("Adding new WiFi network: '{}'", ssid); tracing::info!("Adding new WiFi network: '{}'", ssid);
wpa_supplicant.add_network(ssid, password, priority).await?; wpa_supplicant.add_network(ssid, password, priority).await?;
if connect { if connect {
let current = wpa_supplicant.get_current_network().await?; let current = wpa_supplicant.get_current_network().await?;
let connected = wpa_supplicant.select_network(ssid).await?; let connected = wpa_supplicant.select_network(ssid).await?;
if !connected { if !connected {
log::error!("Faild to add new WiFi network: '{}'", ssid); tracing::error!("Faild to add new WiFi network: '{}'", ssid);
wpa_supplicant.remove_network(ssid).await?; wpa_supplicant.remove_network(ssid).await?;
match current { match current {
None => {} None => {}
@@ -63,7 +63,7 @@ pub async fn add(
tokio::spawn(async move { tokio::spawn(async move {
match add_procedure(wpa_supplicant, &ssid, &password, priority, connect).await { match add_procedure(wpa_supplicant, &ssid, &password, priority, connect).await {
Err(e) => { Err(e) => {
log::error!("Failed to add new WiFi network '{}': {}", ssid, e); tracing::error!("Failed to add new WiFi network '{}': {}", ssid, e);
} }
Ok(_) => {} Ok(_) => {}
} }
@@ -83,12 +83,12 @@ pub async fn connect(#[arg] ssid: String) -> Result<(), Error> {
let current = wpa_supplicant.get_current_network().await?; let current = wpa_supplicant.get_current_network().await?;
let connected = wpa_supplicant.select_network(&ssid).await?; let connected = wpa_supplicant.select_network(&ssid).await?;
if connected { if connected {
log::info!("Successfully connected to WiFi: '{}'", ssid); tracing::info!("Successfully connected to WiFi: '{}'", ssid);
} else { } else {
log::error!("Failed to connect to WiFi: '{}'", ssid); tracing::error!("Failed to connect to WiFi: '{}'", ssid);
match current { match current {
None => { None => {
log::warn!("No WiFi to revert to!"); tracing::warn!("No WiFi to revert to!");
} }
Some(current) => { Some(current) => {
wpa_supplicant.select_network(&current).await?; wpa_supplicant.select_network(&current).await?;
@@ -101,7 +101,7 @@ pub async fn connect(#[arg] ssid: String) -> Result<(), Error> {
tokio::spawn(async move { tokio::spawn(async move {
match connect_procedure(wpa_supplicant, &ssid).await { match connect_procedure(wpa_supplicant, &ssid).await {
Err(e) => { Err(e) => {
log::error!("Failed to connect to WiFi network '{}': {}", &ssid, e); tracing::error!("Failed to connect to WiFi network '{}': {}", &ssid, e);
} }
Ok(_) => {} Ok(_) => {}
} }

View File

@@ -42,7 +42,7 @@ impl<
pub fn pack(mut self, key: &ed25519_dalek::Keypair) -> Result<(), Error> { pub fn pack(mut self, key: &ed25519_dalek::Keypair) -> Result<(), Error> {
let header_pos = self.writer.stream_position()?; let header_pos = self.writer.stream_position()?;
if header_pos != 0 { if header_pos != 0 {
log::warn!("Appending to non-empty file."); tracing::warn!("Appending to non-empty file.");
} }
let mut header = Header::placeholder(); let mut header = Header::placeholder();
header.serialize(&mut self.writer).with_ctx(|_| { header.serialize(&mut self.writer).with_ctx(|_| {

View File

@@ -96,11 +96,11 @@ pub async fn execute(
.await .await
{ {
Ok(a) => { Ok(a) => {
log::info!("Setup Successful! Tor Address: {}", a); tracing::info!("Setup Successful! Tor Address: {}", a);
Ok(a) Ok(a)
} }
Err(e) => { Err(e) => {
log::error!("Error Setting Up Embassy: {}", e); tracing::error!("Error Setting Up Embassy: {}", e);
Err(e) Err(e)
} }
} }
@@ -178,7 +178,7 @@ pub async fn execute_inner(
tokio::spawn(async move { tokio::spawn(async move {
if let Err(e) = recover(ctx.clone(), guid, recovery_drive, recovery_password).await { if let Err(e) = recover(ctx.clone(), guid, recovery_drive, recovery_password).await {
BEETHOVEN.play().await.unwrap_or_default(); // ignore error in playing the song BEETHOVEN.play().await.unwrap_or_default(); // ignore error in playing the song
log::error!("Error recovering drive!: {}", e); tracing::error!("Error recovering drive!: {}", e);
*ctx.recovery_status.write().await = Some(Err(e.into())); *ctx.recovery_status.write().await = Some(Err(e.into()));
} }
}); });

View File

@@ -33,7 +33,7 @@ impl Shutdown {
.invoke(crate::ErrorKind::Journald) .invoke(crate::ErrorKind::Journald)
.await .await
{ {
log::error!("Error Stopping Journald: {}", e); tracing::error!("Error Stopping Journald: {}", e);
} }
if let Err(e) = Command::new("systemctl") if let Err(e) = Command::new("systemctl")
.arg("stop") .arg("stop")
@@ -41,13 +41,13 @@ impl Shutdown {
.invoke(crate::ErrorKind::Docker) .invoke(crate::ErrorKind::Docker)
.await .await
{ {
log::error!("Error Stopping Docker: {}", e); tracing::error!("Error Stopping Docker: {}", e);
} }
if let Err(e) = export(&*self.zfs_pool).await { if let Err(e) = export(&*self.zfs_pool).await {
log::error!("Error Exporting ZFS Pool: {}", e); tracing::error!("Error Exporting ZFS Pool: {}", e);
} }
if let Err(e) = MARIO_DEATH.play().await { if let Err(e) = MARIO_DEATH.play().await {
log::error!("Error Playing Shutdown Song: {}", e); tracing::error!("Error Playing Shutdown Song: {}", e);
} }
}); });
if self.restart { if self.restart {

View File

@@ -132,7 +132,7 @@ impl Drop for SoundInterface {
let guard = self.0.take(); let guard = self.0.take();
tokio::spawn(async move { tokio::spawn(async move {
if let Err(e) = tokio::fs::write(&*UNEXPORT_FILE, "0").await { if let Err(e) = tokio::fs::write(&*UNEXPORT_FILE, "0").await {
log::error!("Failed to Unexport Sound Interface: {}", e) tracing::error!("Failed to Unexport Sound Interface: {}", e)
} }
if let Some(mut guard) = guard { if let Some(mut guard) = guard {
if let Some(lock) = guard.take() { if let Some(lock) = guard.take() {
@@ -140,7 +140,7 @@ impl Drop for SoundInterface {
.await .await
.unwrap() .unwrap()
{ {
log::error!("Failed to drop Sound Interface File Lock: {}", e.1) tracing::error!("Failed to drop Sound Interface File Lock: {}", e.1)
} }
} }
} }

View File

@@ -74,7 +74,7 @@ pub async fn synchronize_all(ctx: &RpcContext) -> Result<(), Error> {
Ok(res) Ok(res)
} }
if let Err(e) = status(ctx, id.clone()).await { if let Err(e) = status(ctx, id.clone()).await {
log::error!("Error syncronizing status of {}: {}", id, e); tracing::error!("Error syncronizing status of {}: {}", id, e);
} }
}) })
.await; .await;
@@ -162,8 +162,8 @@ pub async fn check_all(ctx: &RpcContext) -> Result<(), Error> {
async move { async move {
match main_status(ctx.clone(), status, manifest, ctx.db.handle()).await { match main_status(ctx.clone(), status, manifest, ctx.db.handle()).await {
Err(e) => { Err(e) => {
log::error!("Error running main health check for {}: {}", id, e); tracing::error!("Error running main health check for {}: {}", id, e);
log::debug!("{:?}", e); tracing::debug!("{:?}", e);
} }
Ok(status) => { Ok(status) => {
status_sender.send((id, status)).await.expect("unreachable"); status_sender.send((id, status)).await.expect("unreachable");
@@ -235,8 +235,8 @@ pub async fn check_all(ctx: &RpcContext) -> Result<(), Error> {
if let Err(e) = if let Err(e) =
dependency_status(&id, statuses, installed, deps, ctx.db.handle()).await dependency_status(&id, statuses, installed, deps, ctx.db.handle()).await
{ {
log::error!("Error running dependency health check for {}: {}", id, e); tracing::error!("Error running dependency health check for {}: {}", id, e);
log::debug!("{:?}", e); tracing::debug!("{:?}", e);
} }
} }
}) })

View File

@@ -230,7 +230,7 @@ pub async fn launch_metrics_task<F: FnMut() -> Receiver<Option<Shutdown>>>(
break; break;
} }
Err(e) => { Err(e) => {
log::error!("Could not get initial temperature: {}", e); tracing::error!("Could not get initial temperature: {}", e);
} }
} }
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
@@ -247,11 +247,11 @@ pub async fn launch_metrics_task<F: FnMut() -> Receiver<Option<Shutdown>>>(
break; break;
} }
Err(e) => { Err(e) => {
log::error!("Could not get initial cpu info: {}", e); tracing::error!("Could not get initial cpu info: {}", e);
} }
}, },
Err(e) => { Err(e) => {
log::error!("Could not get initial proc stat: {}", e); tracing::error!("Could not get initial proc stat: {}", e);
} }
} }
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
@@ -265,7 +265,7 @@ pub async fn launch_metrics_task<F: FnMut() -> Receiver<Option<Shutdown>>>(
break; break;
} }
Err(e) => { Err(e) => {
log::error!("Could not get initial mem info: {}", e); tracing::error!("Could not get initial mem info: {}", e);
} }
} }
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
@@ -279,7 +279,7 @@ pub async fn launch_metrics_task<F: FnMut() -> Receiver<Option<Shutdown>>>(
break; break;
} }
Err(e) => { Err(e) => {
log::error!("Could not get initial disk info: {}", e); tracing::error!("Could not get initial disk info: {}", e);
} }
} }
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await; tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
@@ -331,7 +331,7 @@ async fn launch_temp_task(
(*lock).as_mut().unwrap().general.temperature = a (*lock).as_mut().unwrap().general.temperature = a
} }
Err(e) => { Err(e) => {
log::error!("Could not get new temperature: {}", e); tracing::error!("Could not get new temperature: {}", e);
} }
} }
tokio::select! { tokio::select! {
@@ -354,7 +354,7 @@ async fn launch_cpu_task(
(*lock).as_mut().unwrap().cpu = info; (*lock).as_mut().unwrap().cpu = info;
} }
Err(e) => { Err(e) => {
log::error!("Could not get new CPU Metrics: {}", e); tracing::error!("Could not get new CPU Metrics: {}", e);
} }
} }
tokio::select! { tokio::select! {
@@ -376,7 +376,7 @@ async fn launch_mem_task(
(*lock).as_mut().unwrap().memory = a; (*lock).as_mut().unwrap().memory = a;
} }
Err(e) => { Err(e) => {
log::error!("Could not get new Memory Metrics: {}", e); tracing::error!("Could not get new Memory Metrics: {}", e);
} }
} }
tokio::select! { tokio::select! {
@@ -397,7 +397,7 @@ async fn launch_disk_task(
(*lock).as_mut().unwrap().disk = a; (*lock).as_mut().unwrap().disk = a;
} }
Err(e) => { Err(e) => {
log::error!("Could not get new Disk Metrics: {}", e); tracing::error!("Could not get new Disk Metrics: {}", e);
} }
} }
tokio::select! { tokio::select! {

View File

@@ -2,10 +2,9 @@ use std::collections::BTreeMap;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering}; use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::Arc; use std::sync::Arc;
use log::{set_boxed_logger, set_max_level, LevelFilter, Metadata, Record}; use reqwest::Url;
use reqwest::{Client, Url};
use sequence_trie::SequenceTrie; use sequence_trie::SequenceTrie;
use stderrlog::{StdErrLog, Timestamp}; use tracing_subscriber::filter::LevelFilter;
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct ModuleMap { pub struct ModuleMap {
@@ -68,12 +67,12 @@ impl ModuleMap {
let module_key = k.split("::"); let module_key = k.split("::");
match self.trie.get(module_key.clone()) { match self.trie.get(module_key.clone()) {
None => match self.trie.get_ancestor(module_key) { None => match self.trie.get_ancestor(module_key) {
None => &LevelFilter::Off, None => &LevelFilter::OFF,
Some((level_filter, include_submodules)) => { Some((level_filter, include_submodules)) => {
if *include_submodules { if *include_submodules {
level_filter level_filter
} else { } else {
&LevelFilter::Off &LevelFilter::OFF
} }
} }
}, },
@@ -84,16 +83,15 @@ impl ModuleMap {
#[derive(Clone)] #[derive(Clone)]
pub struct EmbassyLogger { pub struct EmbassyLogger {
log_level: log::LevelFilter, log_level: LevelFilter,
log_epoch: Arc<AtomicU64>, log_epoch: Arc<AtomicU64>,
logger: StdErrLog,
sharing: Arc<AtomicBool>, sharing: Arc<AtomicBool>,
share_dest: Url, share_dest: Url,
module_map: ModuleMap, module_map: ModuleMap,
} }
impl EmbassyLogger { impl EmbassyLogger {
pub fn init( pub fn init(
log_level: log::LevelFilter, log_level: LevelFilter,
log_epoch: Arc<AtomicU64>, log_epoch: Arc<AtomicU64>,
share_dest: Option<Url>, share_dest: Option<Url>,
share_errors: bool, share_errors: bool,
@@ -103,26 +101,27 @@ impl EmbassyLogger {
None => Url::parse("https://beta-registry-0-3.start9labs.com/error-logs").unwrap(), // TODO None => Url::parse("https://beta-registry-0-3.start9labs.com/error-logs").unwrap(), // TODO
Some(a) => a, Some(a) => a,
}; };
let mut logger = stderrlog::new(); use tracing_error::ErrorLayer;
logger.timestamp(Timestamp::Millisecond); use tracing_subscriber::prelude::*;
match log_level { use tracing_subscriber::{fmt, EnvFilter};
LevelFilter::Off => logger.quiet(true),
LevelFilter::Error => logger.verbosity(0), let fmt_layer = fmt::layer().with_target(false);
LevelFilter::Warn => logger.verbosity(1), let filter_layer = EnvFilter::from_default_env().add_directive(log_level.into());
LevelFilter::Info => logger.verbosity(2),
LevelFilter::Debug => logger.verbosity(3), tracing_subscriber::registry()
LevelFilter::Trace => logger.verbosity(4), .with(filter_layer)
}; .with(fmt_layer)
.with(ErrorLayer::default())
.init();
color_eyre::install().expect("Color Eyre Init");
let embassy_logger = EmbassyLogger { let embassy_logger = EmbassyLogger {
log_level, log_level,
log_epoch, log_epoch,
logger,
sharing: Arc::new(AtomicBool::new(share_errors)), sharing: Arc::new(AtomicBool::new(share_errors)),
share_dest: share_dest, share_dest: share_dest,
module_map: ModuleMap::new(module_map), module_map: ModuleMap::new(module_map),
}; };
set_boxed_logger(Box::new(embassy_logger.clone())).unwrap();
set_max_level(log_level);
embassy_logger embassy_logger
} }
pub fn set_sharing(&self, sharing: bool) { pub fn set_sharing(&self, sharing: bool) {
@@ -130,45 +129,9 @@ impl EmbassyLogger {
} }
} }
impl log::Log for EmbassyLogger {
fn enabled(&self, metadata: &Metadata) -> bool {
let top = metadata.target().split("::").next().unwrap();
if vec!["embassy", "embassyd", "embassy-cli", "embassy-sdk"].contains(&top) {
metadata.level() <= self.log_level
} else {
&metadata.level() <= self.module_map.level_for(metadata.target())
}
}
fn log(&self, record: &Record) {
if self.enabled(record.metadata()) {
self.logger.log(record);
}
if self.sharing.load(Ordering::SeqCst) {
if record.level() <= log::Level::Warn {
let mut body = BTreeMap::new();
body.insert(
"log-epoch",
format!("{}", self.log_epoch.load(Ordering::SeqCst)),
);
body.insert("log-message", format!("{}", record.args()));
// we don't care about the result and need it to be fast
tokio::spawn(
Client::new()
.post(self.share_dest.clone())
.json(&body)
.send(),
);
}
}
}
fn flush(&self) {
self.logger.flush()
}
}
#[tokio::test] #[tokio::test]
pub async fn order_level() { pub async fn order_level() {
assert!(log::Level::Warn > log::Level::Error) assert!(tracing::Level::WARN > tracing::Level::ERROR)
} }
#[test] #[test]
@@ -179,7 +142,7 @@ pub fn module() {
proptest::proptest! { proptest::proptest! {
#[test] #[test]
fn submodules_handled_by_parent(s0 in "[a-z][a-z0-9_]+", s1 in "[a-z][a-z0-9_]+", level in filter_strategy()) { fn submodules_handled_by_parent(s0 in "[a-z][a-z0-9_]+", s1 in "[a-z][a-z0-9_]+", level in filter_strategy()) {
proptest::prop_assume!(level > LevelFilter::Off); proptest::prop_assume!(level > LevelFilter::OFF);
let mut hm = BTreeMap::new(); let mut hm = BTreeMap::new();
hm.insert(format!("{}::*", s0.clone()), level); hm.insert(format!("{}::*", s0.clone()), level);
let mod_map = ModuleMap::new(hm); let mod_map = ModuleMap::new(hm);
@@ -187,15 +150,15 @@ proptest::proptest! {
} }
#[test] #[test]
fn submodules_ignored_by_parent(s0 in "[a-z][a-z0-9_]+", s1 in "[a-z][a-z0-9_]+", level in filter_strategy()) { fn submodules_ignored_by_parent(s0 in "[a-z][a-z0-9_]+", s1 in "[a-z][a-z0-9_]+", level in filter_strategy()) {
proptest::prop_assume!(level > LevelFilter::Off); proptest::prop_assume!(level > LevelFilter::OFF);
let mut hm = BTreeMap::new(); let mut hm = BTreeMap::new();
hm.insert(s0.clone(), level); hm.insert(s0.clone(), level);
let mod_map = ModuleMap::new(hm); let mod_map = ModuleMap::new(hm);
proptest::prop_assert_eq!(mod_map.level_for(&format!("{}::{}", s0, s1)), &LevelFilter::Off) proptest::prop_assert_eq!(mod_map.level_for(&format!("{}::{}", s0, s1)), &LevelFilter::OFF)
} }
#[test] #[test]
fn duplicate_insertion_ignored(s0 in "[a-z][a-z0-9_]+", s1 in "[a-z][a-z0-9_]+", level in filter_strategy()) { fn duplicate_insertion_ignored(s0 in "[a-z][a-z0-9_]+", s1 in "[a-z][a-z0-9_]+", level in filter_strategy()) {
proptest::prop_assume!(level > LevelFilter::Off); proptest::prop_assume!(level > LevelFilter::OFF);
let mut hm = BTreeMap::new(); let mut hm = BTreeMap::new();
hm.insert(format!("{}::*", s0.clone()), level); hm.insert(format!("{}::*", s0.clone()), level);
let sub = format!("{}::{}", s0, s1); let sub = format!("{}::{}", s0, s1);
@@ -217,11 +180,11 @@ proptest::proptest! {
fn filter_strategy() -> impl proptest::strategy::Strategy<Value = LevelFilter> { fn filter_strategy() -> impl proptest::strategy::Strategy<Value = LevelFilter> {
use proptest::strategy::Just; use proptest::strategy::Just;
proptest::prop_oneof![ proptest::prop_oneof![
Just(LevelFilter::Off), Just(LevelFilter::OFF),
Just(LevelFilter::Error), Just(LevelFilter::ERROR),
Just(LevelFilter::Warn), Just(LevelFilter::WARN),
Just(LevelFilter::Info), Just(LevelFilter::INFO),
Just(LevelFilter::Debug), Just(LevelFilter::DEBUG),
Just(LevelFilter::Trace), Just(LevelFilter::TRACE),
] ]
} }

View File

@@ -96,7 +96,7 @@ where
if version.semver() != previous.semver() { if version.semver() != previous.semver() {
previous.migrate_from_unchecked(version, db).await?; previous.migrate_from_unchecked(version, db).await?;
} }
log::info!( tracing::info!(
"{} -> {}", "{} -> {}",
previous.semver().as_str(), previous.semver().as_str(),
self.semver().as_str() self.semver().as_str()
@@ -111,7 +111,7 @@ where
db: &mut Db, db: &mut Db,
) -> Result<(), Error> { ) -> Result<(), Error> {
let previous = Self::Previous::new(); let previous = Self::Previous::new();
log::info!( tracing::info!(
"{} -> {}", "{} -> {}",
self.semver().as_str(), self.semver().as_str(),
previous.semver().as_str() previous.semver().as_str()