chore: migrate logging from the `log` crate to `tracing` (add tracing-error/tracing-subscriber, replace log:: macros, introduce EmbassyLogger::init)

This commit is contained in:
Justin Miller
2021-10-10 20:41:09 -06:00
committed by Aiden McClelland
parent 81164f974f
commit c3ac27865d
27 changed files with 226 additions and 227 deletions

1
appmgr/Cargo.lock generated
View File

@@ -891,6 +891,7 @@ dependencies = [
"toml",
"torut",
"tracing",
"tracing-error",
"tracing-futures",
"tracing-subscriber",
"typed-builder",

View File

@@ -118,6 +118,7 @@ tokio-tungstenite = "0.14.0"
tokio-util = { version = "0.6.8", features = ["io"] }
torut = "0.2.0"
tracing = "0.1"
tracing-error = "0.1"
tracing-subscriber = "0.2"
tracing-futures="0.2"
typed-builder = "0.9.1"
@@ -126,3 +127,6 @@ url = { version = "2.2.2", features = ["serde"] }
[dependencies.serde_with]
version = "1.10.0"
features = [ "macros", "json" ]
[profile.dev.package.backtrace]
opt-level = 3

View File

@@ -68,15 +68,13 @@ impl DockerAction {
};
cmd.stdout(std::process::Stdio::piped());
cmd.stderr(std::process::Stdio::piped());
if log::log_enabled!(log::Level::Trace) {
log::trace!(
"{}",
format!("{:?}", cmd)
.split(r#"" ""#)
.collect::<Vec<&str>>()
.join(" ")
);
}
tracing::trace!(
"{}",
format!("{:?}", cmd)
.split(r#"" ""#)
.collect::<Vec<&str>>()
.join(" ")
);
let mut handle = cmd.spawn().with_kind(crate::ErrorKind::Docker)?;
if let (Some(input), Some(stdin)) = (&input_buf, &mut handle.stdin) {
use tokio::io::AsyncWriteExt;
@@ -94,7 +92,7 @@ impl DockerAction {
match format.from_slice(&res.stdout) {
Ok(a) => a,
Err(e) => {
log::warn!(
tracing::warn!(
"Failed to deserialize stdout from {}: {}, falling back to UTF-8 string.",
format,
e
@@ -156,7 +154,7 @@ impl DockerAction {
match format.from_slice(&res.stdout) {
Ok(a) => a,
Err(e) => {
log::warn!(
tracing::warn!(
"Failed to deserialize stdout from {}: {}, falling back to UTF-8 string.",
format,
e

View File

@@ -1,9 +1,11 @@
use clap::Arg;
use embassy::context::CliContext;
use embassy::util::logger::EmbassyLogger;
use embassy::Error;
use rpc_toolkit::run_cli;
use rpc_toolkit::yajrc::RpcError;
use serde_json::Value;
use tracing::metadata::LevelFilter;
fn inner_main() -> Result<(), Error> {
run_cli!({
@@ -25,14 +27,20 @@ fn inner_main() -> Result<(), Error> {
.arg(Arg::with_name("host").long("host").short("h").takes_value(true))
.arg(Arg::with_name("proxy").long("proxy").short("p").takes_value(true)),
context: matches => {
simple_logging::log_to_stderr(match matches.occurrences_of("verbosity") {
0 => log::LevelFilter::Off,
1 => log::LevelFilter::Error,
2 => log::LevelFilter::Warn,
3 => log::LevelFilter::Info,
4 => log::LevelFilter::Debug,
_ => log::LevelFilter::Trace,
});
EmbassyLogger::init(
match matches.occurrences_of("verbosity") {
0 => LevelFilter::OFF,
1 => LevelFilter::ERROR,
2 => LevelFilter::WARN,
3 => LevelFilter::INFO,
4 => LevelFilter::DEBUG,
_ => LevelFilter::TRACE,
},
Default::default(),
None,
false,
Default::default(),
);
CliContext::init(matches)?
},
exit: |e: RpcError| {
@@ -56,7 +64,7 @@ fn main() {
Ok(_) => (),
Err(e) => {
eprintln!("{}", e.source);
log::debug!("{:?}", e.source);
tracing::debug!("{:?}", e.source);
drop(e.source);
std::process::exit(e.kind as i32)
}

View File

@@ -1,5 +1,6 @@
use std::path::Path;
use color_eyre::eyre::eyre;
use embassy::context::rpc::RpcContextConfig;
use embassy::context::{DiagnosticContext, SetupContext};
use embassy::db::model::ServerStatus;
@@ -11,17 +12,20 @@ use embassy::middleware::encrypt::encrypt;
#[cfg(feature = "avahi")]
use embassy::net::mdns::MdnsController;
use embassy::sound::MARIO_COIN;
use embassy::util::logger::EmbassyLogger;
use embassy::util::{Invoke, Version};
use embassy::{Error, ResultExt};
use http::StatusCode;
use rpc_toolkit::rpc_server;
use tokio::process::Command;
use tracing::metadata::LevelFilter;
fn status_fn(_: i32) -> StatusCode {
StatusCode::OK
}
async fn init(cfg_path: Option<&str>) -> Result<(), Error> {
// return Err(eyre!("Test failure").with_kind(embassy::ErrorKind::Unknown));
let cfg = RpcContextConfig::load(cfg_path).await?;
embassy::disk::util::mount("LABEL=EMBASSY", "/embassy-os").await?;
if tokio::fs::metadata("/embassy-os/disk.guid").await.is_err() {
@@ -82,7 +86,7 @@ async fn init(cfg_path: Option<&str>) -> Result<(), Error> {
DEFAULT_PASSWORD,
)
.await?;
log::info!("Loaded Disk");
tracing::info!("Loaded Disk");
let secret_store = cfg.secret_store().await?;
let log_dir = cfg.datadir().join("main").join("logs");
if tokio::fs::metadata(&log_dir).await.is_err() {
@@ -94,7 +98,7 @@ async fn init(cfg_path: Option<&str>) -> Result<(), Error> {
.arg("systemd-journald")
.invoke(embassy::ErrorKind::Journald)
.await?;
log::info!("Mounted Logs");
tracing::info!("Mounted Logs");
let tmp_docker = cfg.datadir().join("tmp").join("docker");
if tokio::fs::metadata(&tmp_docker).await.is_ok() {
tokio::fs::remove_dir_all(&tmp_docker).await?;
@@ -121,14 +125,14 @@ async fn init(cfg_path: Option<&str>) -> Result<(), Error> {
.arg("docker")
.invoke(embassy::ErrorKind::Docker)
.await?;
log::info!("Mounted Docker Data");
tracing::info!("Mounted Docker Data");
embassy::install::load_images(cfg.datadir()).await?;
log::info!("Loaded Docker Images");
tracing::info!("Loaded Docker Images");
embassy::ssh::sync_keys_from_db(&secret_store, "/root/.ssh/authorized_keys").await?;
log::info!("Synced SSH Keys");
tracing::info!("Synced SSH Keys");
// todo!("sync wifi");
embassy::hostname::sync_hostname().await?;
log::info!("Synced Hostname");
tracing::info!("Synced Hostname");
if tokio::fs::metadata("/var/www/html/main/public")
.await
@@ -146,7 +150,7 @@ async fn init(cfg_path: Option<&str>) -> Result<(), Error> {
)
.await?;
}
log::info!("Enabled nginx public dir");
tracing::info!("Enabled nginx public dir");
embassy::net::wifi::synchronize_wpa_supplicant_conf(&cfg.datadir().join("main")).await?;
let db = cfg.db(&secret_store).await?;
@@ -177,10 +181,10 @@ async fn run_script_if_exists<P: AsRef<Path>>(path: P) {
match Command::new("/bin/bash").arg(script).spawn() {
Ok(mut c) => {
if let Err(e) = c.wait().await {
log::error!("Error Running {}: {}", script.display(), e)
tracing::error!("Error Running {}: {}", script.display(), e)
}
}
Err(e) => log::error!("Error Running {}: {}", script.display(), e),
Err(e) => tracing::error!("Error Running {}: {}", script.display(), e),
}
}
}
@@ -192,8 +196,8 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<(), Error> {
let res = if let Err(e) = init(cfg_path).await {
(|| async {
log::error!("{}", e.source);
log::debug!("{}", e.source);
tracing::error!("{}", e.source);
tracing::debug!("{}", e.source);
embassy::sound::BEETHOVEN.play().await?;
#[cfg(feature = "avahi")]
let _mdns = MdnsController::init();
@@ -259,14 +263,29 @@ fn main() {
)
.get_matches();
simple_logging::log_to_stderr(match matches.occurrences_of("verbosity") {
0 => log::LevelFilter::Off,
1 => log::LevelFilter::Error,
2 => log::LevelFilter::Warn,
3 => log::LevelFilter::Info,
4 => log::LevelFilter::Debug,
_ => log::LevelFilter::Trace,
});
// simple_logging::log_to_stderr(match matches.occurrences_of("verbosity") {
// 0 => LevelFilter::OFF,
// 1 => LevelFilter::ERROR,
// 2 => LevelFilter::WARN,
// 3 => LevelFilter::INFO,
// 4 => LevelFilter::DEBUG,
// _ => LevelFilter::TRACE,
// });
EmbassyLogger::init(
match matches.occurrences_of("verbosity") {
0 => LevelFilter::OFF,
1 => LevelFilter::ERROR,
2 => LevelFilter::WARN,
3 => LevelFilter::INFO,
4 => LevelFilter::DEBUG,
_ => LevelFilter::TRACE,
},
Default::default(),
None,
false,
Default::default(),
);
let cfg_path = matches.value_of("config");
let res = {
let rt = tokio::runtime::Builder::new_multi_thread()
@@ -280,7 +299,7 @@ fn main() {
Ok(_) => (),
Err(e) => {
eprintln!("{}", e.source);
log::debug!("{:?}", e.source);
tracing::debug!("{:?}", e.source);
drop(e.source);
std::process::exit(e.kind as i32)
}

View File

@@ -23,12 +23,12 @@ fn inner_main() -> Result<(), Error> {
),
context: matches => {
simple_logging::log_to_stderr(match matches.occurrences_of("verbosity") {
0 => log::LevelFilter::Off,
1 => log::LevelFilter::Error,
2 => log::LevelFilter::Warn,
3 => log::LevelFilter::Info,
4 => log::LevelFilter::Debug,
_ => log::LevelFilter::Trace,
0 => tracing::LevelFilter::Off,
1 => tracing::LevelFilter::Error,
2 => tracing::LevelFilter::Warn,
3 => tracing::LevelFilter::Info,
4 => tracing::LevelFilter::Debug,
_ => tracing::LevelFilter::Trace,
});
SdkContext::init(matches)?
},
@@ -52,7 +52,7 @@ fn main() {
Ok(_) => (),
Err(e) => {
eprintln!("{}", e.source);
log::debug!("{:?}", e.source);
tracing::debug!("{:?}", e.source);
drop(e.source);
std::process::exit(e.kind as i32)
}

View File

@@ -1,7 +1,7 @@
use std::collections::BTreeMap;
use std::time::Duration;
use color_eyre::eyre::eyre;
use color_eyre::eyre::{self, eyre};
use embassy::context::{DiagnosticContext, RpcContext};
use embassy::db::subscribe;
use embassy::middleware::auth::auth;
@@ -15,25 +15,27 @@ use embassy::status::{check_all, synchronize_all};
use embassy::util::{daemon, Invoke};
use embassy::{Error, ErrorKind, ResultExt};
use futures::{FutureExt, TryFutureExt};
use log::LevelFilter;
use reqwest::{Client, Proxy};
use rpc_toolkit::hyper::{Body, Response, Server, StatusCode};
use rpc_toolkit::rpc_server;
use tokio::process::Command;
use tokio::signal::unix::signal;
use tracing::instrument;
use tracing::metadata::LevelFilter;
fn status_fn(_: i32) -> StatusCode {
StatusCode::OK
}
fn err_to_500(e: Error) -> Response<Body> {
log::error!("{}", e);
tracing::error!("{}", e);
Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body(Body::empty())
.unwrap()
}
#[instrument]
async fn inner_main(
cfg_path: Option<&str>,
log_level: LevelFilter,
@@ -165,10 +167,10 @@ async fn inner_main(
let ctx = status_ctx.clone();
async move {
if let Err(e) = synchronize_all(&ctx).await {
log::error!("Error in Status Sync daemon: {}", e);
log::debug!("{:?}", e);
tracing::error!("Error in Status Sync daemon: {}", e);
tracing::debug!("{:?}", e);
} else {
log::trace!("Status Sync completed successfully");
tracing::trace!("Status Sync completed successfully");
}
}
},
@@ -181,10 +183,10 @@ async fn inner_main(
let ctx = health_ctx.clone();
async move {
if let Err(e) = check_all(&ctx).await {
log::error!("Error in Health Check daemon: {}", e);
log::debug!("{:?}", e);
tracing::error!("Error in Health Check daemon: {}", e);
tracing::debug!("{:?}", e);
} else {
log::trace!("Health Check completed successfully");
tracing::trace!("Health Check completed successfully");
}
}
},
@@ -270,11 +272,11 @@ fn main() {
// initializes the bootstrap logger, this will be replaced with the EmbassyLogger later
let filter = match matches.occurrences_of("verbosity") {
0 => log::LevelFilter::Error,
1 => log::LevelFilter::Warn,
2 => log::LevelFilter::Info,
3 => log::LevelFilter::Debug,
_ => log::LevelFilter::Trace,
0 => LevelFilter::ERROR,
1 => LevelFilter::WARN,
2 => LevelFilter::INFO,
3 => LevelFilter::DEBUG,
_ => LevelFilter::TRACE,
};
let module_logging = matches
.values_of("log-module")
@@ -301,8 +303,8 @@ fn main() {
Ok(a) => Ok(a),
Err(e) => {
(|| async {
log::error!("{}", e.source);
log::debug!("{}", e.source);
tracing::error!("{:?}", e.source);
tracing::debug!("{:?}", e.source);
embassy::sound::BEETHOVEN.play().await?;
#[cfg(feature = "avahi")]
let _mdns = MdnsController::init();
@@ -353,7 +355,7 @@ fn main() {
Ok(Some(s)) => s.execute(),
Err(e) => {
eprintln!("{}", e.source);
log::debug!("{:?}", e.source);
tracing::debug!("{:?}", e.source);
drop(e.source);
std::process::exit(e.kind as i32)
}

View File

@@ -403,7 +403,7 @@ pub fn configure<'a, Db: DbHandle>(
.into_iter()
.filter(|(dep_id, _)| {
if dep_id != id && !manifest.dependencies.0.contains_key(dep_id) {
log::warn!("Illegal dependency specified: {}", dep_id);
tracing::warn!("Illegal dependency specified: {}", dep_id);
false
} else {
true

View File

@@ -8,7 +8,6 @@ use std::sync::Arc;
use std::time::Duration;
use bollard::Docker;
use log::LevelFilter;
use patch_db::json_ptr::JsonPointer;
use patch_db::{PatchDb, Revision};
use reqwest::Url;
@@ -20,6 +19,8 @@ use sqlx::SqlitePool;
use tokio::fs::File;
use tokio::sync::broadcast::Sender;
use tokio::sync::RwLock;
use tracing::instrument;
use tracing::metadata::LevelFilter;
use crate::db::model::Database;
use crate::hostname::{get_hostname, get_id};
@@ -92,6 +93,7 @@ impl RpcContextConfig {
}
Ok(db)
}
#[instrument]
pub async fn secret_store(&self) -> Result<SqlitePool, Error> {
let secret_store = SqlitePool::connect_with(
SqliteConnectOptions::new()

View File

@@ -126,7 +126,7 @@ async fn ws_handler<
}
Some(Message::Close(frame)) => {
if let Some(reason) = frame.as_ref() {
log::info!("Closing WebSocket: Reason: {} {}", reason.code, reason.reason);
tracing::info!("Closing WebSocket: Reason: {} {}", reason.code, reason.reason);
}
stream
.send(Message::Close(frame))
@@ -155,7 +155,7 @@ pub async fn subscribe(ctx: RpcContext, req: Request<Body>) -> Result<Response<B
tokio::task::spawn(async move {
match ws_handler(ctx, ws_fut).await {
Ok(()) => (),
Err(e) => log::error!("WebSocket Closed: {}", e),
Err(e) => tracing::error!("WebSocket Closed: {}", e),
}
});
}

View File

@@ -17,9 +17,9 @@ pub fn init(#[context] ctx: SdkContext) -> Result<(), Error> {
std::fs::create_dir_all(parent)
.with_ctx(|_| (crate::ErrorKind::Filesystem, parent.display().to_string()))?;
}
log::info!("Generating new developer key...");
tracing::info!("Generating new developer key...");
let keypair = Keypair::generate(&mut rand::thread_rng());
log::info!("Writing key to {}", ctx.developer_key_path.display());
tracing::info!("Writing key to {}", ctx.developer_key_path.display());
let mut dev_key_file = File::create(&ctx.developer_key_path)?;
dev_key_file.write_all(&keypair.to_bytes())?;
dev_key_file.sync_all()?;

View File

@@ -188,15 +188,17 @@ pub async fn list() -> Result<Vec<DiskInfo>, Error> {
let mut partitions = Vec::with_capacity(parts.len());
let vendor = get_vendor(&disk)
.await
.map_err(|e| log::warn!("Could not get vendor of {}: {}", disk.display(), e.source))
.map_err(|e| tracing::warn!("Could not get vendor of {}: {}", disk.display(), e.source))
.unwrap_or_default();
let model = get_model(&disk)
.await
.map_err(|e| log::warn!("Could not get model of {}: {}", disk.display(), e.source))
.map_err(|e| tracing::warn!("Could not get model of {}: {}", disk.display(), e.source))
.unwrap_or_default();
let capacity = get_capacity(&disk)
.await
.map_err(|e| log::warn!("Could not get capacity of {}: {}", disk.display(), e.source))
.map_err(|e| {
tracing::warn!("Could not get capacity of {}: {}", disk.display(), e.source)
})
.unwrap_or_default();
let mut embassy_os = None;
for part in parts {
@@ -204,7 +206,7 @@ pub async fn list() -> Result<Vec<DiskInfo>, Error> {
let capacity = get_capacity(&part)
.await
.map_err(|e| {
log::warn!("Could not get capacity of {}: {}", part.display(), e.source)
tracing::warn!("Could not get capacity of {}: {}", part.display(), e.source)
})
.unwrap_or_default();
let mut used = None;
@@ -212,7 +214,7 @@ pub async fn list() -> Result<Vec<DiskInfo>, Error> {
let tmp_mountpoint =
Path::new(TMP_MOUNTPOINT).join(&part.strip_prefix("/").unwrap_or(&part));
if let Err(e) = mount(&part, &tmp_mountpoint).await {
log::warn!("Could not collect usage information: {}", e.source)
tracing::warn!("Could not collect usage information: {}", e.source)
} else {
let mount_guard = GeneralGuard::new(|| {
let path = tmp_mountpoint.clone();
@@ -221,7 +223,7 @@ pub async fn list() -> Result<Vec<DiskInfo>, Error> {
used = get_used(&tmp_mountpoint)
.await
.map_err(|e| {
log::warn!("Could not get usage of {}: {}", part.display(), e.source)
tracing::warn!("Could not get usage of {}: {}", part.display(), e.source)
})
.ok();
if label.as_deref() == Some("rootfs") {
@@ -323,7 +325,7 @@ pub async fn bind<P0: AsRef<Path>, P1: AsRef<Path>>(
dst: P1,
read_only: bool,
) -> Result<(), Error> {
log::info!(
tracing::info!(
"Binding {} to {}",
src.as_ref().display(),
dst.as_ref().display()
@@ -362,7 +364,7 @@ pub async fn bind<P0: AsRef<Path>, P1: AsRef<Path>>(
}
pub async fn unmount<P: AsRef<Path>>(mount_point: P) -> Result<(), Error> {
log::info!("Unmounting {}.", mount_point.as_ref().display());
tracing::info!("Unmounting {}.", mount_point.as_ref().display());
let umount_output = tokio::process::Command::new("umount")
.arg(mount_point.as_ref())
.output()

View File

@@ -1,6 +1,6 @@
use std::fmt::Display;
use color_eyre::eyre::eyre;
use color_eyre::eyre::{eyre, ErrReport};
use patch_db::Revision;
use rpc_toolkit::yajrc::RpcError;
@@ -225,7 +225,7 @@ impl From<Error> for RpcError {
match serde_json::to_value(&e.revision) {
Ok(a) => a,
Err(e) => {
log::warn!("Error serializing revision for Error object: {}", e);
tracing::warn!("Error serializing revision for Error object: {}", e);
serde_json::Value::Null
}
},

View File

@@ -127,7 +127,7 @@ pub async fn cleanup_failed<Db: DbHandle>(
}
}
_ => {
log::warn!("{}: Nothing to clean up!", id);
tracing::warn!("{}: Nothing to clean up!", id);
false
}
} {

View File

@@ -113,7 +113,7 @@ pub async fn install(
tokio::spawn(async move {
if let Err(e) = download_install_s9pk(&ctx, &man, s9pk).await {
log::error!("Install of {}@{} Failed: {}", man.id, man.version, e);
tracing::error!("Install of {}@{} Failed: {}", man.id, man.version, e);
}
});
@@ -180,7 +180,7 @@ pub async fn uninstall_impl(ctx: RpcContext, id: PackageId) -> Result<WithRevisi
tokio::spawn(async move {
if let Err(e) = cleanup::uninstall(&ctx, &mut ctx.db.handle(), &installed).await {
log::error!("Uninstall of {} Failed: {}", id, e);
tracing::error!("Uninstall of {} Failed: {}", id, e);
}
});
@@ -267,7 +267,7 @@ pub async fn download_install_s9pk(
if let Err(e) = cleanup_failed(&ctx, &mut tx, pkg_id, version).await {
let mut tx = handle.begin().await?;
log::error!(
tracing::error!(
"Failed to clean up {}@{}: {}: Adding to broken packages",
pkg_id,
version,
@@ -303,13 +303,13 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
.idx_model(pkg_id);
let progress_model = model.clone().and_then(|m| m.install_progress());
log::info!("Install {}@{}: Unpacking Manifest", pkg_id, version);
tracing::info!("Install {}@{}: Unpacking Manifest", pkg_id, version);
let manifest = progress
.track_read_during(progress_model.clone(), &ctx.db, || rdr.manifest())
.await?;
log::info!("Install {}@{}: Unpacked Manifest", pkg_id, version);
tracing::info!("Install {}@{}: Unpacked Manifest", pkg_id, version);
log::info!("Install {}@{}: Fetching Dependency Info", pkg_id, version);
tracing::info!("Install {}@{}: Fetching Dependency Info", pkg_id, version);
let mut dependency_info = BTreeMap::new();
let reg_url = ctx.package_registry_url().await?;
for (dep, info) in &manifest.dependencies.0 {
@@ -379,7 +379,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
);
}
}
log::info!("Install {}@{}: Fetched Dependency Info", pkg_id, version);
tracing::info!("Install {}@{}: Fetched Dependency Info", pkg_id, version);
let public_dir_path = ctx
.datadir
@@ -388,7 +388,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
.join(version.as_str());
tokio::fs::create_dir_all(&public_dir_path).await?;
log::info!("Install {}@{}: Unpacking LICENSE.md", pkg_id, version);
tracing::info!("Install {}@{}: Unpacking LICENSE.md", pkg_id, version);
progress
.track_read_during(progress_model.clone(), &ctx.db, || async {
let license_path = public_dir_path.join("LICENSE.md");
@@ -398,9 +398,9 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
Ok(())
})
.await?;
log::info!("Install {}@{}: Unpacked LICENSE.md", pkg_id, version);
tracing::info!("Install {}@{}: Unpacked LICENSE.md", pkg_id, version);
log::info!("Install {}@{}: Unpacking INSTRUCTIONS.md", pkg_id, version);
tracing::info!("Install {}@{}: Unpacking INSTRUCTIONS.md", pkg_id, version);
progress
.track_read_during(progress_model.clone(), &ctx.db, || async {
let instructions_path = public_dir_path.join("INSTRUCTIONS.md");
@@ -410,10 +410,10 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
Ok(())
})
.await?;
log::info!("Install {}@{}: Unpacked INSTRUCTIONS.md", pkg_id, version);
tracing::info!("Install {}@{}: Unpacked INSTRUCTIONS.md", pkg_id, version);
let icon_path = Path::new("icon").with_extension(&manifest.assets.icon_type());
log::info!(
tracing::info!(
"Install {}@{}: Unpacking {}",
pkg_id,
version,
@@ -428,14 +428,14 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
Ok(())
})
.await?;
log::info!(
tracing::info!(
"Install {}@{}: Unpacked {}",
pkg_id,
version,
icon_path.display()
);
log::info!("Install {}@{}: Unpacking Docker Images", pkg_id, version);
tracing::info!("Install {}@{}: Unpacking Docker Images", pkg_id, version);
progress
.track_read_during(progress_model.clone(), &ctx.db, || async {
let image_tar_dir = ctx
@@ -502,9 +502,9 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
}
})
.await?;
log::info!("Install {}@{}: Unpacked Docker Images", pkg_id, version,);
tracing::info!("Install {}@{}: Unpacked Docker Images", pkg_id, version,);
log::info!("Install {}@{}: Unpacking Assets", pkg_id, version);
tracing::info!("Install {}@{}: Unpacking Assets", pkg_id, version);
progress
.track_read_during(progress_model.clone(), &ctx.db, || async {
let asset_dir = asset_dir(&ctx.datadir, pkg_id, version);
@@ -517,7 +517,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
Ok(())
})
.await?;
log::info!("Install {}@{}: Unpacked Assets", pkg_id, version);
tracing::info!("Install {}@{}: Unpacked Assets", pkg_id, version);
progress.unpack_complete.store(true, Ordering::SeqCst);
@@ -531,15 +531,15 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
.lock(&mut tx, true)
.await;
log::info!("Install {}@{}: Creating volumes", pkg_id, version);
tracing::info!("Install {}@{}: Creating volumes", pkg_id, version);
manifest.volumes.install(ctx, pkg_id, version).await?;
log::info!("Install {}@{}: Created volumes", pkg_id, version);
tracing::info!("Install {}@{}: Created volumes", pkg_id, version);
log::info!("Install {}@{}: Installing interfaces", pkg_id, version);
tracing::info!("Install {}@{}: Installing interfaces", pkg_id, version);
let interface_addresses = manifest.interfaces.install(&mut sql_tx, pkg_id).await?;
log::info!("Install {}@{}: Installed interfaces", pkg_id, version);
tracing::info!("Install {}@{}: Installed interfaces", pkg_id, version);
log::info!("Install {}@{}: Creating manager", pkg_id, version);
tracing::info!("Install {}@{}: Creating manager", pkg_id, version);
ctx.managers
.add(
ctx.clone(),
@@ -547,7 +547,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
manifest.interfaces.tor_keys(&mut sql_tx, pkg_id).await?,
)
.await?;
log::info!("Install {}@{}: Created manager", pkg_id, version);
tracing::info!("Install {}@{}: Created manager", pkg_id, version);
let static_files = StaticFiles::local(pkg_id, version, manifest.assets.icon_type());
let current_dependencies: BTreeMap<_, _> = manifest
@@ -715,7 +715,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
sql_tx.commit().await?;
tx.commit(None).await?;
log::info!("Install {}@{}: Complete", pkg_id, version);
tracing::info!("Install {}@{}: Complete", pkg_id, version);
Ok(())
}

View File

@@ -85,7 +85,7 @@ impl ManagerMap {
pub async fn remove(&self, id: &(PackageId, Version)) {
if let Some(man) = self.0.write().await.remove(id) {
if let Err(e) = man.exit().await {
log::error!("Error shutting down manager: {}", e);
tracing::error!("Error shutting down manager: {}", e);
}
}
}
@@ -312,14 +312,14 @@ impl Manager {
match res {
Err(e) => {
// TODO for code review: Do we return this error or just log it?
log::error!("Failed to issue notification: {}", e);
tracing::error!("Failed to issue notification: {}", e);
}
Ok(()) => {}
}
log::error!("service crashed: {}: {}", e.0, e.1)
tracing::error!("service crashed: {}: {}", e.0, e.1)
}
Err(e) => {
log::error!("failed to start service: {}", e)
tracing::error!("failed to start service: {}", e)
}
}
}

View File

@@ -46,7 +46,7 @@ unsafe impl Sync for MdnsControllerInner {}
impl MdnsControllerInner {
fn load_services(&mut self) {
unsafe {
log::debug!("Loading services for mDNS");
tracing::debug!("Loading services for mDNS");
let mut res;
let http_tcp_cstr = std::ffi::CString::new("_http._tcp")
.expect("Could not cast _http._tcp to c string");
@@ -70,14 +70,14 @@ impl MdnsControllerInner {
);
if res < avahi_sys::AVAHI_OK {
let e_str = avahi_strerror(res);
log::error!(
tracing::error!(
"Could not add service to Avahi entry group: {:?}",
std::ffi::CStr::from_ptr(e_str)
);
avahi_free(e_str as *mut c_void);
panic!("Failed to load Avahi services");
}
log::info!(
tracing::info!(
"Published {:?}",
std::ffi::CStr::from_ptr(self.hostname_raw)
);
@@ -87,7 +87,7 @@ impl MdnsControllerInner {
.get_onion_address()
.get_address_without_dot_onion()
+ ".local";
log::debug!("Adding mdns CNAME entry for {}", &lan_address);
tracing::debug!("Adding mdns CNAME entry for {}", &lan_address);
let lan_address_ptr = std::ffi::CString::new(lan_address)
.expect("Could not cast lan address to c string");
res = avahi_sys::avahi_entry_group_add_record(
@@ -105,7 +105,7 @@ impl MdnsControllerInner {
);
if res < avahi_sys::AVAHI_OK {
let e_str = avahi_strerror(res);
log::error!(
tracing::error!(
"Could not add record for {:?} to Avahi entry group: {:?}",
lan_address_ptr,
std::ffi::CStr::from_ptr(e_str)
@@ -113,13 +113,13 @@ impl MdnsControllerInner {
avahi_free(e_str as *mut c_void);
panic!("Failed to load Avahi services");
}
log::info!("Published {:?}", lan_address_ptr);
tracing::info!("Published {:?}", lan_address_ptr);
}
}
}
fn init() -> Self {
unsafe {
log::debug!("Initializing mDNS controller");
tracing::debug!("Initializing mDNS controller");
let simple_poll = avahi_sys::avahi_simple_poll_new();
let poll = avahi_sys::avahi_simple_poll_get(simple_poll);
let mut box_err = Box::pin(0 as i32);
@@ -133,7 +133,7 @@ impl MdnsControllerInner {
);
if avahi_client == std::ptr::null_mut::<AvahiClient>() {
let e_str = avahi_strerror(*box_err);
log::error!(
tracing::error!(
"Could not create avahi client: {:?}",
std::ffi::CStr::from_ptr(e_str)
);
@@ -144,7 +144,7 @@ impl MdnsControllerInner {
avahi_sys::avahi_entry_group_new(avahi_client, Some(noop), std::ptr::null_mut());
if group == std::ptr::null_mut() {
let e_str = avahi_strerror(avahi_client_errno(avahi_client));
log::error!(
tracing::error!(
"Could not create avahi entry group: {:?}",
std::ffi::CStr::from_ptr(e_str)
);

View File

@@ -233,7 +233,7 @@ impl TorControllerInner {
}
async fn add_embassyd_onion(&mut self) -> Result<(), Error> {
log::info!(
tracing::info!(
"Registering Main Tor Service: {}",
self.embassyd_tor_key.public().get_onion_address()
);
@@ -249,7 +249,7 @@ impl TorControllerInner {
&mut std::iter::once(&(self.embassyd_addr.port(), self.embassyd_addr)),
)
.await?;
log::info!(
tracing::info!(
"Registered Main Tor Service: {}",
self.embassyd_tor_key.public().get_onion_address()
);
@@ -301,7 +301,7 @@ impl TorControllerInner {
}
}
Err(e) => {
log::info!("Failed to reconnect to tor control socket: {}", e);
tracing::info!("Failed to reconnect to tor control socket: {}", e);
}
}
tokio::time::sleep(Duration::from_secs(1)).await;
@@ -345,7 +345,7 @@ impl TorControllerInner {
}
pub async fn tor_health_check(client: &Client, tor_controller: &TorController) {
log::debug!("Attempting to self-check tor address");
tracing::debug!("Attempting to self-check tor address");
let onion = tor_controller.embassyd_onion().await;
let result = client
.post(format!("http://{}/rpc/v1", onion))
@@ -363,24 +363,24 @@ pub async fn tor_health_check(client: &Client, tor_controller: &TorController) {
match result {
// if success, do nothing
Ok(_) => {
log::debug!(
tracing::debug!(
"Successfully verified main tor address liveness at {}",
onion
)
}
// if failure, disconnect tor control port, and restart tor controller
Err(e) => {
log::error!("Unable to reach self over tor: {}", e);
tracing::error!("Unable to reach self over tor: {}", e);
loop {
match tor_controller.replace().await {
Ok(restarted) => {
if restarted {
log::error!("Tor has been recently restarted, refusing to restart");
tracing::error!("Tor has been recently restarted, refusing to restart");
}
break;
}
Err(e) => {
log::error!("Unable to restart tor: {}", e);
tracing::error!("Unable to restart tor: {}", e);
}
}
}

View File

@@ -42,13 +42,13 @@ pub async fn add(
priority: isize,
connect: bool,
) -> Result<(), Error> {
log::info!("Adding new WiFi network: '{}'", ssid);
tracing::info!("Adding new WiFi network: '{}'", ssid);
wpa_supplicant.add_network(ssid, password, priority).await?;
if connect {
let current = wpa_supplicant.get_current_network().await?;
let connected = wpa_supplicant.select_network(ssid).await?;
if !connected {
log::error!("Faild to add new WiFi network: '{}'", ssid);
tracing::error!("Faild to add new WiFi network: '{}'", ssid);
wpa_supplicant.remove_network(ssid).await?;
match current {
None => {}
@@ -63,7 +63,7 @@ pub async fn add(
tokio::spawn(async move {
match add_procedure(wpa_supplicant, &ssid, &password, priority, connect).await {
Err(e) => {
log::error!("Failed to add new WiFi network '{}': {}", ssid, e);
tracing::error!("Failed to add new WiFi network '{}': {}", ssid, e);
}
Ok(_) => {}
}
@@ -83,12 +83,12 @@ pub async fn connect(#[arg] ssid: String) -> Result<(), Error> {
let current = wpa_supplicant.get_current_network().await?;
let connected = wpa_supplicant.select_network(&ssid).await?;
if connected {
log::info!("Successfully connected to WiFi: '{}'", ssid);
tracing::info!("Successfully connected to WiFi: '{}'", ssid);
} else {
log::error!("Failed to connect to WiFi: '{}'", ssid);
tracing::error!("Failed to connect to WiFi: '{}'", ssid);
match current {
None => {
log::warn!("No WiFi to revert to!");
tracing::warn!("No WiFi to revert to!");
}
Some(current) => {
wpa_supplicant.select_network(&current).await?;
@@ -101,7 +101,7 @@ pub async fn connect(#[arg] ssid: String) -> Result<(), Error> {
tokio::spawn(async move {
match connect_procedure(wpa_supplicant, &ssid).await {
Err(e) => {
log::error!("Failed to connect to WiFi network '{}': {}", &ssid, e);
tracing::error!("Failed to connect to WiFi network '{}': {}", &ssid, e);
}
Ok(_) => {}
}

View File

@@ -42,7 +42,7 @@ impl<
pub fn pack(mut self, key: &ed25519_dalek::Keypair) -> Result<(), Error> {
let header_pos = self.writer.stream_position()?;
if header_pos != 0 {
log::warn!("Appending to non-empty file.");
tracing::warn!("Appending to non-empty file.");
}
let mut header = Header::placeholder();
header.serialize(&mut self.writer).with_ctx(|_| {

View File

@@ -96,11 +96,11 @@ pub async fn execute(
.await
{
Ok(a) => {
log::info!("Setup Successful! Tor Address: {}", a);
tracing::info!("Setup Successful! Tor Address: {}", a);
Ok(a)
}
Err(e) => {
log::error!("Error Setting Up Embassy: {}", e);
tracing::error!("Error Setting Up Embassy: {}", e);
Err(e)
}
}
@@ -178,7 +178,7 @@ pub async fn execute_inner(
tokio::spawn(async move {
if let Err(e) = recover(ctx.clone(), guid, recovery_drive, recovery_password).await {
BEETHOVEN.play().await.unwrap_or_default(); // ignore error in playing the song
log::error!("Error recovering drive!: {}", e);
tracing::error!("Error recovering drive!: {}", e);
*ctx.recovery_status.write().await = Some(Err(e.into()));
}
});

View File

@@ -33,7 +33,7 @@ impl Shutdown {
.invoke(crate::ErrorKind::Journald)
.await
{
log::error!("Error Stopping Journald: {}", e);
tracing::error!("Error Stopping Journald: {}", e);
}
if let Err(e) = Command::new("systemctl")
.arg("stop")
@@ -41,13 +41,13 @@ impl Shutdown {
.invoke(crate::ErrorKind::Docker)
.await
{
log::error!("Error Stopping Docker: {}", e);
tracing::error!("Error Stopping Docker: {}", e);
}
if let Err(e) = export(&*self.zfs_pool).await {
log::error!("Error Exporting ZFS Pool: {}", e);
tracing::error!("Error Exporting ZFS Pool: {}", e);
}
if let Err(e) = MARIO_DEATH.play().await {
log::error!("Error Playing Shutdown Song: {}", e);
tracing::error!("Error Playing Shutdown Song: {}", e);
}
});
if self.restart {

View File

@@ -132,7 +132,7 @@ impl Drop for SoundInterface {
let guard = self.0.take();
tokio::spawn(async move {
if let Err(e) = tokio::fs::write(&*UNEXPORT_FILE, "0").await {
log::error!("Failed to Unexport Sound Interface: {}", e)
tracing::error!("Failed to Unexport Sound Interface: {}", e)
}
if let Some(mut guard) = guard {
if let Some(lock) = guard.take() {
@@ -140,7 +140,7 @@ impl Drop for SoundInterface {
.await
.unwrap()
{
log::error!("Failed to drop Sound Interface File Lock: {}", e.1)
tracing::error!("Failed to drop Sound Interface File Lock: {}", e.1)
}
}
}

View File

@@ -74,7 +74,7 @@ pub async fn synchronize_all(ctx: &RpcContext) -> Result<(), Error> {
Ok(res)
}
if let Err(e) = status(ctx, id.clone()).await {
log::error!("Error syncronizing status of {}: {}", id, e);
tracing::error!("Error syncronizing status of {}: {}", id, e);
}
})
.await;
@@ -162,8 +162,8 @@ pub async fn check_all(ctx: &RpcContext) -> Result<(), Error> {
async move {
match main_status(ctx.clone(), status, manifest, ctx.db.handle()).await {
Err(e) => {
log::error!("Error running main health check for {}: {}", id, e);
log::debug!("{:?}", e);
tracing::error!("Error running main health check for {}: {}", id, e);
tracing::debug!("{:?}", e);
}
Ok(status) => {
status_sender.send((id, status)).await.expect("unreachable");
@@ -235,8 +235,8 @@ pub async fn check_all(ctx: &RpcContext) -> Result<(), Error> {
if let Err(e) =
dependency_status(&id, statuses, installed, deps, ctx.db.handle()).await
{
log::error!("Error running dependency health check for {}: {}", id, e);
log::debug!("{:?}", e);
tracing::error!("Error running dependency health check for {}: {}", id, e);
tracing::debug!("{:?}", e);
}
}
})

View File

@@ -230,7 +230,7 @@ pub async fn launch_metrics_task<F: FnMut() -> Receiver<Option<Shutdown>>>(
break;
}
Err(e) => {
log::error!("Could not get initial temperature: {}", e);
tracing::error!("Could not get initial temperature: {}", e);
}
}
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
@@ -247,11 +247,11 @@ pub async fn launch_metrics_task<F: FnMut() -> Receiver<Option<Shutdown>>>(
break;
}
Err(e) => {
log::error!("Could not get initial cpu info: {}", e);
tracing::error!("Could not get initial cpu info: {}", e);
}
},
Err(e) => {
log::error!("Could not get initial proc stat: {}", e);
tracing::error!("Could not get initial proc stat: {}", e);
}
}
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
@@ -265,7 +265,7 @@ pub async fn launch_metrics_task<F: FnMut() -> Receiver<Option<Shutdown>>>(
break;
}
Err(e) => {
log::error!("Could not get initial mem info: {}", e);
tracing::error!("Could not get initial mem info: {}", e);
}
}
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
@@ -279,7 +279,7 @@ pub async fn launch_metrics_task<F: FnMut() -> Receiver<Option<Shutdown>>>(
break;
}
Err(e) => {
log::error!("Could not get initial disk info: {}", e);
tracing::error!("Could not get initial disk info: {}", e);
}
}
tokio::time::sleep(tokio::time::Duration::from_secs(1)).await;
@@ -331,7 +331,7 @@ async fn launch_temp_task(
(*lock).as_mut().unwrap().general.temperature = a
}
Err(e) => {
log::error!("Could not get new temperature: {}", e);
tracing::error!("Could not get new temperature: {}", e);
}
}
tokio::select! {
@@ -354,7 +354,7 @@ async fn launch_cpu_task(
(*lock).as_mut().unwrap().cpu = info;
}
Err(e) => {
log::error!("Could not get new CPU Metrics: {}", e);
tracing::error!("Could not get new CPU Metrics: {}", e);
}
}
tokio::select! {
@@ -376,7 +376,7 @@ async fn launch_mem_task(
(*lock).as_mut().unwrap().memory = a;
}
Err(e) => {
log::error!("Could not get new Memory Metrics: {}", e);
tracing::error!("Could not get new Memory Metrics: {}", e);
}
}
tokio::select! {
@@ -397,7 +397,7 @@ async fn launch_disk_task(
(*lock).as_mut().unwrap().disk = a;
}
Err(e) => {
log::error!("Could not get new Disk Metrics: {}", e);
tracing::error!("Could not get new Disk Metrics: {}", e);
}
}
tokio::select! {

View File

@@ -2,10 +2,9 @@ use std::collections::BTreeMap;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::Arc;
use log::{set_boxed_logger, set_max_level, LevelFilter, Metadata, Record};
use reqwest::{Client, Url};
use reqwest::Url;
use sequence_trie::SequenceTrie;
use stderrlog::{StdErrLog, Timestamp};
use tracing_subscriber::filter::LevelFilter;
#[derive(Clone, Debug)]
pub struct ModuleMap {
@@ -68,12 +67,12 @@ impl ModuleMap {
let module_key = k.split("::");
match self.trie.get(module_key.clone()) {
None => match self.trie.get_ancestor(module_key) {
None => &LevelFilter::Off,
None => &LevelFilter::OFF,
Some((level_filter, include_submodules)) => {
if *include_submodules {
level_filter
} else {
&LevelFilter::Off
&LevelFilter::OFF
}
}
},
@@ -84,16 +83,15 @@ impl ModuleMap {
#[derive(Clone)]
pub struct EmbassyLogger {
log_level: log::LevelFilter,
log_level: LevelFilter,
log_epoch: Arc<AtomicU64>,
logger: StdErrLog,
sharing: Arc<AtomicBool>,
share_dest: Url,
module_map: ModuleMap,
}
impl EmbassyLogger {
pub fn init(
log_level: log::LevelFilter,
log_level: LevelFilter,
log_epoch: Arc<AtomicU64>,
share_dest: Option<Url>,
share_errors: bool,
@@ -103,26 +101,27 @@ impl EmbassyLogger {
None => Url::parse("https://beta-registry-0-3.start9labs.com/error-logs").unwrap(), // TODO
Some(a) => a,
};
let mut logger = stderrlog::new();
logger.timestamp(Timestamp::Millisecond);
match log_level {
LevelFilter::Off => logger.quiet(true),
LevelFilter::Error => logger.verbosity(0),
LevelFilter::Warn => logger.verbosity(1),
LevelFilter::Info => logger.verbosity(2),
LevelFilter::Debug => logger.verbosity(3),
LevelFilter::Trace => logger.verbosity(4),
};
use tracing_error::ErrorLayer;
use tracing_subscriber::prelude::*;
use tracing_subscriber::{fmt, EnvFilter};
let fmt_layer = fmt::layer().with_target(false);
let filter_layer = EnvFilter::from_default_env().add_directive(log_level.into());
tracing_subscriber::registry()
.with(filter_layer)
.with(fmt_layer)
.with(ErrorLayer::default())
.init();
color_eyre::install().expect("Color Eyre Init");
let embassy_logger = EmbassyLogger {
log_level,
log_epoch,
logger,
sharing: Arc::new(AtomicBool::new(share_errors)),
share_dest: share_dest,
module_map: ModuleMap::new(module_map),
};
set_boxed_logger(Box::new(embassy_logger.clone())).unwrap();
set_max_level(log_level);
embassy_logger
}
pub fn set_sharing(&self, sharing: bool) {
@@ -130,45 +129,9 @@ impl EmbassyLogger {
}
}
impl log::Log for EmbassyLogger {
fn enabled(&self, metadata: &Metadata) -> bool {
let top = metadata.target().split("::").next().unwrap();
if vec!["embassy", "embassyd", "embassy-cli", "embassy-sdk"].contains(&top) {
metadata.level() <= self.log_level
} else {
&metadata.level() <= self.module_map.level_for(metadata.target())
}
}
fn log(&self, record: &Record) {
if self.enabled(record.metadata()) {
self.logger.log(record);
}
if self.sharing.load(Ordering::SeqCst) {
if record.level() <= log::Level::Warn {
let mut body = BTreeMap::new();
body.insert(
"log-epoch",
format!("{}", self.log_epoch.load(Ordering::SeqCst)),
);
body.insert("log-message", format!("{}", record.args()));
// we don't care about the result and need it to be fast
tokio::spawn(
Client::new()
.post(self.share_dest.clone())
.json(&body)
.send(),
);
}
}
}
fn flush(&self) {
self.logger.flush()
}
}
#[tokio::test]
pub async fn order_level() {
assert!(log::Level::Warn > log::Level::Error)
assert!(tracing::Level::WARN > tracing::Level::ERROR)
}
#[test]
@@ -179,7 +142,7 @@ pub fn module() {
proptest::proptest! {
#[test]
fn submodules_handled_by_parent(s0 in "[a-z][a-z0-9_]+", s1 in "[a-z][a-z0-9_]+", level in filter_strategy()) {
proptest::prop_assume!(level > LevelFilter::Off);
proptest::prop_assume!(level > LevelFilter::OFF);
let mut hm = BTreeMap::new();
hm.insert(format!("{}::*", s0.clone()), level);
let mod_map = ModuleMap::new(hm);
@@ -187,15 +150,15 @@ proptest::proptest! {
}
#[test]
fn submodules_ignored_by_parent(s0 in "[a-z][a-z0-9_]+", s1 in "[a-z][a-z0-9_]+", level in filter_strategy()) {
proptest::prop_assume!(level > LevelFilter::Off);
proptest::prop_assume!(level > LevelFilter::OFF);
let mut hm = BTreeMap::new();
hm.insert(s0.clone(), level);
let mod_map = ModuleMap::new(hm);
proptest::prop_assert_eq!(mod_map.level_for(&format!("{}::{}", s0, s1)), &LevelFilter::Off)
proptest::prop_assert_eq!(mod_map.level_for(&format!("{}::{}", s0, s1)), &LevelFilter::OFF)
}
#[test]
fn duplicate_insertion_ignored(s0 in "[a-z][a-z0-9_]+", s1 in "[a-z][a-z0-9_]+", level in filter_strategy()) {
proptest::prop_assume!(level > LevelFilter::Off);
proptest::prop_assume!(level > LevelFilter::OFF);
let mut hm = BTreeMap::new();
hm.insert(format!("{}::*", s0.clone()), level);
let sub = format!("{}::{}", s0, s1);
@@ -217,11 +180,11 @@ proptest::proptest! {
fn filter_strategy() -> impl proptest::strategy::Strategy<Value = LevelFilter> {
use proptest::strategy::Just;
proptest::prop_oneof![
Just(LevelFilter::Off),
Just(LevelFilter::Error),
Just(LevelFilter::Warn),
Just(LevelFilter::Info),
Just(LevelFilter::Debug),
Just(LevelFilter::Trace),
Just(LevelFilter::OFF),
Just(LevelFilter::ERROR),
Just(LevelFilter::WARN),
Just(LevelFilter::INFO),
Just(LevelFilter::DEBUG),
Just(LevelFilter::TRACE),
]
}

View File

@@ -96,7 +96,7 @@ where
if version.semver() != previous.semver() {
previous.migrate_from_unchecked(version, db).await?;
}
log::info!(
tracing::info!(
"{} -> {}",
previous.semver().as_str(),
self.semver().as_str()
@@ -111,7 +111,7 @@ where
db: &mut Db,
) -> Result<(), Error> {
let previous = Self::Previous::new();
log::info!(
tracing::info!(
"{} -> {}",
self.semver().as_str(),
previous.semver().as_str()