remove product key from setup flow (#1750)

* remove product key flow from setup

* feat: backend turned off encryption + new Id + no package id

* implement new encryption scheme in FE

* decode response string

* crypto not working

* update setup wizard closes #1762

* feat: Get the encryption key

* fix: Get to recovery

* remove old code

* fix build

* fix: Install works for now

* fix bug in config for adding new list items

* dismiss action modal on success

* clear button in config

* wip: Currently broken in avahi mdns

* include headers with req/res and refactor patchDB init and usage

* fix: Can now run in the main

* flatline on failed init

* update patch DB

* add last-wifi-region to data model even though not used by FE

* chore: Fix the start.

* wip: Fix wrong order for getting hostname before sql has been
created

* fix edge case where union keys displayed as new when not new

* fix: Can start

* last backup color, markdown links always new tab, fix bug with login

* refactor to remove WithRevision

* resolve circular dep issue

* update submodule

* fix patch-db

* update patchDB

* update patch again

* escape error

* decodeuricomponent

* increase proxy buffer size

* increase proxy buffer size

* fix nginx

Co-authored-by: BluJ <mogulslayer@gmail.com>
Co-authored-by: BluJ <dragondef@gmail.com>
Co-authored-by: Aiden McClelland <me@drbonez.dev>
This commit is contained in:
Matt Hill
2022-09-07 09:25:01 -06:00
committed by GitHub
parent 76682ebef0
commit 50111e37da
175 changed files with 11436 additions and 2906 deletions

669
backend/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -80,6 +80,7 @@ imbl = "2.0.0"
indexmap = { version = "1.9.1", features = ["serde"] }
isocountry = "0.3.2"
itertools = "0.10.3"
josekit = "0.8.1"
js_engine = { path = '../libs/js_engine', optional = true }
jsonpath_lib = "0.3.0"
lazy_static = "1.4.0"
@@ -141,6 +142,7 @@ tracing-subscriber = { version = "0.3.14", features = ["env-filter"] }
trust-dns-server = "0.21.2"
typed-builder = "0.10.0"
url = { version = "2.2.2", features = ["serde"] }
uuid = { version = "1.1.2", features = ["v4"] }
[profile.test]
opt-level = 3

View File

@@ -6,7 +6,8 @@ Wants=avahi-daemon.service nginx.service tor.service
[Service]
Type=oneshot
Environment=RUST_LOG=embassy_init=debug,embassy=debug,js_engine=debug
Environment=RUST_LOG=embassy_init=debug,embassy=debug,js_engine=debug,patch_db=trace
Environment=RUST_LIB_BACKTRACE=full
ExecStart=/usr/local/bin/embassy-init
RemainAfterExit=true
StandardOutput=file:/var/log/embassy-init.out.log

File diff suppressed because it is too large Load Diff

7776
backend/src/assets/nouns.txt Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -22,7 +22,6 @@ use crate::auth::check_password_against_db;
use crate::backup::{BackupReport, ServerBackupReport};
use crate::context::RpcContext;
use crate::db::model::BackupProgress;
use crate::db::util::WithRevision;
use crate::disk::mount::backup::BackupMountGuard;
use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::TmpMountGuard;
@@ -135,7 +134,7 @@ pub async fn backup_all(
)]
package_ids: Option<BTreeSet<PackageId>>,
#[arg] password: String,
) -> Result<WithRevision<()>, Error> {
) -> Result<(), Error> {
let mut db = ctx.db.handle();
check_password_against_db(&mut ctx.secret_store.acquire().await?, &password).await?;
let fs = target_id
@@ -159,7 +158,7 @@ pub async fn backup_all(
if old_password.is_some() {
backup_guard.change_password(&password)?;
}
let revision = assure_backing_up(&mut db, &package_ids).await?;
assure_backing_up(&mut db, &package_ids).await?;
tokio::task::spawn(async move {
let backup_res = perform_backup(&ctx, &mut db, backup_guard, &package_ids).await;
let backup_progress = crate::db::DatabaseModel::new()
@@ -238,17 +237,14 @@ pub async fn backup_all(
.await
.expect("failed to change server status");
});
Ok(WithRevision {
response: (),
revision,
})
Ok(())
}
#[instrument(skip(db, packages))]
async fn assure_backing_up(
db: &mut PatchDbHandle,
packages: impl IntoIterator<Item = &PackageId>,
) -> Result<Option<Arc<Revision>>, Error> {
) -> Result<(), Error> {
let mut tx = db.begin().await?;
let mut backing_up = crate::db::DatabaseModel::new()
.server_info()
@@ -279,7 +275,8 @@ async fn assure_backing_up(
.collect(),
);
backing_up.save(&mut tx).await?;
Ok(tx.commit(None).await?)
tx.commit().await?;
Ok(())
}
#[instrument(skip(ctx, db, backup_guard))]

View File

@@ -20,7 +20,6 @@ use super::target::BackupTargetId;
use crate::backup::backup_bulk::OsBackup;
use crate::context::{RpcContext, SetupContext};
use crate::db::model::{PackageDataEntry, StaticFiles};
use crate::db::util::WithRevision;
use crate::disk::mount::backup::{BackupMountGuard, PackageBackupMountGuard};
use crate::disk::mount::filesystem::ReadOnly;
use crate::disk::mount::guard::TmpMountGuard;
@@ -50,7 +49,7 @@ pub async fn restore_packages_rpc(
#[arg(parse(parse_comma_separated))] ids: Vec<PackageId>,
#[arg(rename = "target-id")] target_id: BackupTargetId,
#[arg] password: String,
) -> Result<WithRevision<()>, Error> {
) -> Result<(), Error> {
let mut db = ctx.db.handle();
let fs = target_id
.load(&mut ctx.secret_store.acquire().await?)
@@ -114,10 +113,7 @@ pub async fn restore_packages_rpc(
}
});
Ok(WithRevision {
response: (),
revision,
})
Ok(())
}
async fn approximate_progress(
@@ -418,7 +414,7 @@ async fn assure_restoring(
guards.push((manifest, guard));
}
Ok((tx.commit(None).await?, guards))
Ok((tx.commit().await?, guards))
}
#[instrument(skip(ctx, guard))]

View File

@@ -7,11 +7,9 @@ use embassy::context::{DiagnosticContext, SetupContext};
use embassy::disk::fsck::RepairStrategy;
use embassy::disk::main::DEFAULT_PASSWORD;
use embassy::disk::REPAIR_DISK_PATH;
use embassy::hostname::get_product_key;
use embassy::init::STANDBY_MODE_PATH;
use embassy::middleware::cors::cors;
use embassy::middleware::diagnostic::diagnostic;
use embassy::middleware::encrypt::encrypt;
#[cfg(feature = "avahi")]
use embassy::net::mdns::MdnsController;
use embassy::shutdown::Shutdown;
@@ -50,12 +48,7 @@ async fn setup_or_init(cfg_path: Option<&str>) -> Result<(), Error> {
.invoke(embassy::ErrorKind::Nginx)
.await?;
let ctx = SetupContext::init(cfg_path).await?;
let keysource_ctx = ctx.clone();
let keysource = move || {
let ctx = keysource_ctx.clone();
async move { ctx.product_key().await }
};
let encrypt = encrypt(keysource);
let encrypt = embassy::middleware::encrypt::encrypt(ctx.clone());
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
CHIME.play().await?;
rpc_server!({
@@ -103,7 +96,7 @@ async fn setup_or_init(cfg_path: Option<&str>) -> Result<(), Error> {
.await?;
}
tracing::info!("Loaded Disk");
embassy::init::init(&cfg, &get_product_key().await?).await?;
embassy::init::init(&cfg).await?;
}
Ok(())

View File

@@ -7,6 +7,7 @@ use embassy::core::rpc_continuations::RequestGuid;
use embassy::db::subscribe;
use embassy::middleware::auth::auth;
use embassy::middleware::cors::cors;
use embassy::middleware::db::db as db_middleware;
use embassy::middleware::diagnostic::diagnostic;
#[cfg(feature = "avahi")]
use embassy::net::mdns::MdnsController;
@@ -40,7 +41,6 @@ fn err_to_500(e: Error) -> Response<Body> {
#[instrument]
async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
let (rpc_ctx, shutdown) = {
embassy::hostname::sync_hostname().await?;
let rpc_ctx = RpcContext::init(
cfg_path,
Arc::new(
@@ -82,11 +82,13 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
});
let mut db = rpc_ctx.db.handle();
embassy::hostname::sync_hostname(&mut db).await?;
let receipts = embassy::context::rpc::RpcSetNginxReceipts::new(&mut db).await?;
rpc_ctx.set_nginx_conf(&mut db, receipts).await?;
drop(db);
let auth = auth(rpc_ctx.clone());
let db_middleware = db_middleware(rpc_ctx.clone());
let ctx = rpc_ctx.clone();
let server = rpc_server!({
command: embassy::main_api,
@@ -95,6 +97,7 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
middleware: [
cors,
auth,
db_middleware,
]
})
.with_graceful_shutdown({
@@ -112,29 +115,6 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
.await
});
let rev_cache_ctx = rpc_ctx.clone();
let revision_cache_task = tokio::spawn(async move {
let mut sub = rev_cache_ctx.db.subscribe();
let mut shutdown = rev_cache_ctx.shutdown.subscribe();
loop {
let rev = match tokio::select! {
a = sub.recv() => a,
_ = shutdown.recv() => break,
} {
Ok(a) => a,
Err(_) => {
rev_cache_ctx.revision_cache.write().await.truncate(0);
continue;
}
}; // TODO: handle falling behind
let mut cache = rev_cache_ctx.revision_cache.write().await;
cache.push_back(rev);
if cache.len() > rev_cache_ctx.revision_cache_size {
cache.pop_front();
}
}
});
let ws_ctx = rpc_ctx.clone();
let ws_server = {
let builder = Server::bind(&ws_ctx.bind_ws);
@@ -268,12 +248,6 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
ErrorKind::Unknown
))
.map_ok(|_| tracing::debug!("Metrics daemon Shutdown")),
revision_cache_task
.map_err(|e| Error::new(
eyre!("{}", e).wrap_err("Revision Cache daemon panicked!"),
ErrorKind::Unknown
))
.map_ok(|_| tracing::debug!("Revision Cache daemon Shutdown")),
ws_server
.map_err(|e| Error::new(e, ErrorKind::Network))
.map_ok(|_| tracing::debug!("WebSocket Server Shutdown")),

View File

@@ -15,7 +15,6 @@ use tracing::instrument;
use crate::context::RpcContext;
use crate::db::model::{CurrentDependencies, CurrentDependencyInfo, CurrentDependents};
use crate::db::util::WithRevision;
use crate::dependencies::{
add_dependent_to_current_dependents_lists, break_transitive, heal_all_dependents_transitive,
BreakTransitiveReceipts, BreakageRes, Dependencies, DependencyConfig, DependencyError,
@@ -237,7 +236,8 @@ pub async fn get(
#[command(
subcommands(self(set_impl(async, context(RpcContext))), set_dry),
display(display_none)
display(display_none),
metadata(sync_db = true)
)]
#[instrument]
pub fn set(
@@ -247,9 +247,8 @@ pub fn set(
format: Option<IoFormat>,
#[arg(long = "timeout")] timeout: Option<crate::util::serde::Duration>,
#[arg(stdin, parse(parse_stdin_deserializable))] config: Option<Config>,
#[arg(rename = "expire-id", long = "expire-id")] expire_id: Option<String>,
) -> Result<(PackageId, Option<Config>, Option<Duration>, Option<String>), Error> {
Ok((id, config, timeout.map(|d| *d), expire_id))
) -> Result<(PackageId, Option<Config>, Option<Duration>), Error> {
Ok((id, config, timeout.map(|d| *d)))
}
/// So, the new locking finds all the possible locks and lifts them up into a bundle of locks.
@@ -407,12 +406,7 @@ impl ConfigReceipts {
#[instrument(skip(ctx))]
pub async fn set_dry(
#[context] ctx: RpcContext,
#[parent_data] (id, config, timeout, _): (
PackageId,
Option<Config>,
Option<Duration>,
Option<String>,
),
#[parent_data] (id, config, timeout): (PackageId, Option<Config>, Option<Duration>),
) -> Result<BreakageRes, Error> {
let mut db = ctx.db.handle();
let mut tx = db.begin().await?;
@@ -439,8 +433,8 @@ pub async fn set_dry(
#[instrument(skip(ctx))]
pub async fn set_impl(
ctx: RpcContext,
(id, config, timeout, expire_id): (PackageId, Option<Config>, Option<Duration>, Option<String>),
) -> Result<WithRevision<()>, Error> {
(id, config, timeout): (PackageId, Option<Config>, Option<Duration>),
) -> Result<(), Error> {
let mut db = ctx.db.handle();
let mut tx = db.begin().await?;
let mut breakages = BTreeMap::new();
@@ -457,10 +451,8 @@ pub async fn set_impl(
&locks,
)
.await?;
Ok(WithRevision {
response: (),
revision: tx.commit(expire_id).await?,
})
tx.commit().await?;
Ok(())
}
#[instrument(skip(ctx, db, receipts))]

View File

@@ -22,7 +22,6 @@ use tracing::instrument;
use crate::core::rpc_continuations::{RequestGuid, RestHandler, RpcContinuation};
use crate::db::model::{Database, InstalledPackageDataEntry, PackageDataEntry};
use crate::hostname::{derive_hostname, derive_id, get_product_key};
use crate::init::{init_postgres, pgloader};
use crate::install::cleanup::{cleanup_failed, uninstall, CleanupFailedReceipts};
use crate::manager::ManagerMap;
@@ -71,23 +70,18 @@ impl RpcContextConfig {
.as_deref()
.unwrap_or_else(|| Path::new("/embassy-data"))
}
pub async fn db(&self, secret_store: &PgPool, product_key: &str) -> Result<PatchDb, Error> {
let sid = derive_id(product_key);
let hostname = derive_hostname(&sid);
pub async fn db(&self, secret_store: &PgPool) -> Result<PatchDb, Error> {
let db_path = self.datadir().join("main").join("embassy.db");
let db = PatchDb::open(&db_path)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
if !db.exists(&<JsonPointer>::default()).await? {
if !db.exists(&<JsonPointer>::default()).await {
db.put(
&<JsonPointer>::default(),
&Database::init(
sid,
&hostname,
&os_key(&mut secret_store.acquire().await?).await?,
password_hash(&mut secret_store.acquire().await?).await?,
),
None,
)
.await?;
}
@@ -216,7 +210,7 @@ impl RpcContext {
let (shutdown, _) = tokio::sync::broadcast::channel(1);
let secret_store = base.secret_store().await?;
tracing::info!("Opened Pg DB");
let db = base.db(&secret_store, &get_product_key().await?).await?;
let db = base.db(&secret_store).await?;
tracing::info!("Opened PatchDB");
let docker = Docker::connect_with_unix_defaults()?;
tracing::info!("Connected to Docker");
@@ -231,6 +225,7 @@ impl RpcContext {
.unwrap_or(&[SocketAddr::from(([127, 0, 0, 1], 53))]),
secret_store.clone(),
None,
&mut db.handle(),
)
.await?;
tracing::info!("Initialized Net Controller");

View File

@@ -2,29 +2,28 @@ use std::net::{IpAddr, SocketAddr};
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use patch_db::json_ptr::JsonPointer;
use patch_db::PatchDb;
use rand::distributions::Alphanumeric;
use rand::{thread_rng, Rng};
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::Context;
use serde::{Deserialize, Serialize};
use sqlx::postgres::PgConnectOptions;
use sqlx::PgPool;
use tokio::fs::File;
use tokio::process::Command;
use tokio::sync::broadcast::Sender;
use tokio::sync::RwLock;
use tracing::instrument;
use url::Host;
use crate::db::model::Database;
use crate::hostname::{derive_hostname, derive_id, get_product_key};
use crate::init::{init_postgres, pgloader};
use crate::net::tor::os_key;
use crate::setup::{password_hash, RecoveryStatus};
use crate::util::io::from_yaml_async_reader;
use crate::util::{AsyncFileExt, Invoke};
use crate::util::AsyncFileExt;
use crate::{Error, ResultExt};
#[derive(Clone, Serialize, Deserialize)]
@@ -69,6 +68,9 @@ pub struct SetupContextSeed {
pub bind_rpc: SocketAddr,
pub shutdown: Sender<()>,
pub datadir: PathBuf,
/// Used to encrypt for hidding from snoopers for setups create password
/// Set via path
pub current_secret: RwLock<Option<String>>,
pub selected_v2_drive: RwLock<Option<PathBuf>>,
pub cached_product_key: RwLock<Option<Arc<String>>>,
pub recovery_status: RwLock<Option<Result<RecoveryStatus, RpcError>>>,
@@ -88,6 +90,7 @@ impl SetupContext {
bind_rpc: cfg.bind_rpc.unwrap_or(([127, 0, 0, 1], 5959).into()),
shutdown,
datadir,
current_secret: RwLock::new(None),
selected_v2_drive: RwLock::new(None),
cached_product_key: RwLock::new(None),
recovery_status: RwLock::new(None),
@@ -100,19 +103,13 @@ impl SetupContext {
let db = PatchDb::open(&db_path)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
if !db.exists(&<JsonPointer>::default()).await? {
let pkey = self.product_key().await?;
let sid = derive_id(&*pkey);
let hostname = derive_hostname(&sid);
if !db.exists(&<JsonPointer>::default()).await {
db.put(
&<JsonPointer>::default(),
&Database::init(
sid,
&hostname,
&os_key(&mut secret_store.acquire().await?).await?,
password_hash(&mut secret_store.acquire().await?).await?,
),
None,
)
.await?;
}
@@ -134,22 +131,17 @@ impl SetupContext {
}
Ok(secret_store)
}
#[instrument(skip(self))]
pub async fn product_key(&self) -> Result<Arc<String>, Error> {
Ok(
if let Some(k) = {
let guard = self.cached_product_key.read().await;
let res = guard.clone();
drop(guard);
res
} {
k
} else {
let k = Arc::new(get_product_key().await?);
*self.cached_product_key.write().await = Some(k.clone());
k
},
)
/// So we assume that there will only be one client that will ask for a secret,
/// And during that time do we upsert to a new key
pub async fn update_secret(&self) -> Result<String, Error> {
let new_secret: String = thread_rng()
.sample_iter(&Alphanumeric)
.take(30)
.map(char::from)
.collect();
*self.current_secret.write().await = Some(new_secret.clone());
Ok(new_secret)
}
}

View File

@@ -6,7 +6,6 @@ use rpc_toolkit::command;
use tracing::instrument;
use crate::context::RpcContext;
use crate::db::util::WithRevision;
use crate::dependencies::{
break_all_dependents_transitive, heal_all_dependents_transitive, BreakageRes, DependencyError,
DependencyReceipt, TaggedDependencyError,
@@ -61,12 +60,9 @@ impl StartReceipts {
}
}
#[command(display(display_none))]
#[command(display(display_none), metadata(sync_db = true))]
#[instrument(skip(ctx))]
pub async fn start(
#[context] ctx: RpcContext,
#[arg] id: PackageId,
) -> Result<WithRevision<()>, Error> {
pub async fn start(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result<(), Error> {
let mut db = ctx.db.handle();
let mut tx = db.begin().await?;
let receipts = StartReceipts::new(&mut tx, &id).await?;
@@ -77,7 +73,7 @@ pub async fn start(
.await?;
heal_all_dependents_transitive(&ctx, &mut tx, &id, &receipts.dependency_receipt).await?;
let revision = tx.commit(None).await?;
tx.commit().await?;
drop(receipts);
ctx.managers
@@ -87,10 +83,7 @@ pub async fn start(
.synchronize()
.await;
Ok(WithRevision {
revision,
response: (),
})
Ok(())
}
#[derive(Clone)]
pub struct StopReceipts {
@@ -150,7 +143,11 @@ async fn stop_common<Db: DbHandle>(
Ok(())
}
#[command(subcommands(self(stop_impl(async)), stop_dry), display(display_none))]
#[command(
subcommands(self(stop_impl(async)), stop_dry),
display(display_none),
metadata(sync_db = true)
)]
pub fn stop(#[arg] id: PackageId) -> Result<PackageId, Error> {
Ok(id)
}
@@ -173,23 +170,19 @@ pub async fn stop_dry(
}
#[instrument(skip(ctx))]
pub async fn stop_impl(ctx: RpcContext, id: PackageId) -> Result<WithRevision<()>, Error> {
pub async fn stop_impl(ctx: RpcContext, id: PackageId) -> Result<(), Error> {
let mut db = ctx.db.handle();
let mut tx = db.begin().await?;
stop_common(&mut tx, &id, &mut BTreeMap::new()).await?;
Ok(WithRevision {
revision: tx.commit(None).await?,
response: (),
})
tx.commit().await?;
Ok(())
}
#[command(display(display_none))]
pub async fn restart(
#[context] ctx: RpcContext,
#[arg] id: PackageId,
) -> Result<WithRevision<()>, Error> {
#[command(display(display_none), metadata(sync_db = true))]
pub async fn restart(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result<(), Error> {
let mut db = ctx.db.handle();
let mut tx = db.begin().await?;
@@ -208,9 +201,7 @@ pub async fn restart(
}
*status = Some(MainStatus::Restarting);
status.save(&mut tx).await?;
tx.commit().await?;
Ok(WithRevision {
revision: tx.commit(None).await?,
response: (),
})
Ok(())
}

View File

@@ -1,6 +1,5 @@
pub mod model;
pub mod package;
pub mod util;
use std::future::Future;
use std::sync::Arc;
@@ -14,7 +13,7 @@ use rpc_toolkit::hyper::{Body, Error as HyperError, Request, Response};
use rpc_toolkit::yajrc::RpcError;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use tokio::sync::{broadcast, oneshot};
use tokio::sync::oneshot;
use tokio::task::JoinError;
use tokio_tungstenite::tungstenite::protocol::frame::coding::CloseCode;
use tokio_tungstenite::tungstenite::protocol::CloseFrame;
@@ -23,7 +22,6 @@ use tokio_tungstenite::WebSocketStream;
use tracing::instrument;
pub use self::model::DatabaseModel;
use self::util::WithRevision;
use crate::context::RpcContext;
use crate::middleware::auth::{HasValidSession, HashSessionToken};
use crate::util::serde::{display_serializable, IoFormat};
@@ -37,7 +35,7 @@ async fn ws_handler<
session: Option<(HasValidSession, HashSessionToken)>,
ws_fut: WSFut,
) -> Result<(), Error> {
let (dump, sub) = ctx.db.dump_and_sub().await;
let (dump, sub) = ctx.db.dump_and_sub().await?;
let mut stream = ws_fut
.await
.with_kind(crate::ErrorKind::Network)?
@@ -79,7 +77,7 @@ async fn subscribe_to_session_kill(
async fn deal_with_messages(
_has_valid_authentication: HasValidSession,
mut kill: oneshot::Receiver<()>,
mut sub: broadcast::Receiver<Arc<Revision>>,
mut sub: patch_db::Subscriber,
mut stream: WebSocketStream<Upgraded>,
) -> Result<(), Error> {
loop {
@@ -95,8 +93,8 @@ async fn deal_with_messages(
.with_kind(crate::ErrorKind::Network)?;
return Ok(())
}
new_rev = sub.recv().fuse() => {
let rev = new_rev.with_kind(crate::ErrorKind::Database)?;
new_rev = sub.recv_async().fuse() => {
let rev = new_rev.expect("UNREACHABLE: patch-db is dropped");
stream
.send(Message::Text(serde_json::to_string(&rev).with_kind(crate::ErrorKind::Serialization)?))
.await
@@ -184,24 +182,11 @@ pub async fn revisions(
#[allow(unused_variables)]
#[arg(long = "format")]
format: Option<IoFormat>,
) -> Result<RevisionsRes, RpcError> {
let cache = ctx.revision_cache.read().await;
if cache
.front()
.map(|rev| rev.id <= since + 1)
.unwrap_or(false)
{
Ok(RevisionsRes::Revisions(
cache
.iter()
.skip_while(|rev| rev.id < since + 1)
.cloned()
.collect(),
))
} else {
drop(cache);
Ok(RevisionsRes::Dump(ctx.db.dump().await))
}
) -> Result<RevisionsRes, Error> {
Ok(match ctx.db.sync(since).await? {
Ok(revs) => RevisionsRes::Revisions(revs),
Err(dump) => RevisionsRes::Dump(dump),
})
}
#[command(display(display_serializable))]
@@ -210,8 +195,8 @@ pub async fn dump(
#[allow(unused_variables)]
#[arg(long = "format")]
format: Option<IoFormat>,
) -> Result<Dump, RpcError> {
Ok(ctx.db.dump().await)
) -> Result<Dump, Error> {
Ok(ctx.db.dump().await?)
}
#[command(subcommands(ui))]
@@ -228,13 +213,11 @@ pub async fn ui(
#[allow(unused_variables)]
#[arg(long = "format")]
format: Option<IoFormat>,
) -> Result<WithRevision<()>, Error> {
) -> Result<(), Error> {
let ptr = "/ui"
.parse::<JsonPointer>()
.with_kind(crate::ErrorKind::Database)?
+ &pointer;
Ok(WithRevision {
response: (),
revision: ctx.db.put(&ptr, &value, None).await?,
})
ctx.db.put(&ptr, &value).await?;
Ok(())
}

View File

@@ -12,6 +12,7 @@ use serde_json::Value;
use torut::onion::TorSecretKeyV3;
use crate::config::spec::{PackagePointerSpec, SystemPointerSpec};
use crate::hostname::{generate_hostname, generate_id};
use crate::install::progress::InstallProgress;
use crate::net::interface::InterfaceId;
use crate::s9pk::manifest::{Manifest, ManifestModel, PackageId};
@@ -32,21 +33,20 @@ pub struct Database {
pub ui: Value,
}
impl Database {
pub fn init(
id: String,
hostname: &str,
tor_key: &TorSecretKeyV3,
password_hash: String,
) -> Self {
pub fn init(tor_key: &TorSecretKeyV3, password_hash: String) -> Self {
let id = generate_id();
let my_hostname = generate_hostname();
let lan_address = my_hostname.lan_address().parse().unwrap();
// TODO
Database {
server_info: ServerInfo {
id,
version: Current::new().semver().into(),
hostname: Some(my_hostname.0),
last_backup: None,
last_wifi_region: None,
eos_version_compat: Current::new().compat().clone(),
lan_address: format!("https://{}.local", hostname).parse().unwrap(),
lan_address,
tor_address: format!("http://{}", tor_key.public().get_onion_address())
.parse()
.unwrap(),
@@ -83,6 +83,7 @@ impl DatabaseModel {
#[serde(rename_all = "kebab-case")]
pub struct ServerInfo {
pub id: String,
pub hostname: Option<String>,
pub version: Version,
pub last_backup: Option<DateTime<Utc>>,
/// Used in the wifi to determine the region to set the system to

View File

@@ -1,10 +0,0 @@
use std::sync::Arc;
use patch_db::Revision;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct WithRevision<T> {
pub response: T,
pub revision: Option<Arc<Revision>>,
}

View File

@@ -1,34 +1,53 @@
use digest::Digest;
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use patch_db::DbHandle;
use rand::{thread_rng, Rng};
use tokio::process::Command;
use tracing::instrument;
use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt};
use crate::{Error, ErrorKind};
#[derive(Clone, serde::Deserialize, serde::Serialize, Debug)]
pub struct Hostname(pub String);
pub const PRODUCT_KEY_PATH: &'static str = "/embassy-os/product_key.txt";
#[instrument]
pub async fn get_hostname() -> Result<String, Error> {
Ok(derive_hostname(&get_id().await?))
lazy_static::lazy_static! {
static ref ADJECTIVES: Vec<String> = include_str!("./assets/adjectives.txt").lines().map(|x| x.to_string()).collect();
static ref NOUNS: Vec<String> = include_str!("./assets/nouns.txt").lines().map(|x| x.to_string()).collect();
}
impl AsRef<str> for Hostname {
fn as_ref(&self) -> &str {
&self.0
}
}
pub fn derive_hostname(id: &str) -> String {
format!("embassy-{}", id)
impl Hostname {
pub fn lan_address(&self) -> String {
format!("https://{}.local", self.0)
}
}
pub fn generate_hostname() -> Hostname {
let mut rng = thread_rng();
let adjective = &ADJECTIVES[rng.gen_range(0..ADJECTIVES.len())];
let noun = &NOUNS[rng.gen_range(0..NOUNS.len())];
Hostname(format!("{adjective}-{noun}"))
}
pub fn generate_id() -> String {
let id = uuid::Uuid::new_v4();
id.to_string()
}
#[instrument]
pub async fn get_current_hostname() -> Result<String, Error> {
pub async fn get_current_hostname() -> Result<Hostname, Error> {
let out = Command::new("hostname")
.invoke(ErrorKind::ParseSysInfo)
.await?;
let out_string = String::from_utf8(out)?;
Ok(out_string.trim().to_owned())
Ok(Hostname(out_string.trim().to_owned()))
}
#[instrument]
pub async fn set_hostname(hostname: &str) -> Result<(), Error> {
pub async fn set_hostname(hostname: &Hostname) -> Result<(), Error> {
let hostname: &String = &hostname.0;
let _out = Command::new("hostnamectl")
.arg("set-hostname")
.arg(hostname)
@@ -37,38 +56,36 @@ pub async fn set_hostname(hostname: &str) -> Result<(), Error> {
Ok(())
}
#[instrument]
pub async fn get_product_key() -> Result<String, Error> {
let out = tokio::fs::read_to_string(PRODUCT_KEY_PATH)
#[instrument(skip(handle))]
pub async fn get_id<Db: DbHandle>(handle: &mut Db) -> Result<String, Error> {
let id = crate::db::DatabaseModel::new()
.server_info()
.id()
.get(handle, false)
.await?;
Ok(id.to_string())
}
pub async fn get_hostname<Db: DbHandle>(handle: &mut Db) -> Result<Hostname, Error> {
if let Ok(hostname) = crate::db::DatabaseModel::new()
.server_info()
.hostname()
.get(handle, false)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, PRODUCT_KEY_PATH))?;
Ok(out.trim().to_owned())
{
if let Some(hostname) = hostname.to_owned() {
return Ok(Hostname(hostname));
}
}
let id = get_id(handle).await?;
if id.len() != 8 {
return Ok(generate_hostname());
}
return Ok(Hostname(format!("embassy-{}", id)));
}
#[instrument]
pub async fn set_product_key(key: &str) -> Result<(), Error> {
let mut pkey_file = File::create(PRODUCT_KEY_PATH).await?;
pkey_file.write_all(key.as_bytes()).await?;
Ok(())
}
pub fn derive_id(key: &str) -> String {
let mut hasher = sha2::Sha256::new();
hasher.update(key.as_bytes());
let res = hasher.finalize();
hex::encode(&res[0..4])
}
#[instrument]
pub async fn get_id() -> Result<String, Error> {
let key = get_product_key().await?;
Ok(derive_id(&key))
}
// cat /embassy-os/product_key.txt | shasum -a 256 | head -c 8 | awk '{print "embassy-"$1}' | xargs hostnamectl set-hostname && systemctl restart avahi-daemon
#[instrument]
pub async fn sync_hostname() -> Result<(), Error> {
set_hostname(&format!("embassy-{}", get_id().await?)).await?;
#[instrument(skip(handle))]
pub async fn sync_hostname<Db: DbHandle>(handle: &mut Db) -> Result<(), Error> {
set_hostname(&get_hostname(handle).await?).await?;
Command::new("systemctl")
.arg("restart")
.arg("avahi-daemon")

View File

@@ -8,7 +8,6 @@ use tokio::process::Command;
use crate::context::rpc::RpcContextConfig;
use crate::db::model::ServerStatus;
use crate::disk::mount::util::unmount;
use crate::install::PKG_DOCKER_DIR;
use crate::util::Invoke;
use crate::Error;
@@ -152,7 +151,11 @@ pub async fn init_postgres(datadir: impl AsRef<Path>) -> Result<(), Error> {
Ok(())
}
pub async fn init(cfg: &RpcContextConfig, product_key: &str) -> Result<(), Error> {
pub struct InitResult {
pub db: patch_db::PatchDb,
}
pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
let should_rebuild = tokio::fs::metadata(SYSTEM_REBUILD_PATH).await.is_ok();
let secret_store = cfg.secret_store().await?;
let log_dir = cfg.datadir().join("main/logs");
@@ -213,9 +216,14 @@ pub async fn init(cfg: &RpcContextConfig, product_key: &str) -> Result<(), Error
crate::ssh::sync_keys_from_db(&secret_store, "/home/start9/.ssh/authorized_keys").await?;
tracing::info!("Synced SSH Keys");
let db = cfg.db(&secret_store, product_key).await?;
let db = cfg.db(&secret_store).await?;
let mut handle = db.handle();
crate::db::DatabaseModel::new()
.server_info()
.lock(&mut handle, LockType::Write)
.await?;
let receipts = InitReceipts::new(&mut handle).await?;
crate::net::wifi::synchronize_wpa_supplicant_conf(
@@ -258,5 +266,5 @@ pub async fn init(cfg: &RpcContextConfig, product_key: &str) -> Result<(), Error
tracing::info!("System initialized.");
Ok(())
Ok(InitResult { db })
}

View File

@@ -375,7 +375,7 @@ where
if tokio::fs::metadata(&volumes).await.is_ok() {
tokio::fs::remove_dir_all(&volumes).await?;
}
tx.commit(None).await?;
tx.commit().await?;
remove_tor_keys(secrets, &entry.manifest.id).await?;
Ok(())
}

View File

@@ -32,7 +32,6 @@ use crate::db::model::{
CurrentDependencies, CurrentDependencyInfo, CurrentDependents, InstalledPackageDataEntry,
PackageDataEntry, RecoveredPackageInfo, StaticDependencyInfo, StaticFiles,
};
use crate::db::util::WithRevision;
use crate::dependencies::{
add_dependent_to_current_dependents_lists, break_all_dependents_transitive,
reconfigure_dependents_with_live_pointers, BreakTransitiveReceipts, BreakageRes,
@@ -115,7 +114,8 @@ impl std::fmt::Display for MinMax {
#[command(
custom_cli(cli_install(async, context(CliContext))),
display(display_none)
display(display_none),
metadata(sync_db = true)
)]
#[instrument(skip(ctx))]
pub async fn install(
@@ -127,7 +127,7 @@ pub async fn install(
String,
>,
#[arg(long = "version-priority", rename = "version-priority")] version_priority: Option<MinMax>,
) -> Result<WithRevision<()>, Error> {
) -> Result<(), Error> {
let version_str = match &version_spec {
None => "*",
Some(v) => &*v,
@@ -287,7 +287,7 @@ pub async fn install(
}
}
pde.save(&mut tx).await?;
let res = tx.commit(None).await?;
tx.commit().await?;
drop(db_handle);
tokio::spawn(async move {
@@ -323,10 +323,7 @@ pub async fn install(
}
});
Ok(WithRevision {
revision: res,
response: (),
})
Ok(())
}
#[command(rpc_only, display(display_none))]
@@ -427,7 +424,7 @@ pub async fn sideload(
}
}
pde.save(&mut tx).await?;
tx.commit(None).await?;
tx.commit().await?;
if let Err(e) = download_install_s9pk(
&new_ctx,
@@ -559,7 +556,7 @@ async fn cli_install(
ctx,
"package.install",
params,
PhantomData::<WithRevision<()>>,
PhantomData::<()>,
)
.await?
.result?;
@@ -570,7 +567,8 @@ async fn cli_install(
#[command(
subcommands(self(uninstall_impl(async)), uninstall_dry),
display(display_none)
display(display_none),
metadata(sync_db = true)
)]
pub async fn uninstall(#[arg] id: PackageId) -> Result<PackageId, Error> {
Ok(id)
@@ -601,7 +599,7 @@ pub async fn uninstall_dry(
}
#[instrument(skip(ctx))]
pub async fn uninstall_impl(ctx: RpcContext, id: PackageId) -> Result<WithRevision<()>, Error> {
pub async fn uninstall_impl(ctx: RpcContext, id: PackageId) -> Result<(), Error> {
let mut handle = ctx.db.handle();
let mut tx = handle.begin().await?;
@@ -629,7 +627,7 @@ pub async fn uninstall_impl(ctx: RpcContext, id: PackageId) -> Result<WithRevisi
removing: installed,
});
pde.save(&mut tx).await?;
let res = tx.commit(None).await?;
tx.commit().await?;
drop(handle);
tokio::spawn(async move {
@@ -666,17 +664,18 @@ pub async fn uninstall_impl(ctx: RpcContext, id: PackageId) -> Result<WithRevisi
}
});
Ok(WithRevision {
revision: res,
response: (),
})
Ok(())
}
#[command(rename = "delete-recovered", display(display_none))]
#[command(
rename = "delete-recovered",
display(display_none),
metadata(sync_db = true)
)]
pub async fn delete_recovered(
#[context] ctx: RpcContext,
#[arg] id: PackageId,
) -> Result<WithRevision<()>, Error> {
) -> Result<(), Error> {
let mut handle = ctx.db.handle();
let mut tx = handle.begin().await?;
let mut sql_tx = ctx.secret_store.begin().await?;
@@ -699,13 +698,10 @@ pub async fn delete_recovered(
}
cleanup::remove_tor_keys(&mut sql_tx, &id).await?;
let res = tx.commit(None).await?;
tx.commit().await?;
sql_tx.commit().await?;
Ok(WithRevision {
revision: res,
response: (),
})
Ok(())
}
pub struct DownloadInstallReceipts {
@@ -858,7 +854,7 @@ pub async fn download_install_s9pk(
tracing::error!("Failed to clean up {}@{}: {}", pkg_id, version, e);
tracing::debug!("{:?}", e);
} else {
tx.commit(None).await?;
tx.commit().await?;
}
Err(e)
} else {
@@ -1147,7 +1143,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
if let Some(mut hdl) = rdr.scripts().await? {
tokio::io::copy(
&mut hdl,
&mut File::create(dbg!(script_dir.join("embassy.js"))).await?,
&mut File::create(script_dir.join("embassy.js")).await?,
)
.await?;
}
@@ -1505,7 +1501,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
}
sql_tx.commit().await?;
tx.commit(None).await?;
tx.commit().await?;
tracing::info!("Install {}@{}: Complete", pkg_id, version);

View File

@@ -120,7 +120,7 @@ pub struct LogResponse {
end_cursor: Option<String>,
}
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)]
#[serde(rename_all = "kebab-case", tag = "type")]
#[serde(rename_all = "kebab-case")]
pub struct LogFollowResponse {
start_cursor: Option<String>,
guid: RequestGuid,

View File

@@ -128,20 +128,17 @@ pub async fn check<Db: DbHandle>(
let status = receipts.status.get(&mut checkpoint).await?;
match status {
MainStatus::Running { health, started } => {
receipts
.status
.set(
&mut checkpoint,
MainStatus::Running {
health: health_results.clone(),
started,
},
)
.await?;
}
_ => (),
if let MainStatus::Running { health: _, started } = status {
receipts
.status
.set(
&mut checkpoint,
MainStatus::Running {
health: health_results.clone(),
started,
},
)
.await?;
}
let current_dependents = receipts.current_dependents.get(&mut checkpoint).await?;

View File

@@ -0,0 +1,84 @@
use color_eyre::eyre::eyre;
use futures::future::BoxFuture;
use futures::FutureExt;
use http::HeaderValue;
use rpc_toolkit::hyper::http::Error as HttpError;
use rpc_toolkit::hyper::{Body, Request, Response};
use rpc_toolkit::rpc_server_helpers::{
noop4, DynMiddleware, DynMiddlewareStage2, DynMiddlewareStage3,
};
use rpc_toolkit::yajrc::RpcMethod;
use rpc_toolkit::Metadata;
use crate::context::RpcContext;
use crate::{Error, ResultExt};
pub fn db<M: Metadata>(ctx: RpcContext) -> DynMiddleware<M> {
Box::new(
move |_: &mut Request<Body>,
metadata: M|
-> BoxFuture<Result<Result<DynMiddlewareStage2, Response<Body>>, HttpError>> {
let ctx = ctx.clone();
async move {
let m2: DynMiddlewareStage2 = Box::new(move |req, rpc_req| {
async move {
let seq = req.headers.remove("x-patch-sequence");
let sync_db = metadata
.get(rpc_req.method.as_str(), "sync_db")
.unwrap_or(false);
let m3: DynMiddlewareStage3 = Box::new(move |res, _| {
async move {
if sync_db && seq.is_some() {
match async {
let seq = seq
.ok_or_else(|| {
Error::new(
eyre!("Missing X-Patch-Sequence"),
crate::ErrorKind::InvalidRequest,
)
})?
.to_str()
.with_kind(crate::ErrorKind::InvalidRequest)?
.parse()?;
let res = ctx.db.sync(seq).await?;
let json = match res {
Ok(revs) => serde_json::to_vec(&revs),
Err(dump) => serde_json::to_vec(&[dump]),
}
.with_kind(crate::ErrorKind::Serialization)?;
Ok::<_, Error>(
url::form_urlencoded::byte_serialize(&json)
.collect::<String>(),
)
}
.await
{
Ok(a) => res
.headers
.append("X-Patch-Updates", HeaderValue::from_str(&a)?),
Err(e) => res.headers.append(
"X-Patch-Error",
HeaderValue::from_str(
&url::form_urlencoded::byte_serialize(
e.to_string().as_bytes(),
)
.collect::<String>(),
)?,
),
};
}
Ok(Ok(noop4()))
}
.boxed()
});
Ok(Ok(m3))
}
.boxed()
});
Ok(Ok(m2))
}
.boxed()
},
)
}

View File

@@ -1,4 +1,3 @@
use std::future::Future;
use std::sync::Arc;
use aes::cipher::{CipherKey, NewCipher, Nonce, StreamCipher};
@@ -17,6 +16,7 @@ use rpc_toolkit::yajrc::RpcMethod;
use rpc_toolkit::Metadata;
use sha2::Sha256;
use crate::context::SetupContext;
use crate::util::Apply;
use crate::Error;
@@ -35,7 +35,7 @@ pub fn encrypt_slice(input: impl AsRef<[u8]>, password: impl AsRef<[u8]>) -> Vec
let prefix: [u8; 32] = rand::random();
let aeskey = pbkdf2(password.as_ref(), &prefix[16..]);
let ctr = Nonce::<Aes256Ctr>::from_slice(&prefix[..16]);
let mut aes = Aes256Ctr::new(&aeskey, &ctr);
let mut aes = Aes256Ctr::new(&aeskey, ctr);
let mut res = Vec::with_capacity(32 + input.as_ref().len());
res.extend_from_slice(&prefix[..]);
res.extend_from_slice(input.as_ref());
@@ -50,7 +50,7 @@ pub fn decrypt_slice(input: impl AsRef<[u8]>, password: impl AsRef<[u8]>) -> Vec
let (prefix, rest) = input.as_ref().split_at(32);
let aeskey = pbkdf2(password.as_ref(), &prefix[16..]);
let ctr = Nonce::<Aes256Ctr>::from_slice(&prefix[..16]);
let mut aes = Aes256Ctr::new(&aeskey, &ctr);
let mut aes = Aes256Ctr::new(&aeskey, ctr);
let mut res = rest.to_vec();
aes.apply_keystream(&mut res);
res
@@ -92,20 +92,20 @@ impl Stream for DecryptStream {
aes.apply_keystream(&mut res);
res.into()
} else {
if this.ctr.len() < 16 && buf.len() > 0 {
if this.ctr.len() < 16 && !buf.is_empty() {
let to_read = std::cmp::min(16 - this.ctr.len(), buf.len());
this.ctr.extend_from_slice(&buf[0..to_read]);
buf = &buf[to_read..];
}
if this.salt.len() < 16 && buf.len() > 0 {
if this.salt.len() < 16 && !buf.is_empty() {
let to_read = std::cmp::min(16 - this.salt.len(), buf.len());
this.salt.extend_from_slice(&buf[0..to_read]);
buf = &buf[to_read..];
}
if this.ctr.len() == 16 && this.salt.len() == 16 {
let aeskey = pbkdf2(this.key.as_bytes(), &this.salt);
let ctr = Nonce::<Aes256Ctr>::from_slice(&this.ctr);
let mut aes = Aes256Ctr::new(&aeskey, &ctr);
let ctr = Nonce::<Aes256Ctr>::from_slice(this.ctr);
let mut aes = Aes256Ctr::new(&aeskey, ctr);
let mut res = buf.to_vec();
aes.apply_keystream(&mut res);
*this.aes = Some(aes);
@@ -132,7 +132,7 @@ impl EncryptStream {
let prefix: [u8; 32] = rand::random();
let aeskey = pbkdf2(key.as_bytes(), &prefix[16..]);
let ctr = Nonce::<Aes256Ctr>::from_slice(&prefix[..16]);
let aes = Aes256Ctr::new(&aeskey, &ctr);
let aes = Aes256Ctr::new(&aeskey, ctr);
EncryptStream {
body,
aes,
@@ -169,42 +169,42 @@ fn encrypted(headers: &HeaderMap) -> bool {
.and_then(|h| {
h.to_str()
.ok()?
.split(",")
.split(',')
.any(|s| s == "aesctr256")
.apply(Some)
})
.unwrap_or_default()
}
pub fn encrypt<
F: Fn() -> Fut + Send + Sync + Clone + 'static,
Fut: Future<Output = Result<Arc<String>, Error>> + Send + Sync + 'static,
M: Metadata,
>(
keysource: F,
) -> DynMiddleware<M> {
pub fn encrypt<M: Metadata>(ctx: SetupContext) -> DynMiddleware<M> {
Box::new(
move |req: &mut Request<Body>,
metadata: M|
-> BoxFuture<Result<Result<DynMiddlewareStage2, Response<Body>>, HttpError>> {
let keysource = keysource.clone();
let keysource = ctx.clone();
async move {
let encrypted = encrypted(req.headers());
let current_secret: Option<String> = keysource.current_secret.read().await.clone();
let key = if encrypted {
let key = match keysource().await {
Ok(s) => s,
Err(e) => {
let key = match current_secret {
Some(s) => s,
None => {
let (res_parts, _) = Response::new(()).into_parts();
return Ok(Err(to_response(
req.headers(),
res_parts,
Err(e.into()),
Err(Error::new(
eyre!("No Secret has been set"),
crate::ErrorKind::RateLimited,
)
.into()),
|_| StatusCode::OK,
)?));
}
};
let body = std::mem::take(req.body_mut());
*req.body_mut() = Body::wrap_stream(DecryptStream::new(key.clone(), body));
*req.body_mut() =
Body::wrap_stream(DecryptStream::new(Arc::new(key.clone()), body));
Some(key)
} else {
None
@@ -213,7 +213,7 @@ pub fn encrypt<
async move {
if !encrypted
&& metadata
.get(&rpc_req.method.as_str(), "authenticated")
.get(rpc_req.method.as_str(), "authenticated")
.unwrap_or(true)
{
let (res_parts, _) = Response::new(()).into_parts();

View File

@@ -1,4 +1,5 @@
pub mod auth;
pub mod cors;
pub mod db;
pub mod diagnostic;
pub mod encrypt;

View File

@@ -3,14 +3,14 @@ use std::net::Ipv4Addr;
use avahi_sys::{
self, avahi_client_errno, avahi_entry_group_add_service, avahi_entry_group_commit,
avahi_entry_group_free, avahi_entry_group_reset, avahi_free, avahi_strerror, AvahiClient,
AvahiEntryGroup,
avahi_entry_group_free, avahi_free, avahi_strerror, AvahiClient, AvahiEntryGroup,
};
use color_eyre::eyre::eyre;
use libc::c_void;
use tokio::process::Command;
use tokio::sync::Mutex;
use torut::onion::TorSecretKeyV3;
use tracing::instrument;
use super::interface::InterfaceId;
use crate::s9pk::manifest::PackageId;
@@ -59,17 +59,64 @@ impl MdnsController {
}
pub struct MdnsControllerInner {
hostname: Vec<u8>,
hostname_raw: *const libc::c_char,
entry_group: *mut AvahiEntryGroup,
entry_group: Option<MdnsEntryGroup>,
services: BTreeMap<(PackageId, InterfaceId), TorSecretKeyV3>,
_client_error: std::pin::Pin<Box<i32>>,
}
unsafe impl Send for MdnsControllerInner {}
unsafe impl Sync for MdnsControllerInner {}
impl MdnsControllerInner {
fn load_services(&mut self) {
fn init() -> Self {
MdnsControllerInner {
entry_group: Some(MdnsEntryGroup::init(&BTreeMap::new())),
services: BTreeMap::new(),
}
}
fn sync(&mut self) {
drop(self.entry_group.take());
self.entry_group = Some(MdnsEntryGroup::init(&self.services));
}
fn add<'a, I: IntoIterator<Item = (InterfaceId, TorSecretKeyV3)>>(
&mut self,
pkg_id: &PackageId,
interfaces: I,
) {
self.services.extend(
interfaces
.into_iter()
.map(|(interface_id, key)| ((pkg_id.clone(), interface_id), key)),
);
self.sync();
}
fn remove<I: IntoIterator<Item = InterfaceId>>(&mut self, pkg_id: &PackageId, interfaces: I) {
for interface_id in interfaces {
self.services.remove(&(pkg_id.clone(), interface_id));
}
self.sync();
}
fn free(&self) {}
}
fn log_str_error(action: &str, e: i32) {
unsafe {
let e_str = avahi_strerror(e);
tracing::error!(
"Could not {}: {:?}",
action,
std::ffi::CStr::from_ptr(e_str)
);
}
}
struct MdnsEntryGroup {
hostname: Vec<u8>,
hostname_raw: *const libc::c_char,
entry_group: *mut AvahiEntryGroup,
_client_error: std::pin::Pin<Box<i32>>,
}
impl MdnsEntryGroup {
#[instrument(skip(self))]
fn load_services(&mut self, services: &BTreeMap<(PackageId, InterfaceId), TorSecretKeyV3>) {
unsafe {
tracing::debug!("Loading services for mDNS");
let mut res;
@@ -101,7 +148,7 @@ impl MdnsControllerInner {
"Published {:?}",
std::ffi::CStr::from_ptr(self.hostname_raw)
);
for key in self.services.values() {
for key in services.values() {
let lan_address = key
.public()
.get_onion_address()
@@ -131,9 +178,10 @@ impl MdnsControllerInner {
}
}
}
fn init() -> Self {
fn init(services: &BTreeMap<(PackageId, InterfaceId), TorSecretKeyV3>) -> Self {
unsafe {
tracing::debug!("Initializing mDNS controller");
let simple_poll = avahi_sys::avahi_simple_poll_new();
let poll = avahi_sys::avahi_simple_poll_get(simple_poll);
let mut box_err = Box::pin(0 as i32);
@@ -168,15 +216,13 @@ impl MdnsControllerInner {
// assume fixed length prefix on hostname due to local address
hostname_buf[0] = (buflen - 8) as u8; // set the prefix length to len - 8 (leading byte, .local, nul) for the main address
hostname_buf[buflen - 7] = 5; // set the prefix length to 5 for "local"
let mut res = MdnsControllerInner {
let mut res = MdnsEntryGroup {
hostname: hostname_buf,
hostname_raw,
entry_group: group,
services: BTreeMap::new(),
_client_error: box_err,
};
res.load_services();
res.load_services(services);
let commit_err = avahi_entry_group_commit(res.entry_group);
if commit_err < avahi_sys::AVAHI_OK {
log_str_error("reset Avahi entry group", commit_err);
@@ -185,62 +231,17 @@ impl MdnsControllerInner {
res
}
}
fn sync(&mut self) {
unsafe {
let mut res;
res = avahi_entry_group_reset(self.entry_group);
if res < avahi_sys::AVAHI_OK {
log_str_error("reset Avahi entry group", res);
panic!("Failed to load Avahi services: reset");
}
self.load_services();
res = avahi_entry_group_commit(self.entry_group);
if res < avahi_sys::AVAHI_OK {
log_str_error("commit Avahi entry group", res);
panic!("Failed to load Avahi services: commit");
}
}
}
fn add<'a, I: IntoIterator<Item = (InterfaceId, TorSecretKeyV3)>>(
&mut self,
pkg_id: &PackageId,
interfaces: I,
) {
self.services.extend(
interfaces
.into_iter()
.map(|(interface_id, key)| ((pkg_id.clone(), interface_id), key)),
);
self.sync();
}
fn remove<I: IntoIterator<Item = InterfaceId>>(&mut self, pkg_id: &PackageId, interfaces: I) {
for interface_id in interfaces {
self.services.remove(&(pkg_id.clone(), interface_id));
}
self.sync();
}
}
impl Drop for MdnsControllerInner {
impl Drop for MdnsEntryGroup {
fn drop(&mut self) {
unsafe {
avahi_free(self.hostname_raw as *mut c_void);
avahi_entry_group_free(self.entry_group);
// avahi_client_free(self.avahi_client);
}
}
}
fn log_str_error(action: &str, e: i32) {
unsafe {
let e_str = avahi_strerror(e);
tracing::error!(
"Could not {}: {:?}",
action,
std::ffi::CStr::from_ptr(e_str)
);
avahi_free(e_str as *mut c_void);
}
}
unsafe extern "C" fn entry_group_callback(
_group: *mut avahi_sys::AvahiEntryGroup,
state: avahi_sys::AvahiEntryGroupState,

View File

@@ -3,6 +3,7 @@ use std::path::PathBuf;
use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
use patch_db::DbHandle;
use rpc_toolkit::command;
use sqlx::PgPool;
use torut::onion::{OnionAddressV3, TorSecretKeyV3};
@@ -14,6 +15,7 @@ use self::mdns::MdnsController;
use self::nginx::NginxController;
use self::ssl::SslManager;
use self::tor::TorController;
use crate::hostname::get_hostname;
use crate::net::dns::DnsController;
use crate::net::interface::TorConfig;
use crate::net::nginx::InterfaceMetadata;
@@ -50,24 +52,26 @@ pub struct NetController {
pub dns: DnsController,
}
impl NetController {
#[instrument(skip(db))]
pub async fn init(
#[instrument(skip(db, handle))]
pub async fn init<Db: DbHandle>(
embassyd_addr: SocketAddr,
embassyd_tor_key: TorSecretKeyV3,
tor_control: SocketAddr,
dns_bind: &[SocketAddr],
db: PgPool,
import_root_ca: Option<(PKey<Private>, X509)>,
handle: &mut Db,
) -> Result<Self, Error> {
let ssl = match import_root_ca {
None => SslManager::init(db).await,
None => SslManager::init(db, handle).await,
Some(a) => SslManager::import_root_ca(db, a.0, a.1).await,
}?;
let hostname = get_hostname(handle).await?;
Ok(Self {
tor: TorController::init(embassyd_addr, embassyd_tor_key, tor_control).await?,
#[cfg(feature = "avahi")]
mdns: MdnsController::init(),
nginx: NginxController::init(PathBuf::from("/etc/nginx"), &ssl).await?,
nginx: NginxController::init(PathBuf::from("/etc/nginx"), &ssl, &hostname).await?,
ssl,
dns: DnsController::init(dns_bind).await?,
})

View File

@@ -9,7 +9,7 @@ use tracing::instrument;
use super::interface::{InterfaceId, LanPortConfig};
use super::ssl::SslManager;
use crate::hostname::get_hostname;
use crate::hostname::Hostname;
use crate::s9pk::manifest::PackageId;
use crate::util::serde::Port;
use crate::util::Invoke;
@@ -20,9 +20,15 @@ pub struct NginxController {
inner: Mutex<NginxControllerInner>,
}
impl NginxController {
pub async fn init(nginx_root: PathBuf, ssl_manager: &SslManager) -> Result<Self, Error> {
pub async fn init(
nginx_root: PathBuf,
ssl_manager: &SslManager,
host_name: &Hostname,
) -> Result<Self, Error> {
Ok(NginxController {
inner: Mutex::new(NginxControllerInner::init(&nginx_root, ssl_manager).await?),
inner: Mutex::new(
NginxControllerInner::init(&nginx_root, ssl_manager, host_name).await?,
),
nginx_root,
})
}
@@ -53,13 +59,17 @@ pub struct NginxControllerInner {
}
impl NginxControllerInner {
#[instrument]
async fn init(nginx_root: &Path, ssl_manager: &SslManager) -> Result<Self, Error> {
async fn init(
nginx_root: &Path,
ssl_manager: &SslManager,
host_name: &Hostname,
) -> Result<Self, Error> {
let inner = NginxControllerInner {
interfaces: BTreeMap::new(),
};
// write main ssl key/cert to fs location
let (key, cert) = ssl_manager
.certificate_for(&get_hostname().await?, &"embassy".parse().unwrap())
.certificate_for(&host_name.lan_address(), &"embassy".parse().unwrap())
.await?;
let ssl_path_key = nginx_root.join(format!("ssl/embassy_main.key.pem"));
let ssl_path_cert = nginx_root.join(format!("ssl/embassy_main.cert.pem"));

View File

@@ -11,6 +11,7 @@ use openssl::nid::Nid;
use openssl::pkey::{PKey, Private};
use openssl::x509::{X509Builder, X509Extension, X509NameBuilder, X509};
use openssl::*;
use patch_db::DbHandle;
use sqlx::PgPool;
use tokio::process::Command;
use tokio::sync::Mutex;
@@ -161,13 +162,14 @@ lazy_static::lazy_static! {
}
impl SslManager {
#[instrument(skip(db))]
pub async fn init(db: PgPool) -> Result<Self, Error> {
#[instrument(skip(db, handle))]
pub async fn init<Db: DbHandle>(db: PgPool, handle: &mut Db) -> Result<Self, Error> {
let store = SslStore::new(db)?;
let id = crate::hostname::get_id(handle).await?;
let (root_key, root_cert) = match store.load_root_certificate().await? {
None => {
let root_key = generate_key()?;
let server_id = crate::hostname::get_id().await?;
let server_id = id;
let root_cert = make_root_cert(&root_key, &server_id)?;
store.save_root_certificate(&root_key, &root_cert).await?;
Ok::<_, Error>((root_key, root_cert))
@@ -511,56 +513,56 @@ fn make_leaf_cert(
Ok(cert)
}
#[tokio::test]
async fn ca_details_persist() -> Result<(), Error> {
let pool = sqlx::Pool::<sqlx::Postgres>::connect("postgres::memory:").await?;
sqlx::migrate!()
.run(&pool)
.await
.with_kind(crate::ErrorKind::Database)?;
let mgr = SslManager::init(pool.clone()).await?;
let root_cert0 = mgr.root_cert;
let int_key0 = mgr.int_key;
let int_cert0 = mgr.int_cert;
let mgr = SslManager::init(pool).await?;
let root_cert1 = mgr.root_cert;
let int_key1 = mgr.int_key;
let int_cert1 = mgr.int_cert;
assert_eq!(root_cert0.to_pem()?, root_cert1.to_pem()?);
assert_eq!(
int_key0.private_key_to_pem_pkcs8()?,
int_key1.private_key_to_pem_pkcs8()?
);
assert_eq!(int_cert0.to_pem()?, int_cert1.to_pem()?);
Ok(())
}
#[tokio::test]
async fn certificate_details_persist() -> Result<(), Error> {
let pool = sqlx::Pool::<sqlx::Postgres>::connect("postgres::memory:").await?;
sqlx::migrate!()
.run(&pool)
.await
.with_kind(crate::ErrorKind::Database)?;
let mgr = SslManager::init(pool.clone()).await?;
let package_id = "bitcoind".parse().unwrap();
let (key0, cert_chain0) = mgr.certificate_for("start9", &package_id).await?;
let (key1, cert_chain1) = mgr.certificate_for("start9", &package_id).await?;
assert_eq!(
key0.private_key_to_pem_pkcs8()?,
key1.private_key_to_pem_pkcs8()?
);
assert_eq!(
cert_chain0
.iter()
.map(|cert| cert.to_pem().unwrap())
.collect::<Vec<Vec<u8>>>(),
cert_chain1
.iter()
.map(|cert| cert.to_pem().unwrap())
.collect::<Vec<Vec<u8>>>()
);
Ok(())
}
// #[tokio::test]
// async fn ca_details_persist() -> Result<(), Error> {
// let pool = sqlx::Pool::<sqlx::Postgres>::connect("postgres::memory:").await?;
// sqlx::migrate!()
// .run(&pool)
// .await
// .with_kind(crate::ErrorKind::Database)?;
// let mgr = SslManager::init(pool.clone()).await?;
// let root_cert0 = mgr.root_cert;
// let int_key0 = mgr.int_key;
// let int_cert0 = mgr.int_cert;
// let mgr = SslManager::init(pool).await?;
// let root_cert1 = mgr.root_cert;
// let int_key1 = mgr.int_key;
// let int_cert1 = mgr.int_cert;
//
// assert_eq!(root_cert0.to_pem()?, root_cert1.to_pem()?);
// assert_eq!(
// int_key0.private_key_to_pem_pkcs8()?,
// int_key1.private_key_to_pem_pkcs8()?
// );
// assert_eq!(int_cert0.to_pem()?, int_cert1.to_pem()?);
// Ok(())
// }
//
// #[tokio::test]
// async fn certificate_details_persist() -> Result<(), Error> {
// let pool = sqlx::Pool::<sqlx::Postgres>::connect("postgres::memory:").await?;
// sqlx::migrate!()
// .run(&pool)
// .await
// .with_kind(crate::ErrorKind::Database)?;
// let mgr = SslManager::init(pool.clone()).await?;
// let package_id = "bitcoind".parse().unwrap();
// let (key0, cert_chain0) = mgr.certificate_for("start9", &package_id).await?;
// let (key1, cert_chain1) = mgr.certificate_for("start9", &package_id).await?;
//
// assert_eq!(
// key0.private_key_to_pem_pkcs8()?,
// key1.private_key_to_pem_pkcs8()?
// );
// assert_eq!(
// cert_chain0
// .iter()
// .map(|cert| cert.to_pem().unwrap())
// .collect::<Vec<Vec<u8>>>(),
// cert_chain1
// .iter()
// .map(|cert| cert.to_pem().unwrap())
// .collect::<Vec<Vec<u8>>>()
// );
// Ok(())
// }

View File

@@ -16,6 +16,8 @@ server {{
server_name .{lan_hostname};
proxy_buffers 4 512k;
proxy_buffer_size 512k;
proxy_buffering off;
proxy_request_buffering off;
proxy_socket_keepalive on;
@@ -71,6 +73,8 @@ server {{
server_name .{tor_hostname};
proxy_buffers 4 512k;
proxy_buffer_size 512k;
proxy_buffering off;
proxy_request_buffering off;
proxy_socket_keepalive on;

View File

@@ -12,7 +12,6 @@ use tracing::instrument;
use crate::backup::BackupReport;
use crate::context::RpcContext;
use crate::db::util::WithRevision;
use crate::s9pk::manifest::PackageId;
use crate::util::display_none;
use crate::util::serde::display_serializable;
@@ -29,7 +28,7 @@ pub async fn list(
#[context] ctx: RpcContext,
#[arg] before: Option<i32>,
#[arg] limit: Option<u32>,
) -> Result<WithRevision<Vec<Notification>>, Error> {
) -> Result<Vec<Notification>, Error> {
let limit = limit.unwrap_or(40);
let mut handle = ctx.db.handle();
match before {
@@ -72,11 +71,8 @@ pub async fn list(
})
.collect::<Result<Vec<Notification>, Error>>()?;
// set notification count to zero
let r = model.put(&mut handle, &0).await?;
Ok(WithRevision {
response: notifs,
revision: r,
})
model.put(&mut handle, &0).await?;
Ok(notifs)
}
Some(before) => {
let records = sqlx::query!(
@@ -113,10 +109,7 @@ pub async fn list(
})
})
.collect::<Result<Vec<Notification>, Error>>()?;
Ok(WithRevision {
response: res,
revision: None,
})
Ok(res)
}
}
}

View File

@@ -10,6 +10,7 @@ use digest::generic_array::GenericArray;
use digest::OutputSizeUser;
use futures::future::BoxFuture;
use futures::{FutureExt, TryFutureExt, TryStreamExt};
use josekit::jwk::Jwk;
use nix::unistd::{Gid, Uid};
use openssl::x509::X509;
use patch_db::{DbHandle, LockType};
@@ -37,7 +38,7 @@ use crate::disk::mount::filesystem::ReadOnly;
use crate::disk::mount::guard::TmpMountGuard;
use crate::disk::util::{pvscan, recovery_info, DiskListResponse, EmbassyOsRecoveryInfo};
use crate::disk::REPAIR_DISK_PATH;
use crate::hostname::PRODUCT_KEY_PATH;
use crate::hostname::{get_hostname, Hostname};
use crate::id::Id;
use crate::init::init;
use crate::install::PKG_PUBLIC_DIR;
@@ -62,7 +63,7 @@ where
Ok(password)
}
#[command(subcommands(status, disk, attach, execute, recovery, cifs, complete))]
#[command(subcommands(status, disk, attach, execute, recovery, cifs, complete, get_secret))]
pub fn setup() -> Result<(), Error> {
Ok(())
}
@@ -70,14 +71,12 @@ pub fn setup() -> Result<(), Error> {
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct StatusRes {
product_key: bool,
migrating: bool,
}
#[command(rpc_only, metadata(authenticated = false))]
pub async fn status(#[context] ctx: SetupContext) -> Result<StatusRes, Error> {
Ok(StatusRes {
product_key: tokio::fs::metadata(PRODUCT_KEY_PATH).await.is_ok(),
migrating: ctx.recovery_status.read().await.is_some(),
})
}
@@ -123,31 +122,7 @@ pub async fn attach(
ErrorKind::DiskManagement,
));
}
let product_key = ctx.product_key().await?;
let product_key_path = Path::new("/embassy-data/main/product_key.txt");
if tokio::fs::metadata(product_key_path).await.is_ok() {
let pkey = Arc::new(
tokio::fs::read_to_string(product_key_path)
.await?
.trim()
.to_owned(),
);
if pkey != product_key {
crate::disk::main::export(&*guid, &ctx.datadir).await?;
return Err(Error::new(
eyre!(
"The EmbassyOS product key does not match the supplied drive: {}",
pkey
),
ErrorKind::ProductKeyMismatch,
));
}
}
init(
&RpcContextConfig::load(ctx.config_path.as_ref()).await?,
&*product_key,
)
.await?;
init(&RpcContextConfig::load(ctx.config_path.as_ref()).await?).await?;
let secrets = ctx.secret_store().await?;
let db = ctx.db(&secrets).await?;
let mut secrets_handle = secrets.acquire().await?;
@@ -168,16 +143,17 @@ pub async fn attach(
let tor_key = crate::net::tor::os_key(&mut secrets_tx).await?;
db_tx.commit(None).await?;
db_tx.commit().await?;
secrets_tx.commit().await?;
let hostname = get_hostname(&mut db_handle).await?;
let (_, root_ca) = SslManager::init(secrets).await?.export_root_ca().await?;
let (_, root_ca) = SslManager::init(secrets, &mut db_handle)
.await?
.export_root_ca()
.await?;
let setup_result = SetupResult {
tor_address: format!("http://{}", tor_key.public().get_onion_address()),
lan_address: format!(
"https://embassy-{}.local",
crate::hostname::derive_id(&*product_key)
),
lan_address: hostname.lan_address(),
root_ca: String::from_utf8(root_ca.to_pem()?)?,
};
*ctx.setup_result.write().await = Some((guid, setup_result.clone()));
@@ -222,6 +198,29 @@ pub async fn recovery_status(
ctx.recovery_status.read().await.clone().transpose()
}
/// We want to be able to get a secret, a shared private key with the frontend
/// This way the frontend can send a secret, like the password for the setup/ recovory
/// without knowing the password over clearnet. We use the public key shared across the network
/// since it is fine to share the public, and encrypt against the public.
#[command(rename = "get-secret", rpc_only, metadata(authenticated = false))]
pub async fn get_secret(
#[context] ctx: SetupContext,
#[arg] pubkey: Jwk,
) -> Result<String, RpcError> {
let secret = ctx.update_secret().await?;
let mut header = josekit::jwe::JweHeader::new();
header.set_algorithm("ECDH-ES");
header.set_content_encryption("A256GCM");
let encrypter = josekit::jwe::alg::ecdh_es::EcdhEsJweAlgorithm::EcdhEs
.encrypter_from_jwk(&pubkey)
.unwrap();
Ok(josekit::jwe::serialize_compact(secret.as_bytes(), &header, &encrypter).unwrap())
// Need to encrypt from the public key sent
// then encode via hex
}
#[command(subcommands(verify_cifs))]
pub fn cifs() -> Result<(), Error> {
Ok(())
@@ -269,14 +268,11 @@ pub async fn execute(
)
.await
{
Ok((tor_addr, root_ca)) => {
Ok((hostname, tor_addr, root_ca)) => {
tracing::info!("Setup Successful! Tor Address: {}", tor_addr);
Ok(SetupResult {
tor_address: format!("http://{}", tor_addr),
lan_address: format!(
"https://embassy-{}.local",
crate::hostname::derive_id(&ctx.product_key().await?)
),
lan_address: hostname.lan_address(),
root_ca: String::from_utf8(root_ca.to_pem()?)?,
})
}
@@ -299,33 +295,14 @@ pub async fn complete(#[context] ctx: SetupContext) -> Result<SetupResult, Error
crate::ErrorKind::InvalidRequest,
));
};
if tokio::fs::metadata(PRODUCT_KEY_PATH).await.is_err() {
crate::hostname::set_product_key(&*ctx.product_key().await?).await?;
} else {
let key_on_disk = crate::hostname::get_product_key().await?;
let key_in_cache = ctx.product_key().await?;
if *key_in_cache != key_on_disk {
crate::hostname::set_product_key(&*ctx.product_key().await?).await?;
}
}
tokio::fs::write(
Path::new("/embassy-data/main/product_key.txt"),
&*ctx.product_key().await?,
)
.await?;
let secrets = ctx.secret_store().await?;
let mut db = ctx.db(&secrets).await?.handle();
let hostname = crate::hostname::get_hostname().await?;
let hostname = crate::hostname::get_hostname(&mut db).await?;
let si = crate::db::DatabaseModel::new().server_info();
si.clone()
.id()
.put(&mut db, &crate::hostname::get_id().await?)
.await?;
let id = crate::hostname::get_id(&mut db).await?;
si.clone().id().put(&mut db, &id).await?;
si.lan_address()
.put(
&mut db,
&format!("https://{}.local", &hostname).parse().unwrap(),
)
.put(&mut db, &hostname.lan_address().parse().unwrap())
.await?;
let mut guid_file = File::create("/embassy-os/disk.guid").await?;
guid_file.write_all(guid.as_bytes()).await?;
@@ -341,7 +318,7 @@ pub async fn execute_inner(
embassy_password: String,
recovery_source: Option<BackupTargetFS>,
recovery_password: Option<String>,
) -> Result<(OnionAddressV3, X509), Error> {
) -> Result<(Hostname, OnionAddressV3, X509), Error> {
if ctx.recovery_status.read().await.is_some() {
return Err(Error::new(
eyre!("Cannot execute setup while in recovery!"),
@@ -374,12 +351,11 @@ pub async fn execute_inner(
recovery_password,
)
.await?;
init(
&RpcContextConfig::load(ctx.config_path.as_ref()).await?,
&ctx.product_key().await?,
)
.await?;
let res = (tor_addr, root_ca.clone());
let db = init(&RpcContextConfig::load(ctx.config_path.as_ref()).await?)
.await?
.db;
let hostname = get_hostname(&mut db.handle()).await?;
let res = (hostname.clone(), tor_addr, root_ca.clone());
tokio::spawn(async move {
if let Err(e) = recover_fut
.and_then(|_| async {
@@ -387,10 +363,7 @@ pub async fn execute_inner(
guid,
SetupResult {
tor_address: format!("http://{}", tor_addr),
lan_address: format!(
"https://embassy-{}.local",
crate::hostname::derive_id(&ctx.product_key().await?)
),
lan_address: hostname.lan_address(),
root_ca: String::from_utf8(root_ca.to_pem()?)?,
},
));
@@ -412,23 +385,19 @@ pub async fn execute_inner(
res
} else {
let (tor_addr, root_ca) = fresh_setup(&ctx, &embassy_password).await?;
init(
&RpcContextConfig::load(ctx.config_path.as_ref()).await?,
&ctx.product_key().await?,
)
.await?;
let db = init(&RpcContextConfig::load(ctx.config_path.as_ref()).await?)
.await?
.db;
*ctx.setup_result.write().await = Some((
guid,
SetupResult {
tor_address: format!("http://{}", tor_addr),
lan_address: format!(
"https://embassy-{}.local",
crate::hostname::derive_id(&ctx.product_key().await?)
),
lan_address: get_hostname(&mut db.handle()).await?.lan_address(),
root_ca: String::from_utf8(root_ca.to_pem()?)?,
},
));
(tor_addr, root_ca)
let hostname = get_hostname(&mut db.handle()).await?;
(hostname, tor_addr, root_ca)
};
Ok(res)
@@ -455,7 +424,8 @@ async fn fresh_setup(
)
.execute(&mut sqlite_pool.acquire().await?)
.await?;
let (_, root_ca) = SslManager::init(sqlite_pool.clone())
let db = ctx.db(&sqlite_pool).await?;
let (_, root_ca) = SslManager::init(sqlite_pool.clone(), &mut db.handle())
.await?
.export_root_ca()
.await?;

View File

@@ -24,7 +24,6 @@ use tracing::instrument;
use crate::context::RpcContext;
use crate::db::model::UpdateProgress;
use crate::db::util::WithRevision;
use crate::disk::mount::filesystem::block_dev::BlockDev;
use crate::disk::mount::filesystem::{FileSystem, ReadWrite};
use crate::disk::mount::guard::TmpMountGuard;
@@ -46,26 +45,24 @@ lazy_static! {
/// An user/ daemon would call this to update the system to the latest version and do the updates available,
/// and this will return something if there is an update, and in that case there will need to be a restart.
#[command(rename = "update", display(display_update_result))]
#[command(
rename = "update",
display(display_update_result),
metadata(sync_db = true)
)]
#[instrument(skip(ctx))]
pub async fn update_system(
#[context] ctx: RpcContext,
#[arg(rename = "marketplace-url")] marketplace_url: Url,
) -> Result<WithRevision<UpdateResult>, Error> {
let noop = WithRevision {
response: UpdateResult::NoUpdates,
revision: None,
};
) -> Result<UpdateResult, Error> {
if UPDATED.load(Ordering::SeqCst) {
return Ok(noop);
}
match maybe_do_update(ctx, marketplace_url).await? {
None => Ok(noop),
Some(r) => Ok(WithRevision {
response: UpdateResult::Updating,
revision: Some(r),
}),
return Ok(UpdateResult::NoUpdates);
}
Ok(if maybe_do_update(ctx, marketplace_url).await?.is_some() {
UpdateResult::Updating
} else {
UpdateResult::NoUpdates
})
}
/// What is the status of the updates?
@@ -76,8 +73,8 @@ pub enum UpdateResult {
Updating,
}
fn display_update_result(status: WithRevision<UpdateResult>, _: &ArgMatches) {
match status.response {
fn display_update_result(status: UpdateResult, _: &ArgMatches) {
match status {
UpdateResult::Updating => {
println!("Updating...");
}
@@ -190,7 +187,7 @@ async fn maybe_do_update(
downloaded: 0,
});
status.save(&mut tx).await?;
let rev = tx.commit(None).await?;
let rev = tx.commit().await?;
tokio::spawn(async move {
let mut db = ctx.db.handle();

View File

@@ -14,8 +14,9 @@ mod v0_3_0_2;
mod v0_3_0_3;
mod v0_3_1;
mod v0_3_1_1;
mod v0_3_1_2;
pub type Current = v0_3_1_1::Version;
pub type Current = v0_3_1_2::Version;
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)]
#[serde(untagged)]
@@ -26,6 +27,7 @@ enum Version {
V0_3_0_3(Wrapper<v0_3_0_3::Version>),
V0_3_1(Wrapper<v0_3_1::Version>),
V0_3_1_1(Wrapper<v0_3_1_1::Version>),
V0_3_1_2(Wrapper<v0_3_1_2::Version>),
Other(emver::Version),
}
@@ -47,6 +49,7 @@ impl Version {
Version::V0_3_0_3(Wrapper(x)) => x.semver(),
Version::V0_3_1(Wrapper(x)) => x.semver(),
Version::V0_3_1_1(Wrapper(x)) => x.semver(),
Version::V0_3_1_2(Wrapper(x)) => x.semver(),
Version::Other(x) => x.clone(),
}
}
@@ -179,6 +182,7 @@ pub async fn init<Db: DbHandle>(
Version::V0_3_0_3(v) => v.0.migrate_to(&Current::new(), db, receipts).await?,
Version::V0_3_1(v) => v.0.migrate_to(&Current::new(), db, receipts).await?,
Version::V0_3_1_1(v) => v.0.migrate_to(&Current::new(), db, receipts).await?,
Version::V0_3_1_2(v) => v.0.migrate_to(&Current::new(), db, receipts).await?,
Version::Other(_) => {
return Err(Error::new(
eyre!("Cannot downgrade"),

View File

@@ -0,0 +1,61 @@
use emver::VersionRange;
use crate::hostname::{generate_id, get_hostname, sync_hostname};
use super::v0_3_0::V0_3_0_COMPAT;
use super::*;
const V0_3_1_2: emver::Version = emver::Version::new(0, 3, 1, 2);
/// Marker type for the 0.3.1.2 schema version; the actual migration
/// behavior lives in the `VersionT` impl below.
#[derive(Clone, Debug)]
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_3_1_1::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> emver::Version {
        V0_3_1_2
    }
    fn compat(&self) -> &'static VersionRange {
        // Shares the 0.3.0 compatibility range with the other 0.3.x versions.
        &*V0_3_0_COMPAT
    }
    /// Forward migration to 0.3.1.2:
    /// - persists the current hostname into `server-info.hostname`,
    /// - stores a freshly generated id into `server-info.id`,
    /// - re-syncs the OS hostname from the DB,
    /// - seeds an empty `ack-instructions` map in the `ui` document.
    async fn up<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error> {
        let hostname = get_hostname(db).await?;
        crate::db::DatabaseModel::new()
            .server_info()
            .hostname()
            // hostname.0 is the inner string of the Hostname newtype
            .put(db, &Some(hostname.0))
            .await?;
        crate::db::DatabaseModel::new()
            .server_info()
            .id()
            .put(db, &generate_id())
            .await?;
        sync_hostname(db).await?;
        // `ui` is free-form JSON; only mutate it if it is an object.
        let mut ui = crate::db::DatabaseModel::new()
            .ui()
            .get(db, false)
            .await?
            .clone();
        if let serde_json::Value::Object(ref mut ui) = ui {
            // NOTE(review): overwrites any pre-existing "ack-instructions"
            // value with an empty object — presumably none exists before
            // this version; confirm if re-running the migration is possible.
            ui.insert("ack-instructions".to_string(), serde_json::json!({}));
        }
        crate::db::DatabaseModel::new().ui().put(db, &ui).await?;
        Ok(())
    }
    /// Rollback to 0.3.1.1: drop the `ack-instructions` key from `ui`.
    /// Hostname/id changes made by `up` are intentionally left in place.
    async fn down<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error> {
        let mut ui = crate::db::DatabaseModel::new()
            .ui()
            .get(db, false)
            .await?
            .clone();
        if let serde_json::Value::Object(ref mut ui) = ui {
            ui.remove("ack-instructions");
        }
        crate::db::DatabaseModel::new().ui().put(db, &ui).await?;
        Ok(())
    }
}