Feature/remove postgres (#2570)

* wip: move postgres data to patchdb

* wip

* wip

* wip

* complete notifications and clean up warnings

* fill in user agent

* move os tor bindings to single call
This commit is contained in:
Aiden McClelland
2024-03-07 14:40:22 -07:00
committed by GitHub
parent a17ec4221b
commit e0c9f8a5aa
70 changed files with 2429 additions and 2383 deletions

View File

@@ -1,15 +1,14 @@
use std::time::SystemTime;
use ed25519_dalek::SecretKey;
use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
use sqlx::PgExecutor;
use torut::onion::TorSecretKeyV3;
use crate::db::model::DatabaseModel;
use crate::hostname::{generate_hostname, generate_id, Hostname};
use crate::net::keys::Key;
use crate::net::ssl::{generate_key, make_root_cert};
use crate::prelude::*;
use crate::util::crypto::ed25519_expand_key;
use crate::util::serde::Pem;
fn hash_password(password: &str) -> Result<String, Error> {
argon2::hash_encoded(
@@ -25,103 +24,83 @@ pub struct AccountInfo {
pub server_id: String,
pub hostname: Hostname,
pub password: String,
pub key: Key,
pub tor_key: TorSecretKeyV3,
pub root_ca_key: PKey<Private>,
pub root_ca_cert: X509,
pub ssh_key: ssh_key::PrivateKey,
}
impl AccountInfo {
pub fn new(password: &str, start_time: SystemTime) -> Result<Self, Error> {
let server_id = generate_id();
let hostname = generate_hostname();
let tor_key = TorSecretKeyV3::generate();
let root_ca_key = generate_key()?;
let root_ca_cert = make_root_cert(&root_ca_key, &hostname, start_time)?;
let ssh_key = ssh_key::PrivateKey::from(ssh_key::private::Ed25519Keypair::random(
&mut rand::thread_rng(),
));
Ok(Self {
server_id,
hostname,
password: hash_password(password)?,
key: Key::new(None),
tor_key,
root_ca_key,
root_ca_cert,
ssh_key,
})
}
pub async fn load(secrets: impl PgExecutor<'_>) -> Result<Self, Error> {
let r = sqlx::query!("SELECT * FROM account WHERE id = 0")
.fetch_one(secrets)
.await?;
let server_id = r.server_id.unwrap_or_else(generate_id);
let hostname = r.hostname.map(Hostname).unwrap_or_else(generate_hostname);
let password = r.password;
let network_key = SecretKey::try_from(r.network_key).map_err(|e| {
Error::new(
eyre!("expected vec of len 32, got len {}", e.len()),
ErrorKind::ParseDbField,
)
})?;
let tor_key = if let Some(k) = &r.tor_key {
<[u8; 64]>::try_from(&k[..]).map_err(|_| {
Error::new(
eyre!("expected vec of len 64, got len {}", k.len()),
ErrorKind::ParseDbField,
)
})?
} else {
ed25519_expand_key(&network_key)
};
let key = Key::from_pair(None, network_key, tor_key);
let root_ca_key = PKey::private_key_from_pem(r.root_ca_key_pem.as_bytes())?;
let root_ca_cert = X509::from_pem(r.root_ca_cert_pem.as_bytes())?;
pub fn load(db: &DatabaseModel) -> Result<Self, Error> {
let server_id = db.as_public().as_server_info().as_id().de()?;
let hostname = Hostname(db.as_public().as_server_info().as_hostname().de()?);
let password = db.as_private().as_password().de()?;
let key_store = db.as_private().as_key_store();
let tor_addr = db.as_public().as_server_info().as_onion_address().de()?;
let tor_key = key_store.as_onion().get_key(&tor_addr)?;
let cert_store = key_store.as_local_certs();
let root_ca_key = cert_store.as_root_key().de()?.0;
let root_ca_cert = cert_store.as_root_cert().de()?.0;
let ssh_key = db.as_private().as_ssh_privkey().de()?.0;
Ok(Self {
server_id,
hostname,
password,
key,
tor_key,
root_ca_key,
root_ca_cert,
ssh_key,
})
}
pub async fn save(&self, secrets: impl PgExecutor<'_>) -> Result<(), Error> {
let server_id = self.server_id.as_str();
let hostname = self.hostname.0.as_str();
let password = self.password.as_str();
let network_key = self.key.as_bytes();
let network_key = network_key.as_slice();
let root_ca_key = String::from_utf8(self.root_ca_key.private_key_to_pem_pkcs8()?)?;
let root_ca_cert = String::from_utf8(self.root_ca_cert.to_pem()?)?;
sqlx::query!(
r#"
INSERT INTO account (
id,
server_id,
hostname,
password,
network_key,
root_ca_key_pem,
root_ca_cert_pem
) VALUES (
0, $1, $2, $3, $4, $5, $6
) ON CONFLICT (id) DO UPDATE SET
server_id = EXCLUDED.server_id,
hostname = EXCLUDED.hostname,
password = EXCLUDED.password,
network_key = EXCLUDED.network_key,
root_ca_key_pem = EXCLUDED.root_ca_key_pem,
root_ca_cert_pem = EXCLUDED.root_ca_cert_pem
"#,
server_id,
hostname,
password,
network_key,
root_ca_key,
root_ca_cert,
)
.execute(secrets)
.await?;
pub fn save(&self, db: &mut DatabaseModel) -> Result<(), Error> {
let server_info = db.as_public_mut().as_server_info_mut();
server_info.as_id_mut().ser(&self.server_id)?;
server_info.as_hostname_mut().ser(&self.hostname.0)?;
server_info
.as_lan_address_mut()
.ser(&self.hostname.lan_address().parse()?)?;
server_info
.as_pubkey_mut()
.ser(&self.ssh_key.public_key().to_openssh()?)?;
let onion_address = self.tor_key.public().get_onion_address();
server_info.as_onion_address_mut().ser(&onion_address)?;
server_info
.as_tor_address_mut()
.ser(&format!("https://{onion_address}").parse()?)?;
db.as_private_mut().as_password_mut().ser(&self.password)?;
db.as_private_mut()
.as_ssh_privkey_mut()
.ser(Pem::new_ref(&self.ssh_key))?;
let key_store = db.as_private_mut().as_key_store_mut();
key_store.as_onion_mut().insert_key(&self.tor_key)?;
let cert_store = key_store.as_local_certs_mut();
cert_store
.as_root_key_mut()
.ser(Pem::new_ref(&self.root_ca_key))?;
cert_store
.as_root_cert_mut()
.ser(Pem::new_ref(&self.root_ca_cert))?;
Ok(())
}

View File

@@ -1,17 +1,17 @@
use std::collections::BTreeMap;
use chrono::{DateTime, Utc};
use clap::{ArgMatches, Parser};
use clap::Parser;
use color_eyre::eyre::eyre;
use imbl_value::{json, InternedString};
use josekit::jwk::Jwk;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{command, from_fn_async, AnyContext, CallRemote, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use sqlx::{Executor, Postgres};
use tracing::instrument;
use crate::context::{CliContext, RpcContext};
use crate::db::model::DatabaseModel;
use crate::middleware::auth::{
AsLogoutSessionId, HasLoggedOutSessions, HashSessionToken, LoginRes,
};
@@ -19,6 +19,25 @@ use crate::prelude::*;
use crate::util::crypto::EncryptedWire;
use crate::util::serde::{display_serializable, HandlerExtSerde, WithIoFormat};
use crate::{ensure_code, Error, ResultExt};
/// All active login sessions, keyed by the hashed session token.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct Sessions(pub BTreeMap<InternedString, Session>);
impl Sessions {
    /// Creates an empty session map (same result as `Default::default()`).
    pub fn new() -> Self {
        Self(BTreeMap::new())
    }
}
// Lets `Sessions` be addressed as a keyed map through the db model layer
// (e.g. `as_sessions_mut().insert(...)` elsewhere in this commit).
impl Map for Sessions {
    type Key = InternedString;
    type Value = Session;
    fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error> {
        // The key is already string-like; borrow it directly.
        Ok(key)
    }
    fn key_string(key: &Self::Key) -> Result<InternedString, Error> {
        Ok(key.clone())
    }
}
#[derive(Clone, Serialize, Deserialize)]
#[serde(untagged)]
pub enum PasswordType {
@@ -95,16 +114,6 @@ pub fn auth() -> ParentHandler {
)
}
pub fn cli_metadata() -> Value {
imbl_value::json!({
"platforms": ["cli"],
})
}
pub fn parse_metadata(_: &str, _: &ArgMatches) -> Result<Value, Error> {
Ok(cli_metadata())
}
#[test]
fn gen_pwd() {
println!(
@@ -163,14 +172,8 @@ pub fn check_password(hash: &str, password: &str) -> Result<(), Error> {
Ok(())
}
pub async fn check_password_against_db<Ex>(secrets: &mut Ex, password: &str) -> Result<(), Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
let pw_hash = sqlx::query!("SELECT password FROM account")
.fetch_one(secrets)
.await?
.password;
/// Verifies `password` against the password hash stored in the private
/// section of the database model.
pub fn check_password_against_db(db: &DatabaseModel, password: &str) -> Result<(), Error> {
    let stored_hash = db.as_private().as_password().de()?;
    check_password(&stored_hash, password)
}
@@ -180,7 +183,8 @@ where
#[command(rename_all = "kebab-case")]
pub struct LoginParams {
password: Option<PasswordType>,
#[arg(skip = cli_metadata())]
#[serde(default)]
user_agent: Option<String>,
#[serde(default)]
metadata: Value,
}
@@ -188,26 +192,31 @@ pub struct LoginParams {
#[instrument(skip_all)]
pub async fn login_impl(
ctx: RpcContext,
LoginParams { password, metadata }: LoginParams,
) -> Result<LoginRes, Error> {
let password = password.unwrap_or_default().decrypt(&ctx)?;
let mut handle = ctx.secret_store.acquire().await?;
check_password_against_db(handle.as_mut(), &password).await?;
let hash_token = HashSessionToken::new();
let user_agent = "".to_string(); // todo!() as String;
let metadata = serde_json::to_string(&metadata).with_kind(crate::ErrorKind::Database)?;
let hash_token_hashed = hash_token.hashed();
sqlx::query!(
"INSERT INTO session (id, user_agent, metadata) VALUES ($1, $2, $3)",
hash_token_hashed,
LoginParams {
password,
user_agent,
metadata,
)
.execute(handle.as_mut())
.await?;
}: LoginParams,
) -> Result<LoginRes, Error> {
let password = password.unwrap_or_default().decrypt(&ctx)?;
Ok(hash_token.to_login_res())
ctx.db
.mutate(|db| {
check_password_against_db(db, &password)?;
let hash_token = HashSessionToken::new();
db.as_private_mut().as_sessions_mut().insert(
hash_token.hashed(),
&Session {
logged_in: Utc::now(),
last_active: Utc::now(),
user_agent,
metadata,
},
)?;
Ok(hash_token.to_login_res())
})
.await
}
#[derive(Deserialize, Serialize, Parser)]
@@ -226,20 +235,20 @@ pub async fn logout(
))
}
#[derive(Deserialize, Serialize)]
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct Session {
logged_in: DateTime<Utc>,
last_active: DateTime<Utc>,
user_agent: Option<String>,
metadata: Value,
pub logged_in: DateTime<Utc>,
pub last_active: DateTime<Utc>,
pub user_agent: Option<String>,
pub metadata: Value,
}
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct SessionList {
current: String,
sessions: BTreeMap<String, Session>,
current: InternedString,
sessions: Sessions,
}
pub fn session() -> ParentHandler {
@@ -277,7 +286,7 @@ fn display_sessions(params: WithIoFormat<ListParams>, arg: SessionList) {
"USER AGENT",
"METADATA",
]);
for (id, session) in arg.sessions {
for (id, session) in arg.sessions.0 {
let mut row = row![
&id,
&format!("{}", session.logged_in),
@@ -310,33 +319,11 @@ pub async fn list(
ListParams { session, .. }: ListParams,
) -> Result<SessionList, Error> {
Ok(SessionList {
current: HashSessionToken::from_token(session).hashed().to_owned(),
sessions: sqlx::query!(
"SELECT * FROM session WHERE logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP"
)
.fetch_all(ctx.secret_store.acquire().await?.as_mut())
.await?
.into_iter()
.map(|row| {
Ok((
row.id,
Session {
logged_in: DateTime::from_utc(row.logged_in, Utc),
last_active: DateTime::from_utc(row.last_active, Utc),
user_agent: row.user_agent,
metadata: serde_json::from_str(&row.metadata)
.with_kind(crate::ErrorKind::Database)?,
},
))
})
.collect::<Result<_, Error>>()?,
current: HashSessionToken::from_token(session).hashed().clone(),
sessions: ctx.db.peek().await.into_private().into_sessions().de()?,
})
}
fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<Vec<String>, RpcError> {
Ok(arg.split(",").map(|s| s.trim().to_owned()).collect())
}
/// Newtype around a (hashed) session id targeted for termination.
#[derive(Debug, Clone, Serialize, Deserialize)]
struct KillSessionId(InternedString);
@@ -433,14 +420,17 @@ pub async fn reset_password_impl(
));
}
account.set_password(&new_password)?;
account.save(&ctx.secret_store).await?;
let account_password = &account.password;
let account = account.clone();
ctx.db
.mutate(|d| {
d.as_public_mut()
.as_server_info_mut()
.as_password_hash_mut()
.ser(account_password)
.ser(account_password)?;
account.save(d)?;
Ok(())
})
.await
}

View File

@@ -1,5 +1,4 @@
use std::collections::BTreeMap;
use std::panic::UnwindSafe;
use std::path::{Path, PathBuf};
use std::sync::Arc;
@@ -19,11 +18,11 @@ use crate::auth::check_password_against_db;
use crate::backup::os::OsBackup;
use crate::backup::{BackupReport, ServerBackupReport};
use crate::context::RpcContext;
use crate::db::model::BackupProgress;
use crate::db::model::{BackupProgress, DatabaseModel};
use crate::disk::mount::backup::BackupMountGuard;
use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::{GenericMountGuard, TmpMountGuard};
use crate::notifications::NotificationLevel;
use crate::notifications::{notify, NotificationLevel};
use crate::prelude::*;
use crate::util::io::dir_copy;
use crate::util::serde::IoFormat;
@@ -41,6 +40,111 @@ pub struct BackupParams {
password: crate::auth::PasswordType,
}
/// Drop-based guard ensuring the server's backup-progress status is cleared
/// once a backup run finishes (or is abandoned), and that the outcome is
/// recorded as a notification.
struct BackupStatusGuard(Option<PatchDb>);
impl BackupStatusGuard {
    fn new(db: PatchDb) -> Self {
        Self(Some(db))
    }
    /// Consumes the guard: clears the backup-progress marker, then notifies
    /// the user about `result` (success, partial failure, or error).
    /// `self.0.take()` below empties the inner `Option`, so the `Drop` impl
    /// will not spawn its redundant cleanup task afterwards.
    async fn handle_result(
        mut self,
        result: Result<BTreeMap<PackageId, PackageBackupReport>, Error>,
    ) -> Result<(), Error> {
        // Clear the "backup in progress" marker first, regardless of outcome.
        if let Some(db) = self.0.as_ref() {
            db.mutate(|v| {
                v.as_public_mut()
                    .as_server_info_mut()
                    .as_status_info_mut()
                    .as_backup_progress_mut()
                    .ser(&None)
            })
            .await?;
        }
        if let Some(db) = self.0.take() {
            match result {
                // Every package backed up with no per-package error: success.
                Ok(report) if report.iter().all(|(_, rep)| rep.error.is_none()) => {
                    db.mutate(|db| {
                        notify(
                            db,
                            None,
                            NotificationLevel::Success,
                            "Backup Complete".to_owned(),
                            "Your backup has completed".to_owned(),
                            BackupReport {
                                server: ServerBackupReport {
                                    attempted: true,
                                    error: None,
                                },
                                packages: report,
                            },
                        )
                    })
                    .await
                }
                // Backup ran, but at least one package reported an error.
                Ok(report) => {
                    db.mutate(|db| {
                        notify(
                            db,
                            None,
                            NotificationLevel::Warning,
                            "Backup Complete".to_owned(),
                            "Your backup has completed, but some package(s) failed to backup"
                                .to_owned(),
                            BackupReport {
                                server: ServerBackupReport {
                                    attempted: true,
                                    error: None,
                                },
                                packages: report,
                            },
                        )
                    })
                    .await
                }
                // The backup itself failed before producing a package report.
                Err(e) => {
                    tracing::error!("Backup Failed: {}", e);
                    tracing::debug!("{:?}", e);
                    // Stringify up front so the mutate closure does not need
                    // to capture the error value itself.
                    let err_string = e.to_string();
                    db.mutate(|db| {
                        notify(
                            db,
                            None,
                            NotificationLevel::Error,
                            "Backup Failed".to_owned(),
                            "Your backup failed to complete.".to_owned(),
                            BackupReport {
                                server: ServerBackupReport {
                                    attempted: true,
                                    error: Some(err_string),
                                },
                                packages: BTreeMap::new(),
                            },
                        )
                    })
                    .await
                }
            }?;
        }
        Ok(())
    }
}
// Safety net: if the guard is dropped without `handle_result` having consumed
// the db handle, spawn a task that clears the backup-progress marker so the
// server is not left stuck in the "backing up" state.
impl Drop for BackupStatusGuard {
    fn drop(&mut self) {
        if let Some(db) = self.0.take() {
            tokio::spawn(async move {
                db.mutate(|v| {
                    v.as_public_mut()
                        .as_server_info_mut()
                        .as_status_info_mut()
                        .as_backup_progress_mut()
                        .ser(&None)
                })
                .await
                .unwrap()
            });
        }
    }
}
#[instrument(skip(ctx, old_password, password))]
pub async fn backup_all(
ctx: RpcContext,
@@ -57,139 +161,81 @@ pub async fn backup_all(
.clone()
.decrypt(&ctx)?;
let password = password.decrypt(&ctx)?;
check_password_against_db(ctx.secret_store.acquire().await?.as_mut(), &password).await?;
let fs = target_id
.load(ctx.secret_store.acquire().await?.as_mut())
.await?;
let ((fs, package_ids), status_guard) = (
ctx.db
.mutate(|db| {
check_password_against_db(db, &password)?;
let fs = target_id.load(db)?;
let package_ids = if let Some(ids) = package_ids {
ids.into_iter().collect()
} else {
db.as_public()
.as_package_data()
.as_entries()?
.into_iter()
.filter(|(_, m)| m.expect_as_installed().is_ok())
.map(|(id, _)| id)
.collect()
};
assure_backing_up(db, &package_ids)?;
Ok((fs, package_ids))
})
.await?,
BackupStatusGuard::new(ctx.db.clone()),
);
let mut backup_guard = BackupMountGuard::mount(
TmpMountGuard::mount(&fs, ReadWrite).await?,
&old_password_decrypted,
)
.await?;
let package_ids = if let Some(ids) = package_ids {
ids.into_iter().collect()
} else {
todo!("all installed packages");
};
if old_password.is_some() {
backup_guard.change_password(&password)?;
}
assure_backing_up(&ctx.db, &package_ids).await?;
tokio::task::spawn(async move {
let backup_res = perform_backup(&ctx, backup_guard, &package_ids).await;
match backup_res {
Ok(report) if report.iter().all(|(_, rep)| rep.error.is_none()) => ctx
.notification_manager
.notify(
ctx.db.clone(),
None,
NotificationLevel::Success,
"Backup Complete".to_owned(),
"Your backup has completed".to_owned(),
BackupReport {
server: ServerBackupReport {
attempted: true,
error: None,
},
packages: report,
},
None,
)
.await
.expect("failed to send notification"),
Ok(report) => ctx
.notification_manager
.notify(
ctx.db.clone(),
None,
NotificationLevel::Warning,
"Backup Complete".to_owned(),
"Your backup has completed, but some package(s) failed to backup".to_owned(),
BackupReport {
server: ServerBackupReport {
attempted: true,
error: None,
},
packages: report,
},
None,
)
.await
.expect("failed to send notification"),
Err(e) => {
tracing::error!("Backup Failed: {}", e);
tracing::debug!("{:?}", e);
ctx.notification_manager
.notify(
ctx.db.clone(),
None,
NotificationLevel::Error,
"Backup Failed".to_owned(),
"Your backup failed to complete.".to_owned(),
BackupReport {
server: ServerBackupReport {
attempted: true,
error: Some(e.to_string()),
},
packages: BTreeMap::new(),
},
None,
)
.await
.expect("failed to send notification");
}
}
ctx.db
.mutate(|v| {
v.as_public_mut()
.as_server_info_mut()
.as_status_info_mut()
.as_backup_progress_mut()
.ser(&None)
})
.await?;
Ok::<(), Error>(())
status_guard
.handle_result(perform_backup(&ctx, backup_guard, &package_ids).await)
.await
.unwrap();
});
Ok(())
}
#[instrument(skip(db, packages))]
async fn assure_backing_up(
db: &PatchDb,
packages: impl IntoIterator<Item = &PackageId> + UnwindSafe + Send,
/// Marks every package in `packages` as "backing up" in the server status
/// info, failing with `InvalidRequest` if a backup is already in progress.
///
/// A backup is considered in progress when any recorded package entry in the
/// current `backup-progress` map is not yet complete.
fn assure_backing_up<'a>(
    db: &mut DatabaseModel,
    packages: impl IntoIterator<Item = &'a PackageId>,
) -> Result<(), Error> {
    let backing_up = db
        .as_public_mut()
        .as_server_info_mut()
        .as_status_info_mut()
        .as_backup_progress_mut();
    // `Iterator::any` expresses the original manual fold-with-early-return:
    // true as soon as one package entry is incomplete.
    if backing_up
        .clone()
        .de()?
        .iter()
        .flat_map(|x| x.values())
        .any(|x| !x.complete)
    {
        return Err(Error::new(
            eyre!("Server is already backing up!"),
            ErrorKind::InvalidRequest,
        ));
    }
    // Seed a fresh progress entry (complete = false) for every package
    // included in this backup run.
    backing_up.ser(&Some(
        packages
            .into_iter()
            .map(|x| (x.clone(), BackupProgress { complete: false }))
            .collect(),
    ))?;
    Ok(())
}
#[instrument(skip(ctx, backup_guard))]

View File

@@ -1,13 +1,15 @@
use openssl::pkey::PKey;
use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
use patch_db::Value;
use serde::{Deserialize, Serialize};
use ssh_key::private::Ed25519Keypair;
use torut::onion::TorSecretKeyV3;
use crate::account::AccountInfo;
use crate::hostname::{generate_hostname, generate_id, Hostname};
use crate::net::keys::Key;
use crate::prelude::*;
use crate::util::serde::Base64;
use crate::util::crypto::ed25519_expand_key;
use crate::util::serde::{Base32, Base64, Pem};
pub struct OsBackup {
pub account: AccountInfo,
@@ -19,19 +21,23 @@ impl<'de> Deserialize<'de> for OsBackup {
D: serde::Deserializer<'de>,
{
let tagged = OsBackupSerDe::deserialize(deserializer)?;
match tagged.version {
Ok(match tagged.version {
0 => patch_db::value::from_value::<OsBackupV0>(tagged.rest)
.map_err(serde::de::Error::custom)?
.project()
.map_err(serde::de::Error::custom),
.map_err(serde::de::Error::custom)?,
1 => patch_db::value::from_value::<OsBackupV1>(tagged.rest)
.map_err(serde::de::Error::custom)?
.project()
.map_err(serde::de::Error::custom),
v => Err(serde::de::Error::custom(&format!(
"Unknown backup version {v}"
))),
}
.project(),
2 => patch_db::value::from_value::<OsBackupV2>(tagged.rest)
.map_err(serde::de::Error::custom)?
.project(),
v => {
return Err(serde::de::Error::custom(&format!(
"Unknown backup version {v}"
)))
}
})
}
}
impl Serialize for OsBackup {
@@ -40,11 +46,9 @@ impl Serialize for OsBackup {
S: serde::Serializer,
{
OsBackupSerDe {
version: 1,
rest: patch_db::value::to_value(
&OsBackupV1::unproject(self).map_err(serde::ser::Error::custom)?,
)
.map_err(serde::ser::Error::custom)?,
version: 2,
rest: patch_db::value::to_value(&OsBackupV2::unproject(self))
.map_err(serde::ser::Error::custom)?,
}
.serialize(serializer)
}
@@ -62,10 +66,10 @@ struct OsBackupSerDe {
#[derive(Deserialize)]
#[serde(rename = "kebab-case")]
struct OsBackupV0 {
// tor_key: Base32<[u8; 64]>,
root_ca_key: String, // PEM Encoded OpenSSL Key
root_ca_cert: String, // PEM Encoded OpenSSL X509 Certificate
ui: Value, // JSON Value
tor_key: Base32<[u8; 64]>, // Base32 Encoded Ed25519 Expanded Secret Key
root_ca_key: Pem<PKey<Private>>, // PEM Encoded OpenSSL Key
root_ca_cert: Pem<X509>, // PEM Encoded OpenSSL X509 Certificate
ui: Value, // JSON Value
}
impl OsBackupV0 {
fn project(self) -> Result<OsBackup, Error> {
@@ -74,9 +78,13 @@ impl OsBackupV0 {
server_id: generate_id(),
hostname: generate_hostname(),
password: Default::default(),
key: Key::new(None),
root_ca_key: PKey::private_key_from_pem(self.root_ca_key.as_bytes())?,
root_ca_cert: X509::from_pem(self.root_ca_cert.as_bytes())?,
root_ca_key: self.root_ca_key.0,
root_ca_cert: self.root_ca_cert.0,
ssh_key: ssh_key::PrivateKey::random(
&mut rand::thread_rng(),
ssh_key::Algorithm::Ed25519,
)?,
tor_key: TorSecretKeyV3::from(self.tor_key.0),
},
ui: self.ui,
})
@@ -87,36 +95,67 @@ impl OsBackupV0 {
#[derive(Deserialize, Serialize)]
#[serde(rename = "kebab-case")]
struct OsBackupV1 {
server_id: String, // uuidv4
hostname: String, // embassy-<adjective>-<noun>
net_key: Base64<[u8; 32]>, // Ed25519 Secret Key
root_ca_key: String, // PEM Encoded OpenSSL Key
root_ca_cert: String, // PEM Encoded OpenSSL X509 Certificate
ui: Value, // JSON Value
// TODO add more
server_id: String, // uuidv4
hostname: String, // embassy-<adjective>-<noun>
net_key: Base64<[u8; 32]>, // Ed25519 Secret Key
root_ca_key: Pem<PKey<Private>>, // PEM Encoded OpenSSL Key
root_ca_cert: Pem<X509>, // PEM Encoded OpenSSL X509 Certificate
ui: Value, // JSON Value
}
impl OsBackupV1 {
fn project(self) -> Result<OsBackup, Error> {
Ok(OsBackup {
fn project(self) -> OsBackup {
OsBackup {
account: AccountInfo {
server_id: self.server_id,
hostname: Hostname(self.hostname),
password: Default::default(),
key: Key::from_bytes(None, self.net_key.0),
root_ca_key: PKey::private_key_from_pem(self.root_ca_key.as_bytes())?,
root_ca_cert: X509::from_pem(self.root_ca_cert.as_bytes())?,
root_ca_key: self.root_ca_key.0,
root_ca_cert: self.root_ca_cert.0,
ssh_key: ssh_key::PrivateKey::from(Ed25519Keypair::from_seed(&self.net_key.0)),
tor_key: TorSecretKeyV3::from(ed25519_expand_key(&self.net_key.0)),
},
ui: self.ui,
})
}
fn unproject(backup: &OsBackup) -> Result<Self, Error> {
Ok(Self {
server_id: backup.account.server_id.clone(),
hostname: backup.account.hostname.0.clone(),
net_key: Base64(backup.account.key.as_bytes()),
root_ca_key: String::from_utf8(backup.account.root_ca_key.private_key_to_pem_pkcs8()?)?,
root_ca_cert: String::from_utf8(backup.account.root_ca_cert.to_pem()?)?,
ui: backup.ui.clone(),
})
}
}
}
/// V2
/// Serialized (on-disk) form, version 2, of the OS-level backup metadata.
// NOTE(review): `#[serde(rename = "kebab-case")]` renames the struct itself;
// `rename_all = "kebab-case"` may have been intended — confirm (same pattern
// appears on the V0/V1 structs).
#[derive(Deserialize, Serialize)]
#[serde(rename = "kebab-case")]
struct OsBackupV2 {
    server_id: String,                 // uuidv4
    hostname: String,                  // <adjective>-<noun>
    root_ca_key: Pem<PKey<Private>>,   // PEM Encoded OpenSSL Key
    root_ca_cert: Pem<X509>,           // PEM Encoded OpenSSL X509 Certificate
    ssh_key: Pem<ssh_key::PrivateKey>, // PEM Encoded OpenSSH Key
    tor_key: TorSecretKeyV3,           // Base64 Encoded Ed25519 Expanded Secret Key
    ui: Value,                         // JSON Value
}
impl OsBackupV2 {
    /// Converts the serialized v2 form into the in-memory `OsBackup`.
    /// The password is not stored in the backup, so it is reset to
    /// `Default::default()`.
    fn project(self) -> OsBackup {
        OsBackup {
            account: AccountInfo {
                server_id: self.server_id,
                hostname: Hostname(self.hostname),
                password: Default::default(),
                root_ca_key: self.root_ca_key.0,
                root_ca_cert: self.root_ca_cert.0,
                ssh_key: self.ssh_key.0,
                tor_key: self.tor_key,
            },
            ui: self.ui,
        }
    }
    /// Builds the serializable v2 form from an in-memory `OsBackup`,
    /// cloning each field.
    fn unproject(backup: &OsBackup) -> Self {
        Self {
            server_id: backup.account.server_id.clone(),
            hostname: backup.account.hostname.0.clone(),
            root_ca_key: Pem(backup.account.root_ca_key.clone()),
            root_ca_cert: Pem(backup.account.root_ca_cert.clone()),
            ssh_key: Pem(backup.account.ssh_key.clone()),
            tor_key: backup.account.tor_key.clone(),
            ui: backup.ui.clone(),
        }
    }
}

View File

@@ -5,6 +5,7 @@ use clap::Parser;
use futures::{stream, StreamExt};
use models::PackageId;
use openssl::x509::X509;
use patch_db::json_ptr::ROOT;
use serde::{Deserialize, Serialize};
use torut::onion::OnionAddressV3;
use tracing::instrument;
@@ -12,6 +13,7 @@ use tracing::instrument;
use super::target::BackupTargetId;
use crate::backup::os::OsBackup;
use crate::context::{RpcContext, SetupContext};
use crate::db::model::Database;
use crate::disk::mount::backup::BackupMountGuard;
use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::{GenericMountGuard, TmpMountGuard};
@@ -42,9 +44,7 @@ pub async fn restore_packages_rpc(
password,
}: RestorePackageParams,
) -> Result<(), Error> {
let fs = target_id
.load(ctx.secret_store.acquire().await?.as_mut())
.await?;
let fs = target_id.load(&ctx.db.peek().await)?;
let backup_guard =
BackupMountGuard::mount(TmpMountGuard::mount(&fs, ReadWrite).await?, &password).await?;
@@ -95,11 +95,8 @@ pub async fn recover_full_embassy(
)
.with_kind(ErrorKind::PasswordHashGeneration)?;
let secret_store = ctx.secret_store().await?;
os_backup.account.save(&secret_store).await?;
secret_store.close().await;
let db = ctx.db().await?;
db.put(&ROOT, &Database::init(&os_backup.account)?).await?;
init(&ctx.config).await?;
@@ -129,7 +126,7 @@ pub async fn recover_full_embassy(
Ok((
disk_guid,
os_backup.account.hostname,
os_backup.account.key.tor_address(),
os_backup.account.tor_key.public().get_onion_address(),
os_backup.account.root_ca_cert,
))
}

View File

@@ -1,14 +1,15 @@
use std::collections::BTreeMap;
use std::path::{Path, PathBuf};
use clap::Parser;
use color_eyre::eyre::eyre;
use futures::TryStreamExt;
use imbl_value::InternedString;
use rpc_toolkit::{command, from_fn_async, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use sqlx::{Executor, Postgres};
use super::{BackupTarget, BackupTargetId};
use crate::context::{CliContext, RpcContext};
use crate::db::model::DatabaseModel;
use crate::disk::mount::filesystem::cifs::Cifs;
use crate::disk::mount::filesystem::ReadOnly;
use crate::disk::mount::guard::{GenericMountGuard, TmpMountGuard};
@@ -16,6 +17,24 @@ use crate::disk::util::{recovery_info, EmbassyOsRecoveryInfo};
use crate::prelude::*;
use crate::util::serde::KeyVal;
/// Configured CIFS (network share) backup targets, keyed by numeric id.
#[derive(Debug, Default, Deserialize, Serialize)]
pub struct CifsTargets(pub BTreeMap<u32, Cifs>);
impl CifsTargets {
    /// Creates an empty target map (same result as `Default::default()`).
    pub fn new() -> Self {
        Self(BTreeMap::new())
    }
}
// Lets `CifsTargets` be addressed as a keyed map through the db model layer
// (e.g. `as_cifs_mut().insert(...)` elsewhere in this commit).
impl Map for CifsTargets {
    type Key = u32;
    type Value = Cifs;
    fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error> {
        // Numeric keys have no borrowed string form; delegate to the
        // owned conversion.
        Self::key_string(key)
    }
    fn key_string(key: &Self::Key) -> Result<InternedString, Error> {
        Ok(InternedString::from_display(key))
    }
}
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct CifsBackupTarget {
@@ -69,23 +88,27 @@ pub async fn add(
) -> Result<KeyVal<BackupTargetId, BackupTarget>, Error> {
let cifs = Cifs {
hostname,
path,
path: Path::new("/").join(path),
username,
password,
};
let guard = TmpMountGuard::mount(&cifs, ReadOnly).await?;
let embassy_os = recovery_info(guard.path()).await?;
guard.unmount().await?;
let path_string = Path::new("/").join(&cifs.path).display().to_string();
let id: i32 = sqlx::query!(
"INSERT INTO cifs_shares (hostname, path, username, password) VALUES ($1, $2, $3, $4) RETURNING id",
cifs.hostname,
path_string,
cifs.username,
cifs.password,
)
.fetch_one(&ctx.secret_store)
.await?.id;
let id = ctx
.db
.mutate(|db| {
let id = db
.as_private()
.as_cifs()
.keys()?
.into_iter()
.max()
.map_or(0, |a| a + 1);
db.as_private_mut().as_cifs_mut().insert(&id, &cifs)?;
Ok(id)
})
.await?;
Ok(KeyVal {
key: BackupTargetId::Cifs { id },
value: BackupTarget::Cifs(CifsBackupTarget {
@@ -129,32 +152,27 @@ pub async fn update(
};
let cifs = Cifs {
hostname,
path,
path: Path::new("/").join(path),
username,
password,
};
let guard = TmpMountGuard::mount(&cifs, ReadOnly).await?;
let embassy_os = recovery_info(guard.path()).await?;
guard.unmount().await?;
let path_string = Path::new("/").join(&cifs.path).display().to_string();
if sqlx::query!(
"UPDATE cifs_shares SET hostname = $1, path = $2, username = $3, password = $4 WHERE id = $5",
cifs.hostname,
path_string,
cifs.username,
cifs.password,
id,
)
.execute(&ctx.secret_store)
.await?
.rows_affected()
== 0
{
return Err(Error::new(
eyre!("Backup Target ID {} Not Found", BackupTargetId::Cifs { id }),
ErrorKind::NotFound,
));
};
ctx.db
.mutate(|db| {
db.as_private_mut()
.as_cifs_mut()
.as_idx_mut(&id)
.ok_or_else(|| {
Error::new(
eyre!("Backup Target ID {} Not Found", BackupTargetId::Cifs { id }),
ErrorKind::NotFound,
)
})?
.ser(&cifs)
})
.await?;
Ok(KeyVal {
key: BackupTargetId::Cifs { id },
value: BackupTarget::Cifs(CifsBackupTarget {
@@ -183,74 +201,46 @@ pub async fn remove(ctx: RpcContext, RemoveParams { id }: RemoveParams) -> Resul
ErrorKind::NotFound,
));
};
if sqlx::query!("DELETE FROM cifs_shares WHERE id = $1", id)
.execute(&ctx.secret_store)
.await?
.rows_affected()
== 0
{
return Err(Error::new(
eyre!("Backup Target ID {} Not Found", BackupTargetId::Cifs { id }),
ErrorKind::NotFound,
));
};
ctx.db
.mutate(|db| db.as_private_mut().as_cifs_mut().remove(&id))
.await?;
Ok(())
}
pub async fn load<Ex>(secrets: &mut Ex, id: i32) -> Result<Cifs, Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
let record = sqlx::query!(
"SELECT hostname, path, username, password FROM cifs_shares WHERE id = $1",
id
)
.fetch_one(secrets)
.await?;
Ok(Cifs {
hostname: record.hostname,
path: PathBuf::from(record.path),
username: record.username,
password: record.password,
})
/// Loads the CIFS share configuration with the given `id` from the private
/// section of the db, or returns a `NotFound` error if no such target exists.
pub fn load(db: &DatabaseModel, id: u32) -> Result<Cifs, Error> {
    db.as_private()
        .as_cifs()
        .as_idx(&id)
        .ok_or_else(|| {
            Error::new(
                eyre!("Backup Target ID {} Not Found", id),
                ErrorKind::NotFound,
            )
        })?
        .de()
}
pub async fn list<Ex>(secrets: &mut Ex) -> Result<Vec<(i32, CifsBackupTarget)>, Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
let mut records =
sqlx::query!("SELECT id, hostname, path, username, password FROM cifs_shares")
.fetch_many(secrets);
pub async fn list(db: &DatabaseModel) -> Result<Vec<(u32, CifsBackupTarget)>, Error> {
let mut cifs = Vec::new();
while let Some(query_result) = records.try_next().await? {
if let Some(record) = query_result.right() {
let mount_info = Cifs {
hostname: record.hostname,
path: PathBuf::from(record.path),
username: record.username,
password: record.password,
};
let embassy_os = async {
let guard = TmpMountGuard::mount(&mount_info, ReadOnly).await?;
let embassy_os = recovery_info(guard.path()).await?;
guard.unmount().await?;
Ok::<_, Error>(embassy_os)
}
.await;
cifs.push((
record.id,
CifsBackupTarget {
hostname: mount_info.hostname,
path: mount_info.path,
username: mount_info.username,
mountable: embassy_os.is_ok(),
embassy_os: embassy_os.ok().and_then(|a| a),
},
));
for (id, model) in db.as_private().as_cifs().as_entries()? {
let mount_info = model.de()?;
let embassy_os = async {
let guard = TmpMountGuard::mount(&mount_info, ReadOnly).await?;
let embassy_os = recovery_info(guard.path()).await?;
guard.unmount().await?;
Ok::<_, Error>(embassy_os)
}
.await;
cifs.push((
id,
CifsBackupTarget {
hostname: mount_info.hostname,
path: mount_info.path,
username: mount_info.username,
mountable: embassy_os.is_ok(),
embassy_os: embassy_os.ok().and_then(|a| a),
},
));
}
Ok(cifs)

View File

@@ -11,12 +11,12 @@ use models::PackageId;
use rpc_toolkit::{command, from_fn_async, AnyContext, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use sqlx::{Executor, Postgres};
use tokio::sync::Mutex;
use tracing::instrument;
use self::cifs::CifsBackupTarget;
use crate::context::{CliContext, RpcContext};
use crate::db::model::DatabaseModel;
use crate::disk::mount::backup::BackupMountGuard;
use crate::disk::mount::filesystem::block_dev::BlockDev;
use crate::disk::mount::filesystem::cifs::Cifs;
@@ -49,18 +49,15 @@ pub enum BackupTarget {
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
pub enum BackupTargetId {
Disk { logicalname: PathBuf },
Cifs { id: i32 },
Cifs { id: u32 },
}
impl BackupTargetId {
pub async fn load<Ex>(self, secrets: &mut Ex) -> Result<BackupTargetFS, Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
pub fn load(self, db: &DatabaseModel) -> Result<BackupTargetFS, Error> {
Ok(match self {
BackupTargetId::Disk { logicalname } => {
BackupTargetFS::Disk(BlockDev::new(logicalname))
}
BackupTargetId::Cifs { id } => BackupTargetFS::Cifs(cifs::load(secrets, id).await?),
BackupTargetId::Cifs { id } => BackupTargetFS::Cifs(cifs::load(db, id)?),
})
}
}
@@ -161,10 +158,10 @@ pub fn target() -> ParentHandler {
// #[command(display(display_serializable))]
pub async fn list(ctx: RpcContext) -> Result<BTreeMap<BackupTargetId, BackupTarget>, Error> {
let mut sql_handle = ctx.secret_store.acquire().await?;
let peek = ctx.db.peek().await;
let (disks_res, cifs) = tokio::try_join!(
crate::disk::util::list(&ctx.os_partitions),
cifs::list(sql_handle.as_mut()),
cifs::list(&peek),
)?;
Ok(disks_res
.into_iter()
@@ -262,13 +259,7 @@ pub async fn info(
}: InfoParams,
) -> Result<BackupInfo, Error> {
let guard = BackupMountGuard::mount(
TmpMountGuard::mount(
&target_id
.load(ctx.secret_store.acquire().await?.as_mut())
.await?,
ReadWrite,
)
.await?,
TmpMountGuard::mount(&target_id.load(&ctx.db.peek().await)?, ReadWrite).await?,
&password,
)
.await?;
@@ -308,14 +299,7 @@ pub async fn mount(
}
let guard = BackupMountGuard::mount(
TmpMountGuard::mount(
&target_id
.clone()
.load(ctx.secret_store.acquire().await?.as_mut())
.await?,
ReadWrite,
)
.await?,
TmpMountGuard::mount(&target_id.clone().load(&ctx.db.peek().await)?, ReadWrite).await?,
&password,
)
.await?;

View File

@@ -3,15 +3,12 @@ use std::net::SocketAddr;
use std::path::{Path, PathBuf};
use clap::Parser;
use patch_db::json_ptr::JsonPointer;
use reqwest::Url;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use sqlx::postgres::PgConnectOptions;
use sqlx::PgPool;
use crate::account::AccountInfo;
use crate::db::model::Database;
use crate::disk::OsPartitionInfo;
use crate::init::init_postgres;
use crate::prelude::*;
@@ -149,15 +146,12 @@ impl ServerConfig {
.as_deref()
.unwrap_or_else(|| Path::new("/embassy-data"))
}
pub async fn db(&self, account: &AccountInfo) -> Result<PatchDb, Error> {
pub async fn db(&self) -> Result<PatchDb, Error> {
let db_path = self.datadir().join("main").join("embassy.db");
let db = PatchDb::open(&db_path)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
if !db.exists(&<JsonPointer>::default()).await {
db.put(&<JsonPointer>::default(), &Database::init(account))
.await?;
}
Ok(db)
}
#[instrument(skip_all)]

View File

@@ -11,7 +11,6 @@ use josekit::jwk::Jwk;
use patch_db::PatchDb;
use reqwest::{Client, Proxy};
use rpc_toolkit::Context;
use sqlx::PgPool;
use tokio::sync::{broadcast, oneshot, Mutex, RwLock};
use tokio::time::Instant;
use tracing::instrument;
@@ -28,14 +27,11 @@ use crate::init::check_time_is_synchronized;
use crate::lxc::{LxcContainer, LxcManager};
use crate::middleware::auth::HashSessionToken;
use crate::net::net_controller::NetController;
use crate::net::ssl::{root_ca_start_time, SslManager};
use crate::net::utils::find_eth_iface;
use crate::net::wifi::WpaCli;
use crate::notifications::NotificationManager;
use crate::prelude::*;
use crate::service::ServiceMap;
use crate::shutdown::Shutdown;
use crate::status::MainStatus;
use crate::system::get_mem_info;
use crate::util::lshw::{lshw, LshwDevice};
@@ -47,14 +43,12 @@ pub struct RpcContextSeed {
pub datadir: PathBuf,
pub disk_guid: Arc<String>,
pub db: PatchDb,
pub secret_store: PgPool,
pub account: RwLock<AccountInfo>,
pub net_controller: Arc<NetController>,
pub services: ServiceMap,
pub metrics_cache: RwLock<Option<crate::system::Metrics>>,
pub shutdown: broadcast::Sender<Option<Shutdown>>,
pub tor_socks: SocketAddr,
pub notification_manager: NotificationManager,
pub lxc_manager: Arc<LxcManager>,
pub open_authed_websockets: Mutex<BTreeMap<HashSessionToken, Vec<oneshot::Sender<()>>>>,
pub rpc_stream_continuations: Mutex<BTreeMap<RequestGuid, RpcContinuation>>,
@@ -86,13 +80,14 @@ impl RpcContext {
9050,
)));
let (shutdown, _) = tokio::sync::broadcast::channel(1);
let secret_store = config.secret_store().await?;
tracing::info!("Opened Pg DB");
let account = AccountInfo::load(&secret_store).await?;
let db = config.db(&account).await?;
let db = config.db().await?;
let peek = db.peek().await;
let account = AccountInfo::load(&peek)?;
tracing::info!("Opened PatchDB");
let net_controller = Arc::new(
NetController::init(
db.clone(),
config
.tor_control
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
@@ -101,16 +96,14 @@ impl RpcContext {
.dns_bind
.as_deref()
.unwrap_or(&[SocketAddr::from(([127, 0, 0, 1], 53))]),
SslManager::new(&account, root_ca_start_time().await?)?,
&account.hostname,
&account.key,
account.tor_key.clone(),
)
.await?,
);
tracing::info!("Initialized Net Controller");
let services = ServiceMap::default();
let metrics_cache = RwLock::<Option<crate::system::Metrics>>::new(None);
let notification_manager = NotificationManager::new(secret_store.clone());
tracing::info!("Initialized Notification Manager");
let tor_proxy_url = format!("socks5h://{tor_proxy}");
let devices = lshw().await?;
@@ -157,14 +150,12 @@ impl RpcContext {
},
disk_guid,
db,
secret_store,
account: RwLock::new(account),
net_controller,
services,
metrics_cache,
shutdown,
tor_socks: tor_proxy,
notification_manager,
lxc_manager: Arc::new(LxcManager::new()),
open_authed_websockets: Mutex::new(BTreeMap::new()),
rpc_stream_continuations: Mutex::new(BTreeMap::new()),
@@ -208,7 +199,6 @@ impl RpcContext {
#[instrument(skip_all)]
pub async fn shutdown(self) -> Result<(), Error> {
self.services.shutdown_all().await?;
self.secret_store.close().await;
self.is_closed.store(true, Ordering::SeqCst);
tracing::info!("RPC Context is shutdown");
// TODO: shutdown http servers

View File

@@ -3,7 +3,6 @@ use std::path::PathBuf;
use std::sync::Arc;
use josekit::jwk::Jwk;
use patch_db::json_ptr::JsonPointer;
use patch_db::PatchDb;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::Context;
@@ -14,9 +13,7 @@ use tokio::sync::broadcast::Sender;
use tokio::sync::RwLock;
use tracing::instrument;
use crate::account::AccountInfo;
use crate::context::config::ServerConfig;
use crate::db::model::Database;
use crate::disk::OsPartitionInfo;
use crate::init::init_postgres;
use crate::prelude::*;
@@ -81,15 +78,11 @@ impl SetupContext {
})))
}
#[instrument(skip_all)]
pub async fn db(&self, account: &AccountInfo) -> Result<PatchDb, Error> {
pub async fn db(&self) -> Result<PatchDb, Error> {
let db_path = self.datadir.join("main").join("embassy.db");
let db = PatchDb::open(&db_path)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
if !db.exists(&<JsonPointer>::default()).await {
db.put(&<JsonPointer>::default(), &Database::init(account))
.await?;
}
Ok(db)
}
#[instrument(skip_all)]

View File

@@ -24,7 +24,7 @@ pub async fn start(ctx: RpcContext, ControlParams { id }: ControlParams) -> Resu
.as_ref()
.or_not_found(lazy_format!("Manager for {id}"))?
.start()
.await;
.await?;
Ok(())
}
@@ -37,7 +37,7 @@ pub async fn stop(ctx: RpcContext, ControlParams { id }: ControlParams) -> Resul
.as_ref()
.ok_or_else(|| Error::new(eyre!("Manager not found"), crate::ErrorKind::InvalidRequest))?
.stop()
.await;
.await?;
Ok(())
}
@@ -49,7 +49,7 @@ pub async fn restart(ctx: RpcContext, ControlParams { id }: ControlParams) -> Re
.as_ref()
.ok_or_else(|| Error::new(eyre!("Manager not found"), crate::ErrorKind::InvalidRequest))?
.restart()
.await;
.await?;
Ok(())
}

View File

@@ -13,30 +13,46 @@ use patch_db::json_ptr::JsonPointer;
use patch_db::{HasModel, Value};
use reqwest::Url;
use serde::{Deserialize, Serialize};
use ssh_key::public::Ed25519PublicKey;
use torut::onion::OnionAddressV3;
use crate::account::AccountInfo;
use crate::auth::Sessions;
use crate::backup::target::cifs::CifsTargets;
use crate::net::forward::AvailablePorts;
use crate::net::host::HostInfo;
use crate::net::keys::KeyStore;
use crate::net::utils::{get_iface_ipv4_addr, get_iface_ipv6_addr};
use crate::notifications::Notifications;
use crate::prelude::*;
use crate::progress::FullProgress;
use crate::s9pk::manifest::Manifest;
use crate::ssh::SshKeys;
use crate::status::Status;
use crate::util::cpupower::Governor;
use crate::util::serde::Pem;
use crate::util::Version;
use crate::version::{Current, VersionT};
use crate::{ARCH, PLATFORM};
/// Interned copy of the compile-time `ARCH` constant (CPU architecture of this build).
fn get_arch() -> InternedString {
    (*ARCH).into()
}

/// Interned copy of the compile-time `PLATFORM` constant for this build.
fn get_platform() -> InternedString {
    (&*PLATFORM).into()
}
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
pub struct Database {
pub public: Public,
pub private: (), // TODO
pub private: Private,
}
impl Database {
pub fn init(account: &AccountInfo) -> Self {
pub fn init(account: &AccountInfo) -> Result<Self, Error> {
let lan_address = account.hostname.lan_address().parse().unwrap();
Database {
Ok(Database {
public: Public {
server_info: ServerInfo {
arch: get_arch(),
@@ -48,9 +64,13 @@ impl Database {
last_wifi_region: None,
eos_version_compat: Current::new().compat().clone(),
lan_address,
tor_address: format!("https://{}", account.key.tor_address())
.parse()
.unwrap(),
onion_address: account.tor_key.public().get_onion_address(),
tor_address: format!(
"https://{}",
account.tor_key.public().get_onion_address()
)
.parse()
.unwrap(),
ip_info: BTreeMap::new(),
status_info: ServerStatus {
backup_progress: None,
@@ -70,11 +90,9 @@ impl Database {
clearnet: Vec::new(),
},
password_hash: account.password.clone(),
pubkey: ssh_key::PublicKey::from(Ed25519PublicKey::from(
&account.key.ssh_key(),
))
.to_openssh()
.unwrap(),
pubkey: ssh_key::PublicKey::from(&account.ssh_key)
.to_openssh()
.unwrap(),
ca_fingerprint: account
.root_ca_cert
.digest(MessageDigest::sha256())
@@ -93,11 +111,22 @@ impl Database {
)))
.unwrap(),
},
private: (), // TODO
}
private: Private {
key_store: KeyStore::new(account)?,
password: account.password.clone(),
ssh_privkey: Pem(account.ssh_key.clone()),
ssh_pubkeys: SshKeys::new(),
available_ports: AvailablePorts::new(),
sessions: Sessions::new(),
notifications: Notifications::new(),
cifs: CifsTargets::new(),
}, // TODO
})
}
}
pub type DatabaseModel = Model<Database>;
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
@@ -108,14 +137,18 @@ pub struct Public {
pub ui: Value,
}
pub type DatabaseModel = Model<Database>;
fn get_arch() -> InternedString {
(*ARCH).into()
}
fn get_platform() -> InternedString {
(&*PLATFORM).into()
/// Server-secret state stored under the `private` subtree of [`Database`]
/// (counterpart to `Public`). Replaces data formerly kept in Postgres.
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
pub struct Private {
    // network/TLS key material derived from the account
    pub key_store: KeyStore,
    pub password: String, // argon2 hash
    // host SSH identity, PEM-encoded for serialization
    pub ssh_privkey: Pem<ssh_key::PrivateKey>,
    // user-authorized SSH public keys (synced to ~/.ssh/authorized_keys at init)
    pub ssh_pubkeys: SshKeys,
    // pool of LAN ports handed out for dynamic forwards
    pub available_ports: AvailablePorts,
    // active auth sessions (moved here from the Postgres `session` table)
    pub sessions: Sessions,
    pub notifications: Notifications,
    // configured CIFS/SMB backup targets
    pub cifs: CifsTargets,
}
#[derive(Debug, Deserialize, Serialize, HasModel)]
@@ -134,6 +167,8 @@ pub struct ServerInfo {
pub last_wifi_region: Option<CountryCode>,
pub eos_version_compat: VersionRange,
pub lan_address: Url,
pub onion_address: OnionAddressV3,
/// for backwards compatibility
pub tor_address: Url,
pub ip_info: BTreeMap<String, IpInfo>,
#[serde(default)]
@@ -229,6 +264,12 @@ pub struct AllPackageData(pub BTreeMap<PackageId, PackageDataEntry>);
impl Map for AllPackageData {
    type Key = PackageId;
    type Value = PackageDataEntry;
    // PackageId is string-backed, so it is usable as an object key directly
    fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error> {
        Ok(key)
    }
    // override skips the key_str + intern round-trip
    fn key_string(key: &Self::Key) -> Result<InternedString, Error> {
        Ok(key.clone().into())
    }
}
#[derive(Debug, Deserialize, Serialize, HasModel)]
@@ -471,6 +512,7 @@ pub struct InstalledPackageInfo {
pub current_dependents: CurrentDependents,
pub current_dependencies: CurrentDependencies,
pub interface_addresses: InterfaceAddressMap,
pub hosts: HostInfo,
pub store: Value,
pub store_exposed_ui: Vec<ExposedUI>,
pub store_exposed_dependents: Vec<JsonPointer>,
@@ -512,6 +554,12 @@ impl CurrentDependents {
impl Map for CurrentDependents {
    type Key = PackageId;
    type Value = CurrentDependencyInfo;
    // PackageId is string-backed, so it is usable as an object key directly
    fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error> {
        Ok(key)
    }
    // override skips the key_str + intern round-trip
    fn key_string(key: &Self::Key) -> Result<InternedString, Error> {
        Ok(key.clone().into())
    }
}
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct CurrentDependencies(pub BTreeMap<PackageId, CurrentDependencyInfo>);
@@ -529,6 +577,12 @@ impl CurrentDependencies {
impl Map for CurrentDependencies {
    type Key = PackageId;
    type Value = CurrentDependencyInfo;
    // PackageId is string-backed, so it is usable as an object key directly
    fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error> {
        Ok(key)
    }
    // override skips the key_str + intern round-trip
    fn key_string(key: &Self::Key) -> Result<InternedString, Error> {
        Ok(key.clone().into())
    }
}
#[derive(Debug, Deserialize, Serialize, HasModel)]
@@ -552,6 +606,12 @@ pub struct InterfaceAddressMap(pub BTreeMap<HostId, InterfaceAddresses>);
impl Map for InterfaceAddressMap {
    type Key = HostId;
    type Value = InterfaceAddresses;
    // HostId is string-backed, so it is usable as an object key directly
    fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error> {
        Ok(key)
    }
    // override skips the key_str + intern round-trip
    fn key_string(key: &Self::Key) -> Result<InternedString, Error> {
        Ok(key.clone().into())
    }
}
#[derive(Debug, Deserialize, Serialize, HasModel)]

View File

@@ -1,13 +1,15 @@
use std::collections::BTreeMap;
use std::marker::PhantomData;
use std::panic::UnwindSafe;
use std::str::FromStr;
use chrono::{DateTime, Utc};
pub use imbl_value::Value;
use patch_db::json_ptr::ROOT;
use patch_db::value::InternedString;
pub use patch_db::{HasModel, PatchDb};
use serde::de::DeserializeOwned;
use serde::Serialize;
use serde::{Deserialize, Serialize};
use crate::db::model::DatabaseModel;
use crate::prelude::*;
@@ -92,12 +94,37 @@ impl<T: Serialize> Model<T> {
}
}
impl<T> Serialize for Model<T> {
    /// Serializes the raw underlying `Value`; the type parameter `T` is only a
    /// phantom marker and plays no part in the serialized form.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        self.value.serialize(serializer)
    }
}
impl<'de, T: Serialize + Deserialize<'de>> Deserialize<'de> for Model<T> {
    /// Deserializes a `T`, then immediately re-serializes it into the model's
    /// internal `Value` via `Self::new` — which is why the extra `T: Serialize`
    /// bound is required on this impl.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        use serde::de::Error;
        Self::new(&T::deserialize(deserializer)?).map_err(D::Error::custom)
    }
}
impl<T: Serialize + DeserializeOwned> Model<T> {
    /// Stores `value`, returning the value that was previously held.
    pub fn replace(&mut self, value: &T) -> Result<T, Error> {
        let previous = self.de()?;
        self.ser(value)?;
        Ok(previous)
    }

    /// Deserializes the current value, lets `f` modify it in place, and writes
    /// the result back. If `f` returns an error, nothing is written back.
    pub fn mutate<U>(&mut self, f: impl FnOnce(&mut T) -> Result<U, Error>) -> Result<U, Error> {
        let mut current = self.de()?;
        let outcome = f(&mut current)?;
        self.ser(&current)?;
        Ok(outcome)
    }
}
impl<T> Clone for Model<T> {
fn clone(&self) -> Self {
@@ -181,20 +208,38 @@ impl<T> Model<Option<T>> {
/// A map-shaped model type: pairs a key/value type and describes how keys are
/// rendered as JSON object-property strings for indexing into the model.
pub trait Map: DeserializeOwned + Serialize {
    type Key;
    type Value;
    /// Borrowed string form of `key`, used when indexing into the object.
    fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error>;
    /// Owned interned form of `key`; defaults to interning `key_str`'s output.
    fn key_string(key: &Self::Key) -> Result<InternedString, Error> {
        Ok(InternedString::intern(Self::key_str(key)?.as_ref()))
    }
}
// Blanket impl: any BTreeMap whose key is already string-like maps straight
// through without re-encoding.
impl<A, B> Map for BTreeMap<A, B>
where
    A: serde::Serialize + serde::de::DeserializeOwned + Ord + AsRef<str>,
    B: serde::Serialize + serde::de::DeserializeOwned,
{
    type Key = A;
    type Value = B;
    fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error> {
        Ok(key.as_ref())
    }
}
// Blanket impl for non-string keys wrapped in JsonKey: the key is encoded as
// its JSON text (an owned String), matching JsonKey's Serialize impl.
impl<A, B> Map for BTreeMap<JsonKey<A>, B>
where
    A: serde::Serialize + serde::de::DeserializeOwned + Ord,
    B: serde::Serialize + serde::de::DeserializeOwned,
{
    type Key = A;
    type Value = B;
    fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error> {
        serde_json::to_string(key).with_kind(ErrorKind::Serialization)
    }
}
impl<T: Map> Model<T>
where
T::Key: AsRef<str>,
T::Value: Serialize,
{
pub fn insert(&mut self, key: &T::Key, value: &T::Value) -> Result<(), Error> {
@@ -202,7 +247,7 @@ where
let v = patch_db::value::to_value(value)?;
match &mut self.value {
Value::Object(o) => {
o.insert(InternedString::intern(key.as_ref()), v);
o.insert(T::key_string(key)?, v);
Ok(())
}
v => Err(patch_db::value::Error {
@@ -212,13 +257,40 @@ where
.into()),
}
}
pub fn upsert<F, D>(&mut self, key: &T::Key, value: F) -> Result<&mut Model<T::Value>, Error>
where
F: FnOnce() -> D,
D: AsRef<T::Value>,
{
use serde::ser::Error;
match &mut self.value {
Value::Object(o) => {
use patch_db::ModelExt;
let s = T::key_str(key)?;
let exists = o.contains_key(s.as_ref());
let res = self.transmute_mut(|v| {
use patch_db::value::index::Index;
s.as_ref().index_or_insert(v)
});
if !exists {
res.ser(value().as_ref())?;
}
Ok(res)
}
v => Err(patch_db::value::Error {
source: patch_db::value::ErrorSource::custom(format!("expected object found {v}")),
kind: patch_db::value::ErrorKind::Serialization,
}
.into()),
}
}
pub fn insert_model(&mut self, key: &T::Key, value: Model<T::Value>) -> Result<(), Error> {
use patch_db::ModelExt;
use serde::ser::Error;
let v = value.into_value();
match &mut self.value {
Value::Object(o) => {
o.insert(InternedString::intern(key.as_ref()), v);
o.insert(T::key_string(key)?, v);
Ok(())
}
v => Err(patch_db::value::Error {
@@ -232,25 +304,16 @@ where
impl<T: Map> Model<T>
where
T::Key: DeserializeOwned + Ord + Clone,
T::Key: FromStr + Ord + Clone,
Error: From<<T::Key as FromStr>::Err>,
{
pub fn keys(&self) -> Result<Vec<T::Key>, Error> {
use serde::de::Error;
use serde::Deserialize;
match &self.value {
Value::Object(o) => o
.keys()
.cloned()
.map(|k| {
T::Key::deserialize(patch_db::value::de::InternedStringDeserializer::from(k))
.map_err(|e| {
patch_db::value::Error {
kind: patch_db::value::ErrorKind::Deserialization,
source: e,
}
.into()
})
})
.map(|k| Ok(T::Key::from_str(&*k)?))
.collect(),
v => Err(patch_db::value::Error {
source: patch_db::value::ErrorSource::custom(format!("expected object found {v}")),
@@ -263,19 +326,10 @@ where
pub fn into_entries(self) -> Result<Vec<(T::Key, Model<T::Value>)>, Error> {
use patch_db::ModelExt;
use serde::de::Error;
use serde::Deserialize;
match self.value {
Value::Object(o) => o
.into_iter()
.map(|(k, v)| {
Ok((
T::Key::deserialize(patch_db::value::de::InternedStringDeserializer::from(
k,
))
.with_kind(ErrorKind::Deserialization)?,
Model::from_value(v),
))
})
.map(|(k, v)| Ok((T::Key::from_str(&*k)?, Model::from_value(v))))
.collect(),
v => Err(patch_db::value::Error {
source: patch_db::value::ErrorSource::custom(format!("expected object found {v}")),
@@ -287,19 +341,10 @@ where
pub fn as_entries(&self) -> Result<Vec<(T::Key, &Model<T::Value>)>, Error> {
use patch_db::ModelExt;
use serde::de::Error;
use serde::Deserialize;
match &self.value {
Value::Object(o) => o
.iter()
.map(|(k, v)| {
Ok((
T::Key::deserialize(patch_db::value::de::InternedStringDeserializer::from(
k.clone(),
))
.with_kind(ErrorKind::Deserialization)?,
Model::value_as(v),
))
})
.map(|(k, v)| Ok((T::Key::from_str(&**k)?, Model::value_as(v))))
.collect(),
v => Err(patch_db::value::Error {
source: patch_db::value::ErrorSource::custom(format!("expected object found {v}")),
@@ -311,19 +356,10 @@ where
pub fn as_entries_mut(&mut self) -> Result<Vec<(T::Key, &mut Model<T::Value>)>, Error> {
use patch_db::ModelExt;
use serde::de::Error;
use serde::Deserialize;
match &mut self.value {
Value::Object(o) => o
.iter_mut()
.map(|(k, v)| {
Ok((
T::Key::deserialize(patch_db::value::de::InternedStringDeserializer::from(
k.clone(),
))
.with_kind(ErrorKind::Deserialization)?,
Model::value_as_mut(v),
))
})
.map(|(k, v)| Ok((T::Key::from_str(&**k)?, Model::value_as_mut(v))))
.collect(),
v => Err(patch_db::value::Error {
source: patch_db::value::ErrorSource::custom(format!("expected object found {v}")),
@@ -333,36 +369,36 @@ where
}
}
}
impl<T: Map> Model<T>
where
T::Key: AsRef<str>,
{
impl<T: Map> Model<T> {
pub fn into_idx(self, key: &T::Key) -> Option<Model<T::Value>> {
use patch_db::ModelExt;
let s = T::key_str(key).ok()?;
match &self.value {
Value::Object(o) if o.contains_key(key.as_ref()) => Some(self.transmute(|v| {
Value::Object(o) if o.contains_key(s.as_ref()) => Some(self.transmute(|v| {
use patch_db::value::index::Index;
key.as_ref().index_into_owned(v).unwrap()
s.as_ref().index_into_owned(v).unwrap()
})),
_ => None,
}
}
pub fn as_idx<'a>(&'a self, key: &T::Key) -> Option<&'a Model<T::Value>> {
use patch_db::ModelExt;
let s = T::key_str(key).ok()?;
match &self.value {
Value::Object(o) if o.contains_key(key.as_ref()) => Some(self.transmute_ref(|v| {
Value::Object(o) if o.contains_key(s.as_ref()) => Some(self.transmute_ref(|v| {
use patch_db::value::index::Index;
key.as_ref().index_into(v).unwrap()
s.as_ref().index_into(v).unwrap()
})),
_ => None,
}
}
pub fn as_idx_mut<'a>(&'a mut self, key: &T::Key) -> Option<&'a mut Model<T::Value>> {
use patch_db::ModelExt;
let s = T::key_str(key).ok()?;
match &mut self.value {
Value::Object(o) if o.contains_key(key.as_ref()) => Some(self.transmute_mut(|v| {
Value::Object(o) if o.contains_key(s.as_ref()) => Some(self.transmute_mut(|v| {
use patch_db::value::index::Index;
key.as_ref().index_or_insert(v)
s.as_ref().index_or_insert(v)
})),
_ => None,
}
@@ -371,7 +407,7 @@ where
use serde::ser::Error;
match &mut self.value {
Value::Object(o) => {
let v = o.remove(key.as_ref());
let v = o.remove(T::key_str(key)?.as_ref());
Ok(v.map(patch_db::ModelExt::from_value))
}
v => Err(patch_db::value::Error {
@@ -382,3 +418,90 @@ where
}
}
}
/// Transparent wrapper marking a map key that is (de)serialized as a JSON
/// string rather than used verbatim.
///
/// `#[repr(transparent)]` guarantees `JsonKey<T>` has the exact layout of `T`,
/// which is what makes the reference reinterpretations below sound.
#[repr(transparent)]
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct JsonKey<T>(pub T);

impl<T> JsonKey<T> {
    /// Wraps `inner` in a `JsonKey`.
    pub fn new(inner: T) -> Self {
        JsonKey(inner)
    }

    /// Consumes the wrapper, returning the inner value.
    pub fn unwrap(self) -> T {
        self.0
    }

    /// Reinterprets `&T` as `&JsonKey<T>` without copying.
    pub fn new_ref(inner: &T) -> &Self {
        // SAFETY: JsonKey<T> is #[repr(transparent)] over T, so both types have
        // identical layout; the cast preserves validity and the lifetime is
        // carried through unchanged.
        unsafe { &*(inner as *const T as *const Self) }
    }

    /// Reinterprets `&mut T` as `&mut JsonKey<T>` without copying.
    pub fn new_mut(inner: &mut T) -> &mut Self {
        // SAFETY: same layout argument as `new_ref`; uniqueness of the borrow
        // is preserved because the input `&mut T` is consumed.
        unsafe { &mut *(inner as *mut T as *mut Self) }
    }
}

impl<T> From<T> for JsonKey<T> {
    fn from(inner: T) -> Self {
        JsonKey::new(inner)
    }
}

impl<T> std::ops::Deref for JsonKey<T> {
    type Target = T;
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl<T> std::ops::DerefMut for JsonKey<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.0
    }
}
impl<T: Serialize> Serialize for JsonKey<T> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
use serde::ser::Error;
serde_json::to_string(&self.0)
.map_err(S::Error::custom)?
.serialize(serializer)
}
}
// { "foo": "bar" } -> "{ \"foo\": \"bar\" }"
impl<'de, T: Serialize + DeserializeOwned> Deserialize<'de> for JsonKey<T> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
use serde::de::Error;
let string = String::deserialize(deserializer)?;
Ok(Self(
serde_json::from_str(&string).map_err(D::Error::custom)?,
))
}
}
/// A value paired with creation and last-modification timestamps.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct WithTimeData<T> {
    // set once in `new` and never touched afterwards
    pub created_at: DateTime<Utc>,
    // bumped automatically on every mutable dereference (see the DerefMut impl)
    pub updated_at: DateTime<Utc>,
    pub value: T,
}
impl<T> WithTimeData<T> {
pub fn new(value: T) -> Self {
let now = Utc::now();
Self {
created_at: now,
updated_at: now,
value,
}
}
}
impl<T> std::ops::Deref for WithTimeData<T> {
    type Target = T;
    // Read-only access: deliberately does NOT touch `updated_at`.
    fn deref(&self) -> &Self::Target {
        &self.value
    }
}
impl<T> std::ops::DerefMut for WithTimeData<T> {
    fn deref_mut(&mut self) -> &mut Self::Target {
        // Any mutable borrow counts as a modification: `updated_at` is bumped
        // even if the caller never actually writes through the reference.
        self.updated_at = Utc::now();
        &mut self.value
    }
}

View File

@@ -9,13 +9,11 @@ use serde::{Deserialize, Serialize};
use tracing::instrument;
use crate::config::{Config, ConfigSpec, ConfigureContext};
use crate::context::{CliContext, RpcContext};
use crate::context::RpcContext;
use crate::db::model::{CurrentDependencies, Database};
use crate::prelude::*;
use crate::s9pk::manifest::Manifest;
use crate::status::DependencyConfigErrors;
use crate::util::serde::HandlerExtSerde;
use crate::util::Version;
use crate::Error;
pub fn dependency() -> ParentHandler {
@@ -28,6 +26,12 @@ pub struct Dependencies(pub BTreeMap<PackageId, DepInfo>);
impl Map for Dependencies {
    type Key = PackageId;
    type Value = DepInfo;
    // PackageId is string-backed, so it is usable as an object key directly
    fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error> {
        Ok(key)
    }
    // override skips the key_str + intern round-trip
    fn key_string(key: &Self::Key) -> Result<imbl_value::InternedString, Error> {
        Ok(key.clone().into())
    }
}
#[derive(Clone, Debug, Deserialize, Serialize)]

View File

@@ -2,7 +2,6 @@ use std::collections::BTreeSet;
use std::path::Path;
use async_compression::tokio::bufread::GzipDecoder;
use clap::Parser;
use serde::{Deserialize, Serialize};
use tokio::fs::File;
use tokio::io::BufReader;

View File

@@ -6,7 +6,6 @@ use std::time::{Duration, SystemTime};
use color_eyre::eyre::eyre;
use models::ResultExt;
use rand::random;
use sqlx::{Pool, Postgres};
use tokio::process::Command;
use tracing::instrument;
@@ -179,7 +178,6 @@ pub async fn init_postgres(datadir: impl AsRef<Path>) -> Result<(), Error> {
}
pub struct InitResult {
pub secret_store: Pool<Postgres>,
pub db: patch_db::PatchDb,
}
@@ -208,16 +206,19 @@ pub async fn init(cfg: &ServerConfig) -> Result<InitResult, Error> {
.await?;
}
let secret_store = cfg.secret_store().await?;
tracing::info!("Opened Postgres");
let db = cfg.db().await?;
let peek = db.peek().await;
tracing::info!("Opened PatchDB");
crate::ssh::sync_keys_from_db(&secret_store, "/home/start9/.ssh/authorized_keys").await?;
crate::ssh::sync_keys(
&peek.as_private().as_ssh_pubkeys().de()?,
"/home/start9/.ssh/authorized_keys",
)
.await?;
tracing::info!("Synced SSH Keys");
let account = AccountInfo::load(&secret_store).await?;
let db = cfg.db(&account).await?;
tracing::info!("Opened PatchDB");
let peek = db.peek().await;
let account = AccountInfo::load(&peek)?;
let mut server_info = peek.as_public().as_server_info().de()?;
// write to ca cert store
@@ -348,7 +349,7 @@ pub async fn init(cfg: &ServerConfig) -> Result<InitResult, Error> {
})
.await?;
crate::version::init(&db, &secret_store).await?;
crate::version::init(&db).await?;
db.mutate(|d| {
let model = d.de()?;
@@ -366,5 +367,5 @@ pub async fn init(cfg: &ServerConfig) -> Result<InitResult, Error> {
tracing::info!("System initialized.");
Ok(InitResult { secret_store, db })
Ok(InitResult { db })
}

View File

@@ -1,4 +1,3 @@
use std::io::SeekFrom;
use std::path::PathBuf;
use std::time::Duration;
@@ -14,7 +13,6 @@ use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::CallRemote;
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use tokio::io::{AsyncReadExt, AsyncSeekExt};
use tokio::sync::oneshot;
use tracing::instrument;
@@ -28,8 +26,6 @@ use crate::prelude::*;
use crate::progress::{FullProgress, PhasedProgressBar};
use crate::s9pk::manifest::PackageId;
use crate::s9pk::merkle_archive::source::http::HttpSource;
use crate::s9pk::v1::reader::S9pkReader;
use crate::s9pk::v2::compat::{self, MAGIC_AND_VERSION};
use crate::s9pk::S9pk;
use crate::upload::upload;
use crate::util::clap::FromStrParser;

View File

@@ -7,7 +7,7 @@ use chrono::{DateTime, Utc};
use clap::Parser;
use color_eyre::eyre::eyre;
use futures::stream::BoxStream;
use futures::{FutureExt, SinkExt, Stream, StreamExt, TryStreamExt};
use futures::{FutureExt, Stream, StreamExt, TryStreamExt};
use models::PackageId;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{command, from_fn_async, CallRemote, Empty, HandlerExt, ParentHandler};

View File

@@ -1,4 +1,5 @@
use std::collections::BTreeSet;
use std::net::Ipv4Addr;
use std::ops::Deref;
use std::path::Path;
use std::sync::{Arc, Weak};
@@ -109,6 +110,7 @@ impl LxcManager {
pub struct LxcContainer {
manager: Weak<LxcManager>,
rootfs: OverlayGuard,
ip: Ipv4Addr,
guid: Arc<InternedString>,
rpc_bind: TmpMountGuard,
config: LxcConfig,
@@ -169,9 +171,20 @@ impl LxcContainer {
.arg(&*guid)
.invoke(ErrorKind::Lxc)
.await?;
let ip = String::from_utf8(
Command::new("lxc-info")
.arg("--name")
.arg(&*guid)
.arg("-iH")
.invoke(ErrorKind::Docker)
.await?,
)?
.trim()
.parse()?;
Ok(Self {
manager: Arc::downgrade(manager),
rootfs,
ip,
guid: Arc::new(guid),
rpc_bind,
config,
@@ -183,6 +196,10 @@ impl LxcContainer {
self.rootfs.path()
}
pub fn ip(&self) -> Ipv4Addr {
self.ip
}
pub fn rpc_dir(&self) -> &Path {
self.rpc_bind.path()
}

View File

@@ -1,14 +1,17 @@
use std::borrow::Borrow;
use std::collections::BTreeSet;
use std::ops::Deref;
use std::sync::Arc;
use std::time::{Duration, Instant};
use axum::extract::Request;
use axum::response::Response;
use basic_cookies::Cookie;
use chrono::Utc;
use color_eyre::eyre::eyre;
use digest::Digest;
use helpers::const_true;
use http::header::COOKIE;
use http::header::{COOKIE, USER_AGENT};
use http::HeaderValue;
use imbl_value::InternedString;
use rpc_toolkit::yajrc::INTERNAL_ERROR;
@@ -38,24 +41,36 @@ pub struct HasLoggedOutSessions(());
impl HasLoggedOutSessions {
pub async fn new(
logged_out_sessions: impl IntoIterator<Item = impl AsLogoutSessionId>,
sessions: impl IntoIterator<Item = impl AsLogoutSessionId>,
ctx: &RpcContext,
) -> Result<Self, Error> {
let mut open_authed_websockets = ctx.open_authed_websockets.lock().await;
let mut sqlx_conn = ctx.secret_store.acquire().await?;
for session in logged_out_sessions {
let session = session.as_logout_session_id();
let session = &*session;
sqlx::query!(
"UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = $1",
session
)
.execute(sqlx_conn.as_mut())
let to_log_out: BTreeSet<_> = sessions
.into_iter()
.map(|s| s.as_logout_session_id())
.collect();
ctx.open_authed_websockets
.lock()
.await
.retain(|session, sockets| {
if to_log_out.contains(session.hashed()) {
for socket in std::mem::take(sockets) {
let _ = socket.send(());
}
false
} else {
true
}
});
ctx.db
.mutate(|db| {
let sessions = db.as_private_mut().as_sessions_mut();
for sid in &to_log_out {
sessions.remove(sid)?;
}
Ok(())
})
.await?;
for socket in open_authed_websockets.remove(session).unwrap_or_default() {
let _ = socket.send(());
}
}
Ok(HasLoggedOutSessions(()))
}
}
@@ -105,15 +120,20 @@ impl HasValidSession {
ctx: &RpcContext,
) -> Result<Self, Error> {
let session_hash = session_token.hashed();
let session = sqlx::query!("UPDATE session SET last_active = CURRENT_TIMESTAMP WHERE id = $1 AND logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP", session_hash)
.execute(ctx.secret_store.acquire().await?.as_mut())
ctx.db
.mutate(|db| {
db.as_private_mut()
.as_sessions_mut()
.as_idx_mut(session_hash)
.ok_or_else(|| {
Error::new(eyre!("UNAUTHORIZED"), crate::ErrorKind::Authorization)
})?
.mutate(|s| {
s.last_active = Utc::now();
Ok(())
})
})
.await?;
if session.rows_affected() == 0 {
return Err(Error::new(
eyre!("UNAUTHORIZED"),
crate::ErrorKind::Authorization,
));
}
Ok(Self(SessionType::Session(session_token)))
}
@@ -181,8 +201,8 @@ impl HashSessionToken {
}
}
pub fn hashed(&self) -> &str {
&*self.hashed
pub fn hashed(&self) -> &InternedString {
&self.hashed
}
fn hash(token: &str) -> InternedString {
@@ -241,6 +261,7 @@ pub struct Auth {
cookie: Option<HeaderValue>,
is_login: bool,
set_cookie: Option<HeaderValue>,
user_agent: Option<HeaderValue>,
}
impl Auth {
pub fn new() -> Self {
@@ -249,6 +270,7 @@ impl Auth {
cookie: None,
is_login: false,
set_cookie: None,
user_agent: None,
}
}
}
@@ -260,7 +282,8 @@ impl Middleware<RpcContext> for Auth {
_: &RpcContext,
request: &mut Request,
) -> Result<(), Response> {
self.cookie = request.headers_mut().get(COOKIE).cloned();
self.cookie = request.headers_mut().remove(COOKIE);
self.user_agent = request.headers_mut().remove(USER_AGENT);
Ok(())
}
async fn process_rpc_request(
@@ -282,6 +305,10 @@ impl Middleware<RpcContext> for Auth {
.into()),
});
}
if let Some(user_agent) = self.user_agent.as_ref().and_then(|h| h.to_str().ok()) {
request.params["user-agent"] = Value::String(Arc::new(user_agent.to_owned()))
// TODO: will this panic?
}
} else if metadata.authenticated {
match HasValidSession::from_header(self.cookie.as_ref(), &context).await {
Err(e) => {
@@ -291,7 +318,8 @@ impl Middleware<RpcContext> for Auth {
})
}
Ok(HasValidSession(SessionType::Session(s))) if metadata.get_session => {
request.params["session"] = Value::String(Arc::new(s.hashed().into()));
request.params["session"] =
Value::String(Arc::new(s.hashed().deref().to_owned()));
// TODO: will this panic?
}
_ => (),

View File

@@ -18,6 +18,7 @@ use trust_dns_server::proto::rr::{Name, Record, RecordType};
use trust_dns_server::server::{Request, RequestHandler, ResponseHandler, ResponseInfo};
use trust_dns_server::ServerFuture;
use crate::net::forward::START9_BRIDGE_IFACE;
use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt};
@@ -163,13 +164,13 @@ impl DnsController {
Command::new("resolvectl")
.arg("dns")
.arg("lxcbr0")
.arg(START9_BRIDGE_IFACE)
.arg("127.0.0.1")
.invoke(ErrorKind::Network)
.await?;
Command::new("resolvectl")
.arg("domain")
.arg("lxcbr0")
.arg(START9_BRIDGE_IFACE)
.arg("embassy")
.invoke(ErrorKind::Network)
.await?;

View File

@@ -0,0 +1,177 @@
use std::collections::BTreeMap;
use std::net::SocketAddr;
use std::sync::{Arc, Weak};
use id_pool::IdPool;
use serde::{Deserialize, Serialize};
use tokio::process::Command;
use tokio::sync::Mutex;
use crate::prelude::*;
use crate::util::Invoke;
/// Name of the LXC bridge interface over which container traffic is forwarded.
pub const START9_BRIDGE_IFACE: &str = "lxcbr0";
/// First port of the IANA dynamic/private range (49152..=65535), used as the
/// lower bound for LAN port allocation.
pub const FIRST_DYNAMIC_PRIVATE_PORT: u16 = 49152;
#[derive(Debug, Deserialize, Serialize)]
pub struct AvailablePorts(IdPool);
impl AvailablePorts {
pub fn new() -> Self {
Self(IdPool::new_ranged(FIRST_DYNAMIC_PRIVATE_PORT..u16::MAX))
}
pub fn alloc(&mut self) -> Result<u16, Error> {
self.0.request_id().ok_or_else(|| {
Error::new(
eyre!("No more dynamic ports available!"),
ErrorKind::Network,
)
})
}
pub fn free(&mut self, ports: impl IntoIterator<Item = u16>) {
for port in ports {
self.0.return_id(port).unwrap_or_default();
}
}
}
/// Manages iptables DNAT rules that expose services on external LAN ports.
///
/// For each external port we keep a set of candidate targets, each paired with
/// a `Weak` liveness handle: a registration stays alive only while the
/// `Arc<()>` returned by [`Self::add`] is held by the caller. Dead entries are
/// pruned lazily on the next `add`/`gc` for that port.
pub struct LanPortForwardController {
    // external port -> (target address -> liveness handle)
    forwards: Mutex<BTreeMap<u16, BTreeMap<SocketAddr, Weak<()>>>>,
}
impl LanPortForwardController {
    pub fn new() -> Self {
        Self {
            forwards: Mutex::new(BTreeMap::new()),
        }
    }
    /// Removes the entry for `port`, returning the previously active target
    /// (the map's first key — i.e. what the iptables rule currently points at)
    /// together with the remaining entries, pruned of dropped handles.
    fn take_live(
        forwards: &mut BTreeMap<u16, BTreeMap<SocketAddr, Weak<()>>>,
        port: u16,
    ) -> (Option<SocketAddr>, BTreeMap<SocketAddr, Weak<()>>) {
        if let Some(forward) = forwards.remove(&port) {
            (
                forward.keys().next().cloned(),
                forward
                    .into_iter()
                    .filter(|(_, rc)| rc.strong_count() > 0)
                    .collect(),
            )
        } else {
            (None, BTreeMap::new())
        }
    }
    /// Registers `addr` as a target for external `port` and (re)points the
    /// iptables rule at the map's first target. Dropping the returned `Arc`
    /// releases the registration (cleaned up on the next `add`/`gc`).
    pub async fn add(&self, port: u16, addr: SocketAddr) -> Result<Arc<()>, Error> {
        let mut writable = self.forwards.lock().await;
        let (prev, mut forward) = Self::take_live(&mut writable, port);
        let rc = Arc::new(());
        forward.insert(addr, Arc::downgrade(&rc));
        let next = forward.keys().next().cloned();
        if !forward.is_empty() {
            writable.insert(port, forward);
        }
        update_forward(port, prev, next).await?;
        Ok(rc)
    }
    /// Drops dead registrations for `external` and updates or removes the
    /// iptables rule accordingly.
    pub async fn gc(&self, external: u16) -> Result<(), Error> {
        let mut writable = self.forwards.lock().await;
        let (prev, forward) = Self::take_live(&mut writable, external);
        let next = forward.keys().next().cloned();
        if !forward.is_empty() {
            writable.insert(external, forward);
        }
        update_forward(external, prev, next).await
    }
}
/// Reconciles the iptables rule for `external`: removes the rule pointing at
/// `prev` and installs one pointing at `next`. A no-op when the target is
/// unchanged.
async fn update_forward(
    external: u16,
    prev: Option<SocketAddr>,
    next: Option<SocketAddr>,
) -> Result<(), Error> {
    if prev == next {
        return Ok(());
    }
    if let Some(old) = prev {
        unforward(START9_BRIDGE_IFACE, external, old).await?;
    }
    if let Some(new) = next {
        forward(START9_BRIDGE_IFACE, external, new).await?;
    }
    Ok(())
}
// Equivalent to:
//   iptables -I FORWARD -o <iface> -p tcp -d <ip> --dport <port> -j ACCEPT
//   iptables -t nat -I PREROUTING -p tcp --dport <external> -j DNAT --to <ip:port>
/// Installs iptables rules forwarding TCP traffic on `external` to `addr`
/// via `iface`.
async fn forward(iface: &str, external: u16, addr: SocketAddr) -> Result<(), Error> {
    let ip = addr.ip().to_string();
    let dport = addr.port().to_string();
    let external = external.to_string();
    let target = addr.to_string();
    // Accept forwarded traffic destined for the target on the bridge iface.
    Command::new("iptables")
        .args(["-I", "FORWARD", "-o", iface, "-p", "tcp", "-d"])
        .args([ip.as_str(), "--dport", dport.as_str(), "-j", "ACCEPT"])
        .invoke(crate::ErrorKind::Network)
        .await?;
    // DNAT the external port to the target socket address.
    Command::new("iptables")
        .args(["-t", "nat", "-I", "PREROUTING", "-p", "tcp", "--dport"])
        .args([external.as_str(), "-j", "DNAT", "--to", target.as_str()])
        .invoke(crate::ErrorKind::Network)
        .await?;
    Ok(())
}
// Equivalent to:
//   iptables -D FORWARD -o <iface> -p tcp -d <ip> --dport <port> -j ACCEPT
//   iptables -t nat -D PREROUTING -p tcp --dport <external> -j DNAT --to <ip:port>
/// Removes the iptables rules previously installed by [`forward`] for the
/// same `(iface, external, addr)` triple.
async fn unforward(iface: &str, external: u16, addr: SocketAddr) -> Result<(), Error> {
    let ip = addr.ip().to_string();
    let dport = addr.port().to_string();
    let external = external.to_string();
    let target = addr.to_string();
    // Delete the ACCEPT rule for forwarded traffic to the target.
    Command::new("iptables")
        .args(["-D", "FORWARD", "-o", iface, "-p", "tcp", "-d"])
        .args([ip.as_str(), "--dport", dport.as_str(), "-j", "ACCEPT"])
        .invoke(crate::ErrorKind::Network)
        .await?;
    // Delete the DNAT rule for the external port.
    Command::new("iptables")
        .args(["-t", "nat", "-D", "PREROUTING", "-p", "tcp", "--dport"])
        .args([external.as_str(), "-j", "DNAT", "--to", target.as_str()])
        .invoke(crate::ErrorKind::Network)
        .await?;
    Ok(())
}

View File

@@ -0,0 +1,9 @@
use serde::{Deserialize, Serialize};
use torut::onion::OnionAddressV3;
/// A public address at which a host can be reached.
///
/// Serialized in camelCase with an internal `kind` tag; currently only Tor
/// onion (v3) addresses exist as a variant.
#[derive(Debug, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord)]
#[serde(rename_all = "camelCase")]
#[serde(tag = "kind")]
pub enum HostAddress {
    /// Reachable as a Tor hidden service at `address`.
    Onion { address: OnionAddressV3 },
}

View File

@@ -0,0 +1,71 @@
use imbl_value::InternedString;
use serde::{Deserialize, Serialize};
use crate::net::forward::AvailablePorts;
use crate::net::vhost::AlpnInfo;
use crate::prelude::*;
/// A single port binding for a host: the requested options plus any LAN port
/// allocated to serve it.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct BindInfo {
    /// The caller-requested bind options.
    pub options: BindOptions,
    /// LAN port allocated from [`AvailablePorts`]; `Some` only when the
    /// options require one (`add_ssl` is set or `secure` is true).
    pub assigned_lan_port: Option<u16>,
}
impl BindInfo {
    /// Creates bind info for `options`, allocating a LAN port from
    /// `available_ports` when one is required (SSL termination requested via
    /// `add_ssl`, or `secure` set).
    pub fn new(available_ports: &mut AvailablePorts, options: BindOptions) -> Result<Self, Error> {
        let assigned_lan_port = if options.add_ssl.is_some() || options.secure {
            Some(available_ports.alloc()?)
        } else {
            None
        };
        Ok(Self {
            options,
            assigned_lan_port,
        })
    }
    /// Replaces the bind options, keeping the previously assigned LAN port if
    /// one is still required, allocating one if newly required, and returning
    /// it to the pool if no longer needed.
    pub fn update(
        self,
        available_ports: &mut AvailablePorts,
        options: BindOptions,
    ) -> Result<Self, Error> {
        let Self {
            mut assigned_lan_port,
            ..
        } = self;
        if options.add_ssl.is_some() || options.secure {
            // keep the existing port; only allocate if we don't have one yet
            if assigned_lan_port.is_none() {
                assigned_lan_port = Some(available_ports.alloc()?);
            }
        } else if let Some(port) = assigned_lan_port.take() {
            available_ports.free([port]);
        }
        Ok(Self {
            options,
            assigned_lan_port,
        })
    }
}
/// Options describing how a host's internal port should be exposed.
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct BindOptions {
    /// URL scheme advertised for this binding (e.g. "http") — presumably;
    /// verify against callers.
    pub scheme: InternedString,
    /// External port the service would prefer to be reachable on.
    pub preferred_external_port: u16,
    /// When set, an SSL-terminating layer is added in front of the service;
    /// forces allocation of a LAN port (see `BindInfo::new`).
    pub add_ssl: Option<AddSslOptions>,
    /// Also forces allocation of a LAN port when true — exact semantics not
    /// visible here; TODO confirm.
    pub secure: bool,
    /// Whether the internal service itself speaks TLS (used to decide whether
    /// the vhost layer connects with SSL or plain TCP).
    pub ssl: bool,
}
/// Options for the SSL-terminating layer added in front of a binding.
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]
#[serde(rename_all = "camelCase")]
pub struct AddSslOptions {
    /// URL scheme advertised for the SSL endpoint (e.g. "https") —
    /// presumably; verify against callers.
    pub scheme: InternedString,
    /// External port the SSL endpoint would prefer to be reachable on.
    pub preferred_external_port: u16,
    // #[serde(default)]
    // pub add_x_forwarded_headers: bool, // TODO
    /// ALPN protocols to advertise/negotiate; defaults via `AlpnInfo`.
    #[serde(default)]
    pub alpn: AlpnInfo,
}

View File

@@ -1,29 +1,84 @@
use std::collections::{BTreeMap, BTreeSet};
use imbl_value::InternedString;
use models::HostId;
use serde::{Deserialize, Serialize};
use crate::net::host::multi::MultiHost;
use crate::net::forward::AvailablePorts;
use crate::net::host::address::HostAddress;
use crate::net::host::binding::{BindInfo, BindOptions};
use crate::prelude::*;
pub mod multi;
pub mod address;
pub mod binding;
pub enum Host {
Multi(MultiHost),
// Single(SingleHost),
// Static(StaticHost),
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
pub struct Host {
pub kind: HostKind,
pub bindings: BTreeMap<u16, BindInfo>,
pub addresses: BTreeSet<HostAddress>,
pub primary: Option<HostAddress>,
}
impl AsRef<Host> for Host {
fn as_ref(&self) -> &Host {
self
}
}
impl Host {
pub fn new(kind: HostKind) -> Self {
Self {
kind,
bindings: BTreeMap::new(),
addresses: BTreeSet::new(),
primary: None,
}
}
}
#[derive(Deserialize, Serialize)]
pub struct BindOptions {
scheme: InternedString,
preferred_external_port: u16,
add_ssl: Option<AddSslOptions>,
secure: bool,
ssl: bool,
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub enum HostKind {
Multi,
// Single,
// Static,
}
#[derive(Deserialize, Serialize)]
pub struct AddSslOptions {
scheme: InternedString,
preferred_external_port: u16,
#[serde(default)]
add_x_forwarded_headers: bool,
#[derive(Debug, Default, Deserialize, Serialize, HasModel)]
#[model = "Model<Self>"]
pub struct HostInfo(BTreeMap<HostId, Host>);
impl Map for HostInfo {
type Key = HostId;
type Value = Host;
fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error> {
Ok(key)
}
fn key_string(key: &Self::Key) -> Result<InternedString, Error> {
Ok(key.clone().into())
}
}
impl Model<HostInfo> {
pub fn add_binding(
&mut self,
available_ports: &mut AvailablePorts,
kind: HostKind,
id: &HostId,
internal_port: u16,
options: BindOptions,
) -> Result<(), Error> {
self.upsert(id, || Host::new(kind))?
.as_bindings_mut()
.mutate(|b| {
let info = if let Some(info) = b.remove(&internal_port) {
info.update(available_ports, options)?
} else {
BindInfo::new(available_ports, options)?
};
b.insert(internal_port, info);
Ok(())
}) // TODO: handle host kind change
}
}

View File

@@ -1,13 +0,0 @@
use std::collections::BTreeMap;
use imbl_value::InternedString;
use serde::{Deserialize, Serialize};
use crate::net::host::BindOptions;
use crate::net::keys::Key;
pub struct MultiHost {
id: InternedString,
key: Key,
binds: BTreeMap<u16, BindOptions>,
}

View File

@@ -1,393 +1,24 @@
use clap::Parser;
use color_eyre::eyre::eyre;
use models::{HostId, Id, PackageId};
use openssl::pkey::{PKey, Private};
use openssl::sha::Sha256;
use openssl::x509::X509;
use p256::elliptic_curve::pkcs8::EncodePrivateKey;
use serde::{Deserialize, Serialize};
use sqlx::{Acquire, PgExecutor};
use ssh_key::private::Ed25519PrivateKey;
use torut::onion::{OnionAddressV3, TorSecretKeyV3};
use zeroize::Zeroize;
use crate::config::ConfigureContext;
use crate::context::RpcContext;
use crate::control::{restart, ControlParams};
use crate::disk::fsck::RequiresReboot;
use crate::net::ssl::CertPair;
use crate::account::AccountInfo;
use crate::net::ssl::CertStore;
use crate::net::tor::OnionStore;
use crate::prelude::*;
use crate::util::crypto::ed25519_expand_key;
// TODO: delete once we may change tor addresses
async fn compat(
secrets: impl PgExecutor<'_>,
host: &Option<(PackageId, HostId)>,
) -> Result<Option<[u8; 64]>, Error> {
if let Some((package, host)) = host {
if let Some(r) = sqlx::query!(
"SELECT key FROM tor WHERE package = $1 AND interface = $2",
package,
host
)
.fetch_optional(secrets)
.await?
{
Ok(Some(<[u8; 64]>::try_from(r.key).map_err(|e| {
Error::new(
eyre!("expected vec of len 64, got len {}", e.len()),
ErrorKind::ParseDbField,
)
})?))
} else {
Ok(None)
}
} else if let Some(key) = sqlx::query!("SELECT tor_key FROM account WHERE id = 0")
.fetch_one(secrets)
.await?
.tor_key
{
Ok(Some(<[u8; 64]>::try_from(key).map_err(|e| {
Error::new(
eyre!("expected vec of len 64, got len {}", e.len()),
ErrorKind::ParseDbField,
)
})?))
} else {
Ok(None)
}
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[model = "Model<Self>"]
pub struct KeyStore {
pub onion: OnionStore,
pub local_certs: CertStore,
// pub letsencrypt_certs: BTreeMap<BTreeSet<InternedString>, CertData>
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Key {
host: Option<(PackageId, HostId)>,
base: [u8; 32],
tor_key: [u8; 64], // Does NOT necessarily match base
}
impl Key {
pub fn host(&self) -> Option<(PackageId, HostId)> {
self.host.clone()
}
pub fn as_bytes(&self) -> [u8; 32] {
self.base
}
pub fn internal_address(&self) -> String {
self.host
.as_ref()
.map(|(pkg_id, _)| format!("{}.embassy", pkg_id))
.unwrap_or_else(|| "embassy".to_owned())
}
pub fn tor_key(&self) -> TorSecretKeyV3 {
self.tor_key.into()
}
pub fn tor_address(&self) -> OnionAddressV3 {
self.tor_key().public().get_onion_address()
}
pub fn base_address(&self) -> String {
self.tor_key()
.public()
.get_onion_address()
.get_address_without_dot_onion()
}
pub fn local_address(&self) -> String {
self.base_address() + ".local"
}
pub fn openssl_key_ed25519(&self) -> PKey<Private> {
PKey::private_key_from_raw_bytes(&self.base, openssl::pkey::Id::ED25519).unwrap()
}
pub fn openssl_key_nistp256(&self) -> PKey<Private> {
let mut buf = self.base;
loop {
if let Ok(k) = p256::SecretKey::from_slice(&buf) {
return PKey::private_key_from_pkcs8(&*k.to_pkcs8_der().unwrap().as_bytes())
.unwrap();
}
let mut sha = Sha256::new();
sha.update(&buf);
buf = sha.finish();
}
}
pub fn ssh_key(&self) -> Ed25519PrivateKey {
Ed25519PrivateKey::from_bytes(&self.base)
}
pub(crate) fn from_pair(
host: Option<(PackageId, HostId)>,
bytes: [u8; 32],
tor_key: [u8; 64],
) -> Self {
Self {
host,
tor_key,
base: bytes,
}
}
pub fn from_bytes(host: Option<(PackageId, HostId)>, bytes: [u8; 32]) -> Self {
Self::from_pair(host, bytes, ed25519_expand_key(&bytes))
}
pub fn new(host: Option<(PackageId, HostId)>) -> Self {
Self::from_bytes(host, rand::random())
}
pub(super) fn with_certs(self, certs: CertPair, int: X509, root: X509) -> KeyInfo {
KeyInfo {
key: self,
certs,
int,
root,
}
}
pub async fn for_package(
secrets: impl PgExecutor<'_>,
package: &PackageId,
) -> Result<Vec<Self>, Error> {
sqlx::query!(
r#"
SELECT
network_keys.package,
network_keys.interface,
network_keys.key,
tor.key AS "tor_key?"
FROM
network_keys
LEFT JOIN
tor
ON
network_keys.package = tor.package
AND
network_keys.interface = tor.interface
WHERE
network_keys.package = $1
"#,
package
)
.fetch_all(secrets)
.await?
.into_iter()
.map(|row| {
let host = Some((package.clone(), HostId::from(Id::try_from(row.interface)?)));
let bytes = row.key.try_into().map_err(|e: Vec<u8>| {
Error::new(
eyre!("Invalid length for network key {} expected 32", e.len()),
crate::ErrorKind::Database,
)
})?;
Ok(match row.tor_key {
Some(tor_key) => Key::from_pair(
host,
bytes,
tor_key.try_into().map_err(|e: Vec<u8>| {
Error::new(
eyre!("Invalid length for tor key {} expected 64", e.len()),
crate::ErrorKind::Database,
)
})?,
),
None => Key::from_bytes(host, bytes),
})
})
.collect()
}
pub async fn for_host<Ex>(
secrets: &mut Ex,
host: Option<(PackageId, HostId)>,
) -> Result<Self, Error>
where
for<'a> &'a mut Ex: PgExecutor<'a>,
{
let tentative = rand::random::<[u8; 32]>();
let actual = if let Some((pkg, iface)) = &host {
let k = tentative.as_slice();
let actual = sqlx::query!(
"INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO UPDATE SET package = EXCLUDED.package RETURNING key",
pkg,
iface,
k,
)
.fetch_one(&mut *secrets)
.await?.key;
let mut bytes = tentative;
bytes.clone_from_slice(actual.get(0..32).ok_or_else(|| {
Error::new(
eyre!("Invalid key size returned from DB"),
crate::ErrorKind::Database,
)
})?);
bytes
} else {
let actual = sqlx::query!("SELECT network_key FROM account WHERE id = 0")
.fetch_one(&mut *secrets)
.await?
.network_key;
let mut bytes = tentative;
bytes.clone_from_slice(actual.get(0..32).ok_or_else(|| {
Error::new(
eyre!("Invalid key size returned from DB"),
crate::ErrorKind::Database,
)
})?);
bytes
impl KeyStore {
pub fn new(account: &AccountInfo) -> Result<Self, Error> {
let mut res = Self {
onion: OnionStore::new(),
local_certs: CertStore::new(account)?,
};
let mut res = Self::from_bytes(host, actual);
if let Some(tor_key) = compat(secrets, &res.host).await? {
res.tor_key = tor_key;
}
res.onion.insert(account.tor_key.clone());
Ok(res)
}
}
impl Drop for Key {
fn drop(&mut self) {
self.base.zeroize();
self.tor_key.zeroize();
}
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct KeyInfo {
key: Key,
certs: CertPair,
int: X509,
root: X509,
}
impl KeyInfo {
pub fn key(&self) -> &Key {
&self.key
}
pub fn certs(&self) -> &CertPair {
&self.certs
}
pub fn int_ca(&self) -> &X509 {
&self.int
}
pub fn root_ca(&self) -> &X509 {
&self.root
}
pub fn fullchain_ed25519(&self) -> Vec<&X509> {
vec![&self.certs.ed25519, &self.int, &self.root]
}
pub fn fullchain_nistp256(&self) -> Vec<&X509> {
vec![&self.certs.nistp256, &self.int, &self.root]
}
}
#[test]
pub fn test_keygen() {
let key = Key::new(None);
key.tor_key();
key.openssl_key_nistp256();
}
pub fn display_requires_reboot(_: RotateKeysParams, args: RequiresReboot) {
if args.0 {
println!("Server must be restarted for changes to take effect");
}
}
#[derive(Deserialize, Serialize, Parser)]
#[serde(rename_all = "kebab-case")]
#[command(rename_all = "kebab-case")]
pub struct RotateKeysParams {
package: Option<PackageId>,
host: Option<HostId>,
}
// #[command(display(display_requires_reboot))]
pub async fn rotate_key(
ctx: RpcContext,
RotateKeysParams { package, host }: RotateKeysParams,
) -> Result<RequiresReboot, Error> {
let mut pgcon = ctx.secret_store.acquire().await?;
let mut tx = pgcon.begin().await?;
if let Some(package) = package {
let Some(host) = host else {
return Err(Error::new(
eyre!("Must specify host"),
ErrorKind::InvalidRequest,
));
};
sqlx::query!(
"DELETE FROM tor WHERE package = $1 AND interface = $2",
&package,
&host,
)
.execute(&mut *tx)
.await?;
sqlx::query!(
"DELETE FROM network_keys WHERE package = $1 AND interface = $2",
&package,
&host,
)
.execute(&mut *tx)
.await?;
let new_key = Key::for_host(&mut *tx, Some((package.clone(), host.clone()))).await?;
let needs_config = ctx
.db
.mutate(|v| {
let installed = v
.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package)
.or_not_found(&package)?
.as_installed_mut()
.or_not_found("installed")?;
let addrs = installed
.as_interface_addresses_mut()
.as_idx_mut(&host)
.or_not_found(&host)?;
if let Some(lan) = addrs.as_lan_address_mut().transpose_mut() {
lan.ser(&new_key.local_address())?;
}
if let Some(lan) = addrs.as_tor_address_mut().transpose_mut() {
lan.ser(&new_key.tor_address().to_string())?;
}
// TODO
// if installed
// .as_manifest()
// .as_config()
// .transpose_ref()
// .is_some()
// {
// installed
// .as_status_mut()
// .as_configured_mut()
// .replace(&false)
// } else {
// Ok(false)
// }
Ok(false)
})
.await?;
tx.commit().await?;
if needs_config {
ctx.services
.get(&package)
.await
.as_ref()
.ok_or_else(|| {
Error::new(
eyre!("There is no manager running for {package}"),
ErrorKind::Unknown,
)
})?
.configure(ConfigureContext::default())
.await?;
} else {
restart(ctx, ControlParams { id: package }).await?;
}
Ok(RequiresReboot(false))
} else {
sqlx::query!("UPDATE account SET tor_key = NULL, network_key = gen_random_bytes(32)")
.execute(&mut *tx)
.await?;
let new_key = Key::for_host(&mut *tx, None).await?;
let url = format!("https://{}", new_key.tor_address()).parse()?;
ctx.db
.mutate(|v| {
v.as_public_mut()
.as_server_info_mut()
.as_tor_address_mut()
.ser(&url)
})
.await?;
tx.commit().await?;
Ok(RequiresReboot(true))
}
}

View File

@@ -1,14 +1,10 @@
use std::collections::BTreeMap;
use std::net::Ipv4Addr;
use std::sync::{Arc, Weak};
use color_eyre::eyre::eyre;
use tokio::process::{Child, Command};
use tokio::sync::Mutex;
use tracing::instrument;
use tokio::process::Command;
use crate::prelude::*;
use crate::util::Invoke;
use crate::{Error, ResultExt};
pub async fn resolve_mdns(hostname: &str) -> Result<Ipv4Addr, Error> {
Ok(String::from_utf8(

View File

@@ -1,9 +1,8 @@
use rpc_toolkit::{from_fn_async, AnyContext, HandlerExt, ParentHandler};
use crate::context::CliContext;
use rpc_toolkit::ParentHandler;
pub mod dhcp;
pub mod dns;
pub mod forward;
pub mod host;
pub mod keys;
pub mod mdns;
@@ -22,13 +21,4 @@ pub fn net() -> ParentHandler {
ParentHandler::new()
.subcommand("tor", tor::tor())
.subcommand("dhcp", dhcp::dhcp())
.subcommand("ssl", ssl::ssl())
.subcommand(
"rotate-key",
from_fn_async(keys::rotate_key)
.with_custom_display_fn::<AnyContext, _>(|handle, result| {
Ok(keys::display_requires_reboot(handle.params, result))
})
.with_remote_cli::<CliContext>(),
)
}

View File

@@ -1,59 +1,72 @@
use std::collections::BTreeMap;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::collections::{BTreeMap, BTreeSet};
use std::net::{Ipv4Addr, SocketAddr};
use std::sync::{Arc, Weak};
use color_eyre::eyre::eyre;
use models::{HostId, PackageId};
use sqlx::PgExecutor;
use imbl::OrdMap;
use lazy_format::lazy_format;
use models::{HostId, OptionExt, PackageId};
use patch_db::PatchDb;
use torut::onion::{OnionAddressV3, TorSecretKeyV3};
use tracing::instrument;
use crate::db::prelude::PatchDbExt;
use crate::error::ErrorCollection;
use crate::hostname::Hostname;
use crate::net::dns::DnsController;
use crate::net::keys::Key;
use crate::net::ssl::{export_cert, export_key, SslManager};
use crate::net::forward::LanPortForwardController;
use crate::net::host::address::HostAddress;
use crate::net::host::binding::{AddSslOptions, BindOptions};
use crate::net::host::{Host, HostKind};
use crate::net::tor::TorController;
use crate::net::vhost::{AlpnInfo, VHostController};
use crate::volume::cert_dir;
use crate::util::serde::MaybeUtf8String;
use crate::{Error, HOST_IP};
pub struct NetController {
db: PatchDb,
pub(super) tor: TorController,
pub(super) vhost: VHostController,
pub(super) dns: DnsController,
pub(super) ssl: Arc<SslManager>,
pub(super) forward: LanPortForwardController,
pub(super) os_bindings: Vec<Arc<()>>,
}
impl NetController {
#[instrument(skip_all)]
pub async fn init(
db: PatchDb,
tor_control: SocketAddr,
tor_socks: SocketAddr,
dns_bind: &[SocketAddr],
ssl: SslManager,
hostname: &Hostname,
os_key: &Key,
os_tor_key: TorSecretKeyV3,
) -> Result<Self, Error> {
let ssl = Arc::new(ssl);
let mut res = Self {
db: db.clone(),
tor: TorController::new(tor_control, tor_socks),
vhost: VHostController::new(ssl.clone()),
vhost: VHostController::new(db),
dns: DnsController::init(dns_bind).await?,
ssl,
forward: LanPortForwardController::new(),
os_bindings: Vec::new(),
};
res.add_os_bindings(hostname, os_key).await?;
res.add_os_bindings(hostname, os_tor_key).await?;
Ok(res)
}
async fn add_os_bindings(&mut self, hostname: &Hostname, key: &Key) -> Result<(), Error> {
let alpn = Err(AlpnInfo::Specified(vec!["http/1.1".into(), "h2".into()]));
async fn add_os_bindings(
&mut self,
hostname: &Hostname,
tor_key: TorSecretKeyV3,
) -> Result<(), Error> {
let alpn = Err(AlpnInfo::Specified(vec![
MaybeUtf8String("http/1.1".into()),
MaybeUtf8String("h2".into()),
]));
// Internal DNS
self.vhost
.add(
key.clone(),
Some("embassy".into()),
443,
([127, 0, 0, 1], 80).into(),
@@ -66,13 +79,7 @@ impl NetController {
// LAN IP
self.os_bindings.push(
self.vhost
.add(
key.clone(),
None,
443,
([127, 0, 0, 1], 80).into(),
alpn.clone(),
)
.add(None, 443, ([127, 0, 0, 1], 80).into(), alpn.clone())
.await?,
);
@@ -80,7 +87,6 @@ impl NetController {
self.os_bindings.push(
self.vhost
.add(
key.clone(),
Some("localhost".into()),
443,
([127, 0, 0, 1], 80).into(),
@@ -91,7 +97,6 @@ impl NetController {
self.os_bindings.push(
self.vhost
.add(
key.clone(),
Some(hostname.no_dot_host_name()),
443,
([127, 0, 0, 1], 80).into(),
@@ -104,7 +109,6 @@ impl NetController {
self.os_bindings.push(
self.vhost
.add(
key.clone(),
Some(hostname.local_domain_name()),
443,
([127, 0, 0, 1], 80).into(),
@@ -113,28 +117,26 @@ impl NetController {
.await?,
);
// Tor (http)
self.os_bindings.push(
self.tor
.add(key.tor_key(), 80, ([127, 0, 0, 1], 80).into())
.await?,
);
// Tor (https)
// Tor
self.os_bindings.push(
self.vhost
.add(
key.clone(),
Some(key.tor_address().to_string()),
Some(tor_key.public().get_onion_address().to_string()),
443,
([127, 0, 0, 1], 80).into(),
alpn.clone(),
)
.await?,
);
self.os_bindings.push(
self.os_bindings.extend(
self.tor
.add(key.tor_key(), 443, ([127, 0, 0, 1], 443).into())
.add(
tor_key,
vec![
(80, ([127, 0, 0, 1], 80).into()), // http
(443, ([127, 0, 0, 1], 443).into()), // https
],
)
.await?,
);
@@ -155,57 +157,15 @@ impl NetController {
ip,
dns,
controller: Arc::downgrade(self),
tor: BTreeMap::new(),
lan: BTreeMap::new(),
binds: BTreeMap::new(),
})
}
}
async fn add_tor(
&self,
key: &Key,
external: u16,
target: SocketAddr,
) -> Result<Vec<Arc<()>>, Error> {
let mut rcs = Vec::with_capacity(1);
rcs.push(self.tor.add(key.tor_key(), external, target).await?);
Ok(rcs)
}
async fn remove_tor(&self, key: &Key, external: u16, rcs: Vec<Arc<()>>) -> Result<(), Error> {
drop(rcs);
self.tor.gc(Some(key.tor_key()), Some(external)).await
}
async fn add_lan(
&self,
key: Key,
external: u16,
target: SocketAddr,
connect_ssl: Result<(), AlpnInfo>,
) -> Result<Vec<Arc<()>>, Error> {
let mut rcs = Vec::with_capacity(2);
rcs.push(
self.vhost
.add(
key.clone(),
Some(key.local_address()),
external,
target.into(),
connect_ssl,
)
.await?,
);
// rcs.push(self.mdns.add(key.base_address()).await?);
// TODO
Ok(rcs)
}
async fn remove_lan(&self, key: &Key, external: u16, rcs: Vec<Arc<()>>) -> Result<(), Error> {
drop(rcs);
// self.mdns.gc(key.base_address()).await?;
// TODO
self.vhost.gc(Some(key.local_address()), external).await
}
#[derive(Default)]
struct HostBinds {
lan: BTreeMap<u16, (u16, Option<AddSslOptions>, Arc<()>)>,
tor: BTreeMap<OnionAddressV3, (OrdMap<u16, SocketAddr>, Vec<Arc<()>>)>,
}
pub struct NetService {
@@ -214,8 +174,7 @@ pub struct NetService {
ip: Ipv4Addr,
dns: Arc<()>,
controller: Weak<NetController>,
tor: BTreeMap<(HostId, u16), (Key, Vec<Arc<()>>)>,
lan: BTreeMap<(HostId, u16), (Key, Vec<Arc<()>>)>,
binds: BTreeMap<HostId, HostBinds>,
}
impl NetService {
fn net_controller(&self) -> Result<Arc<NetController>, Error> {
@@ -226,111 +185,196 @@ impl NetService {
)
})
}
pub async fn add_tor<Ex>(
pub async fn bind(
&mut self,
secrets: &mut Ex,
kind: HostKind,
id: HostId,
external: u16,
internal: u16,
) -> Result<(), Error>
where
for<'a> &'a mut Ex: PgExecutor<'a>,
{
let key = Key::for_host(secrets, Some((self.id.clone(), id.clone()))).await?;
let ctrl = self.net_controller()?;
let tor_idx = (id, external);
let mut tor = self
.tor
.remove(&tor_idx)
.unwrap_or_else(|| (key.clone(), Vec::new()));
tor.1.append(
&mut ctrl
.add_tor(&key, external, SocketAddr::new(self.ip.into(), internal))
.await?,
);
self.tor.insert(tor_idx, tor);
Ok(())
internal_port: u16,
options: BindOptions,
) -> Result<(), Error> {
let id_ref = &id;
let pkg_id = &self.id;
let host = self
.net_controller()?
.db
.mutate(|d| {
let mut ports = d.as_private().as_available_ports().de()?;
let hosts = d
.as_public_mut()
.as_package_data_mut()
.as_idx_mut(pkg_id)
.or_not_found(pkg_id)?
.as_installed_mut()
.or_not_found(pkg_id)?
.as_hosts_mut();
hosts.add_binding(&mut ports, kind, &id, internal_port, options)?;
let host = hosts
.as_idx(&id)
.or_not_found(lazy_format!("Host {id_ref} for {pkg_id}"))?
.de()?;
d.as_private_mut().as_available_ports_mut().ser(&ports)?;
Ok(host)
})
.await?;
self.update(id, host).await
}
pub async fn remove_tor(&mut self, id: HostId, external: u16) -> Result<(), Error> {
async fn update(&mut self, id: HostId, host: Host) -> Result<(), Error> {
let ctrl = self.net_controller()?;
if let Some((key, rcs)) = self.tor.remove(&(id, external)) {
ctrl.remove_tor(&key, external, rcs).await?;
let binds = {
if !self.binds.contains_key(&id) {
self.binds.insert(id.clone(), Default::default());
}
self.binds.get_mut(&id).unwrap()
};
if true
// TODO: if should listen lan
{
for (port, bind) in &host.bindings {
let old_lan_bind = binds.lan.remove(port);
let old_lan_port = old_lan_bind.as_ref().map(|(external, _, _)| *external);
let lan_bind = old_lan_bind.filter(|(external, ssl, _)| {
ssl == &bind.options.add_ssl
&& bind.assigned_lan_port.as_ref() == Some(external)
}); // only keep existing binding if relevant details match
if let Some(external) = bind.assigned_lan_port {
let new_lan_bind = if let Some(b) = lan_bind {
b
} else {
if let Some(ssl) = &bind.options.add_ssl {
let rc = ctrl
.vhost
.add(
None,
external,
(self.ip, *port).into(),
if bind.options.ssl {
Ok(())
} else {
Err(ssl.alpn.clone())
},
)
.await?;
(*port, Some(ssl.clone()), rc)
} else {
let rc = ctrl.forward.add(external, (self.ip, *port).into()).await?;
(*port, None, rc)
}
};
binds.lan.insert(*port, new_lan_bind);
}
if let Some(external) = old_lan_port {
ctrl.vhost.gc(None, external).await?;
ctrl.forward.gc(external).await?;
}
}
let mut removed = BTreeSet::new();
let mut removed_ssl = BTreeSet::new();
binds.lan.retain(|internal, (external, ssl, _)| {
if host.bindings.contains_key(internal) {
true
} else {
if ssl.is_some() {
removed_ssl.insert(*external);
} else {
removed.insert(*external);
}
false
}
});
for external in removed {
ctrl.forward.gc(external).await?;
}
for external in removed_ssl {
ctrl.vhost.gc(None, external).await?;
}
}
let tor_binds: OrdMap<u16, SocketAddr> = host
.bindings
.iter()
.flat_map(|(internal, info)| {
let non_ssl = (
info.options.preferred_external_port,
SocketAddr::from((self.ip, *internal)),
);
if let (Some(ssl), Some(ssl_internal)) =
(&info.options.add_ssl, info.assigned_lan_port)
{
itertools::Either::Left(
[
(
ssl.preferred_external_port,
SocketAddr::from(([127, 0, 0, 1], ssl_internal)),
),
non_ssl,
]
.into_iter(),
)
} else {
itertools::Either::Right([non_ssl].into_iter())
}
})
.collect();
let mut keep_tor_addrs = BTreeSet::new();
for addr in match host.kind {
HostKind::Multi => {
// itertools::Either::Left(
host.addresses.iter()
// )
} // HostKind::Single | HostKind::Static => itertools::Either::Right(&host.primary),
} {
match addr {
HostAddress::Onion { address } => {
keep_tor_addrs.insert(address);
let old_tor_bind = binds.tor.remove(address);
let tor_bind = old_tor_bind.filter(|(ports, _)| ports == &tor_binds);
let new_tor_bind = if let Some(tor_bind) = tor_bind {
tor_bind
} else {
let key = ctrl
.db
.peek()
.await
.into_private()
.into_key_store()
.into_onion()
.get_key(address)?;
let rcs = ctrl
.tor
.add(key, tor_binds.clone().into_iter().collect())
.await?;
(tor_binds.clone(), rcs)
};
binds.tor.insert(address.clone(), new_tor_bind);
}
}
}
for addr in binds.tor.keys() {
if !keep_tor_addrs.contains(addr) {
ctrl.tor.gc(Some(addr.clone()), None).await?;
}
}
Ok(())
}
pub async fn add_lan<Ex>(
&mut self,
secrets: &mut Ex,
id: HostId,
external: u16,
internal: u16,
connect_ssl: Result<(), AlpnInfo>,
) -> Result<(), Error>
where
for<'a> &'a mut Ex: PgExecutor<'a>,
{
let key = Key::for_host(secrets, Some((self.id.clone(), id.clone()))).await?;
let ctrl = self.net_controller()?;
let lan_idx = (id, external);
let mut lan = self
.lan
.remove(&lan_idx)
.unwrap_or_else(|| (key.clone(), Vec::new()));
lan.1.append(
&mut ctrl
.add_lan(
key,
external,
SocketAddr::new(self.ip.into(), internal),
connect_ssl,
)
.await?,
);
self.lan.insert(lan_idx, lan);
Ok(())
}
pub async fn remove_lan(&mut self, id: HostId, external: u16) -> Result<(), Error> {
let ctrl = self.net_controller()?;
if let Some((key, rcs)) = self.lan.remove(&(id, external)) {
ctrl.remove_lan(&key, external, rcs).await?;
}
Ok(())
}
pub async fn export_cert<Ex>(
&self,
secrets: &mut Ex,
id: &HostId,
ip: IpAddr,
) -> Result<(), Error>
where
for<'a> &'a mut Ex: PgExecutor<'a>,
{
let key = Key::for_host(secrets, Some((self.id.clone(), id.clone()))).await?;
let ctrl = self.net_controller()?;
let cert = ctrl.ssl.with_certs(key, ip).await?;
let cert_dir = cert_dir(&self.id, id);
tokio::fs::create_dir_all(&cert_dir).await?;
export_key(
&cert.key().openssl_key_nistp256(),
&cert_dir.join(format!("{id}.key.pem")),
)
.await?;
export_cert(
&cert.fullchain_nistp256(),
&cert_dir.join(format!("{id}.cert.pem")),
)
.await?; // TODO: can upgrade to ed25519?
Ok(())
}
pub async fn remove_all(mut self) -> Result<(), Error> {
self.shutdown = true;
let mut errors = ErrorCollection::new();
if let Some(ctrl) = Weak::upgrade(&self.controller) {
for ((_, external), (key, rcs)) in std::mem::take(&mut self.lan) {
errors.handle(ctrl.remove_lan(&key, external, rcs).await);
}
for ((_, external), (key, rcs)) in std::mem::take(&mut self.tor) {
errors.handle(ctrl.remove_tor(&key, external, rcs).await);
for (_, binds) in std::mem::take(&mut self.binds) {
for (_, (external, ssl, rc)) in binds.lan {
drop(rc);
if ssl.is_some() {
errors.handle(ctrl.vhost.gc(None, external).await);
} else {
errors.handle(ctrl.forward.gc(external).await);
}
}
for (addr, (_, rcs)) in binds.tor {
drop(rcs);
errors.handle(ctrl.tor.gc(Some(addr), None).await);
}
}
std::mem::take(&mut self.dns);
errors.handle(ctrl.dns.gc(Some(self.id.clone()), self.ip).await);
@@ -357,8 +401,7 @@ impl Drop for NetService {
ip: Ipv4Addr::new(0, 0, 0, 0),
dns: Default::default(),
controller: Default::default(),
tor: Default::default(),
lan: Default::default(),
binds: BTreeMap::new(),
},
);
tokio::spawn(async move { svc.remove_all().await.unwrap() });

View File

@@ -5,6 +5,7 @@ use std::path::Path;
use std::time::{SystemTime, UNIX_EPOCH};
use futures::FutureExt;
use imbl_value::InternedString;
use libc::time_t;
use openssl::asn1::{Asn1Integer, Asn1Time};
use openssl::bn::{BigNum, MsbOption};
@@ -14,17 +15,137 @@ use openssl::nid::Nid;
use openssl::pkey::{PKey, Private};
use openssl::x509::{X509Builder, X509Extension, X509NameBuilder, X509};
use openssl::*;
use rpc_toolkit::{from_fn_async, HandlerExt, ParentHandler};
use tokio::sync::{Mutex, RwLock};
use patch_db::HasModel;
use serde::{Deserialize, Serialize};
use tokio::sync::Mutex;
use tracing::instrument;
use crate::account::AccountInfo;
use crate::context::{CliContext, RpcContext};
use crate::hostname::Hostname;
use crate::init::check_time_is_synchronized;
use crate::net::dhcp::ips;
use crate::net::keys::{Key, KeyInfo};
use crate::{Error, ErrorKind, ResultExt, SOURCE_DATE};
use crate::prelude::*;
use crate::util::serde::Pem;
use crate::SOURCE_DATE;
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[model = "Model<Self>"]
#[serde(rename_all = "kebab-case")]
pub struct CertStore {
pub root_key: Pem<PKey<Private>>,
pub root_cert: Pem<X509>,
pub int_key: Pem<PKey<Private>>,
pub int_cert: Pem<X509>,
pub leaves: BTreeMap<JsonKey<BTreeSet<InternedString>>, CertData>,
}
impl CertStore {
pub fn new(account: &AccountInfo) -> Result<Self, Error> {
let int_key = generate_key()?;
let int_cert = make_int_cert((&account.root_ca_key, &account.root_ca_cert), &int_key)?;
Ok(Self {
root_key: Pem::new(account.root_ca_key.clone()),
root_cert: Pem::new(account.root_ca_cert.clone()),
int_key: Pem::new(int_key),
int_cert: Pem::new(int_cert),
leaves: BTreeMap::new(),
})
}
}
impl Model<CertStore> {
/// This function will grant any cert for any domain. It is up to the *caller* to enusure that the calling service has permission to sign a cert for the requested domain
pub fn cert_for(
&mut self,
hostnames: &BTreeSet<InternedString>,
) -> Result<FullchainCertData, Error> {
let keys = if let Some(cert_data) = self
.as_leaves()
.as_idx(JsonKey::new_ref(hostnames))
.map(|m| m.de())
.transpose()?
{
if cert_data
.certs
.ed25519
.not_before()
.compare(Asn1Time::days_from_now(0)?.as_ref())?
== Ordering::Less
&& cert_data
.certs
.ed25519
.not_after()
.compare(Asn1Time::days_from_now(30)?.as_ref())?
== Ordering::Greater
&& cert_data
.certs
.nistp256
.not_before()
.compare(Asn1Time::days_from_now(0)?.as_ref())?
== Ordering::Less
&& cert_data
.certs
.nistp256
.not_after()
.compare(Asn1Time::days_from_now(30)?.as_ref())?
== Ordering::Greater
{
return Ok(FullchainCertData {
root: self.as_root_cert().de()?.0,
int: self.as_int_cert().de()?.0,
leaf: cert_data,
});
}
cert_data.keys
} else {
PKeyPair {
ed25519: PKey::generate_ed25519()?,
nistp256: PKey::from_ec_key(EcKey::generate(&*EcGroup::from_curve_name(
Nid::X9_62_PRIME256V1,
)?)?)?,
}
};
let int_key = self.as_int_key().de()?.0;
let int_cert = self.as_int_cert().de()?.0;
let cert_data = CertData {
certs: CertPair {
ed25519: make_leaf_cert(
(&int_key, &int_cert),
(&keys.ed25519, &SANInfo::new(hostnames)),
)?,
nistp256: make_leaf_cert(
(&int_key, &int_cert),
(&keys.nistp256, &SANInfo::new(hostnames)),
)?,
},
keys,
};
self.as_leaves_mut()
.insert(JsonKey::new_ref(hostnames), &cert_data)?;
Ok(FullchainCertData {
root: self.as_root_cert().de()?.0,
int: self.as_int_cert().de()?.0,
leaf: cert_data,
})
}
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct CertData {
pub keys: PKeyPair,
pub certs: CertPair,
}
pub struct FullchainCertData {
pub root: X509,
pub int: X509,
pub leaf: CertData,
}
impl FullchainCertData {
pub fn fullchain_ed25519(&self) -> Vec<&X509> {
vec![&self.root, &self.int, &self.leaf.certs.ed25519]
}
pub fn fullchain_nistp256(&self) -> Vec<&X509> {
vec![&self.root, &self.int, &self.leaf.certs.nistp256]
}
}
static CERTIFICATE_VERSION: i32 = 2; // X509 version 3 is actually encoded as '2' in the cert because fuck you.
@@ -35,62 +156,20 @@ fn unix_time(time: SystemTime) -> time_t {
.unwrap_or_default()
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct CertPair {
pub ed25519: X509,
pub nistp256: X509,
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct PKeyPair {
#[serde(with = "crate::util::serde::pem")]
pub ed25519: PKey<Private>,
#[serde(with = "crate::util::serde::pem")]
pub nistp256: PKey<Private>,
}
impl CertPair {
fn updated(
pair: Option<&Self>,
hostname: &Hostname,
signer: (&PKey<Private>, &X509),
applicant: &Key,
ip: BTreeSet<IpAddr>,
) -> Result<(Self, bool), Error> {
let mut updated = false;
let mut updated_cert = |cert: Option<&X509>, osk: PKey<Private>| -> Result<X509, Error> {
let mut ips = BTreeSet::new();
if let Some(cert) = cert {
ips.extend(
cert.subject_alt_names()
.iter()
.flatten()
.filter_map(|a| a.ipaddress())
.filter_map(|a| match a.len() {
4 => Some::<IpAddr>(<[u8; 4]>::try_from(a).unwrap().into()),
16 => Some::<IpAddr>(<[u8; 16]>::try_from(a).unwrap().into()),
_ => None,
}),
);
if cert
.not_before()
.compare(Asn1Time::days_from_now(0)?.as_ref())?
== Ordering::Less
&& cert
.not_after()
.compare(Asn1Time::days_from_now(30)?.as_ref())?
== Ordering::Greater
&& ips.is_superset(&ip)
{
return Ok(cert.clone());
}
}
ips.extend(ip.iter().copied());
updated = true;
make_leaf_cert(signer, (&osk, &SANInfo::new(&applicant, hostname, ips)))
};
Ok((
Self {
ed25519: updated_cert(pair.map(|c| &c.ed25519), applicant.openssl_key_ed25519())?,
nistp256: updated_cert(
pair.map(|c| &c.nistp256),
applicant.openssl_key_nistp256(),
)?,
},
updated,
))
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize)]
pub struct CertPair {
#[serde(with = "crate::util::serde::pem")]
pub ed25519: X509,
#[serde(with = "crate::util::serde::pem")]
pub nistp256: X509,
}
pub async fn root_ca_start_time() -> Result<SystemTime, Error> {
@@ -101,51 +180,6 @@ pub async fn root_ca_start_time() -> Result<SystemTime, Error> {
})
}
#[derive(Debug)]
pub struct SslManager {
hostname: Hostname,
root_cert: X509,
int_key: PKey<Private>,
int_cert: X509,
cert_cache: RwLock<BTreeMap<Key, CertPair>>,
}
impl SslManager {
pub fn new(account: &AccountInfo, start_time: SystemTime) -> Result<Self, Error> {
let int_key = generate_key()?;
let int_cert = make_int_cert(
(&account.root_ca_key, &account.root_ca_cert),
&int_key,
start_time,
)?;
Ok(Self {
hostname: account.hostname.clone(),
root_cert: account.root_ca_cert.clone(),
int_key,
int_cert,
cert_cache: RwLock::new(BTreeMap::new()),
})
}
pub async fn with_certs(&self, key: Key, ip: IpAddr) -> Result<KeyInfo, Error> {
let mut ips = ips().await?;
ips.insert(ip);
let (pair, updated) = CertPair::updated(
self.cert_cache.read().await.get(&key),
&self.hostname,
(&self.int_key, &self.int_cert),
&key,
ips,
)?;
if updated {
self.cert_cache
.write()
.await
.insert(key.clone(), pair.clone());
}
Ok(key.with_certs(pair, self.int_cert.clone(), self.root_cert.clone()))
}
}
const EC_CURVE_NAME: nid::Nid = nid::Nid::X9_62_PRIME256V1;
lazy_static::lazy_static! {
static ref EC_GROUP: EcGroup = EcGroup::from_curve_name(EC_CURVE_NAME).unwrap();
@@ -245,18 +279,13 @@ pub fn make_root_cert(
pub fn make_int_cert(
signer: (&PKey<Private>, &X509),
applicant: &PKey<Private>,
start_time: SystemTime,
) -> Result<X509, Error> {
let mut builder = X509Builder::new()?;
builder.set_version(CERTIFICATE_VERSION)?;
let unix_start_time = unix_time(start_time);
builder.set_not_before(signer.1.not_before())?;
let embargo = Asn1Time::from_unix(unix_start_time - 86400)?;
builder.set_not_before(&embargo)?;
let expiration = Asn1Time::from_unix(unix_start_time + (10 * 364 * 86400))?;
builder.set_not_after(&expiration)?;
builder.set_not_after(signer.1.not_after())?;
builder.set_serial_number(&*rand_serial()?)?;
@@ -309,13 +338,13 @@ pub fn make_int_cert(
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum MaybeWildcard {
WithWildcard(String),
WithoutWildcard(String),
WithoutWildcard(InternedString),
}
impl MaybeWildcard {
pub fn as_str(&self) -> &str {
match self {
MaybeWildcard::WithWildcard(s) => s.as_str(),
MaybeWildcard::WithoutWildcard(s) => s.as_str(),
MaybeWildcard::WithoutWildcard(s) => &**s,
}
}
}
@@ -334,18 +363,16 @@ pub struct SANInfo {
pub ips: BTreeSet<IpAddr>,
}
impl SANInfo {
pub fn new(key: &Key, hostname: &Hostname, ips: BTreeSet<IpAddr>) -> Self {
pub fn new(hostnames: &BTreeSet<InternedString>) -> Self {
let mut dns = BTreeSet::new();
if let Some((id, _)) = key.host() {
dns.insert(MaybeWildcard::WithWildcard(format!("{id}.embassy")));
dns.insert(MaybeWildcard::WithWildcard(key.local_address().to_string()));
} else {
dns.insert(MaybeWildcard::WithoutWildcard("embassy".to_owned()));
dns.insert(MaybeWildcard::WithWildcard(hostname.local_domain_name()));
dns.insert(MaybeWildcard::WithoutWildcard(hostname.no_dot_host_name()));
dns.insert(MaybeWildcard::WithoutWildcard("localhost".to_owned()));
let mut ips = BTreeSet::new();
for hostname in hostnames {
if let Ok(ip) = hostname.parse::<IpAddr>() {
ips.insert(ip);
} else {
dns.insert(MaybeWildcard::WithoutWildcard(hostname.clone())); // TODO: wildcards?
}
}
dns.insert(MaybeWildcard::WithWildcard(key.tor_address().to_string()));
Self { dns, ips }
}
}
@@ -443,14 +470,3 @@ pub fn make_leaf_cert(
let cert = builder.build();
Ok(cert)
}
pub fn ssl() -> ParentHandler {
ParentHandler::new().subcommand("size", from_fn_async(size).with_remote_cli::<CliContext>())
}
pub async fn size(ctx: RpcContext) -> Result<String, Error> {
Ok(format!(
"Cert Catch size: {}",
ctx.net_controller.ssl.cert_cache.read().await.len()
))
}

View File

@@ -11,7 +11,6 @@ use axum::routing::{any, get, post};
use axum::Router;
use digest::Digest;
use futures::future::ready;
use futures::{FutureExt, TryFutureExt};
use http::header::ACCEPT_ENCODING;
use http::request::Parts as RequestParts;
use http::{HeaderMap, Method, StatusCode};
@@ -28,7 +27,6 @@ use crate::context::{DiagnosticContext, InstallContext, RpcContext, SetupContext
use crate::core::rpc_continuations::RequestGuid;
use crate::db::subscribe;
use crate::hostname::Hostname;
use crate::install::PKG_PUBLIC_DIR;
use crate::middleware::auth::{Auth, HasValidSession};
use crate::middleware::cors::Cors;
use crate::middleware::db::SyncDb;
@@ -131,8 +129,7 @@ pub fn main_ui_server_router(ctx: RpcContext) -> Router {
"/ws/rpc/*path",
get({
let ctx = ctx.clone();
move |headers: HeaderMap,
x::Path(path): x::Path<String>,
move |x::Path(path): x::Path<String>,
ws: axum::extract::ws::WebSocketUpgrade| async move {
match RequestGuid::from(&path) {
None => {
@@ -155,7 +152,6 @@ pub fn main_ui_server_router(ctx: RpcContext) -> Router {
let path = request
.uri()
.path()
.clone()
.strip_prefix("/rest/rpc/")
.unwrap_or_default();
match RequestGuid::from(&path) {

View File

@@ -28,13 +28,44 @@ use crate::logs::{
cli_logs_generic_follow, cli_logs_generic_nofollow, fetch_logs, follow_logs, journalctl,
LogFollowResponse, LogResponse, LogSource,
};
use crate::prelude::*;
use crate::util::serde::{display_serializable, HandlerExtSerde, WithIoFormat};
use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt as _};
pub const SYSTEMD_UNIT: &str = "tor@default";
const STARTING_HEALTH_TIMEOUT: u64 = 120; // 2min
#[derive(Debug, Default, Deserialize, Serialize)]
pub struct OnionStore(BTreeMap<OnionAddressV3, TorSecretKeyV3>);
impl Map for OnionStore {
type Key = OnionAddressV3;
type Value = TorSecretKeyV3;
fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error> {
Ok(key.get_address_without_dot_onion())
}
}
impl OnionStore {
pub fn new() -> Self {
Self::default()
}
pub fn insert(&mut self, key: TorSecretKeyV3) {
self.0.insert(key.public().get_onion_address(), key);
}
}
impl Model<OnionStore> {
pub fn new_key(&mut self) -> Result<TorSecretKeyV3, Error> {
let key = TorSecretKeyV3::generate();
self.insert(&key.public().get_onion_address(), &key)?;
Ok(key)
}
pub fn insert_key(&mut self, key: &TorSecretKeyV3) -> Result<(), Error> {
self.insert(&key.public().get_onion_address(), &key)
}
pub fn get_key(&self, address: &OnionAddressV3) -> Result<TorSecretKeyV3, Error> {
self.as_idx(address).or_not_found(address)?.de()
}
}
enum ErrorLogSeverity {
Fatal { wipe_state: bool },
Unknown { wipe_state: bool },
@@ -208,33 +239,29 @@ impl TorController {
pub async fn add(
&self,
key: TorSecretKeyV3,
external: u16,
target: SocketAddr,
) -> Result<Arc<()>, Error> {
bindings: Vec<(u16, SocketAddr)>,
) -> Result<Vec<Arc<()>>, Error> {
let (reply, res) = oneshot::channel();
self.0
.send
.send(TorCommand::AddOnion {
key,
external,
target,
bindings,
reply,
})
.ok()
.ok_or_else(|| Error::new(eyre!("TorControl died"), ErrorKind::Tor))?;
.map_err(|_| Error::new(eyre!("TorControl died"), ErrorKind::Tor))?;
res.await
.ok()
.ok_or_else(|| Error::new(eyre!("TorControl died"), ErrorKind::Tor))
.map_err(|_| Error::new(eyre!("TorControl died"), ErrorKind::Tor))
}
pub async fn gc(
&self,
key: Option<TorSecretKeyV3>,
addr: Option<OnionAddressV3>,
external: Option<u16>,
) -> Result<(), Error> {
self.0
.send
.send(TorCommand::GC { key, external })
.send(TorCommand::GC { addr, external })
.ok()
.ok_or_else(|| Error::new(eyre!("TorControl died"), ErrorKind::Tor))
}
@@ -279,12 +306,11 @@ type AuthenticatedConnection = AuthenticatedConn<
enum TorCommand {
AddOnion {
key: TorSecretKeyV3,
external: u16,
target: SocketAddr,
reply: oneshot::Sender<Arc<()>>,
bindings: Vec<(u16, SocketAddr)>,
reply: oneshot::Sender<Vec<Arc<()>>>,
},
GC {
key: Option<TorSecretKeyV3>,
addr: Option<OnionAddressV3>,
external: Option<u16>,
},
GetInfo {
@@ -302,7 +328,13 @@ async fn torctl(
tor_control: SocketAddr,
tor_socks: SocketAddr,
recv: &mut mpsc::UnboundedReceiver<TorCommand>,
services: &mut BTreeMap<[u8; 64], BTreeMap<u16, BTreeMap<SocketAddr, Weak<()>>>>,
services: &mut BTreeMap<
OnionAddressV3,
(
TorSecretKeyV3,
BTreeMap<u16, BTreeMap<SocketAddr, Weak<()>>>,
),
>,
wipe_state: &AtomicBool,
health_timeout: &mut Duration,
) -> Result<(), Error> {
@@ -420,27 +452,32 @@ async fn torctl(
match command {
TorCommand::AddOnion {
key,
external,
target,
bindings,
reply,
} => {
let mut service = if let Some(service) = services.remove(&key.as_bytes()) {
let addr = key.public().get_onion_address();
let mut service = if let Some((_key, service)) = services.remove(&addr) {
debug_assert_eq!(key, _key);
service
} else {
BTreeMap::new()
};
let mut binding = service.remove(&external).unwrap_or_default();
let rc = if let Some(rc) =
Weak::upgrade(&binding.remove(&target).unwrap_or_default())
{
rc
} else {
Arc::new(())
};
binding.insert(target, Arc::downgrade(&rc));
service.insert(external, binding);
services.insert(key.as_bytes(), service);
reply.send(rc).unwrap_or_default();
let mut rcs = Vec::with_capacity(bindings.len());
for (external, target) in bindings {
let mut binding = service.remove(&external).unwrap_or_default();
let rc = if let Some(rc) =
Weak::upgrade(&binding.remove(&target).unwrap_or_default())
{
rc
} else {
Arc::new(())
};
binding.insert(target, Arc::downgrade(&rc));
service.insert(external, binding);
rcs.push(rc);
}
services.insert(addr, (key, service));
reply.send(rcs).unwrap_or_default();
}
TorCommand::GetInfo { reply, .. } => {
reply
@@ -480,8 +517,7 @@ async fn torctl(
)
.await?;
for (key, service) in std::mem::take(services) {
let key = TorSecretKeyV3::from(key);
for (addr, (key, service)) in std::mem::take(services) {
let bindings = service
.iter()
.flat_map(|(ext, int)| {
@@ -491,7 +527,7 @@ async fn torctl(
})
.collect::<Vec<_>>();
if !bindings.is_empty() {
services.insert(key.as_bytes(), service);
services.insert(addr, (key.clone(), service));
connection
.add_onion_v3(&key, false, false, false, None, &mut bindings.iter())
.await?;
@@ -503,31 +539,33 @@ async fn torctl(
match command {
TorCommand::AddOnion {
key,
external,
target,
bindings,
reply,
} => {
let mut rm_res = Ok(());
let onion_base = key
.public()
.get_onion_address()
.get_address_without_dot_onion();
let mut service = if let Some(service) = services.remove(&key.as_bytes()) {
let addr = key.public().get_onion_address();
let onion_base = addr.get_address_without_dot_onion();
let mut service = if let Some((_key, service)) = services.remove(&addr) {
debug_assert_eq!(_key, key);
rm_res = connection.del_onion(&onion_base).await;
service
} else {
BTreeMap::new()
};
let mut binding = service.remove(&external).unwrap_or_default();
let rc = if let Some(rc) =
Weak::upgrade(&binding.remove(&target).unwrap_or_default())
{
rc
} else {
Arc::new(())
};
binding.insert(target, Arc::downgrade(&rc));
service.insert(external, binding);
let mut rcs = Vec::with_capacity(bindings.len());
for (external, target) in bindings {
let mut binding = service.remove(&external).unwrap_or_default();
let rc = if let Some(rc) =
Weak::upgrade(&binding.remove(&target).unwrap_or_default())
{
rc
} else {
Arc::new(())
};
binding.insert(target, Arc::downgrade(&rc));
service.insert(external, binding);
rcs.push(rc);
}
let bindings = service
.iter()
.flat_map(|(ext, int)| {
@@ -536,25 +574,21 @@ async fn torctl(
.map(|(addr, _)| (*ext, SocketAddr::from(*addr)))
})
.collect::<Vec<_>>();
services.insert(key.as_bytes(), service);
reply.send(rc).unwrap_or_default();
services.insert(addr, (key.clone(), service));
reply.send(rcs).unwrap_or_default();
rm_res?;
connection
.add_onion_v3(&key, false, false, false, None, &mut bindings.iter())
.await?;
}
TorCommand::GC { key, external } => {
for key in if key.is_some() {
itertools::Either::Left(key.into_iter().map(|k| k.as_bytes()))
TorCommand::GC { addr, external } => {
for addr in if addr.is_some() {
itertools::Either::Left(addr.into_iter())
} else {
itertools::Either::Right(services.keys().cloned().collect_vec().into_iter())
} {
let key = TorSecretKeyV3::from(key);
let onion_base = key
.public()
.get_onion_address()
.get_address_without_dot_onion();
if let Some(mut service) = services.remove(&key.as_bytes()) {
if let Some((key, mut service)) = services.remove(&addr) {
let onion_base: String = addr.get_address_without_dot_onion();
for external in if external.is_some() {
itertools::Either::Left(external.into_iter())
} else {
@@ -583,7 +617,7 @@ async fn torctl(
})
.collect::<Vec<_>>();
if !bindings.is_empty() {
services.insert(key.as_bytes(), service);
services.insert(addr, (key.clone(), service));
}
rm_res?;
if !bindings.is_empty() {

View File

@@ -5,7 +5,9 @@ use std::time::Duration;
use color_eyre::eyre::eyre;
use helpers::NonDetachingJoinHandle;
use imbl_value::InternedString;
use models::ResultExt;
use serde::{Deserialize, Serialize};
use tokio::net::{TcpListener, TcpStream};
use tokio::sync::{Mutex, RwLock};
use tokio_rustls::rustls::pki_types::{
@@ -16,38 +18,36 @@ use tokio_rustls::rustls::{RootCertStore, ServerConfig};
use tokio_rustls::{LazyConfigAcceptor, TlsConnector};
use tracing::instrument;
use crate::net::keys::Key;
use crate::net::ssl::SslManager;
use crate::prelude::*;
use crate::util::io::{BackTrackingReader, TimeoutStream};
use crate::util::serde::MaybeUtf8String;
// not allowed: <=1024, >=32768, 5355, 5432, 9050, 6010, 9051, 5353
pub struct VHostController {
ssl: Arc<SslManager>,
db: PatchDb,
servers: Mutex<BTreeMap<u16, VHostServer>>,
}
impl VHostController {
pub fn new(ssl: Arc<SslManager>) -> Self {
pub fn new(db: PatchDb) -> Self {
Self {
ssl,
db,
servers: Mutex::new(BTreeMap::new()),
}
}
#[instrument(skip_all)]
pub async fn add(
&self,
key: Key,
hostname: Option<String>,
external: u16,
target: SocketAddr,
connect_ssl: Result<(), AlpnInfo>,
connect_ssl: Result<(), AlpnInfo>, // Ok: yes, connect using ssl, pass through alpn; Err: connect tcp, use provided strategy for alpn
) -> Result<Arc<()>, Error> {
let mut writable = self.servers.lock().await;
let server = if let Some(server) = writable.remove(&external) {
server
} else {
VHostServer::new(external, self.ssl.clone()).await?
VHostServer::new(external, self.db.clone()).await?
};
let rc = server
.add(
@@ -55,7 +55,6 @@ impl VHostController {
TargetInfo {
addr: target,
connect_ssl,
key,
},
)
.await;
@@ -79,13 +78,18 @@ impl VHostController {
struct TargetInfo {
addr: SocketAddr,
connect_ssl: Result<(), AlpnInfo>,
key: Key,
}
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub enum AlpnInfo {
Reflect,
Specified(Vec<Vec<u8>>),
Specified(Vec<MaybeUtf8String>),
}
impl Default for AlpnInfo {
fn default() -> Self {
Self::Reflect
}
}
struct VHostServer {
@@ -94,7 +98,7 @@ struct VHostServer {
}
impl VHostServer {
#[instrument(skip_all)]
async fn new(port: u16, ssl: Arc<SslManager>) -> Result<Self, Error> {
async fn new(port: u16, db: PatchDb) -> Result<Self, Error> {
// check if port allowed
let listener = TcpListener::bind(SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), port))
.await
@@ -105,13 +109,13 @@ impl VHostServer {
_thread: tokio::spawn(async move {
loop {
match listener.accept().await {
Ok((stream, _)) => {
Ok((stream, sock_addr)) => {
let stream =
Box::pin(TimeoutStream::new(stream, Duration::from_secs(300)));
let mut stream = BackTrackingReader::new(stream);
stream.start_buffering();
let mapping = mapping.clone();
let ssl = ssl.clone();
let db = db.clone();
tokio::spawn(async move {
if let Err(e) = async {
let mid = match LazyConfigAcceptor::new(
@@ -167,6 +171,7 @@ impl VHostServer {
.find(|(_, rc)| rc.strong_count() > 0)
.or_else(|| {
if target_name
.as_ref()
.map(|s| s.parse::<IpAddr>().is_ok())
.unwrap_or(true)
{
@@ -184,8 +189,22 @@ impl VHostServer {
if let Some(target) = target {
let mut tcp_stream =
TcpStream::connect(target.addr).await?;
let key =
ssl.with_certs(target.key, target.addr.ip()).await?;
let hostnames = target_name
.as_ref()
.into_iter()
.map(InternedString::intern)
.chain(std::iter::once(InternedString::from_display(
&sock_addr.ip(),
)))
.collect();
let key = db
.mutate(|v| {
v.as_private_mut()
.as_key_store_mut()
.as_local_certs_mut()
.cert_for(&hostnames)
})
.await?;
let cfg = ServerConfig::builder()
.with_no_client_auth();
let mut cfg =
@@ -202,8 +221,9 @@ impl VHostServer {
})
.collect::<Result<_, Error>>()?,
PrivateKeyDer::from(PrivatePkcs8KeyDer::from(
key.key()
.openssl_key_ed25519()
key.leaf
.keys
.ed25519
.private_key_to_pkcs8()?,
)),
)
@@ -218,8 +238,9 @@ impl VHostServer {
})
.collect::<Result<_, Error>>()?,
PrivateKeyDer::from(PrivatePkcs8KeyDer::from(
key.key()
.openssl_key_nistp256()
key.leaf
.keys
.nistp256
.private_key_to_pkcs8()?,
)),
)
@@ -233,7 +254,7 @@ impl VHostServer {
let mut store = RootCertStore::empty();
store.add(
CertificateDer::from(
key.root_ca().to_der()?,
key.root.to_der()?,
),
).with_kind(crate::ErrorKind::OpenSsl)?;
store
@@ -249,9 +270,9 @@ impl VHostServer {
let mut target_stream =
TlsConnector::from(Arc::new(client_cfg))
.connect_with(
ServerName::try_from(
key.key().internal_address(),
).with_kind(crate::ErrorKind::OpenSsl)?,
ServerName::IpAddress(
target.addr.ip().into(),
),
tcp_stream,
|conn| {
cfg.alpn_protocols.extend(
@@ -302,7 +323,7 @@ impl VHostServer {
.await
}
Err(AlpnInfo::Specified(alpn)) => {
cfg.alpn_protocols = alpn;
cfg.alpn_protocols = alpn.into_iter().map(|a| a.0).collect();
let mut tls_stream =
match mid.into_stream(Arc::new(cfg)).await {
Ok(a) => a,

View File

@@ -1,24 +1,23 @@
use std::collections::HashMap;
use std::collections::BTreeMap;
use std::fmt;
use std::str::FromStr;
use chrono::{DateTime, TimeZone, Utc};
use chrono::{DateTime, Utc};
use clap::builder::ValueParserFactory;
use clap::Parser;
use color_eyre::eyre::eyre;
use imbl_value::InternedString;
use models::PackageId;
use rpc_toolkit::{command, from_fn_async, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use sqlx::PgPool;
use tokio::sync::Mutex;
use tracing::instrument;
use crate::backup::BackupReport;
use crate::context::{CliContext, RpcContext};
use crate::db::model::DatabaseModel;
use crate::prelude::*;
use crate::util::clap::FromStrParser;
use crate::util::serde::HandlerExtSerde;
use crate::{Error, ErrorKind, ResultExt};
// #[command(subcommands(list, delete, delete_before, create))]
pub fn notification() -> ParentHandler {
@@ -53,132 +52,102 @@ pub fn notification() -> ParentHandler {
#[serde(rename_all = "kebab-case")]
#[command(rename_all = "kebab-case")]
pub struct ListParams {
before: Option<i32>,
limit: Option<u32>,
before: Option<u32>,
limit: Option<usize>,
}
// #[command(display(display_serializable))]
#[instrument(skip_all)]
pub async fn list(
ctx: RpcContext,
ListParams { before, limit }: ListParams,
) -> Result<Vec<Notification>, Error> {
let limit = limit.unwrap_or(40);
match before {
None => {
let records = sqlx::query!(
"SELECT id, package_id, created_at, code, level, title, message, data FROM notifications ORDER BY id DESC LIMIT $1",
limit as i64
).fetch_all(&ctx.secret_store).await?;
let notifs = records
.into_iter()
.map(|r| {
Ok(Notification {
id: r.id as u32,
package_id: r.package_id.and_then(|p| p.parse().ok()),
created_at: Utc.from_utc_datetime(&r.created_at),
code: r.code as u32,
level: match r.level.parse::<NotificationLevel>() {
Ok(a) => a,
Err(e) => return Err(e.into()),
},
title: r.title,
message: r.message,
data: match r.data {
None => serde_json::Value::Null,
Some(v) => match v.parse::<serde_json::Value>() {
Ok(a) => a,
Err(e) => {
return Err(Error::new(
eyre!("Invalid Notification Data: {}", e),
ErrorKind::ParseDbField,
))
}
},
},
})
})
.collect::<Result<Vec<Notification>, Error>>()?;
ctx.db
.mutate(|d| {
d.as_public_mut()
) -> Result<Vec<NotificationWithId>, Error> {
ctx.db
.mutate(|db| {
let limit = limit.unwrap_or(40);
match before {
None => {
let records = db
.as_private()
.as_notifications()
.as_entries()?
.into_iter()
.take(limit);
let notifs = records
.into_iter()
.map(|(id, notification)| {
Ok(NotificationWithId {
id,
notification: notification.de()?,
})
})
.collect::<Result<Vec<NotificationWithId>, Error>>()?;
db.as_public_mut()
.as_server_info_mut()
.as_unread_notification_count_mut()
.ser(&0)
})
.await?;
Ok(notifs)
}
Some(before) => {
let records = sqlx::query!(
"SELECT id, package_id, created_at, code, level, title, message, data FROM notifications WHERE id < $1 ORDER BY id DESC LIMIT $2",
before,
limit as i64
).fetch_all(&ctx.secret_store).await?;
let res = records
.into_iter()
.map(|r| {
Ok(Notification {
id: r.id as u32,
package_id: r.package_id.and_then(|p| p.parse().ok()),
created_at: Utc.from_utc_datetime(&r.created_at),
code: r.code as u32,
level: match r.level.parse::<NotificationLevel>() {
Ok(a) => a,
Err(e) => return Err(e.into()),
},
title: r.title,
message: r.message,
data: match r.data {
None => serde_json::Value::Null,
Some(v) => match v.parse::<serde_json::Value>() {
Ok(a) => a,
Err(e) => {
return Err(Error::new(
eyre!("Invalid Notification Data: {}", e),
ErrorKind::ParseDbField,
))
}
},
},
})
})
.collect::<Result<Vec<Notification>, Error>>()?;
Ok(res)
}
}
.ser(&0)?;
Ok(notifs)
}
Some(before) => {
let records = db
.as_private()
.as_notifications()
.as_entries()?
.into_iter()
.filter(|(id, _)| *id < before)
.take(limit);
records
.into_iter()
.map(|(id, notification)| {
Ok(NotificationWithId {
id,
notification: notification.de()?,
})
})
.collect()
}
}
})
.await
}
#[derive(Deserialize, Serialize, Parser)]
#[serde(rename_all = "kebab-case")]
#[command(rename_all = "kebab-case")]
pub struct DeleteParams {
id: i32,
id: u32,
}
pub async fn delete(ctx: RpcContext, DeleteParams { id }: DeleteParams) -> Result<(), Error> {
sqlx::query!("DELETE FROM notifications WHERE id = $1", id)
.execute(&ctx.secret_store)
.await?;
Ok(())
ctx.db
.mutate(|db| {
db.as_private_mut().as_notifications_mut().remove(&id)?;
Ok(())
})
.await
}
#[derive(Deserialize, Serialize, Parser)]
#[serde(rename_all = "kebab-case")]
#[command(rename_all = "kebab-case")]
pub struct DeleteBeforeParams {
before: i32,
before: u32,
}
pub async fn delete_before(
ctx: RpcContext,
DeleteBeforeParams { before }: DeleteBeforeParams,
) -> Result<(), Error> {
sqlx::query!("DELETE FROM notifications WHERE id < $1", before)
.execute(&ctx.secret_store)
.await?;
Ok(())
ctx.db
.mutate(|db| {
for id in db.as_private().as_notifications().keys()? {
if id < before {
db.as_private_mut().as_notifications_mut().remove(&id)?;
}
}
Ok(())
})
.await
}
#[derive(Deserialize, Serialize, Parser)]
#[serde(rename_all = "kebab-case")]
#[command(rename_all = "kebab-case")]
@@ -198,8 +167,8 @@ pub async fn create(
message,
}: CreateParams,
) -> Result<(), Error> {
ctx.notification_manager
.notify(ctx.db.clone(), package, level, title, message, (), None)
ctx.db
.mutate(|db| notify(db, package, level, title, message, ()))
.await
}
@@ -254,120 +223,95 @@ impl fmt::Display for InvalidNotificationLevel {
write!(f, "Invalid Notification Level: {}", self.0)
}
}
#[derive(Debug, serde::Serialize, serde::Deserialize)]
#[derive(Debug, Default, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Notifications(pub BTreeMap<u32, Notification>);
impl Notifications {
pub fn new() -> Self {
Self(BTreeMap::new())
}
}
impl Map for Notifications {
type Key = u32;
type Value = Notification;
fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error> {
Self::key_string(key)
}
fn key_string(key: &Self::Key) -> Result<InternedString, Error> {
Ok(InternedString::from_display(key))
}
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct Notification {
id: u32,
package_id: Option<PackageId>,
created_at: DateTime<Utc>,
code: u32,
level: NotificationLevel,
title: String,
message: String,
data: serde_json::Value,
data: Value,
}
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct NotificationWithId {
id: u32,
#[serde(flatten)]
notification: Notification,
}
pub trait NotificationType:
serde::Serialize + for<'de> serde::Deserialize<'de> + std::fmt::Debug
{
const CODE: i32;
const CODE: u32;
}
impl NotificationType for () {
const CODE: i32 = 0;
const CODE: u32 = 0;
}
impl NotificationType for BackupReport {
const CODE: i32 = 1;
const CODE: u32 = 1;
}
pub struct NotificationManager {
sqlite: PgPool,
cache: Mutex<HashMap<(Option<PackageId>, NotificationLevel, String), i64>>,
}
impl NotificationManager {
pub fn new(sqlite: PgPool) -> Self {
NotificationManager {
sqlite,
cache: Mutex::new(HashMap::new()),
}
}
#[instrument(skip(db, subtype, self))]
pub async fn notify<T: NotificationType>(
&self,
db: PatchDb,
package_id: Option<PackageId>,
level: NotificationLevel,
title: String,
message: String,
subtype: T,
debounce_interval: Option<u32>,
) -> Result<(), Error> {
let peek = db.peek().await;
if !self
.should_notify(&package_id, &level, &title, debounce_interval)
.await
{
return Ok(());
}
let mut count = peek
.as_public()
.as_server_info()
.as_unread_notification_count()
.de()?;
let sql_package_id = package_id.as_ref().map(|p| &**p);
let sql_code = T::CODE;
let sql_level = format!("{}", level);
let sql_data =
serde_json::to_string(&subtype).with_kind(crate::ErrorKind::Serialization)?;
sqlx::query!(
"INSERT INTO notifications (package_id, code, level, title, message, data) VALUES ($1, $2, $3, $4, $5, $6)",
sql_package_id,
sql_code as i32,
sql_level,
title,
message,
sql_data
).execute(&self.sqlite).await?;
count += 1;
db.mutate(|db| {
db.as_public_mut()
.as_server_info_mut()
.as_unread_notification_count_mut()
.ser(&count)
})
.await
}
async fn should_notify(
&self,
package_id: &Option<PackageId>,
level: &NotificationLevel,
title: &String,
debounce_interval: Option<u32>,
) -> bool {
let mut guard = self.cache.lock().await;
let k = (package_id.clone(), level.clone(), title.clone());
let v = (*guard).get(&k);
match v {
None => {
(*guard).insert(k, Utc::now().timestamp());
true
}
Some(last_issued) => match debounce_interval {
None => {
(*guard).insert(k, Utc::now().timestamp());
true
}
Some(interval) => {
if last_issued + interval as i64 > Utc::now().timestamp() {
false
} else {
(*guard).insert(k, Utc::now().timestamp());
true
}
}
},
}
}
#[instrument(skip(subtype, db))]
pub fn notify<T: NotificationType>(
db: &mut DatabaseModel,
package_id: Option<PackageId>,
level: NotificationLevel,
title: String,
message: String,
subtype: T,
) -> Result<(), Error> {
let data = to_value(&subtype)?;
db.as_public_mut()
.as_server_info_mut()
.as_unread_notification_count_mut()
.mutate(|c| {
*c += 1;
Ok(())
})?;
let id = db
.as_private()
.as_notifications()
.keys()?
.into_iter()
.max()
.map_or(0, |id| id + 1);
db.as_private_mut().as_notifications_mut().insert(
&id,
&Notification {
package_id,
created_at: Utc::now(),
code: T::CODE,
level,
title,
message,
data,
},
)
}
#[test]

View File

@@ -133,7 +133,7 @@ pub async fn publish(
.with_prefix("[1/3]")
.with_message("Querying s9pk");
pb.enable_steady_tick(Duration::from_millis(200));
let mut s9pk = S9pk::open(&path, None).await?;
let s9pk = S9pk::open(&path, None).await?;
let m = s9pk.as_manifest().clone();
pb.set_style(plain_line_style.clone());
pb.abandon();
@@ -144,7 +144,7 @@ pub async fn publish(
.with_prefix("[1/3]")
.with_message("Verifying s9pk");
pb.enable_steady_tick(Duration::from_millis(200));
let mut s9pk = S9pk::open(&path, None).await?;
let s9pk = S9pk::open(&path, None).await?;
// s9pk.validate().await?;
todo!();
let m = s9pk.as_manifest().clone();

View File

@@ -1,7 +1,5 @@
use std::path::Path;
use std::sync::Arc;
use ed25519::signature::Keypair;
use ed25519_dalek::{Signature, SigningKey, VerifyingKey};
use tokio::io::AsyncRead;

View File

@@ -1,7 +1,7 @@
use std::os::fd::{AsRawFd, FromRawFd, RawFd};
use std::io::SeekFrom;
use std::os::fd::{AsRawFd, RawFd};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::{borrow::Borrow, io::SeekFrom};
use tokio::fs::File;
use tokio::io::{AsyncRead, AsyncReadExt};

View File

@@ -1,6 +1,4 @@
use std::collections::BTreeMap;
use models::{ActionId, PackageId, ProcedureName};
use models::ProcedureName;
use crate::config::ConfigureContext;
use crate::prelude::*;

View File

@@ -1,5 +1,5 @@
use std::sync::Arc;
use std::time::Duration;
use std::{ops::Deref, sync::Arc};
use chrono::{DateTime, Utc};
use clap::Parser;
@@ -10,25 +10,25 @@ use persistent_container::PersistentContainer;
use rpc_toolkit::{from_fn_async, CallRemoteHandler, Empty, Handler, HandlerArgs};
use serde::{Deserialize, Serialize};
use start_stop::StartStop;
use tokio::sync::{watch, Notify};
use tokio::sync::Notify;
use crate::action::ActionResult;
use crate::config::action::ConfigRes;
use crate::context::{CliContext, RpcContext};
use crate::core::rpc_continuations::RequestGuid;
use crate::db::model::{
CurrentDependencies, CurrentDependents, InstalledPackageInfo, PackageDataEntry,
PackageDataEntryInstalled, PackageDataEntryMatchModel, StaticFiles,
InstalledPackageInfo, PackageDataEntry, PackageDataEntryInstalled, PackageDataEntryMatchModel,
StaticFiles,
};
use crate::disk::mount::guard::GenericMountGuard;
use crate::install::PKG_ARCHIVE_DIR;
use crate::prelude::*;
use crate::progress::{self, NamedProgress, Progress};
use crate::progress::{NamedProgress, Progress};
use crate::s9pk::S9pk;
use crate::service::service_map::InstallProgressHandles;
use crate::service::transition::{TempDesiredState, TransitionKind, TransitionState};
use crate::service::transition::TransitionKind;
use crate::status::health_check::HealthCheckResult;
use crate::status::{DependencyConfigErrors, MainStatus, Status};
use crate::status::{MainStatus, Status};
use crate::util::actor::{Actor, BackgroundJobs, SimpleActor};
use crate::volume::data_dir;
@@ -289,6 +289,7 @@ impl Service {
marketplace_url: None, // TODO
manifest: manifest.clone(),
last_backup: None, // TODO
hosts: Default::default(), // TODO
store: Value::Null, // TODO
store_exposed_dependents: Default::default(), // TODO
store_exposed_ui: Default::default(), // TODO

View File

@@ -15,19 +15,18 @@ use tokio::process::Command;
use tokio::sync::{oneshot, watch, Mutex, OnceCell};
use tracing::instrument;
use super::{
service_effect_handler::{service_effect_handler, EffectContext},
transition::{TempDesiredState, TransitionKind},
};
use super::{transition::TransitionState, ServiceActorSeed};
use super::service_effect_handler::{service_effect_handler, EffectContext};
use super::transition::{TransitionKind, TransitionState};
use super::ServiceActorSeed;
use crate::context::RpcContext;
use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::idmapped::IdMapped;
use crate::disk::mount::filesystem::loop_dev::LoopDev;
use crate::disk::mount::filesystem::overlayfs::OverlayGuard;
use crate::disk::mount::filesystem::{MountType, ReadOnly};
use crate::disk::mount::guard::{GenericMountGuard, MountGuard};
use crate::disk::mount::guard::MountGuard;
use crate::lxc::{LxcConfig, LxcContainer, HOST_RPC_SERVER_SOCKET};
use crate::net::net_controller::NetService;
use crate::prelude::*;
use crate::s9pk::merkle_archive::source::FileSource;
use crate::s9pk::S9pk;
@@ -94,6 +93,7 @@ pub struct PersistentContainer {
assets: BTreeMap<VolumeId, MountGuard>,
pub(super) overlays: Arc<Mutex<BTreeMap<InternedString, OverlayGuard>>>,
pub(super) state: Arc<watch::Sender<ServiceState>>,
pub(super) net_service: Mutex<NetService>,
}
impl PersistentContainer {
@@ -178,6 +178,10 @@ impl PersistentContainer {
.await?;
}
}
let net_service = ctx
.net_controller
.create_service(s9pk.as_manifest().id.clone(), lxc_container.ip())
.await?;
Ok(Self {
s9pk,
lxc_container: OnceCell::new_with(Some(lxc_container)),
@@ -189,6 +193,7 @@ impl PersistentContainer {
assets,
overlays: Arc::new(Mutex::new(BTreeMap::new())),
state: Arc::new(watch::channel(ServiceState::new(start)).0),
net_service: Mutex::new(net_service),
})
}

View File

@@ -2,7 +2,7 @@ use std::time::Duration;
use imbl_value::Value;
use models::ProcedureName;
use rpc_toolkit::yajrc::{RpcError, RpcMethod};
use rpc_toolkit::yajrc::RpcMethod;
use rpc_toolkit::Empty;
use crate::prelude::*;

View File

@@ -1,11 +1,10 @@
use std::ffi::OsString;
use std::os::unix::process::CommandExt;
use std::path::{Path, PathBuf};
use std::str::FromStr;
use std::sync::{Arc, Weak};
use std::{ffi::OsString, time::Instant};
use chrono::Utc;
use clap::builder::{TypedValueParser, ValueParserFactory};
use clap::builder::ValueParserFactory;
use clap::Parser;
use imbl_value::{json, InternedString};
use models::{ActionId, HealthCheckId, ImageId, PackageId};
@@ -13,19 +12,18 @@ use patch_db::json_ptr::JsonPointer;
use rpc_toolkit::{from_fn, from_fn_async, AnyContext, Context, Empty, HandlerExt, ParentHandler};
use tokio::process::Command;
use crate::db::model::ExposedUI;
use crate::disk::mount::filesystem::idmapped::IdMapped;
use crate::disk::mount::filesystem::loop_dev::LoopDev;
use crate::disk::mount::filesystem::overlayfs::OverlayGuard;
use crate::prelude::*;
use crate::s9pk::rpc::SKIP_ENV;
use crate::service::cli::ContainerCliContext;
use crate::service::start_stop::StartStop;
use crate::service::ServiceActorSeed;
use crate::status::health_check::HealthCheckResult;
use crate::status::health_check::{HealthCheckResult, HealthCheckString};
use crate::status::MainStatus;
use crate::util::clap::FromStrParser;
use crate::util::{new_guid, Invoke};
use crate::{db::model::ExposedUI, service::RunningStatus};
use crate::{disk::mount::filesystem::idmapped::IdMapped, status::health_check::HealthCheckString};
use crate::{echo, ARCH};
#[derive(Clone)]

View File

@@ -12,12 +12,12 @@ use tracing::instrument;
use crate::context::RpcContext;
use crate::db::model::{
InstalledPackageInfo, PackageDataEntry, PackageDataEntryInstalled, PackageDataEntryInstalling,
PackageDataEntry, PackageDataEntryInstalled, PackageDataEntryInstalling,
PackageDataEntryRestoring, PackageDataEntryUpdating, StaticFiles,
};
use crate::disk::mount::guard::GenericMountGuard;
use crate::install::PKG_ARCHIVE_DIR;
use crate::notifications::NotificationLevel;
use crate::notifications::{notify, NotificationLevel};
use crate::prelude::*;
use crate::progress::{
FullProgressTracker, FullProgressTrackerHandle, PhaseProgressTrackerHandle,
@@ -370,17 +370,19 @@ impl ServiceReloadInfo {
.load(&self.ctx, &self.id, LoadDisposition::Undo)
.await?;
if let Some(error) = error {
let error_string = error.to_string();
self.ctx
.notification_manager
.notify(
self.ctx.db.clone(),
Some(self.id.clone()),
NotificationLevel::Error,
format!("{} Failed", self.operation),
error.to_string(),
(),
None,
)
.db
.mutate(|db| {
notify(
db,
Some(self.id.clone()),
NotificationLevel::Error,
format!("{} Failed", self.operation),
error_string,
(),
)
})
.await?;
}
Ok(())

View File

@@ -1,15 +1,13 @@
use std::sync::Arc;
use std::{fmt::Display, ops::Deref};
use futures::{Future, FutureExt};
use tokio::sync::watch;
use super::persistent_container::ServiceState;
use crate::service::start_stop::StartStop;
use crate::util::actor::BackgroundJobs;
use crate::util::future::{CancellationHandle, RemoteCancellable};
use super::persistent_container::ServiceState;
pub mod backup;
pub mod restart;

View File

@@ -5,10 +5,10 @@ use std::time::Duration;
use color_eyre::eyre::eyre;
use josekit::jwk::Jwk;
use openssl::x509::X509;
use patch_db::json_ptr::ROOT;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{from_fn_async, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use sqlx::Connection;
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use tokio::try_join;
@@ -20,6 +20,7 @@ use crate::backup::restore::recover_full_embassy;
use crate::backup::target::BackupTargetFS;
use crate::context::setup::SetupResult;
use crate::context::SetupContext;
use crate::db::model::Database;
use crate::disk::fsck::RepairStrategy;
use crate::disk::main::DEFAULT_PASSWORD;
use crate::disk::mount::filesystem::cifs::Cifs;
@@ -74,29 +75,26 @@ async fn setup_init(
ctx: &SetupContext,
password: Option<String>,
) -> Result<(Hostname, OnionAddressV3, X509), Error> {
let InitResult { secret_store, db } = init(&ctx.config).await?;
let mut secrets_handle = secret_store.acquire().await?;
let mut secrets_tx = secrets_handle.begin().await?;
let InitResult { db } = init(&ctx.config).await?;
let mut account = AccountInfo::load(secrets_tx.as_mut()).await?;
if let Some(password) = password {
account.set_password(&password)?;
account.save(secrets_tx.as_mut()).await?;
db.mutate(|m| {
let account = db
.mutate(|m| {
let mut account = AccountInfo::load(m)?;
if let Some(password) = password {
account.set_password(&password)?;
}
account.save(m)?;
m.as_public_mut()
.as_server_info_mut()
.as_password_hash_mut()
.ser(&account.password)
.ser(&account.password)?;
Ok(account)
})
.await?;
}
secrets_tx.commit().await?;
Ok((
account.hostname,
account.key.tor_address(),
account.tor_key.public().get_onion_address(),
account.root_ca_cert,
))
}
@@ -419,15 +417,13 @@ async fn fresh_setup(
embassy_password: &str,
) -> Result<(Hostname, OnionAddressV3, X509), Error> {
let account = AccountInfo::new(embassy_password, root_ca_start_time().await?)?;
let sqlite_pool = ctx.secret_store().await?;
account.save(&sqlite_pool).await?;
sqlite_pool.close().await;
let InitResult { secret_store, .. } = init(&ctx.config).await?;
secret_store.close().await;
let db = ctx.db().await?;
db.put(&ROOT, &Database::init(&account)?).await?;
init(&ctx.config).await?;
Ok((
account.hostname.clone(),
account.key.tor_address(),
account.root_ca_cert.clone(),
account.hostname,
account.tor_key.public().get_onion_address(),
account.root_ca_cert,
))
}

View File

@@ -1,28 +1,46 @@
use std::collections::BTreeMap;
use std::path::Path;
use chrono::Utc;
use clap::builder::ValueParserFactory;
use clap::Parser;
use color_eyre::eyre::eyre;
use imbl_value::InternedString;
use rpc_toolkit::{command, from_fn_async, AnyContext, Empty, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use sqlx::{Pool, Postgres};
use tracing::instrument;
use crate::context::{CliContext, RpcContext};
use crate::prelude::*;
use crate::util::clap::FromStrParser;
use crate::util::serde::{display_serializable, HandlerExtSerde, WithIoFormat};
use crate::{Error, ErrorKind};
static SSH_AUTHORIZED_KEYS_FILE: &str = "/home/start9/.ssh/authorized_keys";
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct PubKey(
pub struct SshKeys(BTreeMap<InternedString, WithTimeData<SshPubKey>>);
impl SshKeys {
pub fn new() -> Self {
Self(BTreeMap::new())
}
}
impl Map for SshKeys {
type Key = InternedString;
type Value = WithTimeData<SshPubKey>;
fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error> {
Ok(key)
}
fn key_string(key: &Self::Key) -> Result<InternedString, Error> {
Ok(key.clone())
}
}
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct SshPubKey(
#[serde(serialize_with = "crate::util::serde::serialize_display")]
#[serde(deserialize_with = "crate::util::serde::deserialize_from_str")]
openssh_keys::PublicKey,
);
impl ValueParserFactory for PubKey {
impl ValueParserFactory for SshPubKey {
type Parser = FromStrParser<Self>;
fn value_parser() -> Self::Parser {
FromStrParser::new()
@@ -33,7 +51,7 @@ impl ValueParserFactory for PubKey {
#[serde(rename_all = "kebab-case")]
pub struct SshKeyResponse {
pub alg: String,
pub fingerprint: String,
pub fingerprint: InternedString,
pub hostname: String,
pub created_at: String,
}
@@ -47,10 +65,10 @@ impl std::fmt::Display for SshKeyResponse {
}
}
impl std::str::FromStr for PubKey {
impl std::str::FromStr for SshPubKey {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
s.parse().map(|pk| PubKey(pk)).map_err(|e| Error {
s.parse().map(|pk| SshPubKey(pk)).map_err(|e| Error {
source: e.into(),
kind: crate::ErrorKind::ParseSshKey,
revision: None,
@@ -88,49 +106,34 @@ pub fn ssh() -> ParentHandler {
#[serde(rename_all = "kebab-case")]
#[command(rename_all = "kebab-case")]
pub struct AddParams {
key: PubKey,
key: SshPubKey,
}
#[instrument(skip_all)]
pub async fn add(ctx: RpcContext, AddParams { key }: AddParams) -> Result<SshKeyResponse, Error> {
let pool = &ctx.secret_store;
// check fingerprint for duplicates
let fp = key.0.fingerprint_md5();
match sqlx::query!("SELECT * FROM ssh_keys WHERE fingerprint = $1", fp)
.fetch_optional(pool)
.await?
{
None => {
// if no duplicates, insert into DB
let raw_key = format!("{}", key.0);
let created_at = Utc::now().to_rfc3339();
sqlx::query!(
"INSERT INTO ssh_keys (fingerprint, openssh_pubkey, created_at) VALUES ($1, $2, $3)",
fp,
raw_key,
created_at
)
.execute(pool)
.await?;
// insert into live key file, for now we actually do a wholesale replacement of the keys file, for maximum
// consistency
sync_keys_from_db(pool, Path::new(SSH_AUTHORIZED_KEYS_FILE)).await?;
let mut key = WithTimeData::new(key);
let fingerprint = InternedString::intern(key.0.fingerprint_md5());
ctx.db
.mutate(move |m| {
m.as_private_mut()
.as_ssh_pubkeys_mut()
.insert(&fingerprint, &key)?;
Ok(SshKeyResponse {
alg: key.0.keytype().to_owned(),
fingerprint: fp,
hostname: key.0.comment.unwrap_or(String::new()).to_owned(),
created_at,
fingerprint,
hostname: key.0.comment.take().unwrap_or_default(),
created_at: key.created_at.to_rfc3339(),
})
}
Some(_) => Err(Error::new(eyre!("Duplicate ssh key"), ErrorKind::Duplicate)),
}
})
.await
}
#[derive(Deserialize, Serialize, Parser)]
#[serde(rename_all = "kebab-case")]
#[command(rename_all = "kebab-case")]
pub struct DeleteParams {
fingerprint: String,
fingerprint: InternedString,
}
#[instrument(skip_all)]
@@ -138,25 +141,22 @@ pub async fn delete(
ctx: RpcContext,
DeleteParams { fingerprint }: DeleteParams,
) -> Result<(), Error> {
let pool = &ctx.secret_store;
// check if fingerprint is in DB
// if in DB, remove it from DB
let n = sqlx::query!("DELETE FROM ssh_keys WHERE fingerprint = $1", fingerprint)
.execute(pool)
.await?
.rows_affected();
// if not in DB, Err404
if n == 0 {
Err(Error {
source: color_eyre::eyre::eyre!("SSH Key Not Found"),
kind: crate::error::ErrorKind::NotFound,
revision: None,
let keys = ctx
.db
.mutate(|m| {
let keys_ref = m.as_private_mut().as_ssh_pubkeys_mut();
if keys_ref.remove(&fingerprint)?.is_some() {
keys_ref.de()
} else {
Err(Error {
source: color_eyre::eyre::eyre!("SSH Key Not Found"),
kind: crate::error::ErrorKind::NotFound,
revision: None,
})
}
})
} else {
// AND overlay key file
sync_keys_from_db(pool, Path::new(SSH_AUTHORIZED_KEYS_FILE)).await?;
Ok(())
}
.await?;
sync_keys(&keys, SSH_AUTHORIZED_KEYS_FILE).await
}
fn display_all_ssh_keys(params: WithIoFormat<Empty>, result: Vec<SshKeyResponse>) {
@@ -186,43 +186,31 @@ fn display_all_ssh_keys(params: WithIoFormat<Empty>, result: Vec<SshKeyResponse>
}
#[instrument(skip_all)]
pub async fn list(ctx: RpcContext, _: Empty) -> Result<Vec<SshKeyResponse>, Error> {
let pool = &ctx.secret_store;
// list keys in DB and return them
let entries = sqlx::query!("SELECT fingerprint, openssh_pubkey, created_at FROM ssh_keys")
.fetch_all(pool)
.await?;
Ok(entries
pub async fn list(ctx: RpcContext) -> Result<Vec<SshKeyResponse>, Error> {
ctx.db
.peek()
.await
.into_private()
.into_ssh_pubkeys()
.into_entries()?
.into_iter()
.map(|r| {
let k = PubKey(r.openssh_pubkey.parse().unwrap()).0;
let alg = k.keytype().to_owned();
let fingerprint = k.fingerprint_md5();
let hostname = k.comment.unwrap_or("".to_owned());
let created_at = r.created_at;
SshKeyResponse {
alg,
.map(|(fingerprint, key)| {
let mut key = key.de()?;
Ok(SshKeyResponse {
alg: key.0.keytype().to_owned(),
fingerprint,
hostname,
created_at,
}
hostname: key.0.comment.take().unwrap_or_default(),
created_at: key.created_at.to_rfc3339(),
})
})
.collect())
.collect()
}
#[instrument(skip_all)]
pub async fn sync_keys_from_db<P: AsRef<Path>>(
pool: &Pool<Postgres>,
dest: P,
) -> Result<(), Error> {
pub async fn sync_keys<P: AsRef<Path>>(keys: &SshKeys, dest: P) -> Result<(), Error> {
use tokio::io::AsyncWriteExt;
let dest = dest.as_ref();
let keys = sqlx::query!("SELECT openssh_pubkey FROM ssh_keys")
.fetch_all(pool)
.await?;
let contents: String = keys
.into_iter()
.map(|k| format!("{}\n", k.openssh_pubkey))
.collect();
let ssh_dir = dest.parent().ok_or_else(|| {
Error::new(
eyre!("SSH Key File cannot be \"/\""),
@@ -232,5 +220,10 @@ pub async fn sync_keys_from_db<P: AsRef<Path>>(
if tokio::fs::metadata(ssh_dir).await.is_err() {
tokio::fs::create_dir_all(ssh_dir).await?;
}
std::fs::write(dest, contents).map_err(|e| e.into())
let mut f = tokio::fs::File::create(dest).await?;
for key in keys.0.values() {
f.write_all(key.0.to_key_format().as_bytes()).await?;
f.write_all(b"\n").await?;
}
Ok(())
}

View File

@@ -27,6 +27,12 @@ pub struct DependencyConfigErrors(pub BTreeMap<PackageId, String>);
impl Map for DependencyConfigErrors {
type Key = PackageId;
type Value = String;
fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error> {
Ok(key)
}
fn key_string(key: &Self::Key) -> Result<imbl_value::InternedString, Error> {
Ok(key.clone().into())
}
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq)]

View File

@@ -18,7 +18,7 @@ use crate::db::model::UpdateProgress;
use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::MountGuard;
use crate::notifications::NotificationLevel;
use crate::notifications::{notify, NotificationLevel};
use crate::prelude::*;
use crate::registry::marketplace::with_query_params;
use crate::sound::{
@@ -66,7 +66,7 @@ pub enum UpdateResult {
Updating,
}
pub fn display_update_result(params: UpdateSystemParams, status: UpdateResult) {
pub fn display_update_result(_: UpdateSystemParams, status: UpdateResult) {
match status {
UpdateResult::Updating => {
println!("Updating...");
@@ -131,24 +131,14 @@ async fn maybe_do_update(ctx: RpcContext, marketplace_url: Url) -> Result<Option
tokio::spawn(async move {
let res = do_update(ctx.clone(), eos_url).await;
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_server_info_mut()
.as_status_info_mut()
.as_update_progress_mut()
.ser(&None)
})
.await?;
match res {
Ok(()) => {
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_server_info_mut()
.as_status_info_mut()
.as_updated_mut()
.ser(&true)
let status_info =
db.as_public_mut().as_server_info_mut().as_status_info_mut();
status_info.as_update_progress_mut().ser(&None)?;
status_info.as_updated_mut().ser(&true)
})
.await?;
CIRCLE_OF_5THS_SHORT
@@ -157,18 +147,25 @@ async fn maybe_do_update(ctx: RpcContext, marketplace_url: Url) -> Result<Option
.expect("could not play sound");
}
Err(e) => {
ctx.notification_manager
.notify(
ctx.db.clone(),
None,
NotificationLevel::Error,
"StartOS Update Failed".to_owned(),
format!("Update was not successful because of {}", e),
(),
None,
)
let err_string = format!("Update was not successful because of {}", e);
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_server_info_mut()
.as_status_info_mut()
.as_update_progress_mut()
.ser(&None)?;
notify(
db,
None,
NotificationLevel::Error,
"StartOS Update Failed".to_owned(),
err_string,
(),
)
})
.await
.expect("");
.unwrap();
// TODO: refactor sound lib to make compound tempos easier to deal with
UPDATE_FAILED_1
.play()

View File

@@ -1,4 +1,3 @@
use std::path::PathBuf;
use std::pin::Pin;
use std::sync::Arc;
use std::task::Poll;
@@ -6,18 +5,17 @@ use std::time::Duration;
use axum::body::Body;
use axum::response::Response;
use clap::Parser;
use futures::{FutureExt, StreamExt};
use http::header::CONTENT_LENGTH;
use http::StatusCode;
use tokio::fs::File;
use tokio::io::{AsyncRead, AsyncWrite, AsyncWriteExt};
use tokio::sync::{watch, OwnedMutexGuard};
use tokio::io::{AsyncWrite, AsyncWriteExt};
use tokio::sync::watch;
use crate::context::RpcContext;
use crate::core::rpc_continuations::{RequestGuid, RpcContinuation};
use crate::prelude::*;
use crate::s9pk::merkle_archive::source::multi_cursor_file::{FileSectionReader, MultiCursorFile};
use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile;
use crate::s9pk::merkle_archive::source::ArchiveSource;
use crate::util::io::TmpDir;

View File

@@ -3,7 +3,7 @@ use std::task::{Context, Poll};
use futures::future::abortable;
use futures::stream::{AbortHandle, Abortable};
use futures::{Future, FutureExt};
use futures::Future;
use tokio::sync::watch;
#[pin_project::pin_project(PinnedDrop)]
@@ -44,7 +44,7 @@ impl<F> PinnedDrop for DropSignaling<F> {
pub struct DropHandle(watch::Receiver<bool>);
impl DropHandle {
pub async fn wait(&mut self) {
self.0.wait_for(|a| *a).await;
let _ = self.0.wait_for(|a| *a).await;
}
}

View File

@@ -8,6 +8,8 @@ use clap::builder::ValueParserFactory;
use clap::{ArgMatches, CommandFactory, FromArgMatches};
use color_eyre::eyre::eyre;
use imbl::OrdMap;
use openssl::pkey::{PKey, Private};
use openssl::x509::{X509Ref, X509};
use rpc_toolkit::{AnyContext, Handler, HandlerArgs, HandlerArgsFor, HandlerTypes, PrintCliResult};
use serde::de::DeserializeOwned;
use serde::ser::{SerializeMap, SerializeSeq};
@@ -1090,3 +1092,151 @@ pub fn apply_expr(input: jaq_core::Val, expr: &str) -> Result<jaq_core::Val, Err
Ok(res)
}
/// Types that can round-trip through a PEM (or similar textual) encoding.
///
/// Used by the `pem` serde helper module and the `Pem` wrapper so keys and
/// certificates can be persisted as strings. Errors surface through the serde
/// error types so implementations plug directly into (de)serializers.
pub trait PemEncoding: Sized {
    /// Parse a value from its textual form.
    fn from_pem<E: serde::de::Error>(pem: &str) -> Result<Self, E>;
    /// Render the value to its textual form.
    fn to_pem<E: serde::ser::Error>(&self) -> Result<String, E>;
}
impl PemEncoding for X509 {
fn from_pem<E: serde::de::Error>(pem: &str) -> Result<Self, E> {
X509::from_pem(pem.as_bytes()).map_err(E::custom)
}
fn to_pem<E: serde::ser::Error>(&self) -> Result<String, E> {
String::from_utf8((&**self).to_pem().map_err(E::custom)?).map_err(E::custom)
}
}
/// PEM (PKCS#8) round-trip for private keys.
impl PemEncoding for PKey<Private> {
    fn from_pem<E: serde::de::Error>(pem: &str) -> Result<Self, E> {
        PKey::<Private>::private_key_from_pem(pem.as_bytes()).map_err(E::custom)
    }
    fn to_pem<E: serde::ser::Error>(&self) -> Result<String, E> {
        // Deref past PKey to the PKeyRef inherent method so we don't recurse
        // into this trait method.
        let bytes = (&**self).private_key_to_pem_pkcs8().map_err(E::custom)?;
        String::from_utf8(bytes).map_err(E::custom)
    }
}
/// OpenSSH-format round-trip for ssh private keys.
impl PemEncoding for ssh_key::PrivateKey {
    fn from_pem<E: serde::de::Error>(pem: &str) -> Result<Self, E> {
        ssh_key::PrivateKey::from_openssh(pem.as_bytes()).map_err(E::custom)
    }
    fn to_pem<E: serde::ser::Error>(&self) -> Result<String, E> {
        match self.to_openssh(ssh_key::LineEnding::LF) {
            // to_openssh yields a zeroizing wrapper; clone the inner String
            // out so the caller receives an ordinary owned value.
            Ok(encoded) => Ok(String::clone(&encoded)),
            Err(e) => Err(E::custom(e)),
        }
    }
}
/// Serde helpers (`#[serde(with = "pem")]`) that store `PemEncoding` types
/// as their textual PEM form.
pub mod pem {
    use serde::{Deserialize, Deserializer, Serializer};

    use crate::util::serde::PemEncoding;

    /// Serialize `value` as a PEM string.
    pub fn serialize<T: PemEncoding, S: Serializer>(
        value: &T,
        serializer: S,
    ) -> Result<S::Ok, S::Error> {
        let pem = value.to_pem()?;
        serializer.serialize_str(&pem)
    }

    /// Deserialize a PEM string and parse it into `T`.
    pub fn deserialize<'de, T: PemEncoding, D: Deserializer<'de>>(
        deserializer: D,
    ) -> Result<T, D::Error> {
        String::deserialize(deserializer).and_then(|pem| T::from_pem(&pem))
    }
}
/// Transparent wrapper that (de)serializes its contents as a PEM string via
/// the `pem` serde helper module.
///
/// `#[repr(transparent)]` guarantees `Pem<T>` has exactly the same layout as
/// `T`, which is what makes the reference conversions below sound.
#[repr(transparent)]
#[derive(Debug, Deserialize, Serialize)]
pub struct Pem<T: PemEncoding>(#[serde(with = "pem")] pub T);
impl<T: PemEncoding> Pem<T> {
    /// Wrap an owned value.
    pub fn new(value: T) -> Self {
        Pem(value)
    }
    /// View a `&T` as a `&Pem<T>` without copying.
    pub fn new_ref(value: &T) -> &Self {
        // SAFETY: Pem<T> is #[repr(transparent)] over T, so &T and &Pem<T>
        // have identical layout and validity invariants.
        unsafe { std::mem::transmute(value) }
    }
    /// View a `&mut T` as a `&mut Pem<T>` without copying.
    pub fn new_mut(value: &mut T) -> &mut Self {
        // SAFETY: same as new_ref — #[repr(transparent)] makes the layouts
        // identical, and mutating through Pem<T> is mutating T itself.
        unsafe { std::mem::transmute(value) }
    }
}
/// Byte buffer that renders as text when its contents are valid UTF-8 and as
/// raw bytes otherwise.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct MaybeUtf8String(pub Vec<u8>);

impl std::fmt::Debug for MaybeUtf8String {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Fully-qualified Debug::fmt forwards the caller's formatter flags
        // (e.g. `{:#?}`) unchanged.
        match std::str::from_utf8(&self.0) {
            Ok(s) => std::fmt::Debug::fmt(s, f),
            Err(_) => std::fmt::Debug::fmt(&self.0, f),
        }
    }
}
impl<'de> Deserialize<'de> for MaybeUtf8String {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Accepts strings, byte arrays, sequences of integers, or unit —
        // every case collapses to the raw byte buffer.
        struct Visitor;
        impl<'de> serde::de::Visitor<'de> for Visitor {
            type Value = Vec<u8>;

            fn expecting(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                write!(formatter, "a string or byte array")
            }

            fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                Ok(Vec::from(v.as_bytes()))
            }

            fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                Ok(v.into_bytes())
            }

            fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                Ok(Vec::from(v))
            }

            fn visit_byte_buf<E>(self, v: Vec<u8>) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                Ok(v)
            }

            fn visit_unit<E>(self) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                Ok(Vec::new())
            }

            fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
            where
                A: serde::de::SeqAccess<'de>,
            {
                // Drain element-by-element; `?` short-circuits on the first
                // malformed element just like the equivalent iterator chain.
                let mut buf = Vec::with_capacity(seq.size_hint().unwrap_or(0));
                while let Some(byte) = seq.next_element::<u8>()? {
                    buf.push(byte);
                }
                Ok(buf)
            }
        }
        deserializer.deserialize_any(Visitor).map(Self)
    }
}
impl Serialize for MaybeUtf8String {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // Prefer the friendlier string form whenever the bytes are valid UTF-8.
        match std::str::from_utf8(&self.0) {
            Ok(s) => serializer.serialize_str(s),
            Err(_) => serializer.serialize_bytes(&self.0),
        }
    }
}

View File

@@ -1,33 +1,25 @@
use std::cmp::Ordering;
use async_trait::async_trait;
use color_eyre::eyre::eyre;
use futures::Future;
use imbl_value::InternedString;
use sqlx::PgPool;
use crate::prelude::*;
use crate::Error;
mod v0_3_4;
mod v0_3_4_1;
mod v0_3_4_2;
mod v0_3_4_3;
mod v0_3_4_4;
mod v0_3_5;
mod v0_3_5_1;
mod v0_3_6;
pub type Current = v0_3_5_1::Version;
pub type Current = v0_3_6::Version;
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)]
#[serde(untagged)]
enum Version {
V0_3_4(Wrapper<v0_3_4::Version>),
V0_3_4_1(Wrapper<v0_3_4_1::Version>),
V0_3_4_2(Wrapper<v0_3_4_2::Version>),
V0_3_4_3(Wrapper<v0_3_4_3::Version>),
V0_3_4_4(Wrapper<v0_3_4_4::Version>),
LT0_3_5(LTWrapper<v0_3_5::Version>),
V0_3_5(Wrapper<v0_3_5::Version>),
V0_3_5_1(Wrapper<v0_3_5_1::Version>),
V0_3_6(Wrapper<v0_3_6::Version>),
Other(emver::Version),
}
@@ -43,19 +35,15 @@ impl Version {
#[cfg(test)]
fn as_sem_ver(&self) -> emver::Version {
match self {
Version::V0_3_4(Wrapper(x)) => x.semver(),
Version::V0_3_4_1(Wrapper(x)) => x.semver(),
Version::V0_3_4_2(Wrapper(x)) => x.semver(),
Version::V0_3_4_3(Wrapper(x)) => x.semver(),
Version::V0_3_4_4(Wrapper(x)) => x.semver(),
Version::LT0_3_5(LTWrapper(_, x)) => x.clone(),
Version::V0_3_5(Wrapper(x)) => x.semver(),
Version::V0_3_5_1(Wrapper(x)) => x.semver(),
Version::V0_3_6(Wrapper(x)) => x.semver(),
Version::Other(x) => x.clone(),
}
}
}
#[async_trait]
pub trait VersionT
where
Self: Sized + Send + Sync,
@@ -64,86 +52,115 @@ where
fn new() -> Self;
fn semver(&self) -> emver::Version;
fn compat(&self) -> &'static emver::VersionRange;
async fn up(&self, db: PatchDb, secrets: &PgPool) -> Result<(), Error>;
async fn down(&self, db: PatchDb, secrets: &PgPool) -> Result<(), Error>;
async fn commit(&self, db: PatchDb) -> Result<(), Error> {
let semver = self.semver().into();
let compat = self.compat().clone();
db.mutate(|d| {
d.as_public_mut()
.as_server_info_mut()
.as_version_mut()
.ser(&semver)?;
d.as_public_mut()
.as_server_info_mut()
.as_eos_version_compat_mut()
.ser(&compat)?;
fn up(&self, db: &PatchDb) -> impl Future<Output = Result<(), Error>> + Send;
fn down(&self, db: &PatchDb) -> impl Future<Output = Result<(), Error>> + Send;
fn commit(&self, db: &PatchDb) -> impl Future<Output = Result<(), Error>> + Send {
async {
let semver = self.semver().into();
let compat = self.compat().clone();
db.mutate(|d| {
d.as_public_mut()
.as_server_info_mut()
.as_version_mut()
.ser(&semver)?;
d.as_public_mut()
.as_server_info_mut()
.as_eos_version_compat_mut()
.ser(&compat)?;
Ok(())
})
.await?;
Ok(())
})
.await?;
Ok(())
}
async fn migrate_to<V: VersionT>(
&self,
version: &V,
db: PatchDb,
secrets: &PgPool,
) -> Result<(), Error> {
match self.semver().cmp(&version.semver()) {
Ordering::Greater => self.rollback_to_unchecked(version, db, secrets).await,
Ordering::Less => version.migrate_from_unchecked(self, db, secrets).await,
Ordering::Equal => Ok(()),
}
}
async fn migrate_from_unchecked<V: VersionT>(
fn migrate_to<V: VersionT>(
&self,
version: &V,
db: PatchDb,
secrets: &PgPool,
) -> Result<(), Error> {
let previous = Self::Previous::new();
if version.semver() < previous.semver() {
previous
.migrate_from_unchecked(version, db.clone(), secrets)
.await?;
} else if version.semver() > previous.semver() {
return Err(Error::new(
eyre!(
"NO PATH FROM {}, THIS IS LIKELY A MISTAKE IN THE VERSION DEFINITION",
version.semver()
),
crate::ErrorKind::MigrationFailed,
));
db: &PatchDb,
) -> impl Future<Output = Result<(), Error>> + Send {
async {
match self.semver().cmp(&version.semver()) {
Ordering::Greater => self.rollback_to_unchecked(version, db).await,
Ordering::Less => version.migrate_from_unchecked(self, db).await,
Ordering::Equal => Ok(()),
}
}
tracing::info!("{} -> {}", previous.semver(), self.semver(),);
self.up(db.clone(), secrets).await?;
self.commit(db).await?;
Ok(())
}
async fn rollback_to_unchecked<V: VersionT>(
fn migrate_from_unchecked<V: VersionT>(
&self,
version: &V,
db: PatchDb,
secrets: &PgPool,
) -> Result<(), Error> {
let previous = Self::Previous::new();
tracing::info!("{} -> {}", self.semver(), previous.semver(),);
self.down(db.clone(), secrets).await?;
previous.commit(db.clone()).await?;
if version.semver() < previous.semver() {
previous.rollback_to_unchecked(version, db, secrets).await?;
} else if version.semver() > previous.semver() {
return Err(Error::new(
eyre!(
"NO PATH TO {}, THIS IS LIKELY A MISTAKE IN THE VERSION DEFINITION",
version.semver()
),
crate::ErrorKind::MigrationFailed,
));
db: &PatchDb,
) -> impl Future<Output = Result<(), Error>> + Send {
async {
let previous = Self::Previous::new();
if version.semver() < previous.semver() {
previous.migrate_from_unchecked(version, db).await?;
} else if version.semver() > previous.semver() {
return Err(Error::new(
eyre!(
"NO PATH FROM {}, THIS IS LIKELY A MISTAKE IN THE VERSION DEFINITION",
version.semver()
),
crate::ErrorKind::MigrationFailed,
));
}
tracing::info!("{} -> {}", previous.semver(), self.semver(),);
self.up(db).await?;
self.commit(db).await?;
Ok(())
}
}
fn rollback_to_unchecked<V: VersionT>(
&self,
version: &V,
db: &PatchDb,
) -> impl Future<Output = Result<(), Error>> + Send {
async {
let previous = Self::Previous::new();
tracing::info!("{} -> {}", self.semver(), previous.semver(),);
self.down(db).await?;
previous.commit(db).await?;
if version.semver() < previous.semver() {
previous.rollback_to_unchecked(version, db).await?;
} else if version.semver() > previous.semver() {
return Err(Error::new(
eyre!(
"NO PATH TO {}, THIS IS LIKELY A MISTAKE IN THE VERSION DEFINITION",
version.semver()
),
crate::ErrorKind::MigrationFailed,
));
}
Ok(())
}
Ok(())
}
}
/// Version marker that matches any serialized version strictly less than the
/// canonical version of `T`. Stores the canonical `T` alongside the actual
/// version (`.1`) that was parsed.
#[derive(Debug, Clone)]
struct LTWrapper<T>(T, emver::Version);
impl<T> serde::Serialize for LTWrapper<T>
where
    T: VersionT,
{
    // Serializes as the canonical semver of T, not the stored `.1` value.
    fn serialize<S: serde::Serializer>(&self, serializer: S) -> Result<S::Ok, S::Error> {
        let canonical = self.0.semver();
        canonical.serialize(serializer)
    }
}
impl<'de, T> serde::Deserialize<'de> for LTWrapper<T>
where
    T: VersionT,
{
    fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        let parsed = crate::util::Version::deserialize(deserializer)?;
        let canonical = T::new();
        // Only versions strictly below T's canonical version match; anything
        // else is rejected so deserialization can fall through to another form.
        if *parsed < canonical.semver() {
            Ok(Self(canonical, parsed.into_version()))
        } else {
            Err(serde::de::Error::custom("Mismatched Version"))
        }
    }
}
/// Newtype carrying a concrete version type `T` for use as a variant payload
/// in the untagged `Version` enum.
#[derive(Debug, Clone)]
struct Wrapper<T>(T);
impl<T> serde::Serialize for Wrapper<T>
@@ -169,7 +186,7 @@ where
}
}
pub async fn init(db: &PatchDb, secrets: &PgPool) -> Result<(), Error> {
pub async fn init(db: &PatchDb) -> Result<(), Error> {
let version = Version::from_util_version(
db.peek()
.await
@@ -180,13 +197,15 @@ pub async fn init(db: &PatchDb, secrets: &PgPool) -> Result<(), Error> {
);
match version {
Version::V0_3_4(v) => v.0.migrate_to(&Current::new(), db.clone(), secrets).await?,
Version::V0_3_4_1(v) => v.0.migrate_to(&Current::new(), db.clone(), secrets).await?,
Version::V0_3_4_2(v) => v.0.migrate_to(&Current::new(), db.clone(), secrets).await?,
Version::V0_3_4_3(v) => v.0.migrate_to(&Current::new(), db.clone(), secrets).await?,
Version::V0_3_4_4(v) => v.0.migrate_to(&Current::new(), db.clone(), secrets).await?,
Version::V0_3_5(v) => v.0.migrate_to(&Current::new(), db.clone(), secrets).await?,
Version::V0_3_5_1(v) => v.0.migrate_to(&Current::new(), db.clone(), secrets).await?,
Version::LT0_3_5(_) => {
return Err(Error::new(
eyre!("Cannot migrate from pre-0.3.5. Please update to v0.3.5 first."),
ErrorKind::MigrationFailed,
));
}
Version::V0_3_5(v) => v.0.migrate_to(&Current::new(), &db).await?,
Version::V0_3_5_1(v) => v.0.migrate_to(&Current::new(), &db).await?,
Version::V0_3_6(v) => v.0.migrate_to(&Current::new(), &db).await?,
Version::Other(_) => {
return Err(Error::new(
eyre!("Cannot downgrade"),
@@ -218,11 +237,6 @@ mod tests {
fn versions() -> impl Strategy<Value = Version> {
prop_oneof![
Just(Version::V0_3_4(Wrapper(v0_3_4::Version::new()))),
Just(Version::V0_3_4_1(Wrapper(v0_3_4_1::Version::new()))),
Just(Version::V0_3_4_2(Wrapper(v0_3_4_2::Version::new()))),
Just(Version::V0_3_4_3(Wrapper(v0_3_4_3::Version::new()))),
Just(Version::V0_3_4_4(Wrapper(v0_3_4_4::Version::new()))),
Just(Version::V0_3_5(Wrapper(v0_3_5::Version::new()))),
Just(Version::V0_3_5_1(Wrapper(v0_3_5_1::Version::new()))),
em_version().prop_map(Version::Other),

View File

@@ -1,140 +0,0 @@
use async_trait::async_trait;
use emver::VersionRange;
use itertools::Itertools;
use openssl::hash::MessageDigest;
use serde_json::json;
use ssh_key::public::Ed25519PublicKey;
use super::*;
use crate::account::AccountInfo;
use crate::hostname::{sync_hostname, Hostname};
use crate::prelude::*;
// Marker version for the 0.3.4.0 release.
const V0_3_4: emver::Version = emver::Version::new(0, 3, 4, 0);
lazy_static::lazy_static! {
// Compatibility window shared by the whole 0.3.x line:
// >= 0.3.0.0 and <= the currently running version.
pub static ref V0_3_0_COMPAT: VersionRange = VersionRange::Conj(
Box::new(VersionRange::Anchor(
emver::GTE,
emver::Version::new(0, 3, 0, 0),
)),
Box::new(VersionRange::Anchor(emver::LTE, Current::new().semver())),
);
}
// Registry URLs seeded into the UI's known-hosts map during this migration.
const COMMUNITY_URL: &str = "https://community-registry.start9.com/";
const MAIN_REGISTRY: &str = "https://registry.start9.com/";
// Packages whose marketplace-url is repointed to the community registry on
// upgrade (and back to the main registry on downgrade).
const COMMUNITY_SERVICES: &[&str] = &[
"ipfs",
"agora",
"lightning-jet",
"balanceofsatoshis",
"mastodon",
"lndg",
"robosats",
"thunderhub",
"syncthing",
"sphinx-relay",
];
// Unit marker type implementing `VersionT` for 0.3.4.0.
#[derive(Clone, Debug)]
pub struct Version;
#[async_trait]
impl VersionT for Version {
// 0.3.4 is the root of this migration chain, so "previous" is itself.
type Previous = Self;
fn new() -> Self {
Version
}
fn semver(&self) -> emver::Version {
V0_3_4
}
fn compat(&self) -> &'static VersionRange {
&*V0_3_0_COMPAT
}
// Forward migration: publish the server's SSH public key and root-CA
// fingerprint into the public db, sync account state back from the db,
// and seed the UI's registry list / new UI settings.
async fn up(&self, db: PatchDb, secrets: &PgPool) -> Result<(), Error> {
// At this version account secrets (keys, certs, hostname) still live
// in postgres.
let mut account = AccountInfo::load(secrets).await?;
let account = db
.mutate(|d| {
// Expose the OpenSSH-formatted public half of the account key.
d.as_public_mut().as_server_info_mut().as_pubkey_mut().ser(
&ssh_key::PublicKey::from(Ed25519PublicKey::from(&account.key.ssh_key()))
.to_openssh()?,
)?;
// SHA-256 fingerprint of the root CA cert as colon-separated hex,
// the conventional certificate-fingerprint presentation.
d.as_public_mut()
.as_server_info_mut()
.as_ca_fingerprint_mut()
.ser(
&account
.root_ca_cert
.digest(MessageDigest::sha256())
.unwrap()
.iter()
.map(|x| format!("{x:X}"))
.join(":"),
)?;
// The db copy of hostname/server-id is authoritative here; pull it
// back into the account record before saving to postgres below.
let server_info = d.as_public_mut().as_server_info();
account.hostname = server_info.as_hostname().de().map(Hostname)?;
account.server_id = server_info.as_id().de()?;
Ok(account)
})
.await?;
account.save(secrets).await?;
sync_hostname(&account.hostname).await?;
let parsed_url = Some(COMMUNITY_URL.parse().unwrap());
db.mutate(|d| {
let mut ui = d.as_public().as_ui().de()?;
use imbl_value::json;
// Make both registries known to the UI.
ui["marketplace"]["known-hosts"][COMMUNITY_URL] = json!({});
ui["marketplace"]["known-hosts"][MAIN_REGISTRY] = json!({});
// Repoint installed community-maintained packages at the community
// registry; all other packages are left untouched.
for package_id in d.as_public().as_package_data().keys()? {
if !COMMUNITY_SERVICES.contains(&&*package_id.to_string()) {
continue;
}
d.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package_id)
.or_not_found(&package_id)?
.as_installed_mut()
.or_not_found(&package_id)?
.as_marketplace_url_mut()
.ser(&parsed_url)?;
}
// New UI settings introduced in this release.
ui["theme"] = json!("Dark".to_string());
ui["widgets"] = json!([]);
d.as_public_mut().as_ui_mut().ser(&ui)
})
.await
}
// Reverse migration: repoint community packages back at the main registry
// and strip the UI settings added by `up`.
async fn down(&self, db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
db.mutate(|d| {
let mut ui = d.as_public().as_ui().de()?;
let parsed_url = Some(MAIN_REGISTRY.parse().unwrap());
for package_id in d.as_public().as_package_data().keys()? {
if !COMMUNITY_SERVICES.contains(&&*package_id.to_string()) {
continue;
}
d.as_public_mut()
.as_package_data_mut()
.as_idx_mut(&package_id)
.or_not_found(&package_id)?
.as_installed_mut()
.or_not_found(&package_id)?
.as_marketplace_url_mut()
.ser(&parsed_url)?;
}
if let imbl_value::Value::Object(ref mut obj) = ui {
obj.remove("theme");
obj.remove("widgets");
}
// NOTE(review): `take()` nulls these entries rather than removing the
// keys — presumably acceptable; confirm the UI tolerates null hosts.
ui["marketplace"]["known-hosts"][COMMUNITY_URL].take();
ui["marketplace"]["known-hosts"][MAIN_REGISTRY].take();
d.as_public_mut().as_ui_mut().ser(&ui)
})
.await
}
}

View File

@@ -1,31 +0,0 @@
use async_trait::async_trait;
use emver::VersionRange;
use super::v0_3_4::V0_3_0_COMPAT;
use super::*;
use crate::prelude::*;
// Marker version for the 0.3.4.1 release.
const V0_3_4_1: emver::Version = emver::Version::new(0, 3, 4, 1);
// Unit marker type implementing `VersionT` for 0.3.4.1.
#[derive(Clone, Debug)]
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_3_4::Version;

    /// 0.3.4.1
    fn semver(&self) -> emver::Version {
        V0_3_4_1
    }

    /// Shares the 0.3.x compatibility window with the rest of the 0.3 line.
    fn compat(&self) -> &'static VersionRange {
        &*V0_3_0_COMPAT
    }

    fn new() -> Self {
        Version
    }

    /// This release carries no data-model changes, so migrating up is a no-op.
    async fn up(&self, _db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
        Ok(())
    }

    /// Nothing was changed on the way up, so rolling back is a no-op as well.
    async fn down(&self, _db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,31 +0,0 @@
use async_trait::async_trait;
use emver::VersionRange;
use super::v0_3_4::V0_3_0_COMPAT;
use super::*;
use crate::prelude::*;
// Marker version for the 0.3.4.2 release.
const V0_3_4_2: emver::Version = emver::Version::new(0, 3, 4, 2);
// Unit marker type implementing `VersionT` for 0.3.4.2.
#[derive(Clone, Debug)]
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_3_4_1::Version;

    /// 0.3.4.2
    fn semver(&self) -> emver::Version {
        V0_3_4_2
    }

    /// Shares the 0.3.x compatibility window with the rest of the 0.3 line.
    fn compat(&self) -> &'static VersionRange {
        &*V0_3_0_COMPAT
    }

    fn new() -> Self {
        Version
    }

    /// This release carries no data-model changes, so migrating up is a no-op.
    async fn up(&self, _db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
        Ok(())
    }

    /// Nothing was changed on the way up, so rolling back is a no-op as well.
    async fn down(&self, _db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,31 +0,0 @@
use async_trait::async_trait;
use emver::VersionRange;
use super::v0_3_4::V0_3_0_COMPAT;
use super::*;
use crate::prelude::*;
// Marker version for the 0.3.4.3 release.
const V0_3_4_3: emver::Version = emver::Version::new(0, 3, 4, 3);
// Unit marker type implementing `VersionT` for 0.3.4.3.
#[derive(Clone, Debug)]
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_3_4_2::Version;

    /// 0.3.4.3
    fn semver(&self) -> emver::Version {
        V0_3_4_3
    }

    /// Shares the 0.3.x compatibility window with the rest of the 0.3 line.
    /// (Deref coercion on the lazy_static makes `&V0_3_0_COMPAT` yield
    /// `&'static VersionRange`.)
    fn compat(&self) -> &'static VersionRange {
        &V0_3_0_COMPAT
    }

    fn new() -> Self {
        Version
    }

    /// This release carries no data-model changes, so migrating up is a no-op.
    async fn up(&self, _db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
        Ok(())
    }

    /// Nothing was changed on the way up, so rolling back is a no-op as well.
    async fn down(&self, _db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,43 +0,0 @@
use async_trait::async_trait;
use emver::VersionRange;
use models::ResultExt;
use sqlx::PgPool;
use super::v0_3_4::V0_3_0_COMPAT;
use super::{v0_3_4_3, VersionT};
use crate::prelude::*;
// Marker version for the 0.3.4.4 release.
const V0_3_4_4: emver::Version = emver::Version::new(0, 3, 4, 4);
// Unit marker type implementing `VersionT` for 0.3.4.4.
#[derive(Clone, Debug)]
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_3_4_3::Version;

    fn new() -> Self {
        Version
    }

    /// 0.3.4.4
    fn semver(&self) -> emver::Version {
        V0_3_4_4
    }

    /// Shares the 0.3.x compatibility window with the rest of the 0.3 line.
    fn compat(&self) -> &'static VersionRange {
        &V0_3_0_COMPAT
    }

    /// Forward migration: rewrite the stored tor address so its URL scheme
    /// is `https` instead of whatever was previously persisted.
    async fn up(&self, db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
        db.mutate(|v| {
            let lens = v.as_public_mut().as_server_info_mut().as_tor_address_mut();
            let mut url = lens.de()?;
            // `set_scheme` fails for schemes the URL type disallows; surface
            // that as a parse error rather than panicking.
            url.set_scheme("https")
                .map_err(|_| eyre!("unable to update url scheme to https"))
                .with_kind(crate::ErrorKind::ParseUrl)?;
            lens.ser(&url)
        })
        .await
    }

    /// The scheme change is not reverted on rollback.
    async fn down(&self, _db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,23 +1,26 @@
use std::collections::BTreeMap;
use std::path::Path;
use async_trait::async_trait;
use emver::VersionRange;
use models::DataUrl;
use sqlx::PgPool;
use super::v0_3_4::V0_3_0_COMPAT;
use super::{v0_3_4_4, VersionT};
use super::VersionT;
use crate::prelude::*;
use crate::version::Current;
lazy_static::lazy_static! {
pub static ref V0_3_0_COMPAT: VersionRange = VersionRange::Conj(
Box::new(VersionRange::Anchor(
emver::GTE,
emver::Version::new(0, 3, 0, 0),
)),
Box::new(VersionRange::Anchor(emver::LTE, Current::new().semver())),
);
}
const V0_3_5: emver::Version = emver::Version::new(0, 3, 5, 0);
#[derive(Clone, Debug)]
pub struct Version;
#[async_trait]
impl VersionT for Version {
type Previous = v0_3_4_4::Version;
type Previous = Self;
fn new() -> Self {
Version
}
@@ -27,86 +30,10 @@ impl VersionT for Version {
fn compat(&self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
async fn up(&self, db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
let peek = db.peek().await;
let mut url_replacements = BTreeMap::new();
for (_, pde) in peek.as_public().as_package_data().as_entries()? {
for (dependency, info) in pde
.as_installed()
.map(|i| i.as_dependency_info().as_entries())
.transpose()?
.into_iter()
.flatten()
{
if !url_replacements.contains_key(&dependency) {
url_replacements.insert(
dependency,
DataUrl::from_path(
<&Value>::from(info.as_icon())
.as_str()
.and_then(|i| i.strip_prefix("/public/package-data/"))
.map(|path| {
Path::new("/embassy-data/package-data/public").join(path)
})
.unwrap_or_default(),
)
.await
.unwrap_or_else(|_| {
DataUrl::from_slice(
"image/png",
include_bytes!("../install/package-icon.png"),
)
}),
);
}
}
}
let prev_zram = db
.mutate(|v| {
for (_, pde) in v.as_public_mut().as_package_data_mut().as_entries_mut()? {
for (dependency, info) in pde
.as_installed_mut()
.map(|i| i.as_dependency_info_mut().as_entries_mut())
.transpose()?
.into_iter()
.flatten()
{
if let Some(url) = url_replacements.get(&dependency) {
info.as_icon_mut().ser(url)?;
} else {
info.as_icon_mut().ser(&DataUrl::from_slice(
"image/png",
include_bytes!("../install/package-icon.png"),
))?;
}
let manifest = <&mut Value>::from(&mut *info)
.as_object_mut()
.and_then(|o| o.remove("manifest"));
if let Some(title) = manifest
.as_ref()
.and_then(|m| m.as_object())
.and_then(|m| m.get("title"))
.and_then(|t| t.as_str())
.map(|s| s.to_owned())
{
info.as_title_mut().ser(&title)?;
} else {
info.as_title_mut().ser(&dependency.to_string())?;
}
}
}
v.as_public_mut()
.as_server_info_mut()
.as_zram_mut()
.replace(&true)
})
.await?;
if !prev_zram {
crate::system::enable_zram().await?;
}
async fn up(&self, _db: &PatchDb) -> Result<(), Error> {
Ok(())
}
async fn down(&self, _db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
async fn down(&self, _db: &PatchDb) -> Result<(), Error> {
Ok(())
}
}

View File

@@ -1,8 +1,6 @@
use async_trait::async_trait;
use emver::VersionRange;
use sqlx::PgPool;
use super::v0_3_4::V0_3_0_COMPAT;
use super::v0_3_5::V0_3_0_COMPAT;
use super::{v0_3_5, VersionT};
use crate::prelude::*;
@@ -11,7 +9,6 @@ const V0_3_5_1: emver::Version = emver::Version::new(0, 3, 5, 1);
#[derive(Clone, Debug)]
pub struct Version;
#[async_trait]
impl VersionT for Version {
type Previous = v0_3_5::Version;
fn new() -> Self {
@@ -23,10 +20,10 @@ impl VersionT for Version {
fn compat(&self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
async fn up(&self, _db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
async fn up(&self, _db: &PatchDb) -> Result<(), Error> {
Ok(())
}
async fn down(&self, _db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
async fn down(&self, _db: &PatchDb) -> Result<(), Error> {
Ok(())
}
}

View File

@@ -0,0 +1,29 @@
use emver::VersionRange;
use super::v0_3_5::V0_3_0_COMPAT;
use super::{v0_3_5_1, VersionT};
use crate::prelude::*;
// Marker version for the 0.3.6.0 release.
const V0_3_6: emver::Version = emver::Version::new(0, 3, 6, 0);
// Unit marker type implementing `VersionT` for 0.3.6.
#[derive(Clone, Debug)]
pub struct Version;
impl VersionT for Version {
    type Previous = v0_3_5_1::Version;

    /// 0.3.6
    fn semver(&self) -> emver::Version {
        V0_3_6
    }

    /// Shares the 0.3.x compatibility window with the rest of the 0.3 line.
    fn compat(&self) -> &'static VersionRange {
        &V0_3_0_COMPAT
    }

    fn new() -> Self {
        Version
    }

    /// Migration body not written yet: fail loudly rather than silently
    /// committing a version bump with no data migration.
    async fn up(&self, _db: &PatchDb) -> Result<(), Error> {
        Err(Error::new(eyre!("unimplemented"), ErrorKind::Unknown))
    }

    /// Rollback is a no-op (there is nothing to undo while `up` is a stub).
    async fn down(&self, _db: &PatchDb) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -3,6 +3,7 @@ use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};
pub use helpers::script_dir;
use imbl_value::InternedString;
pub use models::VolumeId;
use models::{HostId, PackageId};
use serde::{Deserialize, Serialize};
@@ -71,6 +72,15 @@ impl DerefMut for Volumes {
impl Map for Volumes {
    type Key = VolumeId;
    type Value = Volume;

    /// Borrow the key directly; `VolumeId` itself serves as the string view.
    fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error> {
        Ok(key)
    }

    /// Custom volume ids already own an interned string, so reuse it;
    /// every other variant goes through the borrowed `key_str` path.
    fn key_string(key: &Self::Key) -> Result<InternedString, Error> {
        if let VolumeId::Custom(id) = key {
            Ok(id.clone().into())
        } else {
            Self::key_str(key).map(|s| InternedString::intern(s.as_ref()))
        }
    }
}
pub fn data_dir<P: AsRef<Path>>(datadir: P, pkg_id: &PackageId, volume_id: &VolumeId) -> PathBuf {