Refactor/networking (#2189)

* refactor networking and account

* add interfaces from manifest automatically

* use nistp256 to satisfy firefox

* use ed25519 if available

* fix ip signing

* fix SQL error

* update prettytable to fix segfault

* fix migration

* fix migration

* bump welcome-ack

* add redirect if connecting to https over http

* misc rebase fixes

* fix compression

* bump rustc version
This commit is contained in:
Aiden McClelland
2023-03-08 19:30:46 -07:00
committed by GitHub
parent da55d6f7cd
commit bbb9980941
79 changed files with 3577 additions and 3587 deletions

View File

@@ -5,7 +5,7 @@ on:
workflow_dispatch: workflow_dispatch:
env: env:
RUST_VERSION: "1.62.1" RUST_VERSION: "1.67.1"
ENVIRONMENT: "dev" ENVIRONMENT: "dev"
jobs: jobs:

1254
backend/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -90,6 +90,8 @@ hyper = { version = "0.14.20", features = ["full"] }
hyper-ws-listener = "0.2.0" hyper-ws-listener = "0.2.0"
imbl = "2.0.0" imbl = "2.0.0"
indexmap = { version = "1.9.1", features = ["serde"] } indexmap = { version = "1.9.1", features = ["serde"] }
ipnet = { version = "2.7.1", features = ["serde"] }
iprange = { version = "0.6.7", features = ["serde"] }
isocountry = "0.3.2" isocountry = "0.3.2"
itertools = "0.10.3" itertools = "0.10.3"
josekit = "0.8.1" josekit = "0.8.1"
@@ -109,10 +111,11 @@ openssl = { version = "0.10.41", features = ["vendored"] }
patch-db = { version = "*", path = "../patch-db/patch-db", features = [ patch-db = { version = "*", path = "../patch-db/patch-db", features = [
"trace", "trace",
] } ] }
p256 = { version = "0.12.0", features = ["pem"] }
pbkdf2 = "0.11.0" pbkdf2 = "0.11.0"
pin-project = "1.0.11" pin-project = "1.0.11"
pkcs8 = { version = "0.9.0", features = ["std"] } pkcs8 = { version = "0.9.0", features = ["std"] }
prettytable-rs = "0.9.0" prettytable-rs = "0.10.0"
proptest = "1.0.0" proptest = "1.0.0"
proptest-derive = "0.3.0" proptest-derive = "0.3.0"
rand = { version = "0.8.5", features = ["std"] } rand = { version = "0.8.5", features = ["std"] }
@@ -158,6 +161,7 @@ trust-dns-server = "0.22.0"
typed-builder = "0.10.0" typed-builder = "0.10.0"
url = { version = "2.2.2", features = ["serde"] } url = { version = "2.2.2", features = ["serde"] }
uuid = { version = "1.1.2", features = ["v4"] } uuid = { version = "1.1.2", features = ["v4"] }
zeroize = "1.5.7"
[profile.test] [profile.test]
opt-level = 3 opt-level = 3

View File

@@ -1,21 +0,0 @@
-- Add migration script here
-- pgcrypto provides gen_random_bytes(); IF NOT EXISTS keeps the migration
-- re-runnable when the extension was already installed
CREATE EXTENSION IF NOT EXISTS pgcrypto;

-- 32-byte seed for the server's SSH host key
ALTER TABLE account
    ADD COLUMN ssh_key BYTEA CHECK (length(ssh_key) = 32);

-- Populate the singleton account row (id = 0) with a fresh random seed
UPDATE account
SET ssh_key = gen_random_bytes(32)
WHERE id = 0;

-- Now that the row has a value, enforce presence going forward
ALTER TABLE account
    ALTER COLUMN ssh_key SET NOT NULL;

View File

@@ -0,0 +1,62 @@
-- Add migration script here
-- pgcrypto provides gen_random_bytes(); IF NOT EXISTS avoids an error on
-- databases where an earlier migration already installed the extension
CREATE EXTENSION IF NOT EXISTS pgcrypto;

-- New identity/networking columns on the singleton account row
ALTER TABLE account
    ADD COLUMN server_id TEXT,
    ADD COLUMN hostname TEXT,
    -- 32-byte seed from which per-interface network keys are derived
    ADD COLUMN network_key BYTEA CHECK (length(network_key) = 32),
    ADD COLUMN root_ca_key_pem TEXT,
    ADD COLUMN root_ca_cert_pem TEXT;

-- Seed the new columns: generate a fresh network key and migrate the root CA
-- material out of the legacy certificates table (id = 0 holds the root CA)
UPDATE account
SET network_key = gen_random_bytes(32),
    root_ca_key_pem = (
        SELECT priv_key_pem
        FROM certificates
        WHERE id = 0
    ),
    root_ca_cert_pem = (
        SELECT certificate_pem
        FROM certificates
        WHERE id = 0
    )
WHERE id = 0;

-- tor_key becomes optional (it can be derived from network_key when absent);
-- the newly populated columns are mandatory from here on
ALTER TABLE account
    ALTER COLUMN tor_key DROP NOT NULL,
    ALTER COLUMN network_key SET NOT NULL,
    ALTER COLUMN root_ca_key_pem SET NOT NULL,
    ALTER COLUMN root_ca_cert_pem SET NOT NULL;

-- Per-(package, interface) 32-byte network key seeds
CREATE TABLE IF NOT EXISTS network_keys (
    package TEXT NOT NULL,
    interface TEXT NOT NULL,
    key BYTEA NOT NULL CHECK (length(key) = 32),
    PRIMARY KEY (package, interface)
);

View File

@@ -1,6 +1,6 @@
{ {
"db": "PostgreSQL", "db": "PostgreSQL",
"094882d4d46d52e814f9aaf5fae172a5dd745b06cbde347f47b18e6498167269": { "1ce5254f27de971fd87f5ab66d300f2b22433c86617a0dbf796bf2170186dd2e": {
"describe": { "describe": {
"columns": [], "columns": [],
"nullable": [], "nullable": [],
@@ -8,35 +8,11 @@
"Left": [ "Left": [
"Text", "Text",
"Text", "Text",
"Text" "Bytea"
] ]
} }
}, },
"query": "UPDATE certificates SET priv_key_pem = $1, certificate_pem = $2, updated_at = now() WHERE lookup_string = $3" "query": "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING"
},
"165daa7d6a60cb42122373b2c5ac7d39399bcc99992f0002ee7bfef50a8daceb": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": []
}
},
"query": "DELETE FROM certificates WHERE id = 0 OR id = 1;"
},
"1f7936d27d63f01118ecd6f824e8a79607ed2b6e6def23f3e2487466dd2ddfe1": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text"
]
}
},
"query": "INSERT INTO certificates (priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES ($1, $2, $3, now(), now())"
}, },
"21471490cdc3adb206274cc68e1ea745ffa5da4479478c1fd2158a45324b1930": { "21471490cdc3adb206274cc68e1ea745ffa5da4479478c1fd2158a45324b1930": {
"describe": { "describe": {
@@ -50,20 +26,6 @@
}, },
"query": "DELETE FROM ssh_keys WHERE fingerprint = $1" "query": "DELETE FROM ssh_keys WHERE fingerprint = $1"
}, },
"22613628ff50341fdc35366e194fdcd850118824763cfe0dfff68dadc72167e9": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
}
},
"query": "INSERT INTO tor (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO UPDATE SET key = $3"
},
"28ea34bbde836e0618c5fc9bb7c36e463c20c841a7d6a0eb15be0f24f4a928ec": { "28ea34bbde836e0618c5fc9bb7c36e463c20c841a7d6a0eb15be0f24f4a928ec": {
"describe": { "describe": {
"columns": [ "columns": [
@@ -102,39 +64,6 @@
}, },
"query": "SELECT hostname, path, username, password FROM cifs_shares WHERE id = $1" "query": "SELECT hostname, path, username, password FROM cifs_shares WHERE id = $1"
}, },
"2f615764532e975c964f1d0e063a02110d781644b0eaae1ff85a7d6ed903bfe5": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Int4",
"Text",
"Bytea",
"Bytea"
]
}
},
"query": "INSERT INTO account (id, password, tor_key, ssh_key) VALUES ($1, $2, $3, $4) ON CONFLICT (id) DO UPDATE SET password = $2, tor_key = $3, ssh_key = $4"
},
"3502e58f2ab48fb4566d21c920c096f81acfa3ff0d02f970626a4dcd67bac71d": {
"describe": {
"columns": [
{
"name": "tor_key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT tor_key FROM account"
},
"4099028a5c0de578255bf54a67cef6cb0f1e9a4e158260700f1639dd4b438997": { "4099028a5c0de578255bf54a67cef6cb0f1e9a4e158260700f1639dd4b438997": {
"describe": { "describe": {
"columns": [ "columns": [
@@ -167,32 +96,6 @@
}, },
"query": "SELECT * FROM ssh_keys WHERE fingerprint = $1" "query": "SELECT * FROM ssh_keys WHERE fingerprint = $1"
}, },
"46815a4ac2c43e1dfbab3c0017ed09d5c833062e523205db4245a5218b2562b8": {
"describe": {
"columns": [
{
"name": "priv_key_pem",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "certificate_pem",
"ordinal": 1,
"type_info": "Text"
}
],
"nullable": [
false,
false
],
"parameters": {
"Left": [
"Text"
]
}
},
"query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE lookup_string = $1"
},
"4691e3a2ce80b59009ac17124f54f925f61dc5ea371903e62cdffa5d7b67ca96": { "4691e3a2ce80b59009ac17124f54f925f61dc5ea371903e62cdffa5d7b67ca96": {
"describe": { "describe": {
"columns": [ "columns": [
@@ -253,42 +156,6 @@
}, },
"query": "UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = $1" "query": "UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = $1"
}, },
"548448e8ed8bcdf9efdc813d65af2cc55064685293b936f0f09e07f91a328eb9": {
"describe": {
"columns": [
{
"name": "setval",
"ordinal": 0,
"type_info": "Int8"
}
],
"nullable": [
null
],
"parameters": {
"Left": []
}
},
"query": "SELECT setval('certificates_id_seq', GREATEST(MAX(id) + 1, nextval('certificates_id_seq') - 1)) FROM certificates"
},
"5c0ea94081695dba827e525ecc0c555757b43ea513c2c93f9c7f7f8c174d36bf": {
"describe": {
"columns": [
{
"name": "ssh_key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT ssh_key FROM account"
},
"629be61c3c341c131ddbbff0293a83dbc6afd07cae69d246987f62cf0cc35c2a": { "629be61c3c341c131ddbbff0293a83dbc6afd07cae69d246987f62cf0cc35c2a": {
"describe": { "describe": {
"columns": [ "columns": [
@@ -328,30 +195,6 @@
}, },
"query": "SELECT key FROM tor WHERE package = $1 AND interface = $2" "query": "SELECT key FROM tor WHERE package = $1 AND interface = $2"
}, },
"6c96d76bffcc5f03290d8d8544a58521345ed2a843a509b17bbcd6257bb81821": {
"describe": {
"columns": [
{
"name": "priv_key_pem",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "certificate_pem",
"ordinal": 1,
"type_info": "Text"
}
],
"nullable": [
false,
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE id = 1;"
},
"6d35ccf780fb2bb62586dd1d3df9c1550a41ee580dad3f49d35cb843ebef10ca": { "6d35ccf780fb2bb62586dd1d3df9c1550a41ee580dad3f49d35cb843ebef10ca": {
"describe": { "describe": {
"columns": [], "columns": [],
@@ -364,6 +207,28 @@
}, },
"query": "UPDATE session SET last_active = CURRENT_TIMESTAMP WHERE id = $1 AND logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP" "query": "UPDATE session SET last_active = CURRENT_TIMESTAMP WHERE id = $1 AND logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP"
}, },
"770c1017734720453dc87b58c385b987c5af5807151ff71a59000014586752e0": {
"describe": {
"columns": [
{
"name": "key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
false
],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
}
},
"query": "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO UPDATE SET package = EXCLUDED.package RETURNING key"
},
"7b64f032d507e8ffe37c41f4c7ad514a66c421a11ab04c26d89a7aa8f6b67210": { "7b64f032d507e8ffe37c41f4c7ad514a66c421a11ab04c26d89a7aa8f6b67210": {
"describe": { "describe": {
"columns": [ "columns": [
@@ -427,6 +292,23 @@
}, },
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications WHERE id < $1 ORDER BY id DESC LIMIT $2" "query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications WHERE id < $1 ORDER BY id DESC LIMIT $2"
}, },
"7c7a3549c997eb75bf964ea65fbb98a73045adf618696cd838d79203ef5383fb": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Bytea",
"Text",
"Text"
]
}
},
"query": "\n INSERT INTO account (\n id,\n server_id,\n hostname,\n password,\n network_key,\n root_ca_key_pem,\n root_ca_cert_pem\n ) VALUES (\n 0, $1, $2, $3, $4, $5, $6\n ) ON CONFLICT (id) DO UPDATE SET\n server_id = EXCLUDED.server_id,\n hostname = EXCLUDED.hostname,\n password = EXCLUDED.password,\n network_key = EXCLUDED.network_key,\n root_ca_key_pem = EXCLUDED.root_ca_key_pem,\n root_ca_cert_pem = EXCLUDED.root_ca_cert_pem\n "
},
"7e0649d839927e57fa03ee51a2c9f96a8bdb0fc97ee8a3c6df1069e1e2b98576": { "7e0649d839927e57fa03ee51a2c9f96a8bdb0fc97ee8a3c6df1069e1e2b98576": {
"describe": { "describe": {
"columns": [], "columns": [],
@@ -615,19 +497,6 @@
}, },
"query": "UPDATE cifs_shares SET hostname = $1, path = $2, username = $3, password = $4 WHERE id = $5" "query": "UPDATE cifs_shares SET hostname = $1, path = $2, username = $3, password = $4 WHERE id = $5"
}, },
"cec8112be0ebc02ef7e651631be09efe26d1677b5b8aa95ceb3a92aff1afdbcc": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text"
]
}
},
"query": "INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (1, $1, $2, NULL, now(), now())"
},
"d5117054072476377f3c4f040ea429d4c9b2cf534e76f35c80a2bf60e8599cca": { "d5117054072476377f3c4f040ea429d4c9b2cf534e76f35c80a2bf60e8599cca": {
"describe": { "describe": {
"columns": [ "columns": [
@@ -663,19 +532,6 @@
}, },
"query": "INSERT INTO notifications (package_id, code, level, title, message, data) VALUES ($1, $2, $3, $4, $5, $6)" "query": "INSERT INTO notifications (package_id, code, level, title, message, data) VALUES ($1, $2, $3, $4, $5, $6)"
}, },
"df4428ccb891bd791824bcd7990550cc9651e1cfaab1db33833ff7959d113b2c": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text"
]
}
},
"query": "INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (0, $1, $2, NULL, now(), now())"
},
"e185203cf84e43b801dfb23b4159e34aeaef1154dcd3d6811ab504915497ccf7": { "e185203cf84e43b801dfb23b4159e34aeaef1154dcd3d6811ab504915497ccf7": {
"describe": { "describe": {
"columns": [], "columns": [],
@@ -688,17 +544,23 @@
}, },
"query": "DELETE FROM notifications WHERE id = $1" "query": "DELETE FROM notifications WHERE id = $1"
}, },
"e25e53c45c5a494a45cdb4d145de507df6f322ac6706e71b86895f1c64195f41": { "e545696735f202f9d13cf22a561f3ff3f9aed7f90027a9ba97634bcb47d772f0": {
"describe": { "describe": {
"columns": [], "columns": [
"nullable": [], {
"name": "tor_key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
true
],
"parameters": { "parameters": {
"Left": [ "Left": []
"Text"
]
} }
}, },
"query": "UPDATE account SET password = $1" "query": "SELECT tor_key FROM account WHERE id = 0"
}, },
"e5843c5b0e7819b29aa1abf2266799bd4f82e761837b526a0972c3d4439a264d": { "e5843c5b0e7819b29aa1abf2266799bd4f82e761837b526a0972c3d4439a264d": {
"describe": { "describe": {
@@ -714,21 +576,33 @@
}, },
"query": "INSERT INTO session (id, user_agent, metadata) VALUES ($1, $2, $3)" "query": "INSERT INTO session (id, user_agent, metadata) VALUES ($1, $2, $3)"
}, },
"e85749336fce4afaf16627bee8cfcb70be6f189ea7d1f05f9a26bead4be11839": { "e95322a8e2ae3b93f1e974b24c0b81803f1e9ec9e8ebbf15cafddfc1c5a028ed": {
"describe": { "describe": {
"columns": [ "columns": [
{ {
"name": "interface", "name": "package",
"ordinal": 0, "ordinal": 0,
"type_info": "Text" "type_info": "Text"
}, },
{ {
"name": "key", "name": "interface",
"ordinal": 1, "ordinal": 1,
"type_info": "Text"
},
{
"name": "key",
"ordinal": 2,
"type_info": "Bytea"
},
{
"name": "tor_key?",
"ordinal": 3,
"type_info": "Bytea" "type_info": "Bytea"
} }
], ],
"nullable": [ "nullable": [
false,
false,
false, false,
false false
], ],
@@ -738,7 +612,7 @@
] ]
} }
}, },
"query": "SELECT interface, key FROM tor WHERE package = $1" "query": "\n SELECT\n network_keys.package,\n network_keys.interface,\n network_keys.key,\n tor.key AS \"tor_key?\"\n FROM\n network_keys\n LEFT JOIN\n tor\n ON\n network_keys.package = tor.package\n AND\n network_keys.interface = tor.interface\n WHERE\n network_keys.package = $1\n "
}, },
"eb750adaa305bdbf3c5b70aaf59139c7b7569602adb58f2d6b3a94da4f167b0a": { "eb750adaa305bdbf3c5b70aaf59139c7b7569602adb58f2d6b3a94da4f167b0a": {
"describe": { "describe": {
@@ -775,30 +649,6 @@
}, },
"query": "INSERT INTO cifs_shares (hostname, path, username, password) VALUES ($1, $2, $3, $4) RETURNING id" "query": "INSERT INTO cifs_shares (hostname, path, username, password) VALUES ($1, $2, $3, $4) RETURNING id"
}, },
"ed848affa5bf92997cd441e3a50b3616b6724df3884bd9d199b3225e0bea8a54": {
"describe": {
"columns": [
{
"name": "priv_key_pem",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "certificate_pem",
"ordinal": 1,
"type_info": "Text"
}
],
"nullable": [
false,
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE id = 0;"
},
"f6d1c5ef0f9d9577bea8382318967b9deb46da75788c7fe6082b43821c22d556": { "f6d1c5ef0f9d9577bea8382318967b9deb46da75788c7fe6082b43821c22d556": {
"describe": { "describe": {
"columns": [], "columns": [],
@@ -812,5 +662,83 @@
} }
}, },
"query": "INSERT INTO ssh_keys (fingerprint, openssh_pubkey, created_at) VALUES ($1, $2, $3)" "query": "INSERT INTO ssh_keys (fingerprint, openssh_pubkey, created_at) VALUES ($1, $2, $3)"
},
"f7d2dae84613bcef330f7403352cc96547f3f6dbec11bf2eadfaf53ad8ab51b5": {
"describe": {
"columns": [
{
"name": "network_key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT network_key FROM account WHERE id = 0"
},
"fe6e4f09f3028e5b6b6259e86cbad285680ce157aae9d7837ac020c8b2945e7f": {
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Int4"
},
{
"name": "password",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "tor_key",
"ordinal": 2,
"type_info": "Bytea"
},
{
"name": "server_id",
"ordinal": 3,
"type_info": "Text"
},
{
"name": "hostname",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "network_key",
"ordinal": 5,
"type_info": "Bytea"
},
{
"name": "root_ca_key_pem",
"ordinal": 6,
"type_info": "Text"
},
{
"name": "root_ca_cert_pem",
"ordinal": 7,
"type_info": "Text"
}
],
"nullable": [
false,
false,
true,
true,
true,
false,
false,
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT * FROM account WHERE id = 0"
} }
} }

120
backend/src/account.rs Normal file
View File

@@ -0,0 +1,120 @@
use ed25519_dalek::{ExpandedSecretKey, SecretKey};
use models::ResultExt;
use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
use sqlx::PgExecutor;
use crate::hostname::{generate_hostname, generate_id, Hostname};
use crate::net::keys::Key;
use crate::net::ssl::{generate_key, make_root_cert};
use crate::Error;
/// Hash `password` with argon2 under the default configuration, using a
/// freshly generated random 16-byte salt.
fn hash_password(password: &str) -> Result<String, Error> {
    let salt: [u8; 16] = rand::random();
    let config = argon2::Config::default();
    argon2::hash_encoded(password.as_bytes(), &salt[..], &config)
        .with_kind(crate::ErrorKind::PasswordHashGeneration)
}
/// In-memory copy of the singleton `account` row (id = 0) in the secret store.
#[derive(Debug, Clone)]
pub struct AccountInfo {
    /// Server identifier; regenerated on load if the column is NULL.
    pub server_id: String,
    /// Device hostname; regenerated on load if the column is NULL.
    pub hostname: Hostname,
    /// argon2-encoded password hash (see `hash_password`).
    pub password: String,
    /// Network key; the tor key is derived from it when none is stored.
    pub key: Key,
    /// Root CA private key (persisted as PEM in `root_ca_key_pem`).
    pub root_ca_key: PKey<Private>,
    /// Root CA certificate (persisted as PEM in `root_ca_cert_pem`).
    pub root_ca_cert: X509,
}
impl AccountInfo {
    /// Create a brand-new account: fresh server id and hostname, a hashed
    /// password, a new network key, and a self-signed root CA.
    pub fn new(password: &str) -> Result<Self, Error> {
        let server_id = generate_id();
        let hostname = generate_hostname();
        let root_ca_key = generate_key()?;
        let root_ca_cert = make_root_cert(&root_ca_key, &hostname)?;
        Ok(Self {
            server_id,
            hostname,
            password: hash_password(password)?,
            key: Key::new(None),
            root_ca_key,
            root_ca_cert,
        })
    }
    /// Load the singleton account row (id = 0) from the secret store.
    ///
    /// `server_id` and `hostname` are regenerated when NULL (rows created
    /// before the networking migration); when no tor key is stored, one is
    /// derived from the network key instead.
    pub async fn load(secrets: impl PgExecutor<'_>) -> Result<Self, Error> {
        let r = sqlx::query!("SELECT * FROM account WHERE id = 0")
            .fetch_one(secrets)
            .await?;
        let server_id = r.server_id.unwrap_or_else(generate_id);
        let hostname = r.hostname.map(Hostname).unwrap_or_else(generate_hostname);
        let password = r.password;
        // network_key is NOT NULL and CHECKed to 32 bytes by the migration
        let network_key = SecretKey::from_bytes(&r.network_key)?;
        let tor_key = if let Some(k) = &r.tor_key {
            ExpandedSecretKey::from_bytes(k)?
        } else {
            // tor_key is nullable post-migration: derive it from the network key
            ExpandedSecretKey::from(&network_key)
        };
        let key = Key::from_pair(None, network_key.to_bytes(), tor_key.to_bytes());
        let root_ca_key = PKey::private_key_from_pem(r.root_ca_key_pem.as_bytes())?;
        let root_ca_cert = X509::from_pem(r.root_ca_cert_pem.as_bytes())?;
        Ok(Self {
            server_id,
            hostname,
            password,
            key,
            root_ca_key,
            root_ca_cert,
        })
    }
    /// Upsert this account into the `account` table as the singleton row
    /// (id = 0), serializing the root CA key/cert to PEM.
    pub async fn save(&self, secrets: impl PgExecutor<'_>) -> Result<(), Error> {
        let server_id = self.server_id.as_str();
        let hostname = self.hostname.0.as_str();
        let password = self.password.as_str();
        let network_key = self.key.as_bytes();
        let network_key = network_key.as_slice();
        let root_ca_key = String::from_utf8(self.root_ca_key.private_key_to_pem_pkcs8()?)?;
        let root_ca_cert = String::from_utf8(self.root_ca_cert.to_pem()?)?;
        sqlx::query!(
            r#"
            INSERT INTO account (
                id,
                server_id,
                hostname,
                password,
                network_key,
                root_ca_key_pem,
                root_ca_cert_pem
            ) VALUES (
                0, $1, $2, $3, $4, $5, $6
            ) ON CONFLICT (id) DO UPDATE SET
                server_id = EXCLUDED.server_id,
                hostname = EXCLUDED.hostname,
                password = EXCLUDED.password,
                network_key = EXCLUDED.network_key,
                root_ca_key_pem = EXCLUDED.root_ca_key_pem,
                root_ca_cert_pem = EXCLUDED.root_ca_cert_pem
            "#,
            server_id,
            hostname,
            password,
            network_key,
            root_ca_key,
            root_ca_cert,
        )
        .execute(secrets)
        .await?;
        Ok(())
    }
    /// Replace the in-memory password with a fresh argon2 hash of `password`.
    /// Call `save` afterwards to persist it.
    pub fn set_password(&mut self, password: &str) -> Result<(), Error> {
        self.password = hash_password(password)?;
        Ok(())
    }
}

View File

@@ -4,13 +4,13 @@ use clap::ArgMatches;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use indexmap::IndexSet; use indexmap::IndexSet;
pub use models::ActionId; pub use models::ActionId;
use models::ImageId;
use rpc_toolkit::command; use rpc_toolkit::command;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use tracing::instrument; use tracing::instrument;
use crate::config::{Config, ConfigSpec}; use crate::config::{Config, ConfigSpec};
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::id::ImageId;
use crate::procedure::docker::DockerContainers; use crate::procedure::docker::DockerContainers;
use crate::procedure::{PackageProcedure, ProcedureName}; use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;

View File

@@ -364,31 +364,6 @@ impl SetPasswordReceipt {
} }
} }
pub async fn set_password<Db: DbHandle, Ex>(
db: &mut Db,
receipt: &SetPasswordReceipt,
secrets: &mut Ex,
password: &str,
) -> Result<(), Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
let password = argon2::hash_encoded(
password.as_bytes(),
&rand::random::<[u8; 16]>()[..],
&argon2::Config::default(),
)
.with_kind(crate::ErrorKind::PasswordHashGeneration)?;
sqlx::query!("UPDATE account SET password = $1", password,)
.execute(secrets)
.await?;
receipt.0.set(db, password).await?;
Ok(())
}
#[command( #[command(
rename = "reset-password", rename = "reset-password",
custom_cli(cli_reset_password(async, context(CliContext))), custom_cli(cli_reset_password(async, context(CliContext))),
@@ -403,14 +378,22 @@ pub async fn reset_password(
let old_password = old_password.unwrap_or_default().decrypt(&ctx)?; let old_password = old_password.unwrap_or_default().decrypt(&ctx)?;
let new_password = new_password.unwrap_or_default().decrypt(&ctx)?; let new_password = new_password.unwrap_or_default().decrypt(&ctx)?;
let mut secrets = ctx.secret_store.acquire().await?; let mut account = ctx.account.write().await;
check_password_against_db(&mut secrets, &old_password).await?; if !argon2::verify_encoded(&account.password, old_password.as_bytes())
.with_kind(crate::ErrorKind::IncorrectPassword)?
let mut db = ctx.db.handle(); {
return Err(Error::new(
let set_password_receipt = SetPasswordReceipt::new(&mut db).await?; eyre!("Incorrect Password"),
crate::ErrorKind::IncorrectPassword,
set_password(&mut db, &set_password_receipt, &mut secrets, &new_password).await?; ));
}
account.set_password(&new_password)?;
account.save(&ctx.secret_store).await?;
crate::db::DatabaseModel::new()
.server_info()
.password_hash()
.put(&mut ctx.db.handle(), &account.password)
.await?;
Ok(()) Ok(())
} }

View File

@@ -5,21 +5,15 @@ use chrono::Utc;
use clap::ArgMatches; use clap::ArgMatches;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use helpers::AtomicFile; use helpers::AtomicFile;
use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
use patch_db::{DbHandle, LockType, PatchDbHandle}; use patch_db::{DbHandle, LockType, PatchDbHandle};
use rand::random;
use rpc_toolkit::command; use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use ssh_key::private::Ed25519PrivateKey;
use tokio::io::AsyncWriteExt; use tokio::io::AsyncWriteExt;
use torut::onion::TorSecretKeyV3;
use tracing::instrument; use tracing::instrument;
use super::target::BackupTargetId; use super::target::BackupTargetId;
use super::PackageBackupReport; use super::PackageBackupReport;
use crate::auth::check_password_against_db; use crate::auth::check_password_against_db;
use crate::backup::os::OsBackup;
use crate::backup::{BackupReport, ServerBackupReport}; use crate::backup::{BackupReport, ServerBackupReport};
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::db::model::BackupProgress; use crate::db::model::BackupProgress;
@@ -34,109 +28,6 @@ use crate::util::serde::IoFormat;
use crate::version::VersionT; use crate::version::VersionT;
use crate::{Error, ErrorKind, ResultExt}; use crate::{Error, ErrorKind, ResultExt};
#[derive(Debug)]
pub struct OsBackup {
pub tor_key: TorSecretKeyV3,
pub ssh_key: Ed25519PrivateKey,
pub root_ca_key: PKey<Private>,
pub root_ca_cert: X509,
pub ui: Value,
}
impl<'de> Deserialize<'de> for OsBackup {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
#[derive(Deserialize)]
#[serde(rename = "kebab-case")]
struct OsBackupDe {
tor_key: String,
ssh_key: Option<String>,
root_ca_key: String,
root_ca_cert: String,
ui: Value,
}
fn vec_from_base32<E: serde::de::Error>(base32: &str, len: usize) -> Result<Vec<u8>, E> {
let key_vec = base32::decode(base32::Alphabet::RFC4648 { padding: true }, base32)
.ok_or_else(|| {
serde::de::Error::invalid_value(
serde::de::Unexpected::Str(base32),
&"an RFC4648 encoded string",
)
})?;
if key_vec.len() != 64 {
return Err(serde::de::Error::invalid_value(
serde::de::Unexpected::Str(base32),
&"a 64 byte value encoded as an RFC4648 string",
));
}
Ok(key_vec)
}
let int = OsBackupDe::deserialize(deserializer)?;
let tor_key = {
let mut key_slice = [0; 64];
key_slice.clone_from_slice(&vec_from_base32(&int.tor_key, 64)?);
TorSecretKeyV3::from(key_slice)
};
let ssh_key = int
.ssh_key
.as_ref()
.map(|ssh_key| {
let mut key_slice = [0; 32];
key_slice.clone_from_slice(&vec_from_base32(ssh_key, 32)?);
Ok(Ed25519PrivateKey::from_bytes(&key_slice))
})
.transpose()?
.unwrap_or_else(|| Ed25519PrivateKey::from_bytes(&random()));
let root_ca_key = PKey::<Private>::private_key_from_pem(int.root_ca_key.as_bytes())
.map_err(serde::de::Error::custom)?;
let root_ca_cert =
X509::from_pem(int.root_ca_cert.as_bytes()).map_err(serde::de::Error::custom)?;
Ok(OsBackup {
tor_key,
ssh_key,
root_ca_key,
root_ca_cert,
ui: int.ui,
})
}
}
impl Serialize for OsBackup {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
#[derive(Serialize)]
#[serde(rename = "kebab-case")]
struct OsBackupSer<'a> {
tor_key: String,
root_ca_key: String,
root_ca_cert: String,
ui: &'a Value,
}
OsBackupSer {
tor_key: base32::encode(
base32::Alphabet::RFC4648 { padding: true },
&self.tor_key.as_bytes(),
),
root_ca_key: String::from_utf8(
self.root_ca_key
.private_key_to_pem_pkcs8()
.map_err(serde::ser::Error::custom)?,
)
.map_err(serde::ser::Error::custom)?,
root_ca_cert: String::from_utf8(
self.root_ca_cert
.to_pem()
.map_err(serde::ser::Error::custom)?,
)
.map_err(serde::ser::Error::custom)?,
ui: &self.ui,
}
.serialize(serializer)
}
}
fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<BTreeSet<PackageId>, Error> { fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<BTreeSet<PackageId>, Error> {
arg.split(',') arg.split(',')
.map(|s| s.trim().parse().map_err(Error::from)) .map(|s| s.trim().parse().map_err(Error::from))
@@ -317,7 +208,6 @@ async fn perform_backup<Db: DbHandle>(
package_ids: &BTreeSet<PackageId>, package_ids: &BTreeSet<PackageId>,
) -> Result<BTreeMap<PackageId, PackageBackupReport>, Error> { ) -> Result<BTreeMap<PackageId, PackageBackupReport>, Error> {
let mut backup_report = BTreeMap::new(); let mut backup_report = BTreeMap::new();
for package_id in crate::db::DatabaseModel::new() for package_id in crate::db::DatabaseModel::new()
.package_data() .package_data()
.keys(&mut db) .keys(&mut db)
@@ -445,11 +335,12 @@ async fn perform_backup<Db: DbHandle>(
tx.save().await?; tx.save().await?;
} }
crate::db::DatabaseModel::new() let ui = crate::db::DatabaseModel::new()
.lock(&mut db, LockType::Write) .ui()
.await?; .get(&mut db)
.await?
.into_owned();
let (root_ca_key, root_ca_cert) = ctx.net_controller.ssl.export_root_ca().await?;
let mut os_backup_file = AtomicFile::new( let mut os_backup_file = AtomicFile::new(
backup_guard.as_ref().join("os-backup.cbor"), backup_guard.as_ref().join("os-backup.cbor"),
None::<PathBuf>, None::<PathBuf>,
@@ -457,19 +348,10 @@ async fn perform_backup<Db: DbHandle>(
.await .await
.with_kind(ErrorKind::Filesystem)?; .with_kind(ErrorKind::Filesystem)?;
os_backup_file os_backup_file
.write_all( .write_all(&IoFormat::Cbor.to_vec(&OsBackup {
&IoFormat::Cbor.to_vec(&OsBackup { account: ctx.account.read().await.clone(),
tor_key: ctx.net_controller.tor.embassyd_tor_key().await, ui,
ssh_key: crate::ssh::os_key(&mut ctx.secret_store.acquire().await?).await?, })?)
root_ca_key,
root_ca_cert,
ui: crate::db::DatabaseModel::new()
.ui()
.get(&mut db)
.await?
.into_owned(),
})?,
)
.await?; .await?;
os_backup_file os_backup_file
.save() .save()

View File

@@ -4,6 +4,7 @@ use std::path::{Path, PathBuf};
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use helpers::AtomicFile; use helpers::AtomicFile;
use models::ImageId;
use patch_db::{DbHandle, HasModel}; use patch_db::{DbHandle, HasModel};
use reqwest::Url; use reqwest::Url;
use rpc_toolkit::command; use rpc_toolkit::command;
@@ -15,19 +16,20 @@ use tracing::instrument;
use self::target::PackageBackupInfo; use self::target::PackageBackupInfo;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::dependencies::reconfigure_dependents_with_live_pointers; use crate::dependencies::reconfigure_dependents_with_live_pointers;
use crate::id::ImageId;
use crate::install::PKG_ARCHIVE_DIR; use crate::install::PKG_ARCHIVE_DIR;
use crate::net::interface::{InterfaceId, Interfaces}; use crate::net::interface::{InterfaceId, Interfaces};
use crate::net::keys::Key;
use crate::procedure::docker::DockerContainers; use crate::procedure::docker::DockerContainers;
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName}; use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;
use crate::util::serde::IoFormat; use crate::util::serde::{Base32, Base64, IoFormat};
use crate::util::Version; use crate::util::Version;
use crate::version::{Current, VersionT}; use crate::version::{Current, VersionT};
use crate::volume::{backup_dir, Volume, VolumeId, Volumes, BACKUP_DIR}; use crate::volume::{backup_dir, Volume, VolumeId, Volumes, BACKUP_DIR};
use crate::{Error, ErrorKind, ResultExt}; use crate::{Error, ErrorKind, ResultExt};
pub mod backup_bulk; pub mod backup_bulk;
pub mod os;
pub mod restore; pub mod restore;
pub mod target; pub mod target;
@@ -61,7 +63,10 @@ pub fn package_backup() -> Result<(), Error> {
#[derive(Deserialize, Serialize)] #[derive(Deserialize, Serialize)]
struct BackupMetadata { struct BackupMetadata {
pub timestamp: DateTime<Utc>, pub timestamp: DateTime<Utc>,
pub tor_keys: BTreeMap<InterfaceId, String>, #[serde(default)]
pub network_keys: BTreeMap<InterfaceId, Base64<[u8; 32]>>,
#[serde(default)]
pub tor_keys: BTreeMap<InterfaceId, Base32<[u8; 64]>>, // DEPRECATED
pub marketplace_url: Option<Url>, pub marketplace_url: Option<Url>,
} }
@@ -117,17 +122,17 @@ impl BackupActions {
.await? .await?
.map_err(|e| eyre!("{}", e.1)) .map_err(|e| eyre!("{}", e.1))
.with_kind(crate::ErrorKind::Backup)?; .with_kind(crate::ErrorKind::Backup)?;
let tor_keys = interfaces let (network_keys, tor_keys) = Key::for_package(&ctx.secret_store, pkg_id)
.tor_keys(&mut ctx.secret_store.acquire().await?, pkg_id)
.await? .await?
.into_iter() .into_iter()
.map(|(id, key)| { .filter_map(|k| {
( let interface = k.interface().map(|(_, i)| i)?;
id, Some((
base32::encode(base32::Alphabet::RFC4648 { padding: true }, &key.as_bytes()), (interface.clone(), Base64(k.as_bytes())),
) (interface, Base32(k.tor_key().as_bytes())),
))
}) })
.collect(); .unzip();
let marketplace_url = crate::db::DatabaseModel::new() let marketplace_url = crate::db::DatabaseModel::new()
.package_data() .package_data()
.idx_model(pkg_id) .idx_model(pkg_id)
@@ -170,6 +175,7 @@ impl BackupActions {
outfile outfile
.write_all(&IoFormat::Cbor.to_vec(&BackupMetadata { .write_all(&IoFormat::Cbor.to_vec(&BackupMetadata {
timestamp, timestamp,
network_keys,
tor_keys, tor_keys,
marketplace_url, marketplace_url,
})?) })?)

121
backend/src/backup/os.rs Normal file
View File

@@ -0,0 +1,121 @@
use crate::account::AccountInfo;
use crate::hostname::{generate_hostname, generate_id, Hostname};
use crate::net::keys::Key;
use crate::util::serde::Base64;
use crate::Error;
use openssl::pkey::PKey;
use openssl::x509::X509;
use serde::{Deserialize, Serialize};
use serde_json::Value;
/// In-memory representation of the OS-level backup payload (`os-backup.cbor`):
/// the server's account information plus the UI database tree.
pub struct OsBackup {
    /// Server identity: id, hostname, password hash, network key, and root CA
    /// key/certificate (see `OsBackupV1::unproject` for what is persisted).
    pub account: AccountInfo,
    /// Arbitrary JSON blob holding the `ui` section of the database.
    pub ui: Value,
}
impl<'de> Deserialize<'de> for OsBackup {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let tagged = OsBackupSerDe::deserialize(deserializer)?;
match tagged.version {
0 => serde_json::from_value::<OsBackupV0>(tagged.rest)
.map_err(serde::de::Error::custom)?
.project()
.map_err(serde::de::Error::custom),
1 => serde_json::from_value::<OsBackupV1>(tagged.rest)
.map_err(serde::de::Error::custom)?
.project()
.map_err(serde::de::Error::custom),
v => Err(serde::de::Error::custom(&format!(
"Unknown backup version {v}"
))),
}
}
}
impl Serialize for OsBackup {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
OsBackupSerDe {
version: 1,
rest: serde_json::to_value(
&OsBackupV1::unproject(self).map_err(serde::ser::Error::custom)?,
)
.map_err(serde::ser::Error::custom)?,
}
.serialize(serializer)
}
}
/// Version-tagged envelope used on disk: `version` discriminates the schema,
/// and every remaining field is captured in `rest` for the version-specific
/// structs (`OsBackupV0`/`OsBackupV1`) to parse.
#[derive(Deserialize, Serialize)]
struct OsBackupSerDe {
    // Absent in the oldest backups, so it defaults to 0 (the legacy schema).
    #[serde(default)]
    version: usize,
    // `flatten` keeps the payload fields at the same level as `version`,
    // so old untagged backups still parse.
    #[serde(flatten)]
    rest: Value,
}
/// V0
/// Legacy on-disk schema (deserialize-only); projected via `OsBackupV0::project`.
// NOTE(review): `#[serde(rename = "kebab-case")]` renames the *container*,
// not the fields — `rename_all` was probably intended. As written, field
// names remain snake_case; confirm against real V0 backups before changing,
// since altering it would change which field names deserialize.
#[derive(Deserialize)]
#[serde(rename = "kebab-case")]
struct OsBackupV0 {
    // tor_key: Base32<[u8; 64]>,
    root_ca_key: String, // PEM Encoded OpenSSL Key
    root_ca_cert: String, // PEM Encoded OpenSSL X509 Certificate
    ui: Value, // JSON Value
}
impl OsBackupV0 {
    /// Converts the legacy V0 payload into the in-memory `OsBackup`.
    ///
    /// V0 backups carry no server identity or network key, so a fresh id,
    /// hostname, and key are generated here; the password is left at its
    /// default value (the caller is expected to overwrite it).
    ///
    /// Errors if either PEM blob fails to parse.
    fn project(self) -> Result<OsBackup, Error> {
        let root_ca_key = PKey::private_key_from_pem(self.root_ca_key.as_bytes())?;
        let root_ca_cert = X509::from_pem(self.root_ca_cert.as_bytes())?;
        let account = AccountInfo {
            server_id: generate_id(),
            hostname: generate_hostname(),
            password: Default::default(),
            key: Key::new(None),
            root_ca_key,
            root_ca_cert,
        };
        Ok(OsBackup {
            account,
            ui: self.ui,
        })
    }
}
/// V1
/// Current on-disk schema; converted to/from `OsBackup` by
/// `OsBackupV1::project` / `OsBackupV1::unproject`.
// NOTE(review): as in V0, `rename = "kebab-case"` should likely be
// `rename_all`; with the current attribute the fields serialize as
// snake_case. Serialize and deserialize share the attribute, so the format
// is self-consistent — but changing it later will break round-trips with
// already-written V1 backups. Decide before this format ships widely.
#[derive(Deserialize, Serialize)]
#[serde(rename = "kebab-case")]
struct OsBackupV1 {
    server_id: String, // uuidv4
    hostname: String, // embassy-<adjective>-<noun>
    net_key: Base64<[u8; 32]>, // Ed25519 Secret Key
    root_ca_key: String, // PEM Encoded OpenSSL Key
    root_ca_cert: String, // PEM Encoded OpenSSL X509 Certificate
    ui: Value, // JSON Value
    // TODO add more
}
impl OsBackupV1 {
fn project(self) -> Result<OsBackup, Error> {
Ok(OsBackup {
account: AccountInfo {
server_id: self.server_id,
hostname: Hostname(self.hostname),
password: Default::default(),
key: Key::from_bytes(None, self.net_key.0),
root_ca_key: PKey::private_key_from_pem(self.root_ca_key.as_bytes())?,
root_ca_cert: X509::from_pem(self.root_ca_cert.as_bytes())?,
},
ui: self.ui,
})
}
fn unproject(backup: &OsBackup) -> Result<Self, Error> {
Ok(Self {
server_id: backup.account.server_id.clone(),
hostname: backup.account.hostname.0.clone(),
net_key: Base64(backup.account.key.as_bytes()),
root_ca_key: String::from_utf8(backup.account.root_ca_key.private_key_to_pem_pkcs8()?)?,
root_ca_cert: String::from_utf8(backup.account.root_ca_cert.to_pem()?)?,
ui: backup.ui.clone(),
})
}
}

View File

@@ -11,12 +11,13 @@ use futures::{FutureExt, StreamExt};
use openssl::x509::X509; use openssl::x509::X509;
use patch_db::{DbHandle, PatchDbHandle}; use patch_db::{DbHandle, PatchDbHandle};
use rpc_toolkit::command; use rpc_toolkit::command;
use sqlx::Connection;
use tokio::fs::File; use tokio::fs::File;
use torut::onion::OnionAddressV3; use torut::onion::OnionAddressV3;
use tracing::instrument; use tracing::instrument;
use super::target::BackupTargetId; use super::target::BackupTargetId;
use crate::backup::backup_bulk::OsBackup; use crate::backup::os::OsBackup;
use crate::backup::BackupMetadata; use crate::backup::BackupMetadata;
use crate::context::rpc::RpcContextConfig; use crate::context::rpc::RpcContextConfig;
use crate::context::{RpcContext, SetupContext}; use crate::context::{RpcContext, SetupContext};
@@ -24,11 +25,10 @@ use crate::db::model::{PackageDataEntry, StaticFiles};
use crate::disk::mount::backup::{BackupMountGuard, PackageBackupMountGuard}; use crate::disk::mount::backup::{BackupMountGuard, PackageBackupMountGuard};
use crate::disk::mount::filesystem::ReadWrite; use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::TmpMountGuard; use crate::disk::mount::guard::TmpMountGuard;
use crate::hostname::{get_hostname, Hostname}; use crate::hostname::Hostname;
use crate::init::init; use crate::init::init;
use crate::install::progress::InstallProgress; use crate::install::progress::InstallProgress;
use crate::install::{download_install_s9pk, PKG_PUBLIC_DIR}; use crate::install::{download_install_s9pk, PKG_PUBLIC_DIR};
use crate::net::ssl::SslManager;
use crate::notifications::NotificationLevel; use crate::notifications::NotificationLevel;
use crate::s9pk::manifest::{Manifest, PackageId}; use crate::s9pk::manifest::{Manifest, PackageId};
use crate::s9pk::reader::S9pkReader; use crate::s9pk::reader::S9pkReader;
@@ -184,7 +184,7 @@ pub async fn recover_full_embassy(
.await?; .await?;
let os_backup_path = backup_guard.as_ref().join("os-backup.cbor"); let os_backup_path = backup_guard.as_ref().join("os-backup.cbor");
let os_backup: OsBackup = let mut os_backup: OsBackup =
IoFormat::Cbor.from_slice(&tokio::fs::read(&os_backup_path).await.with_ctx(|_| { IoFormat::Cbor.from_slice(&tokio::fs::read(&os_backup_path).await.with_ctx(|_| {
( (
crate::ErrorKind::Filesystem, crate::ErrorKind::Filesystem,
@@ -192,31 +192,17 @@ pub async fn recover_full_embassy(
) )
})?)?; })?)?;
let password = argon2::hash_encoded( os_backup.account.password = argon2::hash_encoded(
embassy_password.as_bytes(), embassy_password.as_bytes(),
&rand::random::<[u8; 16]>()[..], &rand::random::<[u8; 16]>()[..],
&argon2::Config::default(), &argon2::Config::default(),
) )
.with_kind(crate::ErrorKind::PasswordHashGeneration)?; .with_kind(crate::ErrorKind::PasswordHashGeneration)?;
let tor_key_bytes = os_backup.tor_key.as_bytes().to_vec();
let ssh_key_bytes = os_backup.ssh_key.to_bytes().to_vec();
let secret_store = ctx.secret_store().await?;
sqlx::query!(
"INSERT INTO account (id, password, tor_key, ssh_key) VALUES ($1, $2, $3, $4) ON CONFLICT (id) DO UPDATE SET password = $2, tor_key = $3, ssh_key = $4",
0,
password,
tor_key_bytes,
ssh_key_bytes,
)
.execute(&mut secret_store.acquire().await?)
.await?;
SslManager::import_root_ca( let secret_store = ctx.secret_store().await?;
secret_store.clone(),
os_backup.root_ca_key, os_backup.account.save(&secret_store).await?;
os_backup.root_ca_cert.clone(),
)
.await?;
secret_store.close().await; secret_store.close().await;
let cfg = RpcContextConfig::load(ctx.config_path.clone()).await?; let cfg = RpcContextConfig::load(ctx.config_path.clone()).await?;
@@ -224,12 +210,7 @@ pub async fn recover_full_embassy(
init(&cfg).await?; init(&cfg).await?;
let rpc_ctx = RpcContext::init(ctx.config_path.clone(), disk_guid.clone()).await?; let rpc_ctx = RpcContext::init(ctx.config_path.clone(), disk_guid.clone()).await?;
let mut db = rpc_ctx.db.handle();
let receipts = crate::hostname::HostNameReceipt::new(&mut db).await?;
let hostname = get_hostname(&mut db, &receipts).await?;
drop(db);
let mut db = rpc_ctx.db.handle(); let mut db = rpc_ctx.db.handle();
let ids = backup_guard let ids = backup_guard
@@ -274,9 +255,9 @@ pub async fn recover_full_embassy(
Ok(( Ok((
disk_guid, disk_guid,
hostname, os_backup.account.hostname,
os_backup.tor_key.public().get_onion_address(), os_backup.account.key.tor_address(),
os_backup.root_ca_cert, os_backup.account.root_ca_cert,
)) ))
} }
@@ -414,23 +395,32 @@ async fn restore_package<'a>(
metadata_path.display().to_string(), metadata_path.display().to_string(),
) )
})?)?; })?)?;
for (iface, key) in metadata.tor_keys {
let key_vec = base32::decode(base32::Alphabet::RFC4648 { padding: true }, &key) let mut secrets = ctx.secret_store.acquire().await?;
.ok_or_else(|| { let mut secrets_tx = secrets.begin().await?;
Error::new( for (iface, key) in metadata.network_keys {
eyre!("invalid base32 string"), let k = key.0.as_slice();
crate::ErrorKind::Deserialization,
)
})?;
sqlx::query!( sqlx::query!(
"INSERT INTO tor (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO UPDATE SET key = $3", "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING",
*id, *id,
*iface, *iface,
key_vec, k,
) )
.execute(&ctx.secret_store) .execute(&mut secrets_tx).await?;
.await?;
} }
// DEPRECATED
for (iface, key) in metadata.tor_keys {
let k = key.0.as_slice();
sqlx::query!(
"INSERT INTO tor (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING",
*id,
*iface,
k,
)
.execute(&mut secrets_tx).await?;
}
secrets_tx.commit().await?;
drop(secrets);
let len = tokio::fs::metadata(&s9pk_path) let len = tokio::fs::metadata(&s9pk_path)
.await .await

View File

@@ -8,13 +8,7 @@ use embassy::disk::fsck::RepairStrategy;
use embassy::disk::main::DEFAULT_PASSWORD; use embassy::disk::main::DEFAULT_PASSWORD;
use embassy::disk::REPAIR_DISK_PATH; use embassy::disk::REPAIR_DISK_PATH;
use embassy::init::STANDBY_MODE_PATH; use embassy::init::STANDBY_MODE_PATH;
use embassy::net::embassy_service_http_server::EmbassyServiceHTTPServer; use embassy::net::web_server::WebServer;
#[cfg(feature = "avahi")]
use embassy::net::mdns::MdnsController;
use embassy::net::net_utils::ResourceFqdn;
use embassy::net::static_server::{
diag_ui_file_router, install_ui_file_router, setup_ui_file_router,
};
use embassy::shutdown::Shutdown; use embassy::shutdown::Shutdown;
use embassy::sound::CHIME; use embassy::sound::CHIME;
use embassy::util::logger::EmbassyLogger; use embassy::util::logger::EmbassyLogger;
@@ -26,30 +20,9 @@ use tracing::instrument;
#[instrument] #[instrument]
async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> { async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
if tokio::fs::metadata("/cdrom").await.is_ok() { if tokio::fs::metadata("/cdrom").await.is_ok() {
#[cfg(feature = "avahi")]
let _mdns = MdnsController::init().await?;
let ctx = InstallContext::init(cfg_path).await?; let ctx = InstallContext::init(cfg_path).await?;
let embassy_ip_fqdn: ResourceFqdn = ResourceFqdn::IpAddr; let server = WebServer::install(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
let embassy_fqdn: ResourceFqdn = "embassy.local".parse()?;
let localhost_fqdn = ResourceFqdn::LocalHost;
let install_ui_handler = install_ui_file_router(ctx.clone()).await?;
let mut install_http_server =
EmbassyServiceHTTPServer::new([0, 0, 0, 0].into(), 80, None).await?;
install_http_server
.add_svc_handler_mapping(embassy_ip_fqdn, install_ui_handler.clone())
.await?;
install_http_server
.add_svc_handler_mapping(embassy_fqdn, install_ui_handler.clone())
.await?;
install_http_server
.add_svc_handler_mapping(localhost_fqdn, install_ui_handler.clone())
.await?;
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
CHIME.play().await?; CHIME.play().await?;
@@ -59,7 +32,9 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
.recv() .recv()
.await .await
.expect("context dropped"); .expect("context dropped");
install_http_server.shutdown.send(()).unwrap();
server.shutdown().await;
Command::new("reboot") Command::new("reboot")
.invoke(embassy::ErrorKind::Unknown) .invoke(embassy::ErrorKind::Unknown)
.await?; .await?;
@@ -67,29 +42,9 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
.await .await
.is_err() .is_err()
{ {
#[cfg(feature = "avahi")]
let _mdns = MdnsController::init().await?;
let ctx = SetupContext::init(cfg_path).await?; let ctx = SetupContext::init(cfg_path).await?;
let embassy_ip_fqdn: ResourceFqdn = ResourceFqdn::IpAddr; let server = WebServer::setup(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
let embassy_fqdn: ResourceFqdn = "embassy.local".parse()?;
let localhost_fqdn = ResourceFqdn::LocalHost;
let setup_ui_handler = setup_ui_file_router(ctx.clone()).await?;
let mut setup_http_server =
EmbassyServiceHTTPServer::new([0, 0, 0, 0].into(), 80, None).await?;
setup_http_server
.add_svc_handler_mapping(embassy_ip_fqdn, setup_ui_handler.clone())
.await?;
setup_http_server
.add_svc_handler_mapping(embassy_fqdn, setup_ui_handler.clone())
.await?;
setup_http_server
.add_svc_handler_mapping(localhost_fqdn, setup_ui_handler.clone())
.await?;
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
CHIME.play().await?; CHIME.play().await?;
@@ -98,7 +53,9 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
.recv() .recv()
.await .await
.expect("context dropped"); .expect("context dropped");
setup_http_server.shutdown.send(()).unwrap();
server.shutdown().await;
tokio::task::yield_now().await; tokio::task::yield_now().await;
if let Err(e) = Command::new("killall") if let Err(e) = Command::new("killall")
.arg("firefox-esr") .arg("firefox-esr")
@@ -178,8 +135,6 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
tracing::error!("{}", e.source); tracing::error!("{}", e.source);
tracing::debug!("{}", e.source); tracing::debug!("{}", e.source);
embassy::sound::BEETHOVEN.play().await?; embassy::sound::BEETHOVEN.play().await?;
#[cfg(feature = "avahi")]
let _mdns = MdnsController::init().await?;
let ctx = DiagnosticContext::init( let ctx = DiagnosticContext::init(
cfg_path, cfg_path,
@@ -200,28 +155,12 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
) )
.await?; .await?;
let embassy_ip_fqdn: ResourceFqdn = ResourceFqdn::IpAddr; let server = WebServer::diagnostic(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
let embassy_fqdn: ResourceFqdn = "embassy.local".parse()?;
let localhost_fqdn = ResourceFqdn::LocalHost;
let diag_ui_handler = diag_ui_file_router(ctx.clone()).await?;
let mut diag_http_server =
EmbassyServiceHTTPServer::new([0, 0, 0, 0].into(), 80, None).await?;
diag_http_server
.add_svc_handler_mapping(embassy_ip_fqdn, diag_ui_handler.clone())
.await?;
diag_http_server
.add_svc_handler_mapping(embassy_fqdn, diag_ui_handler.clone())
.await?;
diag_http_server
.add_svc_handler_mapping(localhost_fqdn, diag_ui_handler.clone())
.await?;
let shutdown = ctx.shutdown.subscribe().recv().await.unwrap(); let shutdown = ctx.shutdown.subscribe().recv().await.unwrap();
diag_http_server.shutdown.send(()).unwrap();
server.shutdown().await;
Ok(shutdown) Ok(shutdown)
} }
.await .await

View File

@@ -3,12 +3,7 @@ use std::sync::Arc;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use embassy::context::{DiagnosticContext, RpcContext}; use embassy::context::{DiagnosticContext, RpcContext};
use embassy::net::embassy_service_http_server::EmbassyServiceHTTPServer; use embassy::net::web_server::WebServer;
#[cfg(feature = "avahi")]
use embassy::net::mdns::MdnsController;
use embassy::net::net_controller::NetController;
use embassy::net::net_utils::ResourceFqdn;
use embassy::net::static_server::diag_ui_file_router;
use embassy::shutdown::Shutdown; use embassy::shutdown::Shutdown;
use embassy::system::launch_metrics_task; use embassy::system::launch_metrics_task;
use embassy::util::logger::EmbassyLogger; use embassy::util::logger::EmbassyLogger;
@@ -19,7 +14,7 @@ use tracing::instrument;
#[instrument] #[instrument]
async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> { async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> {
let (rpc_ctx, shutdown) = { let (rpc_ctx, server, shutdown) = {
let rpc_ctx = RpcContext::init( let rpc_ctx = RpcContext::init(
cfg_path, cfg_path,
Arc::new( Arc::new(
@@ -30,7 +25,8 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
), ),
) )
.await?; .await?;
NetController::setup_embassy_ui(rpc_ctx.clone()).await?; embassy::hostname::sync_hostname(&*rpc_ctx.account.read().await).await?;
let server = WebServer::main(([0, 0, 0, 0], 80).into(), rpc_ctx.clone()).await?;
let mut shutdown_recv = rpc_ctx.shutdown.subscribe(); let mut shutdown_recv = rpc_ctx.shutdown.subscribe();
@@ -62,12 +58,6 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
.expect("send shutdown signal"); .expect("send shutdown signal");
}); });
{
let mut db = rpc_ctx.db.handle();
let receipts = embassy::context::rpc::RpcSetHostNameReceipts::new(&mut db).await?;
embassy::hostname::sync_hostname(&mut db, &receipts.hostname_receipts).await?;
}
let metrics_ctx = rpc_ctx.clone(); let metrics_ctx = rpc_ctx.clone();
let metrics_task = tokio::spawn(async move { let metrics_task = tokio::spawn(async move {
launch_metrics_task(&metrics_ctx.metrics_cache, || { launch_metrics_task(&metrics_ctx.metrics_cache, || {
@@ -95,8 +85,9 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
sig_handler.abort(); sig_handler.abort();
(rpc_ctx, shutdown) (rpc_ctx, server, shutdown)
}; };
server.shutdown().await;
rpc_ctx.shutdown().await?; rpc_ctx.shutdown().await?;
Ok(shutdown) Ok(shutdown)
@@ -125,12 +116,10 @@ fn main() {
match inner_main(cfg_path.clone()).await { match inner_main(cfg_path.clone()).await {
Ok(a) => Ok(a), Ok(a) => Ok(a),
Err(e) => { Err(e) => {
(|| async { async {
tracing::error!("{}", e.source); tracing::error!("{}", e.source);
tracing::debug!("{:?}", e.source); tracing::debug!("{:?}", e.source);
embassy::sound::BEETHOVEN.play().await?; embassy::sound::BEETHOVEN.play().await?;
#[cfg(feature = "avahi")]
let _mdns = MdnsController::init().await?;
let ctx = DiagnosticContext::init( let ctx = DiagnosticContext::init(
cfg_path, cfg_path,
if tokio::fs::metadata("/media/embassy/config/disk.guid") if tokio::fs::metadata("/media/embassy/config/disk.guid")
@@ -150,24 +139,18 @@ fn main() {
) )
.await?; .await?;
let embassy_ip_fqdn: ResourceFqdn = ResourceFqdn::IpAddr; let server =
let embassy_fqdn: ResourceFqdn = "embassy.local".parse()?; WebServer::diagnostic(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
let diag_ui_handler = diag_ui_file_router(ctx.clone()).await?;
let mut diag_http_server =
EmbassyServiceHTTPServer::new([0, 0, 0, 0].into(), 80, None).await?;
diag_http_server
.add_svc_handler_mapping(embassy_ip_fqdn, diag_ui_handler.clone())
.await?;
diag_http_server
.add_svc_handler_mapping(embassy_fqdn, diag_ui_handler)
.await?;
let mut shutdown = ctx.shutdown.subscribe(); let mut shutdown = ctx.shutdown.subscribe();
shutdown.recv().await.with_kind(crate::ErrorKind::Unknown) let shutdown =
})() shutdown.recv().await.with_kind(crate::ErrorKind::Unknown)?;
server.shutdown().await;
Ok::<_, Error>(shutdown)
}
.await .await
} }
} }

View File

@@ -1,6 +1,7 @@
use std::collections::{BTreeMap, BTreeSet}; use std::collections::{BTreeMap, BTreeSet};
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use models::ImageId;
use nix::sys::signal::Signal; use nix::sys::signal::Signal;
use patch_db::HasModel; use patch_db::HasModel;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@@ -9,7 +10,6 @@ use tracing::instrument;
use super::{Config, ConfigSpec}; use super::{Config, ConfigSpec};
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::dependencies::Dependencies; use crate::dependencies::Dependencies;
use crate::id::ImageId;
use crate::procedure::docker::DockerContainers; use crate::procedure::docker::DockerContainers;
use crate::procedure::{PackageProcedure, ProcedureName}; use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;

View File

@@ -25,6 +25,7 @@ use super::{Config, MatchError, NoMatchWithPath, TimeoutError, TypeOf};
use crate::config::ConfigurationError; use crate::config::ConfigurationError;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::net::interface::InterfaceId; use crate::net::interface::InterfaceId;
use crate::net::keys::Key;
use crate::s9pk::manifest::{Manifest, PackageId}; use crate::s9pk::manifest::{Manifest, PackageId};
use crate::Error; use crate::Error;
@@ -2059,22 +2060,19 @@ impl TorKeyPointer {
ValueSpecPointer::Package(PackagePointerSpec::TorKey(self.clone())), ValueSpecPointer::Package(PackagePointerSpec::TorKey(self.clone())),
)); ));
} }
let x = sqlx::query!( let key = Key::for_interface(
"SELECT key FROM tor WHERE package = $1 AND interface = $2", &mut secrets
*self.package_id, .acquire()
*self.interface
)
.fetch_optional(secrets)
.await .await
.map_err(|e| ConfigurationError::SystemError(e.into()))?; .map_err(|e| ConfigurationError::SystemError(e.into()))?,
if let Some(x) = x { Some((self.package_id.clone(), self.interface.clone())),
)
.await
.map_err(ConfigurationError::SystemError)?;
Ok(Value::String(base32::encode( Ok(Value::String(base32::encode(
base32::Alphabet::RFC4648 { padding: false }, base32::Alphabet::RFC4648 { padding: false },
&x.key, &key.tor_key().as_bytes(),
))) )))
} else {
Ok(Value::Null)
}
} }
} }
impl fmt::Display for TorKeyPointer { impl fmt::Display for TorKeyPointer {

View File

@@ -1,4 +1,4 @@
use std::collections::{BTreeMap, VecDeque}; use std::collections::BTreeMap;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4}; use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::ops::Deref; use std::ops::Deref;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
@@ -10,7 +10,7 @@ use bollard::Docker;
use helpers::to_tmp_path; use helpers::to_tmp_path;
use josekit::jwk::Jwk; use josekit::jwk::Jwk;
use patch_db::json_ptr::JsonPointer; use patch_db::json_ptr::JsonPointer;
use patch_db::{DbHandle, LockReceipt, LockType, PatchDb, Revision}; use patch_db::{DbHandle, LockReceipt, LockType, PatchDb};
use reqwest::Url; use reqwest::Url;
use rpc_toolkit::Context; use rpc_toolkit::Context;
use serde::Deserialize; use serde::Deserialize;
@@ -19,10 +19,10 @@ use sqlx::PgPool;
use tokio::sync::{broadcast, oneshot, Mutex, RwLock}; use tokio::sync::{broadcast, oneshot, Mutex, RwLock};
use tracing::instrument; use tracing::instrument;
use crate::account::AccountInfo;
use crate::core::rpc_continuations::{RequestGuid, RestHandler, RpcContinuation}; use crate::core::rpc_continuations::{RequestGuid, RestHandler, RpcContinuation};
use crate::db::model::{Database, InstalledPackageDataEntry, PackageDataEntry}; use crate::db::model::{Database, InstalledPackageDataEntry, PackageDataEntry};
use crate::disk::OsPartitionInfo; use crate::disk::OsPartitionInfo;
use crate::hostname::HostNameReceipt;
use crate::init::{init_postgres, pgloader}; use crate::init::{init_postgres, pgloader};
use crate::install::cleanup::{cleanup_failed, uninstall, CleanupFailedReceipts}; use crate::install::cleanup::{cleanup_failed, uninstall, CleanupFailedReceipts};
use crate::manager::ManagerMap; use crate::manager::ManagerMap;
@@ -31,7 +31,6 @@ use crate::net::net_controller::NetController;
use crate::net::ssl::SslManager; use crate::net::ssl::SslManager;
use crate::net::wifi::WpaCli; use crate::net::wifi::WpaCli;
use crate::notifications::NotificationManager; use crate::notifications::NotificationManager;
use crate::setup::password_hash;
use crate::shutdown::Shutdown; use crate::shutdown::Shutdown;
use crate::status::{MainStatus, Status}; use crate::status::{MainStatus, Status};
use crate::util::config::load_config_from_paths; use crate::util::config::load_config_from_paths;
@@ -76,25 +75,13 @@ impl RpcContextConfig {
.as_deref() .as_deref()
.unwrap_or_else(|| Path::new("/embassy-data")) .unwrap_or_else(|| Path::new("/embassy-data"))
} }
pub async fn db(&self, secret_store: &PgPool) -> Result<PatchDb, Error> { pub async fn db(&self, account: &AccountInfo) -> Result<PatchDb, Error> {
let db_path = self.datadir().join("main").join("embassy.db"); let db_path = self.datadir().join("main").join("embassy.db");
let db = PatchDb::open(&db_path) let db = PatchDb::open(&db_path)
.await .await
.with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?; .with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
if !db.exists(&<JsonPointer>::default()).await { if !db.exists(&<JsonPointer>::default()).await {
db.put( db.put(&<JsonPointer>::default(), &Database::init(account))
&<JsonPointer>::default(),
&Database::init(
&crate::net::tor::os_key(&mut secret_store.acquire().await?).await?,
password_hash(&mut secret_store.acquire().await?).await?,
&crate::ssh::os_key(&mut secret_store.acquire().await?).await?,
&SslManager::init(secret_store.clone(), &mut db.handle())
.await?
.export_root_ca()
.await?
.1,
),
)
.await?; .await?;
} }
Ok(db) Ok(db)
@@ -131,11 +118,10 @@ pub struct RpcContextSeed {
pub disk_guid: Arc<String>, pub disk_guid: Arc<String>,
pub db: PatchDb, pub db: PatchDb,
pub secret_store: PgPool, pub secret_store: PgPool,
pub account: RwLock<AccountInfo>,
pub docker: Docker, pub docker: Docker,
pub net_controller: NetController, pub net_controller: Arc<NetController>,
pub managers: ManagerMap, pub managers: ManagerMap,
pub revision_cache_size: usize,
pub revision_cache: RwLock<VecDeque<Arc<Revision>>>,
pub metrics_cache: RwLock<Option<crate::system::Metrics>>, pub metrics_cache: RwLock<Option<crate::system::Metrics>>,
pub shutdown: broadcast::Sender<Option<Shutdown>>, pub shutdown: broadcast::Sender<Option<Shutdown>>,
pub tor_socks: SocketAddr, pub tor_socks: SocketAddr,
@@ -184,37 +170,6 @@ impl RpcCleanReceipts {
} }
} }
pub struct RpcSetHostNameReceipts {
pub hostname_receipts: HostNameReceipt,
#[allow(dead_code)]
server_info: LockReceipt<crate::db::model::ServerInfo, ()>,
}
impl RpcSetHostNameReceipts {
pub async fn new(db: &'_ mut impl DbHandle) -> Result<Self, Error> {
let mut locks = Vec::new();
let setup = Self::setup(&mut locks);
Ok(setup(&db.lock_all(locks).await?)?)
}
pub fn setup(
locks: &mut Vec<patch_db::LockTargetId>,
) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
let hostname_receipts = HostNameReceipt::setup(locks);
let server_info = crate::db::DatabaseModel::new()
.server_info()
.make_locker(LockType::Read)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
hostname_receipts: hostname_receipts(skeleton_key)?,
server_info: server_info.verify(skeleton_key)?,
})
}
}
}
#[derive(Clone)] #[derive(Clone)]
pub struct RpcContext(Arc<RpcContextSeed>); pub struct RpcContext(Arc<RpcContextSeed>);
impl RpcContext { impl RpcContext {
@@ -232,25 +187,26 @@ impl RpcContext {
let (shutdown, _) = tokio::sync::broadcast::channel(1); let (shutdown, _) = tokio::sync::broadcast::channel(1);
let secret_store = base.secret_store().await?; let secret_store = base.secret_store().await?;
tracing::info!("Opened Pg DB"); tracing::info!("Opened Pg DB");
let db = base.db(&secret_store).await?; let account = AccountInfo::load(&secret_store).await?;
let db = base.db(&account).await?;
tracing::info!("Opened PatchDB"); tracing::info!("Opened PatchDB");
let mut docker = Docker::connect_with_unix_defaults()?; let mut docker = Docker::connect_with_unix_defaults()?;
docker.set_timeout(Duration::from_secs(600)); docker.set_timeout(Duration::from_secs(600));
tracing::info!("Connected to Docker"); tracing::info!("Connected to Docker");
let net_controller = NetController::init( let net_controller = Arc::new(
([0, 0, 0, 0], 80).into(), NetController::init(
crate::net::tor::os_key(&mut secret_store.acquire().await?).await?,
base.tor_control base.tor_control
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))), .unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
base.dns_bind base.dns_bind
.as_ref() .as_ref()
.map(|v| v.as_slice()) .map(|v| v.as_slice())
.unwrap_or(&[SocketAddr::from(([127, 0, 0, 1], 53))]), .unwrap_or(&[SocketAddr::from(([127, 0, 0, 1], 53))]),
secret_store.clone(), SslManager::new(&account)?,
&mut db.handle(), &account.hostname,
None, &account.key,
) )
.await?; .await?,
);
tracing::info!("Initialized Net Controller"); tracing::info!("Initialized Net Controller");
let managers = ManagerMap::default(); let managers = ManagerMap::default();
let metrics_cache = RwLock::new(None); let metrics_cache = RwLock::new(None);
@@ -265,11 +221,10 @@ impl RpcContext {
disk_guid, disk_guid,
db, db,
secret_store, secret_store,
account: RwLock::new(account),
docker, docker,
net_controller, net_controller,
managers, managers,
revision_cache_size: base.revision_cache_size.unwrap_or(512),
revision_cache: RwLock::new(VecDeque::new()),
metrics_cache, metrics_cache,
shutdown, shutdown,
tor_socks: tor_proxy, tor_socks: tor_proxy,

View File

@@ -14,11 +14,11 @@ use tokio::sync::broadcast::Sender;
use tokio::sync::RwLock; use tokio::sync::RwLock;
use tracing::instrument; use tracing::instrument;
use crate::account::AccountInfo;
use crate::db::model::Database; use crate::db::model::Database;
use crate::disk::OsPartitionInfo; use crate::disk::OsPartitionInfo;
use crate::init::{init_postgres, pgloader}; use crate::init::{init_postgres, pgloader};
use crate::net::ssl::SslManager; use crate::setup::SetupStatus;
use crate::setup::{password_hash, SetupStatus};
use crate::util::config::load_config_from_paths; use crate::util::config::load_config_from_paths;
use crate::{Error, ResultExt}; use crate::{Error, ResultExt};
@@ -111,25 +111,13 @@ impl SetupContext {
}))) })))
} }
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn db(&self, secret_store: &PgPool) -> Result<PatchDb, Error> { pub async fn db(&self, account: &AccountInfo) -> Result<PatchDb, Error> {
let db_path = self.datadir.join("main").join("embassy.db"); let db_path = self.datadir.join("main").join("embassy.db");
let db = PatchDb::open(&db_path) let db = PatchDb::open(&db_path)
.await .await
.with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?; .with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
if !db.exists(&<JsonPointer>::default()).await { if !db.exists(&<JsonPointer>::default()).await {
db.put( db.put(&<JsonPointer>::default(), &Database::init(account))
&<JsonPointer>::default(),
&Database::init(
&crate::net::tor::os_key(&mut secret_store.acquire().await?).await?,
password_hash(&mut secret_store.acquire().await?).await?,
&crate::ssh::os_key(&mut secret_store.acquire().await?).await?,
&SslManager::init(secret_store.clone(), &mut db.handle())
.await?
.export_root_ca()
.await?
.1,
),
)
.await?; .await?;
} }
Ok(db) Ok(db)

View File

@@ -4,21 +4,19 @@ use std::sync::Arc;
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
use emver::VersionRange; use emver::VersionRange;
use ipnet::{Ipv4Net, Ipv6Net};
use isocountry::CountryCode; use isocountry::CountryCode;
use itertools::Itertools; use itertools::Itertools;
use openssl::hash::MessageDigest; use openssl::hash::MessageDigest;
use openssl::x509::X509;
use patch_db::json_ptr::JsonPointer; use patch_db::json_ptr::JsonPointer;
use patch_db::{HasModel, Map, MapModel, OptionModel}; use patch_db::{HasModel, Map, MapModel, OptionModel};
use reqwest::Url; use reqwest::Url;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use serde_json::Value; use serde_json::Value;
use ssh_key::private::Ed25519PrivateKey;
use ssh_key::public::Ed25519PublicKey; use ssh_key::public::Ed25519PublicKey;
use torut::onion::TorSecretKeyV3;
use crate::account::AccountInfo;
use crate::config::spec::{PackagePointerSpec, SystemPointerSpec}; use crate::config::spec::{PackagePointerSpec, SystemPointerSpec};
use crate::hostname::{generate_hostname, generate_id};
use crate::install::progress::InstallProgress; use crate::install::progress::InstallProgress;
use crate::net::interface::InterfaceId; use crate::net::interface::InterfaceId;
use crate::net::net_utils::{get_iface_ipv4_addr, get_iface_ipv6_addr}; use crate::net::net_utils::{get_iface_ipv4_addr, get_iface_ipv6_addr};
@@ -39,26 +37,19 @@ pub struct Database {
pub ui: Value, pub ui: Value,
} }
impl Database { impl Database {
pub fn init( pub fn init(account: &AccountInfo) -> Self {
tor_key: &TorSecretKeyV3, let lan_address = account.hostname.lan_address().parse().unwrap();
password_hash: String,
ssh_key: &Ed25519PrivateKey,
cert: &X509,
) -> Self {
let id = generate_id();
let my_hostname = generate_hostname();
let lan_address = my_hostname.lan_address().parse().unwrap();
// TODO // TODO
Database { Database {
server_info: ServerInfo { server_info: ServerInfo {
id, id: account.server_id.clone(),
version: Current::new().semver().into(), version: Current::new().semver().into(),
hostname: Some(my_hostname.0), hostname: Some(account.hostname.no_dot_host_name()),
last_backup: None, last_backup: None,
last_wifi_region: None, last_wifi_region: None,
eos_version_compat: Current::new().compat().clone(), eos_version_compat: Current::new().compat().clone(),
lan_address, lan_address,
tor_address: format!("http://{}", tor_key.public().get_onion_address()) tor_address: format!("http://{}", account.key.tor_address())
.parse() .parse()
.unwrap(), .unwrap(),
ip_info: BTreeMap::new(), ip_info: BTreeMap::new(),
@@ -77,11 +68,12 @@ impl Database {
tor: Vec::new(), tor: Vec::new(),
clearnet: Vec::new(), clearnet: Vec::new(),
}, },
password_hash, password_hash: account.password.clone(),
pubkey: ssh_key::PublicKey::from(Ed25519PublicKey::from(ssh_key)) pubkey: ssh_key::PublicKey::from(Ed25519PublicKey::from(&account.key.ssh_key()))
.to_openssh() .to_openssh()
.unwrap(), .unwrap(),
ca_fingerprint: cert ca_fingerprint: account
.root_ca_cert
.digest(MessageDigest::sha256()) .digest(MessageDigest::sha256())
.unwrap() .unwrap()
.iter() .iter()
@@ -130,14 +122,20 @@ pub struct ServerInfo {
#[derive(Debug, Deserialize, Serialize, HasModel)] #[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
pub struct IpInfo { pub struct IpInfo {
ipv4: Option<Ipv4Addr>, pub ipv4_range: Option<Ipv4Net>,
ipv6: Option<Ipv6Addr>, pub ipv4: Option<Ipv4Addr>,
pub ipv6_range: Option<Ipv6Net>,
pub ipv6: Option<Ipv6Addr>,
} }
impl IpInfo { impl IpInfo {
pub async fn for_interface(iface: &str) -> Result<Self, Error> { pub async fn for_interface(iface: &str) -> Result<Self, Error> {
let (ipv4, ipv4_range) = get_iface_ipv4_addr(iface).await?.unzip();
let (ipv6, ipv6_range) = get_iface_ipv6_addr(iface).await?.unzip();
Ok(Self { Ok(Self {
ipv4: get_iface_ipv4_addr(iface).await?, ipv4_range,
ipv6: get_iface_ipv6_addr(iface).await?, ipv4,
ipv6_range,
ipv6,
}) })
} }
} }

View File

@@ -1,10 +1,9 @@
use patch_db::DbHandle; use patch_db::DbHandle;
use rand::{thread_rng, Rng}; use rand::{thread_rng, Rng};
use sqlx::Connection;
use tokio::process::Command; use tokio::process::Command;
use tracing::instrument; use tracing::instrument;
use crate::context::RpcContext; use crate::account::AccountInfo;
use crate::util::Invoke; use crate::util::Invoke;
use crate::{Error, ErrorKind}; use crate::{Error, ErrorKind};
#[derive(Clone, serde::Deserialize, serde::Serialize, Debug)] #[derive(Clone, serde::Deserialize, serde::Serialize, Debug)]
@@ -33,33 +32,11 @@ impl Hostname {
} }
} }
pub async fn get_current_ip(eth: String) -> Result<String, Error> {
let cmd = format!(r"ifconfig {} | awk '/inet / {{print $2}}'", eth);
let out = Command::new("bash")
.arg("-c")
.arg(cmd)
.invoke(ErrorKind::ParseSysInfo)
.await?;
let out_string = String::from_utf8(out)?;
Ok(out_string.trim().to_owned())
}
pub async fn get_embassyd_tor_addr(rpc_ctx: RpcContext) -> Result<String, Error> {
let mut secrets_handle = rpc_ctx.secret_store.acquire().await?;
let mut secrets_tx = secrets_handle.begin().await?;
let tor_key = crate::net::tor::os_key(&mut secrets_tx).await?;
Ok(tor_key.public().get_onion_address().to_string())
}
pub fn generate_hostname() -> Hostname { pub fn generate_hostname() -> Hostname {
let mut rng = thread_rng(); let mut rng = thread_rng();
let adjective = &ADJECTIVES[rng.gen_range(0..ADJECTIVES.len())]; let adjective = &ADJECTIVES[rng.gen_range(0..ADJECTIVES.len())];
let noun = &NOUNS[rng.gen_range(0..NOUNS.len())]; let noun = &NOUNS[rng.gen_range(0..NOUNS.len())];
Hostname(format!("embassy-{adjective}-{noun}")) Hostname(format!("{adjective}-{noun}"))
} }
pub fn generate_id() -> String { pub fn generate_id() -> String {
@@ -87,83 +64,9 @@ pub async fn set_hostname(hostname: &Hostname) -> Result<(), Error> {
Ok(()) Ok(())
} }
#[instrument(skip(handle, receipts))] #[instrument]
pub async fn get_id<Db: DbHandle>( pub async fn sync_hostname(account: &AccountInfo) -> Result<(), Error> {
handle: &mut Db, set_hostname(&account.hostname).await?;
receipts: &HostNameReceipt,
) -> Result<String, Error> {
let id = receipts.id.get(handle).await?;
Ok(id)
}
pub async fn get_hostname<Db: DbHandle>(
handle: &mut Db,
receipts: &HostNameReceipt,
) -> Result<Hostname, Error> {
if let Ok(hostname) = receipts.hostname.get(handle).await {
if let Some(hostname) = hostname.to_owned() {
return Ok(Hostname(hostname));
}
}
let id = get_id(handle, receipts).await?;
if id.len() != 8 {
return Ok(generate_hostname());
}
return Ok(Hostname(format!("embassy-{}", id)));
}
pub async fn ensure_hostname_is_set<Db: DbHandle>(
handle: &mut Db,
receipts: &HostNameReceipt,
) -> Result<(), Error> {
let hostname = get_hostname(handle, &receipts).await?;
receipts.hostname.set(handle, Some(hostname.0)).await?;
Ok(())
}
#[derive(Clone)]
pub struct HostNameReceipt {
hostname: patch_db::LockReceipt<Option<String>, ()>,
pub id: patch_db::LockReceipt<String, ()>,
}
impl HostNameReceipt {
pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
let mut locks = Vec::new();
let setup = Self::setup(&mut locks);
setup(&db.lock_all(locks).await?)
}
pub fn setup(
locks: &mut Vec<patch_db::LockTargetId>,
) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
use patch_db::LockType;
let hostname = crate::db::DatabaseModel::new()
.server_info()
.hostname()
.make_locker(LockType::Write)
.add_to_keys(locks);
let id = crate::db::DatabaseModel::new()
.server_info()
.id()
.make_locker(LockType::Write)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
hostname: hostname.verify(skeleton_key)?,
id: id.verify(skeleton_key)?,
})
}
}
}
#[instrument(skip(handle, receipts))]
pub async fn sync_hostname<Db: DbHandle>(
handle: &mut Db,
receipts: &HostNameReceipt,
) -> Result<(), Error> {
set_hostname(&get_hostname(handle, receipts).await?).await?;
Command::new("systemctl") Command::new("systemctl")
.arg("restart") .arg("restart")
.arg("avahi-daemon") .arg("avahi-daemon")

View File

@@ -13,6 +13,7 @@ use rand::random;
use sqlx::{Pool, Postgres}; use sqlx::{Pool, Postgres};
use tokio::process::Command; use tokio::process::Command;
use crate::account::AccountInfo;
use crate::context::rpc::RpcContextConfig; use crate::context::rpc::RpcContextConfig;
use crate::db::model::{IpInfo, ServerStatus}; use crate::db::model::{IpInfo, ServerStatus};
use crate::install::PKG_ARCHIVE_DIR; use crate::install::PKG_ARCHIVE_DIR;
@@ -240,7 +241,8 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
crate::ssh::sync_keys_from_db(&secret_store, "/home/start9/.ssh/authorized_keys").await?; crate::ssh::sync_keys_from_db(&secret_store, "/home/start9/.ssh/authorized_keys").await?;
tracing::info!("Synced SSH Keys"); tracing::info!("Synced SSH Keys");
let db = cfg.db(&secret_store).await?; let account = AccountInfo::load(&secret_store).await?;
let db = cfg.db(&account).await?;
tracing::info!("Opened PatchDB"); tracing::info!("Opened PatchDB");
let mut handle = db.handle(); let mut handle = db.handle();
crate::db::DatabaseModel::new() crate::db::DatabaseModel::new()
@@ -249,6 +251,16 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
.await?; .await?;
let receipts = InitReceipts::new(&mut handle).await?; let receipts = InitReceipts::new(&mut handle).await?;
// write to ca cert store
tokio::fs::write(
"/usr/local/share/ca-certificates/embassy-root-ca.crt",
account.root_ca_cert.to_pem()?,
)
.await?;
Command::new("update-ca-certificates")
.invoke(crate::ErrorKind::OpenSsl)
.await?;
if let Some(wifi_interface) = &cfg.wifi_interface { if let Some(wifi_interface) = &cfg.wifi_interface {
crate::net::wifi::synchronize_wpa_supplicant_conf( crate::net::wifi::synchronize_wpa_supplicant_conf(
&cfg.datadir().join("main"), &cfg.datadir().join("main"),
@@ -392,7 +404,7 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
.set(&mut handle, time().await?) .set(&mut handle, time().await?)
.await?; .await?;
crate::version::init(&mut handle, &receipts).await?; crate::version::init(&mut handle, &secret_store, &receipts).await?;
if should_rebuild { if should_rebuild {
match tokio::fs::remove_file(SYSTEM_REBUILD_PATH).await { match tokio::fs::remove_file(SYSTEM_REBUILD_PATH).await {

View File

@@ -45,7 +45,7 @@ use crate::s9pk::reader::S9pkReader;
use crate::status::{MainStatus, Status}; use crate::status::{MainStatus, Status};
use crate::util::io::{copy_and_shutdown, response_to_reader}; use crate::util::io::{copy_and_shutdown, response_to_reader};
use crate::util::serde::{display_serializable, Port}; use crate::util::serde::{display_serializable, Port};
use crate::util::{display_none, AsyncFileExt, Version}; use crate::util::{assure_send, display_none, AsyncFileExt, Version};
use crate::version::{Current, VersionT}; use crate::version::{Current, VersionT};
use crate::volume::{asset_dir, script_dir}; use crate::volume::{asset_dir, script_dir};
use crate::{Error, ErrorKind, ResultExt}; use crate::{Error, ErrorKind, ResultExt};
@@ -1116,13 +1116,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
tracing::info!("Install {}@{}: Installed interfaces", pkg_id, version); tracing::info!("Install {}@{}: Installed interfaces", pkg_id, version);
tracing::info!("Install {}@{}: Creating manager", pkg_id, version); tracing::info!("Install {}@{}: Creating manager", pkg_id, version);
ctx.managers ctx.managers.add(ctx.clone(), manifest.clone()).await?;
.add(
ctx.clone(),
manifest.clone(),
manifest.interfaces.tor_keys(&mut sql_tx, pkg_id).await?,
)
.await?;
tracing::info!("Install {}@{}: Created manager", pkg_id, version); tracing::info!("Install {}@{}: Created manager", pkg_id, version);
let static_files = StaticFiles::local(pkg_id, version, manifest.assets.icon_type()); let static_files = StaticFiles::local(pkg_id, version, manifest.assets.icon_type());

View File

@@ -15,6 +15,7 @@ lazy_static::lazy_static! {
}; };
} }
pub mod account;
pub mod action; pub mod action;
pub mod auth; pub mod auth;
pub mod backup; pub mod backup;
@@ -29,7 +30,6 @@ pub mod diagnostic;
pub mod disk; pub mod disk;
pub mod error; pub mod error;
pub mod hostname; pub mod hostname;
pub mod id;
pub mod init; pub mod init;
pub mod inspect; pub mod inspect;
pub mod install; pub mod install;

View File

@@ -12,23 +12,22 @@ use embassy_container_init::{ProcessGroupId, SignalGroupParams};
use helpers::UnixRpcClient; use helpers::UnixRpcClient;
use nix::sys::signal::Signal; use nix::sys::signal::Signal;
use patch_db::DbHandle; use patch_db::DbHandle;
use sqlx::{Executor, Postgres}; use sqlx::{Connection, Executor, Postgres};
use tokio::sync::watch::error::RecvError; use tokio::sync::watch::error::RecvError;
use tokio::sync::watch::{channel, Receiver, Sender}; use tokio::sync::watch::{channel, Receiver, Sender};
use tokio::sync::{oneshot, Notify, RwLock}; use tokio::sync::{oneshot, Notify, RwLock};
use torut::onion::TorSecretKeyV3;
use tracing::instrument; use tracing::instrument;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::manager::sync::synchronizer; use crate::manager::sync::synchronizer;
use crate::net::interface::InterfaceId; use crate::net::net_controller::NetService;
use crate::net::GeneratedCertificateMountPoint;
use crate::procedure::docker::{DockerContainer, DockerProcedure, LongRunning}; use crate::procedure::docker::{DockerContainer, DockerProcedure, LongRunning};
#[cfg(feature = "js_engine")] #[cfg(feature = "js_engine")]
use crate::procedure::js_scripts::JsProcedure; use crate::procedure::js_scripts::JsProcedure;
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName}; use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::{Manifest, PackageId}; use crate::s9pk::manifest::{Manifest, PackageId};
use crate::util::{ApplyRef, Container, NonDetachingJoinHandle, Version}; use crate::util::{ApplyRef, Container, NonDetachingJoinHandle, Version};
use crate::volume::Volume;
use crate::Error; use crate::Error;
pub mod health; pub mod health;
@@ -70,10 +69,9 @@ impl ManagerMap {
continue; continue;
}; };
let tor_keys = man.interfaces.tor_keys(secrets, &package).await?;
res.insert( res.insert(
(package, man.version.clone()), (package, man.version.clone()),
Arc::new(Manager::create(ctx.clone(), man, tor_keys).await?), Arc::new(Manager::create(ctx.clone(), man).await?),
); );
} }
*self.0.write().await = res; *self.0.write().await = res;
@@ -81,12 +79,7 @@ impl ManagerMap {
} }
#[instrument(skip(self, ctx))] #[instrument(skip(self, ctx))]
pub async fn add( pub async fn add(&self, ctx: RpcContext, manifest: Manifest) -> Result<(), Error> {
&self,
ctx: RpcContext,
manifest: Manifest,
tor_keys: BTreeMap<InterfaceId, TorSecretKeyV3>,
) -> Result<(), Error> {
let mut lock = self.0.write().await; let mut lock = self.0.write().await;
let id = (manifest.id.clone(), manifest.version.clone()); let id = (manifest.id.clone(), manifest.version.clone());
if let Some(man) = lock.remove(&id) { if let Some(man) = lock.remove(&id) {
@@ -94,10 +87,7 @@ impl ManagerMap {
man.exit().await?; man.exit().await?;
} }
} }
lock.insert( lock.insert(id, Arc::new(Manager::create(ctx, manifest).await?));
id,
Arc::new(Manager::create(ctx, manifest, tor_keys).await?),
);
Ok(()) Ok(())
} }
@@ -162,7 +152,6 @@ struct ManagerSeed {
ctx: RpcContext, ctx: RpcContext,
manifest: Manifest, manifest: Manifest,
container_name: String, container_name: String,
tor_keys: BTreeMap<InterfaceId, TorSecretKeyV3>,
} }
pub struct ManagerSharedState { pub struct ManagerSharedState {
@@ -190,13 +179,8 @@ async fn run_main(
state: &Arc<ManagerSharedState>, state: &Arc<ManagerSharedState>,
) -> Result<Result<NoOutput, (i32, String)>, Error> { ) -> Result<Result<NoOutput, (i32, String)>, Error> {
let rt_state = state.clone(); let rt_state = state.clone();
let interfaces = main_interfaces(&*state.seed)?;
let generated_certificate = generate_certificate(&*state.seed, &interfaces).await?;
let mut runtime = NonDetachingJoinHandle::from(tokio::spawn(start_up_image( let mut runtime = NonDetachingJoinHandle::from(tokio::spawn(start_up_image(rt_state)));
rt_state,
generated_certificate,
)));
let ip = match state.persistent_container.is_some() { let ip = match state.persistent_container.is_some() {
false => Some(match get_running_ip(state, &mut runtime).await { false => Some(match get_running_ip(state, &mut runtime).await {
GetRunningIp::Ip(x) => x, GetRunningIp::Ip(x) => x,
@@ -206,9 +190,11 @@ async fn run_main(
true => None, true => None,
}; };
if let Some(ip) = ip { let svc = if let Some(ip) = ip {
add_network_for_main(&*state.seed, ip, interfaces, generated_certificate).await?; Some(add_network_for_main(&*state.seed, ip).await?)
} } else {
None
};
set_commit_health_true(state); set_commit_health_true(state);
let health = main_health_check_daemon(state.clone()); let health = main_health_check_daemon(state.clone());
@@ -218,8 +204,8 @@ async fn run_main(
_ = health => Err(Error::new(eyre!("Health check daemon exited!"), crate::ErrorKind::Unknown)), _ = health => Err(Error::new(eyre!("Health check daemon exited!"), crate::ErrorKind::Unknown)),
_ = state.killer.notified() => Ok(Err((137, "Killed".to_string()))) _ = state.killer.notified() => Ok(Err((137, "Killed".to_string())))
}; };
if let Some(ip) = ip { if let Some(svc) = svc {
remove_network_for_main(&*state.seed, ip).await?; remove_network_for_main(svc).await?;
} }
res res
} }
@@ -228,7 +214,6 @@ async fn run_main(
/// Note for _generated_certificate: Needed to know that before we start the state we have generated the certificate /// Note for _generated_certificate: Needed to know that before we start the state we have generated the certificate
async fn start_up_image( async fn start_up_image(
rt_state: Arc<ManagerSharedState>, rt_state: Arc<ManagerSharedState>,
_generated_certificate: GeneratedCertificateMountPoint,
) -> Result<Result<NoOutput, (i32, String)>, Error> { ) -> Result<Result<NoOutput, (i32, String)>, Error> {
rt_state rt_state
.seed .seed
@@ -248,17 +233,12 @@ async fn start_up_image(
impl Manager { impl Manager {
#[instrument(skip(ctx))] #[instrument(skip(ctx))]
async fn create( async fn create(ctx: RpcContext, manifest: Manifest) -> Result<Self, Error> {
ctx: RpcContext,
manifest: Manifest,
tor_keys: BTreeMap<InterfaceId, TorSecretKeyV3>,
) -> Result<Self, Error> {
let (on_stop, recv) = channel(OnStop::Sleep); let (on_stop, recv) = channel(OnStop::Sleep);
let seed = Arc::new(ManagerSeed { let seed = Arc::new(ManagerSeed {
ctx, ctx,
container_name: DockerProcedure::container_name(&manifest.id, None), container_name: DockerProcedure::container_name(&manifest.id, None),
manifest, manifest,
tor_keys,
}); });
let persistent_container = PersistentContainer::init(&seed).await?; let persistent_container = PersistentContainer::init(&seed).await?;
let shared = Arc::new(ManagerSharedState { let shared = Arc::new(ManagerSharedState {
@@ -479,8 +459,6 @@ async fn spawn_persistent_container(
let mut send_inserter: Option<oneshot::Sender<Receiver<Arc<UnixRpcClient>>>> = Some(send_inserter); let mut send_inserter: Option<oneshot::Sender<Receiver<Arc<UnixRpcClient>>>> = Some(send_inserter);
loop { loop {
if let Err(e) = async { if let Err(e) = async {
let interfaces = main_interfaces(&*seed)?;
let generated_certificate = generate_certificate(&*seed, &interfaces).await?;
let (mut runtime, inserter) = let (mut runtime, inserter) =
long_running_docker(&seed, &container).await?; long_running_docker(&seed, &container).await?;
@@ -493,7 +471,7 @@ async fn spawn_persistent_container(
return Ok(()); return Ok(());
} }
}; };
add_network_for_main(&*seed, ip, interfaces, generated_certificate).await?; let svc = add_network_for_main(&*seed, ip).await?;
if let Some(inserter_send) = inserter_send.as_mut() { if let Some(inserter_send) = inserter_send.as_mut() {
let _ = inserter_send.send(Arc::new(inserter)); let _ = inserter_send.send(Arc::new(inserter));
@@ -509,7 +487,7 @@ async fn spawn_persistent_container(
a = runtime.running_output => a.map_err(|_| Error::new(eyre!("Manager runtime panicked!"), crate::ErrorKind::Docker)).map(|_| ()), a = runtime.running_output => a.map_err(|_| Error::new(eyre!("Manager runtime panicked!"), crate::ErrorKind::Docker)).map(|_| ()),
}; };
remove_network_for_main(&*seed, ip).await?; remove_network_for_main(svc).await?;
res res
}.await { }.await {
@@ -540,16 +518,8 @@ async fn long_running_docker(
.await .await
} }
async fn remove_network_for_main(seed: &ManagerSeed, ip: std::net::Ipv4Addr) -> Result<(), Error> { async fn remove_network_for_main(svc: NetService) -> Result<(), Error> {
seed.ctx svc.remove_all().await
.net_controller
.remove(
&seed.manifest.id,
ip,
seed.manifest.interfaces.0.keys().cloned(),
)
.await?;
Ok(())
} }
fn fetch_starting_to_running(state: &Arc<ManagerSharedState>) { fn fetch_starting_to_running(state: &Arc<ManagerSharedState>) {
@@ -592,18 +562,32 @@ fn set_commit_health_true(state: &Arc<ManagerSharedState>) {
async fn add_network_for_main( async fn add_network_for_main(
seed: &ManagerSeed, seed: &ManagerSeed,
ip: std::net::Ipv4Addr, ip: std::net::Ipv4Addr,
interfaces: Vec<( ) -> Result<NetService, Error> {
InterfaceId, let mut svc = seed
&crate::net::interface::Interface, .ctx
TorSecretKeyV3,
)>,
generated_certificate: GeneratedCertificateMountPoint,
) -> Result<(), Error> {
seed.ctx
.net_controller .net_controller
.add(&seed.manifest.id, ip, interfaces, generated_certificate) .create_service(seed.manifest.id.clone(), ip)
.await?; .await?;
Ok(()) // DEPRECATED
let mut secrets = seed.ctx.secret_store.acquire().await?;
let mut tx = secrets.begin().await?;
for (id, interface) in &seed.manifest.interfaces.0 {
for (external, internal) in interface.lan_config.iter().flatten() {
svc.add_lan(&mut tx, id.clone(), external.0, internal.internal, false)
.await?;
}
for (external, internal) in interface.tor_config.iter().flat_map(|t| &t.port_mapping) {
svc.add_tor(&mut tx, id.clone(), external.0, internal.0)
.await?;
}
}
for volume in seed.manifest.volumes.values() {
if let Volume::Certificate { interface_id } = volume {
svc.export_cert(&mut tx, interface_id, ip.into()).await?;
}
}
tx.commit().await?;
Ok(svc)
} }
enum GetRunningIp { enum GetRunningIp {
@@ -716,49 +700,6 @@ async fn container_inspect(
.await .await
} }
async fn generate_certificate(
seed: &ManagerSeed,
interfaces: &Vec<(
InterfaceId,
&crate::net::interface::Interface,
TorSecretKeyV3,
)>,
) -> Result<GeneratedCertificateMountPoint, Error> {
seed.ctx
.net_controller
.generate_certificate_mountpoint(&seed.manifest.id, interfaces)
.await
}
fn main_interfaces(
seed: &ManagerSeed,
) -> Result<
Vec<(
InterfaceId,
&crate::net::interface::Interface,
TorSecretKeyV3,
)>,
Error,
> {
seed.manifest
.interfaces
.0
.iter()
.map(|(id, info)| {
Ok((
id.clone(),
info,
seed.tor_keys
.get(id)
.ok_or_else(|| {
Error::new(eyre!("interface {} missing key", id), crate::ErrorKind::Tor)
})?
.clone(),
))
})
.collect::<Result<Vec<_>, Error>>()
}
async fn wait_for_status(shared: &ManagerSharedState, status: Status) { async fn wait_for_status(shared: &ManagerSharedState, status: Status) {
let mut recv = shared.status.0.subscribe(); let mut recv = shared.status.0.subscribe();
while { while {

View File

@@ -4,12 +4,12 @@ use color_eyre::eyre::eyre;
use emver::VersionRange; use emver::VersionRange;
use futures::{Future, FutureExt}; use futures::{Future, FutureExt};
use indexmap::IndexMap; use indexmap::IndexMap;
use models::ImageId;
use patch_db::HasModel; use patch_db::HasModel;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use tracing::instrument; use tracing::instrument;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::id::ImageId;
use crate::procedure::docker::DockerContainers; use crate::procedure::docker::DockerContainers;
use crate::procedure::{PackageProcedure, ProcedureName}; use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;

View File

@@ -1,215 +0,0 @@
use std::collections::BTreeMap;
use std::io;
use std::pin::Pin;
use std::str::FromStr;
use std::sync::{Arc, RwLock};
use std::task::{Context, Poll};
use color_eyre::eyre::eyre;
use futures::{ready, Future};
use hyper::server::accept::Accept;
use hyper::server::conn::{AddrIncoming, AddrStream};
use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio_rustls::rustls::server::ResolvesServerCert;
use tokio_rustls::rustls::sign::{any_supported_type, CertifiedKey};
use tokio_rustls::rustls::{Certificate, PrivateKey, ServerConfig};
use crate::net::net_utils::ResourceFqdn;
use crate::Error;
enum State {
Handshaking(tokio_rustls::Accept<AddrStream>),
Streaming(tokio_rustls::server::TlsStream<AddrStream>),
}
// tokio_rustls::server::TlsStream doesn't expose constructor methods,
// so we have to TlsAcceptor::accept and handshake to have access to it
// TlsStream implements AsyncRead/AsyncWrite handshaking tokio_rustls::Accept first
pub struct TlsStream {
state: State,
}
impl TlsStream {
fn new(stream: AddrStream, config: Arc<ServerConfig>) -> TlsStream {
let accept = tokio_rustls::TlsAcceptor::from(config).accept(stream);
TlsStream {
state: State::Handshaking(accept),
}
}
}
impl AsyncRead for TlsStream {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context,
buf: &mut ReadBuf,
) -> Poll<io::Result<()>> {
let pin = self.get_mut();
match pin.state {
State::Handshaking(ref mut accept) => match ready!(Pin::new(accept).poll(cx)) {
Ok(mut stream) => {
let result = Pin::new(&mut stream).poll_read(cx, buf);
pin.state = State::Streaming(stream);
result
}
Err(err) => Poll::Ready(Err(err)),
},
State::Streaming(ref mut stream) => Pin::new(stream).poll_read(cx, buf),
}
}
}
impl AsyncWrite for TlsStream {
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
let pin = self.get_mut();
match pin.state {
State::Handshaking(ref mut accept) => match ready!(Pin::new(accept).poll(cx)) {
Ok(mut stream) => {
let result = Pin::new(&mut stream).poll_write(cx, buf);
pin.state = State::Streaming(stream);
result
}
Err(err) => Poll::Ready(Err(err)),
},
State::Streaming(ref mut stream) => Pin::new(stream).poll_write(cx, buf),
}
}
fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
match self.state {
State::Handshaking(_) => Poll::Ready(Ok(())),
State::Streaming(ref mut stream) => Pin::new(stream).poll_flush(cx),
}
}
fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
match self.state {
State::Handshaking(_) => Poll::Ready(Ok(())),
State::Streaming(ref mut stream) => Pin::new(stream).poll_shutdown(cx),
}
}
}
impl ResolvesServerCert for EmbassyCertResolver {
fn resolve(
&self,
client_hello: tokio_rustls::rustls::server::ClientHello,
) -> Option<Arc<tokio_rustls::rustls::sign::CertifiedKey>> {
let hostname_raw = client_hello.server_name();
match hostname_raw {
Some(hostname_str) => {
let full_fqdn = match ResourceFqdn::from_str(hostname_str) {
Ok(fqdn) => fqdn,
Err(_) => {
tracing::error!("Error converting {} to fqdn struct", hostname_str);
return None;
}
};
let lock = self.cert_mapping.read();
match lock {
Ok(lock) => lock
.get(&full_fqdn)
.map(|cert_key| Arc::new(cert_key.to_owned())),
Err(err) => {
tracing::error!("resolve fn Error: {}", err);
None
}
}
}
None => None,
}
}
}
#[derive(Clone, Default)]
pub struct EmbassyCertResolver {
cert_mapping: Arc<RwLock<BTreeMap<ResourceFqdn, CertifiedKey>>>,
}
impl EmbassyCertResolver {
pub fn new() -> Self {
Self::default()
}
pub async fn add_certificate_to_resolver(
&mut self,
service_resource_fqdn: ResourceFqdn,
package_cert_data: (PKey<Private>, Vec<X509>),
) -> Result<(), Error> {
let x509_cert_chain = package_cert_data.1;
let private_keys = package_cert_data
.0
.private_key_to_der()
.map_err(|err| Error::new(eyre!("{}", err), crate::ErrorKind::OpenSsl))?;
let mut full_rustls_certs = Vec::new();
for cert in x509_cert_chain.iter() {
let cert = Certificate(
cert.to_der()
.map_err(|err| Error::new(eyre!("{}", err), crate::ErrorKind::OpenSsl))?,
);
full_rustls_certs.push(cert);
}
let pre_sign_key = PrivateKey(private_keys);
let actual_sign_key = any_supported_type(&pre_sign_key)
.map_err(|err| Error::new(eyre!("{}", err), crate::ErrorKind::OpenSsl))?;
let cert_key = CertifiedKey::new(full_rustls_certs, actual_sign_key);
let mut lock = self
.cert_mapping
.write()
.map_err(|err| Error::new(eyre!("{}", err), crate::ErrorKind::Network))?;
lock.insert(service_resource_fqdn, cert_key);
Ok(())
}
pub async fn remove_cert(&mut self, hostname: ResourceFqdn) -> Result<(), Error> {
let mut lock = self
.cert_mapping
.write()
.map_err(|err| Error::new(eyre!("{}", err), crate::ErrorKind::Network))?;
lock.remove(&hostname);
Ok(())
}
}
pub struct TlsAcceptor {
config: Arc<ServerConfig>,
incoming: AddrIncoming,
}
impl TlsAcceptor {
pub fn new(config: Arc<ServerConfig>, incoming: AddrIncoming) -> TlsAcceptor {
TlsAcceptor { config, incoming }
}
}
impl Accept for TlsAcceptor {
type Conn = TlsStream;
type Error = io::Error;
fn poll_accept(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Self::Conn, Self::Error>>> {
let pin = self.get_mut();
match ready!(Pin::new(&mut pin.incoming).poll_accept(cx)) {
Some(Ok(sock)) => Poll::Ready(Some(Ok(TlsStream::new(sock, pin.config.clone())))),
Some(Err(e)) => Poll::Ready(Some(Err(e))),
None => Poll::Ready(None),
}
}
}

View File

@@ -1,7 +1,9 @@
use std::collections::BTreeMap; use std::collections::{BTreeMap, BTreeSet};
use std::net::IpAddr;
use futures::TryStreamExt; use futures::TryStreamExt;
use rpc_toolkit::command; use rpc_toolkit::command;
use tokio::sync::RwLock;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::db::model::IpInfo; use crate::db::model::IpInfo;
@@ -9,6 +11,32 @@ use crate::net::net_utils::{iface_is_physical, list_interfaces};
use crate::util::display_none; use crate::util::display_none;
use crate::Error; use crate::Error;
lazy_static::lazy_static! {
static ref CACHED_IPS: RwLock<BTreeSet<IpAddr>> = RwLock::new(BTreeSet::new());
}
async fn _ips() -> Result<BTreeSet<IpAddr>, Error> {
Ok(init_ips()
.await?
.values()
.flat_map(|i| {
std::iter::empty()
.chain(i.ipv4.map(IpAddr::from))
.chain(i.ipv6.map(IpAddr::from))
})
.collect())
}
pub async fn ips() -> Result<BTreeSet<IpAddr>, Error> {
let ips = CACHED_IPS.read().await.clone();
if !ips.is_empty() {
return Ok(ips);
}
let ips = _ips().await?;
*CACHED_IPS.write().await = ips.clone();
Ok(ips)
}
pub async fn init_ips() -> Result<BTreeMap<String, IpInfo>, Error> { pub async fn init_ips() -> Result<BTreeMap<String, IpInfo>, Error> {
let mut res = BTreeMap::new(); let mut res = BTreeMap::new();
let mut ifaces = list_interfaces(); let mut ifaces = list_interfaces();
@@ -29,15 +57,23 @@ pub async fn dhcp() -> Result<(), Error> {
#[command(display(display_none))] #[command(display(display_none))]
pub async fn update(#[context] ctx: RpcContext, #[arg] interface: String) -> Result<(), Error> { pub async fn update(#[context] ctx: RpcContext, #[arg] interface: String) -> Result<(), Error> {
if iface_is_physical(&interface).await { if iface_is_physical(&interface).await {
let ip_info = IpInfo::for_interface(&interface).await?;
crate::db::DatabaseModel::new() crate::db::DatabaseModel::new()
.server_info() .server_info()
.ip_info() .ip_info()
.idx_model(&interface) .idx_model(&interface)
.put( .put(&mut ctx.db.handle(), &ip_info)
&mut ctx.db.handle(),
&IpInfo::for_interface(&interface).await?,
)
.await?; .await?;
let mut cached = CACHED_IPS.write().await;
if cached.is_empty() {
*cached = _ips().await?;
} else {
cached.extend(
std::iter::empty()
.chain(ip_info.ipv4.map(IpAddr::from))
.chain(ip_info.ipv6.map(IpAddr::from)),
);
}
} }
Ok(()) Ok(())
} }

View File

@@ -1,9 +1,10 @@
use std::borrow::Borrow; use std::borrow::Borrow;
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::net::{Ipv4Addr, SocketAddr}; use std::net::{Ipv4Addr, SocketAddr};
use std::sync::Arc; use std::sync::{Arc, Weak};
use std::time::Duration; use std::time::Duration;
use color_eyre::eyre::eyre;
use futures::TryFutureExt; use futures::TryFutureExt;
use helpers::NonDetachingJoinHandle; use helpers::NonDetachingJoinHandle;
use models::PackageId; use models::PackageId;
@@ -13,39 +14,52 @@ use tokio::sync::RwLock;
use trust_dns_server::authority::MessageResponseBuilder; use trust_dns_server::authority::MessageResponseBuilder;
use trust_dns_server::client::op::{Header, ResponseCode}; use trust_dns_server::client::op::{Header, ResponseCode};
use trust_dns_server::client::rr::{Name, Record, RecordType}; use trust_dns_server::client::rr::{Name, Record, RecordType};
use trust_dns_server::proto::rr::rdata::a;
use trust_dns_server::server::{Request, RequestHandler, ResponseHandler, ResponseInfo}; use trust_dns_server::server::{Request, RequestHandler, ResponseHandler, ResponseInfo};
use trust_dns_server::ServerFuture; use trust_dns_server::ServerFuture;
use crate::util::Invoke; use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt, HOST_IP}; use crate::{Error, ErrorKind, ResultExt};
pub struct DnsController { pub struct DnsController {
services: Arc<RwLock<BTreeMap<PackageId, Vec<Ipv4Addr>>>>, services: Weak<RwLock<BTreeMap<Option<PackageId>, BTreeMap<Ipv4Addr, Weak<()>>>>>,
#[allow(dead_code)] #[allow(dead_code)]
dns_server: NonDetachingJoinHandle<Result<(), Error>>, dns_server: NonDetachingJoinHandle<Result<(), Error>>,
} }
struct Resolver { struct Resolver {
services: Arc<RwLock<BTreeMap<PackageId, Vec<Ipv4Addr>>>>, services: Arc<RwLock<BTreeMap<Option<PackageId>, BTreeMap<Ipv4Addr, Weak<()>>>>>,
} }
impl Resolver { impl Resolver {
async fn resolve(&self, name: &Name) -> Option<Vec<Ipv4Addr>> { async fn resolve(&self, name: &Name) -> Option<Vec<Ipv4Addr>> {
match name.iter().next_back() { match name.iter().next_back() {
Some(b"embassy") => { Some(b"embassy") => {
if let Some(pkg) = name.iter().rev().skip(1).next() { if let Some(pkg) = name.iter().rev().skip(1).next() {
if let Some(ip) = self if let Some(ip) = self.services.read().await.get(&Some(
.services std::str::from_utf8(pkg)
.read() .unwrap_or_default()
.await .parse()
.get(std::str::from_utf8(pkg).unwrap_or_default()) .unwrap_or_default(),
{ )) {
Some(ip.iter().copied().collect()) Some(
ip.iter()
.filter(|(_, rc)| rc.strong_count() > 0)
.map(|(ip, _)| *ip)
.collect(),
)
} else { } else {
None None
} }
} else { } else {
Some(vec![HOST_IP.into()]) if let Some(ip) = self.services.read().await.get(&None) {
Some(
ip.iter()
.filter(|(_, rc)| rc.strong_count() > 0)
.map(|(ip, _)| *ip)
.collect(),
)
} else {
None
}
} }
} }
_ => None, _ => None,
@@ -162,26 +176,47 @@ impl DnsController {
.into(); .into();
Ok(Self { Ok(Self {
services, services: Arc::downgrade(&services),
dns_server, dns_server,
}) })
} }
pub async fn add(&self, pkg_id: &PackageId, ip: Ipv4Addr) { pub async fn add(&self, pkg_id: Option<PackageId>, ip: Ipv4Addr) -> Result<Arc<()>, Error> {
let mut writable = self.services.write().await; if let Some(services) = Weak::upgrade(&self.services) {
let mut ips = writable.remove(pkg_id).unwrap_or_default(); let mut writable = services.write().await;
ips.push(ip); let mut ips = writable.remove(&pkg_id).unwrap_or_default();
writable.insert(pkg_id.clone(), ips); let rc = if let Some(rc) = Weak::upgrade(&ips.remove(&ip).unwrap_or_default()) {
rc
} else {
Arc::new(())
};
ips.insert(ip, Arc::downgrade(&rc));
writable.insert(pkg_id, ips);
Ok(rc)
} else {
Err(Error::new(
eyre!("DNS Server Thread has exited"),
crate::ErrorKind::Network,
))
}
} }
pub async fn remove(&self, pkg_id: &PackageId, ip: Ipv4Addr) { pub async fn gc(&self, pkg_id: Option<PackageId>, ip: Ipv4Addr) -> Result<(), Error> {
let mut writable = self.services.write().await; if let Some(services) = Weak::upgrade(&self.services) {
let mut ips = writable.remove(pkg_id).unwrap_or_default(); let mut writable = services.write().await;
if let Some((idx, _)) = ips.iter().copied().enumerate().find(|(_, x)| *x == ip) { let mut ips = writable.remove(&pkg_id).unwrap_or_default();
ips.swap_remove(idx); if let Some(rc) = Weak::upgrade(&ips.remove(&ip).unwrap_or_default()) {
ips.insert(ip, Arc::downgrade(&rc));
} }
if !ips.is_empty() { if !ips.is_empty() {
writable.insert(pkg_id.clone(), ips); writable.insert(pkg_id, ips);
}
Ok(())
} else {
Err(Error::new(
eyre!("DNS Server Thread has exited"),
crate::ErrorKind::Network,
))
} }
} }
} }

View File

@@ -1,173 +0,0 @@
use std::collections::BTreeMap;
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;
use helpers::NonDetachingJoinHandle;
use http::StatusCode;
use hyper::server::conn::AddrIncoming;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Error as HyperError, Response, Server};
use tokio::sync::oneshot;
use tokio_rustls::rustls::ServerConfig;
use tracing::error;
use crate::net::cert_resolver::TlsAcceptor;
use crate::net::net_utils::{host_addr_fqdn, ResourceFqdn};
use crate::net::HttpHandler;
use crate::Error;
static RES_NOT_FOUND: &[u8] = b"503 Service Unavailable";
static NO_HOST: &[u8] = b"No host header found";
pub struct EmbassyServiceHTTPServer {
pub svc_mapping: Arc<tokio::sync::RwLock<BTreeMap<ResourceFqdn, HttpHandler>>>,
pub shutdown: oneshot::Sender<()>,
pub handle: NonDetachingJoinHandle<()>,
pub ssl_cfg: Option<Arc<ServerConfig>>,
}
impl EmbassyServiceHTTPServer {
pub async fn new(
listener_addr: IpAddr,
port: u16,
ssl_cfg: Option<Arc<ServerConfig>>,
) -> Result<Self, Error> {
let (tx, rx) = tokio::sync::oneshot::channel::<()>();
let listener_socket_addr = SocketAddr::from((listener_addr, port));
let server_service_mapping = Arc::new(tokio::sync::RwLock::new(BTreeMap::<
ResourceFqdn,
HttpHandler,
>::new()));
let server_service_mapping1 = server_service_mapping.clone();
let bare_make_service_fn = move || {
let server_service_mapping = server_service_mapping.clone();
async move {
Ok::<_, HyperError>(service_fn(move |req| {
let mut server_service_mapping = server_service_mapping.clone();
async move {
server_service_mapping = server_service_mapping.clone();
let host = host_addr_fqdn(&req);
match host {
Ok(host_uri) => {
let res = {
let mapping = server_service_mapping.read().await;
let opt_handler = mapping.get(&host_uri).cloned();
opt_handler
};
match res {
Some(opt_handler) => {
let response = opt_handler(req).await;
match response {
Ok(resp) => Ok::<Response<Body>, hyper::Error>(resp),
Err(err) => Ok(respond_hyper_error(err)),
}
}
None => Ok(res_not_found()),
}
}
Err(e) => Ok(no_host_found(e)),
}
}
}))
}
};
let inner_ssl_cfg = ssl_cfg.clone();
let handle = tokio::spawn(async move {
match inner_ssl_cfg {
Some(cfg) => {
let incoming = AddrIncoming::bind(&listener_socket_addr).unwrap();
let server = Server::builder(TlsAcceptor::new(cfg, incoming))
.http1_preserve_header_case(true)
.http1_title_case_headers(true)
.serve(make_service_fn(|_| bare_make_service_fn()))
.with_graceful_shutdown({
async {
rx.await.ok();
}
});
if let Err(e) = server.await {
error!("Spawning hyper server errorr: {}", e);
}
}
None => {
let server = Server::bind(&listener_socket_addr)
.http1_preserve_header_case(true)
.http1_title_case_headers(true)
.serve(make_service_fn(|_| bare_make_service_fn()))
.with_graceful_shutdown({
async {
rx.await.ok();
}
});
if let Err(e) = server.await {
error!("Spawning hyper server errorr: {}", e);
}
}
};
});
Ok(Self {
svc_mapping: server_service_mapping1,
handle: handle.into(),
shutdown: tx,
ssl_cfg,
})
}
pub async fn add_svc_handler_mapping(
&mut self,
fqdn: ResourceFqdn,
svc_handle: HttpHandler,
) -> Result<(), Error> {
let mut mapping = self.svc_mapping.write().await;
mapping.insert(fqdn.clone(), svc_handle);
Ok(())
}
pub async fn remove_svc_handler_mapping(&mut self, fqdn: ResourceFqdn) -> Result<(), Error> {
let mut mapping = self.svc_mapping.write().await;
mapping.remove(&fqdn);
Ok(())
}
}
/// HTTP status code 503
fn res_not_found() -> Response<Body> {
Response::builder()
.status(StatusCode::SERVICE_UNAVAILABLE)
.body(RES_NOT_FOUND.into())
.unwrap()
}
fn no_host_found(err: Error) -> Response<Body> {
let err_txt = format!("{}: Error {}", String::from_utf8_lossy(NO_HOST), err);
Response::builder()
.status(StatusCode::BAD_REQUEST)
.body(err_txt.into())
.unwrap()
}
fn respond_hyper_error(err: hyper::Error) -> Response<Body> {
let err_txt = format!("{}: Error {}", String::from_utf8_lossy(NO_HOST), err);
Response::builder()
.status(StatusCode::BAD_REQUEST)
.body(err_txt.into())
.unwrap()
}

View File

@@ -1,9 +1,6 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use color_eyre::eyre::eyre;
use futures::TryStreamExt;
use indexmap::IndexSet; use indexmap::IndexSet;
use itertools::Either;
pub use models::InterfaceId; pub use models::InterfaceId;
use serde::{Deserialize, Deserializer, Serialize}; use serde::{Deserialize, Deserializer, Serialize};
use sqlx::{Executor, Postgres}; use sqlx::{Executor, Postgres};
@@ -11,7 +8,6 @@ use torut::onion::TorSecretKeyV3;
use tracing::instrument; use tracing::instrument;
use crate::db::model::{InterfaceAddressMap, InterfaceAddresses}; use crate::db::model::{InterfaceAddressMap, InterfaceAddresses};
use crate::id::Id;
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;
use crate::util::serde::Port; use crate::util::serde::Port;
use crate::{Error, ResultExt}; use crate::{Error, ResultExt};
@@ -81,36 +77,6 @@ impl Interfaces {
} }
Ok(interface_addresses) Ok(interface_addresses)
} }
#[instrument(skip(secrets))]
pub async fn tor_keys<Ex>(
&self,
secrets: &mut Ex,
package_id: &PackageId,
) -> Result<BTreeMap<InterfaceId, TorSecretKeyV3>, Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
Ok(sqlx::query!(
"SELECT interface, key FROM tor WHERE package = $1",
**package_id
)
.fetch_many(secrets)
.map_err(Error::from)
.try_filter_map(|qr| async move {
Ok(if let Either::Right(r) = qr {
let mut buf = [0; 64];
buf.clone_from_slice(r.key.get(0..64).ok_or_else(|| {
Error::new(eyre!("Invalid Tor Key Length"), crate::ErrorKind::Database)
})?);
Some((InterfaceId::from(Id::try_from(r.interface)?), buf.into()))
} else {
None
})
})
.try_collect()
.await?)
}
} }
#[derive(Clone, Debug, Deserialize, Serialize)] #[derive(Clone, Debug, Deserialize, Serialize)]

272
backend/src/net/keys.rs Normal file
View File

@@ -0,0 +1,272 @@
use color_eyre::eyre::eyre;
use ed25519_dalek::{ExpandedSecretKey, SecretKey};
use models::{Id, InterfaceId, PackageId};
use openssl::pkey::{PKey, Private};
use openssl::sha::Sha256;
use openssl::x509::X509;
use p256::elliptic_curve::pkcs8::EncodePrivateKey;
use sqlx::PgExecutor;
use ssh_key::private::Ed25519PrivateKey;
use torut::onion::{OnionAddressV3, TorSecretKeyV3};
use zeroize::Zeroize;
use crate::net::ssl::CertPair;
use crate::Error;
// TODO: delete once we may change tor addresses
async fn compat(
secrets: impl PgExecutor<'_>,
interface: &Option<(PackageId, InterfaceId)>,
) -> Result<Option<ExpandedSecretKey>, Error> {
if let Some((package, interface)) = interface {
if let Some(r) = sqlx::query!(
"SELECT key FROM tor WHERE package = $1 AND interface = $2",
**package,
**interface
)
.fetch_optional(secrets)
.await?
{
Ok(Some(ExpandedSecretKey::from_bytes(&r.key)?))
} else {
Ok(None)
}
} else {
if let Some(key) = sqlx::query!("SELECT tor_key FROM account WHERE id = 0")
.fetch_one(secrets)
.await?
.tor_key
{
Ok(Some(ExpandedSecretKey::from_bytes(&key)?))
} else {
Ok(None)
}
}
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Key {
interface: Option<(PackageId, InterfaceId)>,
base: [u8; 32],
tor_key: [u8; 64], // Does NOT necessarily match base
}
impl Key {
pub fn interface(&self) -> Option<(PackageId, InterfaceId)> {
self.interface.clone()
}
pub fn as_bytes(&self) -> [u8; 32] {
self.base
}
pub fn internal_address(&self) -> String {
self.interface
.as_ref()
.map(|(pkg_id, _)| format!("{}.embassy", pkg_id))
.unwrap_or_else(|| "embassy".to_owned())
}
pub fn tor_key(&self) -> TorSecretKeyV3 {
ed25519_dalek::ExpandedSecretKey::from_bytes(&self.tor_key)
.unwrap()
.to_bytes()
.into()
}
pub fn tor_address(&self) -> OnionAddressV3 {
self.tor_key().public().get_onion_address()
}
pub fn base_address(&self) -> String {
self.tor_key()
.public()
.get_onion_address()
.get_address_without_dot_onion()
}
pub fn local_address(&self) -> String {
self.base_address() + ".local"
}
pub fn openssl_key_ed25519(&self) -> PKey<Private> {
PKey::private_key_from_raw_bytes(&self.base, openssl::pkey::Id::ED25519).unwrap()
}
pub fn openssl_key_nistp256(&self) -> PKey<Private> {
let mut buf = self.base;
loop {
if let Ok(k) = p256::SecretKey::from_be_bytes(&buf) {
return PKey::private_key_from_pkcs8(&*k.to_pkcs8_der().unwrap().as_bytes())
.unwrap();
}
let mut sha = Sha256::new();
sha.update(&buf);
buf = sha.finish();
}
}
pub fn ssh_key(&self) -> Ed25519PrivateKey {
Ed25519PrivateKey::from_bytes(&self.base)
}
pub(crate) fn from_pair(
interface: Option<(PackageId, InterfaceId)>,
bytes: [u8; 32],
tor_key: [u8; 64],
) -> Self {
Self {
interface,
tor_key,
base: bytes,
}
}
pub fn from_bytes(interface: Option<(PackageId, InterfaceId)>, bytes: [u8; 32]) -> Self {
Self::from_pair(
interface,
bytes,
ExpandedSecretKey::from(&SecretKey::from_bytes(&bytes).unwrap()).to_bytes(),
)
}
pub fn new(interface: Option<(PackageId, InterfaceId)>) -> Self {
Self::from_bytes(interface, rand::random())
}
pub(super) fn with_certs(self, certs: CertPair, int: X509, root: X509) -> KeyInfo {
KeyInfo {
key: self,
certs,
int,
root,
}
}
pub async fn for_package(
secrets: impl PgExecutor<'_>,
package: &PackageId,
) -> Result<Vec<Self>, Error> {
sqlx::query!(
r#"
SELECT
network_keys.package,
network_keys.interface,
network_keys.key,
tor.key AS "tor_key?"
FROM
network_keys
LEFT JOIN
tor
ON
network_keys.package = tor.package
AND
network_keys.interface = tor.interface
WHERE
network_keys.package = $1
"#,
**package
)
.fetch_all(secrets)
.await?
.into_iter()
.map(|row| {
let interface = Some((
package.clone(),
InterfaceId::from(Id::try_from(row.interface)?),
));
let bytes = row.key.try_into().map_err(|e: Vec<u8>| {
Error::new(
eyre!("Invalid length for network key {} expected 32", e.len()),
crate::ErrorKind::Database,
)
})?;
Ok(match row.tor_key {
Some(tor_key) => Key::from_pair(
interface,
bytes,
tor_key.try_into().map_err(|e: Vec<u8>| {
Error::new(
eyre!("Invalid length for tor key {} expected 64", e.len()),
crate::ErrorKind::Database,
)
})?,
),
None => Key::from_bytes(interface, bytes),
})
})
.collect()
}
pub async fn for_interface<Ex>(
secrets: &mut Ex,
interface: Option<(PackageId, InterfaceId)>,
) -> Result<Self, Error>
where
for<'a> &'a mut Ex: PgExecutor<'a>,
{
let tentative = rand::random::<[u8; 32]>();
let actual = if let Some((pkg, iface)) = &interface {
let k = tentative.as_slice();
let actual = sqlx::query!(
"INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO UPDATE SET package = EXCLUDED.package RETURNING key",
**pkg,
**iface,
k,
)
.fetch_one(&mut *secrets)
.await?.key;
let mut bytes = tentative;
bytes.clone_from_slice(actual.get(0..32).ok_or_else(|| {
Error::new(
eyre!("Invalid key size returned from DB"),
crate::ErrorKind::Database,
)
})?);
bytes
} else {
let actual = sqlx::query!("SELECT network_key FROM account WHERE id = 0")
.fetch_one(&mut *secrets)
.await?
.network_key;
let mut bytes = tentative;
bytes.clone_from_slice(actual.get(0..32).ok_or_else(|| {
Error::new(
eyre!("Invalid key size returned from DB"),
crate::ErrorKind::Database,
)
})?);
bytes
};
let mut res = Self::from_bytes(interface, actual);
if let Some(tor_key) = compat(secrets, &res.interface).await? {
res.tor_key = tor_key.to_bytes();
}
Ok(res)
}
}
impl Drop for Key {
fn drop(&mut self) {
self.base.zeroize();
self.tor_key.zeroize();
}
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct KeyInfo {
key: Key,
certs: CertPair,
int: X509,
root: X509,
}
impl KeyInfo {
pub fn key(&self) -> &Key {
&self.key
}
pub fn certs(&self) -> &CertPair {
&self.certs
}
pub fn int_ca(&self) -> &X509 {
&self.int
}
pub fn root_ca(&self) -> &X509 {
&self.root
}
pub fn fullchain_ed25519(&self) -> Vec<&X509> {
vec![&self.certs.ed25519, &self.int, &self.root]
}
pub fn fullchain_nistp256(&self) -> Vec<&X509> {
vec![&self.certs.nistp256, &self.int, &self.root]
}
}
#[test]
pub fn test_keygen() {
let key = Key::new(None);
key.tor_key();
key.openssl_key_nistp256();
}

View File

@@ -1,13 +1,11 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::net::Ipv4Addr; use std::net::Ipv4Addr;
use std::sync::{Arc, Weak};
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use tokio::process::{Child, Command}; use tokio::process::{Child, Command};
use tokio::sync::Mutex; use tokio::sync::Mutex;
use torut::onion::TorSecretKeyV3;
use super::interface::InterfaceId;
use crate::s9pk::manifest::PackageId;
use crate::util::Invoke; use crate::util::Invoke;
use crate::{Error, ResultExt}; use crate::{Error, ResultExt};
@@ -39,25 +37,17 @@ impl MdnsController {
MdnsControllerInner::init().await?, MdnsControllerInner::init().await?,
))) )))
} }
pub async fn add<'a, I: IntoIterator<Item = (InterfaceId, TorSecretKeyV3)>>( pub async fn add(&self, alias: String) -> Result<Arc<()>, Error> {
&self, self.0.lock().await.add(alias).await
pkg_id: &PackageId,
interfaces: I,
) -> Result<(), Error> {
self.0.lock().await.add(pkg_id, interfaces).await
} }
pub async fn remove<I: IntoIterator<Item = InterfaceId>>( pub async fn gc(&self, alias: String) -> Result<(), Error> {
&self, self.0.lock().await.gc(alias).await
pkg_id: &PackageId,
interfaces: I,
) -> Result<(), Error> {
self.0.lock().await.remove(pkg_id, interfaces).await
} }
} }
pub struct MdnsControllerInner { pub struct MdnsControllerInner {
alias_cmd: Option<Child>, alias_cmd: Option<Child>,
services: BTreeMap<(PackageId, InterfaceId), TorSecretKeyV3>, services: BTreeMap<String, Weak<()>>,
} }
impl MdnsControllerInner { impl MdnsControllerInner {
@@ -76,35 +66,30 @@ impl MdnsControllerInner {
self.alias_cmd = Some( self.alias_cmd = Some(
Command::new("avahi-alias") Command::new("avahi-alias")
.kill_on_drop(true) .kill_on_drop(true)
.args(self.services.iter().map(|(_, key)| { .args(
key.public() self.services
.get_onion_address() .iter()
.get_address_without_dot_onion() .filter(|(_, rc)| rc.strong_count() > 0)
})) .map(|(s, _)| s),
)
.spawn()?, .spawn()?,
); );
Ok(()) Ok(())
} }
async fn add<'a, I: IntoIterator<Item = (InterfaceId, TorSecretKeyV3)>>( async fn add(&mut self, alias: String) -> Result<Arc<()>, Error> {
&mut self, let rc = if let Some(rc) = Weak::upgrade(&self.services.remove(&alias).unwrap_or_default())
pkg_id: &PackageId, {
interfaces: I, rc
) -> Result<(), Error> { } else {
self.services.extend( Arc::new(())
interfaces };
.into_iter() self.services.insert(alias, Arc::downgrade(&rc));
.map(|(interface_id, key)| ((pkg_id.clone(), interface_id), key)),
);
self.sync().await?; self.sync().await?;
Ok(()) Ok(rc)
} }
async fn remove<I: IntoIterator<Item = InterfaceId>>( async fn gc(&mut self, alias: String) -> Result<(), Error> {
&mut self, if let Some(rc) = Weak::upgrade(&self.services.remove(&alias).unwrap_or_default()) {
pkg_id: &PackageId, self.services.insert(alias, Arc::downgrade(&rc));
interfaces: I,
) -> Result<(), Error> {
for interface_id in interfaces {
self.services.remove(&(pkg_id.clone(), interface_id));
} }
self.sync().await?; self.sync().await?;
Ok(()) Ok(())

View File

@@ -1,54 +1,33 @@
use std::collections::BTreeMap;
use std::sync::Arc; use std::sync::Arc;
use futures::future::BoxFuture; use futures::future::BoxFuture;
use hyper::{Body, Error as HyperError, Request, Response}; use hyper::{Body, Error as HyperError, Request, Response};
use indexmap::IndexSet;
use rpc_toolkit::command; use rpc_toolkit::command;
use self::interface::InterfaceId;
use crate::net::interface::LanPortConfig;
use crate::util::serde::Port;
use crate::Error; use crate::Error;
pub mod cert_resolver;
pub mod dhcp; pub mod dhcp;
pub mod dns; pub mod dns;
pub mod embassy_service_http_server;
pub mod interface; pub mod interface;
pub mod keys;
#[cfg(feature = "avahi")] #[cfg(feature = "avahi")]
pub mod mdns; pub mod mdns;
pub mod net_controller; pub mod net_controller;
pub mod net_utils; pub mod net_utils;
pub mod proxy_controller;
pub mod ssl; pub mod ssl;
pub mod static_server; pub mod static_server;
pub mod tor; pub mod tor;
pub mod vhost_controller; pub mod vhost_controller;
pub mod web_server;
pub mod wifi; pub mod wifi;
const PACKAGE_CERT_PATH: &str = "/var/lib/embassy/ssl"; pub const PACKAGE_CERT_PATH: &str = "/var/lib/embassy/ssl";
#[command(subcommands(tor::tor, dhcp::dhcp))] #[command(subcommands(tor::tor, dhcp::dhcp))]
pub fn net() -> Result<(), Error> { pub fn net() -> Result<(), Error> {
Ok(()) Ok(())
} }
#[derive(Default)]
struct PackageNetInfo {
interfaces: BTreeMap<InterfaceId, InterfaceMetadata>,
}
pub struct InterfaceMetadata {
pub fqdn: String,
pub lan_config: BTreeMap<Port, LanPortConfig>,
pub protocols: IndexSet<String>,
}
/// Indicates that the net controller has created the
/// SSL keys
#[derive(Clone, Copy)]
pub struct GeneratedCertificateMountPoint(());
pub type HttpHandler = Arc< pub type HttpHandler = Arc<
dyn Fn(Request<Body>) -> BoxFuture<'static, Result<Response<Body>, HyperError>> + Send + Sync, dyn Fn(Request<Body>) -> BoxFuture<'static, Result<Response<Body>, HyperError>> + Send + Sync,
>; >;

View File

@@ -1,278 +1,346 @@
use std::net::{Ipv4Addr, SocketAddr}; use std::collections::BTreeMap;
use std::path::PathBuf; use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::str::FromStr; use std::sync::{Arc, Weak};
use color_eyre::eyre::eyre;
use models::InterfaceId; use models::InterfaceId;
use openssl::pkey::{PKey, Private}; use sqlx::PgExecutor;
use openssl::x509::X509;
use patch_db::DbHandle;
use sqlx::PgPool;
use torut::onion::{OnionAddressV3, TorSecretKeyV3};
use tracing::instrument; use tracing::instrument;
use crate::context::RpcContext; use crate::error::ErrorCollection;
use crate::hostname::{get_embassyd_tor_addr, get_hostname, HostNameReceipt}; use crate::hostname::Hostname;
use crate::net::dns::DnsController; use crate::net::dns::DnsController;
use crate::net::interface::{Interface, TorConfig}; use crate::net::keys::Key;
#[cfg(feature = "avahi")] #[cfg(feature = "avahi")]
use crate::net::mdns::MdnsController; use crate::net::mdns::MdnsController;
use crate::net::net_utils::ResourceFqdn; use crate::net::ssl::{export_cert, SslManager};
use crate::net::proxy_controller::ProxyController;
use crate::net::ssl::SslManager;
use crate::net::tor::TorController; use crate::net::tor::TorController;
use crate::net::{ use crate::net::vhost_controller::VHostController;
GeneratedCertificateMountPoint, HttpHandler, InterfaceMetadata, PACKAGE_CERT_PATH,
};
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;
use crate::Error; use crate::volume::cert_dir;
use crate::{Error, HOST_IP};
pub struct NetController { pub struct NetController {
pub tor: TorController, pub(super) tor: TorController,
#[cfg(feature = "avahi")] #[cfg(feature = "avahi")]
pub mdns: MdnsController, pub(super) mdns: MdnsController,
pub proxy: ProxyController, pub(super) vhost: VHostController,
pub ssl: SslManager, pub(super) dns: DnsController,
pub dns: DnsController, pub(super) ssl: Arc<SslManager>,
pub(super) os_bindings: Vec<Arc<()>>,
} }
impl NetController { impl NetController {
#[instrument(skip(db, db_handle))] #[instrument]
pub async fn init<Db: DbHandle>( pub async fn init(
embassyd_addr: SocketAddr,
embassyd_tor_key: TorSecretKeyV3,
tor_control: SocketAddr, tor_control: SocketAddr,
dns_bind: &[SocketAddr], dns_bind: &[SocketAddr],
db: PgPool, ssl: SslManager,
db_handle: &mut Db, hostname: &Hostname,
import_root_ca: Option<(PKey<Private>, X509)>, os_key: &Key,
) -> Result<Self, Error> { ) -> Result<Self, Error> {
let receipts = HostNameReceipt::new(db_handle).await?; let ssl = Arc::new(ssl);
let embassy_host_name = get_hostname(db_handle, &receipts).await?; let mut res = Self {
let embassy_name = embassy_host_name.local_domain_name(); tor: TorController::init(tor_control).await?,
let fqdn_name = ResourceFqdn::from_str(&embassy_name)?;
let ssl = match import_root_ca {
None => SslManager::init(db.clone(), db_handle).await,
Some(a) => SslManager::import_root_ca(db.clone(), a.0, a.1).await,
}?;
Ok(Self {
tor: TorController::init(embassyd_addr, embassyd_tor_key, tor_control).await?,
#[cfg(feature = "avahi")] #[cfg(feature = "avahi")]
mdns: MdnsController::init().await?, mdns: MdnsController::init().await?,
proxy: ProxyController::init(embassyd_addr, fqdn_name, ssl.clone()).await?, vhost: VHostController::new(ssl.clone()),
ssl,
dns: DnsController::init(dns_bind).await?, dns: DnsController::init(dns_bind).await?,
}) ssl,
} os_bindings: Vec::new(),
pub async fn setup_embassy_ui(rpc_ctx: RpcContext) -> Result<(), Error> {
NetController::setup_embassy_http_ui_handle(rpc_ctx.clone()).await?;
NetController::setup_embassy_https_ui_handle(rpc_ctx.clone()).await?;
Ok(())
}
async fn setup_embassy_https_ui_handle(rpc_ctx: RpcContext) -> Result<(), Error> {
let host_name = rpc_ctx.net_controller.proxy.get_hostname().await;
let host_name_fqdn: ResourceFqdn = host_name.parse()?;
let handler: HttpHandler =
crate::net::static_server::main_ui_server_router(rpc_ctx.clone()).await?;
let eos_pkg_id: PackageId = "embassy".parse().unwrap();
if let ResourceFqdn::Uri {
full_uri: _,
root,
tld: _,
} = host_name_fqdn.clone()
{
let root_cert = rpc_ctx
.net_controller
.ssl
.certificate_for(&root, &eos_pkg_id)
.await?;
rpc_ctx
.net_controller
.proxy
.add_certificate_to_resolver(host_name_fqdn.clone(), root_cert.clone())
.await?;
rpc_ctx
.net_controller
.proxy
.add_handle(443, host_name_fqdn.clone(), handler.clone(), true)
.await?;
}; };
res.add_os_bindings(hostname, os_key).await?;
// serving ip https is not yet supported Ok(res)
Ok(())
} }
async fn setup_embassy_http_ui_handle(rpc_ctx: RpcContext) -> Result<(), Error> { async fn add_os_bindings(&mut self, hostname: &Hostname, key: &Key) -> Result<(), Error> {
let host_name = rpc_ctx.net_controller.proxy.get_hostname().await; // Internal DNS
self.vhost
let embassy_tor_addr = get_embassyd_tor_addr(rpc_ctx.clone()).await?; .add(
let embassy_tor_fqdn: ResourceFqdn = embassy_tor_addr.parse()?; key.clone(),
let host_name_fqdn: ResourceFqdn = host_name.parse()?; Some("embassy".into()),
let ip_fqdn: ResourceFqdn = ResourceFqdn::IpAddr; 443,
([127, 0, 0, 1], 80).into(),
let localhost_fqdn = ResourceFqdn::LocalHost; false,
let handler: HttpHandler =
crate::net::static_server::main_ui_server_router(rpc_ctx.clone()).await?;
rpc_ctx
.net_controller
.proxy
.add_handle(80, embassy_tor_fqdn.clone(), handler.clone(), false)
.await?;
rpc_ctx
.net_controller
.proxy
.add_handle(80, host_name_fqdn.clone(), handler.clone(), false)
.await?;
rpc_ctx
.net_controller
.proxy
.add_handle(80, ip_fqdn.clone(), handler.clone(), false)
.await?;
rpc_ctx
.net_controller
.proxy
.add_handle(80, localhost_fqdn.clone(), handler.clone(), false)
.await?;
Ok(())
}
pub fn ssl_directory_for(pkg_id: &PackageId) -> PathBuf {
PathBuf::from(PACKAGE_CERT_PATH).join(pkg_id)
}
#[instrument(skip(self, interfaces, _generated_certificate))]
pub async fn add<'a, I>(
&self,
pkg_id: &PackageId,
ip: Ipv4Addr,
interfaces: I,
_generated_certificate: GeneratedCertificateMountPoint,
) -> Result<(), Error>
where
I: IntoIterator<Item = (InterfaceId, &'a Interface, TorSecretKeyV3)> + Clone,
for<'b> &'b I: IntoIterator<Item = &'b (InterfaceId, &'a Interface, TorSecretKeyV3)>,
{
let interfaces_tor = interfaces
.clone()
.into_iter()
.filter_map(|i| match i.1.tor_config.clone() {
None => None,
Some(cfg) => Some((i.0, cfg, i.2)),
})
.collect::<Vec<(InterfaceId, TorConfig, TorSecretKeyV3)>>();
let (tor_res, _, proxy_res, _) = tokio::join!(
self.tor.add(pkg_id, ip, interfaces_tor),
{
#[cfg(feature = "avahi")]
let mdns_fut = self.mdns.add(
pkg_id,
interfaces
.clone()
.into_iter()
.map(|(interface_id, _, key)| (interface_id, key)),
);
#[cfg(not(feature = "avahi"))]
let mdns_fut = futures::future::ready(());
mdns_fut
},
{
let interfaces =
interfaces
.clone()
.into_iter()
.filter_map(|(id, interface, tor_key)| {
interface.lan_config.as_ref().map(|cfg| {
(
id,
InterfaceMetadata {
fqdn: OnionAddressV3::from(&tor_key.public())
.get_address_without_dot_onion()
+ ".local",
lan_config: cfg.clone(),
protocols: interface.protocols.clone(),
},
) )
}) .await?;
}); self.os_bindings
self.proxy .push(self.dns.add(None, HOST_IP.into()).await?);
.add_docker_service(pkg_id.clone(), ip, interfaces)
}, // LAN IP
self.dns.add(pkg_id, ip), self.os_bindings.push(
self.vhost
.add(key.clone(), None, 443, ([127, 0, 0, 1], 80).into(), false)
.await?,
);
// localhost
self.os_bindings.push(
self.vhost
.add(
key.clone(),
Some("localhost".into()),
443,
([127, 0, 0, 1], 80).into(),
false,
)
.await?,
);
self.os_bindings.push(
self.vhost
.add(
key.clone(),
Some(hostname.no_dot_host_name()),
443,
([127, 0, 0, 1], 80).into(),
false,
)
.await?,
);
// LAN mDNS
self.os_bindings.push(
self.vhost
.add(
key.clone(),
Some(hostname.local_domain_name()),
443,
([127, 0, 0, 1], 80).into(),
false,
)
.await?,
);
// Tor (http)
self.os_bindings.push(
self.tor
.add(&key.tor_key(), 80, ([127, 0, 0, 1], 80).into())
.await?,
);
// Tor (https)
self.os_bindings.push(
self.vhost
.add(
key.clone(),
Some(key.tor_address().to_string()),
443,
([127, 0, 0, 1], 80).into(),
false,
)
.await?,
);
self.os_bindings.push(
self.tor
.add(&key.tor_key(), 443, ([127, 0, 0, 1], 443).into())
.await?,
); );
tor_res?;
proxy_res?;
Ok(()) Ok(())
} }
#[instrument(skip(self, interfaces))] #[instrument(skip(self))]
pub async fn remove<I: IntoIterator<Item = InterfaceId> + Clone>( pub async fn create_service(
&self, self: &Arc<Self>,
pkg_id: &PackageId, package: PackageId,
ip: Ipv4Addr, ip: Ipv4Addr,
interfaces: I, ) -> Result<NetService, Error> {
) -> Result<(), Error> { let dns = self.dns.add(Some(package.clone()), ip).await?;
let (tor_res, _, proxy_res, _) = tokio::join!(
self.tor.remove(pkg_id, interfaces.clone()), Ok(NetService {
{ id: package,
#[cfg(feature = "avahi")] ip,
let mdns_fut = self.mdns.remove(pkg_id, interfaces); dns,
#[cfg(not(feature = "avahi"))] controller: Arc::downgrade(self),
let mdns_fut = futures::future::ready(()); tor: BTreeMap::new(),
mdns_fut lan: BTreeMap::new(),
}, })
self.proxy.remove_docker_service(pkg_id),
self.dns.remove(pkg_id, ip),
);
tor_res?;
proxy_res?;
Ok(())
} }
pub async fn generate_certificate_mountpoint<'a, I>( async fn add_tor(
&self, &self,
pkg_id: &PackageId, key: &Key,
interfaces: &I, external: u16,
) -> Result<GeneratedCertificateMountPoint, Error> target: SocketAddr,
where ) -> Result<Vec<Arc<()>>, Error> {
I: IntoIterator<Item = (InterfaceId, &'a Interface, TorSecretKeyV3)> + Clone, let mut rcs = Vec::with_capacity(1);
for<'b> &'b I: IntoIterator<Item = &'b (InterfaceId, &'a Interface, TorSecretKeyV3)>, rcs.push(self.tor.add(&key.tor_key(), external, target).await?);
{ Ok(rcs)
tracing::info!("Generating SSL Certificate mountpoints for {}", pkg_id);
let package_path = PathBuf::from(PACKAGE_CERT_PATH).join(pkg_id);
tokio::fs::create_dir_all(&package_path).await?;
for (id, _, key) in interfaces {
let dns_base = OnionAddressV3::from(&key.public()).get_address_without_dot_onion();
let ssl_path_key = package_path.join(format!("{}.key.pem", id));
let ssl_path_cert = package_path.join(format!("{}.cert.pem", id));
let (key, chain) = self.ssl.certificate_for(&dns_base, pkg_id).await?;
tokio::try_join!(
crate::net::ssl::export_key(&key, &ssl_path_key),
crate::net::ssl::export_cert(&chain, &ssl_path_cert)
)?;
}
Ok(GeneratedCertificateMountPoint(()))
} }
pub async fn export_root_ca(&self) -> Result<(PKey<Private>, X509), Error> { async fn remove_tor(&self, key: &Key, external: u16, rcs: Vec<Arc<()>>) -> Result<(), Error> {
self.ssl.export_root_ca().await drop(rcs);
self.tor.gc(&key.tor_key(), external).await
}
async fn add_lan(
&self,
key: Key,
external: u16,
target: SocketAddr,
connect_ssl: bool,
) -> Result<Vec<Arc<()>>, Error> {
let mut rcs = Vec::with_capacity(2);
rcs.push(
self.vhost
.add(
key.clone(),
Some(key.local_address()),
external,
target.into(),
connect_ssl,
)
.await?,
);
#[cfg(feature = "avahi")]
rcs.push(self.mdns.add(key.base_address()).await?);
Ok(rcs)
}
async fn remove_lan(&self, key: &Key, external: u16, rcs: Vec<Arc<()>>) -> Result<(), Error> {
drop(rcs);
#[cfg(feature = "avahi")]
self.mdns.gc(key.base_address()).await?;
self.vhost.gc(Some(key.local_address()), external).await
}
}
pub struct NetService {
id: PackageId,
ip: Ipv4Addr,
dns: Arc<()>,
controller: Weak<NetController>,
tor: BTreeMap<(InterfaceId, u16), (Key, Vec<Arc<()>>)>,
lan: BTreeMap<(InterfaceId, u16), (Key, Vec<Arc<()>>)>,
}
impl NetService {
fn net_controller(&self) -> Result<Arc<NetController>, Error> {
Weak::upgrade(&self.controller).ok_or_else(|| {
Error::new(
eyre!("NetController is shutdown"),
crate::ErrorKind::Network,
)
})
}
pub async fn add_tor<Ex>(
&mut self,
secrets: &mut Ex,
id: InterfaceId,
external: u16,
internal: u16,
) -> Result<(), Error>
where
for<'a> &'a mut Ex: PgExecutor<'a>,
{
let key = Key::for_interface(secrets, Some((self.id.clone(), id.clone()))).await?;
let ctrl = self.net_controller()?;
let tor_idx = (id, external);
let mut tor = self
.tor
.remove(&tor_idx)
.unwrap_or_else(|| (key.clone(), Vec::new()));
tor.1.append(
&mut ctrl
.add_tor(&key, external, SocketAddr::new(self.ip.into(), internal))
.await?,
);
self.tor.insert(tor_idx, tor);
Ok(())
}
pub async fn remove_tor(&mut self, id: InterfaceId, external: u16) -> Result<(), Error> {
let ctrl = self.net_controller()?;
if let Some((key, rcs)) = self.tor.remove(&(id, external)) {
ctrl.remove_tor(&key, external, rcs).await?;
}
Ok(())
}
pub async fn add_lan<Ex>(
&mut self,
secrets: &mut Ex,
id: InterfaceId,
external: u16,
internal: u16,
connect_ssl: bool,
) -> Result<(), Error>
where
for<'a> &'a mut Ex: PgExecutor<'a>,
{
let key = Key::for_interface(secrets, Some((self.id.clone(), id.clone()))).await?;
let ctrl = self.net_controller()?;
let lan_idx = (id, external);
let mut lan = self
.lan
.remove(&lan_idx)
.unwrap_or_else(|| (key.clone(), Vec::new()));
lan.1.append(
&mut ctrl
.add_lan(
key,
external,
SocketAddr::new(self.ip.into(), internal),
connect_ssl,
)
.await?,
);
self.lan.insert(lan_idx, lan);
Ok(())
}
pub async fn remove_lan(&mut self, id: InterfaceId, external: u16) -> Result<(), Error> {
let ctrl = self.net_controller()?;
if let Some((key, rcs)) = self.lan.remove(&(id, external)) {
ctrl.remove_lan(&key, external, rcs).await?;
}
Ok(())
}
pub async fn export_cert<Ex>(
&self,
secrets: &mut Ex,
id: &InterfaceId,
ip: IpAddr,
) -> Result<(), Error>
where
for<'a> &'a mut Ex: PgExecutor<'a>,
{
let key = Key::for_interface(secrets, Some((self.id.clone(), id.clone()))).await?;
let ctrl = self.net_controller()?;
let cert = ctrl.ssl.with_certs(key, ip).await?;
export_cert(&cert.fullchain_nistp256(), &cert_dir(&self.id, id)).await?; // TODO: can upgrade to ed25519?
Ok(())
}
pub async fn remove_all(mut self) -> Result<(), Error> {
let mut errors = ErrorCollection::new();
if let Some(ctrl) = Weak::upgrade(&self.controller) {
for ((_, external), (key, rcs)) in std::mem::take(&mut self.lan) {
errors.handle(ctrl.remove_lan(&key, external, rcs).await);
}
for ((_, external), (key, rcs)) in std::mem::take(&mut self.tor) {
errors.handle(ctrl.remove_tor(&key, external, rcs).await);
}
std::mem::take(&mut self.dns);
errors.handle(ctrl.dns.gc(Some(self.id.clone()), self.ip).await);
errors.into_result()
} else {
Err(Error::new(
eyre!("NetController is shutdown"),
crate::ErrorKind::Network,
))
}
}
}
impl Drop for NetService {
fn drop(&mut self) {
let svc = std::mem::replace(
self,
NetService {
id: Default::default(),
ip: [0, 0, 0, 0].into(),
dns: Default::default(),
controller: Default::default(),
tor: Default::default(),
lan: Default::default(),
},
);
tokio::spawn(async move { svc.remove_all().await.unwrap() });
} }
} }

View File

@@ -1,14 +1,12 @@
use std::fmt; use std::convert::Infallible;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr}; use std::net::{Ipv4Addr, Ipv6Addr};
use std::path::Path; use std::path::Path;
use std::str::FromStr;
use async_stream::try_stream; use async_stream::try_stream;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use futures::stream::BoxStream; use futures::stream::BoxStream;
use futures::{StreamExt, TryStreamExt}; use futures::{StreamExt, TryStreamExt};
use http::{Request, Uri}; use ipnet::{Ipv4Net, Ipv6Net};
use hyper::Body;
use tokio::process::Command; use tokio::process::Command;
use crate::util::Invoke; use crate::util::Invoke;
@@ -19,11 +17,7 @@ fn parse_iface_ip(output: &str) -> Result<Option<&str>, Error> {
if output.is_empty() { if output.is_empty() {
return Ok(None); return Ok(None);
} }
if let Some(ip) = output if let Some(ip) = output.split_ascii_whitespace().nth(3) {
.split_ascii_whitespace()
.nth(3)
.and_then(|range| range.split("/").next())
{
Ok(Some(ip)) Ok(Some(ip))
} else { } else {
Err(Error::new( Err(Error::new(
@@ -33,7 +27,7 @@ fn parse_iface_ip(output: &str) -> Result<Option<&str>, Error> {
} }
} }
pub async fn get_iface_ipv4_addr(iface: &str) -> Result<Option<Ipv4Addr>, Error> { pub async fn get_iface_ipv4_addr(iface: &str) -> Result<Option<(Ipv4Addr, Ipv4Net)>, Error> {
Ok(parse_iface_ip(&String::from_utf8( Ok(parse_iface_ip(&String::from_utf8(
Command::new("ip") Command::new("ip")
.arg("-4") .arg("-4")
@@ -44,11 +38,11 @@ pub async fn get_iface_ipv4_addr(iface: &str) -> Result<Option<Ipv4Addr>, Error>
.invoke(crate::ErrorKind::Network) .invoke(crate::ErrorKind::Network)
.await?, .await?,
)?)? )?)?
.map(|s| s.parse()) .map(|s| Ok::<_, Error>((s.split("/").next().unwrap().parse()?, s.parse()?)))
.transpose()?) .transpose()?)
} }
pub async fn get_iface_ipv6_addr(iface: &str) -> Result<Option<Ipv6Addr>, Error> { pub async fn get_iface_ipv6_addr(iface: &str) -> Result<Option<(Ipv6Addr, Ipv6Net)>, Error> {
Ok(parse_iface_ip(&String::from_utf8( Ok(parse_iface_ip(&String::from_utf8(
Command::new("ip") Command::new("ip")
.arg("-6") .arg("-6")
@@ -59,7 +53,7 @@ pub async fn get_iface_ipv6_addr(iface: &str) -> Result<Option<Ipv6Addr>, Error>
.invoke(crate::ErrorKind::Network) .invoke(crate::ErrorKind::Network)
.await?, .await?,
)?)? )?)?
.map(|s| s.parse()) .map(|s| Ok::<_, Error>((s.split("/").next().unwrap().parse()?, s.parse()?)))
.transpose()?) .transpose()?)
} }
@@ -110,132 +104,20 @@ pub async fn find_eth_iface() -> Result<String, Error> {
)) ))
} }
pub fn host_addr_fqdn(req: &Request<Body>) -> Result<ResourceFqdn, Error> { #[pin_project::pin_project]
let host = req.headers().get(http::header::HOST); pub struct SingleAccept<T>(Option<T>);
impl<T> SingleAccept<T> {
match host { pub fn new(conn: T) -> Self {
Some(host) => { Self(Some(conn))
let host_str = host
.to_str()
.map_err(|e| Error::new(eyre!("{}", e), crate::ErrorKind::Ascii))?
.to_string();
let host_uri: ResourceFqdn = host_str.split(':').next().unwrap().parse()?;
Ok(host_uri)
}
None => Err(Error::new(
eyre!("No Host header"),
crate::ErrorKind::MissingHeader,
)),
} }
} }
impl<T> hyper::server::accept::Accept for SingleAccept<T> {
#[derive(Eq, PartialEq, PartialOrd, Ord, Debug, Clone)] type Conn = T;
pub enum ResourceFqdn { type Error = Infallible;
IpAddr, fn poll_accept(
Uri { self: std::pin::Pin<&mut Self>,
full_uri: String, _cx: &mut std::task::Context<'_>,
root: String, ) -> std::task::Poll<Option<Result<Self::Conn, Self::Error>>> {
tld: Tld, std::task::Poll::Ready(self.project().0.take().map(Ok))
},
LocalHost,
}
impl fmt::Display for ResourceFqdn {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
ResourceFqdn::Uri {
full_uri,
root: _,
tld: _,
} => {
write!(f, "{}", full_uri)
}
ResourceFqdn::LocalHost => write!(f, "localhost"),
ResourceFqdn::IpAddr => write!(f, "ip-address"),
}
} }
} }
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone)]
pub enum Tld {
Local,
Onion,
Embassy,
}
impl fmt::Display for Tld {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Tld::Local => write!(f, ".local"),
Tld::Onion => write!(f, ".onion"),
Tld::Embassy => write!(f, ".embassy"),
}
}
}
impl FromStr for ResourceFqdn {
type Err = Error;
fn from_str(input: &str) -> Result<ResourceFqdn, Self::Err> {
if input.parse::<IpAddr>().is_ok() {
return Ok(ResourceFqdn::IpAddr);
}
if input == "localhost" {
return Ok(ResourceFqdn::LocalHost);
}
let hostname_split: Vec<&str> = input.split('.').collect();
if hostname_split.len() != 2 {
return Err(Error::new(
eyre!("invalid url tld number: add support for tldextract to parse complex urls like blah.domain.co.uk and etc?"),
crate::ErrorKind::ParseUrl,
));
}
match hostname_split[1] {
"local" => Ok(ResourceFqdn::Uri {
full_uri: input.to_owned(),
root: hostname_split[0].to_owned(),
tld: Tld::Local,
}),
"embassy" => Ok(ResourceFqdn::Uri {
full_uri: input.to_owned(),
root: hostname_split[0].to_owned(),
tld: Tld::Embassy,
}),
"onion" => Ok(ResourceFqdn::Uri {
full_uri: input.to_owned(),
root: hostname_split[0].to_owned(),
tld: Tld::Onion,
}),
_ => Err(Error::new(
eyre!("Unknown TLD for enum"),
crate::ErrorKind::ParseUrl,
)),
}
}
}
impl TryFrom<Uri> for ResourceFqdn {
type Error = Error;
fn try_from(value: Uri) -> Result<Self, Self::Error> {
Self::from_str(&value.to_string())
}
}
pub fn is_upgrade_req(req: &Request<Body>) -> bool {
req.headers()
.get("connection")
.and_then(|c| c.to_str().ok())
.map(|c| {
c.split(",")
.any(|c| c.trim().eq_ignore_ascii_case("upgrade"))
})
.unwrap_or(false)
}

View File

@@ -1,334 +0,0 @@
use std::collections::BTreeMap;
use std::net::{Ipv4Addr, SocketAddr};
use std::str::FromStr;
use std::sync::Arc;
use color_eyre::eyre::eyre;
use futures::FutureExt;
use http::uri::{Authority, Scheme};
use http::{Request, Response, Uri};
use hyper::{Body, Error as HyperError};
use models::{InterfaceId, PackageId};
use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
use tokio::sync::Mutex;
use tracing::{error, instrument};
use crate::net::net_utils::{is_upgrade_req, ResourceFqdn};
use crate::net::ssl::SslManager;
use crate::net::vhost_controller::VHOSTController;
use crate::net::{HttpHandler, InterfaceMetadata, PackageNetInfo};
use crate::{Error, ResultExt};
pub struct ProxyController {
inner: Mutex<ProxyControllerInner>,
}
impl ProxyController {
pub async fn init(
embassyd_socket_addr: SocketAddr,
embassy_fqdn: ResourceFqdn,
ssl_manager: SslManager,
) -> Result<Self, Error> {
Ok(ProxyController {
inner: Mutex::new(
ProxyControllerInner::init(embassyd_socket_addr, embassy_fqdn, ssl_manager).await?,
),
})
}
pub async fn add_docker_service<I: IntoIterator<Item = (InterfaceId, InterfaceMetadata)>>(
&self,
package: PackageId,
ipv4: Ipv4Addr,
interfaces: I,
) -> Result<(), Error> {
self.inner
.lock()
.await
.add_docker_service(package, ipv4, interfaces)
.await
}
pub async fn remove_docker_service(&self, package: &PackageId) -> Result<(), Error> {
self.inner.lock().await.remove_docker_service(package).await
}
pub async fn add_certificate_to_resolver(
&self,
fqdn: ResourceFqdn,
cert_data: (PKey<Private>, Vec<X509>),
) -> Result<(), Error> {
self.inner
.lock()
.await
.add_certificate_to_resolver(fqdn, cert_data)
.await
}
pub async fn add_handle(
&self,
ext_port: u16,
fqdn: ResourceFqdn,
handler: HttpHandler,
is_ssl: bool,
) -> Result<(), Error> {
self.inner
.lock()
.await
.add_handle(ext_port, fqdn, handler, is_ssl)
.await
}
pub async fn get_hostname(&self) -> String {
self.inner.lock().await.get_embassy_hostname()
}
async fn proxy(
client: &hyper::Client<hyper::client::HttpConnector>,
mut req: Request<Body>,
addr: SocketAddr,
) -> Result<Response<Body>, HyperError> {
let mut uri = std::mem::take(req.uri_mut()).into_parts();
uri.scheme = Some(Scheme::HTTP);
uri.authority = Authority::from_str(&addr.to_string()).ok();
match Uri::from_parts(uri) {
Ok(uri) => *req.uri_mut() = uri,
Err(e) => error!("Error rewriting uri: {}", e),
}
let addr = req.uri().to_string();
if is_upgrade_req(&req) {
let upgraded_req = hyper::upgrade::on(&mut req);
let mut res = client.request(req).await?;
let upgraded_res = hyper::upgrade::on(&mut res);
tokio::spawn(async move {
if let Err(e) = async {
let mut req = upgraded_req.await?;
let mut res = upgraded_res.await?;
tokio::io::copy_bidirectional(&mut req, &mut res).await?;
Ok::<_, color_eyre::eyre::Report>(())
}
.await
{
error!("error binding together tcp streams for {}: {}", addr, e);
}
});
Ok(res)
} else {
client.request(req).await
}
}
}
struct ProxyControllerInner {
ssl_manager: SslManager,
vhosts: VHOSTController,
embassyd_fqdn: ResourceFqdn,
docker_interfaces: BTreeMap<PackageId, PackageNetInfo>,
docker_iface_lookups: BTreeMap<(PackageId, InterfaceId), ResourceFqdn>,
}
impl ProxyControllerInner {
#[instrument]
async fn init(
embassyd_socket_addr: SocketAddr,
embassyd_fqdn: ResourceFqdn,
ssl_manager: SslManager,
) -> Result<Self, Error> {
let inner = ProxyControllerInner {
vhosts: VHOSTController::init(embassyd_socket_addr),
ssl_manager,
embassyd_fqdn,
docker_interfaces: BTreeMap::new(),
docker_iface_lookups: BTreeMap::new(),
};
Ok(inner)
}
async fn add_certificate_to_resolver(
&mut self,
hostname: ResourceFqdn,
cert_data: (PKey<Private>, Vec<X509>),
) -> Result<(), Error> {
self.vhosts
.cert_resolver
.add_certificate_to_resolver(hostname, cert_data)
.await
.map_err(|err| {
Error::new(
eyre!("Unable to add ssl cert to the resolver: {}", err),
crate::ErrorKind::Network,
)
})?;
Ok(())
}
async fn add_package_certificate_to_resolver(
&mut self,
resource_fqdn: ResourceFqdn,
pkg_id: PackageId,
) -> Result<(), Error> {
let package_cert = match resource_fqdn.clone() {
ResourceFqdn::IpAddr => {
return Err(Error::new(
eyre!("ssl not supported for ip addresses"),
crate::ErrorKind::Network,
))
}
ResourceFqdn::Uri {
full_uri: _,
root,
tld: _,
} => self.ssl_manager.certificate_for(&root, &pkg_id).await?,
ResourceFqdn::LocalHost => {
return Err(Error::new(
eyre!("ssl not supported for localhost"),
crate::ErrorKind::Network,
))
}
};
self.vhosts
.cert_resolver
.add_certificate_to_resolver(resource_fqdn, package_cert)
.await
.map_err(|err| {
Error::new(
eyre!("Unable to add ssl cert to the resolver: {}", err),
crate::ErrorKind::Network,
)
})?;
Ok(())
}
pub async fn add_handle(
&mut self,
external_svc_port: u16,
fqdn: ResourceFqdn,
svc_handler: HttpHandler,
is_ssl: bool,
) -> Result<(), Error> {
self.vhosts
.add_server_or_handle(external_svc_port, fqdn, svc_handler, is_ssl)
.await
}
#[instrument(skip(self, interfaces))]
pub async fn add_docker_service<I: IntoIterator<Item = (InterfaceId, InterfaceMetadata)>>(
&mut self,
package: PackageId,
docker_ipv4: Ipv4Addr,
interfaces: I,
) -> Result<(), Error> {
let mut interface_map = interfaces
.into_iter()
.filter(|(_, meta)| {
// don't add stuff for anything we can't connect to over some flavor of http
(meta.protocols.contains("http") || meta.protocols.contains("https"))
// also don't add anything unless it has at least one exposed port
&& !meta.lan_config.is_empty()
})
.collect::<BTreeMap<InterfaceId, InterfaceMetadata>>();
for (id, meta) in interface_map.iter() {
for (external_svc_port, lan_port_config) in meta.lan_config.iter() {
let full_fqdn = ResourceFqdn::from_str(&meta.fqdn).unwrap();
self.docker_iface_lookups
.insert((package.clone(), id.clone()), full_fqdn.clone());
self.add_package_certificate_to_resolver(full_fqdn.clone(), package.clone())
.await?;
let svc_handler =
Self::create_docker_handle((docker_ipv4, lan_port_config.internal).into())
.await;
self.add_handle(
external_svc_port.0,
full_fqdn.clone(),
svc_handler,
lan_port_config.ssl,
)
.await?;
}
}
let docker_interface = self.docker_interfaces.entry(package.clone()).or_default();
docker_interface.interfaces.append(&mut interface_map);
Ok(())
}
async fn create_docker_handle(internal_addr: SocketAddr) -> HttpHandler {
let svc_handler: HttpHandler = Arc::new(move |req| {
let client = hyper::client::Client::builder()
.set_host(false)
.build_http();
async move { ProxyController::proxy(&client, req, internal_addr).await }.boxed()
});
svc_handler
}
#[instrument(skip(self))]
pub async fn remove_docker_service(&mut self, package: &PackageId) -> Result<(), Error> {
let mut server_removals: Vec<(u16, InterfaceId)> = Default::default();
let net_info = match self.docker_interfaces.get(package) {
Some(a) => a,
None => return Ok(()),
};
for (id, meta) in &net_info.interfaces {
for (service_ext_port, _lan_port_config) in meta.lan_config.iter() {
if let Some(server) = self.vhosts.service_servers.get_mut(&service_ext_port.0) {
if let Some(fqdn) = self
.docker_iface_lookups
.get(&(package.clone(), id.clone()))
{
server.remove_svc_handler_mapping(fqdn.to_owned()).await?;
self.vhosts
.cert_resolver
.remove_cert(fqdn.to_owned())
.await?;
let mapping = server.svc_mapping.read().await;
if mapping.is_empty() {
server_removals.push((service_ext_port.0, id.to_owned()));
}
}
}
}
}
for (port, interface_id) in server_removals {
if let Some(removed_server) = self.vhosts.service_servers.remove(&port) {
removed_server.shutdown.send(()).map_err(|_| {
Error::new(
eyre!("Hyper server did not quit properly"),
crate::ErrorKind::Unknown,
)
})?;
removed_server
.handle
.await
.with_kind(crate::ErrorKind::Unknown)?;
self.docker_interfaces.remove(&package.clone());
self.docker_iface_lookups
.remove(&(package.clone(), interface_id));
}
}
Ok(())
}
pub fn get_embassy_hostname(&self) -> String {
self.embassyd_fqdn.to_string()
}
}

View File

@@ -1,7 +1,8 @@
use std::cmp::Ordering; use std::cmp::Ordering;
use std::collections::{BTreeMap, BTreeSet};
use std::net::IpAddr;
use std::path::Path; use std::path::Path;
use color_eyre::eyre::eyre;
use futures::FutureExt; use futures::FutureExt;
use openssl::asn1::{Asn1Integer, Asn1Time}; use openssl::asn1::{Asn1Integer, Asn1Time};
use openssl::bn::{BigNum, MsbOption}; use openssl::bn::{BigNum, MsbOption};
@@ -11,147 +12,109 @@ use openssl::nid::Nid;
use openssl::pkey::{PKey, Private}; use openssl::pkey::{PKey, Private};
use openssl::x509::{X509Builder, X509Extension, X509NameBuilder, X509}; use openssl::x509::{X509Builder, X509Extension, X509NameBuilder, X509};
use openssl::*; use openssl::*;
use patch_db::DbHandle; use tokio::sync::{Mutex, RwLock};
use sqlx::PgPool;
use tokio::process::Command;
use tokio::sync::Mutex;
use tracing::instrument; use tracing::instrument;
use crate::s9pk::manifest::PackageId; use crate::account::AccountInfo;
use crate::util::Invoke; use crate::hostname::Hostname;
use crate::net::dhcp::ips;
use crate::net::keys::{Key, KeyInfo};
use crate::{Error, ErrorKind, ResultExt}; use crate::{Error, ErrorKind, ResultExt};
static CERTIFICATE_VERSION: i32 = 2; // X509 version 3 is actually encoded as '2' in the cert because fuck you. static CERTIFICATE_VERSION: i32 = 2; // X509 version 3 is actually encoded as '2' in the cert because fuck you.
pub const ROOT_CA_STATIC_PATH: &str = "/var/lib/embassy/ssl/root-ca.crt";
#[derive(Debug, Clone)] #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct CertPair {
pub ed25519: X509,
pub nistp256: X509,
}
impl CertPair {
fn updated(
pair: Option<&Self>,
hostname: &Hostname,
signer: (&PKey<Private>, &X509),
applicant: &Key,
ip: BTreeSet<IpAddr>,
) -> Result<(Self, bool), Error> {
let mut updated = false;
let mut updated_cert = |cert: Option<&X509>, osk: PKey<Private>| -> Result<X509, Error> {
let mut ips = BTreeSet::new();
if let Some(cert) = cert {
ips.extend(
cert.subject_alt_names()
.iter()
.flatten()
.filter_map(|a| a.ipaddress())
.filter_map(|a| match a.len() {
4 => Some::<IpAddr>(<[u8; 4]>::try_from(a).unwrap().into()),
16 => Some::<IpAddr>(<[u8; 16]>::try_from(a).unwrap().into()),
_ => None,
}),
);
if cert
.not_after()
.compare(Asn1Time::days_from_now(30)?.as_ref())?
== Ordering::Greater
&& ips.is_superset(&ip)
{
return Ok(cert.clone());
}
}
ips.extend(ip.iter().copied());
updated = true;
make_leaf_cert(signer, (&osk, &SANInfo::new(&applicant, hostname, ips)))
};
Ok((
Self {
ed25519: updated_cert(pair.map(|c| &c.ed25519), applicant.openssl_key_ed25519())?,
nistp256: updated_cert(
pair.map(|c| &c.nistp256),
applicant.openssl_key_nistp256(),
)?,
},
updated,
))
}
}
#[derive(Debug)]
pub struct SslManager { pub struct SslManager {
store: SslStore, hostname: Hostname,
root_cert: X509, root_cert: X509,
int_key: PKey<Private>, int_key: PKey<Private>,
int_cert: X509, int_cert: X509,
cert_cache: RwLock<BTreeMap<Key, CertPair>>,
} }
impl SslManager {
pub fn new(account: &AccountInfo) -> Result<Self, Error> {
let int_key = generate_key()?;
let int_cert = make_int_cert((&account.root_ca_key, &account.root_ca_cert), &int_key)?;
Ok(Self {
hostname: account.hostname.clone(),
root_cert: account.root_ca_cert.clone(),
int_key,
int_cert,
cert_cache: RwLock::new(BTreeMap::new()),
})
}
pub async fn with_certs(&self, key: Key, ip: IpAddr) -> Result<KeyInfo, Error> {
let mut ips = ips().await?;
ips.insert(ip);
let (pair, updated) = CertPair::updated(
self.cert_cache.read().await.get(&key),
&self.hostname,
(&self.int_key, &self.int_cert),
&key,
ips,
)?;
if updated {
self.cert_cache
.write()
.await
.insert(key.clone(), pair.clone());
}
#[derive(Debug, Clone)] Ok(key.with_certs(pair, self.int_cert.clone(), self.root_cert.clone()))
struct SslStore {
secret_store: PgPool,
}
impl SslStore {
fn new(db: PgPool) -> Result<Self, Error> {
Ok(SslStore { secret_store: db })
}
#[instrument(skip(self))]
async fn save_root_certificate(&self, key: &PKey<Private>, cert: &X509) -> Result<(), Error> {
let key_str = String::from_utf8(key.private_key_to_pem_pkcs8()?)?;
let cert_str = String::from_utf8(cert.to_pem()?)?;
let _n = sqlx::query!("INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (0, $1, $2, NULL, now(), now())", key_str, cert_str).execute(&self.secret_store).await?;
Ok(())
}
#[instrument(skip(self))]
async fn load_root_certificate(&self) -> Result<Option<(PKey<Private>, X509)>, Error> {
let m_row =
sqlx::query!("SELECT priv_key_pem, certificate_pem FROM certificates WHERE id = 0;")
.fetch_optional(&self.secret_store)
.await?;
match m_row {
None => Ok(None),
Some(row) => {
let priv_key = PKey::private_key_from_pem(&row.priv_key_pem.into_bytes())?;
let certificate = X509::from_pem(&row.certificate_pem.into_bytes())?;
Ok(Some((priv_key, certificate)))
}
}
}
#[instrument(skip(self))]
async fn save_intermediate_certificate(
&self,
key: &PKey<Private>,
cert: &X509,
) -> Result<(), Error> {
let key_str = String::from_utf8(key.private_key_to_pem_pkcs8()?)?;
let cert_str = String::from_utf8(cert.to_pem()?)?;
let _n = sqlx::query!("INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (1, $1, $2, NULL, now(), now())", key_str, cert_str).execute(&self.secret_store).await?;
Ok(())
}
async fn load_intermediate_certificate(&self) -> Result<Option<(PKey<Private>, X509)>, Error> {
let m_row =
sqlx::query!("SELECT priv_key_pem, certificate_pem FROM certificates WHERE id = 1;")
.fetch_optional(&self.secret_store)
.await?;
match m_row {
None => Ok(None),
Some(row) => {
let priv_key = PKey::private_key_from_pem(&row.priv_key_pem.into_bytes())?;
let certificate = X509::from_pem(&row.certificate_pem.into_bytes())?;
Ok(Some((priv_key, certificate)))
}
}
}
#[instrument(skip(self))]
async fn import_root_certificate(
&self,
root_key: &PKey<Private>,
root_cert: &X509,
) -> Result<(), Error> {
// remove records for both root and intermediate CA
sqlx::query!("DELETE FROM certificates WHERE id = 0 OR id = 1;")
.execute(&self.secret_store)
.await?;
self.save_root_certificate(root_key, root_cert).await?;
Ok(())
}
#[instrument(skip(self))]
async fn save_certificate(
&self,
key: &PKey<Private>,
cert: &X509,
lookup_string: &str,
) -> Result<(), Error> {
let key_str = String::from_utf8(key.private_key_to_pem_pkcs8()?)?;
let cert_str = String::from_utf8(cert.to_pem()?)?;
let _n = sqlx::query!("INSERT INTO certificates (priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES ($1, $2, $3, now(), now())", key_str, cert_str, lookup_string).execute(&self.secret_store).await?;
Ok(())
}
async fn load_certificate(
&self,
lookup_string: &str,
) -> Result<Option<(PKey<Private>, X509)>, Error> {
let m_row = sqlx::query!(
"SELECT priv_key_pem, certificate_pem FROM certificates WHERE lookup_string = $1",
lookup_string
)
.fetch_optional(&self.secret_store)
.await?;
match m_row {
None => Ok(None),
Some(row) => {
let priv_key = PKey::private_key_from_pem(&row.priv_key_pem.into_bytes())?;
let certificate = X509::from_pem(&row.certificate_pem.into_bytes())?;
Ok(Some((priv_key, certificate)))
}
}
}
#[instrument(skip(self))]
async fn update_certificate(
&self,
key: &PKey<Private>,
cert: &X509,
lookup_string: &str,
) -> Result<(), Error> {
let key_str = String::from_utf8(key.private_key_to_pem_pkcs8()?)?;
let cert_str = String::from_utf8(cert.to_pem()?)?;
let n = sqlx::query!("UPDATE certificates SET priv_key_pem = $1, certificate_pem = $2, updated_at = now() WHERE lookup_string = $3", key_str, cert_str, lookup_string)
.execute(&self.secret_store).await?;
if n.rows_affected() == 0 {
return Err(Error::new(
eyre!(
"Attempted to update non-existent certificate: {}",
lookup_string
),
ErrorKind::OpenSsl,
));
}
Ok(())
} }
} }
@@ -161,150 +124,13 @@ lazy_static::lazy_static! {
static ref SSL_MUTEX: Mutex<()> = Mutex::new(()); // TODO: make thread safe static ref SSL_MUTEX: Mutex<()> = Mutex::new(()); // TODO: make thread safe
} }
impl SslManager {
#[instrument(skip(db, handle))]
pub async fn init<Db: DbHandle>(db: PgPool, handle: &mut Db) -> Result<Self, Error> {
let store = SslStore::new(db)?;
let receipts = crate::hostname::HostNameReceipt::new(handle).await?;
let id = crate::hostname::get_id(handle, &receipts).await?;
let (root_key, root_cert) = match store.load_root_certificate().await? {
None => {
let root_key = generate_key()?;
let server_id = id;
let root_cert = make_root_cert(&root_key, &server_id)?;
store.save_root_certificate(&root_key, &root_cert).await?;
Ok::<_, Error>((root_key, root_cert))
}
Some((key, cert)) => Ok((key, cert)),
}?;
// generate static file for download, this will gte blown up on embassy restart so it's good to write it on
// every ssl manager init
tokio::fs::create_dir_all(
Path::new(ROOT_CA_STATIC_PATH)
.parent()
.unwrap_or(Path::new("/")),
)
.await?;
tokio::fs::write(ROOT_CA_STATIC_PATH, root_cert.to_pem()?).await?;
// write to ca cert store
tokio::fs::write(
"/usr/local/share/ca-certificates/embassy-root-ca.crt",
root_cert.to_pem()?,
)
.await?;
Command::new("update-ca-certificates")
.invoke(crate::ErrorKind::OpenSsl)
.await?;
let (int_key, int_cert) = match store.load_intermediate_certificate().await? {
None => {
let int_key = generate_key()?;
let int_cert = make_int_cert((&root_key, &root_cert), &int_key)?;
store
.save_intermediate_certificate(&int_key, &int_cert)
.await?;
Ok::<_, Error>((int_key, int_cert))
}
Some((key, cert)) => Ok((key, cert)),
}?;
sqlx::query!("SELECT setval('certificates_id_seq', GREATEST(MAX(id) + 1, nextval('certificates_id_seq') - 1)) FROM certificates")
.fetch_one(&store.secret_store).await?;
Ok(SslManager {
store,
root_cert,
int_key,
int_cert,
})
}
// TODO: currently the burden of proof is on the caller to ensure that all of the arguments to this function are
// consistent. The following properties are assumed and not verified:
// 1. `root_cert` is self-signed and contains the public key that matches the private key `root_key`
// 2. certificate is not past its expiration date
// Warning: If this function ever fails, you must either call it again or regenerate your certificates from scratch
// since it is possible for it to fail after successfully saving the root certificate but before successfully saving
// the intermediate certificate
#[instrument(skip(db))]
pub async fn import_root_ca(
db: PgPool,
root_key: PKey<Private>,
root_cert: X509,
) -> Result<Self, Error> {
let store = SslStore::new(db)?;
store.import_root_certificate(&root_key, &root_cert).await?;
let int_key = generate_key()?;
let int_cert = make_int_cert((&root_key, &root_cert), &int_key)?;
store
.save_intermediate_certificate(&int_key, &int_cert)
.await?;
Ok(SslManager {
store,
root_cert,
int_key,
int_cert,
})
}
#[instrument(skip(self))]
pub async fn export_root_ca(&self) -> Result<(PKey<Private>, X509), Error> {
match self.store.load_root_certificate().await? {
None => Err(Error::new(
eyre!("Failed to export root certificate: root certificate has not been generated"),
ErrorKind::OpenSsl,
)),
Some(a) => Ok(a),
}
}
#[instrument(skip(self))]
pub async fn certificate_for(
&self,
dns_base: &str,
package_id: &PackageId,
) -> Result<(PKey<Private>, Vec<X509>), Error> {
let (key, cert) = match self.store.load_certificate(dns_base).await? {
None => {
let key = generate_key()?;
let cert = make_leaf_cert(
(&self.int_key, &self.int_cert),
(&key, dns_base, package_id),
)?;
self.store.save_certificate(&key, &cert, dns_base).await?;
Ok::<_, Error>((key, cert))
}
Some((key, cert)) => {
let window_end = Asn1Time::days_from_now(30)?;
let expiration = cert.not_after();
if expiration.compare(&window_end)? == Ordering::Less {
let key = generate_key()?;
let cert = make_leaf_cert(
(&self.int_key, &self.int_cert),
(&key, dns_base, package_id),
)?;
self.store.update_certificate(&key, &cert, dns_base).await?;
Ok((key, cert))
} else {
Ok((key, cert))
}
}
}?;
Ok((
key,
vec![cert, self.int_cert.clone(), self.root_cert.clone()],
))
}
}
pub async fn export_key(key: &PKey<Private>, target: &Path) -> Result<(), Error> { pub async fn export_key(key: &PKey<Private>, target: &Path) -> Result<(), Error> {
tokio::fs::write(target, key.private_key_to_pem_pkcs8()?) tokio::fs::write(target, key.private_key_to_pem_pkcs8()?)
.map(|res| res.with_ctx(|_| (ErrorKind::Filesystem, target.display().to_string()))) .map(|res| res.with_ctx(|_| (ErrorKind::Filesystem, target.display().to_string())))
.await?; .await?;
Ok(()) Ok(())
} }
pub async fn export_cert(chain: &Vec<X509>, target: &Path) -> Result<(), Error> { pub async fn export_cert(chain: &[&X509], target: &Path) -> Result<(), Error> {
tokio::fs::write( tokio::fs::write(
target, target,
chain chain
@@ -315,6 +141,7 @@ pub async fn export_cert(chain: &Vec<X509>, target: &Path) -> Result<(), Error>
.await?; .await?;
Ok(()) Ok(())
} }
#[instrument] #[instrument]
fn rand_serial() -> Result<Asn1Integer, Error> { fn rand_serial() -> Result<Asn1Integer, Error> {
let mut bn = BigNum::new()?; let mut bn = BigNum::new()?;
@@ -323,13 +150,14 @@ fn rand_serial() -> Result<Asn1Integer, Error> {
Ok(asn1) Ok(asn1)
} }
#[instrument] #[instrument]
fn generate_key() -> Result<PKey<Private>, Error> { pub fn generate_key() -> Result<PKey<Private>, Error> {
let new_key = EcKey::generate(EC_GROUP.as_ref())?; let new_key = EcKey::generate(EC_GROUP.as_ref())?;
let key = PKey::from_ec_key(new_key)?; let key = PKey::from_ec_key(new_key)?;
Ok(key) Ok(key)
} }
#[instrument] #[instrument]
fn make_root_cert(root_key: &PKey<Private>, server_id: &str) -> Result<X509, Error> { pub fn make_root_cert(root_key: &PKey<Private>, hostname: &Hostname) -> Result<X509, Error> {
let mut builder = X509Builder::new()?; let mut builder = X509Builder::new()?;
builder.set_version(CERTIFICATE_VERSION)?; builder.set_version(CERTIFICATE_VERSION)?;
@@ -342,8 +170,7 @@ fn make_root_cert(root_key: &PKey<Private>, server_id: &str) -> Result<X509, Err
builder.set_serial_number(&*rand_serial()?)?; builder.set_serial_number(&*rand_serial()?)?;
let mut subject_name_builder = X509NameBuilder::new()?; let mut subject_name_builder = X509NameBuilder::new()?;
subject_name_builder subject_name_builder.append_entry_by_text("CN", &format!("{} Local Root CA", &*hostname.0))?;
.append_entry_by_text("CN", &format!("Embassy Local Root CA ({})", server_id))?;
subject_name_builder.append_entry_by_text("O", "Start9")?; subject_name_builder.append_entry_by_text("O", "Start9")?;
subject_name_builder.append_entry_by_text("OU", "Embassy")?; subject_name_builder.append_entry_by_text("OU", "Embassy")?;
let subject_name = subject_name_builder.build(); let subject_name = subject_name_builder.build();
@@ -381,7 +208,7 @@ fn make_root_cert(root_key: &PKey<Private>, server_id: &str) -> Result<X509, Err
Ok(cert) Ok(cert)
} }
#[instrument] #[instrument]
fn make_int_cert( pub fn make_int_cert(
signer: (&PKey<Private>, &X509), signer: (&PKey<Private>, &X509),
applicant: &PKey<Private>, applicant: &PKey<Private>,
) -> Result<X509, Error> { ) -> Result<X509, Error> {
@@ -442,10 +269,52 @@ fn make_int_cert(
Ok(cert) Ok(cert)
} }
#[derive(Debug)]
pub struct SANInfo {
pub dns: BTreeSet<String>,
pub ips: BTreeSet<IpAddr>,
}
impl SANInfo {
pub fn new(key: &Key, hostname: &Hostname, ips: BTreeSet<IpAddr>) -> Self {
let mut dns = BTreeSet::new();
if let Some((id, _)) = key.interface() {
dns.insert(format!("{id}.embassy"));
dns.insert(key.local_address().to_string());
} else {
dns.insert("embassy".to_owned());
dns.insert(hostname.local_domain_name());
dns.insert(hostname.no_dot_host_name());
dns.insert("localhost".to_owned());
}
dns.insert(key.tor_address().to_string());
Self { dns, ips }
}
}
impl std::fmt::Display for SANInfo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut written = false;
for dns in &self.dns {
if written {
write!(f, ",")?;
}
written = true;
write!(f, "DNS:{dns},DNS:*.{dns}")?;
}
for ip in &self.ips {
if written {
write!(f, ",")?;
}
written = true;
write!(f, "IP:{ip}")?;
}
Ok(())
}
}
#[instrument] #[instrument]
fn make_leaf_cert( pub fn make_leaf_cert(
signer: (&PKey<Private>, &X509), signer: (&PKey<Private>, &X509),
applicant: (&PKey<Private>, &str, &PackageId), applicant: (&PKey<Private>, &SANInfo),
) -> Result<X509, Error> { ) -> Result<X509, Error> {
let mut builder = X509Builder::new()?; let mut builder = X509Builder::new()?;
builder.set_version(CERTIFICATE_VERSION)?; builder.set_version(CERTIFICATE_VERSION)?;
@@ -461,7 +330,15 @@ fn make_leaf_cert(
builder.set_serial_number(&*rand_serial()?)?; builder.set_serial_number(&*rand_serial()?)?;
let mut subject_name_builder = X509NameBuilder::new()?; let mut subject_name_builder = X509NameBuilder::new()?;
subject_name_builder.append_entry_by_text("CN", &format!("{}.local", &applicant.1))?; subject_name_builder.append_entry_by_text(
"CN",
applicant
.1
.dns
.first()
.map(String::as_str)
.unwrap_or("localhost"),
)?;
subject_name_builder.append_entry_by_text("O", "Start9")?; subject_name_builder.append_entry_by_text("O", "Start9")?;
subject_name_builder.append_entry_by_text("OU", "Embassy")?; subject_name_builder.append_entry_by_text("OU", "Embassy")?;
let subject_name = subject_name_builder.build(); let subject_name = subject_name_builder.build();
@@ -493,15 +370,9 @@ fn make_leaf_cert(
"critical,digitalSignature,keyEncipherment", "critical,digitalSignature,keyEncipherment",
)?; )?;
let subject_alt_name = X509Extension::new_nid( let san_string = applicant.1.to_string();
Some(&cfg), let subject_alt_name =
Some(&ctx), X509Extension::new_nid(Some(&cfg), Some(&ctx), Nid::SUBJECT_ALT_NAME, &san_string)?;
Nid::SUBJECT_ALT_NAME,
&format!(
"DNS:{}.local,DNS:*.{}.local,DNS:{}.onion,DNS:*.{}.onion,DNS:{}.embassy,DNS:*.{}.embassy",
&applicant.1, &applicant.1, &applicant.1, &applicant.1, &applicant.2, &applicant.2,
),
)?;
builder.append_extension(subject_key_identifier)?; builder.append_extension(subject_key_identifier)?;
builder.append_extension(authority_key_identifier)?; builder.append_extension(authority_key_identifier)?;
builder.append_extension(subject_alt_name)?; builder.append_extension(subject_alt_name)?;

View File

@@ -1,5 +1,5 @@
use std::fs::Metadata; use std::fs::Metadata;
use std::path::{Path, PathBuf}; use std::path::Path;
use std::sync::Arc; use std::sync::Arc;
use std::time::UNIX_EPOCH; use std::time::UNIX_EPOCH;
@@ -9,8 +9,11 @@ use color_eyre::eyre::eyre;
use digest::Digest; use digest::Digest;
use futures::FutureExt; use futures::FutureExt;
use http::header::ACCEPT_ENCODING; use http::header::ACCEPT_ENCODING;
use http::header::CONTENT_ENCODING;
use http::response::Builder; use http::response::Builder;
use hyper::{Body, Method, Request, Response, StatusCode}; use hyper::{Body, Method, Request, Response, StatusCode};
use openssl::hash::MessageDigest;
use openssl::x509::X509;
use rpc_toolkit::rpc_handler; use rpc_toolkit::rpc_handler;
use tokio::fs::File; use tokio::fs::File;
use tokio::io::BufReader; use tokio::io::BufReader;
@@ -249,7 +252,12 @@ async fn alt_ui(req: Request<Body>, ui_mode: UiMode) -> Result<Response<Body>, E
let full_path = Path::new(selected_root_dir).join(uri_path); let full_path = Path::new(selected_root_dir).join(uri_path);
file_send( file_send(
if tokio::fs::metadata(&full_path).await.is_ok() { if tokio::fs::metadata(&full_path)
.await
.ok()
.map(|f| f.is_file())
.unwrap_or(false)
{
full_path full_path
} else { } else {
Path::new(selected_root_dir).join("index.html") Path::new(selected_root_dir).join("index.html")
@@ -296,10 +304,7 @@ async fn main_embassy_ui(req: Request<Body>, ctx: RpcContext) -> Result<Response
.await .await
} else if let Ok(rest) = sub_path.strip_prefix("eos") { } else if let Ok(rest) = sub_path.strip_prefix("eos") {
match rest.to_str() { match rest.to_str() {
Some("local.crt") => { Some("local.crt") => cert_send(&ctx.account.read().await.root_ca_cert),
file_send(crate::net::ssl::ROOT_CA_STATIC_PATH, &accept_encoding)
.await
}
None => Ok(bad_request()), None => Ok(bad_request()),
_ => Ok(not_found()), _ => Ok(not_found()),
} }
@@ -312,13 +317,7 @@ async fn main_embassy_ui(req: Request<Body>, ctx: RpcContext) -> Result<Response
} }
(&Method::GET, Some(("eos", "local.crt"))) => { (&Method::GET, Some(("eos", "local.crt"))) => {
match HasValidSession::from_request_parts(&request_parts, &ctx).await { match HasValidSession::from_request_parts(&request_parts, &ctx).await {
Ok(_) => { Ok(_) => cert_send(&ctx.account.read().await.root_ca_cert),
file_send(
PathBuf::from(crate::net::ssl::ROOT_CA_STATIC_PATH),
&accept_encoding,
)
.await
}
Err(e) => un_authorized(e, "eos/local.crt"), Err(e) => un_authorized(e, "eos/local.crt"),
} }
} }
@@ -331,7 +330,12 @@ async fn main_embassy_ui(req: Request<Body>, ctx: RpcContext) -> Result<Response
let full_path = Path::new(selected_root_dir).join(uri_path); let full_path = Path::new(selected_root_dir).join(uri_path);
file_send( file_send(
if tokio::fs::metadata(&full_path).await.is_ok() { if tokio::fs::metadata(&full_path)
.await
.ok()
.map(|f| f.is_file())
.unwrap_or(false)
{
full_path full_path
} else { } else {
Path::new(selected_root_dir).join("index.html") Path::new(selected_root_dir).join("index.html")
@@ -383,6 +387,24 @@ fn bad_request() -> Response<Body> {
.unwrap() .unwrap()
} }
fn cert_send(cert: &X509) -> Result<Response<Body>, Error> {
let pem = cert.to_pem()?;
Response::builder()
.status(StatusCode::OK)
.header(
http::header::ETAG,
base32::encode(
base32::Alphabet::RFC4648 { padding: false },
&*cert.digest(MessageDigest::sha256())?,
)
.to_lowercase(),
)
.header(http::header::CONTENT_TYPE, "application/x-pem-file")
.header(http::header::CONTENT_LENGTH, pem.len())
.body(Body::from(pem))
.with_kind(ErrorKind::Network)
}
async fn file_send( async fn file_send(
path: impl AsRef<Path>, path: impl AsRef<Path>,
accept_encoding: &[&str], accept_encoding: &[&str],
@@ -407,12 +429,14 @@ async fn file_send(
let mut builder = Response::builder().status(StatusCode::OK); let mut builder = Response::builder().status(StatusCode::OK);
builder = with_e_tag(path, &metadata, builder)?; builder = with_e_tag(path, &metadata, builder)?;
builder = with_content_type(path, builder); builder = with_content_type(path, builder);
builder = with_content_length(&metadata, builder); let body = if accept_encoding.contains(&"br") && metadata.len() > u16::MAX as u64 {
let body = if accept_encoding.contains(&"br") { builder = builder.header(CONTENT_ENCODING, "br");
Body::wrap_stream(ReaderStream::new(BrotliEncoder::new(BufReader::new(file)))) Body::wrap_stream(ReaderStream::new(BrotliEncoder::new(BufReader::new(file))))
} else if accept_encoding.contains(&"gzip") { } else if accept_encoding.contains(&"gzip") && metadata.len() > u16::MAX as u64 {
builder = builder.header(CONTENT_ENCODING, "gzip");
Body::wrap_stream(ReaderStream::new(GzipEncoder::new(BufReader::new(file)))) Body::wrap_stream(ReaderStream::new(GzipEncoder::new(BufReader::new(file))))
} else { } else {
builder = with_content_length(&metadata, builder);
Body::wrap_stream(ReaderStream::new(file)) Body::wrap_stream(ReaderStream::new(file))
}; };
builder.body(body).with_kind(ErrorKind::Network) builder.body(body).with_kind(ErrorKind::Network)
@@ -446,7 +470,7 @@ fn with_e_tag(path: &Path, metadata: &Metadata, builder: Builder) -> Result<Buil
); );
let res = hasher.finalize(); let res = hasher.finalize();
Ok(builder.header( Ok(builder.header(
"ETag", http::header::ETAG,
base32::encode(base32::Alphabet::RFC4648 { padding: false }, res.as_slice()).to_lowercase(), base32::encode(base32::Alphabet::RFC4648 { padding: false }, res.as_slice()).to_lowercase(),
)) ))
} }
@@ -476,7 +500,7 @@ fn with_content_type(path: &Path, builder: Builder) -> Builder {
}, },
None => "text/plain", None => "text/plain",
}; };
builder.header("Content-Type", content_type) builder.header(http::header::CONTENT_TYPE, content_type)
} }
fn with_content_length(metadata: &Metadata, builder: Builder) -> Builder { fn with_content_length(metadata: &Metadata, builder: Builder) -> Builder {

View File

@@ -1,27 +1,25 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::net::{Ipv4Addr, SocketAddr}; use std::net::SocketAddr;
use std::sync::{Arc, Weak};
use clap::ArgMatches; use clap::ArgMatches;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use futures::future::BoxFuture; use futures::future::BoxFuture;
use futures::FutureExt; use futures::FutureExt;
use rpc_toolkit::command; use rpc_toolkit::command;
use sqlx::{Executor, Postgres};
use tokio::net::TcpStream; use tokio::net::TcpStream;
use tokio::sync::Mutex; use tokio::sync::Mutex;
use torut::control::{AsyncEvent, AuthenticatedConn, ConnError}; use torut::control::{AsyncEvent, AuthenticatedConn, ConnError};
use torut::onion::{OnionAddressV3, TorSecretKeyV3}; use torut::onion::{OnionAddressV3, TorSecretKeyV3};
use tracing::instrument; use tracing::instrument;
use super::interface::{InterfaceId, TorConfig};
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::s9pk::manifest::PackageId;
use crate::util::serde::{display_serializable, IoFormat}; use crate::util::serde::{display_serializable, IoFormat};
use crate::{Error, ErrorKind, ResultExt as _}; use crate::{Error, ErrorKind, ResultExt as _};
#[test] #[test]
fn random_key() { fn random_key() {
println!("x'{}'", hex::encode(TorSecretKeyV3::generate().as_bytes())); println!("x'{}'", hex::encode(rand::random::<[u8; 32]>()));
} }
#[command(subcommands(list_services))] #[command(subcommands(list_services))]
@@ -54,64 +52,29 @@ pub async fn list_services(
ctx.net_controller.tor.list_services().await ctx.net_controller.tor.list_services().await
} }
#[instrument(skip(secrets))]
pub async fn os_key<Ex>(secrets: &mut Ex) -> Result<TorSecretKeyV3, Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
let key = sqlx::query!("SELECT tor_key FROM account")
.fetch_one(secrets)
.await?
.tor_key;
let mut buf = [0; 64];
buf.clone_from_slice(
key.get(0..64).ok_or_else(|| {
Error::new(eyre!("Invalid Tor Key Length"), crate::ErrorKind::Database)
})?,
);
Ok(buf.into())
}
fn event_handler(_event: AsyncEvent<'static>) -> BoxFuture<'static, Result<(), ConnError>> { fn event_handler(_event: AsyncEvent<'static>) -> BoxFuture<'static, Result<(), ConnError>> {
async move { Ok(()) }.boxed() async move { Ok(()) }.boxed()
} }
pub struct TorController(Mutex<TorControllerInner>); pub struct TorController(Mutex<TorControllerInner>);
impl TorController { impl TorController {
pub async fn init( pub async fn init(tor_control: SocketAddr) -> Result<Self, Error> {
embassyd_addr: SocketAddr,
embassyd_tor_key: TorSecretKeyV3,
tor_control: SocketAddr,
) -> Result<Self, Error> {
Ok(TorController(Mutex::new( Ok(TorController(Mutex::new(
TorControllerInner::init(embassyd_addr, embassyd_tor_key, tor_control).await?, TorControllerInner::init(tor_control).await?,
))) )))
} }
pub async fn add<I: IntoIterator<Item = (InterfaceId, TorConfig, TorSecretKeyV3)> + Clone>( pub async fn add(
&self, &self,
pkg_id: &PackageId, key: &TorSecretKeyV3,
ip: Ipv4Addr, external: u16,
interfaces: I, target: SocketAddr,
) -> Result<(), Error> { ) -> Result<Arc<()>, Error> {
self.0.lock().await.add(pkg_id, ip, interfaces).await self.0.lock().await.add(key, external, target).await
} }
pub async fn remove<I: IntoIterator<Item = InterfaceId> + Clone>( pub async fn gc(&self, key: &TorSecretKeyV3, external: u16) -> Result<(), Error> {
&self, self.0.lock().await.gc(key, external).await
pkg_id: &PackageId,
interfaces: I,
) -> Result<(), Error> {
self.0.lock().await.remove(pkg_id, interfaces).await
}
pub async fn embassyd_tor_key(&self) -> TorSecretKeyV3 {
self.0.lock().await.embassyd_tor_key.clone()
}
pub async fn embassyd_onion(&self) -> OnionAddressV3 {
self.0.lock().await.embassyd_onion()
} }
pub async fn list_services(&self) -> Result<Vec<OnionAddressV3>, Error> { pub async fn list_services(&self) -> Result<Vec<OnionAddressV3>, Error> {
@@ -124,92 +87,95 @@ type AuthenticatedConnection = AuthenticatedConn<
fn(AsyncEvent<'static>) -> BoxFuture<'static, Result<(), ConnError>>, fn(AsyncEvent<'static>) -> BoxFuture<'static, Result<(), ConnError>>,
>; >;
#[derive(Clone, Debug, PartialEq, Eq)]
struct HiddenServiceConfig {
ip: Ipv4Addr,
cfg: TorConfig,
}
pub struct TorControllerInner { pub struct TorControllerInner {
embassyd_addr: SocketAddr,
embassyd_tor_key: TorSecretKeyV3,
control_addr: SocketAddr, control_addr: SocketAddr,
connection: Option<AuthenticatedConnection>, connection: AuthenticatedConnection,
services: BTreeMap<(PackageId, InterfaceId), (TorSecretKeyV3, TorConfig, Ipv4Addr)>, services: BTreeMap<String, BTreeMap<u16, BTreeMap<SocketAddr, Weak<()>>>>,
} }
impl TorControllerInner { impl TorControllerInner {
#[instrument(skip(self, interfaces))] #[instrument(skip(self))]
async fn add<'a, I: IntoIterator<Item = (InterfaceId, TorConfig, TorSecretKeyV3)>>( async fn add(
&mut self, &mut self,
pkg_id: &PackageId, key: &TorSecretKeyV3,
ip: Ipv4Addr, external: u16,
interfaces: I, target: SocketAddr,
) -> Result<(), Error> { ) -> Result<Arc<()>, Error> {
for (interface_id, tor_cfg, key) in interfaces { let mut rm_res = Ok(());
let id = (pkg_id.clone(), interface_id); let onion_base = key
match self.services.get(&id) { .public()
Some(k) if k.0 != key => { .get_onion_address()
self.remove(pkg_id, std::iter::once(id.1.clone())).await?; .get_address_without_dot_onion();
} let mut service = if let Some(service) = self.services.remove(&onion_base) {
Some(_) => continue, rm_res = self.connection.del_onion(&onion_base).await;
None => (), service
} } else {
self.connection BTreeMap::new()
.as_mut() };
.ok_or_else(|| { let mut binding = service.remove(&external).unwrap_or_default();
Error::new(eyre!("Missing Tor Control Connection"), ErrorKind::Unknown) let rc = if let Some(rc) = Weak::upgrade(&binding.remove(&target).unwrap_or_default()) {
})? rc
.add_onion_v3( } else {
&key, Arc::new(())
false, };
false, binding.insert(target, Arc::downgrade(&rc));
false, service.insert(external, binding);
None, let bindings = service
&mut tor_cfg
.port_mapping
.iter() .iter()
.map(|(external, internal)| { .flat_map(|(ext, int)| {
(external.0, SocketAddr::from((ip, internal.0))) int.iter()
.find(|(_, rc)| rc.strong_count() > 0)
.map(|(addr, _)| (*ext, SocketAddr::from(*addr)))
}) })
.collect::<Vec<_>>() .collect::<Vec<_>>();
.iter(), self.services.insert(onion_base, service);
) rm_res?;
self.connection
.add_onion_v3(key, false, false, false, None, &mut bindings.iter())
.await?; .await?;
self.services.insert(id, (key, tor_cfg, ip)); Ok(rc)
}
Ok(())
} }
#[instrument(skip(self, interfaces))] #[instrument(skip(self))]
async fn remove<I: IntoIterator<Item = InterfaceId>>( async fn gc(&mut self, key: &TorSecretKeyV3, external: u16) -> Result<(), Error> {
&mut self, let onion_base = key
pkg_id: &PackageId, .public()
interfaces: I,
) -> Result<(), Error> {
for interface_id in interfaces {
if let Some((key, _cfg, _ip)) = self.services.remove(&(pkg_id.clone(), interface_id)) {
self.connection
.as_mut()
.ok_or_else(|| {
Error::new(eyre!("Missing Tor Control Connection"), ErrorKind::Tor)
})?
.del_onion(
&key.public()
.get_onion_address() .get_onion_address()
.get_address_without_dot_onion(), .get_address_without_dot_onion();
) if let Some(mut service) = self.services.remove(&onion_base) {
if let Some(mut binding) = service.remove(&external) {
binding = binding
.into_iter()
.filter(|(_, rc)| rc.strong_count() > 0)
.collect();
if !binding.is_empty() {
service.insert(external, binding);
}
}
let rm_res = self.connection.del_onion(&onion_base).await;
if !service.is_empty() {
let bindings = service
.iter()
.flat_map(|(ext, int)| {
int.iter()
.find(|(_, rc)| rc.strong_count() > 0)
.map(|(addr, _)| (*ext, SocketAddr::from(*addr)))
})
.collect::<Vec<_>>();
self.services.insert(onion_base, service);
rm_res?;
self.connection
.add_onion_v3(&key, false, false, false, None, &mut bindings.iter())
.await?; .await?;
} else {
rm_res?;
} }
} }
Ok(()) Ok(())
} }
#[instrument] #[instrument]
async fn init( async fn init(tor_control: SocketAddr) -> Result<Self, Error> {
embassyd_addr: SocketAddr,
embassyd_tor_key: TorSecretKeyV3,
tor_control: SocketAddr,
) -> Result<Self, Error> {
let mut conn = torut::control::UnauthenticatedConn::new( let mut conn = torut::control::UnauthenticatedConn::new(
TcpStream::connect(tor_control).await?, // TODO TcpStream::connect(tor_control).await?, // TODO
); );
@@ -223,51 +189,16 @@ impl TorControllerInner {
let mut connection: AuthenticatedConnection = conn.into_authenticated().await; let mut connection: AuthenticatedConnection = conn.into_authenticated().await;
connection.set_async_event_handler(Some(event_handler)); connection.set_async_event_handler(Some(event_handler));
let mut controller = TorControllerInner { Ok(Self {
embassyd_addr,
embassyd_tor_key,
control_addr: tor_control, control_addr: tor_control,
connection: Some(connection), connection,
services: BTreeMap::new(), services: BTreeMap::new(),
}; })
controller.add_embassyd_onion().await?;
Ok(controller)
}
#[instrument(skip(self))]
async fn add_embassyd_onion(&mut self) -> Result<(), Error> {
tracing::info!(
"Registering Main Tor Service: {}",
self.embassyd_tor_key.public().get_onion_address()
);
self.connection
.as_mut()
.ok_or_else(|| Error::new(eyre!("Missing Tor Control Connection"), ErrorKind::Tor))?
.add_onion_v3(
&self.embassyd_tor_key,
false,
false,
false,
None,
&mut std::iter::once(&(self.embassyd_addr.port(), self.embassyd_addr)),
)
.await?;
tracing::info!(
"Registered Main Tor Service: {}",
self.embassyd_tor_key.public().get_onion_address()
);
Ok(())
}
fn embassyd_onion(&self) -> OnionAddressV3 {
self.embassyd_tor_key.public().get_onion_address()
} }
#[instrument(skip(self))] #[instrument(skip(self))]
async fn list_services(&mut self) -> Result<Vec<OnionAddressV3>, Error> { async fn list_services(&mut self) -> Result<Vec<OnionAddressV3>, Error> {
self.connection self.connection
.as_mut()
.ok_or_else(|| Error::new(eyre!("Missing Tor Control Connection"), ErrorKind::Tor))?
.get_info("onions/current") .get_info("onions/current")
.await? .await?
.lines() .lines()
@@ -312,6 +243,15 @@ async fn test() {
) )
.await .await
.unwrap(); .unwrap();
connection
.del_onion(
&tor_key
.public()
.get_onion_address()
.get_address_without_dot_onion(),
)
.await
.unwrap();
connection connection
.add_onion_v3( .add_onion_v3(
&tor_key, &tor_key,

View File

@@ -1,81 +1,322 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::net::SocketAddr; use std::convert::Infallible;
use std::sync::Arc; use std::net::{IpAddr, SocketAddr};
use std::sync::{Arc, Weak};
use tokio_rustls::rustls::ServerConfig; use color_eyre::eyre::eyre;
use helpers::NonDetachingJoinHandle;
use http::Response;
use hyper::service::{make_service_fn, service_fn};
use hyper::Body;
use models::ResultExt;
use tokio::net::{TcpListener, TcpStream};
use tokio::sync::{Mutex, RwLock};
use tokio_rustls::rustls::server::Acceptor;
use tokio_rustls::rustls::{RootCertStore, ServerConfig};
use tokio_rustls::{LazyConfigAcceptor, TlsConnector};
use crate::net::cert_resolver::EmbassyCertResolver; use crate::net::keys::Key;
use crate::net::embassy_service_http_server::EmbassyServiceHTTPServer; use crate::net::net_utils::SingleAccept;
use crate::net::net_utils::ResourceFqdn; use crate::net::ssl::SslManager;
use crate::net::HttpHandler; use crate::util::io::BackTrackingReader;
use crate::Error; use crate::Error;
pub struct VHOSTController { // not allowed: <=1024, >=32768, 5355, 5432, 9050, 6010, 9051, 5353
pub service_servers: BTreeMap<u16, EmbassyServiceHTTPServer>,
pub cert_resolver: EmbassyCertResolver, pub struct VHostController {
embassyd_addr: SocketAddr, ssl: Arc<SslManager>,
servers: Mutex<BTreeMap<u16, VHostServer>>,
} }
impl VHostController {
impl VHOSTController { pub fn new(ssl: Arc<SslManager>) -> Self {
pub fn init(embassyd_addr: SocketAddr) -> Self {
Self { Self {
embassyd_addr, ssl,
service_servers: BTreeMap::new(), servers: Mutex::new(BTreeMap::new()),
cert_resolver: EmbassyCertResolver::new(),
} }
} }
pub async fn add(
pub fn build_ssl_svr_cfg(&self) -> Result<Arc<ServerConfig>, Error> { &self,
let ssl_cfg = ServerConfig::builder() key: Key,
.with_safe_default_cipher_suites() hostname: Option<String>,
.with_safe_default_kx_groups() external: u16,
.with_safe_default_protocol_versions() target: SocketAddr,
.unwrap() connect_ssl: bool,
.with_no_client_auth() ) -> Result<Arc<()>, Error> {
.with_cert_resolver(Arc::new(self.cert_resolver.clone())); let mut writable = self.servers.lock().await;
let server = if let Some(server) = writable.remove(&external) {
Ok(Arc::new(ssl_cfg)) server
}
pub async fn add_server_or_handle(
&mut self,
external_svc_port: u16,
fqdn: ResourceFqdn,
svc_handler: HttpHandler,
is_ssl: bool,
) -> Result<(), Error> {
if let Some(server) = self.service_servers.get_mut(&external_svc_port) {
server.add_svc_handler_mapping(fqdn, svc_handler).await?;
} else { } else {
self.add_server(is_ssl, external_svc_port, fqdn, svc_handler) VHostServer::new(external, self.ssl.clone()).await?
.await?; };
let rc = server
.add(
hostname,
TargetInfo {
addr: target,
connect_ssl,
key,
},
)
.await;
writable.insert(external, server);
Ok(rc?)
}
pub async fn gc(&self, hostname: Option<String>, external: u16) -> Result<(), Error> {
let mut writable = self.servers.lock().await;
if let Some(server) = writable.remove(&external) {
server.gc(hostname).await?;
if !server.is_empty().await? {
writable.insert(external, server);
}
} }
Ok(()) Ok(())
} }
}
async fn add_server( #[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
&mut self, struct TargetInfo {
is_ssl: bool, addr: SocketAddr,
external_svc_port: u16, connect_ssl: bool,
fqdn: ResourceFqdn, key: Key,
svc_handler: HttpHandler, }
) -> Result<(), Error> {
let ssl_cfg = if is_ssl { struct VHostServer {
Some(self.build_ssl_svr_cfg()?) mapping: Weak<RwLock<BTreeMap<Option<String>, BTreeMap<TargetInfo, Weak<()>>>>>,
_thread: NonDetachingJoinHandle<()>,
}
impl VHostServer {
async fn new(port: u16, ssl: Arc<SslManager>) -> Result<Self, Error> {
// check if port allowed
let listener = TcpListener::bind(SocketAddr::new([0, 0, 0, 0].into(), port))
.await
.with_kind(crate::ErrorKind::Network)?;
let mapping = Arc::new(RwLock::new(BTreeMap::new()));
Ok(Self {
mapping: Arc::downgrade(&mapping),
_thread: tokio::spawn(async move {
loop {
match listener.accept().await {
Ok((stream, _)) => {
let mut stream = BackTrackingReader::new(stream);
stream.start_buffering();
let mapping = mapping.clone();
let ssl = ssl.clone();
tokio::spawn(async move {
if let Err(e) = async {
let mid = match LazyConfigAcceptor::new(
Acceptor::default(),
&mut stream,
)
.await
{
Ok(a) => a,
Err(e) => {
stream.rewind();
return hyper::server::Server::builder(
SingleAccept::new(stream),
)
.serve(make_service_fn(|_| async {
Ok::<_, Infallible>(service_fn(|req| async move {
Response::builder()
.status(
http::StatusCode::TEMPORARY_REDIRECT,
)
.header(
http::header::LOCATION,
req.headers()
.get(http::header::HOST)
.and_then(|host| host.to_str().ok())
.map(|host| {
format!("https://{host}")
})
.unwrap_or_default(),
)
.body(Body::default())
}))
}))
.await
.with_kind(crate::ErrorKind::Network);
}
};
let target_name =
mid.client_hello().server_name().map(|s| s.to_owned());
let target = {
let mapping = mapping.read().await;
mapping
.get(&target_name)
.into_iter()
.flatten()
.find(|(_, rc)| rc.strong_count() > 0)
.or_else(|| {
if target_name
.map(|s| s.parse::<IpAddr>().is_ok())
.unwrap_or(true)
{
mapping
.get(&None)
.into_iter()
.flatten()
.find(|(_, rc)| rc.strong_count() > 0)
} else { } else {
None None
}
})
.map(|(target, _)| target.clone())
}; };
if let Some(target) = target {
let mut new_service_server = let mut tcp_stream =
EmbassyServiceHTTPServer::new(self.embassyd_addr.ip(), external_svc_port, ssl_cfg) TcpStream::connect(target.addr).await?;
let key =
ssl.with_certs(target.key, target.addr.ip()).await?;
let cfg = ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth();
let cfg =
if mid.client_hello().signature_schemes().contains(
&tokio_rustls::rustls::SignatureScheme::ED25519,
) {
cfg.with_single_cert(
key.fullchain_ed25519()
.into_iter()
.map(|c| {
Ok(tokio_rustls::rustls::Certificate(
c.to_der()?,
))
})
.collect::<Result<_, Error>>()?,
tokio_rustls::rustls::PrivateKey(
key.key()
.openssl_key_ed25519()
.private_key_to_der()?,
),
)
} else {
cfg.with_single_cert(
key.fullchain_nistp256()
.into_iter()
.map(|c| {
Ok(tokio_rustls::rustls::Certificate(
c.to_der()?,
))
})
.collect::<Result<_, Error>>()?,
tokio_rustls::rustls::PrivateKey(
key.key()
.openssl_key_nistp256()
.private_key_to_der()?,
),
)
};
let mut tls_stream = mid
.into_stream(Arc::new(
cfg.with_kind(crate::ErrorKind::OpenSsl)?,
))
.await?; .await?;
new_service_server tls_stream.get_mut().0.stop_buffering();
.add_svc_handler_mapping(fqdn.clone(), svc_handler) if target.connect_ssl {
tokio::io::copy_bidirectional(
&mut tls_stream,
&mut TlsConnector::from(Arc::new(
tokio_rustls::rustls::ClientConfig::builder()
.with_safe_defaults()
.with_root_certificates({
let mut store = RootCertStore::empty();
store.add(
&tokio_rustls::rustls::Certificate(
key.root_ca().to_der()?,
),
).with_kind(crate::ErrorKind::OpenSsl)?;
store
})
.with_no_client_auth(),
))
.connect(
key.key()
.internal_address()
.as_str()
.try_into()
.with_kind(crate::ErrorKind::OpenSsl)?,
tcp_stream,
)
.await
.with_kind(crate::ErrorKind::OpenSsl)?,
)
.await?; .await?;
self.service_servers } else {
.insert(external_svc_port, new_service_server); tokio::io::copy_bidirectional(
&mut tls_stream,
&mut tcp_stream,
)
.await?;
}
} else {
// 503
}
Ok::<_, Error>(())
}
.await
{
tracing::error!("Error in VHostController on port {port}: {e}");
tracing::debug!("{e:?}")
}
});
}
Err(e) => {
tracing::error!("Error in VHostController on port {port}: {e}");
tracing::debug!("{e:?}");
}
}
}
})
.into(),
})
}
async fn add(&self, hostname: Option<String>, target: TargetInfo) -> Result<Arc<()>, Error> {
if let Some(mapping) = Weak::upgrade(&self.mapping) {
let mut writable = mapping.write().await;
let mut targets = writable.remove(&hostname).unwrap_or_default();
let rc = if let Some(rc) = Weak::upgrade(&targets.remove(&target).unwrap_or_default()) {
rc
} else {
Arc::new(())
};
targets.insert(target, Arc::downgrade(&rc));
writable.insert(hostname, targets);
Ok(rc)
} else {
Err(Error::new(
eyre!("VHost Service Thread has exited"),
crate::ErrorKind::Network,
))
}
}
async fn gc(&self, hostname: Option<String>) -> Result<(), Error> {
if let Some(mapping) = Weak::upgrade(&self.mapping) {
let mut writable = mapping.write().await;
let mut targets = writable.remove(&hostname).unwrap_or_default();
targets = targets
.into_iter()
.filter(|(_, rc)| rc.strong_count() > 0)
.collect();
if !targets.is_empty() {
writable.insert(hostname, targets);
}
Ok(()) Ok(())
} else {
Err(Error::new(
eyre!("VHost Service Thread has exited"),
crate::ErrorKind::Network,
))
}
}
async fn is_empty(&self) -> Result<bool, Error> {
if let Some(mapping) = Weak::upgrade(&self.mapping) {
Ok(mapping.read().await.is_empty())
} else {
Err(Error::new(
eyre!("VHost Service Thread has exited"),
crate::ErrorKind::Network,
))
}
} }
} }

View File

@@ -0,0 +1,61 @@
use std::convert::Infallible;
use std::net::SocketAddr;
use futures::future::ready;
use futures::FutureExt;
use helpers::NonDetachingJoinHandle;
use hyper::service::{make_service_fn, service_fn};
use hyper::Server;
use tokio::sync::oneshot;
use crate::context::{DiagnosticContext, InstallContext, RpcContext, SetupContext};
use crate::net::static_server::{
diag_ui_file_router, install_ui_file_router, main_ui_server_router, setup_ui_file_router,
};
use crate::net::HttpHandler;
use crate::Error;
pub struct WebServer {
shutdown: oneshot::Sender<()>,
thread: NonDetachingJoinHandle<()>,
}
impl WebServer {
pub fn new(bind: SocketAddr, router: HttpHandler) -> Self {
let (shutdown, shutdown_recv) = oneshot::channel();
let thread = NonDetachingJoinHandle::from(tokio::spawn(async move {
let server = Server::bind(&bind)
.http1_preserve_header_case(true)
.http1_title_case_headers(true)
.serve(make_service_fn(move |_| {
let router = router.clone();
ready(Ok::<_, Infallible>(service_fn(move |req| router(req))))
}))
.with_graceful_shutdown(shutdown_recv.map(|_| ()));
if let Err(e) = server.await {
tracing::error!("Spawning hyper server error: {}", e);
}
}));
Self { shutdown, thread }
}
pub async fn shutdown(self) {
self.shutdown.send(()).unwrap_or_default();
self.thread.await.unwrap()
}
pub async fn main(bind: SocketAddr, ctx: RpcContext) -> Result<Self, Error> {
Ok(Self::new(bind, main_ui_server_router(ctx).await?))
}
pub async fn setup(bind: SocketAddr, ctx: SetupContext) -> Result<Self, Error> {
Ok(Self::new(bind, setup_ui_file_router(ctx).await?))
}
pub async fn diagnostic(bind: SocketAddr, ctx: DiagnosticContext) -> Result<Self, Error> {
Ok(Self::new(bind, diag_ui_file_router(ctx).await?))
}
pub async fn install(bind: SocketAddr, ctx: InstallContext) -> Result<Self, Error> {
Ok(Self::new(bind, install_ui_file_router(ctx).await?))
}
}

View File

@@ -254,7 +254,7 @@ impl NotificationManager {
.unread_notification_count() .unread_notification_count()
.get_mut(db) .get_mut(db)
.await?; .await?;
let sql_package_id = package_id.map::<String, _>(|p| p.into()); let sql_package_id = package_id.as_ref().map(|p| &**p);
let sql_code = T::CODE; let sql_code = T::CODE;
let sql_level = format!("{}", level); let sql_level = format!("{}", level);
let sql_data = let sql_data =

View File

@@ -12,6 +12,7 @@ use color_eyre::Report;
use futures::future::Either as EitherFuture; use futures::future::Either as EitherFuture;
use futures::TryStreamExt; use futures::TryStreamExt;
use helpers::{NonDetachingJoinHandle, UnixRpcClient}; use helpers::{NonDetachingJoinHandle, UnixRpcClient};
use models::{Id, ImageId};
use nix::sys::signal; use nix::sys::signal;
use nix::unistd::Pid; use nix::unistd::Pid;
use serde::de::DeserializeOwned; use serde::de::DeserializeOwned;
@@ -25,7 +26,6 @@ use tracing::instrument;
use super::ProcedureName; use super::ProcedureName;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::id::{Id, ImageId};
use crate::s9pk::manifest::{PackageId, SYSTEM_PACKAGE_ID}; use crate::s9pk::manifest::{PackageId, SYSTEM_PACKAGE_ID};
use crate::util::serde::{Duration as SerdeDuration, IoFormat}; use crate::util::serde::{Duration as SerdeDuration, IoFormat};
use crate::util::Version; use crate::util::Version;
@@ -668,7 +668,7 @@ impl DockerProcedure {
} }
} }
pub fn uncontainer_name(name: &str) -> Option<(PackageId<&str>, Option<&str>)> { pub fn uncontainer_name(name: &str) -> Option<(PackageId, Option<&str>)> {
let (pre_tld, _) = name.split_once('.')?; let (pre_tld, _) = name.split_once('.')?;
if pre_tld.contains('_') { if pre_tld.contains('_') {
let (pkg, name) = name.split_once('_')?; let (pkg, name) = name.split_once('_')?;
@@ -716,7 +716,7 @@ impl DockerProcedure {
res.push(OsStr::new("--entrypoint").into()); res.push(OsStr::new("--entrypoint").into());
res.push(OsStr::new(&self.entrypoint).into()); res.push(OsStr::new(&self.entrypoint).into());
if self.system { if self.system {
res.push(OsString::from(self.image.for_package(SYSTEM_PACKAGE_ID, None)).into()); res.push(OsString::from(self.image.for_package(&*SYSTEM_PACKAGE_ID, None)).into());
} else { } else {
res.push(OsString::from(self.image.for_package(pkg_id, Some(pkg_version))).into()); res.push(OsString::from(self.image.for_package(pkg_id, Some(pkg_version))).into());
} }
@@ -804,7 +804,7 @@ impl LongRunning {
.arg("'{{.Architecture}}'"); .arg("'{{.Architecture}}'");
if docker.system { if docker.system {
cmd.arg(docker.image.for_package(SYSTEM_PACKAGE_ID, None)); cmd.arg(docker.image.for_package(&*SYSTEM_PACKAGE_ID, None));
} else { } else {
cmd.arg(docker.image.for_package(pkg_id, Some(pkg_version))); cmd.arg(docker.image.for_package(pkg_id, Some(pkg_version)));
} }
@@ -856,7 +856,7 @@ impl LongRunning {
} }
cmd.arg("--log-driver=journald"); cmd.arg("--log-driver=journald");
if docker.system { if docker.system {
cmd.arg(docker.image.for_package(SYSTEM_PACKAGE_ID, None)); cmd.arg(docker.image.for_package(&*SYSTEM_PACKAGE_ID, None));
} else { } else {
cmd.arg(docker.image.for_package(pkg_id, Some(pkg_version))); cmd.arg(docker.image.for_package(pkg_id, Some(pkg_version)));
} }

View File

@@ -2,6 +2,7 @@ use std::collections::BTreeSet;
use std::time::Duration; use std::time::Duration;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use models::ImageId;
use patch_db::HasModel; use patch_db::HasModel;
use serde::de::DeserializeOwned; use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@@ -9,7 +10,6 @@ use tracing::instrument;
use self::docker::{DockerContainers, DockerProcedure}; use self::docker::{DockerContainers, DockerProcedure};
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::id::ImageId;
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;
use crate::util::Version; use crate::util::Version;
use crate::volume::Volumes; use crate::volume::Volumes;

View File

@@ -10,6 +10,7 @@ use color_eyre::eyre::eyre;
use digest_old::Output; use digest_old::Output;
use ed25519_dalek::PublicKey; use ed25519_dalek::PublicKey;
use futures::TryStreamExt; use futures::TryStreamExt;
use models::ImageId;
use sha2_old::{Digest, Sha512}; use sha2_old::{Digest, Sha512};
use tokio::fs::File; use tokio::fs::File;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, ReadBuf}; use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, ReadBuf};
@@ -18,7 +19,6 @@ use tracing::instrument;
use super::header::{FileSection, Header, TableOfContents}; use super::header::{FileSection, Header, TableOfContents};
use super::manifest::{Manifest, PackageId}; use super::manifest::{Manifest, PackageId};
use super::SIG_CONTEXT; use super::SIG_CONTEXT;
use crate::id::ImageId;
use crate::install::progress::InstallProgressTracker; use crate::install::progress::InstallProgressTracker;
use crate::s9pk::docker::DockerReader; use crate::s9pk::docker::DockerReader;
use crate::util::Version; use crate::util::Version;

View File

@@ -7,17 +7,16 @@ use helpers::{Rsync, RsyncOptions};
use josekit::jwk::Jwk; use josekit::jwk::Jwk;
use openssl::x509::X509; use openssl::x509::X509;
use patch_db::DbHandle; use patch_db::DbHandle;
use rand::random;
use rpc_toolkit::command; use rpc_toolkit::command;
use rpc_toolkit::yajrc::RpcError; use rpc_toolkit::yajrc::RpcError;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use sqlx::{Connection, Executor, Postgres}; use sqlx::Connection;
use ssh_key::private::Ed25519PrivateKey;
use tokio::fs::File; use tokio::fs::File;
use tokio::io::AsyncWriteExt; use tokio::io::AsyncWriteExt;
use torut::onion::{OnionAddressV3, TorSecretKeyV3}; use torut::onion::OnionAddressV3;
use tracing::instrument; use tracing::instrument;
use crate::account::AccountInfo;
use crate::backup::restore::recover_full_embassy; use crate::backup::restore::recover_full_embassy;
use crate::backup::target::BackupTargetFS; use crate::backup::target::BackupTargetFS;
use crate::context::rpc::RpcContextConfig; use crate::context::rpc::RpcContextConfig;
@@ -30,25 +29,11 @@ use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::TmpMountGuard; use crate::disk::mount::guard::TmpMountGuard;
use crate::disk::util::{pvscan, recovery_info, DiskInfo, EmbassyOsRecoveryInfo}; use crate::disk::util::{pvscan, recovery_info, DiskInfo, EmbassyOsRecoveryInfo};
use crate::disk::REPAIR_DISK_PATH; use crate::disk::REPAIR_DISK_PATH;
use crate::hostname::{get_hostname, HostNameReceipt, Hostname}; use crate::hostname::Hostname;
use crate::init::{init, InitResult}; use crate::init::{init, InitResult};
use crate::middleware::encrypt::EncryptedWire; use crate::middleware::encrypt::EncryptedWire;
use crate::net::ssl::SslManager;
use crate::{Error, ErrorKind, ResultExt}; use crate::{Error, ErrorKind, ResultExt};
#[instrument(skip(secrets))]
pub async fn password_hash<Ex>(secrets: &mut Ex) -> Result<String, Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
let password = sqlx::query!("SELECT password FROM account")
.fetch_one(secrets)
.await?
.password;
Ok(password)
}
#[command(subcommands(status, disk, attach, execute, cifs, complete, get_pubkey, exit))] #[command(subcommands(status, disk, attach, execute, cifs, complete, get_pubkey, exit))]
pub fn setup() -> Result<(), Error> { pub fn setup() -> Result<(), Error> {
Ok(()) Ok(())
@@ -75,30 +60,26 @@ async fn setup_init(
let mut secrets_tx = secrets_handle.begin().await?; let mut secrets_tx = secrets_handle.begin().await?;
let mut db_tx = db_handle.begin().await?; let mut db_tx = db_handle.begin().await?;
let mut account = AccountInfo::load(&mut secrets_tx).await?;
if let Some(password) = password { if let Some(password) = password {
let set_password_receipt = crate::auth::SetPasswordReceipt::new(&mut db_tx).await?; account.set_password(&password)?;
crate::auth::set_password( account.save(&mut secrets_tx).await?;
&mut db_tx, crate::db::DatabaseModel::new()
&set_password_receipt, .server_info()
&mut secrets_tx, .password_hash()
&password, .put(&mut db_tx, &account.password)
)
.await?; .await?;
} }
let tor_key = crate::net::tor::os_key(&mut secrets_tx).await?;
db_tx.commit().await?; db_tx.commit().await?;
secrets_tx.commit().await?; secrets_tx.commit().await?;
let hostname_receipts = HostNameReceipt::new(&mut db_handle).await?; Ok((
let hostname = get_hostname(&mut db_handle, &hostname_receipts).await?; account.hostname,
account.key.tor_address(),
let (_, root_ca) = SslManager::init(secret_store, &mut db_handle) account.root_ca_cert,
.await? ))
.export_root_ca()
.await?;
Ok((hostname, tor_key.public().get_onion_address(), root_ca))
} }
#[command(rpc_only)] #[command(rpc_only)]
@@ -385,38 +366,18 @@ async fn fresh_setup(
ctx: &SetupContext, ctx: &SetupContext,
embassy_password: &str, embassy_password: &str,
) -> Result<(Hostname, OnionAddressV3, X509), Error> { ) -> Result<(Hostname, OnionAddressV3, X509), Error> {
let password = argon2::hash_encoded( let account = AccountInfo::new(embassy_password)?;
embassy_password.as_bytes(),
&rand::random::<[u8; 16]>()[..],
&argon2::Config::default(),
)
.with_kind(crate::ErrorKind::PasswordHashGeneration)?;
let tor_key = TorSecretKeyV3::generate();
let tor_key_bytes = tor_key.as_bytes().to_vec();
let ssh_key = Ed25519PrivateKey::from_bytes(&random());
let ssh_key_bytes = ssh_key.to_bytes().to_vec();
let sqlite_pool = ctx.secret_store().await?; let sqlite_pool = ctx.secret_store().await?;
sqlx::query!( account.save(&sqlite_pool).await?;
"INSERT INTO account (id, password, tor_key, ssh_key) VALUES ($1, $2, $3, $4) ON CONFLICT (id) DO UPDATE SET password = $2, tor_key = $3, ssh_key = $4",
0,
password,
tor_key_bytes,
ssh_key_bytes,
)
.execute(&mut sqlite_pool.acquire().await?)
.await?;
sqlite_pool.close().await; sqlite_pool.close().await;
let InitResult { secret_store, db } = let InitResult { secret_store, .. } =
init(&RpcContextConfig::load(ctx.config_path.clone()).await?).await?; init(&RpcContextConfig::load(ctx.config_path.clone()).await?).await?;
let mut handle = db.handle();
let receipts = crate::hostname::HostNameReceipt::new(&mut handle).await?;
let hostname = get_hostname(&mut handle, &receipts).await?;
let (_, root_ca) = SslManager::init(secret_store.clone(), &mut handle)
.await?
.export_root_ca()
.await?;
secret_store.close().await; secret_store.close().await;
Ok((hostname, tor_key.public().get_onion_address(), root_ca)) Ok((
account.hostname.clone(),
account.key.tor_address(),
account.root_ca_cert.clone(),
))
} }
#[instrument(skip(ctx, embassy_password, recovery_password))] #[instrument(skip(ctx, embassy_password, recovery_password))]

View File

@@ -15,25 +15,6 @@ use crate::{Error, ErrorKind};
static SSH_AUTHORIZED_KEYS_FILE: &str = "/home/start9/.ssh/authorized_keys"; static SSH_AUTHORIZED_KEYS_FILE: &str = "/home/start9/.ssh/authorized_keys";
#[instrument(skip(secrets))]
pub async fn os_key<Ex>(secrets: &mut Ex) -> Result<Ed25519PrivateKey, Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
let key = sqlx::query!("SELECT ssh_key FROM account")
.fetch_one(secrets)
.await?
.ssh_key;
let mut buf = [0; 32];
buf.clone_from_slice(
key.get(0..64).ok_or_else(|| {
Error::new(eyre!("Invalid Ssh Key Length"), crate::ErrorKind::Database)
})?,
);
Ok(Ed25519PrivateKey::from_bytes(&buf))
}
#[derive(Debug, serde::Deserialize, serde::Serialize)] #[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct PubKey( pub struct PubKey(
#[serde(serialize_with = "crate::util::serde::serialize_display")] #[serde(serialize_with = "crate::util::serde::serialize_display")]

View File

@@ -2,11 +2,11 @@ use std::collections::{BTreeMap, BTreeSet};
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
pub use models::HealthCheckId; pub use models::HealthCheckId;
use models::ImageId;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use tracing::instrument; use tracing::instrument;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::id::ImageId;
use crate::procedure::docker::DockerContainers; use crate::procedure::docker::DockerContainers;
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName}; use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;

View File

@@ -1,4 +1,5 @@
use std::future::Future; use std::future::Future;
use std::io::Cursor;
use std::path::Path; use std::path::Path;
use std::task::Poll; use std::task::Poll;
@@ -295,3 +296,111 @@ impl AsyncRead for BufferedWriteReader {
} }
} }
} }
pub trait CursorExt {
fn pure_read(&mut self, buf: &mut ReadBuf<'_>);
}
impl<T: AsRef<[u8]>> CursorExt for Cursor<T> {
fn pure_read(&mut self, buf: &mut ReadBuf<'_>) {
let end = self.position() as usize
+ std::cmp::max(
buf.remaining(),
self.get_ref().as_ref().len() - self.position() as usize,
);
buf.put_slice(&self.get_ref().as_ref()[self.position() as usize..end]);
self.set_position(end as u64);
}
}
#[pin_project::pin_project]
pub struct BackTrackingReader<T> {
#[pin]
reader: T,
buffer: Cursor<Vec<u8>>,
buffering: bool,
}
impl<T> BackTrackingReader<T> {
pub fn new(reader: T) -> Self {
Self {
reader,
buffer: Cursor::new(Vec::new()),
buffering: false,
}
}
pub fn start_buffering(&mut self) {
self.buffer.set_position(0);
self.buffer.get_mut().truncate(0);
self.buffering = true;
}
pub fn stop_buffering(&mut self) {
self.buffer.set_position(0);
self.buffer.get_mut().truncate(0);
self.buffering = false;
}
pub fn rewind(&mut self) {
self.buffering = false;
}
pub fn unwrap(self) -> T {
self.reader
}
}
impl<T: AsyncRead> AsyncRead for BackTrackingReader<T> {
fn poll_read(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
buf: &mut ReadBuf<'_>,
) -> Poll<std::io::Result<()>> {
let this = self.project();
if *this.buffering {
let filled = buf.filled().len();
let res = this.reader.poll_read(cx, buf);
this.buffer
.get_mut()
.extend_from_slice(&buf.filled()[filled..]);
res
} else {
if (this.buffer.position() as usize) < this.buffer.get_ref().len() {
this.buffer.pure_read(buf);
}
if buf.remaining() > 0 {
this.reader.poll_read(cx, buf)
} else {
Poll::Ready(Ok(()))
}
}
}
}
impl<T: AsyncWrite> AsyncWrite for BackTrackingReader<T> {
fn is_write_vectored(&self) -> bool {
self.reader.is_write_vectored()
}
fn poll_flush(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> Poll<Result<(), std::io::Error>> {
self.project().reader.poll_flush(cx)
}
fn poll_shutdown(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> Poll<Result<(), std::io::Error>> {
self.project().reader.poll_shutdown(cx)
}
fn poll_write(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
buf: &[u8],
) -> Poll<Result<usize, std::io::Error>> {
self.project().reader.poll_write(cx, buf)
}
fn poll_write_vectored(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
bufs: &[std::io::IoSlice<'_>],
) -> Poll<Result<usize, std::io::Error>> {
self.project().reader.poll_write_vectored(cx, bufs)
}
}

View File

@@ -330,3 +330,7 @@ impl FileLock {
Ok(()) Ok(())
} }
} }
pub fn assure_send<T: Send>(x: T) -> T {
x
}

View File

@@ -739,3 +739,57 @@ impl<'de, K: Deserialize<'de>, V: Deserialize<'de>> Deserialize<'de> for KeyVal<
deserializer.deserialize_map(Visitor(PhantomData)) deserializer.deserialize_map(Visitor(PhantomData))
} }
} }
pub struct Base32<T>(pub T);
impl<'de, T: TryFrom<Vec<u8>>> Deserialize<'de> for Base32<T> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
base32::decode(base32::Alphabet::RFC4648 { padding: true }, &s)
.ok_or_else(|| {
serde::de::Error::invalid_value(
serde::de::Unexpected::Str(&s),
&"a valid base32 string",
)
})?
.try_into()
.map_err(|_| serde::de::Error::custom("invalid length"))
.map(Self)
}
}
impl<T: AsRef<[u8]>> Serialize for Base32<T> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&base32::encode(
base32::Alphabet::RFC4648 { padding: true },
self.0.as_ref(),
))
}
}
pub struct Base64<T>(pub T);
impl<'de, T: TryFrom<Vec<u8>>> Deserialize<'de> for Base64<T> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let s = String::deserialize(deserializer)?;
base64::decode(&s)
.map_err(serde::de::Error::custom)?
.try_into()
.map_err(|_| serde::de::Error::custom("invalid length"))
.map(Self)
}
}
impl<T: AsRef<[u8]>> Serialize for Base64<T> {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
serializer.serialize_str(&base64::encode(self.0.as_ref()))
}
}

View File

@@ -4,6 +4,7 @@ use async_trait::async_trait;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use patch_db::DbHandle; use patch_db::DbHandle;
use rpc_toolkit::command; use rpc_toolkit::command;
use sqlx::PgPool;
use crate::init::InitReceipts; use crate::init::InitReceipts;
use crate::Error; use crate::Error;
@@ -76,8 +77,8 @@ where
fn new() -> Self; fn new() -> Self;
fn semver(&self) -> emver::Version; fn semver(&self) -> emver::Version;
fn compat(&self) -> &'static emver::VersionRange; fn compat(&self) -> &'static emver::VersionRange;
async fn up<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error>; async fn up<Db: DbHandle>(&self, db: &mut Db, secrets: &PgPool) -> Result<(), Error>;
async fn down<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error>; async fn down<Db: DbHandle>(&self, db: &mut Db, secrets: &PgPool) -> Result<(), Error>;
async fn commit<Db: DbHandle>( async fn commit<Db: DbHandle>(
&self, &self,
db: &mut Db, db: &mut Db,
@@ -98,11 +99,19 @@ where
&self, &self,
version: &V, version: &V,
db: &mut Db, db: &mut Db,
secrets: &PgPool,
receipts: &InitReceipts, receipts: &InitReceipts,
) -> Result<(), Error> { ) -> Result<(), Error> {
match self.semver().cmp(&version.semver()) { match self.semver().cmp(&version.semver()) {
Ordering::Greater => self.rollback_to_unchecked(version, db, receipts).await, Ordering::Greater => {
Ordering::Less => version.migrate_from_unchecked(self, db, receipts).await, self.rollback_to_unchecked(version, db, secrets, receipts)
.await
}
Ordering::Less => {
version
.migrate_from_unchecked(self, db, secrets, receipts)
.await
}
Ordering::Equal => Ok(()), Ordering::Equal => Ok(()),
} }
} }
@@ -110,12 +119,13 @@ where
&self, &self,
version: &V, version: &V,
db: &mut Db, db: &mut Db,
secrets: &PgPool,
receipts: &InitReceipts, receipts: &InitReceipts,
) -> Result<(), Error> { ) -> Result<(), Error> {
let previous = Self::Previous::new(); let previous = Self::Previous::new();
if version.semver() < previous.semver() { if version.semver() < previous.semver() {
previous previous
.migrate_from_unchecked(version, db, receipts) .migrate_from_unchecked(version, db, secrets, receipts)
.await?; .await?;
} else if version.semver() > previous.semver() { } else if version.semver() > previous.semver() {
return Err(Error::new( return Err(Error::new(
@@ -127,7 +137,7 @@ where
)); ));
} }
tracing::info!("{} -> {}", previous.semver(), self.semver(),); tracing::info!("{} -> {}", previous.semver(), self.semver(),);
self.up(db).await?; self.up(db, secrets).await?;
self.commit(db, receipts).await?; self.commit(db, receipts).await?;
Ok(()) Ok(())
} }
@@ -135,15 +145,16 @@ where
&self, &self,
version: &V, version: &V,
db: &mut Db, db: &mut Db,
secrets: &PgPool,
receipts: &InitReceipts, receipts: &InitReceipts,
) -> Result<(), Error> { ) -> Result<(), Error> {
let previous = Self::Previous::new(); let previous = Self::Previous::new();
tracing::info!("{} -> {}", self.semver(), previous.semver(),); tracing::info!("{} -> {}", self.semver(), previous.semver(),);
self.down(db).await?; self.down(db, secrets).await?;
previous.commit(db, receipts).await?; previous.commit(db, receipts).await?;
if version.semver() < previous.semver() { if version.semver() < previous.semver() {
previous previous
.rollback_to_unchecked(version, db, receipts) .rollback_to_unchecked(version, db, secrets, receipts)
.await?; .await?;
} else if version.semver() > previous.semver() { } else if version.semver() > previous.semver() {
return Err(Error::new( return Err(Error::new(
@@ -184,21 +195,55 @@ where
pub async fn init<Db: DbHandle>( pub async fn init<Db: DbHandle>(
db: &mut Db, db: &mut Db,
secrets: &PgPool,
receipts: &crate::init::InitReceipts, receipts: &crate::init::InitReceipts,
) -> Result<(), Error> { ) -> Result<(), Error> {
let version = Version::from_util_version(receipts.server_version.get(db).await?); let version = Version::from_util_version(receipts.server_version.get(db).await?);
match version { match version {
Version::V0_3_0(v) => v.0.migrate_to(&Current::new(), db, receipts).await?, Version::V0_3_0(v) => {
Version::V0_3_0_1(v) => v.0.migrate_to(&Current::new(), db, receipts).await?, v.0.migrate_to(&Current::new(), db, secrets, receipts)
Version::V0_3_0_2(v) => v.0.migrate_to(&Current::new(), db, receipts).await?, .await?
Version::V0_3_0_3(v) => v.0.migrate_to(&Current::new(), db, receipts).await?, }
Version::V0_3_1(v) => v.0.migrate_to(&Current::new(), db, receipts).await?, Version::V0_3_0_1(v) => {
Version::V0_3_1_1(v) => v.0.migrate_to(&Current::new(), db, receipts).await?, v.0.migrate_to(&Current::new(), db, secrets, receipts)
Version::V0_3_1_2(v) => v.0.migrate_to(&Current::new(), db, receipts).await?, .await?
Version::V0_3_2(v) => v.0.migrate_to(&Current::new(), db, receipts).await?, }
Version::V0_3_2_1(v) => v.0.migrate_to(&Current::new(), db, receipts).await?, Version::V0_3_0_2(v) => {
Version::V0_3_3(v) => v.0.migrate_to(&Current::new(), db, receipts).await?, v.0.migrate_to(&Current::new(), db, secrets, receipts)
Version::V0_3_4(v) => v.0.migrate_to(&Current::new(), db, receipts).await?, .await?
}
Version::V0_3_0_3(v) => {
v.0.migrate_to(&Current::new(), db, secrets, receipts)
.await?
}
Version::V0_3_1(v) => {
v.0.migrate_to(&Current::new(), db, secrets, receipts)
.await?
}
Version::V0_3_1_1(v) => {
v.0.migrate_to(&Current::new(), db, secrets, receipts)
.await?
}
Version::V0_3_1_2(v) => {
v.0.migrate_to(&Current::new(), db, secrets, receipts)
.await?
}
Version::V0_3_2(v) => {
v.0.migrate_to(&Current::new(), db, secrets, receipts)
.await?
}
Version::V0_3_2_1(v) => {
v.0.migrate_to(&Current::new(), db, secrets, receipts)
.await?
}
Version::V0_3_3(v) => {
v.0.migrate_to(&Current::new(), db, secrets, receipts)
.await?
}
Version::V0_3_4(v) => {
v.0.migrate_to(&Current::new(), db, secrets, receipts)
.await?
}
Version::Other(_) => { Version::Other(_) => {
return Err(Error::new( return Err(Error::new(
eyre!("Cannot downgrade"), eyre!("Cannot downgrade"),

View File

@@ -28,10 +28,10 @@ impl VersionT for Version {
fn compat(&self) -> &'static VersionRange { fn compat(&self) -> &'static VersionRange {
&*V0_3_0_COMPAT &*V0_3_0_COMPAT
} }
async fn up<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> { async fn up<Db: DbHandle>(&self, _db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
Ok(()) Ok(())
} }
async fn down<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> { async fn down<Db: DbHandle>(&self, _db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
Ok(()) Ok(())
} }
} }

View File

@@ -18,10 +18,10 @@ impl VersionT for Version {
fn compat(&self) -> &'static VersionRange { fn compat(&self) -> &'static VersionRange {
&*v0_3_0::V0_3_0_COMPAT &*v0_3_0::V0_3_0_COMPAT
} }
async fn up<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> { async fn up<Db: DbHandle>(&self, _db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
Ok(()) Ok(())
} }
async fn down<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> { async fn down<Db: DbHandle>(&self, _db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
Ok(()) Ok(())
} }
} }

View File

@@ -18,10 +18,10 @@ impl VersionT for Version {
fn compat(&self) -> &'static VersionRange { fn compat(&self) -> &'static VersionRange {
&*v0_3_0::V0_3_0_COMPAT &*v0_3_0::V0_3_0_COMPAT
} }
async fn up<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> { async fn up<Db: DbHandle>(&self, _db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
Ok(()) Ok(())
} }
async fn down<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> { async fn down<Db: DbHandle>(&self, _db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
Ok(()) Ok(())
} }
} }

View File

@@ -18,10 +18,10 @@ impl VersionT for Version {
fn compat(&self) -> &'static VersionRange { fn compat(&self) -> &'static VersionRange {
&*v0_3_0::V0_3_0_COMPAT &*v0_3_0::V0_3_0_COMPAT
} }
async fn up<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> { async fn up<Db: DbHandle>(&self, _db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
Ok(()) Ok(())
} }
async fn down<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> { async fn down<Db: DbHandle>(&self, _db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
Ok(()) Ok(())
} }
} }

View File

@@ -19,10 +19,10 @@ impl VersionT for Version {
fn compat(&self) -> &'static VersionRange { fn compat(&self) -> &'static VersionRange {
&*V0_3_0_COMPAT &*V0_3_0_COMPAT
} }
async fn up<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> { async fn up<Db: DbHandle>(&self, _db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
Ok(()) Ok(())
} }
async fn down<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> { async fn down<Db: DbHandle>(&self, _db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
Ok(()) Ok(())
} }
} }

View File

@@ -19,10 +19,10 @@ impl VersionT for Version {
fn compat(&self) -> &'static VersionRange { fn compat(&self) -> &'static VersionRange {
&*V0_3_0_COMPAT &*V0_3_0_COMPAT
} }
async fn up<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> { async fn up<Db: DbHandle>(&self, _db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
Ok(()) Ok(())
} }
async fn down<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> { async fn down<Db: DbHandle>(&self, _db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
Ok(()) Ok(())
} }
} }

View File

@@ -19,10 +19,10 @@ impl VersionT for Version {
fn compat(&self) -> &'static VersionRange { fn compat(&self) -> &'static VersionRange {
&*V0_3_0_COMPAT &*V0_3_0_COMPAT
} }
async fn up<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> { async fn up<Db: DbHandle>(&self, _db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
Ok(()) Ok(())
} }
async fn down<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> { async fn down<Db: DbHandle>(&self, _db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
Ok(()) Ok(())
} }
} }

View File

@@ -2,8 +2,6 @@ use emver::VersionRange;
use super::v0_3_0::V0_3_0_COMPAT; use super::v0_3_0::V0_3_0_COMPAT;
use super::*; use super::*;
use crate::config::util::MergeWith;
use crate::hostname::{generate_id, sync_hostname};
const V0_3_2: emver::Version = emver::Version::new(0, 3, 2, 0); const V0_3_2: emver::Version = emver::Version::new(0, 3, 2, 0);
@@ -42,19 +40,117 @@ impl VersionT for Version {
fn compat(&self) -> &'static VersionRange { fn compat(&self) -> &'static VersionRange {
&*V0_3_0_COMPAT &*V0_3_0_COMPAT
} }
async fn up<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error> { async fn up<Db: DbHandle>(&self, db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
let receipts = crate::hostname::HostNameReceipt::new(db).await?; let hostname = legacy::hostname::get_hostname(db).await?;
crate::hostname::ensure_hostname_is_set(db, &receipts).await?; crate::db::DatabaseModel::new()
receipts.id.set(db, generate_id()).await?; .server_info()
.hostname()
.put(db, &Some(hostname.0))
.await?;
crate::db::DatabaseModel::new()
.server_info()
.id()
.put(db, &legacy::hostname::generate_id())
.await?;
let mut ui = crate::db::DatabaseModel::new().ui().get(db).await?.clone(); legacy::hostname::sync_hostname(db).await?;
ui.merge_with(&DEFAULT_UI);
crate::db::DatabaseModel::new().ui().put(db, &ui).await?;
sync_hostname(db, &receipts).await?;
Ok(()) Ok(())
} }
async fn down<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> { async fn down<Db: DbHandle>(&self, _db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
Ok(()) Ok(())
} }
} }
mod legacy {
pub mod hostname {
use patch_db::DbHandle;
use rand::{thread_rng, Rng};
use tokio::process::Command;
use tracing::instrument;
use crate::util::Invoke;
use crate::{Error, ErrorKind};
#[derive(Clone, serde::Deserialize, serde::Serialize, Debug)]
pub struct Hostname(pub String);
lazy_static::lazy_static! {
static ref ADJECTIVES: Vec<String> = include_str!("../assets/adjectives.txt").lines().map(|x| x.to_string()).collect();
static ref NOUNS: Vec<String> = include_str!("../assets/nouns.txt").lines().map(|x| x.to_string()).collect();
}
impl AsRef<str> for Hostname {
fn as_ref(&self) -> &str {
&self.0
}
}
pub fn generate_hostname() -> Hostname {
let mut rng = thread_rng();
let adjective = &ADJECTIVES[rng.gen_range(0..ADJECTIVES.len())];
let noun = &NOUNS[rng.gen_range(0..NOUNS.len())];
Hostname(format!("embassy-{adjective}-{noun}"))
}
pub fn generate_id() -> String {
let id = uuid::Uuid::new_v4();
id.to_string()
}
#[instrument]
pub async fn get_current_hostname() -> Result<Hostname, Error> {
let out = Command::new("hostname")
.invoke(ErrorKind::ParseSysInfo)
.await?;
let out_string = String::from_utf8(out)?;
Ok(Hostname(out_string.trim().to_owned()))
}
#[instrument]
pub async fn set_hostname(hostname: &Hostname) -> Result<(), Error> {
let hostname: &String = &hostname.0;
let _out = Command::new("hostnamectl")
.arg("set-hostname")
.arg(hostname)
.invoke(ErrorKind::ParseSysInfo)
.await?;
Ok(())
}
#[instrument(skip(handle))]
pub async fn get_id<Db: DbHandle>(handle: &mut Db) -> Result<String, Error> {
let id = crate::db::DatabaseModel::new()
.server_info()
.id()
.get(handle)
.await?;
Ok(id.to_string())
}
pub async fn get_hostname<Db: DbHandle>(handle: &mut Db) -> Result<Hostname, Error> {
if let Ok(hostname) = crate::db::DatabaseModel::new()
.server_info()
.hostname()
.get(handle)
.await
{
if let Some(hostname) = hostname.to_owned() {
return Ok(Hostname(hostname));
}
}
let id = get_id(handle).await?;
if id.len() != 8 {
return Ok(generate_hostname());
}
return Ok(Hostname(format!("embassy-{}", id)));
}
#[instrument(skip(handle))]
pub async fn sync_hostname<Db: DbHandle>(handle: &mut Db) -> Result<(), Error> {
set_hostname(&get_hostname(handle).await?).await?;
Command::new("systemctl")
.arg("restart")
.arg("avahi-daemon")
.invoke(crate::ErrorKind::Network)
.await?;
Ok(())
}
}
}

View File

@@ -17,10 +17,10 @@ impl VersionT for Version {
fn compat(&self) -> &'static emver::VersionRange { fn compat(&self) -> &'static emver::VersionRange {
&*V0_3_0_COMPAT &*V0_3_0_COMPAT
} }
async fn up<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> { async fn up<Db: DbHandle>(&self, _db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
Ok(()) Ok(())
} }
async fn down<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> { async fn down<Db: DbHandle>(&self, _db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
Ok(()) Ok(())
} }
} }

View File

@@ -24,7 +24,7 @@ impl VersionT for Version {
fn compat(&self) -> &'static VersionRange { fn compat(&self) -> &'static VersionRange {
&*V0_3_0_COMPAT &*V0_3_0_COMPAT
} }
async fn up<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error> { async fn up<Db: DbHandle>(&self, db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
let mut ui = crate::db::DatabaseModel::new().ui().get_mut(db).await?; let mut ui = crate::db::DatabaseModel::new().ui().get_mut(db).await?;
if let Some(Value::String(selected_url)) = if let Some(Value::String(selected_url)) =
@@ -65,7 +65,7 @@ impl VersionT for Version {
Ok(()) Ok(())
} }
async fn down<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error> { async fn down<Db: DbHandle>(&self, db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
let mut ui = crate::db::DatabaseModel::new().ui().get_mut(db).await?; let mut ui = crate::db::DatabaseModel::new().ui().get_mut(db).await?;
let selected_url = ui["marketplace"]["selected-url"] let selected_url = ui["marketplace"]["selected-url"]
.as_str() .as_str()

View File

@@ -1,6 +1,12 @@
use async_trait::async_trait; use async_trait::async_trait;
use emver::VersionRange; use emver::VersionRange;
use itertools::Itertools;
use openssl::hash::MessageDigest;
use serde_json::{json, Value}; use serde_json::{json, Value};
use ssh_key::public::Ed25519PublicKey;
use crate::account::AccountInfo;
use crate::hostname::{generate_hostname, sync_hostname, Hostname};
use super::v0_3_0::V0_3_0_COMPAT; use super::v0_3_0::V0_3_0_COMPAT;
use super::*; use super::*;
@@ -37,7 +43,44 @@ impl VersionT for Version {
fn compat(&self) -> &'static VersionRange { fn compat(&self) -> &'static VersionRange {
&*V0_3_0_COMPAT &*V0_3_0_COMPAT
} }
async fn up<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error> { async fn up<Db: DbHandle>(&self, db: &mut Db, secrets: &PgPool) -> Result<(), Error> {
let mut account = AccountInfo::load(secrets).await?;
crate::db::DatabaseModel::new()
.server_info()
.pubkey()
.put(
db,
&ssh_key::PublicKey::from(Ed25519PublicKey::from(&account.key.ssh_key()))
.to_openssh()?,
)
.await?;
crate::db::DatabaseModel::new()
.server_info()
.ca_fingerprint()
.put(
db,
&account
.root_ca_cert
.digest(MessageDigest::sha256())
.unwrap()
.iter()
.map(|x| format!("{x:X}"))
.join(":"),
)
.await?;
let server_info = crate::db::DatabaseModel::new()
.server_info()
.get(db)
.await?
.into_owned();
account.hostname = server_info
.hostname
.map(Hostname)
.unwrap_or_else(generate_hostname);
account.server_id = server_info.id;
account.save(secrets).await?;
sync_hostname(&account).await?;
let parsed_url = Some(COMMUNITY_URL.parse().unwrap()); let parsed_url = Some(COMMUNITY_URL.parse().unwrap());
let mut ui = crate::db::DatabaseModel::new().ui().get_mut(db).await?; let mut ui = crate::db::DatabaseModel::new().ui().get_mut(db).await?;
ui["marketplace"]["known-hosts"][COMMUNITY_URL] = json!({}); ui["marketplace"]["known-hosts"][COMMUNITY_URL] = json!({});
@@ -66,7 +109,7 @@ impl VersionT for Version {
ui.save(db).await?; ui.save(db).await?;
Ok(()) Ok(())
} }
async fn down<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error> { async fn down<Db: DbHandle>(&self, db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
let mut ui = crate::db::DatabaseModel::new().ui().get_mut(db).await?; let mut ui = crate::db::DatabaseModel::new().ui().get_mut(db).await?;
let parsed_url = Some(MAIN_REGISTRY.parse().unwrap()); let parsed_url = Some(MAIN_REGISTRY.parse().unwrap());
for package_id in crate::db::DatabaseModel::new() for package_id in crate::db::DatabaseModel::new()

View File

@@ -10,7 +10,7 @@ use tracing::instrument;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::net::interface::{InterfaceId, Interfaces}; use crate::net::interface::{InterfaceId, Interfaces};
use crate::net::net_controller::NetController; use crate::net::PACKAGE_CERT_PATH;
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;
use crate::util::Version; use crate::util::Version;
use crate::{Error, ResultExt}; use crate::{Error, ResultExt};
@@ -113,6 +113,10 @@ pub fn backup_dir(pkg_id: &PackageId) -> PathBuf {
Path::new(BACKUP_DIR).join(pkg_id).join("data") Path::new(BACKUP_DIR).join(pkg_id).join("data")
} }
pub fn cert_dir(pkg_id: &PackageId, interface_id: &InterfaceId) -> PathBuf {
Path::new(PACKAGE_CERT_PATH).join(pkg_id).join(interface_id)
}
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)] #[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
#[serde(tag = "type")] #[serde(tag = "type")]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
@@ -185,7 +189,7 @@ impl Volume {
} else { } else {
path.as_ref() path.as_ref()
}), }),
Volume::Certificate { interface_id: _ } => NetController::ssl_directory_for(pkg_id), Volume::Certificate { interface_id } => cert_dir(pkg_id, &interface_id),
Volume::Backup { .. } => backup_dir(pkg_id), Volume::Backup { .. } => backup_dir(pkg_id),
} }
} }

View File

@@ -1,6 +1,6 @@
{ {
"name": null, "name": null,
"ack-welcome": "0.3.3.1", "ack-welcome": "0.3.4",
"marketplace": { "marketplace": {
"selected-url": "https://registry.start9.com/", "selected-url": "https://registry.start9.com/",
"known-hosts": { "known-hosts": {

18
libs/Cargo.lock generated
View File

@@ -1560,6 +1560,20 @@ dependencies = [
"cfg-if", "cfg-if",
] ]
[[package]]
name = "internment"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a798d7677f07d6f1e77be484ea8626ddb1566194de399f1206306820c406371"
dependencies = [
"ahash",
"dashmap",
"hashbrown",
"once_cell",
"parking_lot 0.12.1",
"serde",
]
[[package]] [[package]]
name = "io-lifetimes" name = "io-lifetimes"
version = "1.0.4" version = "1.0.4"
@@ -1923,10 +1937,14 @@ dependencies = [
"color-eyre", "color-eyre",
"ed25519-dalek", "ed25519-dalek",
"emver", "emver",
"internment",
"ipnet",
"lazy_static",
"mbrman", "mbrman",
"openssl", "openssl",
"patch-db", "patch-db",
"rand 0.8.5", "rand 0.8.5",
"regex",
"rpc-toolkit", "rpc-toolkit",
"serde", "serde",
"serde_json", "serde_json",

View File

@@ -9,15 +9,19 @@ edition = "2021"
bollard = "0.13.0" bollard = "0.13.0"
color-eyre = "0.6.1" color-eyre = "0.6.1"
ed25519-dalek = { version = "1.0.1", features = ["serde"] } ed25519-dalek = { version = "1.0.1", features = ["serde"] }
lazy_static = "1.4"
mbrman = "0.5.0" mbrman = "0.5.0"
emver = { version = "0.1", git = "https://github.com/Start9Labs/emver-rs.git", features = [ emver = { version = "0.1", git = "https://github.com/Start9Labs/emver-rs.git", features = [
"serde", "serde",
] } ] }
internment = { version = "0.7.0", features = ["arc", "serde"] }
ipnet = "2.7.1"
openssl = { version = "0.10.41", features = ["vendored"] } openssl = { version = "0.10.41", features = ["vendored"] }
patch-db = { version = "*", path = "../../patch-db/patch-db", features = [ patch-db = { version = "*", path = "../../patch-db/patch-db", features = [
"trace", "trace",
] } ] }
rand = "0.8" rand = "0.8"
regex = "1.7.1"
rpc-toolkit = "0.2.1" rpc-toolkit = "0.2.1"
serde = { version = "1.0", features = ["derive", "rc"] } serde = { version = "1.0", features = ["derive", "rc"] }
serde_json = "1.0.82" serde_json = "1.0.82"

View File

@@ -6,43 +6,34 @@ use serde::{Deserialize, Serialize};
use crate::{Id, InvalidId}; use crate::{Id, InvalidId};
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)] #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)]
pub struct ActionId<S: AsRef<str> = String>(Id<S>); pub struct ActionId(Id);
impl FromStr for ActionId { impl FromStr for ActionId {
type Err = InvalidId; type Err = InvalidId;
fn from_str(s: &str) -> Result<Self, Self::Err> { fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(ActionId(Id::try_from(s.to_owned())?)) Ok(ActionId(Id::try_from(s.to_owned())?))
} }
} }
impl From<ActionId> for String { impl AsRef<ActionId> for ActionId {
fn from(value: ActionId) -> Self { fn as_ref(&self) -> &ActionId {
value.0.into()
}
}
impl<S: AsRef<str>> AsRef<ActionId<S>> for ActionId<S> {
fn as_ref(&self) -> &ActionId<S> {
self self
} }
} }
impl<S: AsRef<str>> std::fmt::Display for ActionId<S> { impl std::fmt::Display for ActionId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", &self.0) write!(f, "{}", &self.0)
} }
} }
impl<S: AsRef<str>> AsRef<str> for ActionId<S> { impl AsRef<str> for ActionId {
fn as_ref(&self) -> &str { fn as_ref(&self) -> &str {
self.0.as_ref() self.0.as_ref()
} }
} }
impl<S: AsRef<str>> AsRef<Path> for ActionId<S> { impl AsRef<Path> for ActionId {
fn as_ref(&self) -> &Path { fn as_ref(&self) -> &Path {
self.0.as_ref().as_ref() self.0.as_ref().as_ref()
} }
} }
impl<'de, S> Deserialize<'de> for ActionId<S> impl<'de> Deserialize<'de> for ActionId {
where
S: AsRef<str>,
Id<S>: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where where
D: serde::de::Deserializer<'de>, D: serde::de::Deserializer<'de>,

View File

@@ -246,6 +246,11 @@ impl From<std::net::AddrParseError> for Error {
Error::new(e, ErrorKind::ParseNetAddress) Error::new(e, ErrorKind::ParseNetAddress)
} }
} }
impl From<ipnet::AddrParseError> for Error {
fn from(e: ipnet::AddrParseError) -> Self {
Error::new(e, ErrorKind::ParseNetAddress)
}
}
impl From<openssl::error::ErrorStack> for Error { impl From<openssl::error::ErrorStack> for Error {
fn from(e: openssl::error::ErrorStack) -> Self { fn from(e: openssl::error::ErrorStack) -> Self {
Error::new(eyre!("{}", e), ErrorKind::OpenSsl) Error::new(eyre!("{}", e), ErrorKind::OpenSsl)

View File

@@ -4,23 +4,19 @@ use serde::{Deserialize, Deserializer, Serialize};
use crate::Id; use crate::Id;
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)] #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)]
pub struct HealthCheckId<S: AsRef<str> = String>(Id<S>); pub struct HealthCheckId(Id);
impl<S: AsRef<str>> std::fmt::Display for HealthCheckId<S> { impl std::fmt::Display for HealthCheckId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", &self.0) write!(f, "{}", &self.0)
} }
} }
impl<S: AsRef<str>> AsRef<str> for HealthCheckId<S> { impl AsRef<str> for HealthCheckId {
fn as_ref(&self) -> &str { fn as_ref(&self) -> &str {
self.0.as_ref() self.0.as_ref()
} }
} }
impl<'de, S> Deserialize<'de> for HealthCheckId<S> impl<'de> Deserialize<'de> for HealthCheckId {
where
S: AsRef<str>,
Id<S>: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where where
D: Deserializer<'de>, D: Deserializer<'de>,
@@ -28,7 +24,7 @@ where
Ok(HealthCheckId(Deserialize::deserialize(deserializer)?)) Ok(HealthCheckId(Deserialize::deserialize(deserializer)?))
} }
} }
impl<S: AsRef<str>> AsRef<Path> for HealthCheckId<S> { impl AsRef<Path> for HealthCheckId {
fn as_ref(&self) -> &Path { fn as_ref(&self) -> &Path {
self.0.as_ref().as_ref() self.0.as_ref().as_ref()
} }

View File

@@ -1,72 +1,79 @@
use std::borrow::Borrow; use std::borrow::Borrow;
use internment::ArcIntern;
use regex::Regex;
use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde::{Deserialize, Deserializer, Serialize, Serializer};
use crate::id_unchecked::IdUnchecked;
use crate::invalid_id::InvalidId; use crate::invalid_id::InvalidId;
pub const SYSTEM_ID: Id<&'static str> = Id("x_system"); lazy_static::lazy_static! {
static ref ID_REGEX: Regex = Regex::new("^[a-z]+(-[a-z]+)*$").unwrap();
pub static ref SYSTEM_ID: Id = Id(ArcIntern::from_ref("x_system"));
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
pub struct Id<S: AsRef<str> = String>(S); pub struct Id(ArcIntern<String>);
impl<S: AsRef<str>> Id<S> { impl TryFrom<ArcIntern<String>> for Id {
pub fn try_from(value: S) -> Result<Self, InvalidId> { type Error = InvalidId;
if value fn try_from(value: ArcIntern<String>) -> Result<Self, Self::Error> {
.as_ref() if ID_REGEX.is_match(&*value) {
.chars()
.all(|c| c.is_ascii_lowercase() || c == '-')
{
Ok(Id(value)) Ok(Id(value))
} else { } else {
Err(InvalidId) Err(InvalidId)
} }
} }
} }
impl<'a> Id<&'a str> { impl TryFrom<String> for Id {
pub fn owned(&self) -> Id { type Error = InvalidId;
Id(self.0.to_owned()) fn try_from(value: String) -> Result<Self, Self::Error> {
if ID_REGEX.is_match(&value) {
Ok(Id(ArcIntern::new(value)))
} else {
Err(InvalidId)
}
} }
} }
impl From<Id> for String { impl TryFrom<&str> for Id {
fn from(value: Id) -> Self { type Error = InvalidId;
value.0 fn try_from(value: &str) -> Result<Self, Self::Error> {
if ID_REGEX.is_match(&value) {
Ok(Id(ArcIntern::from_ref(value)))
} else {
Err(InvalidId)
}
} }
} }
impl<S: AsRef<str>> std::ops::Deref for Id<S> { impl std::ops::Deref for Id {
type Target = S; type Target = String;
fn deref(&self) -> &Self::Target { fn deref(&self) -> &Self::Target {
&self.0 &*self.0
} }
} }
impl<S: AsRef<str>> std::fmt::Display for Id<S> { impl std::fmt::Display for Id {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.0.as_ref()) write!(f, "{}", &*self.0)
} }
} }
impl<S: AsRef<str>> AsRef<str> for Id<S> { impl AsRef<str> for Id {
fn as_ref(&self) -> &str { fn as_ref(&self) -> &str {
self.0.as_ref() &*self.0
} }
} }
impl<S: AsRef<str>> Borrow<str> for Id<S> { impl Borrow<str> for Id {
fn borrow(&self) -> &str { fn borrow(&self) -> &str {
self.0.as_ref() self.0.as_ref()
} }
} }
impl<'de, S> Deserialize<'de> for Id<S> impl<'de> Deserialize<'de> for Id {
where
S: AsRef<str>,
IdUnchecked<S>: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where where
D: Deserializer<'de>, D: Deserializer<'de>,
{ {
let unchecked: IdUnchecked<S> = Deserialize::deserialize(deserializer)?; let unchecked: String = Deserialize::deserialize(deserializer)?;
Id::try_from(unchecked.0).map_err(serde::de::Error::custom) Id::try_from(unchecked).map_err(serde::de::Error::custom)
} }
} }
impl<S: AsRef<str>> Serialize for Id<S> { impl Serialize for Id {
fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error> fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
where where
Ser: Serializer, Ser: Serializer,

View File

@@ -1,55 +0,0 @@
use std::borrow::Cow;
use serde::{Deserialize, Deserializer};
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct IdUnchecked<S: AsRef<str>>(pub S);
impl<'de> Deserialize<'de> for IdUnchecked<Cow<'de, str>> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = IdUnchecked<Cow<'de, str>>;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(formatter, "a valid ID")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(IdUnchecked(Cow::Owned(v.to_owned())))
}
fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(IdUnchecked(Cow::Owned(v)))
}
fn visit_borrowed_str<E>(self, v: &'de str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(IdUnchecked(Cow::Borrowed(v)))
}
}
deserializer.deserialize_any(Visitor)
}
}
impl<'de> Deserialize<'de> for IdUnchecked<String> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Ok(IdUnchecked(String::deserialize(deserializer)?))
}
}
impl<'de> Deserialize<'de> for IdUnchecked<&'de str> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Ok(IdUnchecked(<&'de str>::deserialize(deserializer)?))
}
}

View File

@@ -1,27 +1,22 @@
use std::fmt::Debug; use std::fmt::Debug;
use std::str::FromStr; use std::str::FromStr;
pub use models::{Id, IdUnchecked, InvalidId, SYSTEM_ID};
use serde::{Deserialize, Deserializer, Serialize}; use serde::{Deserialize, Deserializer, Serialize};
use crate::util::Version; use crate::{Id, InvalidId, PackageId, Version};
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)] #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)]
pub struct ImageId<S: AsRef<str> = String>(Id<S>); pub struct ImageId(Id);
impl<S: AsRef<str>> std::fmt::Display for ImageId<S> { impl std::fmt::Display for ImageId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", &self.0) write!(f, "{}", &self.0)
} }
} }
impl<S: AsRef<str>> ImageId<S> { impl ImageId {
pub fn for_package<PkgId: AsRef<crate::s9pk::manifest::PackageId<S0>>, S0: AsRef<str>>( pub fn for_package(&self, pkg_id: &PackageId, pkg_version: Option<&Version>) -> String {
&self,
pkg_id: PkgId,
pkg_version: Option<&Version>,
) -> String {
format!( format!(
"start9/{}/{}:{}", "start9/{}/{}:{}",
pkg_id.as_ref(), pkg_id,
self.0, self.0,
pkg_version.map(|v| { v.as_str() }).unwrap_or("latest") pkg_version.map(|v| { v.as_str() }).unwrap_or("latest")
) )
@@ -33,11 +28,7 @@ impl FromStr for ImageId {
Ok(ImageId(Id::try_from(s.to_owned())?)) Ok(ImageId(Id::try_from(s.to_owned())?))
} }
} }
impl<'de, S> Deserialize<'de> for ImageId<S> impl<'de> Deserialize<'de> for ImageId {
where
S: AsRef<str>,
Id<S>: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where where
D: Deserializer<'de>, D: Deserializer<'de>,

View File

@@ -4,34 +4,30 @@ use serde::{Deserialize, Deserializer, Serialize};
use crate::Id; use crate::Id;
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Default)] #[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)]
pub struct InterfaceId<S: AsRef<str> = String>(Id<S>); pub struct InterfaceId(Id);
impl<S: AsRef<str>> From<Id<S>> for InterfaceId<S> { impl From<Id> for InterfaceId {
fn from(id: Id<S>) -> Self { fn from(id: Id) -> Self {
Self(id) Self(id)
} }
} }
impl<S: AsRef<str>> std::fmt::Display for InterfaceId<S> { impl std::fmt::Display for InterfaceId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", &self.0) write!(f, "{}", &self.0)
} }
} }
impl<S: AsRef<str>> std::ops::Deref for InterfaceId<S> { impl std::ops::Deref for InterfaceId {
type Target = S; type Target = String;
fn deref(&self) -> &Self::Target { fn deref(&self) -> &Self::Target {
&*self.0 &*self.0
} }
} }
impl<S: AsRef<str>> AsRef<str> for InterfaceId<S> { impl AsRef<str> for InterfaceId {
fn as_ref(&self) -> &str { fn as_ref(&self) -> &str {
self.0.as_ref() self.0.as_ref()
} }
} }
impl<'de, S> Deserialize<'de> for InterfaceId<S> impl<'de> Deserialize<'de> for InterfaceId {
where
S: AsRef<str>,
Id<S>: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where where
D: Deserializer<'de>, D: Deserializer<'de>,
@@ -39,7 +35,7 @@ where
Ok(InterfaceId(Deserialize::deserialize(deserializer)?)) Ok(InterfaceId(Deserialize::deserialize(deserializer)?))
} }
} }
impl<S: AsRef<str>> AsRef<Path> for InterfaceId<S> { impl AsRef<Path> for InterfaceId {
fn as_ref(&self) -> &Path { fn as_ref(&self) -> &Path {
self.0.as_ref().as_ref() self.0.as_ref().as_ref()
} }

View File

@@ -2,7 +2,7 @@ mod action_id;
mod errors; mod errors;
mod health_check_id; mod health_check_id;
mod id; mod id;
mod id_unchecked; mod image_id;
mod interface_id; mod interface_id;
mod invalid_id; mod invalid_id;
mod package_id; mod package_id;
@@ -14,7 +14,7 @@ pub use action_id::*;
pub use errors::*; pub use errors::*;
pub use health_check_id::*; pub use health_check_id::*;
pub use id::*; pub use id::*;
pub use id_unchecked::*; pub use image_id::*;
pub use interface_id::*; pub use interface_id::*;
pub use invalid_id::*; pub use invalid_id::*;
pub use package_id::*; pub use package_id::*;

View File

@@ -6,66 +6,54 @@ use serde::{Deserialize, Serialize, Serializer};
use crate::{Id, InvalidId, SYSTEM_ID}; use crate::{Id, InvalidId, SYSTEM_ID};
pub const SYSTEM_PACKAGE_ID: PackageId<&'static str> = PackageId(SYSTEM_ID); lazy_static::lazy_static! {
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub static ref SYSTEM_PACKAGE_ID: PackageId = PackageId(SYSTEM_ID.clone());
pub struct PackageId<S: AsRef<str> = String>(Id<S>);
impl<'a> PackageId<&'a str> {
pub fn owned(&self) -> PackageId {
PackageId(self.0.owned())
}
} }
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct PackageId(Id);
impl FromStr for PackageId { impl FromStr for PackageId {
type Err = InvalidId; type Err = InvalidId;
fn from_str(s: &str) -> Result<Self, Self::Err> { fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(PackageId(Id::try_from(s.to_owned())?)) Ok(PackageId(Id::try_from(s.to_owned())?))
} }
} }
impl From<PackageId> for String { impl From<Id> for PackageId {
fn from(value: PackageId) -> Self { fn from(id: Id) -> Self {
value.0.into()
}
}
impl<S: AsRef<str>> From<Id<S>> for PackageId<S> {
fn from(id: Id<S>) -> Self {
PackageId(id) PackageId(id)
} }
} }
impl<S: AsRef<str>> std::ops::Deref for PackageId<S> { impl std::ops::Deref for PackageId {
type Target = S; type Target = String;
fn deref(&self) -> &Self::Target { fn deref(&self) -> &Self::Target {
&*self.0 &*self.0
} }
} }
impl<S: AsRef<str>> AsRef<PackageId<S>> for PackageId<S> { impl AsRef<PackageId> for PackageId {
fn as_ref(&self) -> &PackageId<S> { fn as_ref(&self) -> &PackageId {
self self
} }
} }
impl<S: AsRef<str>> std::fmt::Display for PackageId<S> { impl std::fmt::Display for PackageId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", &self.0) write!(f, "{}", &self.0)
} }
} }
impl<S: AsRef<str>> AsRef<str> for PackageId<S> { impl AsRef<str> for PackageId {
fn as_ref(&self) -> &str { fn as_ref(&self) -> &str {
self.0.as_ref() self.0.as_ref()
} }
} }
impl<S: AsRef<str>> Borrow<str> for PackageId<S> { impl Borrow<str> for PackageId {
fn borrow(&self) -> &str { fn borrow(&self) -> &str {
self.0.as_ref() self.0.as_ref()
} }
} }
impl<S: AsRef<str>> AsRef<Path> for PackageId<S> { impl AsRef<Path> for PackageId {
fn as_ref(&self) -> &Path { fn as_ref(&self) -> &Path {
self.0.as_ref().as_ref() self.0.as_ref().as_ref()
} }
} }
impl<'de, S> Deserialize<'de> for PackageId<S> impl<'de> Deserialize<'de> for PackageId {
where
S: AsRef<str>,
Id<S>: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where where
D: serde::de::Deserializer<'de>, D: serde::de::Deserializer<'de>,
@@ -73,10 +61,7 @@ where
Ok(PackageId(Deserialize::deserialize(deserializer)?)) Ok(PackageId(Deserialize::deserialize(deserializer)?))
} }
} }
impl<S> Serialize for PackageId<S> impl Serialize for PackageId {
where
S: AsRef<str>,
{
fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error> fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
where where
Ser: Serializer, Ser: Serializer,

View File

@@ -3,14 +3,14 @@ use std::path::Path;
use serde::{Deserialize, Deserializer, Serialize}; use serde::{Deserialize, Deserializer, Serialize};
use crate::{Id, IdUnchecked}; use crate::Id;
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum VolumeId<S: AsRef<str> = String> { pub enum VolumeId {
Backup, Backup,
Custom(Id<S>), Custom(Id),
} }
impl<S: AsRef<str>> std::fmt::Display for VolumeId<S> { impl std::fmt::Display for VolumeId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self { match self {
VolumeId::Backup => write!(f, "BACKUP"), VolumeId::Backup => write!(f, "BACKUP"),
@@ -18,7 +18,7 @@ impl<S: AsRef<str>> std::fmt::Display for VolumeId<S> {
} }
} }
} }
impl<S: AsRef<str>> AsRef<str> for VolumeId<S> { impl AsRef<str> for VolumeId {
fn as_ref(&self) -> &str { fn as_ref(&self) -> &str {
match self { match self {
VolumeId::Backup => "BACKUP", VolumeId::Backup => "BACKUP",
@@ -26,33 +26,29 @@ impl<S: AsRef<str>> AsRef<str> for VolumeId<S> {
} }
} }
} }
impl<S: AsRef<str>> Borrow<str> for VolumeId<S> { impl Borrow<str> for VolumeId {
fn borrow(&self) -> &str { fn borrow(&self) -> &str {
self.as_ref() self.as_ref()
} }
} }
impl<S: AsRef<str>> AsRef<Path> for VolumeId<S> { impl AsRef<Path> for VolumeId {
fn as_ref(&self) -> &Path { fn as_ref(&self) -> &Path {
AsRef::<str>::as_ref(self).as_ref() AsRef::<str>::as_ref(self).as_ref()
} }
} }
impl<'de, S> Deserialize<'de> for VolumeId<S> impl<'de> Deserialize<'de> for VolumeId {
where
S: AsRef<str>,
IdUnchecked<S>: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error> fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where where
D: Deserializer<'de>, D: Deserializer<'de>,
{ {
let unchecked: IdUnchecked<S> = Deserialize::deserialize(deserializer)?; let unchecked: String = Deserialize::deserialize(deserializer)?;
Ok(match unchecked.0.as_ref() { Ok(match unchecked.as_ref() {
"BACKUP" => VolumeId::Backup, "BACKUP" => VolumeId::Backup,
_ => VolumeId::Custom(Id::try_from(unchecked.0).map_err(serde::de::Error::custom)?), _ => VolumeId::Custom(Id::try_from(unchecked).map_err(serde::de::Error::custom)?),
}) })
} }
} }
impl<S: AsRef<str>> Serialize for VolumeId<S> { impl Serialize for VolumeId {
fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error> fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
where where
Ser: serde::Serializer, Ser: serde::Serializer,

View File

@@ -50,6 +50,21 @@ dependencies = [
"memchr", "memchr",
] ]
[[package]]
name = "alloc-no-stdlib"
version = "2.0.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cc7bb162ec39d46ab1ca8c77bf72e890535becd1751bb45f64c597edb4c8c6b3"
[[package]]
name = "alloc-stdlib"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "94fb8275041c72129eb51b7d0322c29b8387a0386127718b096429201a5d6ece"
dependencies = [
"alloc-no-stdlib",
]
[[package]] [[package]]
name = "android_system_properties" name = "android_system_properties"
version = "0.1.5" version = "0.1.5"
@@ -109,6 +124,20 @@ dependencies = [
"futures-core", "futures-core",
] ]
[[package]]
name = "async-compression"
version = "0.3.15"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "942c7cd7ae39e91bde4820d74132e9862e62c2f386c3aa90ccf55949f5bad63a"
dependencies = [
"brotli",
"flate2",
"futures-core",
"memchr",
"pin-project-lite",
"tokio",
]
[[package]] [[package]]
name = "async-stream" name = "async-stream"
version = "0.3.3" version = "0.3.3"
@@ -364,6 +393,27 @@ dependencies = [
"serde_with 1.14.0", "serde_with 1.14.0",
] ]
[[package]]
name = "brotli"
version = "3.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a1a0b1dbcc8ae29329621f8d4f0d835787c1c38bb1401979b49d13b0b305ff68"
dependencies = [
"alloc-no-stdlib",
"alloc-stdlib",
"brotli-decompressor",
]
[[package]]
name = "brotli-decompressor"
version = "2.3.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4b6561fd3f895a11e8f72af2cb7d22e08366bebc2b6b57f7744c4bda27034744"
dependencies = [
"alloc-no-stdlib",
"alloc-stdlib",
]
[[package]] [[package]]
name = "bstr" name = "bstr"
version = "0.2.17" version = "0.2.17"
@@ -378,9 +428,9 @@ dependencies = [
[[package]] [[package]]
name = "bumpalo" name = "bumpalo"
version = "3.11.1" version = "3.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "572f695136211188308f16ad2ca5c851a712c464060ae6974944458eb83880ba" checksum = "0d261e256854913907f67ed06efbc3338dfe6179796deefc1ff763fc1aee5535"
[[package]] [[package]]
name = "byteorder" name = "byteorder"
@@ -1033,7 +1083,19 @@ dependencies = [
"der", "der",
"elliptic-curve", "elliptic-curve",
"rfc6979", "rfc6979",
"signature", "signature 1.6.4",
]
[[package]]
name = "ecdsa"
version = "0.15.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "12844141594ad74185a926d030f3b605f6a903b4e3fec351f3ea338ac5b7637e"
dependencies = [
"der",
"elliptic-curve",
"rfc6979",
"signature 2.0.0",
] ]
[[package]] [[package]]
@@ -1044,7 +1106,7 @@ checksum = "91cff35c70bba8a626e3185d8cd48cc11b5437e1a5bcd15b9b5fa3c64b6dfee7"
dependencies = [ dependencies = [
"pkcs8", "pkcs8",
"serde", "serde",
"signature", "signature 1.6.4",
] ]
[[package]] [[package]]
@@ -1084,6 +1146,8 @@ dependencies = [
"ff", "ff",
"generic-array", "generic-array",
"group", "group",
"pem-rfc7468",
"pkcs8",
"rand_core 0.6.4", "rand_core 0.6.4",
"sec1", "sec1",
"subtle", "subtle",
@@ -1092,9 +1156,10 @@ dependencies = [
[[package]] [[package]]
name = "embassy-os" name = "embassy-os"
version = "0.3.3" version = "0.3.4"
dependencies = [ dependencies = [
"aes", "aes",
"async-compression",
"async-stream", "async-stream",
"async-trait", "async-trait",
"base32", "base32",
@@ -1128,6 +1193,8 @@ dependencies = [
"hyper-ws-listener", "hyper-ws-listener",
"imbl 2.0.0", "imbl 2.0.0",
"indexmap", "indexmap",
"ipnet",
"iprange",
"isocountry", "isocountry",
"itertools 0.10.5", "itertools 0.10.5",
"josekit", "josekit",
@@ -1143,6 +1210,7 @@ dependencies = [
"num_enum", "num_enum",
"openssh-keys", "openssh-keys",
"openssl", "openssl",
"p256 0.12.0",
"patch-db", "patch-db",
"pbkdf2", "pbkdf2",
"pin-project", "pin-project",
@@ -1187,6 +1255,7 @@ dependencies = [
"typed-builder", "typed-builder",
"url", "url",
"uuid", "uuid",
"zeroize",
] ]
[[package]] [[package]]
@@ -1373,7 +1442,7 @@ dependencies = [
"cfg-if", "cfg-if",
"libc", "libc",
"redox_syscall 0.2.16", "redox_syscall 0.2.16",
"windows-sys", "windows-sys 0.42.0",
] ]
[[package]] [[package]]
@@ -1704,6 +1773,12 @@ dependencies = [
"libc", "libc",
] ]
[[package]]
name = "hermit-abi"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fed44880c466736ef9a5c5b5facefb5ed0785676d0c02d612db14e54f0d84286"
[[package]] [[package]]
name = "hex" name = "hex"
version = "0.4.3" version = "0.4.3"
@@ -1958,6 +2033,20 @@ dependencies = [
"cfg-if", "cfg-if",
] ]
[[package]]
name = "internment"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2a798d7677f07d6f1e77be484ea8626ddb1566194de399f1206306820c406371"
dependencies = [
"ahash",
"dashmap",
"hashbrown",
"once_cell",
"parking_lot 0.12.1",
"serde",
]
[[package]] [[package]]
name = "io-lifetimes" name = "io-lifetimes"
version = "1.0.4" version = "1.0.4"
@@ -1965,7 +2054,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e7d6c6f8c91b4b9ed43484ad1a938e393caf35960fce7f82a040497207bd8e9e" checksum = "e7d6c6f8c91b4b9ed43484ad1a938e393caf35960fce7f82a040497207bd8e9e"
dependencies = [ dependencies = [
"libc", "libc",
"windows-sys", "windows-sys 0.42.0",
] ]
[[package]] [[package]]
@@ -1973,6 +2062,31 @@ name = "ipnet"
version = "2.7.1" version = "2.7.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146" checksum = "30e22bd8629359895450b59ea7a776c850561b96a3b1d31321c1949d9e6c9146"
dependencies = [
"serde",
]
[[package]]
name = "iprange"
version = "0.6.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "37209be0ad225457e63814401415e748e2453a5297f9b637338f5fb8afa4ec00"
dependencies = [
"ipnet",
"serde",
]
[[package]]
name = "is-terminal"
version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "21b6b32576413a8e69b90e952e4a026476040d81017b80445deda5f2d3921857"
dependencies = [
"hermit-abi 0.3.1",
"io-lifetimes",
"rustix",
"windows-sys 0.45.0",
]
[[package]] [[package]]
name = "isocountry" name = "isocountry"
@@ -2295,7 +2409,7 @@ dependencies = [
"libc", "libc",
"log", "log",
"wasi 0.11.0+wasi-snapshot-preview1", "wasi 0.11.0+wasi-snapshot-preview1",
"windows-sys", "windows-sys 0.42.0",
] ]
[[package]] [[package]]
@@ -2306,10 +2420,14 @@ dependencies = [
"color-eyre", "color-eyre",
"ed25519-dalek", "ed25519-dalek",
"emver", "emver",
"internment",
"ipnet",
"lazy_static",
"mbrman", "mbrman",
"openssl", "openssl",
"patch-db", "patch-db",
"rand 0.8.5", "rand 0.8.5",
"regex",
"rpc-toolkit", "rpc-toolkit",
"serde", "serde",
"serde_json", "serde_json",
@@ -2651,18 +2769,30 @@ version = "0.11.1"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594" checksum = "51f44edd08f51e2ade572f141051021c5af22677e42b7dd28a88155151c33594"
dependencies = [ dependencies = [
"ecdsa", "ecdsa 0.14.8",
"elliptic-curve", "elliptic-curve",
"sha2 0.10.6", "sha2 0.10.6",
] ]
[[package]]
name = "p256"
version = "0.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "49c124b3cbce43bcbac68c58ec181d98ed6cc7e6d0aa7c3ba97b2563410b0e55"
dependencies = [
"ecdsa 0.15.1",
"elliptic-curve",
"primeorder",
"sha2 0.10.6",
]
[[package]] [[package]]
name = "p384" name = "p384"
version = "0.11.2" version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa" checksum = "dfc8c5bf642dde52bb9e87c0ecd8ca5a76faac2eeed98dedb7c717997e1080aa"
dependencies = [ dependencies = [
"ecdsa", "ecdsa 0.14.8",
"elliptic-curve", "elliptic-curve",
"sha2 0.10.6", "sha2 0.10.6",
] ]
@@ -2712,7 +2842,7 @@ dependencies = [
"libc", "libc",
"redox_syscall 0.2.16", "redox_syscall 0.2.16",
"smallvec", "smallvec",
"windows-sys", "windows-sys 0.42.0",
] ]
[[package]] [[package]]
@@ -2943,18 +3073,27 @@ checksum = "925383efa346730478fb4838dbe9137d2a47675ad789c546d150a6e1dd4ab31c"
[[package]] [[package]]
name = "prettytable-rs" name = "prettytable-rs"
version = "0.9.0" version = "0.10.0"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5f375cb74c23b51d23937ffdeb48b1fbf5b6409d4b9979c1418c1de58bc8f801" checksum = "eea25e07510aa6ab6547308ebe3c036016d162b8da920dbb079e3ba8acf3d95a"
dependencies = [ dependencies = [
"atty",
"csv", "csv",
"encode_unicode", "encode_unicode",
"is-terminal",
"lazy_static", "lazy_static",
"term", "term",
"unicode-width", "unicode-width",
] ]
[[package]]
name = "primeorder"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0b54f7131b3dba65a2f414cf5bd25b66d4682e4608610668eae785750ba4c5b2"
dependencies = [
"elliptic-curve",
]
[[package]] [[package]]
name = "proc-macro-crate" name = "proc-macro-crate"
version = "1.2.1" version = "1.2.1"
@@ -3399,7 +3538,7 @@ dependencies = [
"pkcs1", "pkcs1",
"pkcs8", "pkcs8",
"rand_core 0.6.4", "rand_core 0.6.4",
"signature", "signature 1.6.4",
"smallvec", "smallvec",
"subtle", "subtle",
"zeroize", "zeroize",
@@ -3453,7 +3592,7 @@ dependencies = [
"io-lifetimes", "io-lifetimes",
"libc", "libc",
"linux-raw-sys", "linux-raw-sys",
"windows-sys", "windows-sys 0.42.0",
] ]
[[package]] [[package]]
@@ -3507,7 +3646,7 @@ version = "0.1.21"
source = "registry+https://github.com/rust-lang/crates.io-index" source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3" checksum = "713cfb06c7059f3588fb8044c0fad1d09e3c01d225e25b9220dbfdcf16dbb1b3"
dependencies = [ dependencies = [
"windows-sys", "windows-sys 0.42.0",
] ]
[[package]] [[package]]
@@ -3825,6 +3964,16 @@ dependencies = [
"rand_core 0.6.4", "rand_core 0.6.4",
] ]
[[package]]
name = "signature"
version = "2.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8fe458c98333f9c8152221191a77e2a44e8325d0193484af2e9421a53019e57d"
dependencies = [
"digest 0.10.6",
"rand_core 0.6.4",
]
[[package]] [[package]]
name = "simple-logging" name = "simple-logging"
version = "2.0.2" version = "2.0.2"
@@ -4019,13 +4168,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "288d8f5562af5a3be4bda308dd374b2c807b940ac370b5efa1c99311da91d9a1" checksum = "288d8f5562af5a3be4bda308dd374b2c807b940ac370b5efa1c99311da91d9a1"
dependencies = [ dependencies = [
"ed25519-dalek", "ed25519-dalek",
"p256", "p256 0.11.1",
"p384", "p384",
"rand_core 0.6.4", "rand_core 0.6.4",
"rsa", "rsa",
"sec1", "sec1",
"sha2 0.10.6", "sha2 0.10.6",
"signature", "signature 1.6.4",
"ssh-encoding", "ssh-encoding",
"zeroize", "zeroize",
] ]
@@ -4303,7 +4452,7 @@ dependencies = [
"signal-hook-registry", "signal-hook-registry",
"socket2", "socket2",
"tokio-macros", "tokio-macros",
"windows-sys", "windows-sys 0.42.0",
] ]
[[package]] [[package]]
@@ -4969,6 +5118,30 @@ dependencies = [
"windows_x86_64_msvc", "windows_x86_64_msvc",
] ]
[[package]]
name = "windows-sys"
version = "0.45.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75283be5efb2831d37ea142365f009c02ec203cd29a3ebecbc093d52315b66d0"
dependencies = [
"windows-targets",
]
[[package]]
name = "windows-targets"
version = "0.42.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8e2522491fbfcd58cc84d47aeb2958948c4b8982e9a2d8a2a35bbaed431390e7"
dependencies = [
"windows_aarch64_gnullvm",
"windows_aarch64_msvc",
"windows_i686_gnu",
"windows_i686_msvc",
"windows_x86_64_gnu",
"windows_x86_64_gnullvm",
"windows_x86_64_msvc",
]
[[package]] [[package]]
name = "windows_aarch64_gnullvm" name = "windows_aarch64_gnullvm"
version = "0.42.1" version = "0.42.1"