Compare commits

...

55 Commits

Author SHA1 Message Date
Aiden McClelland
dd28ad20ef use port instead of pidof to detect tor going down (#2320)
* use port instead of pidof to detect tor going down

* fix errors

* healthcheck timeout
2023-06-23 13:06:00 -06:00
Aiden McClelland
ef416ef60b prevent tor from spinning if a service is in a crash loop (#2316) 2023-06-22 18:09:59 +00:00
Aiden McClelland
95b3b55971 fix rootflags for btrfs update (#2315) 2023-06-21 15:26:27 +00:00
Aiden McClelland
b3f32ae03e don't use cp when over cifs 2023-06-21 00:36:36 +00:00
Aiden McClelland
c7472174e5 fix btrfs rootflags 2023-06-21 00:36:36 +00:00
gStart9
2ad749354d Add qemu-guest-agent for advanced VM shutdown options (#2309) 2023-06-21 00:36:36 +00:00
Aiden McClelland
4ed9d2ea22 add grub-common to build 2023-06-21 00:36:36 +00:00
Lucy Cifferello
280eb47de7 update marketplace project to include mime type pipe for icons 2023-06-21 00:36:36 +00:00
Aiden McClelland
324a12b0ff reset config after pg_upgrade 2023-06-21 00:36:36 +00:00
Aiden McClelland
a2543ccddc trim fs name 2023-06-21 00:36:36 +00:00
Aiden McClelland
22666412c3 use fsck instead of e2fsck 2023-06-21 00:36:36 +00:00
Aiden McClelland
dd58044cdf fix build 2023-06-21 00:36:36 +00:00
Aiden McClelland
10312d89d7 fix ipv6 2023-06-21 00:36:36 +00:00
Aiden McClelland
b4c0d877cb fix postgres migration 2023-06-21 00:36:36 +00:00
Aiden McClelland
e95d56a5d0 fix update-grub2 2023-06-21 00:36:36 +00:00
Aiden McClelland
90424e8329 install fixes 2023-06-21 00:36:36 +00:00
Aiden McClelland
1bfeb42a06 force btrfs creation 2023-06-21 00:36:36 +00:00
Aiden McClelland
a936f92954 use postgres user 2023-06-21 00:36:36 +00:00
Aiden McClelland
0bc514ec17 include old pg 2023-06-21 00:36:36 +00:00
Aiden McClelland
a2cf4001af improve invoke error reporting 2023-06-21 00:36:36 +00:00
Aiden McClelland
cb4e12a68c fix build 2023-06-21 00:36:36 +00:00
Aiden McClelland
a7f5124dfe postgresql migration 2023-06-21 00:36:36 +00:00
Aiden McClelland
ccbf71c5e7 fix ipv6 2023-06-21 00:36:36 +00:00
Aiden McClelland
04bf5f58d9 fix tor listener bug 2023-06-21 00:36:36 +00:00
Aiden McClelland
ab3f5956d4 ipv6 2023-06-21 00:36:36 +00:00
Aiden McClelland
c1fe8e583f backup target mount/umount 2023-06-21 00:36:36 +00:00
Lucy Cifferello
fd166c4433 do not load array buffer into memory 2023-06-21 00:36:36 +00:00
Aiden McClelland
f29c7ba4f2 don't wait for install to complete on sideload 2023-06-21 00:36:36 +00:00
Aiden McClelland
88869e9710 gpu acceleration 2023-06-21 00:36:36 +00:00
Aiden McClelland
f8404ab043 btrfs 2023-06-21 00:36:36 +00:00
Aiden McClelland
9fa5d1ff9e suite independent 2023-06-21 00:36:36 +00:00
Aiden McClelland
483f353fd0 backup luks headers 2023-06-21 00:36:36 +00:00
Aiden McClelland
a11bf5b5c7 bookworm 2023-06-21 00:36:36 +00:00
Aiden McClelland
d4113ff753 re-add server version and version range 2023-06-21 00:36:36 +00:00
Aiden McClelland
1969f036fa deser full server info 2023-06-21 00:36:36 +00:00
Matt Hill
8c90e01016 hide range ip addresses, update release notes 2023-06-15 13:20:37 -06:00
Matt Hill
756c5c9b99 small spelling mistake 2023-06-11 15:04:59 -06:00
Lucy Cifferello
ee54b355af fix compliation error on widgets page 2023-06-11 15:04:59 -06:00
Lucy Cifferello
26cbbc0c56 adjust start9 registry icon 2023-06-11 15:04:59 -06:00
Aiden McClelland
f4f719d52a misc fixes 2023-06-11 15:04:59 -06:00
Aiden McClelland
f2071d8b7e update zram bool 2023-06-11 15:04:59 -06:00
Aiden McClelland
df88a55784 v0.3.4.3 2023-06-11 15:04:59 -06:00
Matt Hill
3ccbc626ff experimental features for zram and reset tor (#2299)
* experimental features for zram and reset tor

* zram backend

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-06-11 15:04:59 -06:00
Aiden McClelland
71a15cf222 add diskUsage effect (#2297) 2023-06-11 15:04:59 -06:00
Aiden McClelland
26ddf769b1 remove overload restart rule 2023-06-11 15:04:59 -06:00
Aiden McClelland
3137387c0c only set static hostname 2023-06-11 15:04:59 -06:00
Aiden McClelland
fc142cfde8 reset tor (#2296)
* reset tor

* Update tor.rs

* timeout connect

* handle stuck bootstrapping
2023-06-11 15:04:59 -06:00
Aiden McClelland
b0503fa507 Bugfix/incoherent (#2293)
* debug incoherent error

* fix incoherent error

* use new debspawn
2023-06-11 15:04:59 -06:00
Matt Hill
b86a97c9c0 add resetTor to rpc client 2023-06-11 15:04:59 -06:00
Lucy Cifferello
eb6cd23772 update registry icon 2023-06-11 15:04:59 -06:00
Matt Hill
efae1e7e6c add Tor logs to UI 2023-06-11 15:04:59 -06:00
Lucy Cifferello
19d55b840e add registry icon to preloader 2023-06-11 15:04:59 -06:00
Lucy Cifferello
cc0c1d05ab update frontend to 0.3.4.3 2023-06-11 15:04:59 -06:00
Lucy Cifferello
f088f65d5a update branding 2023-06-11 15:04:59 -06:00
Lucy Cifferello
5441b5a06b add missing items to preloader 2023-06-11 15:04:59 -06:00
91 changed files with 2147 additions and 565 deletions

View File

@@ -107,9 +107,9 @@ jobs:
run: |
sudo apt-get update
sudo apt-get install -y qemu-user-static
wget http://ftp.us.debian.org/debian/pool/main/d/debspawn/debspawn_0.6.1-1_all.deb
sha256sum ./debspawn_0.6.1-1_all.deb | grep fb8a3f588438ff9ef51e713ec1d83306db893f0aa97447565e28bbba9c6e90c6
sudo apt-get install -y ./debspawn_0.6.1-1_all.deb
wget https://deb.debian.org/debian/pool/main/d/debspawn/debspawn_0.6.2-1_all.deb
sha256sum ./debspawn_0.6.2-1_all.deb | grep 37ef27458cb1e35e8bce4d4f639b06b4b3866fc0b9191ec6b9bd157afd06a817
sudo apt-get install -y ./debspawn_0.6.2-1_all.deb
- name: Configure debspawn
run: |
@@ -119,10 +119,7 @@ jobs:
- uses: actions/cache@v3
with:
path: /var/lib/debspawn
key: ${{ runner.os }}-debspawn-init-bullseye
- name: Make build container
run: "debspawn list | grep bullseye || debspawn create bullseye"
key: ${{ runner.os }}-debspawn-init
- run: "mkdir -p overlays/deb"

4
backend/Cargo.lock generated
View File

@@ -1354,7 +1354,7 @@ dependencies = [
[[package]]
name = "embassy-os"
version = "0.3.4-rev.2"
version = "0.3.4-rev.3"
dependencies = [
"aes",
"async-compression",
@@ -1442,6 +1442,7 @@ dependencies = [
"thiserror",
"tokio",
"tokio-rustls",
"tokio-socks",
"tokio-stream",
"tokio-tar",
"tokio-tungstenite",
@@ -2438,6 +2439,7 @@ dependencies = [
"dprint-swc-ext",
"embassy_container_init",
"helpers",
"itertools 0.10.5",
"models",
"reqwest",
"serde",

View File

@@ -14,7 +14,7 @@ keywords = [
name = "embassy-os"
readme = "README.md"
repository = "https://github.com/Start9Labs/start-os"
version = "0.3.4-rev.2"
version = "0.3.4-rev.3"
[lib]
name = "embassy"
@@ -152,6 +152,7 @@ tokio-stream = { version = "0.1.11", features = ["io-util", "sync", "net"] }
tokio-tar = { git = "https://github.com/dr-bonez/tokio-tar.git" }
tokio-tungstenite = { version = "0.17.1", features = ["native-tls"] }
tokio-rustls = "0.23.4"
tokio-socks = "0.5.1"
tokio-util = { version = "0.7.3", features = ["io"] }
torut = "0.2.1"
tracing = "0.1.35"

View File

@@ -1,5 +1,5 @@
use std::collections::{BTreeMap, BTreeSet};
use std::path::PathBuf;
use std::path::{Path, PathBuf};
use chrono::Utc;
use clap::ArgMatches;
@@ -8,6 +8,7 @@ use helpers::AtomicFile;
use patch_db::{DbHandle, LockType, PatchDbHandle};
use rpc_toolkit::command;
use tokio::io::AsyncWriteExt;
use tokio::process::Command;
use tracing::instrument;
use super::target::BackupTargetId;
@@ -23,8 +24,9 @@ use crate::disk::mount::guard::TmpMountGuard;
use crate::notifications::NotificationLevel;
use crate::s9pk::manifest::PackageId;
use crate::status::MainStatus;
use crate::util::display_none;
use crate::util::io::dir_copy;
use crate::util::serde::IoFormat;
use crate::util::{display_none, Invoke};
use crate::version::VersionT;
use crate::{Error, ErrorKind, ResultExt};
@@ -358,6 +360,19 @@ async fn perform_backup<Db: DbHandle>(
.await
.with_kind(ErrorKind::Filesystem)?;
let luks_folder_old = backup_guard.as_ref().join("luks.old");
if tokio::fs::metadata(&luks_folder_old).await.is_ok() {
tokio::fs::remove_dir_all(&luks_folder_old).await?;
}
let luks_folder_bak = backup_guard.as_ref().join("luks");
if tokio::fs::metadata(&luks_folder_bak).await.is_ok() {
tokio::fs::rename(&luks_folder_bak, &luks_folder_old).await?;
}
let luks_folder = Path::new("/media/embassy/config/luks");
if tokio::fs::metadata(&luks_folder).await.is_ok() {
dir_copy(&luks_folder, &luks_folder_bak).await?;
}
let timestamp = Some(Utc::now());
backup_guard.unencrypted_metadata.version = crate::version::Current::new().semver().into();

View File

@@ -1,12 +1,13 @@
use openssl::pkey::PKey;
use openssl::x509::X509;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use crate::account::AccountInfo;
use crate::hostname::{generate_hostname, generate_id, Hostname};
use crate::net::keys::Key;
use crate::util::serde::Base64;
use crate::Error;
use openssl::pkey::PKey;
use openssl::x509::X509;
use serde::{Deserialize, Serialize};
use serde_json::Value;
pub struct OsBackup {
pub account: AccountInfo,

View File

@@ -6,8 +6,8 @@ use std::time::Duration;
use clap::ArgMatches;
use color_eyre::eyre::eyre;
use futures::{future::BoxFuture, stream};
use futures::{FutureExt, StreamExt};
use futures::future::BoxFuture;
use futures::{stream, FutureExt, StreamExt};
use openssl::x509::X509;
use patch_db::{DbHandle, PatchDbHandle};
use rpc_toolkit::command;
@@ -443,7 +443,7 @@ async fn restore_package<'a>(
Ok((
progress.clone(),
async move {
download_install_s9pk(&ctx, &manifest, None, progress, file).await?;
download_install_s9pk(&ctx, &manifest, None, progress, file, None).await?;
guard.unmount().await?;

View File

@@ -7,10 +7,12 @@ use clap::ArgMatches;
use color_eyre::eyre::eyre;
use digest::generic_array::GenericArray;
use digest::OutputSizeUser;
use lazy_static::lazy_static;
use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use sqlx::{Executor, Postgres};
use tokio::sync::Mutex;
use tracing::instrument;
use self::cifs::CifsBackupTarget;
@@ -23,7 +25,7 @@ use crate::disk::mount::guard::TmpMountGuard;
use crate::disk::util::PartitionInfo;
use crate::s9pk::manifest::PackageId;
use crate::util::serde::{deserialize_from_str, display_serializable, serialize_display};
use crate::util::Version;
use crate::util::{display_none, Version};
use crate::Error;
pub mod cifs;
@@ -42,7 +44,7 @@ pub enum BackupTarget {
Cifs(CifsBackupTarget),
}
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum BackupTargetId {
Disk { logicalname: PathBuf },
Cifs { id: i32 },
@@ -129,7 +131,7 @@ impl FileSystem for BackupTargetFS {
}
}
#[command(subcommands(cifs::cifs, list, info))]
#[command(subcommands(cifs::cifs, list, info, mount, umount))]
pub fn target() -> Result<(), Error> {
Ok(())
}
@@ -247,3 +249,61 @@ pub async fn info(
Ok(res)
}
lazy_static! {
static ref USER_MOUNTS: Mutex<BTreeMap<BackupTargetId, BackupMountGuard<TmpMountGuard>>> =
Mutex::new(BTreeMap::new());
}
#[command]
#[instrument(skip_all)]
pub async fn mount(
#[context] ctx: RpcContext,
#[arg(rename = "target-id")] target_id: BackupTargetId,
#[arg] password: String,
) -> Result<String, Error> {
let mut mounts = USER_MOUNTS.lock().await;
if let Some(existing) = mounts.get(&target_id) {
return Ok(existing.as_ref().display().to_string());
}
let guard = BackupMountGuard::mount(
TmpMountGuard::mount(
&target_id
.clone()
.load(&mut ctx.secret_store.acquire().await?)
.await?,
ReadWrite,
)
.await?,
&password,
)
.await?;
let res = guard.as_ref().display().to_string();
mounts.insert(target_id, guard);
Ok(res)
}
#[command(display(display_none))]
#[instrument(skip_all)]
pub async fn umount(
#[context] ctx: RpcContext,
#[arg(rename = "target-id")] target_id: Option<BackupTargetId>,
) -> Result<(), Error> {
let mut mounts = USER_MOUNTS.lock().await;
if let Some(target_id) = target_id {
if let Some(existing) = mounts.remove(&target_id) {
existing.unmount().await?;
}
} else {
for (_, existing) in std::mem::take(&mut *mounts) {
existing.unmount().await?;
}
}
Ok(())
}

View File

@@ -1,3 +1,4 @@
use std::net::{Ipv6Addr, SocketAddr};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
@@ -59,7 +60,11 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
let ctx = InstallContext::init(cfg_path).await?;
let server = WebServer::install(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
let server = WebServer::install(
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
ctx.clone(),
)
.await?;
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
CHIME.play().await?;
@@ -81,7 +86,11 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
{
let ctx = SetupContext::init(cfg_path).await?;
let server = WebServer::setup(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
let server = WebServer::setup(
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
ctx.clone(),
)
.await?;
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
CHIME.play().await?;
@@ -192,7 +201,11 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
)
.await?;
let server = WebServer::diagnostic(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
let server = WebServer::diagnostic(
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
ctx.clone(),
)
.await?;
let shutdown = ctx.shutdown.subscribe().recv().await.unwrap();

View File

@@ -1,3 +1,4 @@
use std::net::{Ipv6Addr, SocketAddr};
use std::path::{Path, PathBuf};
use std::sync::Arc;
@@ -26,7 +27,11 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
)
.await?;
embassy::hostname::sync_hostname(&rpc_ctx.account.read().await.hostname).await?;
let server = WebServer::main(([0, 0, 0, 0], 80).into(), rpc_ctx.clone()).await?;
let server = WebServer::main(
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
rpc_ctx.clone(),
)
.await?;
let mut shutdown_recv = rpc_ctx.shutdown.subscribe();
@@ -141,8 +146,11 @@ fn main() {
)
.await?;
let server =
WebServer::diagnostic(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
let server = WebServer::diagnostic(
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
ctx.clone(),
)
.await?;
let mut shutdown = ctx.shutdown.subscribe();

View File

@@ -503,19 +503,27 @@ pub fn configure_rec<'a, Db: DbHandle>(
.config_actions
.get(db, id)
.await?
.ok_or_else(not_found)?;
.ok_or_else(|| not_found!(id))?;
let dependencies = receipts
.dependencies
.get(db, id)
.await?
.ok_or_else(not_found)?;
let volumes = receipts.volumes.get(db, id).await?.ok_or_else(not_found)?;
.ok_or_else(|| not_found!(id))?;
let volumes = receipts
.volumes
.get(db, id)
.await?
.ok_or_else(|| not_found!(id))?;
let is_needs_config = !receipts
.configured
.get(db, id)
.await?
.ok_or_else(not_found)?;
let version = receipts.version.get(db, id).await?.ok_or_else(not_found)?;
.ok_or_else(|| not_found!(id))?;
let version = receipts
.version
.get(db, id)
.await?
.ok_or_else(|| not_found!(id))?;
// get current config and current spec
let ConfigRes {
@@ -530,7 +538,11 @@ pub fn configure_rec<'a, Db: DbHandle>(
spec.gen(&mut rand::rngs::StdRng::from_entropy(), timeout)?
};
let manifest = receipts.manifest.get(db, id).await?.ok_or_else(not_found)?;
let manifest = receipts
.manifest
.get(db, id)
.await?
.ok_or_else(|| not_found!(id))?;
spec.validate(&manifest)?;
spec.matches(&config)?; // check that new config matches spec
@@ -549,7 +561,7 @@ pub fn configure_rec<'a, Db: DbHandle>(
.system_pointers
.get(db, &id)
.await?
.ok_or_else(not_found)?;
.ok_or_else(|| not_found!(id))?;
sys.truncate(0);
let mut current_dependencies: CurrentDependencies = CurrentDependencies(
dependencies
@@ -655,7 +667,7 @@ pub fn configure_rec<'a, Db: DbHandle>(
.dependency_errors
.get(db, &id)
.await?
.ok_or_else(not_found)?;
.ok_or_else(|| not_found!(id))?;
tracing::warn!("Dependency Errors: {:?}", errs);
let errs = DependencyErrors::init(
ctx,
@@ -675,7 +687,7 @@ pub fn configure_rec<'a, Db: DbHandle>(
.current_dependents
.get(db, id)
.await?
.ok_or_else(not_found)?;
.ok_or_else(|| not_found!(id))?;
let prev = if is_needs_config { None } else { old_config }
.map(Value::Object)
.unwrap_or_default();
@@ -693,7 +705,7 @@ pub fn configure_rec<'a, Db: DbHandle>(
.manifest
.get(db, &dependent)
.await?
.ok_or_else(not_found)?;
.ok_or_else(|| not_found!(id))?;
if let Err(error) = cfg
.check(
ctx,
@@ -771,10 +783,16 @@ pub fn configure_rec<'a, Db: DbHandle>(
}
.boxed()
}
#[instrument(skip_all)]
pub fn not_found() -> Error {
Error::new(eyre!("Could not find"), crate::ErrorKind::Incoherent)
macro_rules! not_found {
($x:expr) => {
crate::Error::new(
color_eyre::eyre::eyre!("Could not find {} at {}:{}", $x, module_path!(), line!()),
crate::ErrorKind::Incoherent,
)
};
}
pub(crate) use not_found;
/// We want to have a double check that the paths are what we expect them to be.
/// Found that earlier the paths where not what we expected them to be.

View File

@@ -17,12 +17,11 @@ use rpc_toolkit::Context;
use serde::Deserialize;
use tracing::instrument;
use super::setup::CURRENT_SECRET;
use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH;
use crate::util::config::{load_config_from_paths, local_config_path};
use crate::ResultExt;
use super::setup::CURRENT_SECRET;
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct CliContextConfig {

View File

@@ -19,11 +19,12 @@ use sqlx::PgPool;
use tokio::sync::{broadcast, oneshot, Mutex, RwLock};
use tracing::instrument;
use super::setup::CURRENT_SECRET;
use crate::account::AccountInfo;
use crate::core::rpc_continuations::{RequestGuid, RestHandler, RpcContinuation};
use crate::db::model::{Database, InstalledPackageDataEntry, PackageDataEntry};
use crate::db::model::{CurrentDependents, Database, InstalledPackageDataEntry, PackageDataEntry};
use crate::disk::OsPartitionInfo;
use crate::init::{init_postgres, pgloader};
use crate::init::init_postgres;
use crate::install::cleanup::{cleanup_failed, uninstall, CleanupFailedReceipts};
use crate::manager::ManagerMap;
use crate::middleware::auth::HashSessionToken;
@@ -36,8 +37,6 @@ use crate::status::{MainStatus, Status};
use crate::util::config::load_config_from_paths;
use crate::{Error, ErrorKind, ResultExt};
use super::setup::CURRENT_SECRET;
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct RpcContextConfig {
@@ -96,15 +95,6 @@ impl RpcContextConfig {
.run(&secret_store)
.await
.with_kind(crate::ErrorKind::Database)?;
let old_db_path = self.datadir().join("main/secrets.db");
if tokio::fs::metadata(&old_db_path).await.is_ok() {
pgloader(
&old_db_path,
self.migration_batch_rows.unwrap_or(25000),
self.migration_prefetch_rows.unwrap_or(100_000),
)
.await?;
}
Ok(secret_store)
}
}
@@ -197,6 +187,7 @@ impl RpcContext {
NetController::init(
base.tor_control
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
tor_proxy,
base.dns_bind
.as_ref()
.map(|v| v.as_slice())
@@ -345,6 +336,31 @@ impl RpcContext {
tracing::debug!("{:?}", e);
}
}
let mut current_dependents = BTreeMap::new();
for (package_id, package) in receipts.packages.get(&mut db).await?.0 {
for (k, v) in package
.into_installed()
.into_iter()
.flat_map(|i| i.current_dependencies.0)
{
let mut entry: BTreeMap<_, _> = current_dependents.remove(&k).unwrap_or_default();
entry.insert(package_id.clone(), v);
current_dependents.insert(k, entry);
}
}
for (package_id, current_dependents) in current_dependents {
if let Some(deps) = crate::db::DatabaseModel::new()
.package_data()
.idx_model(&package_id)
.and_then(|pde| pde.installed())
.map::<_, CurrentDependents>(|i| i.current_dependents())
.check(&mut db)
.await?
{
deps.put(&mut db, &CurrentDependents(current_dependents))
.await?;
}
}
Ok(())
}

View File

@@ -17,7 +17,7 @@ use tracing::instrument;
use crate::account::AccountInfo;
use crate::db::model::Database;
use crate::disk::OsPartitionInfo;
use crate::init::{init_postgres, pgloader};
use crate::init::init_postgres;
use crate::setup::SetupStatus;
use crate::util::config::load_config_from_paths;
use crate::{Error, ResultExt};
@@ -132,15 +132,6 @@ impl SetupContext {
.run(&secret_store)
.await
.with_kind(crate::ErrorKind::Database)?;
let old_db_path = self.datadir.join("main/secrets.db");
if tokio::fs::metadata(&old_db_path).await.is_ok() {
pgloader(
&old_db_path,
self.migration_batch_rows,
self.migration_prefetch_rows,
)
.await?;
}
Ok(secret_store)
}
}

View File

@@ -80,6 +80,7 @@ impl Database {
.map(|x| format!("{x:X}"))
.join(":"),
system_start_time: Utc::now().to_rfc3339(),
zram: false,
},
package_data: AllPackageData::default(),
ui: serde_json::from_str(include_str!("../../../frontend/patchdb-ui-seed.json"))
@@ -117,6 +118,8 @@ pub struct ServerInfo {
pub pubkey: String,
pub ca_fingerprint: String,
pub system_start_time: String,
#[serde(default)]
pub zram: bool,
}
#[derive(Debug, Deserialize, Serialize, HasModel)]

View File

@@ -237,13 +237,16 @@ impl DependencyError {
}
}
DependencyError::ConfigUnsatisfied { .. } => {
let dependent_manifest =
receipts.manifest.get(db, id).await?.ok_or_else(not_found)?;
let dependent_manifest = receipts
.manifest
.get(db, id)
.await?
.ok_or_else(|| not_found!(id))?;
let dependency_manifest = receipts
.manifest
.get(db, dependency)
.await?
.ok_or_else(not_found)?;
.ok_or_else(|| not_found!(dependency))?;
let dependency_config = if let Some(cfg) = dependency_config.take() {
cfg
@@ -294,7 +297,7 @@ impl DependencyError {
.status
.get(db, dependency)
.await?
.ok_or_else(not_found)?;
.ok_or_else(|| not_found!(dependency))?;
if status.main.running() {
DependencyError::HealthChecksFailed {
failures: BTreeMap::new(),
@@ -310,7 +313,7 @@ impl DependencyError {
.status
.get(db, dependency)
.await?
.ok_or_else(not_found)?;
.ok_or_else(|| not_found!(dependency))?;
match status.main {
MainStatus::BackingUp {
started: Some(_),
@@ -324,7 +327,7 @@ impl DependencyError {
.current_dependencies
.get(db, id)
.await?
.ok_or_else(not_found)?
.ok_or_else(|| not_found!(id))?
.get(dependency)
.map(|x| x.health_checks.contains(&check))
.unwrap_or(false)
@@ -934,7 +937,7 @@ pub fn break_transitive<'a, Db: DbHandle>(
.dependency_errors
.get(&mut tx, id)
.await?
.ok_or_else(not_found)?;
.ok_or_else(|| not_found!(id))?;
let old = dependency_errors.0.remove(dependency);
let newly_broken = if let Some(e) = &old {
@@ -997,7 +1000,7 @@ pub async fn heal_all_dependents_transitive<'a, Db: DbHandle>(
.current_dependents
.get(db, id)
.await?
.ok_or_else(not_found)?;
.ok_or_else(|| not_found!(id))?;
for dependent in dependents.0.keys().filter(|dependent| id != *dependent) {
heal_transitive(ctx, db, dependent, id, locks).await?;
}
@@ -1013,7 +1016,11 @@ pub fn heal_transitive<'a, Db: DbHandle>(
receipts: &'a DependencyReceipt,
) -> BoxFuture<'a, Result<(), Error>> {
async move {
let mut status = receipts.status.get(db, id).await?.ok_or_else(not_found)?;
let mut status = receipts
.status
.get(db, id)
.await?
.ok_or_else(|| not_found!(id))?;
let old = status.dependency_errors.0.remove(dependency);
@@ -1022,7 +1029,7 @@ pub fn heal_transitive<'a, Db: DbHandle>(
.dependency
.get(db, (id, dependency))
.await?
.ok_or_else(not_found)?;
.ok_or_else(|| not_found!(format!("{id}'s dependency: {dependency}")))?;
if let Some(new) = old
.try_heal(ctx, db, id, dependency, None, &info, &receipts.try_heal)
.await?

View File

@@ -0,0 +1,31 @@
use std::path::Path;
use tokio::process::Command;
use tracing::instrument;
use crate::disk::fsck::RequiresReboot;
use crate::util::Invoke;
use crate::Error;
#[instrument(skip_all)]
pub async fn btrfs_check_readonly(logicalname: impl AsRef<Path>) -> Result<RequiresReboot, Error> {
Command::new("btrfs")
.arg("check")
.arg("--readonly")
.arg(logicalname.as_ref())
.invoke(crate::ErrorKind::DiskManagement)
.await?;
Ok(RequiresReboot(false))
}
pub async fn btrfs_check_repair(logicalname: impl AsRef<Path>) -> Result<RequiresReboot, Error> {
Command::new("btrfs")
.arg("check")
.arg("--repair")
.arg(logicalname.as_ref())
.invoke(crate::ErrorKind::DiskManagement)
.await?;
Ok(RequiresReboot(false))
}

View File

@@ -7,34 +7,9 @@ use futures::FutureExt;
use tokio::process::Command;
use tracing::instrument;
use crate::disk::fsck::RequiresReboot;
use crate::Error;
#[derive(Debug, Clone, Copy)]
#[must_use]
pub struct RequiresReboot(pub bool);
impl std::ops::BitOrAssign for RequiresReboot {
fn bitor_assign(&mut self, rhs: Self) {
self.0 |= rhs.0
}
}
#[derive(Debug, Clone, Copy)]
pub enum RepairStrategy {
Preen,
Aggressive,
}
impl RepairStrategy {
pub async fn e2fsck(
&self,
logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> {
match self {
RepairStrategy::Preen => e2fsck_preen(logicalname).await,
RepairStrategy::Aggressive => e2fsck_aggressive(logicalname).await,
}
}
}
#[instrument(skip_all)]
pub async fn e2fsck_preen(
logicalname: impl AsRef<Path> + std::fmt::Debug,

View File

@@ -0,0 +1,70 @@
use std::path::Path;
use color_eyre::eyre::eyre;
use tokio::process::Command;
use crate::disk::fsck::btrfs::{btrfs_check_readonly, btrfs_check_repair};
use crate::disk::fsck::ext4::{e2fsck_aggressive, e2fsck_preen};
use crate::util::Invoke;
use crate::Error;
pub mod btrfs;
pub mod ext4;
#[derive(Debug, Clone, Copy)]
#[must_use]
pub struct RequiresReboot(pub bool);
impl std::ops::BitOrAssign for RequiresReboot {
fn bitor_assign(&mut self, rhs: Self) {
self.0 |= rhs.0
}
}
#[derive(Debug, Clone, Copy)]
pub enum RepairStrategy {
Preen,
Aggressive,
}
impl RepairStrategy {
pub async fn fsck(
&self,
logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> {
match &*String::from_utf8(
Command::new("grub-probe")
.arg("-d")
.arg(logicalname.as_ref())
.invoke(crate::ErrorKind::DiskManagement)
.await?,
)?
.trim()
{
"ext2" => self.e2fsck(logicalname).await,
"btrfs" => self.btrfs_check(logicalname).await,
fs => {
return Err(Error::new(
eyre!("Unknown filesystem {fs}"),
crate::ErrorKind::DiskManagement,
))
}
}
}
pub async fn e2fsck(
&self,
logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> {
match self {
RepairStrategy::Preen => e2fsck_preen(logicalname).await,
RepairStrategy::Aggressive => e2fsck_aggressive(logicalname).await,
}
}
pub async fn btrfs_check(
&self,
logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> {
match self {
RepairStrategy::Preen => btrfs_check_readonly(logicalname).await,
RepairStrategy::Aggressive => btrfs_check_repair(logicalname).await,
}
}
}

View File

@@ -106,12 +106,13 @@ pub async fn create_fs<P: AsRef<Path>>(
.arg(guid)
.invoke(crate::ErrorKind::DiskManagement)
.await?;
let crypt_path = Path::new("/dev").join(guid).join(name);
Command::new("cryptsetup")
.arg("-q")
.arg("luksFormat")
.arg(format!("--key-file={}", PASSWORD_PATH))
.arg(format!("--keyfile-size={}", password.len()))
.arg(Path::new("/dev").join(guid).join(name))
.arg(&crypt_path)
.invoke(crate::ErrorKind::DiskManagement)
.await?;
Command::new("cryptsetup")
@@ -119,11 +120,11 @@ pub async fn create_fs<P: AsRef<Path>>(
.arg("luksOpen")
.arg(format!("--key-file={}", PASSWORD_PATH))
.arg(format!("--keyfile-size={}", password.len()))
.arg(Path::new("/dev").join(guid).join(name))
.arg(&crypt_path)
.arg(format!("{}_{}", guid, name))
.invoke(crate::ErrorKind::DiskManagement)
.await?;
Command::new("mkfs.ext4")
Command::new("mkfs.btrfs")
.arg(Path::new("/dev/mapper").join(format!("{}_{}", guid, name)))
.invoke(crate::ErrorKind::DiskManagement)
.await?;
@@ -265,17 +266,36 @@ pub async fn mount_fs<P: AsRef<Path>>(
tokio::fs::write(PASSWORD_PATH, password)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?;
let crypt_path = Path::new("/dev").join(guid).join(name);
let full_name = format!("{}_{}", guid, name);
Command::new("cryptsetup")
.arg("-q")
.arg("luksOpen")
.arg(format!("--key-file={}", PASSWORD_PATH))
.arg(format!("--keyfile-size={}", password.len()))
.arg(Path::new("/dev").join(guid).join(name))
.arg(format!("{}_{}", guid, name))
.arg(&crypt_path)
.arg(&full_name)
.invoke(crate::ErrorKind::DiskManagement)
.await?;
let mapper_path = Path::new("/dev/mapper").join(format!("{}_{}", guid, name));
let reboot = repair.e2fsck(&mapper_path).await?;
let mapper_path = Path::new("/dev/mapper").join(&full_name);
let reboot = repair.fsck(&mapper_path).await?;
// Backup LUKS header if e2fsck succeeded
let luks_folder = Path::new("/media/embassy/config/luks");
tokio::fs::create_dir_all(luks_folder).await?;
let tmp_luks_bak = luks_folder.join(format!(".{full_name}.luks.bak.tmp"));
if tokio::fs::metadata(&tmp_luks_bak).await.is_ok() {
tokio::fs::remove_file(&tmp_luks_bak).await?;
}
let luks_bak = luks_folder.join(format!("{full_name}.luks.bak"));
Command::new("cryptsetup")
.arg("-q")
.arg("luksHeaderBackup")
.arg("--header-backup-file")
.arg(&tmp_luks_bak)
.arg(&crypt_path)
.invoke(crate::ErrorKind::DiskManagement)
.await?;
tokio::fs::rename(&tmp_luks_bak, &luks_bak).await?;
mount(&mapper_path, datadir.as_ref().join(name), ReadWrite).await?;
tokio::fs::remove_file(PASSWORD_PATH)

View File

@@ -1,4 +1,3 @@
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
use async_trait::async_trait;

View File

@@ -1,4 +1,4 @@
use std::collections::BTreeMap;
use std::collections::{BTreeMap, BTreeSet};
use std::path::{Path, PathBuf};
use color_eyre::eyre::{self, eyre};
@@ -251,7 +251,7 @@ pub async fn recovery_info(
#[instrument(skip_all)]
pub async fn list(os: &OsPartitionInfo) -> Result<Vec<DiskInfo>, Error> {
struct DiskIndex {
parts: IndexSet<PathBuf>,
parts: BTreeSet<PathBuf>,
internal: bool,
}
let disk_guids = pvscan().await?;
@@ -301,7 +301,7 @@ pub async fn list(os: &OsPartitionInfo) -> Result<Vec<DiskInfo>, Error> {
disks.insert(
disk.clone(),
DiskIndex {
parts: IndexSet::new(),
parts: BTreeSet::new(),
internal: false,
},
);

View File

@@ -56,7 +56,8 @@ pub async fn get_current_hostname() -> Result<Hostname, Error> {
#[instrument(skip_all)]
pub async fn set_hostname(hostname: &Hostname) -> Result<(), Error> {
let hostname: &String = &hostname.0;
let _out = Command::new("hostnamectl")
Command::new("hostnamectl")
.arg("--static")
.arg("set-hostname")
.arg(hostname)
.invoke(ErrorKind::ParseSysInfo)

View File

@@ -1,9 +1,7 @@
use std::collections::{BTreeMap, HashMap};
use std::collections::HashMap;
use std::fs::Permissions;
use std::net::SocketAddr;
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
use std::process::Stdio;
use std::time::Duration;
use color_eyre::eyre::eyre;
@@ -16,7 +14,8 @@ use tokio::process::Command;
use crate::account::AccountInfo;
use crate::context::rpc::RpcContextConfig;
use crate::db::model::{IpInfo, ServerStatus};
use crate::db::model::{ServerInfo, ServerStatus};
use crate::disk::mount::util::unmount;
use crate::install::PKG_ARCHIVE_DIR;
use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH;
use crate::sound::BEP;
@@ -40,22 +39,19 @@ pub async fn check_time_is_synchronized() -> Result<bool, Error> {
== "NTPSynchronized=yes")
}
pub async fn check_tor_is_ready(tor_control: SocketAddr) -> bool {
tokio::net::TcpStream::connect(tor_control).await.is_ok()
}
pub struct InitReceipts {
pub server_info: LockReceipt<ServerInfo, ()>,
pub server_version: LockReceipt<crate::util::Version, ()>,
pub version_range: LockReceipt<emver::VersionRange, ()>,
pub last_wifi_region: LockReceipt<Option<isocountry::CountryCode>, ()>,
pub status_info: LockReceipt<ServerStatus, ()>,
pub ip_info: LockReceipt<BTreeMap<String, IpInfo>, ()>,
pub system_start_time: LockReceipt<String, ()>,
}
impl InitReceipts {
pub async fn new(db: &mut impl DbHandle) -> Result<Self, Error> {
let mut locks = Vec::new();
let server_info = crate::db::DatabaseModel::new()
.server_info()
.make_locker(LockType::Write)
.add_to_keys(&mut locks);
let server_version = crate::db::DatabaseModel::new()
.server_info()
.version()
@@ -66,112 +62,29 @@ impl InitReceipts {
.eos_version_compat()
.make_locker(LockType::Write)
.add_to_keys(&mut locks);
let last_wifi_region = crate::db::DatabaseModel::new()
.server_info()
.last_wifi_region()
.make_locker(LockType::Write)
.add_to_keys(&mut locks);
let ip_info = crate::db::DatabaseModel::new()
.server_info()
.ip_info()
.make_locker(LockType::Write)
.add_to_keys(&mut locks);
let status_info = crate::db::DatabaseModel::new()
.server_info()
.status_info()
.into_model()
.make_locker(LockType::Write)
.add_to_keys(&mut locks);
let system_start_time = crate::db::DatabaseModel::new()
.server_info()
.system_start_time()
.make_locker(LockType::Write)
.add_to_keys(&mut locks);
let skeleton_key = db.lock_all(locks).await?;
Ok(Self {
server_info: server_info.verify(&skeleton_key)?,
server_version: server_version.verify(&skeleton_key)?,
version_range: version_range.verify(&skeleton_key)?,
ip_info: ip_info.verify(&skeleton_key)?,
status_info: status_info.verify(&skeleton_key)?,
last_wifi_region: last_wifi_region.verify(&skeleton_key)?,
system_start_time: system_start_time.verify(&skeleton_key)?,
})
}
}
pub async fn pgloader(
old_db_path: impl AsRef<Path>,
batch_rows: usize,
prefetch_rows: usize,
) -> Result<(), Error> {
tokio::fs::write(
"/etc/embassy/migrate.load",
format!(
include_str!("migrate.load"),
sqlite_path = old_db_path.as_ref().display(),
batch_rows = batch_rows,
prefetch_rows = prefetch_rows
),
)
.await?;
match tokio::fs::remove_dir_all("/tmp/pgloader").await {
Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(()),
a => a,
}?;
tracing::info!("Running pgloader");
let out = Command::new("pgloader")
.arg("-v")
.arg("/etc/embassy/migrate.load")
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.output()
.await?;
let stdout = String::from_utf8(out.stdout)?;
for line in stdout.lines() {
tracing::debug!("pgloader: {}", line);
}
let stderr = String::from_utf8(out.stderr)?;
for line in stderr.lines() {
tracing::debug!("pgloader err: {}", line);
}
tracing::debug!("pgloader exited with code {:?}", out.status);
if let Some(err) = stdout.lines().chain(stderr.lines()).find_map(|l| {
if l.split_ascii_whitespace()
.any(|word| word == "ERROR" || word == "FATAL")
{
Some(l)
} else {
None
}
}) {
return Err(Error::new(
eyre!("pgloader error: {}", err),
crate::ErrorKind::Database,
));
}
tokio::fs::rename(
old_db_path.as_ref(),
old_db_path.as_ref().with_extension("bak"),
)
.await?;
Ok(())
}
// must be idempotent
pub async fn init_postgres(datadir: impl AsRef<Path>) -> Result<(), Error> {
let db_dir = datadir.as_ref().join("main/postgresql");
let is_mountpoint = || async {
Ok::<_, Error>(
tokio::process::Command::new("mountpoint")
.arg("/var/lib/postgresql")
.stdout(std::process::Stdio::null())
.stderr(std::process::Stdio::null())
.status()
.await?
.success(),
)
};
if tokio::process::Command::new("mountpoint")
.arg("/var/lib/postgresql")
.stdout(std::process::Stdio::null())
.stderr(std::process::Stdio::null())
.status()
.await?
.success()
{
unmount("/var/lib/postgresql").await?;
}
let exists = tokio::fs::metadata(&db_dir).await.is_ok();
if !exists {
Command::new("cp")
@@ -181,18 +94,73 @@ pub async fn init_postgres(datadir: impl AsRef<Path>) -> Result<(), Error> {
.invoke(crate::ErrorKind::Filesystem)
.await?;
}
if !is_mountpoint().await? {
crate::disk::mount::util::bind(&db_dir, "/var/lib/postgresql", false).await?;
}
Command::new("chown")
.arg("-R")
.arg("postgres")
.arg("/var/lib/postgresql")
.arg("postgres:postgres")
.arg(&db_dir)
.invoke(crate::ErrorKind::Database)
.await?;
let mut pg_paths = tokio::fs::read_dir("/usr/lib/postgresql").await?;
let mut pg_version = None;
while let Some(pg_path) = pg_paths.next_entry().await? {
let pg_path_version = pg_path
.file_name()
.to_str()
.map(|v| v.parse())
.transpose()?
.unwrap_or(0);
if pg_path_version > pg_version.unwrap_or(0) {
pg_version = Some(pg_path_version)
}
}
let pg_version = pg_version.ok_or_else(|| {
Error::new(
eyre!("could not determine postgresql version"),
crate::ErrorKind::Database,
)
})?;
crate::disk::mount::util::bind(&db_dir, "/var/lib/postgresql", false).await?;
let pg_version_string = pg_version.to_string();
let pg_version_path = db_dir.join(&pg_version_string);
if tokio::fs::metadata(&pg_version_path).await.is_err() {
let conf_dir = Path::new("/etc/postgresql").join(pg_version.to_string());
let conf_dir_tmp = {
let mut tmp = conf_dir.clone();
tmp.set_extension("tmp");
tmp
};
if tokio::fs::metadata(&conf_dir).await.is_ok() {
tokio::fs::rename(&conf_dir, &conf_dir_tmp).await?;
}
let mut old_version = pg_version;
while old_version > 13
/* oldest pg version included in startos */
{
old_version -= 1;
let old_datadir = db_dir.join(old_version.to_string());
if tokio::fs::metadata(&old_datadir).await.is_ok() {
Command::new("pg_upgradecluster")
.arg(old_version.to_string())
.arg("main")
.invoke(crate::ErrorKind::Database)
.await?;
break;
}
}
if tokio::fs::metadata(&conf_dir).await.is_ok() {
if tokio::fs::metadata(&conf_dir).await.is_ok() {
tokio::fs::remove_dir_all(&conf_dir).await?;
}
tokio::fs::rename(&conf_dir_tmp, &conf_dir).await?;
}
}
Command::new("systemctl")
.arg("start")
.arg("postgresql")
.arg(format!("postgresql@{pg_version}-main.service"))
.invoke(crate::ErrorKind::Database)
.await?;
if !exists {
@@ -213,6 +181,7 @@ pub async fn init_postgres(datadir: impl AsRef<Path>) -> Result<(), Error> {
.invoke(crate::ErrorKind::Database)
.await?;
}
Ok(())
}
@@ -255,9 +224,9 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
let db = cfg.db(&account).await?;
tracing::info!("Opened PatchDB");
let mut handle = db.handle();
crate::db::DatabaseModel::new()
let mut server_info = crate::db::DatabaseModel::new()
.server_info()
.lock(&mut handle, LockType::Write)
.get_mut(&mut handle)
.await?;
let receipts = InitReceipts::new(&mut handle).await?;
@@ -275,17 +244,15 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
crate::net::wifi::synchronize_wpa_supplicant_conf(
&cfg.datadir().join("main"),
wifi_interface,
&receipts.last_wifi_region.get(&mut handle).await?,
&server_info.last_wifi_region,
)
.await?;
tracing::info!("Synchronized WiFi");
}
let should_rebuild = tokio::fs::metadata(SYSTEM_REBUILD_PATH).await.is_ok()
|| &*receipts.server_version.get(&mut handle).await? < &emver::Version::new(0, 3, 2, 0)
|| (*ARCH == "x86_64"
&& &*receipts.server_version.get(&mut handle).await?
< &emver::Version::new(0, 3, 4, 0));
|| &*server_info.version < &emver::Version::new(0, 3, 2, 0)
|| (*ARCH == "x86_64" && &*server_info.version < &emver::Version::new(0, 3, 4, 0));
let song = if should_rebuild {
Some(NonDetachingJoinHandle::from(tokio::spawn(async {
@@ -403,46 +370,19 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
tracing::info!("Syncronized system clock");
}
Command::new("systemctl")
.arg("start")
.arg("tor")
.invoke(crate::ErrorKind::Tor)
.await?;
let mut warn_tor_not_ready = true;
for _ in 0..60 {
if check_tor_is_ready(cfg.tor_control.unwrap_or(([127, 0, 0, 1], 9051).into())).await {
warn_tor_not_ready = false;
break;
}
tokio::time::sleep(Duration::from_secs(1)).await;
}
if warn_tor_not_ready {
tracing::warn!("Timed out waiting for tor to start");
} else {
tracing::info!("Tor is started");
if server_info.zram {
crate::system::enable_zram().await?
}
server_info.ip_info = crate::net::dhcp::init_ips().await?;
server_info.status_info = ServerStatus {
updated: false,
update_progress: None,
backup_progress: None,
};
receipts
.ip_info
.set(&mut handle, crate::net::dhcp::init_ips().await?)
.await?;
receipts
.status_info
.set(
&mut handle,
ServerStatus {
updated: false,
update_progress: None,
backup_progress: None,
},
)
.await?;
server_info.system_start_time = time().await?;
receipts
.system_start_time
.set(&mut handle, time().await?)
.await?;
server_info.save(&mut handle).await?;
crate::version::init(&mut handle, &secret_store, &receipts).await?;

View File

@@ -82,7 +82,7 @@ pub async fn update_dependency_errors_of_dependents<'a, Db: DbHandle>(
.dependency_errors
.get(db, dep)
.await?
.ok_or_else(not_found)?;
.ok_or_else(|| not_found!(dep))?;
errs.0.insert(id.clone(), e);
receipts.dependency_errors.set(db, errs, dep).await?
} else {
@@ -90,7 +90,7 @@ pub async fn update_dependency_errors_of_dependents<'a, Db: DbHandle>(
.dependency_errors
.get(db, dep)
.await?
.ok_or_else(not_found)?;
.ok_or_else(|| not_found!(dep))?;
errs.0.remove(id);
receipts.dependency_errors.set(db, errs, dep).await?
}
@@ -215,7 +215,7 @@ pub async fn cleanup_failed<Db: DbHandle>(
.package_data_entry
.get(db, id)
.await?
.ok_or_else(not_found)?;
.ok_or_else(|| not_found!(id))?;
if let Some(manifest) = match &pde {
PackageDataEntry::Installing { manifest, .. }
| PackageDataEntry::Restoring { manifest, .. } => Some(manifest),

View File

@@ -21,6 +21,7 @@ use rpc_toolkit::yajrc::RpcError;
use tokio::fs::{File, OpenOptions};
use tokio::io::{AsyncRead, AsyncSeek, AsyncSeekExt};
use tokio::process::Command;
use tokio::sync::oneshot;
use tokio_stream::wrappers::ReadDirStream;
use tracing::instrument;
@@ -45,7 +46,7 @@ use crate::s9pk::reader::S9pkReader;
use crate::status::{MainStatus, Status};
use crate::util::io::{copy_and_shutdown, response_to_reader};
use crate::util::serde::{display_serializable, Port};
use crate::util::{assure_send, display_none, AsyncFileExt, Version};
use crate::util::{display_none, AsyncFileExt, Version};
use crate::version::{Current, VersionT};
use crate::volume::{asset_dir, script_dir};
use crate::{Error, ErrorKind, ResultExt};
@@ -297,6 +298,7 @@ pub async fn install(
Some(marketplace_url),
InstallProgress::new(s9pk.content_length()),
response_to_reader(s9pk),
None,
)
.await
{
@@ -425,52 +427,64 @@ pub async fn sideload(
pde.save(&mut tx).await?;
tx.commit().await?;
if let Err(e) = download_install_s9pk(
&new_ctx,
&manifest,
None,
progress,
tokio_util::io::StreamReader::new(req.into_body().map_err(|e| {
std::io::Error::new(
match &e {
e if e.is_connect() => std::io::ErrorKind::ConnectionRefused,
e if e.is_timeout() => std::io::ErrorKind::TimedOut,
_ => std::io::ErrorKind::Other,
},
e,
)
})),
)
.await
{
let err_str = format!(
"Install of {}@{} Failed: {}",
manifest.id, manifest.version, e
);
tracing::error!("{}", err_str);
tracing::debug!("{:?}", e);
if let Err(e) = new_ctx
.notification_manager
.notify(
&mut hdl,
Some(manifest.id),
NotificationLevel::Error,
String::from("Install Failed"),
err_str,
(),
None,
)
.await
{
tracing::error!("Failed to issue Notification: {}", e);
tracing::debug!("{:?}", e);
}
}
let (send, recv) = oneshot::channel();
Response::builder()
.status(StatusCode::OK)
.body(Body::empty())
.with_kind(ErrorKind::Network)
tokio::spawn(async move {
if let Err(e) = download_install_s9pk(
&new_ctx,
&manifest,
None,
progress,
tokio_util::io::StreamReader::new(req.into_body().map_err(|e| {
std::io::Error::new(
match &e {
e if e.is_connect() => std::io::ErrorKind::ConnectionRefused,
e if e.is_timeout() => std::io::ErrorKind::TimedOut,
_ => std::io::ErrorKind::Other,
},
e,
)
})),
Some(send),
)
.await
{
let err_str = format!(
"Install of {}@{} Failed: {}",
manifest.id, manifest.version, e
);
tracing::error!("{}", err_str);
tracing::debug!("{:?}", e);
if let Err(e) = new_ctx
.notification_manager
.notify(
&mut hdl,
Some(manifest.id),
NotificationLevel::Error,
String::from("Install Failed"),
err_str,
(),
None,
)
.await
{
tracing::error!("Failed to issue Notification: {}", e);
tracing::debug!("{:?}", e);
}
}
});
if let Ok(_) = recv.await {
Response::builder()
.status(StatusCode::OK)
.body(Body::empty())
.with_kind(ErrorKind::Network)
} else {
Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body(Body::from("installation aborted before upload completed"))
.with_kind(ErrorKind::Network)
}
}
.boxed()
});
@@ -707,6 +721,7 @@ pub async fn download_install_s9pk(
marketplace_url: Option<Url>,
progress: Arc<InstallProgress>,
mut s9pk: impl AsyncRead + Unpin,
download_complete: Option<oneshot::Sender<()>>,
) -> Result<(), Error> {
let pkg_id = &temp_manifest.id;
let version = &temp_manifest.version;
@@ -799,6 +814,9 @@ pub async fn download_install_s9pk(
let mut progress_writer = InstallProgressTracker::new(&mut dst, progress.clone());
tokio::io::copy(&mut s9pk, &mut progress_writer).await?;
progress.download_complete();
if let Some(complete) = download_complete {
complete.send(()).unwrap_or_default();
}
Ok(())
})
.await?;
@@ -1279,6 +1297,14 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
migration.or(prev_migration)
};
remove_from_current_dependents_lists(
&mut tx,
pkg_id,
&prev.current_dependencies,
&receipts.config.current_dependents,
)
.await?; // remove previous
let configured = if let Some(f) = viable_migration {
f.await?.configured && prev_is_configured
} else {
@@ -1298,13 +1324,6 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
)
.await?;
} else {
remove_from_current_dependents_lists(
&mut tx,
pkg_id,
&prev.current_dependencies,
&receipts.config.current_dependents,
)
.await?; // remove previous
add_dependent_to_current_dependents_lists(
&mut tx,
pkg_id,

View File

@@ -76,7 +76,7 @@ pub async fn dry(
.current_dependents
.get(&mut tx, &id)
.await?
.ok_or_else(not_found)?
.ok_or_else(|| not_found!(id))?
.0
.keys()
.into_iter()

View File

@@ -85,6 +85,7 @@ pub fn main_api() -> Result<(), RpcError> {
#[command(subcommands(
system::time,
system::experimental,
system::logs,
system::kernel_logs,
system::metrics,

View File

@@ -33,7 +33,7 @@ use crate::util::serde::Reversible;
use crate::{Error, ErrorKind};
#[pin_project::pin_project]
struct LogStream {
pub struct LogStream {
_child: Child,
#[pin]
entries: BoxStream<'static, Result<JournalctlEntry, Error>>,
@@ -141,14 +141,14 @@ impl std::fmt::Display for LogEntry {
}
#[derive(Serialize, Deserialize, Debug)]
struct JournalctlEntry {
pub struct JournalctlEntry {
#[serde(rename = "__REALTIME_TIMESTAMP")]
timestamp: String,
pub timestamp: String,
#[serde(rename = "MESSAGE")]
#[serde(deserialize_with = "deserialize_string_or_utf8_array")]
message: String,
pub message: String,
#[serde(rename = "__CURSOR")]
cursor: String,
pub cursor: String,
}
impl JournalctlEntry {
fn log_entry(self) -> Result<(String, LogEntry), Error> {
@@ -344,7 +344,7 @@ pub async fn cli_logs_generic_follow(
Ok(())
}
async fn journalctl(
pub async fn journalctl(
id: LogSource,
limit: usize,
cursor: Option<&str>,

View File

@@ -34,6 +34,7 @@ impl NetController {
#[instrument(skip_all)]
pub async fn init(
tor_control: SocketAddr,
tor_socks: SocketAddr,
dns_bind: &[SocketAddr],
ssl: SslManager,
hostname: &Hostname,
@@ -41,7 +42,7 @@ impl NetController {
) -> Result<Self, Error> {
let ssl = Arc::new(ssl);
let mut res = Self {
tor: TorController::init(tor_control).await?,
tor: TorController::new(tor_control, tor_socks),
#[cfg(feature = "avahi")]
mdns: MdnsController::init().await?,
vhost: VHostController::new(ssl.clone()),
@@ -114,7 +115,7 @@ impl NetController {
// Tor (http)
self.os_bindings.push(
self.tor
.add(&key.tor_key(), 80, ([127, 0, 0, 1], 80).into())
.add(key.tor_key(), 80, ([127, 0, 0, 1], 80).into())
.await?,
);
@@ -132,7 +133,7 @@ impl NetController {
);
self.os_bindings.push(
self.tor
.add(&key.tor_key(), 443, ([127, 0, 0, 1], 443).into())
.add(key.tor_key(), 443, ([127, 0, 0, 1], 443).into())
.await?,
);
@@ -164,13 +165,13 @@ impl NetController {
target: SocketAddr,
) -> Result<Vec<Arc<()>>, Error> {
let mut rcs = Vec::with_capacity(1);
rcs.push(self.tor.add(&key.tor_key(), external, target).await?);
rcs.push(self.tor.add(key.tor_key(), external, target).await?);
Ok(rcs)
}
async fn remove_tor(&self, key: &Key, external: u16, rcs: Vec<Arc<()>>) -> Result<(), Error> {
drop(rcs);
self.tor.gc(&key.tor_key(), external).await
self.tor.gc(Some(key.tor_key()), Some(external)).await
}
async fn add_lan(

View File

@@ -3,13 +3,11 @@ use std::path::Path;
use std::sync::Arc;
use std::time::UNIX_EPOCH;
use async_compression::tokio::bufread::BrotliEncoder;
use async_compression::tokio::bufread::GzipEncoder;
use async_compression::tokio::bufread::{BrotliEncoder, GzipEncoder};
use color_eyre::eyre::eyre;
use digest::Digest;
use futures::FutureExt;
use http::header::ACCEPT_ENCODING;
use http::header::CONTENT_ENCODING;
use http::header::{ACCEPT_ENCODING, CONTENT_ENCODING};
use http::request::Parts as RequestParts;
use http::response::Builder;
use hyper::{Body, Method, Request, Response, StatusCode};

View File

@@ -1,32 +1,80 @@
use std::collections::BTreeMap;
use std::net::SocketAddr;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Weak};
use std::time::Duration;
use clap::ArgMatches;
use color_eyre::eyre::eyre;
use futures::future::BoxFuture;
use futures::FutureExt;
use futures::{FutureExt, TryStreamExt};
use helpers::NonDetachingJoinHandle;
use itertools::Itertools;
use lazy_static::lazy_static;
use regex::Regex;
use rpc_toolkit::command;
use rpc_toolkit::yajrc::RpcError;
use tokio::net::TcpStream;
use tokio::sync::Mutex;
use tokio::process::Command;
use tokio::sync::{mpsc, oneshot};
use tokio::time::Instant;
use torut::control::{AsyncEvent, AuthenticatedConn, ConnError};
use torut::onion::{OnionAddressV3, TorSecretKeyV3};
use tracing::instrument;
use crate::context::RpcContext;
use crate::context::{CliContext, RpcContext};
use crate::logs::{
cli_logs_generic_follow, cli_logs_generic_nofollow, fetch_logs, follow_logs, journalctl,
LogFollowResponse, LogResponse, LogSource,
};
use crate::util::serde::{display_serializable, IoFormat};
use crate::util::{display_none, Invoke};
use crate::{Error, ErrorKind, ResultExt as _};
pub const SYSTEMD_UNIT: &str = "tor@default";
const STARTING_HEALTH_TIMEOUT: u64 = 120; // 2min
enum ErrorLogSeverity {
Fatal { wipe_state: bool },
Unknown { wipe_state: bool },
}
lazy_static! {
static ref LOG_REGEXES: Vec<(Regex, ErrorLogSeverity)> = vec![(
Regex::new("This could indicate a route manipulation attack, network overload, bad local network connectivity, or a bug\\.").unwrap(),
ErrorLogSeverity::Unknown { wipe_state: true }
),(
Regex::new("died due to an invalid selected path").unwrap(),
ErrorLogSeverity::Fatal { wipe_state: false }
),(
Regex::new("Tor has not observed any network activity for the past").unwrap(),
ErrorLogSeverity::Unknown { wipe_state: false }
)];
static ref PROGRESS_REGEX: Regex = Regex::new("PROGRESS=([0-9]+)").unwrap();
}
#[test]
fn random_key() {
println!("x'{}'", hex::encode(rand::random::<[u8; 32]>()));
}
#[command(subcommands(list_services))]
#[command(subcommands(list_services, logs, reset))]
pub fn tor() -> Result<(), Error> {
Ok(())
}
#[command(display(display_none))]
pub async fn reset(
#[context] ctx: RpcContext,
#[arg(rename = "wipe-state", short = 'w', long = "wipe-state")] wipe_state: bool,
#[arg] reason: String,
) -> Result<(), Error> {
ctx.net_controller
.tor
.reset(wipe_state, Error::new(eyre!("{reason}"), ErrorKind::Tor))
.await
}
fn display_services(services: Vec<OnionAddressV3>, matches: &ArgMatches) {
use prettytable::*;
@@ -52,133 +100,227 @@ pub async fn list_services(
ctx.net_controller.tor.list_services().await
}
#[command(
custom_cli(cli_logs(async, context(CliContext))),
subcommands(self(logs_nofollow(async)), logs_follow),
display(display_none)
)]
pub async fn logs(
#[arg(short = 'l', long = "limit")] limit: Option<usize>,
#[arg(short = 'c', long = "cursor")] cursor: Option<String>,
#[arg(short = 'B', long = "before", default)] before: bool,
#[arg(short = 'f', long = "follow", default)] follow: bool,
) -> Result<(Option<usize>, Option<String>, bool, bool), Error> {
Ok((limit, cursor, before, follow))
}
pub async fn cli_logs(
ctx: CliContext,
(limit, cursor, before, follow): (Option<usize>, Option<String>, bool, bool),
) -> Result<(), RpcError> {
if follow {
if cursor.is_some() {
return Err(RpcError::from(Error::new(
eyre!("The argument '--cursor <cursor>' cannot be used with '--follow'"),
crate::ErrorKind::InvalidRequest,
)));
}
if before {
return Err(RpcError::from(Error::new(
eyre!("The argument '--before' cannot be used with '--follow'"),
crate::ErrorKind::InvalidRequest,
)));
}
cli_logs_generic_follow(ctx, "net.tor.logs.follow", None, limit).await
} else {
cli_logs_generic_nofollow(ctx, "net.tor.logs", None, limit, cursor, before).await
}
}
pub async fn logs_nofollow(
_ctx: (),
(limit, cursor, before, _): (Option<usize>, Option<String>, bool, bool),
) -> Result<LogResponse, Error> {
fetch_logs(LogSource::Service(SYSTEMD_UNIT), limit, cursor, before).await
}
#[command(rpc_only, rename = "follow", display(display_none))]
pub async fn logs_follow(
#[context] ctx: RpcContext,
#[parent_data] (limit, _, _, _): (Option<usize>, Option<String>, bool, bool),
) -> Result<LogFollowResponse, Error> {
follow_logs(ctx, LogSource::Service(SYSTEMD_UNIT), limit).await
}
fn event_handler(_event: AsyncEvent<'static>) -> BoxFuture<'static, Result<(), ConnError>> {
async move { Ok(()) }.boxed()
}
pub struct TorController(Mutex<TorControllerInner>);
pub struct TorController(TorControl);
impl TorController {
pub async fn init(tor_control: SocketAddr) -> Result<Self, Error> {
Ok(TorController(Mutex::new(
TorControllerInner::init(tor_control).await?,
)))
pub fn new(tor_control: SocketAddr, tor_socks: SocketAddr) -> Self {
TorController(TorControl::new(tor_control, tor_socks))
}
pub async fn add(
&self,
key: &TorSecretKeyV3,
key: TorSecretKeyV3,
external: u16,
target: SocketAddr,
) -> Result<Arc<()>, Error> {
self.0.lock().await.add(key, external, target).await
let (reply, res) = oneshot::channel();
self.0
.send
.send(TorCommand::AddOnion {
key,
external,
target,
reply,
})
.ok()
.ok_or_else(|| Error::new(eyre!("TorControl died"), ErrorKind::Tor))?;
res.await
.ok()
.ok_or_else(|| Error::new(eyre!("TorControl died"), ErrorKind::Tor))
}
pub async fn gc(&self, key: &TorSecretKeyV3, external: u16) -> Result<(), Error> {
self.0.lock().await.gc(key, external).await
pub async fn gc(
&self,
key: Option<TorSecretKeyV3>,
external: Option<u16>,
) -> Result<(), Error> {
self.0
.send
.send(TorCommand::GC { key, external })
.ok()
.ok_or_else(|| Error::new(eyre!("TorControl died"), ErrorKind::Tor))
}
pub async fn reset(&self, wipe_state: bool, context: Error) -> Result<(), Error> {
self.0
.send
.send(TorCommand::Reset {
wipe_state,
context,
})
.ok()
.ok_or_else(|| Error::new(eyre!("TorControl died"), ErrorKind::Tor))
}
pub async fn list_services(&self) -> Result<Vec<OnionAddressV3>, Error> {
self.0.lock().await.list_services().await
let (reply, res) = oneshot::channel();
self.0
.send
.send(TorCommand::GetInfo {
query: "onions/current".into(),
reply,
})
.ok()
.ok_or_else(|| Error::new(eyre!("TorControl died"), ErrorKind::Tor))?;
res.await
.ok()
.ok_or_else(|| Error::new(eyre!("TorControl died"), ErrorKind::Tor))??
.lines()
.map(|l| l.trim())
.filter(|l| !l.is_empty())
.map(|l| l.parse().with_kind(ErrorKind::Tor))
.collect()
}
}
type AuthenticatedConnection = AuthenticatedConn<
TcpStream,
fn(AsyncEvent<'static>) -> BoxFuture<'static, Result<(), ConnError>>,
Box<dyn Fn(AsyncEvent<'static>) -> BoxFuture<'static, Result<(), ConnError>> + Send + Sync>,
>;
pub struct TorControllerInner {
control_addr: SocketAddr,
connection: AuthenticatedConnection,
services: BTreeMap<String, BTreeMap<u16, BTreeMap<SocketAddr, Weak<()>>>>,
}
impl TorControllerInner {
#[instrument(skip_all)]
async fn add(
&mut self,
key: &TorSecretKeyV3,
enum TorCommand {
AddOnion {
key: TorSecretKeyV3,
external: u16,
target: SocketAddr,
) -> Result<Arc<()>, Error> {
let mut rm_res = Ok(());
let onion_base = key
.public()
.get_onion_address()
.get_address_without_dot_onion();
let mut service = if let Some(service) = self.services.remove(&onion_base) {
rm_res = self.connection.del_onion(&onion_base).await;
service
} else {
BTreeMap::new()
};
let mut binding = service.remove(&external).unwrap_or_default();
let rc = if let Some(rc) = Weak::upgrade(&binding.remove(&target).unwrap_or_default()) {
rc
} else {
Arc::new(())
};
binding.insert(target, Arc::downgrade(&rc));
service.insert(external, binding);
let bindings = service
.iter()
.flat_map(|(ext, int)| {
int.iter()
.find(|(_, rc)| rc.strong_count() > 0)
.map(|(addr, _)| (*ext, SocketAddr::from(*addr)))
})
.collect::<Vec<_>>();
self.services.insert(onion_base, service);
rm_res?;
self.connection
.add_onion_v3(key, false, false, false, None, &mut bindings.iter())
.await?;
Ok(rc)
}
reply: oneshot::Sender<Arc<()>>,
},
GC {
key: Option<TorSecretKeyV3>,
external: Option<u16>,
},
GetInfo {
query: String,
reply: oneshot::Sender<Result<String, Error>>,
},
Reset {
wipe_state: bool,
context: Error,
},
}
#[instrument(skip_all)]
async fn gc(&mut self, key: &TorSecretKeyV3, external: u16) -> Result<(), Error> {
let onion_base = key
.public()
.get_onion_address()
.get_address_without_dot_onion();
if let Some(mut service) = self.services.remove(&onion_base) {
if let Some(mut binding) = service.remove(&external) {
binding = binding
.into_iter()
.filter(|(_, rc)| rc.strong_count() > 0)
.collect();
if !binding.is_empty() {
service.insert(external, binding);
#[instrument(skip_all)]
async fn torctl(
tor_control: SocketAddr,
tor_socks: SocketAddr,
recv: &mut mpsc::UnboundedReceiver<TorCommand>,
services: &mut BTreeMap<[u8; 64], BTreeMap<u16, BTreeMap<SocketAddr, Weak<()>>>>,
wipe_state: &AtomicBool,
health_timeout: &mut Duration,
) -> Result<(), Error> {
let bootstrap = async {
if Command::new("systemctl")
.arg("is-active")
.arg("--quiet")
.arg("tor")
.invoke(ErrorKind::Tor)
.await
.is_ok()
{
Command::new("systemctl")
.arg("stop")
.arg("tor")
.invoke(ErrorKind::Tor)
.await?;
for _ in 0..30 {
if TcpStream::connect(tor_control).await.is_err() {
break;
}
tokio::time::sleep(Duration::from_secs(1)).await;
}
let rm_res = self.connection.del_onion(&onion_base).await;
if !service.is_empty() {
let bindings = service
.iter()
.flat_map(|(ext, int)| {
int.iter()
.find(|(_, rc)| rc.strong_count() > 0)
.map(|(addr, _)| (*ext, SocketAddr::from(*addr)))
})
.collect::<Vec<_>>();
self.services.insert(onion_base, service);
rm_res?;
self.connection
.add_onion_v3(&key, false, false, false, None, &mut bindings.iter())
.await?;
} else {
rm_res?;
if TcpStream::connect(tor_control).await.is_ok() {
return Err(Error::new(
eyre!("Tor is failing to shut down"),
ErrorKind::Tor,
));
}
}
if wipe_state.load(std::sync::atomic::Ordering::SeqCst) {
tokio::fs::remove_dir_all("/var/lib/tor").await?;
wipe_state.store(false, std::sync::atomic::Ordering::SeqCst);
}
tokio::fs::create_dir_all("/var/lib/tor").await?;
Command::new("chown")
.arg("-R")
.arg("debian-tor")
.arg("/var/lib/tor")
.invoke(ErrorKind::Filesystem)
.await?;
Command::new("systemctl")
.arg("start")
.arg("tor")
.invoke(ErrorKind::Tor)
.await?;
Ok(())
}
let logs = journalctl(LogSource::Service(SYSTEMD_UNIT), 0, None, false, true).await?;
#[instrument(skip_all)]
async fn init(tor_control: SocketAddr) -> Result<Self, Error> {
let mut conn = torut::control::UnauthenticatedConn::new(
TcpStream::connect(tor_control).await?, // TODO
);
let mut tcp_stream = None;
for _ in 0..60 {
if let Ok(conn) = TcpStream::connect(tor_control).await {
tcp_stream = Some(conn);
break;
}
tokio::time::sleep(Duration::from_secs(1)).await;
}
let tcp_stream = tcp_stream.ok_or_else(|| {
Error::new(eyre!("Timed out waiting for tor to start"), ErrorKind::Tor)
})?;
tracing::info!("Tor is started");
let mut conn = torut::control::UnauthenticatedConn::new(tcp_stream);
let auth = conn
.load_protocol_info()
.await?
@@ -187,25 +329,356 @@ impl TorControllerInner {
.with_kind(crate::ErrorKind::Tor)?;
conn.authenticate(&auth).await?;
let mut connection: AuthenticatedConnection = conn.into_authenticated().await;
connection.set_async_event_handler(Some(event_handler));
connection.set_async_event_handler(Some(Box::new(|event| event_handler(event))));
Ok(Self {
control_addr: tor_control,
connection,
services: BTreeMap::new(),
})
let mut bootstrapped = false;
let mut last_increment = (String::new(), Instant::now());
for _ in 0..300 {
match connection.get_info("status/bootstrap-phase").await {
Ok(a) => {
if a.contains("TAG=done") {
bootstrapped = true;
break;
}
if let Some(p) = PROGRESS_REGEX.captures(&a) {
if let Some(p) = p.get(1) {
if p.as_str() != &*last_increment.0 {
last_increment = (p.as_str().into(), Instant::now());
}
}
}
}
Err(e) => {
let e = Error::from(e);
tracing::error!("{}", e);
tracing::debug!("{:?}", e);
}
}
if last_increment.1.elapsed() > Duration::from_secs(30) {
return Err(Error::new(
eyre!("Tor stuck bootstrapping at {}%", last_increment.0),
ErrorKind::Tor,
));
}
tokio::time::sleep(Duration::from_secs(1)).await;
}
if !bootstrapped {
return Err(Error::new(
eyre!("Timed out waiting for tor to bootstrap"),
ErrorKind::Tor,
));
}
Ok((connection, logs))
};
let pre_handler = async {
while let Some(command) = recv.recv().await {
match command {
TorCommand::AddOnion {
key,
external,
target,
reply,
} => {
let mut service = if let Some(service) = services.remove(&key.as_bytes()) {
service
} else {
BTreeMap::new()
};
let mut binding = service.remove(&external).unwrap_or_default();
let rc = if let Some(rc) =
Weak::upgrade(&binding.remove(&target).unwrap_or_default())
{
rc
} else {
Arc::new(())
};
binding.insert(target, Arc::downgrade(&rc));
service.insert(external, binding);
services.insert(key.as_bytes(), service);
reply.send(rc).unwrap_or_default();
}
TorCommand::GetInfo { reply, .. } => {
reply
.send(Err(Error::new(
eyre!("Tor has not finished bootstrapping..."),
ErrorKind::Tor,
)))
.unwrap_or_default();
}
TorCommand::GC { .. } => (),
TorCommand::Reset {
wipe_state: new_wipe_state,
context,
} => {
wipe_state.fetch_or(new_wipe_state, std::sync::atomic::Ordering::SeqCst);
return Err(context);
}
}
}
Ok(())
};
let (mut connection, mut logs) = tokio::select! {
res = bootstrap => res?,
res = pre_handler => return res,
};
let hck_key = TorSecretKeyV3::generate();
connection
.add_onion_v3(
&hck_key,
false,
false,
false,
None,
&mut [(80, SocketAddr::from(([127, 0, 0, 1], 80)))].iter(),
)
.await?;
for (key, service) in std::mem::take(services) {
let key = TorSecretKeyV3::from(key);
let bindings = service
.iter()
.flat_map(|(ext, int)| {
int.iter()
.find(|(_, rc)| rc.strong_count() > 0)
.map(|(addr, _)| (*ext, SocketAddr::from(*addr)))
})
.collect::<Vec<_>>();
if !bindings.is_empty() {
services.insert(key.as_bytes(), service);
connection
.add_onion_v3(&key, false, false, false, None, &mut bindings.iter())
.await?;
}
}
#[instrument(skip_all)]
async fn list_services(&mut self) -> Result<Vec<OnionAddressV3>, Error> {
self.connection
.get_info("onions/current")
.await?
.lines()
.map(|l| l.trim())
.filter(|l| !l.is_empty())
.map(|l| l.parse().with_kind(ErrorKind::Tor))
.collect()
let handler = async {
while let Some(command) = recv.recv().await {
match command {
TorCommand::AddOnion {
key,
external,
target,
reply,
} => {
let mut rm_res = Ok(());
let onion_base = key
.public()
.get_onion_address()
.get_address_without_dot_onion();
let mut service = if let Some(service) = services.remove(&key.as_bytes()) {
rm_res = connection.del_onion(&onion_base).await;
service
} else {
BTreeMap::new()
};
let mut binding = service.remove(&external).unwrap_or_default();
let rc = if let Some(rc) =
Weak::upgrade(&binding.remove(&target).unwrap_or_default())
{
rc
} else {
Arc::new(())
};
binding.insert(target, Arc::downgrade(&rc));
service.insert(external, binding);
let bindings = service
.iter()
.flat_map(|(ext, int)| {
int.iter()
.find(|(_, rc)| rc.strong_count() > 0)
.map(|(addr, _)| (*ext, SocketAddr::from(*addr)))
})
.collect::<Vec<_>>();
services.insert(key.as_bytes(), service);
reply.send(rc).unwrap_or_default();
rm_res?;
connection
.add_onion_v3(&key, false, false, false, None, &mut bindings.iter())
.await?;
}
TorCommand::GC { key, external } => {
for key in if key.is_some() {
itertools::Either::Left(key.into_iter().map(|k| k.as_bytes()))
} else {
itertools::Either::Right(services.keys().cloned().collect_vec().into_iter())
} {
let key = TorSecretKeyV3::from(key);
let onion_base = key
.public()
.get_onion_address()
.get_address_without_dot_onion();
if let Some(mut service) = services.remove(&key.as_bytes()) {
for external in if external.is_some() {
itertools::Either::Left(external.into_iter())
} else {
itertools::Either::Right(
service.keys().copied().collect_vec().into_iter(),
)
} {
if let Some(mut binding) = service.remove(&external) {
binding = binding
.into_iter()
.filter(|(_, rc)| rc.strong_count() > 0)
.collect();
if !binding.is_empty() {
service.insert(external, binding);
}
}
}
let rm_res = connection.del_onion(&onion_base).await;
if !service.is_empty() {
let bindings = service
.iter()
.flat_map(|(ext, int)| {
int.iter()
.find(|(_, rc)| rc.strong_count() > 0)
.map(|(addr, _)| (*ext, SocketAddr::from(*addr)))
})
.collect::<Vec<_>>();
if !bindings.is_empty() {
services.insert(key.as_bytes(), service);
}
rm_res?;
if !bindings.is_empty() {
connection
.add_onion_v3(
&key,
false,
false,
false,
None,
&mut bindings.iter(),
)
.await?;
}
} else {
rm_res?;
}
}
}
}
TorCommand::GetInfo { query, reply } => {
reply
.send(connection.get_info(&query).await.with_kind(ErrorKind::Tor))
.unwrap_or_default();
}
TorCommand::Reset {
wipe_state: new_wipe_state,
context,
} => {
wipe_state.fetch_or(new_wipe_state, std::sync::atomic::Ordering::SeqCst);
return Err(context);
}
}
}
Ok(())
};
let log_parser = async {
while let Some(log) = logs.try_next().await? {
for (regex, severity) in &*LOG_REGEXES {
if regex.is_match(&log.message) {
let (check, wipe_state) = match severity {
ErrorLogSeverity::Fatal { wipe_state } => (false, *wipe_state),
ErrorLogSeverity::Unknown { wipe_state } => (true, *wipe_state),
};
if !check
|| tokio::time::timeout(
Duration::from_secs(30),
tokio_socks::tcp::Socks5Stream::connect(
tor_socks,
(hck_key.public().get_onion_address().to_string(), 80),
),
)
.await
.map_err(|e| tracing::warn!("Tor is confirmed to be down: {e}"))
.and_then(|a| {
a.map_err(|e| tracing::warn!("Tor is confirmed to be down: {e}"))
})
.is_err()
{
if wipe_state {
Command::new("systemctl")
.arg("stop")
.arg("tor")
.invoke(ErrorKind::Tor)
.await?;
tokio::fs::remove_dir_all("/var/lib/tor").await?;
}
return Err(Error::new(eyre!("{}", log.message), ErrorKind::Tor));
}
}
}
}
Err(Error::new(eyre!("Log stream terminated"), ErrorKind::Tor))
};
let health_checker = async {
let mut last_success = Instant::now();
loop {
tokio::time::sleep(Duration::from_secs(30)).await;
if let Err(e) = tokio::time::timeout(
Duration::from_secs(30),
tokio_socks::tcp::Socks5Stream::connect(
tor_socks,
(hck_key.public().get_onion_address().to_string(), 80),
),
)
.await
.map_err(|e| e.to_string())
.and_then(|e| e.map_err(|e| e.to_string()))
{
if last_success.elapsed() > *health_timeout {
let err = Error::new(eyre!("Tor health check failed for longer than current timeout ({health_timeout:?})"), crate::ErrorKind::Tor);
*health_timeout *= 2;
wipe_state.store(true, std::sync::atomic::Ordering::SeqCst);
return Err(err);
}
} else {
last_success = Instant::now();
}
}
};
tokio::select! {
res = handler => res?,
res = log_parser => res?,
res = health_checker => res?,
}
Ok(())
}
/// Handle to the background task that owns the Tor control-port connection.
struct TorControl {
    // Join handle for the supervision loop; held so the task is tied to this
    // struct's lifetime (NonDetachingJoinHandle aborts/joins on drop).
    _thread: NonDetachingJoinHandle<()>,
    // Command channel into the control loop (AddOnion / GC / GetInfo / Reset).
    send: mpsc::UnboundedSender<TorCommand>,
}
impl TorControl {
    /// Spawn the Tor control loop as a background task and return a handle to it.
    ///
    /// `tor_control` is the address of Tor's control port; `tor_socks` the SOCKS5
    /// proxy address used by the loop's health checks. The loop (`torctl`) is
    /// restarted whenever it exits with an error; onion-service state, the
    /// wipe-state flag, and the (growing) health-check timeout persist across
    /// restarts because they live outside the loop call.
    pub fn new(tor_control: SocketAddr, tor_socks: SocketAddr) -> Self {
        let (send, mut recv) = mpsc::unbounded_channel();
        Self {
            _thread: tokio::spawn(async move {
                // State shared across torctl restarts.
                let mut services = BTreeMap::new();
                let wipe_state = AtomicBool::new(false);
                let mut health_timeout = Duration::from_secs(STARTING_HEALTH_TIMEOUT);
                // Keep restarting torctl until it returns Ok (clean shutdown).
                while let Err(e) = torctl(
                    tor_control,
                    tor_socks,
                    &mut recv,
                    &mut services,
                    &wipe_state,
                    &mut health_timeout,
                )
                .await
                {
                    tracing::error!("{e}: Restarting tor");
                    tracing::debug!("{e:?}");
                }
                tracing::info!("TorControl is shut down.")
            })
            .into(),
            send,
        }
    }
}

View File

@@ -1,5 +1,5 @@
use std::convert::Infallible;
use std::net::{Ipv4Addr, Ipv6Addr};
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr};
use std::path::Path;
use async_stream::try_stream;
@@ -7,24 +7,29 @@ use color_eyre::eyre::eyre;
use futures::stream::BoxStream;
use futures::{StreamExt, TryStreamExt};
use ipnet::{Ipv4Net, Ipv6Net};
use tokio::net::{TcpListener, TcpStream};
use tokio::process::Command;
use crate::util::Invoke;
use crate::Error;
fn parse_iface_ip(output: &str) -> Result<Option<&str>, Error> {
fn parse_iface_ip(output: &str) -> Result<Vec<&str>, Error> {
let output = output.trim();
if output.is_empty() {
return Ok(None);
return Ok(Vec::new());
}
if let Some(ip) = output.split_ascii_whitespace().nth(3) {
Ok(Some(ip))
} else {
Err(Error::new(
eyre!("malformed output from `ip`"),
crate::ErrorKind::Network,
))
let mut res = Vec::new();
for line in output.lines() {
if let Some(ip) = line.split_ascii_whitespace().nth(3) {
res.push(ip)
} else {
return Err(Error::new(
eyre!("malformed output from `ip`"),
crate::ErrorKind::Network,
));
}
}
Ok(res)
}
pub async fn get_iface_ipv4_addr(iface: &str) -> Result<Option<(Ipv4Addr, Ipv4Net)>, Error> {
@@ -38,7 +43,9 @@ pub async fn get_iface_ipv4_addr(iface: &str) -> Result<Option<(Ipv4Addr, Ipv4Ne
.invoke(crate::ErrorKind::Network)
.await?,
)?)?
.into_iter()
.map(|s| Ok::<_, Error>((s.split("/").next().unwrap().parse()?, s.parse()?)))
.next()
.transpose()?)
}
@@ -53,6 +60,8 @@ pub async fn get_iface_ipv6_addr(iface: &str) -> Result<Option<(Ipv6Addr, Ipv6Ne
.invoke(crate::ErrorKind::Network)
.await?,
)?)?
.into_iter()
.find(|ip| !ip.starts_with("fe80::"))
.map(|s| Ok::<_, Error>((s.split("/").next().unwrap().parse()?, s.parse()?)))
.transpose()?)
}
@@ -121,3 +130,37 @@ impl<T> hyper::server::accept::Accept for SingleAccept<T> {
std::task::Poll::Ready(self.project().0.take().map(Ok))
}
}
/// A set of TCP listeners that can be accepted from as a single unit
/// (used to serve the same service on multiple bound addresses).
pub struct TcpListeners {
    listeners: Vec<TcpListener>,
}
impl TcpListeners {
    /// Collect any iterable of listeners into a single multi-listener set.
    pub fn new(listeners: impl IntoIterator<Item = TcpListener>) -> Self {
        let listeners = listeners.into_iter().collect();
        Self { listeners }
    }

    /// Wait until any one of the listeners accepts a connection, returning the
    /// stream and the peer address. Note: panics if the set is empty
    /// (`select_all` requires a non-empty iterator).
    pub async fn accept(&self) -> std::io::Result<(TcpStream, SocketAddr)> {
        let accepts = self.listeners.iter().map(|listener| Box::pin(listener.accept()));
        let (result, _index, _rest) = futures::future::select_all(accepts).await;
        result
    }
}
/// Lets hyper accept connections from whichever wrapped listener is ready first.
impl hyper::server::accept::Accept for TcpListeners {
    type Conn = TcpStream;
    type Error = std::io::Error;
    fn poll_accept(
        mut self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Result<Self::Conn, Self::Error>>> {
        // Poll every listener in order; each poll registers `cx`'s waker with
        // that listener, so returning Pending after the loop is safe.
        // NOTE(review): polling in fixed order favors earlier listeners when
        // several are ready at once — acceptable here, but worth knowing.
        for listener in self.listeners.iter() {
            let poll = listener.poll_accept(cx);
            if poll.is_ready() {
                // Drop the peer address; hyper only wants the stream.
                return poll.map(|a| a.map(|a| a.0)).map(Some);
            }
        }
        std::task::Poll::Pending
    }
}

View File

@@ -1,6 +1,6 @@
use std::collections::BTreeMap;
use std::convert::Infallible;
use std::net::{IpAddr, SocketAddr};
use std::net::{IpAddr, Ipv6Addr, SocketAddr};
use std::str::FromStr;
use std::sync::{Arc, Weak};
@@ -88,7 +88,7 @@ struct VHostServer {
impl VHostServer {
async fn new(port: u16, ssl: Arc<SslManager>) -> Result<Self, Error> {
// check if port allowed
let listener = TcpListener::bind(SocketAddr::new([0, 0, 0, 0].into(), port))
let listener = TcpListener::bind(SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), port))
.await
.with_kind(crate::ErrorKind::Network)?;
let mapping = Arc::new(RwLock::new(BTreeMap::new()));

View File

@@ -10,7 +10,7 @@ use crate::context::InstallContext;
use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::block_dev::BlockDev;
use crate::disk::mount::filesystem::efivarfs::EfiVarFs;
use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::filesystem::{MountType, ReadWrite};
use crate::disk::mount::guard::{MountGuard, TmpMountGuard};
use crate::disk::util::{DiskInfo, PartitionTable};
use crate::disk::OsPartitionInfo;
@@ -147,18 +147,59 @@ pub async fn execute(
.invoke(crate::ErrorKind::DiskManagement)
.await?;
Command::new("mkfs.ext4")
if !overwrite {
if let Ok(guard) =
TmpMountGuard::mount(&BlockDev::new(part_info.root.clone()), MountType::ReadOnly).await
{
if let Err(e) = async {
// cp -r ${guard}/config /tmp/config
Command::new("cp")
.arg("-r")
.arg(guard.as_ref().join("config"))
.arg("/tmp/config.bak")
.invoke(crate::ErrorKind::Filesystem)
.await?;
if tokio::fs::metadata(guard.as_ref().join("config/upgrade"))
.await
.is_ok()
{
tokio::fs::remove_file(guard.as_ref().join("config/upgrade")).await?;
}
guard.unmount().await
}
.await
{
tracing::error!("Error recovering previous config: {e}");
tracing::debug!("{e:?}");
}
}
}
Command::new("mkfs.btrfs")
.arg("-f")
.arg(&part_info.root)
.invoke(crate::ErrorKind::DiskManagement)
.await?;
Command::new("e2label")
Command::new("btrfs")
.arg("property")
.arg("set")
.arg(&part_info.root)
.arg("label")
.arg("rootfs")
.invoke(crate::ErrorKind::DiskManagement)
.await?;
let rootfs = TmpMountGuard::mount(&BlockDev::new(&part_info.root), ReadWrite).await?;
tokio::fs::create_dir(rootfs.as_ref().join("config")).await?;
if tokio::fs::metadata("/tmp/config.bak").await.is_ok() {
Command::new("cp")
.arg("-r")
.arg("/tmp/config.bak")
.arg(rootfs.as_ref().join("config"))
.invoke(crate::ErrorKind::Filesystem)
.await?;
} else {
tokio::fs::create_dir(rootfs.as_ref().join("config")).await?;
}
tokio::fs::create_dir(rootfs.as_ref().join("next")).await?;
let current = rootfs.as_ref().join("current");
tokio::fs::create_dir(&current).await?;
@@ -224,6 +265,12 @@ pub async fn execute(
.invoke(crate::ErrorKind::OpenSsh)
.await?;
let dev = MountGuard::mount(
&Bind::new(rootfs.as_ref()),
current.join("media/embassy/embassyfs"),
MountType::ReadOnly,
)
.await?;
let dev = MountGuard::mount(&Bind::new("/dev"), current.join("dev"), ReadWrite).await?;
let proc = MountGuard::mount(&Bind::new("/proc"), current.join("proc"), ReadWrite).await?;
let sys = MountGuard::mount(&Bind::new("/sys"), current.join("sys"), ReadWrite).await?;

View File

@@ -2,15 +2,17 @@ use std::borrow::Cow;
use std::collections::{BTreeMap, BTreeSet, VecDeque};
use std::ffi::{OsStr, OsString};
use std::net::Ipv4Addr;
use std::os::unix::prelude::FileTypeExt;
use std::path::{Path, PathBuf};
use std::time::Duration;
use async_stream::stream;
use bollard::container::RemoveContainerOptions;
use chrono::format::Item;
use color_eyre::eyre::eyre;
use color_eyre::Report;
use futures::future::Either as EitherFuture;
use futures::TryStreamExt;
use futures::future::{BoxFuture, Either as EitherFuture};
use futures::{FutureExt, TryStreamExt};
use helpers::{NonDetachingJoinHandle, UnixRpcClient};
use models::{Id, ImageId};
use nix::sys::signal;
@@ -18,10 +20,8 @@ use nix::unistd::Pid;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use tokio::{
io::{AsyncBufRead, AsyncBufReadExt, BufReader},
time::timeout,
};
use tokio::io::{AsyncBufRead, AsyncBufReadExt, BufReader};
use tokio::time::timeout;
use tracing::instrument;
use super::ProcedureName;
@@ -68,6 +68,8 @@ pub struct DockerContainer {
pub sigterm_timeout: Option<SerdeDuration>,
#[serde(default)]
pub system: bool,
#[serde(default)]
pub gpu_acceleration: bool,
}
impl DockerContainer {
@@ -154,6 +156,8 @@ pub struct DockerProcedure {
pub sigterm_timeout: Option<SerdeDuration>,
#[serde(default)]
pub shm_size_mb: Option<usize>, // TODO: use postfix sizing? like 1k vs 1m vs 1g
#[serde(default)]
pub gpu_acceleration: bool,
}
#[derive(Clone, Debug, Deserialize, Serialize, Default)]
@@ -184,6 +188,7 @@ impl DockerProcedure {
io_format: injectable.io_format,
sigterm_timeout: injectable.sigterm_timeout,
shm_size_mb: container.shm_size_mb,
gpu_acceleration: container.gpu_acceleration,
}
}
@@ -709,6 +714,32 @@ impl DockerProcedure {
res.push(OsStr::new("--shm-size").into());
res.push(OsString::from(format!("{}m", shm_size_mb)).into());
}
if self.gpu_acceleration {
fn get_devices<'a>(
path: &'a Path,
res: &'a mut Vec<PathBuf>,
) -> BoxFuture<'a, Result<(), Error>> {
async move {
let mut read_dir = tokio::fs::read_dir(path).await?;
while let Some(entry) = read_dir.next_entry().await? {
let fty = entry.metadata().await?.file_type();
if fty.is_block_device() || fty.is_char_device() {
res.push(entry.path());
} else if fty.is_dir() {
get_devices(&*entry.path(), res).await?;
}
}
Ok(())
}
.boxed()
}
let mut devices = Vec::new();
get_devices(Path::new("/dev/dri"), &mut devices).await?;
for device in devices {
res.push(OsStr::new("--device").into());
res.push(OsString::from(device).into());
}
}
res.push(OsStr::new("--interactive").into());
res.push(OsStr::new("--log-driver=journald").into());
res.push(OsStr::new("--entrypoint").into());

View File

@@ -690,3 +690,49 @@ async fn js_rsync() {
.unwrap()
.unwrap();
}
#[tokio::test]
// Integration test: runs the JS "test-disk-usage" action against the fixture
// package in test/js_action_execute/ and asserts it completes successfully
// (both the procedure invocation and the action's own result must be Ok).
async fn js_disk_usage() {
    let js_action = JsProcedure { args: vec![] };
    // Canonicalize so the JS runtime receives an absolute fixture path.
    let path: PathBuf = "test/js_action_execute/"
        .parse::<PathBuf>()
        .unwrap()
        .canonicalize()
        .unwrap();
    let package_id = "test-package".parse().unwrap();
    let package_version: Version = "0.3.0.3".parse().unwrap();
    let name = ProcedureName::Action("test-disk-usage".parse().unwrap());
    // Exercise all three volume kinds: data, assets, and a read-only pointer
    // into another package's volume.
    let volumes: Volumes = serde_json::from_value(serde_json::json!({
        "main": {
            "type": "data"
        },
        "compat": {
            "type": "assets"
        },
        "filebrowser" :{
            "package-id": "filebrowser",
            "path": "data",
            "readonly": true,
            "type": "pointer",
            "volume-id": "main",
        }
    }))
    .unwrap();
    let input: Option<serde_json::Value> = None;
    let timeout = Some(Duration::from_secs(10));
    // dbg! prints the action's output when the test is run with --nocapture.
    dbg!(js_action
        .execute::<serde_json::Value, serde_json::Value>(
            &path,
            &package_id,
            &package_version,
            name,
            &volumes,
            input,
            timeout,
            ProcessGroupId(0),
            None,
        )
        .await
        .unwrap()
        .unwrap());
}

View File

@@ -24,6 +24,66 @@ use crate::{Error, ErrorKind, ResultExt};
pub const SYSTEMD_UNIT: &'static str = "embassyd";
/// CLI/RPC parent command grouping experimental features; currently only
/// exposes the `zram` subcommand and does nothing itself.
#[command(subcommands(zram))]
pub async fn experimental() -> Result<(), Error> {
    Ok(())
}
/// Enable a compressed-RAM swap device (/dev/zram0) sized to 1/4 of total RAM.
///
/// Steps, in required order: load the zram kernel module, select the lz4
/// compressor, set the device size, format it as swap, then activate it with
/// priority 5 (so it is preferred over disk-backed swap).
/// Requires root; all failures map to ErrorKind::Zram.
pub async fn enable_zram() -> Result<(), Error> {
    let mem_info = get_mem_info().await?;
    Command::new("modprobe")
        .arg("zram")
        .invoke(ErrorKind::Zram)
        .await?;
    // Compressor must be chosen before disksize is set.
    tokio::fs::write("/sys/block/zram0/comp_algorithm", "lz4")
        .await
        .with_kind(ErrorKind::Zram)?;
    // mem_info.total is in MiB (see get_mem_info); allocate a quarter of it.
    tokio::fs::write(
        "/sys/block/zram0/disksize",
        format!("{}M", mem_info.total.0 as u64 / 4),
    )
    .await
    .with_kind(ErrorKind::Zram)?;
    Command::new("mkswap")
        .arg("/dev/zram0")
        .invoke(ErrorKind::Zram)
        .await?;
    Command::new("swapon")
        .arg("-p")
        .arg("5")
        .arg("/dev/zram0")
        .invoke(ErrorKind::Zram)
        .await?;
    Ok(())
}
/// RPC command: toggle zram swap on or off and persist the choice in the DB.
///
/// No-ops if the requested state matches the stored state. The DB value is
/// only saved after the system change succeeds, so a failed toggle leaves the
/// persisted state unchanged.
#[command(display(display_none))]
pub async fn zram(#[context] ctx: RpcContext, #[arg] enable: bool) -> Result<(), Error> {
    let mut db = ctx.db.handle();
    let mut zram = crate::db::DatabaseModel::new()
        .server_info()
        .zram()
        .get_mut(&mut db)
        .await?;
    // Already in the desired state: nothing to do.
    if enable == *zram {
        return Ok(());
    }
    *zram = enable;
    if enable {
        enable_zram().await?;
    } else {
        // Deactivate the swap device, then reset it to free its memory.
        Command::new("swapoff")
            .arg("/dev/zram0")
            .invoke(ErrorKind::Zram)
            .await?;
        tokio::fs::write("/sys/block/zram0/reset", "1")
            .await
            .with_kind(ErrorKind::Zram)?;
    }
    zram.save(&mut db).await?;
    Ok(())
}
#[command]
pub async fn time() -> Result<String, Error> {
Ok(Utc::now().to_rfc3339())
@@ -699,7 +759,7 @@ async fn get_mem_info() -> Result<MetricsMemory, Error> {
let swap_total = MebiBytes(swap_total_k as f64 / 1024.0);
let swap_free = MebiBytes(swap_free_k as f64 / 1024.0);
let swap_used = MebiBytes((swap_total_k - swap_free_k) as f64 / 1024.0);
let percentage_used = Percentage(used.0 / total.0 * 100.0);
let percentage_used = Percentage((total.0 - available.0) / total.0 * 100.0);
Ok(MetricsMemory {
percentage_used,
total,

View File

@@ -1,11 +1,13 @@
use std::future::Future;
use std::io::Cursor;
use std::os::unix::prelude::MetadataExt;
use std::path::Path;
use std::task::Poll;
use futures::future::{BoxFuture, Fuse};
use futures::{AsyncSeek, FutureExt, TryStreamExt};
use helpers::NonDetachingJoinHandle;
use nix::unistd::{Gid, Uid};
use tokio::io::{
duplex, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, DuplexStream, ReadBuf, WriteHalf,
};
@@ -416,3 +418,120 @@ impl<T: AsyncWrite> AsyncWrite for BackTrackingReader<T> {
self.project().reader.poll_write_vectored(cx, bufs)
}
}
/// Recursively copy a directory tree from `src` to `dst`, preserving
/// permissions and ownership (uid/gid) on directories and regular files.
///
/// Symlinks are recreated with their original targets and deliberately left
/// without permission changes. Returns a boxed future because the function is
/// recursive. Errors are tagged with ErrorKind::Filesystem plus a
/// shell-command-style context string (mkdir/chmod/chown/cp/...).
pub fn dir_copy<'a, P0: AsRef<Path> + 'a + Send + Sync, P1: AsRef<Path> + 'a + Send + Sync>(
    src: P0,
    dst: P1,
) -> BoxFuture<'a, Result<(), crate::Error>> {
    async move {
        let m = tokio::fs::metadata(&src).await?;
        let dst_path = dst.as_ref();
        // Create the destination dir, then mirror the source dir's mode/owner.
        tokio::fs::create_dir_all(&dst_path).await.with_ctx(|_| {
            (
                crate::ErrorKind::Filesystem,
                format!("mkdir {}", dst_path.display()),
            )
        })?;
        tokio::fs::set_permissions(&dst_path, m.permissions())
            .await
            .with_ctx(|_| {
                (
                    crate::ErrorKind::Filesystem,
                    format!("chmod {}", dst_path.display()),
                )
            })?;
        // chown is a blocking syscall wrapper, so run it off the async runtime.
        let tmp_dst_path = dst_path.to_owned();
        tokio::task::spawn_blocking(move || {
            nix::unistd::chown(
                &tmp_dst_path,
                Some(Uid::from_raw(m.uid())),
                Some(Gid::from_raw(m.gid())),
            )
        })
        .await
        .with_kind(crate::ErrorKind::Unknown)?
        .with_ctx(|_| {
            (
                crate::ErrorKind::Filesystem,
                format!("chown {}", dst_path.display()),
            )
        })?;
        // Walk the directory entries; try_for_each aborts on the first error.
        tokio_stream::wrappers::ReadDirStream::new(tokio::fs::read_dir(src.as_ref()).await?)
            .map_err(|e| crate::Error::new(e, crate::ErrorKind::Filesystem))
            .try_for_each(|e| async move {
                let m = e.metadata().await?;
                let src_path = e.path();
                let dst_path = dst_path.join(e.file_name());
                if m.is_file() {
                    // NOTE(review): `len` is never used below — dead binding.
                    let len = m.len();
                    let mut dst_file =
                        &mut tokio::fs::File::create(&dst_path).await.with_ctx(|_| {
                            (
                                crate::ErrorKind::Filesystem,
                                format!("create {}", dst_path.display()),
                            )
                        })?;
                    tokio::io::copy(
                        &mut tokio::fs::File::open(&src_path).await.with_ctx(|_| {
                            (
                                crate::ErrorKind::Filesystem,
                                format!("open {}", src_path.display()),
                            )
                        })?,
                        &mut dst_file,
                    )
                    .await
                    .with_ctx(|_| {
                        (
                            crate::ErrorKind::Filesystem,
                            format!("cp {} -> {}", src_path.display(), dst_path.display()),
                        )
                    })?;
                    // Flush userspace buffers, close the write half, then fsync
                    // so the copy is durable before ownership is applied.
                    dst_file.flush().await?;
                    dst_file.shutdown().await?;
                    dst_file.sync_all().await?;
                    drop(dst_file);
                    let tmp_dst_path = dst_path.clone();
                    tokio::task::spawn_blocking(move || {
                        nix::unistd::chown(
                            &tmp_dst_path,
                            Some(Uid::from_raw(m.uid())),
                            Some(Gid::from_raw(m.gid())),
                        )
                    })
                    .await
                    .with_kind(crate::ErrorKind::Unknown)?
                    .with_ctx(|_| {
                        (
                            crate::ErrorKind::Filesystem,
                            format!("chown {}", dst_path.display()),
                        )
                    })?;
                } else if m.is_dir() {
                    // Recurse into subdirectories.
                    dir_copy(src_path, dst_path).await?;
                } else if m.file_type().is_symlink() {
                    // Recreate the link pointing at the original target.
                    tokio::fs::symlink(
                        tokio::fs::read_link(&src_path).await.with_ctx(|_| {
                            (
                                crate::ErrorKind::Filesystem,
                                format!("readlink {}", src_path.display()),
                            )
                        })?,
                        &dst_path,
                    )
                    .await
                    .with_ctx(|_| {
                        (
                            crate::ErrorKind::Filesystem,
                            format!("cp -P {} -> {}", src_path.display(), dst_path.display()),
                        )
                    })?;
                    // Do not set permissions (see https://unix.stackexchange.com/questions/87200/change-permissions-for-a-symbolic-link)
                }
                Ok(())
            })
            .await?;
        Ok(())
    }
    .boxed()
}

View File

@@ -58,7 +58,12 @@ impl Invoke for tokio::process::Command {
res.status.success(),
error_kind,
"{}",
std::str::from_utf8(&res.stderr).unwrap_or("Unknown Error")
Some(&res.stderr)
.filter(|a| !a.is_empty())
.or(Some(&res.stdout))
.filter(|a| !a.is_empty())
.and_then(|a| std::str::from_utf8(a).ok())
.unwrap_or(&format!("Unknown Error ({})", res.status))
);
Ok(res.stdout)
}

View File

@@ -22,8 +22,9 @@ mod v0_3_3;
mod v0_3_4;
mod v0_3_4_1;
mod v0_3_4_2;
mod v0_3_4_3;
pub type Current = v0_3_4_2::Version;
pub type Current = v0_3_4_3::Version;
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)]
#[serde(untagged)]
@@ -41,6 +42,7 @@ enum Version {
V0_3_4(Wrapper<v0_3_4::Version>),
V0_3_4_1(Wrapper<v0_3_4_1::Version>),
V0_3_4_2(Wrapper<v0_3_4_2::Version>),
V0_3_4_3(Wrapper<v0_3_4_3::Version>),
Other(emver::Version),
}
@@ -69,6 +71,7 @@ impl Version {
Version::V0_3_4(Wrapper(x)) => x.semver(),
Version::V0_3_4_1(Wrapper(x)) => x.semver(),
Version::V0_3_4_2(Wrapper(x)) => x.semver(),
Version::V0_3_4_3(Wrapper(x)) => x.semver(),
Version::Other(x) => x.clone(),
}
}
@@ -258,6 +261,10 @@ pub async fn init<Db: DbHandle>(
v.0.migrate_to(&Current::new(), db, secrets, receipts)
.await?
}
Version::V0_3_4_3(v) => {
v.0.migrate_to(&Current::new(), db, secrets, receipts)
.await?
}
Version::Other(_) => {
return Err(Error::new(
eyre!("Cannot downgrade"),
@@ -303,6 +310,7 @@ mod tests {
Just(Version::V0_3_4(Wrapper(v0_3_4::Version::new()))),
Just(Version::V0_3_4_1(Wrapper(v0_3_4_1::Version::new()))),
Just(Version::V0_3_4_2(Wrapper(v0_3_4_2::Version::new()))),
Just(Version::V0_3_4_3(Wrapper(v0_3_4_3::Version::new()))),
em_version().prop_map(Version::Other),
]
}

View File

@@ -5,11 +5,10 @@ use openssl::hash::MessageDigest;
use serde_json::{json, Value};
use ssh_key::public::Ed25519PublicKey;
use crate::account::AccountInfo;
use crate::hostname::{generate_hostname, sync_hostname, Hostname};
use super::v0_3_0::V0_3_0_COMPAT;
use super::*;
use crate::account::AccountInfo;
use crate::hostname::{generate_hostname, sync_hostname, Hostname};
const V0_3_4: emver::Version = emver::Version::new(0, 3, 4, 0);

View File

@@ -0,0 +1,36 @@
use async_trait::async_trait;
use emver::VersionRange;
use super::v0_3_0::V0_3_0_COMPAT;
use super::*;
// Semver constant for this migration step (0.3.4.3).
const V0_3_4_3: emver::Version = emver::Version::new(0, 3, 4, 3);
#[derive(Clone, Debug)]
// Marker type representing the 0.3.4.3 schema version in the migration chain.
pub struct Version;
#[async_trait]
impl VersionT for Version {
    // Previous step in the migration chain (0.3.4.2).
    type Previous = v0_3_4_2::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> emver::Version {
        V0_3_4_3
    }
    fn compat(&self) -> &'static VersionRange {
        &*V0_3_0_COMPAT
    }
    /// Upgrade hook: reads server-info and immediately saves it back with no
    /// modification — presumably to rewrite the record under any serde-level
    /// schema changes/defaults; TODO confirm intent.
    async fn up<Db: DbHandle>(&self, db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
        crate::db::DatabaseModel::new()
            .server_info()
            .get_mut(db)
            .await?
            .save(db)
            .await?;
        Ok(())
    }
    /// Downgrade hook: nothing to undo for this version.
    async fn down<Db: DbHandle>(&self, _db: &mut Db, _secrets: &PgPool) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1039,4 +1039,9 @@ export const action = {
}
},
async "test-disk-usage"(effects, _input) {
const usage = await effects.diskUsage()
return usage
}
};

View File

@@ -3,11 +3,11 @@ avahi-utils
bash-completion
beep
bmon
btrfs-progs
ca-certificates
cifs-utils
containerd.io
curl
crda
cryptsetup
docker-ce
docker-ce-cli
@@ -15,7 +15,8 @@ docker-compose-plugin
dosfstools
e2fsprogs
ecryptfs-utils
exfat-utils
exfatprogs
grub-common
htop
httpdirfs
iotop
@@ -25,22 +26,27 @@ libavahi-client3
lm-sensors
lvm2
magic-wormhole
man-db
ncdu
net-tools
network-manager
nvme-cli
nyx
openssh-server
pgloader
postgresql
psmisc
qemu-guest-agent
rsync
samba-common-bin
smartmontools
sqlite3
squashfs-tools
sudo
systemd
systemd-resolved
systemd-sysv
systemd-timesyncd
tor
util-linux
vim
wireless-tools

View File

@@ -31,6 +31,8 @@ local_mount_root()
modprobe ${FSTYPE}
checkfs ${ROOT} root "${FSTYPE}"
ROOTFLAGS="$(echo "${ROOTFLAGS}" | sed 's/subvol=\(next\|current\)//' | sed 's/^-o *$//')"
if [ "${FSTYPE}" != "unknown" ]; then
mount -t ${FSTYPE} ${ROOTFLAGS} ${ROOT} ${rootmnt}
else

View File

@@ -3,30 +3,10 @@
ARGS=
for ARG in $@; do
if [ "${ARG%%[!/]*}" = "/" ]; then
OPTIONS=
path="$ARG"
while true; do
if FSTYPE=$( findmnt -n -o FSTYPE "$path" ); then
if [ "$FSTYPE" = "overlay" ]; then
OPTIONS=$(findmnt -n -o OPTIONS "$path")
break
else
break
fi
fi
if [ "$path" = "/" ]; then break; fi
path=$(dirname "$path")
done
if LOWERDIR=$(echo "$OPTIONS" | grep -m 1 -oP 'lowerdir=\K[^,]+'); then
#echo "[DEBUG] Overlay filesystem detected ${ARG} --> ${LOWERDIR}${ARG%*/}" 1>&2
ARG=/media/embassy/embassyfs"${ARG%*/}"
if [ -d "/media/embassy/embassyfs" ] && [ "$ARG" = "/" ]; then
ARG=/media/embassy/embassyfs
fi
fi
ARGS="$ARGS $ARG"
ARGS="$ARGS $ARG"
done
grub-probe-default $ARGS

View File

@@ -1,12 +1,12 @@
{
"name": "startos-ui",
"version": "0.3.4.2",
"version": "0.3.4.3",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "startos-ui",
"version": "0.3.4.2",
"version": "0.3.4.3",
"dependencies": {
"@angular/animations": "^14.1.0",
"@angular/common": "^14.1.0",

View File

@@ -1,6 +1,6 @@
{
"name": "startos-ui",
"version": "0.3.4.2",
"version": "0.3.4.3",
"author": "Start9 Labs, Inc",
"homepage": "https://start9.com/",
"scripts": {

View File

@@ -1,6 +1,6 @@
{
"name": null,
"ack-welcome": "0.3.4.2",
"ack-welcome": "0.3.4.3",
"marketplace": {
"selected-url": "https://registry.start9.com/",
"known-hosts": {

View File

@@ -1,6 +1,6 @@
{
"name": "@start9labs/marketplace",
"version": "0.3.10",
"version": "0.3.11",
"peerDependencies": {
"@angular/common": ">=13.2.0",
"@angular/core": ">=13.2.0",

Binary file not shown.

Before

Width:  |  Height:  |  Size: 7.2 KiB

After

Width:  |  Height:  |  Size: 22 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 12 KiB

After

Width:  |  Height:  |  Size: 74 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 34 KiB

After

Width:  |  Height:  |  Size: 32 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 52 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 8.0 KiB

After

Width:  |  Height:  |  Size: 40 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 19 KiB

After

Width:  |  Height:  |  Size: 42 KiB

View File

@@ -4,7 +4,7 @@
.logo {
display: block;
width: 50%;
width: 60%;
margin: 0 auto;
}

View File

@@ -6,8 +6,11 @@
<qr-code value="hello"></qr-code>
<!-- Ionic components -->
<ion-accordion></ion-accordion>
<ion-accordion-group></ion-accordion-group>
<ion-action-sheet></ion-action-sheet>
<ion-alert></ion-alert>
<ion-avatar></ion-avatar>
<ion-back-button></ion-back-button>
<ion-badge></ion-badge>
<ion-button></ion-button>
@@ -38,6 +41,7 @@
<ion-list></ion-list>
<ion-loading></ion-loading>
<ion-modal></ion-modal>
<ion-menu-button></ion-menu-button>
<ion-note></ion-note>
<ion-progress-bar></ion-progress-bar>
<ion-radio></ion-radio>
@@ -54,10 +58,12 @@
<ion-toast></ion-toast>
<ion-toggle></ion-toggle>
<ion-toolbar></ion-toolbar>
<ion-menu-button></ion-menu-button>
<!-- images -->
<img src="assets/img/logo.png" />
<img src="assets/img/icon.png" />
<img src="assets/img/icon_transparent.png" />
<img src="assets/img/community-store.png" />
<img src="assets/img/icons/snek.png" />
<img src="assets/img/icons/wifi-1.png" />
<img src="assets/img/icons/wifi-2.png" />

View File

@@ -40,6 +40,7 @@ const ICONS = [
'file-tray-stacked-outline',
'finger-print-outline',
'flash-outline',
'flash-off-outline',
'folder-open-outline',
'globe-outline',
'grid-outline',
@@ -54,7 +55,6 @@ const ICONS = [
'mail-outline',
'map-outline',
'medkit-outline',
'newspaper-outline',
'notifications-outline',
'open-outline',
'options-outline',

View File

@@ -2,10 +2,9 @@ import { NgModule } from '@angular/core'
import { CommonModule } from '@angular/common'
import { IonicModule } from '@ionic/angular'
import { BadgeMenuComponent } from './badge-menu.component'
import { TuiLetModule } from '@taiga-ui/cdk'
@NgModule({
imports: [CommonModule, IonicModule, TuiLetModule],
imports: [CommonModule, IonicModule],
declarations: [BadgeMenuComponent],
exports: [BadgeMenuComponent],
})

View File

@@ -15,8 +15,10 @@ import { sameUrl } from '@start9labs/shared'
changeDetection: ChangeDetectionStrategy.OnPush,
})
export class StoreIconComponent {
@Input() url: string = ''
@Input() size?: string
@Input()
url: string = ''
@Input()
size?: string
}
@Pipe({
@@ -29,7 +31,7 @@ export class GetIconPipe implements PipeTransform {
const { start9, community } = this.config.marketplace
if (sameUrl(url, start9)) {
return 'assets/img/icon.png'
return 'assets/img/icon_transparent.png'
} else if (sameUrl(url, community)) {
return 'assets/img/community-store.png'
}

View File

@@ -12,6 +12,28 @@
<ion-content class="ion-padding">
<h2>This Release</h2>
<h4>0.3.4.3</h4>
<p class="note-padding">
View the complete
<a
href="https://github.com/Start9Labs/start-os/releases/tag/v0.3.4.3"
target="_blank"
noreferrer
>
release notes
</a>
for more details.
</p>
<h6>Highlights</h6>
<ul class="spaced-list">
<li>Improved Tor reliability</li>
<li>Experimental features tab</li>
<li>Multiple bugfixes and general performance enhancements</li>
<li>Updated branding</li>
</ul>
<h2>Previous Releases</h2>
<h4>0.3.4.2</h4>
<p class="note-padding">
View the complete
@@ -34,8 +56,6 @@
</li>
</ul>
<h2>Previous Releases</h2>
<h4>0.3.4.1</h4>
<p class="note-padding">
View the complete

View File

@@ -0,0 +1,24 @@
import { NgModule } from '@angular/core'
import { CommonModule } from '@angular/common'
import { Routes, RouterModule } from '@angular/router'
import { IonicModule } from '@ionic/angular'
import { ExperimentalFeaturesPage } from './experimental-features.page'
import { EmverPipesModule } from '@start9labs/shared'
// Single child route: this lazily-loaded module renders the
// Experimental Features page at its mount path.
const routes: Routes = [
  {
    path: '',
    component: ExperimentalFeaturesPage,
  },
]

// Angular module wiring the Experimental Features page into the router.
@NgModule({
  imports: [
    CommonModule,
    IonicModule,
    RouterModule.forChild(routes),
    EmverPipesModule,
  ],
  declarations: [ExperimentalFeaturesPage],
})
export class ExperimentalFeaturesPageModule {}

View File

@@ -0,0 +1,36 @@
<ion-header>
<ion-toolbar>
<ion-buttons slot="start">
<ion-back-button defaultHref="system"></ion-back-button>
</ion-buttons>
<ion-title>Experimental Features</ion-title>
</ion-toolbar>
</ion-header>
<ion-content class="with-widgets">
<ion-item-group *ngIf="server$ | async as server">
<ion-item button (click)="presentAlertResetTor()">
<ion-icon slot="start" name="reload"></ion-icon>
<ion-label>
<h2>Reset Tor</h2>
<p>
Resetting the Tor daemon on your server may resolve Tor connectivity
issues.
</p>
</ion-label>
</ion-item>
<ion-item button (click)="presentAlertZram(server.zram)">
<ion-icon
slot="start"
[name]="server.zram ? 'flash-off-outline' : 'flash-outline'"
></ion-icon>
<ion-label>
<h2>{{ server.zram ? 'Disable' : 'Enable' }} zram</h2>
<p>
Enabling zram may improve server performance, especially on low RAM
devices.
</p>
</ion-label>
</ion-item>
</ion-item-group>
</ion-content>

View File

@@ -0,0 +1,156 @@
import { ChangeDetectionStrategy, Component } from '@angular/core'
import {
AlertController,
LoadingController,
ToastController,
} from '@ionic/angular'
import { PatchDB } from 'patch-db-client'
import { ApiService } from 'src/app/services/api/embassy-api.service'
import { ConfigService } from 'src/app/services/config.service'
import { DataModel } from 'src/app/services/patch-db/data-model'
import { ErrorToastService } from '@start9labs/shared'
@Component({
  selector: 'experimental-features',
  templateUrl: './experimental-features.page.html',
  styleUrls: ['./experimental-features.page.scss'],
  changeDetection: ChangeDetectionStrategy.OnPush,
})
export class ExperimentalFeaturesPage {
  // Live view of the server-info section of the patch DB, consumed by the template.
  readonly server$ = this.patch.watch$('server-info')

  constructor(
    private readonly toastCtrl: ToastController,
    private readonly patch: PatchDB<DataModel>,
    private readonly config: ConfigService,
    private readonly alertCtrl: AlertController,
    private readonly loadingCtrl: LoadingController,
    private readonly api: ApiService,
    private readonly errToast: ErrorToastService,
  ) {}

  /**
   * Confirm before resetting the Tor daemon, with a stronger warning when the
   * UI itself is being served over Tor. The optional "Wipe state" checkbox is
   * forwarded to resetTor().
   */
  async presentAlertResetTor() {
    const isTor = this.config.isTor()
    const shared =
      'Optionally wipe state to forcibly acquire new guard nodes. It is recommended to try without wiping state first.'
    const alert = await this.alertCtrl.create({
      header: isTor ? 'Warning' : 'Confirm',
      message: isTor
        ? `You are currently connected over Tor. If you reset the Tor daemon, you will lose connectivity until it comes back online.<br/><br/>${shared}`
        : `Reset Tor?<br/><br/>${shared}`,
      inputs: [
        {
          label: 'Wipe state',
          type: 'checkbox',
          value: 'wipe',
        },
      ],
      buttons: [
        {
          text: 'Cancel',
          role: 'cancel',
        },
        {
          text: 'Reset',
          // `value` is the list of checked input values; only wipe state when
          // the "wipe" checkbox was actually selected. (The previous predicate
          // `value.some(v => 'wipe')` was always truthy.)
          handler: (value: string[]) => {
            this.resetTor(value.includes('wipe'))
          },
          cssClass: 'enter-click',
        },
      ],
      cssClass: isTor ? 'alert-warning-message' : '',
    })
    await alert.present()
  }

  /**
   * Confirm toggling zram. `enabled` is the CURRENT state: disabling gets a
   * simple confirm, enabling gets an "experimental feature" warning.
   */
  async presentAlertZram(enabled: boolean) {
    const alert = await this.alertCtrl.create({
      header: enabled ? 'Confirm' : 'Warning',
      message: enabled
        ? 'Are you sure you want to disable zram?'
        : 'zram on StartOS is experimental. It may increase performance of your server, especially if it is a low RAM device.',
      buttons: [
        {
          text: 'Cancel',
          role: 'cancel',
        },
        {
          text: enabled ? 'Disable' : 'Enable',
          handler: () => {
            this.toggleZram(enabled)
          },
          cssClass: 'enter-click',
        },
      ],
      cssClass: enabled ? '' : 'alert-warning-message',
    })
    await alert.present()
  }

  /** Request a Tor daemon reset from the backend, with loading/toast feedback. */
  private async resetTor(wipeState: boolean) {
    const loader = await this.loadingCtrl.create({
      message: 'Resetting Tor...',
    })
    await loader.present()

    try {
      await this.api.resetTor({
        'wipe-state': wipeState,
        reason: 'User triggered',
      })
      const toast = await this.toastCtrl.create({
        header: 'Tor reset in progress',
        position: 'bottom',
        duration: 4000,
        buttons: [
          {
            side: 'start',
            icon: 'close',
            handler: () => {
              return true
            },
          },
        ],
      })
      await toast.present()
    } catch (e: any) {
      this.errToast.present(e)
    } finally {
      loader.dismiss()
    }
  }

  /** Flip zram on the backend. `enabled` is the CURRENT state being left. */
  private async toggleZram(enabled: boolean) {
    const loader = await this.loadingCtrl.create({
      message: enabled ? 'Disabling zram...' : 'Enabling zram...',
    })
    await loader.present()

    try {
      await this.api.toggleZram({ enable: !enabled })
      const toast = await this.toastCtrl.create({
        header: `Zram ${enabled ? 'disabled' : 'enabled'}`,
        position: 'bottom',
        duration: 4000,
        buttons: [
          {
            side: 'start',
            icon: 'close',
            handler: () => {
              return true
            },
          },
        ],
      })
      await toast.present()
    } catch (e: any) {
      this.errToast.present(e)
    } finally {
      loader.dismiss()
    }
  }
}

View File

@@ -34,6 +34,11 @@ const routes: Routes = [
m => m.KernelLogsPageModule,
),
},
{
path: 'tor-logs',
loadChildren: () =>
import('./tor-logs/tor-logs.module').then(m => m.TorLogsPageModule),
},
{
path: 'metrics',
loadChildren: () =>
@@ -75,6 +80,13 @@ const routes: Routes = [
loadChildren: () =>
import('./wifi/wifi.module').then(m => m.WifiPageModule),
},
{
path: 'experimental-features',
loadChildren: () =>
import('./experimental-features/experimental-features.module').then(
m => m.ExperimentalFeaturesPageModule,
),
},
]
@NgModule({

View File

@@ -436,6 +436,17 @@ export class ServerShowPage {
detail: true,
disabled$: of(false),
},
{
title: 'Experimental Features',
// Wording fix: dropped the duplicated "new" ("new and potentially unstable new features").
description: 'Try out new and potentially unstable features',
icon: 'flask-outline',
action: () =>
this.navCtrl.navigateForward(['experimental-features'], {
relativeTo: this.route,
}),
detail: true,
disabled$: of(false),
},
],
Insights: [
{
@@ -470,7 +481,7 @@ export class ServerShowPage {
{
title: 'OS Logs',
description: 'Raw, unfiltered operating system logs',
icon: 'newspaper-outline',
icon: 'receipt-outline',
action: () =>
this.navCtrl.navigateForward(['logs'], { relativeTo: this.route }),
detail: true,
@@ -488,6 +499,17 @@ export class ServerShowPage {
detail: true,
disabled$: of(false),
},
{
title: 'Tor Logs',
description: 'Diagnostic log stream for the Tor daemon on StartOS',
icon: 'receipt-outline',
action: () =>
this.navCtrl.navigateForward(['tor-logs'], {
relativeTo: this.route,
}),
detail: true,
disabled$: of(false),
},
],
Support: [
{

View File

@@ -4,6 +4,7 @@ import { Routes, RouterModule } from '@angular/router'
import { IonicModule } from '@ionic/angular'
import { ServerSpecsPage } from './server-specs.page'
import { EmverPipesModule } from '@start9labs/shared'
import { TuiLetModule } from '@taiga-ui/cdk'
const routes: Routes = [
{
@@ -18,6 +19,7 @@ const routes: Routes = [
IonicModule,
RouterModule.forChild(routes),
EmverPipesModule,
TuiLetModule,
],
declarations: [ServerSpecsPage],
})

View File

@@ -45,18 +45,25 @@
<ion-icon slot="icon-only" name="copy-outline"></ion-icon>
</ion-button>
</ion-item>
<ng-container *ngFor="let ip of server['ip-info'] | keyvalue">
<ng-container *ngFor="let entry of ip.value | keyvalue">
<ion-item *ngIf="entry.value as address">
<ion-label>
<h2>{{ ip.key }} ({{ entry.key }})</h2>
<p>{{ address }}</p>
</ion-label>
<ion-button slot="end" fill="clear" (click)="copy(address)">
<ion-icon slot="icon-only" name="copy-outline"></ion-icon>
</ion-button>
</ion-item>
</ng-container>
<ng-container *ngFor="let iface of server['ip-info'] | keyvalue">
<ion-item *tuiLet="iface.value.ipv4 as ipv4">
<ion-label>
<h2>{{ iface.key }} (IPv4)</h2>
<p>{{ ipv4 || 'n/a' }}</p>
</ion-label>
<ion-button *ngIf="ipv4" slot="end" fill="clear" (click)="copy(ipv4)">
<ion-icon slot="icon-only" name="copy-outline"></ion-icon>
</ion-button>
</ion-item>
<ion-item *tuiLet="iface.value.ipv6 as ipv6">
<ion-label>
<h2>{{ iface.key }} (IPv6)</h2>
<p>{{ ipv6 || 'n/a' }}</p>
</ion-label>
<ion-button *ngIf="ipv6" slot="end" fill="clear" (click)="copy(ipv6)">
<ion-icon slot="icon-only" name="copy-outline"></ion-icon>
</ion-button>
</ion-item>
</ng-container>
<ion-item-divider>Device Credentials</ion-item-divider>

View File

@@ -95,8 +95,9 @@ export class SideloadPage {
manifest: this.toUpload.manifest!,
icon: this.toUpload.icon!,
})
const buffer = await blobToBuffer(this.toUpload.file!)
this.api.uploadPackage(guid, buffer).catch(e => console.error(e))
this.api
.uploadPackage(guid, this.toUpload.file!)
.catch(e => console.error(e))
this.navCtrl.navigateRoot('/services')
} catch (e: any) {
@@ -190,20 +191,24 @@ async function readBlobAsDataURL(
}
async function blobToDataURL(data: Blob | File): Promise<string> {
const res = await readBlobAsDataURL(data)
if (res instanceof ArrayBuffer)
if (res instanceof ArrayBuffer) {
throw new Error('readBlobAsDataURL response should not be an array buffer')
if (res == null)
}
if (res == null) {
throw new Error('readBlobAsDataURL response should not be null')
}
if (typeof res === 'string') return res
throw new Error('no possible blob to data url resolution found')
}
async function blobToBuffer(data: Blob | File): Promise<ArrayBuffer> {
const res = await readBlobToArrayBuffer(data)
if (res instanceof String)
if (res instanceof String) {
throw new Error('readBlobToArrayBuffer response should not be a string')
if (res == null)
}
if (res == null) {
throw new Error('readBlobToArrayBuffer response should not be null')
}
if (res instanceof ArrayBuffer) return res
throw new Error('no possible blob to array buffer resolution found')
}

View File

@@ -0,0 +1,24 @@
// Lazy-loaded Angular module for the Tor daemon log viewer page.
import { NgModule } from '@angular/core'
import { CommonModule } from '@angular/common'
import { Routes, RouterModule } from '@angular/router'
import { IonicModule } from '@ionic/angular'
import { TorLogsPage } from './tor-logs.page'
import { LogsComponentModule } from 'src/app/components/logs/logs.component.module'
// The page is mounted at the module's root path; the parent router supplies
// the 'tor-logs' URL segment via loadChildren.
const routes: Routes = [
{
path: '',
component: TorLogsPage,
},
]
// LogsComponentModule provides the shared <logs> component the page renders.
@NgModule({
imports: [
CommonModule,
IonicModule,
RouterModule.forChild(routes),
LogsComponentModule,
],
declarations: [TorLogsPage],
})
export class TorLogsPageModule {}

View File

@@ -0,0 +1,8 @@
<!-- Tor daemon log viewer: delegates one-shot fetching and live streaming to
     the TorLogsPage component's fetchLogs()/followLogs() factories. -->
<logs
[fetchLogs]="fetchLogs()"
[followLogs]="followLogs()"
context="tor"
defaultBack="system"
pageTitle="Tor Logs"
class="ion-page"
></logs>

View File

@@ -0,0 +1,24 @@
import { Component } from '@angular/core'
import { RR } from 'src/app/services/api/api.types'
import { ApiService } from 'src/app/services/api/embassy-api.service'

/**
 * Page wrapper around the shared <logs> component, wired to the Tor daemon
 * log endpoints of the StartOS API.
 */
@Component({
selector: 'tor-logs',
templateUrl: './tor-logs.page.html',
styleUrls: ['./tor-logs.page.scss'],
})
export class TorLogsPage {
constructor(private readonly api: ApiService) {}

/** Factory for the one-shot log fetcher consumed by <logs>. */
fetchLogs() {
return async (req: RR.GetServerLogsReq) => this.api.getTorLogs(req)
}

/** Factory for the streaming log follower consumed by <logs>. */
followLogs() {
return async (req: RR.FollowServerLogsReq) => this.api.followTorLogs(req)
}
}

View File

@@ -2,7 +2,6 @@ import { NgModule } from '@angular/core'
import { CommonModule } from '@angular/common'
import { IonicModule } from '@ionic/angular'
import { RouterModule, Routes } from '@angular/router'
import { TuiLetModule } from '@taiga-ui/cdk'
import { TuiLoaderModule } from '@taiga-ui/core'
import { TuiTilesModule } from '@taiga-ui/kit'
@@ -26,7 +25,6 @@ const routes: Routes = [
CommonModule,
IonicModule,
TuiTilesModule,
TuiLetModule,
AddWidgetModule,
FavoritesModule,
HealthModule,

View File

@@ -9,8 +9,8 @@ import {
} from '@angular/core'
import { TuiDialogContext, TuiDialogService } from '@taiga-ui/core'
import {
PolymorpheusComponent,
POLYMORPHEUS_CONTEXT,
PolymorpheusComponent,
} from '@tinkoff/ng-polymorpheus'
import { PatchDB } from 'patch-db-client'
import { DataModel, Widget } from '../../services/patch-db/data-model'
@@ -84,7 +84,7 @@ export class WidgetsPage {
add() {
this.dialog.open(ADD_WIDGET, { label: 'Add widget' }).subscribe(widget => {
this.addWidget(widget)
this.addWidget(widget!)
})
}

View File

@@ -20,9 +20,10 @@ export module Mock {
updated: true,
}
export const MarketplaceEos: RR.GetMarketplaceEosRes = {
version: '0.3.4.2',
version: '0.3.4.3',
headline: 'Our biggest release ever.',
'release-notes': {
'0.3.4.3': 'Some **Markdown** release _notes_ for 0.3.4.3',
'0.3.4.2': 'Some **Markdown** release _notes_ for 0.3.4.2',
'0.3.4.1': 'Some **Markdown** release _notes_ for 0.3.4.1',
'0.3.4': 'Some **Markdown** release _notes_ for 0.3.4',

View File

@@ -62,6 +62,17 @@ export module RR {
export type SystemRebuildReq = {} // server.rebuild
export type SystemRebuildRes = null
export type ResetTorReq = {
'wipe-state': boolean
reason: string
} // net.tor.reset
export type ResetTorRes = null
export type ToggleZramReq = {
enable: boolean
} // server.experimental.zram
export type ToggleZramRes = null
// sessions
export type GetSessionsReq = {} // sessions.list

View File

@@ -1,6 +1,6 @@
import { BehaviorSubject, Observable } from 'rxjs'
import { Update } from 'patch-db-client'
import { RR, Encrypted } from './api.types'
import { Encrypted, RR } from './api.types'
import { DataModel } from 'src/app/services/patch-db/data-model'
import { Log } from '@start9labs/shared'
import { WebSocketSubjectConfig } from 'rxjs/webSocket'
@@ -30,7 +30,7 @@ export abstract class ApiService {
abstract getStatic(url: string): Promise<string>
// for sideloading packages
abstract uploadPackage(guid: string, body: ArrayBuffer): Promise<string>
abstract uploadPackage(guid: string, body: Blob): Promise<string>
// db
@@ -73,6 +73,8 @@ export abstract class ApiService {
params: RR.GetServerLogsReq,
): Promise<RR.GetServerLogsRes>
abstract getTorLogs(params: RR.GetServerLogsReq): Promise<RR.GetServerLogsRes>
abstract followServerLogs(
params: RR.FollowServerLogsReq,
): Promise<RR.FollowServerLogsRes>
@@ -81,6 +83,10 @@ export abstract class ApiService {
params: RR.FollowServerLogsReq,
): Promise<RR.FollowServerLogsRes>
abstract followTorLogs(
params: RR.FollowServerLogsReq,
): Promise<RR.FollowServerLogsRes>
abstract getServerMetrics(
params: RR.GetServerMetricsReq,
): Promise<RR.GetServerMetricsRes>
@@ -105,6 +111,10 @@ export abstract class ApiService {
abstract repairDisk(params: RR.SystemRebuildReq): Promise<RR.SystemRebuildRes>
abstract resetTor(params: RR.ResetTorReq): Promise<RR.ResetTorRes>
abstract toggleZram(params: RR.ToggleZramReq): Promise<RR.ToggleZramRes>
// marketplace URLs
abstract marketplaceProxy<T>(

View File

@@ -44,7 +44,7 @@ export class LiveApiService extends ApiService {
}
// for sideloading packages
async uploadPackage(guid: string, body: ArrayBuffer): Promise<string> {
async uploadPackage(guid: string, body: Blob): Promise<string> {
return this.httpRequest({
method: Method.POST,
body,
@@ -135,6 +135,10 @@ export class LiveApiService extends ApiService {
return this.rpcRequest({ method: 'server.kernel-logs', params })
}
async getTorLogs(params: RR.GetServerLogsReq): Promise<RR.GetServerLogsRes> {
return this.rpcRequest({ method: 'net.tor.logs', params })
}
async followServerLogs(
params: RR.FollowServerLogsReq,
): Promise<RR.FollowServerLogsRes> {
@@ -147,6 +151,12 @@ export class LiveApiService extends ApiService {
return this.rpcRequest({ method: 'server.kernel-logs.follow', params })
}
async followTorLogs(
params: RR.FollowServerLogsReq,
): Promise<RR.FollowServerLogsRes> {
return this.rpcRequest({ method: 'net.tor.logs.follow', params })
}
async getServerMetrics(
params: RR.GetServerMetricsReq,
): Promise<RR.GetServerMetricsRes> {
@@ -182,6 +192,14 @@ export class LiveApiService extends ApiService {
return this.rpcRequest({ method: 'disk.repair', params })
}
async resetTor(params: RR.ResetTorReq): Promise<RR.ResetTorRes> {
return this.rpcRequest({ method: 'net.tor.reset', params })
}
async toggleZram(params: RR.ToggleZramReq): Promise<RR.ToggleZramRes> {
return this.rpcRequest({ method: 'server.experimental.zram', params })
}
// marketplace URLs
async marketplaceProxy<T>(

View File

@@ -1,12 +1,12 @@
import { Injectable } from '@angular/core'
import { pauseFor, Log } from '@start9labs/shared'
import { Log, pauseFor } from '@start9labs/shared'
import { ApiService } from './embassy-api.service'
import {
PatchOp,
Update,
Operation,
RemoveOperation,
PatchOp,
pathFromArray,
RemoveOperation,
Update,
} from 'patch-db-client'
import {
DataModel,
@@ -87,7 +87,7 @@ export class MockApiService extends ApiService {
return markdown
}
async uploadPackage(guid: string, body: ArrayBuffer): Promise<string> {
async uploadPackage(guid: string, body: Blob): Promise<string> {
await pauseFor(2000)
return 'success'
}
@@ -211,6 +211,17 @@ export class MockApiService extends ApiService {
}
}
async getTorLogs(params: RR.GetServerLogsReq): Promise<RR.GetServerLogsRes> {
await pauseFor(2000)
const entries = this.randomLogs(params.limit)
return {
entries,
'start-cursor': 'startCursor',
'end-cursor': 'endCursor',
}
}
async followServerLogs(
params: RR.FollowServerLogsReq,
): Promise<RR.FollowServerLogsRes> {
@@ -231,6 +242,16 @@ export class MockApiService extends ApiService {
}
}
async followTorLogs(
params: RR.FollowServerLogsReq,
): Promise<RR.FollowServerLogsRes> {
await pauseFor(2000)
return {
'start-cursor': 'start-cursor',
guid: '7251d5be-645f-4362-a51b-3a85be92b31e',
}
}
randomLogs(limit = 1): Log[] {
const arrLength = Math.ceil(limit / Mock.ServerLogs.length)
const logs = new Array(arrLength)
@@ -301,6 +322,23 @@ export class MockApiService extends ApiService {
return null
}
async resetTor(params: RR.ResetTorReq): Promise<RR.ResetTorRes> {
await pauseFor(2000)
return null
}
async toggleZram(params: RR.ToggleZramReq): Promise<RR.ToggleZramRes> {
await pauseFor(2000)
const patch = [
{
op: PatchOp.REPLACE,
path: '/server-info/zram',
value: params.enable,
},
]
return this.withRevision(patch, null)
}
// marketplace URLs
async marketplaceProxy(

View File

@@ -44,7 +44,7 @@ export const mockPatchData: DataModel = {
},
'server-info': {
id: 'abcdefgh',
version: '0.3.4.2',
version: '0.3.4.3',
'last-backup': new Date(new Date().valueOf() - 604800001).toISOString(),
'lan-address': 'https://adjective-noun.local',
'tor-address': 'http://myveryownspecialtoraddress.onion',
@@ -73,6 +73,7 @@ export const mockPatchData: DataModel = {
pubkey: 'npub1sg6plzptd64u62a878hep2kev88swjh3tw00gjsfl8f237lmu63q0uf63m',
'ca-fingerprint': 'SHA-256: 63 2B 11 99 44 40 17 DF 37 FC C3 DF 0F 3D 15',
'system-start-time': new Date(new Date().valueOf() - 360042).toUTCString(),
zram: false,
},
'package-data': {
bitcoind: {

View File

@@ -77,6 +77,7 @@ export interface ServerInfo {
pubkey: string
'ca-fingerprint': string
'system-start-time': string
zram: boolean
}
export interface IpInfo {

1
libs/Cargo.lock generated
View File

@@ -1647,6 +1647,7 @@ dependencies = [
"dprint-swc-ext",
"embassy_container_init",
"helpers",
"itertools 0.10.5",
"models",
"reqwest",
"serde",

View File

@@ -5,8 +5,8 @@ use std::process::Stdio;
use std::sync::Arc;
use embassy_container_init::{
OutputParams, OutputStrategy, ProcessGroupId, ProcessId, ReadLineStderrParams,
ReadLineStdoutParams, RunCommandParams, SendSignalParams, SignalGroupParams, LogParams,
LogParams, OutputParams, OutputStrategy, ProcessGroupId, ProcessId, ReadLineStderrParams,
ReadLineStdoutParams, RunCommandParams, SendSignalParams, SignalGroupParams,
};
use futures::StreamExt;
use helpers::NonDetachingJoinHandle;
@@ -219,7 +219,8 @@ impl Handler {
let mut child = {
self.children
.lock()
.await.processes
.await
.processes
.get(&pid)
.ok_or_else(not_found)?
.child
@@ -264,7 +265,8 @@ impl Handler {
if signal == 9 {
self.children
.lock()
.await.processes
.await
.processes
.remove(&pid)
.ok_or_else(not_found)?;
}
@@ -354,7 +356,6 @@ async fn main() {
tokio::spawn(async move {
let w = Arc::new(Mutex::new(w));
while let Some(line) = lines.next_line().await.transpose() {
let handler = handler.clone();
let w = w.clone();
tokio::spawn(async move {

View File

@@ -36,6 +36,7 @@ swc_macros_common = "=0.3.5"
swc_visit = "=0.3.0"
swc_visit_macros = "=0.3.1"
sha2 = "0.10.2"
itertools = "0.10.5"
models = { path = "../models" }
helpers = { path = "../helpers" }
serde = { version = "1.0", features = ["derive", "rc"] }

View File

@@ -185,6 +185,14 @@ const runRsync = (
}
};
// Reports disk usage for a path inside a volume via the host-provided
// `disk_usage` op, which returns a [used, total] byte pair.
// NOTE(review): the `= { volumeId: null, path: null }` parameter fallback means
// calling diskUsage() with NO argument passes nulls straight through instead of
// triggering requireParam — destructuring defaults only fire on `undefined`.
// Confirm whether the no-argument "whole disk" call is intentional.
const diskUsage = async ({
volumeId = requireParam("volumeId"),
path = requireParam("path"),
} = { volumeId: null, path: null }) => {
const [used, total] = await Deno.core.opAsync("disk_usage", volumeId, path);
return { used, total }
}
const currentFunction = Deno.core.opSync("current_function");
const input = Deno.core.opSync("get_input");
const variable_args = Deno.core.opSync("get_variable_args");
@@ -213,7 +221,8 @@ const effects = {
runDaemon,
signalGroup,
runRsync,
readDir
readDir,
diskUsage,
};
const defaults = {

View File

@@ -284,6 +284,7 @@ impl JsExecutionEnvironment {
fns::create_dir::decl(),
fns::remove_dir::decl(),
fns::read_dir::decl(),
fns::disk_usage::decl(),
fns::current_function::decl(),
fns::log_trace::decl(),
fns::log_warn::decl(),
@@ -396,10 +397,12 @@ mod fns {
SendSignal, SendSignalParams, SignalGroup, SignalGroupParams,
};
use helpers::{to_tmp_path, AtomicFile, Rsync, RsyncOptions};
use models::VolumeId;
use itertools::Itertools;
use models::{ErrorKind, VolumeId};
use serde::{Deserialize, Serialize};
use serde_json::{json, Value};
use tokio::io::AsyncWriteExt;
use tokio::process::Command;
use super::{AnswerState, JsContext};
use crate::{system_time_as_unix_ms, MetadataJs, ResultType};
@@ -490,6 +493,7 @@ mod fns {
.ok_or_else(|| anyhow!("There is no {} in volumes", volume_id))?
};
//get_path_for in volume.rs
let path_in = path_in.strip_prefix("/").unwrap_or(&path_in);
let new_file = volume_path.join(path_in);
if !is_subset(&volume_path, &new_file).await? {
bail!(
@@ -515,6 +519,7 @@ mod fns {
.ok_or_else(|| anyhow!("There is no {} in volumes", volume_id))?
};
//get_path_for in volume.rs
let path_in = path_in.strip_prefix("/").unwrap_or(&path_in);
let new_file = volume_path.join(path_in);
if !is_subset(&volume_path, &new_file).await? {
bail!(
@@ -573,6 +578,7 @@ mod fns {
bail!("Volume {} is readonly", volume_id);
}
let path_in = path_in.strip_prefix("/").unwrap_or(&path_in);
let new_file = volume_path.join(&path_in);
let parent_new_file = new_file
.parent()
@@ -629,6 +635,7 @@ mod fns {
bail!("Volume {} is readonly", dst_volume);
}
let src_path = src_path.strip_prefix("/").unwrap_or(&src_path);
let old_file = volume_path.join(src_path);
let parent_old_file = old_file
.parent()
@@ -642,6 +649,7 @@ mod fns {
);
}
let dst_path = dst_path.strip_prefix("/").unwrap_or(&dst_path);
let new_file = volume_path_out.join(dst_path);
let parent_new_file = new_file
.parent()
@@ -689,6 +697,7 @@ mod fns {
bail!("Volume {} is readonly", dst_volume);
}
let src_path = src_path.strip_prefix("/").unwrap_or(&src_path);
let src = volume_path.join(src_path);
// With the volume check
if !is_subset(&volume_path, &src).await? {
@@ -702,6 +711,7 @@ mod fns {
bail!("Source at {} does not exists", src.to_string_lossy());
}
let dst_path = src_path.strip_prefix("/").unwrap_or(&dst_path);
let dst = volume_path_out.join(dst_path);
// With the volume check
if !is_subset(&volume_path_out, &dst).await? {
@@ -776,6 +786,7 @@ mod fns {
if volumes.readonly(&volume_id) {
bail!("Volume {} is readonly", volume_id);
}
let path_in = path_in.strip_prefix("/").unwrap_or(&path_in);
let new_file = volume_path.join(path_in);
// With the volume check
if !is_subset(&volume_path, &new_file).await? {
@@ -806,6 +817,7 @@ mod fns {
if volumes.readonly(&volume_id) {
bail!("Volume {} is readonly", volume_id);
}
let path_in = path_in.strip_prefix("/").unwrap_or(&path_in);
let new_file = volume_path.join(path_in);
// With the volume check
if !is_subset(&volume_path, &new_file).await? {
@@ -836,6 +848,7 @@ mod fns {
if volumes.readonly(&volume_id) {
bail!("Volume {} is readonly", volume_id);
}
let path_in = path_in.strip_prefix("/").unwrap_or(&path_in);
let new_file = volume_path.join(path_in);
// With the volume check
@@ -864,6 +877,7 @@ mod fns {
.ok_or_else(|| anyhow!("There is no {} in volumes", volume_id))?;
volume_path
};
let path_in = path_in.strip_prefix("/").unwrap_or(&path_in);
let new_file = volume_path.join(path_in);
// With the volume check
@@ -893,6 +907,86 @@ mod fns {
Ok(paths)
}
// Computes (used, total) bytes by shelling out to coreutils:
// - With a volume id AND a path: `total` comes from `df --output=size` on the
//   package-data directory and `used` from `du -s` on the resolved path.
// - Otherwise: both values come from one `df --output=used,size` on package-data.
// NOTE(review): in the per-path branch, `size` is measured on `base_path` (the
// whole package-data filesystem), not on the volume itself — confirm that is
// the intended denominator.
#[op]
async fn disk_usage(
state: Rc<RefCell<OpState>>,
volume_id: Option<VolumeId>,
path_in: Option<PathBuf>,
) -> Result<(u64, u64), AnyError> {
// Resolve the volume's on-disk location (if one was named) while holding the
// OpState borrow for as short a time as possible.
let (base_path, volume_path) = {
let state = state.borrow();
let ctx: &JsContext = state.borrow();
let volume_path = if let Some(volume_id) = volume_id {
Some(
ctx.volumes
.path_for(&ctx.datadir, &ctx.package_id, &ctx.version, &volume_id)
.ok_or_else(|| anyhow!("There is no {} in volumes", volume_id))?,
)
} else {
None
};
(ctx.datadir.join("package-data"), volume_path)
};
// Treat `path_in` as volume-relative even when supplied with a leading '/'.
let path = if let (Some(volume_path), Some(path_in)) = (volume_path, path_in) {
let path_in = path_in.strip_prefix("/").unwrap_or(&path_in);
Some(volume_path.join(path_in))
} else {
None
};
if let Some(path) = path {
// Filesystem capacity in bytes: `df` prints a header line, so skip(1)
// takes the first data row.
let size = String::from_utf8(
Command::new("df")
.arg("--output=size")
.arg("--block-size=1")
.arg(&base_path)
.stdout(std::process::Stdio::piped())
.output()
.await?
.stdout,
)?
.lines()
.skip(1)
.next()
.unwrap_or_default()
.parse()?;
// Bytes used by the requested subtree: first whitespace-separated field
// of `du -s` output.
let used = String::from_utf8(
Command::new("du")
.arg("-s")
.arg("--block-size=1")
.arg(path)
.stdout(std::process::Stdio::piped())
.output()
.await?
.stdout,
)?
.split_ascii_whitespace()
.next()
.unwrap_or_default()
.parse()?;
Ok((used, size))
} else {
// Whole-filesystem query: one `df` call yields both used and size on the
// first data row (header skipped), parsed as a (used, size) pair.
String::from_utf8(
Command::new("df")
.arg("--output=used,size")
.arg("--block-size=1")
.arg(&base_path)
.stdout(std::process::Stdio::piped())
.output()
.await?
.stdout,
)?
.lines()
.skip(1)
.next()
.unwrap_or_default()
.split_ascii_whitespace()
.next_tuple()
.and_then(|(used, size)| Some((used.parse().ok()?, size.parse().ok()?)))
.ok_or_else(|| anyhow!("invalid output from df"))
}
}
#[op]
fn current_function(state: &mut OpState) -> Result<String, AnyError> {
let ctx = state.borrow::<JsContext>();
@@ -1216,6 +1310,7 @@ mod fns {
if volumes.readonly(&volume_id) {
bail!("Volume {} is readonly", volume_id);
}
let path_in = path_in.strip_prefix("/").unwrap_or(&path_in);
let new_file = volume_path.join(path_in);
// With the volume check
if !is_subset(&volume_path, &new_file).await? {
@@ -1265,6 +1360,7 @@ mod fns {
if volumes.readonly(&volume_id) {
bail!("Volume {} is readonly", volume_id);
}
let path_in = path_in.strip_prefix("/").unwrap_or(&path_in);
let new_file = volume_path.join(path_in);
// With the volume check
if !is_subset(&volume_path, &new_file).await? {

View File

@@ -75,6 +75,7 @@ pub enum ErrorKind {
Grub = 64,
Systemd = 65,
OpenSsh = 66,
Zram = 67,
}
impl ErrorKind {
pub fn as_str(&self) -> &'static str {
@@ -146,6 +147,7 @@ impl ErrorKind {
Grub => "Grub Error",
Systemd => "Systemd Error",
OpenSsh => "OpenSSH Error",
Zram => "Zram Error",
}
}
}

View File

@@ -1171,7 +1171,7 @@ dependencies = [
[[package]]
name = "embassy-os"
version = "0.3.4-rev.2"
version = "0.3.4-rev.3"
dependencies = [
"aes",
"async-compression",
@@ -1257,6 +1257,7 @@ dependencies = [
"thiserror",
"tokio",
"tokio-rustls",
"tokio-socks",
"tokio-stream",
"tokio-tar",
"tokio-tungstenite",