Merge branch 'next/major' of github.com:Start9Labs/start-os into rebase/feat/domains

This commit is contained in:
Matt Hill
2023-11-21 21:18:27 -07:00
46 changed files with 737 additions and 616 deletions

3
.gitignore vendored
View File

@@ -27,4 +27,5 @@ secrets.db
/results
/dpkg-workdir
/compiled.tar
/compiled-*.tar
/compiled-*.tar
/firmware

View File

@@ -8,7 +8,8 @@ ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; else ec
IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo iso; fi)
BINS := core/target/$(ARCH)-unknown-linux-gnu/release/startbox core/target/aarch64-unknown-linux-musl/release/container-init core/target/x86_64-unknown-linux-musl/release/container-init
WEB_UIS := web/dist/raw/ui web/dist/raw/setup-wizard web/dist/raw/install-wizard
BUILD_SRC := $(shell git ls-files build) build/lib/depends build/lib/conflicts
FIRMWARE_ROMS := ./firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json)
BUILD_SRC := $(shell git ls-files build) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS)
DEBIAN_SRC := $(shell git ls-files debian/)
IMAGE_RECIPE_SRC := $(shell git ls-files image-recipe/)
STARTD_SRC := core/startos/startd.service $(BUILD_SRC)
@@ -71,6 +72,7 @@ clean:
rm -rf dpkg-workdir
rm -rf image-recipe/deb
rm -rf results
rm -rf build/lib/firmware
rm -f ENVIRONMENT.txt
rm -f PLATFORM.txt
rm -f GIT_HASH.txt
@@ -133,6 +135,8 @@ install: $(ALL_TARGETS)
$(call cp,system-images/compat/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/compat.tar)
$(call cp,system-images/utils/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/utils.tar)
$(call cp,system-images/binfmt/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/binfmt.tar)
$(call cp,firmware/$(PLATFORM),$(DESTDIR)/usr/lib/startos/firmware)
update-overlay: $(ALL_TARGETS)
@echo "\033[33m!!! THIS WILL ONLY REFLASH YOUR DEVICE IN MEMORY !!!\033[0m"
@@ -164,6 +168,9 @@ upload-ota: results/$(BASENAME).squashfs
build/lib/depends build/lib/conflicts: build/dpkg-deps/*
build/dpkg-deps/generate.sh
$(FIRMWARE_ROMS): build/lib/firmware.json download-firmware.sh $(PLATFORM_FILE)
./download-firmware.sh $(PLATFORM)
system-images/compat/docker-images/$(ARCH).tar: $(COMPAT_SRC) core/Cargo.lock
cd system-images/compat && make docker-images/$(ARCH).tar && touch docker-images/$(ARCH).tar

13
build/lib/firmware.json Normal file
View File

@@ -0,0 +1,13 @@
[
{
"id": "pureboot-librem_mini_v2-basic_usb_autoboot_blob_jail-Release-28.3",
"platform": ["x86_64"],
"system-product-name": "librem_mini_v2",
"bios-version": {
"semver-prefix": "PureBoot-Release-",
"semver-range": "<28.3"
},
"url": "https://source.puri.sm/firmware/releases/-/raw/master/librem_mini_v2/custom/pureboot-librem_mini_v2-basic_usb_autoboot_blob_jail-Release-28.3.rom.gz",
"shasum": "5019bcf53f7493c7aa74f8ef680d18b5fc26ec156c705a841433aaa2fdef8f35"
}
]

View File

@@ -1 +1 @@
embassy-cli net dhcp update $interface
start-cli net dhcp update $interface

6
core/Cargo.lock generated
View File

@@ -4281,6 +4281,9 @@ name = "semver"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090"
dependencies = [
"serde",
]
[[package]]
name = "semver-parser"
@@ -4964,7 +4967,7 @@ dependencies = [
[[package]]
name = "start-os"
version = "0.3.5"
version = "0.3.5-rev.1"
dependencies = [
"aes",
"async-compression",
@@ -5047,6 +5050,7 @@ dependencies = [
"rpc-toolkit",
"rust-argon2",
"scopeguard",
"semver 1.0.20",
"serde",
"serde_json",
"serde_with",

View File

@@ -1,11 +1,18 @@
use std::path::Path;
use std::str::FromStr;
use serde::{Deserialize, Deserializer, Serialize};
use crate::Id;
use crate::{Id, InvalidId};
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)]
pub struct InterfaceId(Id);
impl FromStr for InterfaceId {
type Err = InvalidId;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Self(Id::try_from(s.to_owned())?))
}
}
impl From<Id> for InterfaceId {
fn from(id: Id) -> Self {
Self(id)

View File

@@ -0,0 +1,15 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM tor WHERE package = $1 AND interface = $2",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text"
]
},
"nullable": []
},
"hash": "350ab82048fb4a049042e4fdbe1b8c606ca400e43e31b9a05d2937217e0f6962"
}

View File

@@ -0,0 +1,12 @@
{
"db_name": "PostgreSQL",
"query": "UPDATE account SET tor_key = NULL, network_key = gen_random_bytes(32)",
"describe": {
"columns": [],
"parameters": {
"Left": []
},
"nullable": []
},
"hash": "b81592b3a74940ab56d41537484090d45cfa4c85168a587b1a41dc5393cccea1"
}

View File

@@ -0,0 +1,15 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM network_keys WHERE package = $1 AND interface = $2",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text"
]
},
"nullable": []
},
"hash": "dfc23b7e966c3853284753a7e934351ba0cae3825988b3e0ecd3b6781bcff524"
}

View File

@@ -14,7 +14,7 @@ keywords = [
name = "start-os"
readme = "README.md"
repository = "https://github.com/Start9Labs/start-os"
version = "0.3.5"
version = "0.3.5-rev.1"
license = "MIT"
[lib]
@@ -135,6 +135,7 @@ rpassword = "7.2.0"
rpc-toolkit = "0.2.2"
rust-argon2 = "2.0.0"
scopeguard = "1.1" # because avahi-sys fucks your shit up
semver = { version = "1.0.20", features = ["serde"] }
serde = { version = "1.0", features = ["derive", "rc"] }
serde_cbor = { package = "ciborium", version = "0.2.1" }
serde_json = "1.0"

View File

@@ -18,7 +18,7 @@ fn select_executable(name: &str) -> Option<fn()> {
match name {
#[cfg(feature = "avahi-alias")]
"avahi-alias" => Some(avahi_alias::main),
#[cfg(feature = "js_engine")]
#[cfg(feature = "js-engine")]
"start-deno" => Some(start_deno::main),
#[cfg(feature = "cli")]
"start-cli" => Some(start_cli::main),
@@ -36,24 +36,14 @@ fn select_executable(name: &str) -> Option<fn()> {
pub fn startbox() {
let args = std::env::args().take(2).collect::<Vec<_>>();
if let Some(x) = args
let executable = args
.get(0)
.and_then(|s| Path::new(&*s).file_name())
.and_then(|s| s.to_str())
.and_then(|s| select_executable(&s))
{
x()
} else if let Some(x) = args.get(1).and_then(|s| select_executable(&s)) {
.and_then(|s| s.to_str());
if let Some(x) = executable.and_then(|s| select_executable(&s)) {
x()
} else {
eprintln!(
"unknown executable: {}",
args.get(0)
.filter(|x| &**x != "startbox")
.or_else(|| args.get(1))
.map(|s| s.as_str())
.unwrap_or("N/A")
);
eprintln!("unknown executable: {}", executable.unwrap_or("N/A"));
std::process::exit(1);
}
}

View File

@@ -3,29 +3,45 @@ use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use helpers::NonDetachingJoinHandle;
use tokio::process::Command;
use tracing::instrument;
use crate::context::rpc::RpcContextConfig;
use crate::context::{DiagnosticContext, InstallContext, SetupContext};
use crate::disk::fsck::RepairStrategy;
use crate::disk::fsck::{RepairStrategy, RequiresReboot};
use crate::disk::main::DEFAULT_PASSWORD;
use crate::disk::REPAIR_DISK_PATH;
use crate::firmware::update_firmware;
use crate::init::STANDBY_MODE_PATH;
use crate::net::web_server::WebServer;
use crate::shutdown::Shutdown;
use crate::sound::CHIME;
use crate::sound::{BEP, CHIME};
use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt, PLATFORM};
#[instrument(skip_all)]
async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> {
if update_firmware().await?.0 {
return Ok(Some(Shutdown {
export_args: None,
restart: true,
}));
let song = NonDetachingJoinHandle::from(tokio::spawn(async {
loop {
BEP.play().await.unwrap();
BEP.play().await.unwrap();
tokio::time::sleep(Duration::from_secs(30)).await;
}
}));
match update_firmware().await {
Ok(RequiresReboot(true)) => {
return Ok(Some(Shutdown {
export_args: None,
restart: true,
}))
}
Err(e) => {
tracing::warn!("Error performing firmware update: {e}");
tracing::debug!("{e:?}");
}
_ => (),
}
Command::new("ln")
@@ -74,6 +90,7 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Er
)
.await?;
drop(song);
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
CHIME.play().await?;
@@ -100,8 +117,10 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Er
)
.await?;
drop(song);
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
CHIME.play().await?;
ctx.shutdown
.subscribe()
.recv()
@@ -152,6 +171,7 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Er
}
tracing::info!("Loaded Disk");
crate::init::init(&cfg).await?;
drop(song);
}
Ok(None)

View File

@@ -4,6 +4,7 @@ use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;
use helpers::to_tmp_path;
use josekit::jwk::Jwk;
@@ -25,7 +26,7 @@ use crate::db::model::{CurrentDependents, Database, PackageDataEntryMatchModelRe
use crate::db::prelude::PatchDbExt;
use crate::dependencies::compute_dependency_config_errs;
use crate::disk::OsPartitionInfo;
use crate::init::init_postgres;
use crate::init::{check_time_is_synchronized, init_postgres};
use crate::install::cleanup::{cleanup_failed, uninstall};
use crate::manager::ManagerMap;
use crate::middleware::auth::HashSessionToken;
@@ -174,6 +175,19 @@ impl RpcContext {
let tor_proxy_url = format!("socks5h://{tor_proxy}");
let devices = lshw().await?;
let ram = get_mem_info().await?.total.0 as u64 * 1024 * 1024;
if !db.peek().await.as_server_info().as_ntp_synced().de()? {
let db = db.clone();
tokio::spawn(async move {
while !check_time_is_synchronized().await.unwrap() {
tokio::time::sleep(Duration::from_secs(30)).await;
}
db.mutate(|v| v.as_server_info_mut().as_ntp_synced_mut().ser(&true))
.await
.unwrap()
});
}
let seed = Arc::new(RpcContextSeed {
is_closed: AtomicBool::new(false),
datadir: base.datadir().to_path_buf(),

View File

@@ -23,6 +23,7 @@ use crate::net::utils::{get_iface_ipv4_addr, get_iface_ipv6_addr};
use crate::prelude::*;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::status::Status;
use crate::util::cpupower::{Governor};
use crate::util::Version;
use crate::version::{Current, VersionT};
use crate::{ARCH, PLATFORM};
@@ -85,6 +86,7 @@ impl Database {
.join(":"),
ntp_synced: false,
zram: true,
governor: None,
},
package_data: AllPackageData::default(),
lan_port_forwards: LanPortForwards::new(),
@@ -137,6 +139,7 @@ pub struct ServerInfo {
pub ntp_synced: bool,
#[serde(default)]
pub zram: bool,
pub governor: Option<Governor>,
}
#[derive(Debug, Deserialize, Serialize, HasModel)]

View File

@@ -11,7 +11,7 @@ use crate::Error;
pub mod btrfs;
pub mod ext4;
#[derive(Debug, Clone, Copy)]
#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize)]
#[must_use]
pub struct RequiresReboot(pub bool);
impl std::ops::BitOrAssign for RequiresReboot {

View File

@@ -1,16 +1,63 @@
use std::collections::BTreeSet;
use std::path::Path;
use async_compression::tokio::bufread::GzipDecoder;
use clap::ArgMatches;
use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use tokio::fs::File;
use tokio::io::{AsyncRead, BufReader};
use tokio::io::BufReader;
use tokio::process::Command;
use crate::disk::fsck::RequiresReboot;
use crate::prelude::*;
use crate::util::Invoke;
use crate::PLATFORM;
/// Part of the [`Firmware`] entry; see that struct for more details.
#[derive(Clone, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct VersionMatcher {
/// Strip this prefix on the version matcher
semver_prefix: Option<String>,
/// Match the semver to this range
semver_range: Option<semver::VersionReq>,
/// Strip this suffix on the version matcher
semver_suffix: Option<String>,
}
/// An entry in the firmware.json file: a structure that helps decide
/// what to do for each of the known firmware versions.
#[derive(Clone, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct Firmware {
id: String,
/// This is the platform(s) the firmware was built for
platform: BTreeSet<String>,
/// This usually comes from dmidecode
system_product_name: Option<String>,
/// The version comes from dmidecode, then we decide if it matches
bios_version: Option<VersionMatcher>,
/// the hash of the firmware rom.gz
shasum: String,
}
fn display_firmware_update_result(arg: RequiresReboot, _: &ArgMatches) {
if arg.0 {
println!("Firmware successfully updated! Reboot to apply changes.");
} else {
println!("No firmware update available.");
}
}
/// We want to make sure on every init that the firmware is correct and
/// up to date — for systems like Purism hardware, where new firmware
/// is released and the updates are pushed through PureOS.
#[command(rename = "update-firmware", display(display_firmware_update_result))]
pub async fn update_firmware() -> Result<RequiresReboot, Error> {
let product_name = String::from_utf8(
let system_product_name = String::from_utf8(
Command::new("dmidecode")
.arg("-s")
.arg("system-product-name")
@@ -19,52 +66,84 @@ pub async fn update_firmware() -> Result<RequiresReboot, Error> {
)?
.trim()
.to_owned();
if product_name.is_empty() {
let bios_version = String::from_utf8(
Command::new("dmidecode")
.arg("-s")
.arg("bios-version")
.invoke(ErrorKind::Firmware)
.await?,
)?
.trim()
.to_owned();
if system_product_name.is_empty() || bios_version.is_empty() {
return Ok(RequiresReboot(false));
}
let firmware_dir = Path::new("/usr/lib/startos/firmware").join(&product_name);
if tokio::fs::metadata(&firmware_dir).await.is_ok() {
let current_firmware = String::from_utf8(
Command::new("dmidecode")
.arg("-s")
.arg("bios-version")
.invoke(ErrorKind::Firmware)
.await?,
)?
.trim()
.to_owned();
if tokio::fs::metadata(firmware_dir.join(format!("{current_firmware}.rom.gz")))
.await
.is_err()
&& tokio::fs::metadata(firmware_dir.join(format!("{current_firmware}.rom")))
.await
.is_err()
{
let mut firmware_read_dir = tokio::fs::read_dir(&firmware_dir).await?;
while let Some(entry) = firmware_read_dir.next_entry().await? {
let filename = entry.file_name().to_string_lossy().into_owned();
let rdr: Option<Box<dyn AsyncRead + Unpin + Send>> =
if filename.ends_with(".rom.gz") {
Some(Box::new(GzipDecoder::new(BufReader::new(
File::open(entry.path()).await?,
))))
} else if filename.ends_with(".rom") {
Some(Box::new(File::open(entry.path()).await?))
} else {
None
};
if let Some(mut rdr) = rdr {
Command::new("flashrom")
.arg("-p")
.arg("internal")
.arg("-w-")
.input(Some(&mut rdr))
.invoke(ErrorKind::Firmware)
.await?;
return Ok(RequiresReboot(true));
let firmware_dir = Path::new("/usr/lib/startos/firmware");
for firmware in serde_json::from_str::<Vec<Firmware>>(
&tokio::fs::read_to_string("/usr/lib/startos/firmware.json").await?,
)
.with_kind(ErrorKind::Deserialization)?
{
let id = firmware.id;
let matches_product_name = firmware
.system_product_name
.map_or(true, |spn| spn == system_product_name);
let matches_bios_version = firmware
.bios_version
.map_or(Some(true), |bv| {
let mut semver_str = bios_version.as_str();
if let Some(prefix) = &bv.semver_prefix {
semver_str = semver_str.strip_prefix(prefix)?;
}
}
if let Some(suffix) = &bv.semver_suffix {
semver_str = semver_str.strip_suffix(suffix)?;
}
let semver = semver_str
.split(".")
.filter_map(|v| v.parse().ok())
.chain(std::iter::repeat(0))
.take(3)
.collect::<Vec<_>>();
let semver = semver::Version::new(semver[0], semver[1], semver[2]);
Some(
bv.semver_range
.as_ref()
.map_or(true, |r| r.matches(&semver)),
)
})
.unwrap_or(false);
if firmware.platform.contains(&*PLATFORM) && matches_product_name && matches_bios_version {
let filename = format!("{id}.rom.gz");
let firmware_path = firmware_dir.join(&filename);
Command::new("sha256sum")
.arg("-c")
.input(Some(&mut std::io::Cursor::new(format!(
"{} {}",
firmware.shasum,
firmware_path.display()
))))
.invoke(ErrorKind::Filesystem)
.await?;
let mut rdr = if tokio::fs::metadata(&firmware_path).await.is_ok() {
GzipDecoder::new(BufReader::new(File::open(&firmware_path).await?))
} else {
return Err(Error::new(
eyre!("Firmware {id}.rom.gz not found in {firmware_dir:?}"),
ErrorKind::NotFound,
));
};
Command::new("flashrom")
.arg("-p")
.arg("internal")
.arg("-w-")
.input(Some(&mut rdr))
.invoke(ErrorKind::Firmware)
.await?;
return Ok(RequiresReboot(true));
}
}
Ok(RequiresReboot(false))
}

View File

@@ -4,7 +4,7 @@ use std::path::Path;
use std::time::{Duration, SystemTime};
use color_eyre::eyre::eyre;
use helpers::NonDetachingJoinHandle;
use models::ResultExt;
use rand::random;
use sqlx::{Pool, Postgres};
@@ -18,9 +18,9 @@ use crate::disk::mount::util::unmount;
use crate::install::PKG_ARCHIVE_DIR;
use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH;
use crate::prelude::*;
use crate::sound::BEP;
use crate::util::cpupower::{
current_governor, get_available_governors, set_governor, GOVERNOR_PERFORMANCE,
get_available_governors, get_preferred_governor, set_governor,
};
use crate::util::docker::{create_bridge_network, CONTAINER_DATADIR, CONTAINER_TOOL};
use crate::util::Invoke;
@@ -96,44 +96,64 @@ pub async fn init_postgres(datadir: impl AsRef<Path>) -> Result<(), Error> {
let pg_version_string = pg_version.to_string();
let pg_version_path = db_dir.join(&pg_version_string);
if tokio::fs::metadata(&pg_version_path).await.is_err() {
let conf_dir = Path::new("/etc/postgresql").join(pg_version.to_string());
let conf_dir_tmp = {
let mut tmp = conf_dir.clone();
tmp.set_extension("tmp");
tmp
};
if tokio::fs::metadata(&conf_dir).await.is_ok() {
Command::new("mv")
.arg(&conf_dir)
.arg(&conf_dir_tmp)
.invoke(ErrorKind::Filesystem)
.await?;
}
let mut old_version = pg_version;
while old_version > 13
/* oldest pg version included in startos */
if exists
// maybe migrate
{
let incomplete_path = db_dir.join(format!("{pg_version}.migration.incomplete"));
if tokio::fs::metadata(&incomplete_path).await.is_ok() // previous migration was incomplete
&& tokio::fs::metadata(&pg_version_path).await.is_ok()
{
old_version -= 1;
let old_datadir = db_dir.join(old_version.to_string());
if tokio::fs::metadata(&old_datadir).await.is_ok() {
Command::new("pg_upgradecluster")
.arg(old_version.to_string())
.arg("main")
.invoke(crate::ErrorKind::Database)
.await?;
break;
}
tokio::fs::remove_dir_all(&pg_version_path).await?;
}
if tokio::fs::metadata(&conf_dir).await.is_ok() {
if tokio::fs::metadata(&pg_version_path).await.is_err()
// need to migrate
{
let conf_dir = Path::new("/etc/postgresql").join(pg_version.to_string());
let conf_dir_tmp = {
let mut tmp = conf_dir.clone();
tmp.set_extension("tmp");
tmp
};
if tokio::fs::metadata(&conf_dir).await.is_ok() {
tokio::fs::remove_dir_all(&conf_dir).await?;
Command::new("mv")
.arg(&conf_dir)
.arg(&conf_dir_tmp)
.invoke(ErrorKind::Filesystem)
.await?;
}
Command::new("mv")
.arg(&conf_dir_tmp)
.arg(&conf_dir)
.invoke(ErrorKind::Filesystem)
.await?;
let mut old_version = pg_version;
while old_version > 13
/* oldest pg version included in startos */
{
old_version -= 1;
let old_datadir = db_dir.join(old_version.to_string());
if tokio::fs::metadata(&old_datadir).await.is_ok() {
tokio::fs::File::create(&incomplete_path)
.await?
.sync_all()
.await?;
Command::new("pg_upgradecluster")
.arg(old_version.to_string())
.arg("main")
.invoke(crate::ErrorKind::Database)
.await?;
break;
}
}
if tokio::fs::metadata(&conf_dir).await.is_ok() {
if tokio::fs::metadata(&conf_dir).await.is_ok() {
tokio::fs::remove_dir_all(&conf_dir).await?;
}
Command::new("mv")
.arg(&conf_dir_tmp)
.arg(&conf_dir)
.invoke(ErrorKind::Filesystem)
.await?;
}
tokio::fs::remove_file(&incomplete_path).await?;
}
if tokio::fs::metadata(&incomplete_path).await.is_ok() {
unreachable!() // paranoia
}
}
@@ -230,18 +250,6 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
|| &*server_info.version < &emver::Version::new(0, 3, 2, 0)
|| (*ARCH == "x86_64" && &*server_info.version < &emver::Version::new(0, 3, 4, 0));
let song = if should_rebuild {
Some(NonDetachingJoinHandle::from(tokio::spawn(async {
loop {
BEP.play().await.unwrap();
BEP.play().await.unwrap();
tokio::time::sleep(Duration::from_secs(60)).await;
}
})))
} else {
None
};
let log_dir = cfg.datadir().join("main/logs");
if tokio::fs::metadata(&log_dir).await.is_err() {
tokio::fs::create_dir_all(&log_dir).await?;
@@ -318,12 +326,13 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
tracing::info!("Created Docker Network");
}
let datadir = cfg.datadir();
tracing::info!("Loading System Docker Images");
crate::install::load_images("/usr/lib/startos/system-images").await?;
crate::install::rebuild_from("/usr/lib/startos/system-images", &datadir).await?;
tracing::info!("Loaded System Docker Images");
tracing::info!("Loading Package Docker Images");
crate::install::load_images(cfg.datadir().join(PKG_ARCHIVE_DIR)).await?;
crate::install::rebuild_from(datadir.join(PKG_ARCHIVE_DIR), &datadir).await?;
tracing::info!("Loaded Package Docker Images");
}
@@ -333,6 +342,7 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
.arg("run")
.arg("-d")
.arg("--rm")
.arg("--init")
.arg("--network=start9")
.arg("--name=netdummy")
.arg("start9/x_system/utils:latest")
@@ -354,28 +364,27 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
.await?;
tracing::info!("Enabled Docker QEMU Emulation");
if current_governor()
.await?
.map(|g| &g != &GOVERNOR_PERFORMANCE)
.unwrap_or(false)
{
tracing::info!("Setting CPU Governor to \"{}\"", GOVERNOR_PERFORMANCE);
if get_available_governors()
.await?
.contains(&GOVERNOR_PERFORMANCE)
{
set_governor(&GOVERNOR_PERFORMANCE).await?;
tracing::info!("Set CPU Governor");
let governor = if let Some(governor) = &server_info.governor {
if get_available_governors().await?.contains(governor) {
Some(governor)
} else {
tracing::warn!("CPU Governor \"{}\" Not Available", GOVERNOR_PERFORMANCE)
tracing::warn!("CPU Governor \"{governor}\" Not Available");
None
}
} else {
get_preferred_governor().await?
};
if let Some(governor) = governor {
tracing::info!("Setting CPU Governor to \"{governor}\"");
set_governor(governor).await?;
tracing::info!("Set CPU Governor");
}
let mut time_not_synced = true;
server_info.ntp_synced = false;
let mut not_made_progress = 0u32;
for _ in 0..1800 {
if check_time_is_synchronized().await? {
time_not_synced = false;
server_info.ntp_synced = true;
break;
}
let t = SystemTime::now();
@@ -392,7 +401,7 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
break;
}
}
if time_not_synced {
if !server_info.ntp_synced {
tracing::warn!("Timed out waiting for system time to synchronize");
} else {
tracing::info!("Syncronized system clock");
@@ -410,21 +419,6 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
restarting: false,
};
server_info.ntp_synced = if time_not_synced {
let db = db.clone();
tokio::spawn(async move {
while !check_time_is_synchronized().await.unwrap() {
tokio::time::sleep(Duration::from_secs(30)).await;
}
db.mutate(|v| v.as_server_info_mut().as_ntp_synced_mut().ser(&true))
.await
.unwrap()
});
false
} else {
true
};
db.mutate(|v| {
v.as_server_info_mut().ser(&server_info)?;
Ok(())
@@ -447,8 +441,6 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
}?;
}
drop(song);
tracing::info!("System initialized.");
Ok(InitResult { secret_store, db })

View File

@@ -891,102 +891,11 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
}
tracing::info!("Install {}@{}: Fetched Dependency Info", pkg_id, version);
let public_dir_path = ctx
.datadir
.join(PKG_PUBLIC_DIR)
.join(pkg_id)
.join(version.as_str());
tokio::fs::create_dir_all(&public_dir_path).await?;
tracing::info!("Install {}@{}: Unpacking LICENSE.md", pkg_id, version);
progress
.track_read_during(ctx.db.clone(), pkg_id, || async {
let license_path = public_dir_path.join("LICENSE.md");
let mut dst = File::create(&license_path).await?;
tokio::io::copy(&mut rdr.license().await?, &mut dst).await?;
dst.sync_all().await?;
Ok(())
let icon = progress
.track_read_during(ctx.db.clone(), pkg_id, || {
unpack_s9pk(&ctx.datadir, &manifest, rdr)
})
.await?;
tracing::info!("Install {}@{}: Unpacked LICENSE.md", pkg_id, version);
tracing::info!("Install {}@{}: Unpacking INSTRUCTIONS.md", pkg_id, version);
progress
.track_read_during(ctx.db.clone(), pkg_id, || async {
let instructions_path = public_dir_path.join("INSTRUCTIONS.md");
let mut dst = File::create(&instructions_path).await?;
tokio::io::copy(&mut rdr.instructions().await?, &mut dst).await?;
dst.sync_all().await?;
Ok(())
})
.await?;
tracing::info!("Install {}@{}: Unpacked INSTRUCTIONS.md", pkg_id, version);
let icon_filename = Path::new("icon").with_extension(manifest.assets.icon_type());
let icon_path = public_dir_path.join(&icon_filename);
tracing::info!(
"Install {}@{}: Unpacking {}",
pkg_id,
version,
icon_path.display()
);
let icon_buf = progress
.track_read_during(ctx.db.clone(), pkg_id, || async {
Ok(rdr.icon().await?.to_vec().await?)
})
.await?;
let mut dst = File::create(&icon_path).await?;
dst.write_all(&icon_buf).await?;
dst.sync_all().await?;
let icon = DataUrl::from_vec(
mime(manifest.assets.icon_type()).unwrap_or("image/png"),
icon_buf,
);
tracing::info!(
"Install {}@{}: Unpacked {}",
pkg_id,
version,
icon_filename.display()
);
tracing::info!("Install {}@{}: Unpacking Docker Images", pkg_id, version);
progress
.track_read_during(ctx.db.clone(), pkg_id, || async {
Command::new(CONTAINER_TOOL)
.arg("load")
.input(Some(&mut rdr.docker_images().await?))
.invoke(ErrorKind::Docker)
.await
})
.await?;
tracing::info!("Install {}@{}: Unpacked Docker Images", pkg_id, version,);
tracing::info!("Install {}@{}: Unpacking Assets", pkg_id, version);
progress
.track_read_during(ctx.db.clone(), pkg_id, || async {
let asset_dir = asset_dir(&ctx.datadir, pkg_id, version);
if tokio::fs::metadata(&asset_dir).await.is_err() {
tokio::fs::create_dir_all(&asset_dir).await?;
}
let mut tar = tokio_tar::Archive::new(rdr.assets().await?);
tar.unpack(asset_dir).await?;
let script_dir = script_dir(&ctx.datadir, pkg_id, version);
if tokio::fs::metadata(&script_dir).await.is_err() {
tokio::fs::create_dir_all(&script_dir).await?;
}
if let Some(mut hdl) = rdr.scripts().await? {
tokio::io::copy(
&mut hdl,
&mut File::create(script_dir.join("embassy.js")).await?,
)
.await?;
}
Ok(())
})
.await?;
tracing::info!("Install {}@{}: Unpacked Assets", pkg_id, version);
progress.unpack_complete.store(true, Ordering::SeqCst);
@@ -1107,6 +1016,8 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
let mut auto_start = false;
let mut configured = false;
let mut to_cleanup = None;
if let PackageDataEntry::Updating(PackageDataEntryUpdating {
installed: prev, ..
}) = &prev
@@ -1148,7 +1059,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
auto_start = prev.status.main.running();
}
if &prev.manifest.version != version {
cleanup(&ctx, &prev.manifest.id, &prev.manifest.version).await?;
to_cleanup = Some((prev.manifest.id.clone(), prev.manifest.version.clone()));
}
} else if let PackageDataEntry::Restoring(PackageDataEntryRestoring { .. }) = prev {
next.installed.marketplace_url = manifest
@@ -1191,6 +1102,10 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
})
.await?;
if let Some((id, version)) = to_cleanup {
cleanup(&ctx, &id, &version).await?;
}
if configured && manifest.config.is_some() {
let breakages = BTreeMap::new();
let overrides = Default::default();
@@ -1237,15 +1152,103 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
}
#[instrument(skip_all)]
pub fn load_images<'a, P: AsRef<Path> + 'a + Send + Sync>(
datadir: P,
pub async fn unpack_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
datadir: impl AsRef<Path>,
manifest: &Manifest,
rdr: &mut S9pkReader<R>,
) -> Result<DataUrl<'static>, Error> {
let datadir = datadir.as_ref();
let pkg_id = &manifest.id;
let version = &manifest.version;
let public_dir_path = datadir
.join(PKG_PUBLIC_DIR)
.join(pkg_id)
.join(version.as_str());
tokio::fs::create_dir_all(&public_dir_path).await?;
tracing::info!("Install {}@{}: Unpacking LICENSE.md", pkg_id, version);
let license_path = public_dir_path.join("LICENSE.md");
let mut dst = File::create(&license_path).await?;
tokio::io::copy(&mut rdr.license().await?, &mut dst).await?;
dst.sync_all().await?;
tracing::info!("Install {}@{}: Unpacked LICENSE.md", pkg_id, version);
tracing::info!("Install {}@{}: Unpacking INSTRUCTIONS.md", pkg_id, version);
let instructions_path = public_dir_path.join("INSTRUCTIONS.md");
let mut dst = File::create(&instructions_path).await?;
tokio::io::copy(&mut rdr.instructions().await?, &mut dst).await?;
dst.sync_all().await?;
tracing::info!("Install {}@{}: Unpacked INSTRUCTIONS.md", pkg_id, version);
let icon_filename = Path::new("icon").with_extension(manifest.assets.icon_type());
let icon_path = public_dir_path.join(&icon_filename);
tracing::info!(
"Install {}@{}: Unpacking {}",
pkg_id,
version,
icon_path.display()
);
let icon_buf = rdr.icon().await?.to_vec().await?;
let mut dst = File::create(&icon_path).await?;
dst.write_all(&icon_buf).await?;
dst.sync_all().await?;
let icon = DataUrl::from_vec(
mime(manifest.assets.icon_type()).unwrap_or("image/png"),
icon_buf,
);
tracing::info!(
"Install {}@{}: Unpacked {}",
pkg_id,
version,
icon_filename.display()
);
tracing::info!("Install {}@{}: Unpacking Docker Images", pkg_id, version);
Command::new(CONTAINER_TOOL)
.arg("load")
.input(Some(&mut rdr.docker_images().await?))
.invoke(ErrorKind::Docker)
.await?;
tracing::info!("Install {}@{}: Unpacked Docker Images", pkg_id, version,);
tracing::info!("Install {}@{}: Unpacking Assets", pkg_id, version);
let asset_dir = asset_dir(datadir, pkg_id, version);
if tokio::fs::metadata(&asset_dir).await.is_ok() {
tokio::fs::remove_dir_all(&asset_dir).await?;
}
tokio::fs::create_dir_all(&asset_dir).await?;
let mut tar = tokio_tar::Archive::new(rdr.assets().await?);
tar.unpack(asset_dir).await?;
let script_dir = script_dir(datadir, pkg_id, version);
if tokio::fs::metadata(&script_dir).await.is_err() {
tokio::fs::create_dir_all(&script_dir).await?;
}
if let Some(mut hdl) = rdr.scripts().await? {
tokio::io::copy(
&mut hdl,
&mut File::create(script_dir.join("embassy.js")).await?,
)
.await?;
}
tracing::info!("Install {}@{}: Unpacked Assets", pkg_id, version);
Ok(icon)
}
#[instrument(skip_all)]
pub fn rebuild_from<'a>(
source: impl AsRef<Path> + 'a + Send + Sync,
datadir: impl AsRef<Path> + 'a + Send + Sync,
) -> BoxFuture<'a, Result<(), Error>> {
async move {
let docker_dir = datadir.as_ref();
if tokio::fs::metadata(&docker_dir).await.is_ok() {
ReadDirStream::new(tokio::fs::read_dir(&docker_dir).await?)
let source_dir = source.as_ref();
let datadir = datadir.as_ref();
if tokio::fs::metadata(&source_dir).await.is_ok() {
ReadDirStream::new(tokio::fs::read_dir(&source_dir).await?)
.map(|r| {
r.with_ctx(|_| (crate::ErrorKind::Filesystem, format!("{:?}", &docker_dir)))
r.with_ctx(|_| (crate::ErrorKind::Filesystem, format!("{:?}", &source_dir)))
})
.try_for_each(|entry| async move {
let m = entry.metadata().await?;
@@ -1260,26 +1263,21 @@ pub fn load_images<'a, P: AsRef<Path> + 'a + Send + Sync>(
.arg("load")
.input(Some(&mut File::open(&path).await?))
.invoke(ErrorKind::Docker)
.await
.await?;
Ok::<_, Error>(())
}
Some("s9pk") => {
Command::new(CONTAINER_TOOL)
.arg("load")
.input(Some(
&mut S9pkReader::open(&path, true)
.await?
.docker_images()
.await?,
))
.invoke(ErrorKind::Docker)
.await
let mut s9pk = S9pkReader::open(&path, true).await?;
unpack_s9pk(datadir, &s9pk.manifest().await?, &mut s9pk)
.await?;
Ok(())
}
_ => unreachable!(),
}
}
.await
{
tracing::error!("Error loading docker images from s9pk: {e}");
tracing::error!("Error unpacking {path:?}: {e}");
tracing::debug!("{e:?}");
}
Ok(())
@@ -1287,7 +1285,7 @@ pub fn load_images<'a, P: AsRef<Path> + 'a + Send + Sync>(
Ok(())
}
} else if m.is_dir() {
load_images(entry.path()).await?;
rebuild_from(entry.path(), datadir).await?;
Ok(())
} else {
Ok(())

View File

@@ -1,5 +1,3 @@
#![recursion_limit = "256"]
pub const DEFAULT_MARKETPLACE: &str = "https://registry.start9.com";
// pub const COMMUNITY_MARKETPLACE: &str = "https://community-registry.start9.com";
pub const BUFFER_SIZE: usize = 1024;
@@ -107,6 +105,7 @@ pub fn main_api() -> Result<(), RpcError> {
shutdown::restart,
shutdown::rebuild,
update::update_system,
firmware::update_firmware,
))]
pub fn server() -> Result<(), RpcError> {
Ok(())

View File

@@ -1,15 +1,23 @@
use std::collections::BTreeMap;
use clap::ArgMatches;
use color_eyre::eyre::eyre;
use models::{Id, InterfaceId, PackageId};
use openssl::pkey::{PKey, Private};
use openssl::sha::Sha256;
use openssl::x509::X509;
use p256::elliptic_curve::pkcs8::EncodePrivateKey;
use sqlx::PgExecutor;
use rpc_toolkit::command;
use sqlx::{Acquire, PgExecutor};
use ssh_key::private::Ed25519PrivateKey;
use torut::onion::{OnionAddressV3, TorSecretKeyV3};
use tracing::instrument;
use zeroize::Zeroize;
use crate::config::{configure, ConfigureContext};
use crate::context::RpcContext;
use crate::control::restart;
use crate::disk::fsck::RequiresReboot;
use crate::net::ssl::CertPair;
use crate::prelude::*;
use crate::util::crypto::ed25519_expand_key;
@@ -274,3 +282,107 @@ pub fn test_keygen() {
key.tor_key();
key.openssl_key_nistp256();
}
fn display_requires_reboot(arg: RequiresReboot, _matches: &ArgMatches) {
if arg.0 {
println!("Server must be restarted for changes to take effect");
}
}
/// `rotate-key`: regenerate the network key either for one package interface
/// (requires both `package` and `interface`) or for the server itself (neither
/// given).
///
/// Package rotation deletes the stored tor/network keys for that interface,
/// derives a fresh key, rewrites the package's published LAN/tor addresses in
/// the db, then re-configures the package (if it has a config) or restarts it.
/// Server rotation clears the account tor key, regenerates the network key,
/// and publishes the new tor address; it reports that a reboot is required.
#[command(rename = "rotate-key", display(display_requires_reboot))]
pub async fn rotate_key(
    #[context] ctx: RpcContext,
    #[arg] package: Option<PackageId>,
    #[arg] interface: Option<InterfaceId>,
) -> Result<RequiresReboot, Error> {
    // All secret-store changes go through one transaction so a failure
    // part-way through leaves the previous keys intact.
    let mut pgcon = ctx.secret_store.acquire().await?;
    let mut tx = pgcon.begin().await?;
    if let Some(package) = package {
        // A package key is scoped to an interface; refuse ambiguous requests.
        let Some(interface) = interface else {
            return Err(Error::new(
                eyre!("Must specify interface"),
                ErrorKind::InvalidRequest,
            ));
        };
        // Drop the existing onion and network keys; a fresh key is derived below.
        sqlx::query!(
            "DELETE FROM tor WHERE package = $1 AND interface = $2",
            &package,
            &interface,
        )
        .execute(&mut *tx)
        .await?;
        sqlx::query!(
            "DELETE FROM network_keys WHERE package = $1 AND interface = $2",
            &package,
            &interface,
        )
        .execute(&mut *tx)
        .await?;
        let new_key =
            Key::for_interface(&mut *tx, Some((package.clone(), interface.clone()))).await?;
        // Rewrite the package's published addresses; `needs_config` is true when
        // the package declares a config (its `configured` flag is cleared so the
        // configure step below runs).
        let needs_config = ctx
            .db
            .mutate(|v| {
                let installed = v
                    .as_package_data_mut()
                    .as_idx_mut(&package)
                    .or_not_found(&package)?
                    .as_installed_mut()
                    .or_not_found("installed")?;
                let addrs = installed
                    .as_interface_addresses_mut()
                    .as_idx_mut(&interface)
                    .or_not_found(&interface)?;
                if let Some(lan) = addrs.as_lan_address_mut().transpose_mut() {
                    lan.ser(&new_key.local_address())?;
                }
                if let Some(lan) = addrs.as_tor_address_mut().transpose_mut() {
                    lan.ser(&new_key.tor_address().to_string())?;
                }
                if installed
                    .as_manifest()
                    .as_config()
                    .transpose_ref()
                    .is_some()
                {
                    installed
                        .as_status_mut()
                        .as_configured_mut()
                        .replace(&false)
                } else {
                    Ok(false)
                }
            })
            .await?;
        // Commit before configure/restart so the new key is durable even if the
        // follow-up action fails.
        tx.commit().await?;
        if needs_config {
            // NOTE(review): presumably re-configuring propagates the new
            // addresses into the package's config — confirm.
            configure(
                &ctx,
                &package,
                ConfigureContext {
                    breakages: BTreeMap::new(),
                    timeout: None,
                    config: None,
                    overrides: BTreeMap::new(),
                    dry_run: false,
                },
            )
            .await?;
        } else {
            restart(ctx, package).await?;
        }
        // Package-level rotation never requires a server reboot.
        Ok(RequiresReboot(false))
    } else {
        // Server-wide rotation: null the tor key and regenerate the network key
        // in the database itself, then derive and publish the new tor address.
        sqlx::query!("UPDATE account SET tor_key = NULL, network_key = gen_random_bytes(32)")
            .execute(&mut *tx)
            .await?;
        let new_key = Key::for_interface(&mut *tx, None).await?;
        let url = format!("https://{}", new_key.tor_address()).parse()?;
        ctx.db
            .mutate(|v| v.as_server_info_mut().as_tor_address_mut().ser(&url))
            .await?;
        tx.commit().await?;
        Ok(RequiresReboot(true))
    }
}

View File

@@ -23,7 +23,7 @@ pub mod wifi;
pub const PACKAGE_CERT_PATH: &str = "/var/lib/embassy/ssl";
#[command(subcommands(tor::tor, dhcp::dhcp, ssl::ssl))]
#[command(subcommands(tor::tor, dhcp::dhcp, ssl::ssl, keys::rotate_key))]
pub fn net() -> Result<(), Error> {
Ok(())
}

View File

@@ -23,6 +23,7 @@ use tokio_util::io::ReaderStream;
use crate::context::{DiagnosticContext, InstallContext, RpcContext, SetupContext};
use crate::core::rpc_continuations::RequestGuid;
use crate::db::subscribe;
use crate::hostname::Hostname;
use crate::install::PKG_PUBLIC_DIR;
use crate::middleware::auth::{auth as auth_middleware, HasValidSession};
use crate::middleware::cors::cors;
@@ -339,7 +340,8 @@ async fn main_embassy_ui(req: Request<Body>, ctx: RpcContext) -> Result<Response
.await
}
(&Method::GET, Some(("eos", "local.crt"))) => {
cert_send(&ctx.account.read().await.root_ca_cert)
let account = ctx.account.read().await;
cert_send(&account.root_ca_cert, &account.hostname)
}
(&Method::GET, _) => {
let uri_path = UiMode::Main.path(
@@ -405,7 +407,7 @@ fn bad_request() -> Response<Body> {
.unwrap()
}
fn cert_send(cert: &X509) -> Result<Response<Body>, Error> {
fn cert_send(cert: &X509, hostname: &Hostname) -> Result<Response<Body>, Error> {
let pem = cert.to_pem()?;
Response::builder()
.status(StatusCode::OK)
@@ -417,8 +419,12 @@ fn cert_send(cert: &X509) -> Result<Response<Body>, Error> {
)
.to_lowercase(),
)
.header(http::header::CONTENT_TYPE, "application/x-pem-file")
.header(http::header::CONTENT_TYPE, "application/x-x509-ca-cert")
.header(http::header::CONTENT_LENGTH, pem.len())
.header(
http::header::CONTENT_DISPOSITION,
format!("attachment; filename={}.crt", &hostname.0),
)
.body(Body::from(pem))
.with_kind(ErrorKind::Network)
}

View File

@@ -53,11 +53,6 @@ lazy_static! {
static ref PROGRESS_REGEX: Regex = Regex::new("PROGRESS=([0-9]+)").unwrap();
}
#[test]
fn random_key() {
println!("x'{}'", hex::encode(rand::random::<[u8; 32]>()));
}
#[command(subcommands(list_services, logs, reset))]
pub fn tor() -> Result<(), Error> {
Ok(())

View File

@@ -225,18 +225,18 @@ async fn test_start_deno_command() -> Result<Command, Error> {
.arg("build")
.invoke(ErrorKind::Unknown)
.await?;
if tokio::fs::metadata("target/debug/start-deno")
if tokio::fs::metadata("../target/debug/start-deno")
.await
.is_err()
{
Command::new("ln")
.arg("-rsf")
.arg("target/debug/startbox")
.arg("target/debug/start-deno")
.arg("../target/debug/startbox")
.arg("../target/debug/start-deno")
.invoke(crate::ErrorKind::Filesystem)
.await?;
}
Ok(Command::new("target/debug/start-deno"))
Ok(Command::new("../target/debug/start-deno"))
}
#[cfg(test)]

View File

@@ -54,6 +54,17 @@ impl Shutdown {
tracing::error!("Error Stopping Docker: {}", e);
tracing::debug!("{:?}", e);
}
} else if CONTAINER_TOOL == "podman" {
if let Err(e) = Command::new("podman")
.arg("rm")
.arg("-f")
.arg("netdummy")
.invoke(crate::ErrorKind::Docker)
.await
{
tracing::error!("Error Stopping Podman: {}", e);
tracing::debug!("{:?}", e);
}
}
if let Some((guid, datadir)) = &self.export_args {
if let Err(e) = export(guid, datadir).await {

View File

@@ -1,3 +1,4 @@
use std::collections::BTreeSet;
use std::fmt;
use chrono::Utc;
@@ -20,11 +21,12 @@ use crate::logs::{
};
use crate::prelude::*;
use crate::shutdown::Shutdown;
use crate::util::cpupower::{get_available_governors, set_governor, Governor};
use crate::util::serde::{display_serializable, IoFormat};
use crate::util::{display_none, Invoke};
use crate::{Error, ErrorKind, ResultExt};
#[command(subcommands(zram))]
#[command(subcommands(zram, governor))]
pub async fn experimental() -> Result<(), Error> {
Ok(())
}
@@ -85,6 +87,56 @@ pub async fn zram(#[context] ctx: RpcContext, #[arg] enable: bool) -> Result<(),
Ok(())
}
/// Result payload for the `governor` subcommand: the governor recorded in the
/// db plus every governor the hardware reports as available.
#[derive(Debug, Deserialize, Serialize)]
pub struct GovernorInfo {
    // Read from server-info in the db; NOTE(review): semantics of `None`
    // (never set vs. unknown) are not visible here — confirm.
    current: Option<Governor>,
    // Populated from `get_available_governors()`.
    available: BTreeSet<Governor>,
}
/// CLI display hook for the `governor` subcommand: honors `--format` by
/// delegating to the serializable printer, otherwise renders a table of
/// available governors with the current one marked.
fn display_governor_info(arg: GovernorInfo, matches: &ArgMatches) {
    use prettytable::*;
    if matches.is_present("format") {
        return display_serializable(arg, matches);
    }
    let mut table = Table::new();
    table.add_row(row![bc -> "GOVERNORS"]);
    for entry in arg.available {
        if Some(&entry) == arg.current.as_ref() {
            // Highlight the active governor (green row, "(current)" suffix).
            table.add_row(row![g -> format!("* {entry} (current)")]);
        } else {
            table.add_row(row![entry]);
        }
    }
    table.print_tty(false).unwrap();
}
/// `governor` subcommand: list the available CPU frequency governors and
/// optionally switch to one.
///
/// When `set` is given it is validated against the available set, applied via
/// `set_governor`, and persisted to server-info in the db. Always returns the
/// (possibly updated) current governor plus the available set.
#[command(display(display_governor_info))]
pub async fn governor(
    #[context] ctx: RpcContext,
    // Consumed by the display hook (`display_governor_info`), not used here.
    #[allow(unused_variables)]
    #[arg(long = "format")]
    format: Option<IoFormat>,
    #[arg] set: Option<Governor>,
) -> Result<GovernorInfo, Error> {
    let available = get_available_governors().await?;
    if let Some(set) = set {
        // Reject governors the hardware does not expose.
        if !available.contains(&set) {
            return Err(Error::new(
                eyre!("Governor {set} not available"),
                ErrorKind::InvalidRequest,
            ));
        }
        set_governor(&set).await?;
        ctx.db
            .mutate(|d| d.as_server_info_mut().as_governor_mut().ser(&Some(set)))
            .await?;
    }
    // Re-read from the db so the response reflects the persisted state.
    let current = ctx.db.peek().await.as_server_info().as_governor().de()?;
    Ok(GovernorInfo { current, available })
}
#[derive(Serialize, Deserialize)]
pub struct TimeInfo {
now: String,

View File

@@ -7,10 +7,20 @@ use tokio::process::Command;
use crate::prelude::*;
use crate::util::Invoke;
pub const GOVERNOR_PERFORMANCE: Governor = Governor(Cow::Borrowed("performance"));
/// Preferred governors in priority order; `get_preferred_governor` picks the
/// first of these that the hardware supports.
/// NOTE(review): "HEIRARCHY" is a typo for "HIERARCHY", but renaming the const
/// would touch external callers — flagging only.
pub const GOVERNOR_HEIRARCHY: &[Governor] = &[
    Governor(Cow::Borrowed("ondemand")),
    Governor(Cow::Borrowed("schedutil")),
    Governor(Cow::Borrowed("conservative")),
];
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, serde::Serialize, serde::Deserialize)]
pub struct Governor(Cow<'static, str>);
impl std::str::FromStr for Governor {
type Err = std::convert::Infallible;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Self(s.to_owned().into()))
}
}
impl std::fmt::Display for Governor {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0.fmt(f)
@@ -29,13 +39,12 @@ impl std::borrow::Borrow<str> for Governor {
}
pub async fn get_available_governors() -> Result<BTreeSet<Governor>, Error> {
let raw = String::from_utf8(
Command::new("cpupower")
.arg("frequency-info")
.arg("-g")
.invoke(ErrorKind::CpuSettings)
.await?,
)?;
let raw = Command::new("cpupower")
.arg("frequency-info")
.arg("-g")
.invoke(ErrorKind::CpuSettings)
.await
.map_or_else(|e| Ok(e.source.to_string()), String::from_utf8)?;
let mut for_cpu: OrdMap<u32, BTreeSet<Governor>> = OrdMap::new();
let mut current_cpu = None;
for line in raw.lines() {
@@ -114,6 +123,16 @@ pub async fn current_governor() -> Result<Option<Governor>, Error> {
))
}
/// Choose the best governor for this machine: the first entry of
/// `GOVERNOR_HEIRARCHY` that the hardware actually supports, or `None` when
/// none of the preferred governors are available.
pub async fn get_preferred_governor() -> Result<Option<&'static Governor>, Error> {
    let available = get_available_governors().await?;
    Ok(GOVERNOR_HEIRARCHY
        .iter()
        .find(|candidate| available.contains(*candidate)))
}
pub async fn set_governor(governor: &Governor) -> Result<(), Error> {
Command::new("cpupower")
.arg("frequency-set")

View File

@@ -8,7 +8,7 @@ use sqlx::PgPool;
use crate::prelude::*;
use crate::Error;
mod v0_3_5;
mod v0_3_5_1;
mod v0_4_0;
pub type Current = v0_4_0::Version;
@@ -16,8 +16,8 @@ pub type Current = v0_4_0::Version;
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)]
#[serde(untagged)]
enum Version {
LT0_3_5(LTWrapper<v0_3_5::Version>),
V0_3_5(Wrapper<v0_3_5::Version>),
LT0_3_5_1(LTWrapper<v0_3_5_1::Version>),
V0_3_5_1(Wrapper<v0_3_5_1::Version>),
V0_4_0(Wrapper<v0_4_0::Version>),
Other(emver::Version),
}
@@ -34,8 +34,8 @@ impl Version {
#[cfg(test)]
fn as_sem_ver(&self) -> emver::Version {
match self {
Version::LT0_3_5(LTWrapper(_, x)) => x.clone(),
Version::V0_3_5(Wrapper(x)) => x.semver(),
Version::LT0_3_5_1(LTWrapper(_, x)) => x.clone(),
Version::V0_3_5_1(Wrapper(x)) => x.semver(),
Version::V0_4_0(Wrapper(x)) => x.semver(),
Version::Other(x) => x.clone(),
}
@@ -182,13 +182,13 @@ pub async fn init(db: &PatchDb, secrets: &PgPool) -> Result<(), Error> {
let version = Version::from_util_version(db.peek().await.as_server_info().as_version().de()?);
match version {
Version::LT0_3_5(_) => {
Version::LT0_3_5_1(_) => {
return Err(Error::new(
eyre!("Cannot migrate from pre-0.3.5. Please update to v0.3.5 first."),
eyre!("Cannot migrate from pre-0.3.5.1. Please update to v0.3.5.1 first."),
crate::ErrorKind::MigrationFailed,
));
}
Version::V0_3_5(v) => v.0.migrate_to(&Current::new(), db, secrets).await?,
Version::V0_3_5_1(v) => v.0.migrate_to(&Current::new(), db, secrets).await?,
Version::V0_4_0(v) => v.0.migrate_to(&Current::new(), db, secrets).await?,
Version::Other(_) => {
return Err(Error::new(
@@ -222,15 +222,15 @@ mod tests {
fn versions() -> impl Strategy<Value = Version> {
prop_oneof![
em_version().prop_map(|v| if v < v0_3_5::Version::new().semver() {
Version::LT0_3_5(LTWrapper(v0_3_5::Version::new(), v))
em_version().prop_map(|v| if v < v0_3_5_1::Version::new().semver() {
Version::LT0_3_5_1(LTWrapper(v0_3_5_1::Version::new(), v))
} else {
Version::LT0_3_5(LTWrapper(
v0_3_5::Version::new(),
Version::LT0_3_5_1(LTWrapper(
v0_3_5_1::Version::new(),
emver::Version::new(0, 3, 0, 0),
))
}),
Just(Version::V0_3_5(Wrapper(v0_3_5::Version::new()))),
Just(Version::V0_3_5_1(Wrapper(v0_3_5_1::Version::new()))),
Just(Version::V0_4_0(Wrapper(v0_4_0::Version::new()))),
em_version().prop_map(Version::Other),
]

View File

@@ -1,135 +0,0 @@
use async_trait::async_trait;
use emver::VersionRange;
use itertools::Itertools;
use openssl::hash::MessageDigest;
use serde_json::json;
use ssh_key::public::Ed25519PublicKey;
use super::*;
use crate::account::AccountInfo;
use crate::hostname::{sync_hostname, Hostname};
use crate::prelude::*;
const V0_3_4: emver::Version = emver::Version::new(0, 3, 4, 0);
lazy_static::lazy_static! {
pub static ref V0_3_0_COMPAT: VersionRange = VersionRange::Conj(
Box::new(VersionRange::Anchor(
emver::GTE,
emver::Version::new(0, 3, 0, 0),
)),
Box::new(VersionRange::Anchor(emver::LTE, Current::new().semver())),
);
}
const COMMUNITY_URL: &str = "https://community-registry.start9.com/";
const MAIN_REGISTRY: &str = "https://registry.start9.com/";
const COMMUNITY_SERVICES: &[&str] = &[
"ipfs",
"agora",
"lightning-jet",
"balanceofsatoshis",
"mastodon",
"lndg",
"robosats",
"thunderhub",
"syncthing",
"sphinx-relay",
];
#[derive(Clone, Debug)]
pub struct Version;
#[async_trait]
impl VersionT for Version {
type Previous = Self;
fn new() -> Self {
Version
}
fn semver(&self) -> emver::Version {
V0_3_4
}
fn compat(&self) -> &'static VersionRange {
&*V0_3_0_COMPAT
}
async fn up(&self, db: PatchDb, secrets: &PgPool) -> Result<(), Error> {
let mut account = AccountInfo::load(secrets).await?;
let account = db
.mutate(|d| {
d.as_server_info_mut().as_pubkey_mut().ser(
&ssh_key::PublicKey::from(Ed25519PublicKey::from(&account.key.ssh_key()))
.to_openssh()?,
)?;
d.as_server_info_mut().as_ca_fingerprint_mut().ser(
&account
.root_ca_cert
.digest(MessageDigest::sha256())
.unwrap()
.iter()
.map(|x| format!("{x:X}"))
.join(":"),
)?;
let server_info = d.as_server_info();
account.hostname = server_info.as_hostname().de().map(Hostname)?;
account.server_id = server_info.as_id().de()?;
Ok(account)
})
.await?;
account.save(secrets).await?;
sync_hostname(&account.hostname).await?;
let parsed_url = Some(COMMUNITY_URL.parse().unwrap());
db.mutate(|d| {
let mut ui = d.as_ui().de()?;
use imbl_value::json;
ui["marketplace"]["known-hosts"][COMMUNITY_URL] = json!({});
ui["marketplace"]["known-hosts"][MAIN_REGISTRY] = json!({});
for package_id in d.as_package_data().keys()? {
if !COMMUNITY_SERVICES.contains(&&*package_id.to_string()) {
continue;
}
d.as_package_data_mut()
.as_idx_mut(&package_id)
.or_not_found(&package_id)?
.as_installed_mut()
.or_not_found(&package_id)?
.as_marketplace_url_mut()
.ser(&parsed_url)?;
}
ui["theme"] = json!("Dark".to_string());
ui["widgets"] = json!([]);
d.as_ui_mut().ser(&ui)
})
.await
}
async fn down(&self, db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
db.mutate(|d| {
let mut ui = d.as_ui().de()?;
let parsed_url = Some(MAIN_REGISTRY.parse().unwrap());
for package_id in d.as_package_data().keys()? {
if !COMMUNITY_SERVICES.contains(&&*package_id.to_string()) {
continue;
}
d.as_package_data_mut()
.as_idx_mut(&package_id)
.or_not_found(&package_id)?
.as_installed_mut()
.or_not_found(&package_id)?
.as_marketplace_url_mut()
.ser(&parsed_url)?;
}
if let imbl_value::Value::Object(ref mut obj) = ui {
obj.remove("theme");
obj.remove("widgets");
}
ui["marketplace"]["known-hosts"][COMMUNITY_URL].take();
ui["marketplace"]["known-hosts"][MAIN_REGISTRY].take();
d.as_ui_mut().ser(&ui)
})
.await
}
}

View File

@@ -1,31 +0,0 @@
use async_trait::async_trait;
use emver::VersionRange;
use super::v0_3_4::V0_3_0_COMPAT;
use super::*;
use crate::prelude::*;
const V0_3_4_1: emver::Version = emver::Version::new(0, 3, 4, 1);
#[derive(Clone, Debug)]
pub struct Version;
#[async_trait]
impl VersionT for Version {
type Previous = v0_3_4::Version;
fn new() -> Self {
Version
}
fn semver(&self) -> emver::Version {
V0_3_4_1
}
fn compat(&self) -> &'static VersionRange {
&*V0_3_0_COMPAT
}
async fn up(&self, _db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
Ok(())
}
async fn down(&self, _db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
Ok(())
}
}

View File

@@ -1,31 +0,0 @@
use async_trait::async_trait;
use emver::VersionRange;
use super::v0_3_4::V0_3_0_COMPAT;
use super::*;
use crate::prelude::*;
const V0_3_4_2: emver::Version = emver::Version::new(0, 3, 4, 2);
#[derive(Clone, Debug)]
pub struct Version;
#[async_trait]
impl VersionT for Version {
type Previous = v0_3_4_1::Version;
fn new() -> Self {
Version
}
fn semver(&self) -> emver::Version {
V0_3_4_2
}
fn compat(&self) -> &'static VersionRange {
&*V0_3_0_COMPAT
}
async fn up(&self, _db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
Ok(())
}
async fn down(&self, _db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
Ok(())
}
}

View File

@@ -1,31 +0,0 @@
use async_trait::async_trait;
use emver::VersionRange;
use super::v0_3_4::V0_3_0_COMPAT;
use super::*;
use crate::prelude::*;
const V0_3_4_3: emver::Version = emver::Version::new(0, 3, 4, 3);
#[derive(Clone, Debug)]
pub struct Version;
#[async_trait]
impl VersionT for Version {
type Previous = v0_3_4_2::Version;
fn new() -> Self {
Version
}
fn semver(&self) -> emver::Version {
V0_3_4_3
}
fn compat(&self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
async fn up(&self, _db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
Ok(())
}
async fn down(&self, _db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
Ok(())
}
}

View File

@@ -1,43 +0,0 @@
use async_trait::async_trait;
use emver::VersionRange;
use models::ResultExt;
use sqlx::PgPool;
use super::v0_3_4::V0_3_0_COMPAT;
use super::{v0_3_4_3, VersionT};
use crate::prelude::*;
const V0_3_4_4: emver::Version = emver::Version::new(0, 3, 4, 4);
#[derive(Clone, Debug)]
pub struct Version;
#[async_trait]
impl VersionT for Version {
type Previous = v0_3_4_3::Version;
fn new() -> Self {
Version
}
fn semver(&self) -> emver::Version {
V0_3_4_4
}
fn compat(&self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
async fn up(&self, db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
db.mutate(|v| {
let tor_address_lens = v.as_server_info_mut().as_tor_address_mut();
let mut tor_addr = tor_address_lens.de()?;
tor_addr
.set_scheme("https")
.map_err(|_| eyre!("unable to update url scheme to https"))
.with_kind(crate::ErrorKind::ParseUrl)?;
tor_address_lens.ser(&tor_addr)
})
.await?;
Ok(())
}
async fn down(&self, _db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
Ok(())
}
}

View File

@@ -1,11 +1,11 @@
use async_trait::async_trait;
use emver::VersionRange;
use lazy_static::lazy_static;
use sqlx::PgPool;
use super::*;
use super::v0_3_4::V0_3_0_COMPAT;
use super::VersionT;
use crate::prelude::*;
const V0_3_5: emver::Version = emver::Version::new(0, 3, 5, 0);
lazy_static! {
static ref V0_3_0_COMPAT: VersionRange = VersionRange::Conj(
Box::new(VersionRange::Anchor(
@@ -15,6 +15,7 @@ lazy_static! {
Box::new(VersionRange::Anchor(emver::LTE, Current::new().semver())),
);
}
const V0_3_5_1: emver::Version = emver::Version::new(0, 3, 5, 1);
#[derive(Clone, Debug)]
pub struct Version;
@@ -26,15 +27,15 @@ impl VersionT for Version {
Version
}
fn semver(&self) -> emver::Version {
V0_3_5
V0_3_5_1
}
fn compat(&self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
async fn up(&self, _db: &PatchDb, _secrets: &PgPool) -> Result<(), Error> {
async fn up(&self, _db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
Ok(())
}
async fn down(&self, _db: &PatchDb, _secrets: &PgPool) -> Result<(), Error> {
async fn down(&self, _db: PatchDb, _secrets: &PgPool) -> Result<(), Error> {
Ok(())
}
}

28
download-firmware.sh Executable file
View File

@@ -0,0 +1,28 @@
#!/bin/bash
# Download the firmware ROMs listed in build/lib/firmware.json for one platform
# into ./firmware/<PLATFORM>, verifying each download against its pinned sha256.
#
# usage: ./download-firmware.sh <PLATFORM>
cd "$(dirname "${BASH_SOURCE[0]}")"
set -e
PLATFORM=$1
if [ -z "$PLATFORM" ]; then
	>&2 echo "usage: $0 <PLATFORM>"
	exit 1
fi
# Quote the path everywhere: an unquoted $PLATFORM would word-split (and an
# unquoted rm -rf is especially dangerous). Start from a clean directory so
# stale ROMs never linger.
rm -rf "./firmware/$PLATFORM"
mkdir -p "./firmware/$PLATFORM"
cd "./firmware/$PLATFORM"
# One compact JSON object per line, filtered to entries supporting this platform.
mapfile -t firmwares <<< "$(jq -c ".[] | select(.platform[] | contains(\"$PLATFORM\"))" ../../build/lib/firmware.json)"
for firmware in "${firmwares[@]}"; do
	if [ -n "$firmware" ]; then
		id=$(echo "$firmware" | jq --raw-output '.id')
		url=$(echo "$firmware" | jq --raw-output '.url')
		shasum=$(echo "$firmware" | jq --raw-output '.shasum')
		curl --fail -L -o "${id}.rom.gz" "$url"
		# Two spaces between hash and filename: the canonical coreutils check
		# format ("HASH  FILE"); some sha256sum builds reject a single space.
		# set -e aborts the script on mismatch.
		echo "$shasum  ${id}.rom.gz" | sha256sum -c
	fi
done

View File

@@ -158,8 +158,8 @@ echo "deb [arch=${IB_TARGET_ARCH} signed-by=/etc/apt/trusted.gpg.d/tor.key.gpg]
curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o config/archives/docker.key
echo "deb [arch=${IB_TARGET_ARCH} signed-by=/etc/apt/trusted.gpg.d/docker.key.gpg] https://download.docker.com/linux/debian ${IB_SUITE} stable" > config/archives/docker.list
curl -fsSL https://download.opensuse.org/repositories/devel:kubic:libcontainers:unstable/Debian_Testing/Release.key | gpg --dearmor -o config/archives/podman.key
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/trusted.gpg.d/podman.key.gpg] https://download.opensuse.org/repositories/devel:kubic:libcontainers:unstable/Debian_Testing/ /" > config/archives/podman.list
curl -fsSL https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/Debian_Testing/Release.key | gpg --dearmor -o config/archives/podman.key
echo "deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/trusted.gpg.d/podman.key.gpg] https://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/unstable/Debian_Testing/ /" > config/archives/podman.list
# Dependencies
@@ -344,4 +344,4 @@ elif [ "${IMAGE_TYPE}" = img ]; then
mv $TARGET_NAME $RESULTS_DIR/$IMAGE_BASENAME.img
fi
fi

View File

@@ -3819,9 +3819,12 @@ dependencies = [
[[package]]
name = "semver"
version = "1.0.16"
version = "1.0.20"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "58bc9567378fc7690d6b2addae4e60ac2eeea07becb2c64b9f218b53865cba2a"
checksum = "836fa6a3e1e547f9a2c4040802ec865b5d85f4014efe00555d7090a3dcaa1090"
dependencies = [
"serde",
]
[[package]]
name = "serde"
@@ -4418,7 +4421,7 @@ dependencies = [
[[package]]
name = "start-os"
version = "0.3.5"
version = "0.3.5-rev.1"
dependencies = [
"aes",
"async-compression",
@@ -4494,6 +4497,7 @@ dependencies = [
"rpc-toolkit",
"rust-argon2 2.0.0",
"scopeguard",
"semver",
"serde",
"serde_json",
"serde_with",

4
web/package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "startos-ui",
"version": "0.3.5",
"version": "0.3.5.1",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "startos-ui",
"version": "0.3.5",
"version": "0.3.5.1",
"dependencies": {
"@angular/animations": "^16.1.4",
"@angular/common": "^16.1.4",

View File

@@ -1,6 +1,6 @@
{
"name": "startos-ui",
"version": "0.3.5",
"version": "0.3.5.1",
"author": "Start9 Labs, Inc",
"homepage": "https://start9.com/",
"scripts": {

View File

@@ -44,7 +44,7 @@
size="small"
shape="round"
color="primary"
href="https://docs.start9.com/0.3.5.x/user-manual/trust-ca#establishing-trust"
href="https://docs.start9.com/0.3.5.x/user-manual/trust-ca"
target="_blank"
noreferrer
>
@@ -97,10 +97,4 @@
</ng-template>
</div>
<a
id="install-cert"
href="/eos/local.crt"
[download]="
config.isLocal() ? document.location.hostname + '.crt' : 'startos.crt'
"
></a>
<a id="install-cert" href="/eos/local.crt"></a>

View File

@@ -27,8 +27,8 @@
<ion-label>
<h2>{{ server.zram ? 'Disable' : 'Enable' }} zram</h2>
<p>
Enabling zram may improve server performance, especially on low RAM
devices
Zram creates compressed swap in memory, resulting in faster I/O for
low RAM devices
</p>
</ion-label>
</ion-item>

View File

@@ -59,11 +59,11 @@ export class ExperimentalFeaturesPage {
presentAlertZram(enabled: boolean) {
this.dialogs
.open(TUI_PROMPT, {
label: enabled ? 'Confirm' : 'Warning',
label: 'Confirm',
data: {
content: enabled
? 'Are you sure you want to disable zram?'
: 'zram on StartOS is experimental. It may increase performance of you server, especially if it is a low RAM device.',
? 'Are you sure you want to disable zram? It provides significant performance benefits on low RAM devices.'
: 'Enable zram? It will only make a difference on lower RAM devices.',
yes: enabled ? 'Disable' : 'Enable',
no: 'Cancel',
},
@@ -90,7 +90,7 @@ export class ExperimentalFeaturesPage {
private async toggleZram(enabled: boolean) {
const loader = this.loader
.open(enabled ? 'Disabling zram...' : 'Enabling zram')
.open(enabled ? 'Disabling zram...' : 'Enabling zram...')
.subscribe()
try {

View File

@@ -35,10 +35,10 @@ export module Mock {
'shutting-down': false,
}
export const MarketplaceEos: RR.GetMarketplaceEosRes = {
version: '0.3.5',
version: '0.3.5.1',
headline: 'Our biggest release ever.',
'release-notes': {
'0.3.5': 'Some **Markdown** release _notes_ for 0.3.5',
'0.3.5.1': 'Some **Markdown** release _notes_ for 0.3.5.1',
'0.3.4.4': 'Some **Markdown** release _notes_ for 0.3.4.4',
'0.3.4.3': 'Some **Markdown** release _notes_ for 0.3.4.3',
'0.3.4.2': 'Some **Markdown** release _notes_ for 0.3.4.2',

View File

@@ -36,7 +36,7 @@ export const mockPatchData: DataModel = {
},
'server-info': {
id: 'abcdefgh',
version: '0.3.5',
version: '0.3.5.1',
country: 'us',
ui: {
lanHostname: 'adjective-noun.local',

View File

@@ -5,8 +5,8 @@
"background_color": "#1e1e1e",
"display": "standalone",
"scope": ".",
"start_url": "/?version=035",
"id": "/?version=035",
"start_url": "/?version=0351",
"id": "/?version=0351",
"icons": [
{
"src": "assets/img/icon.png",