Feature/multi platform (#1866)

* wip

* wip

* wip

* wip

* wip

* wip

* remove debian dir

* lazy env and git hash

* remove env and git hash on clean

* don't leave project dir

* use docker for native builds

* start9 rust

* correctly mount registry

* remove systemd config

* switch to /usr/bin

* disable sound for now

* wip

* change disk list

* multi-arch images

* multi-arch system images

* default aarch64

* edition 2021

* dynamic wifi interface name

* use wifi interface from config

* bugfixes

* add beep based sound

* wip

* wip

* wip

* separate out raspberry pi specific files

* fixes

* use new initramfs always

* switch journald conf to sed script

* fixes

* fix permissions

* talking about kernel modules not scripts

* fix

* fix

* switch to MBR

* install to /usr/lib

* fixes

* fixes

* fixes

* fixes

* add media config to cfg path

* fixes

* fixes

* fixes

* raspi image fixes

* fix test

* fix workflow

* sync boot partition

* gahhhhh
This commit is contained in:
Aiden McClelland
2022-10-19 23:01:23 -06:00
parent 0511680fc5
commit 6ad9a5952e
87 changed files with 1734 additions and 1162 deletions

View File

@@ -15,23 +15,23 @@ jobs:
compat: compat:
uses: ./.github/workflows/reusable-workflow.yaml uses: ./.github/workflows/reusable-workflow.yaml
with: with:
build_command: make system-images/compat/compat.tar build_command: make system-images/compat/docker-images/aarch64.tar
artifact_name: compat.tar artifact_name: compat.tar
artifact_path: system-images/compat/compat.tar artifact_path: system-images/compat/docker-images/aarch64.tar
utils: utils:
uses: ./.github/workflows/reusable-workflow.yaml uses: ./.github/workflows/reusable-workflow.yaml
with: with:
build_command: make system-images/utils/utils.tar build_command: make system-images/utils/docker-images/aarch64.tar
artifact_name: utils.tar artifact_name: utils.tar
artifact_path: system-images/utils/utils.tar artifact_path: system-images/utils/docker-images/aarch64.tar
binfmt: binfmt:
uses: ./.github/workflows/reusable-workflow.yaml uses: ./.github/workflows/reusable-workflow.yaml
with: with:
build_command: make system-images/binfmt/binfmt.tar build_command: make system-images/binfmt/docker-images/aarch64.tar
artifact_name: binfmt.tar artifact_name: binfmt.tar
artifact_path: system-images/binfmt/binfmt.tar artifact_path: system-images/binfmt/docker-images/aarch64.tar
nc-broadcast: nc-broadcast:
uses: ./.github/workflows/reusable-workflow.yaml uses: ./.github/workflows/reusable-workflow.yaml
@@ -60,19 +60,19 @@ jobs:
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
with: with:
name: compat.tar name: compat.tar
path: system-images/compat path: system-images/compat/docker-images/
- name: Download utils.tar artifact - name: Download utils.tar artifact
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
with: with:
name: utils.tar name: utils.tar
path: system-images/utils path: system-images/utils/docker-images/
- name: Download binfmt.tar artifact - name: Download binfmt.tar artifact
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
with: with:
name: binfmt.tar name: binfmt.tar
path: system-images/binfmt path: system-images/binfmt/docker-images/
- name: Download nc-broadcast.tar artifact - name: Download nc-broadcast.tar artifact
uses: actions/download-artifact@v3 uses: actions/download-artifact@v3
@@ -126,12 +126,9 @@ jobs:
key: cache-raspios key: cache-raspios
- name: Build image - name: Build image
run: "make V=1 NO_KEY=1 eos.img --debug" run: "make V=1 ARCH=aarch64 embassyos-raspi.img --debug"
- name: Compress image
run: "make gzip"
- uses: actions/upload-artifact@v3 - uses: actions/upload-artifact@v3
with: with:
name: image name: image
path: eos.tar.gz path: embassyos-raspi.img

3
.gitignore vendored
View File

@@ -15,4 +15,5 @@ secrets.db
/cargo-deps/**/* /cargo-deps/**/*
/ENVIRONMENT.txt /ENVIRONMENT.txt
/GIT_HASH.txt /GIT_HASH.txt
/eos.tar.gz /embassyos-*.tar.gz
/*.deb

View File

@@ -1,14 +1,14 @@
ARCH = aarch64 ARCH = $(shell uname -m)
ENVIRONMENT_FILE := $(shell ./check-environment.sh) ENVIRONMENT_FILE = $(shell ./check-environment.sh)
GIT_HASH_FILE := $(shell ./check-git-hash.sh) GIT_HASH_FILE = $(shell ./check-git-hash.sh)
EMBASSY_BINS := backend/target/$(ARCH)-unknown-linux-gnu/release/embassyd backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-init backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-cli backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-sdk backend/target/$(ARCH)-unknown-linux-gnu/release/avahi-alias EMBASSY_BINS := backend/target/$(ARCH)-unknown-linux-gnu/release/embassyd backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-init backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-cli backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-sdk backend/target/$(ARCH)-unknown-linux-gnu/release/avahi-alias
EMBASSY_UIS := frontend/dist/ui frontend/dist/setup-wizard frontend/dist/diagnostic-ui EMBASSY_UIS := frontend/dist/ui frontend/dist/setup-wizard frontend/dist/diagnostic-ui
EMBASSY_SRC := raspios.img product_key.txt $(EMBASSY_BINS) backend/embassyd.service backend/embassy-init.service $(EMBASSY_UIS) $(shell find build) EMBASSY_SRC := backend/embassyd.service backend/embassy-init.service $(EMBASSY_UIS) $(shell find build)
COMPAT_SRC := $(shell find system-images/compat/ -not -path 'system-images/compat/target/*' -and -not -name compat.tar -and -not -name target) COMPAT_SRC := $(shell find system-images/compat/ -not -path 'system-images/compat/target/*' -and -not -name *.tar -and -not -name target)
UTILS_SRC := $(shell find system-images/utils/ -not -name utils.tar) UTILS_SRC := $(shell find system-images/utils/ -not -name *.tar)
BINFMT_SRC := $(shell find system-images/binfmt/ -not -name binfmt.tar) BINFMT_SRC := $(shell find system-images/binfmt/ -not -name *.tar)
BACKEND_SRC := $(shell find backend/src) $(shell find backend/migrations) $(shell find patch-db/*/src) backend/Cargo.toml backend/Cargo.lock BACKEND_SRC := $(shell find backend/src) $(shell find backend/migrations) $(shell find patch-db/*/src) backend/Cargo.toml backend/Cargo.lock
FRONTEND_SHARED_SRC := $(shell find frontend/projects/shared) $(shell find frontend/assets) $(shell ls -p frontend/ | grep -v / | sed 's/^/frontend\//g') frontend/node_modules frontend/config.json patch-db/client/dist frontend/patchdb-ui-seed.json FRONTEND_SHARED_SRC := $(shell find frontend/projects/shared) $(shell ls -p frontend/ | grep -v / | sed 's/^/frontend\//g') frontend/node_modules frontend/config.json patch-db/client/dist frontend/patchdb-ui-seed.json
FRONTEND_UI_SRC := $(shell find frontend/projects/ui) FRONTEND_UI_SRC := $(shell find frontend/projects/ui)
FRONTEND_SETUP_WIZARD_SRC := $(shell find frontend/projects/setup-wizard) FRONTEND_SETUP_WIZARD_SRC := $(shell find frontend/projects/setup-wizard)
FRONTEND_DIAGNOSTIC_UI_SRC := $(shell find frontend/projects/diagnostic-ui) FRONTEND_DIAGNOSTIC_UI_SRC := $(shell find frontend/projects/diagnostic-ui)
@@ -18,26 +18,36 @@ $(shell sudo true)
.DELETE_ON_ERROR: .DELETE_ON_ERROR:
.PHONY: all gzip clean format sdk snapshots frontends ui backend .PHONY: all gzip install clean format sdk snapshots frontends ui backend
all: eos.img
gzip: eos.tar.gz all: $(EMBASSY_SRC) $(EMBASSY_BINS) system-images/compat/docker-images/aarch64.tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar $(ENVIRONMENT_FILE) $(GIT_HASH_FILE)
eos.tar.gz: eos.img gzip: embassyos-raspi.tar.gz
tar --format=posix -cS -f- eos.img | $(GZIP_BIN) > eos.tar.gz
embassyos-raspi.tar.gz: embassyos-raspi.img
tar --format=posix -cS -f- embassyos-raspi.img | $(GZIP_BIN) > embassyos-raspi.tar.gz
clean: clean:
rm -f eos.img rm -f 2022-01-28-raspios-bullseye-arm64-lite.zip
rm -f raspios.img
rm -f embassyos-raspi.img
rm -f embassyos-raspi.tar.gz
rm -f ubuntu.img rm -f ubuntu.img
rm -f product_key.txt rm -f product_key.txt
rm -f system-images/**/*.tar rm -f system-images/**/*.tar
sudo rm -f $(EMBASSY_BINS) rm -rf system-images/compat/target
rm -rf backend/target
rm -rf frontend/.angular
rm -f frontend/config.json rm -f frontend/config.json
rm -rf frontend/node_modules rm -rf frontend/node_modules
rm -rf frontend/dist rm -rf frontend/dist
rm -rf libs/target
rm -rf patch-db/client/node_modules rm -rf patch-db/client/node_modules
rm -rf patch-db/client/dist rm -rf patch-db/client/dist
sudo rm -rf cargo-deps rm -rf patch-db/target
rm -rf cargo-deps
rm ENVIRONMENT.txt
rm GIT_HASH.txt
format: format:
cd backend && cargo +nightly fmt cd backend && cargo +nightly fmt
@@ -46,17 +56,43 @@ format:
sdk: sdk:
cd backend/ && ./install-sdk.sh cd backend/ && ./install-sdk.sh
eos.img: $(EMBASSY_SRC) system-images/compat/compat.tar system-images/utils/utils.tar system-images/binfmt/binfmt.tar cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) embassyos-raspi.img: all raspios.img cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast
! test -f eos.img || rm eos.img ! test -f embassyos-raspi.img || rm embassyos-raspi.img
if [ "$(NO_KEY)" = "1" ]; then NO_KEY=1 ./build/make-image.sh; else ./build/make-image.sh; fi ./build/raspberry-pi/make-image.sh
system-images/compat/compat.tar: $(COMPAT_SRC) # For creating os images. DO NOT USE
install: all
mkdir -p $(DESTDIR)/usr/bin
cp backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-init $(DESTDIR)/usr/bin/
cp backend/target/$(ARCH)-unknown-linux-gnu/release/embassyd $(DESTDIR)/usr/bin/
cp backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-cli $(DESTDIR)/usr/bin/
cp backend/target/$(ARCH)-unknown-linux-gnu/release/avahi-alias $(DESTDIR)/usr/bin/
mkdir -p $(DESTDIR)/usr/lib
rm -rf $(DESTDIR)/usr/lib/embassy
cp -r build/lib $(DESTDIR)/usr/lib/embassy
cp ENVIRONMENT.txt $(DESTDIR)/usr/lib/embassy/
cp GIT_HASH.txt $(DESTDIR)/usr/lib/embassy/
mkdir -p $(DESTDIR)/usr/lib/embassy/system-images
cp system-images/compat/docker-images/aarch64.tar $(DESTDIR)/usr/lib/embassy/system-images/compat.tar
cp system-images/utils/docker-images/$(ARCH).tar $(DESTDIR)/usr/lib/embassy/system-images/utils.tar
cp system-images/binfmt/docker-images/$(ARCH).tar $(DESTDIR)/usr/lib/embassy/system-images/binfmt.tar
mkdir -p $(DESTDIR)/var/www/html
cp -r frontend/dist/diagnostic-ui $(DESTDIR)/var/www/html/diagnostic
cp -r frontend/dist/setup-wizard $(DESTDIR)/var/www/html/setup
cp -r frontend/dist/ui $(DESTDIR)/var/www/html/main
cp index.html $(DESTDIR)/var/www/html/
system-images/compat/docker-images/aarch64.tar: $(COMPAT_SRC)
cd system-images/compat && make cd system-images/compat && make
system-images/utils/utils.tar: $(UTILS_SRC) system-images/utils/docker-images/aarch64.tar system-images/utils/docker-images/x86_64.tar: $(UTILS_SRC)
cd system-images/utils && make cd system-images/utils && make
system-images/binfmt/binfmt.tar: $(BINFMT_SRC) system-images/binfmt/docker-images/aarch64.tar system-images/binfmt/docker-images/x86_64.tar: $(BINFMT_SRC)
cd system-images/binfmt && make cd system-images/binfmt && make
raspios.img: raspios.img:
@@ -75,7 +111,7 @@ snapshots: libs/snapshot-creator/Cargo.toml
cd libs/ && ./build-arm-v8-snapshot.sh cd libs/ && ./build-arm-v8-snapshot.sh
$(EMBASSY_BINS): $(BACKEND_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) frontend/patchdb-ui-seed.json $(EMBASSY_BINS): $(BACKEND_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) frontend/patchdb-ui-seed.json
cd backend && ./build-prod.sh cd backend && ARCH=$(ARCH) ./build-prod.sh
touch $(EMBASSY_BINS) touch $(EMBASSY_BINS)
frontend/node_modules: frontend/package.json frontend/node_modules: frontend/package.json

View File

@@ -2,7 +2,7 @@
authors = ["Aiden McClelland <me@drbonez.dev>"] authors = ["Aiden McClelland <me@drbonez.dev>"]
description = "The core of the Start9 Embassy Operating System" description = "The core of the Start9 Embassy Operating System"
documentation = "https://docs.rs/embassy-os" documentation = "https://docs.rs/embassy-os"
edition = "2018" edition = "2021"
keywords = [ keywords = [
"self-hosted", "self-hosted",
"raspberry-pi", "raspberry-pi",
@@ -42,7 +42,7 @@ path = "src/bin/avahi-alias.rs"
[features] [features]
avahi = ["avahi-sys"] avahi = ["avahi-sys"]
default = ["avahi", "sound", "metal", "js_engine"] default = ["avahi", "metal", "sound", "js_engine"]
dev = [] dev = []
metal = [] metal = []
sound = [] sound = []

View File

@@ -3,6 +3,10 @@
set -e set -e
shopt -s expand_aliases shopt -s expand_aliases
if [ -z "$ARCH" ]; then
ARCH=$(uname -m)
fi
if [ "$0" != "./build-prod.sh" ]; then if [ "$0" != "./build-prod.sh" ]; then
>&2 echo "Must be run from backend directory" >&2 echo "Must be run from backend directory"
exit 1 exit 1
@@ -24,10 +28,10 @@ if [[ "$ENVIRONMENT" =~ (^|-)dev($|-) ]]; then
FLAGS="dev,$FLAGS" FLAGS="dev,$FLAGS"
fi fi
if [[ "$FLAGS" = "" ]]; then if [[ "$FLAGS" = "" ]]; then
rust-arm64-builder sh -c "(git config --global --add safe.directory '*'; cd backend && cargo build --release --locked)" rust-arm64-builder sh -c "(git config --global --add safe.directory '*'; cd backend && cargo build --release --locked --target=$ARCH-unknown-linux-gnu)"
else else
echo "FLAGS=$FLAGS" echo "FLAGS=$FLAGS"
rust-arm64-builder sh -c "(git config --global --add safe.directory '*'; cd backend && cargo build --release --features $FLAGS --locked)" rust-arm64-builder sh -c "(git config --global --add safe.directory '*'; cd backend && cargo build --release --features $FLAGS --locked --target=$ARCH-unknown-linux-gnu)"
fi fi
cd backend cd backend

View File

@@ -7,7 +7,7 @@ Wants=avahi-daemon.service nginx.service tor.service
[Service] [Service]
Type=oneshot Type=oneshot
Environment=RUST_LOG=embassy_init=debug,embassy=debug,js_engine=debug,patch_db=warn Environment=RUST_LOG=embassy_init=debug,embassy=debug,js_engine=debug,patch_db=warn
ExecStart=/usr/local/bin/embassy-init ExecStart=/usr/bin/embassy-init
RemainAfterExit=true RemainAfterExit=true
StandardOutput=append:/var/log/embassy-init.log StandardOutput=append:/var/log/embassy-init.log

View File

@@ -6,7 +6,7 @@ Requires=embassy-init.service
[Service] [Service]
Type=simple Type=simple
Environment=RUST_LOG=embassyd=debug,embassy=debug,js_engine=debug,patch_db=warn Environment=RUST_LOG=embassyd=debug,embassy=debug,js_engine=debug,patch_db=warn
ExecStart=/usr/local/bin/embassyd ExecStart=/usr/bin/embassyd
Restart=always Restart=always
RestartSec=3 RestartSec=3
ManagedOOMPreference=avoid ManagedOOMPreference=avoid

View File

@@ -8,17 +8,15 @@ use rpc_toolkit::command;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use tracing::instrument; use tracing::instrument;
use crate::config::{Config, ConfigSpec};
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::id::ImageId; use crate::id::ImageId;
use crate::procedure::docker::DockerContainer;
use crate::procedure::{PackageProcedure, ProcedureName}; use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;
use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat}; use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat};
use crate::util::Version; use crate::util::Version;
use crate::volume::Volumes; use crate::volume::Volumes;
use crate::{
config::{Config, ConfigSpec},
procedure::docker::DockerContainer,
};
use crate::{Error, ResultExt}; use crate::{Error, ResultExt};
#[derive(Clone, Debug, Default, Deserialize, Serialize)] #[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct Actions(pub BTreeMap<ActionId, Action>); pub struct Actions(pub BTreeMap<ActionId, Action>);

View File

@@ -14,17 +14,18 @@ use tokio::io::AsyncWriteExt;
use tracing::instrument; use tracing::instrument;
use self::target::PackageBackupInfo; use self::target::PackageBackupInfo;
use crate::context::RpcContext;
use crate::dependencies::reconfigure_dependents_with_live_pointers; use crate::dependencies::reconfigure_dependents_with_live_pointers;
use crate::id::ImageId; use crate::id::ImageId;
use crate::install::PKG_ARCHIVE_DIR; use crate::install::PKG_ARCHIVE_DIR;
use crate::net::interface::{InterfaceId, Interfaces}; use crate::net::interface::{InterfaceId, Interfaces};
use crate::procedure::docker::DockerContainer;
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName}; use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;
use crate::util::serde::IoFormat; use crate::util::serde::IoFormat;
use crate::util::Version; use crate::util::Version;
use crate::version::{Current, VersionT}; use crate::version::{Current, VersionT};
use crate::volume::{backup_dir, Volume, VolumeId, Volumes, BACKUP_DIR}; use crate::volume::{backup_dir, Volume, VolumeId, Volumes, BACKUP_DIR};
use crate::{context::RpcContext, procedure::docker::DockerContainer};
use crate::{Error, ErrorKind, ResultExt}; use crate::{Error, ErrorKind, ResultExt};
pub mod backup_bulk; pub mod backup_bulk;

View File

@@ -236,7 +236,7 @@ pub async fn recover_full_embassy(
os_backup.tor_key.public().get_onion_address(), os_backup.tor_key.public().get_onion_address(),
os_backup.root_ca_cert, os_backup.root_ca_cert,
async move { async move {
let rpc_ctx = RpcContext::init(ctx.config_path.as_ref(), disk_guid).await?; let rpc_ctx = RpcContext::init(ctx.config_path.clone(), disk_guid).await?;
let mut db = rpc_ctx.db.handle(); let mut db = rpc_ctx.db.handle();
let ids = backup_guard let ids = backup_guard

View File

@@ -139,8 +139,10 @@ pub async fn list(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
) -> Result<BTreeMap<BackupTargetId, BackupTarget>, Error> { ) -> Result<BTreeMap<BackupTargetId, BackupTarget>, Error> {
let mut sql_handle = ctx.secret_store.acquire().await?; let mut sql_handle = ctx.secret_store.acquire().await?;
let (disks_res, cifs) = let (disks_res, cifs) = tokio::try_join!(
tokio::try_join!(crate::disk::util::list(), cifs::list(&mut sql_handle),)?; crate::disk::util::list(&ctx.os_partitions),
cifs::list(&mut sql_handle),
)?;
Ok(disks_res Ok(disks_res
.into_iter() .into_iter()
.flat_map(|mut disk| { .flat_map(|mut disk| {

View File

@@ -1,4 +1,4 @@
use std::path::Path; use std::path::{Path, PathBuf};
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
@@ -27,8 +27,11 @@ fn status_fn(_: i32) -> StatusCode {
} }
#[instrument] #[instrument]
async fn setup_or_init(cfg_path: Option<&str>) -> Result<(), Error> { async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
if tokio::fs::metadata("/embassy-os/disk.guid").await.is_err() { if tokio::fs::metadata("/media/embassy/config/disk.guid")
.await
.is_err()
{
#[cfg(feature = "avahi")] #[cfg(feature = "avahi")]
let _mdns = MdnsController::init(); let _mdns = MdnsController::init();
tokio::fs::write( tokio::fs::write(
@@ -68,7 +71,7 @@ async fn setup_or_init(cfg_path: Option<&str>) -> Result<(), Error> {
.with_kind(embassy::ErrorKind::Network)?; .with_kind(embassy::ErrorKind::Network)?;
} else { } else {
let cfg = RpcContextConfig::load(cfg_path).await?; let cfg = RpcContextConfig::load(cfg_path).await?;
let guid_string = tokio::fs::read_to_string("/embassy-os/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy let guid_string = tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?; .await?;
let guid = guid_string.trim(); let guid = guid_string.trim();
let requires_reboot = embassy::disk::main::import( let requires_reboot = embassy::disk::main::import(
@@ -119,7 +122,7 @@ async fn run_script_if_exists<P: AsRef<Path>>(path: P) {
} }
#[instrument] #[instrument]
async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> { async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> {
if tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() { if tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() {
tokio::fs::remove_file(STANDBY_MODE_PATH).await?; tokio::fs::remove_file(STANDBY_MODE_PATH).await?;
Command::new("sync").invoke(ErrorKind::Filesystem).await?; Command::new("sync").invoke(ErrorKind::Filesystem).await?;
@@ -129,10 +132,10 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
embassy::sound::BEP.play().await?; embassy::sound::BEP.play().await?;
run_script_if_exists("/embassy-os/preinit.sh").await; run_script_if_exists("/media/embassy/config/preinit.sh").await;
let res = if let Err(e) = setup_or_init(cfg_path).await { let res = if let Err(e) = setup_or_init(cfg_path.clone()).await {
async { async move {
tracing::error!("{}", e.source); tracing::error!("{}", e.source);
tracing::debug!("{}", e.source); tracing::debug!("{}", e.source);
embassy::sound::BEETHOVEN.play().await?; embassy::sound::BEETHOVEN.play().await?;
@@ -156,9 +159,12 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
.await?; .await?;
let ctx = DiagnosticContext::init( let ctx = DiagnosticContext::init(
cfg_path, cfg_path,
if tokio::fs::metadata("/embassy-os/disk.guid").await.is_ok() { if tokio::fs::metadata("/media/embassy/config/disk.guid")
.await
.is_ok()
{
Some(Arc::new( Some(Arc::new(
tokio::fs::read_to_string("/embassy-os/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await? .await?
.trim() .trim()
.to_owned(), .to_owned(),
@@ -200,7 +206,7 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
Ok(None) Ok(None)
}; };
run_script_if_exists("/embassy-os/postinit.sh").await; run_script_if_exists("/media/embassy/config/postinit.sh").await;
res res
} }
@@ -217,7 +223,7 @@ fn main() {
EmbassyLogger::init(); EmbassyLogger::init();
let cfg_path = matches.value_of("config"); let cfg_path = matches.value_of("config").map(|p| Path::new(p).to_owned());
let res = { let res = {
let rt = tokio::runtime::Builder::new_multi_thread() let rt = tokio::runtime::Builder::new_multi_thread()
.enable_all() .enable_all()

View File

@@ -1,3 +1,4 @@
use std::path::{Path, PathBuf};
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
@@ -39,12 +40,12 @@ fn err_to_500(e: Error) -> Response<Body> {
} }
#[instrument] #[instrument]
async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> { async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> {
let (rpc_ctx, shutdown) = { let (rpc_ctx, shutdown) = {
let rpc_ctx = RpcContext::init( let rpc_ctx = RpcContext::init(
cfg_path, cfg_path,
Arc::new( Arc::new(
tokio::fs::read_to_string("/embassy-os/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await? .await?
.trim() .trim()
.to_owned(), .to_owned(),
@@ -292,7 +293,7 @@ fn main() {
EmbassyLogger::init(); EmbassyLogger::init();
let cfg_path = matches.value_of("config"); let cfg_path = matches.value_of("config").map(|p| Path::new(p).to_owned());
let res = { let res = {
let rt = tokio::runtime::Builder::new_multi_thread() let rt = tokio::runtime::Builder::new_multi_thread()
@@ -300,7 +301,7 @@ fn main() {
.build() .build()
.expect("failed to initialize runtime"); .expect("failed to initialize runtime");
rt.block_on(async { rt.block_on(async {
match inner_main(cfg_path).await { match inner_main(cfg_path.clone()).await {
Ok(a) => Ok(a), Ok(a) => Ok(a),
Err(e) => { Err(e) => {
(|| async { (|| async {
@@ -327,9 +328,12 @@ fn main() {
.await?; .await?;
let ctx = DiagnosticContext::init( let ctx = DiagnosticContext::init(
cfg_path, cfg_path,
if tokio::fs::metadata("/embassy-os/disk.guid").await.is_ok() { if tokio::fs::metadata("/media/embassy/config/disk.guid")
.await
.is_ok()
{
Some(Arc::new( Some(Arc::new(
tokio::fs::read_to_string("/embassy-os/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await? .await?
.trim() .trim()
.to_owned(), .to_owned(),

View File

@@ -7,14 +7,15 @@ use serde::{Deserialize, Serialize};
use tracing::instrument; use tracing::instrument;
use super::{Config, ConfigSpec}; use super::{Config, ConfigSpec};
use crate::context::RpcContext;
use crate::dependencies::Dependencies; use crate::dependencies::Dependencies;
use crate::id::ImageId; use crate::id::ImageId;
use crate::procedure::docker::DockerContainer;
use crate::procedure::{PackageProcedure, ProcedureName}; use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;
use crate::status::health_check::HealthCheckId; use crate::status::health_check::HealthCheckId;
use crate::util::Version; use crate::util::Version;
use crate::volume::Volumes; use crate::volume::Volumes;
use crate::{context::RpcContext, procedure::docker::DockerContainer};
use crate::{Error, ResultExt}; use crate::{Error, ResultExt};
#[derive(Debug, Deserialize, Serialize, HasModel)] #[derive(Debug, Deserialize, Serialize, HasModel)]

View File

@@ -13,6 +13,7 @@ use rpc_toolkit::command;
use serde_json::Value; use serde_json::Value;
use tracing::instrument; use tracing::instrument;
use crate::context::RpcContext;
use crate::db::model::{CurrentDependencies, CurrentDependencyInfo, CurrentDependents}; use crate::db::model::{CurrentDependencies, CurrentDependencyInfo, CurrentDependents};
use crate::dependencies::{ use crate::dependencies::{
add_dependent_to_current_dependents_lists, break_transitive, heal_all_dependents_transitive, add_dependent_to_current_dependents_lists, break_transitive, heal_all_dependents_transitive,
@@ -20,11 +21,11 @@ use crate::dependencies::{
DependencyErrors, DependencyReceipt, TaggedDependencyError, TryHealReceipts, DependencyErrors, DependencyReceipt, TaggedDependencyError, TryHealReceipts,
}; };
use crate::install::cleanup::{remove_from_current_dependents_lists, UpdateDependencyReceipts}; use crate::install::cleanup::{remove_from_current_dependents_lists, UpdateDependencyReceipts};
use crate::procedure::docker::DockerContainer;
use crate::s9pk::manifest::{Manifest, PackageId}; use crate::s9pk::manifest::{Manifest, PackageId};
use crate::util::display_none; use crate::util::display_none;
use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat}; use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat};
use crate::Error; use crate::Error;
use crate::{context::RpcContext, procedure::docker::DockerContainer};
pub mod action; pub mod action;
pub mod spec; pub mod spec;

View File

@@ -22,11 +22,12 @@ use sqlx::PgPool;
use super::util::{self, CharSet, NumRange, UniqueBy, STATIC_NULL}; use super::util::{self, CharSet, NumRange, UniqueBy, STATIC_NULL};
use super::{Config, MatchError, NoMatchWithPath, TimeoutError, TypeOf}; use super::{Config, MatchError, NoMatchWithPath, TimeoutError, TypeOf};
use crate::config::ConfigurationError;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::net::interface::InterfaceId; use crate::net::interface::InterfaceId;
use crate::procedure::docker::DockerContainer;
use crate::s9pk::manifest::{Manifest, PackageId}; use crate::s9pk::manifest::{Manifest, PackageId};
use crate::Error; use crate::Error;
use crate::{config::ConfigurationError, procedure::docker::DockerContainer};
// Config Value Specifications // Config Value Specifications
#[async_trait] #[async_trait]

View File

@@ -16,13 +16,13 @@ use rpc_toolkit::Context;
use serde::Deserialize; use serde::Deserialize;
use sqlx::postgres::PgConnectOptions; use sqlx::postgres::PgConnectOptions;
use sqlx::PgPool; use sqlx::PgPool;
use tokio::fs::File;
use tokio::process::Command; use tokio::process::Command;
use tokio::sync::{broadcast, oneshot, Mutex, RwLock}; use tokio::sync::{broadcast, oneshot, Mutex, RwLock};
use tracing::instrument; use tracing::instrument;
use crate::core::rpc_continuations::{RequestGuid, RestHandler, RpcContinuation}; use crate::core::rpc_continuations::{RequestGuid, RestHandler, RpcContinuation};
use crate::db::model::{Database, InstalledPackageDataEntry, PackageDataEntry}; use crate::db::model::{Database, InstalledPackageDataEntry, PackageDataEntry};
use crate::disk::OsPartitionInfo;
use crate::hostname::HostNameReceipt; use crate::hostname::HostNameReceipt;
use crate::init::{init_postgres, pgloader}; use crate::init::{init_postgres, pgloader};
use crate::install::cleanup::{cleanup_failed, uninstall, CleanupFailedReceipts}; use crate::install::cleanup::{cleanup_failed, uninstall, CleanupFailedReceipts};
@@ -35,13 +35,16 @@ use crate::notifications::NotificationManager;
use crate::setup::password_hash; use crate::setup::password_hash;
use crate::shutdown::Shutdown; use crate::shutdown::Shutdown;
use crate::status::{MainStatus, Status}; use crate::status::{MainStatus, Status};
use crate::util::io::from_yaml_async_reader; use crate::util::config::load_config_from_paths;
use crate::util::{AsyncFileExt, Invoke}; use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt}; use crate::{Error, ErrorKind, ResultExt};
#[derive(Debug, Default, Deserialize)] #[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
pub struct RpcContextConfig { pub struct RpcContextConfig {
pub wifi_interface: Option<String>,
pub ethernet_interface: String,
pub os_partitions: OsPartitionInfo,
pub migration_batch_rows: Option<usize>, pub migration_batch_rows: Option<usize>,
pub migration_prefetch_rows: Option<usize>, pub migration_prefetch_rows: Option<usize>,
pub bind_rpc: Option<SocketAddr>, pub bind_rpc: Option<SocketAddr>,
@@ -55,19 +58,20 @@ pub struct RpcContextConfig {
pub log_server: Option<Url>, pub log_server: Option<Url>,
} }
impl RpcContextConfig { impl RpcContextConfig {
pub async fn load<P: AsRef<Path>>(path: Option<P>) -> Result<Self, Error> { pub async fn load<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
let cfg_path = path tokio::task::spawn_blocking(move || {
.as_ref() load_config_from_paths(
.map(|p| p.as_ref()) path.as_ref()
.unwrap_or(Path::new(crate::util::config::CONFIG_PATH)); .into_iter()
if let Some(f) = File::maybe_open(cfg_path) .map(|p| p.as_ref())
.await .chain(std::iter::once(Path::new(
.with_ctx(|_| (crate::ErrorKind::Filesystem, cfg_path.display().to_string()))? "/media/embassy/config/config.yaml",
{ )))
from_yaml_async_reader(f).await .chain(std::iter::once(Path::new(crate::util::config::CONFIG_PATH))),
} else { )
Ok(Self::default()) })
} .await
.unwrap()
} }
pub fn datadir(&self) -> &Path { pub fn datadir(&self) -> &Path {
self.datadir self.datadir
@@ -116,6 +120,9 @@ impl RpcContextConfig {
pub struct RpcContextSeed { pub struct RpcContextSeed {
is_closed: AtomicBool, is_closed: AtomicBool,
pub os_partitions: OsPartitionInfo,
pub wifi_interface: Option<String>,
pub ethernet_interface: String,
pub bind_rpc: SocketAddr, pub bind_rpc: SocketAddr,
pub bind_ws: SocketAddr, pub bind_ws: SocketAddr,
pub bind_static: SocketAddr, pub bind_static: SocketAddr,
@@ -134,7 +141,7 @@ pub struct RpcContextSeed {
pub notification_manager: NotificationManager, pub notification_manager: NotificationManager,
pub open_authed_websockets: Mutex<BTreeMap<HashSessionToken, Vec<oneshot::Sender<()>>>>, pub open_authed_websockets: Mutex<BTreeMap<HashSessionToken, Vec<oneshot::Sender<()>>>>,
pub rpc_stream_continuations: Mutex<BTreeMap<RequestGuid, RpcContinuation>>, pub rpc_stream_continuations: Mutex<BTreeMap<RequestGuid, RpcContinuation>>,
pub wifi_manager: Arc<RwLock<WpaCli>>, pub wifi_manager: Option<Arc<RwLock<WpaCli>>>,
} }
pub struct RpcCleanReceipts { pub struct RpcCleanReceipts {
@@ -209,7 +216,7 @@ impl RpcSetNginxReceipts {
pub struct RpcContext(Arc<RpcContextSeed>); pub struct RpcContext(Arc<RpcContextSeed>);
impl RpcContext { impl RpcContext {
#[instrument(skip(cfg_path))] #[instrument(skip(cfg_path))]
pub async fn init<P: AsRef<Path>>( pub async fn init<P: AsRef<Path> + Send + 'static>(
cfg_path: Option<P>, cfg_path: Option<P>,
disk_guid: Arc<String>, disk_guid: Arc<String>,
) -> Result<Self, Error> { ) -> Result<Self, Error> {
@@ -248,10 +255,13 @@ impl RpcContext {
tracing::info!("Initialized Notification Manager"); tracing::info!("Initialized Notification Manager");
let seed = Arc::new(RpcContextSeed { let seed = Arc::new(RpcContextSeed {
is_closed: AtomicBool::new(false), is_closed: AtomicBool::new(false),
datadir: base.datadir().to_path_buf(),
os_partitions: base.os_partitions,
wifi_interface: base.wifi_interface.clone(),
ethernet_interface: base.ethernet_interface,
bind_rpc: base.bind_rpc.unwrap_or(([127, 0, 0, 1], 5959).into()), bind_rpc: base.bind_rpc.unwrap_or(([127, 0, 0, 1], 5959).into()),
bind_ws: base.bind_ws.unwrap_or(([127, 0, 0, 1], 5960).into()), bind_ws: base.bind_ws.unwrap_or(([127, 0, 0, 1], 5960).into()),
bind_static: base.bind_static.unwrap_or(([127, 0, 0, 1], 5961).into()), bind_static: base.bind_static.unwrap_or(([127, 0, 0, 1], 5961).into()),
datadir: base.datadir().to_path_buf(),
disk_guid, disk_guid,
db, db,
secret_store, secret_store,
@@ -266,7 +276,9 @@ impl RpcContext {
notification_manager, notification_manager,
open_authed_websockets: Mutex::new(BTreeMap::new()), open_authed_websockets: Mutex::new(BTreeMap::new()),
rpc_stream_continuations: Mutex::new(BTreeMap::new()), rpc_stream_continuations: Mutex::new(BTreeMap::new()),
wifi_manager: Arc::new(RwLock::new(WpaCli::init("wlan0".to_string()))), wifi_manager: base
.wifi_interface
.map(|i| Arc::new(RwLock::new(WpaCli::init(i)))),
}); });
let res = Self(seed); let res = Self(seed);

View File

@@ -11,18 +11,17 @@ use rpc_toolkit::Context;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use sqlx::postgres::PgConnectOptions; use sqlx::postgres::PgConnectOptions;
use sqlx::PgPool; use sqlx::PgPool;
use tokio::fs::File;
use tokio::sync::broadcast::Sender; use tokio::sync::broadcast::Sender;
use tokio::sync::RwLock; use tokio::sync::RwLock;
use tracing::instrument; use tracing::instrument;
use url::Host; use url::Host;
use crate::db::model::Database; use crate::db::model::Database;
use crate::disk::OsPartitionInfo;
use crate::init::{init_postgres, pgloader}; use crate::init::{init_postgres, pgloader};
use crate::net::tor::os_key; use crate::net::tor::os_key;
use crate::setup::{password_hash, RecoveryStatus}; use crate::setup::{password_hash, RecoveryStatus};
use crate::util::io::from_yaml_async_reader; use crate::util::config::load_config_from_paths;
use crate::util::AsyncFileExt;
use crate::{Error, ResultExt}; use crate::{Error, ResultExt};
#[derive(Clone, Serialize, Deserialize)] #[derive(Clone, Serialize, Deserialize)]
@@ -36,6 +35,7 @@ pub struct SetupResult {
#[derive(Debug, Default, Deserialize)] #[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
pub struct SetupContextConfig { pub struct SetupContextConfig {
pub os_partitions: OsPartitionInfo,
pub migration_batch_rows: Option<usize>, pub migration_batch_rows: Option<usize>,
pub migration_prefetch_rows: Option<usize>, pub migration_prefetch_rows: Option<usize>,
pub bind_rpc: Option<SocketAddr>, pub bind_rpc: Option<SocketAddr>,
@@ -43,19 +43,20 @@ pub struct SetupContextConfig {
} }
impl SetupContextConfig { impl SetupContextConfig {
#[instrument(skip(path))] #[instrument(skip(path))]
pub async fn load<P: AsRef<Path>>(path: Option<P>) -> Result<Self, Error> { pub async fn load<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
let cfg_path = path tokio::task::spawn_blocking(move || {
.as_ref() load_config_from_paths(
.map(|p| p.as_ref()) path.as_ref()
.unwrap_or(Path::new(crate::util::config::CONFIG_PATH)); .into_iter()
if let Some(f) = File::maybe_open(cfg_path) .map(|p| p.as_ref())
.await .chain(std::iter::once(Path::new(
.with_ctx(|_| (crate::ErrorKind::Filesystem, cfg_path.display().to_string()))? "/media/embassy/config/config.yaml",
{ )))
from_yaml_async_reader(f).await .chain(std::iter::once(Path::new(crate::util::config::CONFIG_PATH))),
} else { )
Ok(Self::default()) })
} .await
.unwrap()
} }
pub fn datadir(&self) -> &Path { pub fn datadir(&self) -> &Path {
self.datadir self.datadir
@@ -65,6 +66,7 @@ impl SetupContextConfig {
} }
pub struct SetupContextSeed { pub struct SetupContextSeed {
pub os_partitions: OsPartitionInfo,
pub config_path: Option<PathBuf>, pub config_path: Option<PathBuf>,
pub migration_batch_rows: usize, pub migration_batch_rows: usize,
pub migration_prefetch_rows: usize, pub migration_prefetch_rows: usize,
@@ -90,11 +92,12 @@ impl AsRef<Jwk> for SetupContextSeed {
pub struct SetupContext(Arc<SetupContextSeed>); pub struct SetupContext(Arc<SetupContextSeed>);
impl SetupContext { impl SetupContext {
#[instrument(skip(path))] #[instrument(skip(path))]
pub async fn init<P: AsRef<Path>>(path: Option<P>) -> Result<Self, Error> { pub async fn init<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
let cfg = SetupContextConfig::load(path.as_ref()).await?; let cfg = SetupContextConfig::load(path.as_ref().map(|p| p.as_ref().to_owned())).await?;
let (shutdown, _) = tokio::sync::broadcast::channel(1); let (shutdown, _) = tokio::sync::broadcast::channel(1);
let datadir = cfg.datadir().to_owned(); let datadir = cfg.datadir().to_owned();
Ok(Self(Arc::new(SetupContextSeed { Ok(Self(Arc::new(SetupContextSeed {
os_partitions: cfg.os_partitions,
config_path: path.as_ref().map(|p| p.as_ref().to_owned()), config_path: path.as_ref().map(|p| p.as_ref().to_owned()),
migration_batch_rows: cfg.migration_batch_rows.unwrap_or(25000), migration_batch_rows: cfg.migration_batch_rows.unwrap_or(25000),
migration_prefetch_rows: cfg.migration_prefetch_rows.unwrap_or(100_000), migration_prefetch_rows: cfg.migration_prefetch_rows.unwrap_or(100_000),

View File

@@ -14,10 +14,12 @@ use rpc_toolkit::command;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use tracing::instrument; use tracing::instrument;
use crate::config::action::{ConfigActions, ConfigRes};
use crate::config::spec::PackagePointerSpec; use crate::config::spec::PackagePointerSpec;
use crate::config::{not_found, Config, ConfigReceipts, ConfigSpec}; use crate::config::{not_found, Config, ConfigReceipts, ConfigSpec};
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::db::model::{CurrentDependencies, CurrentDependents, InstalledPackageDataEntry}; use crate::db::model::{CurrentDependencies, CurrentDependents, InstalledPackageDataEntry};
use crate::procedure::docker::DockerContainer;
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName}; use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::{Manifest, PackageId}; use crate::s9pk::manifest::{Manifest, PackageId};
use crate::status::health_check::{HealthCheckId, HealthCheckResult}; use crate::status::health_check::{HealthCheckId, HealthCheckResult};
@@ -26,10 +28,6 @@ use crate::util::serde::display_serializable;
use crate::util::{display_none, Version}; use crate::util::{display_none, Version};
use crate::volume::Volumes; use crate::volume::Volumes;
use crate::Error; use crate::Error;
use crate::{
config::action::{ConfigActions, ConfigRes},
procedure::docker::DockerContainer,
};
#[command(subcommands(configure))] #[command(subcommands(configure))]
pub fn dependency() -> Result<(), Error> { pub fn dependency() -> Result<(), Error> {

View File

@@ -58,7 +58,7 @@ pub fn disk() -> Result<(), Error> {
#[command(rename = "forget", display(display_none))] #[command(rename = "forget", display(display_none))]
pub async fn forget_disk() -> Result<(), Error> { pub async fn forget_disk() -> Result<(), Error> {
let disk_guid = Path::new("/embassy-os/disk.guid"); let disk_guid = Path::new("/media/embassy/config/disk.guid");
if tokio::fs::metadata(disk_guid).await.is_ok() { if tokio::fs::metadata(disk_guid).await.is_ok() {
tokio::fs::remove_file(disk_guid).await?; tokio::fs::remove_file(disk_guid).await?;
} }

View File

@@ -63,7 +63,7 @@ fn backup_existing_undo_file<'a>(path: &'a Path) -> BoxFuture<'a, Result<(), Err
pub async fn e2fsck_aggressive( pub async fn e2fsck_aggressive(
logicalname: impl AsRef<Path> + std::fmt::Debug, logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> { ) -> Result<RequiresReboot, Error> {
let undo_path = Path::new("/embassy-os") let undo_path = Path::new("/media/embassy/config")
.join( .join(
logicalname logicalname
.as_ref() .as_ref()

View File

@@ -1,6 +1,10 @@
use std::path::{Path, PathBuf};
use clap::ArgMatches; use clap::ArgMatches;
use rpc_toolkit::command; use rpc_toolkit::command;
use serde::Deserialize;
use crate::context::RpcContext;
use crate::disk::util::DiskInfo; use crate::disk::util::DiskInfo;
use crate::util::display_none; use crate::util::display_none;
use crate::util::serde::{display_serializable, IoFormat}; use crate::util::serde::{display_serializable, IoFormat};
@@ -12,7 +16,19 @@ pub mod mount;
pub mod util; pub mod util;
pub const BOOT_RW_PATH: &str = "/media/boot-rw"; pub const BOOT_RW_PATH: &str = "/media/boot-rw";
pub const REPAIR_DISK_PATH: &str = "/embassy-os/repair-disk"; pub const REPAIR_DISK_PATH: &str = "/media/embassy/config/repair-disk";
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct OsPartitionInfo {
pub boot: PathBuf,
pub root: PathBuf,
}
impl OsPartitionInfo {
pub fn contains(&self, logicalname: impl AsRef<Path>) -> bool {
&*self.boot == logicalname.as_ref() || &*self.root == logicalname.as_ref()
}
}
#[command(subcommands(list, repair))] #[command(subcommands(list, repair))]
pub fn disk() -> Result<(), Error> { pub fn disk() -> Result<(), Error> {
@@ -75,11 +91,12 @@ fn display_disk_info(info: Vec<DiskInfo>, matches: &ArgMatches) {
#[command(display(display_disk_info))] #[command(display(display_disk_info))]
pub async fn list( pub async fn list(
#[context] ctx: RpcContext,
#[allow(unused_variables)] #[allow(unused_variables)]
#[arg] #[arg]
format: Option<IoFormat>, format: Option<IoFormat>,
) -> Result<Vec<DiskInfo>, Error> { ) -> Result<Vec<DiskInfo>, Error> {
crate::disk::util::list().await crate::disk::util::list(&ctx.os_partitions).await
} }
#[command(display(display_none))] #[command(display(display_none))]

View File

@@ -0,0 +1,54 @@
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
use async_trait::async_trait;
use digest::generic_array::GenericArray;
use digest::{Digest, OutputSizeUser};
use sha2::Sha256;
use super::{FileSystem, MountType, ReadOnly};
use crate::disk::mount::util::bind;
use crate::{Error, ResultExt};
pub struct Bind<SrcDir: AsRef<Path>> {
src_dir: SrcDir,
}
impl<SrcDir: AsRef<Path>> Bind<SrcDir> {
pub fn new(src_dir: SrcDir) -> Self {
Self { src_dir }
}
}
#[async_trait]
impl<SrcDir: AsRef<Path> + Send + Sync> FileSystem for Bind<SrcDir> {
async fn mount<P: AsRef<Path> + Send + Sync>(
&self,
mountpoint: P,
mount_type: MountType,
) -> Result<(), Error> {
bind(
self.src_dir.as_ref(),
mountpoint,
matches!(mount_type, ReadOnly),
)
.await
}
async fn source_hash(
&self,
) -> Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error> {
let mut sha = Sha256::new();
sha.update("Bind");
sha.update(
tokio::fs::canonicalize(self.src_dir.as_ref())
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
self.src_dir.as_ref().display().to_string(),
)
})?
.as_os_str()
.as_bytes(),
);
Ok(sha.finalize())
}
}

View File

@@ -0,0 +1,52 @@
use std::path::Path;
use async_trait::async_trait;
use digest::generic_array::GenericArray;
use digest::{Digest, OutputSizeUser};
use reqwest::Url;
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use super::{FileSystem, MountType};
use crate::util::Invoke;
use crate::Error;
pub async fn mount_httpdirfs(url: &Url, mountpoint: impl AsRef<Path>) -> Result<(), Error> {
tokio::fs::create_dir_all(mountpoint.as_ref()).await?;
let mut cmd = tokio::process::Command::new("httpdirfs");
cmd.arg("--cache")
.arg("--single-file-mode")
.arg(url.as_str())
.arg(mountpoint.as_ref());
cmd.invoke(crate::ErrorKind::Filesystem).await?;
Ok(())
}
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct HttpDirFS {
url: Url,
}
impl HttpDirFS {
pub fn new(url: Url) -> Self {
HttpDirFS { url }
}
}
#[async_trait]
impl FileSystem for HttpDirFS {
async fn mount<P: AsRef<Path> + Send + Sync>(
&self,
mountpoint: P,
_mount_type: MountType,
) -> Result<(), Error> {
mount_httpdirfs(&self.url, mountpoint).await
}
async fn source_hash(
&self,
) -> Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error> {
let mut sha = Sha256::new();
sha.update("HttpDirFS");
sha.update(self.url.as_str());
Ok(sha.finalize())
}
}

View File

@@ -7,9 +7,11 @@ use sha2::Sha256;
use crate::Error; use crate::Error;
pub mod bind;
pub mod block_dev; pub mod block_dev;
pub mod cifs; pub mod cifs;
pub mod ecryptfs; pub mod ecryptfs;
pub mod httpdirfs;
pub mod label; pub mod label;
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]

View File

@@ -11,7 +11,7 @@ use super::util::unmount;
use crate::util::Invoke; use crate::util::Invoke;
use crate::Error; use crate::Error;
pub const TMP_MOUNTPOINT: &'static str = "/media/embassy-os/tmp"; pub const TMP_MOUNTPOINT: &'static str = "/media/embassy/tmp";
#[async_trait::async_trait] #[async_trait::async_trait]
pub trait GenericMountGuard: AsRef<Path> + std::fmt::Debug + Send + Sync + 'static { pub trait GenericMountGuard: AsRef<Path> + std::fmt::Debug + Send + Sync + 'static {

View File

@@ -19,6 +19,7 @@ use tracing::instrument;
use super::mount::filesystem::block_dev::BlockDev; use super::mount::filesystem::block_dev::BlockDev;
use super::mount::filesystem::ReadOnly; use super::mount::filesystem::ReadOnly;
use super::mount::guard::TmpMountGuard; use super::mount::guard::TmpMountGuard;
use crate::disk::OsPartitionInfo;
use crate::util::io::from_yaml_async_reader; use crate::util::io::from_yaml_async_reader;
use crate::util::serde::IoFormat; use crate::util::serde::IoFormat;
use crate::util::{Invoke, Version}; use crate::util::{Invoke, Version};
@@ -232,7 +233,11 @@ pub async fn recovery_info(
} }
#[instrument] #[instrument]
pub async fn list() -> Result<Vec<DiskInfo>, Error> { pub async fn list(os: &OsPartitionInfo) -> Result<Vec<DiskInfo>, Error> {
struct DiskIndex {
parts: IndexSet<PathBuf>,
internal: bool,
}
let disk_guids = pvscan().await?; let disk_guids = pvscan().await?;
let disks = tokio_stream::wrappers::ReadDirStream::new( let disks = tokio_stream::wrappers::ReadDirStream::new(
tokio::fs::read_dir(DISK_PATH) tokio::fs::read_dir(DISK_PATH)
@@ -245,127 +250,157 @@ pub async fn list() -> Result<Vec<DiskInfo>, Error> {
crate::ErrorKind::Filesystem, crate::ErrorKind::Filesystem,
) )
}) })
.try_fold(BTreeMap::new(), |mut disks, dir_entry| async move { .try_fold(
if let Some(disk_path) = dir_entry.path().file_name().and_then(|s| s.to_str()) { BTreeMap::<PathBuf, DiskIndex>::new(),
let (disk_path, part_path) = if let Some(end) = PARTITION_REGEX.find(disk_path) { |mut disks, dir_entry| async move {
( if let Some(disk_path) = dir_entry.path().file_name().and_then(|s| s.to_str()) {
disk_path.strip_suffix(end.as_str()).unwrap_or_default(), let (disk_path, part_path) = if let Some(end) = PARTITION_REGEX.find(disk_path) {
Some(disk_path), (
) disk_path.strip_suffix(end.as_str()).unwrap_or_default(),
} else { Some(disk_path),
(disk_path, None) )
}; } else {
let disk_path = Path::new(DISK_PATH).join(disk_path); (disk_path, None)
let disk = tokio::fs::canonicalize(&disk_path).await.with_ctx(|_| { };
( let disk_path = Path::new(DISK_PATH).join(disk_path);
crate::ErrorKind::Filesystem, let disk = tokio::fs::canonicalize(&disk_path).await.with_ctx(|_| {
disk_path.display().to_string(),
)
})?;
if &*disk == Path::new("/dev/mmcblk0") {
return Ok(disks);
}
if !disks.contains_key(&disk) {
disks.insert(disk.clone(), IndexSet::new());
}
if let Some(part_path) = part_path {
let part_path = Path::new(DISK_PATH).join(part_path);
let part = tokio::fs::canonicalize(&part_path).await.with_ctx(|_| {
( (
crate::ErrorKind::Filesystem, crate::ErrorKind::Filesystem,
part_path.display().to_string(), disk_path.display().to_string(),
) )
})?; })?;
disks.get_mut(&disk).unwrap().insert(part); let part = if let Some(part_path) = part_path {
let part_path = Path::new(DISK_PATH).join(part_path);
let part = tokio::fs::canonicalize(&part_path).await.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
part_path.display().to_string(),
)
})?;
Some(part)
} else {
None
};
if !disks.contains_key(&disk) {
disks.insert(
disk.clone(),
DiskIndex {
parts: IndexSet::new(),
internal: false,
},
);
}
if let Some(part) = part {
if os.contains(&part) {
disks.get_mut(&disk).unwrap().internal = true;
} else {
disks.get_mut(&disk).unwrap().parts.insert(part);
}
}
} }
} Ok(disks)
Ok(disks) },
}) )
.await?; .await?;
let mut res = Vec::with_capacity(disks.len()); let mut res = Vec::with_capacity(disks.len());
for (disk, parts) in disks { for (disk, index) in disks {
let mut guid: Option<String> = None; if index.internal {
let mut partitions = Vec::with_capacity(parts.len()); for part in index.parts {
let vendor = get_vendor(&disk) let mut disk_info = disk_info(disk.clone()).await;
.await disk_info.logicalname = part;
.map_err(|e| tracing::warn!("Could not get vendor of {}: {}", disk.display(), e.source)) if let Some(g) = disk_guids.get(&disk_info.logicalname) {
.unwrap_or_default(); disk_info.guid = g.clone();
let model = get_model(&disk) } else {
.await disk_info.partitions = vec![part_info(disk_info.logicalname.clone()).await];
.map_err(|e| tracing::warn!("Could not get model of {}: {}", disk.display(), e.source))
.unwrap_or_default();
let capacity = get_capacity(&disk)
.await
.map_err(|e| {
tracing::warn!("Could not get capacity of {}: {}", disk.display(), e.source)
})
.unwrap_or_default();
if let Some(g) = disk_guids.get(&disk) {
guid = g.clone();
} else {
for part in parts {
let mut embassy_os = None;
let label = get_label(&part).await?;
let capacity = get_capacity(&part)
.await
.map_err(|e| {
tracing::warn!("Could not get capacity of {}: {}", part.display(), e.source)
})
.unwrap_or_default();
let mut used = None;
match TmpMountGuard::mount(&BlockDev::new(&part), ReadOnly).await {
Err(e) => tracing::warn!("Could not collect usage information: {}", e.source),
Ok(mount_guard) => {
used = get_used(&mount_guard)
.await
.map_err(|e| {
tracing::warn!(
"Could not get usage of {}: {}",
part.display(),
e.source
)
})
.ok();
if let Some(recovery_info) = match recovery_info(&mount_guard).await {
Ok(a) => a,
Err(e) => {
tracing::error!(
"Error fetching unencrypted backup metadata: {}",
e
);
None
}
} {
embassy_os = Some(recovery_info)
}
mount_guard.unmount().await?;
}
} }
res.push(disk_info);
partitions.push(PartitionInfo {
logicalname: part,
label,
capacity,
used,
embassy_os,
});
} }
} else {
let mut disk_info = disk_info(disk).await;
disk_info.partitions = Vec::with_capacity(index.parts.len());
if let Some(g) = disk_guids.get(&disk_info.logicalname) {
disk_info.guid = g.clone();
} else {
for part in index.parts {
disk_info.partitions.push(part_info(part).await);
}
}
res.push(disk_info);
} }
res.push(DiskInfo {
logicalname: disk,
vendor,
model,
partitions,
capacity,
guid,
})
} }
Ok(res) Ok(res)
} }
async fn disk_info(disk: PathBuf) -> DiskInfo {
let vendor = get_vendor(&disk)
.await
.map_err(|e| tracing::warn!("Could not get vendor of {}: {}", disk.display(), e.source))
.unwrap_or_default();
let model = get_model(&disk)
.await
.map_err(|e| tracing::warn!("Could not get model of {}: {}", disk.display(), e.source))
.unwrap_or_default();
let capacity = get_capacity(&disk)
.await
.map_err(|e| tracing::warn!("Could not get capacity of {}: {}", disk.display(), e.source))
.unwrap_or_default();
DiskInfo {
logicalname: disk,
vendor,
model,
partitions: Vec::new(),
capacity,
guid: None,
}
}
async fn part_info(part: PathBuf) -> PartitionInfo {
let mut embassy_os = None;
let label = get_label(&part)
.await
.map_err(|e| tracing::warn!("Could not get label of {}: {}", part.display(), e.source))
.unwrap_or_default();
let capacity = get_capacity(&part)
.await
.map_err(|e| tracing::warn!("Could not get capacity of {}: {}", part.display(), e.source))
.unwrap_or_default();
let mut used = None;
match TmpMountGuard::mount(&BlockDev::new(&part), ReadOnly).await {
Err(e) => tracing::warn!("Could not collect usage information: {}", e.source),
Ok(mount_guard) => {
used = get_used(&mount_guard)
.await
.map_err(|e| {
tracing::warn!("Could not get usage of {}: {}", part.display(), e.source)
})
.ok();
if let Some(recovery_info) = match recovery_info(&mount_guard).await {
Ok(a) => a,
Err(e) => {
tracing::error!("Error fetching unencrypted backup metadata: {}", e);
None
}
} {
embassy_os = Some(recovery_info)
}
if let Err(e) = mount_guard.unmount().await {
tracing::error!("Error unmounting partition {}: {}", part.display(), e);
}
}
}
PartitionInfo {
logicalname: part,
label,
capacity,
used,
embassy_os,
}
}
fn parse_pvscan_output(pvscan_output: &str) -> BTreeMap<PathBuf, Option<String>> { fn parse_pvscan_output(pvscan_output: &str) -> BTreeMap<PathBuf, Option<String>> {
fn parse_line(line: &str) -> IResult<&str, (&str, Option<&str>)> { fn parse_line(line: &str) -> IResult<&str, (&str, Option<&str>)> {
let pv_parse = preceded( let pv_parse = preceded(

View File

@@ -70,7 +70,7 @@ pub enum ErrorKind {
TLSInit = 61, TLSInit = 61,
HttpRange = 62, HttpRange = 62,
ContentLength = 63, ContentLength = 63,
BytesError = 64 BytesError = 64,
} }
impl ErrorKind { impl ErrorKind {
pub fn as_str(&self) -> &'static str { pub fn as_str(&self) -> &'static str {
@@ -139,7 +139,7 @@ impl ErrorKind {
TLSInit => "TLS Backend Initialize Error", TLSInit => "TLS Backend Initialize Error",
HttpRange => "No Support for Web Server HTTP Ranges", HttpRange => "No Support for Web Server HTTP Ranges",
ContentLength => "Request has no content length header", ContentLength => "Request has no content length header",
BytesError => "Could not get the bytes for this request" BytesError => "Could not get the bytes for this request",
} }
} }
} }
@@ -244,8 +244,6 @@ impl From<openssl::error::ErrorStack> for Error {
fn from(e: openssl::error::ErrorStack) -> Self { fn from(e: openssl::error::ErrorStack) -> Self {
Error::new(eyre!("OpenSSL ERROR:\n{}", e), ErrorKind::OpenSsl) Error::new(eyre!("OpenSSL ERROR:\n{}", e), ErrorKind::OpenSsl)
} }
} }
impl From<Error> for RpcError { impl From<Error> for RpcError {
fn from(e: Error) -> Self { fn from(e: Error) -> Self {

View File

@@ -17,8 +17,8 @@ use crate::util::Invoke;
use crate::version::VersionT; use crate::version::VersionT;
use crate::Error; use crate::Error;
pub const SYSTEM_REBUILD_PATH: &str = "/embassy-os/system-rebuild"; pub const SYSTEM_REBUILD_PATH: &str = "/media/embassy/config/system-rebuild";
pub const STANDBY_MODE_PATH: &str = "/embassy-os/standby"; pub const STANDBY_MODE_PATH: &str = "/media/embassy/config/standby";
pub async fn check_time_is_synchronized() -> Result<bool, Error> { pub async fn check_time_is_synchronized() -> Result<bool, Error> {
Ok(String::from_utf8( Ok(String::from_utf8(
@@ -309,7 +309,7 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
tracing::info!("Created Docker Network"); tracing::info!("Created Docker Network");
tracing::info!("Loading System Docker Images"); tracing::info!("Loading System Docker Images");
crate::install::load_images("/var/lib/embassy/system-images").await?; crate::install::load_images("/usr/lib/embassy/system-images").await?;
tracing::info!("Loaded System Docker Images"); tracing::info!("Loaded System Docker Images");
tracing::info!("Loading Package Docker Images"); tracing::info!("Loading Package Docker Images");
@@ -332,12 +332,15 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
crate::ssh::sync_keys_from_db(&secret_store, "/home/start9/.ssh/authorized_keys").await?; crate::ssh::sync_keys_from_db(&secret_store, "/home/start9/.ssh/authorized_keys").await?;
tracing::info!("Synced SSH Keys"); tracing::info!("Synced SSH Keys");
crate::net::wifi::synchronize_wpa_supplicant_conf( if let Some(wifi_interface) = &cfg.wifi_interface {
&cfg.datadir().join("main"), crate::net::wifi::synchronize_wpa_supplicant_conf(
&receipts.last_wifi_region.get(&mut handle).await?, &cfg.datadir().join("main"),
) wifi_interface,
.await?; &receipts.last_wifi_region.get(&mut handle).await?,
tracing::info!("Synchronized wpa_supplicant.conf"); )
.await?;
tracing::info!("Synchronized WiFi");
}
receipts receipts
.status_info .status_info
.set( .set(

View File

@@ -1,8 +1,6 @@
use std::{ use std::collections::HashMap;
collections::HashMap, use std::path::{Path, PathBuf};
path::{Path, PathBuf}, use std::sync::Arc;
sync::Arc,
};
use bollard::image::ListImagesOptions; use bollard::image::ListImagesOptions;
use color_eyre::Report; use color_eyre::Report;

View File

@@ -896,7 +896,7 @@ impl InstallS9Receipts {
} }
#[instrument(skip(ctx, rdr))] #[instrument(skip(ctx, rdr))]
pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>( pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
ctx: &RpcContext, ctx: &RpcContext,
pkg_id: &PackageId, pkg_id: &PackageId,
version: &Version, version: &Version,

View File

@@ -113,6 +113,7 @@ impl InstallProgress {
} }
#[pin_project::pin_project] #[pin_project::pin_project]
#[derive(Debug)]
pub struct InstallProgressTracker<RW> { pub struct InstallProgressTracker<RW> {
#[pin] #[pin]
inner: RW, inner: RW,

View File

@@ -22,17 +22,17 @@ use tokio::task::JoinHandle;
use torut::onion::TorSecretKeyV3; use torut::onion::TorSecretKeyV3;
use tracing::instrument; use tracing::instrument;
use crate::context::RpcContext;
use crate::manager::sync::synchronizer;
use crate::net::interface::InterfaceId; use crate::net::interface::InterfaceId;
use crate::net::GeneratedCertificateMountPoint; use crate::net::GeneratedCertificateMountPoint;
use crate::notifications::NotificationLevel; use crate::notifications::NotificationLevel;
use crate::procedure::docker::DockerProcedure; use crate::procedure::docker::{DockerContainer, DockerInject, DockerProcedure};
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName}; use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::{Manifest, PackageId}; use crate::s9pk::manifest::{Manifest, PackageId};
use crate::status::MainStatus; use crate::status::MainStatus;
use crate::util::{Container, NonDetachingJoinHandle, Version}; use crate::util::{Container, NonDetachingJoinHandle, Version};
use crate::Error; use crate::Error;
use crate::{context::RpcContext, procedure::docker::DockerContainer};
use crate::{manager::sync::synchronizer, procedure::docker::DockerInject};
pub mod health; pub mod health;
mod sync; mod sync;

View File

@@ -59,12 +59,10 @@ pub fn db<M: Metadata>(ctx: RpcContext) -> DynMiddleware<M> {
.append("X-Patch-Updates", HeaderValue::from_str(&a)?), .append("X-Patch-Updates", HeaderValue::from_str(&a)?),
Err(e) => res.headers.append( Err(e) => res.headers.append(
"X-Patch-Error", "X-Patch-Error",
HeaderValue::from_str( HeaderValue::from_str(&base64::encode_config(
&base64::encode_config( &e.to_string(),
&e.to_string(), base64::URL_SAFE,
base64::URL_SAFE, ))?,
),
)?,
), ),
}; };
} }

View File

@@ -8,12 +8,13 @@ use patch_db::HasModel;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use tracing::instrument; use tracing::instrument;
use crate::context::RpcContext;
use crate::id::ImageId; use crate::id::ImageId;
use crate::procedure::docker::DockerContainer;
use crate::procedure::{PackageProcedure, ProcedureName}; use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;
use crate::util::Version; use crate::util::Version;
use crate::volume::Volumes; use crate::volume::Volumes;
use crate::{context::RpcContext, procedure::docker::DockerContainer};
use crate::{Error, ResultExt}; use crate::{Error, ResultExt};
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel)] #[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel)]

View File

@@ -18,6 +18,19 @@ use crate::util::serde::{display_serializable, IoFormat};
use crate::util::{display_none, Invoke}; use crate::util::{display_none, Invoke};
use crate::{Error, ErrorKind}; use crate::{Error, ErrorKind};
type WifiManager = Arc<RwLock<WpaCli>>;
pub fn wifi_manager(ctx: &RpcContext) -> Result<&WifiManager, Error> {
if let Some(wifi_manager) = ctx.wifi_manager.as_ref() {
Ok(wifi_manager)
} else {
Err(Error::new(
color_eyre::eyre::eyre!("No WiFi interface available"),
ErrorKind::Wifi,
))
}
}
#[command(subcommands(add, connect, delete, get, country, available))] #[command(subcommands(add, connect, delete, get, country, available))]
pub async fn wifi() -> Result<(), Error> { pub async fn wifi() -> Result<(), Error> {
Ok(()) Ok(())
@@ -42,6 +55,7 @@ pub async fn add(
#[arg] priority: isize, #[arg] priority: isize,
#[arg] connect: bool, #[arg] connect: bool,
) -> Result<(), Error> { ) -> Result<(), Error> {
let wifi_manager = wifi_manager(&ctx)?;
if !ssid.is_ascii() { if !ssid.is_ascii() {
return Err(Error::new( return Err(Error::new(
color_eyre::eyre::eyre!("SSID may not have special characters"), color_eyre::eyre::eyre!("SSID may not have special characters"),
@@ -56,7 +70,7 @@ pub async fn add(
} }
async fn add_procedure( async fn add_procedure(
db: impl DbHandle, db: impl DbHandle,
wifi_manager: Arc<RwLock<WpaCli>>, wifi_manager: WifiManager,
ssid: &Ssid, ssid: &Ssid,
password: &Psk, password: &Psk,
priority: isize, priority: isize,
@@ -71,7 +85,7 @@ pub async fn add(
} }
if let Err(err) = add_procedure( if let Err(err) = add_procedure(
&mut ctx.db.handle(), &mut ctx.db.handle(),
ctx.wifi_manager.clone(), wifi_manager.clone(),
&Ssid(ssid.clone()), &Ssid(ssid.clone()),
&Psk(password.clone()), &Psk(password.clone()),
priority, priority,
@@ -91,6 +105,7 @@ pub async fn add(
#[command(display(display_none))] #[command(display(display_none))]
#[instrument(skip(ctx))] #[instrument(skip(ctx))]
pub async fn connect(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<(), Error> { pub async fn connect(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<(), Error> {
let wifi_manager = wifi_manager(&ctx)?;
if !ssid.is_ascii() { if !ssid.is_ascii() {
return Err(Error::new( return Err(Error::new(
color_eyre::eyre::eyre!("SSID may not have special characters"), color_eyre::eyre::eyre!("SSID may not have special characters"),
@@ -99,7 +114,7 @@ pub async fn connect(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<
} }
async fn connect_procedure( async fn connect_procedure(
mut db: impl DbHandle, mut db: impl DbHandle,
wifi_manager: Arc<RwLock<WpaCli>>, wifi_manager: WifiManager,
ssid: &Ssid, ssid: &Ssid,
) -> Result<(), Error> { ) -> Result<(), Error> {
let wpa_supplicant = wifi_manager.read().await; let wpa_supplicant = wifi_manager.read().await;
@@ -125,7 +140,7 @@ pub async fn connect(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<
if let Err(err) = connect_procedure( if let Err(err) = connect_procedure(
&mut ctx.db.handle(), &mut ctx.db.handle(),
ctx.wifi_manager.clone(), wifi_manager.clone(),
&Ssid(ssid.clone()), &Ssid(ssid.clone()),
) )
.await .await
@@ -142,20 +157,21 @@ pub async fn connect(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<
#[command(display(display_none))] #[command(display(display_none))]
#[instrument(skip(ctx))] #[instrument(skip(ctx))]
pub async fn delete(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<(), Error> { pub async fn delete(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<(), Error> {
let wifi_manager = wifi_manager(&ctx)?;
if !ssid.is_ascii() { if !ssid.is_ascii() {
return Err(Error::new( return Err(Error::new(
color_eyre::eyre::eyre!("SSID may not have special characters"), color_eyre::eyre::eyre!("SSID may not have special characters"),
ErrorKind::Wifi, ErrorKind::Wifi,
)); ));
} }
let wpa_supplicant = ctx.wifi_manager.read().await; let wpa_supplicant = wifi_manager.read().await;
let current = wpa_supplicant.get_current_network().await?; let current = wpa_supplicant.get_current_network().await?;
drop(wpa_supplicant); drop(wpa_supplicant);
let mut wpa_supplicant = ctx.wifi_manager.write().await; let mut wpa_supplicant = wifi_manager.write().await;
let ssid = Ssid(ssid); let ssid = Ssid(ssid);
let is_current_being_removed = matches!(current, Some(current) if current == ssid); let is_current_being_removed = matches!(current, Some(current) if current == ssid);
let is_current_removed_and_no_hardwire = let is_current_removed_and_no_hardwire =
is_current_being_removed && !interface_connected("eth0").await?; is_current_being_removed && !interface_connected(&ctx.ethernet_interface).await?;
if is_current_removed_and_no_hardwire { if is_current_removed_and_no_hardwire {
return Err(Error::new(color_eyre::eyre::eyre!("Forbidden: Deleting this Network would make your Embassy Unreachable. Either connect to ethernet or connect to a different WiFi network to remedy this."), ErrorKind::Wifi)); return Err(Error::new(color_eyre::eyre::eyre!("Forbidden: Deleting this Network would make your Embassy Unreachable. Either connect to ethernet or connect to a different WiFi network to remedy this."), ErrorKind::Wifi));
} }
@@ -284,12 +300,13 @@ pub async fn get(
#[arg(long = "format")] #[arg(long = "format")]
format: Option<IoFormat>, format: Option<IoFormat>,
) -> Result<WiFiInfo, Error> { ) -> Result<WiFiInfo, Error> {
let wpa_supplicant = ctx.wifi_manager.read().await; let wifi_manager = wifi_manager(&ctx)?;
let wpa_supplicant = wifi_manager.read().await;
let (list_networks, current_res, country_res, ethernet_res, signal_strengths) = tokio::join!( let (list_networks, current_res, country_res, ethernet_res, signal_strengths) = tokio::join!(
wpa_supplicant.list_networks_low(), wpa_supplicant.list_networks_low(),
wpa_supplicant.get_current_network(), wpa_supplicant.get_current_network(),
wpa_supplicant.get_country_low(), wpa_supplicant.get_country_low(),
interface_connected("eth0"), // TODO: pull from config interface_connected(&ctx.ethernet_interface),
wpa_supplicant.list_wifi_low() wpa_supplicant.list_wifi_low()
); );
let signal_strengths = signal_strengths?; let signal_strengths = signal_strengths?;
@@ -337,7 +354,8 @@ pub async fn get_available(
#[arg(long = "format")] #[arg(long = "format")]
format: Option<IoFormat>, format: Option<IoFormat>,
) -> Result<Vec<WifiListOut>, Error> { ) -> Result<Vec<WifiListOut>, Error> {
let wpa_supplicant = ctx.wifi_manager.read().await; let wifi_manager = wifi_manager(&ctx)?;
let wpa_supplicant = wifi_manager.read().await;
let (wifi_list, network_list) = tokio::join!( let (wifi_list, network_list) = tokio::join!(
wpa_supplicant.list_wifi_low(), wpa_supplicant.list_wifi_low(),
wpa_supplicant.list_networks_low() wpa_supplicant.list_networks_low()
@@ -365,13 +383,14 @@ pub async fn set_country(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[arg(parse(country_code_parse))] country: CountryCode, #[arg(parse(country_code_parse))] country: CountryCode,
) -> Result<(), Error> { ) -> Result<(), Error> {
if !interface_connected("eth0").await? { let wifi_manager = wifi_manager(&ctx)?;
if !interface_connected(&ctx.ethernet_interface).await? {
return Err(Error::new( return Err(Error::new(
color_eyre::eyre::eyre!("Won't change country without hardwire connection"), color_eyre::eyre::eyre!("Won't change country without hardwire connection"),
crate::ErrorKind::Wifi, crate::ErrorKind::Wifi,
)); ));
} }
let mut wpa_supplicant = ctx.wifi_manager.write().await; let mut wpa_supplicant = wifi_manager.write().await;
wpa_supplicant.set_country_low(country.alpha2()).await?; wpa_supplicant.set_country_low(country.alpha2()).await?;
for (network_id, _wifi_info) in wpa_supplicant.list_networks_low().await? { for (network_id, _wifi_info) in wpa_supplicant.list_networks_low().await? {
wpa_supplicant.remove_network_low(network_id).await?; wpa_supplicant.remove_network_low(network_id).await?;
@@ -776,6 +795,7 @@ pub fn country_code_parse(code: &str, _matches: &ArgMatches) -> Result<CountryCo
#[instrument(skip(main_datadir))] #[instrument(skip(main_datadir))]
pub async fn synchronize_wpa_supplicant_conf<P: AsRef<Path>>( pub async fn synchronize_wpa_supplicant_conf<P: AsRef<Path>>(
main_datadir: P, main_datadir: P,
wifi_iface: &str,
last_country_code: &Option<CountryCode>, last_country_code: &Option<CountryCode>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let persistent = main_datadir.as_ref().join("system-connections"); let persistent = main_datadir.as_ref().join("system-connections");
@@ -797,7 +817,7 @@ pub async fn synchronize_wpa_supplicant_conf<P: AsRef<Path>>(
.invoke(ErrorKind::Wifi) .invoke(ErrorKind::Wifi)
.await?; .await?;
Command::new("ifconfig") Command::new("ifconfig")
.arg("wlan0") .arg(wifi_iface)
.arg("up") .arg("up")
.invoke(ErrorKind::Wifi) .invoke(ErrorKind::Wifi)
.await?; .await?;

View File

@@ -0,0 +1,95 @@
use std::borrow::Cow;
use std::collections::BTreeSet;
use std::io::SeekFrom;
use std::path::Path;
use color_eyre::eyre::eyre;
use futures::{FutureExt, TryStreamExt};
use serde::{Deserialize, Serialize};
use tokio::io::{AsyncRead, AsyncSeek, AsyncSeekExt};
use tokio_tar::{Archive, Entry};
use crate::util::io::from_cbor_async_reader;
use crate::{Error, ErrorKind, ARCH};
/// Manifest describing a multi-architecture docker-images archive.
///
/// Stored as CBOR in the `multiarch.cbor` entry at the root of the
/// docker-images tarball; field names are kebab-cased on the wire.
#[derive(Default, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct DockerMultiArch {
    // Architecture to fall back to when the host architecture has no image.
    pub default: String,
    // Architectures for which an `<arch>.tar` image entry exists in the archive.
    pub available: BTreeSet<String>,
}
/// Reader over the docker-images section of an s9pk, abstracting over the
/// legacy single-architecture layout and the newer multi-arch layout.
///
/// Construct via `DockerReader::new`, which inspects the section and picks
/// the appropriate variant.
#[pin_project::pin_project(project = DockerReaderProject)]
#[derive(Debug)]
pub enum DockerReader<R: AsyncRead + Unpin> {
    // The section is itself a plain docker image tarball; read it through.
    SingleArch(#[pin] R),
    // The section is a wrapping tar archive; this is the selected
    // per-architecture `<arch>.tar` entry within it.
    MultiArch(#[pin] Entry<Archive<R>>),
}
impl<R: AsyncRead + AsyncSeek + Unpin + Send + Sync> DockerReader<R> {
    /// Wraps `rdr` (the docker-images section of an s9pk) so that reading it
    /// yields a single-architecture docker image tarball.
    ///
    /// Two layouts are supported:
    /// - Legacy single-arch: the section contains no `multiarch.cbor` entry;
    ///   the raw reader is returned unchanged as `SingleArch`.
    /// - Multi-arch: the section is a tar archive holding a `multiarch.cbor`
    ///   manifest (a `DockerMultiArch`) plus one `<arch>.tar` entry per
    ///   architecture. The entry matching `ARCH` is selected when listed as
    ///   available, otherwise the manifest's `default` architecture is used;
    ///   the chosen entry is returned as `MultiArch`.
    ///
    /// `AsyncSeek` is required because the archive is scanned once for the
    /// manifest and then rewound to locate the selected image entry.
    ///
    /// # Errors
    /// Returns an `ErrorKind::ParseS9pk` error when a multi-arch manifest is
    /// present but no tarball exists for the selected architecture; I/O and
    /// CBOR-decoding failures are propagated.
    pub async fn new(mut rdr: R) -> Result<Self, Error> {
        // First pass: look for a `multiarch.cbor` entry. Its presence is what
        // distinguishes the multi-arch layout from a plain image tarball.
        let arch = if let Some(multiarch) = tokio_tar::Archive::new(&mut rdr)
            .entries()?
            .try_filter_map(|e| {
                async move {
                    Ok(if &*e.path()? == Path::new("multiarch.cbor") {
                        Some(e)
                    } else {
                        None
                    })
                }
                .boxed()
            })
            .try_next()
            .await?
        {
            let multiarch: DockerMultiArch = from_cbor_async_reader(multiarch).await?;
            // Prefer ARCH (presumably the running platform's architecture —
            // defined elsewhere in the crate) when the package ships an image
            // for it; otherwise fall back to the package's declared default.
            Some(if multiarch.available.contains(&**ARCH) {
                Cow::Borrowed(&**ARCH)
            } else {
                Cow::Owned(multiarch.default)
            })
        } else {
            None
        };
        // Rewind: the manifest scan above consumed the reader, and the image
        // entry may precede the manifest in the archive.
        rdr.seek(SeekFrom::Start(0)).await?;
        if let Some(arch) = arch {
            // Second pass: find the `<arch>.tar` entry for the chosen
            // architecture and hand it back as the image stream.
            if let Some(image) = tokio_tar::Archive::new(rdr)
                .entries()?
                .try_filter_map(|e| {
                    // Clone so the borrow isn't held across the async block.
                    let arch = arch.clone();
                    async move {
                        Ok(if &*e.path()? == Path::new(&format!("{}.tar", arch)) {
                            Some(e)
                        } else {
                            None
                        })
                    }
                    .boxed()
                })
                .try_next()
                .await?
            {
                Ok(Self::MultiArch(image))
            } else {
                Err(Error::new(
                    eyre!("Docker image section does not contain tarball for architecture"),
                    ErrorKind::ParseS9pk,
                ))
            }
        } else {
            // No manifest: treat the entire section as a single-arch image.
            Ok(Self::SingleArch(rdr))
        }
    }
}
impl<R: AsyncRead + Unpin + Send + Sync> AsyncRead for DockerReader<R> {
    /// Delegates reads to whichever variant is active: the raw section reader
    /// for single-arch images, or the selected tar entry for multi-arch ones.
    fn poll_read(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        buf: &mut tokio::io::ReadBuf<'_>,
    ) -> std::task::Poll<std::io::Result<()>> {
        // Pin-project to get pinned references to the inner readers.
        match self.project() {
            DockerReaderProject::SingleArch(r) => r.poll_read(cx, buf),
            DockerReaderProject::MultiArch(r) => r.poll_read(cx, buf),
        }
    }
}

View File

@@ -6,18 +6,19 @@ use patch_db::HasModel;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use url::Url; use url::Url;
use crate::action::Actions;
use crate::backup::BackupActions; use crate::backup::BackupActions;
use crate::config::action::ConfigActions; use crate::config::action::ConfigActions;
use crate::dependencies::Dependencies; use crate::dependencies::Dependencies;
use crate::migration::Migrations; use crate::migration::Migrations;
use crate::net::interface::Interfaces; use crate::net::interface::Interfaces;
use crate::procedure::docker::DockerContainer;
use crate::procedure::PackageProcedure; use crate::procedure::PackageProcedure;
use crate::status::health_check::HealthChecks; use crate::status::health_check::HealthChecks;
use crate::util::Version; use crate::util::Version;
use crate::version::{Current, VersionT}; use crate::version::{Current, VersionT};
use crate::volume::Volumes; use crate::volume::Volumes;
use crate::Error; use crate::Error;
use crate::{action::Actions, procedure::docker::DockerContainer};
fn current_version() -> Version { fn current_version() -> Version {
Current::new().semver().into() Current::new().semver().into()
@@ -143,7 +144,7 @@ impl Assets {
self.docker_images self.docker_images
.as_ref() .as_ref()
.map(|a| a.as_path()) .map(|a| a.as_path())
.unwrap_or(Path::new("image.tar")) .unwrap_or(Path::new("docker-images"))
} }
pub fn assets_path(&self) -> &Path { pub fn assets_path(&self) -> &Path {
self.assets self.assets

View File

@@ -1,22 +1,27 @@
use std::ffi::OsStr;
use std::path::PathBuf; use std::path::PathBuf;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use futures::TryStreamExt;
use imbl::OrdMap; use imbl::OrdMap;
use patch_db::{LockReceipt, LockType};
use rpc_toolkit::command; use rpc_toolkit::command;
use serde_json::Value; use serde_json::Value;
use tokio::io::AsyncRead;
use tracing::instrument; use tracing::instrument;
use crate::context::SdkContext;
use crate::s9pk::builder::S9pkPacker; use crate::s9pk::builder::S9pkPacker;
use crate::s9pk::docker::DockerMultiArch;
use crate::s9pk::manifest::Manifest; use crate::s9pk::manifest::Manifest;
use crate::s9pk::reader::S9pkReader; use crate::s9pk::reader::S9pkReader;
use crate::util::display_none; use crate::util::display_none;
use crate::util::io::BufferedWriteReader;
use crate::util::serde::IoFormat; use crate::util::serde::IoFormat;
use crate::volume::Volume; use crate::volume::Volume;
use crate::{context::SdkContext, procedure::docker::DockerContainer};
use crate::{Error, ErrorKind, ResultExt}; use crate::{Error, ErrorKind, ResultExt};
pub mod builder; pub mod builder;
pub mod docker;
pub mod header; pub mod header;
pub mod manifest; pub mod manifest;
pub mod reader; pub mod reader;
@@ -94,33 +99,73 @@ pub async fn pack(#[context] ctx: SdkContext, #[arg] path: Option<PathBuf>) -> R
) )
})?, })?,
) )
.docker_images( .docker_images({
File::open(path.join(manifest.assets.docker_images_path())) let docker_images_path = path.join(manifest.assets.docker_images_path());
.await let res: Box<dyn AsyncRead + Unpin + Send + Sync> = if tokio::fs::metadata(&docker_images_path).await?.is_dir() {
.with_ctx(|_| { let tars: Vec<_> = tokio_stream::wrappers::ReadDirStream::new(tokio::fs::read_dir(&docker_images_path).await?).try_collect().await?;
( let mut arch_info = DockerMultiArch::default();
crate::ErrorKind::Filesystem, for tar in &tars {
manifest.assets.docker_images_path().display().to_string(), if tar.path().extension() == Some(OsStr::new("tar")) {
) arch_info.available.insert(tar.path().file_stem().unwrap_or_default().to_str().unwrap_or_default().to_owned());
})?, }
) }
if arch_info.available.contains("aarch64") {
arch_info.default = "aarch64".to_owned();
} else {
arch_info.default = arch_info.available.iter().next().cloned().unwrap_or_default();
}
let arch_info_cbor = IoFormat::Cbor.to_vec(&arch_info)?;
Box::new(BufferedWriteReader::new(|w| async move {
let mut docker_images = tokio_tar::Builder::new(w);
let mut multiarch_header = tokio_tar::Header::new_gnu();
multiarch_header.set_path("multiarch.cbor")?;
multiarch_header.set_size(arch_info_cbor.len() as u64);
multiarch_header.set_cksum();
docker_images.append(&multiarch_header, std::io::Cursor::new(arch_info_cbor)).await?;
for tar in tars
{
docker_images
.append_path_with_name(
tar.path(),
tar.file_name(),
)
.await?;
}
Ok::<_, std::io::Error>(())
}, 1024 * 1024))
} else {
Box::new(File::open(docker_images_path)
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
manifest.assets.docker_images_path().display().to_string(),
)
})?)
};
res
})
.assets({ .assets({
let mut assets = tokio_tar::Builder::new(Vec::new()); // TODO: Ideally stream this? best not to buffer in memory let asset_volumes = manifest
.volumes
.iter()
.filter(|(_, v)| matches!(v, &&Volume::Assets {})).map(|(id, _)| id.clone()).collect::<Vec<_>>();
let assets_path = manifest.assets.assets_path().to_owned();
let path = path.clone();
for (asset_volume, _) in manifest BufferedWriteReader::new(|w| async move {
.volumes let mut assets = tokio_tar::Builder::new(w);
.iter() for asset_volume in asset_volumes
.filter(|(_, v)| matches!(v, &&Volume::Assets {})) {
{ assets
assets .append_dir_all(
.append_dir_all( &asset_volume,
asset_volume, path.join(&assets_path).join(&asset_volume),
path.join(manifest.assets.assets_path()).join(asset_volume), )
) .await?;
.await?; }
} Ok::<_, std::io::Error>(())
}, 1024 * 1024)
std::io::Cursor::new(assets.into_inner().await?)
}) })
.scripts({ .scripts({
let script_path = path.join(manifest.assets.scripts_path()).join("embassy.js"); let script_path = path.join(manifest.assets.scripts_path()).join("embassy.js");

View File

@@ -1,5 +1,6 @@
use std::collections::BTreeSet; use std::collections::BTreeSet;
use std::io::SeekFrom; use std::io::SeekFrom;
use std::ops::Range;
use std::path::Path; use std::path::Path;
use std::pin::Pin; use std::pin::Pin;
use std::str::FromStr; use std::str::FromStr;
@@ -11,44 +12,74 @@ use ed25519_dalek::PublicKey;
use futures::TryStreamExt; use futures::TryStreamExt;
use sha2_old::{Digest, Sha512}; use sha2_old::{Digest, Sha512};
use tokio::fs::File; use tokio::fs::File;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, ReadBuf, Take}; use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, ReadBuf};
use tracing::instrument; use tracing::instrument;
use super::header::{FileSection, Header, TableOfContents}; use super::header::{FileSection, Header, TableOfContents};
use super::manifest::{Manifest, PackageId}; use super::manifest::{Manifest, PackageId};
use super::SIG_CONTEXT; use super::SIG_CONTEXT;
use crate::id::ImageId;
use crate::install::progress::InstallProgressTracker; use crate::install::progress::InstallProgressTracker;
use crate::s9pk::docker::DockerReader;
use crate::util::Version; use crate::util::Version;
use crate::{id::ImageId, procedure::docker::DockerContainer};
use crate::{Error, ResultExt}; use crate::{Error, ResultExt};
#[pin_project::pin_project] #[pin_project::pin_project]
pub struct ReadHandle<'a, R: AsyncRead + AsyncSeek + Unpin = File> { #[derive(Debug)]
pub struct ReadHandle<'a, R = File> {
pos: &'a mut u64, pos: &'a mut u64,
range: Range<u64>,
#[pin] #[pin]
rdr: Take<&'a mut R>, rdr: &'a mut R,
} }
impl<'a, R: AsyncRead + AsyncSeek + Unpin> ReadHandle<'a, R> { impl<'a, R: AsyncRead + Unpin> ReadHandle<'a, R> {
pub async fn to_vec(mut self) -> std::io::Result<Vec<u8>> { pub async fn to_vec(mut self) -> std::io::Result<Vec<u8>> {
let mut buf = vec![0; self.rdr.limit() as usize]; let mut buf = vec![0; (self.range.end - self.range.start) as usize];
self.read_exact(&mut buf).await?; self.read_exact(&mut buf).await?;
Ok(buf) Ok(buf)
} }
} }
impl<'a, R: AsyncRead + AsyncSeek + Unpin> AsyncRead for ReadHandle<'a, R> { impl<'a, R: AsyncRead + Unpin> AsyncRead for ReadHandle<'a, R> {
fn poll_read( fn poll_read(
self: Pin<&mut Self>, self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut Context<'_>,
buf: &mut ReadBuf<'_>, buf: &mut ReadBuf<'_>,
) -> Poll<std::io::Result<()>> { ) -> Poll<std::io::Result<()>> {
let start = buf.filled().len();
let this = self.project(); let this = self.project();
let pos = this.pos; let start = buf.filled().len();
let res = AsyncRead::poll_read(this.rdr, cx, buf); let mut take_buf = buf.take(this.range.end.saturating_sub(**this.pos) as usize);
**pos += (buf.filled().len() - start) as u64; let res = AsyncRead::poll_read(this.rdr, cx, &mut take_buf);
let n = take_buf.filled().len();
unsafe { buf.assume_init(start + n) };
buf.advance(n);
**this.pos += n as u64;
res res
} }
} }
impl<'a, R: AsyncSeek + Unpin> AsyncSeek for ReadHandle<'a, R> {
fn start_seek(self: Pin<&mut Self>, position: SeekFrom) -> std::io::Result<()> {
let this = self.project();
AsyncSeek::start_seek(
this.rdr,
match position {
SeekFrom::Current(n) => SeekFrom::Current(n),
SeekFrom::End(n) => SeekFrom::Start((this.range.end as i64 + n) as u64),
SeekFrom::Start(n) => SeekFrom::Start(this.range.start + n),
},
)
}
fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<u64>> {
let this = self.project();
match AsyncSeek::poll_complete(this.rdr, cx) {
Poll::Ready(Ok(n)) => {
let res = n.saturating_sub(this.range.start);
**this.pos = this.range.start + res;
Poll::Ready(Ok(res))
}
a => a,
}
}
}
#[derive(Debug)] #[derive(Debug)]
pub struct ImageTag { pub struct ImageTag {
@@ -110,7 +141,7 @@ impl FromStr for ImageTag {
} }
} }
pub struct S9pkReader<R: AsyncRead + AsyncSeek + Unpin = File> { pub struct S9pkReader<R: AsyncRead + AsyncSeek + Unpin + Send + Sync = File> {
hash: Option<Output<Sha512>>, hash: Option<Output<Sha512>>,
hash_string: Option<String>, hash_string: Option<String>,
developer_key: PublicKey, developer_key: PublicKey,
@@ -128,12 +159,12 @@ impl S9pkReader {
Self::from_reader(rdr, check_sig).await Self::from_reader(rdr, check_sig).await
} }
} }
impl<R: AsyncRead + AsyncSeek + Unpin> S9pkReader<InstallProgressTracker<R>> { impl<R: AsyncRead + AsyncSeek + Unpin + Send + Sync> S9pkReader<InstallProgressTracker<R>> {
pub fn validated(&mut self) { pub fn validated(&mut self) {
self.rdr.validated() self.rdr.validated()
} }
} }
impl<R: AsyncRead + AsyncSeek + Unpin> S9pkReader<R> { impl<R: AsyncRead + AsyncSeek + Unpin + Send + Sync> S9pkReader<R> {
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn validate(&mut self) -> Result<(), Error> { pub async fn validate(&mut self) -> Result<(), Error> {
if self.toc.icon.length > 102_400 { if self.toc.icon.length > 102_400 {
@@ -309,8 +340,9 @@ impl<R: AsyncRead + AsyncSeek + Unpin> S9pkReader<R> {
self.pos = section.position; self.pos = section.position;
} }
Ok(ReadHandle { Ok(ReadHandle {
range: self.pos..(self.pos + section.length),
pos: &mut self.pos, pos: &mut self.pos,
rdr: (&mut self.rdr).take(section.length), rdr: &mut self.rdr,
}) })
} }
@@ -336,8 +368,8 @@ impl<R: AsyncRead + AsyncSeek + Unpin> S9pkReader<R> {
Ok(self.read_handle(self.toc.icon).await?) Ok(self.read_handle(self.toc.icon).await?)
} }
pub async fn docker_images<'a>(&'a mut self) -> Result<ReadHandle<'a, R>, Error> { pub async fn docker_images<'a>(&'a mut self) -> Result<DockerReader<ReadHandle<'a, R>>, Error> {
Ok(self.read_handle(self.toc.docker_images).await?) DockerReader::new(self.read_handle(self.toc.docker_images).await?).await
} }
pub async fn assets<'a>(&'a mut self) -> Result<ReadHandle<'a, R>, Error> { pub async fn assets<'a>(&'a mut self) -> Result<ReadHandle<'a, R>, Error> {

View File

@@ -88,8 +88,8 @@ pub fn disk() -> Result<(), Error> {
} }
#[command(rename = "list", rpc_only, metadata(authenticated = false))] #[command(rename = "list", rpc_only, metadata(authenticated = false))]
pub async fn list_disks() -> Result<Vec<DiskInfo>, Error> { pub async fn list_disks(#[context] ctx: SetupContext) -> Result<Vec<DiskInfo>, Error> {
crate::disk::list(None).await crate::disk::util::list(&ctx.os_partitions).await
} }
#[command(rpc_only)] #[command(rpc_only)]
@@ -135,7 +135,7 @@ pub async fn attach(
ErrorKind::DiskManagement, ErrorKind::DiskManagement,
)); ));
} }
init(&RpcContextConfig::load(ctx.config_path.as_ref()).await?).await?; init(&RpcContextConfig::load(ctx.config_path.clone()).await?).await?;
let secrets = ctx.secret_store().await?; let secrets = ctx.secret_store().await?;
let db = ctx.db(&secrets).await?; let db = ctx.db(&secrets).await?;
let mut secrets_handle = secrets.acquire().await?; let mut secrets_handle = secrets.acquire().await?;
@@ -331,7 +331,7 @@ pub async fn complete(#[context] ctx: SetupContext) -> Result<SetupResult, Error
si.lan_address() si.lan_address()
.put(&mut db, &hostname.lan_address().parse().unwrap()) .put(&mut db, &hostname.lan_address().parse().unwrap())
.await?; .await?;
let mut guid_file = File::create("/embassy-os/disk.guid").await?; let mut guid_file = File::create("/media/embassy/config/disk.guid").await?;
guid_file.write_all(guid.as_bytes()).await?; guid_file.write_all(guid.as_bytes()).await?;
guid_file.sync_all().await?; guid_file.sync_all().await?;
ctx.shutdown.send(()).expect("failed to shutdown"); ctx.shutdown.send(()).expect("failed to shutdown");
@@ -378,7 +378,7 @@ pub async fn execute_inner(
recovery_password, recovery_password,
) )
.await?; .await?;
let db = init(&RpcContextConfig::load(ctx.config_path.as_ref()).await?) let db = init(&RpcContextConfig::load(ctx.config_path.clone()).await?)
.await? .await?
.db; .db;
let hostname = { let hostname = {
@@ -416,7 +416,7 @@ pub async fn execute_inner(
res res
} else { } else {
let (tor_addr, root_ca) = fresh_setup(&ctx, &embassy_password).await?; let (tor_addr, root_ca) = fresh_setup(&ctx, &embassy_password).await?;
let db = init(&RpcContextConfig::load(ctx.config_path.as_ref()).await?) let db = init(&RpcContextConfig::load(ctx.config_path.clone()).await?)
.await? .await?
.db; .db;
let mut handle = db.handle(); let mut handle = db.handle();

View File

@@ -4,9 +4,10 @@ use std::time::{Duration, Instant};
use divrem::DivRem; use divrem::DivRem;
use proptest_derive::Arbitrary; use proptest_derive::Arbitrary;
use tokio::process::Command;
use tracing::instrument; use tracing::instrument;
use crate::util::FileLock; use crate::util::{FileLock, Invoke};
use crate::{Error, ErrorKind, ResultExt}; use crate::{Error, ErrorKind, ResultExt};
lazy_static::lazy_static! { lazy_static::lazy_static! {
@@ -22,31 +23,49 @@ lazy_static::lazy_static! {
pub const SOUND_LOCK_FILE: &'static str = "/etc/embassy/sound.lock"; pub const SOUND_LOCK_FILE: &'static str = "/etc/embassy/sound.lock";
struct SoundInterface(Option<FileLock>); struct SoundInterface {
use_beep: bool,
guard: Option<FileLock>,
}
impl SoundInterface { impl SoundInterface {
#[instrument] #[instrument]
pub async fn lease() -> Result<Self, Error> { pub async fn lease() -> Result<Self, Error> {
let guard = FileLock::new(SOUND_LOCK_FILE, true).await?; let guard = FileLock::new(SOUND_LOCK_FILE, true).await?;
tokio::fs::write(&*EXPORT_FILE, "0") if Command::new("which")
.arg("beep")
.invoke(ErrorKind::NotFound)
.await .await
.or_else(|e| { .is_ok()
if e.raw_os_error() == Some(16) {
Ok(())
} else {
Err(e)
}
})
.with_ctx(|_| (ErrorKind::SoundError, EXPORT_FILE.to_string_lossy()))?;
let instant = Instant::now();
while tokio::fs::metadata(&*PERIOD_FILE).await.is_err()
&& instant.elapsed() < Duration::from_secs(1)
{ {
tokio::time::sleep(Duration::from_millis(1)).await; Ok(SoundInterface {
use_beep: true,
guard: Some(guard),
})
} else {
tokio::fs::write(&*EXPORT_FILE, "0")
.await
.or_else(|e| {
if e.raw_os_error() == Some(16) {
Ok(())
} else {
Err(e)
}
})
.with_ctx(|_| (ErrorKind::SoundError, EXPORT_FILE.to_string_lossy()))?;
let instant = Instant::now();
while tokio::fs::metadata(&*PERIOD_FILE).await.is_err()
&& instant.elapsed() < Duration::from_secs(1)
{
tokio::time::sleep(Duration::from_millis(1)).await;
}
Ok(SoundInterface {
use_beep: false,
guard: Some(guard),
})
} }
Ok(SoundInterface(Some(guard)))
} }
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn play(&mut self, note: &Note) -> Result<(), Error> { async fn play_pwm(&mut self, note: &Note) -> Result<(), Error> {
let curr_period = tokio::fs::read_to_string(&*PERIOD_FILE) let curr_period = tokio::fs::read_to_string(&*PERIOD_FILE)
.await .await
.with_ctx(|_| (ErrorKind::SoundError, PERIOD_FILE.to_string_lossy()))?; .with_ctx(|_| (ErrorKind::SoundError, PERIOD_FILE.to_string_lossy()))?;
@@ -71,41 +90,50 @@ impl SoundInterface {
Ok(()) Ok(())
} }
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn play_for_time_slice( async fn stop_pwm(&mut self) -> Result<(), Error> {
&mut self,
tempo_qpm: u16,
note: &Note,
time_slice: &TimeSlice,
) -> Result<(), Error> {
if let Err(e) = async {
self.play(note).await?;
tokio::time::sleep(time_slice.to_duration(tempo_qpm) * 19 / 20).await;
self.stop().await?;
tokio::time::sleep(time_slice.to_duration(tempo_qpm) / 20).await;
Ok::<_, Error>(())
}
.await
{
// we could catch this error and propagate but I'd much prefer the original error bubble up
let _mute = self.stop().await;
Err(e)
} else {
Ok(())
}
}
#[instrument(skip(self))]
pub async fn stop(&mut self) -> Result<(), Error> {
tokio::fs::write(&*SWITCH_FILE, "0") tokio::fs::write(&*SWITCH_FILE, "0")
.await .await
.with_ctx(|_| (ErrorKind::SoundError, SWITCH_FILE.to_string_lossy())) .with_ctx(|_| (ErrorKind::SoundError, SWITCH_FILE.to_string_lossy()))
} }
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn close(mut self) -> Result<(), Error> { pub async fn close(mut self) -> Result<(), Error> {
if let Some(lock) = self.0.take() { if let Some(lock) = self.guard.take() {
lock.unlock().await?; lock.unlock().await?;
} }
Ok(()) Ok(())
} }
#[instrument(skip(self))]
pub async fn play_for_time_slice(
&mut self,
tempo_qpm: u16,
note: &Note,
time_slice: &TimeSlice,
) -> Result<(), Error> {
if self.use_beep {
Command::new("beep")
.arg("-f")
.arg(note.frequency().to_string())
.arg("-l")
.arg(time_slice.to_duration(tempo_qpm).as_millis().to_string())
.invoke(ErrorKind::SoundError)
.await?;
} else {
if let Err(e) = async {
self.play_pwm(note).await?;
tokio::time::sleep(time_slice.to_duration(tempo_qpm) * 19 / 20).await;
self.stop_pwm().await?;
tokio::time::sleep(time_slice.to_duration(tempo_qpm) / 20).await;
Ok::<_, Error>(())
}
.await
{
// we could catch this error and propagate but I'd much prefer the original error bubble up
let _mute = self.stop_pwm().await;
return Err(e);
}
}
Ok(())
}
} }
pub struct Song<Notes> { pub struct Song<Notes> {
@@ -139,7 +167,7 @@ where
impl Drop for SoundInterface { impl Drop for SoundInterface {
fn drop(&mut self) { fn drop(&mut self) {
let guard = self.0.take(); let guard = self.guard.take();
tokio::spawn(async move { tokio::spawn(async move {
if let Err(e) = tokio::fs::write(&*UNEXPORT_FILE, "0").await { if let Err(e) = tokio::fs::write(&*UNEXPORT_FILE, "0").await {
tracing::error!("Failed to Unexport Sound Interface: {}", e); tracing::error!("Failed to Unexport Sound Interface: {}", e);

View File

@@ -5,13 +5,14 @@ pub use models::HealthCheckId;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use tracing::instrument; use tracing::instrument;
use crate::context::RpcContext;
use crate::id::ImageId; use crate::id::ImageId;
use crate::procedure::docker::DockerContainer;
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName}; use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;
use crate::util::serde::Duration; use crate::util::serde::Duration;
use crate::util::Version; use crate::util::Version;
use crate::volume::Volumes; use crate::volume::Volumes;
use crate::{context::RpcContext, procedure::docker::DockerContainer};
use crate::{Error, ResultExt}; use crate::{Error, ResultExt};
#[derive(Clone, Debug, Deserialize, Serialize)] #[derive(Clone, Debug, Deserialize, Serialize)]

View File

@@ -1,38 +1,31 @@
use std::future::Future;
use std::path::Path;
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration;
use clap::ArgMatches; use clap::ArgMatches;
use color_eyre::eyre::{eyre, Result}; use color_eyre::eyre::{eyre, Result};
use digest::Digest;
use emver::Version; use emver::Version;
use futures::Stream;
use lazy_static::lazy_static; use lazy_static::lazy_static;
use patch_db::{DbHandle, LockType, Revision}; use patch_db::{DbHandle, LockType, Revision};
use regex::Regex;
use reqwest::Url; use reqwest::Url;
use rpc_toolkit::command; use rpc_toolkit::command;
use sha2::Sha256;
use tokio::io::AsyncWriteExt;
use tokio::pin;
use tokio::process::Command; use tokio::process::Command;
use tokio::time::Instant;
use tokio_stream::StreamExt; use tokio_stream::StreamExt;
use tracing::instrument; use tracing::instrument;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::db::model::UpdateProgress; use crate::db::model::UpdateProgress;
use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::block_dev::BlockDev; use crate::disk::mount::filesystem::block_dev::BlockDev;
use crate::disk::mount::filesystem::{FileSystem, ReadWrite}; use crate::disk::mount::filesystem::httpdirfs::HttpDirFS;
use crate::disk::mount::guard::TmpMountGuard; use crate::disk::mount::filesystem::ReadOnly;
use crate::disk::BOOT_RW_PATH; use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::{MountGuard, TmpMountGuard};
use crate::notifications::NotificationLevel; use crate::notifications::NotificationLevel;
use crate::sound::{ use crate::sound::{
CIRCLE_OF_5THS_SHORT, UPDATE_FAILED_1, UPDATE_FAILED_2, UPDATE_FAILED_3, UPDATE_FAILED_4, CIRCLE_OF_5THS_SHORT, UPDATE_FAILED_1, UPDATE_FAILED_2, UPDATE_FAILED_3, UPDATE_FAILED_4,
}; };
use crate::update::latest_information::LatestInformation; use crate::update::latest_information::LatestInformation;
use crate::util::rsync::{Rsync, RsyncOptions};
use crate::util::Invoke; use crate::util::Invoke;
use crate::version::{Current, VersionT}; use crate::version::{Current, VersionT};
use crate::{Error, ErrorKind, ResultExt}; use crate::{Error, ErrorKind, ResultExt};
@@ -84,48 +77,6 @@ fn display_update_result(status: UpdateResult, _: &ArgMatches) {
} }
} }
const HEADER_KEY: &str = "x-eos-hash";
#[derive(Debug, Clone, Copy)]
pub enum WritableDrives {
Green,
Blue,
}
impl WritableDrives {
pub fn label(&self) -> &'static str {
match self {
Self::Green => "green",
Self::Blue => "blue",
}
}
pub fn block_dev(&self) -> &'static Path {
Path::new(match self {
Self::Green => "/dev/mmcblk0p3",
Self::Blue => "/dev/mmcblk0p4",
})
}
pub fn part_uuid(&self) -> &'static str {
match self {
Self::Green => "cb15ae4d-03",
Self::Blue => "cb15ae4d-04",
}
}
pub fn as_fs(&self) -> impl FileSystem {
BlockDev::new(self.block_dev())
}
}
/// This will be where we are going to be putting the new update
#[derive(Debug, Clone, Copy)]
pub struct NewLabel(pub WritableDrives);
/// This is our current label where the os is running
pub struct CurrentLabel(pub WritableDrives);
lazy_static! {
static ref PARSE_COLOR: Regex = Regex::new("LABEL=(\\w+)[ \t]+/").unwrap();
}
#[instrument(skip(ctx))] #[instrument(skip(ctx))]
async fn maybe_do_update( async fn maybe_do_update(
ctx: RpcContext, ctx: RpcContext,
@@ -172,26 +123,41 @@ async fn maybe_do_update(
return Ok(None); return Ok(None);
} }
let (new_label, _current_label) = query_mounted_label().await?; // mount httpdirfs
let (size, download) = download_file( // losetup remote fs
ctx.db.handle(), // BEGIN TASK
&EosUrl { // rsync fs
base: marketplace_url, // validate (hash) fs
version: latest_version.clone(), // kernel update?
}, // swap selected fs
new_label, let new_block_dev = TmpMountGuard::mount(
&HttpDirFS::new(
EosUrl {
base: marketplace_url,
version: latest_version,
}
.to_string()
.parse()?,
),
ReadOnly,
) )
.await?; .await?;
let new_fs = TmpMountGuard::mount(
&BlockDev::new(new_block_dev.as_ref().join("eos.img")),
ReadOnly,
)
.await?;
status.update_progress = Some(UpdateProgress { status.update_progress = Some(UpdateProgress {
size, size: Some(100),
downloaded: 0, downloaded: 0,
}); });
status.save(&mut tx).await?; status.save(&mut tx).await?;
let rev = tx.commit().await?; let rev = tx.commit().await?;
tokio::spawn(async move { tokio::spawn(async move {
let res = do_update(ctx.clone(), new_fs, new_block_dev).await;
let mut db = ctx.db.handle(); let mut db = ctx.db.handle();
let res = do_update(download, new_label).await;
let mut status = crate::db::DatabaseModel::new() let mut status = crate::db::DatabaseModel::new()
.server_info() .server_info()
.status_info() .status_info()
@@ -245,49 +211,44 @@ async fn maybe_do_update(
Ok(rev) Ok(rev)
} }
#[instrument(skip(download))] #[instrument(skip(ctx, new_fs, new_block_dev))]
async fn do_update( async fn do_update(
download: impl Future<Output = Result<(), Error>>, ctx: RpcContext,
new_label: NewLabel, new_fs: TmpMountGuard,
new_block_dev: TmpMountGuard,
) -> Result<(), Error> { ) -> Result<(), Error> {
download.await?; let mut rsync = Rsync::new(
copy_machine_id(new_label).await?; new_fs.as_ref().join(""),
copy_ssh_host_keys(new_label).await?; "/media/embassy/next",
swap_boot_label(new_label).await?; Default::default(),
)?;
while let Some(progress) = rsync.progress.next().await {
crate::db::DatabaseModel::new()
.server_info()
.status_info()
.update_progress()
.put(
&mut ctx.db.handle(),
&UpdateProgress {
size: Some(100),
downloaded: (100.0 * progress) as u64,
},
)
.await?;
}
rsync.wait().await?;
new_fs.unmount().await?;
new_block_dev.unmount().await?;
copy_fstab().await?;
copy_machine_id().await?;
copy_ssh_host_keys().await?;
sync_boot().await?;
swap_boot_label().await?;
Ok(()) Ok(())
} }
/// Determine from `/etc/fstab` which color-labelled partition is currently
/// mounted as root, and therefore which one is the target for the next update.
///
/// Returns `(NewLabel, CurrentLabel)`: if `green` is the running root, `blue`
/// is the update target, and vice versa.
///
/// # Errors
/// Fails if `/etc/fstab` cannot be read, if no `LABEL=<name> /` root line is
/// found, or if the captured label is neither `green` nor `blue`.
#[instrument]
pub async fn query_mounted_label() -> Result<(NewLabel, CurrentLabel), Error> {
    let output = tokio::fs::read_to_string("/etc/fstab")
        .await
        .with_ctx(|_| (crate::ErrorKind::Filesystem, "/etc/fstab"))?;
    // Capture group 1 of PARSE_COLOR is the label of the filesystem at `/`.
    match &PARSE_COLOR.captures(&output).ok_or_else(|| {
        Error::new(
            eyre!("Can't find pattern in {}", output),
            crate::ErrorKind::Filesystem,
        )
    })?[1]
    {
        x if x == WritableDrives::Green.label() => Ok((
            NewLabel(WritableDrives::Blue),
            CurrentLabel(WritableDrives::Green),
        )),
        x if x == WritableDrives::Blue.label() => Ok((
            NewLabel(WritableDrives::Green),
            CurrentLabel(WritableDrives::Blue),
        )),
        // Any other label means we cannot tell which slot is active.
        e => {
            return Err(Error::new(
                eyre!("Could not find a mounted resource for {}", e),
                crate::ErrorKind::Filesystem,
            ))
        }
    }
}
#[derive(Debug)] #[derive(Debug)]
struct EosUrl { struct EosUrl {
base: Url, base: Url,
@@ -306,187 +267,84 @@ impl std::fmt::Display for EosUrl {
} }
} }
#[instrument(skip(db))] async fn copy_fstab() -> Result<(), Error> {
async fn download_file<'a, Db: DbHandle + 'a>( tokio::fs::copy("/etc/fstab", "/media/embassy/next/etc/fstab").await?;
mut db: Db,
eos_url: &EosUrl,
new_label: NewLabel,
) -> Result<(Option<u64>, impl Future<Output = Result<(), Error>> + 'a), Error> {
let download_request = reqwest::get(eos_url.to_string())
.await
.with_kind(ErrorKind::Network)?;
let size = download_request
.headers()
.get("content-length")
.and_then(|a| a.to_str().ok())
.map(|l| l.parse())
.transpose()?;
Ok((size, async move {
let hash_from_header: String = download_request
.headers()
.get(HEADER_KEY)
.ok_or_else(|| Error::new(eyre!("No {} in headers", HEADER_KEY), ErrorKind::Network))?
.to_str()
.with_kind(ErrorKind::InvalidRequest)?
.to_owned();
let stream_download = download_request.bytes_stream();
let file_sum = write_stream_to_label(&mut db, size, stream_download, new_label).await?;
check_download(&hash_from_header, file_sum).await?;
Ok(())
}))
}
/// Stream the downloaded OS image directly onto the target slot's block
/// device, periodically persisting progress to the database, and return the
/// SHA-256 digest of all bytes written (for verification by the caller).
#[instrument(skip(db, stream_download))]
async fn write_stream_to_label<Db: DbHandle>(
    db: &mut Db,
    size: Option<u64>,
    stream_download: impl Stream<Item = Result<rpc_toolkit::hyper::body::Bytes, reqwest::Error>>,
    file: NewLabel,
) -> Result<Vec<u8>, Error> {
    let block_dev = file.0.block_dev();
    let mut file = tokio::fs::OpenOptions::new()
        .write(true)
        .open(&block_dev)
        .await
        .with_kind(ErrorKind::Filesystem)?;
    let mut hasher = Sha256::new();
    pin!(stream_download);
    let mut downloaded = 0;
    let mut last_progress_update = Instant::now();
    while let Some(item) = stream_download
        .next()
        .await
        .transpose()
        .with_kind(ErrorKind::Network)?
    {
        file.write_all(&item)
            .await
            .with_kind(ErrorKind::Filesystem)?;
        downloaded += item.len() as u64;
        // Throttle database progress writes to at most once per second.
        if last_progress_update.elapsed() > Duration::from_secs(1) {
            last_progress_update = Instant::now();
            crate::db::DatabaseModel::new()
                .server_info()
                .status_info()
                .update_progress()
                .put(db, &UpdateProgress { size, downloaded })
                .await?;
        }
        hasher.update(item);
    }
    // Flush userspace buffers, shut down the writer, then fsync the device so
    // the image is fully on disk before the hash is checked.
    file.flush().await.with_kind(ErrorKind::Filesystem)?;
    file.shutdown().await.with_kind(ErrorKind::Filesystem)?;
    file.sync_all().await.with_kind(ErrorKind::Filesystem)?;
    drop(file);
    Ok(hasher.finalize().to_vec())
}
#[instrument]
async fn check_download(hash_from_header: &str, file_digest: Vec<u8>) -> Result<(), Error> {
if hex::decode(hash_from_header).with_kind(ErrorKind::Network)? != file_digest {
return Err(Error::new(
eyre!("Hash sum does not match source"),
ErrorKind::Network,
));
}
Ok(()) Ok(())
} }
async fn copy_machine_id(new_label: NewLabel) -> Result<(), Error> { async fn copy_machine_id() -> Result<(), Error> {
let new_guard = TmpMountGuard::mount(&new_label.0.as_fs(), ReadWrite).await?; tokio::fs::copy("/etc/machine-id", "/media/embassy/next/etc/machine-id").await?;
tokio::fs::copy("/etc/machine-id", new_guard.as_ref().join("etc/machine-id")).await?;
new_guard.unmount().await?;
Ok(()) Ok(())
} }
async fn copy_ssh_host_keys(new_label: NewLabel) -> Result<(), Error> { async fn copy_ssh_host_keys() -> Result<(), Error> {
let new_guard = TmpMountGuard::mount(&new_label.0.as_fs(), ReadWrite).await?;
tokio::fs::copy( tokio::fs::copy(
"/etc/ssh/ssh_host_rsa_key", "/etc/ssh/ssh_host_rsa_key",
new_guard.as_ref().join("etc/ssh/ssh_host_rsa_key"), "/media/embassy/next/etc/ssh/ssh_host_rsa_key",
) )
.await?; .await?;
tokio::fs::copy( tokio::fs::copy(
"/etc/ssh/ssh_host_rsa_key.pub", "/etc/ssh/ssh_host_rsa_key.pub",
new_guard.as_ref().join("etc/ssh/ssh_host_rsa_key.pub"), "/media/embassy/next/etc/ssh/ssh_host_rsa_key.pub",
) )
.await?; .await?;
tokio::fs::copy( tokio::fs::copy(
"/etc/ssh/ssh_host_ecdsa_key", "/etc/ssh/ssh_host_ecdsa_key",
new_guard.as_ref().join("etc/ssh/ssh_host_ecdsa_key"), "/media/embassy/next/etc/ssh/ssh_host_ecdsa_key",
) )
.await?; .await?;
tokio::fs::copy( tokio::fs::copy(
"/etc/ssh/ssh_host_ecdsa_key.pub", "/etc/ssh/ssh_host_ecdsa_key.pub",
new_guard.as_ref().join("etc/ssh/ssh_host_ecdsa_key.pub"), "/media/embassy/next/etc/ssh/ssh_host_ecdsa_key.pub",
) )
.await?; .await?;
tokio::fs::copy( tokio::fs::copy(
"/etc/ssh/ssh_host_ed25519_key", "/etc/ssh/ssh_host_ed25519_key",
new_guard.as_ref().join("etc/ssh/ssh_host_ed25519_key"), "/media/embassy/next/etc/ssh/ssh_host_ed25519_key",
) )
.await?; .await?;
tokio::fs::copy( tokio::fs::copy(
"/etc/ssh/ssh_host_ed25519_key.pub", "/etc/ssh/ssh_host_ed25519_key.pub",
new_guard.as_ref().join("etc/ssh/ssh_host_ed25519_key.pub"), "/media/embassy/next/etc/ssh/ssh_host_ed25519_key.pub",
) )
.await?; .await?;
new_guard.unmount().await?; Ok(())
}
async fn sync_boot() -> Result<(), Error> {
Rsync::new(
"/media/embassy/next/boot/",
"/boot",
RsyncOptions {
delete: false,
force: false,
ignore_existing: true,
},
)?
.wait()
.await?;
let dev_mnt =
MountGuard::mount(&Bind::new("/dev"), "/media/embassy/next/dev", ReadWrite).await?;
let sys_mnt =
MountGuard::mount(&Bind::new("/sys"), "/media/embassy/next/sys", ReadWrite).await?;
let proc_mnt =
MountGuard::mount(&Bind::new("/proc"), "/media/embassy/next/proc", ReadWrite).await?;
let boot_mnt =
MountGuard::mount(&Bind::new("/boot"), "/media/embassy/next/boot", ReadWrite).await?;
Command::new("chroot")
.arg("/media/embassy/next")
.arg("update-grub")
.invoke(ErrorKind::MigrationFailed)
.await?;
boot_mnt.unmount().await?;
proc_mnt.unmount().await?;
sys_mnt.unmount().await?;
dev_mnt.unmount().await?;
Ok(()) Ok(())
} }
#[instrument] #[instrument]
async fn swap_boot_label(new_label: NewLabel) -> Result<(), Error> { async fn swap_boot_label() -> Result<(), Error> {
let block_dev = new_label.0.block_dev(); tokio::fs::write("/media/embassy/config/upgrade", b"").await?;
Command::new("e2label")
.arg(block_dev)
.arg(new_label.0.label())
.invoke(crate::ErrorKind::BlockDevice)
.await?;
let mounted = TmpMountGuard::mount(&new_label.0.as_fs(), ReadWrite).await?;
Command::new("sed")
.arg("-i")
.arg(&format!(
"s/LABEL=\\(blue\\|green\\)/LABEL={}/g",
new_label.0.label()
))
.arg(mounted.as_ref().join("etc/fstab"))
.invoke(crate::ErrorKind::Filesystem)
.await?;
mounted.unmount().await?;
Command::new("sed")
.arg("-i")
.arg(&format!(
"s/PARTUUID=cb15ae4d-\\(03\\|04\\)/PARTUUID={}/g",
new_label.0.part_uuid()
))
.arg(Path::new(BOOT_RW_PATH).join("cmdline.txt.orig"))
.invoke(crate::ErrorKind::Filesystem)
.await?;
Command::new("sed")
.arg("-i")
.arg(&format!(
"s/PARTUUID=cb15ae4d-\\(03\\|04\\)/PARTUUID={}/g",
new_label.0.part_uuid()
))
.arg(Path::new(BOOT_RW_PATH).join("cmdline.txt"))
.invoke(crate::ErrorKind::Filesystem)
.await?;
UPDATED.store(true, Ordering::SeqCst);
Ok(()) Ok(())
} }
/// Captured from doing an fstab with an embassy box and the cat from the /etc/fstab
#[test]
fn test_capture() {
    // Representative fstab contents from a running device; the root line is
    // `LABEL=blue /`, so PARSE_COLOR should capture "blue".
    // NOTE: the raw-string body must stay unindented — its bytes are the input.
    let output = r#"
LABEL=blue / ext4 discard,errors=remount-ro 0 1
LABEL=system-boot /media/boot-rw vfat defaults 0 1
/media/boot-rw /boot none defaults,bind,ro 0 0
LABEL=EMBASSY /embassy-os vfat defaults 0 1
# a swapfile is not a swap partition, no line here
# use dphys-swapfile swap[on|off] for that
"#;
    assert_eq!(&PARSE_COLOR.captures(&output).unwrap()[1], "blue");
}

View File

@@ -1,14 +1,13 @@
use std::cmp::min; use std::cmp::min;
use std::convert::TryFrom; use std::convert::TryFrom;
use std::fmt::Display; use std::fmt::Display;
use std::future::Future;
use std::io::Error as StdIOError; use std::io::Error as StdIOError;
use std::pin::Pin; use std::pin::Pin;
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use futures::future::BoxFuture; use futures::{FutureExt, Stream};
use futures::stream::BoxStream;
use futures::{FutureExt, StreamExt};
use http::header::{ACCEPT_RANGES, CONTENT_LENGTH, RANGE}; use http::header::{ACCEPT_RANGES, CONTENT_LENGTH, RANGE};
use hyper::body::Bytes; use hyper::body::Bytes;
use pin_project::pin_project; use pin_project::pin_project;
@@ -30,9 +29,27 @@ pub struct HttpReader {
enum ReadInProgress { enum ReadInProgress {
None, None,
InProgress( InProgress(
BoxFuture<'static, Result<BoxStream<'static, Result<Bytes, reqwest::Error>>, Error>>, Pin<
Box<
dyn Future<
Output = Result<
Pin<
Box<
dyn Stream<Item = Result<Bytes, reqwest::Error>>
+ Send
+ Sync
+ 'static,
>,
>,
Error,
>,
> + Send
+ Sync
+ 'static,
>,
>,
), ),
Complete(BoxStream<'static, Result<Bytes, reqwest::Error>>), Complete(Pin<Box<dyn Stream<Item = Result<Bytes, reqwest::Error>> + Send + Sync + 'static>>),
} }
impl ReadInProgress { impl ReadInProgress {
fn take(&mut self) -> Self { fn take(&mut self) -> Self {
@@ -62,6 +79,7 @@ impl Display for RangeUnit {
impl HttpReader { impl HttpReader {
pub async fn new(http_url: Url) -> Result<Self, Error> { pub async fn new(http_url: Url) -> Result<Self, Error> {
let http_client = Client::builder() let http_client = Client::builder()
// .proxy(reqwest::Proxy::all("socks5h://127.0.0.1:9050").unwrap())
.build() .build()
.with_kind(crate::ErrorKind::TLSInit)?; .with_kind(crate::ErrorKind::TLSInit)?;
@@ -141,11 +159,14 @@ impl HttpReader {
start: usize, start: usize,
len: usize, len: usize,
total_bytes: usize, total_bytes: usize,
) -> Result<BoxStream<'static, Result<Bytes, reqwest::Error>>, Error> { ) -> Result<
Pin<Box<dyn Stream<Item = Result<Bytes, reqwest::Error>> + Send + Sync + 'static>>,
Error,
> {
let end = min(start + len, total_bytes) - 1; let end = min(start + len, total_bytes) - 1;
if start > end { if start > end {
return Ok(futures::stream::empty().boxed()); return Ok(Box::pin(futures::stream::empty()));
} }
let data_range = format!("{}={}-{} ", range_unit.unwrap_or_default(), start, end); let data_range = format!("{}={}-{} ", range_unit.unwrap_or_default(), start, end);
@@ -159,7 +180,7 @@ impl HttpReader {
.error_for_status() .error_for_status()
.with_kind(crate::ErrorKind::Network)?; .with_kind(crate::ErrorKind::Network)?;
Ok(data_resp.bytes_stream().boxed()) Ok(Box::pin(data_resp.bytes_stream()))
} }
} }
@@ -170,7 +191,9 @@ impl AsyncRead for HttpReader {
buf: &mut tokio::io::ReadBuf<'_>, buf: &mut tokio::io::ReadBuf<'_>,
) -> Poll<std::io::Result<()>> { ) -> Poll<std::io::Result<()>> {
fn poll_complete( fn poll_complete(
body: &mut BoxStream<'static, Result<Bytes, reqwest::Error>>, body: &mut Pin<
Box<dyn Stream<Item = Result<Bytes, reqwest::Error>> + Send + Sync + 'static>,
>,
cx: &mut Context<'_>, cx: &mut Context<'_>,
buf: &mut tokio::io::ReadBuf<'_>, buf: &mut tokio::io::ReadBuf<'_>,
) -> Poll<Option<std::io::Result<usize>>> { ) -> Poll<Option<std::io::Result<usize>>> {
@@ -220,15 +243,14 @@ impl AsyncRead for HttpReader {
continue; continue;
} }
}, },
ReadInProgress::None => HttpReader::get_range( ReadInProgress::None => Box::pin(HttpReader::get_range(
*this.range_unit, *this.range_unit,
this.http_client.clone(), this.http_client.clone(),
this.http_url.clone(), this.http_url.clone(),
*this.cursor_pos, *this.cursor_pos,
buf.remaining(), buf.remaining(),
*this.total_bytes, *this.total_bytes,
) )),
.boxed(),
ReadInProgress::InProgress(fut) => fut, ReadInProgress::InProgress(fut) => fut,
}; };
@@ -339,19 +361,20 @@ async fn main_test() {
} }
#[tokio::test] #[tokio::test]
#[ignore]
async fn s9pk_test() { async fn s9pk_test() {
use tokio::io::BufReader; use tokio::io::BufReader;
let http_url = Url::parse("https://github.com/Start9Labs/hello-world-wrapper/releases/download/v0.3.0/hello-world.s9pk").unwrap(); let http_url = Url::parse("http://qhc6ac47cytstejcepk2ia3ipadzjhlkc5qsktsbl4e7u2krfmfuaqqd.onion/content/files/2022/09/ghost.s9pk").unwrap();
println!("Getting this resource: {}", http_url); println!("Getting this resource: {}", http_url);
let test_reader = let test_reader =
BufReader::with_capacity(1024 * 1024, HttpReader::new(http_url).await.unwrap()); BufReader::with_capacity(1024 * 1024, HttpReader::new(http_url).await.unwrap());
let mut s9pk = crate::s9pk::reader::S9pkReader::from_reader(test_reader, true) let mut s9pk = crate::s9pk::reader::S9pkReader::from_reader(test_reader, false)
.await .await
.unwrap(); .unwrap();
let manifest = s9pk.manifest().await.unwrap(); let manifest = s9pk.manifest().await.unwrap();
assert_eq!(&**manifest.id, "hello-world"); assert_eq!(&**manifest.id, "ghost");
} }

View File

@@ -1,11 +1,19 @@
use std::future::Future;
use std::path::Path; use std::path::Path;
use std::task::Poll;
use futures::future::BoxFuture; use futures::future::{BoxFuture, Fuse};
use futures::{FutureExt, TryStreamExt}; use futures::{AsyncSeek, FutureExt, TryStreamExt};
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, ReadBuf}; use helpers::NonDetachingJoinHandle;
use tokio::io::{
duplex, AsyncRead, AsyncReadExt, AsyncWrite, AsyncWriteExt, DuplexStream, ReadBuf, WriteHalf,
};
use crate::ResultExt; use crate::ResultExt;
pub trait AsyncReadSeek: AsyncRead + AsyncSeek {}
impl<T: AsyncRead + AsyncSeek> AsyncReadSeek for T {}
#[derive(Clone, Debug)] #[derive(Clone, Debug)]
pub struct AsyncCompat<T>(pub T); pub struct AsyncCompat<T>(pub T);
impl<T> futures::io::AsyncRead for AsyncCompat<T> impl<T> futures::io::AsyncRead for AsyncCompat<T>
@@ -246,3 +254,72 @@ pub fn response_to_reader(response: reqwest::Response) -> impl AsyncRead + Unpin
) )
})) }))
} }
/// An `AsyncRead` adapter over a writer callback: the callback writes into an
/// in-memory duplex pipe on a background task, and reads from this struct
/// drain the other end of that pipe.
#[pin_project::pin_project]
pub struct BufferedWriteReader {
    /// Background write task; `Fuse`d so it can be polled after completion.
    #[pin]
    hdl: Fuse<NonDetachingJoinHandle<Result<(), std::io::Error>>>,
    /// Read end of the duplex pipe.
    #[pin]
    rdr: DuplexStream,
}
impl BufferedWriteReader {
    /// Create a reader fed by `write_fn`.
    ///
    /// `write_fn` receives the write half of an in-memory duplex pipe with
    /// capacity `max_buf_size` and is spawned onto the tokio runtime
    /// immediately; everything it writes becomes readable from the returned
    /// `BufferedWriteReader`.
    pub fn new<
        W: FnOnce(WriteHalf<DuplexStream>) -> Fut,
        Fut: Future<Output = Result<(), std::io::Error>> + Send + Sync + 'static,
    >(
        write_fn: W,
        max_buf_size: usize,
    ) -> Self {
        let (w, rdr) = duplex(max_buf_size);
        // Split purely to obtain a `WriteHalf`; the read half of this end of
        // the pipe is discarded (unused).
        let (_, w) = tokio::io::split(w);
        BufferedWriteReader {
            hdl: NonDetachingJoinHandle::from(tokio::spawn(write_fn(w))).fuse(),
            rdr,
        }
    }
}
impl AsyncRead for BufferedWriteReader {
    fn poll_read(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> std::task::Poll<std::io::Result<()>> {
        let this = self.project();
        // Poll the pipe first so the waker is registered for incoming data.
        let res = this.rdr.poll_read(cx, buf);
        // Then surface any failure from the background write task: the task's
        // own io::Error, or a join failure (panic/cancellation) mapped to
        // BrokenPipe. While the task is pending or finished cleanly, defer to
        // the read result.
        match this.hdl.poll(cx) {
            Poll::Ready(Ok(Err(e))) => return Poll::Ready(Err(e)),
            Poll::Ready(Err(e)) => {
                return Poll::Ready(Err(std::io::Error::new(std::io::ErrorKind::BrokenPipe, e)))
            }
            _ => res,
        }
    }
}
/// An `AsyncRead` adapter that rewrites every occurrence of one byte value
/// with another in the data read from `inner`.
#[pin_project::pin_project]
pub struct ByteReplacementReader<R> {
    /// Byte value to search for.
    pub replace: u8,
    /// Byte value substituted in its place.
    pub with: u8,
    #[pin]
    pub inner: R,
}
impl<R: AsyncRead> AsyncRead for ByteReplacementReader<R> {
    /// Delegate to `inner`, then rewrite `replace` bytes to `with` in the
    /// filled portion of `buf` on a successful read.
    fn poll_read(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> std::task::Poll<std::io::Result<()>> {
        let projected = self.project();
        let poll = projected.inner.poll_read(cx, buf);
        if let Poll::Ready(Ok(())) = &poll {
            let (from, to) = (*projected.replace, *projected.with);
            for byte in buf.filled_mut().iter_mut() {
                if *byte == from {
                    *byte = to;
                }
            }
        }
        poll
    }
}

View File

@@ -24,9 +24,10 @@ use tracing::instrument;
use crate::shutdown::Shutdown; use crate::shutdown::Shutdown;
use crate::{Error, ErrorKind, ResultExt as _}; use crate::{Error, ErrorKind, ResultExt as _};
pub mod config; pub mod config;
pub mod io;
pub mod http_reader; pub mod http_reader;
pub mod io;
pub mod logger; pub mod logger;
pub mod rsync;
pub mod serde; pub mod serde;
#[derive(Clone, Copy, Debug)] #[derive(Clone, Copy, Debug)]

105
backend/src/util/rsync.rs Normal file
View File

@@ -0,0 +1,105 @@
use color_eyre::eyre::eyre;
use std::path::Path;
use helpers::NonDetachingJoinHandle;
use tokio::io::{AsyncBufReadExt, AsyncReadExt, BufReader};
use tokio::process::{Child, Command};
use tokio::sync::watch;
use tokio_stream::wrappers::WatchStream;
use crate::util::io::ByteReplacementReader;
use crate::{Error, ErrorKind};
/// Flags controlling how the `rsync` command line is built.
pub struct RsyncOptions {
    /// Pass `--delete`: remove destination files that no longer exist in the source.
    pub delete: bool,
    /// Pass `--force`: force deletion of directories even if not empty.
    pub force: bool,
    /// Pass `--ignore-existing`: skip updating files that already exist on
    /// the destination.
    pub ignore_existing: bool,
}
impl Default for RsyncOptions {
    /// Defaults describe a full mirroring sync: delete and force on,
    /// ignore-existing off.
    fn default() -> Self {
        Self {
            delete: true,
            force: true,
            ignore_existing: false,
        }
    }
}
/// Handle to a spawned `rsync` child process.
pub struct Rsync {
    /// The running rsync child.
    pub command: Child,
    // Parses progress output; held so it is dropped (and not detached)
    // together with the handle.
    _progress_task: NonDetachingJoinHandle<Result<(), Error>>,
    // Accumulates the child's stderr for error reporting in `wait`.
    stderr: NonDetachingJoinHandle<Result<String, Error>>,
    /// Stream of completion fractions parsed from rsync's progress output.
    pub progress: WatchStream<f64>,
}
impl Rsync {
    /// Spawn `rsync -a --info=progress2 <src> <dst>` (plus the flags selected
    /// by `options`).
    ///
    /// Returns a handle whose `progress` field streams completion fractions
    /// in `[0.0, 1.0]`, parsed from rsync's `--info=progress2` percentage
    /// column on stdout. Call [`Rsync::wait`] to await completion.
    pub fn new(
        src: impl AsRef<Path>,
        dst: impl AsRef<Path>,
        options: RsyncOptions,
    ) -> Result<Self, Error> {
        let mut cmd = Command::new("rsync");
        if options.delete {
            cmd.arg("--delete");
        }
        if options.force {
            cmd.arg("--force");
        }
        if options.ignore_existing {
            cmd.arg("--ignore-existing");
        }
        let mut command = cmd
            .arg("-a")
            .arg("--info=progress2")
            .arg(src.as_ref())
            .arg(dst.as_ref())
            .kill_on_drop(true)
            .stdout(std::process::Stdio::piped())
            // BUGFIX: stderr must be explicitly piped as well — by default it
            // is inherited from the parent, in which case
            // `command.stderr.take()` below returns `None` and the `unwrap()`
            // panics at runtime.
            .stderr(std::process::Stdio::piped())
            .spawn()?;
        let cmd_stdout = command.stdout.take().unwrap();
        let mut cmd_stderr = command.stderr.take().unwrap();
        let (send, recv) = watch::channel(0.0);
        // Capture stderr in full so it can be reported from `wait`.
        let stderr = tokio::spawn(async move {
            let mut res = String::new();
            cmd_stderr.read_to_string(&mut res).await?;
            Ok(res)
        })
        .into();
        // rsync redraws its progress line using '\r'; translate to '\n' so it
        // can be consumed line-by-line, then forward the "NN%" column as a
        // fraction through the watch channel.
        let progress_task = tokio::spawn(async move {
            let mut lines = BufReader::new(ByteReplacementReader {
                replace: b'\r',
                with: b'\n',
                inner: cmd_stdout,
            })
            .lines();
            while let Some(line) = lines.next_line().await? {
                if let Some(percentage) = line
                    .split_ascii_whitespace()
                    .find_map(|col| col.strip_suffix("%"))
                {
                    // The receiver lives in `self.progress`; if it has been
                    // dropped there is no one left to notify, so ignore the
                    // send error instead of panicking.
                    let _ = send.send(percentage.parse::<f64>()? / 100.0);
                }
            }
            Ok(())
        })
        .into();
        Ok(Rsync {
            command,
            _progress_task: progress_task,
            stderr,
            progress: WatchStream::new(recv),
        })
    }
    /// Wait for the rsync process to exit.
    ///
    /// On success, logs the captured stderr at info level; on a non-zero exit
    /// status, returns a `Filesystem` error containing the captured stderr.
    pub async fn wait(mut self) -> Result<(), Error> {
        let status = self.command.wait().await?;
        let stderr = self.stderr.await.unwrap()?;
        if status.success() {
            tracing::info!("rsync: {}", stderr);
        } else {
            return Err(Error::new(
                eyre!("rsync error: {}", stderr),
                ErrorKind::Filesystem,
            ));
        }
        Ok(())
    }
}

View File

@@ -5,7 +5,6 @@ use tokio::process::Command;
use super::*; use super::*;
use crate::disk::BOOT_RW_PATH; use crate::disk::BOOT_RW_PATH;
use crate::update::query_mounted_label;
use crate::util::Invoke; use crate::util::Invoke;
const V0_3_0_1: emver::Version = emver::Version::new(0, 3, 0, 1); const V0_3_0_1: emver::Version = emver::Version::new(0, 3, 0, 1);
@@ -25,16 +24,6 @@ impl VersionT for Version {
&*v0_3_0::V0_3_0_COMPAT &*v0_3_0::V0_3_0_COMPAT
} }
async fn up<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> { async fn up<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> {
let (_, current) = query_mounted_label().await?;
Command::new("sed")
.arg("-i")
.arg(&format!(
"s/PARTUUID=cb15ae4d-\\(03\\|04\\)/PARTUUID={}/g",
current.0.part_uuid()
))
.arg(Path::new(BOOT_RW_PATH).join("cmdline.txt.orig"))
.invoke(crate::ErrorKind::Filesystem)
.await?;
Ok(()) Ok(())
} }
async fn down<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> { async fn down<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> {

View File

@@ -1,9 +1,8 @@
use emver::VersionRange; use emver::VersionRange;
use crate::hostname::{generate_id, sync_hostname};
use super::v0_3_0::V0_3_0_COMPAT; use super::v0_3_0::V0_3_0_COMPAT;
use super::*; use super::*;
use crate::hostname::{generate_id, sync_hostname};
const V0_3_2: emver::Version = emver::Version::new(0, 3, 2, 0); const V0_3_2: emver::Version = emver::Version::new(0, 3, 2, 0);

View File

@@ -16,7 +16,7 @@ use crate::util::Version;
use crate::{Error, ResultExt}; use crate::{Error, ResultExt};
pub const PKG_VOLUME_DIR: &str = "package-data/volumes"; pub const PKG_VOLUME_DIR: &str = "package-data/volumes";
pub const BACKUP_DIR: &str = "/media/embassy-os/backups"; pub const BACKUP_DIR: &str = "/media/embassy/backups";
#[derive(Clone, Debug, Default, Deserialize, Serialize)] #[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct Volumes(BTreeMap<VolumeId, Volume>); pub struct Volumes(BTreeMap<VolumeId, Volume>);

View File

@@ -1,4 +0,0 @@
LABEL=green / ext4 discard,errors=remount-ro 0 1
LABEL=system-boot /media/boot-rw vfat defaults 0 1
/media/boot-rw /boot none defaults,bind,ro 0 0
LABEL=EMBASSY /embassy-os vfat defaults 0 1

View File

@@ -1,161 +0,0 @@
#!/bin/bash
# One-shot provisioning script: converts a stock Raspberry Pi OS install into
# an EmbassyOS base image, then reboots. Must run as root with network access.
set -e

# introduce start9 username and embassy as default password
if ! awk -F: '{ print $1 }' /etc/passwd | grep start9
then
    usermod -l start9 -d /home/start9 -m pi
    groupmod --new-name start9 pi
    echo start9:embassy | chpasswd
fi
# lock the account against password login
passwd -l start9

# wait up to 60s for the default gateway to answer pings
START=$(date +%s)
while ! ping -q -w 1 -c 1 `ip r | grep default | cut -d ' ' -f 3` > /dev/null; do
    >&2 echo "Waiting for internet connection..."
    sleep 1
    if [ "$[$START + 60]" -lt $(date +%s) ]; then
        >&2 echo "Timed out waiting for internet connection..."
        exit 1
    fi
done
echo "Connected to network"

# change timezone
timedatectl set-timezone Etc/UTC

# remove any preexisting docker daemon config (rewritten below)
! test -f /etc/docker/daemon.json || rm /etc/docker/daemon.json

mount -o remount,rw /boot

# pin the Raspberry Pi bootloader/kernel so apt upgrades can't change them
apt-mark hold raspberrypi-bootloader
apt-mark hold raspberrypi-kernel

# Convert all repos to use https:// before apt update
sed -i "s/http:/https:/g" /etc/apt/sources.list /etc/apt/sources.list.d/*.list

apt-get update
apt-get install -y \
    nginx \
    libavahi-client3 \
    avahi-daemon \
    avahi-utils \
    iotop \
    bmon \
    lvm2 \
    cryptsetup \
    exfat-utils \
    sqlite3 \
    network-manager \
    wireless-tools \
    net-tools \
    ecryptfs-utils \
    cifs-utils \
    samba-common-bin \
    vim \
    jq \
    ncdu \
    postgresql \
    pgloader \
    dnsutils

# switch to systemd-resolved & network-manager
systemctl enable systemd-resolved
systemctl start systemd-resolved
apt-get remove --purge openresolv dhcpcd5 -y
echo "#" > /etc/network/interfaces
systemctl disable wpa_supplicant.service
ln -rsf /run/systemd/resolve/stub-resolv.conf /etc/resolv.conf
cat << EOF > /etc/NetworkManager/NetworkManager.conf
[main]
plugins=ifupdown,keyfile
dns=systemd-resolved
[ifupdown]
managed=true
EOF
sudo systemctl restart NetworkManager
nmcli device modify eth0 ipv4.ignore-auto-dns no

# wait up to 60s for DNS + connectivity to come back after the switch
START=$(date +%s)
while ! ping -q -w 1 -c 1 start9.com > /dev/null; do
    >&2 echo "Waiting for network to reinitialize..."
    sleep 1
    if [ "$[$START + 60]" -lt $(date +%s) ]; then
        >&2 echo "Timed out waiting for network to reinitialize..."
        exit 1
    fi
done
echo "Network reinitialized"

# Setup repository from The Guardian Project and install latest stable Tor daemon
wget -qO- https://deb.torproject.org/torproject.org/A3C4F0F979CAA22CDBA8F512EE8CBC9E886DDD89.asc | gpg --dearmor | tee /usr/share/keyrings/tor-archive-keyring.gpg >/dev/null
echo "deb [arch=arm64 signed-by=/usr/share/keyrings/tor-archive-keyring.gpg] https://deb.torproject.org/torproject.org bullseye main" > /etc/apt/sources.list.d/tor.list
apt-get update && apt-get install -y tor deb.torproject.org-keyring

curl -fsSL https://get.docker.com | sh # TODO: commit this script into git instead of live fetching it

# disable services EmbassyOS manages itself or does not need
systemctl disable postgresql.service
systemctl disable bluetooth.service
systemctl disable hciuart.service
systemctl disable triggerhappy.service

apt-get autoremove -y
apt-get upgrade -y

# patch service/config files in place
sed -i 's/Restart=on-failure/Restart=always/g' /lib/systemd/system/tor@default.service
sed -i 's/ExecStart=\/usr\/bin\/dockerd/ExecStart=\/usr\/bin\/dockerd --exec-opt native.cgroupdriver=systemd/g' /lib/systemd/system/docker.service
sed -i '/}/i \ \ \ \ application\/wasm \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ wasm;' /etc/nginx/mime.types
sed -i 's/# server_names_hash_bucket_size 64;/server_names_hash_bucket_size 128;/g' /etc/nginx/nginx.conf
sed -i 's/#allow-interfaces=eth0/allow-interfaces=eth0,wlan0/g' /etc/avahi/avahi-daemon.conf
sed -i '/\(^\|#\)entries-per-entry-group-max=/c\entries-per-entry-group-max=128' /etc/avahi/avahi-daemon.conf
echo '{ "cgroup-parent": "docker-engine.slice" }' > /etc/docker/daemon.json
mkdir -p /etc/nginx/ssl
# fix to suppress docker warning, fixed in 21.xx release of docker cli: https://github.com/docker/cli/pull/2934
mkdir -p /root/.docker
touch /root/.docker/config.json
mkdir -p /etc/embassy
systemctl enable embassyd.service embassy-init.service
cat << EOF > /etc/tor/torrc
SocksPort 0.0.0.0:9050
SocksPolicy accept 127.0.0.1
SocksPolicy accept 172.18.0.0/16
SocksPolicy reject *
ControlPort 9051
CookieAuthentication 1
EOF
# derive machine-id from the product key when present, else randomize
if [ -f /embassy-os/product_key.txt ]
then
    cat /embassy-os/product_key.txt | tr -d '\n' | sha256sum | head -c 32 | sed 's/$/\n/' > /etc/machine-id
else
    head -c 16 /dev/urandom | xxd -p | sed 's/$/\n/' > /etc/machine-id
fi
# clear any tor state generated during install
systemctl stop tor
rm -rf /var/lib/tor/*
raspi-config nonint enable_overlayfs
# create a copy of the cmdline *without* the quirk string, so that it can be easily amended
sed -i 's/usb-storage.quirks=152d:0562:u,14cd:121c:u,0781:cfcb:u //g' /boot/cmdline.txt
cp /boot/cmdline.txt /boot/cmdline.txt.orig
sed -i 's/^/usb-storage.quirks=152d:0562:u,14cd:121c:u,0781:cfcb:u /g' /boot/cmdline.txt
# making that *sudo docker stats* command fulfil its purpose by displaying all metrics
sed -i 's/rootwait quiet.*/rootwait cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory quiet/g' /boot/cmdline.txt
systemctl disable nc-broadcast.service
systemctl disable initialization.service
echo "fs.inotify.max_user_watches=1048576" > /etc/sysctl.d/97-embassy.conf
sync
reboot

View File

@@ -1,44 +0,0 @@
# This file is part of systemd.
#
# systemd is free software; you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation; either version 2.1 of the License, or
# (at your option) any later version.
#
# Entries in this file show the compile time defaults.
# You can change settings by editing this file.
# Defaults can be restored by simply deleting this file.
#
# See journald.conf(5) for details.
[Journal]
Storage=persistent
Compress=yes
#Seal=yes
#SplitMode=uid
#SyncIntervalSec=5m
#RateLimitIntervalSec=30s
#RateLimitBurst=10000
SystemMaxUse=1G
#SystemKeepFree=
#SystemMaxFileSize=
#SystemMaxFiles=100
#RuntimeMaxUse=
#RuntimeKeepFree=
#RuntimeMaxFileSize=
#RuntimeMaxFiles=100
#MaxRetentionSec=
#MaxFileSec=1month
ForwardToSyslog=no
#ForwardToKMsg=no
#ForwardToConsole=no
#ForwardToWall=yes
#TTYPath=/dev/console
#MaxLevelStore=debug
#MaxLevelSyslog=debug
#MaxLevelKMsg=notice
#MaxLevelConsole=info
#MaxLevelWall=emerg
#LineMax=48K
#ReadKMsg=yes
#Audit=no

3
build/lib/conflicts Normal file
View File

@@ -0,0 +1,3 @@
openresolv
dhcpcd5
firewalld

31
build/lib/depends Normal file
View File

@@ -0,0 +1,31 @@
tor
nginx
avahi-daemon
avahi-utils
iotop
bmon
lvm2
cryptsetup
exfat-utils
sqlite3
wireless-tools
net-tools
ecryptfs-utils
cifs-utils
samba-common-bin
network-manager
vim
jq
ncdu
postgresql
pgloader
openssh-server
docker-ce
docker-ce-cli
containerd.io
docker-compose-plugin
beep
httpdirfs
iw
squashfs-tools
rsync

View File

@@ -0,0 +1,9 @@
#!/bin/sh
set -e
curl -fsSL https://deb.torproject.org/torproject.org/A3C4F0F979CAA22CDBA8F512EE8CBC9E886DDD89.asc | gpg --dearmor -o- > /usr/share/keyrings/tor-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/tor-archive-keyring.gpg] https://deb.torproject.org/torproject.org bullseye main" > /etc/apt/sources.list.d/tor.list
curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o- > /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian bullseye stable" > /etc/apt/sources.list.d/docker.list

View File

@@ -0,0 +1,96 @@
# Local filesystem mounting -*- shell-script -*-
#
# This script overrides local_mount_root() in /scripts/local
# and mounts root as a read-only filesystem with a temporary (rw)
# overlay filesystem.
#
. /scripts/local

local_mount_root()
{
	echo 'using embassy initramfs module'
	local_top
	local_device_setup "${ROOT}" "root file system"
	ROOT="${DEV}"

	# Get the root filesystem type if not set
	if [ -z "${ROOTFSTYPE}" ]; then
		FSTYPE=$(get_fstype "${ROOT}")
	else
		FSTYPE=${ROOTFSTYPE}
	fi

	local_premount

	# CHANGES TO THE ORIGINAL FUNCTION BEGIN HERE
	# N.B. this code still lacks error checking

	modprobe ${FSTYPE}
	checkfs ${ROOT} root "${FSTYPE}"

	if [ "${FSTYPE}" != "unknown" ]; then
		mount -t ${FSTYPE} ${ROOTFLAGS} ${ROOT} ${rootmnt}
	else
		mount ${ROOTFLAGS} ${ROOT} ${rootmnt}
	fi

	echo 'mounting embassyfs'
	mkdir /embassyfs
	mount --move ${rootmnt} /embassyfs

	# Recover from an interrupted upgrade: if only `prev` exists, it is the
	# good filesystem — restore it as `current`.
	if ! [ -d /embassyfs/current ] && [ -d /embassyfs/prev ]; then
		mv /embassyfs/prev /embassyfs/current
	fi
	# First boot after install: move the entire root contents under `current`.
	if ! [ -d /embassyfs/current ]; then
		mkdir /embassyfs/current
		for FILE in $(ls /embassyfs); do
			if [ "$FILE" != current ]; then
				mv /embassyfs/$FILE /embassyfs/current/
			fi
		done
	fi
	mkdir -p /embassyfs/config
	# Upgrade requested (flag written by the updater): atomically swap
	# `next` in as `current`, keeping the old root as `prev`.
	if [ -f /embassyfs/config/upgrade ] && [ -d /embassyfs/next ]; then
		mv /embassyfs/current /embassyfs/prev
		mv /embassyfs/next /embassyfs/current
		rm /embassyfs/config/upgrade
	fi
	# Ensure a `next` slot always exists for the updater to write into.
	if ! [ -d /embassyfs/next ]; then
		if [ -d /embassyfs/prev ]; then
			mv /embassyfs/prev /embassyfs/next
		else
			mkdir /embassyfs/next
		fi
	fi

	# Read-only lower layer = the selected `current` root.
	mkdir /lower /upper
	mount -r --bind /embassyfs/current /lower
	modprobe overlay || insmod "/lower/lib/modules/$(uname -r)/kernel/fs/overlayfs/overlay.ko"
	# Mount a tmpfs for the overlay in /upper
	mount -t tmpfs tmpfs /upper
	mkdir /upper/data /upper/work
	# Mount the final overlay-root in $rootmnt
	mount -t overlay \
		-olowerdir=/lower,upperdir=/upper/data,workdir=/upper/work \
		overlay ${rootmnt}
	# Expose the persistent config, the `next` slot, and the raw (read-only)
	# embassyfs inside the new root for the running system to use.
	mkdir -p ${rootmnt}/media/embassy/config
	mount --bind /embassyfs/config ${rootmnt}/media/embassy/config
	mkdir -p ${rootmnt}/media/embassy/next
	mount --bind /embassyfs/next ${rootmnt}/media/embassy/next
	mkdir -p ${rootmnt}/media/embassy/embassyfs
	mount -r --bind /embassyfs ${rootmnt}/media/embassy/embassyfs
}

View File

@@ -0,0 +1,34 @@
#!/bin/sh
# Wrapper around grub-probe-default: for each absolute-path argument that
# lives on an overlay filesystem, rewrite it to the equivalent path under the
# read-only /media/embassy/embassyfs bind mount so grub probes the real
# backing device instead of the overlay.
ARGS=
for ARG in $@; do
	# Only consider arguments that start with '/'.
	if [ "${ARG%%[!/]*}" = "/" ]; then
		OPTIONS=
		path="$ARG"
		# Walk up the path until findmnt recognizes a mount point; if that
		# mount is an overlay, capture its mount options.
		while true; do
			if FSTYPE=$( findmnt -n -o FSTYPE "$path" ); then
				if [ "$FSTYPE" = "overlay" ]; then
					OPTIONS=$(findmnt -n -o OPTIONS "$path")
					break
				else
					break
				fi
			fi
			if [ "$path" = "/" ]; then break; fi
			path=$(dirname "$path")
		done
		# If the options contain a lowerdir=..., the path is on the overlay
		# root — redirect it into the embassyfs bind mount.
		if LOWERDIR=$(echo "$OPTIONS" | grep -m 1 -oP 'lowerdir=\K[^,]+'); then
			#echo "[DEBUG] Overlay filesystem detected ${ARG} --> ${LOWERDIR}${ARG%*/}" 1>&2
			ARG=/media/embassy/embassyfs"${ARG%*/}"
		fi
	fi
	ARGS="$ARGS $ARG"
done
grub-probe-default $ARGS
exit $?

122
build/lib/scripts/install Executable file
View File

@@ -0,0 +1,122 @@
#!/bin/bash
# embassy-install: partition a target disk and install EmbassyOS onto it from
# the live (casper) environment.
#
# usage: embassy-install <TARGET DISK>
set -e

# partition_for <disk> <index>: print the device node for partition <index>.
# Disks whose names end in a digit (nvme0n1, mmcblk0, loop0) use a "p"
# separator (e.g. /dev/nvme0n1p1); others concatenate (e.g. /dev/sda1).
function partition_for () {
	if [[ "$1" =~ [0-9]+$ ]]; then
		echo "$1p$2"
	else
		echo "$1$2"
	fi
}

OSDISK=$1
if [ -z "$OSDISK" ]; then
	>&2 echo 'usage: embassy-install <TARGET DISK>'
	exit 1
fi

# Detect the first wireless interface (optional) ...
WIFI_IFACE=
for IFACE in $(ls /sys/class/net); do
	if [ -d /sys/class/net/$IFACE/wireless ]; then
		WIFI_IFACE=$IFACE
		break
	fi
done
# ... and the first wired interface with a backing device (required).
ETH_IFACE=
for IFACE in $(ls /sys/class/net); do
	if ! [ -d /sys/class/net/$IFACE/wireless ] && [ -d /sys/class/net/$IFACE/device ]; then
		ETH_IFACE=$IFACE
		break
	fi
done
if [ -z "$ETH_IFACE" ]; then
	>&2 echo 'Could not detect ethernet interface'
	exit 1
fi

# Script fdisk to create a DOS (MBR) partition table:
#   1: 1G   W95 FAT32, bootable  -> boot
#   2: 15G  Linux                -> rootfs
#   3: rest Linux LVM            -> data
(
	echo o      # New DOS (MBR) partition table ("o" creates a DOS label, not GPT)
	echo n      # New Partition
	echo p      # Primary
	echo 1      # Index #1
	echo        # Default Starting Position
	echo '+1G'  # 1GB
	echo t      # Change Type (only one partition, so fdisk selects it)
	echo 0b     # W95 FAT32
	echo a      # Set Bootable
	echo n      # New Partition
	echo p      # Primary
	echo 2      # Index #2
	echo        # Default Starting Position
	echo '+15G' # 15GB
	echo n      # New Partition
	echo p      # Primary
	echo 3      # Index #3
	echo        # Default Starting Position
	echo        # Use Full Remaining
	echo t      # Change Type
	echo 3      # (Still Index #3)
	echo 8e     # Linux LVM
	echo w      # Write Changes
) | fdisk $OSDISK
BOOTPART=`partition_for $OSDISK 1`
ROOTPART=`partition_for $OSDISK 2`
mkfs.vfat $BOOTPART
fatlabel $BOOTPART boot
mkfs.ext4 $ROOTPART
e2label $ROOTPART rootfs

# Lay out the embassyfs directory structure and unpack the live squashfs
# image into the "current" slot.
mount $ROOTPART /mnt
mkdir /mnt/config
mkdir /mnt/current
mkdir /mnt/next
mkdir /mnt/current/boot
mount $BOOTPART /mnt/current/boot
unsquashfs -f -d /mnt/current /cdrom/casper/filesystem.squashfs

# Record the partition/interface layout for the embassy runtime.
cat > /mnt/config/config.yaml << EOF
os-partitions:
  boot: $BOOTPART
  root: $ROOTPART
ethernet-interface: $ETH_IFACE
EOF
if [ -n "$WIFI_IFACE" ]; then
	echo "wifi-interface: $WIFI_IFACE" >> /mnt/config/config.yaml
fi
# gen fstab
cat > /mnt/current/etc/fstab << EOF
$BOOTPART /boot vfat defaults 0 2
$ROOTPART / ext4 defaults 0 1
EOF
# gen machine-id
chroot /mnt/current systemd-machine-id-setup
# gen ssh host keys, non-interactively (empty passphrase, explicit output file).
# BUGFIX: the previous invocation passed the key path as a positional argument
# (which ssh-keygen rejects) and omitted -N, which makes ssh-keygen prompt for
# a passphrase and hang an unattended install.
# NOTE(review): keys are written to /etc/ssh_host_* rather than the
# conventional /etc/ssh/ssh_host_* -- confirm sshd_config points there.
ssh-keygen -q -t rsa -N '' -f /mnt/current/etc/ssh_host_rsa_key
ssh-keygen -q -t ecdsa -N '' -f /mnt/current/etc/ssh_host_ecdsa_key
ssh-keygen -q -t ed25519 -N '' -f /mnt/current/etc/ssh_host_ed25519_key

# Install the bootloader from within the new root.
mount --bind /dev /mnt/current/dev
mount --bind /sys /mnt/current/sys
mount --bind /proc /mnt/current/proc
chroot /mnt/current update-grub
chroot /mnt/current grub-install $OSDISK
umount /mnt/current/dev
umount /mnt/current/sys
umount /mnt/current/proc
umount /mnt/current/boot
umount /mnt

93
build/lib/scripts/postinst Executable file
View File

@@ -0,0 +1,93 @@
#!/bin/sh
# postinst: configure a stock Debian-family system for EmbassyOS.
# Runs as a package post-install hook (dpkg) or is sourced directly by the
# raspberry-pi first-boot initialization script.
set -e
# Under dpkg, use deb-systemd-helper so unit state is tracked by the package.
SYSTEMCTL=systemctl
if [ -n "$DPKG_MAINTSCRIPT_PACKAGE" ]; then
SYSTEMCTL=deb-systemd-helper
fi
# Interpose the embassy grub-probe wrapper so grub resolves paths on the
# overlay root (see scripts/grub-probe-eos).
if [ -f /usr/sbin/grub-probe ]; then
mv /usr/sbin/grub-probe /usr/sbin/grub-probe-default
ln -s /usr/lib/embassy/scripts/grub-probe-eos /usr/sbin/grub-probe
fi
# Install the embassy initramfs boot script, ensure the overlay kernel module
# is included in the initramfs, then rebuild initramfs for all kernels.
cp /usr/lib/embassy/scripts/embassy-initramfs-module /etc/initramfs-tools/scripts/embassy
if ! grep overlay /etc/initramfs-tools/modules > /dev/null; then
echo overlay >> /etc/initramfs-tools/modules
fi
update-initramfs -u -k all
# Boot with boot=embassy so the embassy initramfs script takes over.
if [ -f /etc/default/grub ]; then
sed -i '/\(^\|#\)GRUB_CMDLINE_LINUX=/c\GRUB_CMDLINE_LINUX="boot=embassy"' /etc/default/grub
fi
# change timezone
rm -f /etc/localtime
ln -s /usr/share/zoneinfo/Etc/UTC /etc/localtime
# switch to systemd-resolved & network-manager
echo "#" > /etc/network/interfaces
ln -rsf /run/systemd/resolve/stub-resolv.conf /etc/resolv.conf
cat << EOF > /etc/NetworkManager/NetworkManager.conf
[main]
plugins=ifupdown,keyfile
dns=systemd-resolved
[ifupdown]
managed=true
EOF
$SYSTEMCTL enable systemd-resolved.service
# Disable services that embassy replaces or that are unwanted on an appliance.
$SYSTEMCTL disable wpa_supplicant.service
$SYSTEMCTL disable postgresql.service
$SYSTEMCTL disable bluetooth.service
$SYSTEMCTL disable hciuart.service
$SYSTEMCTL disable triggerhappy.service
# Never sleep/suspend/hibernate.
$SYSTEMCTL mask sleep.target
$SYSTEMCTL mask suspend.target
$SYSTEMCTL mask hibernate.target
$SYSTEMCTL mask hybrid-sleep.target
if which gsettings > /dev/null; then
gsettings set org.gnome.settings-daemon.plugins.power sleep-inactive-ac-timeout '0'
gsettings set org.gnome.settings-daemon.plugins.power sleep-inactive-battery-timeout '0'
fi
# Always restart tor; run dockerd with the systemd cgroup driver.
sed -i 's/Restart=on-failure/Restart=always/g' /lib/systemd/system/tor@default.service
sed -i 's/ExecStart=\/usr\/bin\/dockerd/ExecStart=\/usr\/bin\/dockerd --exec-opt native.cgroupdriver=systemd/g' /lib/systemd/system/docker.service
# nginx: register the wasm MIME type and allow longer server names.
sed -i '/}/i \ \ \ \ application\/wasm \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ wasm;' /etc/nginx/mime.types
sed -i 's/# server_names_hash_bucket_size 64;/server_names_hash_bucket_size 128;/g' /etc/nginx/nginx.conf
# avahi: raise the per-entry-group entry limit.
sed -i '/\(^\|#\)entries-per-entry-group-max=/c\entries-per-entry-group-max=128' /etc/avahi/avahi-daemon.conf
# journald: persistent, compressed, capped at 1G, no syslog forwarding.
sed -i '/\(^\|#\)Storage=/c\Storage=persistent' /etc/systemd/journald.conf
sed -i '/\(^\|#\)Compress=/c\Compress=yes' /etc/systemd/journald.conf
sed -i '/\(^\|#\)SystemMaxUse=/c\SystemMaxUse=1G' /etc/systemd/journald.conf
sed -i '/\(^\|#\)ForwardToSyslog=/c\ForwardToSyslog=no' /etc/systemd/journald.conf
# Run docker containers under a dedicated systemd slice.
mkdir -p /etc/docker
ln -sf /usr/lib/embassy/docker-engine.slice /etc/systemd/system/docker-engine.slice
echo '{ "cgroup-parent": "docker-engine.slice" }' > /etc/docker/daemon.json
mkdir -p /etc/nginx/ssl
# fix to suppress docker warning, fixed in 21.xx release of docker cli: https://github.com/docker/cli/pull/2934
mkdir -p /root/.docker
touch /root/.docker/config.json
# tor: expose SOCKS to localhost and the 172.18.0.0/16 docker range only;
# enable the control port with cookie auth.
cat << EOF > /etc/tor/torrc
SocksPort 0.0.0.0:9050
SocksPolicy accept 127.0.0.1
SocksPolicy accept 172.18.0.0/16
SocksPolicy reject *
ControlPort 9051
CookieAuthentication 1
EOF
# Drop any pre-existing tor state (keys, caches) from the image.
rm -rf /var/lib/tor/*
echo "fs.inotify.max_user_watches=1048576" > /etc/sysctl.d/97-embassy.conf
# Replace the stock MOTD with the embassy one.
rm -f /etc/motd
ln -sf /usr/lib/embassy/motd /etc/update-motd.d/00-embassy
chmod -x /etc/update-motd.d/*
chmod +x /etc/update-motd.d/00-embassy

View File

@@ -1,42 +0,0 @@
#!/bin/bash
set -e
function partition_for () {
if [[ "$1" =~ [0-9]+$ ]]; then
echo "$1p$2"
else
echo "$1$2"
fi
}
SOURCE="${BASH_SOURCE[0]}"
while [ -h "$SOURCE" ]; do # resolve $SOURCE until the file is no longer a symlink
DIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null 2>&1 && pwd )"
SOURCE="$(readlink "$SOURCE")"
[[ $SOURCE != /* ]] && SOURCE="$DIR/$SOURCE" # if $SOURCE was a relative symlink, we need to resolve it relative to the path where the symlink file was located
done
DIR="$( cd -P "$( dirname "$SOURCE" )" >/dev/null 2>&1 && pwd )"
cd "$DIR/.."
truncate --size=$[(31116287+1)*512] eos.img
if [ -z "$OUTPUT_DEVICE" ]; then
export OUTPUT_DEVICE=$(sudo losetup --show -fP eos.img)
export DETACH_OUTPUT_DEVICE=1
else
export DETACH_OUTPUT_DEVICE=0
sudo dd if=/dev/zero of=$OUTPUT_DEVICE bs=1M count=1
fi
export LOOPDEV=$(sudo losetup --show -fP raspios.img)
./build/partitioning.sh
./build/write-image.sh
sudo e2fsck -f -y `partition_for ${OUTPUT_DEVICE} 3`
sudo resize2fs -M `partition_for ${OUTPUT_DEVICE} 3`
BLOCK_INFO=$(sudo dumpe2fs `partition_for ${OUTPUT_DEVICE} 3`)
BLOCK_COUNT=$(echo "$BLOCK_INFO" | grep "Block count:" | sed 's/Block count:\s\+//g')
BLOCK_SIZE=$(echo "$BLOCK_INFO" | grep "Block size:" | sed 's/Block size:\s\+//g')
echo "YOUR GREEN FILESYSTEM is '$[$BLOCK_COUNT*$BLOCK_SIZE]' BYTES"
echo "IF YOU ARE QUICK-FLASHING FROM MAC-OS, NOTE THIS NUMBER FOR LATER"
if [ "$DETACH_OUTPUT_DEVICE" -eq "1" ]; then
sudo losetup -d $OUTPUT_DEVICE
fi

View File

@@ -1,6 +0,0 @@
#!/bin/bash
set -e
# Use fdisk to create DOS partition table with 4 primary partitions, set 1 as bootable, write, and quite
(echo o; echo x; echo i; echo "0xcb15ae4d"; echo r; echo n; echo p; echo 1; echo 2048; echo 526335; echo t; echo c; echo n; echo p; echo 2; echo 526336; echo 1050623; echo t; echo 2; echo c; echo n; echo p; echo 3; echo 1050624; echo 16083455; echo n; echo p; echo 16083456; echo 31116287; echo a; echo 1; echo w) | sudo fdisk ${OUTPUT_DEVICE} > /dev/null

View File

@@ -1,132 +0,0 @@
#!/bin/bash
set -e
function mktmpfifo () {
TMP_PATH=$(mktemp)
rm $TMP_PATH
mkfifo $TMP_PATH
echo $TMP_PATH
}
echo 'This script will only work on a card that has previously had a full image written to it.'
echo 'It will *only* flash the ext4 portion (`green` partition) of the img file onto the card.'
echo 'The product key, disk guid, and kernel data will *not* be affected.'
read -p "Continue? [y/N]" -n 1 -r
echo
if ! [[ "$REPLY" =~ ^[Yy]$ ]]; then
exit 1
fi
if ! which pv > /dev/null; then
>&2 echo 'This script would like to use `pv` to show a progress indicator, but it is not installed.'
if which apt-get > /dev/null; then
read -p "Install? [y/N]" -n 1 -r
echo
if [[ "$REPLY" =~ ^[Yy]$ ]]; then
sudo apt-get install pv
fi
elif which pacman > /dev/null; then
read -p "Install? [y/N]" -n 1 -r
echo
if [[ "$REPLY" =~ ^[Yy]$ ]]; then
sudo pacman -S pv
fi
elif which brew > /dev/null; then
read -p "Install? [y/N]" -n 1 -r
echo
if [[ "$REPLY" =~ ^[Yy]$ ]]; then
brew install pv
fi
else
>&2 echo 'This script does not recognize what package manager you have available on your system.'
>&2 echo 'Please go install the utility manually if you want progress reporting.'
fi
fi
if [[ "$(uname)" == "Darwin" ]]; then
export TARGET_PARTITION="/dev/disk$(diskutil list | grep EMBASSY | head -1 | rev | cut -b 3)s3"
if ! test -e $TARGET_PARTITION; then
>&2 echo '`green` partition not found'
exit 1
fi
export SOURCE_DEVICE="$(hdiutil attach -nomount eos.img | head -n1 | sed -E 's/([^ ]+).*$/\1/g')"
export SOURCE_PARTITION="${SOURCE_DEVICE}s3"
function detach () {
hdiutil detach $SOURCE_DEVICE
}
else
if ! test -e /dev/disk/by-label/green; then
>&2 echo '`green` partition not found'
exit 1
fi
export TARGET_PARTITION=$(readlink -f /dev/disk/by-label/green)
export SOURCE_DEVICE="$(sudo losetup --show -fP eos.img)"
export SOURCE_PARTITION="${SOURCE_DEVICE}p3"
function detach () {
sudo losetup -d ${SOURCE_DEVICE}
}
fi
if [[ "$TARGET_PARTITION" =~ ^/dev/loop ]]; then
>&2 echo 'You are currently flashing onto a loop device.'
>&2 echo 'This is probably a mistake, and usually means you failed to detach a .img file.'
read -p "Continue anyway? [y/N]" -n 1 -r
echo
if ! [[ "$REPLY" =~ ^[Yy]$ ]]; then
exit 1
fi
fi
if [[ "$(uname)" == "Darwin" ]]; then
if test -z "$FS_SIZE"; then
read -p "Enter FS Size (shown during make of eos.img)" -r
export FS_SIZE=$REPLY
fi
else
sudo e2fsck -f ${SOURCE_PARTITION}
sudo resize2fs -M ${SOURCE_PARTITION}
export BLOCK_INFO=$(sudo dumpe2fs ${SOURCE_PARTITION})
export BLOCK_COUNT=$(echo "$BLOCK_INFO" | grep "Block count:" | sed 's/Block count:\s\+//g')
export BLOCK_SIZE=$(echo "$BLOCK_INFO" | grep "Block size:" | sed 's/Block size:\s\+//g')
export FS_SIZE=$[$BLOCK_COUNT*$BLOCK_SIZE]
fi
echo "Flashing $FS_SIZE bytes to $TARGET_PARTITION"
if [[ "$(uname)" == "Darwin" ]]; then
if which pv > /dev/null; then
sudo cat ${SOURCE_PARTITION} | head -c $FS_SIZE | pv -s $FS_SIZE | sudo dd of=${TARGET_PARTITION} bs=1m 2>/dev/null
else
sudo cat ${SOURCE_PARTITION} | head -c $FS_SIZE | sudo dd of=${TARGET_PARTITION} bs=1m
fi
else
if which pv > /dev/null; then
sudo cat ${SOURCE_PARTITION} | head -c $FS_SIZE | pv -s $FS_SIZE | sudo dd of=${TARGET_PARTITION} bs=1M iflag=fullblock oflag=direct conv=fsync 2>/dev/null
else
sudo cat ${SOURCE_PARTITION} | head -c $FS_SIZE | sudo dd of=${TARGET_PARTITION} bs=1M iflag=fullblock oflag=direct conv=fsync
fi
fi
echo Verifying...
export INPUT_HASH=$(mktemp)
export OUTPUT_HASH=$(mktemp)
if which pv > /dev/null; then
export PV_IN=$(mktmpfifo)
fi
sudo cat ${SOURCE_PARTITION} | head -c $FS_SIZE | tee -a $PV_IN | sha256sum > $INPUT_HASH &
export INPUT_CHILD=$!
sudo cat ${TARGET_PARTITION} | head -c $FS_SIZE | tee -a $PV_IN | sha256sum > $OUTPUT_HASH &
export OUTPUT_CHILD=$!
if which pv > /dev/null; then
pv -s $[$FS_SIZE*2] < $PV_IN > /dev/null &
fi
wait $INPUT_CHILD $OUTPUT_CHILD
if which pv > /dev/null; then
rm $PV_IN
fi
detach
if ! [[ "$(cat $INPUT_HASH)" == "$(cat $OUTPUT_HASH)" ]]; then
rm $INPUT_HASH $OUTPUT_HASH
>&2 echo Verification Failed
exit 1
fi
rm $INPUT_HASH $OUTPUT_HASH
echo "Verification Succeeded"

View File

@@ -0,0 +1,5 @@
# Default EmbassyOS platform config for Raspberry Pi
# (copied to /etc/embassy/config.yaml by build/raspberry-pi/write-image.sh).
os-partitions:
boot: /dev/mmcblk0p1
root: /dev/mmcblk0p2
ethernet-interface: eth0
wifi-interface: wlan0

View File

@@ -0,0 +1,60 @@
#!/bin/bash
# First-boot initialization for the Raspberry Pi image: rename the default
# "pi" user to "start9", install embassy dependencies over the network, apply
# the shared postinst configuration, enable the embassy services, rebuild the
# initramfs, and reboot into the final system.
set -e
# introduce start9 username and embassy as default password
if ! awk -F: '{ print $1 }' /etc/passwd | grep start9
then
usermod -l start9 -d /home/start9 -m pi
groupmod --new-name start9 pi
echo start9:embassy | chpasswd
fi
# Wait (up to 60s) for the default gateway to answer a ping.
START=$(date +%s)
while ! ping -q -w 1 -c 1 `ip r | grep default | cut -d ' ' -f 3` > /dev/null; do
>&2 echo "Waiting for internet connection..."
sleep 1
if [ "$[$START + 60]" -lt $(date +%s) ]; then
>&2 echo "Timed out waiting for internet connection..."
exit 1
fi
done
echo "Connected to network"
# Convert all repos to use https:// before apt update
sed -i "s/http:/https:/g" /etc/apt/sources.list /etc/apt/sources.list.d/*.list
. /usr/lib/embassy/scripts/add-apt-sources
apt-get update
apt-get upgrade -y
# Install the packages listed in /usr/lib/embassy/depends; purge the ones in
# /usr/lib/embassy/conflicts (plus beep).
apt-get install -y $(cat /usr/lib/embassy/depends)
apt-get remove --purge -y $(cat /usr/lib/embassy/conflicts) beep
apt-get autoremove -y
systemctl stop tor
# Apply the shared EmbassyOS system configuration (scripts/postinst).
. /usr/lib/embassy/scripts/postinst
systemctl enable embassyd.service embassy-init.service
# Add usb-storage quirks for known-problematic USB storage adapters.
sed -i 's/^/usb-storage.quirks=152d:0562:u,14cd:121c:u,0781:cfcb:u /g' /boot/cmdline.txt
# making that *sudo docker stats* command fulfil its purpose by displaying all metrics
sed -i 's/rootwait quiet.*/rootwait cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory quiet/g' /boot/cmdline.txt
# These one-shot setup services are no longer needed after this run.
systemctl disable nc-broadcast.service
systemctl disable initialization.service
# Build an initramfs for the running kernel, point the firmware config at it,
# and boot with boot=embassy so the embassy initramfs script takes over.
update-initramfs -c -k "$(uname -r)"
sed -i /boot/config.txt -e "/initramfs.*/d"
echo initramfs "initrd.img-$(uname -r)" >> /boot/config.txt
sed -i /boot/cmdline.txt -e "s/^/boot=embassy /"
# Lock the default password now that setup is complete.
# (write-image.sh strips this line for dev-environment images.)
passwd -l start9
sync
reboot

View File

@@ -0,0 +1,18 @@
#!/bin/bash
# Build embassyos-raspi.img from a stock raspios.img: copy the base image,
# attach it to a loop device, run the raspberry-pi write-image script against
# it, then fsck and shrink the root filesystem before detaching.
set -e

# partition_for <disk> <index>: print the device node for the given partition.
# Devices whose names end in a digit (loop0, mmcblk0) take a "p" separator.
function partition_for () {
	case "$1" in
		*[0-9]) echo "${1}p${2}" ;;
		*)      echo "${1}${2}" ;;
	esac
}

cp raspios.img embassyos-raspi.img
# write-image.sh reads OUTPUT_DEVICE from the environment.
export OUTPUT_DEVICE=$(sudo losetup --show -fP embassyos-raspi.img)
./build/raspberry-pi/write-image.sh
# Check and minimize the root filesystem (partition 2) so the image can be
# flashed quickly, then release the loop device.
ROOT_PART=$(partition_for "${OUTPUT_DEVICE}" 2)
sudo e2fsck -f -y "$ROOT_PART"
sudo resize2fs -M "$ROOT_PART"
sudo losetup -d "$OUTPUT_DEVICE"

View File

@@ -0,0 +1,55 @@
#!/bin/bash
# Populate a raspios image (attached as $OUTPUT_DEVICE) with EmbassyOS:
# tweak the firmware boot config on the boot partition, install the embassy
# binaries/services into the root filesystem, and stage the first-boot
# initialization units. Expects to run from the repository root with
# OUTPUT_DEVICE (and optionally ENVIRONMENT) set by the caller
# (see build/raspberry-pi/make-image.sh).
set -e
# partition_for <disk> <index>: print the device node for the given partition;
# disks whose names end in a digit use a "p" separator (e.g. loop0p1).
function partition_for () {
if [[ "$1" =~ [0-9]+$ ]]; then
echo "$1p$2"
else
echo "$1$2"
fi
}
# Mount the boot partition and config
mkdir -p /tmp/eos-mnt
sudo mount `partition_for ${OUTPUT_DEVICE} 1` /tmp/eos-mnt
# Strip all existing dtoverlay lines, then enable pwm-2chan, disable
# bluetooth, and cap GPU memory at 16MB.
cat /tmp/eos-mnt/config.txt | grep -v "dtoverlay=" | sudo tee /tmp/eos-mnt/config.txt.tmp > /dev/null
echo "dtoverlay=pwm-2chan,disable-bt" | sudo tee -a /tmp/eos-mnt/config.txt.tmp > /dev/null
echo "gpu_mem=16" | sudo tee -a /tmp/eos-mnt/config.txt.tmp > /dev/null
sudo mv /tmp/eos-mnt/config.txt.tmp /tmp/eos-mnt/config.txt
# An empty "ssh" file on the boot partition enables the ssh server on first
# boot (Raspberry Pi OS convention).
sudo touch /tmp/eos-mnt/ssh
sudo umount /tmp/eos-mnt
# Mount the root partition and install EmbassyOS into it.
sudo mount `partition_for ${OUTPUT_DEVICE} 2` /tmp/eos-mnt
sudo mkdir /tmp/eos-mnt/media/embassy/
sudo make install ARCH=aarch64 DESTDIR=/tmp/eos-mnt
sudo sed -i 's/raspberrypi/embassy/g' /tmp/eos-mnt/etc/hostname
sudo sed -i 's/raspberrypi/embassy/g' /tmp/eos-mnt/etc/hosts
sudo cp cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast /tmp/eos-mnt/usr/local/bin
sudo cp backend/*.service /tmp/eos-mnt/etc/systemd/system/
sudo mkdir -p /tmp/eos-mnt/etc/embassy
sudo cp build/raspberry-pi/config.yaml /tmp/eos-mnt/etc/embassy/config.yaml
# Make the .ssh directory for UID 1000 user
sudo mkdir -p /tmp/eos-mnt/home/$(awk -v val=1000 -F ":" '$3==val{print $1}' /tmp/eos-mnt/etc/passwd)/.ssh
# Rename the pi user's sudoers entry and autologin unit to start9.
sudo mv /tmp/eos-mnt/etc/sudoers.d/010_pi-nopasswd /tmp/eos-mnt/etc/sudoers.d/010_start9-nopasswd
sudo sed -i 's/pi/start9/g' /tmp/eos-mnt/etc/sudoers.d/010_start9-nopasswd
sudo sed -i 's/ pi / start9 /g' /tmp/eos-mnt/etc/systemd/system/autologin@.service
# Dev-environment images keep the default password usable: strip the
# "passwd -l start9" line from the first-boot script before installing it.
if [[ "$ENVIRONMENT" =~ (^|-)dev($|-) ]]; then
cat ./build/raspberry-pi/initialization.sh | grep -v "passwd -l start9" | sudo tee /tmp/eos-mnt/usr/local/bin/initialization.sh > /dev/null
sudo chmod +x /tmp/eos-mnt/usr/local/bin/initialization.sh
else
sudo cp ./build/raspberry-pi/initialization.sh /tmp/eos-mnt/usr/local/bin
fi
# Stage the one-shot first-boot services (initialization.sh disables them
# again once it has run).
sudo cp ./build/raspberry-pi/init-with-sound.sh /tmp/eos-mnt/usr/local/bin
sudo cp ./build/raspberry-pi/initialization.service /tmp/eos-mnt/etc/systemd/system/initialization.service
sudo ln -s /etc/systemd/system/initialization.service /tmp/eos-mnt/etc/systemd/system/multi-user.target.wants/initialization.service
sudo cp ./build/raspberry-pi/nc-broadcast.service /tmp/eos-mnt/etc/systemd/system/nc-broadcast.service
sudo ln -s /etc/systemd/system/nc-broadcast.service /tmp/eos-mnt/etc/systemd/system/multi-user.target.wants/nc-broadcast.service
sudo umount /tmp/eos-mnt

View File

@@ -1,105 +0,0 @@
#!/bin/bash
set -e
function partition_for () {
if [[ "$1" =~ [0-9]+$ ]]; then
echo "$1p$2"
else
echo "$1$2"
fi
}
# Write contents of LOOPDEV (Ubuntu image) to sd card and make filesystems, then detach the loop device
echo USING $LOOPDEV TO IMAGE $OUTPUT_DEVICE WITH ENVIRONMENT $ENVIRONMENT
sudo dd if=${LOOPDEV}p1 of=`partition_for ${OUTPUT_DEVICE} 1` bs=1M iflag=fullblock oflag=direct conv=fsync status=progress
sudo mkfs.vfat -F 32 `partition_for ${OUTPUT_DEVICE} 2`
sudo dd if=${LOOPDEV}p2 of=`partition_for ${OUTPUT_DEVICE} 3` bs=1M iflag=fullblock oflag=direct conv=fsync status=progress
sudo mkfs.ext4 `partition_for ${OUTPUT_DEVICE} 4`
sudo losetup -d $LOOPDEV
# Label the filesystems
sudo fatlabel `partition_for ${OUTPUT_DEVICE} 1` system-boot
sudo fatlabel `partition_for ${OUTPUT_DEVICE} 2` EMBASSY
sudo e2label `partition_for ${OUTPUT_DEVICE} 3` green
sudo e2label `partition_for ${OUTPUT_DEVICE} 4` blue
# Mount the boot partition and config
mkdir -p /tmp/eos-mnt
sudo mount `partition_for ${OUTPUT_DEVICE} 1` /tmp/eos-mnt
sudo sed -i 's/PARTUUID=cb15ae4d-02/PARTUUID=cb15ae4d-03/g' /tmp/eos-mnt/cmdline.txt
sudo sed -i 's/ init=\/usr\/lib\/raspi-config\/init_resize.sh//g' /tmp/eos-mnt/cmdline.txt
cat /tmp/eos-mnt/config.txt | grep -v "dtoverlay=" | sudo tee /tmp/eos-mnt/config.txt.tmp > /dev/null
echo "dtoverlay=pwm-2chan,disable-bt" | sudo tee -a /tmp/eos-mnt/config.txt.tmp > /dev/null
echo "gpu_mem=16" | sudo tee -a /tmp/eos-mnt/config.txt.tmp > /dev/null
sudo mv /tmp/eos-mnt/config.txt.tmp /tmp/eos-mnt/config.txt
sudo touch /tmp/eos-mnt/ssh
sudo umount /tmp/eos-mnt
sudo mount `partition_for ${OUTPUT_DEVICE} 3` /tmp/eos-mnt
sudo mkdir /tmp/eos-mnt/media/boot-rw
sudo mkdir /tmp/eos-mnt/embassy-os
sudo mkdir /tmp/eos-mnt/etc/embassy
sudo cp ENVIRONMENT.txt /tmp/eos-mnt/etc/embassy
sudo cp GIT_HASH.txt /tmp/eos-mnt/etc/embassy
sudo cp build/fstab /tmp/eos-mnt/etc/fstab
sudo cp build/journald.conf /tmp/eos-mnt/etc/systemd/journald.conf
sudo sed -i 's/raspberrypi/embassy/g' /tmp/eos-mnt/etc/hostname
sudo sed -i 's/raspberrypi/embassy/g' /tmp/eos-mnt/etc/hosts
# copy over cargo dependencies
sudo cp cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast /tmp/eos-mnt/usr/local/bin
# Enter the backend directory, copy over the built embassyOS binaries and systemd services, edit the nginx config, then create the .ssh directory
cd backend/
sudo cp target/aarch64-unknown-linux-gnu/release/embassy-init /tmp/eos-mnt/usr/local/bin
sudo cp target/aarch64-unknown-linux-gnu/release/embassyd /tmp/eos-mnt/usr/local/bin
sudo cp target/aarch64-unknown-linux-gnu/release/embassy-cli /tmp/eos-mnt/usr/local/bin
sudo cp target/aarch64-unknown-linux-gnu/release/avahi-alias /tmp/eos-mnt/usr/local/bin
sudo cp *.service /tmp/eos-mnt/etc/systemd/system/
cd ..
# Copy system images
sudo mkdir -p /tmp/eos-mnt/var/lib/embassy/system-images
sudo cp system-images/**/*.tar /tmp/eos-mnt/var/lib/embassy/system-images
# after performing npm run build
sudo mkdir -p /tmp/eos-mnt/var/www/html
sudo cp -R frontend/dist/diagnostic-ui /tmp/eos-mnt/var/www/html/diagnostic
sudo cp -R frontend/dist/setup-wizard /tmp/eos-mnt/var/www/html/setup
sudo cp -R frontend/dist/ui /tmp/eos-mnt/var/www/html/main
sudo cp index.html /tmp/eos-mnt/var/www/html/index.html
# Make the .ssh directory for UID 1000 user
sudo mkdir -p /tmp/eos-mnt/home/$(awk -v val=1000 -F ":" '$3==val{print $1}' /tmp/eos-mnt/etc/passwd)/.ssh
sudo mv /tmp/eos-mnt/etc/sudoers.d/010_pi-nopasswd /tmp/eos-mnt/etc/sudoers.d/010_start9-nopasswd
sudo sed -i 's/pi/start9/g' /tmp/eos-mnt/etc/sudoers.d/010_start9-nopasswd
sudo sed -i 's/ pi / start9 /g' /tmp/eos-mnt/etc/systemd/system/autologin@.service
# Custom MOTD
sudo rm /tmp/eos-mnt/etc/motd
sudo cp ./build/00-embassy /tmp/eos-mnt/etc/update-motd.d
sudo chmod -x /tmp/eos-mnt/etc/update-motd.d/*
sudo chmod +x /tmp/eos-mnt/etc/update-motd.d/00-embassy
if [[ "$ENVIRONMENT" =~ (^|-)dev($|-) ]]; then
cat ./build/initialization.sh | grep -v "passwd -l start9" | sudo tee /tmp/eos-mnt/usr/local/bin/initialization.sh > /dev/null
sudo chmod +x /tmp/eos-mnt/usr/local/bin/initialization.sh
else
sudo cp ./build/initialization.sh /tmp/eos-mnt/usr/local/bin
fi
sudo cp ./build/init-with-sound.sh /tmp/eos-mnt/usr/local/bin
sudo cp ./build/initialization.service /tmp/eos-mnt/etc/systemd/system/initialization.service
sudo ln -s /etc/systemd/system/initialization.service /tmp/eos-mnt/etc/systemd/system/multi-user.target.wants/initialization.service
sudo cp ./build/nc-broadcast.service /tmp/eos-mnt/etc/systemd/system/nc-broadcast.service
sudo ln -s /etc/systemd/system/nc-broadcast.service /tmp/eos-mnt/etc/systemd/system/multi-user.target.wants/nc-broadcast.service
sudo umount /tmp/eos-mnt

View File

@@ -1,7 +1,7 @@
{ {
"name": null, "name": null,
"auto-check-updates": true, "auto-check-updates": true,
"ack-welcome": "0.3.3", "ack-welcome": "0.3.2.1",
"marketplace": { "marketplace": {
"selected-url": "https://registry.start9.com/", "selected-url": "https://registry.start9.com/",
"known-hosts": { "known-hosts": {

View File

@@ -20,7 +20,9 @@ cd -
echo "Creating Arm v8 Snapshot" echo "Creating Arm v8 Snapshot"
docker run --platform linux/arm64/v8 --mount type=bind,src=$(pwd),dst=/mnt arm64v8/ubuntu:20.04 /bin/sh -c "cd /mnt && /mnt/target/aarch64-unknown-linux-gnu/release/snapshot-creator" docker run --platform linux/arm64/v8 --mount type=bind,src=$(pwd),dst=/mnt arm64v8/ubuntu:20.04 /bin/sh -c "cd /mnt && /mnt/target/aarch64-unknown-linux-gnu/release/snapshot-creator"
sudo chown ${whoami}:${whoami} JS_SNAPSHOT.bin sudo chown -R $USER target
sudo chown -R $USER ~/.cargo
sudo chown $USER JS_SNAPSHOT.bin
sudo chmod 0644 JS_SNAPSHOT.bin sudo chmod 0644 JS_SNAPSHOT.bin
sudo mv -f JS_SNAPSHOT.bin ./js_engine/src/artifacts/ARM_JS_SNAPSHOT.bin sudo mv -f JS_SNAPSHOT.bin ./js_engine/src/artifacts/ARM_JS_SNAPSHOT.bin

View File

@@ -10,7 +10,9 @@ fi
echo "Creating v8 Snapshot" echo "Creating v8 Snapshot"
cargo run -p snapshot-creator --release cargo run -p snapshot-creator --release
sudo chown ${whoami}:${whoami} JS_SNAPSHOT.bin sudo chown -R $USER target
sudo chown -R $USER ~/.cargo
sudo chown $USER JS_SNAPSHOT.bin
sudo chmod 0644 JS_SNAPSHOT.bin sudo chmod 0644 JS_SNAPSHOT.bin
sudo mv -f JS_SNAPSHOT.bin ./js_engine/src/artifacts/JS_SNAPSHOT.bin sudo mv -f JS_SNAPSHOT.bin ./js_engine/src/artifacts/JS_SNAPSHOT.bin

View File

@@ -808,6 +808,15 @@ mod fns {
let parent = tokio::fs::canonicalize(parent).await?; let parent = tokio::fs::canonicalize(parent).await?;
Ok(child.starts_with(parent)) Ok(child.starts_with(parent))
} }
#[tokio::test]
async fn test_is_subset() {
assert!(
!is_subset("/home/drbonez", "/home/drbonez/code/fakedir/../../..")
.await
.unwrap()
)
}
} }
fn system_time_as_unix_ms(system_time: &SystemTime) -> Option<u64> { fn system_time_as_unix_ms(system_time: &SystemTime) -> Option<u64> {

View File

@@ -1 +1 @@
binfmt.tar /docker-images

View File

@@ -1,6 +1,15 @@
.DELETE_ON_ERROR: .DELETE_ON_ERROR:
all: binfmt.tar all: docker-images/aarch64.tar docker-images/x86_64.tar
binfmt.tar: Dockerfile clean:
DOCKER_CLI_EXPERIMENTAL=enabled docker buildx build --tag start9/x_system/binfmt --platform=linux/arm64 -o type=docker,dest=binfmt.tar . rm -rf docker-images
docker-images:
mkdir docker-images
docker-images/aarch64.tar: Dockerfile docker-images
docker buildx build --tag start9/x_system/binfmt --platform=linux/arm64 -o type=docker,dest=docker-images/aarch64.tar .
docker-images/x86_64.tar: Dockerfile docker-images
docker buildx build --tag start9/x_system/binfmt --platform=linux/amd64 -o type=docker,dest=docker-images/x86_64.tar .

View File

@@ -0,0 +1 @@
[{"Config":"3f7fd8db05afd7a5fa804ebfb0e40038068af52b45d8f9a04c1c9fa8d316e511.json","RepoTags":["multitest/binfmt:latest"],"Layers":["753827173463aa2b7471a5be8ac2323a1cf78ccedfdfcc7bb21831d9e0141732/layer.tar","e0747212aee318099c729893bb4cca2ff164f6bd154841d5777636f82b7f18d4/layer.tar"]},{"Config":"143329dfbcc6abb472070a2f0bae5cf9865f178e5391a15592e75b349c803c69.json","RepoTags":["multitest/binfmt:latest"],"Layers":["de6e72d83667be34f782cf1f0a376e0507d189872142e1934f14d3be4813715a/layer.tar","12670876b666af35f388c3d2160436257b390a163a4c7178863badd05394577a/layer.tar"]}]

View File

@@ -2,4 +2,4 @@
**/*.rs.bk **/*.rs.bk
.DS_Store .DS_Store
.vscode .vscode
compat.tar /docker-images

View File

@@ -2,10 +2,17 @@
.DELETE_ON_ERROR: .DELETE_ON_ERROR:
all: compat.tar all: docker-images/aarch64.tar
compat.tar: Dockerfile target/aarch64-unknown-linux-musl/release/compat clean:
DOCKER_CLI_EXPERIMENTAL=enabled docker buildx build --tag start9/x_system/compat --platform=linux/arm64 -o type=docker,dest=compat.tar . cargo clean
rm -rf docker-images
docker-images:
mkdir docker-images
docker-images/aarch64.tar: Dockerfile target/aarch64-unknown-linux-musl/release/compat docker-images
docker buildx build --tag start9/x_system/compat --platform=linux/arm64 -o type=docker,dest=docker-images/aarch64.tar .
target/aarch64-unknown-linux-musl/release/compat: $(COMPAT_SRC) target/aarch64-unknown-linux-musl/release/compat: $(COMPAT_SRC)
./build.sh ./build.sh

View File

@@ -15,6 +15,9 @@ fi
alias 'rust-musl-builder'='docker run $USE_TTY --rm -v "$HOME"/.cargo/registry:/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-musl-cross:aarch64-musl' alias 'rust-musl-builder'='docker run $USE_TTY --rm -v "$HOME"/.cargo/registry:/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-musl-cross:aarch64-musl'
cd ../../.. cd ../..
rust-musl-builder sh -c "(git config --global --add safe.directory '*'; cd embassy-os/system-images/compat && cargo +beta build --release --target=aarch64-unknown-linux-musl --no-default-features)" rust-musl-builder sh -c "(git config --global --add safe.directory '*'; cd system-images/compat && cargo +beta build --release --target=aarch64-unknown-linux-musl --no-default-features)"
cd embassy-os/system-images/compat cd system-images/compat
sudo chown -R $USER target
sudo chown -R $USER ~/.cargo

View File

@@ -1 +1 @@
utils.tar /docker-images

View File

@@ -1,6 +1,15 @@
.DELETE_ON_ERROR: .DELETE_ON_ERROR:
all: utils.tar all: docker-images/aarch64.tar docker-images/x86_64.tar
utils.tar: Dockerfile clean:
DOCKER_CLI_EXPERIMENTAL=enabled docker buildx build --tag start9/x_system/utils --platform=linux/arm64 -o type=docker,dest=utils.tar . rm -rf docker-images
docker-images:
mkdir docker-images
docker-images/aarch64.tar: Dockerfile docker-images
docker buildx build --tag start9/x_system/utils --platform=linux/arm64 -o type=docker,dest=docker-images/aarch64.tar .
docker-images/x86_64.tar: Dockerfile docker-images
docker buildx build --tag start9/x_system/utils --platform=linux/amd64 -o type=docker,dest=docker-images/x86_64.tar .