Compare commits

..

1 Commits

Author SHA1 Message Date
Aiden McClelland
9f7856883d secure erase data drive 2023-03-14 15:02:49 -06:00
314 changed files with 6227 additions and 8997 deletions

View File

@@ -1,6 +1,6 @@
name: 🐛 Bug Report name: 🐛 Bug Report
description: Create a report to help us improve StartOS description: Create a report to help us improve embassyOS
title: "[bug]: " title: '[bug]: '
labels: [Bug, Needs Triage] labels: [Bug, Needs Triage]
assignees: assignees:
- MattDHill - MattDHill
@@ -10,25 +10,27 @@ body:
label: Prerequisites label: Prerequisites
description: Please confirm you have completed the following. description: Please confirm you have completed the following.
options: options:
- label: I have searched for [existing issues](https://github.com/start9labs/start-os/issues) that already report this problem. - label: I have searched for [existing issues](https://github.com/start9labs/embassy-os/issues) that already report this problem.
required: true required: true
- type: input - type: input
attributes: attributes:
label: Server Hardware label: embassyOS Version
description: On what hardware are you running StartOS? Please be as detailed as possible! description: What version of embassyOS are you running?
placeholder: Pi (8GB) w/ 32GB microSD & Samsung T7 SSD placeholder: e.g. 0.3.0
validations:
required: true
- type: input
attributes:
label: StartOS Version
description: What version of StartOS are you running?
placeholder: e.g. 0.3.4.3
validations: validations:
required: true required: true
- type: dropdown - type: dropdown
attributes: attributes:
label: Client OS label: Device
description: What device are you using to connect to Embassy?
options:
- Phone/tablet
- Laptop/Desktop
validations:
required: true
- type: dropdown
attributes:
label: Device OS
description: What operating system is your device running? description: What operating system is your device running?
options: options:
- MacOS - MacOS
@@ -43,14 +45,14 @@ body:
required: true required: true
- type: input - type: input
attributes: attributes:
label: Client OS Version label: Device OS Version
description: What version is your device OS? description: What version is your device OS?
validations: validations:
required: true required: true
- type: dropdown - type: dropdown
attributes: attributes:
label: Browser label: Browser
description: What browser are you using to connect to your server? description: What browser are you using to connect to Embassy?
options: options:
- Firefox - Firefox
- Brave - Brave

View File

@@ -1,6 +1,6 @@
name: 💡 Feature Request name: 💡 Feature Request
description: Suggest an idea for StartOS description: Suggest an idea for embassyOS
title: "[feat]: " title: '[feat]: '
labels: [Enhancement] labels: [Enhancement]
assignees: assignees:
- MattDHill - MattDHill
@@ -10,7 +10,7 @@ body:
label: Prerequisites label: Prerequisites
description: Please confirm you have completed the following. description: Please confirm you have completed the following.
options: options:
- label: I have searched for [existing issues](https://github.com/start9labs/start-os/issues) that already suggest this feature. - label: I have searched for [existing issues](https://github.com/start9labs/embassy-os/issues) that already suggest this feature.
required: true required: true
- type: textarea - type: textarea
attributes: attributes:
@@ -27,7 +27,7 @@ body:
- type: textarea - type: textarea
attributes: attributes:
label: Describe Preferred Solution label: Describe Preferred Solution
description: How you want this feature added to StartOS? description: How you want this feature added to embassyOS?
- type: textarea - type: textarea
attributes: attributes:
label: Describe Alternatives label: Describe Alternatives

29
.github/workflows/README.md vendored Normal file
View File

@@ -0,0 +1,29 @@
# This folder contains GitHub Actions workflows for building the project
## backend
Runs: manually (on: workflow_dispatch) or called by product-pipeline (on: workflow_call)
This workflow uses the docker/setup-buildx-action@v2 action to prepare the environment for aarch64 cross compilation using docker buildx.
When execution of aarch64 containers is required, the docker/setup-qemu-action@v2 action is added.
A matrix-strategy has been used to build for both x86_64 and aarch64 platforms in parallel.
### Running unittests
Unit tests are run using [cargo-nextest](https://nexte.st/). First the sources are (cross-)compiled and archived. The archive is then run on the correct platform.
## frontend
Runs: manually (on: workflow_dispatch) or called by product-pipeline (on: workflow_call)
This workflow builds the frontends.
## product
Runs: when a pull request targets the master or next branch and when a change to the master or next branch is made
This workflow builds everything, re-using the backend and frontend workflows.
The download and extraction order of artifacts is relevant to `make`, as it checks the file timestamps to decide which targets need to be executed.
Result: eos.img
## a note on uploading artifacts
Artifacts are used to share data between jobs. File permissions are not maintained during artifact upload. Where file permissions are relevant, the workaround using tar has been used. See [here](https://github.com/actions/upload-artifact#maintaining-file-permissions-and-case-sensitive-files).

233
.github/workflows/backend.yaml vendored Normal file
View File

@@ -0,0 +1,233 @@
name: Backend
on:
workflow_call:
workflow_dispatch:
env:
RUST_VERSION: "1.67.1"
ENVIRONMENT: "dev"
jobs:
  # Builds the v8 JS snapshot for each target architecture and uploads it
  # as an artifact consumed by build_backend and the product pipeline.
  build_libs:
    name: Build libs
    strategy:
      fail-fast: false
      matrix:
        target: [x86_64, aarch64]
        include:
          - target: x86_64
            snapshot_command: ./build-v8-snapshot.sh
            artifact_name: js_snapshot
            artifact_path: libs/js_engine/src/artifacts/JS_SNAPSHOT.bin
          - target: aarch64
            snapshot_command: ./build-arm-v8-snapshot.sh
            artifact_name: arm_js_snapshot
            artifact_path: libs/js_engine/src/artifacts/ARM_JS_SNAPSHOT.bin
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: recursive
      # QEMU + buildx are only needed for the aarch64 cross build.
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v2
        if: ${{ matrix.target == 'aarch64' }}
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
        if: ${{ matrix.target == 'aarch64' }}
      - name: "Install Rust"
        run: |
          rustup toolchain install ${{ env.RUST_VERSION }} --profile minimal --no-self-update
          # BUGFIX: was `${{ inputs.rust }}`, but this workflow declares no
          # inputs, so the expression expanded to an empty string and
          # `rustup default` failed / defaulted nothing. Pin to env.RUST_VERSION.
          rustup default ${{ env.RUST_VERSION }}
        shell: bash
        if: ${{ matrix.target == 'x86_64' }}
      - uses: actions/cache@v3
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            libs/target/
          key: ${{ runner.os }}-cargo-libs-${{ matrix.target }}-${{ hashFiles('libs/Cargo.lock') }}
      - name: Build v8 snapshot
        run: ${{ matrix.snapshot_command }}
        working-directory: libs
      - uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.artifact_name }}
          path: ${{ matrix.artifact_path }}
build_backend:
name: Build backend
strategy:
fail-fast: false
matrix:
target: [x86_64, aarch64]
include:
- target: x86_64
snapshot_download: js_snapshot
- target: aarch64
snapshot_download: arm_js_snapshot
runs-on: ubuntu-latest
timeout-minutes: 120
needs: build_libs
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Download ${{ matrix.snapshot_download }} artifact
uses: actions/download-artifact@v3
with:
name: ${{ matrix.snapshot_download }}
path: libs/js_engine/src/artifacts/
- name: "Install Rust"
run: |
rustup toolchain install ${{ env.RUST_VERSION }} --profile minimal --no-self-update
rustup default ${{ inputs.rust }}
shell: bash
if: ${{ matrix.target == 'x86_64' }}
- uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
backend/target/
key: ${{ runner.os }}-cargo-backend-${{ matrix.target }}-${{ hashFiles('backend/Cargo.lock') }}
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get install libavahi-client-dev
if: ${{ matrix.target == 'x86_64' }}
- name: Check Git Hash
run: ./check-git-hash.sh
- name: Check Environment
run: ./check-environment.sh
- name: Build backend
run: make ARCH=${{ matrix.target }} backend
- name: 'Tar files to preserve file permissions'
run: make ARCH=${{ matrix.target }} backend-${{ matrix.target }}.tar
- uses: actions/upload-artifact@v3
with:
name: backend-${{ matrix.target }}
path: backend-${{ matrix.target }}.tar
- name: Install nextest
uses: taiki-e/install-action@nextest
- name: Build and archive tests
run: cargo nextest archive --archive-file nextest-archive-${{ matrix.target }}.tar.zst --target ${{ matrix.target }}-unknown-linux-gnu
working-directory: backend
if: ${{ matrix.target == 'x86_64' }}
- name: Build and archive tests
run: |
docker run --rm \
-v "$HOME/.cargo/registry":/root/.cargo/registry \
-v "$(pwd)":/home/rust/src \
-P start9/rust-arm-cross:aarch64 \
sh -c 'cd /home/rust/src/backend &&
rustup install ${{ env.RUST_VERSION }} &&
rustup override set ${{ env.RUST_VERSION }} &&
rustup target add aarch64-unknown-linux-gnu &&
curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin &&
cargo nextest archive --archive-file nextest-archive-${{ matrix.target }}.tar.zst --target ${{ matrix.target }}-unknown-linux-gnu'
if: ${{ matrix.target == 'aarch64' }}
- name: Reset permissions
run: sudo chown -R $USER target
working-directory: backend
if: ${{ matrix.target == 'aarch64' }}
- name: Upload archive to workflow
uses: actions/upload-artifact@v3
with:
name: nextest-archive-${{ matrix.target }}
path: backend/nextest-archive-${{ matrix.target }}.tar.zst
run_tests_backend:
name: Test backend
strategy:
fail-fast: false
matrix:
target: [x86_64, aarch64]
include:
- target: x86_64
- target: aarch64
runs-on: ubuntu-latest
timeout-minutes: 60
needs: build_backend
env:
CARGO_TERM_COLOR: always
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
if: ${{ matrix.target == 'aarch64' }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
if: ${{ matrix.target == 'aarch64' }}
- run: mkdir -p ~/.cargo/bin
if: ${{ matrix.target == 'x86_64' }}
- name: Install nextest
uses: taiki-e/install-action@v2
with:
tool: nextest@0.9.47
if: ${{ matrix.target == 'x86_64' }}
- name: Download archive
uses: actions/download-artifact@v3
with:
name: nextest-archive-${{ matrix.target }}
- name: Download nextest (aarch64)
run: wget -O nextest-aarch64.tar.gz https://get.nexte.st/0.9.47/linux-arm
if: ${{ matrix.target == 'aarch64' }}
- name: Run tests
run: |
${CARGO_HOME:-~/.cargo}/bin/cargo-nextest nextest run --no-fail-fast --archive-file nextest-archive-${{ matrix.target }}.tar.zst \
--filter-expr 'not (test(system::test_get_temp) | test(net::tor::test) | test(system::test_get_disk_usage) | test(net::ssl::certificate_details_persist) | test(net::ssl::ca_details_persist))'
if: ${{ matrix.target == 'x86_64' }}
- name: Run tests
run: |
docker run --rm --platform linux/arm64/v8 \
-v "/home/runner/.cargo/registry":/usr/local/cargo/registry \
-v "$(pwd)":/home/rust/src \
-e CARGO_TERM_COLOR=${{ env.CARGO_TERM_COLOR }} \
-P ubuntu:20.04 \
sh -c '
apt-get update &&
apt-get install -y ca-certificates &&
apt-get install -y rsync &&
cd /home/rust/src &&
mkdir -p ~/.cargo/bin &&
tar -zxvf nextest-aarch64.tar.gz -C ${CARGO_HOME:-~/.cargo}/bin &&
${CARGO_HOME:-~/.cargo}/bin/cargo-nextest nextest run --archive-file nextest-archive-${{ matrix.target }}.tar.zst \
--filter-expr "not (test(system::test_get_temp) | test(net::tor::test) | test(system::test_get_disk_usage) | test(net::ssl::certificate_details_persist) | test(net::ssl::ca_details_persist))"'
if: ${{ matrix.target == 'aarch64' }}

63
.github/workflows/debian.yaml vendored Normal file
View File

@@ -0,0 +1,63 @@
name: Debian Package
on:
workflow_call:
workflow_dispatch:
env:
NODEJS_VERSION: '16.11.0'
ENVIRONMENT: "dev"
jobs:
dpkg:
name: Build dpkg
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
repository: Start9Labs/embassy-os-deb
- uses: actions/checkout@v3
with:
submodules: recursive
path: embassyos-0.3.x
- run: |
cp -r debian embassyos-0.3.x/
VERSION=0.3.x ./control.sh
cp embassyos-0.3.x/backend/embassyd.service embassyos-0.3.x/debian/embassyos.embassyd.service
cp embassyos-0.3.x/backend/embassy-init.service embassyos-0.3.x/debian/embassyos.embassy-init.service
- uses: actions/setup-node@v3
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Get npm cache directory
id: npm-cache-dir
run: |
echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT
- uses: actions/cache@v3
id: npm-cache
with:
path: ${{ steps.npm-cache-dir.outputs.dir }}
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get install debmake debhelper-compat
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Run build
run: "make VERSION=0.3.x TAG=${{ github.ref_name }}"
- uses: actions/upload-artifact@v3
with:
name: deb
path: embassyos_0.3.x-1_amd64.deb

46
.github/workflows/frontend.yaml vendored Normal file
View File

@@ -0,0 +1,46 @@
name: Frontend
on:
workflow_call:
workflow_dispatch:
env:
NODEJS_VERSION: '16.11.0'
ENVIRONMENT: "dev"
jobs:
frontend:
name: Build frontend
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- uses: actions/setup-node@v3
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Get npm cache directory
id: npm-cache-dir
run: |
echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT
- uses: actions/cache@v3
id: npm-cache
with:
path: ${{ steps.npm-cache-dir.outputs.dir }}
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-
- name: Build frontends
run: make frontends
- name: 'Tar files to preserve file permissions'
run: tar -cvf frontend.tar ENVIRONMENT.txt GIT_HASH.txt VERSION.txt frontend/dist frontend/config.json
- uses: actions/upload-artifact@v3
with:
name: frontend
path: frontend.tar

129
.github/workflows/product.yaml vendored Normal file
View File

@@ -0,0 +1,129 @@
name: Build Pipeline
on:
workflow_dispatch:
push:
branches:
- master
- next
pull_request:
branches:
- master
- next
env:
ENVIRONMENT: "dev"
jobs:
compat:
uses: ./.github/workflows/reusable-workflow.yaml
with:
build_command: make system-images/compat/docker-images/aarch64.tar
artifact_name: compat.tar
artifact_path: system-images/compat/docker-images/aarch64.tar
utils:
uses: ./.github/workflows/reusable-workflow.yaml
with:
build_command: make system-images/utils/docker-images/aarch64.tar
artifact_name: utils.tar
artifact_path: system-images/utils/docker-images/aarch64.tar
binfmt:
uses: ./.github/workflows/reusable-workflow.yaml
with:
build_command: make system-images/binfmt/docker-images/aarch64.tar
artifact_name: binfmt.tar
artifact_path: system-images/binfmt/docker-images/aarch64.tar
backend:
uses: ./.github/workflows/backend.yaml
frontend:
uses: ./.github/workflows/frontend.yaml
image:
name: Build image
runs-on: ubuntu-latest
timeout-minutes: 60
needs: [compat,utils,binfmt,backend,frontend]
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Download compat.tar artifact
uses: actions/download-artifact@v3
with:
name: compat.tar
path: system-images/compat/docker-images/
- name: Download utils.tar artifact
uses: actions/download-artifact@v3
with:
name: utils.tar
path: system-images/utils/docker-images/
- name: Download binfmt.tar artifact
uses: actions/download-artifact@v3
with:
name: binfmt.tar
path: system-images/binfmt/docker-images/
- name: Download js_snapshot artifact
uses: actions/download-artifact@v3
with:
name: js_snapshot
path: libs/js_engine/src/artifacts/
- name: Download arm_js_snapshot artifact
uses: actions/download-artifact@v3
with:
name: arm_js_snapshot
path: libs/js_engine/src/artifacts/
- name: Download backend artifact
uses: actions/download-artifact@v3
with:
name: backend-aarch64
- name: 'Extract backend'
run:
tar -mxvf backend-aarch64.tar
- name: Download frontend artifact
uses: actions/download-artifact@v3
with:
name: frontend
- name: Skip frontend build
run: |
mkdir frontend/node_modules
mkdir frontend/dist
mkdir patch-db/client/node_modules
mkdir patch-db/client/dist
- name: 'Extract frontend'
run: |
tar -mxvf frontend.tar frontend/config.json
tar -mxvf frontend.tar frontend/dist
tar -xvf frontend.tar GIT_HASH.txt
tar -xvf frontend.tar ENVIRONMENT.txt
tar -xvf frontend.tar VERSION.txt
rm frontend.tar
- name: Cache raspiOS
id: cache-raspios
uses: actions/cache@v3
with:
path: raspios.img
key: cache-raspios
- name: Build image
run: |
make V=1 eos_raspberrypi-uninit.img --debug
- uses: actions/upload-artifact@v3
with:
name: image
path: eos_raspberrypi-uninit.img

70
.github/workflows/pureos-iso.yaml vendored Normal file
View File

@@ -0,0 +1,70 @@
name: PureOS Based ISO
on:
workflow_call:
workflow_dispatch:
push:
branches:
- master
- next
pull_request:
branches:
- master
- next
env:
ENVIRONMENT: "dev"
jobs:
dpkg:
uses: ./.github/workflows/debian.yaml
iso:
name: Build iso
runs-on: ubuntu-22.04
needs: [dpkg]
steps:
- uses: actions/checkout@v3
with:
repository: Start9Labs/eos-image-recipes
- name: Install dependencies
run: |
sudo apt update
wget http://ftp.us.debian.org/debian/pool/main/d/debspawn/debspawn_0.6.1-1_all.deb
sha256sum ./debspawn_0.6.1-1_all.deb | grep fb8a3f588438ff9ef51e713ec1d83306db893f0aa97447565e28bbba9c6e90c6
sudo apt-get install -y ./debspawn_0.6.1-1_all.deb
wget https://repo.pureos.net/pureos/pool/main/d/debootstrap/debootstrap_1.0.125pureos1_all.deb
sudo apt-get install -y --allow-downgrades ./debootstrap_1.0.125pureos1_all.deb
wget https://repo.pureos.net/pureos/pool/main/p/pureos-archive-keyring/pureos-archive-keyring_2021.11.0_all.deb
sudo apt-get install -y ./pureos-archive-keyring_2021.11.0_all.deb
- name: Configure debspawn
run: |
sudo mkdir -p /etc/debspawn/
echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
- uses: actions/cache@v3
with:
path: /var/lib/debspawn
key: ${{ runner.os }}-debspawn-init-byzantium
- name: Make build container
run: "debspawn list | grep byzantium || debspawn create --with-init byzantium"
- run: "mkdir -p overlays/vendor/root"
- name: Download dpkg
uses: actions/download-artifact@v3
with:
name: deb
path: overlays/vendor/root
- name: Run build
run: |
./run-local-build.sh --no-fakemachine byzantium none custom "" true
- uses: actions/upload-artifact@v3
with:
name: iso
path: results/*.iso

View File

@@ -0,0 +1,37 @@
name: Reusable Workflow
on:
workflow_call:
inputs:
build_command:
required: true
type: string
artifact_name:
required: true
type: string
artifact_path:
required: true
type: string
env:
ENVIRONMENT: "dev"
jobs:
generic_build_job:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Build image
run: ${{ inputs.build_command }}
- uses: actions/upload-artifact@v3
with:
name: ${{ inputs.artifact_name }}
path: ${{ inputs.artifact_path }}

View File

@@ -1,208 +0,0 @@
name: Debian-based ISO and SquashFS
on:
workflow_call:
workflow_dispatch:
inputs:
environment:
type: choice
description: Environment
options:
- NONE
- dev
- unstable
- dev-unstable
runner:
type: choice
description: Runner
options:
- standard
- fast
platform:
type: choice
description: Platform
options:
- ALL
- x86_64
- x86_64-nonfree
- aarch64
- aarch64-nonfree
- raspberrypi
push:
branches:
- master
- next
pull_request:
branches:
- master
- next
env:
NODEJS_VERSION: "18.15.0"
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
jobs:
all:
name: Build
strategy:
fail-fast: false
matrix:
platform: >-
${{
fromJson(
format(
'[
["{0}"],
["x86_64", "x86_64-nonfree", "aarch64", "aarch64-nonfree", "raspberrypi"]
]',
github.event.inputs.platform || 'ALL'
)
)[(github.event.inputs.platform || 'ALL') == 'ALL']
}}
runs-on: >-
${{
fromJson(
format(
'["ubuntu-22.04", "{0}"]',
fromJson('{
"x86_64": ["buildjet-32vcpu-ubuntu-2204", "buildjet-32vcpu-ubuntu-2204"],
"x86_64-nonfree": ["buildjet-32vcpu-ubuntu-2204", "buildjet-32vcpu-ubuntu-2204"],
"aarch64": ["buildjet-16vcpu-ubuntu-2204-arm", "buildjet-32vcpu-ubuntu-2204-arm"],
"aarch64-nonfree": ["buildjet-16vcpu-ubuntu-2204-arm", "buildjet-32vcpu-ubuntu-2204-arm"],
"raspberrypi": ["buildjet-16vcpu-ubuntu-2204-arm", "buildjet-32vcpu-ubuntu-2204-arm"],
}')[matrix.platform][github.event.inputs.platform == matrix.platform]
)
)[github.event.inputs.runner == 'fast']
}}
steps:
- name: Free space
run: df -h && rm -rf /opt/hostedtoolcache* && df -h
if: ${{ github.event.inputs.runner != 'fast' }}
- run: |
sudo mount -t tmpfs tmpfs .
if: ${{ github.event.inputs.runner == 'fast' && (matrix.platform == 'x86_64' || matrix.platform == 'x86_64-nonfree' || github.event.inputs.platform == matrix.platform) }}
- uses: actions/checkout@v3
with:
repository: Start9Labs/embassy-os-deb
path: embassy-os-deb
- uses: actions/checkout@v3
with:
submodules: recursive
path: embassy-os-deb/embassyos-0.3.x
- run: |
cp -r debian embassyos-0.3.x/
VERSION=0.3.x ./control.sh
cp embassyos-0.3.x/backend/startd.service embassyos-0.3.x/debian/embassyos.startd.service
working-directory: embassy-os-deb
- uses: actions/setup-node@v3
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Get npm cache directory
id: npm-cache-dir
run: |
echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT
- uses: actions/cache@v3
id: npm-cache
with:
path: ${{ steps.npm-cache-dir.outputs.dir }}
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get install \
debmake \
debhelper-compat \
crossbuild-essential-arm64
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Run dpkg build
working-directory: embassy-os-deb
run: "make VERSION=0.3.x TAG=${{ github.ref_name }}"
env:
OS_ARCH: ${{ matrix.platform }}
- uses: actions/checkout@v3
with:
repository: Start9Labs/startos-image-recipes
path: startos-image-recipes
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get install -y qemu-user-static
wget https://deb.debian.org/debian/pool/main/d/debspawn/debspawn_0.6.2-1_all.deb
sha256sum ./debspawn_0.6.2-1_all.deb | grep 37ef27458cb1e35e8bce4d4f639b06b4b3866fc0b9191ec6b9bd157afd06a817
sudo apt-get install -y ./debspawn_0.6.2-1_all.deb
- name: Configure debspawn
run: |
sudo mkdir -p /etc/debspawn/
echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
sudo mkdir -p /var/tmp/debspawn
- run: sudo mount -t tmpfs tmpfs /var/tmp/debspawn
if: ${{ github.event.inputs.runner == 'fast' && (matrix.platform == 'x86_64' || matrix.platform == 'x86_64-nonfree') }}
- uses: actions/cache@v3
with:
path: /var/lib/debspawn
key: ${{ runner.os }}-${{ matrix.platform }}-debspawn-init
- run: "mkdir -p startos-image-recipes/overlays/deb"
- run: "mv embassy-os-deb/embassyos_0.3.x-1_*.deb startos-image-recipes/overlays/deb/"
- run: "rm -rf embassy-os-deb ${{ steps.npm-cache-dir.outputs.dir }} $HOME/.cargo"
- name: Run iso build
working-directory: startos-image-recipes
run: |
./run-local-build.sh ${{ matrix.platform }}
- uses: actions/upload-artifact@v3
with:
name: ${{ matrix.platform }}.squashfs
path: startos-image-recipes/results/*.squashfs
- uses: actions/upload-artifact@v3
with:
name: ${{ matrix.platform }}.iso
path: startos-image-recipes/results/*.iso
if: ${{ matrix.platform != 'raspberrypi' }}
- uses: actions/checkout@v3
with:
submodules: recursive
path: start-os
if: ${{ matrix.platform == 'raspberrypi' }}
- run: "mv startos-image-recipes/results/startos-*_raspberrypi.squashfs start-os/startos.raspberrypi.squashfs"
if: ${{ matrix.platform == 'raspberrypi' }}
- run: rm -rf startos-image-recipes
if: ${{ matrix.platform == 'raspberrypi' }}
- name: Build image
working-directory: start-os
run: make startos_raspberrypi.img
if: ${{ matrix.platform == 'raspberrypi' }}
- uses: actions/upload-artifact@v3
with:
name: raspberrypi.img
path: start-os/startos-*_raspberrypi.img
if: ${{ matrix.platform == 'raspberrypi' }}

View File

@@ -1,6 +1,6 @@
<!-- omit in toc --> <!-- omit in toc -->
# Contributing to StartOS # Contributing to Embassy OS
First off, thanks for taking the time to contribute! ❤️ First off, thanks for taking the time to contribute! ❤️
@@ -19,7 +19,7 @@ forward to your contributions. 🎉
> - Tweet about it > - Tweet about it
> - Refer this project in your project's readme > - Refer this project in your project's readme
> - Mention the project at local meetups and tell your friends/colleagues > - Mention the project at local meetups and tell your friends/colleagues
> - Buy a [Start9 server](https://start9.com) > - Buy an [Embassy](https://start9labs.com)
<!-- omit in toc --> <!-- omit in toc -->
@@ -49,7 +49,7 @@ forward to your contributions. 🎉
> [Documentation](https://docs.start9labs.com). > [Documentation](https://docs.start9labs.com).
Before you ask a question, it is best to search for existing Before you ask a question, it is best to search for existing
[Issues](https://github.com/Start9Labs/start-os/issues) that might help you. [Issues](https://github.com/Start9Labs/embassy-os/issues) that might help you.
In case you have found a suitable issue and still need clarification, you can In case you have found a suitable issue and still need clarification, you can
write your question in this issue. It is also advisable to search the internet write your question in this issue. It is also advisable to search the internet
for answers first. for answers first.
@@ -57,7 +57,7 @@ for answers first.
If you then still feel the need to ask a question and need clarification, we If you then still feel the need to ask a question and need clarification, we
recommend the following: recommend the following:
- Open an [Issue](https://github.com/Start9Labs/start-os/issues/new). - Open an [Issue](https://github.com/Start9Labs/embassy-os/issues/new).
- Provide as much context as you can about what you're running into. - Provide as much context as you can about what you're running into.
- Provide project and platform versions, depending on what seems relevant. - Provide project and platform versions, depending on what seems relevant.
@@ -105,7 +105,7 @@ steps in advance to help us fix any potential bug as fast as possible.
- To see if other users have experienced (and potentially already solved) the - To see if other users have experienced (and potentially already solved) the
same issue you are having, check if there is not already a bug report existing same issue you are having, check if there is not already a bug report existing
for your bug or error in the for your bug or error in the
[bug tracker](https://github.com/Start9Labs/start-os/issues?q=label%3Abug). [bug tracker](https://github.com/Start9Labs/embassy-os/issues?q=label%3Abug).
- Also make sure to search the internet (including Stack Overflow) to see if - Also make sure to search the internet (including Stack Overflow) to see if
users outside of the GitHub community have discussed the issue. users outside of the GitHub community have discussed the issue.
- Collect information about the bug: - Collect information about the bug:
@@ -131,7 +131,7 @@ steps in advance to help us fix any potential bug as fast as possible.
We use GitHub issues to track bugs and errors. If you run into an issue with the We use GitHub issues to track bugs and errors. If you run into an issue with the
project: project:
- Open an [Issue](https://github.com/Start9Labs/start-os/issues/new/choose) - Open an [Issue](https://github.com/Start9Labs/embassy-os/issues/new/choose)
selecting the appropriate type. selecting the appropriate type.
- Explain the behavior you would expect and the actual behavior. - Explain the behavior you would expect and the actual behavior.
- Please provide as much context as possible and describe the _reproduction - Please provide as much context as possible and describe the _reproduction
@@ -155,7 +155,8 @@ Once it's filed:
### Suggesting Enhancements ### Suggesting Enhancements
This section guides you through submitting an enhancement suggestion for StartOS, **including completely new features and minor improvements to existing This section guides you through submitting an enhancement suggestion for Embassy
OS, **including completely new features and minor improvements to existing
functionality**. Following these guidelines will help maintainers and the functionality**. Following these guidelines will help maintainers and the
community to understand your suggestion and find related suggestions. community to understand your suggestion and find related suggestions.
@@ -167,7 +168,7 @@ community to understand your suggestion and find related suggestions.
- Read the [documentation](https://start9.com/latest/user-manual) carefully and - Read the [documentation](https://start9.com/latest/user-manual) carefully and
find out if the functionality is already covered, maybe by an individual find out if the functionality is already covered, maybe by an individual
configuration. configuration.
- Perform a [search](https://github.com/Start9Labs/start-os/issues) to see if - Perform a [search](https://github.com/Start9Labs/embassy-os/issues) to see if
the enhancement has already been suggested. If it has, add a comment to the the enhancement has already been suggested. If it has, add a comment to the
existing issue instead of opening a new one. existing issue instead of opening a new one.
- Find out whether your idea fits with the scope and aims of the project. It's - Find out whether your idea fits with the scope and aims of the project. It's
@@ -181,7 +182,7 @@ community to understand your suggestion and find related suggestions.
#### How Do I Submit a Good Enhancement Suggestion? #### How Do I Submit a Good Enhancement Suggestion?
Enhancement suggestions are tracked as Enhancement suggestions are tracked as
[GitHub issues](https://github.com/Start9Labs/start-os/issues). [GitHub issues](https://github.com/Start9Labs/embassy-os/issues).
- Use a **clear and descriptive title** for the issue to identify the - Use a **clear and descriptive title** for the issue to identify the
suggestion. suggestion.
@@ -196,7 +197,7 @@ Enhancement suggestions are tracked as
macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast)
or [this tool](https://github.com/GNOME/byzanz) on Linux. or [this tool](https://github.com/GNOME/byzanz) on Linux.
<!-- this should only be included if the project has a GUI --> <!-- this should only be included if the project has a GUI -->
- **Explain why this enhancement would be useful** to most StartOS users. You - **Explain why this enhancement would be useful** to most Embassy OS users. You
may also want to point out the other projects that solved it better and which may also want to point out the other projects that solved it better and which
could serve as inspiration. could serve as inspiration.
@@ -204,24 +205,24 @@ Enhancement suggestions are tracked as
### Project Structure ### Project Structure
StartOS is composed of the following components. Please visit the README for embassyOS is composed of the following components. Please visit the README for
each component to understand the dependency requirements and installation each component to understand the dependency requirements and installation
instructions. instructions.
- [`backend`](backend/README.md) (Rust) is a command line utility, daemon, and - [`backend`](backend/README.md) (Rust) is a command line utility, daemon, and
software development kit that sets up and manages services and their software development kit that sets up and manages services and their
environments, provides the interface for the ui, manages system state, and environments, provides the interface for the ui, manages system state, and
provides utilities for packaging services for StartOS. provides utilities for packaging services for embassyOS.
- [`build`](build/README.md) contains scripts and necessary for deploying - [`build`](build/README.md) contains scripts and necessary for deploying
StartOS to a debian/raspbian system. embassyOS to a debian/raspbian system.
- [`frontend`](frontend/README.md) (Typescript Ionic Angular) is the code that - [`frontend`](frontend/README.md) (Typescript Ionic Angular) is the code that
is deployed to the browser to provide the user interface for StartOS. is deployed to the browser to provide the user interface for embassyOS.
- `projects/ui` - Code for the user interface that is displayed when StartOS - `projects/ui` - Code for the user interface that is displayed when embassyOS
is running normally. is running normally.
- `projects/setup-wizard`(frontend/README.md) - Code for the user interface - `projects/setup-wizard`(frontend/README.md) - Code for the user interface
that is displayed during the setup and recovery process for StartOS. that is displayed during the setup and recovery process for embassyOS.
- `projects/diagnostic-ui` - Code for the user interface that is displayed - `projects/diagnostic-ui` - Code for the user interface that is displayed
when something has gone wrong with starting up StartOS, which provides when something has gone wrong with starting up embassyOS, which provides
helpful debugging tools. helpful debugging tools.
- `libs` (Rust) is a set of standalone crates that were separated out of - `libs` (Rust) is a set of standalone crates that were separated out of
`backend` for the purpose of portability `backend` for the purpose of portability
@@ -231,18 +232,18 @@ instructions.
[client](https://github.com/Start9Labs/patch-db/tree/master/client) with its [client](https://github.com/Start9Labs/patch-db/tree/master/client) with its
own dependency and installation requirements. own dependency and installation requirements.
- `system-images` - (Docker, Rust) A suite of utility Docker images that are - `system-images` - (Docker, Rust) A suite of utility Docker images that are
preloaded with StartOS to assist with functions relating to services (eg. preloaded with embassyOS to assist with functions relating to services (eg.
configuration, backups, health checks). configuration, backups, health checks).
### Your First Code Contribution ### Your First Code Contribution
#### Setting Up Your Development Environment #### Setting Up Your Development Environment
First, clone the StartOS repository and from the project root, pull in the First, clone the embassyOS repository and from the project root, pull in the
submodules for dependent libraries. submodules for dependent libraries.
```sh ```sh
git clone https://github.com/Start9Labs/start-os.git git clone https://github.com/Start9Labs/embassy-os.git
git submodule update --init --recursive git submodule update --init --recursive
``` ```
@@ -253,7 +254,7 @@ to, follow the installation requirements listed in that component's README
#### Building The Raspberry Pi Image #### Building The Raspberry Pi Image
This step is for setting up an environment in which to test your code changes if This step is for setting up an environment in which to test your code changes if
you do not yet have a StartOS. you do not yet have a embassyOS.
- Requirements - Requirements
- `ext4fs` (available if running on the Linux kernel) - `ext4fs` (available if running on the Linux kernel)
@@ -261,7 +262,7 @@ you do not yet have a StartOS.
- GNU Make - GNU Make
- Building - Building
- see setup instructions [here](build/README.md) - see setup instructions [here](build/README.md)
- run `make startos-raspi.img ARCH=aarch64` from the project root - run `make embassyos-raspi.img ARCH=aarch64` from the project root
### Improving The Documentation ### Improving The Documentation
@@ -285,7 +286,7 @@ seamless and intuitive experience.
### Formatting ### Formatting
Each component of StartOS contains its own style guide. Code must be formatted Each component of embassyOS contains its own style guide. Code must be formatted
with the formatter designated for each component. These are outlined within each with the formatter designated for each component. These are outlined within each
component folder's README. component folder's README.
@@ -305,7 +306,7 @@ component. i.e. `backend: update to tokio v0.3`.
The body of a pull request should contain sufficient description of what the The body of a pull request should contain sufficient description of what the
changes do, as well as a justification. You should include references to any changes do, as well as a justification. You should include references to any
relevant [issues](https://github.com/Start9Labs/start-os/issues). relevant [issues](https://github.com/Start9Labs/embassy-os/issues).
### Rebasing Changes ### Rebasing Changes

View File

@@ -1,34 +1,33 @@
OS_ARCH := $(shell echo "${OS_ARCH}") RASPI_TARGETS := eos_raspberrypi-uninit.img eos_raspberrypi-uninit.tar.gz
ARCH := $(shell if [ "$(OS_ARCH)" = "raspberrypi" ]; then echo aarch64; else echo $(OS_ARCH) | sed 's/-nonfree$$//g'; fi) OS_ARCH := $(shell if echo $(RASPI_TARGETS) | grep -qw "$(MAKECMDGOALS)"; then echo raspberrypi; else uname -m; fi)
ARCH := $(shell if [ "$(OS_ARCH)" = "raspberrypi" ]; then echo aarch64; else echo $(OS_ARCH); fi)
ENVIRONMENT_FILE = $(shell ./check-environment.sh) ENVIRONMENT_FILE = $(shell ./check-environment.sh)
GIT_HASH_FILE = $(shell ./check-git-hash.sh) GIT_HASH_FILE = $(shell ./check-git-hash.sh)
VERSION_FILE = $(shell ./check-version.sh) VERSION_FILE = $(shell ./check-version.sh)
EMBASSY_BINS := backend/target/$(ARCH)-unknown-linux-gnu/release/startbox libs/target/aarch64-unknown-linux-musl/release/embassy_container_init libs/target/x86_64-unknown-linux-musl/release/embassy_container_init EMBASSY_BINS := backend/target/$(ARCH)-unknown-linux-gnu/release/embassyd backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-init backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-cli backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-sdk backend/target/$(ARCH)-unknown-linux-gnu/release/avahi-alias libs/target/aarch64-unknown-linux-musl/release/embassy_container_init libs/target/x86_64-unknown-linux-musl/release/embassy_container_init
EMBASSY_UIS := frontend/dist/raw/ui frontend/dist/raw/setup-wizard frontend/dist/raw/diagnostic-ui frontend/dist/raw/install-wizard EMBASSY_UIS := frontend/dist/ui frontend/dist/setup-wizard frontend/dist/diagnostic-ui frontend/dist/install-wizard
BUILD_SRC := $(shell find build) BUILD_SRC := $(shell find build)
EMBASSY_SRC := backend/startd.service $(BUILD_SRC) EMBASSY_SRC := backend/embassyd.service backend/embassy-init.service $(EMBASSY_UIS) $(BUILD_SRC)
COMPAT_SRC := $(shell find system-images/compat/ -not -path 'system-images/compat/target/*' -and -not -name *.tar -and -not -name target) COMPAT_SRC := $(shell find system-images/compat/ -not -path 'system-images/compat/target/*' -and -not -name *.tar -and -not -name target)
UTILS_SRC := $(shell find system-images/utils/ -not -name *.tar) UTILS_SRC := $(shell find system-images/utils/ -not -name *.tar)
BINFMT_SRC := $(shell find system-images/binfmt/ -not -name *.tar) BINFMT_SRC := $(shell find system-images/binfmt/ -not -name *.tar)
BACKEND_SRC := $(shell find backend/src) $(shell find backend/migrations) $(shell find patch-db/*/src) $(shell find libs/*/src) libs/*/Cargo.toml backend/Cargo.toml backend/Cargo.lock frontend/dist/static BACKEND_SRC := $(shell find backend/src) $(shell find backend/migrations) $(shell find patch-db/*/src) $(shell find libs/*/src) libs/*/Cargo.toml backend/Cargo.toml backend/Cargo.lock
FRONTEND_SHARED_SRC := $(shell find frontend/projects/shared) $(shell ls -p frontend/ | grep -v / | sed 's/^/frontend\//g') frontend/package.json frontend/node_modules frontend/config.json patch-db/client/dist frontend/patchdb-ui-seed.json FRONTEND_SHARED_SRC := $(shell find frontend/projects/shared) $(shell ls -p frontend/ | grep -v / | sed 's/^/frontend\//g') frontend/package.json frontend/node_modules frontend/config.json patch-db/client/dist frontend/patchdb-ui-seed.json
FRONTEND_UI_SRC := $(shell find frontend/projects/ui) FRONTEND_UI_SRC := $(shell find frontend/projects/ui)
FRONTEND_SETUP_WIZARD_SRC := $(shell find frontend/projects/setup-wizard) FRONTEND_SETUP_WIZARD_SRC := $(shell find frontend/projects/setup-wizard)
FRONTEND_DIAGNOSTIC_UI_SRC := $(shell find frontend/projects/diagnostic-ui) FRONTEND_DIAGNOSTIC_UI_SRC := $(shell find frontend/projects/diagnostic-ui)
FRONTEND_INSTALL_WIZARD_SRC := $(shell find frontend/projects/install-wizard) FRONTEND_INSTALL_WIZARD_SRC := $(shell find frontend/projects/install-wizard)
PATCH_DB_CLIENT_SRC := $(shell find patch-db/client -not -path patch-db/client/dist -and -not -path patch-db/client/node_modules) PATCH_DB_CLIENT_SRC := $(shell find patch-db/client -not -path patch-db/client/dist)
GZIP_BIN := $(shell which pigz || which gzip) GZIP_BIN := $(shell which pigz || which gzip)
ALL_TARGETS := $(EMBASSY_BINS) system-images/compat/docker-images/$(ARCH).tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar $(EMBASSY_SRC) $(shell if [ "$(OS_ARCH)" = "raspberrypi" ]; then echo cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep; fi) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) ALL_TARGETS := $(EMBASSY_BINS) system-images/compat/docker-images/$(ARCH).tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar $(EMBASSY_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE)
ifeq ($(REMOTE),) ifeq ($(REMOTE),)
mkdir = mkdir -p $1 mkdir = mkdir -p $1
rm = rm -rf $1 rm = rm -rf $1
cp = cp -r $1 $2 cp = cp -r $1 $2
ln = ln -sf $1 $2
else else
mkdir = ssh $(REMOTE) 'mkdir -p $1' mkdir = ssh $(REMOTE) 'mkdir -p $1'
rm = ssh $(REMOTE) 'sudo rm -rf $1' rm = ssh $(REMOTE) 'sudo rm -rf $1'
ln = ssh $(REMOTE) 'sudo ln -sf $1 $2'
define cp define cp
tar --transform "s|^$1|x|" -czv -f- $1 | ssh $(REMOTE) "sudo tar --transform 's|^x|$2|' -xzv -f- -C /" tar --transform "s|^$1|x|" -czv -f- $1 | ssh $(REMOTE) "sudo tar --transform 's|^x|$2|' -xzv -f- -C /"
endef endef
@@ -36,7 +35,7 @@ endif
.DELETE_ON_ERROR: .DELETE_ON_ERROR:
.PHONY: all gzip install clean format sdk snapshots frontends ui backend reflash startos_raspberrypi.img sudo .PHONY: all gzip install clean format sdk snapshots frontends ui backend reflash eos_raspberrypi.img sudo
all: $(ALL_TARGETS) all: $(ALL_TARGETS)
@@ -44,6 +43,12 @@ sudo:
sudo true sudo true
clean: clean:
rm -f 2022-01-28-raspios-bullseye-arm64-lite.zip
rm -f raspios.img
rm -f eos_raspberrypi-uninit.img
rm -f eos_raspberrypi-uninit.tar.gz
rm -f ubuntu.img
rm -f product_key.txt
rm -f system-images/**/*.tar rm -f system-images/**/*.tar
rm -rf system-images/compat/target rm -rf system-images/compat/target
rm -rf backend/target rm -rf backend/target
@@ -67,19 +72,25 @@ format:
sdk: sdk:
cd backend/ && ./install-sdk.sh cd backend/ && ./install-sdk.sh
startos_raspberrypi.img: $(BUILD_SRC) startos.raspberrypi.squashfs $(VERSION_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep | sudo eos_raspberrypi-uninit.img: $(ALL_TARGETS) raspios.img cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep | sudo
./build/raspberrypi/make-image.sh ! test -f eos_raspberrypi-uninit.img || rm eos_raspberrypi-uninit.img
./build/raspberry-pi/make-image.sh
lite-upgrade.img: raspios.img cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep $(BUILD_SRC) eos.raspberrypi.squashfs
! test -f lite-upgrade.img || rm lite-upgrade.img
./build/raspberry-pi/make-upgrade-image.sh
eos_raspberrypi.img: raspios.img $(BUILD_SRC) eos.raspberrypi.squashfs $(VERSION_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) | sudo
! test -f eos_raspberrypi.img || rm eos_raspberrypi.img
./build/raspberry-pi/make-initialized-image.sh
# For creating os images. DO NOT USE # For creating os images. DO NOT USE
install: $(ALL_TARGETS) install: $(ALL_TARGETS)
$(call mkdir,$(DESTDIR)/usr/bin) $(call mkdir,$(DESTDIR)/usr/bin)
$(call cp,backend/target/$(ARCH)-unknown-linux-gnu/release/startbox,$(DESTDIR)/usr/bin/startbox) $(call cp,backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-init,$(DESTDIR)/usr/bin/embassy-init)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/startd) $(call cp,backend/target/$(ARCH)-unknown-linux-gnu/release/embassyd,$(DESTDIR)/usr/bin/embassyd)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-cli) $(call cp,backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-cli,$(DESTDIR)/usr/bin/embassy-cli)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-sdk) $(call cp,backend/target/$(ARCH)-unknown-linux-gnu/release/avahi-alias,$(DESTDIR)/usr/bin/avahi-alias)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/avahi-alias)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/embassy-cli)
if [ "$(OS_ARCH)" = "raspberrypi" ]; then $(call cp,cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep,$(DESTDIR)/usr/bin/pi-beep); fi
$(call mkdir,$(DESTDIR)/usr/lib) $(call mkdir,$(DESTDIR)/usr/lib)
$(call rm,$(DESTDIR)/usr/lib/embassy) $(call rm,$(DESTDIR)/usr/lib/embassy)
@@ -98,14 +109,22 @@ install: $(ALL_TARGETS)
$(call cp,system-images/utils/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/embassy/system-images/utils.tar) $(call cp,system-images/utils/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/embassy/system-images/utils.tar)
$(call cp,system-images/binfmt/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/embassy/system-images/binfmt.tar) $(call cp,system-images/binfmt/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/embassy/system-images/binfmt.tar)
$(call mkdir,$(DESTDIR)/var/www/html)
$(call cp,frontend/dist/diagnostic-ui,$(DESTDIR)/var/www/html/diagnostic)
$(call cp,frontend/dist/setup-wizard,$(DESTDIR)/var/www/html/setup)
$(call cp,frontend/dist/install-wizard,$(DESTDIR)/var/www/html/install)
$(call cp,frontend/dist/ui,$(DESTDIR)/var/www/html/main)
$(call cp,index.html,$(DESTDIR)/var/www/html/index.html)
update-overlay: update-overlay:
@echo "\033[33m!!! THIS WILL ONLY REFLASH YOUR DEVICE IN MEMORY !!!\033[0m" @echo "\033[33m!!! THIS WILL ONLY REFLASH YOUR DEVICE IN MEMORY !!!\033[0m"
@echo "\033[33mALL CHANGES WILL BE REVERTED IF YOU RESTART THE DEVICE\033[0m" @echo "\033[33mALL CHANGES WILL BE REVERTED IF YOU RESTART THE DEVICE\033[0m"
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi @if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
@if [ "`ssh $(REMOTE) 'cat /usr/lib/embassy/VERSION.txt'`" != "`cat ./VERSION.txt`" ]; then >&2 echo "StartOS requires migrations: update-overlay is unavailable." && false; fi @if [ "`ssh $(REMOTE) 'cat /usr/lib/embassy/VERSION.txt'`" != "`cat ./VERSION.txt`" ]; then >&2 echo "Embassy requires migrations: update-overlay is unavailable." && false; fi
ssh $(REMOTE) "sudo systemctl stop startd" @if ssh $(REMOTE) "pidof embassy-init"; then >&2 echo "Embassy in INIT: update-overlay is unavailable." && false; fi
ssh $(REMOTE) "sudo systemctl stop embassyd"
$(MAKE) install REMOTE=$(REMOTE) OS_ARCH=$(OS_ARCH) $(MAKE) install REMOTE=$(REMOTE) OS_ARCH=$(OS_ARCH)
ssh $(REMOTE) "sudo systemctl start startd" ssh $(REMOTE) "sudo systemctl start embassyd"
update: update:
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi @if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
@@ -128,6 +147,11 @@ system-images/utils/docker-images/aarch64.tar system-images/utils/docker-images/
system-images/binfmt/docker-images/aarch64.tar system-images/binfmt/docker-images/x86_64.tar: $(BINFMT_SRC) system-images/binfmt/docker-images/aarch64.tar system-images/binfmt/docker-images/x86_64.tar: $(BINFMT_SRC)
cd system-images/binfmt && make cd system-images/binfmt && make
raspios.img:
wget --continue https://downloads.raspberrypi.org/raspios_lite_arm64/images/raspios_lite_arm64-2022-01-28/2022-01-28-raspios-bullseye-arm64-lite.zip
unzip 2022-01-28-raspios-bullseye-arm64-lite.zip
mv 2022-01-28-raspios-bullseye-arm64-lite.img raspios.img
snapshots: libs/snapshot_creator/Cargo.toml snapshots: libs/snapshot_creator/Cargo.toml
cd libs/ && ./build-v8-snapshot.sh cd libs/ && ./build-v8-snapshot.sh
cd libs/ && ./build-arm-v8-snapshot.sh cd libs/ && ./build-arm-v8-snapshot.sh
@@ -139,21 +163,18 @@ $(EMBASSY_BINS): $(BACKEND_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) frontend/pa
frontend/node_modules: frontend/package.json frontend/node_modules: frontend/package.json
npm --prefix frontend ci npm --prefix frontend ci
frontend/dist/raw/ui: $(FRONTEND_UI_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE) frontend/dist/ui: $(FRONTEND_UI_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE)
npm --prefix frontend run build:ui npm --prefix frontend run build:ui
frontend/dist/raw/setup-wizard: $(FRONTEND_SETUP_WIZARD_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE) frontend/dist/setup-wizard: $(FRONTEND_SETUP_WIZARD_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE)
npm --prefix frontend run build:setup npm --prefix frontend run build:setup
frontend/dist/raw/diagnostic-ui: $(FRONTEND_DIAGNOSTIC_UI_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE) frontend/dist/diagnostic-ui: $(FRONTEND_DIAGNOSTIC_UI_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE)
npm --prefix frontend run build:dui npm --prefix frontend run build:dui
frontend/dist/raw/install-wizard: $(FRONTEND_INSTALL_WIZARD_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE) frontend/dist/install-wizard: $(FRONTEND_INSTALL_WIZARD_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE)
npm --prefix frontend run build:install-wiz npm --prefix frontend run build:install-wiz
frontend/dist/static: $(EMBASSY_UIS)
./compress-uis.sh
frontend/config.json: $(GIT_HASH_FILE) frontend/config-sample.json frontend/config.json: $(GIT_HASH_FILE) frontend/config-sample.json
jq '.useMocks = false' frontend/config-sample.json > frontend/config.json jq '.useMocks = false' frontend/config-sample.json > frontend/config.json
jq '.packageArch = "$(ARCH)"' frontend/config.json > frontend/config.json.tmp jq '.packageArch = "$(ARCH)"' frontend/config.json > frontend/config.json.tmp
@@ -162,7 +183,7 @@ frontend/config.json: $(GIT_HASH_FILE) frontend/config-sample.json
npm --prefix frontend run-script build-config npm --prefix frontend run-script build-config
frontend/patchdb-ui-seed.json: frontend/package.json frontend/patchdb-ui-seed.json: frontend/package.json
jq '."ack-welcome" = $(shell yq '.version' frontend/package.json)' frontend/patchdb-ui-seed.json > ui-seed.tmp jq '."ack-welcome" = "$(shell yq '.version' frontend/package.json)"' frontend/patchdb-ui-seed.json > ui-seed.tmp
mv ui-seed.tmp frontend/patchdb-ui-seed.json mv ui-seed.tmp frontend/patchdb-ui-seed.json
patch-db/client/node_modules: patch-db/client/package.json patch-db/client/node_modules: patch-db/client/package.json
@@ -180,10 +201,13 @@ backend-$(ARCH).tar: $(EMBASSY_BINS)
frontends: $(EMBASSY_UIS) frontends: $(EMBASSY_UIS)
# this is a convenience step to build the UI # this is a convenience step to build the UI
ui: frontend/dist/raw/ui ui: frontend/dist/ui
# used by github actions # used by github actions
backend: $(EMBASSY_BINS) backend: $(EMBASSY_BINS)
cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast:
./build-cargo-dep.sh nc-broadcast
cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep: cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep:
ARCH=aarch64 ./build-cargo-dep.sh pi-beep ./build-cargo-dep.sh pi-beep

110
README.md
View File

@@ -1,81 +1,51 @@
<div align="center"> # embassyOS
<img src="frontend/projects/shared/assets/img/icon_pwa.png" alt="StartOS Logo" width="16%" /> [![version](https://img.shields.io/github/v/tag/Start9Labs/embassy-os?color=success)](https://github.com/Start9Labs/embassy-os/releases)
<h1 style="margin-top: 0;">StartOS</h1> [![build](https://github.com/Start9Labs/embassy-os/actions/workflows/product.yaml/badge.svg)](https://github.com/Start9Labs/embassy-os/actions/workflows/product.yaml)
<a href="https://github.com/Start9Labs/start-os/releases"> [![community](https://img.shields.io/badge/community-matrix-yellow)](https://matrix.to/#/#community:matrix.start9labs.com)
<img src="https://img.shields.io/github/v/tag/Start9Labs/start-os?color=success" /> [![community](https://img.shields.io/badge/community-telegram-informational)](https://t.me/start9_labs)
</a> [![support](https://img.shields.io/badge/support-docs-important)](https://docs.start9.com)
<a href="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml"> [![developer](https://img.shields.io/badge/developer-matrix-blueviolet)](https://matrix.to/#/#community-dev:matrix.start9labs.com)
<img src="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml/badge.svg"> [![website](https://img.shields.io/website?down_color=lightgrey&down_message=offline&up_color=green&up_message=online&url=https%3A%2F%2Fstart9.com)](https://start9.com)
</a>
<a href="https://twitter.com/start9labs">
<img src="https://img.shields.io/twitter/follow/start9labs?label=Follow">
</a>
<a href="http://mastodon.start9labs.com">
<img src="https://img.shields.io/mastodon/follow/000000001?domain=https%3A%2F%2Fmastodon.start9labs.com&label=Follow&style=social">
</a>
<a href="https://matrix.to/#/#community:matrix.start9labs.com">
<img src="https://img.shields.io/badge/community-matrix-yellow">
</a>
<a href="https://t.me/start9_labs">
<img src="https://img.shields.io/badge/community-telegram-informational">
</a>
<a href="https://docs.start9.com">
<img src="https://img.shields.io/badge/support-docs-important">
</a>
<a href="https://matrix.to/#/#community-dev:matrix.start9labs.com">
<img src="https://img.shields.io/badge/developer-matrix-blueviolet">
</a>
<a href="https://start9.com">
<img src="https://img.shields.io/website?down_color=lightgrey&down_message=offline&up_color=green&up_message=online&url=https%3A%2F%2Fstart9.com">
</a>
</div>
<br />
<div align="center">
<h3>
Welcome to the era of Sovereign Computing
</h3>
<p>
StartOS is a Debian-based Linux distro optimized for running a personal server. It facilitates the discovery, installation, network configuration, service configuration, data backup, dependency management, and health monitoring of self-hosted software services.
</p>
</div>
<br />
<p align="center">
<img src="assets/StartOS.png" alt="StartOS" width="85%">
</p>
<br />
## Running StartOS [![mastodon](https://img.shields.io/mastodon/follow/000000001?domain=https%3A%2F%2Fmastodon.start9labs.com&label=Follow&style=social)](http://mastodon.start9labs.com)
There are multiple ways to get started with StartOS: [![twitter](https://img.shields.io/twitter/follow/start9labs?label=Follow)](https://twitter.com/start9labs)
### 💰 Buy a Start9 server ### _Welcome to the era of Sovereign Computing_ ###
This is the most convenient option. Simply [buy a server](https://store.start9.com) from Start9 and plug it in.
### 👷 Build your own server embassyOS is a browser-based, graphical operating system for a personal server. embassyOS facilitates the discovery, installation, network configuration, service configuration, data backup, dependency management, and health monitoring of self-hosted software services. It is the most advanced, secure, reliable, and user friendly personal server OS in the world.
This option is easier than you might imagine, and there are 4 reasons why you might prefer it:
1. You already have hardware ## Running embassyOS
1. You want to save on shipping costs There are multiple ways to get your hands on embassyOS.
1. You prefer not to divulge your physical address
1. You just like building things ### :moneybag: Buy an Embassy
This is the most convenient option. Simply [buy an Embassy](https://start9.com) from Start9 and plug it in. Depending on where you live, shipping costs and import duties will vary.
### :construction_worker: Build your own Embassy
While not as convenient as buying an Embassy, this option is easier than you might imagine, and there are 4 reasons why you might prefer it:
1. You already have your own hardware.
1. You want to save on shipping costs.
1. You prefer not to divulge your physical address.
1. You just like building things.
To pursue this option, follow one of our [DIY guides](https://start9.com/latest/diy). To pursue this option, follow one of our [DIY guides](https://start9.com/latest/diy).
## ❤️ Contributing ### :hammer_and_wrench: Build embassyOS from Source
There are multiple ways to contribute: work directly on StartOS, package a service for the marketplace, or help with documentation and guides. To learn more about contributing, see [here](https://start9.com/contribute/).
To report security issues, please email our security team - security@start9.com. embassyOS can be built from source, for personal use, for free.
A detailed guide for doing so can be found [here](https://github.com/Start9Labs/embassy-os/blob/master/build/README.md).
## 🌎 Marketplace ## :heart: Contributing
There are dozens of service available for StartOS, and new ones are being added all the time. Check out the full list of available services [here](https://marketplace.start9.com/marketplace). To read more about the Marketplace ecosystem, check out this [blog post](https://blog.start9.com/start9-marketplace-strategy/) There are multiple ways to contribute: work directly on embassyOS, package a service for the marketplace, or help with documentation and guides. To learn more about contributing, see [here](https://docs.start9.com/latest/contribute/) or [here](https://github.com/Start9Labs/embassy-os/blob/master/CONTRIBUTING.md).
## 🖥️ User Interface Screenshots
## UI Screenshots
<p align="center"> <p align="center">
<img src="assets/registry.png" alt="StartOS Marketplace" width="49%"> <img src="assets/embassyOS.png" alt="embassyOS" width="85%">
<img src="assets/community.png" alt="StartOS Community Registry" width="49%"> </p>
<img src="assets/c-lightning.png" alt="StartOS NextCloud Service" width="49%"> <p align="center">
<img src="assets/btcpay.png" alt="StartOS BTCPay Service" width="49%"> <img src="assets/eOS-preferences.png" alt="Embassy Preferences" width="49%">
<img src="assets/nextcloud.png" alt="StartOS System Settings" width="49%"> <img src="assets/eOS-ghost.png" alt="Embassy Ghost Service" width="49%">
<img src="assets/system.png" alt="StartOS System Settings" width="49%"> </p>
<img src="assets/welcome.png" alt="StartOS System Settings" width="49%"> <p align="center">
<img src="assets/logs.png" alt="StartOS System Settings" width="49%"> <img src="assets/eOS-synapse-health-check.png" alt="Embassy Synapse Health Checks" width="49%">
<img src="assets/eOS-sideload.png" alt="Embassy Sideload Service" width="49%">
</p> </p>

Binary file not shown.

Before

Width:  |  Height:  |  Size: 2.1 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 396 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 402 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 591 KiB

BIN
assets/eOS-ghost.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 281 KiB

BIN
assets/eOS-preferences.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 266 KiB

BIN
assets/eOS-sideload.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 154 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 213 KiB

BIN
assets/embassyOS.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 191 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.6 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 319 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 521 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 331 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 402 KiB

2447
backend/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,7 +1,7 @@
[package] [package]
authors = ["Aiden McClelland <me@drbonez.dev>"] authors = ["Aiden McClelland <me@drbonez.dev>"]
description = "The core of StartOS" description = "The core of the Start9 Embassy Operating System"
documentation = "https://docs.rs/start-os" documentation = "https://docs.rs/embassy-os"
edition = "2021" edition = "2021"
keywords = [ keywords = [
"self-hosted", "self-hosted",
@@ -11,28 +11,40 @@ keywords = [
"full-node", "full-node",
"lightning", "lightning",
] ]
name = "start-os" name = "embassy-os"
readme = "README.md" readme = "README.md"
repository = "https://github.com/Start9Labs/start-os" repository = "https://github.com/Start9Labs/embassy-os"
version = "0.3.4-rev.4" version = "0.3.4"
[lib] [lib]
name = "startos" name = "embassy"
path = "src/lib.rs" path = "src/lib.rs"
[[bin]] [[bin]]
name = "startbox" name = "embassyd"
path = "src/main.rs" path = "src/bin/embassyd.rs"
[[bin]]
name = "embassy-init"
path = "src/bin/embassy-init.rs"
[[bin]]
name = "embassy-sdk"
path = "src/bin/embassy-sdk.rs"
[[bin]]
name = "embassy-cli"
path = "src/bin/embassy-cli.rs"
[[bin]]
name = "avahi-alias"
path = "src/bin/avahi-alias.rs"
[features] [features]
avahi = ["avahi-sys"] avahi = ["avahi-sys"]
default = ["avahi-alias", "cli", "sdk", "daemon", "js_engine"] default = ["avahi", "js_engine"]
dev = [] dev = []
unstable = ["patch-db/unstable"] unstable = ["patch-db/unstable"]
avahi-alias = ["avahi"]
cli = []
sdk = []
daemon = []
[dependencies] [dependencies]
aes = { version = "0.7.5", features = ["ctr"] } aes = { version = "0.7.5", features = ["ctr"] }
@@ -78,14 +90,11 @@ http = "0.2.8"
hyper = { version = "0.14.20", features = ["full"] } hyper = { version = "0.14.20", features = ["full"] }
hyper-ws-listener = "0.2.0" hyper-ws-listener = "0.2.0"
imbl = "2.0.0" imbl = "2.0.0"
include_dir = "0.7.3"
indexmap = { version = "1.9.1", features = ["serde"] } indexmap = { version = "1.9.1", features = ["serde"] }
ipnet = { version = "2.7.1", features = ["serde"] } ipnet = { version = "2.7.1", features = ["serde"] }
iprange = { version = "0.6.7", features = ["serde"] } iprange = { version = "0.6.7", features = ["serde"] }
isocountry = "0.3.2" isocountry = "0.3.2"
itertools = "0.10.3" itertools = "0.10.3"
jaq-core = "0.10.0"
jaq-std = "0.10.0"
josekit = "0.8.1" josekit = "0.8.1"
js_engine = { path = '../libs/js_engine', optional = true } js_engine = { path = '../libs/js_engine', optional = true }
jsonpath_lib = "0.3.0" jsonpath_lib = "0.3.0"
@@ -94,7 +103,6 @@ libc = "0.2.126"
log = "0.4.17" log = "0.4.17"
mbrman = "0.5.0" mbrman = "0.5.0"
models = { version = "*", path = "../libs/models" } models = { version = "*", path = "../libs/models" }
new_mime_guess = "4"
nix = "0.25.0" nix = "0.25.0"
nom = "7.1.1" nom = "7.1.1"
num = "0.4.0" num = "0.4.0"
@@ -144,7 +152,6 @@ tokio-stream = { version = "0.1.11", features = ["io-util", "sync", "net"] }
tokio-tar = { git = "https://github.com/dr-bonez/tokio-tar.git" } tokio-tar = { git = "https://github.com/dr-bonez/tokio-tar.git" }
tokio-tungstenite = { version = "0.17.1", features = ["native-tls"] } tokio-tungstenite = { version = "0.17.1", features = ["native-tls"] }
tokio-rustls = "0.23.4" tokio-rustls = "0.23.4"
tokio-socks = "0.5.1"
tokio-util = { version = "0.7.3", features = ["io"] } tokio-util = { version = "0.7.3", features = ["io"] }
torut = "0.2.1" torut = "0.2.1"
tracing = "0.1.35" tracing = "0.1.35"
@@ -154,7 +161,6 @@ tracing-subscriber = { version = "0.3.14", features = ["env-filter"] }
trust-dns-server = "0.22.0" trust-dns-server = "0.22.0"
typed-builder = "0.10.0" typed-builder = "0.10.0"
url = { version = "2.2.2", features = ["serde"] } url = { version = "2.2.2", features = ["serde"] }
urlencoding = "2.1.2"
uuid = { version = "1.1.2", features = ["v4"] } uuid = { version = "1.1.2", features = ["v4"] }
zeroize = "1.5.7" zeroize = "1.5.7"

View File

@@ -1,35 +1,36 @@
# StartOS Backend # embassyOS Backend
- Requirements: - Requirements:
- [Install Rust](https://rustup.rs) - [Install Rust](https://rustup.rs)
- Recommended: [rust-analyzer](https://rust-analyzer.github.io/) - Recommended: [rust-analyzer](https://rust-analyzer.github.io/)
- [Docker](https://docs.docker.com/get-docker/) - [Docker](https://docs.docker.com/get-docker/)
- [Rust ARM64 Build Container](https://github.com/Start9Labs/rust-arm-builder) - [Rust ARM64 Build Container](https://github.com/Start9Labs/rust-arm-builder)
- Scripts (run within the `./backend` directory) - Scripts (run withing the `./backend` directory)
- `build-prod.sh` - compiles a release build of the artifacts for running on - `build-prod.sh` - compiles a release build of the artifacts for running on
ARM64 ARM64
- A Linux computer or VM - A Linux computer or VM
## Structure ## Structure
The StartOS backend is packed into a single binary `startbox` that is symlinked under The embassyOS backend is broken up into 4 different binaries:
several different names for different behaviour:
- startd: This is the main workhorse of StartOS - any new functionality you - embassyd: This is the main workhorse of embassyOS - any new functionality you
want will likely go here want will likely go here
- start-cli: This is a CLI tool that will allow you to issue commands to - embassy-init: This is the component responsible for allowing you to set up
startd and control it similarly to the UI your device, and handles system initialization on startup
- start-sdk: This is a CLI tool that aids in building and packaging services - embassy-cli: This is a CLI tool that will allow you to issue commands to
you wish to deploy to StartOS embassyd and control it similarly to the UI
- embassy-sdk: This is a CLI tool that aids in building and packaging services
you wish to deploy to the Embassy
Finally there is a library `startos` that supports all of these tools. Finally there is a library `embassy` that supports all four of these tools.
See [here](/backend/Cargo.toml) for details. See [here](/backend/Cargo.toml) for details.
## Building ## Building
You can build the entire operating system image using `make` from the root of You can build the entire operating system image using `make` from the root of
the StartOS project. This will subsequently invoke the build scripts above to the embassyOS project. This will subsequently invoke the build scripts above to
actually create the requisite binaries and put them onto the final operating actually create the requisite binaries and put them onto the final operating
system image. system image.

24
backend/build-dev.sh Executable file
View File

@@ -0,0 +1,24 @@
#!/bin/bash
set -e
shopt -s expand_aliases
if [ "$0" != "./build-dev.sh" ]; then
>&2 echo "Must be run from backend directory"
exit 1
fi
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
alias 'rust-arm64-builder'='docker run $USE_TTY --rm -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-arm-cross:aarch64'
cd ..
rust-arm64-builder sh -c "(cd backend && cargo build --locked)"
cd backend
sudo chown -R $USER target
sudo chown -R $USER ~/.cargo
#rust-arm64-builder aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/embassyd

23
backend/build-portable-dev.sh Executable file
View File

@@ -0,0 +1,23 @@
#!/bin/bash
set -e
shopt -s expand_aliases
if [ "$0" != "./build-portable-dev.sh" ]; then
>&2 echo "Must be run from backend directory"
exit 1
fi
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
alias 'rust-musl-builder'='docker run $USE_TTY --rm -v "$HOME"/.cargo/registry:/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-musl-cross:x86_64-musl'
cd ..
rust-musl-builder sh -c "(cd backend && cargo +beta build --target=x86_64-unknown-linux-musl --no-default-features --locked)"
cd backend
sudo chown -R $USER target
sudo chown -R $USER ~/.cargo

View File

@@ -3,11 +3,6 @@
set -e set -e
shopt -s expand_aliases shopt -s expand_aliases
if [ -z "$OS_ARCH" ]; then
>&2 echo '$OS_ARCH is required'
exit 1
fi
if [ -z "$ARCH" ]; then if [ -z "$ARCH" ]; then
ARCH=$(uname -m) ARCH=$(uname -m)
fi fi
@@ -22,8 +17,8 @@ if tty -s; then
USE_TTY="-it" USE_TTY="-it"
fi fi
alias 'rust-gnu-builder'='docker run $USE_TTY --rm -e "OS_ARCH=$OS_ARCH" -v "$HOME/.cargo/registry":/usr/local/cargo/registry -v "$(pwd)":/home/rust/src -w /home/rust/src -P start9/rust-arm-cross:aarch64' alias 'rust-gnu-builder'='docker run $USE_TTY --rm -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src -P start9/rust-arm-cross:aarch64'
alias 'rust-musl-builder'='docker run $USE_TTY --rm -e "OS_ARCH=$OS_ARCH" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src -P messense/rust-musl-cross:$ARCH-musl' alias 'rust-musl-builder'='docker run $USE_TTY --rm -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
cd .. cd ..
FLAGS="" FLAGS=""
@@ -37,26 +32,26 @@ fi
set +e set +e
fail= fail=
if [[ "$FLAGS" = "" ]]; then if [[ "$FLAGS" = "" ]]; then
rust-gnu-builder sh -c "(cd backend && cargo build --release --locked --target=$ARCH-unknown-linux-gnu)" rust-gnu-builder sh -c "(git config --global --add safe.directory '*'; cd backend && cargo build --release --locked --target=$ARCH-unknown-linux-gnu)"
if test $? -ne 0; then if test $? -ne 0; then
fail=true fail=true
fi fi
for ARCH in x86_64 aarch64 for ARCH in x86_64 aarch64
do do
rust-musl-builder sh -c "(cd libs && cargo build --release --locked --bin embassy_container_init )" rust-musl-builder sh -c "(git config --global --add safe.directory '*'; cd libs && cargo build --release --locked --bin embassy_container_init )"
if test $? -ne 0; then if test $? -ne 0; then
fail=true fail=true
fi fi
done done
else else
echo "FLAGS=$FLAGS" echo "FLAGS=$FLAGS"
rust-gnu-builder sh -c "(cd backend && cargo build --release --features $FLAGS --locked --target=$ARCH-unknown-linux-gnu)" rust-gnu-builder sh -c "(git config --global --add safe.directory '*'; cd backend && cargo build --release --features $FLAGS --locked --target=$ARCH-unknown-linux-gnu)"
if test $? -ne 0; then if test $? -ne 0; then
fail=true fail=true
fi fi
for ARCH in x86_64 aarch64 for ARCH in x86_64 aarch64
do do
rust-musl-builder sh -c "(cd libs && cargo build --release --features $FLAGS --locked --bin embassy_container_init)" rust-musl-builder sh -c "(git config --global --add safe.directory '*'; cd libs && cargo build --release --features $FLAGS --locked --bin embassy_container_init)"
if test $? -ne 0; then if test $? -ne 0; then
fail=true fail=true
fi fi
@@ -72,3 +67,5 @@ sudo chown -R $USER ../libs/target
if [ -n "$fail" ]; then if [ -n "$fail" ]; then
exit 1 exit 1
fi fi
#rust-arm64-builder aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/embassyd

View File

@@ -0,0 +1,15 @@
[Unit]
Description=Embassy Init
After=network-online.target
Requires=network-online.target
Wants=avahi-daemon.service
[Service]
Type=oneshot
Environment=RUST_LOG=embassy_init=debug,embassy=debug,js_engine=debug,patch_db=warn
ExecStart=/usr/bin/embassy-init
RemainAfterExit=true
StandardOutput=append:/var/log/embassy-init.log
[Install]
WantedBy=embassyd.service

17
backend/embassyd.service Normal file
View File

@@ -0,0 +1,17 @@
[Unit]
Description=Embassy Daemon
After=embassy-init.service
Requires=embassy-init.service
[Service]
Type=simple
Environment=RUST_LOG=embassyd=debug,embassy=debug,js_engine=debug,patch_db=warn
ExecStart=/usr/bin/embassyd
Restart=always
RestartSec=3
ManagedOOMPreference=avoid
CPUAccounting=true
CPUWeight=1000
[Install]
WantedBy=multi-user.target

View File

@@ -8,11 +8,4 @@ if [ "$0" != "./install-sdk.sh" ]; then
exit 1 exit 1
fi fi
if [ -z "$OS_ARCH" ]; then cargo install --bin=embassy-sdk --bin=embassy-cli --path=. --no-default-features --features=js_engine --locked
export OS_ARCH=$(uname -m)
fi
cargo install --path=. --no-default-features --features=js_engine,sdk,cli --locked
startbox_loc=$(which startbox)
ln -sf $startbox_loc $(dirname $startbox_loc)/start-cli
ln -sf $startbox_loc $(dirname $startbox_loc)/start-sdk

View File

@@ -56,7 +56,7 @@ pub struct Action {
pub input_spec: ConfigSpec, pub input_spec: ConfigSpec,
} }
impl Action { impl Action {
#[instrument(skip_all)] #[instrument]
pub fn validate( pub fn validate(
&self, &self,
container: &Option<DockerContainers>, container: &Option<DockerContainers>,
@@ -74,7 +74,7 @@ impl Action {
}) })
} }
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn execute( pub async fn execute(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
@@ -120,7 +120,7 @@ fn display_action_result(action_result: ActionResult, matches: &ArgMatches) {
} }
#[command(about = "Executes an action", display(display_action_result))] #[command(about = "Executes an action", display(display_action_result))]
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn action( pub async fn action(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[arg(rename = "id")] pkg_id: PackageId, #[arg(rename = "id")] pkg_id: PackageId,

View File

@@ -90,7 +90,7 @@ fn gen_pwd() {
) )
} }
#[instrument(skip_all)] #[instrument(skip(ctx, password))]
async fn cli_login( async fn cli_login(
ctx: CliContext, ctx: CliContext,
password: Option<PasswordType>, password: Option<PasswordType>,
@@ -145,7 +145,7 @@ where
display(display_none), display(display_none),
metadata(authenticated = false) metadata(authenticated = false)
)] )]
#[instrument(skip_all)] #[instrument(skip(ctx, password))]
pub async fn login( pub async fn login(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[request] req: &RequestParts, #[request] req: &RequestParts,
@@ -183,7 +183,7 @@ pub async fn login(
} }
#[command(display(display_none), metadata(authenticated = false))] #[command(display(display_none), metadata(authenticated = false))]
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn logout( pub async fn logout(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[request] req: &RequestParts, #[request] req: &RequestParts,
@@ -250,7 +250,7 @@ fn display_sessions(arg: SessionList, matches: &ArgMatches) {
} }
#[command(display(display_sessions))] #[command(display(display_sessions))]
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn list( pub async fn list(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[request] req: &RequestParts, #[request] req: &RequestParts,
@@ -296,7 +296,7 @@ impl AsLogoutSessionId for KillSessionId {
} }
#[command(display(display_none))] #[command(display(display_none))]
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn kill( pub async fn kill(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[arg(parse(parse_comma_separated))] ids: Vec<String>, #[arg(parse(parse_comma_separated))] ids: Vec<String>,
@@ -305,7 +305,7 @@ pub async fn kill(
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(ctx, old_password, new_password))]
async fn cli_reset_password( async fn cli_reset_password(
ctx: CliContext, ctx: CliContext,
old_password: Option<PasswordType>, old_password: Option<PasswordType>,
@@ -369,7 +369,7 @@ impl SetPasswordReceipt {
custom_cli(cli_reset_password(async, context(CliContext))), custom_cli(cli_reset_password(async, context(CliContext))),
display(display_none) display(display_none)
)] )]
#[instrument(skip_all)] #[instrument(skip(ctx, old_password, new_password))]
pub async fn reset_password( pub async fn reset_password(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[arg(rename = "old-password")] old_password: Option<PasswordType>, #[arg(rename = "old-password")] old_password: Option<PasswordType>,
@@ -403,7 +403,7 @@ pub async fn reset_password(
display(display_none), display(display_none),
metadata(authenticated = false) metadata(authenticated = false)
)] )]
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn get_pubkey(#[context] ctx: RpcContext) -> Result<Jwk, RpcError> { pub async fn get_pubkey(#[context] ctx: RpcContext) -> Result<Jwk, RpcError> {
let secret = ctx.as_ref().clone(); let secret = ctx.as_ref().clone();
let pub_key = secret.to_public_key()?; let pub_key = secret.to_public_key()?;

View File

@@ -1,5 +1,5 @@
use std::collections::{BTreeMap, BTreeSet}; use std::collections::{BTreeMap, BTreeSet};
use std::path::{Path, PathBuf}; use std::path::PathBuf;
use chrono::Utc; use chrono::Utc;
use clap::ArgMatches; use clap::ArgMatches;
@@ -8,7 +8,6 @@ use helpers::AtomicFile;
use patch_db::{DbHandle, LockType, PatchDbHandle}; use patch_db::{DbHandle, LockType, PatchDbHandle};
use rpc_toolkit::command; use rpc_toolkit::command;
use tokio::io::AsyncWriteExt; use tokio::io::AsyncWriteExt;
use tokio::process::Command;
use tracing::instrument; use tracing::instrument;
use super::target::BackupTargetId; use super::target::BackupTargetId;
@@ -24,9 +23,8 @@ use crate::disk::mount::guard::TmpMountGuard;
use crate::notifications::NotificationLevel; use crate::notifications::NotificationLevel;
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;
use crate::status::MainStatus; use crate::status::MainStatus;
use crate::util::io::dir_copy; use crate::util::display_none;
use crate::util::serde::IoFormat; use crate::util::serde::IoFormat;
use crate::util::{display_none, Invoke};
use crate::version::VersionT; use crate::version::VersionT;
use crate::{Error, ErrorKind, ResultExt}; use crate::{Error, ErrorKind, ResultExt};
@@ -37,7 +35,7 @@ fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<BTreeSet<PackageId
} }
#[command(rename = "create", display(display_none))] #[command(rename = "create", display(display_none))]
#[instrument(skip_all)] #[instrument(skip(ctx, old_password, password))]
pub async fn backup_all( pub async fn backup_all(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[arg(rename = "target-id")] target_id: BackupTargetId, #[arg(rename = "target-id")] target_id: BackupTargetId,
@@ -163,7 +161,7 @@ pub async fn backup_all(
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(db, packages))]
async fn assure_backing_up( async fn assure_backing_up(
db: &mut PatchDbHandle, db: &mut PatchDbHandle,
packages: impl IntoIterator<Item = &PackageId>, packages: impl IntoIterator<Item = &PackageId>,
@@ -202,7 +200,7 @@ async fn assure_backing_up(
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(ctx, db, backup_guard))]
async fn perform_backup<Db: DbHandle>( async fn perform_backup<Db: DbHandle>(
ctx: &RpcContext, ctx: &RpcContext,
mut db: Db, mut db: Db,
@@ -360,19 +358,6 @@ async fn perform_backup<Db: DbHandle>(
.await .await
.with_kind(ErrorKind::Filesystem)?; .with_kind(ErrorKind::Filesystem)?;
let luks_folder_old = backup_guard.as_ref().join("luks.old");
if tokio::fs::metadata(&luks_folder_old).await.is_ok() {
tokio::fs::remove_dir_all(&luks_folder_old).await?;
}
let luks_folder_bak = backup_guard.as_ref().join("luks");
if tokio::fs::metadata(&luks_folder_bak).await.is_ok() {
tokio::fs::rename(&luks_folder_bak, &luks_folder_old).await?;
}
let luks_folder = Path::new("/media/embassy/config/luks");
if tokio::fs::metadata(&luks_folder).await.is_ok() {
dir_copy(&luks_folder, &luks_folder_bak, None).await?;
}
let timestamp = Some(Utc::now()); let timestamp = Some(Utc::now());
backup_guard.unencrypted_metadata.version = crate::version::Current::new().semver().into(); backup_guard.unencrypted_metadata.version = crate::version::Current::new().semver().into();

View File

@@ -92,7 +92,7 @@ impl BackupActions {
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(ctx, db))]
pub async fn create<Db: DbHandle>( pub async fn create<Db: DbHandle>(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
@@ -189,7 +189,7 @@ impl BackupActions {
}) })
} }
#[instrument(skip_all)] #[instrument(skip(ctx, db))]
pub async fn restore<Db: DbHandle>( pub async fn restore<Db: DbHandle>(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,

View File

@@ -1,13 +1,12 @@
use openssl::pkey::PKey;
use openssl::x509::X509;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use crate::account::AccountInfo; use crate::account::AccountInfo;
use crate::hostname::{generate_hostname, generate_id, Hostname}; use crate::hostname::{generate_hostname, generate_id, Hostname};
use crate::net::keys::Key; use crate::net::keys::Key;
use crate::util::serde::Base64; use crate::util::serde::Base64;
use crate::Error; use crate::Error;
use openssl::pkey::PKey;
use openssl::x509::X509;
use serde::{Deserialize, Serialize};
use serde_json::Value;
pub struct OsBackup { pub struct OsBackup {
pub account: AccountInfo, pub account: AccountInfo,

View File

@@ -6,8 +6,8 @@ use std::time::Duration;
use clap::ArgMatches; use clap::ArgMatches;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use futures::future::BoxFuture; use futures::{future::BoxFuture, stream};
use futures::{stream, FutureExt, StreamExt}; use futures::{FutureExt, StreamExt};
use openssl::x509::X509; use openssl::x509::X509;
use patch_db::{DbHandle, PatchDbHandle}; use patch_db::{DbHandle, PatchDbHandle};
use rpc_toolkit::command; use rpc_toolkit::command;
@@ -46,7 +46,7 @@ fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<Vec<PackageId>, Er
} }
#[command(rename = "restore", display(display_none))] #[command(rename = "restore", display(display_none))]
#[instrument(skip_all)] #[instrument(skip(ctx, password))]
pub async fn restore_packages_rpc( pub async fn restore_packages_rpc(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[arg(parse(parse_comma_separated))] ids: Vec<PackageId>, #[arg(parse(parse_comma_separated))] ids: Vec<PackageId>,
@@ -109,7 +109,7 @@ async fn approximate_progress(
if tokio::fs::metadata(&dir).await.is_err() { if tokio::fs::metadata(&dir).await.is_err() {
*size = 0; *size = 0;
} else { } else {
*size = dir_size(&dir, None).await?; *size = dir_size(&dir).await?;
} }
} }
Ok(()) Ok(())
@@ -169,7 +169,7 @@ impl ProgressInfo {
} }
} }
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn recover_full_embassy( pub async fn recover_full_embassy(
ctx: SetupContext, ctx: SetupContext,
disk_guid: Arc<String>, disk_guid: Arc<String>,
@@ -285,7 +285,7 @@ async fn restore_packages(
progress_info.package_installs.insert(id.clone(), progress); progress_info.package_installs.insert(id.clone(), progress);
progress_info progress_info
.src_volume_size .src_volume_size
.insert(id.clone(), dir_size(backup_dir(&id), None).await?); .insert(id.clone(), dir_size(backup_dir(&id)).await?);
progress_info.target_volume_size.insert(id.clone(), 0); progress_info.target_volume_size.insert(id.clone(), 0);
let package_id = id.clone(); let package_id = id.clone();
tasks.push( tasks.push(
@@ -306,7 +306,7 @@ async fn restore_packages(
Ok((backup_guard, tasks, progress_info)) Ok((backup_guard, tasks, progress_info))
} }
#[instrument(skip_all)] #[instrument(skip(ctx, db, backup_guard))]
async fn assure_restoring( async fn assure_restoring(
ctx: &RpcContext, ctx: &RpcContext,
db: &mut PatchDbHandle, db: &mut PatchDbHandle,
@@ -376,7 +376,7 @@ async fn assure_restoring(
Ok(guards) Ok(guards)
} }
#[instrument(skip_all)] #[instrument(skip(ctx, guard))]
async fn restore_package<'a>( async fn restore_package<'a>(
ctx: RpcContext, ctx: RpcContext,
manifest: Manifest, manifest: Manifest,
@@ -443,7 +443,7 @@ async fn restore_package<'a>(
Ok(( Ok((
progress.clone(), progress.clone(),
async move { async move {
download_install_s9pk(&ctx, &manifest, None, progress, file, None).await?; download_install_s9pk(&ctx, &manifest, None, progress, file).await?;
guard.unmount().await?; guard.unmount().await?;

View File

@@ -7,12 +7,10 @@ use clap::ArgMatches;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use digest::generic_array::GenericArray; use digest::generic_array::GenericArray;
use digest::OutputSizeUser; use digest::OutputSizeUser;
use lazy_static::lazy_static;
use rpc_toolkit::command; use rpc_toolkit::command;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use sha2::Sha256; use sha2::Sha256;
use sqlx::{Executor, Postgres}; use sqlx::{Executor, Postgres};
use tokio::sync::Mutex;
use tracing::instrument; use tracing::instrument;
use self::cifs::CifsBackupTarget; use self::cifs::CifsBackupTarget;
@@ -25,7 +23,7 @@ use crate::disk::mount::guard::TmpMountGuard;
use crate::disk::util::PartitionInfo; use crate::disk::util::PartitionInfo;
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;
use crate::util::serde::{deserialize_from_str, display_serializable, serialize_display}; use crate::util::serde::{deserialize_from_str, display_serializable, serialize_display};
use crate::util::{display_none, Version}; use crate::util::Version;
use crate::Error; use crate::Error;
pub mod cifs; pub mod cifs;
@@ -44,7 +42,7 @@ pub enum BackupTarget {
Cifs(CifsBackupTarget), Cifs(CifsBackupTarget),
} }
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord)] #[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum BackupTargetId { pub enum BackupTargetId {
Disk { logicalname: PathBuf }, Disk { logicalname: PathBuf },
Cifs { id: i32 }, Cifs { id: i32 },
@@ -131,7 +129,7 @@ impl FileSystem for BackupTargetFS {
} }
} }
#[command(subcommands(cifs::cifs, list, info, mount, umount))] #[command(subcommands(cifs::cifs, list, info))]
pub fn target() -> Result<(), Error> { pub fn target() -> Result<(), Error> {
Ok(()) Ok(())
} }
@@ -225,7 +223,7 @@ fn display_backup_info(info: BackupInfo, matches: &ArgMatches) {
} }
#[command(display(display_backup_info))] #[command(display(display_backup_info))]
#[instrument(skip_all)] #[instrument(skip(ctx, password))]
pub async fn info( pub async fn info(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[arg(rename = "target-id")] target_id: BackupTargetId, #[arg(rename = "target-id")] target_id: BackupTargetId,
@@ -249,61 +247,3 @@ pub async fn info(
Ok(res) Ok(res)
} }
lazy_static! {
static ref USER_MOUNTS: Mutex<BTreeMap<BackupTargetId, BackupMountGuard<TmpMountGuard>>> =
Mutex::new(BTreeMap::new());
}
#[command]
#[instrument(skip_all)]
pub async fn mount(
#[context] ctx: RpcContext,
#[arg(rename = "target-id")] target_id: BackupTargetId,
#[arg] password: String,
) -> Result<String, Error> {
let mut mounts = USER_MOUNTS.lock().await;
if let Some(existing) = mounts.get(&target_id) {
return Ok(existing.as_ref().display().to_string());
}
let guard = BackupMountGuard::mount(
TmpMountGuard::mount(
&target_id
.clone()
.load(&mut ctx.secret_store.acquire().await?)
.await?,
ReadWrite,
)
.await?,
&password,
)
.await?;
let res = guard.as_ref().display().to_string();
mounts.insert(target_id, guard);
Ok(res)
}
#[command(display(display_none))]
#[instrument(skip_all)]
pub async fn umount(
#[context] ctx: RpcContext,
#[arg(rename = "target-id")] target_id: Option<BackupTargetId>,
) -> Result<(), Error> {
let mut mounts = USER_MOUNTS.lock().await;
if let Some(target_id) = target_id {
if let Some(existing) = mounts.remove(&target_id) {
existing.unmount().await?;
}
} else {
for (_, existing) in std::mem::take(&mut *mounts) {
existing.unmount().await?;
}
}
Ok(())
}

View File

@@ -14,7 +14,7 @@ fn log_str_error(action: &str, e: i32) {
} }
} }
pub fn main() { fn main() {
let aliases: Vec<_> = std::env::args().skip(1).collect(); let aliases: Vec<_> = std::env::args().skip(1).collect();
unsafe { unsafe {
let simple_poll = avahi_sys::avahi_simple_poll_new(); let simple_poll = avahi_sys::avahi_simple_poll_new();

View File

@@ -1,22 +1,21 @@
use clap::Arg; use clap::Arg;
use embassy::context::CliContext;
use embassy::util::logger::EmbassyLogger;
use embassy::version::{Current, VersionT};
use embassy::Error;
use rpc_toolkit::run_cli; use rpc_toolkit::run_cli;
use rpc_toolkit::yajrc::RpcError; use rpc_toolkit::yajrc::RpcError;
use serde_json::Value; use serde_json::Value;
use crate::context::CliContext;
use crate::util::logger::EmbassyLogger;
use crate::version::{Current, VersionT};
use crate::Error;
lazy_static::lazy_static! { lazy_static::lazy_static! {
static ref VERSION_STRING: String = Current::new().semver().to_string(); static ref VERSION_STRING: String = Current::new().semver().to_string();
} }
fn inner_main() -> Result<(), Error> { fn inner_main() -> Result<(), Error> {
run_cli!({ run_cli!({
command: crate::main_api, command: embassy::main_api,
app: app => app app: app => app
.name("StartOS CLI") .name("Embassy CLI")
.version(&**VERSION_STRING) .version(&**VERSION_STRING)
.arg( .arg(
clap::Arg::with_name("config") clap::Arg::with_name("config")
@@ -49,7 +48,7 @@ fn inner_main() -> Result<(), Error> {
Ok(()) Ok(())
} }
pub fn main() { fn main() {
match inner_main() { match inner_main() {
Ok(_) => (), Ok(_) => (),
Err(e) => { Err(e) => {

View File

@@ -1,71 +1,28 @@
use std::net::{Ipv6Addr, SocketAddr};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use embassy::context::rpc::RpcContextConfig;
use embassy::context::{DiagnosticContext, InstallContext, SetupContext};
use embassy::disk::fsck::RepairStrategy;
use embassy::disk::main::DEFAULT_PASSWORD;
use embassy::disk::REPAIR_DISK_PATH;
use embassy::init::STANDBY_MODE_PATH;
use embassy::net::web_server::WebServer;
use embassy::shutdown::Shutdown;
use embassy::sound::CHIME;
use embassy::util::logger::EmbassyLogger;
use embassy::util::Invoke;
use embassy::{Error, ErrorKind, ResultExt, IS_RASPBERRY_PI};
use tokio::process::Command; use tokio::process::Command;
use tracing::instrument; use tracing::instrument;
use crate::context::rpc::RpcContextConfig; #[instrument]
use crate::context::{DiagnosticContext, InstallContext, SetupContext};
use crate::disk::fsck::RepairStrategy;
use crate::disk::main::DEFAULT_PASSWORD;
use crate::disk::REPAIR_DISK_PATH;
use crate::init::STANDBY_MODE_PATH;
use crate::net::web_server::WebServer;
use crate::shutdown::Shutdown;
use crate::sound::CHIME;
use crate::util::logger::EmbassyLogger;
use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt, OS_ARCH};
#[instrument(skip_all)]
async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> { async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
Command::new("ln") if tokio::fs::metadata("/cdrom").await.is_ok() {
.arg("-sf")
.arg("/usr/lib/embassy/scripts/fake-apt")
.arg("/usr/local/bin/apt")
.invoke(crate::ErrorKind::OpenSsh)
.await?;
Command::new("ln")
.arg("-sf")
.arg("/usr/lib/embassy/scripts/fake-apt")
.arg("/usr/local/bin/apt-get")
.invoke(crate::ErrorKind::OpenSsh)
.await?;
Command::new("ln")
.arg("-sf")
.arg("/usr/lib/embassy/scripts/fake-apt")
.arg("/usr/local/bin/aptitude")
.invoke(crate::ErrorKind::OpenSsh)
.await?;
Command::new("make-ssl-cert")
.arg("generate-default-snakeoil")
.arg("--force-overwrite")
.invoke(crate::ErrorKind::OpenSsl)
.await?;
if tokio::fs::metadata("/run/live/medium").await.is_ok() {
Command::new("sed")
.arg("-i")
.arg("s/PasswordAuthentication no/PasswordAuthentication yes/g")
.arg("/etc/ssh/sshd_config")
.invoke(crate::ErrorKind::Filesystem)
.await?;
Command::new("systemctl")
.arg("reload")
.arg("ssh")
.invoke(crate::ErrorKind::OpenSsh)
.await?;
let ctx = InstallContext::init(cfg_path).await?; let ctx = InstallContext::init(cfg_path).await?;
let server = WebServer::install( let server = WebServer::install(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
ctx.clone(),
)
.await?;
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
CHIME.play().await?; CHIME.play().await?;
@@ -79,7 +36,7 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
server.shutdown().await; server.shutdown().await;
Command::new("reboot") Command::new("reboot")
.invoke(crate::ErrorKind::Unknown) .invoke(embassy::ErrorKind::Unknown)
.await?; .await?;
} else if tokio::fs::metadata("/media/embassy/config/disk.guid") } else if tokio::fs::metadata("/media/embassy/config/disk.guid")
.await .await
@@ -87,11 +44,7 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
{ {
let ctx = SetupContext::init(cfg_path).await?; let ctx = SetupContext::init(cfg_path).await?;
let server = WebServer::setup( let server = WebServer::setup(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
ctx.clone(),
)
.await?;
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
CHIME.play().await?; CHIME.play().await?;
@@ -117,7 +70,7 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
let guid_string = tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy let guid_string = tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?; .await?;
let guid = guid_string.trim(); let guid = guid_string.trim();
let requires_reboot = crate::disk::main::import( let requires_reboot = embassy::disk::main::import(
guid, guid,
cfg.datadir(), cfg.datadir(),
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() { if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
@@ -125,26 +78,22 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
} else { } else {
RepairStrategy::Preen RepairStrategy::Preen
}, },
if guid.ends_with("_UNENC") { DEFAULT_PASSWORD,
None
} else {
Some(DEFAULT_PASSWORD)
},
) )
.await?; .await?;
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() { if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
tokio::fs::remove_file(REPAIR_DISK_PATH) tokio::fs::remove_file(REPAIR_DISK_PATH)
.await .await
.with_ctx(|_| (crate::ErrorKind::Filesystem, REPAIR_DISK_PATH))?; .with_ctx(|_| (embassy::ErrorKind::Filesystem, REPAIR_DISK_PATH))?;
} }
if requires_reboot.0 { if requires_reboot.0 {
crate::disk::main::export(guid, cfg.datadir()).await?; embassy::disk::main::export(guid, cfg.datadir()).await?;
Command::new("reboot") Command::new("reboot")
.invoke(crate::ErrorKind::Unknown) .invoke(embassy::ErrorKind::Unknown)
.await?; .await?;
} }
tracing::info!("Loaded Disk"); tracing::info!("Loaded Disk");
crate::init::init(&cfg).await?; embassy::init::init(&cfg).await?;
} }
Ok(()) Ok(())
@@ -168,16 +117,16 @@ async fn run_script_if_exists<P: AsRef<Path>>(path: P) {
} }
} }
#[instrument(skip_all)] #[instrument]
async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> { async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> {
if OS_ARCH == "raspberrypi" && tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() { if *IS_RASPBERRY_PI && tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() {
tokio::fs::remove_file(STANDBY_MODE_PATH).await?; tokio::fs::remove_file(STANDBY_MODE_PATH).await?;
Command::new("sync").invoke(ErrorKind::Filesystem).await?; Command::new("sync").invoke(ErrorKind::Filesystem).await?;
crate::sound::SHUTDOWN.play().await?; embassy::sound::SHUTDOWN.play().await?;
futures::future::pending::<()>().await; futures::future::pending::<()>().await;
} }
crate::sound::BEP.play().await?; embassy::sound::BEP.play().await?;
run_script_if_exists("/media/embassy/config/preinit.sh").await; run_script_if_exists("/media/embassy/config/preinit.sh").await;
@@ -185,7 +134,7 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
async move { async move {
tracing::error!("{}", e.source); tracing::error!("{}", e.source);
tracing::debug!("{}", e.source); tracing::debug!("{}", e.source);
crate::sound::BEETHOVEN.play().await?; embassy::sound::BEETHOVEN.play().await?;
let ctx = DiagnosticContext::init( let ctx = DiagnosticContext::init(
cfg_path, cfg_path,
@@ -206,11 +155,7 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
) )
.await?; .await?;
let server = WebServer::diagnostic( let server = WebServer::diagnostic(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
ctx.clone(),
)
.await?;
let shutdown = ctx.shutdown.subscribe().recv().await.unwrap(); let shutdown = ctx.shutdown.subscribe().recv().await.unwrap();
@@ -228,8 +173,8 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
res res
} }
pub fn main() { fn main() {
let matches = clap::App::new("start-init") let matches = clap::App::new("embassy-init")
.arg( .arg(
clap::Arg::with_name("config") clap::Arg::with_name("config")
.short('c') .short('c')
@@ -238,6 +183,8 @@ pub fn main() {
) )
.get_matches(); .get_matches();
EmbassyLogger::init();
let cfg_path = matches.value_of("config").map(|p| Path::new(p).to_owned()); let cfg_path = matches.value_of("config").map(|p| Path::new(p).to_owned());
let res = { let res = {
let rt = tokio::runtime::Builder::new_multi_thread() let rt = tokio::runtime::Builder::new_multi_thread()

View File

@@ -1,21 +1,20 @@
use embassy::context::SdkContext;
use embassy::util::logger::EmbassyLogger;
use embassy::version::{Current, VersionT};
use embassy::Error;
use rpc_toolkit::run_cli; use rpc_toolkit::run_cli;
use rpc_toolkit::yajrc::RpcError; use rpc_toolkit::yajrc::RpcError;
use serde_json::Value; use serde_json::Value;
use crate::context::SdkContext;
use crate::util::logger::EmbassyLogger;
use crate::version::{Current, VersionT};
use crate::Error;
lazy_static::lazy_static! { lazy_static::lazy_static! {
static ref VERSION_STRING: String = Current::new().semver().to_string(); static ref VERSION_STRING: String = Current::new().semver().to_string();
} }
fn inner_main() -> Result<(), Error> { fn inner_main() -> Result<(), Error> {
run_cli!({ run_cli!({
command: crate::portable_api, command: embassy::portable_api,
app: app => app app: app => app
.name("StartOS SDK") .name("Embassy SDK")
.version(&**VERSION_STRING) .version(&**VERSION_STRING)
.arg( .arg(
clap::Arg::with_name("config") clap::Arg::with_name("config")
@@ -48,7 +47,7 @@ fn inner_main() -> Result<(), Error> {
Ok(()) Ok(())
} }
pub fn main() { fn main() {
match inner_main() { match inner_main() {
Ok(_) => (), Ok(_) => (),
Err(e) => { Err(e) => {

View File

@@ -1,20 +1,18 @@
use std::net::{Ipv6Addr, SocketAddr};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::Arc; use std::sync::Arc;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use embassy::context::{DiagnosticContext, RpcContext};
use embassy::net::web_server::WebServer;
use embassy::shutdown::Shutdown;
use embassy::system::launch_metrics_task;
use embassy::util::logger::EmbassyLogger;
use embassy::{Error, ErrorKind, ResultExt};
use futures::{FutureExt, TryFutureExt}; use futures::{FutureExt, TryFutureExt};
use tokio::signal::unix::signal; use tokio::signal::unix::signal;
use tracing::instrument; use tracing::instrument;
use crate::context::{DiagnosticContext, RpcContext}; #[instrument]
use crate::net::web_server::WebServer;
use crate::shutdown::Shutdown;
use crate::system::launch_metrics_task;
use crate::util::logger::EmbassyLogger;
use crate::{Error, ErrorKind, ResultExt};
#[instrument(skip_all)]
async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> { async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> {
let (rpc_ctx, server, shutdown) = { let (rpc_ctx, server, shutdown) = {
let rpc_ctx = RpcContext::init( let rpc_ctx = RpcContext::init(
@@ -27,12 +25,8 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
), ),
) )
.await?; .await?;
crate::hostname::sync_hostname(&rpc_ctx.account.read().await.hostname).await?; embassy::hostname::sync_hostname(&*rpc_ctx.account.read().await).await?;
let server = WebServer::main( let server = WebServer::main(([0, 0, 0, 0], 80).into(), rpc_ctx.clone()).await?;
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
rpc_ctx.clone(),
)
.await?;
let mut shutdown_recv = rpc_ctx.shutdown.subscribe(); let mut shutdown_recv = rpc_ctx.shutdown.subscribe();
@@ -72,7 +66,7 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
.await .await
}); });
crate::sound::CHIME.play().await?; embassy::sound::CHIME.play().await?;
metrics_task metrics_task
.map_err(|e| { .map_err(|e| {
@@ -101,15 +95,8 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
Ok(shutdown) Ok(shutdown)
} }
pub fn main() { fn main() {
EmbassyLogger::init(); let matches = clap::App::new("embassyd")
if !Path::new("/run/embassy/initialized").exists() {
super::start_init::main();
std::fs::write("/run/embassy/initialized", "").unwrap();
}
let matches = clap::App::new("startd")
.arg( .arg(
clap::Arg::with_name("config") clap::Arg::with_name("config")
.short('c') .short('c')
@@ -118,6 +105,8 @@ pub fn main() {
) )
.get_matches(); .get_matches();
EmbassyLogger::init();
let cfg_path = matches.value_of("config").map(|p| Path::new(p).to_owned()); let cfg_path = matches.value_of("config").map(|p| Path::new(p).to_owned());
let res = { let res = {
@@ -132,7 +121,7 @@ pub fn main() {
async { async {
tracing::error!("{}", e.source); tracing::error!("{}", e.source);
tracing::debug!("{:?}", e.source); tracing::debug!("{:?}", e.source);
crate::sound::BEETHOVEN.play().await?; embassy::sound::BEETHOVEN.play().await?;
let ctx = DiagnosticContext::init( let ctx = DiagnosticContext::init(
cfg_path, cfg_path,
if tokio::fs::metadata("/media/embassy/config/disk.guid") if tokio::fs::metadata("/media/embassy/config/disk.guid")
@@ -152,11 +141,8 @@ pub fn main() {
) )
.await?; .await?;
let server = WebServer::diagnostic( let server =
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80), WebServer::diagnostic(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
ctx.clone(),
)
.await?;
let mut shutdown = ctx.shutdown.subscribe(); let mut shutdown = ctx.shutdown.subscribe();

View File

@@ -1,9 +0,0 @@
pub fn renamed(old: &str, new: &str) -> ! {
eprintln!("{old} has been renamed to {new}");
std::process::exit(1)
}
pub fn removed(name: &str) -> ! {
eprintln!("{name} has been removed");
std::process::exit(1)
}

View File

@@ -1,55 +0,0 @@
use std::path::Path;
#[cfg(feature = "avahi-alias")]
pub mod avahi_alias;
pub mod deprecated;
#[cfg(feature = "cli")]
pub mod start_cli;
#[cfg(feature = "daemon")]
pub mod start_init;
#[cfg(feature = "sdk")]
pub mod start_sdk;
#[cfg(feature = "daemon")]
pub mod startd;
fn select_executable(name: &str) -> Option<fn()> {
match name {
#[cfg(feature = "avahi-alias")]
"avahi-alias" => Some(avahi_alias::main),
#[cfg(feature = "cli")]
"start-cli" => Some(start_cli::main),
#[cfg(feature = "sdk")]
"start-sdk" => Some(start_sdk::main),
#[cfg(feature = "daemon")]
"startd" => Some(startd::main),
"embassy-cli" => Some(|| deprecated::renamed("embassy-cli", "start-cli")),
"embassy-sdk" => Some(|| deprecated::renamed("embassy-sdk", "start-sdk")),
"embassyd" => Some(|| deprecated::renamed("embassyd", "startd")),
"embassy-init" => Some(|| deprecated::removed("embassy-init")),
_ => None,
}
}
pub fn startbox() {
let args = std::env::args().take(2).collect::<Vec<_>>();
if let Some(x) = args
.get(0)
.and_then(|s| Path::new(&*s).file_name())
.and_then(|s| s.to_str())
.and_then(|s| select_executable(&s))
{
x()
} else if let Some(x) = args.get(1).and_then(|s| select_executable(&s)) {
x()
} else {
eprintln!(
"unknown executable: {}",
args.get(0)
.filter(|x| &**x != "startbox")
.or_else(|| args.get(1))
.map(|s| s.as_str())
.unwrap_or("N/A")
);
std::process::exit(1);
}
}

View File

@@ -31,7 +31,7 @@ pub struct ConfigActions {
pub set: PackageProcedure, pub set: PackageProcedure,
} }
impl ConfigActions { impl ConfigActions {
#[instrument(skip_all)] #[instrument]
pub fn validate( pub fn validate(
&self, &self,
container: &Option<DockerContainers>, container: &Option<DockerContainers>,
@@ -47,7 +47,7 @@ impl ConfigActions {
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Config Set"))?; .with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Config Set"))?;
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn get( pub async fn get(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
@@ -71,7 +71,7 @@ impl ConfigActions {
}) })
} }
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn set( pub async fn set(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,

View File

@@ -214,7 +214,7 @@ impl ConfigGetReceipts {
} }
#[command(display(display_serializable))] #[command(display(display_serializable))]
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn get( pub async fn get(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[parent_data] id: PackageId, #[parent_data] id: PackageId,
@@ -240,7 +240,7 @@ pub async fn get(
display(display_none), display(display_none),
metadata(sync_db = true) metadata(sync_db = true)
)] )]
#[instrument(skip_all)] #[instrument]
pub fn set( pub fn set(
#[parent_data] id: PackageId, #[parent_data] id: PackageId,
#[allow(unused_variables)] #[allow(unused_variables)]
@@ -413,7 +413,7 @@ impl ConfigReceipts {
} }
#[command(rename = "dry", display(display_serializable))] #[command(rename = "dry", display(display_serializable))]
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn set_dry( pub async fn set_dry(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[parent_data] (id, config, timeout): (PackageId, Option<Config>, Option<Duration>), #[parent_data] (id, config, timeout): (PackageId, Option<Config>, Option<Duration>),
@@ -440,7 +440,7 @@ pub async fn set_dry(
Ok(BreakageRes(breakages)) Ok(BreakageRes(breakages))
} }
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn set_impl( pub async fn set_impl(
ctx: RpcContext, ctx: RpcContext,
(id, config, timeout): (PackageId, Option<Config>, Option<Duration>), (id, config, timeout): (PackageId, Option<Config>, Option<Duration>),
@@ -465,7 +465,7 @@ pub async fn set_impl(
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(ctx, db, receipts))]
pub async fn configure<'a, Db: DbHandle>( pub async fn configure<'a, Db: DbHandle>(
ctx: &RpcContext, ctx: &RpcContext,
db: &'a mut Db, db: &'a mut Db,
@@ -485,7 +485,7 @@ pub async fn configure<'a, Db: DbHandle>(
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(ctx, db, receipts))]
pub fn configure_rec<'a, Db: DbHandle>( pub fn configure_rec<'a, Db: DbHandle>(
ctx: &'a RpcContext, ctx: &'a RpcContext,
db: &'a mut Db, db: &'a mut Db,
@@ -503,27 +503,19 @@ pub fn configure_rec<'a, Db: DbHandle>(
.config_actions .config_actions
.get(db, id) .get(db, id)
.await? .await?
.ok_or_else(|| not_found!(id))?; .ok_or_else(not_found)?;
let dependencies = receipts let dependencies = receipts
.dependencies .dependencies
.get(db, id) .get(db, id)
.await? .await?
.ok_or_else(|| not_found!(id))?; .ok_or_else(not_found)?;
let volumes = receipts let volumes = receipts.volumes.get(db, id).await?.ok_or_else(not_found)?;
.volumes
.get(db, id)
.await?
.ok_or_else(|| not_found!(id))?;
let is_needs_config = !receipts let is_needs_config = !receipts
.configured .configured
.get(db, id) .get(db, id)
.await? .await?
.ok_or_else(|| not_found!(id))?; .ok_or_else(not_found)?;
let version = receipts let version = receipts.version.get(db, id).await?.ok_or_else(not_found)?;
.version
.get(db, id)
.await?
.ok_or_else(|| not_found!(id))?;
// get current config and current spec // get current config and current spec
let ConfigRes { let ConfigRes {
@@ -538,11 +530,7 @@ pub fn configure_rec<'a, Db: DbHandle>(
spec.gen(&mut rand::rngs::StdRng::from_entropy(), timeout)? spec.gen(&mut rand::rngs::StdRng::from_entropy(), timeout)?
}; };
let manifest = receipts let manifest = receipts.manifest.get(db, id).await?.ok_or_else(not_found)?;
.manifest
.get(db, id)
.await?
.ok_or_else(|| not_found!(id))?;
spec.validate(&manifest)?; spec.validate(&manifest)?;
spec.matches(&config)?; // check that new config matches spec spec.matches(&config)?; // check that new config matches spec
@@ -561,7 +549,7 @@ pub fn configure_rec<'a, Db: DbHandle>(
.system_pointers .system_pointers
.get(db, &id) .get(db, &id)
.await? .await?
.ok_or_else(|| not_found!(id))?; .ok_or_else(not_found)?;
sys.truncate(0); sys.truncate(0);
let mut current_dependencies: CurrentDependencies = CurrentDependencies( let mut current_dependencies: CurrentDependencies = CurrentDependencies(
dependencies dependencies
@@ -667,7 +655,7 @@ pub fn configure_rec<'a, Db: DbHandle>(
.dependency_errors .dependency_errors
.get(db, &id) .get(db, &id)
.await? .await?
.ok_or_else(|| not_found!(id))?; .ok_or_else(not_found)?;
tracing::warn!("Dependency Errors: {:?}", errs); tracing::warn!("Dependency Errors: {:?}", errs);
let errs = DependencyErrors::init( let errs = DependencyErrors::init(
ctx, ctx,
@@ -687,7 +675,7 @@ pub fn configure_rec<'a, Db: DbHandle>(
.current_dependents .current_dependents
.get(db, id) .get(db, id)
.await? .await?
.ok_or_else(|| not_found!(id))?; .ok_or_else(not_found)?;
let prev = if is_needs_config { None } else { old_config } let prev = if is_needs_config { None } else { old_config }
.map(Value::Object) .map(Value::Object)
.unwrap_or_default(); .unwrap_or_default();
@@ -705,7 +693,7 @@ pub fn configure_rec<'a, Db: DbHandle>(
.manifest .manifest
.get(db, &dependent) .get(db, &dependent)
.await? .await?
.ok_or_else(|| not_found!(id))?; .ok_or_else(not_found)?;
if let Err(error) = cfg if let Err(error) = cfg
.check( .check(
ctx, ctx,
@@ -783,16 +771,10 @@ pub fn configure_rec<'a, Db: DbHandle>(
} }
.boxed() .boxed()
} }
#[instrument]
macro_rules! not_found { pub fn not_found() -> Error {
($x:expr) => { Error::new(eyre!("Could not find"), crate::ErrorKind::Incoherent)
crate::Error::new(
color_eyre::eyre::eyre!("Could not find {} at {}:{}", $x, module_path!(), line!()),
crate::ErrorKind::Incoherent,
)
};
} }
pub(crate) use not_found;
/// We want to have a double check that the paths are what we expect them to be. /// We want to have a double check that the paths are what we expect them to be.
/// Found that earlier the paths where not what we expected them to be. /// Found that earlier the paths where not what we expected them to be.

View File

@@ -17,11 +17,12 @@ use rpc_toolkit::Context;
use serde::Deserialize; use serde::Deserialize;
use tracing::instrument; use tracing::instrument;
use super::setup::CURRENT_SECRET;
use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH; use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH;
use crate::util::config::{load_config_from_paths, local_config_path}; use crate::util::config::{load_config_from_paths, local_config_path};
use crate::ResultExt; use crate::ResultExt;
use super::setup::CURRENT_SECRET;
#[derive(Debug, Default, Deserialize)] #[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
pub struct CliContextConfig { pub struct CliContextConfig {
@@ -53,8 +54,7 @@ impl Drop for CliContextSeed {
true, true,
) )
.unwrap(); .unwrap();
let mut store = self.cookie_store.lock().unwrap(); let store = self.cookie_store.lock().unwrap();
store.remove("localhost", "", "local");
store.save_json(&mut *writer).unwrap(); store.save_json(&mut *writer).unwrap();
writer.sync_all().unwrap(); writer.sync_all().unwrap();
std::fs::rename(tmp, &self.cookie_path).unwrap(); std::fs::rename(tmp, &self.cookie_path).unwrap();
@@ -68,7 +68,7 @@ const DEFAULT_PORT: u16 = 5959;
pub struct CliContext(Arc<CliContextSeed>); pub struct CliContext(Arc<CliContextSeed>);
impl CliContext { impl CliContext {
/// BLOCKING /// BLOCKING
#[instrument(skip_all)] #[instrument(skip(matches))]
pub fn init(matches: &ArgMatches) -> Result<Self, crate::Error> { pub fn init(matches: &ArgMatches) -> Result<Self, crate::Error> {
let local_config_path = local_config_path(); let local_config_path = local_config_path();
let base: CliContextConfig = load_config_from_paths( let base: CliContextConfig = load_config_from_paths(
@@ -101,22 +101,19 @@ impl CliContext {
.unwrap_or(Path::new("/")) .unwrap_or(Path::new("/"))
.join(".cookies.json") .join(".cookies.json")
}); });
let cookie_store = Arc::new(CookieStoreMutex::new({ let cookie_store = Arc::new(CookieStoreMutex::new(if cookie_path.exists() {
let mut store = if cookie_path.exists() { let mut store = CookieStore::load_json(BufReader::new(File::open(&cookie_path)?))
CookieStore::load_json(BufReader::new(File::open(&cookie_path)?)) .map_err(|e| eyre!("{}", e))
.map_err(|e| eyre!("{}", e)) .with_kind(crate::ErrorKind::Deserialization)?;
.with_kind(crate::ErrorKind::Deserialization)?
} else {
CookieStore::default()
};
if let Ok(local) = std::fs::read_to_string(LOCAL_AUTH_COOKIE_PATH) { if let Ok(local) = std::fs::read_to_string(LOCAL_AUTH_COOKIE_PATH) {
store store
.insert_raw(&Cookie::new("local", local), &"http://localhost".parse()?) .insert_raw(&Cookie::new("local", local), &"http://localhost".parse()?)
.with_kind(crate::ErrorKind::Network)?; .with_kind(crate::ErrorKind::Network)?;
} }
store store
} else {
CookieStore::default()
})); }));
Ok(CliContext(Arc::new(CliContextSeed { Ok(CliContext(Arc::new(CliContextSeed {
base_url: url.clone(), base_url: url.clone(),
rpc_url: { rpc_url: {

View File

@@ -18,7 +18,7 @@ pub struct DiagnosticContextConfig {
pub datadir: Option<PathBuf>, pub datadir: Option<PathBuf>,
} }
impl DiagnosticContextConfig { impl DiagnosticContextConfig {
#[instrument(skip_all)] #[instrument(skip(path))]
pub async fn load<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> { pub async fn load<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
tokio::task::spawn_blocking(move || { tokio::task::spawn_blocking(move || {
load_config_from_paths( load_config_from_paths(
@@ -52,7 +52,7 @@ pub struct DiagnosticContextSeed {
#[derive(Clone)] #[derive(Clone)]
pub struct DiagnosticContext(Arc<DiagnosticContextSeed>); pub struct DiagnosticContext(Arc<DiagnosticContextSeed>);
impl DiagnosticContext { impl DiagnosticContext {
#[instrument(skip_all)] #[instrument(skip(path))]
pub async fn init<P: AsRef<Path> + Send + 'static>( pub async fn init<P: AsRef<Path> + Send + 'static>(
path: Option<P>, path: Option<P>,
disk_guid: Option<Arc<String>>, disk_guid: Option<Arc<String>>,

View File

@@ -15,7 +15,7 @@ use crate::Error;
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
pub struct InstallContextConfig {} pub struct InstallContextConfig {}
impl InstallContextConfig { impl InstallContextConfig {
#[instrument(skip_all)] #[instrument(skip(path))]
pub async fn load<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> { pub async fn load<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
tokio::task::spawn_blocking(move || { tokio::task::spawn_blocking(move || {
load_config_from_paths( load_config_from_paths(
@@ -38,7 +38,7 @@ pub struct InstallContextSeed {
#[derive(Clone)] #[derive(Clone)]
pub struct InstallContext(Arc<InstallContextSeed>); pub struct InstallContext(Arc<InstallContextSeed>);
impl InstallContext { impl InstallContext {
#[instrument(skip_all)] #[instrument(skip(path))]
pub async fn init<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> { pub async fn init<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
let _cfg = InstallContextConfig::load(path.as_ref().map(|p| p.as_ref().to_owned())).await?; let _cfg = InstallContextConfig::load(path.as_ref().map(|p| p.as_ref().to_owned())).await?;
let (shutdown, _) = tokio::sync::broadcast::channel(1); let (shutdown, _) = tokio::sync::broadcast::channel(1);

View File

@@ -11,7 +11,7 @@ use helpers::to_tmp_path;
use josekit::jwk::Jwk; use josekit::jwk::Jwk;
use patch_db::json_ptr::JsonPointer; use patch_db::json_ptr::JsonPointer;
use patch_db::{DbHandle, LockReceipt, LockType, PatchDb}; use patch_db::{DbHandle, LockReceipt, LockType, PatchDb};
use reqwest::{Client, Proxy, Url}; use reqwest::Url;
use rpc_toolkit::Context; use rpc_toolkit::Context;
use serde::Deserialize; use serde::Deserialize;
use sqlx::postgres::PgConnectOptions; use sqlx::postgres::PgConnectOptions;
@@ -19,12 +19,11 @@ use sqlx::PgPool;
use tokio::sync::{broadcast, oneshot, Mutex, RwLock}; use tokio::sync::{broadcast, oneshot, Mutex, RwLock};
use tracing::instrument; use tracing::instrument;
use super::setup::CURRENT_SECRET;
use crate::account::AccountInfo; use crate::account::AccountInfo;
use crate::core::rpc_continuations::{RequestGuid, RestHandler, RpcContinuation}; use crate::core::rpc_continuations::{RequestGuid, RestHandler, RpcContinuation};
use crate::db::model::{CurrentDependents, Database, InstalledPackageDataEntry, PackageDataEntry}; use crate::db::model::{Database, InstalledPackageDataEntry, PackageDataEntry};
use crate::disk::OsPartitionInfo; use crate::disk::OsPartitionInfo;
use crate::init::init_postgres; use crate::init::{init_postgres, pgloader};
use crate::install::cleanup::{cleanup_failed, uninstall, CleanupFailedReceipts}; use crate::install::cleanup::{cleanup_failed, uninstall, CleanupFailedReceipts};
use crate::manager::ManagerMap; use crate::manager::ManagerMap;
use crate::middleware::auth::HashSessionToken; use crate::middleware::auth::HashSessionToken;
@@ -34,11 +33,11 @@ use crate::net::wifi::WpaCli;
use crate::notifications::NotificationManager; use crate::notifications::NotificationManager;
use crate::shutdown::Shutdown; use crate::shutdown::Shutdown;
use crate::status::{MainStatus, Status}; use crate::status::{MainStatus, Status};
use crate::system::get_mem_info;
use crate::util::config::load_config_from_paths; use crate::util::config::load_config_from_paths;
use crate::util::lshw::{lshw, LshwDevice};
use crate::{Error, ErrorKind, ResultExt}; use crate::{Error, ErrorKind, ResultExt};
use super::setup::CURRENT_SECRET;
#[derive(Debug, Default, Deserialize)] #[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
pub struct RpcContextConfig { pub struct RpcContextConfig {
@@ -87,7 +86,7 @@ impl RpcContextConfig {
} }
Ok(db) Ok(db)
} }
#[instrument(skip_all)] #[instrument]
pub async fn secret_store(&self) -> Result<PgPool, Error> { pub async fn secret_store(&self) -> Result<PgPool, Error> {
init_postgres(self.datadir()).await?; init_postgres(self.datadir()).await?;
let secret_store = let secret_store =
@@ -97,6 +96,15 @@ impl RpcContextConfig {
.run(&secret_store) .run(&secret_store)
.await .await
.with_kind(crate::ErrorKind::Database)?; .with_kind(crate::ErrorKind::Database)?;
let old_db_path = self.datadir().join("main/secrets.db");
if tokio::fs::metadata(&old_db_path).await.is_ok() {
pgloader(
&old_db_path,
self.migration_batch_rows.unwrap_or(25000),
self.migration_prefetch_rows.unwrap_or(100_000),
)
.await?;
}
Ok(secret_store) Ok(secret_store)
} }
} }
@@ -122,13 +130,6 @@ pub struct RpcContextSeed {
pub rpc_stream_continuations: Mutex<BTreeMap<RequestGuid, RpcContinuation>>, pub rpc_stream_continuations: Mutex<BTreeMap<RequestGuid, RpcContinuation>>,
pub wifi_manager: Option<Arc<RwLock<WpaCli>>>, pub wifi_manager: Option<Arc<RwLock<WpaCli>>>,
pub current_secret: Arc<Jwk>, pub current_secret: Arc<Jwk>,
pub client: Client,
pub hardware: Hardware,
}
pub struct Hardware {
pub devices: Vec<LshwDevice>,
pub ram: u64,
} }
pub struct RpcCleanReceipts { pub struct RpcCleanReceipts {
@@ -172,7 +173,7 @@ impl RpcCleanReceipts {
#[derive(Clone)] #[derive(Clone)]
pub struct RpcContext(Arc<RpcContextSeed>); pub struct RpcContext(Arc<RpcContextSeed>);
impl RpcContext { impl RpcContext {
#[instrument(skip_all)] #[instrument(skip(cfg_path))]
pub async fn init<P: AsRef<Path> + Send + 'static>( pub async fn init<P: AsRef<Path> + Send + 'static>(
cfg_path: Option<P>, cfg_path: Option<P>,
disk_guid: Arc<String>, disk_guid: Arc<String>,
@@ -196,7 +197,6 @@ impl RpcContext {
NetController::init( NetController::init(
base.tor_control base.tor_control
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))), .unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
tor_proxy,
base.dns_bind base.dns_bind
.as_ref() .as_ref()
.map(|v| v.as_slice()) .map(|v| v.as_slice())
@@ -212,9 +212,6 @@ impl RpcContext {
let metrics_cache = RwLock::new(None); let metrics_cache = RwLock::new(None);
let notification_manager = NotificationManager::new(secret_store.clone()); let notification_manager = NotificationManager::new(secret_store.clone());
tracing::info!("Initialized Notification Manager"); tracing::info!("Initialized Notification Manager");
let tor_proxy_url = format!("socks5h://{tor_proxy}");
let devices = lshw().await?;
let ram = get_mem_info().await?.total.0 as u64 * 1024 * 1024;
let seed = Arc::new(RpcContextSeed { let seed = Arc::new(RpcContextSeed {
is_closed: AtomicBool::new(false), is_closed: AtomicBool::new(false),
datadir: base.datadir().to_path_buf(), datadir: base.datadir().to_path_buf(),
@@ -247,17 +244,6 @@ impl RpcContext {
) )
})?, })?,
), ),
client: Client::builder()
.proxy(Proxy::custom(move |url| {
if url.host_str().map_or(false, |h| h.ends_with(".onion")) {
Some(tor_proxy_url.clone())
} else {
None
}
}))
.build()
.with_kind(crate::ErrorKind::ParseUrl)?,
hardware: Hardware { devices, ram },
}); });
let res = Self(seed); let res = Self(seed);
@@ -274,7 +260,7 @@ impl RpcContext {
Ok(res) Ok(res)
} }
#[instrument(skip_all)] #[instrument(skip(self))]
pub async fn shutdown(self) -> Result<(), Error> { pub async fn shutdown(self) -> Result<(), Error> {
self.managers.empty().await?; self.managers.empty().await?;
self.secret_store.close().await; self.secret_store.close().await;
@@ -284,49 +270,10 @@ impl RpcContext {
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(self))]
pub async fn cleanup(&self) -> Result<(), Error> { pub async fn cleanup(&self) -> Result<(), Error> {
let mut db = self.db.handle(); let mut db = self.db.handle();
let receipts = RpcCleanReceipts::new(&mut db).await?; let receipts = RpcCleanReceipts::new(&mut db).await?;
let packages = receipts.packages.get(&mut db).await?.0;
let mut current_dependents = packages
.keys()
.map(|k| (k.clone(), BTreeMap::new()))
.collect::<BTreeMap<_, _>>();
for (package_id, package) in packages {
for (k, v) in package
.into_installed()
.into_iter()
.flat_map(|i| i.current_dependencies.0)
{
let mut entry: BTreeMap<_, _> = current_dependents.remove(&k).unwrap_or_default();
entry.insert(package_id.clone(), v);
current_dependents.insert(k, entry);
}
}
for (package_id, current_dependents) in current_dependents {
if let Some(deps) = crate::db::DatabaseModel::new()
.package_data()
.idx_model(&package_id)
.and_then(|pde| pde.installed())
.map::<_, CurrentDependents>(|i| i.current_dependents())
.check(&mut db)
.await?
{
deps.put(&mut db, &CurrentDependents(current_dependents))
.await?;
} else if let Some(deps) = crate::db::DatabaseModel::new()
.package_data()
.idx_model(&package_id)
.and_then(|pde| pde.removing())
.map::<_, CurrentDependents>(|i| i.current_dependents())
.check(&mut db)
.await?
{
deps.put(&mut db, &CurrentDependents(current_dependents))
.await?;
}
}
for (package_id, package) in receipts.packages.get(&mut db).await?.0 { for (package_id, package) in receipts.packages.get(&mut db).await?.0 {
if let Err(e) = async { if let Err(e) = async {
match package { match package {
@@ -401,7 +348,7 @@ impl RpcContext {
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(self))]
pub async fn clean_continuations(&self) { pub async fn clean_continuations(&self) {
let mut continuations = self.rpc_stream_continuations.lock().await; let mut continuations = self.rpc_stream_continuations.lock().await;
let mut to_remove = Vec::new(); let mut to_remove = Vec::new();
@@ -415,7 +362,7 @@ impl RpcContext {
} }
} }
#[instrument(skip_all)] #[instrument(skip(self, handler))]
pub async fn add_continuation(&self, guid: RequestGuid, handler: RpcContinuation) { pub async fn add_continuation(&self, guid: RequestGuid, handler: RpcContinuation) {
self.clean_continuations().await; self.clean_continuations().await;
self.rpc_stream_continuations self.rpc_stream_continuations

View File

@@ -25,7 +25,7 @@ pub struct SdkContextSeed {
pub struct SdkContext(Arc<SdkContextSeed>); pub struct SdkContext(Arc<SdkContextSeed>);
impl SdkContext { impl SdkContext {
/// BLOCKING /// BLOCKING
#[instrument(skip_all)] #[instrument(skip(matches))]
pub fn init(matches: &ArgMatches) -> Result<Self, crate::Error> { pub fn init(matches: &ArgMatches) -> Result<Self, crate::Error> {
let local_config_path = local_config_path(); let local_config_path = local_config_path();
let base: SdkContextConfig = load_config_from_paths( let base: SdkContextConfig = load_config_from_paths(
@@ -49,7 +49,7 @@ impl SdkContext {
}))) })))
} }
/// BLOCKING /// BLOCKING
#[instrument(skip_all)] #[instrument]
pub fn developer_key(&self) -> Result<ed25519_dalek::Keypair, Error> { pub fn developer_key(&self) -> Result<ed25519_dalek::Keypair, Error> {
if !self.developer_key_path.exists() { if !self.developer_key_path.exists() {
return Err(Error::new(eyre!("Developer Key does not exist! Please run `embassy-sdk init` before running this command."), crate::ErrorKind::Uninitialized)); return Err(Error::new(eyre!("Developer Key does not exist! Please run `embassy-sdk init` before running this command."), crate::ErrorKind::Uninitialized));

View File

@@ -17,7 +17,7 @@ use tracing::instrument;
use crate::account::AccountInfo; use crate::account::AccountInfo;
use crate::db::model::Database; use crate::db::model::Database;
use crate::disk::OsPartitionInfo; use crate::disk::OsPartitionInfo;
use crate::init::init_postgres; use crate::init::{init_postgres, pgloader};
use crate::setup::SetupStatus; use crate::setup::SetupStatus;
use crate::util::config::load_config_from_paths; use crate::util::config::load_config_from_paths;
use crate::{Error, ResultExt}; use crate::{Error, ResultExt};
@@ -45,11 +45,9 @@ pub struct SetupContextConfig {
pub migration_batch_rows: Option<usize>, pub migration_batch_rows: Option<usize>,
pub migration_prefetch_rows: Option<usize>, pub migration_prefetch_rows: Option<usize>,
pub datadir: Option<PathBuf>, pub datadir: Option<PathBuf>,
#[serde(default)]
pub disable_encryption: bool,
} }
impl SetupContextConfig { impl SetupContextConfig {
#[instrument(skip_all)] #[instrument(skip(path))]
pub async fn load<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> { pub async fn load<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
tokio::task::spawn_blocking(move || { tokio::task::spawn_blocking(move || {
load_config_from_paths( load_config_from_paths(
@@ -77,7 +75,6 @@ pub struct SetupContextSeed {
pub config_path: Option<PathBuf>, pub config_path: Option<PathBuf>,
pub migration_batch_rows: usize, pub migration_batch_rows: usize,
pub migration_prefetch_rows: usize, pub migration_prefetch_rows: usize,
pub disable_encryption: bool,
pub shutdown: Sender<()>, pub shutdown: Sender<()>,
pub datadir: PathBuf, pub datadir: PathBuf,
pub selected_v2_drive: RwLock<Option<PathBuf>>, pub selected_v2_drive: RwLock<Option<PathBuf>>,
@@ -95,7 +92,7 @@ impl AsRef<Jwk> for SetupContextSeed {
#[derive(Clone)] #[derive(Clone)]
pub struct SetupContext(Arc<SetupContextSeed>); pub struct SetupContext(Arc<SetupContextSeed>);
impl SetupContext { impl SetupContext {
#[instrument(skip_all)] #[instrument(skip(path))]
pub async fn init<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> { pub async fn init<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
let cfg = SetupContextConfig::load(path.as_ref().map(|p| p.as_ref().to_owned())).await?; let cfg = SetupContextConfig::load(path.as_ref().map(|p| p.as_ref().to_owned())).await?;
let (shutdown, _) = tokio::sync::broadcast::channel(1); let (shutdown, _) = tokio::sync::broadcast::channel(1);
@@ -105,7 +102,6 @@ impl SetupContext {
config_path: path.as_ref().map(|p| p.as_ref().to_owned()), config_path: path.as_ref().map(|p| p.as_ref().to_owned()),
migration_batch_rows: cfg.migration_batch_rows.unwrap_or(25000), migration_batch_rows: cfg.migration_batch_rows.unwrap_or(25000),
migration_prefetch_rows: cfg.migration_prefetch_rows.unwrap_or(100_000), migration_prefetch_rows: cfg.migration_prefetch_rows.unwrap_or(100_000),
disable_encryption: cfg.disable_encryption,
shutdown, shutdown,
datadir, datadir,
selected_v2_drive: RwLock::new(None), selected_v2_drive: RwLock::new(None),
@@ -114,7 +110,7 @@ impl SetupContext {
setup_result: RwLock::new(None), setup_result: RwLock::new(None),
}))) })))
} }
#[instrument(skip_all)] #[instrument(skip(self))]
pub async fn db(&self, account: &AccountInfo) -> Result<PatchDb, Error> { pub async fn db(&self, account: &AccountInfo) -> Result<PatchDb, Error> {
let db_path = self.datadir.join("main").join("embassy.db"); let db_path = self.datadir.join("main").join("embassy.db");
let db = PatchDb::open(&db_path) let db = PatchDb::open(&db_path)
@@ -126,7 +122,7 @@ impl SetupContext {
} }
Ok(db) Ok(db)
} }
#[instrument(skip_all)] #[instrument(skip(self))]
pub async fn secret_store(&self) -> Result<PgPool, Error> { pub async fn secret_store(&self) -> Result<PgPool, Error> {
init_postgres(&self.datadir).await?; init_postgres(&self.datadir).await?;
let secret_store = let secret_store =
@@ -136,6 +132,15 @@ impl SetupContext {
.run(&secret_store) .run(&secret_store)
.await .await
.with_kind(crate::ErrorKind::Database)?; .with_kind(crate::ErrorKind::Database)?;
let old_db_path = self.datadir.join("main/secrets.db");
if tokio::fs::metadata(&old_db_path).await.is_ok() {
pgloader(
&old_db_path,
self.migration_batch_rows,
self.migration_prefetch_rows,
)
.await?;
}
Ok(secret_store) Ok(secret_store)
} }
} }

View File

@@ -61,7 +61,7 @@ impl StartReceipts {
} }
#[command(display(display_none), metadata(sync_db = true))] #[command(display(display_none), metadata(sync_db = true))]
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn start(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result<(), Error> { pub async fn start(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result<(), Error> {
let mut db = ctx.db.handle(); let mut db = ctx.db.handle();
let mut tx = db.begin().await?; let mut tx = db.begin().await?;
@@ -120,7 +120,7 @@ impl StopReceipts {
} }
} }
#[instrument(skip_all)] #[instrument(skip(db))]
pub async fn stop_common<Db: DbHandle>( pub async fn stop_common<Db: DbHandle>(
db: &mut Db, db: &mut Db,
id: &PackageId, id: &PackageId,
@@ -154,7 +154,7 @@ pub fn stop(#[arg] id: PackageId) -> Result<PackageId, Error> {
} }
#[command(rename = "dry", display(display_serializable))] #[command(rename = "dry", display(display_serializable))]
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn stop_dry( pub async fn stop_dry(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[parent_data] id: PackageId, #[parent_data] id: PackageId,
@@ -170,7 +170,7 @@ pub async fn stop_dry(
Ok(BreakageRes(breakages)) Ok(BreakageRes(breakages))
} }
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn stop_impl(ctx: RpcContext, id: PackageId) -> Result<MainStatus, Error> { pub async fn stop_impl(ctx: RpcContext, id: PackageId) -> Result<MainStatus, Error> {
let mut db = ctx.db.handle(); let mut db = ctx.db.handle();
let mut tx = db.begin().await?; let mut tx = db.begin().await?;

View File

@@ -4,10 +4,9 @@ pub mod package;
use std::future::Future; use std::future::Future;
use std::sync::Arc; use std::sync::Arc;
use color_eyre::eyre::eyre;
use futures::{FutureExt, SinkExt, StreamExt}; use futures::{FutureExt, SinkExt, StreamExt};
use patch_db::json_ptr::JsonPointer; use patch_db::json_ptr::JsonPointer;
use patch_db::{DbHandle, Dump, LockType, Revision}; use patch_db::{Dump, Revision};
use rpc_toolkit::command; use rpc_toolkit::command;
use rpc_toolkit::hyper::upgrade::Upgraded; use rpc_toolkit::hyper::upgrade::Upgraded;
use rpc_toolkit::hyper::{Body, Error as HyperError, Request, Response}; use rpc_toolkit::hyper::{Body, Error as HyperError, Request, Response};
@@ -25,11 +24,10 @@ use tracing::instrument;
pub use self::model::DatabaseModel; pub use self::model::DatabaseModel;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::middleware::auth::{HasValidSession, HashSessionToken}; use crate::middleware::auth::{HasValidSession, HashSessionToken};
use crate::util::display_none;
use crate::util::serde::{display_serializable, IoFormat}; use crate::util::serde::{display_serializable, IoFormat};
use crate::{Error, ResultExt}; use crate::{Error, ResultExt};
#[instrument(skip_all)] #[instrument(skip(ctx, session, ws_fut))]
async fn ws_handler< async fn ws_handler<
WSFut: Future<Output = Result<Result<WebSocketStream<Upgraded>, HyperError>, JoinError>>, WSFut: Future<Output = Result<Result<WebSocketStream<Upgraded>, HyperError>, JoinError>>,
>( >(
@@ -75,7 +73,7 @@ async fn subscribe_to_session_kill(
recv recv
} }
#[instrument(skip_all)] #[instrument(skip(_has_valid_authentication, kill, sub, stream))]
async fn deal_with_messages( async fn deal_with_messages(
_has_valid_authentication: HasValidSession, _has_valid_authentication: HasValidSession,
mut kill: oneshot::Receiver<()>, mut kill: oneshot::Receiver<()>,
@@ -165,7 +163,7 @@ pub async fn subscribe(ctx: RpcContext, req: Request<Body>) -> Result<Response<B
Ok(res) Ok(res)
} }
#[command(subcommands(revisions, dump, put, apply))] #[command(subcommands(revisions, dump, put))]
pub fn db() -> Result<(), RpcError> { pub fn db() -> Result<(), RpcError> {
Ok(()) Ok(())
} }
@@ -201,92 +199,13 @@ pub async fn dump(
Ok(ctx.db.dump().await?) Ok(ctx.db.dump().await?)
} }
fn apply_expr(input: jaq_core::Val, expr: &str) -> Result<jaq_core::Val, Error> {
let (expr, errs) = jaq_core::parse::parse(expr, jaq_core::parse::main());
let Some(expr) = expr else {
return Err(Error::new(
eyre!("Failed to parse expression: {:?}", errs),
crate::ErrorKind::InvalidRequest,
));
};
let mut errs = Vec::new();
let mut defs = jaq_core::Definitions::core();
for def in jaq_std::std() {
defs.insert(def, &mut errs);
}
let filter = defs.finish(expr, Vec::new(), &mut errs);
if !errs.is_empty() {
return Err(Error::new(
eyre!("Failed to compile expression: {:?}", errs),
crate::ErrorKind::InvalidRequest,
));
};
let inputs = jaq_core::RcIter::new(std::iter::empty());
let mut res_iter = filter.run(jaq_core::Ctx::new([], &inputs), input);
let Some(res) = res_iter
.next()
.transpose()
.map_err(|e| eyre!("{e}"))
.with_kind(crate::ErrorKind::Deserialization)?
else {
return Err(Error::new(
eyre!("expr returned no results"),
crate::ErrorKind::InvalidRequest,
));
};
if res_iter.next().is_some() {
return Err(Error::new(
eyre!("expr returned too many results"),
crate::ErrorKind::InvalidRequest,
));
}
Ok(res)
}
#[command(display(display_none))]
pub async fn apply(#[context] ctx: RpcContext, #[arg] expr: String) -> Result<(), Error> {
let mut db = ctx.db.handle();
DatabaseModel::new().lock(&mut db, LockType::Write).await?;
let root_ptr = JsonPointer::<String>::default();
let input = db.get_value(&root_ptr, None).await?;
let res = (|| {
let res = apply_expr(input.into(), &expr)?;
serde_json::from_value::<model::Database>(res.clone().into()).with_ctx(|_| {
(
crate::ErrorKind::Deserialization,
"result does not match database model",
)
})?;
Ok::<serde_json::Value, Error>(res.into())
})()?;
db.put_value(&root_ptr, &res).await?;
Ok(())
}
#[command(subcommands(ui))] #[command(subcommands(ui))]
pub fn put() -> Result<(), RpcError> { pub fn put() -> Result<(), RpcError> {
Ok(()) Ok(())
} }
#[command(display(display_serializable))] #[command(display(display_serializable))]
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn ui( pub async fn ui(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[arg] pointer: JsonPointer, #[arg] pointer: JsonPointer,

View File

@@ -49,7 +49,7 @@ impl Database {
last_wifi_region: None, last_wifi_region: None,
eos_version_compat: Current::new().compat().clone(), eos_version_compat: Current::new().compat().clone(),
lan_address, lan_address,
tor_address: format!("https://{}", account.key.tor_address()) tor_address: format!("http://{}", account.key.tor_address())
.parse() .parse()
.unwrap(), .unwrap(),
ip_info: BTreeMap::new(), ip_info: BTreeMap::new(),
@@ -80,7 +80,6 @@ impl Database {
.map(|x| format!("{x:X}")) .map(|x| format!("{x:X}"))
.join(":"), .join(":"),
system_start_time: Utc::now().to_rfc3339(), system_start_time: Utc::now().to_rfc3339(),
zram: false,
}, },
package_data: AllPackageData::default(), package_data: AllPackageData::default(),
ui: serde_json::from_str(include_str!("../../../frontend/patchdb-ui-seed.json")) ui: serde_json::from_str(include_str!("../../../frontend/patchdb-ui-seed.json"))
@@ -107,7 +106,6 @@ pub struct ServerInfo {
pub lan_address: Url, pub lan_address: Url,
pub tor_address: Url, pub tor_address: Url,
#[model] #[model]
#[serde(default)]
pub ip_info: BTreeMap<String, IpInfo>, pub ip_info: BTreeMap<String, IpInfo>,
#[model] #[model]
#[serde(default)] #[serde(default)]
@@ -119,8 +117,6 @@ pub struct ServerInfo {
pub pubkey: String, pub pubkey: String,
pub ca_fingerprint: String, pub ca_fingerprint: String,
pub system_start_time: String, pub system_start_time: String,
#[serde(default)]
pub zram: bool,
} }
#[derive(Debug, Deserialize, Serialize, HasModel)] #[derive(Debug, Deserialize, Serialize, HasModel)]

View File

@@ -191,7 +191,7 @@ impl DependencyError {
(DependencyError::Transitive, _) => DependencyError::Transitive, (DependencyError::Transitive, _) => DependencyError::Transitive,
} }
} }
#[instrument(skip_all)] #[instrument(skip(ctx, db, receipts))]
pub fn try_heal<'a, Db: DbHandle>( pub fn try_heal<'a, Db: DbHandle>(
self, self,
ctx: &'a RpcContext, ctx: &'a RpcContext,
@@ -237,16 +237,13 @@ impl DependencyError {
} }
} }
DependencyError::ConfigUnsatisfied { .. } => { DependencyError::ConfigUnsatisfied { .. } => {
let dependent_manifest = receipts let dependent_manifest =
.manifest receipts.manifest.get(db, id).await?.ok_or_else(not_found)?;
.get(db, id)
.await?
.ok_or_else(|| not_found!(id))?;
let dependency_manifest = receipts let dependency_manifest = receipts
.manifest .manifest
.get(db, dependency) .get(db, dependency)
.await? .await?
.ok_or_else(|| not_found!(dependency))?; .ok_or_else(not_found)?;
let dependency_config = if let Some(cfg) = dependency_config.take() { let dependency_config = if let Some(cfg) = dependency_config.take() {
cfg cfg
@@ -297,7 +294,7 @@ impl DependencyError {
.status .status
.get(db, dependency) .get(db, dependency)
.await? .await?
.ok_or_else(|| not_found!(dependency))?; .ok_or_else(not_found)?;
if status.main.running() { if status.main.running() {
DependencyError::HealthChecksFailed { DependencyError::HealthChecksFailed {
failures: BTreeMap::new(), failures: BTreeMap::new(),
@@ -313,7 +310,7 @@ impl DependencyError {
.status .status
.get(db, dependency) .get(db, dependency)
.await? .await?
.ok_or_else(|| not_found!(dependency))?; .ok_or_else(not_found)?;
match status.main { match status.main {
MainStatus::BackingUp { MainStatus::BackingUp {
started: Some(_), started: Some(_),
@@ -327,7 +324,7 @@ impl DependencyError {
.current_dependencies .current_dependencies
.get(db, id) .get(db, id)
.await? .await?
.ok_or_else(|| not_found!(id))? .ok_or_else(not_found)?
.get(dependency) .get(dependency)
.map(|x| x.health_checks.contains(&check)) .map(|x| x.health_checks.contains(&check))
.unwrap_or(false) .unwrap_or(false)
@@ -696,7 +693,7 @@ pub struct ConfigDryRes {
} }
#[command(rename = "dry", display(display_serializable))] #[command(rename = "dry", display(display_serializable))]
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn configure_dry( pub async fn configure_dry(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[parent_data] (pkg_id, dependency_id): (PackageId, PackageId), #[parent_data] (pkg_id, dependency_id): (PackageId, PackageId),
@@ -787,7 +784,7 @@ pub async fn configure_logic(
spec, spec,
}) })
} }
#[instrument(skip_all)] #[instrument(skip(db, current_dependencies, current_dependent_receipt))]
pub async fn add_dependent_to_current_dependents_lists<'a, Db: DbHandle>( pub async fn add_dependent_to_current_dependents_lists<'a, Db: DbHandle>(
db: &mut Db, db: &mut Db,
dependent_id: &PackageId, dependent_id: &PackageId,
@@ -922,7 +919,7 @@ impl BreakTransitiveReceipts {
} }
} }
#[instrument(skip_all)] #[instrument(skip(db, receipts))]
pub fn break_transitive<'a, Db: DbHandle>( pub fn break_transitive<'a, Db: DbHandle>(
db: &'a mut Db, db: &'a mut Db,
id: &'a PackageId, id: &'a PackageId,
@@ -937,7 +934,7 @@ pub fn break_transitive<'a, Db: DbHandle>(
.dependency_errors .dependency_errors
.get(&mut tx, id) .get(&mut tx, id)
.await? .await?
.ok_or_else(|| not_found!(id))?; .ok_or_else(not_found)?;
let old = dependency_errors.0.remove(dependency); let old = dependency_errors.0.remove(dependency);
let newly_broken = if let Some(e) = &old { let newly_broken = if let Some(e) = &old {
@@ -989,7 +986,7 @@ pub fn break_transitive<'a, Db: DbHandle>(
.boxed() .boxed()
} }
#[instrument(skip_all)] #[instrument(skip(ctx, db, locks))]
pub async fn heal_all_dependents_transitive<'a, Db: DbHandle>( pub async fn heal_all_dependents_transitive<'a, Db: DbHandle>(
ctx: &'a RpcContext, ctx: &'a RpcContext,
db: &'a mut Db, db: &'a mut Db,
@@ -1000,14 +997,14 @@ pub async fn heal_all_dependents_transitive<'a, Db: DbHandle>(
.current_dependents .current_dependents
.get(db, id) .get(db, id)
.await? .await?
.ok_or_else(|| not_found!(id))?; .ok_or_else(not_found)?;
for dependent in dependents.0.keys().filter(|dependent| id != *dependent) { for dependent in dependents.0.keys().filter(|dependent| id != *dependent) {
heal_transitive(ctx, db, dependent, id, locks).await?; heal_transitive(ctx, db, dependent, id, locks).await?;
} }
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(ctx, db, receipts))]
pub fn heal_transitive<'a, Db: DbHandle>( pub fn heal_transitive<'a, Db: DbHandle>(
ctx: &'a RpcContext, ctx: &'a RpcContext,
db: &'a mut Db, db: &'a mut Db,
@@ -1016,11 +1013,7 @@ pub fn heal_transitive<'a, Db: DbHandle>(
receipts: &'a DependencyReceipt, receipts: &'a DependencyReceipt,
) -> BoxFuture<'a, Result<(), Error>> { ) -> BoxFuture<'a, Result<(), Error>> {
async move { async move {
let mut status = receipts let mut status = receipts.status.get(db, id).await?.ok_or_else(not_found)?;
.status
.get(db, id)
.await?
.ok_or_else(|| not_found!(id))?;
let old = status.dependency_errors.0.remove(dependency); let old = status.dependency_errors.0.remove(dependency);
@@ -1029,7 +1022,7 @@ pub fn heal_transitive<'a, Db: DbHandle>(
.dependency .dependency
.get(db, (id, dependency)) .get(db, (id, dependency))
.await? .await?
.ok_or_else(|| not_found!(format!("{id}'s dependency: {dependency}")))?; .ok_or_else(not_found)?;
if let Some(new) = old if let Some(new) = old
.try_heal(ctx, db, id, dependency, None, &info, &receipts.try_heal) .try_heal(ctx, db, id, dependency, None, &info, &receipts.try_heal)
.await? .await?

View File

@@ -12,7 +12,7 @@ use crate::util::display_none;
use crate::{Error, ResultExt}; use crate::{Error, ResultExt};
#[command(cli_only, blocking, display(display_none))] #[command(cli_only, blocking, display(display_none))]
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub fn init(#[context] ctx: SdkContext) -> Result<(), Error> { pub fn init(#[context] ctx: SdkContext) -> Result<(), Error> {
if !ctx.developer_key_path.exists() { if !ctx.developer_key_path.exists() {
let parent = ctx.developer_key_path.parent().unwrap_or(Path::new("/")); let parent = ctx.developer_key_path.parent().unwrap_or(Path::new("/"));

View File

@@ -9,10 +9,11 @@ use crate::disk::repair;
use crate::init::SYSTEM_REBUILD_PATH; use crate::init::SYSTEM_REBUILD_PATH;
use crate::logs::{fetch_logs, LogResponse, LogSource}; use crate::logs::{fetch_logs, LogResponse, LogSource};
use crate::shutdown::Shutdown; use crate::shutdown::Shutdown;
use crate::system::SYSTEMD_UNIT;
use crate::util::display_none; use crate::util::display_none;
use crate::Error; use crate::Error;
pub const SYSTEMD_UNIT: &'static str = "embassy-init";
#[command(subcommands(error, logs, exit, restart, forget_disk, disk, rebuild))] #[command(subcommands(error, logs, exit, restart, forget_disk, disk, rebuild))]
pub fn diagnostic() -> Result<(), Error> { pub fn diagnostic() -> Result<(), Error> {
Ok(()) Ok(())

View File

@@ -7,10 +7,35 @@ use futures::FutureExt;
use tokio::process::Command; use tokio::process::Command;
use tracing::instrument; use tracing::instrument;
use crate::disk::fsck::RequiresReboot;
use crate::Error; use crate::Error;
#[instrument(skip_all)] #[derive(Debug, Clone, Copy)]
#[must_use]
pub struct RequiresReboot(pub bool);
impl std::ops::BitOrAssign for RequiresReboot {
fn bitor_assign(&mut self, rhs: Self) {
self.0 |= rhs.0
}
}
#[derive(Debug, Clone, Copy)]
pub enum RepairStrategy {
Preen,
Aggressive,
}
impl RepairStrategy {
pub async fn e2fsck(
&self,
logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> {
match self {
RepairStrategy::Preen => e2fsck_preen(logicalname).await,
RepairStrategy::Aggressive => e2fsck_aggressive(logicalname).await,
}
}
}
#[instrument]
pub async fn e2fsck_preen( pub async fn e2fsck_preen(
logicalname: impl AsRef<Path> + std::fmt::Debug, logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> { ) -> Result<RequiresReboot, Error> {
@@ -34,7 +59,7 @@ fn backup_existing_undo_file<'a>(path: &'a Path) -> BoxFuture<'a, Result<(), Err
.boxed() .boxed()
} }
#[instrument(skip_all)] #[instrument]
pub async fn e2fsck_aggressive( pub async fn e2fsck_aggressive(
logicalname: impl AsRef<Path> + std::fmt::Debug, logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> { ) -> Result<RequiresReboot, Error> {

View File

@@ -1,31 +0,0 @@
use std::path::Path;
use tokio::process::Command;
use tracing::instrument;
use crate::disk::fsck::RequiresReboot;
use crate::util::Invoke;
use crate::Error;
#[instrument(skip_all)]
pub async fn btrfs_check_readonly(logicalname: impl AsRef<Path>) -> Result<RequiresReboot, Error> {
Command::new("btrfs")
.arg("check")
.arg("--readonly")
.arg(logicalname.as_ref())
.invoke(crate::ErrorKind::DiskManagement)
.await?;
Ok(RequiresReboot(false))
}
pub async fn btrfs_check_repair(logicalname: impl AsRef<Path>) -> Result<RequiresReboot, Error> {
Command::new("btrfs")
.arg("check")
.arg("--repair")
.arg(logicalname.as_ref())
.invoke(crate::ErrorKind::DiskManagement)
.await?;
Ok(RequiresReboot(false))
}

View File

@@ -1,70 +0,0 @@
use std::path::Path;
use color_eyre::eyre::eyre;
use tokio::process::Command;
use crate::disk::fsck::btrfs::{btrfs_check_readonly, btrfs_check_repair};
use crate::disk::fsck::ext4::{e2fsck_aggressive, e2fsck_preen};
use crate::util::Invoke;
use crate::Error;
pub mod btrfs;
pub mod ext4;
#[derive(Debug, Clone, Copy)]
#[must_use]
pub struct RequiresReboot(pub bool);
impl std::ops::BitOrAssign for RequiresReboot {
fn bitor_assign(&mut self, rhs: Self) {
self.0 |= rhs.0
}
}
#[derive(Debug, Clone, Copy)]
pub enum RepairStrategy {
Preen,
Aggressive,
}
impl RepairStrategy {
pub async fn fsck(
&self,
logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> {
match &*String::from_utf8(
Command::new("grub-probe")
.arg("-d")
.arg(logicalname.as_ref())
.invoke(crate::ErrorKind::DiskManagement)
.await?,
)?
.trim()
{
"ext2" => self.e2fsck(logicalname).await,
"btrfs" => self.btrfs_check(logicalname).await,
fs => {
return Err(Error::new(
eyre!("Unknown filesystem {fs}"),
crate::ErrorKind::DiskManagement,
))
}
}
}
pub async fn e2fsck(
&self,
logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> {
match self {
RepairStrategy::Preen => e2fsck_preen(logicalname).await,
RepairStrategy::Aggressive => e2fsck_aggressive(logicalname).await,
}
}
pub async fn btrfs_check(
&self,
logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> {
match self {
RepairStrategy::Preen => btrfs_check_readonly(logicalname).await,
RepairStrategy::Aggressive => btrfs_check_repair(logicalname).await,
}
}
}

View File

@@ -13,32 +13,31 @@ use crate::disk::mount::util::unmount;
use crate::util::Invoke; use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt}; use crate::{Error, ErrorKind, ResultExt};
pub const PASSWORD_PATH: &'static str = "/run/embassy/password"; pub const PASSWORD_PATH: &'static str = "/etc/embassy/password";
pub const DEFAULT_PASSWORD: &'static str = "password"; pub const DEFAULT_PASSWORD: &'static str = "password";
pub const MAIN_FS_SIZE: FsSize = FsSize::Gigabytes(8); pub const MAIN_FS_SIZE: FsSize = FsSize::Gigabytes(8);
#[instrument(skip_all)] #[instrument(skip(disks, datadir, password))]
pub async fn create<I, P>( pub async fn create<I, P>(
disks: &I, disks: &I,
pvscan: &BTreeMap<PathBuf, Option<String>>, pvscan: &BTreeMap<PathBuf, Option<String>>,
datadir: impl AsRef<Path>, datadir: impl AsRef<Path>,
password: Option<&str>, password: &str,
) -> Result<String, Error> ) -> Result<String, Error>
where where
for<'a> &'a I: IntoIterator<Item = &'a P>, for<'a> &'a I: IntoIterator<Item = &'a P>,
P: AsRef<Path>, P: AsRef<Path>,
{ {
let guid = create_pool(disks, pvscan, password.is_some()).await?; let guid = create_pool(disks, pvscan).await?;
create_all_fs(&guid, &datadir, password).await?; create_all_fs(&guid, &datadir, password).await?;
export(&guid, datadir).await?; export(&guid, datadir).await?;
Ok(guid) Ok(guid)
} }
#[instrument(skip_all)] #[instrument(skip(disks))]
pub async fn create_pool<I, P>( pub async fn create_pool<I, P>(
disks: &I, disks: &I,
pvscan: &BTreeMap<PathBuf, Option<String>>, pvscan: &BTreeMap<PathBuf, Option<String>>,
encrypted: bool,
) -> Result<String, Error> ) -> Result<String, Error>
where where
for<'a> &'a I: IntoIterator<Item = &'a P>, for<'a> &'a I: IntoIterator<Item = &'a P>,
@@ -63,16 +62,13 @@ where
.invoke(crate::ErrorKind::DiskManagement) .invoke(crate::ErrorKind::DiskManagement)
.await?; .await?;
} }
let mut guid = format!( let guid = format!(
"EMBASSY_{}", "EMBASSY_{}",
base32::encode( base32::encode(
base32::Alphabet::RFC4648 { padding: false }, base32::Alphabet::RFC4648 { padding: false },
&rand::random::<[u8; 32]>(), &rand::random::<[u8; 32]>(),
) )
); );
if !encrypted {
guid += "_UNENC";
}
let mut cmd = Command::new("vgcreate"); let mut cmd = Command::new("vgcreate");
cmd.arg("-y").arg(&guid); cmd.arg("-y").arg(&guid);
for disk in disks { for disk in disks {
@@ -88,14 +84,17 @@ pub enum FsSize {
FreePercentage(usize), FreePercentage(usize),
} }
#[instrument(skip_all)] #[instrument(skip(datadir, password))]
pub async fn create_fs<P: AsRef<Path>>( pub async fn create_fs<P: AsRef<Path>>(
guid: &str, guid: &str,
datadir: P, datadir: P,
name: &str, name: &str,
size: FsSize, size: FsSize,
password: Option<&str>, password: &str,
) -> Result<(), Error> { ) -> Result<(), Error> {
tokio::fs::write(PASSWORD_PATH, password)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?;
let mut cmd = Command::new("lvcreate"); let mut cmd = Command::new("lvcreate");
match size { match size {
FsSize::Gigabytes(a) => cmd.arg("-L").arg(format!("{}G", a)), FsSize::Gigabytes(a) => cmd.arg("-L").arg(format!("{}G", a)),
@@ -107,49 +106,44 @@ pub async fn create_fs<P: AsRef<Path>>(
.arg(guid) .arg(guid)
.invoke(crate::ErrorKind::DiskManagement) .invoke(crate::ErrorKind::DiskManagement)
.await?; .await?;
let mut blockdev_path = Path::new("/dev").join(guid).join(name); Command::new("cryptsetup")
if let Some(password) = password { .arg("-q")
if let Some(parent) = Path::new(PASSWORD_PATH).parent() { .arg("luksFormat")
tokio::fs::create_dir_all(parent).await?; .arg(format!("--key-file={}", PASSWORD_PATH))
} .arg(format!("--keyfile-size={}", password.len()))
tokio::fs::write(PASSWORD_PATH, password) .arg(Path::new("/dev").join(guid).join(name))
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?;
Command::new("cryptsetup")
.arg("-q")
.arg("luksFormat")
.arg(format!("--key-file={}", PASSWORD_PATH))
.arg(format!("--keyfile-size={}", password.len()))
.arg(&blockdev_path)
.invoke(crate::ErrorKind::DiskManagement)
.await?;
Command::new("cryptsetup")
.arg("-q")
.arg("luksOpen")
.arg(format!("--key-file={}", PASSWORD_PATH))
.arg(format!("--keyfile-size={}", password.len()))
.arg(&blockdev_path)
.arg(format!("{}_{}", guid, name))
.invoke(crate::ErrorKind::DiskManagement)
.await?;
tokio::fs::remove_file(PASSWORD_PATH)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?;
blockdev_path = Path::new("/dev/mapper").join(format!("{}_{}", guid, name));
}
Command::new("mkfs.btrfs")
.arg(&blockdev_path)
.invoke(crate::ErrorKind::DiskManagement) .invoke(crate::ErrorKind::DiskManagement)
.await?; .await?;
mount(&blockdev_path, datadir.as_ref().join(name), ReadWrite).await?; Command::new("cryptsetup")
.arg("-q")
.arg("luksOpen")
.arg(format!("--key-file={}", PASSWORD_PATH))
.arg(format!("--keyfile-size={}", password.len()))
.arg(Path::new("/dev").join(guid).join(name))
.arg(format!("{}_{}", guid, name))
.invoke(crate::ErrorKind::DiskManagement)
.await?;
Command::new("mkfs.ext4")
.arg(Path::new("/dev/mapper").join(format!("{}_{}", guid, name)))
.invoke(crate::ErrorKind::DiskManagement)
.await?;
mount(
Path::new("/dev/mapper").join(format!("{}_{}", guid, name)),
datadir.as_ref().join(name),
ReadWrite,
)
.await?;
tokio::fs::remove_file(PASSWORD_PATH)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?;
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(datadir, password))]
pub async fn create_all_fs<P: AsRef<Path>>( pub async fn create_all_fs<P: AsRef<Path>>(
guid: &str, guid: &str,
datadir: P, datadir: P,
password: Option<&str>, password: &str,
) -> Result<(), Error> { ) -> Result<(), Error> {
create_fs(guid, &datadir, "main", MAIN_FS_SIZE, password).await?; create_fs(guid, &datadir, "main", MAIN_FS_SIZE, password).await?;
create_fs( create_fs(
@@ -163,22 +157,20 @@ pub async fn create_all_fs<P: AsRef<Path>>(
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(datadir))]
pub async fn unmount_fs<P: AsRef<Path>>(guid: &str, datadir: P, name: &str) -> Result<(), Error> { pub async fn unmount_fs<P: AsRef<Path>>(guid: &str, datadir: P, name: &str) -> Result<(), Error> {
unmount(datadir.as_ref().join(name)).await?; unmount(datadir.as_ref().join(name)).await?;
if !guid.ends_with("_UNENC") { Command::new("cryptsetup")
Command::new("cryptsetup") .arg("-q")
.arg("-q") .arg("luksClose")
.arg("luksClose") .arg(format!("{}_{}", guid, name))
.arg(format!("{}_{}", guid, name)) .invoke(crate::ErrorKind::DiskManagement)
.invoke(crate::ErrorKind::DiskManagement) .await?;
.await?;
}
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(datadir))]
pub async fn unmount_all_fs<P: AsRef<Path>>(guid: &str, datadir: P) -> Result<(), Error> { pub async fn unmount_all_fs<P: AsRef<Path>>(guid: &str, datadir: P) -> Result<(), Error> {
unmount_fs(guid, &datadir, "main").await?; unmount_fs(guid, &datadir, "main").await?;
unmount_fs(guid, &datadir, "package-data").await?; unmount_fs(guid, &datadir, "package-data").await?;
@@ -189,7 +181,7 @@ pub async fn unmount_all_fs<P: AsRef<Path>>(guid: &str, datadir: P) -> Result<()
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(datadir))]
pub async fn export<P: AsRef<Path>>(guid: &str, datadir: P) -> Result<(), Error> { pub async fn export<P: AsRef<Path>>(guid: &str, datadir: P) -> Result<(), Error> {
Command::new("sync").invoke(ErrorKind::Filesystem).await?; Command::new("sync").invoke(ErrorKind::Filesystem).await?;
unmount_all_fs(guid, datadir).await?; unmount_all_fs(guid, datadir).await?;
@@ -205,12 +197,12 @@ pub async fn export<P: AsRef<Path>>(guid: &str, datadir: P) -> Result<(), Error>
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(datadir, password))]
pub async fn import<P: AsRef<Path>>( pub async fn import<P: AsRef<Path>>(
guid: &str, guid: &str,
datadir: P, datadir: P,
repair: RepairStrategy, repair: RepairStrategy,
password: Option<&str>, password: &str,
) -> Result<RequiresReboot, Error> { ) -> Result<RequiresReboot, Error> {
let scan = pvscan().await?; let scan = pvscan().await?;
if scan if scan
@@ -221,7 +213,7 @@ pub async fn import<P: AsRef<Path>>(
.is_none() .is_none()
{ {
return Err(Error::new( return Err(Error::new(
eyre!("StartOS disk not found."), eyre!("Embassy disk not found."),
crate::ErrorKind::DiskNotAvailable, crate::ErrorKind::DiskNotAvailable,
)); ));
} }
@@ -231,7 +223,7 @@ pub async fn import<P: AsRef<Path>>(
.any(|id| id == guid) .any(|id| id == guid)
{ {
return Err(Error::new( return Err(Error::new(
eyre!("A StartOS disk was found, but it is not the correct disk for this device."), eyre!("An Embassy disk was found, but it is not the correct disk for this device."),
crate::ErrorKind::IncorrectDisk, crate::ErrorKind::IncorrectDisk,
)); ));
} }
@@ -262,72 +254,43 @@ pub async fn import<P: AsRef<Path>>(
mount_all_fs(guid, datadir, repair, password).await mount_all_fs(guid, datadir, repair, password).await
} }
#[instrument(skip_all)] #[instrument(skip(datadir, password))]
pub async fn mount_fs<P: AsRef<Path>>( pub async fn mount_fs<P: AsRef<Path>>(
guid: &str, guid: &str,
datadir: P, datadir: P,
name: &str, name: &str,
repair: RepairStrategy, repair: RepairStrategy,
password: Option<&str>, password: &str,
) -> Result<RequiresReboot, Error> { ) -> Result<RequiresReboot, Error> {
let orig_path = Path::new("/dev").join(guid).join(name); tokio::fs::write(PASSWORD_PATH, password)
let mut blockdev_path = orig_path.clone(); .await
let full_name = format!("{}_{}", guid, name); .with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?;
if !guid.ends_with("_UNENC") { Command::new("cryptsetup")
let password = password.unwrap_or(DEFAULT_PASSWORD); .arg("-q")
if let Some(parent) = Path::new(PASSWORD_PATH).parent() { .arg("luksOpen")
tokio::fs::create_dir_all(parent).await?; .arg(format!("--key-file={}", PASSWORD_PATH))
} .arg(format!("--keyfile-size={}", password.len()))
tokio::fs::write(PASSWORD_PATH, password) .arg(Path::new("/dev").join(guid).join(name))
.await .arg(format!("{}_{}", guid, name))
.with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?; .invoke(crate::ErrorKind::DiskManagement)
Command::new("cryptsetup") .await?;
.arg("-q") let mapper_path = Path::new("/dev/mapper").join(format!("{}_{}", guid, name));
.arg("luksOpen") let reboot = repair.e2fsck(&mapper_path).await?;
.arg(format!("--key-file={}", PASSWORD_PATH)) mount(&mapper_path, datadir.as_ref().join(name), ReadWrite).await?;
.arg(format!("--keyfile-size={}", password.len()))
.arg(&blockdev_path)
.arg(&full_name)
.invoke(crate::ErrorKind::DiskManagement)
.await?;
tokio::fs::remove_file(PASSWORD_PATH)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?;
blockdev_path = Path::new("/dev/mapper").join(&full_name);
}
let reboot = repair.fsck(&blockdev_path).await?;
if !guid.ends_with("_UNENC") { tokio::fs::remove_file(PASSWORD_PATH)
// Backup LUKS header if e2fsck succeeded .await
let luks_folder = Path::new("/media/embassy/config/luks"); .with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?;
tokio::fs::create_dir_all(luks_folder).await?;
let tmp_luks_bak = luks_folder.join(format!(".{full_name}.luks.bak.tmp"));
if tokio::fs::metadata(&tmp_luks_bak).await.is_ok() {
tokio::fs::remove_file(&tmp_luks_bak).await?;
}
let luks_bak = luks_folder.join(format!("{full_name}.luks.bak"));
Command::new("cryptsetup")
.arg("-q")
.arg("luksHeaderBackup")
.arg("--header-backup-file")
.arg(&tmp_luks_bak)
.arg(&orig_path)
.invoke(crate::ErrorKind::DiskManagement)
.await?;
tokio::fs::rename(&tmp_luks_bak, &luks_bak).await?;
}
mount(&blockdev_path, datadir.as_ref().join(name), ReadWrite).await?;
Ok(reboot) Ok(reboot)
} }
#[instrument(skip_all)] #[instrument(skip(datadir, password))]
pub async fn mount_all_fs<P: AsRef<Path>>( pub async fn mount_all_fs<P: AsRef<Path>>(
guid: &str, guid: &str,
datadir: P, datadir: P,
repair: RepairStrategy, repair: RepairStrategy,
password: Option<&str>, password: &str,
) -> Result<RequiresReboot, Error> { ) -> Result<RequiresReboot, Error> {
let mut reboot = RequiresReboot(false); let mut reboot = RequiresReboot(false);
reboot |= mount_fs(guid, &datadir, "main", repair, password).await?; reboot |= mount_fs(guid, &datadir, "main", repair, password).await?;

View File

@@ -22,7 +22,6 @@ pub const REPAIR_DISK_PATH: &str = "/media/embassy/config/repair-disk";
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
pub struct OsPartitionInfo { pub struct OsPartitionInfo {
pub efi: Option<PathBuf>, pub efi: Option<PathBuf>,
pub bios: Option<PathBuf>,
pub boot: PathBuf, pub boot: PathBuf,
pub root: PathBuf, pub root: PathBuf,
} }
@@ -32,11 +31,6 @@ impl OsPartitionInfo {
.as_ref() .as_ref()
.map(|p| p == logicalname.as_ref()) .map(|p| p == logicalname.as_ref())
.unwrap_or(false) .unwrap_or(false)
|| self
.bios
.as_ref()
.map(|p| p == logicalname.as_ref())
.unwrap_or(false)
|| &*self.boot == logicalname.as_ref() || &*self.boot == logicalname.as_ref()
|| &*self.root == logicalname.as_ref() || &*self.root == logicalname.as_ref()
} }

View File

@@ -35,7 +35,7 @@ impl<G: GenericMountGuard> BackupMountGuard<G> {
} }
} }
#[instrument(skip_all)] #[instrument(skip(password))]
pub async fn mount(backup_disk_mount_guard: G, password: &str) -> Result<Self, Error> { pub async fn mount(backup_disk_mount_guard: G, password: &str) -> Result<Self, Error> {
let backup_disk_path = backup_disk_mount_guard.as_ref(); let backup_disk_path = backup_disk_mount_guard.as_ref();
let unencrypted_metadata_path = let unencrypted_metadata_path =
@@ -145,7 +145,7 @@ impl<G: GenericMountGuard> BackupMountGuard<G> {
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(self))]
pub async fn mount_package_backup( pub async fn mount_package_backup(
&self, &self,
id: &PackageId, id: &PackageId,
@@ -159,7 +159,7 @@ impl<G: GenericMountGuard> BackupMountGuard<G> {
}) })
} }
#[instrument(skip_all)] #[instrument(skip(self))]
pub async fn save(&self) -> Result<(), Error> { pub async fn save(&self) -> Result<(), Error> {
let metadata_path = self.as_ref().join("metadata.cbor"); let metadata_path = self.as_ref().join("metadata.cbor");
let backup_disk_path = self.backup_disk_path(); let backup_disk_path = self.backup_disk_path();
@@ -180,7 +180,7 @@ impl<G: GenericMountGuard> BackupMountGuard<G> {
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(self))]
pub async fn unmount(mut self) -> Result<(), Error> { pub async fn unmount(mut self) -> Result<(), Error> {
if let Some(guard) = self.encrypted_guard.take() { if let Some(guard) = self.encrypted_guard.take() {
guard.unmount().await?; guard.unmount().await?;
@@ -191,7 +191,7 @@ impl<G: GenericMountGuard> BackupMountGuard<G> {
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(self))]
pub async fn save_and_unmount(self) -> Result<(), Error> { pub async fn save_and_unmount(self) -> Result<(), Error> {
self.save().await?; self.save().await?;
self.unmount().await?; self.unmount().await?;

View File

@@ -16,9 +16,6 @@ use crate::util::Invoke;
use crate::Error; use crate::Error;
async fn resolve_hostname(hostname: &str) -> Result<IpAddr, Error> { async fn resolve_hostname(hostname: &str) -> Result<IpAddr, Error> {
if let Ok(addr) = hostname.parse() {
return Ok(addr);
}
#[cfg(feature = "avahi")] #[cfg(feature = "avahi")]
if hostname.ends_with(".local") { if hostname.ends_with(".local") {
return Ok(IpAddr::V4(crate::net::mdns::resolve_mdns(hostname).await?)); return Ok(IpAddr::V4(crate::net::mdns::resolve_mdns(hostname).await?));
@@ -36,7 +33,7 @@ async fn resolve_hostname(hostname: &str) -> Result<IpAddr, Error> {
.parse()?) .parse()?)
} }
#[instrument(skip_all)] #[instrument(skip(path, password, mountpoint))]
pub async fn mount_cifs( pub async fn mount_cifs(
hostname: &str, hostname: &str,
path: impl AsRef<Path>, path: impl AsRef<Path>,

View File

@@ -1,3 +1,4 @@
use std::os::unix::ffi::OsStrExt;
use std::path::Path; use std::path::Path;
use async_trait::async_trait; use async_trait::async_trait;

View File

@@ -95,7 +95,7 @@ pub struct TmpMountGuard {
} }
impl TmpMountGuard { impl TmpMountGuard {
/// DRAGONS: if you try to mount something as ro and rw at the same time, the ro mount will be upgraded to rw. /// DRAGONS: if you try to mount something as ro and rw at the same time, the ro mount will be upgraded to rw.
#[instrument(skip_all)] #[instrument(skip(filesystem))]
pub async fn mount(filesystem: &impl FileSystem, mount_type: MountType) -> Result<Self, Error> { pub async fn mount(filesystem: &impl FileSystem, mount_type: MountType) -> Result<Self, Error> {
let mountpoint = tmp_mountpoint(filesystem).await?; let mountpoint = tmp_mountpoint(filesystem).await?;
let mut tmp_mounts = TMP_MOUNTS.lock().await; let mut tmp_mounts = TMP_MOUNTS.lock().await;

View File

@@ -5,7 +5,7 @@ use tracing::instrument;
use crate::util::Invoke; use crate::util::Invoke;
use crate::{Error, ResultExt}; use crate::{Error, ResultExt};
#[instrument(skip_all)] #[instrument(skip(src, dst))]
pub async fn bind<P0: AsRef<Path>, P1: AsRef<Path>>( pub async fn bind<P0: AsRef<Path>, P1: AsRef<Path>>(
src: P0, src: P0,
dst: P1, dst: P1,
@@ -40,7 +40,7 @@ pub async fn bind<P0: AsRef<Path>, P1: AsRef<Path>>(
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(mountpoint))]
pub async fn unmount<P: AsRef<Path>>(mountpoint: P) -> Result<(), Error> { pub async fn unmount<P: AsRef<Path>>(mountpoint: P) -> Result<(), Error> {
tracing::debug!("Unmounting {}.", mountpoint.as_ref().display()); tracing::debug!("Unmounting {}.", mountpoint.as_ref().display());
tokio::process::Command::new("umount") tokio::process::Command::new("umount")

View File

@@ -1,4 +1,4 @@
use std::collections::{BTreeMap, BTreeSet}; use std::collections::BTreeMap;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use color_eyre::eyre::{self, eyre}; use color_eyre::eyre::{self, eyre};
@@ -69,7 +69,7 @@ lazy_static::lazy_static! {
static ref PARTITION_REGEX: Regex = Regex::new("-part[0-9]+$").unwrap(); static ref PARTITION_REGEX: Regex = Regex::new("-part[0-9]+$").unwrap();
} }
#[instrument(skip_all)] #[instrument(skip(path))]
pub async fn get_partition_table<P: AsRef<Path>>(path: P) -> Result<Option<PartitionTable>, Error> { pub async fn get_partition_table<P: AsRef<Path>>(path: P) -> Result<Option<PartitionTable>, Error> {
Ok(String::from_utf8( Ok(String::from_utf8(
Command::new("fdisk") Command::new("fdisk")
@@ -87,7 +87,7 @@ pub async fn get_partition_table<P: AsRef<Path>>(path: P) -> Result<Option<Parti
})) }))
} }
#[instrument(skip_all)] #[instrument(skip(path))]
pub async fn get_vendor<P: AsRef<Path>>(path: P) -> Result<Option<String>, Error> { pub async fn get_vendor<P: AsRef<Path>>(path: P) -> Result<Option<String>, Error> {
let vendor = tokio::fs::read_to_string( let vendor = tokio::fs::read_to_string(
Path::new(SYS_BLOCK_PATH) Path::new(SYS_BLOCK_PATH)
@@ -110,7 +110,7 @@ pub async fn get_vendor<P: AsRef<Path>>(path: P) -> Result<Option<String>, Error
}) })
} }
#[instrument(skip_all)] #[instrument(skip(path))]
pub async fn get_model<P: AsRef<Path>>(path: P) -> Result<Option<String>, Error> { pub async fn get_model<P: AsRef<Path>>(path: P) -> Result<Option<String>, Error> {
let model = tokio::fs::read_to_string( let model = tokio::fs::read_to_string(
Path::new(SYS_BLOCK_PATH) Path::new(SYS_BLOCK_PATH)
@@ -129,7 +129,7 @@ pub async fn get_model<P: AsRef<Path>>(path: P) -> Result<Option<String>, Error>
Ok(if model.is_empty() { None } else { Some(model) }) Ok(if model.is_empty() { None } else { Some(model) })
} }
#[instrument(skip_all)] #[instrument(skip(path))]
pub async fn get_capacity<P: AsRef<Path>>(path: P) -> Result<u64, Error> { pub async fn get_capacity<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
Ok(String::from_utf8( Ok(String::from_utf8(
Command::new("blockdev") Command::new("blockdev")
@@ -142,7 +142,7 @@ pub async fn get_capacity<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
.parse::<u64>()?) .parse::<u64>()?)
} }
#[instrument(skip_all)] #[instrument(skip(path))]
pub async fn get_label<P: AsRef<Path>>(path: P) -> Result<Option<String>, Error> { pub async fn get_label<P: AsRef<Path>>(path: P) -> Result<Option<String>, Error> {
let label = String::from_utf8( let label = String::from_utf8(
Command::new("lsblk") Command::new("lsblk")
@@ -157,7 +157,7 @@ pub async fn get_label<P: AsRef<Path>>(path: P) -> Result<Option<String>, Error>
Ok(if label.is_empty() { None } else { Some(label) }) Ok(if label.is_empty() { None } else { Some(label) })
} }
#[instrument(skip_all)] #[instrument(skip(path))]
pub async fn get_used<P: AsRef<Path>>(path: P) -> Result<u64, Error> { pub async fn get_used<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
Ok(String::from_utf8( Ok(String::from_utf8(
Command::new("df") Command::new("df")
@@ -175,7 +175,7 @@ pub async fn get_used<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
.parse::<u64>()?) .parse::<u64>()?)
} }
#[instrument(skip_all)] #[instrument(skip(path))]
pub async fn get_available<P: AsRef<Path>>(path: P) -> Result<u64, Error> { pub async fn get_available<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
Ok(String::from_utf8( Ok(String::from_utf8(
Command::new("df") Command::new("df")
@@ -193,7 +193,7 @@ pub async fn get_available<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
.parse::<u64>()?) .parse::<u64>()?)
} }
#[instrument(skip_all)] #[instrument(skip(path))]
pub async fn get_percentage<P: AsRef<Path>>(path: P) -> Result<u64, Error> { pub async fn get_percentage<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
Ok(String::from_utf8( Ok(String::from_utf8(
Command::new("df") Command::new("df")
@@ -212,7 +212,7 @@ pub async fn get_percentage<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
.parse::<u64>()?) .parse::<u64>()?)
} }
#[instrument(skip_all)] #[instrument]
pub async fn pvscan() -> Result<BTreeMap<PathBuf, Option<String>>, Error> { pub async fn pvscan() -> Result<BTreeMap<PathBuf, Option<String>>, Error> {
let pvscan_out = Command::new("pvscan") let pvscan_out = Command::new("pvscan")
.invoke(crate::ErrorKind::DiskManagement) .invoke(crate::ErrorKind::DiskManagement)
@@ -248,10 +248,10 @@ pub async fn recovery_info(
Ok(None) Ok(None)
} }
#[instrument(skip_all)] #[instrument]
pub async fn list(os: &OsPartitionInfo) -> Result<Vec<DiskInfo>, Error> { pub async fn list(os: &OsPartitionInfo) -> Result<Vec<DiskInfo>, Error> {
struct DiskIndex { struct DiskIndex {
parts: BTreeSet<PathBuf>, parts: IndexSet<PathBuf>,
internal: bool, internal: bool,
} }
let disk_guids = pvscan().await?; let disk_guids = pvscan().await?;
@@ -301,7 +301,7 @@ pub async fn list(os: &OsPartitionInfo) -> Result<Vec<DiskInfo>, Error> {
disks.insert( disks.insert(
disk.clone(), disk.clone(),
DiskIndex { DiskIndex {
parts: BTreeSet::new(), parts: IndexSet::new(),
internal: false, internal: false,
}, },
); );
@@ -324,13 +324,11 @@ pub async fn list(os: &OsPartitionInfo) -> Result<Vec<DiskInfo>, Error> {
if index.internal { if index.internal {
for part in index.parts { for part in index.parts {
let mut disk_info = disk_info(disk.clone()).await; let mut disk_info = disk_info(disk.clone()).await;
let part_info = part_info(part).await; disk_info.logicalname = part;
disk_info.logicalname = part_info.logicalname.clone();
disk_info.capacity = part_info.capacity;
if let Some(g) = disk_guids.get(&disk_info.logicalname) { if let Some(g) = disk_guids.get(&disk_info.logicalname) {
disk_info.guid = g.clone(); disk_info.guid = g.clone();
} else { } else {
disk_info.partitions = vec![part_info]; disk_info.partitions = vec![part_info(disk_info.logicalname.clone()).await];
} }
res.push(disk_info); res.push(disk_info);
} }

View File

@@ -44,7 +44,7 @@ pub fn generate_id() -> String {
id.to_string() id.to_string()
} }
#[instrument(skip_all)] #[instrument]
pub async fn get_current_hostname() -> Result<Hostname, Error> { pub async fn get_current_hostname() -> Result<Hostname, Error> {
let out = Command::new("hostname") let out = Command::new("hostname")
.invoke(ErrorKind::ParseSysInfo) .invoke(ErrorKind::ParseSysInfo)
@@ -53,11 +53,10 @@ pub async fn get_current_hostname() -> Result<Hostname, Error> {
Ok(Hostname(out_string.trim().to_owned())) Ok(Hostname(out_string.trim().to_owned()))
} }
#[instrument(skip_all)] #[instrument]
pub async fn set_hostname(hostname: &Hostname) -> Result<(), Error> { pub async fn set_hostname(hostname: &Hostname) -> Result<(), Error> {
let hostname: &String = &hostname.0; let hostname: &String = &hostname.0;
Command::new("hostnamectl") let _out = Command::new("hostnamectl")
.arg("--static")
.arg("set-hostname") .arg("set-hostname")
.arg(hostname) .arg(hostname)
.invoke(ErrorKind::ParseSysInfo) .invoke(ErrorKind::ParseSysInfo)
@@ -65,9 +64,9 @@ pub async fn set_hostname(hostname: &Hostname) -> Result<(), Error> {
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument]
pub async fn sync_hostname(hostname: &Hostname) -> Result<(), Error> { pub async fn sync_hostname(account: &AccountInfo) -> Result<(), Error> {
set_hostname(hostname).await?; set_hostname(&account.hostname).await?;
Command::new("systemctl") Command::new("systemctl")
.arg("restart") .arg("restart")
.arg("avahi-daemon") .arg("avahi-daemon")

View File

@@ -1,7 +1,8 @@
use std::collections::HashMap; use std::collections::{BTreeMap, HashMap};
use std::fs::Permissions; use std::fs::Permissions;
use std::os::unix::fs::PermissionsExt; use std::os::unix::fs::PermissionsExt;
use std::path::Path; use std::path::Path;
use std::process::Stdio;
use std::time::Duration; use std::time::Duration;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
@@ -14,8 +15,7 @@ use tokio::process::Command;
use crate::account::AccountInfo; use crate::account::AccountInfo;
use crate::context::rpc::RpcContextConfig; use crate::context::rpc::RpcContextConfig;
use crate::db::model::{ServerInfo, ServerStatus}; use crate::db::model::{IpInfo, ServerStatus};
use crate::disk::mount::util::unmount;
use crate::install::PKG_ARCHIVE_DIR; use crate::install::PKG_ARCHIVE_DIR;
use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH; use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH;
use crate::sound::BEP; use crate::sound::BEP;
@@ -40,18 +40,17 @@ pub async fn check_time_is_synchronized() -> Result<bool, Error> {
} }
pub struct InitReceipts { pub struct InitReceipts {
pub server_info: LockReceipt<ServerInfo, ()>,
pub server_version: LockReceipt<crate::util::Version, ()>, pub server_version: LockReceipt<crate::util::Version, ()>,
pub version_range: LockReceipt<emver::VersionRange, ()>, pub version_range: LockReceipt<emver::VersionRange, ()>,
pub last_wifi_region: LockReceipt<Option<isocountry::CountryCode>, ()>,
pub status_info: LockReceipt<ServerStatus, ()>,
pub ip_info: LockReceipt<BTreeMap<String, IpInfo>, ()>,
pub system_start_time: LockReceipt<String, ()>,
} }
impl InitReceipts { impl InitReceipts {
pub async fn new(db: &mut impl DbHandle) -> Result<Self, Error> { pub async fn new(db: &mut impl DbHandle) -> Result<Self, Error> {
let mut locks = Vec::new(); let mut locks = Vec::new();
let server_info = crate::db::DatabaseModel::new()
.server_info()
.make_locker(LockType::Write)
.add_to_keys(&mut locks);
let server_version = crate::db::DatabaseModel::new() let server_version = crate::db::DatabaseModel::new()
.server_info() .server_info()
.version() .version()
@@ -62,29 +61,112 @@ impl InitReceipts {
.eos_version_compat() .eos_version_compat()
.make_locker(LockType::Write) .make_locker(LockType::Write)
.add_to_keys(&mut locks); .add_to_keys(&mut locks);
let last_wifi_region = crate::db::DatabaseModel::new()
.server_info()
.last_wifi_region()
.make_locker(LockType::Write)
.add_to_keys(&mut locks);
let ip_info = crate::db::DatabaseModel::new()
.server_info()
.ip_info()
.make_locker(LockType::Write)
.add_to_keys(&mut locks);
let status_info = crate::db::DatabaseModel::new()
.server_info()
.status_info()
.into_model()
.make_locker(LockType::Write)
.add_to_keys(&mut locks);
let system_start_time = crate::db::DatabaseModel::new()
.server_info()
.system_start_time()
.make_locker(LockType::Write)
.add_to_keys(&mut locks);
let skeleton_key = db.lock_all(locks).await?; let skeleton_key = db.lock_all(locks).await?;
Ok(Self { Ok(Self {
server_info: server_info.verify(&skeleton_key)?,
server_version: server_version.verify(&skeleton_key)?, server_version: server_version.verify(&skeleton_key)?,
version_range: version_range.verify(&skeleton_key)?, version_range: version_range.verify(&skeleton_key)?,
ip_info: ip_info.verify(&skeleton_key)?,
status_info: status_info.verify(&skeleton_key)?,
last_wifi_region: last_wifi_region.verify(&skeleton_key)?,
system_start_time: system_start_time.verify(&skeleton_key)?,
}) })
} }
} }
pub async fn pgloader(
old_db_path: impl AsRef<Path>,
batch_rows: usize,
prefetch_rows: usize,
) -> Result<(), Error> {
tokio::fs::write(
"/etc/embassy/migrate.load",
format!(
include_str!("migrate.load"),
sqlite_path = old_db_path.as_ref().display(),
batch_rows = batch_rows,
prefetch_rows = prefetch_rows
),
)
.await?;
match tokio::fs::remove_dir_all("/tmp/pgloader").await {
Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(()),
a => a,
}?;
tracing::info!("Running pgloader");
let out = Command::new("pgloader")
.arg("-v")
.arg("/etc/embassy/migrate.load")
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.output()
.await?;
let stdout = String::from_utf8(out.stdout)?;
for line in stdout.lines() {
tracing::debug!("pgloader: {}", line);
}
let stderr = String::from_utf8(out.stderr)?;
for line in stderr.lines() {
tracing::debug!("pgloader err: {}", line);
}
tracing::debug!("pgloader exited with code {:?}", out.status);
if let Some(err) = stdout.lines().chain(stderr.lines()).find_map(|l| {
if l.split_ascii_whitespace()
.any(|word| word == "ERROR" || word == "FATAL")
{
Some(l)
} else {
None
}
}) {
return Err(Error::new(
eyre!("pgloader error: {}", err),
crate::ErrorKind::Database,
));
}
tokio::fs::rename(
old_db_path.as_ref(),
old_db_path.as_ref().with_extension("bak"),
)
.await?;
Ok(())
}
// must be idempotent // must be idempotent
pub async fn init_postgres(datadir: impl AsRef<Path>) -> Result<(), Error> { pub async fn init_postgres(datadir: impl AsRef<Path>) -> Result<(), Error> {
let db_dir = datadir.as_ref().join("main/postgresql"); let db_dir = datadir.as_ref().join("main/postgresql");
if tokio::process::Command::new("mountpoint") let is_mountpoint = || async {
.arg("/var/lib/postgresql") Ok::<_, Error>(
.stdout(std::process::Stdio::null()) tokio::process::Command::new("mountpoint")
.stderr(std::process::Stdio::null()) .arg("/var/lib/postgresql")
.status() .stdout(std::process::Stdio::null())
.await? .stderr(std::process::Stdio::null())
.success() .status()
{ .await?
unmount("/var/lib/postgresql").await?; .success(),
} )
};
let exists = tokio::fs::metadata(&db_dir).await.is_ok(); let exists = tokio::fs::metadata(&db_dir).await.is_ok();
if !exists { if !exists {
Command::new("cp") Command::new("cp")
@@ -94,73 +176,18 @@ pub async fn init_postgres(datadir: impl AsRef<Path>) -> Result<(), Error> {
.invoke(crate::ErrorKind::Filesystem) .invoke(crate::ErrorKind::Filesystem)
.await?; .await?;
} }
if !is_mountpoint().await? {
crate::disk::mount::util::bind(&db_dir, "/var/lib/postgresql", false).await?;
}
Command::new("chown") Command::new("chown")
.arg("-R") .arg("-R")
.arg("postgres:postgres") .arg("postgres")
.arg(&db_dir) .arg("/var/lib/postgresql")
.invoke(crate::ErrorKind::Database) .invoke(crate::ErrorKind::Database)
.await?; .await?;
let mut pg_paths = tokio::fs::read_dir("/usr/lib/postgresql").await?;
let mut pg_version = None;
while let Some(pg_path) = pg_paths.next_entry().await? {
let pg_path_version = pg_path
.file_name()
.to_str()
.map(|v| v.parse())
.transpose()?
.unwrap_or(0);
if pg_path_version > pg_version.unwrap_or(0) {
pg_version = Some(pg_path_version)
}
}
let pg_version = pg_version.ok_or_else(|| {
Error::new(
eyre!("could not determine postgresql version"),
crate::ErrorKind::Database,
)
})?;
crate::disk::mount::util::bind(&db_dir, "/var/lib/postgresql", false).await?;
let pg_version_string = pg_version.to_string();
let pg_version_path = db_dir.join(&pg_version_string);
if tokio::fs::metadata(&pg_version_path).await.is_err() {
let conf_dir = Path::new("/etc/postgresql").join(pg_version.to_string());
let conf_dir_tmp = {
let mut tmp = conf_dir.clone();
tmp.set_extension("tmp");
tmp
};
if tokio::fs::metadata(&conf_dir).await.is_ok() {
tokio::fs::rename(&conf_dir, &conf_dir_tmp).await?;
}
let mut old_version = pg_version;
while old_version > 13
/* oldest pg version included in startos */
{
old_version -= 1;
let old_datadir = db_dir.join(old_version.to_string());
if tokio::fs::metadata(&old_datadir).await.is_ok() {
Command::new("pg_upgradecluster")
.arg(old_version.to_string())
.arg("main")
.invoke(crate::ErrorKind::Database)
.await?;
break;
}
}
if tokio::fs::metadata(&conf_dir).await.is_ok() {
if tokio::fs::metadata(&conf_dir).await.is_ok() {
tokio::fs::remove_dir_all(&conf_dir).await?;
}
tokio::fs::rename(&conf_dir_tmp, &conf_dir).await?;
}
}
Command::new("systemctl") Command::new("systemctl")
.arg("start") .arg("start")
.arg(format!("postgresql@{pg_version}-main.service")) .arg("postgresql")
.invoke(crate::ErrorKind::Database) .invoke(crate::ErrorKind::Database)
.await?; .await?;
if !exists { if !exists {
@@ -181,7 +208,6 @@ pub async fn init_postgres(datadir: impl AsRef<Path>) -> Result<(), Error> {
.invoke(crate::ErrorKind::Database) .invoke(crate::ErrorKind::Database)
.await?; .await?;
} }
Ok(()) Ok(())
} }
@@ -224,15 +250,15 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
let db = cfg.db(&account).await?; let db = cfg.db(&account).await?;
tracing::info!("Opened PatchDB"); tracing::info!("Opened PatchDB");
let mut handle = db.handle(); let mut handle = db.handle();
let mut server_info = crate::db::DatabaseModel::new() crate::db::DatabaseModel::new()
.server_info() .server_info()
.get_mut(&mut handle) .lock(&mut handle, LockType::Write)
.await?; .await?;
let receipts = InitReceipts::new(&mut handle).await?; let receipts = InitReceipts::new(&mut handle).await?;
// write to ca cert store // write to ca cert store
tokio::fs::write( tokio::fs::write(
"/usr/local/share/ca-certificates/startos-root-ca.crt", "/usr/local/share/ca-certificates/embassy-root-ca.crt",
account.root_ca_cert.to_pem()?, account.root_ca_cert.to_pem()?,
) )
.await?; .await?;
@@ -244,15 +270,17 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
crate::net::wifi::synchronize_wpa_supplicant_conf( crate::net::wifi::synchronize_wpa_supplicant_conf(
&cfg.datadir().join("main"), &cfg.datadir().join("main"),
wifi_interface, wifi_interface,
&server_info.last_wifi_region, &receipts.last_wifi_region.get(&mut handle).await?,
) )
.await?; .await?;
tracing::info!("Synchronized WiFi"); tracing::info!("Synchronized WiFi");
} }
let should_rebuild = tokio::fs::metadata(SYSTEM_REBUILD_PATH).await.is_ok() let should_rebuild = tokio::fs::metadata(SYSTEM_REBUILD_PATH).await.is_ok()
|| &*server_info.version < &emver::Version::new(0, 3, 2, 0) || &*receipts.server_version.get(&mut handle).await? < &emver::Version::new(0, 3, 2, 0)
|| (*ARCH == "x86_64" && &*server_info.version < &emver::Version::new(0, 3, 4, 0)); || (*ARCH == "x86_64"
&& &*receipts.server_version.get(&mut handle).await?
< &emver::Version::new(0, 3, 4, 0));
let song = if should_rebuild { let song = if should_rebuild {
Some(NonDetachingJoinHandle::from(tokio::spawn(async { Some(NonDetachingJoinHandle::from(tokio::spawn(async {
@@ -370,19 +398,32 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
tracing::info!("Syncronized system clock"); tracing::info!("Syncronized system clock");
} }
if server_info.zram { Command::new("systemctl")
crate::system::enable_zram().await? .arg("start")
} .arg("tor")
server_info.ip_info = crate::net::dhcp::init_ips().await?; .invoke(crate::ErrorKind::Tor)
server_info.status_info = ServerStatus { .await?;
updated: false,
update_progress: None,
backup_progress: None,
};
server_info.system_start_time = time().await?; receipts
.ip_info
.set(&mut handle, crate::net::dhcp::init_ips().await?)
.await?;
receipts
.status_info
.set(
&mut handle,
ServerStatus {
updated: false,
update_progress: None,
backup_progress: None,
},
)
.await?;
server_info.save(&mut handle).await?; receipts
.system_start_time
.set(&mut handle, time().await?)
.await?;
crate::version::init(&mut handle, &secret_store, &receipts).await?; crate::version::init(&mut handle, &secret_store, &receipts).await?;

View File

@@ -62,7 +62,7 @@ impl UpdateDependencyReceipts {
} }
} }
#[instrument(skip_all)] #[instrument(skip(ctx, db, deps, receipts))]
pub async fn update_dependency_errors_of_dependents<'a, Db: DbHandle>( pub async fn update_dependency_errors_of_dependents<'a, Db: DbHandle>(
ctx: &RpcContext, ctx: &RpcContext,
db: &mut Db, db: &mut Db,
@@ -82,7 +82,7 @@ pub async fn update_dependency_errors_of_dependents<'a, Db: DbHandle>(
.dependency_errors .dependency_errors
.get(db, dep) .get(db, dep)
.await? .await?
.ok_or_else(|| not_found!(dep))?; .ok_or_else(not_found)?;
errs.0.insert(id.clone(), e); errs.0.insert(id.clone(), e);
receipts.dependency_errors.set(db, errs, dep).await? receipts.dependency_errors.set(db, errs, dep).await?
} else { } else {
@@ -90,7 +90,7 @@ pub async fn update_dependency_errors_of_dependents<'a, Db: DbHandle>(
.dependency_errors .dependency_errors
.get(db, dep) .get(db, dep)
.await? .await?
.ok_or_else(|| not_found!(dep))?; .ok_or_else(not_found)?;
errs.0.remove(id); errs.0.remove(id);
receipts.dependency_errors.set(db, errs, dep).await? receipts.dependency_errors.set(db, errs, dep).await?
} }
@@ -99,7 +99,7 @@ pub async fn update_dependency_errors_of_dependents<'a, Db: DbHandle>(
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn cleanup(ctx: &RpcContext, id: &PackageId, version: &Version) -> Result<(), Error> { pub async fn cleanup(ctx: &RpcContext, id: &PackageId, version: &Version) -> Result<(), Error> {
let mut errors = ErrorCollection::new(); let mut errors = ErrorCollection::new();
ctx.managers.remove(&(id.clone(), version.clone())).await; ctx.managers.remove(&(id.clone(), version.clone())).await;
@@ -204,7 +204,7 @@ impl CleanupFailedReceipts {
} }
} }
#[instrument(skip_all)] #[instrument(skip(ctx, db, receipts))]
pub async fn cleanup_failed<Db: DbHandle>( pub async fn cleanup_failed<Db: DbHandle>(
ctx: &RpcContext, ctx: &RpcContext,
db: &mut Db, db: &mut Db,
@@ -215,7 +215,7 @@ pub async fn cleanup_failed<Db: DbHandle>(
.package_data_entry .package_data_entry
.get(db, id) .get(db, id)
.await? .await?
.ok_or_else(|| not_found!(id))?; .ok_or_else(not_found)?;
if let Some(manifest) = match &pde { if let Some(manifest) = match &pde {
PackageDataEntry::Installing { manifest, .. } PackageDataEntry::Installing { manifest, .. }
| PackageDataEntry::Restoring { manifest, .. } => Some(manifest), | PackageDataEntry::Restoring { manifest, .. } => Some(manifest),
@@ -272,7 +272,7 @@ pub async fn cleanup_failed<Db: DbHandle>(
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(db, current_dependencies, current_dependent_receipt))]
pub async fn remove_from_current_dependents_lists<'a, Db: DbHandle>( pub async fn remove_from_current_dependents_lists<'a, Db: DbHandle>(
db: &mut Db, db: &mut Db,
id: &'a PackageId, id: &'a PackageId,
@@ -340,7 +340,7 @@ impl UninstallReceipts {
} }
} }
} }
#[instrument(skip_all)] #[instrument(skip(ctx, secrets, db))]
pub async fn uninstall<Ex>( pub async fn uninstall<Ex>(
ctx: &RpcContext, ctx: &RpcContext,
db: &mut PatchDbHandle, db: &mut PatchDbHandle,
@@ -404,7 +404,7 @@ where
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(secrets))]
pub async fn remove_tor_keys<Ex>(secrets: &mut Ex, id: &PackageId) -> Result<(), Error> pub async fn remove_tor_keys<Ex>(secrets: &mut Ex, id: &PackageId) -> Result<(), Error>
where where
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>, for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,

View File

@@ -21,7 +21,6 @@ use rpc_toolkit::yajrc::RpcError;
use tokio::fs::{File, OpenOptions}; use tokio::fs::{File, OpenOptions};
use tokio::io::{AsyncRead, AsyncSeek, AsyncSeekExt}; use tokio::io::{AsyncRead, AsyncSeek, AsyncSeekExt};
use tokio::process::Command; use tokio::process::Command;
use tokio::sync::oneshot;
use tokio_stream::wrappers::ReadDirStream; use tokio_stream::wrappers::ReadDirStream;
use tracing::instrument; use tracing::instrument;
@@ -40,14 +39,13 @@ use crate::dependencies::{
}; };
use crate::install::cleanup::{cleanup, update_dependency_errors_of_dependents}; use crate::install::cleanup::{cleanup, update_dependency_errors_of_dependents};
use crate::install::progress::{InstallProgress, InstallProgressTracker}; use crate::install::progress::{InstallProgress, InstallProgressTracker};
use crate::marketplace::with_query_params;
use crate::notifications::NotificationLevel; use crate::notifications::NotificationLevel;
use crate::s9pk::manifest::{Manifest, PackageId}; use crate::s9pk::manifest::{Manifest, PackageId};
use crate::s9pk::reader::S9pkReader; use crate::s9pk::reader::S9pkReader;
use crate::status::{MainStatus, Status}; use crate::status::{MainStatus, Status};
use crate::util::io::{copy_and_shutdown, response_to_reader}; use crate::util::io::{copy_and_shutdown, response_to_reader};
use crate::util::serde::{display_serializable, Port}; use crate::util::serde::{display_serializable, Port};
use crate::util::{display_none, AsyncFileExt, Version}; use crate::util::{assure_send, display_none, AsyncFileExt, Version};
use crate::version::{Current, VersionT}; use crate::version::{Current, VersionT};
use crate::volume::{asset_dir, script_dir}; use crate::volume::{asset_dir, script_dir};
use crate::{Error, ErrorKind, ResultExt}; use crate::{Error, ErrorKind, ResultExt};
@@ -118,7 +116,7 @@ impl std::fmt::Display for MinMax {
display(display_none), display(display_none),
metadata(sync_db = true) metadata(sync_db = true)
)] )]
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn install( pub async fn install(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[arg] id: String, #[arg] id: String,
@@ -137,39 +135,35 @@ pub async fn install(
let marketplace_url = let marketplace_url =
marketplace_url.unwrap_or_else(|| crate::DEFAULT_MARKETPLACE.parse().unwrap()); marketplace_url.unwrap_or_else(|| crate::DEFAULT_MARKETPLACE.parse().unwrap());
let version_priority = version_priority.unwrap_or_default(); let version_priority = version_priority.unwrap_or_default();
let man: Manifest = ctx let man: Manifest = reqwest::get(format!(
.client "{}/package/v0/manifest/{}?spec={}&version-priority={}&eos-version-compat={}&arch={}",
.get(with_query_params( marketplace_url,
&ctx, id,
format!( version,
"{}/package/v0/manifest/{}?spec={}&version-priority={}", version_priority,
marketplace_url, id, version, version_priority, Current::new().compat(),
) &*crate::ARCH,
.parse()?, ))
)) .await
.send() .with_kind(crate::ErrorKind::Registry)?
.await .error_for_status()
.with_kind(crate::ErrorKind::Registry)? .with_kind(crate::ErrorKind::Registry)?
.error_for_status() .json()
.with_kind(crate::ErrorKind::Registry)? .await
.json() .with_kind(crate::ErrorKind::Registry)?;
.await let s9pk = reqwest::get(format!(
.with_kind(crate::ErrorKind::Registry)?; "{}/package/v0/{}.s9pk?spec=={}&version-priority={}&eos-version-compat={}&arch={}",
let s9pk = ctx marketplace_url,
.client id,
.get(with_query_params( man.version,
&ctx, version_priority,
format!( Current::new().compat(),
"{}/package/v0/{}.s9pk?spec=={}&version-priority={}", &*crate::ARCH,
marketplace_url, id, man.version, version_priority, ))
) .await
.parse()?, .with_kind(crate::ErrorKind::Registry)?
)) .error_for_status()
.send() .with_kind(crate::ErrorKind::Registry)?;
.await
.with_kind(crate::ErrorKind::Registry)?
.error_for_status()
.with_kind(crate::ErrorKind::Registry)?;
if man.id.as_str() != id || !man.version.satisfies(&version) { if man.id.as_str() != id || !man.version.satisfies(&version) {
return Err(Error::new( return Err(Error::new(
@@ -190,18 +184,16 @@ pub async fn install(
async { async {
tokio::io::copy( tokio::io::copy(
&mut response_to_reader( &mut response_to_reader(
ctx.client reqwest::get(format!(
.get(with_query_params( "{}/package/v0/license/{}?spec=={}&eos-version-compat={}&arch={}",
&ctx, marketplace_url,
format!( id,
"{}/package/v0/license/{}?spec=={}", man.version,
marketplace_url, id, man.version, Current::new().compat(),
) &*crate::ARCH,
.parse()?, ))
)) .await?
.send() .error_for_status()?,
.await?
.error_for_status()?,
), ),
&mut File::create(public_dir_path.join("LICENSE.md")).await?, &mut File::create(public_dir_path.join("LICENSE.md")).await?,
) )
@@ -211,18 +203,16 @@ pub async fn install(
async { async {
tokio::io::copy( tokio::io::copy(
&mut response_to_reader( &mut response_to_reader(
ctx.client reqwest::get(format!(
.get(with_query_params( "{}/package/v0/instructions/{}?spec=={}&eos-version-compat={}&arch={}",
&ctx, marketplace_url,
format!( id,
"{}/package/v0/instructions/{}?spec=={}", man.version,
marketplace_url, id, man.version, Current::new().compat(),
) &*crate::ARCH,
.parse()?, ))
)) .await?
.send() .error_for_status()?,
.await?
.error_for_status()?,
), ),
&mut File::create(public_dir_path.join("INSTRUCTIONS.md")).await?, &mut File::create(public_dir_path.join("INSTRUCTIONS.md")).await?,
) )
@@ -232,18 +222,16 @@ pub async fn install(
async { async {
tokio::io::copy( tokio::io::copy(
&mut response_to_reader( &mut response_to_reader(
ctx.client reqwest::get(format!(
.get(with_query_params( "{}/package/v0/icon/{}?spec=={}&eos-version-compat={}&arch={}",
&ctx, marketplace_url,
format!( id,
"{}/package/v0/icon/{}?spec=={}", man.version,
marketplace_url, id, man.version, Current::new().compat(),
) &*crate::ARCH,
.parse()?, ))
)) .await?
.send() .error_for_status()?,
.await?
.error_for_status()?,
), ),
&mut File::create(public_dir_path.join(format!("icon.{}", icon_type))).await?, &mut File::create(public_dir_path.join(format!("icon.{}", icon_type))).await?,
) )
@@ -309,7 +297,6 @@ pub async fn install(
Some(marketplace_url), Some(marketplace_url),
InstallProgress::new(s9pk.content_length()), InstallProgress::new(s9pk.content_length()),
response_to_reader(s9pk), response_to_reader(s9pk),
None,
) )
.await .await
{ {
@@ -339,7 +326,7 @@ pub async fn install(
} }
#[command(rpc_only, display(display_none))] #[command(rpc_only, display(display_none))]
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn sideload( pub async fn sideload(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[arg] manifest: Manifest, #[arg] manifest: Manifest,
@@ -438,64 +425,52 @@ pub async fn sideload(
pde.save(&mut tx).await?; pde.save(&mut tx).await?;
tx.commit().await?; tx.commit().await?;
let (send, recv) = oneshot::channel(); if let Err(e) = download_install_s9pk(
&new_ctx,
tokio::spawn(async move { &manifest,
if let Err(e) = download_install_s9pk( None,
&new_ctx, progress,
&manifest, tokio_util::io::StreamReader::new(req.into_body().map_err(|e| {
None, std::io::Error::new(
progress, match &e {
tokio_util::io::StreamReader::new(req.into_body().map_err(|e| { e if e.is_connect() => std::io::ErrorKind::ConnectionRefused,
std::io::Error::new( e if e.is_timeout() => std::io::ErrorKind::TimedOut,
match &e { _ => std::io::ErrorKind::Other,
e if e.is_connect() => std::io::ErrorKind::ConnectionRefused, },
e if e.is_timeout() => std::io::ErrorKind::TimedOut, e,
_ => std::io::ErrorKind::Other, )
}, })),
e, )
) .await
})), {
Some(send), let err_str = format!(
) "Install of {}@{} Failed: {}",
.await manifest.id, manifest.version, e
);
tracing::error!("{}", err_str);
tracing::debug!("{:?}", e);
if let Err(e) = new_ctx
.notification_manager
.notify(
&mut hdl,
Some(manifest.id),
NotificationLevel::Error,
String::from("Install Failed"),
err_str,
(),
None,
)
.await
{ {
let err_str = format!( tracing::error!("Failed to issue Notification: {}", e);
"Install of {}@{} Failed: {}",
manifest.id, manifest.version, e
);
tracing::error!("{}", err_str);
tracing::debug!("{:?}", e); tracing::debug!("{:?}", e);
if let Err(e) = new_ctx
.notification_manager
.notify(
&mut hdl,
Some(manifest.id),
NotificationLevel::Error,
String::from("Install Failed"),
err_str,
(),
None,
)
.await
{
tracing::error!("Failed to issue Notification: {}", e);
tracing::debug!("{:?}", e);
}
} }
});
if let Ok(_) = recv.await {
Response::builder()
.status(StatusCode::OK)
.body(Body::empty())
.with_kind(ErrorKind::Network)
} else {
Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
.body(Body::from("installation aborted before upload completed"))
.with_kind(ErrorKind::Network)
} }
Response::builder()
.status(StatusCode::OK)
.body(Body::empty())
.with_kind(ErrorKind::Network)
} }
.boxed() .boxed()
}); });
@@ -507,7 +482,7 @@ pub async fn sideload(
Ok(guid) Ok(guid)
} }
#[instrument(skip_all)] #[instrument(skip(ctx))]
async fn cli_install( async fn cli_install(
ctx: CliContext, ctx: CliContext,
target: String, target: String,
@@ -599,7 +574,7 @@ pub async fn uninstall(#[arg] id: PackageId) -> Result<PackageId, Error> {
} }
#[command(rename = "dry", display(display_serializable))] #[command(rename = "dry", display(display_serializable))]
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn uninstall_dry( pub async fn uninstall_dry(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[parent_data] id: PackageId, #[parent_data] id: PackageId,
@@ -622,7 +597,7 @@ pub async fn uninstall_dry(
Ok(BreakageRes(breakages)) Ok(BreakageRes(breakages))
} }
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn uninstall_impl(ctx: RpcContext, id: PackageId) -> Result<(), Error> { pub async fn uninstall_impl(ctx: RpcContext, id: PackageId) -> Result<(), Error> {
let mut handle = ctx.db.handle(); let mut handle = ctx.db.handle();
let mut tx = handle.begin().await?; let mut tx = handle.begin().await?;
@@ -725,14 +700,13 @@ impl DownloadInstallReceipts {
} }
} }
#[instrument(skip_all)] #[instrument(skip(ctx, temp_manifest, s9pk))]
pub async fn download_install_s9pk( pub async fn download_install_s9pk(
ctx: &RpcContext, ctx: &RpcContext,
temp_manifest: &Manifest, temp_manifest: &Manifest,
marketplace_url: Option<Url>, marketplace_url: Option<Url>,
progress: Arc<InstallProgress>, progress: Arc<InstallProgress>,
mut s9pk: impl AsyncRead + Unpin, mut s9pk: impl AsyncRead + Unpin,
download_complete: Option<oneshot::Sender<()>>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let pkg_id = &temp_manifest.id; let pkg_id = &temp_manifest.id;
let version = &temp_manifest.version; let version = &temp_manifest.version;
@@ -825,9 +799,6 @@ pub async fn download_install_s9pk(
let mut progress_writer = InstallProgressTracker::new(&mut dst, progress.clone()); let mut progress_writer = InstallProgressTracker::new(&mut dst, progress.clone());
tokio::io::copy(&mut s9pk, &mut progress_writer).await?; tokio::io::copy(&mut s9pk, &mut progress_writer).await?;
progress.download_complete(); progress.download_complete();
if let Some(complete) = download_complete {
complete.send(()).unwrap_or_default();
}
Ok(()) Ok(())
}) })
.await?; .await?;
@@ -902,7 +873,7 @@ impl InstallS9Receipts {
} }
} }
#[instrument(skip_all)] #[instrument(skip(ctx, rdr))]
pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>( pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
ctx: &RpcContext, ctx: &RpcContext,
pkg_id: &PackageId, pkg_id: &PackageId,
@@ -939,20 +910,17 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
{ {
Some(local_man) Some(local_man)
} else if let Some(marketplace_url) = &marketplace_url { } else if let Some(marketplace_url) = &marketplace_url {
match ctx match reqwest::get(format!(
.client "{}/package/v0/manifest/{}?spec={}&eos-version-compat={}&arch={}",
.get(with_query_params( marketplace_url,
ctx, dep,
format!( info.version,
"{}/package/v0/manifest/{}?spec={}", Current::new().compat(),
marketplace_url, dep, info.version, &*crate::ARCH,
) ))
.parse()?, .await
)) .with_kind(crate::ErrorKind::Registry)?
.send() .error_for_status()
.await
.with_kind(crate::ErrorKind::Registry)?
.error_for_status()
{ {
Ok(a) => Ok(Some( Ok(a) => Ok(Some(
a.json() a.json()
@@ -977,19 +945,16 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
let icon_path = dir.join(format!("icon.{}", manifest.assets.icon_type())); let icon_path = dir.join(format!("icon.{}", manifest.assets.icon_type()));
if tokio::fs::metadata(&icon_path).await.is_err() { if tokio::fs::metadata(&icon_path).await.is_err() {
tokio::fs::create_dir_all(&dir).await?; tokio::fs::create_dir_all(&dir).await?;
let icon = ctx let icon = reqwest::get(format!(
.client "{}/package/v0/icon/{}?spec={}&eos-version-compat={}&arch={}",
.get(with_query_params( marketplace_url,
ctx, dep,
format!( info.version,
"{}/package/v0/icon/{}?spec={}", Current::new().compat(),
marketplace_url, dep, info.version, &*crate::ARCH,
) ))
.parse()?, .await
)) .with_kind(crate::ErrorKind::Registry)?;
.send()
.await
.with_kind(crate::ErrorKind::Registry)?;
let mut dst = File::create(&icon_path).await?; let mut dst = File::create(&icon_path).await?;
tokio::io::copy(&mut response_to_reader(icon), &mut dst).await?; tokio::io::copy(&mut response_to_reader(icon), &mut dst).await?;
dst.sync_all().await?; dst.sync_all().await?;
@@ -1314,14 +1279,6 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
migration.or(prev_migration) migration.or(prev_migration)
}; };
remove_from_current_dependents_lists(
&mut tx,
pkg_id,
&prev.current_dependencies,
&receipts.config.current_dependents,
)
.await?; // remove previous
let configured = if let Some(f) = viable_migration { let configured = if let Some(f) = viable_migration {
f.await?.configured && prev_is_configured f.await?.configured && prev_is_configured
} else { } else {
@@ -1341,6 +1298,13 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
) )
.await?; .await?;
} else { } else {
remove_from_current_dependents_lists(
&mut tx,
pkg_id,
&prev.current_dependencies,
&receipts.config.current_dependents,
)
.await?; // remove previous
add_dependent_to_current_dependents_lists( add_dependent_to_current_dependents_lists(
&mut tx, &mut tx,
pkg_id, pkg_id,
@@ -1438,7 +1402,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(datadir))]
pub fn load_images<'a, P: AsRef<Path> + 'a + Send + Sync>( pub fn load_images<'a, P: AsRef<Path> + 'a + Send + Sync>(
datadir: P, datadir: P,
) -> BoxFuture<'a, Result<(), Error>> { ) -> BoxFuture<'a, Result<(), Error>> {
@@ -1471,23 +1435,16 @@ pub fn load_images<'a, P: AsRef<Path> + 'a + Send + Sync>(
copy_and_shutdown(&mut File::open(&path).await?, load_in) copy_and_shutdown(&mut File::open(&path).await?, load_in)
.await? .await?
} }
Some("s9pk") => match async { Some("s9pk") => {
let mut reader = S9pkReader::open(&path, true).await?; copy_and_shutdown(
copy_and_shutdown(&mut reader.docker_images().await?, load_in) &mut S9pkReader::open(&path, false)
.await?; .await?
Ok::<_, Error>(()) .docker_images()
.await?,
load_in,
)
.await?
} }
.await
{
Ok(()) => (),
Err(e) => {
tracing::error!(
"Error loading docker images from s9pk: {e}"
);
tracing::debug!("{e:?}");
return Ok(());
}
},
_ => unreachable!(), _ => unreachable!(),
}; };

View File

@@ -60,7 +60,7 @@ pub async fn update() -> Result<(), Error> {
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(ctx))]
#[command(display(display_serializable))] #[command(display(display_serializable))]
pub async fn dry( pub async fn dry(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
@@ -76,7 +76,7 @@ pub async fn dry(
.current_dependents .current_dependents
.get(&mut tx, &id) .get(&mut tx, &id)
.await? .await?
.ok_or_else(|| not_found!(id))? .ok_or_else(not_found)?
.0 .0
.keys() .keys()
.into_iter() .into_iter()

View File

@@ -5,19 +5,20 @@ pub const DEFAULT_MARKETPLACE: &str = "https://registry.start9.com";
pub const BUFFER_SIZE: usize = 1024; pub const BUFFER_SIZE: usize = 1024;
pub const HOST_IP: [u8; 4] = [172, 18, 0, 1]; pub const HOST_IP: [u8; 4] = [172, 18, 0, 1];
pub const TARGET: &str = current_platform::CURRENT_PLATFORM; pub const TARGET: &str = current_platform::CURRENT_PLATFORM;
pub const OS_ARCH: &str = env!("OS_ARCH");
lazy_static::lazy_static! { lazy_static::lazy_static! {
pub static ref ARCH: &'static str = { pub static ref ARCH: &'static str = {
let (arch, _) = TARGET.split_once("-").unwrap(); let (arch, _) = TARGET.split_once("-").unwrap();
arch arch
}; };
pub static ref IS_RASPBERRY_PI: bool = {
*ARCH == "aarch64"
};
} }
pub mod account; pub mod account;
pub mod action; pub mod action;
pub mod auth; pub mod auth;
pub mod backup; pub mod backup;
pub mod bins;
pub mod config; pub mod config;
pub mod context; pub mod context;
pub mod control; pub mod control;
@@ -86,7 +87,6 @@ pub fn main_api() -> Result<(), RpcError> {
#[command(subcommands( #[command(subcommands(
system::time, system::time,
system::experimental,
system::logs, system::logs,
system::kernel_logs, system::kernel_logs,
system::metrics, system::metrics,

View File

@@ -33,7 +33,7 @@ use crate::util::serde::Reversible;
use crate::{Error, ErrorKind}; use crate::{Error, ErrorKind};
#[pin_project::pin_project] #[pin_project::pin_project]
pub struct LogStream { struct LogStream {
_child: Child, _child: Child,
#[pin] #[pin]
entries: BoxStream<'static, Result<JournalctlEntry, Error>>, entries: BoxStream<'static, Result<JournalctlEntry, Error>>,
@@ -64,7 +64,7 @@ impl Stream for LogStream {
} }
} }
#[instrument(skip_all)] #[instrument(skip(logs, ws_fut))]
async fn ws_handler< async fn ws_handler<
WSFut: Future<Output = Result<Result<WebSocketStream<Upgraded>, HyperError>, JoinError>>, WSFut: Future<Output = Result<Result<WebSocketStream<Upgraded>, HyperError>, JoinError>>,
>( >(
@@ -141,14 +141,14 @@ impl std::fmt::Display for LogEntry {
} }
#[derive(Serialize, Deserialize, Debug)] #[derive(Serialize, Deserialize, Debug)]
pub struct JournalctlEntry { struct JournalctlEntry {
#[serde(rename = "__REALTIME_TIMESTAMP")] #[serde(rename = "__REALTIME_TIMESTAMP")]
pub timestamp: String, timestamp: String,
#[serde(rename = "MESSAGE")] #[serde(rename = "MESSAGE")]
#[serde(deserialize_with = "deserialize_string_or_utf8_array")] #[serde(deserialize_with = "deserialize_string_or_utf8_array")]
pub message: String, message: String,
#[serde(rename = "__CURSOR")] #[serde(rename = "__CURSOR")]
pub cursor: String, cursor: String,
} }
impl JournalctlEntry { impl JournalctlEntry {
fn log_entry(self) -> Result<(String, LogEntry), Error> { fn log_entry(self) -> Result<(String, LogEntry), Error> {
@@ -344,7 +344,7 @@ pub async fn cli_logs_generic_follow(
Ok(()) Ok(())
} }
pub async fn journalctl( async fn journalctl(
id: LogSource, id: LogSource,
limit: usize, limit: usize,
cursor: Option<&str>, cursor: Option<&str>,
@@ -409,7 +409,7 @@ pub async fn journalctl(
}) })
} }
#[instrument(skip_all)] #[instrument]
pub async fn fetch_logs( pub async fn fetch_logs(
id: LogSource, id: LogSource,
limit: Option<usize>, limit: Option<usize>,
@@ -456,7 +456,7 @@ pub async fn fetch_logs(
}) })
} }
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn follow_logs( pub async fn follow_logs(
ctx: RpcContext, ctx: RpcContext,
id: LogSource, id: LogSource,

View File

@@ -1,3 +0,0 @@
fn main() {
startos::bins::startbox()
}

View File

@@ -1,7 +1,6 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use itertools::Itertools;
use patch_db::{DbHandle, LockReceipt, LockType}; use patch_db::{DbHandle, LockReceipt, LockType};
use tracing::instrument; use tracing::instrument;
@@ -91,7 +90,7 @@ impl HealthCheckStatusReceipt {
} }
} }
#[instrument(skip_all)] #[instrument(skip(ctx, db))]
pub async fn check<Db: DbHandle>( pub async fn check<Db: DbHandle>(
ctx: &RpcContext, ctx: &RpcContext,
db: &mut Db, db: &mut Db,
@@ -112,7 +111,6 @@ pub async fn check<Db: DbHandle>(
}; };
let health_results = if let Some(started) = started { let health_results = if let Some(started) = started {
tracing::debug!("Checking health of {}", id);
manifest manifest
.health_checks .health_checks
.check_all( .check_all(
@@ -131,24 +129,6 @@ pub async fn check<Db: DbHandle>(
if !should_commit.load(Ordering::SeqCst) { if !should_commit.load(Ordering::SeqCst) {
return Ok(()); return Ok(());
} }
if !health_results
.iter()
.any(|(_, res)| matches!(res, HealthCheckResult::Failure { .. }))
{
tracing::debug!("All health checks succeeded for {}", id);
} else {
tracing::debug!(
"Some health checks failed for {}: {}",
id,
health_results
.iter()
.filter(|(_, res)| matches!(res, HealthCheckResult::Failure { .. }))
.map(|(id, _)| &*id)
.join(", ")
);
}
let current_dependents = { let current_dependents = {
let mut checkpoint = tx.begin().await?; let mut checkpoint = tx.begin().await?;
let receipts = HealthCheckStatusReceipt::new(&mut checkpoint, id).await?; let receipts = HealthCheckStatusReceipt::new(&mut checkpoint, id).await?;
@@ -173,7 +153,9 @@ pub async fn check<Db: DbHandle>(
current_dependents current_dependents
}; };
tracing::debug!("Checking health of {}", id);
let receipts = crate::dependencies::BreakTransitiveReceipts::new(&mut tx).await?; let receipts = crate::dependencies::BreakTransitiveReceipts::new(&mut tx).await?;
tracing::debug!("Got receipts {}", id);
for (dependent, info) in (current_dependents).0.iter() { for (dependent, info) in (current_dependents).0.iter() {
let failures: BTreeMap<HealthCheckId, HealthCheckResult> = health_results let failures: BTreeMap<HealthCheckId, HealthCheckResult> = health_results

View File

@@ -21,7 +21,6 @@ use tracing::instrument;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::manager::sync::synchronizer; use crate::manager::sync::synchronizer;
use crate::net::net_controller::NetService; use crate::net::net_controller::NetService;
use crate::net::vhost::AlpnInfo;
use crate::procedure::docker::{DockerContainer, DockerProcedure, LongRunning}; use crate::procedure::docker::{DockerContainer, DockerProcedure, LongRunning};
#[cfg(feature = "js_engine")] #[cfg(feature = "js_engine")]
use crate::procedure::js_scripts::JsProcedure; use crate::procedure::js_scripts::JsProcedure;
@@ -40,7 +39,7 @@ pub const HEALTH_CHECK_GRACE_PERIOD_SECONDS: u64 = 5;
#[derive(Default)] #[derive(Default)]
pub struct ManagerMap(RwLock<BTreeMap<(PackageId, Version), Arc<Manager>>>); pub struct ManagerMap(RwLock<BTreeMap<(PackageId, Version), Arc<Manager>>>);
impl ManagerMap { impl ManagerMap {
#[instrument(skip_all)] #[instrument(skip(self, ctx, db, secrets))]
pub async fn init<Db: DbHandle, Ex>( pub async fn init<Db: DbHandle, Ex>(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
@@ -79,7 +78,7 @@ impl ManagerMap {
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(self, ctx))]
pub async fn add(&self, ctx: RpcContext, manifest: Manifest) -> Result<(), Error> { pub async fn add(&self, ctx: RpcContext, manifest: Manifest) -> Result<(), Error> {
let mut lock = self.0.write().await; let mut lock = self.0.write().await;
let id = (manifest.id.clone(), manifest.version.clone()); let id = (manifest.id.clone(), manifest.version.clone());
@@ -92,7 +91,7 @@ impl ManagerMap {
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(self))]
pub async fn remove(&self, id: &(PackageId, Version)) { pub async fn remove(&self, id: &(PackageId, Version)) {
if let Some(man) = self.0.write().await.remove(id) { if let Some(man) = self.0.write().await.remove(id) {
if let Err(e) = man.exit().await { if let Err(e) = man.exit().await {
@@ -102,7 +101,7 @@ impl ManagerMap {
} }
} }
#[instrument(skip_all)] #[instrument(skip(self))]
pub async fn empty(&self) -> Result<(), Error> { pub async fn empty(&self) -> Result<(), Error> {
let res = let res =
futures::future::join_all(std::mem::take(&mut *self.0.write().await).into_iter().map( futures::future::join_all(std::mem::take(&mut *self.0.write().await).into_iter().map(
@@ -129,7 +128,7 @@ impl ManagerMap {
}) })
} }
#[instrument(skip_all)] #[instrument(skip(self))]
pub async fn get(&self, id: &(PackageId, Version)) -> Option<Arc<Manager>> { pub async fn get(&self, id: &(PackageId, Version)) -> Option<Arc<Manager>> {
self.0.read().await.get(id).cloned() self.0.read().await.get(id).cloned()
} }
@@ -175,7 +174,7 @@ pub enum OnStop {
Exit, Exit,
} }
#[instrument(skip_all)] #[instrument(skip(state))]
async fn run_main( async fn run_main(
state: &Arc<ManagerSharedState>, state: &Arc<ManagerSharedState>,
) -> Result<Result<NoOutput, (i32, String)>, Error> { ) -> Result<Result<NoOutput, (i32, String)>, Error> {
@@ -233,7 +232,7 @@ async fn start_up_image(
} }
impl Manager { impl Manager {
#[instrument(skip_all)] #[instrument(skip(ctx))]
async fn create(ctx: RpcContext, manifest: Manifest) -> Result<Self, Error> { async fn create(ctx: RpcContext, manifest: Manifest) -> Result<Self, Error> {
let (on_stop, recv) = channel(OnStop::Sleep); let (on_stop, recv) = channel(OnStop::Sleep);
let seed = Arc::new(ManagerSeed { let seed = Arc::new(ManagerSeed {
@@ -272,7 +271,7 @@ impl Manager {
send_signal(&self.shared, signal).await send_signal(&self.shared, signal).await
} }
#[instrument(skip_all)] #[instrument(skip(self))]
async fn exit(&self) -> Result<(), Error> { async fn exit(&self) -> Result<(), Error> {
self.shared self.shared
.commit_health_check_results .commit_health_check_results
@@ -434,7 +433,7 @@ pub struct PersistentContainer {
} }
impl PersistentContainer { impl PersistentContainer {
#[instrument(skip_all)] #[instrument(skip(seed))]
async fn init(seed: &Arc<ManagerSeed>) -> Result<Option<Self>, Error> { async fn init(seed: &Arc<ManagerSeed>) -> Result<Option<Self>, Error> {
Ok(if let Some(containers) = &seed.manifest.containers { Ok(if let Some(containers) = &seed.manifest.containers {
let (running_docker, rpc_client) = let (running_docker, rpc_client) =
@@ -574,14 +573,8 @@ async fn add_network_for_main(
let mut tx = secrets.begin().await?; let mut tx = secrets.begin().await?;
for (id, interface) in &seed.manifest.interfaces.0 { for (id, interface) in &seed.manifest.interfaces.0 {
for (external, internal) in interface.lan_config.iter().flatten() { for (external, internal) in interface.lan_config.iter().flatten() {
svc.add_lan( svc.add_lan(&mut tx, id.clone(), external.0, internal.internal, false)
&mut tx, .await?;
id.clone(),
external.0,
internal.internal,
Err(AlpnInfo::Specified(vec![])),
)
.await?;
} }
for (external, internal) in interface.tor_config.iter().flat_map(|t| &t.port_mapping) { for (external, internal) in interface.tor_config.iter().flat_map(|t| &t.port_mapping) {
svc.add_tor(&mut tx, id.clone(), external.0, internal.0) svc.add_tor(&mut tx, id.clone(), external.0, internal.0)
@@ -729,7 +722,7 @@ fn sigterm_timeout(manifest: &Manifest) -> Option<Duration> {
} }
} }
#[instrument(skip_all)] #[instrument(skip(shared))]
async fn stop(shared: &ManagerSharedState) -> Result<(), Error> { async fn stop(shared: &ManagerSharedState) -> Result<(), Error> {
shared shared
.commit_health_check_results .commit_health_check_results
@@ -753,7 +746,7 @@ async fn stop(shared: &ManagerSharedState) -> Result<(), Error> {
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(shared))]
async fn start(shared: &ManagerSharedState) -> Result<(), Error> { async fn start(shared: &ManagerSharedState) -> Result<(), Error> {
shared.on_stop.send_modify(|status| { shared.on_stop.send_modify(|status| {
if matches!(*status, OnStop::Sleep) { if matches!(*status, OnStop::Sleep) {
@@ -768,7 +761,7 @@ async fn start(shared: &ManagerSharedState) -> Result<(), Error> {
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(shared))]
async fn pause(shared: &ManagerSharedState) -> Result<(), Error> { async fn pause(shared: &ManagerSharedState) -> Result<(), Error> {
if let Err(e) = shared if let Err(e) = shared
.seed .seed
@@ -785,7 +778,7 @@ async fn pause(shared: &ManagerSharedState) -> Result<(), Error> {
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(shared))]
async fn resume(shared: &ManagerSharedState) -> Result<(), Error> { async fn resume(shared: &ManagerSharedState) -> Result<(), Error> {
shared shared
.seed .seed

View File

@@ -3,8 +3,6 @@ use reqwest::{StatusCode, Url};
use rpc_toolkit::command; use rpc_toolkit::command;
use serde_json::Value; use serde_json::Value;
use crate::context::RpcContext;
use crate::version::VersionT;
use crate::{Error, ResultExt}; use crate::{Error, ResultExt};
#[command(subcommands(get))] #[command(subcommands(get))]
@@ -12,34 +10,9 @@ pub fn marketplace() -> Result<(), Error> {
Ok(()) Ok(())
} }
pub fn with_query_params(ctx: &RpcContext, mut url: Url) -> Url {
url.query_pairs_mut()
.append_pair(
"os.version",
&crate::version::Current::new().semver().to_string(),
)
.append_pair(
"os.compat",
&crate::version::Current::new().compat().to_string(),
)
.append_pair("os.arch", crate::OS_ARCH)
.append_pair("hardware.arch", &*crate::ARCH)
.append_pair("hardware.ram", &ctx.hardware.ram.to_string());
for hw in &ctx.hardware.devices {
url.query_pairs_mut()
.append_pair(&format!("hardware.device.{}", hw.class()), hw.product());
}
url
}
#[command] #[command]
pub async fn get(#[context] ctx: RpcContext, #[arg] url: Url) -> Result<Value, Error> { pub async fn get(#[arg] url: Url) -> Result<Value, Error> {
let mut response = ctx let mut response = reqwest::get(url)
.client
.get(with_query_params(&ctx, url))
.send()
.await .await
.with_kind(crate::ErrorKind::Network)?; .with_kind(crate::ErrorKind::Network)?;
let status = response.status(); let status = response.status();

View File

@@ -47,7 +47,7 @@ pub struct EncryptedWire {
encrypted: serde_json::Value, encrypted: serde_json::Value,
} }
impl EncryptedWire { impl EncryptedWire {
#[instrument(skip_all)] #[instrument(skip(current_secret))]
pub fn decrypt(self, current_secret: impl AsRef<Jwk>) -> Option<String> { pub fn decrypt(self, current_secret: impl AsRef<Jwk>) -> Option<String> {
let current_secret = current_secret.as_ref(); let current_secret = current_secret.as_ref();

View File

@@ -24,7 +24,7 @@ pub struct Migrations {
pub to: IndexMap<VersionRange, PackageProcedure>, pub to: IndexMap<VersionRange, PackageProcedure>,
} }
impl Migrations { impl Migrations {
#[instrument(skip_all)] #[instrument]
pub fn validate( pub fn validate(
&self, &self,
container: &Option<DockerContainers>, container: &Option<DockerContainers>,
@@ -55,7 +55,7 @@ impl Migrations {
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub fn from<'a>( pub fn from<'a>(
&'a self, &'a self,
container: &'a Option<DockerContainers>, container: &'a Option<DockerContainers>,
@@ -95,7 +95,7 @@ impl Migrations {
} }
} }
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub fn to<'a>( pub fn to<'a>(
&'a self, &'a self,
ctx: &'a RpcContext, ctx: &'a RpcContext,

View File

@@ -7,4 +7,4 @@ prompt = no
[req_distinguished_name] [req_distinguished_name]
CN = {hostname}.local CN = {hostname}.local
O = Start9 Labs O = Start9 Labs
OU = StartOS OU = Embassy

View File

@@ -11,7 +11,6 @@ use models::PackageId;
use tokio::net::{TcpListener, UdpSocket}; use tokio::net::{TcpListener, UdpSocket};
use tokio::process::Command; use tokio::process::Command;
use tokio::sync::RwLock; use tokio::sync::RwLock;
use tracing::instrument;
use trust_dns_server::authority::MessageResponseBuilder; use trust_dns_server::authority::MessageResponseBuilder;
use trust_dns_server::client::op::{Header, ResponseCode}; use trust_dns_server::client::op::{Header, ResponseCode};
use trust_dns_server::client::rr::{Name, Record, RecordType}; use trust_dns_server::client::rr::{Name, Record, RecordType};
@@ -148,7 +147,6 @@ impl RequestHandler for Resolver {
} }
impl DnsController { impl DnsController {
#[instrument(skip_all)]
pub async fn init(bind: &[SocketAddr]) -> Result<Self, Error> { pub async fn init(bind: &[SocketAddr]) -> Result<Self, Error> {
let services = Arc::new(RwLock::new(BTreeMap::new())); let services = Arc::new(RwLock::new(BTreeMap::new()));
@@ -163,16 +161,10 @@ impl DnsController {
); );
server.register_socket(UdpSocket::bind(bind).await.with_kind(ErrorKind::Network)?); server.register_socket(UdpSocket::bind(bind).await.with_kind(ErrorKind::Network)?);
Command::new("resolvectl") Command::new("systemd-resolve")
.arg("dns") .arg("--set-dns=127.0.0.1")
.arg("br-start9") .arg("--interface=br-start9")
.arg("127.0.0.1") .arg("--set-domain=embassy")
.invoke(ErrorKind::Network)
.await?;
Command::new("resolvectl")
.arg("domain")
.arg("br-start9")
.arg("embassy")
.invoke(ErrorKind::Network) .invoke(ErrorKind::Network)
.await?; .await?;

View File

@@ -16,7 +16,7 @@ use crate::{Error, ResultExt};
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
pub struct Interfaces(pub BTreeMap<InterfaceId, Interface>); // TODO pub struct Interfaces(pub BTreeMap<InterfaceId, Interface>); // TODO
impl Interfaces { impl Interfaces {
#[instrument(skip_all)] #[instrument]
pub fn validate(&self) -> Result<(), Error> { pub fn validate(&self) -> Result<(), Error> {
for (_, interface) in &self.0 { for (_, interface) in &self.0 {
interface.validate().with_ctx(|_| { interface.validate().with_ctx(|_| {
@@ -28,7 +28,7 @@ impl Interfaces {
} }
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(secrets))]
pub async fn install<Ex>( pub async fn install<Ex>(
&self, &self,
secrets: &mut Ex, secrets: &mut Ex,
@@ -90,7 +90,7 @@ pub struct Interface {
pub protocols: IndexSet<String>, pub protocols: IndexSet<String>,
} }
impl Interface { impl Interface {
#[instrument(skip_all)] #[instrument]
pub fn validate(&self) -> Result<(), color_eyre::eyre::Report> { pub fn validate(&self) -> Result<(), color_eyre::eyre::Report> {
if self.tor_config.is_some() && !self.protocols.contains("tcp") { if self.tor_config.is_some() && !self.protocols.contains("tcp") {
color_eyre::eyre::bail!("must support tcp to set up a tor hidden service"); color_eyre::eyre::bail!("must support tcp to set up a tor hidden service");

View File

@@ -5,7 +5,6 @@ use std::sync::{Arc, Weak};
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use tokio::process::{Child, Command}; use tokio::process::{Child, Command};
use tokio::sync::Mutex; use tokio::sync::Mutex;
use tracing::instrument;
use crate::util::Invoke; use crate::util::Invoke;
use crate::{Error, ResultExt}; use crate::{Error, ResultExt};
@@ -52,7 +51,6 @@ pub struct MdnsControllerInner {
} }
impl MdnsControllerInner { impl MdnsControllerInner {
#[instrument(skip_all)]
async fn init() -> Result<Self, Error> { async fn init() -> Result<Self, Error> {
let mut res = MdnsControllerInner { let mut res = MdnsControllerInner {
alias_cmd: None, alias_cmd: None,
@@ -61,7 +59,6 @@ impl MdnsControllerInner {
res.sync().await?; res.sync().await?;
Ok(res) Ok(res)
} }
#[instrument(skip_all)]
async fn sync(&mut self) -> Result<(), Error> { async fn sync(&mut self) -> Result<(), Error> {
if let Some(mut cmd) = self.alias_cmd.take() { if let Some(mut cmd) = self.alias_cmd.take() {
cmd.kill().await.with_kind(crate::ErrorKind::Network)?; cmd.kill().await.with_kind(crate::ErrorKind::Network)?;

View File

@@ -15,7 +15,7 @@ use crate::net::keys::Key;
use crate::net::mdns::MdnsController; use crate::net::mdns::MdnsController;
use crate::net::ssl::{export_cert, export_key, SslManager}; use crate::net::ssl::{export_cert, export_key, SslManager};
use crate::net::tor::TorController; use crate::net::tor::TorController;
use crate::net::vhost::{AlpnInfo, VHostController}; use crate::net::vhost::VHostController;
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;
use crate::volume::cert_dir; use crate::volume::cert_dir;
use crate::{Error, HOST_IP}; use crate::{Error, HOST_IP};
@@ -31,10 +31,9 @@ pub struct NetController {
} }
impl NetController { impl NetController {
#[instrument(skip_all)] #[instrument]
pub async fn init( pub async fn init(
tor_control: SocketAddr, tor_control: SocketAddr,
tor_socks: SocketAddr,
dns_bind: &[SocketAddr], dns_bind: &[SocketAddr],
ssl: SslManager, ssl: SslManager,
hostname: &Hostname, hostname: &Hostname,
@@ -42,7 +41,7 @@ impl NetController {
) -> Result<Self, Error> { ) -> Result<Self, Error> {
let ssl = Arc::new(ssl); let ssl = Arc::new(ssl);
let mut res = Self { let mut res = Self {
tor: TorController::new(tor_control, tor_socks), tor: TorController::init(tor_control).await?,
#[cfg(feature = "avahi")] #[cfg(feature = "avahi")]
mdns: MdnsController::init().await?, mdns: MdnsController::init().await?,
vhost: VHostController::new(ssl.clone()), vhost: VHostController::new(ssl.clone()),
@@ -55,8 +54,6 @@ impl NetController {
} }
async fn add_os_bindings(&mut self, hostname: &Hostname, key: &Key) -> Result<(), Error> { async fn add_os_bindings(&mut self, hostname: &Hostname, key: &Key) -> Result<(), Error> {
let alpn = Err(AlpnInfo::Specified(vec!["http/1.1".into(), "h2".into()]));
// Internal DNS // Internal DNS
self.vhost self.vhost
.add( .add(
@@ -64,7 +61,7 @@ impl NetController {
Some("embassy".into()), Some("embassy".into()),
443, 443,
([127, 0, 0, 1], 80).into(), ([127, 0, 0, 1], 80).into(),
alpn.clone(), false,
) )
.await?; .await?;
self.os_bindings self.os_bindings
@@ -73,13 +70,7 @@ impl NetController {
// LAN IP // LAN IP
self.os_bindings.push( self.os_bindings.push(
self.vhost self.vhost
.add( .add(key.clone(), None, 443, ([127, 0, 0, 1], 80).into(), false)
key.clone(),
None,
443,
([127, 0, 0, 1], 80).into(),
alpn.clone(),
)
.await?, .await?,
); );
@@ -91,7 +82,7 @@ impl NetController {
Some("localhost".into()), Some("localhost".into()),
443, 443,
([127, 0, 0, 1], 80).into(), ([127, 0, 0, 1], 80).into(),
alpn.clone(), false,
) )
.await?, .await?,
); );
@@ -102,7 +93,7 @@ impl NetController {
Some(hostname.no_dot_host_name()), Some(hostname.no_dot_host_name()),
443, 443,
([127, 0, 0, 1], 80).into(), ([127, 0, 0, 1], 80).into(),
alpn.clone(), false,
) )
.await?, .await?,
); );
@@ -115,7 +106,7 @@ impl NetController {
Some(hostname.local_domain_name()), Some(hostname.local_domain_name()),
443, 443,
([127, 0, 0, 1], 80).into(), ([127, 0, 0, 1], 80).into(),
alpn.clone(), false,
) )
.await?, .await?,
); );
@@ -123,7 +114,7 @@ impl NetController {
// Tor (http) // Tor (http)
self.os_bindings.push( self.os_bindings.push(
self.tor self.tor
.add(key.tor_key(), 80, ([127, 0, 0, 1], 80).into()) .add(&key.tor_key(), 80, ([127, 0, 0, 1], 80).into())
.await?, .await?,
); );
@@ -135,20 +126,20 @@ impl NetController {
Some(key.tor_address().to_string()), Some(key.tor_address().to_string()),
443, 443,
([127, 0, 0, 1], 80).into(), ([127, 0, 0, 1], 80).into(),
alpn.clone(), false,
) )
.await?, .await?,
); );
self.os_bindings.push( self.os_bindings.push(
self.tor self.tor
.add(key.tor_key(), 443, ([127, 0, 0, 1], 443).into()) .add(&key.tor_key(), 443, ([127, 0, 0, 1], 443).into())
.await?, .await?,
); );
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(self))]
pub async fn create_service( pub async fn create_service(
self: &Arc<Self>, self: &Arc<Self>,
package: PackageId, package: PackageId,
@@ -173,13 +164,13 @@ impl NetController {
target: SocketAddr, target: SocketAddr,
) -> Result<Vec<Arc<()>>, Error> { ) -> Result<Vec<Arc<()>>, Error> {
let mut rcs = Vec::with_capacity(1); let mut rcs = Vec::with_capacity(1);
rcs.push(self.tor.add(key.tor_key(), external, target).await?); rcs.push(self.tor.add(&key.tor_key(), external, target).await?);
Ok(rcs) Ok(rcs)
} }
async fn remove_tor(&self, key: &Key, external: u16, rcs: Vec<Arc<()>>) -> Result<(), Error> { async fn remove_tor(&self, key: &Key, external: u16, rcs: Vec<Arc<()>>) -> Result<(), Error> {
drop(rcs); drop(rcs);
self.tor.gc(Some(key.tor_key()), Some(external)).await self.tor.gc(&key.tor_key(), external).await
} }
async fn add_lan( async fn add_lan(
@@ -187,7 +178,7 @@ impl NetController {
key: Key, key: Key,
external: u16, external: u16,
target: SocketAddr, target: SocketAddr,
connect_ssl: Result<(), AlpnInfo>, connect_ssl: bool,
) -> Result<Vec<Arc<()>>, Error> { ) -> Result<Vec<Arc<()>>, Error> {
let mut rcs = Vec::with_capacity(2); let mut rcs = Vec::with_capacity(2);
rcs.push( rcs.push(
@@ -269,7 +260,7 @@ impl NetService {
id: InterfaceId, id: InterfaceId,
external: u16, external: u16,
internal: u16, internal: u16,
connect_ssl: Result<(), AlpnInfo>, connect_ssl: bool,
) -> Result<(), Error> ) -> Result<(), Error>
where where
for<'a> &'a mut Ex: PgExecutor<'a>, for<'a> &'a mut Ex: PgExecutor<'a>,

View File

@@ -143,21 +143,21 @@ pub async fn export_cert(chain: &[&X509], target: &Path) -> Result<(), Error> {
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument]
fn rand_serial() -> Result<Asn1Integer, Error> { fn rand_serial() -> Result<Asn1Integer, Error> {
let mut bn = BigNum::new()?; let mut bn = BigNum::new()?;
bn.rand(64, MsbOption::MAYBE_ZERO, false)?; bn.rand(64, MsbOption::MAYBE_ZERO, false)?;
let asn1 = Asn1Integer::from_bn(&bn)?; let asn1 = Asn1Integer::from_bn(&bn)?;
Ok(asn1) Ok(asn1)
} }
#[instrument(skip_all)] #[instrument]
pub fn generate_key() -> Result<PKey<Private>, Error> { pub fn generate_key() -> Result<PKey<Private>, Error> {
let new_key = EcKey::generate(EC_GROUP.as_ref())?; let new_key = EcKey::generate(EC_GROUP.as_ref())?;
let key = PKey::from_ec_key(new_key)?; let key = PKey::from_ec_key(new_key)?;
Ok(key) Ok(key)
} }
#[instrument(skip_all)] #[instrument]
pub fn make_root_cert(root_key: &PKey<Private>, hostname: &Hostname) -> Result<X509, Error> { pub fn make_root_cert(root_key: &PKey<Private>, hostname: &Hostname) -> Result<X509, Error> {
let mut builder = X509Builder::new()?; let mut builder = X509Builder::new()?;
builder.set_version(CERTIFICATE_VERSION)?; builder.set_version(CERTIFICATE_VERSION)?;
@@ -173,7 +173,7 @@ pub fn make_root_cert(root_key: &PKey<Private>, hostname: &Hostname) -> Result<X
let mut subject_name_builder = X509NameBuilder::new()?; let mut subject_name_builder = X509NameBuilder::new()?;
subject_name_builder.append_entry_by_text("CN", &format!("{} Local Root CA", &*hostname.0))?; subject_name_builder.append_entry_by_text("CN", &format!("{} Local Root CA", &*hostname.0))?;
subject_name_builder.append_entry_by_text("O", "Start9")?; subject_name_builder.append_entry_by_text("O", "Start9")?;
subject_name_builder.append_entry_by_text("OU", "StartOS")?; subject_name_builder.append_entry_by_text("OU", "Embassy")?;
let subject_name = subject_name_builder.build(); let subject_name = subject_name_builder.build();
builder.set_subject_name(&subject_name)?; builder.set_subject_name(&subject_name)?;
@@ -208,7 +208,7 @@ pub fn make_root_cert(root_key: &PKey<Private>, hostname: &Hostname) -> Result<X
let cert = builder.build(); let cert = builder.build();
Ok(cert) Ok(cert)
} }
#[instrument(skip_all)] #[instrument]
pub fn make_int_cert( pub fn make_int_cert(
signer: (&PKey<Private>, &X509), signer: (&PKey<Private>, &X509),
applicant: &PKey<Private>, applicant: &PKey<Private>,
@@ -225,9 +225,9 @@ pub fn make_int_cert(
builder.set_serial_number(&*rand_serial()?)?; builder.set_serial_number(&*rand_serial()?)?;
let mut subject_name_builder = X509NameBuilder::new()?; let mut subject_name_builder = X509NameBuilder::new()?;
subject_name_builder.append_entry_by_text("CN", "StartOS Local Intermediate CA")?; subject_name_builder.append_entry_by_text("CN", "Embassy Local Intermediate CA")?;
subject_name_builder.append_entry_by_text("O", "Start9")?; subject_name_builder.append_entry_by_text("O", "Start9")?;
subject_name_builder.append_entry_by_text("OU", "StartOS")?; subject_name_builder.append_entry_by_text("OU", "Embassy")?;
let subject_name = subject_name_builder.build(); let subject_name = subject_name_builder.build();
builder.set_subject_name(&subject_name)?; builder.set_subject_name(&subject_name)?;
@@ -334,7 +334,7 @@ impl std::fmt::Display for SANInfo {
} }
} }
#[instrument(skip_all)] #[instrument]
pub fn make_leaf_cert( pub fn make_leaf_cert(
signer: (&PKey<Private>, &X509), signer: (&PKey<Private>, &X509),
applicant: (&PKey<Private>, &SANInfo), applicant: (&PKey<Private>, &SANInfo),
@@ -370,7 +370,7 @@ pub fn make_leaf_cert(
.unwrap_or("localhost"), .unwrap_or("localhost"),
)?; )?;
subject_name_builder.append_entry_by_text("O", "Start9")?; subject_name_builder.append_entry_by_text("O", "Start9")?;
subject_name_builder.append_entry_by_text("OU", "StartOS")?; subject_name_builder.append_entry_by_text("OU", "Embassy")?;
let subject_name = subject_name_builder.build(); let subject_name = subject_name_builder.build();
builder.set_subject_name(&subject_name)?; builder.set_subject_name(&subject_name)?;

View File

@@ -1,19 +1,18 @@
use std::borrow::Cow;
use std::fs::Metadata; use std::fs::Metadata;
use std::path::{Path, PathBuf}; use std::path::Path;
use std::sync::Arc; use std::sync::Arc;
use std::time::UNIX_EPOCH; use std::time::UNIX_EPOCH;
use async_compression::tokio::bufread::BrotliEncoder;
use async_compression::tokio::bufread::GzipEncoder; use async_compression::tokio::bufread::GzipEncoder;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use digest::Digest; use digest::Digest;
use futures::FutureExt; use futures::FutureExt;
use http::header::ACCEPT_ENCODING; use http::header::ACCEPT_ENCODING;
use http::header::CONTENT_ENCODING;
use http::request::Parts as RequestParts; use http::request::Parts as RequestParts;
use http::response::Builder; use http::response::Builder;
use hyper::{Body, Method, Request, Response, StatusCode}; use hyper::{Body, Method, Request, Response, StatusCode};
use include_dir::{include_dir, Dir};
use new_mime_guess::MimeGuess;
use openssl::hash::MessageDigest; use openssl::hash::MessageDigest;
use openssl::x509::X509; use openssl::x509::X509;
use rpc_toolkit::rpc_handler; use rpc_toolkit::rpc_handler;
@@ -36,9 +35,10 @@ static NOT_FOUND: &[u8] = b"Not Found";
static METHOD_NOT_ALLOWED: &[u8] = b"Method Not Allowed"; static METHOD_NOT_ALLOWED: &[u8] = b"Method Not Allowed";
static NOT_AUTHORIZED: &[u8] = b"Not Authorized"; static NOT_AUTHORIZED: &[u8] = b"Not Authorized";
static EMBEDDED_UIS: Dir<'_> = include_dir!("$CARGO_MANIFEST_DIR/../frontend/dist/static"); pub const MAIN_UI_WWW_DIR: &str = "/var/www/html/main";
pub const SETUP_UI_WWW_DIR: &str = "/var/www/html/setup";
const PROXY_STRIP_HEADERS: &[&str] = &["cookie", "host", "origin", "referer", "user-agent"]; pub const DIAG_UI_WWW_DIR: &str = "/var/www/html/diagnostic";
pub const INSTALL_UI_WWW_DIR: &str = "/var/www/html/install";
fn status_fn(_: i32) -> StatusCode { fn status_fn(_: i32) -> StatusCode {
StatusCode::OK StatusCode::OK
@@ -52,17 +52,6 @@ pub enum UiMode {
Main, Main,
} }
impl UiMode {
fn path(&self, path: &str) -> PathBuf {
match self {
Self::Setup => Path::new("setup-wizard").join(path),
Self::Diag => Path::new("diagnostic-ui").join(path),
Self::Install => Path::new("install-wizard").join(path),
Self::Main => Path::new("ui").join(path),
}
}
}
pub async fn setup_ui_file_router(ctx: SetupContext) -> Result<HttpHandler, Error> { pub async fn setup_ui_file_router(ctx: SetupContext) -> Result<HttpHandler, Error> {
let handler: HttpHandler = Arc::new(move |req| { let handler: HttpHandler = Arc::new(move |req| {
let ctx = ctx.clone(); let ctx = ctx.clone();
@@ -237,35 +226,65 @@ pub async fn main_ui_server_router(ctx: RpcContext) -> Result<HttpHandler, Error
} }
async fn alt_ui(req: Request<Body>, ui_mode: UiMode) -> Result<Response<Body>, Error> { async fn alt_ui(req: Request<Body>, ui_mode: UiMode) -> Result<Response<Body>, Error> {
let selected_root_dir = match ui_mode {
UiMode::Setup => SETUP_UI_WWW_DIR,
UiMode::Diag => DIAG_UI_WWW_DIR,
UiMode::Install => INSTALL_UI_WWW_DIR,
UiMode::Main => MAIN_UI_WWW_DIR,
};
let (request_parts, _body) = req.into_parts(); let (request_parts, _body) = req.into_parts();
let accept_encoding = request_parts
.headers
.get_all(ACCEPT_ENCODING)
.into_iter()
.filter_map(|h| h.to_str().ok())
.flat_map(|s| s.split(","))
.filter_map(|s| s.split(";").next())
.map(|s| s.trim())
.collect::<Vec<_>>();
match &request_parts.method { match &request_parts.method {
&Method::GET => { &Method::GET => {
let uri_path = ui_mode.path( let uri_path = request_parts
request_parts .uri
.uri .path()
.path() .strip_prefix('/')
.strip_prefix('/') .unwrap_or(request_parts.uri.path());
.unwrap_or(request_parts.uri.path()),
);
let file = EMBEDDED_UIS let full_path = Path::new(selected_root_dir).join(uri_path);
.get_file(&*uri_path) file_send(
.or_else(|| EMBEDDED_UIS.get_file(&*ui_mode.path("index.html"))); &request_parts,
if tokio::fs::metadata(&full_path)
if let Some(file) = file {
FileData::from_embedded(&request_parts, file)
.into_response(&request_parts)
.await .await
} else { .ok()
Ok(not_found()) .map(|f| f.is_file())
} .unwrap_or(false)
{
full_path
} else {
Path::new(selected_root_dir).join("index.html")
},
&accept_encoding,
)
.await
} }
_ => Ok(method_not_allowed()), _ => Ok(method_not_allowed()),
} }
} }
async fn main_embassy_ui(req: Request<Body>, ctx: RpcContext) -> Result<Response<Body>, Error> { async fn main_embassy_ui(req: Request<Body>, ctx: RpcContext) -> Result<Response<Body>, Error> {
let selected_root_dir = MAIN_UI_WWW_DIR;
let (request_parts, _body) = req.into_parts(); let (request_parts, _body) = req.into_parts();
let accept_encoding = request_parts
.headers
.get_all(ACCEPT_ENCODING)
.into_iter()
.filter_map(|h| h.to_str().ok())
.flat_map(|s| s.split(","))
.filter_map(|s| s.split(";").next())
.map(|s| s.trim())
.collect::<Vec<_>>();
match ( match (
&request_parts.method, &request_parts.method,
request_parts request_parts
@@ -280,12 +299,11 @@ async fn main_embassy_ui(req: Request<Body>, ctx: RpcContext) -> Result<Response
Ok(_) => { Ok(_) => {
let sub_path = Path::new(path); let sub_path = Path::new(path);
if let Ok(rest) = sub_path.strip_prefix("package-data") { if let Ok(rest) = sub_path.strip_prefix("package-data") {
FileData::from_path( file_send(
&request_parts, &request_parts,
&ctx.datadir.join(PKG_PUBLIC_DIR).join(rest), ctx.datadir.join(PKG_PUBLIC_DIR).join(rest),
&accept_encoding,
) )
.await?
.into_response(&request_parts)
.await .await
} else if let Ok(rest) = sub_path.strip_prefix("eos") { } else if let Ok(rest) = sub_path.strip_prefix("eos") {
match rest.to_str() { match rest.to_str() {
@@ -300,40 +318,6 @@ async fn main_embassy_ui(req: Request<Body>, ctx: RpcContext) -> Result<Response
Err(e) => un_authorized(e, &format!("public/{path}")), Err(e) => un_authorized(e, &format!("public/{path}")),
} }
} }
(&Method::GET, Some(("proxy", target))) => {
match HasValidSession::from_request_parts(&request_parts, &ctx).await {
Ok(_) => {
let target = urlencoding::decode(target)?;
let res = ctx
.client
.get(target.as_ref())
.headers(
request_parts
.headers
.iter()
.filter(|(h, _)| {
!PROXY_STRIP_HEADERS
.iter()
.any(|bad| h.as_str().eq_ignore_ascii_case(bad))
})
.map(|(h, v)| (h.clone(), v.clone()))
.collect(),
)
.send()
.await
.with_kind(crate::ErrorKind::Network)?;
let mut hres = Response::builder().status(res.status());
for (h, v) in res.headers().clone() {
if let Some(h) = h {
hres = hres.header(h, v);
}
}
hres.body(Body::wrap_stream(res.bytes_stream()))
.with_kind(crate::ErrorKind::Network)
}
Err(e) => un_authorized(e, &format!("proxy/{target}")),
}
}
(&Method::GET, Some(("eos", "local.crt"))) => { (&Method::GET, Some(("eos", "local.crt"))) => {
match HasValidSession::from_request_parts(&request_parts, &ctx).await { match HasValidSession::from_request_parts(&request_parts, &ctx).await {
Ok(_) => cert_send(&ctx.account.read().await.root_ca_cert), Ok(_) => cert_send(&ctx.account.read().await.root_ca_cert),
@@ -341,25 +325,28 @@ async fn main_embassy_ui(req: Request<Body>, ctx: RpcContext) -> Result<Response
} }
} }
(&Method::GET, _) => { (&Method::GET, _) => {
let uri_path = UiMode::Main.path( let uri_path = request_parts
request_parts .uri
.uri .path()
.path() .strip_prefix('/')
.strip_prefix('/') .unwrap_or(request_parts.uri.path());
.unwrap_or(request_parts.uri.path()),
);
let file = EMBEDDED_UIS let full_path = Path::new(selected_root_dir).join(uri_path);
.get_file(&*uri_path) file_send(
.or_else(|| EMBEDDED_UIS.get_file(&*UiMode::Main.path("index.html"))); &request_parts,
if tokio::fs::metadata(&full_path)
if let Some(file) = file {
FileData::from_embedded(&request_parts, file)
.into_response(&request_parts)
.await .await
} else { .ok()
Ok(not_found()) .map(|f| f.is_file())
} .unwrap_or(false)
{
full_path
} else {
Path::new(selected_root_dir).join("index.html")
},
&accept_encoding,
)
.await
} }
_ => Ok(method_not_allowed()), _ => Ok(method_not_allowed()),
} }
@@ -422,158 +409,118 @@ fn cert_send(cert: &X509) -> Result<Response<Body>, Error> {
.with_kind(ErrorKind::Network) .with_kind(ErrorKind::Network)
} }
struct FileData { async fn file_send(
data: Body, req: &RequestParts,
len: Option<u64>, path: impl AsRef<Path>,
encoding: Option<&'static str>, accept_encoding: &[&str],
e_tag: String, ) -> Result<Response<Body>, Error> {
mime: Option<String>, // Serve a file by asynchronously reading it by chunks using tokio-util crate.
}
impl FileData {
fn from_embedded(req: &RequestParts, file: &'static include_dir::File<'static>) -> Self {
let path = file.path();
let (encoding, data) = req
.headers
.get_all(ACCEPT_ENCODING)
.into_iter()
.filter_map(|h| h.to_str().ok())
.flat_map(|s| s.split(","))
.filter_map(|s| s.split(";").next())
.map(|s| s.trim())
.fold((None, file.contents()), |acc, e| {
if let Some(file) = (e == "br")
.then_some(())
.and_then(|_| EMBEDDED_UIS.get_file(format!("{}.br", path.display())))
{
(Some("br"), file.contents())
} else if let Some(file) = (e == "gzip" && acc.0 != Some("br"))
.then_some(())
.and_then(|_| EMBEDDED_UIS.get_file(format!("{}.gz", path.display())))
{
(Some("gzip"), file.contents())
} else {
acc
}
});
Self { let path = path.as_ref();
len: Some(data.len() as u64),
encoding, let file = File::open(path)
data: data.into(), .await
e_tag: e_tag(path, None), .with_ctx(|_| (ErrorKind::Filesystem, path.display().to_string()))?;
mime: MimeGuess::from_path(path) let metadata = file
.first() .metadata()
.map(|m| m.essence_str().to_owned()), .await
} .with_ctx(|_| (ErrorKind::Filesystem, path.display().to_string()))?;
let e_tag = e_tag(path, &metadata)?;
let mut builder = Response::builder();
builder = with_content_type(path, builder);
builder = builder.header(http::header::ETAG, &e_tag);
builder = builder.header(
http::header::CACHE_CONTROL,
"public, max-age=21000000, immutable",
);
if req
.headers
.get_all(http::header::CONNECTION)
.iter()
.flat_map(|s| s.to_str().ok())
.flat_map(|s| s.split(","))
.any(|s| s.trim() == "keep-alive")
{
builder = builder.header(http::header::CONNECTION, "keep-alive");
} }
async fn from_path(req: &RequestParts, path: &Path) -> Result<Self, Error> { if req
let encoding = req .headers
.headers .get("if-none-match")
.get_all(ACCEPT_ENCODING) .and_then(|h| h.to_str().ok())
.into_iter() == Some(e_tag.as_str())
.filter_map(|h| h.to_str().ok()) {
.flat_map(|s| s.split(",")) builder = builder.status(StatusCode::NOT_MODIFIED);
.filter_map(|s| s.split(";").next()) builder.body(Body::empty())
.map(|s| s.trim()) } else {
.any(|e| e == "gzip") let body = if false && accept_encoding.contains(&"br") && metadata.len() > u16::MAX as u64 {
.then_some("gzip"); builder = builder.header(CONTENT_ENCODING, "br");
Body::wrap_stream(ReaderStream::new(BrotliEncoder::new(BufReader::new(file))))
let file = File::open(path) } else if accept_encoding.contains(&"gzip") && metadata.len() > u16::MAX as u64 {
.await builder = builder.header(CONTENT_ENCODING, "gzip");
.with_ctx(|_| (ErrorKind::Filesystem, path.display().to_string()))?; Body::wrap_stream(ReaderStream::new(GzipEncoder::new(BufReader::new(file))))
let metadata = file
.metadata()
.await
.with_ctx(|_| (ErrorKind::Filesystem, path.display().to_string()))?;
let e_tag = e_tag(path, Some(&metadata));
let (len, data) = if encoding == Some("gzip") {
(
None,
Body::wrap_stream(ReaderStream::new(GzipEncoder::new(BufReader::new(file)))),
)
} else { } else {
( builder = with_content_length(&metadata, builder);
Some(metadata.len()), Body::wrap_stream(ReaderStream::new(file))
Body::wrap_stream(ReaderStream::new(file)),
)
}; };
builder.body(body)
Ok(Self {
data,
len,
encoding,
e_tag,
mime: MimeGuess::from_path(path)
.first()
.map(|m| m.essence_str().to_owned()),
})
}
async fn into_response(self, req: &RequestParts) -> Result<Response<Body>, Error> {
let mut builder = Response::builder();
if let Some(mime) = self.mime {
builder = builder.header(http::header::CONTENT_TYPE, &*mime);
}
builder = builder.header(http::header::ETAG, &*self.e_tag);
builder = builder.header(
http::header::CACHE_CONTROL,
"public, max-age=21000000, immutable",
);
if req
.headers
.get_all(http::header::CONNECTION)
.iter()
.flat_map(|s| s.to_str().ok())
.flat_map(|s| s.split(","))
.any(|s| s.trim() == "keep-alive")
{
builder = builder.header(http::header::CONNECTION, "keep-alive");
}
if req
.headers
.get("if-none-match")
.and_then(|h| h.to_str().ok())
== Some(self.e_tag.as_ref())
{
builder = builder.status(StatusCode::NOT_MODIFIED);
builder.body(Body::empty())
} else {
if let Some(len) = self.len {
builder = builder.header(http::header::CONTENT_LENGTH, len);
}
if let Some(encoding) = self.encoding {
builder = builder.header(http::header::CONTENT_ENCODING, encoding);
}
builder.body(self.data)
}
.with_kind(ErrorKind::Network)
} }
.with_kind(ErrorKind::Network)
} }
fn e_tag(path: &Path, metadata: Option<&Metadata>) -> String { fn e_tag(path: &Path, metadata: &Metadata) -> Result<String, Error> {
let modified = metadata.modified().with_kind(ErrorKind::Filesystem)?;
let mut hasher = sha2::Sha256::new(); let mut hasher = sha2::Sha256::new();
hasher.update(format!("{:?}", path).as_bytes()); hasher.update(format!("{:?}", path).as_bytes());
if let Some(modified) = metadata.and_then(|m| m.modified().ok()) { hasher.update(
hasher.update( format!(
format!( "{}",
"{}", modified
modified .duration_since(UNIX_EPOCH)
.duration_since(UNIX_EPOCH) .unwrap_or_default()
.unwrap_or_default() .as_secs()
.as_secs() )
) .as_bytes(),
.as_bytes(), );
);
}
let res = hasher.finalize(); let res = hasher.finalize();
format!( Ok(format!(
"\"{}\"", "\"{}\"",
base32::encode(base32::Alphabet::RFC4648 { padding: false }, res.as_slice()).to_lowercase() base32::encode(base32::Alphabet::RFC4648 { padding: false }, res.as_slice()).to_lowercase()
) ))
}
///https://en.wikipedia.org/wiki/Media_type
fn with_content_type(path: &Path, builder: Builder) -> Builder {
let content_type = match path.extension() {
Some(os_str) => match os_str.to_str() {
Some("apng") => "image/apng",
Some("avif") => "image/avif",
Some("flif") => "image/flif",
Some("gif") => "image/gif",
Some("jpg") | Some("jpeg") | Some("jfif") | Some("pjpeg") | Some("pjp") => "image/jpeg",
Some("jxl") => "image/jxl",
Some("png") => "image/png",
Some("svg") => "image/svg+xml",
Some("webp") => "image/webp",
Some("mng") | Some("x-mng") => "image/x-mng",
Some("css") => "text/css",
Some("csv") => "text/csv",
Some("html") => "text/html",
Some("php") => "text/php",
Some("plain") | Some("md") | Some("txt") => "text/plain",
Some("xml") => "text/xml",
Some("js") => "text/javascript",
Some("wasm") => "application/wasm",
None | Some(_) => "text/plain",
},
None => "text/plain",
};
builder.header(http::header::CONTENT_TYPE, content_type)
}
fn with_content_length(metadata: &Metadata, builder: Builder) -> Builder {
builder.header(http::header::CONTENT_LENGTH, metadata.len())
} }

View File

@@ -1,80 +1,32 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::sync::atomic::AtomicBool;
use std::sync::{Arc, Weak}; use std::sync::{Arc, Weak};
use std::time::Duration;
use clap::ArgMatches; use clap::ArgMatches;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use futures::future::BoxFuture; use futures::future::BoxFuture;
use futures::{FutureExt, TryStreamExt}; use futures::FutureExt;
use helpers::NonDetachingJoinHandle;
use itertools::Itertools;
use lazy_static::lazy_static;
use regex::Regex;
use rpc_toolkit::command; use rpc_toolkit::command;
use rpc_toolkit::yajrc::RpcError;
use tokio::net::TcpStream; use tokio::net::TcpStream;
use tokio::process::Command; use tokio::sync::Mutex;
use tokio::sync::{mpsc, oneshot};
use tokio::time::Instant;
use torut::control::{AsyncEvent, AuthenticatedConn, ConnError}; use torut::control::{AsyncEvent, AuthenticatedConn, ConnError};
use torut::onion::{OnionAddressV3, TorSecretKeyV3}; use torut::onion::{OnionAddressV3, TorSecretKeyV3};
use tracing::instrument; use tracing::instrument;
use crate::context::{CliContext, RpcContext}; use crate::context::RpcContext;
use crate::logs::{
cli_logs_generic_follow, cli_logs_generic_nofollow, fetch_logs, follow_logs, journalctl,
LogFollowResponse, LogResponse, LogSource,
};
use crate::util::serde::{display_serializable, IoFormat}; use crate::util::serde::{display_serializable, IoFormat};
use crate::util::{display_none, Invoke};
use crate::{Error, ErrorKind, ResultExt as _}; use crate::{Error, ErrorKind, ResultExt as _};
pub const SYSTEMD_UNIT: &str = "tor@default";
const STARTING_HEALTH_TIMEOUT: u64 = 120; // 2min
enum ErrorLogSeverity {
Fatal { wipe_state: bool },
Unknown { wipe_state: bool },
}
lazy_static! {
static ref LOG_REGEXES: Vec<(Regex, ErrorLogSeverity)> = vec![(
Regex::new("This could indicate a route manipulation attack, network overload, bad local network connectivity, or a bug\\.").unwrap(),
ErrorLogSeverity::Unknown { wipe_state: true }
),(
Regex::new("died due to an invalid selected path").unwrap(),
ErrorLogSeverity::Fatal { wipe_state: false }
),(
Regex::new("Tor has not observed any network activity for the past").unwrap(),
ErrorLogSeverity::Unknown { wipe_state: false }
)];
static ref PROGRESS_REGEX: Regex = Regex::new("PROGRESS=([0-9]+)").unwrap();
}
#[test] #[test]
fn random_key() { fn random_key() {
println!("x'{}'", hex::encode(rand::random::<[u8; 32]>())); println!("x'{}'", hex::encode(rand::random::<[u8; 32]>()));
} }
#[command(subcommands(list_services, logs, reset))] #[command(subcommands(list_services))]
pub fn tor() -> Result<(), Error> { pub fn tor() -> Result<(), Error> {
Ok(()) Ok(())
} }
#[command(display(display_none))]
pub async fn reset(
#[context] ctx: RpcContext,
#[arg(rename = "wipe-state", short = 'w', long = "wipe-state")] wipe_state: bool,
#[arg] reason: String,
) -> Result<(), Error> {
ctx.net_controller
.tor
.reset(wipe_state, Error::new(eyre!("{reason}"), ErrorKind::Tor))
.await
}
fn display_services(services: Vec<OnionAddressV3>, matches: &ArgMatches) { fn display_services(services: Vec<OnionAddressV3>, matches: &ArgMatches) {
use prettytable::*; use prettytable::*;
@@ -100,343 +52,73 @@ pub async fn list_services(
ctx.net_controller.tor.list_services().await ctx.net_controller.tor.list_services().await
} }
#[command(
custom_cli(cli_logs(async, context(CliContext))),
subcommands(self(logs_nofollow(async)), logs_follow),
display(display_none)
)]
pub async fn logs(
#[arg(short = 'l', long = "limit")] limit: Option<usize>,
#[arg(short = 'c', long = "cursor")] cursor: Option<String>,
#[arg(short = 'B', long = "before", default)] before: bool,
#[arg(short = 'f', long = "follow", default)] follow: bool,
) -> Result<(Option<usize>, Option<String>, bool, bool), Error> {
Ok((limit, cursor, before, follow))
}
pub async fn cli_logs(
ctx: CliContext,
(limit, cursor, before, follow): (Option<usize>, Option<String>, bool, bool),
) -> Result<(), RpcError> {
if follow {
if cursor.is_some() {
return Err(RpcError::from(Error::new(
eyre!("The argument '--cursor <cursor>' cannot be used with '--follow'"),
crate::ErrorKind::InvalidRequest,
)));
}
if before {
return Err(RpcError::from(Error::new(
eyre!("The argument '--before' cannot be used with '--follow'"),
crate::ErrorKind::InvalidRequest,
)));
}
cli_logs_generic_follow(ctx, "net.tor.logs.follow", None, limit).await
} else {
cli_logs_generic_nofollow(ctx, "net.tor.logs", None, limit, cursor, before).await
}
}
pub async fn logs_nofollow(
_ctx: (),
(limit, cursor, before, _): (Option<usize>, Option<String>, bool, bool),
) -> Result<LogResponse, Error> {
fetch_logs(LogSource::Service(SYSTEMD_UNIT), limit, cursor, before).await
}
#[command(rpc_only, rename = "follow", display(display_none))]
pub async fn logs_follow(
#[context] ctx: RpcContext,
#[parent_data] (limit, _, _, _): (Option<usize>, Option<String>, bool, bool),
) -> Result<LogFollowResponse, Error> {
follow_logs(ctx, LogSource::Service(SYSTEMD_UNIT), limit).await
}
fn event_handler(_event: AsyncEvent<'static>) -> BoxFuture<'static, Result<(), ConnError>> { fn event_handler(_event: AsyncEvent<'static>) -> BoxFuture<'static, Result<(), ConnError>> {
async move { Ok(()) }.boxed() async move { Ok(()) }.boxed()
} }
pub struct TorController(TorControl); pub struct TorController(Mutex<TorControllerInner>);
impl TorController { impl TorController {
pub fn new(tor_control: SocketAddr, tor_socks: SocketAddr) -> Self { pub async fn init(tor_control: SocketAddr) -> Result<Self, Error> {
TorController(TorControl::new(tor_control, tor_socks)) Ok(TorController(Mutex::new(
TorControllerInner::init(tor_control).await?,
)))
} }
pub async fn add( pub async fn add(
&self, &self,
key: TorSecretKeyV3, key: &TorSecretKeyV3,
external: u16, external: u16,
target: SocketAddr, target: SocketAddr,
) -> Result<Arc<()>, Error> { ) -> Result<Arc<()>, Error> {
let (reply, res) = oneshot::channel(); self.0.lock().await.add(key, external, target).await
self.0
.send
.send(TorCommand::AddOnion {
key,
external,
target,
reply,
})
.ok()
.ok_or_else(|| Error::new(eyre!("TorControl died"), ErrorKind::Tor))?;
res.await
.ok()
.ok_or_else(|| Error::new(eyre!("TorControl died"), ErrorKind::Tor))
} }
pub async fn gc( pub async fn gc(&self, key: &TorSecretKeyV3, external: u16) -> Result<(), Error> {
&self, self.0.lock().await.gc(key, external).await
key: Option<TorSecretKeyV3>,
external: Option<u16>,
) -> Result<(), Error> {
self.0
.send
.send(TorCommand::GC { key, external })
.ok()
.ok_or_else(|| Error::new(eyre!("TorControl died"), ErrorKind::Tor))
}
pub async fn reset(&self, wipe_state: bool, context: Error) -> Result<(), Error> {
self.0
.send
.send(TorCommand::Reset {
wipe_state,
context,
})
.ok()
.ok_or_else(|| Error::new(eyre!("TorControl died"), ErrorKind::Tor))
} }
pub async fn list_services(&self) -> Result<Vec<OnionAddressV3>, Error> { pub async fn list_services(&self) -> Result<Vec<OnionAddressV3>, Error> {
let (reply, res) = oneshot::channel(); self.0.lock().await.list_services().await
self.0
.send
.send(TorCommand::GetInfo {
query: "onions/current".into(),
reply,
})
.ok()
.ok_or_else(|| Error::new(eyre!("TorControl died"), ErrorKind::Tor))?;
res.await
.ok()
.ok_or_else(|| Error::new(eyre!("TorControl died"), ErrorKind::Tor))??
.lines()
.map(|l| l.trim())
.filter(|l| !l.is_empty())
.map(|l| l.parse().with_kind(ErrorKind::Tor))
.collect()
} }
} }
type AuthenticatedConnection = AuthenticatedConn< type AuthenticatedConnection = AuthenticatedConn<
TcpStream, TcpStream,
Box<dyn Fn(AsyncEvent<'static>) -> BoxFuture<'static, Result<(), ConnError>> + Send + Sync>, fn(AsyncEvent<'static>) -> BoxFuture<'static, Result<(), ConnError>>,
>; >;
enum TorCommand { pub struct TorControllerInner {
AddOnion { control_addr: SocketAddr,
key: TorSecretKeyV3, connection: AuthenticatedConnection,
services: BTreeMap<String, BTreeMap<u16, BTreeMap<SocketAddr, Weak<()>>>>,
}
impl TorControllerInner {
#[instrument(skip(self))]
async fn add(
&mut self,
key: &TorSecretKeyV3,
external: u16, external: u16,
target: SocketAddr, target: SocketAddr,
reply: oneshot::Sender<Arc<()>>, ) -> Result<Arc<()>, Error> {
}, let mut rm_res = Ok(());
GC { let onion_base = key
key: Option<TorSecretKeyV3>, .public()
external: Option<u16>, .get_onion_address()
}, .get_address_without_dot_onion();
GetInfo { let mut service = if let Some(service) = self.services.remove(&onion_base) {
query: String, rm_res = self.connection.del_onion(&onion_base).await;
reply: oneshot::Sender<Result<String, Error>>, service
}, } else {
Reset { BTreeMap::new()
wipe_state: bool, };
context: Error, let mut binding = service.remove(&external).unwrap_or_default();
}, let rc = if let Some(rc) = Weak::upgrade(&binding.remove(&target).unwrap_or_default()) {
} rc
} else {
#[instrument(skip_all)] Arc::new(())
async fn torctl( };
tor_control: SocketAddr, binding.insert(target, Arc::downgrade(&rc));
tor_socks: SocketAddr, service.insert(external, binding);
recv: &mut mpsc::UnboundedReceiver<TorCommand>,
services: &mut BTreeMap<[u8; 64], BTreeMap<u16, BTreeMap<SocketAddr, Weak<()>>>>,
wipe_state: &AtomicBool,
health_timeout: &mut Duration,
) -> Result<(), Error> {
let bootstrap = async {
if Command::new("systemctl")
.arg("is-active")
.arg("--quiet")
.arg("tor")
.invoke(ErrorKind::Tor)
.await
.is_ok()
{
Command::new("systemctl")
.arg("stop")
.arg("tor")
.invoke(ErrorKind::Tor)
.await?;
for _ in 0..30 {
if TcpStream::connect(tor_control).await.is_err() {
break;
}
tokio::time::sleep(Duration::from_secs(1)).await;
}
if TcpStream::connect(tor_control).await.is_ok() {
return Err(Error::new(
eyre!("Tor is failing to shut down"),
ErrorKind::Tor,
));
}
}
if wipe_state.load(std::sync::atomic::Ordering::SeqCst) {
tokio::fs::remove_dir_all("/var/lib/tor").await?;
wipe_state.store(false, std::sync::atomic::Ordering::SeqCst);
}
tokio::fs::create_dir_all("/var/lib/tor").await?;
Command::new("chown")
.arg("-R")
.arg("debian-tor")
.arg("/var/lib/tor")
.invoke(ErrorKind::Filesystem)
.await?;
Command::new("systemctl")
.arg("start")
.arg("tor")
.invoke(ErrorKind::Tor)
.await?;
let logs = journalctl(LogSource::Service(SYSTEMD_UNIT), 0, None, false, true).await?;
let mut tcp_stream = None;
for _ in 0..60 {
if let Ok(conn) = TcpStream::connect(tor_control).await {
tcp_stream = Some(conn);
break;
}
tokio::time::sleep(Duration::from_secs(1)).await;
}
let tcp_stream = tcp_stream.ok_or_else(|| {
Error::new(eyre!("Timed out waiting for tor to start"), ErrorKind::Tor)
})?;
tracing::info!("Tor is started");
let mut conn = torut::control::UnauthenticatedConn::new(tcp_stream);
let auth = conn
.load_protocol_info()
.await?
.make_auth_data()?
.ok_or_else(|| eyre!("Cookie Auth Not Available"))
.with_kind(crate::ErrorKind::Tor)?;
conn.authenticate(&auth).await?;
let mut connection: AuthenticatedConnection = conn.into_authenticated().await;
connection.set_async_event_handler(Some(Box::new(|event| event_handler(event))));
let mut bootstrapped = false;
let mut last_increment = (String::new(), Instant::now());
for _ in 0..300 {
match connection.get_info("status/bootstrap-phase").await {
Ok(a) => {
if a.contains("TAG=done") {
bootstrapped = true;
break;
}
if let Some(p) = PROGRESS_REGEX.captures(&a) {
if let Some(p) = p.get(1) {
if p.as_str() != &*last_increment.0 {
last_increment = (p.as_str().into(), Instant::now());
}
}
}
}
Err(e) => {
let e = Error::from(e);
tracing::error!("{}", e);
tracing::debug!("{:?}", e);
}
}
if last_increment.1.elapsed() > Duration::from_secs(30) {
return Err(Error::new(
eyre!("Tor stuck bootstrapping at {}%", last_increment.0),
ErrorKind::Tor,
));
}
tokio::time::sleep(Duration::from_secs(1)).await;
}
if !bootstrapped {
return Err(Error::new(
eyre!("Timed out waiting for tor to bootstrap"),
ErrorKind::Tor,
));
}
Ok((connection, logs))
};
let pre_handler = async {
while let Some(command) = recv.recv().await {
match command {
TorCommand::AddOnion {
key,
external,
target,
reply,
} => {
let mut service = if let Some(service) = services.remove(&key.as_bytes()) {
service
} else {
BTreeMap::new()
};
let mut binding = service.remove(&external).unwrap_or_default();
let rc = if let Some(rc) =
Weak::upgrade(&binding.remove(&target).unwrap_or_default())
{
rc
} else {
Arc::new(())
};
binding.insert(target, Arc::downgrade(&rc));
service.insert(external, binding);
services.insert(key.as_bytes(), service);
reply.send(rc).unwrap_or_default();
}
TorCommand::GetInfo { reply, .. } => {
reply
.send(Err(Error::new(
eyre!("Tor has not finished bootstrapping..."),
ErrorKind::Tor,
)))
.unwrap_or_default();
}
TorCommand::GC { .. } => (),
TorCommand::Reset {
wipe_state: new_wipe_state,
context,
} => {
wipe_state.fetch_or(new_wipe_state, std::sync::atomic::Ordering::SeqCst);
return Err(context);
}
}
}
Ok(())
};
let (mut connection, mut logs) = tokio::select! {
res = bootstrap => res?,
res = pre_handler => return res,
};
let hck_key = TorSecretKeyV3::generate();
connection
.add_onion_v3(
&hck_key,
false,
false,
false,
None,
&mut [(80, SocketAddr::from(([127, 0, 0, 1], 80)))].iter(),
)
.await?;
for (key, service) in std::mem::take(services) {
let key = TorSecretKeyV3::from(key);
let bindings = service let bindings = service
.iter() .iter()
.flat_map(|(ext, int)| { .flat_map(|(ext, int)| {
@@ -445,240 +127,85 @@ async fn torctl(
.map(|(addr, _)| (*ext, SocketAddr::from(*addr))) .map(|(addr, _)| (*ext, SocketAddr::from(*addr)))
}) })
.collect::<Vec<_>>(); .collect::<Vec<_>>();
if !bindings.is_empty() { self.services.insert(onion_base, service);
services.insert(key.as_bytes(), service); rm_res?;
connection self.connection
.add_onion_v3(&key, false, false, false, None, &mut bindings.iter()) .add_onion_v3(key, false, false, false, None, &mut bindings.iter())
.await?; .await?;
} Ok(rc)
} }
let handler = async { #[instrument(skip(self))]
while let Some(command) = recv.recv().await { async fn gc(&mut self, key: &TorSecretKeyV3, external: u16) -> Result<(), Error> {
match command { let onion_base = key
TorCommand::AddOnion { .public()
key, .get_onion_address()
external, .get_address_without_dot_onion();
target, if let Some(mut service) = self.services.remove(&onion_base) {
reply, if let Some(mut binding) = service.remove(&external) {
} => { binding = binding
let mut rm_res = Ok(()); .into_iter()
let onion_base = key .filter(|(_, rc)| rc.strong_count() > 0)
.public() .collect();
.get_onion_address() if !binding.is_empty() {
.get_address_without_dot_onion();
let mut service = if let Some(service) = services.remove(&key.as_bytes()) {
rm_res = connection.del_onion(&onion_base).await;
service
} else {
BTreeMap::new()
};
let mut binding = service.remove(&external).unwrap_or_default();
let rc = if let Some(rc) =
Weak::upgrade(&binding.remove(&target).unwrap_or_default())
{
rc
} else {
Arc::new(())
};
binding.insert(target, Arc::downgrade(&rc));
service.insert(external, binding); service.insert(external, binding);
let bindings = service
.iter()
.flat_map(|(ext, int)| {
int.iter()
.find(|(_, rc)| rc.strong_count() > 0)
.map(|(addr, _)| (*ext, SocketAddr::from(*addr)))
})
.collect::<Vec<_>>();
services.insert(key.as_bytes(), service);
reply.send(rc).unwrap_or_default();
rm_res?;
connection
.add_onion_v3(&key, false, false, false, None, &mut bindings.iter())
.await?;
}
TorCommand::GC { key, external } => {
for key in if key.is_some() {
itertools::Either::Left(key.into_iter().map(|k| k.as_bytes()))
} else {
itertools::Either::Right(services.keys().cloned().collect_vec().into_iter())
} {
let key = TorSecretKeyV3::from(key);
let onion_base = key
.public()
.get_onion_address()
.get_address_without_dot_onion();
if let Some(mut service) = services.remove(&key.as_bytes()) {
for external in if external.is_some() {
itertools::Either::Left(external.into_iter())
} else {
itertools::Either::Right(
service.keys().copied().collect_vec().into_iter(),
)
} {
if let Some(mut binding) = service.remove(&external) {
binding = binding
.into_iter()
.filter(|(_, rc)| rc.strong_count() > 0)
.collect();
if !binding.is_empty() {
service.insert(external, binding);
}
}
}
let rm_res = connection.del_onion(&onion_base).await;
if !service.is_empty() {
let bindings = service
.iter()
.flat_map(|(ext, int)| {
int.iter()
.find(|(_, rc)| rc.strong_count() > 0)
.map(|(addr, _)| (*ext, SocketAddr::from(*addr)))
})
.collect::<Vec<_>>();
if !bindings.is_empty() {
services.insert(key.as_bytes(), service);
}
rm_res?;
if !bindings.is_empty() {
connection
.add_onion_v3(
&key,
false,
false,
false,
None,
&mut bindings.iter(),
)
.await?;
}
} else {
rm_res?;
}
}
}
}
TorCommand::GetInfo { query, reply } => {
reply
.send(connection.get_info(&query).await.with_kind(ErrorKind::Tor))
.unwrap_or_default();
}
TorCommand::Reset {
wipe_state: new_wipe_state,
context,
} => {
wipe_state.fetch_or(new_wipe_state, std::sync::atomic::Ordering::SeqCst);
return Err(context);
} }
} }
} let rm_res = self.connection.del_onion(&onion_base).await;
Ok(()) if !service.is_empty() {
}; let bindings = service
let log_parser = async { .iter()
while let Some(log) = logs.try_next().await? { .flat_map(|(ext, int)| {
for (regex, severity) in &*LOG_REGEXES { int.iter()
if regex.is_match(&log.message) { .find(|(_, rc)| rc.strong_count() > 0)
let (check, wipe_state) = match severity { .map(|(addr, _)| (*ext, SocketAddr::from(*addr)))
ErrorLogSeverity::Fatal { wipe_state } => (false, *wipe_state), })
ErrorLogSeverity::Unknown { wipe_state } => (true, *wipe_state), .collect::<Vec<_>>();
}; self.services.insert(onion_base, service);
if !check rm_res?;
|| tokio::time::timeout( self.connection
Duration::from_secs(30), .add_onion_v3(&key, false, false, false, None, &mut bindings.iter())
tokio_socks::tcp::Socks5Stream::connect( .await?;
tor_socks,
(hck_key.public().get_onion_address().to_string(), 80),
),
)
.await
.map_err(|e| tracing::warn!("Tor is confirmed to be down: {e}"))
.and_then(|a| {
a.map_err(|e| tracing::warn!("Tor is confirmed to be down: {e}"))
})
.is_err()
{
if wipe_state {
Command::new("systemctl")
.arg("stop")
.arg("tor")
.invoke(ErrorKind::Tor)
.await?;
tokio::fs::remove_dir_all("/var/lib/tor").await?;
}
return Err(Error::new(eyre!("{}", log.message), ErrorKind::Tor));
}
}
}
}
Err(Error::new(eyre!("Log stream terminated"), ErrorKind::Tor))
};
let health_checker = async {
let mut last_success = Instant::now();
loop {
tokio::time::sleep(Duration::from_secs(30)).await;
if let Err(e) = tokio::time::timeout(
Duration::from_secs(30),
tokio_socks::tcp::Socks5Stream::connect(
tor_socks,
(hck_key.public().get_onion_address().to_string(), 80),
),
)
.await
.map_err(|e| e.to_string())
.and_then(|e| e.map_err(|e| e.to_string()))
{
if last_success.elapsed() > *health_timeout {
let err = Error::new(eyre!("Tor health check failed for longer than current timeout ({health_timeout:?})"), crate::ErrorKind::Tor);
*health_timeout *= 2;
wipe_state.store(true, std::sync::atomic::Ordering::SeqCst);
return Err(err);
}
} else { } else {
last_success = Instant::now(); rm_res?;
} }
} }
};
tokio::select! { Ok(())
res = handler => res?,
res = log_parser => res?,
res = health_checker => res?,
} }
Ok(()) #[instrument]
} async fn init(tor_control: SocketAddr) -> Result<Self, Error> {
let mut conn = torut::control::UnauthenticatedConn::new(
TcpStream::connect(tor_control).await?, // TODO
);
let auth = conn
.load_protocol_info()
.await?
.make_auth_data()?
.ok_or_else(|| eyre!("Cookie Auth Not Available"))
.with_kind(crate::ErrorKind::Tor)?;
conn.authenticate(&auth).await?;
let mut connection: AuthenticatedConnection = conn.into_authenticated().await;
connection.set_async_event_handler(Some(event_handler));
struct TorControl { Ok(Self {
_thread: NonDetachingJoinHandle<()>, control_addr: tor_control,
send: mpsc::UnboundedSender<TorCommand>, connection,
} services: BTreeMap::new(),
impl TorControl { })
pub fn new(tor_control: SocketAddr, tor_socks: SocketAddr) -> Self { }
let (send, mut recv) = mpsc::unbounded_channel();
Self { #[instrument(skip(self))]
_thread: tokio::spawn(async move { async fn list_services(&mut self) -> Result<Vec<OnionAddressV3>, Error> {
let mut services = BTreeMap::new(); self.connection
let wipe_state = AtomicBool::new(false); .get_info("onions/current")
let mut health_timeout = Duration::from_secs(STARTING_HEALTH_TIMEOUT); .await?
while let Err(e) = torctl( .lines()
tor_control, .map(|l| l.trim())
tor_socks, .filter(|l| !l.is_empty())
&mut recv, .map(|l| l.parse().with_kind(ErrorKind::Tor))
&mut services, .collect()
&wipe_state,
&mut health_timeout,
)
.await
{
tracing::error!("{e}: Restarting tor");
tracing::debug!("{e:?}");
}
tracing::info!("TorControl is shut down.")
})
.into(),
send,
}
} }
} }

View File

@@ -1,5 +1,5 @@
use std::convert::Infallible; use std::convert::Infallible;
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr}; use std::net::{Ipv4Addr, Ipv6Addr};
use std::path::Path; use std::path::Path;
use async_stream::try_stream; use async_stream::try_stream;
@@ -7,29 +7,24 @@ use color_eyre::eyre::eyre;
use futures::stream::BoxStream; use futures::stream::BoxStream;
use futures::{StreamExt, TryStreamExt}; use futures::{StreamExt, TryStreamExt};
use ipnet::{Ipv4Net, Ipv6Net}; use ipnet::{Ipv4Net, Ipv6Net};
use tokio::net::{TcpListener, TcpStream};
use tokio::process::Command; use tokio::process::Command;
use crate::util::Invoke; use crate::util::Invoke;
use crate::Error; use crate::Error;
fn parse_iface_ip(output: &str) -> Result<Vec<&str>, Error> { fn parse_iface_ip(output: &str) -> Result<Option<&str>, Error> {
let output = output.trim(); let output = output.trim();
if output.is_empty() { if output.is_empty() {
return Ok(Vec::new()); return Ok(None);
} }
let mut res = Vec::new(); if let Some(ip) = output.split_ascii_whitespace().nth(3) {
for line in output.lines() { Ok(Some(ip))
if let Some(ip) = line.split_ascii_whitespace().nth(3) { } else {
res.push(ip) Err(Error::new(
} else { eyre!("malformed output from `ip`"),
return Err(Error::new( crate::ErrorKind::Network,
eyre!("malformed output from `ip`"), ))
crate::ErrorKind::Network,
));
}
} }
Ok(res)
} }
pub async fn get_iface_ipv4_addr(iface: &str) -> Result<Option<(Ipv4Addr, Ipv4Net)>, Error> { pub async fn get_iface_ipv4_addr(iface: &str) -> Result<Option<(Ipv4Addr, Ipv4Net)>, Error> {
@@ -43,9 +38,7 @@ pub async fn get_iface_ipv4_addr(iface: &str) -> Result<Option<(Ipv4Addr, Ipv4Ne
.invoke(crate::ErrorKind::Network) .invoke(crate::ErrorKind::Network)
.await?, .await?,
)?)? )?)?
.into_iter()
.map(|s| Ok::<_, Error>((s.split("/").next().unwrap().parse()?, s.parse()?))) .map(|s| Ok::<_, Error>((s.split("/").next().unwrap().parse()?, s.parse()?)))
.next()
.transpose()?) .transpose()?)
} }
@@ -60,8 +53,6 @@ pub async fn get_iface_ipv6_addr(iface: &str) -> Result<Option<(Ipv6Addr, Ipv6Ne
.invoke(crate::ErrorKind::Network) .invoke(crate::ErrorKind::Network)
.await?, .await?,
)?)? )?)?
.into_iter()
.find(|ip| !ip.starts_with("fe80::"))
.map(|s| Ok::<_, Error>((s.split("/").next().unwrap().parse()?, s.parse()?))) .map(|s| Ok::<_, Error>((s.split("/").next().unwrap().parse()?, s.parse()?)))
.transpose()?) .transpose()?)
} }
@@ -130,37 +121,3 @@ impl<T> hyper::server::accept::Accept for SingleAccept<T> {
std::task::Poll::Ready(self.project().0.take().map(Ok)) std::task::Poll::Ready(self.project().0.take().map(Ok))
} }
} }
pub struct TcpListeners {
listeners: Vec<TcpListener>,
}
impl TcpListeners {
pub fn new(listeners: impl IntoIterator<Item = TcpListener>) -> Self {
Self {
listeners: listeners.into_iter().collect(),
}
}
pub async fn accept(&self) -> std::io::Result<(TcpStream, SocketAddr)> {
futures::future::select_all(self.listeners.iter().map(|l| Box::pin(l.accept())))
.await
.0
}
}
impl hyper::server::accept::Accept for TcpListeners {
type Conn = TcpStream;
type Error = std::io::Error;
fn poll_accept(
mut self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Option<Result<Self::Conn, Self::Error>>> {
for listener in self.listeners.iter() {
let poll = listener.poll_accept(cx);
if poll.is_ready() {
return poll.map(|a| a.map(|a| a.0)).map(Some);
}
}
std::task::Poll::Pending
}
}

View File

@@ -1,9 +1,8 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use std::convert::Infallible; use std::convert::Infallible;
use std::net::{IpAddr, Ipv6Addr, SocketAddr}; use std::net::{IpAddr, SocketAddr};
use std::str::FromStr; use std::str::FromStr;
use std::sync::{Arc, Weak}; use std::sync::{Arc, Weak};
use std::time::Duration;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use helpers::NonDetachingJoinHandle; use helpers::NonDetachingJoinHandle;
@@ -20,7 +19,7 @@ use tokio_rustls::{LazyConfigAcceptor, TlsConnector};
use crate::net::keys::Key; use crate::net::keys::Key;
use crate::net::ssl::SslManager; use crate::net::ssl::SslManager;
use crate::net::utils::SingleAccept; use crate::net::utils::SingleAccept;
use crate::util::io::{BackTrackingReader, TimeoutStream}; use crate::util::io::BackTrackingReader;
use crate::Error; use crate::Error;
// not allowed: <=1024, >=32768, 5355, 5432, 9050, 6010, 9051, 5353 // not allowed: <=1024, >=32768, 5355, 5432, 9050, 6010, 9051, 5353
@@ -42,7 +41,7 @@ impl VHostController {
hostname: Option<String>, hostname: Option<String>,
external: u16, external: u16,
target: SocketAddr, target: SocketAddr,
connect_ssl: Result<(), AlpnInfo>, connect_ssl: bool,
) -> Result<Arc<()>, Error> { ) -> Result<Arc<()>, Error> {
let mut writable = self.servers.lock().await; let mut writable = self.servers.lock().await;
let server = if let Some(server) = writable.remove(&external) { let server = if let Some(server) = writable.remove(&external) {
@@ -78,16 +77,10 @@ impl VHostController {
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)] #[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
struct TargetInfo { struct TargetInfo {
addr: SocketAddr, addr: SocketAddr,
connect_ssl: Result<(), AlpnInfo>, connect_ssl: bool,
key: Key, key: Key,
} }
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum AlpnInfo {
Reflect,
Specified(Vec<Vec<u8>>),
}
struct VHostServer { struct VHostServer {
mapping: Weak<RwLock<BTreeMap<Option<String>, BTreeMap<TargetInfo, Weak<()>>>>>, mapping: Weak<RwLock<BTreeMap<Option<String>, BTreeMap<TargetInfo, Weak<()>>>>>,
_thread: NonDetachingJoinHandle<()>, _thread: NonDetachingJoinHandle<()>,
@@ -95,7 +88,7 @@ struct VHostServer {
impl VHostServer { impl VHostServer {
async fn new(port: u16, ssl: Arc<SslManager>) -> Result<Self, Error> { async fn new(port: u16, ssl: Arc<SslManager>) -> Result<Self, Error> {
// check if port allowed // check if port allowed
let listener = TcpListener::bind(SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), port)) let listener = TcpListener::bind(SocketAddr::new([0, 0, 0, 0].into(), port))
.await .await
.with_kind(crate::ErrorKind::Network)?; .with_kind(crate::ErrorKind::Network)?;
let mapping = Arc::new(RwLock::new(BTreeMap::new())); let mapping = Arc::new(RwLock::new(BTreeMap::new()));
@@ -105,8 +98,6 @@ impl VHostServer {
loop { loop {
match listener.accept().await { match listener.accept().await {
Ok((stream, _)) => { Ok((stream, _)) => {
let stream =
Box::pin(TimeoutStream::new(stream, Duration::from_secs(300)));
let mut stream = BackTrackingReader::new(stream); let mut stream = BackTrackingReader::new(stream);
stream.start_buffering(); stream.start_buffering();
let mapping = mapping.clone(); let mapping = mapping.clone();
@@ -187,7 +178,7 @@ impl VHostServer {
let cfg = ServerConfig::builder() let cfg = ServerConfig::builder()
.with_safe_defaults() .with_safe_defaults()
.with_no_client_auth(); .with_no_client_auth();
let mut cfg = let cfg =
if mid.client_hello().signature_schemes().contains( if mid.client_hello().signature_schemes().contains(
&tokio_rustls::rustls::SignatureScheme::ED25519, &tokio_rustls::rustls::SignatureScheme::ED25519,
) { ) {
@@ -222,94 +213,49 @@ impl VHostServer {
.private_key_to_der()?, .private_key_to_der()?,
), ),
) )
} };
.with_kind(crate::ErrorKind::OpenSsl)?; let mut tls_stream = mid
match target.connect_ssl { .into_stream(Arc::new(
Ok(()) => { cfg.with_kind(crate::ErrorKind::OpenSsl)?,
let mut client_cfg = ))
.await?;
tls_stream.get_mut().0.stop_buffering();
if target.connect_ssl {
tokio::io::copy_bidirectional(
&mut tls_stream,
&mut TlsConnector::from(Arc::new(
tokio_rustls::rustls::ClientConfig::builder() tokio_rustls::rustls::ClientConfig::builder()
.with_safe_defaults() .with_safe_defaults()
.with_root_certificates({ .with_root_certificates({
let mut store = RootCertStore::empty(); let mut store = RootCertStore::empty();
store.add( store.add(
&tokio_rustls::rustls::Certificate( &tokio_rustls::rustls::Certificate(
key.root_ca().to_der()?, key.root_ca().to_der()?,
), ),
).with_kind(crate::ErrorKind::OpenSsl)?; ).with_kind(crate::ErrorKind::OpenSsl)?;
store store
}) })
.with_no_client_auth(); .with_no_client_auth(),
client_cfg.alpn_protocols = mid ))
.client_hello() .connect(
.alpn() key.key()
.into_iter() .internal_address()
.flatten() .as_str()
.map(|x| x.to_vec()) .try_into()
.collect(); .with_kind(crate::ErrorKind::OpenSsl)?,
let mut target_stream = tcp_stream,
TlsConnector::from(Arc::new(client_cfg))
.connect_with(
key.key()
.internal_address()
.as_str()
.try_into()
.with_kind(
crate::ErrorKind::OpenSsl,
)?,
tcp_stream,
|conn| {
cfg.alpn_protocols.extend(
conn.alpn_protocol()
.into_iter()
.map(|p| p.to_vec()),
)
},
)
.await
.with_kind(crate::ErrorKind::OpenSsl)?;
let mut tls_stream =
mid.into_stream(Arc::new(cfg)).await?;
tls_stream.get_mut().0.stop_buffering();
tokio::io::copy_bidirectional(
&mut tls_stream,
&mut target_stream,
) )
.await .await
} .with_kind(crate::ErrorKind::OpenSsl)?,
Err(AlpnInfo::Reflect) => { )
for proto in .await?;
mid.client_hello().alpn().into_iter().flatten() } else {
{ tokio::io::copy_bidirectional(
cfg.alpn_protocols.push(proto.into()); &mut tls_stream,
} &mut tcp_stream,
let mut tls_stream = )
mid.into_stream(Arc::new(cfg)).await?; .await?;
tls_stream.get_mut().0.stop_buffering();
tokio::io::copy_bidirectional(
&mut tls_stream,
&mut tcp_stream,
)
.await
}
Err(AlpnInfo::Specified(alpn)) => {
cfg.alpn_protocols = alpn;
let mut tls_stream =
mid.into_stream(Arc::new(cfg)).await?;
tls_stream.get_mut().0.stop_buffering();
tokio::io::copy_bidirectional(
&mut tls_stream,
&mut tcp_stream,
)
.await
}
} }
.map_or_else(
|e| match e.kind() {
std::io::ErrorKind::UnexpectedEof => Ok(()),
_ => Err(e),
},
|_| Ok(()),
)?;
} else { } else {
// 503 // 503
} }

View File

@@ -47,7 +47,7 @@ pub async fn country() -> Result<(), Error> {
} }
#[command(display(display_none))] #[command(display(display_none))]
#[instrument(skip_all)] #[instrument(skip(ctx, password))]
pub async fn add( pub async fn add(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[arg] ssid: String, #[arg] ssid: String,
@@ -103,7 +103,7 @@ pub async fn add(
} }
#[command(display(display_none))] #[command(display(display_none))]
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn connect(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<(), Error> { pub async fn connect(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<(), Error> {
let wifi_manager = wifi_manager(&ctx)?; let wifi_manager = wifi_manager(&ctx)?;
if !ssid.is_ascii() { if !ssid.is_ascii() {
@@ -155,7 +155,7 @@ pub async fn connect(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<
} }
#[command(display(display_none))] #[command(display(display_none))]
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn delete(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<(), Error> { pub async fn delete(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<(), Error> {
let wifi_manager = wifi_manager(&ctx)?; let wifi_manager = wifi_manager(&ctx)?;
if !ssid.is_ascii() { if !ssid.is_ascii() {
@@ -173,7 +173,7 @@ pub async fn delete(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<(
let is_current_removed_and_no_hardwire = let is_current_removed_and_no_hardwire =
is_current_being_removed && !interface_connected(&ctx.ethernet_interface).await?; is_current_being_removed && !interface_connected(&ctx.ethernet_interface).await?;
if is_current_removed_and_no_hardwire { if is_current_removed_and_no_hardwire {
return Err(Error::new(color_eyre::eyre::eyre!("Forbidden: Deleting this network would make your server unreachable. Either connect to ethernet or connect to a different WiFi network to remedy this."), ErrorKind::Wifi)); return Err(Error::new(color_eyre::eyre::eyre!("Forbidden: Deleting this Network would make your Embassy Unreachable. Either connect to ethernet or connect to a different WiFi network to remedy this."), ErrorKind::Wifi));
} }
wpa_supplicant wpa_supplicant
@@ -293,7 +293,7 @@ fn display_wifi_list(info: Vec<WifiListOut>, matches: &ArgMatches) {
} }
#[command(display(display_wifi_info))] #[command(display(display_wifi_info))]
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn get( pub async fn get(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[allow(unused_variables)] #[allow(unused_variables)]
@@ -347,7 +347,7 @@ pub async fn get(
} }
#[command(rename = "get", display(display_wifi_list))] #[command(rename = "get", display(display_wifi_list))]
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn get_available( pub async fn get_available(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[allow(unused_variables)] #[allow(unused_variables)]
@@ -457,7 +457,7 @@ impl WpaCli {
WpaCli { interface } WpaCli { interface }
} }
#[instrument(skip_all)] #[instrument(skip(self, psk))]
pub async fn set_add_network_low(&mut self, ssid: &Ssid, psk: &Psk) -> Result<(), Error> { pub async fn set_add_network_low(&mut self, ssid: &Ssid, psk: &Psk) -> Result<(), Error> {
let _ = Command::new("nmcli") let _ = Command::new("nmcli")
.arg("-a") .arg("-a")
@@ -473,7 +473,7 @@ impl WpaCli {
.await?; .await?;
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(self, psk))]
pub async fn add_network_low(&mut self, ssid: &Ssid, psk: &Psk) -> Result<(), Error> { pub async fn add_network_low(&mut self, ssid: &Ssid, psk: &Psk) -> Result<(), Error> {
if self.find_networks(ssid).await?.is_empty() { if self.find_networks(ssid).await?.is_empty() {
Command::new("nmcli") Command::new("nmcli")
@@ -567,7 +567,7 @@ impl WpaCli {
.await?; .await?;
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument]
pub async fn list_networks_low(&self) -> Result<BTreeMap<NetworkId, WifiInfo>, Error> { pub async fn list_networks_low(&self) -> Result<BTreeMap<NetworkId, WifiInfo>, Error> {
let r = Command::new("nmcli") let r = Command::new("nmcli")
.arg("-t") .arg("-t")
@@ -596,7 +596,7 @@ impl WpaCli {
.collect::<BTreeMap<NetworkId, WifiInfo>>()) .collect::<BTreeMap<NetworkId, WifiInfo>>())
} }
#[instrument(skip_all)] #[instrument]
pub async fn list_wifi_low(&self) -> Result<WifiList, Error> { pub async fn list_wifi_low(&self) -> Result<WifiList, Error> {
let r = Command::new("nmcli") let r = Command::new("nmcli")
.arg("-g") .arg("-g")
@@ -681,7 +681,7 @@ impl WpaCli {
}) })
.collect()) .collect())
} }
#[instrument(skip_all)] #[instrument(skip(db))]
pub async fn select_network(&mut self, db: impl DbHandle, ssid: &Ssid) -> Result<bool, Error> { pub async fn select_network(&mut self, db: impl DbHandle, ssid: &Ssid) -> Result<bool, Error> {
let m_id = self.check_active_network(ssid).await?; let m_id = self.check_active_network(ssid).await?;
match m_id { match m_id {
@@ -717,7 +717,7 @@ impl WpaCli {
} }
} }
} }
#[instrument(skip_all)] #[instrument]
pub async fn get_current_network(&self) -> Result<Option<Ssid>, Error> { pub async fn get_current_network(&self) -> Result<Option<Ssid>, Error> {
let r = Command::new("iwgetid") let r = Command::new("iwgetid")
.arg(&self.interface) .arg(&self.interface)
@@ -733,7 +733,7 @@ impl WpaCli {
Ok(Some(Ssid(network.to_owned()))) Ok(Some(Ssid(network.to_owned())))
} }
} }
#[instrument(skip_all)] #[instrument(skip(db))]
pub async fn remove_network(&mut self, db: impl DbHandle, ssid: &Ssid) -> Result<bool, Error> { pub async fn remove_network(&mut self, db: impl DbHandle, ssid: &Ssid) -> Result<bool, Error> {
let found_networks = self.find_networks(ssid).await?; let found_networks = self.find_networks(ssid).await?;
if found_networks.is_empty() { if found_networks.is_empty() {
@@ -745,7 +745,7 @@ impl WpaCli {
self.save_config(db).await?; self.save_config(db).await?;
Ok(true) Ok(true)
} }
#[instrument(skip_all)] #[instrument(skip(psk, db))]
pub async fn set_add_network( pub async fn set_add_network(
&mut self, &mut self,
db: impl DbHandle, db: impl DbHandle,
@@ -757,7 +757,7 @@ impl WpaCli {
self.save_config(db).await?; self.save_config(db).await?;
Ok(()) Ok(())
} }
#[instrument(skip_all)] #[instrument(skip(psk, db))]
pub async fn add_network( pub async fn add_network(
&mut self, &mut self,
db: impl DbHandle, db: impl DbHandle,
@@ -771,7 +771,7 @@ impl WpaCli {
} }
} }
#[instrument(skip_all)] #[instrument]
pub async fn interface_connected(interface: &str) -> Result<bool, Error> { pub async fn interface_connected(interface: &str) -> Result<bool, Error> {
let out = Command::new("ifconfig") let out = Command::new("ifconfig")
.arg(interface) .arg(interface)
@@ -792,7 +792,7 @@ pub fn country_code_parse(code: &str, _matches: &ArgMatches) -> Result<CountryCo
}) })
} }
#[instrument(skip_all)] #[instrument(skip(main_datadir))]
pub async fn synchronize_wpa_supplicant_conf<P: AsRef<Path>>( pub async fn synchronize_wpa_supplicant_conf<P: AsRef<Path>>(
main_datadir: P, main_datadir: P,
wifi_iface: &str, wifi_iface: &str,

View File

@@ -23,7 +23,7 @@ pub async fn notification() -> Result<(), Error> {
} }
#[command(display(display_serializable))] #[command(display(display_serializable))]
#[instrument(skip_all)] #[instrument(skip(ctx))]
pub async fn list( pub async fn list(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[arg] before: Option<i32>, #[arg] before: Option<i32>,
@@ -232,7 +232,7 @@ impl NotificationManager {
cache: Mutex::new(HashMap::new()), cache: Mutex::new(HashMap::new()),
} }
} }
#[instrument(skip_all)] #[instrument(skip(self, db))]
pub async fn notify<Db: DbHandle, T: NotificationType>( pub async fn notify<Db: DbHandle, T: NotificationType>(
&self, &self,
db: &mut Db, db: &mut Db,

Some files were not shown because too many files have changed in this diff Show More