Compare commits

...

46 Commits

Author SHA1 Message Date
Matt Hill
5ba68a3124 translations 2026-02-16 00:34:41 -07:00
Matt Hill
bb68c3b91c update binding for API types, add ARCHITECTURE 2026-02-16 00:05:24 -07:00
Aiden McClelland
3518eccc87 feat: add port_forwards field to Host for tracking gateway forwarding rules 2026-02-14 16:40:21 -07:00
Matt Hill
2f19188dae looking good 2026-02-14 16:37:04 -07:00
Aiden McClelland
3a63f3b840 feat: add mdns hostname metadata variant and fix vhost routing
- Add HostnameMetadata::Mdns variant to distinguish mDNS from private domains
- Mark mDNS addresses as private (public: false) since mDNS is local-only
- Fall back to null SNI entry when hostname not found in vhost mapping
- Simplify public detection in ProxyTarget filter
- Pass hostname to update_addresses for mDNS domain name generation
2026-02-14 15:34:48 -07:00
Matt Hill
098d9275f4 new service interface page 2026-02-14 12:24:16 -07:00
Matt Hill
d5c74bc22e re-arrange (#3123) 2026-02-14 08:15:50 -07:00
Aiden McClelland
49d4da03ca feat: refactor NetService to watch DB and reconcile network state
- NetService sync task now uses PatchDB DbWatch instead of being called
  directly after DB mutations
- Read gateways from DB instead of network interface context when
  updating host addresses
- gateway sync updates all host addresses in the DB
- Add Watch<u64> channel for callers to wait on sync completion
- Fix ts-rs codegen bug with #[ts(skip)] on flattened Plugin field
- Update SDK getServiceInterface.ts for new HostnameInfo shape
- Remove unnecessary HTTPS redirect in static_server.rs
- Fix tunnel/api.rs to filter for WAN IPv4 address
2026-02-13 16:21:57 -07:00
Aiden McClelland
3765465618 chore: update ts bindings for preferred port design 2026-02-13 14:23:48 -07:00
Aiden McClelland
61f820d09e Merge branch 'feat/preferred-port-design' of github.com:Start9Labs/start-os into feat/preferred-port-design 2026-02-13 13:39:25 -07:00
Aiden McClelland
db7f3341ac wip refactor 2026-02-12 14:51:33 -07:00
Matt Hill
4decf9335c fix license display in marketplace 2026-02-12 13:07:19 -07:00
Matt Hill
339e5f799a build ts types and fix i18n 2026-02-12 11:32:29 -07:00
Aiden McClelland
89d3e0cf35 Merge branch 'feat/preferred-port-design' of github.com:Start9Labs/start-os into feat/preferred-port-design 2026-02-12 10:51:32 -07:00
Aiden McClelland
638ed27599 feat: replace SourceFilter with IpNet, add policy routing, remove MASQUERADE 2026-02-12 10:51:26 -07:00
Matt Hill
da75b8498e Merge branch 'next/major' of github.com:Start9Labs/start-os into feat/preferred-port-design 2026-02-12 08:28:36 -07:00
Matt Hill
8ef4ecf5ac outbound gateway support (#3120)
* Multiple (#3111)

* fix alerts i18n, fix status display, better, remove usb media, hide shutdown for install complete

* trigger change detection for localize pipe and round out implementing localize pipe for consistency even though not needed

* Fix PackageInfoShort to handle LocaleString on releaseNotes (#3112)

* Fix PackageInfoShort to handle LocaleString on releaseNotes

* fix: filter by target_version in get_matching_models and pass otherVersions from install

* chore: add exver documentation for ai agents

* frontend plus some be types

---------

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
2026-02-12 08:27:09 -07:00
Matt Hill
0260c1532d fix ssh, undeprecate wifi (#3121) 2026-02-12 08:10:01 -07:00
Aiden McClelland
2a54625f43 feat: replace InterfaceFilter with ForwardRequirements, add WildcardListener, complete alpha.20 bump
- Replace DynInterfaceFilter with ForwardRequirements for per-IP forward
  precision with source-subnet iptables filtering for private forwards
- Add WildcardListener (binds [::]:port) to replace the per-gateway
  NetworkInterfaceListener/SelfContainedNetworkInterfaceListener/
  UpgradableListener infrastructure
- Update forward-port script with src_subnet and excluded_src env vars
- Remove unused filter types and listener infrastructure from gateway.rs
- Add availablePorts migration (IdPool -> BTreeMap<u16, bool>) to alpha.20
- Complete version bump to 0.4.0-alpha.20 in SDK and web
2026-02-11 18:10:27 -07:00
Aiden McClelland
4e638fb58e feat: implement preferred port allocation and per-address enable/disable
- Add AvailablePorts::try_alloc() with SSL tracking (BTreeMap<u16, bool>)
- Add DerivedAddressInfo on BindInfo with private_disabled/public_enabled/possible sets
- Add Bindings wrapper with Map impl for patchdb indexed access
- Flatten HostAddress from single-variant enum to struct
- Replace set-gateway-enabled RPC with set-address-enabled
- Remove hostname_info from Host; computed addresses now in BindInfo.addresses.possible
- Compute possible addresses inline in NetServiceData::update()
- Update DB migration, SDK types, frontend, and container-runtime
2026-02-10 17:38:51 -07:00
Aiden McClelland
73274ef6e0 docs: update TODO.md with DerivedAddressInfo design, remove completed tor task 2026-02-10 14:45:50 -07:00
Aiden McClelland
e1915bf497 chore: format RPCSpec.md markdown table 2026-02-10 13:38:40 -07:00
Aiden McClelland
8204074bdf chore: flatten HostnameInfo from enum to struct
HostnameInfo only had one variant (Ip) after removing Tor. Flatten it
into a plain struct with fields gateway, public, hostname. Remove all
kind === 'ip' type guards and narrowing across SDK, frontend, and
container runtime. Update DB migration to strip the kind field.
2026-02-10 13:38:12 -07:00
Aiden McClelland
2ee403e7de chore: remove tor from startos core
Tor is being moved from a built-in OS feature to a service. This removes
the Arti-based Tor client, onion address management, hidden service
creation, and all related code from the core backend, frontend, and SDK.

- Delete core/src/net/tor/ module (~2060 lines)
- Remove OnionAddress, TorSecretKey, TorController from all consumers
- Remove HostnameInfo::Onion and HostAddress::Onion variants
- Remove onion CRUD RPC endpoints and tor subcommand
- Remove tor key handling from account and backup/restore
- Remove ~12 tor-related Cargo dependencies (arti-client, torut, etc.)
- Remove tor UI components, API methods, mock data, and routes
- Remove OnionHostname and tor patterns/regexes from SDK
- Add v0_4_0_alpha_20 database migration to strip onion data
- Bump version to 0.4.0-alpha.20
2026-02-10 13:28:24 -07:00
Aiden McClelland
1974dfd66f docs: move address enable/disable to overflow menu, add SSL indicator, defer UI placement decisions 2026-02-09 13:29:49 -07:00
Aiden McClelland
2e03a95e47 docs: overhaul interfaces page design with view/manage split and per-address controls 2026-02-09 13:10:57 -07:00
Aiden McClelland
b6262c8e13 Fix PackageInfoShort to handle LocaleString on releaseNotes (#3112)
* Fix PackageInfoShort to handle LocaleString on releaseNotes

* fix: filter by target_version in get_matching_models and pass otherVersions from install

* chore: add exver documentation for ai agents
2026-02-09 19:42:03 +00:00
Matt Hill
ba740a9ee2 Multiple (#3111)
* fix alerts i18n, fix status display, better, remove usb media, hide shutdown for install complete

* trigger change detection for localize pipe and round out implementing localize pipe for consistency even though not needed
2026-02-09 12:41:29 -07:00
Aiden McClelland
8f809dab21 docs: add user-controlled public/private and port forward mapping to design 2026-02-08 11:17:43 -07:00
Aiden McClelland
c0b2cbe1c8 docs: update preferred external port design in TODO 2026-02-06 09:30:35 -07:00
Aiden McClelland
f2142f0bb3 add documentation for ai agents (#3115)
* add documentation for ai agents

* docs: consolidate CLAUDE.md and CONTRIBUTING.md, add style guidelines

- Refactor CLAUDE.md to reference CONTRIBUTING.md for build/test/format info
- Expand CONTRIBUTING.md with comprehensive build targets, env vars, and testing
- Add code style guidelines section with conventional commits
- Standardize SDK prettier config to use single quotes (matching web)
- Add project-level Claude Code settings to disable co-author attribution

* style(sdk): apply prettier with single quotes

Run prettier across sdk/base and sdk/package to apply the
standardized quote style (single quotes matching web).

* docs: add USER.md for per-developer TODO filtering

- Add agents/USER.md to .gitignore (contains user identifier)
- Document session startup flow in CLAUDE.md:
  - Create USER.md if missing, prompting for identifier
  - Filter TODOs by @username tags
  - Offer relevant TODOs on session start

* docs: add i18n documentation task to agent TODOs

* docs: document i18n ID patterns in core/

Add agents/i18n-patterns.md covering rust-i18n setup, translation file
format, t!() macro usage, key naming conventions, and locale selection.
Remove completed TODO item and add reference in CLAUDE.md.

* chore: clarify that all builds work on any OS with Docker
2026-02-06 00:10:16 +01:00
gStart9
86ca23c093 Remove redundant https:// strings in start-tunnel installation output (#3114) 2026-02-05 23:22:31 +01:00
Dominion5254
463b6ca4ef propagate Error info (#3116) 2026-02-05 23:21:28 +01:00
Matt Hill
58e0b166cb move comment to safe place 2026-02-02 21:09:19 -07:00
Matt Hill
2a678bb017 fix warning and skip raspberrypi builds for now 2026-02-02 20:16:41 -07:00
Matt Hill
5664456b77 fix for buildjet 2026-02-02 18:51:11 -07:00
Matt Hill
3685b7e57e fix workflows 2026-02-02 18:37:13 -07:00
Matt Hill
989d5f73b1 fix --arch flag to fall back to emulation when native image unavailab… (#3108)
* fix --arch flag to fall back to emulation when native image unavailable, always infer hardware requirement for arch

* better handling of arch filter

* dont cancel in-progress commit workflows and abstract common setup

* cli improvements

fix group handling

* fix cli publish

* alpha.19

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2026-02-03 00:56:59 +00:00
Matt Hill
4f84073cb5 actually eliminate duplicate workflows 2026-01-30 11:28:18 -07:00
Matt Hill
c190295c34 cleaner, also eliminate duplicate workflows 2026-01-30 11:23:40 -07:00
Matt Hill
60875644a1 better i18n checks, better action disabled, fix cert download for ios 2026-01-30 10:59:27 -07:00
Matt Hill
113b09ad01 fix cert download issue in index html 2026-01-29 16:57:12 -07:00
Alex Inkin
2605d0e671 chore: make column shorter (#3107) 2026-01-29 09:54:42 -07:00
Aiden McClelland
d232b91d31 update ota script, rbind for dependency mounts, cli list-ingredients fix, and formatting 2026-01-28 16:09:37 -07:00
Aiden McClelland
c65db31fd9 Feature/consolidate setup (#3092)
* start consolidating

* add start-cli flash-os

* combine install and setup and refactor all

* use http

* undo mock

* fix translation

* translations

* use dialogservice wrapper

* better ST messaging on setup

* only warn on update if breakages (#3097)

* finish setup wizard and ui language-keyboard feature

* fix typo

* wip: localization

* remove start-tunnel readme

* switch to posix strings for language internal

* revert mock

* translate backend strings

* fix missing about text

* help text for args

* feat: add "Add new gateway" option (#3098)

* feat: add "Add new gateway" option

* Update web/projects/ui/src/app/routes/portal/components/form/controls/select.component.ts

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* add translation

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: Matt Hill <mattnine@protonmail.com>

* fix dns selection

* keyboard keymap also

* ability to shutdown after install

* revert mock

* working setup flow + manifest localization

* (mostly) redundant localization on frontend

* version bump

* omit live medium from disk list and better space management

* ignore missing package archive on 035 migration

* fix device migration

* add i18n helper to sdk

* fix install over 0.3.5.1

* fix grub config

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
Co-authored-by: Matt Hill <MattDHill@users.noreply.github.com>
Co-authored-by: Alex Inkin <alexander@inkin.ru>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-01-27 14:44:41 -08:00
Aiden McClelland
99871805bd hardware acceleration and support for NVIDIA cards on nonfree images (#3089)
* add nvidia packages

* add nvidia deps to nonfree

* gpu_acceleration flag & nvidia hacking

* fix gpu_config & /tmp/lxc.log

* implement hardware acceleration more dynamically

* refactor OpenUI

* use mknod

* registry updates for multi-hardware-requirements

* pluralize

* handle new registry types

* remove log

* migrations and driver fixes

* wip

* misc patches

* handle nvidia-container differently

* chore: comments (#3093)

* chore: comments

* revert some sizing

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>

* Revert "handle nvidia-container differently"

This reverts commit d708ae53df.

* fix debian containers

* cleanup

* feat: add empty array placeholder in forms (#3095)

* fixes from testing, client side device filtering for better fingerprinting resistance

* fix mac builds

---------

Co-authored-by: Sam Sartor <me@samsartor.com>
Co-authored-by: Matt Hill <mattnine@protonmail.com>
Co-authored-by: Alex Inkin <alexander@inkin.ru>
2026-01-15 11:42:17 -08:00
767 changed files with 27464 additions and 21097 deletions

1
.claude/settings.json Normal file

@@ -0,0 +1 @@
{}

81
.github/actions/setup-build/action.yml vendored Normal file

@@ -0,0 +1,81 @@
name: Setup Build Environment
description: Common build environment setup steps
inputs:
  nodejs-version:
    description: Node.js version
    required: true
  setup-python:
    description: Set up Python
    required: false
    default: "false"
  setup-docker:
    description: Set up Docker QEMU and Buildx
    required: false
    default: "true"
  setup-sccache:
    description: Configure sccache for GitHub Actions
    required: false
    default: "true"
  free-space:
    description: Remove unnecessary packages to free disk space
    required: false
    default: "true"
runs:
  using: composite
  steps:
    - name: Free disk space
      if: inputs.free-space == 'true'
      shell: bash
      run: |
        sudo apt-get remove --purge -y azure-cli || true
        sudo apt-get remove --purge -y firefox || true
        sudo apt-get remove --purge -y ghc-* || true
        sudo apt-get remove --purge -y google-cloud-sdk || true
        sudo apt-get remove --purge -y google-chrome-stable || true
        sudo apt-get remove --purge -y powershell || true
        sudo apt-get remove --purge -y php* || true
        sudo apt-get remove --purge -y ruby* || true
        sudo apt-get remove --purge -y mono-* || true
        sudo apt-get autoremove -y
        sudo apt-get clean
        sudo rm -rf /usr/lib/jvm
        sudo rm -rf /usr/local/.ghcup
        sudo rm -rf /usr/local/lib/android
        sudo rm -rf /usr/share/dotnet
        sudo rm -rf /usr/share/swift
        sudo rm -rf "$AGENT_TOOLSDIRECTORY"
    # BuildJet runners lack /opt/hostedtoolcache, which setup-python and setup-qemu expect
    - name: Ensure hostedtoolcache exists
      shell: bash
      run: sudo mkdir -p /opt/hostedtoolcache && sudo chown $USER:$USER /opt/hostedtoolcache
    - name: Set up Python
      if: inputs.setup-python == 'true'
      uses: actions/setup-python@v5
      with:
        python-version: "3.x"
    - uses: actions/setup-node@v4
      with:
        node-version: ${{ inputs.nodejs-version }}
        cache: npm
        cache-dependency-path: "**/package-lock.json"
    - name: Set up Docker QEMU
      if: inputs.setup-docker == 'true'
      uses: docker/setup-qemu-action@v3
    - name: Set up Docker Buildx
      if: inputs.setup-docker == 'true'
      uses: docker/setup-buildx-action@v3
    - name: Configure sccache
      if: inputs.setup-sccache == 'true'
      uses: actions/github-script@v7
      with:
        script: |
          core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
          core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');


@@ -37,6 +37,10 @@ on:
- master
- next/*
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
cancel-in-progress: true
env:
NODEJS_VERSION: "24.11.0"
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
@@ -44,6 +48,7 @@ env:
jobs:
compile:
name: Build Debian Package
if: github.event.pull_request.draft != true
strategy:
fail-fast: true
matrix:
@@ -60,50 +65,15 @@ jobs:
}}
runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
steps:
- name: Cleaning up unnecessary files
run: |
sudo apt-get remove --purge -y mono-* \
ghc* cabal-install* \
dotnet* \
php* \
ruby* \
mysql-* \
postgresql-* \
azure-cli \
powershell \
google-cloud-sdk \
msodbcsql* mssql-tools* \
imagemagick* \
libgl1-mesa-dri \
google-chrome-stable \
firefox
sudo apt-get autoremove -y
sudo apt-get clean
- run: |
sudo mount -t tmpfs tmpfs .
- name: Mount tmpfs
if: ${{ github.event.inputs.runner == 'fast' }}
run: sudo mount -t tmpfs tmpfs .
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-node@v4
- uses: ./.github/actions/setup-build
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Set up docker QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Configure sccache
uses: actions/github-script@v7
with:
script: |
core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
nodejs-version: ${{ env.NODEJS_VERSION }}
- name: Make
run: TARGET=${{ matrix.triple }} make cli


@@ -1,4 +1,4 @@
name: Start-Registry
name: start-registry
on:
workflow_call:
@@ -35,6 +35,10 @@ on:
- master
- next/*
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
cancel-in-progress: true
env:
NODEJS_VERSION: "24.11.0"
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
@@ -42,6 +46,7 @@ env:
jobs:
compile:
name: Build Debian Package
if: github.event.pull_request.draft != true
strategy:
fail-fast: true
matrix:
@@ -56,50 +61,15 @@ jobs:
}}
runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
steps:
- name: Cleaning up unnecessary files
run: |
sudo apt-get remove --purge -y mono-* \
ghc* cabal-install* \
dotnet* \
php* \
ruby* \
mysql-* \
postgresql-* \
azure-cli \
powershell \
google-cloud-sdk \
msodbcsql* mssql-tools* \
imagemagick* \
libgl1-mesa-dri \
google-chrome-stable \
firefox
sudo apt-get autoremove -y
sudo apt-get clean
- run: |
sudo mount -t tmpfs tmpfs .
- name: Mount tmpfs
if: ${{ github.event.inputs.runner == 'fast' }}
run: sudo mount -t tmpfs tmpfs .
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-node@v4
- uses: ./.github/actions/setup-build
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Set up docker QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Configure sccache
uses: actions/github-script@v7
with:
script: |
core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
nodejs-version: ${{ env.NODEJS_VERSION }}
- name: Make
run: make registry-deb


@@ -1,4 +1,4 @@
name: Start-Tunnel
name: start-tunnel
on:
workflow_call:
@@ -35,6 +35,10 @@ on:
- master
- next/*
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
cancel-in-progress: true
env:
NODEJS_VERSION: "24.11.0"
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
@@ -42,6 +46,7 @@ env:
jobs:
compile:
name: Build Debian Package
if: github.event.pull_request.draft != true
strategy:
fail-fast: true
matrix:
@@ -56,50 +61,15 @@ jobs:
}}
runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
steps:
- name: Cleaning up unnecessary files
run: |
sudo apt-get remove --purge -y mono-* \
ghc* cabal-install* \
dotnet* \
php* \
ruby* \
mysql-* \
postgresql-* \
azure-cli \
powershell \
google-cloud-sdk \
msodbcsql* mssql-tools* \
imagemagick* \
libgl1-mesa-dri \
google-chrome-stable \
firefox
sudo apt-get autoremove -y
sudo apt-get clean
- run: |
sudo mount -t tmpfs tmpfs .
- name: Mount tmpfs
if: ${{ github.event.inputs.runner == 'fast' }}
run: sudo mount -t tmpfs tmpfs .
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-node@v4
- uses: ./.github/actions/setup-build
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Set up docker QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Configure sccache
uses: actions/github-script@v7
with:
script: |
core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
nodejs-version: ${{ env.NODEJS_VERSION }}
- name: Make
run: make tunnel-deb


@@ -27,7 +27,7 @@ on:
- x86_64-nonfree
- aarch64
- aarch64-nonfree
- raspberrypi
# - raspberrypi
- riscv64
deploy:
type: choice
@@ -45,6 +45,10 @@ on:
- master
- next/*
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
cancel-in-progress: true
env:
NODEJS_VERSION: "24.11.0"
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
@@ -52,6 +56,7 @@ env:
jobs:
compile:
name: Compile Base Binaries
if: github.event.pull_request.draft != true
strategy:
fail-fast: true
matrix:
@@ -86,54 +91,16 @@ jobs:
)[github.event.inputs.runner == 'fast']
}}
steps:
- name: Cleaning up unnecessary files
run: |
sudo apt-get remove --purge -y azure-cli || true
sudo apt-get remove --purge -y firefox || true
sudo apt-get remove --purge -y ghc-* || true
sudo apt-get remove --purge -y google-cloud-sdk || true
sudo apt-get remove --purge -y google-chrome-stable || true
sudo apt-get remove --purge -y powershell || true
sudo apt-get remove --purge -y php* || true
sudo apt-get remove --purge -y ruby* || true
sudo apt-get remove --purge -y mono-* || true
sudo apt-get autoremove -y
sudo apt-get clean
sudo rm -rf /usr/lib/jvm # All JDKs
sudo rm -rf /usr/local/.ghcup # Haskell toolchain
sudo rm -rf /usr/local/lib/android # Android SDK/NDK, emulator
sudo rm -rf /usr/share/dotnet # .NET SDKs
sudo rm -rf /usr/share/swift # Swift toolchain (if present)
sudo rm -rf "$AGENT_TOOLSDIRECTORY" # Pre-cached tool cache (Go, Node, etc.)
- run: |
sudo mount -t tmpfs tmpfs .
- name: Mount tmpfs
if: ${{ github.event.inputs.runner == 'fast' }}
run: sudo mount -t tmpfs tmpfs .
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Set up Python
uses: actions/setup-python@v5
- uses: ./.github/actions/setup-build
with:
python-version: "3.x"
- uses: actions/setup-node@v4
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Set up docker QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Configure sccache
uses: actions/github-script@v7
with:
script: |
core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
nodejs-version: ${{ env.NODEJS_VERSION }}
setup-python: "true"
- name: Make
run: make ARCH=${{ matrix.arch }} compiled-${{ matrix.arch }}.tar
@@ -151,13 +118,14 @@ jobs:
strategy:
fail-fast: false
matrix:
# TODO: re-add "raspberrypi" to the platform list below
platform: >-
${{
fromJson(
format(
'[
["{0}"],
["x86_64", "x86_64-nonfree", "aarch64", "aarch64-nonfree", "riscv64", "raspberrypi"]
["x86_64", "x86_64-nonfree", "aarch64", "aarch64-nonfree", "riscv64"]
]',
github.event.inputs.platform || 'ALL'
)
@@ -221,6 +189,10 @@ jobs:
sudo rm -rf "$AGENT_TOOLSDIRECTORY" # Pre-cached tool cache (Go, Node, etc.)
if: ${{ github.event.inputs.runner != 'fast' }}
# BuildJet runners lack /opt/hostedtoolcache, which setup-qemu expects
- name: Ensure hostedtoolcache exists
run: sudo mkdir -p /opt/hostedtoolcache && sudo chown $USER:$USER /opt/hostedtoolcache
- name: Set up docker QEMU
uses: docker/setup-qemu-action@v3
@@ -251,10 +223,8 @@ jobs:
mkdir -p patch-db/client/dist
mkdir -p web/.angular
mkdir -p web/dist/raw/ui
mkdir -p web/dist/raw/install-wizard
mkdir -p web/dist/raw/setup-wizard
mkdir -p web/dist/static/ui
mkdir -p web/dist/static/install-wizard
mkdir -p web/dist/static/setup-wizard
PLATFORM=${{ matrix.platform }} make -t compiled-${{ env.ARCH }}.tar


@@ -10,6 +10,10 @@ on:
- master
- next/*
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
cancel-in-progress: true
env:
NODEJS_VERSION: "24.11.0"
ENVIRONMENT: dev-unstable
@@ -17,15 +21,18 @@ env:
jobs:
test:
name: Run Automated Tests
if: github.event.pull_request.draft != true
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-node@v4
- uses: ./.github/actions/setup-build
with:
node-version: ${{ env.NODEJS_VERSION }}
nodejs-version: ${{ env.NODEJS_VERSION }}
free-space: "false"
setup-docker: "false"
setup-sccache: "false"
- name: Build And Run Tests
run: make test

4
.gitignore vendored

@@ -19,4 +19,6 @@ secrets.db
/compiled.tar
/compiled-*.tar
/build/lib/firmware
tmp
tmp
web/.i18n-checked
docs/USER.md

101
ARCHITECTURE.md Normal file

@@ -0,0 +1,101 @@
# Architecture
StartOS is an open-source Linux distribution for running personal servers. It manages discovery, installation, network configuration, backups, and health monitoring of self-hosted services.
## Tech Stack
- Backend: Rust (async/Tokio, Axum web framework)
- Frontend: Angular 20 + TypeScript + TaigaUI
- Container runtime: Node.js/TypeScript with LXC
- Database/State: Patch-DB (git submodule) - storage layer with reactive frontend sync
- API: JSON-RPC via rpc-toolkit (see `core/rpc-toolkit.md`)
- Auth: Password + session cookie, public/private key signatures, local authcookie (see `core/src/middleware/auth/`)
## Project Structure
```bash
/
├── assets/ # Screenshots for README
├── build/ # Auxiliary files and scripts for deployed images
├── container-runtime/ # Node.js program managing package containers
├── core/ # Rust backend: API, daemon (startd), CLI (start-cli)
├── debian/ # Debian package maintainer scripts
├── image-recipe/ # Scripts for building StartOS images
├── patch-db/ # (submodule) Diff-based data store for frontend sync
├── sdk/ # TypeScript SDK for building StartOS packages
└── web/ # Web UIs (Angular)
```
## Components
- **`core/`** — Rust backend daemon. Produces a single binary `startbox` that is symlinked as `startd` (main daemon), `start-cli` (CLI), `start-container` (runs inside LXC containers), `registrybox` (package registry), and `tunnelbox` (VPN/tunnel). Handles all backend logic: RPC API, service lifecycle, networking (DNS, ACME, WiFi, Tor, WireGuard), backups, and database state management. See [core/ARCHITECTURE.md](core/ARCHITECTURE.md).
- **`web/`** — Angular 20 + TypeScript workspace using Taiga UI. Contains three applications (admin UI, setup wizard, VPN management) and two shared libraries (common components/services, marketplace). Communicates with the backend exclusively via JSON-RPC. See [web/ARCHITECTURE.md](web/ARCHITECTURE.md).
- **`container-runtime/`** — Node.js runtime that runs inside each service's LXC container. Loads the service's JavaScript from its S9PK package and manages subcontainers. Communicates with the host daemon via JSON-RPC over Unix socket. See [container-runtime/CLAUDE.md](container-runtime/CLAUDE.md).
- **`sdk/`** — TypeScript SDK for packaging services for StartOS (`@start9labs/start-sdk`). Split into `base/` (core types, ABI definitions, effects interface, consumed by web as `@start9labs/start-sdk-base`) and `package/` (full SDK for service developers, consumed by container-runtime as `@start9labs/start-sdk`).
- **`patch-db/`** — Git submodule providing diff-based state synchronization. Uses CBOR encoding. Backend mutations produce diffs that are pushed to the frontend via WebSocket, enabling reactive UI updates without polling. See [patch-db repo](https://github.com/Start9Labs/patch-db).
## Build Pipeline
Components have a strict dependency chain. Changes flow in one direction:
```
Rust (core/)
→ cargo test exports ts-rs types to core/bindings/
→ rsync copies to sdk/base/lib/osBindings/
→ SDK build produces baseDist/ and dist/
→ web/ consumes baseDist/ (via @start9labs/start-sdk-base)
→ container-runtime/ consumes dist/ (via @start9labs/start-sdk)
```
Key make targets along this chain:
| Step | Command | What it does |
|---|---|---|
| 1 | `cargo check -p start-os` | Verify Rust compiles |
| 2 | `make ts-bindings` | Export ts-rs types → rsync to SDK |
| 3 | `cd sdk && make baseDist dist` | Build SDK packages |
| 4 | `cd web && npm run check` | Type-check Angular projects |
| 5 | `cd container-runtime && npm run check` | Type-check runtime |
**Important**: Editing `sdk/base/lib/osBindings/*.ts` alone is NOT sufficient — you must rebuild the SDK bundle (step 3) before web/container-runtime can see the changes.
## Cross-Layer Verification
When making changes across multiple layers (Rust, SDK, web, container-runtime), verify in this order (a combined command sketch follows this list):
1. **Rust**: `cargo check -p start-os` — verifies core compiles
2. **TS bindings**: `make ts-bindings` — regenerates TypeScript types from Rust `#[ts(export)]` structs
- Runs `./core/build/build-ts.sh` to export ts-rs types to `core/bindings/`
- Syncs `core/bindings/` → `sdk/base/lib/osBindings/` via rsync
- If you manually edit files in `sdk/base/lib/osBindings/`, you must still rebuild the SDK (step 3)
3. **SDK bundle**: `cd sdk && make baseDist dist` — compiles SDK source into packages
- `baseDist/` is consumed by `/web` (via `@start9labs/start-sdk-base`)
- `dist/` is consumed by `/container-runtime` (via `@start9labs/start-sdk`)
- Web and container-runtime reference the **built** SDK, not source files
4. **Web type check**: `cd web && npm run check` — type-checks all Angular projects
5. **Container runtime type check**: `cd container-runtime && npm run check` — type-checks the runtime
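Run end to end, the verification chain amounts to the following sequence (a sketch assembled from the commands in steps 1–5 above; run only the steps downstream of the layer you changed):
```sh
# Sketch of the full cross-layer verification chain (commands from steps 1-5 above).
(cd core && cargo check -p start-os)          # 1. verify the Rust core compiles (core/ assumed as the workspace dir)
make ts-bindings                              # 2. re-export ts-rs types and rsync them into sdk/base/lib/osBindings/
(cd sdk && make baseDist dist)                # 3. rebuild the SDK bundles consumed by web/ and container-runtime/
(cd web && npm run check)                     # 4. type-check the Angular projects
(cd container-runtime && npm run check)       # 5. type-check the container runtime
```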
## Data Flow: Backend to Frontend
StartOS uses Patch-DB for reactive state synchronization:
1. The backend mutates state via `db.mutate()`, producing CBOR diffs
2. Diffs are pushed to the frontend over a persistent WebSocket connection
3. The frontend applies diffs to its local state copy and notifies observers
4. Components watch specific database paths via `PatchDB.watch$()`, receiving updates reactively
This means the UI is always eventually consistent with the backend — after any mutating API call, the frontend waits for the corresponding PatchDB diff before resolving, so the UI reflects the result immediately.
## Further Reading
- [core/ARCHITECTURE.md](core/ARCHITECTURE.md) — Rust backend architecture
- [web/ARCHITECTURE.md](web/ARCHITECTURE.md) — Angular frontend architecture
- [container-runtime/CLAUDE.md](container-runtime/CLAUDE.md) — Container runtime details
- [core/rpc-toolkit.md](core/rpc-toolkit.md) — JSON-RPC handler patterns
- [core/s9pk-structure.md](core/s9pk-structure.md) — S9PK package format
- [docs/exver.md](docs/exver.md) — Extended versioning format
- [docs/VERSION_BUMP.md](docs/VERSION_BUMP.md) — Version bumping guide

55
CLAUDE.md Normal file

@@ -0,0 +1,55 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Architecture
See [ARCHITECTURE.md](ARCHITECTURE.md) for the full system architecture, component map, build pipeline, and cross-layer verification order.
Each major component has its own `CLAUDE.md` with detailed guidance: `core/`, `web/`, `container-runtime/`, `sdk/`.
## Build & Development
See [CONTRIBUTING.md](CONTRIBUTING.md) for:
- Environment setup and requirements
- Build commands and make targets
- Testing and formatting commands
- Environment variables
**Quick reference:**
```bash
. ./devmode.sh # Enable dev mode
make update-startbox REMOTE=start9@<ip> # Fastest iteration (binary + UI)
make test-core # Run Rust tests
```
## Operating Rules
- Always verify cross-layer changes using the order described in [ARCHITECTURE.md](ARCHITECTURE.md#cross-layer-verification)
- Check component-level CLAUDE.md files for component-specific conventions
- Follow existing patterns before inventing new ones
## Supplementary Documentation
The `docs/` directory contains cross-cutting documentation for AI assistants:
- `TODO.md` - Pending tasks for AI agents (check this first, remove items when completed)
- `USER.md` - Current user identifier (gitignored, see below)
- `exver.md` - Extended versioning format (used across core, sdk, and web)
- `VERSION_BUMP.md` - Guide for bumping the StartOS version across the codebase
Component-specific docs live alongside their code (e.g., `core/rpc-toolkit.md`, `core/i18n-patterns.md`).
### Session Startup
On startup:
1. **Check for `docs/USER.md`** - If it doesn't exist, prompt the user for their name/identifier and create it. This file is gitignored since it varies per developer.
2. **Check `docs/TODO.md` for relevant tasks** - Show TODOs that either:
- Have no `@username` tag (relevant to everyone)
- Are tagged with the current user's identifier
Skip TODOs tagged with a different user.
3. **Ask "What would you like to do today?"** - Offer options for each relevant TODO item, plus "Something else" for other requests.


@@ -1,133 +1,240 @@
# Contributing to StartOS
This guide is for contributing to the StartOS. If you are interested in packaging a service for StartOS, visit the [service packaging guide](https://docs.start9.com/latest/packaging-guide/). If you are interested in promoting, providing technical support, creating tutorials, or helping in other ways, please visit the [Start9 website](https://start9.com/contribute).
This guide is for contributing to StartOS. If you are interested in packaging a service for StartOS, visit the [service packaging guide](https://github.com/Start9Labs/ai-service-packaging). If you are interested in promoting, providing technical support, creating tutorials, or helping in other ways, please visit the [Start9 website](https://start9.com/contribute).
## Collaboration
- [Matrix](https://matrix.to/#/#community-dev:matrix.start9labs.com)
- [Telegram](https://t.me/start9_labs/47471)
- [Matrix](https://matrix.to/#/#dev-startos:matrix.start9labs.com)
## Project Structure
```bash
/
├── assets/
├── container-runtime/
├── core/
├── build/
├── debian/
├── web/
├── image-recipe/
├── patch-db
└── sdk/
```
#### assets
screenshots for the StartOS README
#### container-runtime
A NodeJS program that dynamically loads maintainer scripts and communicates with the OS to manage packages
#### core
An API, daemon (startd), and CLI (start-cli) that together provide the core functionality of StartOS.
#### build
Auxiliary files and scripts to include in deployed StartOS images
#### debian
Maintainer scripts for the StartOS Debian package
#### web
Web UIs served under various conditions and used to interact with StartOS APIs.
#### image-recipe
Scripts for building StartOS images
#### patch-db (submodule)
A diff based data store used to synchronize data between the web interfaces and server.
#### sdk
A typescript sdk for building start-os packages
For project structure and system architecture, see [ARCHITECTURE.md](ARCHITECTURE.md).
## Environment Setup
#### Clone the StartOS repository
### Installing Dependencies (Debian/Ubuntu)
> Debian/Ubuntu is the only officially supported build environment.
> MacOS has limited build capabilities and Windows requires [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install).
```sh
git clone https://github.com/Start9Labs/start-os.git --recurse-submodules
sudo apt update
sudo apt install -y ca-certificates curl gpg build-essential
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg-architecture -q DEB_HOST_ARCH) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian bookworm stable" | sudo tee /etc/apt/sources.list.d/docker.list
sudo apt update
sudo apt install -y sed grep gawk jq gzip brotli containerd.io docker-ce docker-ce-cli docker-compose-plugin qemu-user-static binfmt-support squashfs-tools git debspawn rsync b3sum
sudo mkdir -p /etc/debspawn/
echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
sudo usermod -aG docker $USER
sudo su $USER
docker run --privileged --rm tonistiigi/binfmt --install all
docker buildx create --use
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh # proceed with default installation
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/master/install.sh | bash
source ~/.bashrc
nvm install 24
nvm use 24
nvm alias default 24 # this prevents your machine from reverting back to another version
```
### Cloning the Repository
```sh
git clone --recursive https://github.com/Start9Labs/start-os.git --branch next/major
cd start-os
```
#### Continue to your project of interest for additional instructions:
### Development Mode
- [`core`](core/README.md)
- [`web-interfaces`](web-interfaces/README.md)
- [`build`](build/README.md)
- [`patch-db`](https://github.com/Start9Labs/patch-db)
For faster iteration during development:
```sh
. ./devmode.sh
```
This sets `ENVIRONMENT=dev` and `GIT_BRANCH_AS_HASH=1` to prevent rebuilds on every commit.
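Sourcing the script is roughly equivalent to exporting these two variables by hand (a sketch of what the description above says it does; the actual `devmode.sh` may do more):
```sh
# Rough manual equivalent of ". ./devmode.sh" (sketch, not the script's actual contents).
export ENVIRONMENT=dev        # dev flag: password SSH before setup, frontends left uncompressed
export GIT_BRANCH_AS_HASH=1   # use the branch name as the version hash so commits don't force rebuilds
```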
## Building
This project uses [GNU Make](https://www.gnu.org/software/make/) to build its components. To build any specific component, simply run `make <TARGET>` replacing `<TARGET>` with the name of the target you'd like to build
All builds can be performed on any operating system that can run Docker.
This project uses [GNU Make](https://www.gnu.org/software/make/) to build its components.
### Requirements
- [GNU Make](https://www.gnu.org/software/make/)
- [Docker](https://docs.docker.com/get-docker/)
- [Docker](https://docs.docker.com/get-docker/) or [Podman](https://podman.io/)
- [NodeJS v20.16.0](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm)
- [sed](https://www.gnu.org/software/sed/)
- [grep](https://www.gnu.org/software/grep/)
- [awk](https://www.gnu.org/software/gawk/)
- [Rust](https://rustup.rs/) (nightly for formatting)
- [sed](https://www.gnu.org/software/sed/), [grep](https://www.gnu.org/software/grep/), [awk](https://www.gnu.org/software/gawk/)
- [jq](https://jqlang.github.io/jq/)
- [gzip](https://www.gnu.org/software/gzip/)
- [brotli](https://github.com/google/brotli)
- [gzip](https://www.gnu.org/software/gzip/), [brotli](https://github.com/google/brotli)
### Environment variables
### Environment Variables
- `PLATFORM`: which platform you would like to build for. Must be one of `x86_64`, `x86_64-nonfree`, `aarch64`, `aarch64-nonfree`, `raspberrypi`
- NOTE: `nonfree` images are for including `nonfree` firmware packages in the built ISO
- `ENVIRONMENT`: a hyphen separated set of feature flags to enable
- `dev`: enables password ssh (INSECURE!) and does not compress frontends
- `unstable`: enables assertions that will cause errors on unexpected inconsistencies that are undesirable in production use either for performance or reliability reasons
- `docker`: use `docker` instead of `podman`
- `GIT_BRANCH_AS_HASH`: set to `1` to use the current git branch name as the git hash so that the project does not need to be rebuilt on each commit
| Variable | Description |
| -------------------- | --------------------------------------------------------------------------------------------------- |
| `PLATFORM` | Target platform: `x86_64`, `x86_64-nonfree`, `aarch64`, `aarch64-nonfree`, `riscv64`, `raspberrypi` |
| `ENVIRONMENT` | Hyphen-separated feature flags (see below) |
| `PROFILE` | Build profile: `release` (default) or `dev` |
| `GIT_BRANCH_AS_HASH` | Set to `1` to use git branch name as version hash (avoids rebuilds) |
### Useful Make Targets
**ENVIRONMENT flags:**
- `iso`: Create a full `.iso` image
- Only possible from Debian
- Not available for `PLATFORM=raspberrypi`
- Additional Requirements:
- [debspawn](https://github.com/lkhq/debspawn)
- `img`: Create a full `.img` image
- Only possible from Debian
- Only available for `PLATFORM=raspberrypi`
- Additional Requirements:
- [debspawn](https://github.com/lkhq/debspawn)
- `format`: Run automatic code formatting for the project
- Additional Requirements:
- [rust](https://rustup.rs/)
- `test`: Run automated tests for the project
- Additional Requirements:
- [rust](https://rustup.rs/)
- `update`: Deploy the current working project to a device over ssh as if through an over-the-air update
- Requires an argument `REMOTE` which is the ssh address of the device, i.e. `start9@192.168.122.2`
- `reflash`: Deploy the current working project to a device over ssh as if using a live `iso` image to reflash it
- Requires an argument `REMOTE` which is the ssh address of the device, i.e. `start9@192.168.122.2`
- `update-overlay`: Deploy the current working project to a device over ssh to the in-memory overlay without restarting it
- WARNING: changes will be reverted after the device is rebooted
- WARNING: changes to `init` will not take effect as the device is already initialized
- Requires an argument `REMOTE` which is the ssh address of the device, i.e. `start9@192.168.122.2`
- `wormhole`: Deploy the `startbox` to a device using [magic-wormhole](https://github.com/magic-wormhole/magic-wormhole)
- When the build is complete, it will emit a command to paste into the shell of the device to upgrade it
- Additional Requirements:
- [magic-wormhole](https://github.com/magic-wormhole/magic-wormhole)
- `clean`: Delete all compiled artifacts
- `dev` - Enables password SSH before setup, skips frontend compression
- `unstable` - Enables assertions and debugging with performance penalty
- `console` - Enables tokio-console for async debugging
**Platform notes:**
- `-nonfree` variants include proprietary firmware and drivers
- `raspberrypi` includes non-free components by necessity
- Platform is remembered between builds if not specified
### Make Targets
#### Building
| Target | Description |
| ------------- | ---------------------------------------------- |
| `iso` | Create full `.iso` image (not for raspberrypi) |
| `img` | Create full `.img` image (raspberrypi only) |
| `deb` | Build Debian package |
| `all` | Build all Rust binaries |
| `uis` | Build all web UIs |
| `ui` | Build main UI only |
| `ts-bindings` | Generate TypeScript bindings from Rust types |
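Putting the environment variables and targets together, typical invocations might look like this (illustrative values only):
```sh
# Illustrative combinations of PLATFORM / ENVIRONMENT / PROFILE with the targets above.
PLATFORM=x86_64 ENVIRONMENT=dev-unstable make iso   # dev ISO with extra assertions enabled
PLATFORM=raspberrypi make img                       # .img image (raspberrypi is img-only)
PROFILE=dev make all                                # build the Rust binaries with the dev profile
```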
#### Deploying to Device
For devices on the same network:
| Target | Description |
| ------------------------------------ | ----------------------------------------------- |
| `update-startbox REMOTE=start9@<ip>` | Deploy binary + UI only (fastest) |
| `update-deb REMOTE=start9@<ip>` | Deploy full Debian package |
| `update REMOTE=start9@<ip>` | OTA-style update |
| `reflash REMOTE=start9@<ip>` | Reflash as if using live ISO |
| `update-overlay REMOTE=start9@<ip>` | Deploy to in-memory overlay (reverts on reboot) |
For devices on different networks (uses [magic-wormhole](https://github.com/magic-wormhole/magic-wormhole)):
| Target | Description |
| ------------------- | -------------------- |
| `wormhole` | Send startbox binary |
| `wormhole-deb` | Send Debian package |
| `wormhole-squashfs` | Send squashfs image |
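For example, a same-network deploy of just the binary and UI might look like this (the `start9@192.168.122.2` address is the example used elsewhere in this guide):
```sh
# Fastest iteration loop: push startbox + UI to a VM reachable over SSH.
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-startbox REMOTE=start9@192.168.122.2
```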
### Creating a VM
Install virt-manager:
```sh
sudo apt update
sudo apt install -y virt-manager
sudo usermod -aG libvirt $USER
sudo su $USER
virt-manager
```
Follow the screenshot walkthrough in [`assets/create-vm/`](assets/create-vm/) to create a new virtual machine. Key steps:
1. Create a new virtual machine
2. Browse for the ISO — create a storage pool pointing to your `results/` directory
3. Select "Generic or unknown OS"
4. Set memory and CPUs
5. Create a disk and name the VM
Build an ISO first:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make iso
```
#### Other
| Target | Description |
| ------------------------ | ------------------------------------------- |
| `format` | Run code formatting (Rust nightly required) |
| `test` | Run all automated tests |
| `test-core` | Run Rust tests |
| `test-sdk` | Run SDK tests |
| `test-container-runtime` | Run container runtime tests |
| `clean` | Delete all compiled artifacts |
## Testing
```bash
make test # All tests
make test-core # Rust tests (via ./core/run-tests.sh)
make test-sdk # SDK tests
make test-container-runtime # Container runtime tests
# Run specific Rust test
cd core && cargo test <test_name> --features=test
```
## Code Formatting
```bash
# Rust (requires nightly)
make format
# TypeScript/HTML/SCSS (web)
cd web && npm run format
```
## Code Style Guidelines
### Formatting
Run the formatters before committing. Configuration is handled by `rustfmt.toml` (Rust) and prettier configs (TypeScript).
### Documentation & Comments
**Rust:**
- Add doc comments (`///`) to public APIs, structs, and non-obvious functions
- Use `//` comments sparingly for complex logic that isn't self-evident
- Prefer self-documenting code (clear naming, small functions) over comments
**TypeScript:**
- Document exported functions and complex types with JSDoc
- Keep comments focused on "why" rather than "what"
**General:**
- Don't add comments that just restate the code
- Update or remove comments when code changes
- TODOs should include context: `// TODO(username): reason`
### Commit Messages
Use [Conventional Commits](https://www.conventionalcommits.org/):
```
<type>(<scope>): <description>
[optional body]
[optional footer]
```
**Types:**
- `feat` - New feature
- `fix` - Bug fix
- `docs` - Documentation only
- `style` - Formatting, no code change
- `refactor` - Code change that neither fixes a bug nor adds a feature
- `test` - Adding or updating tests
- `chore` - Build process, dependencies, etc.
**Examples:**
```
feat(web): add dark mode toggle
fix(core): resolve race condition in service startup
docs: update CONTRIBUTING.md with style guidelines
refactor(sdk): simplify package validation logic
```


@@ -1,134 +0,0 @@
# Setting up your development environment on Debian/Ubuntu
A step-by-step guide
> This is the only officially supported build environment.
> MacOS has limited build capabilities and Windows requires [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install)
## Installing dependencies
Run the following commands one at a time
```sh
sudo apt update
sudo apt install -y ca-certificates curl gpg build-essential
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg-architecture -q DEB_HOST_ARCH) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian bookworm stable" | sudo tee /etc/apt/sources.list.d/docker.list
sudo apt update
sudo apt install -y sed grep gawk jq gzip brotli containerd.io docker-ce docker-ce-cli docker-compose-plugin qemu-user-static binfmt-support squashfs-tools git debspawn rsync b3sum
sudo mkdir -p /etc/debspawn/
echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
sudo usermod -aG docker $USER
sudo su $USER
docker run --privileged --rm tonistiigi/binfmt --install all
docker buildx create --use
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh # proceed with default installation
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/master/install.sh | bash
source ~/.bashrc
nvm install 24
nvm use 24
nvm alias default 24 # this prevents your machine from reverting back to another version
```
## Cloning the repository
```sh
git clone --recursive https://github.com/Start9Labs/start-os.git --branch next/major
cd start-os
```
## Building an ISO
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make iso
```
This will build an ISO for your current architecture. If you are building to run on an architecture other than the one you are currently on, replace `$(uname -m)` with the correct platform for the device (one of `aarch64`, `aarch64-nonfree`, `x86_64`, `x86_64-nonfree`, `raspberrypi`)
## Creating a VM
### Install virt-manager
```sh
sudo apt update
sudo apt install -y virt-manager
sudo usermod -aG libvirt $USER
sudo su $USER
```
### Launch virt-manager
```sh
virt-manager
```
### Create new virtual machine
![Select "Create a new virtual machine"](assets/create-vm/step-1.png)
![Click "Forward"](assets/create-vm/step-2.png)
![Click "Browse"](assets/create-vm/step-3.png)
![Click "+"](assets/create-vm/step-4.png)
#### make sure to set "Target Path" to the path to your results directory in start-os
![Create storage pool](assets/create-vm/step-5.png)
![Select storage pool](assets/create-vm/step-6.png)
![Select ISO](assets/create-vm/step-7.png)
![Select "Generic or unknown OS" and click "Forward"](assets/create-vm/step-8.png)
![Set Memory and CPUs](assets/create-vm/step-9.png)
![Create disk](assets/create-vm/step-10.png)
![Name VM](assets/create-vm/step-11.png)
![Create network](assets/create-vm/step-12.png)
## Updating a VM
The fastest way to update a VM to your latest code depends on what you changed:
### UI or startd:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-startbox REMOTE=start9@<VM IP>
```
### Container runtime or debian dependencies:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-deb REMOTE=start9@<VM IP>
```
### Image recipe:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-squashfs REMOTE=start9@<VM IP>
```
---
If the device you are building for is not available via ssh, it is also possible to use `magic-wormhole` to send the relevant files.
### Prerequisites:
```sh
sudo apt update
sudo apt install -y magic-wormhole
```
As before, the fastest way to update a VM to your latest code depends on what you changed. Each of the following commands will return a command to paste into the shell of the device you would like to upgrade.
### UI or startd:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole
```
### Container runtime or debian dependencies:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole-deb
```
### Image recipe:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole-squashfs
```


@@ -12,8 +12,8 @@ RUST_ARCH := $(shell if [ "$(ARCH)" = "riscv64" ]; then echo riscv64gc; else ech
REGISTRY_BASENAME := $(shell PROJECT=start-registry PLATFORM=$(ARCH) ./build/env/basename.sh)
TUNNEL_BASENAME := $(shell PROJECT=start-tunnel PLATFORM=$(ARCH) ./build/env/basename.sh)
IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo iso; fi)
WEB_UIS := web/dist/raw/ui/index.html web/dist/raw/setup-wizard/index.html web/dist/raw/install-wizard/index.html
COMPRESSED_WEB_UIS := web/dist/static/ui/index.html web/dist/static/setup-wizard/index.html web/dist/static/install-wizard/index.html
WEB_UIS := web/dist/raw/ui/index.html web/dist/raw/setup-wizard/index.html
COMPRESSED_WEB_UIS := web/dist/static/ui/index.html web/dist/static/setup-wizard/index.html
FIRMWARE_ROMS := build/lib/firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./build/lib/firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json)
BUILD_SRC := $(call ls-files, build/lib) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS)
IMAGE_RECIPE_SRC := $(call ls-files, build/image-recipe/)
@@ -22,7 +22,6 @@ CORE_SRC := $(call ls-files, core) $(shell git ls-files --recurse-submodules pat
WEB_SHARED_SRC := $(call ls-files, web/projects/shared) $(call ls-files, web/projects/marketplace) $(shell ls -p web/ | grep -v / | sed 's/^/web\//g') web/node_modules/.package-lock.json web/config.json patch-db/client/dist/index.js sdk/baseDist/package.json web/patchdb-ui-seed.json sdk/dist/package.json
WEB_UI_SRC := $(call ls-files, web/projects/ui)
WEB_SETUP_WIZARD_SRC := $(call ls-files, web/projects/setup-wizard)
WEB_INSTALL_WIZARD_SRC := $(call ls-files, web/projects/install-wizard)
WEB_START_TUNNEL_SRC := $(call ls-files, web/projects/start-tunnel)
PATCH_DB_CLIENT_SRC := $(shell git ls-files --recurse-submodules patch-db/client)
GZIP_BIN := $(shell which pigz || which gzip)
@@ -237,9 +236,9 @@ update-startbox: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox
update-deb: results/$(BASENAME).deb # better than update, but only available from debian
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
$(call mkdir,/media/startos/next/tmp/startos-deb)
$(call cp,results/$(BASENAME).deb,/media/startos/next/tmp/startos-deb/$(BASENAME).deb)
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y --reinstall /tmp/startos-deb/$(BASENAME).deb"')
$(call mkdir,/media/startos/next/var/tmp/startos-deb)
$(call cp,results/$(BASENAME).deb,/media/startos/next/var/tmp/startos-deb/$(BASENAME).deb)
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y --reinstall /var/tmp/startos-deb/$(BASENAME).deb"')
update-squashfs: results/$(BASENAME).squashfs
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
@@ -325,19 +324,19 @@ web/.angular/.updated: patch-db/client/dist/index.js sdk/baseDist/package.json w
mkdir -p web/.angular
touch web/.angular/.updated
web/dist/raw/ui/index.html: $(WEB_UI_SRC) $(WEB_SHARED_SRC) web/.angular/.updated
web/.i18n-checked: $(WEB_SHARED_SRC) $(WEB_UI_SRC) $(WEB_SETUP_WIZARD_SRC) $(WEB_START_TUNNEL_SRC)
npm --prefix web run check:i18n
touch web/.i18n-checked
web/dist/raw/ui/index.html: $(WEB_UI_SRC) $(WEB_SHARED_SRC) web/.angular/.updated web/.i18n-checked
npm --prefix web run build:ui
touch web/dist/raw/ui/index.html
web/dist/raw/setup-wizard/index.html: $(WEB_SETUP_WIZARD_SRC) $(WEB_SHARED_SRC) web/.angular/.updated
web/dist/raw/setup-wizard/index.html: $(WEB_SETUP_WIZARD_SRC) $(WEB_SHARED_SRC) web/.angular/.updated web/.i18n-checked
npm --prefix web run build:setup
touch web/dist/raw/setup-wizard/index.html
web/dist/raw/install-wizard/index.html: $(WEB_INSTALL_WIZARD_SRC) $(WEB_SHARED_SRC) web/.angular/.updated
npm --prefix web run build:install
touch web/dist/raw/install-wizard/index.html
web/dist/raw/start-tunnel/index.html: $(WEB_START_TUNNEL_SRC) $(WEB_SHARED_SRC) web/.angular/.updated
web/dist/raw/start-tunnel/index.html: $(WEB_START_TUNNEL_SRC) $(WEB_SHARED_SRC) web/.angular/.updated web/.i18n-checked
npm --prefix web run build:tunnel
touch web/dist/raw/start-tunnel/index.html


@@ -7,76 +7,64 @@
<a href="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml">
<img src="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml/badge.svg">
</a>
<a href="https://heyapollo.com/product/startos">
<a href="https://heyapollo.com/product/startos">
<img alt="Static Badge" src="https://img.shields.io/badge/apollo-review%20%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%20-slateblue">
</a>
<a href="https://twitter.com/start9labs">
<img alt="X (formerly Twitter) Follow" src="https://img.shields.io/twitter/follow/start9labs">
</a>
<a href="https://matrix.to/#/#community:matrix.start9labs.com">
<img alt="Static Badge" src="https://img.shields.io/badge/community-matrix-yellow?logo=matrix">
</a>
<a href="https://t.me/start9_labs">
<img alt="Static Badge" src="https://img.shields.io/badge/community-telegram-blue?logo=telegram">
</a>
<a href="https://docs.start9.com">
<img alt="Static Badge" src="https://img.shields.io/badge/docs-orange?label=%F0%9F%91%A4%20support">
</a>
<a href="https://matrix.to/#/#community-dev:matrix.start9labs.com">
<a href="https://matrix.to/#/#dev-startos:matrix.start9labs.com">
<img alt="Static Badge" src="https://img.shields.io/badge/developer-matrix-darkcyan?logo=matrix">
</a>
<a href="https://start9.com">
<img alt="Website" src="https://img.shields.io/website?up_message=online&down_message=offline&url=https%3A%2F%2Fstart9.com&logo=website&label=%F0%9F%8C%90%20website">
</a>
</div>
<br />
<div align="center">
<h3>
Welcome to the era of Sovereign Computing
</h3>
<p>
StartOS is an open source Linux distribution optimized for running a personal server. It facilitates the discovery, installation, network configuration, service configuration, data backup, dependency management, and health monitoring of self-hosted software services.
</p>
</div>
<br />
<p align="center">
<img src="assets/StartOS.png" alt="StartOS" width="85%">
</p>
<br />
## Running StartOS
> [!WARNING]
> StartOS is in beta. It lacks features. It doesn't always work perfectly. Start9 servers are not plug and play. Using them properly requires some effort and patience. Please do not use StartOS or purchase a server if you are unable or unwilling to follow instructions and learn new concepts.
## What is StartOS?
### 💰 Buy a Start9 server
This is the most convenient option. Simply [buy a server](https://store.start9.com) from Start9 and plug it in.
StartOS is an open-source Linux distribution for running a personal server. It handles discovery, installation, network configuration, data backup, dependency management, and health monitoring of self-hosted services.
### 👷 Build your own server
This option is easier than you might imagine, and there are 4 reasons why you might prefer it:
1. You already have hardware
1. You want to save on shipping costs
1. You prefer not to divulge your physical address
1. You just like building things
**Tech stack:** Rust backend (Tokio/Axum), Angular frontend, Node.js container runtime with LXC, and a custom diff-based database ([Patch-DB](https://github.com/Start9Labs/patch-db)) for reactive state synchronization.
To pursue this option, follow one of our [DIY guides](https://start9.com/latest/diy).
Services run in isolated LXC containers, packaged as [S9PKs](https://github.com/Start9Labs/start-os/blob/master/core/s9pk-structure.md) — a signed, merkle-archived format that supports partial downloads and cryptographic verification.
## ❤️ Contributing
There are multiple ways to contribute: work directly on StartOS, package a service for the marketplace, or help with documentation and guides. To learn more about contributing, see [here](https://start9.com/contribute/).
## What can you do with it?
To report security issues, please email our security team - security@start9.com.
StartOS lets you self-host services that would otherwise depend on third-party cloud providers — giving you full ownership of your data and infrastructure.
## 🌎 Marketplace
There are dozens of services available for StartOS, and new ones are added all the time. Check out the full list of available services [here](https://marketplace.start9.com/marketplace). To read more about the Marketplace ecosystem, see this [blog post](https://blog.start9.com/start9-marketplace-strategy/).
Browse available services on the [Start9 Marketplace](https://marketplace.start9.com/), including:
## 🖥️ User Interface Screenshots
- **Bitcoin & Lightning** — Run a full Bitcoin node, Lightning node, BTCPay Server, and other payment infrastructure
- **Communication** — Self-host Matrix, SimpleX, or other messaging platforms
- **Cloud Storage** — Run Nextcloud, Vaultwarden, and other productivity tools
<p align="center">
<img src="assets/registry.png" alt="StartOS Marketplace" width="49%">
<img src="assets/community.png" alt="StartOS Community Registry" width="49%">
<img src="assets/c-lightning.png" alt="StartOS NextCloud Service" width="49%">
<img src="assets/btcpay.png" alt="StartOS BTCPay Service" width="49%">
<img src="assets/nextcloud.png" alt="StartOS System Settings" width="49%">
<img src="assets/system.png" alt="StartOS System Settings" width="49%">
<img src="assets/welcome.png" alt="StartOS System Settings" width="49%">
<img src="assets/logs.png" alt="StartOS System Settings" width="49%">
</p>
Services are added by the community. If a service you want isn't available, you can [package it yourself](https://github.com/Start9Labs/ai-service-packaging/).
## Getting StartOS
### Buy a Start9 server
The easiest path. [Buy a server](https://store.start9.com) from Start9 and plug it in.
### Build your own
Install StartOS on your own hardware. Follow one of the [DIY guides](https://start9.com/latest/diy). Reasons to go this route:
1. You already have compatible hardware
2. You want to save on shipping costs
3. You prefer not to share your physical address
4. You enjoy building things
### Build from source
See [CONTRIBUTING.md](CONTRIBUTING.md) for environment setup, build instructions, and development workflow.
## Contributing
There are multiple ways to contribute: work directly on StartOS, package a service for the marketplace, or help with documentation and guides. See [CONTRIBUTING.md](CONTRIBUTING.md) or visit [start9.com/contribute](https://start9.com/contribute/).
To report security issues, email [security@start9.com](mailto:security@start9.com).

View File

@@ -1,95 +0,0 @@
# StartTunnel
A self-hosted WireGuard VPN optimized for creating VLANs and reverse tunneling to personal servers.
You can think of StartTunnel as a "virtual router in the cloud".
Use it for private remote access to self-hosted services running on a personal server, or to expose self-hosted services to the public Internet without revealing the host server's IP address.
## Features
- **Create Subnets**: Each subnet creates a private, virtual local area network (VLAN), similar to the LAN created by a home router.
- **Add Devices**: When you add a device (server, phone, laptop) to a subnet, it receives a LAN IP address on that subnet as well as a unique WireGuard config that must be copied, downloaded, or scanned into the device.
- **Forward Ports**: Forwarding a port creates a "reverse tunnel", exposing a specific port on a specific device to the public Internet.
## Installation
1. Rent a low-cost VPS. For most use cases, the cheapest option should be enough.
- It must have a dedicated public IP address.
- For compute (CPU), memory (RAM), and storage (disk), choose the minimum spec.
- For transfer (bandwidth), it depends on (1) your use case and (2) your home Internet's _upload_ speed. Even if you intend to serve large files or stream content from your server, there is no reason to pay for speeds that exceed your home Internet's upload speed.
1. Provision the VPS with the latest version of Debian.
1. Access the VPS via SSH.
1. Run the StartTunnel install script:
`curl -fsSL https://start9labs.github.io/start-tunnel | sh`
1. [Initialize the web interface](#web-interface) (recommended)
## Updating
Simply re-run the install command:
```sh
curl -fsSL https://start9labs.github.io/start-tunnel | sh
```
## CLI
By default, StartTunnel is managed via the `start-tunnel` command line interface, which is self-documented.
```
start-tunnel --help
```
## Web Interface
Enable the web interface (recommended in most cases) to access your StartTunnel from the browser or via API.
1. Initialize the web interface.
`start-tunnel web init`
1. If your VPS has multiple public IP addresses, you will be prompted to select the IP address at which to host the web interface.
1. When prompted, enter the port at which to host the web interface. The default is 8443, and we recommend using it. If you change the default, choose an uncommon port to avoid future conflicts.
1. To access your StartTunnel web interface securely over HTTPS, you need an SSL certificate. When prompted, select whether to autogenerate a certificate or provide your own. _This is only for accessing your StartTunnel web interface_.
1. You will receive a success message with 3 pieces of information:
- **<https://IP:port>**: the URL where you can reach your personal web interface.
- **Password**: an autogenerated password for your interface. If you lose/forget it, you can reset it using the start-tunnel CLI.
- **Root Certificate Authority**: the Root CA of your StartTunnel instance.
1. If you autogenerated your SSL certificate, visiting the `https://IP:port` URL in the browser will warn you that the website is insecure. This is expected. You have two options for getting past this warning:
- Option 1 (recommended): [Trust your StartTunnel Root CA on your connecting device](#trusting-your-starttunnel-root-ca).
- Option 2: Bypass the warning in the browser, creating a one-time security exception.
### Trusting your StartTunnel Root CA
1. Copy the contents of your Root CA (starting with -----BEGIN CERTIFICATE----- and ending with -----END CERTIFICATE-----).
2. Open a text editor:
- Linux: gedit, nano, or any editor
- Mac: TextEdit
- Windows: Notepad
3. Paste the contents of your Root CA.
4. Save the file with a `.crt` extension (e.g. `start-tunnel.crt`) (make sure it saves as plain text, not rich text).
5. Trust the Root CA on your client device(s):
- [Linux](https://staging.docs.start9.com/device-guides/linux/ca.html)
- [Mac](https://staging.docs.start9.com/device-guides/mac/ca.html)
- [Windows](https://staging.docs.start9.com/device-guides/windows/ca.html)
- [Android/Graphene](https://staging.docs.start9.com/device-guides/android/ca.html)
- [iOS](https://staging.docs.start9.com/device-guides/ios/ca.html)

215
TODO.md Normal file
View File

@@ -0,0 +1,215 @@
# AI Agent TODOs
Pending tasks for AI agents. Remove items when completed.
## Unreviewed CLAUDE.md Sections
- [ ] Architecture - Web (`/web`) - @MattDHill
## Features
- [ ] Support preferred external ports besides 443 - @dr-bonez
**Problem**: Currently, port 443 is the only preferred external port that is actually honored. When a
service requests `preferred_external_port: 8443` (or any non-443 value) for SSL, the system ignores
the preference and assigns a dynamic-range port (49152-65535). The `preferred_external_port` is only
used as a label for Tor mappings and as a trigger for the port-443 special case in `update()`.
**Goal**: Honor `preferred_external_port` for both SSL and non-SSL binds when the requested port is
available, with proper conflict resolution and fallback to dynamic-range allocation.
### Design
**Key distinction**: There are two separate concepts for SSL port usage:
1. **Port ownership** (`assigned_ssl_port`) — A port exclusively owned by a binding, allocated from
`AvailablePorts`. Used for server hostnames (`.local`, mDNS, etc.) and iptables forwards.
2. **Domain SSL port** — The port used for domain-based vhost entries. A binding does NOT need to own
a port to have a domain vhost on it. The VHostController already supports multiple hostnames on the
same port via SNI. Any binding can create a domain vhost entry on any SSL port that the
VHostController has a listener for, regardless of who "owns" that port.
For example: the OS owns port 443 as its `assigned_ssl_port`. A service with
`preferred_external_port: 443` won't get 443 as its `assigned_ssl_port` (it's taken), but it CAN
still have domain vhost entries on port 443 — SNI routes by hostname.
#### 1. Preferred Port Allocation for Ownership ✅ DONE
`AvailablePorts::try_alloc(port) -> Option<u16>` added to `forward.rs`. `BindInfo::new()` and
`BindInfo::update()` attempt the preferred port first, falling back to dynamic-range allocation.
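A minimal sketch of the preferred-first allocation, assuming a pool backed by a `BTreeSet<u16>` (the real `AvailablePorts` in `forward.rs` may be structured differently):
```rust
use std::collections::BTreeSet;

/// Simplified stand-in for the port pool in core/src/net/forward.rs.
struct AvailablePorts {
    free: BTreeSet<u16>,
}

impl AvailablePorts {
    /// Claim a specific port if it is still free.
    fn try_alloc(&mut self, port: u16) -> Option<u16> {
        self.free.take(&port)
    }

    /// Claim any free port from the dynamic range (49152-65535).
    fn alloc(&mut self) -> Option<u16> {
        let port = self.free.iter().copied().find(|p| *p >= 49152)?;
        self.free.take(&port)
    }

    /// Preferred-first allocation, as used by BindInfo::new()/update().
    fn alloc_preferred(&mut self, preferred: Option<u16>) -> Option<u16> {
        preferred
            .and_then(|p| self.try_alloc(p))
            .or_else(|| self.alloc())
    }
}
```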
#### 2. Per-Address Enable/Disable ✅ DONE
Gateway-level `private_disabled`/`public_enabled` on `NetInfo` replaced with per-address
`DerivedAddressInfo` on `BindInfo`. `hostname_info` removed from `Host` — computed addresses now
live in `BindInfo.addresses.possible`.
**`DerivedAddressInfo` struct** (on `BindInfo`):
```rust
pub struct DerivedAddressInfo {
pub private_disabled: BTreeSet<HostnameInfo>,
pub public_enabled: BTreeSet<HostnameInfo>,
pub possible: BTreeSet<HostnameInfo>, // COMPUTED by update()
}
```
`DerivedAddressInfo::enabled()` returns `possible` filtered by the two sets. `HostnameInfo` derives
`Ord` for `BTreeSet` usage. `AddressFilter` (implementing `InterfaceFilter`) derives enabled
gateway set from `DerivedAddressInfo` for vhost/forward filtering.
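A sketch of how `enabled()` could combine the three sets. The `public` flag on each hostname entry is an assumption used to decide which exclusion set applies:
```rust
use std::collections::BTreeSet;

/// Heavily simplified stand-in; the real HostnameInfo in service_interface.rs
/// carries kind, ports, and more.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
struct HostnameInfo {
    host: String,
    public: bool,
}

struct DerivedAddressInfo {
    private_disabled: BTreeSet<HostnameInfo>,
    public_enabled: BTreeSet<HostnameInfo>,
    possible: BTreeSet<HostnameInfo>,
}

impl DerivedAddressInfo {
    /// Assumed rule: private addresses are on unless explicitly disabled,
    /// public addresses are off unless explicitly enabled.
    fn enabled(&self) -> BTreeSet<HostnameInfo> {
        self.possible
            .iter()
            .filter(|h| {
                if h.public {
                    self.public_enabled.contains(*h)
                } else {
                    !self.private_disabled.contains(*h)
                }
            })
            .cloned()
            .collect()
    }
}
```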
**RPC endpoint**: `set-gateway-enabled` replaced with `set-address-enabled` (on both
`server.host.binding` and `package.host.binding`).
**How disabling works per address type** (enforcement deferred to Section 3):
- **WAN/LAN IP:port**: Will be enforced via **source-IP gating** in the vhost layer (Section 3).
- **Hostname-based addresses** (`.local`, domains): Disabled by **not creating the vhost/SNI
entry** for that hostname.
#### 3. Eliminate the Port 5443 Hack: Source-IP-Based WAN Blocking (`vhost.rs`, `net_controller.rs`)
**Current problem**: The `if ssl.preferred_external_port == 443` branch (line 341 of
`net_controller.rs`) creates a bespoke dual-vhost setup: port 5443 for private-only access and port
443 for public (or public+private). This exists because both public and private traffic arrive on the
same port 443 listener, and the current `InterfaceFilter`/`PublicFilter` model distinguishes
public/private by which _network interface_ the connection arrived on — which doesn't work when both
traffic types share a listener.
**Solution**: Determine public vs private based on **source IP** at the vhost level. Traffic arriving
from the gateway IP should be treated as public (the gateway may MASQUERADE/NAT internet traffic, so
anything from the gateway is potentially public). Traffic from LAN IPs is private.
This applies to **all** vhost targets, not just port 443:
- **Add a `public` field to `ProxyTarget`** (or an enum: `Public`, `Private`, `Both`) indicating
what traffic this target accepts, derived from the binding's user-controlled `public` field.
- **Modify `VHostTarget::filter()`** (`vhost.rs:342`): Instead of (or in addition to) checking the
network interface via `GatewayInfo`, check the source IP of the TCP connection against known gateway
IPs. If the source IP matches a gateway IP or lies outside the subnet, the connection is public;
otherwise it's private. Use this to gate against the target's `public` field.
- **Eliminate the 5443 port entirely**: A single vhost entry on port 443 (or any shared SSL port) can
serve both public and private traffic, with per-target source-IP gating determining which backend
handles which connections.
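A sketch of the source-IP classification, with illustrative helper names (not the actual `ProxyTarget` API) and the `ipnet` crate assumed for subnet matching:
```rust
use std::net::IpAddr;

use ipnet::IpNet; // assumption: ipnet crate for subnet containment checks

/// Traffic from a gateway IP, or from outside every known LAN subnet,
/// is treated as public; everything else is private.
fn connection_is_public(src: IpAddr, gateway_ips: &[IpAddr], lan_subnets: &[IpNet]) -> bool {
    gateway_ips.contains(&src) || !lan_subnets.iter().any(|net| net.contains(&src))
}

/// Mirrors the proposed `public` field / enum on ProxyTarget (illustrative).
enum Accepts {
    Public,
    Private,
    Both,
}

/// Gate a target against the classified connection.
fn target_accepts(accepts: &Accepts, src_is_public: bool) -> bool {
    match (accepts, src_is_public) {
        (Accepts::Both, _) => true,
        (Accepts::Public, true) | (Accepts::Private, false) => true,
        _ => false,
    }
}
```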
#### 4. Port Forward Mapping in Patch-DB
When a binding is marked `public = true`, StartOS must record the required port forwards in patch-db
so the frontend can display them to the user. The user then configures these on their router manually.
For each public binding, store:
- The external port the router should forward (the actual vhost port used for domains, or the
`assigned_port` / `assigned_ssl_port` for non-domain access)
- The protocol (TCP/UDP)
- The StartOS LAN IP as the forward target
- Which service/binding this forward is for (for display purposes)
This mapping should be in the public database model so the frontend can read and display it.
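A sketch of one such record; the field set mirrors the list above, but names and types are illustrative rather than the final `public.rs` model:
```rust
use std::net::IpAddr;

use serde::{Deserialize, Serialize};

/// One required router port forward for a public binding (illustrative).
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct RequiredPortForward {
    /// External port the user's router should forward.
    pub external_port: u16,
    /// "tcp" or "udp".
    pub protocol: String,
    /// StartOS LAN IP the router should forward to.
    pub target_ip: IpAddr,
    /// Port on the StartOS host that traffic should reach.
    pub target_port: u16,
    /// Which service/binding this forward is for (display only).
    pub package_id: String,
    pub host_id: String,
}
```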
#### 5. Simplify `update()` Domain Vhost Logic (`net_controller.rs`)
With source-IP gating in the vhost controller:
- **Remove the `== 443` special case** and the 5443 secondary vhost.
- For **server hostnames** (`.local`, mDNS, embassy, startos, localhost): use `assigned_ssl_port`
(the port the binding owns).
- For **domain-based vhost entries**: attempt to use `preferred_external_port` as the vhost port.
This succeeds if the port is either unused or already has an SSL listener (SNI handles sharing).
It fails only if the port is already in use by a non-SSL binding, or is a restricted port. On
failure, fall back to `assigned_ssl_port` (see the sketch after this list).
- The binding's `public` field determines the `ProxyTarget`'s public/private gating.
- Hostname info must exactly match the actual vhost port used: for server hostnames, report
`ssl_port: assigned_ssl_port`. For domains, report `ssl_port: preferred_external_port` if it was
successfully used for the domain vhost, otherwise report `ssl_port: assigned_ssl_port`.
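A sketch of the port-selection fallback described in the second bullet; the `PortUse` registry is an assumed simplification of the VHostController's listener state:
```rust
use std::collections::BTreeMap;

/// What currently occupies an external port (assumed simplification).
enum PortUse {
    SslListener, // VHostController already listens here; SNI can share it
    NonSsl,      // owned by a non-SSL binding; cannot be shared
}

/// Pick the vhost port for a domain entry: preferred if usable, otherwise
/// the SSL port the binding owns.
fn domain_vhost_port(
    preferred: Option<u16>,
    assigned_ssl_port: u16,
    in_use: &BTreeMap<u16, PortUse>,
    restricted: &[u16],
) -> u16 {
    match preferred {
        Some(p)
            if !restricted.contains(&p)
                && !matches!(in_use.get(&p), Some(PortUse::NonSsl)) =>
        {
            p // unused, or already an SSL listener shared via SNI
        }
        _ => assigned_ssl_port,
    }
}
```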
#### 6. Reachability Test Endpoint
New RPC endpoint that tests whether an address is actually reachable, with diagnostic info on
failure.
**RPC endpoint** (`binding.rs` or new file):
- **`test-address`** — Test reachability of a specific address.
```ts
interface BindingTestAddressParams {
internalPort: number;
address: HostnameInfo;
}
```
The backend simply performs the raw checks and returns the results. The **frontend** owns all
interpretation — it already knows the address type, expected IP, expected port, etc. from the
`HostnameInfo` data, so it can compare against the backend results and construct fix messaging.
```ts
interface TestAddressResult {
dns: string[] | null; // resolved IPs, null if not a domain address or lookup failed
portOpen: boolean | null; // TCP connect result, null if not applicable
}
```
This yields two RPC methods:
- `server.host.binding.test-address`
- `package.host.binding.test-address`
The frontend already has the full `HostnameInfo` context (expected IP, domain, port, gateway,
public/private). It compares the backend's raw results against the expected state and constructs
localized fix instructions. For example:
- `dns` returned but doesn't contain the expected WAN IP → "Update DNS A record for {domain}
to {wanIp}"
- `dns` is `null` for a domain address → "DNS lookup failed for {domain}"
- `portOpen` is `false` → "Configure port forward on your router: external {port} TCP →
{lanIp}:{port}"
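On the backend side, the raw checks could be as simple as a DNS lookup plus a timed TCP connect; a sketch assuming tokio (the real handler would live alongside the other binding RPCs):
```rust
use std::net::{IpAddr, SocketAddr};
use std::time::Duration;

use tokio::net::{lookup_host, TcpStream};

/// Raw checks only; all interpretation happens in the frontend.
async fn test_address(
    domain: Option<&str>,
    ip: IpAddr,
    port: u16,
) -> (Option<Vec<IpAddr>>, Option<bool>) {
    // DNS: only meaningful for domain-based addresses.
    let dns = match domain {
        Some(d) => lookup_host((d, port))
            .await
            .ok()
            .map(|addrs| addrs.map(|a| a.ip()).collect::<Vec<_>>()),
        None => None,
    };

    // TCP connect with a short timeout; a timeout counts as closed.
    let port_open = Some(
        tokio::time::timeout(
            Duration::from_secs(5),
            TcpStream::connect(SocketAddr::new(ip, port)),
        )
        .await
        .map(|res| res.is_ok())
        .unwrap_or(false),
    );

    (dns, port_open)
}
```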
### Key Files
| File | Role |
| ------------------------------------ | ------------------------------------------------------------------------------------------------------------------------------------------------------- |
| `core/src/net/forward.rs` | `AvailablePorts` — port pool allocation, `try_alloc()` for preferred ports |
| `core/src/net/host/binding.rs` | `Bindings` (Map wrapper for patchdb), `BindInfo`/`NetInfo`/`DerivedAddressInfo`/`AddressFilter` — per-address enable/disable, `set-address-enabled` RPC |
| `core/src/net/net_controller.rs:259` | `NetServiceData::update()` — computes `DerivedAddressInfo.possible`, vhost/forward/DNS reconciliation, 5443 hack removal |
| `core/src/net/vhost.rs` | `VHostController` / `ProxyTarget` — source-IP gating for public/private |
| `core/src/net/gateway.rs` | `InterfaceFilter` trait and filter types (`AddressFilter`, `PublicFilter`, etc.) |
| `core/src/net/service_interface.rs` | `HostnameInfo` — derives `Ord` for `BTreeSet` usage |
| `core/src/net/host/address.rs` | `HostAddress` (flattened struct), domain CRUD endpoints |
| `sdk/base/lib/interfaces/Host.ts` | SDK `MultiHost.bindPort()` — no changes needed |
| `core/src/db/model/public.rs` | Public DB model — port forward mapping |
- [ ] Extract TS-exported types into a lightweight sub-crate for fast binding generation
**Problem**: `make ts-bindings` compiles the entire `start-os` crate (with all dependencies: tokio,
axum, openssl, etc.) just to run test functions that serialize type definitions to `.ts` files.
Even in debug mode, this takes minutes. The generated output is pure type info — no runtime code
is needed.
**Goal**: Generate TS bindings in seconds by isolating exported types in a small crate with minimal
dependencies.
**Approach**: Create a `core/bindings-types/` sub-crate containing (or re-exporting) all 168
`#[ts(export)]` types. This crate depends only on `serde`, `ts-rs`, `exver`, and other type-only
crates — not on tokio, axum, openssl, etc. Then `build-ts.sh` runs `cargo test -p bindings-types`
instead of `cargo test -p start-os`.
**Challenge**: The exported types are scattered across `core/src/` and reference each other and
other crate types. Extracting them requires either moving the type definitions into the sub-crate
(and importing them back into `start-os`) or restructuring to share a common types crate.
- [ ] Use auto-generated RPC types in the frontend instead of manual duplicates
**Problem**: The web frontend manually defines ~755 lines of API request/response types in
`web/projects/ui/src/app/services/api/api.types.ts` that can drift from the actual Rust types.
**Current state**: The Rust backend already has `#[ts(export)]` on RPC param types (e.g.
`AddTunnelParams`, `SetWifiEnabledParams`, `LoginParams`), and they are generated into
`core/bindings/`. However, commit `71b83245b` ("Chore/unexport api ts #2585", April 2024)
deliberately stopped building them into the SDK and had the frontend maintain its own types.
**Goal**: Reverse that decision — pipe the generated RPC types through the SDK into the frontend
so `api.types.ts` can import them instead of duplicating them. This eliminates drift between
backend and frontend API contracts.
- [ ] Auto-configure port forwards via UPnP/NAT-PMP/PCP - @dr-bonez
**Blocked by**: "Support preferred external ports besides 443" (must be implemented and tested
end-to-end first).
**Goal**: When a binding is marked public, automatically configure port forwards on the user's router
using UPnP, NAT-PMP, or PCP, instead of requiring manual router configuration. Fall back to
displaying manual instructions (the port forward mapping from patch-db) when auto-configuration is
unavailable or fails.

Binary files not shown (9 image assets removed; sizes range from 319 KiB to 2.1 MiB)

0
build/README.md Normal file
View File

View File

@@ -3,6 +3,7 @@ avahi-utils
b3sum
bash-completion
beep
binfmt-support
bmon
btrfs-progs
ca-certificates
@@ -15,6 +16,7 @@ dnsutils
dosfstools
e2fsprogs
ecryptfs-utils
equivs
exfatprogs
flashrom
fuse3
@@ -44,6 +46,7 @@ openssh-server
podman
psmisc
qemu-guest-agent
qemu-user-static
rfkill
rsync
samba-common-bin

View File

@@ -9,6 +9,9 @@ FEATURES+=("${ARCH}")
if [ "$ARCH" != "$PLATFORM" ]; then
FEATURES+=("${PLATFORM}")
fi
if [[ "$PLATFORM" =~ -nonfree$ ]]; then
FEATURES+=("nonfree")
fi
feature_file_checker='
/^#/ { next }

View File

@@ -0,0 +1,10 @@
+ firmware-amd-graphics
+ firmware-atheros
+ firmware-brcm80211
+ firmware-iwlwifi
+ firmware-libertas
+ firmware-misc-nonfree
+ firmware-realtek
+ nvidia-container-toolkit
# + nvidia-driver
# + nvidia-kernel-dkms

View File

@@ -73,7 +73,7 @@ if [ "$NON_FREE" = 1 ]; then
if [ "$IB_SUITE" = "bullseye" ]; then
ARCHIVE_AREAS="$ARCHIVE_AREAS non-free"
else
ARCHIVE_AREAS="$ARCHIVE_AREAS non-free-firmware"
ARCHIVE_AREAS="$ARCHIVE_AREAS non-free non-free-firmware"
fi
fi
@@ -154,9 +154,12 @@ prompt 0
timeout 50
EOF
cp $SOURCE_DIR/splash.png config/bootloaders/syslinux_common/splash.png
cp $SOURCE_DIR/splash.png config/bootloaders/isolinux/splash.png
cp $SOURCE_DIR/splash.png config/bootloaders/grub-pc/splash.png
# Extract splash.png from the deb package
dpkg-deb --fsys-tarfile $DEB_PATH | tar --to-stdout -xf - ./usr/lib/startos/splash.png > /tmp/splash.png
cp /tmp/splash.png config/bootloaders/syslinux_common/splash.png
cp /tmp/splash.png config/bootloaders/isolinux/splash.png
cp /tmp/splash.png config/bootloaders/grub-pc/splash.png
rm /tmp/splash.png
sed -i -e '2i set timeout=5' config/bootloaders/grub-pc/config.cfg
@@ -174,40 +177,123 @@ if [ "${IB_TARGET_PLATFORM}" = "rockchip64" ]; then
echo "deb https://apt.armbian.com/ ${IB_SUITE} main" > config/archives/armbian.list
fi
cat > config/archives/backports.pref <<- EOF
if [ "$NON_FREE" = 1 ]; then
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | gpg --dearmor -o config/archives/nvidia-container-toolkit.key
curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list \
| sed 's#deb https://#deb [signed-by=/etc/apt/trusted.gpg.d/nvidia-container-toolkit.key.gpg] https://#g' \
> config/archives/nvidia-container-toolkit.list
fi
cat > config/archives/backports.pref <<-EOF
Package: linux-image-*
Pin: release n=${IB_SUITE}-backports
Pin-Priority: 500
Package: linux-headers-*
Pin: release n=${IB_SUITE}-backports
Pin-Priority: 500
Package: *nvidia*
Pin: release n=${IB_SUITE}-backports
Pin-Priority: 500
EOF
# Dependencies
## Firmware
if [ "$NON_FREE" = 1 ]; then
echo 'firmware-iwlwifi firmware-misc-nonfree firmware-brcm80211 firmware-realtek firmware-atheros firmware-libertas firmware-amd-graphics' > config/package-lists/nonfree.list.chroot
fi
# Hooks
cat > config/hooks/normal/9000-install-startos.hook.chroot << EOF
#!/bin/bash
set -e
if [ "${NON_FREE}" = "1" ] && [ "${IB_TARGET_PLATFORM}" != "raspberrypi" ]; then
# install a specific NVIDIA driver version
# ---------------- configuration ----------------
NVIDIA_DRIVER_VERSION="\${NVIDIA_DRIVER_VERSION:-580.119.02}"
BASE_URL="https://download.nvidia.com/XFree86/Linux-${QEMU_ARCH}"
echo "[nvidia-hook] Using NVIDIA driver: \${NVIDIA_DRIVER_VERSION}" >&2
# ---------------- kernel version ----------------
# Determine target kernel version from newest /boot/vmlinuz-* in the chroot.
KVER="\$(
ls -1t /boot/vmlinuz-* 2>/dev/null \
| head -n1 \
| sed 's|.*/vmlinuz-||'
)"
if [ -z "\${KVER}" ]; then
echo "[nvidia-hook] ERROR: no /boot/vmlinuz-* found; cannot determine kernel version" >&2
exit 1
fi
echo "[nvidia-hook] Target kernel version: \${KVER}" >&2
# Ensure kernel headers are present
TEMP_APT_DEPS=(build-essential)
if [ ! -e "/lib/modules/\${KVER}/build" ]; then
TEMP_APT_DEPS+=(linux-headers-\${KVER})
fi
echo "[nvidia-hook] Installing build dependencies" >&2
/usr/lib/startos/scripts/install-equivs <<-EOF
Package: nvidia-depends
Version: \${NVIDIA_DRIVER_VERSION}
Section: unknown
Priority: optional
Depends: \${dep_list="\$(IFS=', '; echo "\${TEMP_APT_DEPS[*]}")"}
EOF
# ---------------- download and run installer ----------------
RUN_NAME="NVIDIA-Linux-${QEMU_ARCH}-\${NVIDIA_DRIVER_VERSION}.run"
RUN_PATH="/root/\${RUN_NAME}"
RUN_URL="\${BASE_URL}/\${NVIDIA_DRIVER_VERSION}/\${RUN_NAME}"
echo "[nvidia-hook] Downloading \${RUN_URL}" >&2
wget -O "\${RUN_PATH}" "\${RUN_URL}"
chmod +x "\${RUN_PATH}"
echo "[nvidia-hook] Running NVIDIA installer for kernel \${KVER}" >&2
sh "\${RUN_PATH}" \
--silent \
--kernel-name="\${KVER}" \
--no-x-check \
--no-nouveau-check \
--no-runlevel-check
# Rebuild module metadata
echo "[nvidia-hook] Running depmod for \${KVER}" >&2
depmod -a "\${KVER}"
echo "[nvidia-hook] NVIDIA \${NVIDIA_DRIVER_VERSION} installation complete for kernel \${KVER}" >&2
echo "[nvidia-hook] Removing build dependencies..." >&2
apt-get purge -y nvidia-depends
apt-get autoremove -y
echo "[nvidia-hook] Removed build dependencies." >&2
fi
cp /etc/resolv.conf /etc/resolv.conf.bak
if [ "${IB_SUITE}" = trixie ] && [ "${IB_TARGET_ARCH}" != riscv64 ]; then
echo 'deb https://deb.debian.org/debian/ bookworm main' > /etc/apt/sources.list.d/bookworm.list
apt-get update
apt-get install -y postgresql-15
rm /etc/apt/sources.list.d/bookworm.list
apt-get update
systemctl mask postgresql
echo 'deb https://deb.debian.org/debian/ bookworm main' > /etc/apt/sources.list.d/bookworm.list
apt-get update
apt-get install -y postgresql-15
rm /etc/apt/sources.list.d/bookworm.list
apt-get update
systemctl mask postgresql
fi
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
ln -sf /usr/bin/pi-beep /usr/local/bin/beep
KERNEL_VERSION=${RPI_KERNEL_VERSION} sh /boot/config.sh > /boot/config.txt
mkinitramfs -c gzip -o initrd.img-${RPI_KERNEL_VERSION}-rpi-v8 ${RPI_KERNEL_VERSION}-rpi-v8
mkinitramfs -c gzip -o initrd.img-${RPI_KERNEL_VERSION}-rpi-2712 ${RPI_KERNEL_VERSION}-rpi-2712
ln -sf /usr/bin/pi-beep /usr/local/bin/beep
KERNEL_VERSION=${RPI_KERNEL_VERSION} sh /boot/config.sh > /boot/config.txt
mkinitramfs -c gzip -o /boot/initrd.img-${RPI_KERNEL_VERSION}-rpi-v8 ${RPI_KERNEL_VERSION}-rpi-v8
mkinitramfs -c gzip -o /boot/initrd.img-${RPI_KERNEL_VERSION}-rpi-2712 ${RPI_KERNEL_VERSION}-rpi-2712
fi
useradd --shell /bin/bash -G startos -m start9
@@ -218,11 +304,11 @@ usermod -aG systemd-journal start9
echo "start9 ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee "/etc/sudoers.d/010_start9-nopasswd"
if [ "${IB_TARGET_PLATFORM}" != "raspberrypi" ]; then
/usr/lib/startos/scripts/enable-kiosk
/usr/lib/startos/scripts/enable-kiosk
fi
if ! [[ "${IB_OS_ENV}" =~ (^|-)dev($|-) ]]; then
passwd -l start9
passwd -l start9
fi
EOF
@@ -360,4 +446,4 @@ elif [ "${IMAGE_TYPE}" = img ]; then
fi
chown $IB_UID:$IB_UID $RESULTS_DIR/$IMAGE_BASENAME.*
chown $IB_UID:$IB_UID $RESULTS_DIR/$IMAGE_BASENAME.*

View File

@@ -1 +1 @@
usb-storage.quirks=152d:0562:u,14cd:121c:u,0781:cfcb:u console=serial0,115200 console=tty1 root=PARTUUID=cb15ae4d-02 rootfstype=ext4 fsck.repair=yes rootwait cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory quiet boot=startos
usb-storage.quirks=152d:0562:u,14cd:121c:u,0781:cfcb:u console=serial0,115200 console=tty1 root=PARTUUID=cb15ae4d-02 rootfstype=ext4 fsck.repair=yes rootwait cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory boot=startos

View File

@@ -0,0 +1,51 @@
desktop-image: "../splash.png"
title-color: "#ffffff"
title-font: "Unifont Regular 16"
title-text: "StartOS Boot Menu with GRUB"
message-font: "Unifont Regular 16"
terminal-font: "Unifont Regular 16"
#help bar at the bottom
+ label {
top = 100%-50
left = 0
width = 100%
height = 20
text = "@KEYMAP_SHORT@"
align = "center"
color = "#ffffff"
font = "Unifont Regular 16"
}
#boot menu
+ boot_menu {
left = 10%
width = 80%
top = 52%
height = 48%-80
item_color = "#a8a8a8"
item_font = "Unifont Regular 16"
selected_item_color= "#ffffff"
selected_item_font = "Unifont Regular 16"
item_height = 16
item_padding = 0
item_spacing = 4
icon_width = 0
icon_heigh = 0
item_icon_space = 0
}
#progress bar
+ progress_bar {
id = "__timeout__"
left = 15%
top = 100%-80
height = 16
width = 70%
font = "Unifont Regular 16"
text_color = "#000000"
fg_color = "#ffffff"
bg_color = "#a8a8a8"
border_color = "#ffffff"
text = "@TIMEOUT_NOTIFICATION_LONG@"
}

View File

@@ -4,7 +4,7 @@ parse_essential_db_info() {
DB_DUMP="/tmp/startos_db.json"
if command -v start-cli >/dev/null 2>&1; then
start-cli db dump > "$DB_DUMP" 2>/dev/null || return 1
timeout 30 start-cli db dump > "$DB_DUMP" 2>/dev/null || return 1
else
return 1
fi

View File

@@ -111,6 +111,6 @@ if [ "$CHROOT_RES" -eq 0 ]; then
reboot
fi
umount -R /media/startos/next
umount /media/startos/next
umount /media/startos/upper
rm -rf /media/startos/upper /media/startos/next

View File

@@ -5,7 +5,7 @@ if [ -z "$sip" ] || [ -z "$dip" ] || [ -z "$dprefix" ] || [ -z "$sport" ] || [ -
exit 1
fi
NAME="F$(echo "$sip:$sport -> $dip/$dprefix:$dport" | sha256sum | head -c 15)"
NAME="F$(echo "$sip:$sport -> $dip/$dprefix:$dport ${src_subnet:-any}" | sha256sum | head -c 15)"
for kind in INPUT FORWARD ACCEPT; do
if ! iptables -C $kind -j "${NAME}_${kind}" 2> /dev/null; then
@@ -13,7 +13,7 @@ for kind in INPUT FORWARD ACCEPT; do
iptables -A $kind -j "${NAME}_${kind}"
fi
done
for kind in PREROUTING INPUT OUTPUT POSTROUTING; do
for kind in PREROUTING OUTPUT; do
if ! iptables -t nat -C $kind -j "${NAME}_${kind}" 2> /dev/null; then
iptables -t nat -N "${NAME}_${kind}" 2> /dev/null
iptables -t nat -A $kind -j "${NAME}_${kind}"
@@ -26,7 +26,7 @@ trap 'err=1' ERR
for kind in INPUT FORWARD ACCEPT; do
iptables -F "${NAME}_${kind}" 2> /dev/null
done
for kind in PREROUTING INPUT OUTPUT POSTROUTING; do
for kind in PREROUTING OUTPUT; do
iptables -t nat -F "${NAME}_${kind}" 2> /dev/null
done
if [ "$UNDO" = 1 ]; then
@@ -36,20 +36,21 @@ if [ "$UNDO" = 1 ]; then
fi
# DNAT: rewrite destination for incoming packets (external traffic)
iptables -t nat -A ${NAME}_PREROUTING -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_PREROUTING -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
# When src_subnet is set, only forward traffic from that subnet (private forwards)
if [ -n "$src_subnet" ]; then
iptables -t nat -A ${NAME}_PREROUTING -s "$src_subnet" -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_PREROUTING -s "$src_subnet" -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
else
iptables -t nat -A ${NAME}_PREROUTING -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_PREROUTING -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
fi
# DNAT: rewrite destination for locally-originated packets (hairpin from host itself)
iptables -t nat -A ${NAME}_OUTPUT -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_OUTPUT -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
# MASQUERADE: rewrite source for all forwarded traffic to the destination
# This ensures responses are routed back through the host regardless of source IP
iptables -t nat -A ${NAME}_POSTROUTING -d "$dip" -p tcp --dport "$dport" -j MASQUERADE
iptables -t nat -A ${NAME}_POSTROUTING -d "$dip" -p udp --dport "$dport" -j MASQUERADE
# Allow new connections to be forwarded to the destination
iptables -A ${NAME}_FORWARD -d $dip -p tcp --dport $dport -m state --state NEW -j ACCEPT
iptables -A ${NAME}_FORWARD -d $dip -p udp --dport $dport -m state --state NEW -j ACCEPT
exit $err
exit $err

View File

@@ -0,0 +1,20 @@
#!/bin/bash
export DEBIAN_FRONTEND=noninteractive
export DEBCONF_NONINTERACTIVE_SEEN=true
TMP_DIR=$(mktemp -d)
(
set -e
cd $TMP_DIR
cat > control.equivs
equivs-build control.equivs
apt-get install -y ./*.deb < /dev/null
)
rm -rf $TMP_DIR
echo Install complete. >&2
exit 0

View File

@@ -29,10 +29,13 @@ if [ -z "$needed" ]; then
exit 1
fi
MARGIN=${MARGIN:-1073741824}
target=$((needed + MARGIN))
if [ -h /media/startos/config/current.rootfs ] && [ -e /media/startos/config/current.rootfs ]; then
echo 'Pruning...'
current="$(readlink -f /media/startos/config/current.rootfs)"
while [[ "$(df -B1 --output=avail --sync /media/startos/images | tail -n1)" -lt "$needed" ]]; do
while [[ "$(df -B1 --output=avail --sync /media/startos/images | tail -n1)" -lt "$target" ]]; do
to_prune="$(ls -t1 /media/startos/images/*.rootfs /media/startos/images/*.squashfs 2> /dev/null | grep -v "$current" | tail -n1)"
if [ -e "$to_prune" ]; then
echo " Pruning $to_prune"

View File

Binary image changed (9.6 KiB before and after); preview not shown

View File

@@ -1,87 +0,0 @@
#!/bin/bash
set -e
function partition_for () {
if [[ "$1" =~ [0-9]+$ ]]; then
echo "$1p$2"
else
echo "$1$2"
fi
}
VERSION=$(cat VERSION.txt)
ENVIRONMENT=$(cat ENVIRONMENT.txt)
GIT_HASH=$(cat GIT_HASH.txt | head -c 7)
DATE=$(date +%Y%m%d)
ROOT_PART_END=7217792
VERSION_FULL="$VERSION-$GIT_HASH"
if [ -n "$ENVIRONMENT" ]; then
VERSION_FULL="$VERSION_FULL~$ENVIRONMENT"
fi
TARGET_NAME=startos-${VERSION_FULL}-${DATE}_raspberrypi.img
TARGET_SIZE=$[($ROOT_PART_END+1)*512]
rm -f $TARGET_NAME
truncate -s $TARGET_SIZE $TARGET_NAME
(
echo o
echo x
echo i
echo "0xcb15ae4d"
echo r
echo n
echo p
echo 1
echo 2048
echo 526335
echo t
echo c
echo n
echo p
echo 2
echo 526336
echo $ROOT_PART_END
echo a
echo 1
echo w
) | fdisk $TARGET_NAME
OUTPUT_DEVICE=$(sudo losetup --show -fP $TARGET_NAME)
sudo mkfs.ext4 `partition_for ${OUTPUT_DEVICE} 2`
sudo mkfs.vfat `partition_for ${OUTPUT_DEVICE} 1`
TMPDIR=$(mktemp -d)
sudo mount `partition_for ${OUTPUT_DEVICE} 2` $TMPDIR
sudo mkdir $TMPDIR/boot
sudo mount `partition_for ${OUTPUT_DEVICE} 1` $TMPDIR/boot
sudo unsquashfs -f -d $TMPDIR startos.raspberrypi.squashfs
REAL_GIT_HASH=$(cat $TMPDIR/usr/lib/startos/GIT_HASH.txt)
REAL_VERSION=$(cat $TMPDIR/usr/lib/startos/VERSION.txt)
REAL_ENVIRONMENT=$(cat $TMPDIR/usr/lib/startos/ENVIRONMENT.txt)
sudo sed -i 's| boot=startos| init=/usr/lib/startos/scripts/init_resize\.sh|' $TMPDIR/boot/cmdline.txt
sudo cp ./build/raspberrypi/fstab $TMPDIR/etc/
sudo cp ./build/raspberrypi/init_resize.sh $TMPDIR/usr/lib/startos/scripts/init_resize.sh
sudo umount $TMPDIR/boot
sudo umount $TMPDIR
sudo losetup -d $OUTPUT_DEVICE
if [ "$ALLOW_VERSION_MISMATCH" != 1 ]; then
if [ "$(cat GIT_HASH.txt)" != "$REAL_GIT_HASH" ]; then
>&2 echo "startos.raspberrypi.squashfs GIT_HASH.txt mismatch"
>&2 echo "expected $REAL_GIT_HASH (dpkg) found $(cat GIT_HASH.txt) (repo)"
exit 1
fi
if [ "$(cat VERSION.txt)" != "$REAL_VERSION" ]; then
>&2 echo "startos.raspberrypi.squashfs VERSION.txt mismatch"
exit 1
fi
if [ "$(cat ENVIRONMENT.txt)" != "$REAL_ENVIRONMENT" ]; then
>&2 echo "startos.raspberrypi.squashfs ENVIRONMENT.txt mismatch"
exit 1
fi
fi

View File

@@ -15,13 +15,12 @@ if [ "$SKIP_DL" != "1" ]; then
fi
if [ -n "$RUN_ID" ]; then
for arch in aarch64 aarch64-nonfree riscv64 x86_64 x86_64-nonfree raspberrypi; do
for arch in aarch64 aarch64-nonfree riscv64 x86_64 x86_64-nonfree; do
while ! gh run download -R Start9Labs/start-os $RUN_ID -n $arch.squashfs -D $(pwd); do sleep 1; done
done
for arch in aarch64 aarch64-nonfree riscv64 x86_64 x86_64-nonfree; do
while ! gh run download -R Start9Labs/start-os $RUN_ID -n $arch.iso -D $(pwd); do sleep 1; done
done
while ! gh run download -R Start9Labs/start-os $RUN_ID -n raspberrypi.img -D $(pwd); do sleep 1; done
fi
if [ -n "$ST_RUN_ID" ]; then
@@ -57,31 +56,23 @@ start-cli --registry=https://alpha-registry-x.start9.com registry os version add
if [ "$SKIP_UL" = "2" ]; then
exit 2
elif [ "$SKIP_UL" != "1" ]; then
for file in *.squashfs *.iso *.deb start-cli_*; do
for file in *.deb start-cli_*; do
gh release upload -R Start9Labs/start-os v$VERSION $file
done
for file in *.img; do
if ! [ -f $file.gz ]; then
cat $file | pigz > $file.gz
fi
gh release upload -R Start9Labs/start-os v$VERSION $file.gz
for file in *.iso *.squashfs; do
s3cmd put -P $file s3://startos-images/v$VERSION/$file
done
fi
if [ "$SKIP_INDEX" != "1" ]; then
for arch in aarch64 aarch64-nonfree x86_64 x86_64-nonfree; do
for arch in aarch64 aarch64-nonfree riscv64 x86_64 x86_64-nonfree; do
for file in *_$arch.squashfs *_$arch.iso; do
start-cli --registry=https://alpha-registry-x.start9.com registry os asset add --platform=$arch --version=$VERSION $file https://github.com/Start9Labs/start-os/releases/download/v$VERSION/$(echo -n "$file" | sed 's/~/./g')
done
done
for arch in raspberrypi; do
for file in *_$arch.squashfs; do
start-cli --registry=https://alpha-registry-x.start9.com registry os asset add --platform=$arch --version=$VERSION $file https://github.com/Start9Labs/start-os/releases/download/v$VERSION/$(echo -n "$file" | sed 's/~/./g')
start-cli --registry=https://alpha-registry-x.start9.com registry os asset add --platform=$arch --version=$VERSION $file https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$file
done
done
fi
for file in *.iso *.img *.img.gz *.squashfs *.deb start-cli_*; do
for file in *.iso *.squashfs *.deb start-cli_*; do
gpg -u 7CFFDA41CA66056A --detach-sign --armor -o "${file}.asc" "$file"
done
@@ -90,20 +81,30 @@ tar -czvf signatures.tar.gz *.asc
gh release upload -R Start9Labs/start-os v$VERSION signatures.tar.gz
cat << EOF
# ISO Downloads
- [x86_64/AMD64](https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$(ls *_x86_64-nonfree.iso))
- [x86_64/AMD64-slim (FOSS-only)](https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$(ls *_x86_64.iso) "Without proprietary software or drivers")
- [aarch64/ARM64](https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$(ls *_aarch64-nonfree.iso))
- [aarch64/ARM64-slim (FOSS-Only)](https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$(ls *_aarch64.iso) "Without proprietary software or drivers")
- [RISCV64 (RVA23)](https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$(ls *_riscv64.iso))
EOF
cat << 'EOF'
# StartOS Checksums
## SHA-256
```
EOF
sha256sum *.iso *.img *img.gz *.squashfs
sha256sum *.iso *.squashfs
cat << 'EOF'
```
## BLAKE-3
```
EOF
b3sum *.iso *.img *.img.gz *.squashfs
b3sum *.iso *.squashfs
cat << 'EOF'
```
@@ -138,5 +139,4 @@ EOF
b3sum start-cli_*
cat << 'EOF'
```
EOF
EOF

View File

@@ -0,0 +1,32 @@
# Container Runtime — Node.js Service Manager
Node.js runtime that manages service containers via JSON-RPC. See `RPCSpec.md` in this directory for the full RPC protocol.
## Architecture
```
LXC Container (uniform base for all services)
└── systemd
└── container-runtime.service
└── Loads /usr/lib/startos/package/index.js (from s9pk javascript.squashfs)
└── Package JS launches subcontainers (from images in s9pk)
```
The container runtime communicates with the host via JSON-RPC over a Unix socket. Package JavaScript must export functions conforming to the `ABI` type defined in `sdk/base/lib/types.ts`.
## `/media/startos/` Directory (mounted by host into container)
| Path | Description |
| -------------------- | ----------------------------------------------------- |
| `volumes/<name>/` | Package data volumes (id-mapped, persistent) |
| `assets/` | Read-only assets from s9pk `assets.squashfs` |
| `images/<name>/` | Container images (squashfs, used for subcontainers) |
| `images/<name>.env` | Environment variables for image |
| `images/<name>.json` | Image metadata |
| `backup/` | Backup mount point (mounted during backup operations) |
| `rpc/service.sock` | RPC socket (container runtime listens here) |
| `rpc/host.sock` | Host RPC socket (for effects callbacks to host) |
## S9PK Structure
See `../core/s9pk-structure.md` for the S9PK package format.

View File

@@ -1,16 +1,21 @@
# Container RPC SERVER Specification
# Container RPC Server Specification
The container runtime exposes a JSON-RPC server over a Unix socket at `/media/startos/rpc/service.sock`.
## Methods
### init
initialize runtime (mount `/proc`, `/sys`, `/dev`, and `/run` to each image in `/media/images`)
Initialize the runtime and system.
Called after the OS has mounted the JS bundle and images into the container.
#### params
#### args
`[]`
```ts
{
id: string,
kind: "install" | "update" | "restore" | null,
}
```
#### response
@@ -18,11 +23,16 @@ called after os has mounted js and images to the container
### exit
shutdown runtime
Shutdown runtime and optionally run exit hooks for a target version.
#### args
#### params
`[]`
```ts
{
id: string,
target: string | null, // ExtendedVersion or VersionRange
}
```
#### response
@@ -30,11 +40,11 @@ shutdown runtime
### start
run main method if not already running
Run main method if not already running.
#### args
#### params
`[]`
None
#### response
@@ -42,11 +52,11 @@ run main method if not already running
### stop
stop main method by sending SIGTERM to child processes, and SIGKILL after timeout
Stop main method by sending SIGTERM to child processes, and SIGKILL after timeout.
#### args
#### params
`{ timeout: millis }`
None
#### response
@@ -54,15 +64,16 @@ stop main method by sending SIGTERM to child processes, and SIGKILL after timeou
### execute
run a specific package procedure
Run a specific package procedure.
#### args
#### params
```ts
{
procedure: JsonPath,
input: any,
timeout: millis,
id: string, // event ID
procedure: string, // JSON path (e.g., "/backup/create", "/actions/{name}/run")
input: any,
timeout: number | null,
}
```
@@ -72,18 +83,64 @@ run a specific package procedure
### sandbox
run a specific package procedure in sandbox mode
Run a specific package procedure in sandbox mode. Same interface as `execute`.
#### args
UNIMPLEMENTED: this feature is planned but does not exist
#### params
```ts
{
procedure: JsonPath,
input: any,
timeout: millis,
id: string,
procedure: string,
input: any,
timeout: number | null,
}
```
#### response
`any`
### callback
Handle a callback from an effect.
#### params
```ts
{
id: number,
args: any[],
}
```
#### response
`null` (no response sent)
### eval
Evaluate a script in the runtime context. Used for debugging.
#### params
```ts
{
script: string,
}
```
#### response
`any`
## Procedures
The `execute` and `sandbox` methods route to procedures based on the `procedure` path:
| Procedure | Description |
| -------------------------- | ---------------------------- |
| `/backup/create` | Create a backup |
| `/actions/{name}/getInput` | Get input spec for an action |
| `/actions/{name}/run` | Run an action with input |

View File

@@ -38,7 +38,7 @@
},
"../sdk/dist": {
"name": "@start9labs/start-sdk",
"version": "0.4.0-beta.47",
"version": "0.4.0-beta.48",
"license": "MIT",
"dependencies": {
"@iarna/toml": "^3.0.0",

View File

@@ -319,6 +319,7 @@ export function makeEffects(context: EffectContext): Effects {
}
if (context.callbacks?.onLeaveContext)
self.onLeaveContext(() => {
self.constRetry = undefined
self.isInContext = false
self.onLeaveContext = () => {
console.warn(

View File

@@ -82,18 +82,15 @@ export class DockerProcedureContainer extends Drop {
}),
)
} else if (volumeMount.type === "certificate") {
const hostInfo = await effects.getHostInfo({
hostId: volumeMount["interface-id"],
})
const hostnames = [
`${packageId}.embassy`,
...new Set(
Object.values(
(
await effects.getHostInfo({
hostId: volumeMount["interface-id"],
})
)?.hostnameInfo || {},
)
.flatMap((h) => h)
.flatMap((h) => (h.kind === "onion" ? [h.hostname.value] : [])),
Object.values(hostInfo?.bindings || {})
.flatMap((b) => b.addresses.available)
.map((h) => h.host),
).values(),
]
const certChain = await effects.getSslCertificate({

View File

@@ -1244,12 +1244,8 @@ async function updateConfig(
? ""
: catchFn(
() =>
(specValue.target === "lan-address"
? filled.addressInfo!.filter({ kind: "mdns" }) ||
filled.addressInfo!.onion
: filled.addressInfo!.onion ||
filled.addressInfo!.filter({ kind: "mdns" })
).hostnames[0].hostname.value,
filled.addressInfo!.filter({ kind: "mdns" })!.hostnames[0]
.host,
) || ""
mutConfigValue[key] = url
}

69
core/ARCHITECTURE.md Normal file
View File

@@ -0,0 +1,69 @@
# Core Architecture
The Rust backend daemon for StartOS.
## Binaries
The crate produces a single binary `startbox` that is symlinked under different names for different behavior:
- `startbox` / `startd` — Main daemon
- `start-cli` — CLI interface
- `start-container` — Runs inside LXC containers; communicates with host and manages subcontainers
- `registrybox` — Registry daemon
- `tunnelbox` — VPN/tunnel daemon
## Crate Structure
- `startos` — Core library that supports building `startbox`
- `helpers` — Utility functions used across both `startos` and `js-engine`
- `models` — Types shared across `startos`, `js-engine`, and `helpers`
## Key Modules
- `src/context/` — Context types (RpcContext, CliContext, InitContext, DiagnosticContext)
- `src/service/` — Service lifecycle management with actor pattern (`service_actor.rs`)
- `src/db/model/` — Patch-DB models (`public.rs` synced to frontend, `private.rs` backend-only)
- `src/net/` — Networking (DNS, ACME, WiFi, Tor via Arti, WireGuard)
- `src/s9pk/` — S9PK package format (merkle archive)
- `src/registry/` — Package registry management
## RPC Pattern
The API is JSON-RPC (not REST). All endpoints are RPC methods organized in a hierarchical command structure using [rpc-toolkit](https://github.com/Start9Labs/rpc-toolkit). Handlers are registered in a tree of `ParentHandler` nodes, with four handler types: `from_fn_async` (standard), `from_fn_async_local` (non-Send), `from_fn` (sync), and `from_fn_blocking` (blocking). Metadata like `.with_about()` drives middleware and documentation.
See [rpc-toolkit.md](rpc-toolkit.md) for full handler patterns and configuration.
## Patch-DB Patterns
Patch-DB provides diff-based state synchronization. Changes to `db/model/public.rs` automatically sync to the frontend.
**Key patterns** (combined usage sketch below)**:**
- `db.peek().await` — Get a read-only snapshot of the database state
- `db.mutate(|db| { ... }).await` — Apply mutations atomically, returns `MutateResult`
- `#[derive(HasModel)]` — Derive macro for types stored in the database, generates typed accessors
**Generated accessor types** (from `HasModel` derive):
- `as_field()` — Immutable reference: `&Model<T>`
- `as_field_mut()` — Mutable reference: `&mut Model<T>`
- `into_field()` — Owned value: `Model<T>`
**`Model<T>` APIs** (from `db/prelude.rs`):
- `.de()` — Deserialize to `T`
- `.ser(&value)` — Serialize from `T`
- `.mutate(|v| ...)` — Deserialize, mutate, reserialize
- For maps: `.keys()`, `.as_idx(&key)`, `.as_idx_mut(&key)`, `.insert()`, `.remove()`, `.contains_key()`
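A hedged usage sketch combining the pieces above; the accessor chain and field names are illustrative, not necessarily real model paths:
```rust
// Illustrative only: assumes the generated accessors exist for a
// `server_info.name` field on the public model.
async fn set_server_name(ctx: &RpcContext, name: String) -> Result<(), Error> {
    // Read-only snapshot of the current state
    let current: String = ctx
        .db
        .peek()
        .await
        .as_public()
        .as_server_info()
        .as_name()
        .de()?;

    if current != name {
        // Atomic mutation: everything inside the closure applies together
        ctx.db
            .mutate(|db| {
                db.as_public_mut()
                    .as_server_info_mut()
                    .as_name_mut()
                    .ser(&name)
            })
            .await?;
    }
    Ok(())
}
```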
## i18n
See [i18n-patterns.md](i18n-patterns.md) for internationalization key conventions and the `t!()` macro.
## Rust Utilities & Patterns
See [core-rust-patterns.md](core-rust-patterns.md) for common utilities (Invoke trait, Guard pattern, mount guards, Apply trait, etc.).
## Related Documentation
- [rpc-toolkit.md](rpc-toolkit.md) — JSON-RPC handler patterns
- [i18n-patterns.md](i18n-patterns.md) — Internationalization conventions
- [core-rust-patterns.md](core-rust-patterns.md) — Common Rust utilities
- [s9pk-structure.md](s9pk-structure.md) — S9PK package format

25
core/CLAUDE.md Normal file
View File

@@ -0,0 +1,25 @@
# Core — Rust Backend
The Rust backend daemon for StartOS.
## Architecture
See [ARCHITECTURE.md](ARCHITECTURE.md) for binaries, modules, Patch-DB patterns, and related documentation.
See [CONTRIBUTING.md](CONTRIBUTING.md) for how to add RPC endpoints, TS-exported types, and i18n keys.
## Quick Reference
```bash
cargo check -p start-os # Type check
make test-core # Run tests
make ts-bindings # Regenerate TS types after changing #[ts(export)] structs
cd sdk && make baseDist dist # Rebuild SDK after ts-bindings
```
## Operating Rules
- Always run `cargo check -p start-os` after modifying Rust code
- When adding RPC endpoints, follow the patterns in [rpc-toolkit.md](rpc-toolkit.md)
- When modifying `#[ts(export)]` types, regenerate bindings and rebuild the SDK (see [ARCHITECTURE.md](../ARCHITECTURE.md#build-pipeline))
- When adding i18n keys, add all 5 locales in `core/locales/i18n.yaml` (see [i18n-patterns.md](i18n-patterns.md))

49
core/CONTRIBUTING.md Normal file
View File

@@ -0,0 +1,49 @@
# Contributing to Core
For general environment setup, cloning, and build system, see the root [CONTRIBUTING.md](../CONTRIBUTING.md).
## Prerequisites
- [Rust](https://rustup.rs) (nightly for formatting)
- [rust-analyzer](https://rust-analyzer.github.io/) recommended
- [Docker](https://docs.docker.com/get-docker/) (for cross-compilation via `rust-zig-builder` container)
## Common Commands
```bash
cargo check -p start-os # Type check
cargo test --features=test # Run tests (or: make test-core)
make format # Format with nightly rustfmt
cd core && cargo test <test_name> --features=test # Run a specific test
```
## Adding a New RPC Endpoint
1. Define a params struct with `#[derive(Deserialize, Serialize)]`
2. Choose a handler type (`from_fn_async` for most cases)
3. Write the handler function: `async fn my_handler(ctx: RpcContext, params: MyParams) -> Result<MyResponse, Error>`
4. Register it in the appropriate `ParentHandler` tree
5. If params/response should be available in TypeScript, add `#[derive(TS)]` and `#[ts(export)]`
See [rpc-toolkit.md](rpc-toolkit.md) for full handler patterns and all four handler types.
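A hedged sketch of steps 1–5 together; the endpoint name and params are invented, and the registration line is shown only as a comment since the exact builder calls are covered in rpc-toolkit.md:
```rust
use serde::{Deserialize, Serialize};
use ts_rs::TS;

// Step 1 + 5: params struct, exported to TypeScript because the UI calls it.
#[derive(Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct EchoParams {
    pub message: String,
}

// Steps 2-3: a standard async handler (from_fn_async).
pub async fn echo(_ctx: RpcContext, params: EchoParams) -> Result<String, Error> {
    Ok(params.message)
}

// Step 4 (sketch): register inside the relevant ParentHandler tree, e.g.
//   .subcommand("echo", from_fn_async(echo).with_about(...))
```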
## Adding TS-Exported Types
When a Rust type needs to be available in TypeScript (for the web frontend or SDK):
1. Add `ts_rs::TS` to the derive list and `#[ts(export)]` to the struct/enum
2. Use `#[serde(rename_all = "camelCase")]` for JS-friendly field names
3. For types that don't implement TS (like `DateTime<Utc>`, `exver::Version`), use `#[ts(type = "string")]` overrides
4. For `u64` fields that should be JS `number` (not `bigint`), use `#[ts(type = "number")]`
5. Run `make ts-bindings` to regenerate — files appear in `core/bindings/` then sync to `sdk/base/lib/osBindings/`
6. Rebuild the SDK: `cd sdk && make baseDist dist`
## Adding i18n Keys
1. Add the key to `core/locales/i18n.yaml` with all 5 language translations
2. Use the `t!("your.key.name")` macro in Rust code
3. Follow existing namespace conventions — match the module path where the key is used
4. Use kebab-case for multi-word segments
5. Translations are validated at compile time
See [i18n-patterns.md](i18n-patterns.md) for full conventions.

3834
core/Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -15,7 +15,7 @@ license = "MIT"
name = "start-os"
readme = "README.md"
repository = "https://github.com/Start9Labs/start-os"
version = "0.4.0-alpha.17" # VERSION_BUMP
version = "0.4.0-alpha.20" # VERSION_BUMP
[lib]
name = "startos"
@@ -42,17 +42,6 @@ name = "tunnelbox"
path = "src/main/tunnelbox.rs"
[features]
arti = [
"arti-client",
"safelog",
"tor-cell",
"tor-hscrypto",
"tor-hsservice",
"tor-keymgr",
"tor-llcrypto",
"tor-proto",
"tor-rtcompat",
]
beta = []
console = ["console-subscriber", "tokio/tracing"]
default = []
@@ -62,16 +51,6 @@ unstable = ["backtrace-on-stack-overflow"]
[dependencies]
aes = { version = "0.7.5", features = ["ctr"] }
arti-client = { version = "0.33", features = [
"compression",
"ephemeral-keystore",
"experimental-api",
"onion-service-client",
"onion-service-service",
"rustls",
"static",
"tokio",
], default-features = false, git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit", optional = true }
async-acme = { version = "0.6.0", git = "https://github.com/dr-bonez/async-acme.git", features = [
"use_rustls",
"use_tokio",
@@ -100,7 +79,6 @@ console-subscriber = { version = "0.5.0", optional = true }
const_format = "0.2.34"
cookie = "0.18.0"
cookie_store = "0.22.0"
curve25519-dalek = "4.1.3"
der = { version = "0.7.9", features = ["derive", "pem"] }
digest = "0.10.7"
divrem = "1.0.0"
@@ -176,6 +154,7 @@ mio = "1"
new_mime_guess = "4"
nix = { version = "0.30.1", features = [
"fs",
"hostname",
"mount",
"net",
"process",
@@ -213,8 +192,8 @@ reqwest = { version = "0.12.25", features = [
reqwest_cookie_store = "0.9.0"
rpassword = "7.2.0"
rust-argon2 = "3.0.0"
rust-i18n = "3.1.5"
rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git" }
safelog = { version = "0.4.8", git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit", optional = true }
semver = { version = "1.0.20", features = ["serde"] }
serde = { version = "1.0", features = ["derive", "rc"] }
serde_cbor = { package = "ciborium", version = "0.2.1" }
@@ -242,28 +221,11 @@ tokio-stream = { version = "0.1.14", features = ["io-util", "net", "sync"] }
tokio-tar = { git = "https://github.com/dr-bonez/tokio-tar.git" }
tokio-tungstenite = { version = "0.26.2", features = ["native-tls", "url"] }
tokio-util = { version = "0.7.9", features = ["io"] }
tor-cell = { version = "0.33", git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit", optional = true }
tor-hscrypto = { version = "0.33", features = [
"full",
], git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit", optional = true }
tor-hsservice = { version = "0.33", git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit", optional = true }
tor-keymgr = { version = "0.33", features = [
"ephemeral-keystore",
], git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit", optional = true }
tor-llcrypto = { version = "0.33", features = [
"full",
], git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit", optional = true }
tor-proto = { version = "0.33", git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit", optional = true }
tor-rtcompat = { version = "0.33", features = [
"rustls",
"tokio",
], git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit", optional = true }
torut = "0.2.1"
tower-service = "0.3.3"
tracing = "0.1.39"
tracing-error = "0.2.0"
tracing-journald = "0.3.0"
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
tracing-subscriber = { version = "=0.3.19", features = ["env-filter"] }
ts-rs = "9.0.1"
typed-builder = "0.23.2"
url = { version = "2.4.1", features = ["serde"] }

View File

@@ -22,9 +22,7 @@ several different names for different behavior:
- `start-sdk`: This is a CLI tool that aids in building and packaging services
you wish to deploy to StartOS
## Questions
## Documentation
If you have questions about how various pieces of the backend system work, open
an issue and tag the following people:
- dr-bonez
- [ARCHITECTURE.md](ARCHITECTURE.md) — Backend architecture, modules, and patterns
- [CONTRIBUTING.md](CONTRIBUTING.md) — How to contribute to core

View File

@@ -26,7 +26,7 @@ PROFILE=${PROFILE:-release}
if [ "${PROFILE}" = "release" ]; then
BUILD_FLAGS="--release"
else
if [ "$PROFILE" != "debug"]; then
if [ "$PROFILE" != "debug" ]; then
>&2 echo "Unknown profile $PROFILE: falling back to debug..."
PROFILE=debug
fi

View File

@@ -7,11 +7,11 @@ source ./builder-alias.sh
set -ea
shopt -s expand_aliases
PROFILE=${PROFILE:-release}
PROFILE=${PROFILE:-debug}
if [ "${PROFILE}" = "release" ]; then
BUILD_FLAGS="--release"
else
if [ "$PROFILE" != "debug"]; then
if [ "$PROFILE" != "debug" ]; then
>&2 echo "Unknown profile $PROFILE: falling back to debug..."
PROFILE=debug
fi
@@ -38,7 +38,7 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then
fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo test --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features test,$FEATURES --locked 'export_bindings_'
rust-zig-builder cargo test --manifest-path=./core/Cargo.toml --lib $BUILD_FLAGS --features test,$FEATURES --locked 'export_bindings_'
if [ "$(ls -nd "core/bindings" | awk '{ print $3 }')" != "$UID" ]; then
rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID core/bindings && chown -R $UID:$UID /usr/local/cargo"
fi

249
core/core-rust-patterns.md Normal file
View File

@@ -0,0 +1,249 @@
# Utilities & Patterns
This document covers common utilities and patterns used throughout the StartOS codebase.
## Util Module (`core/src/util/`)
The `util` module contains reusable utilities. Key submodules:
| Module | Purpose |
|--------|---------|
| `actor/` | Actor pattern implementation for concurrent state management |
| `collections/` | Custom collection types |
| `crypto.rs` | Cryptographic utilities (encryption, hashing) |
| `future.rs` | Future/async utilities |
| `io.rs` | File I/O helpers (create_file, canonicalize, etc.) |
| `iter.rs` | Iterator extensions |
| `net.rs` | Network utilities |
| `rpc.rs` | RPC helpers |
| `rpc_client.rs` | RPC client utilities |
| `serde.rs` | Serialization helpers (Base64, display/fromstr, etc.) |
| `sync.rs` | Synchronization primitives (SyncMutex, etc.) |
## Command Invocation (`Invoke` trait)
The `Invoke` trait provides a clean way to run external commands with error handling:
```rust
use crate::util::Invoke;
// Simple invocation
tokio::process::Command::new("ls")
.arg("-la")
.invoke(ErrorKind::Filesystem)
.await?;
// With timeout
tokio::process::Command::new("slow-command")
.timeout(Some(Duration::from_secs(30)))
.invoke(ErrorKind::Timeout)
.await?;
// With input
let mut input = Cursor::new(b"input data");
tokio::process::Command::new("cat")
.input(Some(&mut input))
.invoke(ErrorKind::Filesystem)
.await?;
// Piped commands
tokio::process::Command::new("cat")
.arg("file.txt")
.pipe(&mut tokio::process::Command::new("grep").arg("pattern"))
.invoke(ErrorKind::Filesystem)
.await?;
```
## Guard Pattern
Guards ensure cleanup happens when they go out of scope.
### `GeneralGuard` / `GeneralBoxedGuard`
For arbitrary cleanup actions:
```rust
use crate::util::GeneralGuard;
let guard = GeneralGuard::new(|| {
println!("Cleanup runs on drop");
});
// Do work...
// Explicit drop with action
guard.drop();
// Or skip the action
// guard.drop_without_action();
```
### `FileLock`
File-based locking with automatic unlock:
```rust
use crate::util::FileLock;
let lock = FileLock::new("/path/to/lockfile", true).await?; // blocking=true
// Lock held until dropped or explicitly unlocked
lock.unlock().await?;
```
## Mount Guard Pattern (`core/src/disk/mount/guard.rs`)
RAII guards for filesystem mounts; they ensure filesystems are unmounted when the guards are dropped.
### `MountGuard`
Basic mount guard:
```rust
use crate::disk::mount::guard::MountGuard;
use crate::disk::mount::filesystem::{MountType, ReadOnly};
let guard = MountGuard::mount(&filesystem, "/mnt/target", ReadOnly).await?;
// Use the mounted filesystem at guard.path()
do_something(guard.path()).await?;
// Explicit unmount (or auto-unmounts on drop)
guard.unmount(false).await?; // false = don't delete mountpoint
```
### `TmpMountGuard`
Reference-counted temporary mount (mounts to `/media/startos/tmp/`):
```rust
use crate::disk::mount::guard::TmpMountGuard;
use crate::disk::mount::filesystem::ReadOnly;
// Multiple clones share the same mount
let guard1 = TmpMountGuard::mount(&filesystem, ReadOnly).await?;
let guard2 = guard1.clone();
// Mount stays alive while any guard exists
// Auto-unmounts when last guard is dropped
```
### `GenericMountGuard` trait
All mount guards implement this trait:
```rust
pub trait GenericMountGuard: std::fmt::Debug + Send + Sync + 'static {
fn path(&self) -> &Path;
fn unmount(self) -> impl Future<Output = Result<(), Error>> + Send;
}
```
### `SubPath`
Wraps a mount guard to point to a subdirectory:
```rust
use crate::disk::mount::guard::SubPath;
let mount = TmpMountGuard::mount(&filesystem, ReadOnly).await?;
let subdir = SubPath::new(mount, "data/subdir");
// subdir.path() returns the full path including subdirectory
```
## FileSystem Implementations (`core/src/disk/mount/filesystem/`)
Various filesystem types that can be mounted:
| Type | Description |
|------|-------------|
| `bind.rs` | Bind mounts |
| `block_dev.rs` | Block device mounts |
| `cifs.rs` | CIFS/SMB network shares |
| `ecryptfs.rs` | Encrypted filesystem |
| `efivarfs.rs` | EFI variables |
| `httpdirfs.rs` | HTTP directory as filesystem |
| `idmapped.rs` | ID-mapped mounts |
| `label.rs` | Mount by label |
| `loop_dev.rs` | Loop device mounts |
| `overlayfs.rs` | Overlay filesystem |
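Combining this table with the mount guards above, a hypothetical sketch (the `BlockDev::new(path)` constructor and the `read_backup_metadata` helper are assumptions for illustration, not the actual API):
```rust
use crate::disk::mount::filesystem::ReadOnly;
use crate::disk::mount::filesystem::block_dev::BlockDev;
use crate::disk::mount::guard::{GenericMountGuard, TmpMountGuard};

// NOTE: `BlockDev::new` is assumed here; check block_dev.rs for the real constructor.
let fs = BlockDev::new("/dev/sda1");
// Reference-counted temporary mount, read-only
let guard = TmpMountGuard::mount(&fs, ReadOnly).await?;
read_backup_metadata(guard.path()).await?;
// Explicit unmount (also happens automatically when the last guard is dropped)
guard.unmount().await?;
```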
## Other Useful Utilities
### `Apply` / `ApplyRef` traits
Fluent method chaining:
```rust
use crate::util::Apply;
let result = some_value
.apply(|v| transform(v))
.apply(|v| another_transform(v));
```
### `Container<T>`
Async-safe optional container:
```rust
use crate::util::Container;
let container = Container::new(None);
container.set(value).await;
let taken = container.take().await;
```
### `HashWriter<H, W>`
Write data while computing hash:
```rust
use crate::util::HashWriter;
use sha2::Sha256;
let mut writer = HashWriter::new(Sha256::new(), file);
// Write data...
let (hasher, file) = writer.finish();
let hash = hasher.finalize();
```
### `Never` type
Uninhabited type for impossible cases:
```rust
use crate::util::Never;
fn impossible() -> Never {
// A function returning Never cannot return, so its body must diverge
loop {}
}
let never: Never = impossible();
never.absurd::<String>() // Can convert to any type
```
### `MaybeOwned<'a, T>`
Either borrowed or owned data:
```rust
use crate::util::MaybeOwned;
fn accept_either(data: MaybeOwned<'_, String>) {
// Use &*data to access the value
}
accept_either(MaybeOwned::from(&existing_string));
accept_either(MaybeOwned::from(owned_string));
```
### `new_guid()`
Generate a random GUID:
```rust
use crate::util::new_guid;
let guid = new_guid(); // Returns InternedString
```

100
core/i18n-patterns.md Normal file
View File

@@ -0,0 +1,100 @@
# i18n Patterns in `core/`
## Library & Setup
**Crate:** [`rust-i18n`](https://crates.io/crates/rust-i18n) v3.1.5 (`core/Cargo.toml`)
**Initialization** (`core/src/lib.rs:3`):
```rust
rust_i18n::i18n!("locales", fallback = ["en_US"]);
```
This macro scans `core/locales/` at compile time and embeds all translations as constants.
**Prelude re-export** (`core/src/prelude.rs:4`):
```rust
pub use rust_i18n::t;
```
Most modules import `t!` via the prelude.
## Translation File
**Location:** `core/locales/i18n.yaml`
**Format:** YAML v2 (~755 keys)
**Supported languages:** `en_US`, `de_DE`, `es_ES`, `fr_FR`, `pl_PL`
**Entry structure:**
```yaml
namespace.sub.key-name:
en_US: "English text with %{param}"
de_DE: "German text with %{param}"
# ...
```
## Using `t!()`
```rust
// Simple key
t!("error.unknown")
// With parameter interpolation (%{name} in YAML)
t!("bins.deprecated.renamed", old = old_name, new = new_name)
```
## Key Naming Conventions
Keys use **dot-separated hierarchical namespaces** with **kebab-case** for multi-word segments:
```
<module>.<submodule>.<descriptive-name>
```
Examples:
- `error.incorrect-password` — error kind label
- `bins.start-init.updating-firmware` — startup phase message
- `backup.bulk.complete-title` — backup notification title
- `help.arg.acme-contact` — CLI help text for an argument
- `context.diagnostic.starting-diagnostic-ui` — diagnostic context status
### Top-Level Namespaces
| Namespace | Purpose |
|-----------|---------|
| `error.*` | `ErrorKind` display strings (see `src/error.rs`) |
| `bins.*` | CLI binary messages (deprecated, start-init, startd, etc.) |
| `init.*` | Initialization phase labels |
| `setup.*` | First-run setup messages |
| `context.*` | Context startup messages (diagnostic, setup, CLI) |
| `service.*` | Service lifecycle messages |
| `backup.*` | Backup/restore operation messages |
| `registry.*` | Package registry messages |
| `net.*` | Network-related messages |
| `middleware.*` | Request middleware messages (auth, etc.) |
| `disk.*` | Disk operation messages |
| `lxc.*` | Container management messages |
| `system.*` | System monitoring/metrics messages |
| `notifications.*` | User-facing notification messages |
| `update.*` | OS update messages |
| `util.*` | Utility messages (TUI, RPC) |
| `ssh.*` | SSH operation messages |
| `shutdown.*` | Shutdown-related messages |
| `logs.*` | Log-related messages |
| `auth.*` | Authentication messages |
| `help.*` | CLI help text (`help.arg.<arg-name>`) |
| `about.*` | CLI command descriptions |
## Locale Selection
`set_locale_from_env()` (`core/src/bins/mod.rs:15-36`):
1. Reads `LANG` environment variable
2. Strips `.UTF-8` suffix
3. Exact-matches against available locales, falls back to language-prefix match (e.g. `en_GB` matches `en_US`)
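The selection logic above is short enough to sketch here (mirroring `set_locale_from_env` / `set_locale` in `core/src/bins/mod.rs`):
```rust
pub fn set_locale_from_env() {
    // LANG is typically something like "de_DE.UTF-8"
    let lang = std::env::var("LANG").ok();
    let lang = lang
        .as_deref()
        .map_or("C", |l| l.strip_suffix(".UTF-8").unwrap_or(l));
    set_locale(lang)
}

pub fn set_locale(lang: &str) {
    // Prefer an exact match; otherwise fall back to the first available
    // locale sharing the same language prefix (e.g. "en_GB" -> "en_US").
    let mut best = None;
    let prefix = lang.split_inclusive("_").next().unwrap();
    for l in rust_i18n::available_locales!() {
        if l == lang {
            best = Some(l);
            break;
        }
        if best.is_none() && l.starts_with(prefix) {
            best = Some(l);
        }
    }
    rust_i18n::set_locale(best.unwrap_or(lang));
}
```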
## Adding New Keys
1. Add the key to `core/locales/i18n.yaml` with all 5 language translations
2. Use the `t!("your.key.name")` macro in Rust code
3. Follow existing namespace conventions — match the module path where the key is used
4. Use kebab-case for multi-word segments
5. Translations are validated at compile time
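For example, adding a hypothetical `backup.bulk.example-complete` key (the key name and translations below are illustrative only):
```yaml
backup.bulk.example-complete:
  en_US: "Backup of %{id} complete"
  de_DE: "Sicherung von %{id} abgeschlossen"
  es_ES: "Copia de seguridad de %{id} completada"
  fr_FR: "Sauvegarde de %{id} terminée"
  pl_PL: "Kopia zapasowa %{id} ukończona"
```
It would then be used from Rust as `t!("backup.bulk.example-complete", id = package_id)`.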

5343
core/locales/i18n.yaml Normal file

File diff suppressed because it is too large

226
core/rpc-toolkit.md Normal file
View File

@@ -0,0 +1,226 @@
# rpc-toolkit
StartOS uses [rpc-toolkit](https://github.com/Start9Labs/rpc-toolkit) for its JSON-RPC API. This document covers the patterns used in this codebase.
## Overview
The API is JSON-RPC (not REST). All endpoints are RPC methods organized in a hierarchical command structure.
## Handler Functions
There are four types of handler functions, chosen based on the function's characteristics:
### `from_fn_async` - Async handlers
For standard async functions. Most handlers use this.
```rust
pub async fn my_handler(ctx: RpcContext, params: MyParams) -> Result<MyResponse, Error> {
// Can use .await
}
from_fn_async(my_handler)
```
### `from_fn_async_local` - Non-thread-safe async handlers
For async functions that are not `Send` (cannot be safely moved between threads). Use when working with non-thread-safe types.
```rust
pub async fn cli_download(ctx: CliContext, params: Params) -> Result<(), Error> {
// Non-Send async operations
}
from_fn_async_local(cli_download)
```
### `from_fn_blocking` - Sync blocking handlers
For synchronous functions that perform blocking I/O or long computations.
```rust
pub fn query_dns(ctx: RpcContext, params: DnsParams) -> Result<DnsResponse, Error> {
// Blocking operations (file I/O, DNS lookup, etc.)
}
from_fn_blocking(query_dns)
```
### `from_fn` - Sync non-blocking handlers
For pure functions or quick synchronous operations with no I/O.
```rust
pub fn echo(ctx: RpcContext, params: EchoParams) -> Result<String, Error> {
Ok(params.message)
}
from_fn(echo)
```
## ParentHandler
Groups related RPC methods into a hierarchy:
```rust
use rpc_toolkit::{Context, HandlerExt, ParentHandler, from_fn_async};
pub fn my_api<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand("list", from_fn_async(list_handler).with_call_remote::<CliContext>())
.subcommand("create", from_fn_async(create_handler).with_call_remote::<CliContext>())
}
```
## Handler Extensions
Chain methods to configure handler behavior.
**Ordering rules:**
1. `with_about()` must come AFTER other CLI modifiers (`no_display()`, `with_custom_display_fn()`, etc.)
2. `with_call_remote()` must be the LAST adapter in the chain
| Method | Purpose |
|--------|---------|
| `.with_metadata("key", Value)` | Attach metadata for middleware |
| `.no_cli()` | RPC-only, not available via CLI |
| `.no_display()` | No CLI output |
| `.with_display_serializable()` | Default JSON/YAML output for CLI |
| `.with_custom_display_fn(\|_, res\| ...)` | Custom CLI output formatting |
| `.with_about("about.description")` | Add help text (i18n key) - **after CLI modifiers** |
| `.with_call_remote::<CliContext>()` | Enable CLI to call remotely - **must be last** |
### Correct ordering example:
```rust
from_fn_async(my_handler)
.with_metadata("sync_db", Value::Bool(true)) // metadata early
.no_display() // CLI modifier
.with_about("about.my-handler") // after CLI modifiers
.with_call_remote::<CliContext>() // always last
```
## Metadata by Middleware
Metadata tags are processed by different middleware. Group them logically:
### Auth Middleware (`middleware/auth/mod.rs`)
| Metadata | Default | Description |
|----------|---------|-------------|
| `authenticated` | `true` | Whether endpoint requires authentication. Set to `false` for public endpoints. |
### Session Auth Middleware (`middleware/auth/session.rs`)
| Metadata | Default | Description |
|----------|---------|-------------|
| `login` | `false` | Special handling for login endpoints (rate limiting, cookie setting) |
| `get_session` | `false` | Inject session ID into params as `__Auth_session` |
### Signature Auth Middleware (`middleware/auth/signature.rs`)
| Metadata | Default | Description |
|----------|---------|-------------|
| `get_signer` | `false` | Inject signer public key into params as `__Auth_signer` |
### Registry Auth (extends Signature Auth)
| Metadata | Default | Description |
|----------|---------|-------------|
| `admin` | `false` | Require admin privileges (signer must be in admin list) |
| `get_device_info` | `false` | Inject device info header for hardware filtering |
### Database Middleware (`middleware/db.rs`)
| Metadata | Default | Description |
|----------|---------|-------------|
| `sync_db` | `false` | Sync database after mutation, add `X-Patch-Sequence` header |
## Context Types
Different contexts for different execution environments:
- `RpcContext` - Web/RPC requests with full service access
- `CliContext` - CLI operations, calls remote RPC
- `InitContext` - During system initialization
- `DiagnosticContext` - Diagnostic/recovery mode
- `RegistryContext` - Registry daemon context
- `EffectContext` - Service effects context (container-to-host calls)
## Parameter Structs
Parameters use derive macros for JSON-RPC, CLI parsing, and TypeScript generation:
```rust
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")] // JSON-RPC uses camelCase
#[command(rename_all = "kebab-case")] // CLI uses kebab-case
#[ts(export)] // Generate TypeScript types
pub struct MyParams {
pub package_id: PackageId,
}
```
### Middleware Injection
Auth middleware can inject values into params using special field names:
```rust
#[derive(Deserialize, Serialize, Parser, TS)]
pub struct MyParams {
#[ts(skip)]
#[serde(rename = "__Auth_session")] // Injected by session auth
session: InternedString,
#[ts(skip)]
#[serde(rename = "__Auth_signer")] // Injected by signature auth
signer: AnyVerifyingKey,
#[ts(skip)]
#[serde(rename = "__Auth_userAgent")] // Injected during login
user_agent: Option<String>,
}
```
## Common Patterns
### Adding a New RPC Endpoint
1. Define params struct with `Deserialize, Serialize, Parser, TS`
2. Choose handler type based on sync/async and thread-safety
3. Write handler function taking `(Context, Params) -> Result<Response, Error>`
4. Add to parent handler with appropriate extensions (display modifiers before `with_about`)
5. TypeScript types auto-generated via `make ts-bindings`
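Putting these steps together, a hypothetical `status` endpoint might look like the sketch below (the names and the `about.package-status` / `help.arg.package-id` keys are illustrative, not existing definitions):
```rust
use clap::Parser;
use rpc_toolkit::{Context, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize};
use ts_rs::TS;

use crate::PackageId;
use crate::context::{CliContext, RpcContext};
use crate::prelude::*;

// 1. Params struct: camelCase over JSON-RPC, kebab-case on the CLI, exported TS type.
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
#[ts(export)]
pub struct StatusParams {
    #[arg(help = "help.arg.package-id")]
    pub package_id: PackageId,
}

// 2/3. Standard async handler: (Context, Params) -> Result<Response, Error>.
pub async fn status(ctx: RpcContext, params: StatusParams) -> Result<String, Error> {
    // ... look up params.package_id via ctx and build a response ...
    Ok(format!("{} is installed", params.package_id))
}

// 4. Register it: display modifiers first, then with_about, with_call_remote last.
pub fn status_api<C: Context>() -> ParentHandler<C> {
    ParentHandler::new().subcommand(
        "status",
        from_fn_async(status)
            .with_display_serializable()
            .with_about("about.package-status")
            .with_call_remote::<CliContext>(),
    )
}
```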
### Public (Unauthenticated) Endpoint
```rust
from_fn_async(get_info)
.with_metadata("authenticated", Value::Bool(false))
.with_display_serializable()
.with_about("about.get-info")
.with_call_remote::<CliContext>() // last
```
### Mutating Endpoint with DB Sync
```rust
from_fn_async(update_config)
.with_metadata("sync_db", Value::Bool(true))
.no_display()
.with_about("about.update-config")
.with_call_remote::<CliContext>() // last
```
### Session-Aware Endpoint
```rust
from_fn_async(logout)
.with_metadata("get_session", Value::Bool(true))
.no_display()
.with_about("about.logout")
.with_call_remote::<CliContext>() // last
```
## File Locations
- Handler definitions: Throughout `core/src/` modules
- Main API tree: `core/src/lib.rs` (`main_api()`, `server()`, `package()`)
- Auth middleware: `core/src/middleware/auth/`
- DB middleware: `core/src/middleware/db.rs`
- Context types: `core/src/context/`

122
core/s9pk-structure.md Normal file
View File

@@ -0,0 +1,122 @@
# S9PK Package Format
S9PK is the package format for StartOS services. Version 2 uses a merkle archive structure for efficient downloading and cryptographic verification.
## File Format
S9PK files begin with a 3-byte header: `0x3b 0x3b 0x02` (magic bytes + version 2).
The archive is cryptographically signed with Ed25519 over prehashed content (a SHA-512 digest of the blake3 merkle root hash).
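A minimal sketch of the header check (the constant and function names are illustrative; the real parser lives under `core/src/s9pk/`):
```rust
/// Magic bytes `;;` (0x3b 0x3b) followed by the container version (2).
const S9PK_HEADER: [u8; 3] = [0x3b, 0x3b, 0x02];

fn is_s9pk_v2(bytes: &[u8]) -> bool {
    bytes.starts_with(&S9PK_HEADER)
}
```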
## Archive Structure
```
/
├── manifest.json # Package metadata (required)
├── icon.<ext> # Package icon - any image/* format (required)
├── LICENSE.md # License text (required)
├── dependencies/ # Dependency metadata (optional)
│ └── <package-id>/
│ ├── metadata.json # DependencyMetadata
│ └── icon.<ext> # Dependency icon
├── javascript.squashfs # Package JavaScript code (required)
├── assets.squashfs # Static assets (optional, legacy: assets/ directory)
└── images/ # Container images by architecture
└── <arch>/ # e.g., x86_64, aarch64, riscv64
├── <image-id>.squashfs # Container filesystem
├── <image-id>.json # Image metadata
└── <image-id>.env # Environment variables
```
## Components
### manifest.json
The package manifest contains all metadata:
| Field | Type | Description |
|-------|------|-------------|
| `id` | string | Package identifier (e.g., `bitcoind`) |
| `title` | string | Display name |
| `version` | string | Extended version string |
| `satisfies` | string[] | Version ranges this version satisfies |
| `releaseNotes` | string/object | Release notes (localized) |
| `canMigrateTo` | string | Version range for forward migration |
| `canMigrateFrom` | string | Version range for backward migration |
| `license` | string | License type |
| `wrapperRepo` | string | StartOS wrapper repository URL |
| `upstreamRepo` | string | Upstream project URL |
| `supportSite` | string | Support site URL |
| `marketingSite` | string | Marketing site URL |
| `donationUrl` | string? | Optional donation URL |
| `docsUrl` | string? | Optional documentation URL |
| `description` | object | Short and long descriptions (localized) |
| `images` | object | Image configurations by image ID |
| `volumes` | string[] | Volume IDs for persistent data |
| `alerts` | object | User alerts for lifecycle events |
| `dependencies` | object | Package dependencies |
| `hardwareRequirements` | object | Hardware requirements (arch, RAM, devices) |
| `hardwareAcceleration` | boolean | Whether package uses hardware acceleration |
| `gitHash` | string? | Git commit hash |
| `osVersion` | string | Minimum StartOS version |
| `sdkVersion` | string? | SDK version used to build |
### javascript.squashfs
Contains the package JavaScript that implements the `ABI` interface from `@start9labs/start-sdk-base`. This code runs in the container runtime and manages the package lifecycle.
The squashfs is mounted at `/usr/lib/startos/package/` and the runtime loads `index.js`.
### images/
Container images organized by architecture:
- **`<image-id>.squashfs`** - Container root filesystem
- **`<image-id>.json`** - Image metadata (entrypoint, user, workdir, etc.)
- **`<image-id>.env`** - Environment variables for the container
Images are built from Docker/Podman and converted to squashfs. The `ImageConfig` in manifest specifies:
- `arch` - Supported architectures
- `emulateMissingAs` - Fallback architecture for emulation
- `nvidiaContainer` - Whether to enable NVIDIA container support
### assets.squashfs
Static assets accessible to the package, mounted read-only at `/media/startos/assets/` in the container.
### dependencies/
Metadata for dependencies displayed in the UI:
- `metadata.json` - Just title for now
- `icon.<ext>` - Icon for the dependency
## Merkle Archive
The S9PK uses a merkle tree structure where each file and directory has a blake3 hash. This enables:
1. **Partial downloads** - Download and verify individual files
2. **Integrity verification** - Verify any subset of the archive
3. **Efficient updates** - Only download changed portions
4. **DOS protection** - Size limits enforced before downloading content
Files are sorted by priority for streaming (manifest first, then icon, license, dependencies, javascript, assets, images).
## Building S9PK
Use `start-cli s9pk pack` to build packages:
```bash
start-cli s9pk pack <manifest-path> -o <output.s9pk>
```
Images can be sourced from:
- Docker/Podman build (`--docker-build`)
- Existing Docker tag (`--docker-tag`)
- Pre-built squashfs files
## Related Code
- `core/src/s9pk/v2/mod.rs` - S9pk struct and serialization
- `core/src/s9pk/v2/manifest.rs` - Manifest types
- `core/src/s9pk/v2/pack.rs` - Packing logic
- `core/src/s9pk/merkle_archive/` - Merkle archive implementation

View File

@@ -8,7 +8,6 @@ use openssl::x509::X509;
use crate::db::model::DatabaseModel;
use crate::hostname::{Hostname, generate_hostname, generate_id};
use crate::net::ssl::{gen_nistp256, make_root_cert};
use crate::net::tor::TorSecretKey;
use crate::prelude::*;
use crate::util::serde::Pem;
@@ -26,7 +25,6 @@ pub struct AccountInfo {
pub server_id: String,
pub hostname: Hostname,
pub password: String,
pub tor_keys: Vec<TorSecretKey>,
pub root_ca_key: PKey<Private>,
pub root_ca_cert: X509,
pub ssh_key: ssh_key::PrivateKey,
@@ -36,7 +34,6 @@ impl AccountInfo {
pub fn new(password: &str, start_time: SystemTime) -> Result<Self, Error> {
let server_id = generate_id();
let hostname = generate_hostname();
let tor_key = vec![TorSecretKey::generate()];
let root_ca_key = gen_nistp256()?;
let root_ca_cert = make_root_cert(&root_ca_key, &hostname, start_time)?;
let ssh_key = ssh_key::PrivateKey::from(ssh_key::private::Ed25519Keypair::random(
@@ -48,7 +45,6 @@ impl AccountInfo {
server_id,
hostname,
password: hash_password(password)?,
tor_keys: tor_key,
root_ca_key,
root_ca_cert,
ssh_key,
@@ -61,17 +57,6 @@ impl AccountInfo {
let hostname = Hostname(db.as_public().as_server_info().as_hostname().de()?);
let password = db.as_private().as_password().de()?;
let key_store = db.as_private().as_key_store();
let tor_addrs = db
.as_public()
.as_server_info()
.as_network()
.as_host()
.as_onions()
.de()?;
let tor_keys = tor_addrs
.into_iter()
.map(|tor_addr| key_store.as_onion().get_key(&tor_addr))
.collect::<Result<_, _>>()?;
let cert_store = key_store.as_local_certs();
let root_ca_key = cert_store.as_root_key().de()?.0;
let root_ca_cert = cert_store.as_root_cert().de()?.0;
@@ -82,7 +67,6 @@ impl AccountInfo {
server_id,
hostname,
password,
tor_keys,
root_ca_key,
root_ca_cert,
ssh_key,
@@ -97,17 +81,6 @@ impl AccountInfo {
server_info
.as_pubkey_mut()
.ser(&self.ssh_key.public_key().to_openssh()?)?;
server_info
.as_network_mut()
.as_host_mut()
.as_onions_mut()
.ser(
&self
.tor_keys
.iter()
.map(|tor_key| tor_key.onion_address())
.collect(),
)?;
server_info.as_password_hash_mut().ser(&self.password)?;
db.as_private_mut().as_password_mut().ser(&self.password)?;
db.as_private_mut()
@@ -117,9 +90,6 @@ impl AccountInfo {
.as_developer_key_mut()
.ser(Pem::new_ref(&self.developer_key))?;
let key_store = db.as_private_mut().as_key_store_mut();
for tor_key in &self.tor_keys {
key_store.as_onion_mut().insert_key(tor_key)?;
}
let cert_store = key_store.as_local_certs_mut();
if cert_store.as_root_cert().de()?.0 != self.root_ca_cert {
cert_store
@@ -148,11 +118,5 @@ impl AccountInfo {
self.hostname.no_dot_host_name(),
self.hostname.local_domain_name(),
]
.into_iter()
.chain(
self.tor_keys
.iter()
.map(|k| InternedString::from_display(&k.onion_address())),
)
}
}

View File

@@ -23,7 +23,7 @@ pub fn action_api<C: Context>() -> ParentHandler<C> {
"get-input",
from_fn_async(get_action_input)
.with_display_serializable()
.with_about("Get action input spec")
.with_about("about.get-action-input-spec")
.with_call_remote::<CliContext>(),
)
.subcommand(
@@ -36,14 +36,14 @@ pub fn action_api<C: Context>() -> ParentHandler<C> {
}
Ok(())
})
.with_about("Run service action")
.with_about("about.run-service-action")
.with_call_remote::<CliContext>(),
)
.subcommand(
"clear-task",
from_fn_async(clear_task)
.no_display()
.with_about("Clear a service task")
.with_about("about.clear-service-task")
.with_call_remote::<CliContext>(),
)
}
@@ -63,7 +63,9 @@ pub struct ActionInput {
#[derive(Deserialize, Serialize, TS, Parser)]
#[serde(rename_all = "camelCase")]
pub struct GetActionInputParams {
#[arg(help = "help.arg.package-id")]
pub package_id: PackageId,
#[arg(help = "help.arg.action-id")]
pub action_id: ActionId,
}
@@ -269,6 +271,7 @@ pub fn display_action_result<T: Serialize>(
}
#[derive(Deserialize, Serialize, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct RunActionParams {
pub package_id: PackageId,
@@ -280,8 +283,11 @@ pub struct RunActionParams {
#[derive(Parser)]
struct CliRunActionParams {
#[arg(help = "help.arg.package-id")]
pub package_id: PackageId,
#[arg(help = "help.arg.event-id")]
pub event_id: Option<Guid>,
#[arg(help = "help.arg.action-id")]
pub action_id: ActionId,
#[command(flatten)]
pub input: StdinDeserializable<Option<Value>>,
@@ -357,12 +363,15 @@ pub async fn run_action(
}
#[derive(Deserialize, Serialize, Parser, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct ClearTaskParams {
#[arg(help = "help.arg.package-id")]
pub package_id: PackageId,
#[arg(help = "help.arg.replay-id")]
pub replay_id: ReplayId,
#[arg(long)]
#[arg(long, help = "help.arg.force-clear-task")]
#[serde(default)]
pub force: bool,
}

View File

@@ -51,7 +51,10 @@ pub async fn write_shadow(password: &str) -> Result<(), Error> {
match line.split_once(":") {
Some((user, rest)) if user == "start9" || user == "kiosk" => {
let (_, rest) = rest.split_once(":").ok_or_else(|| {
Error::new(eyre!("malformed /etc/shadow"), ErrorKind::ParseSysInfo)
Error::new(
eyre!("{}", t!("auth.malformed-etc-shadow")),
ErrorKind::ParseSysInfo,
)
})?;
shadow_file
.write_all(format!("{user}:{hash}:{rest}\n").as_bytes())
@@ -81,7 +84,7 @@ impl PasswordType {
PasswordType::String(x) => Ok(x),
PasswordType::EncryptedWire(x) => x.decrypt(current_secret).ok_or_else(|| {
Error::new(
color_eyre::eyre::eyre!("Couldn't decode password"),
color_eyre::eyre::eyre!("{}", t!("auth.couldnt-decode-password")),
crate::ErrorKind::Unknown,
)
}),
@@ -125,19 +128,19 @@ where
"login",
from_fn_async(cli_login::<AC>)
.no_display()
.with_about("Log in a new auth session"),
.with_about("about.login-new-auth-session"),
)
.subcommand(
"logout",
from_fn_async(logout::<AC>)
.with_metadata("get_session", Value::Bool(true))
.no_display()
.with_about("Log out of current auth session")
.with_about("about.logout-current-auth-session")
.with_call_remote::<CliContext>(),
)
.subcommand(
"session",
session::<C, AC>().with_about("List or kill auth sessions"),
session::<C, AC>().with_about("about.list-or-kill-auth-sessions"),
)
.subcommand(
"reset-password",
@@ -147,14 +150,14 @@ where
"reset-password",
from_fn_async(cli_reset_password)
.no_display()
.with_about("Reset password"),
.with_about("about.reset-password"),
)
.subcommand(
"get-pubkey",
from_fn_async(get_pubkey)
.with_metadata("authenticated", Value::Bool(false))
.no_display()
.with_about("Get public key derived from server private key")
.with_about("about.get-pubkey-from-server")
.with_call_remote::<CliContext>(),
)
}
@@ -208,12 +211,12 @@ pub fn check_password(hash: &str, password: &str) -> Result<(), Error> {
ensure_code!(
argon2::verify_encoded(&hash, password.as_bytes()).map_err(|_| {
Error::new(
eyre!("Password Incorrect"),
eyre!("{}", t!("auth.password-incorrect")),
crate::ErrorKind::IncorrectPassword,
)
})?,
crate::ErrorKind::IncorrectPassword,
"Password Incorrect"
t!("auth.password-incorrect")
);
Ok(())
}
@@ -327,14 +330,14 @@ where
.with_metadata("get_session", Value::Bool(true))
.with_display_serializable()
.with_custom_display_fn(|handle, result| display_sessions(handle.params, result))
.with_about("Display all auth sessions")
.with_about("about.display-all-auth-sessions")
.with_call_remote::<CliContext>(),
)
.subcommand(
"kill",
from_fn_async(kill::<AC>)
.no_display()
.with_about("Terminate existing auth session(s)")
.with_about("about.terminate-auth-sessions")
.with_call_remote::<CliContext>(),
)
}
@@ -415,9 +418,11 @@ impl AsLogoutSessionId for KillSessionId {
}
#[derive(Deserialize, Serialize, Parser, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct KillParams {
#[arg(help = "help.arg.session-ids")]
ids: Vec<String>,
}
@@ -431,10 +436,13 @@ pub async fn kill<C: SessionAuthContext>(
}
#[derive(Deserialize, Serialize, Parser, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct ResetPasswordParams {
#[arg(help = "help.arg.old-password")]
old_password: Option<PasswordType>,
#[arg(help = "help.arg.new-password")]
new_password: Option<PasswordType>,
}
@@ -447,13 +455,13 @@ async fn cli_reset_password(
..
}: HandlerArgs<CliContext>,
) -> Result<(), RpcError> {
let old_password = rpassword::prompt_password("Current Password: ")?;
let old_password = rpassword::prompt_password(&t!("auth.prompt-current-password"))?;
let new_password = {
let new_password = rpassword::prompt_password("New Password: ")?;
if new_password != rpassword::prompt_password("Confirm: ")? {
let new_password = rpassword::prompt_password(&t!("auth.prompt-new-password"))?;
if new_password != rpassword::prompt_password(&t!("auth.prompt-confirm"))? {
return Err(Error::new(
eyre!("Passwords do not match"),
eyre!("{}", t!("auth.passwords-do-not-match")),
crate::ErrorKind::IncorrectPassword,
)
.into());
@@ -486,7 +494,7 @@ pub async fn reset_password_impl(
.with_kind(crate::ErrorKind::IncorrectPassword)?
{
return Err(Error::new(
eyre!("Incorrect Password"),
eyre!("{}", t!("auth.password-incorrect")),
crate::ErrorKind::IncorrectPassword,
));
}

View File

@@ -30,14 +30,17 @@ use crate::util::serde::IoFormat;
use crate::version::VersionT;
#[derive(Deserialize, Serialize, Parser, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct BackupParams {
#[arg(help = "help.arg.backup-target-id")]
target_id: BackupTargetId,
#[arg(long = "old-password")]
#[arg(long = "old-password", help = "help.arg.old-backup-password")]
old_password: Option<crate::auth::PasswordType>,
#[arg(long = "package-ids")]
#[arg(long = "package-ids", help = "help.arg.package-ids-to-backup")]
package_ids: Option<Vec<PackageId>>,
#[arg(help = "help.arg.backup-password")]
password: crate::auth::PasswordType,
}
@@ -69,8 +72,8 @@ impl BackupStatusGuard {
db,
None,
NotificationLevel::Success,
"Backup Complete".to_owned(),
"Your backup has completed".to_owned(),
t!("backup.bulk.complete-title").to_string(),
t!("backup.bulk.complete-message").to_string(),
BackupReport {
server: ServerBackupReport {
attempted: true,
@@ -88,9 +91,8 @@ impl BackupStatusGuard {
db,
None,
NotificationLevel::Warning,
"Backup Complete".to_owned(),
"Your backup has completed, but some package(s) failed to backup"
.to_owned(),
t!("backup.bulk.complete-title").to_string(),
t!("backup.bulk.complete-with-failures").to_string(),
BackupReport {
server: ServerBackupReport {
attempted: true,
@@ -103,7 +105,7 @@ impl BackupStatusGuard {
.await
}
Err(e) => {
tracing::error!("Backup Failed: {}", e);
tracing::error!("{}", t!("backup.bulk.failed-error", error = e));
tracing::debug!("{:?}", e);
let err_string = e.to_string();
db.mutate(|db| {
@@ -111,8 +113,8 @@ impl BackupStatusGuard {
db,
None,
NotificationLevel::Error,
"Backup Failed".to_owned(),
"Your backup failed to complete.".to_owned(),
t!("backup.bulk.failed-title").to_string(),
t!("backup.bulk.failed-message").to_string(),
BackupReport {
server: ServerBackupReport {
attempted: true,
@@ -224,7 +226,7 @@ fn assure_backing_up<'a>(
.as_backup_progress_mut();
if backing_up.transpose_ref().is_some() {
return Err(Error::new(
eyre!("Server is already backing up!"),
eyre!("{}", t!("backup.bulk.already-backing-up")),
ErrorKind::InvalidRequest,
));
}
@@ -303,7 +305,7 @@ async fn perform_backup(
let mut backup_guard = Arc::try_unwrap(backup_guard).map_err(|_| {
Error::new(
eyre!("leaked reference to BackupMountGuard"),
eyre!("{}", t!("backup.bulk.leaked-reference")),
ErrorKind::Incoherent,
)
})?;

View File

@@ -2,6 +2,7 @@ use std::collections::BTreeMap;
use rpc_toolkit::{Context, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use crate::PackageId;
use crate::context::CliContext;
@@ -13,19 +14,22 @@ pub mod os;
pub mod restore;
pub mod target;
#[derive(Debug, Deserialize, Serialize)]
#[derive(Debug, Deserialize, Serialize, TS)]
#[ts(export)]
pub struct BackupReport {
server: ServerBackupReport,
packages: BTreeMap<PackageId, PackageBackupReport>,
}
#[derive(Debug, Deserialize, Serialize)]
#[derive(Debug, Deserialize, Serialize, TS)]
#[ts(export)]
pub struct ServerBackupReport {
attempted: bool,
error: Option<String>,
}
#[derive(Debug, Deserialize, Serialize)]
#[derive(Debug, Deserialize, Serialize, TS)]
#[ts(export)]
pub struct PackageBackupReport {
pub error: Option<String>,
}
@@ -37,12 +41,12 @@ pub fn backup<C: Context>() -> ParentHandler<C> {
"create",
from_fn_async(backup_bulk::backup_all)
.no_display()
.with_about("Create backup for all packages")
.with_about("about.create-backup-all-packages")
.with_call_remote::<CliContext>(),
)
.subcommand(
"target",
target::target::<C>().with_about("Commands related to a backup target"),
target::target::<C>().with_about("about.commands-backup-target"),
)
}
@@ -51,7 +55,7 @@ pub fn package_backup<C: Context>() -> ParentHandler<C> {
"restore",
from_fn_async(restore::restore_packages_rpc)
.no_display()
.with_about("Restore package(s) from backup")
.with_about("about.restore-packages-from-backup")
.with_call_remote::<CliContext>(),
)
}

View File

@@ -7,9 +7,7 @@ use ssh_key::private::Ed25519Keypair;
use crate::account::AccountInfo;
use crate::hostname::{Hostname, generate_hostname, generate_id};
use crate::net::tor::TorSecretKey;
use crate::prelude::*;
use crate::util::crypto::ed25519_expand_key;
use crate::util::serde::{Base32, Base64, Pem};
pub struct OsBackup {
@@ -85,10 +83,6 @@ impl OsBackupV0 {
&mut ssh_key::rand_core::OsRng::default(),
ssh_key::Algorithm::Ed25519,
)?,
tor_keys: TorSecretKey::from_bytes(self.tor_key.0)
.ok()
.into_iter()
.collect(),
developer_key: ed25519_dalek::SigningKey::generate(
&mut ssh_key::rand_core::OsRng::default(),
),
@@ -119,10 +113,6 @@ impl OsBackupV1 {
root_ca_key: self.root_ca_key.0,
root_ca_cert: self.root_ca_cert.0,
ssh_key: ssh_key::PrivateKey::from(Ed25519Keypair::from_seed(&self.net_key.0)),
tor_keys: TorSecretKey::from_bytes(ed25519_expand_key(&self.net_key.0))
.ok()
.into_iter()
.collect(),
developer_key: ed25519_dalek::SigningKey::from_bytes(&self.net_key),
},
ui: self.ui,
@@ -140,7 +130,6 @@ struct OsBackupV2 {
root_ca_key: Pem<PKey<Private>>, // PEM Encoded OpenSSL Key
root_ca_cert: Pem<X509>, // PEM Encoded OpenSSL X509 Certificate
ssh_key: Pem<ssh_key::PrivateKey>, // PEM Encoded OpenSSH Key
tor_keys: Vec<TorSecretKey>, // Base64 Encoded Ed25519 Expanded Secret Key
compat_s9pk_key: Pem<ed25519_dalek::SigningKey>, // PEM Encoded ED25519 Key
ui: Value, // JSON Value
}
@@ -154,7 +143,6 @@ impl OsBackupV2 {
root_ca_key: self.root_ca_key.0,
root_ca_cert: self.root_ca_cert.0,
ssh_key: self.ssh_key.0,
tor_keys: self.tor_keys,
developer_key: self.compat_s9pk_key.0,
},
ui: self.ui,
@@ -167,7 +155,6 @@ impl OsBackupV2 {
root_ca_key: Pem(backup.account.root_ca_key.clone()),
root_ca_cert: Pem(backup.account.root_ca_cert.clone()),
ssh_key: Pem(backup.account.ssh_key.clone()),
tor_keys: backup.account.tor_keys.clone(),
compat_s9pk_key: Pem(backup.account.developer_key.clone()),
ui: backup.ui.clone(),
}

View File

@@ -23,16 +23,20 @@ use crate::progress::ProgressUnits;
use crate::s9pk::S9pk;
use crate::service::service_map::DownloadInstallFuture;
use crate::setup::SetupExecuteProgress;
use crate::system::sync_kiosk;
use crate::util::serde::IoFormat;
use crate::system::{save_language, sync_kiosk};
use crate::util::serde::{IoFormat, Pem};
use crate::{PLATFORM, PackageId};
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
#[ts(export)]
pub struct RestorePackageParams {
#[arg(help = "help.arg.package-ids")]
pub ids: Vec<PackageId>,
#[arg(help = "help.arg.backup-target-id")]
pub target_id: BackupTargetId,
#[arg(help = "help.arg.backup-password")]
pub password: String,
}
@@ -63,7 +67,10 @@ pub async fn restore_packages_rpc(
match async { res.await?.await }.await {
Ok(_) => (),
Err(err) => {
tracing::error!("Error restoring package {}: {}", id, err);
tracing::error!(
"{}",
t!("backup.restore.package-error", id = id, error = err)
);
tracing::debug!("{:?}", err);
}
}
@@ -75,10 +82,10 @@ pub async fn restore_packages_rpc(
}
#[instrument(skip_all)]
pub async fn recover_full_embassy(
pub async fn recover_full_server(
ctx: &SetupContext,
disk_guid: Arc<String>,
start_os_password: String,
disk_guid: InternedString,
password: String,
recovery_source: TmpMountGuard,
server_id: &str,
recovery_password: &str,
@@ -102,7 +109,7 @@ pub async fn recover_full_embassy(
)?;
os_backup.account.password = argon2::hash_encoded(
start_os_password.as_bytes(),
password.as_bytes(),
&rand::random::<[u8; 16]>()[..],
&argon2::Config::rfc9106_low_mem(),
)
@@ -111,16 +118,32 @@ pub async fn recover_full_embassy(
let kiosk = Some(kiosk.unwrap_or(true)).filter(|_| &*PLATFORM != "raspberrypi");
sync_kiosk(kiosk).await?;
let language = ctx.language.peek(|a| a.clone());
let keyboard = ctx.keyboard.peek(|a| a.clone());
if let Some(language) = &language {
save_language(&**language).await?;
}
if let Some(keyboard) = &keyboard {
keyboard.save().await?;
}
let db = ctx.db().await?;
db.put(&ROOT, &Database::init(&os_backup.account, kiosk)?)
.await?;
db.put(
&ROOT,
&Database::init(&os_backup.account, kiosk, language, keyboard)?,
)
.await?;
drop(db);
let init_result = init(&ctx.webserver, &ctx.config, init_phases).await?;
let config = ctx.config.peek(|c| c.clone());
let init_result = init(&ctx.webserver, &config, init_phases).await?;
let rpc_ctx = RpcContext::init(
&ctx.webserver,
&ctx.config,
&config,
disk_guid.clone(),
Some(init_result),
rpc_ctx_phases,
@@ -145,7 +168,10 @@ pub async fn recover_full_embassy(
match async { res.await?.await }.await {
Ok(_) => (),
Err(err) => {
tracing::error!("Error restoring package {}: {}", id, err);
tracing::error!(
"{}",
t!("backup.restore.package-error", id = id, error = err)
);
tracing::debug!("{:?}", err);
}
}
@@ -155,7 +181,14 @@ pub async fn recover_full_embassy(
.await;
restore_phase.lock().await.complete();
Ok(((&os_backup.account).try_into()?, rpc_ctx))
Ok((
SetupResult {
hostname: os_backup.account.hostname,
root_ca: Pem(os_backup.account.root_ca_cert),
needs_restart: ctx.install_rootfs.peek(|a| a.is_some()),
},
rpc_ctx,
))
}
#[instrument(skip(ctx, backup_guard))]

View File

@@ -36,7 +36,8 @@ impl Map for CifsTargets {
}
}
#[derive(Debug, Deserialize, Serialize)]
#[derive(Debug, Deserialize, Serialize, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct CifsBackupTarget {
hostname: String,
@@ -52,43 +53,48 @@ pub fn cifs<C: Context>() -> ParentHandler<C> {
"add",
from_fn_async(add)
.no_display()
.with_about("Add a new backup target")
.with_about("about.add-new-backup-target")
.with_call_remote::<CliContext>(),
)
.subcommand(
"update",
from_fn_async(update)
.no_display()
.with_about("Update an existing backup target")
.with_about("about.update-existing-backup-target")
.with_call_remote::<CliContext>(),
)
.subcommand(
"remove",
from_fn_async(remove)
.no_display()
.with_about("Remove an existing backup target")
.with_about("about.remove-existing-backup-target")
.with_call_remote::<CliContext>(),
)
}
#[derive(Deserialize, Serialize, Parser, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct AddParams {
pub struct CifsAddParams {
#[arg(help = "help.arg.cifs-hostname")]
pub hostname: String,
#[arg(help = "help.arg.cifs-path")]
pub path: PathBuf,
#[arg(help = "help.arg.cifs-username")]
pub username: String,
#[arg(help = "help.arg.cifs-password")]
pub password: Option<String>,
}
pub async fn add(
ctx: RpcContext,
AddParams {
CifsAddParams {
hostname,
path,
username,
password,
}: AddParams,
}: CifsAddParams,
) -> Result<KeyVal<BackupTargetId, BackupTarget>, Error> {
let cifs = Cifs {
hostname,
@@ -127,31 +133,37 @@ pub async fn add(
}
#[derive(Deserialize, Serialize, Parser, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct UpdateParams {
pub struct CifsUpdateParams {
#[arg(help = "help.arg.backup-target-id")]
pub id: BackupTargetId,
#[arg(help = "help.arg.cifs-hostname")]
pub hostname: String,
#[arg(help = "help.arg.cifs-path")]
pub path: PathBuf,
#[arg(help = "help.arg.cifs-username")]
pub username: String,
#[arg(help = "help.arg.cifs-password")]
pub password: Option<String>,
}
pub async fn update(
ctx: RpcContext,
UpdateParams {
CifsUpdateParams {
id,
hostname,
path,
username,
password,
}: UpdateParams,
}: CifsUpdateParams,
) -> Result<KeyVal<BackupTargetId, BackupTarget>, Error> {
let id = if let BackupTargetId::Cifs { id } = id {
id
} else {
return Err(Error::new(
eyre!("Backup Target ID {} Not Found", id),
eyre!("{}", t!("backup.target.cifs.target-not-found", id = id)),
ErrorKind::NotFound,
));
};
@@ -171,7 +183,13 @@ pub async fn update(
.as_idx_mut(&id)
.ok_or_else(|| {
Error::new(
eyre!("Backup Target ID {} Not Found", BackupTargetId::Cifs { id }),
eyre!(
"{}",
t!(
"backup.target.cifs.target-not-found",
id = BackupTargetId::Cifs { id }
)
),
ErrorKind::NotFound,
)
})?
@@ -192,18 +210,20 @@ pub async fn update(
}
#[derive(Deserialize, Serialize, Parser, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct RemoveParams {
pub struct CifsRemoveParams {
#[arg(help = "help.arg.backup-target-id")]
pub id: BackupTargetId,
}
pub async fn remove(ctx: RpcContext, RemoveParams { id }: RemoveParams) -> Result<(), Error> {
pub async fn remove(ctx: RpcContext, CifsRemoveParams { id }: CifsRemoveParams) -> Result<(), Error> {
let id = if let BackupTargetId::Cifs { id } = id {
id
} else {
return Err(Error::new(
eyre!("Backup Target ID {} Not Found", id),
eyre!("{}", t!("backup.target.cifs.target-not-found", id = id)),
ErrorKind::NotFound,
));
};
@@ -220,7 +240,7 @@ pub fn load(db: &DatabaseModel, id: u32) -> Result<Cifs, Error> {
.as_idx(&id)
.ok_or_else(|| {
Error::new(
eyre!("Backup Target ID {} Not Found", id),
eyre!("{}", t!("backup.target.cifs.target-not-found-id", id = id)),
ErrorKind::NotFound,
)
})?

View File

@@ -34,7 +34,8 @@ use crate::util::{FromStrParser, VersionString};
pub mod cifs;
#[derive(Debug, Deserialize, Serialize)]
#[derive(Debug, Deserialize, Serialize, TS)]
#[ts(export)]
#[serde(tag = "type")]
#[serde(rename_all = "camelCase")]
pub enum BackupTarget {
@@ -49,7 +50,7 @@ pub enum BackupTarget {
}
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone, TS)]
#[ts(type = "string")]
#[ts(export, type = "string")]
pub enum BackupTargetId {
Disk { logicalname: PathBuf },
Cifs { id: u32 },
@@ -111,6 +112,7 @@ impl Serialize for BackupTargetId {
}
#[derive(Debug, Deserialize, Serialize, TS)]
#[ts(export)]
#[serde(tag = "type")]
#[serde(rename_all = "camelCase")]
pub enum BackupTargetFS {
@@ -143,13 +145,13 @@ pub fn target<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"cifs",
cifs::cifs::<C>().with_about("Add, remove, or update a backup target"),
cifs::cifs::<C>().with_about("about.add-remove-update-backup-target"),
)
.subcommand(
"list",
from_fn_async(list)
.with_display_serializable()
.with_about("List existing backup targets")
.with_about("about.list-existing-backup-targets")
.with_call_remote::<CliContext>(),
)
.subcommand(
@@ -159,20 +161,20 @@ pub fn target<C: Context>() -> ParentHandler<C> {
.with_custom_display_fn::<CliContext, _>(|params, info| {
display_backup_info(params.params, info)
})
.with_about("Display package backup information")
.with_about("about.display-package-backup-information")
.with_call_remote::<CliContext>(),
)
.subcommand(
"mount",
from_fn_async(mount)
.with_about("Mount backup target")
.with_about("about.mount-backup-target")
.with_call_remote::<CliContext>(),
)
.subcommand(
"umount",
from_fn_async(umount)
.no_display()
.with_about("Unmount backup target")
.with_about("about.unmount-backup-target")
.with_call_remote::<CliContext>(),
)
}
@@ -210,20 +212,26 @@ pub async fn list(ctx: RpcContext) -> Result<BTreeMap<BackupTargetId, BackupTarg
.collect())
}
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[derive(Clone, Debug, Default, Deserialize, Serialize, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct BackupInfo {
#[ts(type = "string")]
pub version: Version,
#[ts(type = "string | null")]
pub timestamp: Option<DateTime<Utc>>,
pub package_backups: BTreeMap<PackageId, PackageBackupInfo>,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct PackageBackupInfo {
pub title: InternedString,
pub version: VersionString,
#[ts(type = "string")]
pub os_version: Version,
#[ts(type = "string")]
pub timestamp: DateTime<Utc>,
}
@@ -265,11 +273,15 @@ fn display_backup_info(params: WithIoFormat<InfoParams>, info: BackupInfo) -> Re
}
#[derive(Deserialize, Serialize, Parser, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct InfoParams {
#[arg(help = "help.arg.backup-target-id")]
target_id: BackupTargetId,
#[arg(help = "help.arg.server-id")]
server_id: String,
#[arg(help = "help.arg.backup-password")]
password: String,
}
@@ -305,11 +317,13 @@ lazy_static::lazy_static! {
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct MountParams {
#[arg(help = "help.arg.backup-target-id")]
target_id: BackupTargetId,
#[arg(long)]
#[arg(long, help = "help.arg.server-id")]
server_id: Option<String>,
#[arg(help = "help.arg.backup-password")]
password: String, // TODO: rpassword
#[arg(long)]
#[arg(long, help = "help.arg.allow-partial-backup")]
allow_partial: bool,
}
@@ -382,9 +396,11 @@ pub async fn mount(
}
#[derive(Deserialize, Serialize, Parser, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct UmountParams {
#[arg(help = "help.arg.backup-target-id")]
target_id: Option<BackupTargetId>,
}

View File

@@ -17,6 +17,7 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
|cfg: ContainerClientConfig| Ok(ContainerCliContext::init(cfg)),
crate::service::effects::handler(),
)
.mutate_command(super::translate_cli)
.run(args)
{
match e.data {

View File

@@ -1,9 +1,11 @@
use rust_i18n::t;
pub fn renamed(old: &str, new: &str) -> ! {
eprintln!("{old} has been renamed to {new}");
eprintln!("{}", t!("bins.deprecated.renamed", old = old, new = new));
std::process::exit(1)
}
pub fn removed(name: &str) -> ! {
eprintln!("{name} has been removed");
eprintln!("{}", t!("bins.deprecated.removed", name = name));
std::process::exit(1)
}

View File

@@ -2,6 +2,8 @@ use std::collections::{BTreeMap, VecDeque};
use std::ffi::OsString;
use std::path::Path;
use rust_i18n::t;
pub mod container_cli;
pub mod deprecated;
pub mod registry;
@@ -10,75 +12,182 @@ pub mod start_init;
pub mod startd;
pub mod tunnel;
pub fn set_locale_from_env() {
let lang = std::env::var("LANG").ok();
let lang = lang
.as_deref()
.map_or("C", |l| l.strip_suffix(".UTF-8").unwrap_or(l));
set_locale(lang)
}
pub fn set_locale(lang: &str) {
let mut best = None;
let prefix = lang.split_inclusive("_").next().unwrap();
for l in rust_i18n::available_locales!() {
if l == lang {
best = Some(l);
break;
}
if best.is_none() && l.starts_with(prefix) {
best = Some(l);
}
}
rust_i18n::set_locale(best.unwrap_or(lang));
}
pub fn translate_cli(mut cmd: clap::Command) -> clap::Command {
fn translate(s: impl std::fmt::Display) -> String {
t!(s.to_string()).into_owned()
}
if let Some(s) = cmd.get_about() {
let s = translate(s);
cmd = cmd.about(s);
}
if let Some(s) = cmd.get_long_about() {
let s = translate(s);
cmd = cmd.long_about(s);
}
if let Some(s) = cmd.get_before_help() {
let s = translate(s);
cmd = cmd.before_help(s);
}
if let Some(s) = cmd.get_before_long_help() {
let s = translate(s);
cmd = cmd.before_long_help(s);
}
if let Some(s) = cmd.get_after_help() {
let s = translate(s);
cmd = cmd.after_help(s);
}
if let Some(s) = cmd.get_after_long_help() {
let s = translate(s);
cmd = cmd.after_long_help(s);
}
let arg_ids = cmd
.get_arguments()
.map(|a| a.get_id().clone())
.collect::<Vec<_>>();
for id in arg_ids {
cmd = cmd.mut_arg(id, |arg| {
let arg = if let Some(s) = arg.get_help() {
let s = translate(s);
arg.help(s)
} else {
arg
};
if let Some(s) = arg.get_long_help() {
let s = translate(s);
arg.long_help(s)
} else {
arg
}
});
}
for cmd in cmd.get_subcommands_mut() {
*cmd = translate_cli(cmd.clone());
}
cmd
}
#[derive(Default)]
pub struct MultiExecutable(BTreeMap<&'static str, fn(VecDeque<OsString>)>);
pub struct MultiExecutable {
default: Option<&'static str>,
bins: BTreeMap<&'static str, fn(VecDeque<OsString>)>,
}
impl MultiExecutable {
pub fn enable_startd(&mut self) -> &mut Self {
self.0.insert("startd", startd::main);
self.0
self.bins.insert("startd", startd::main);
self.bins
.insert("embassyd", |_| deprecated::renamed("embassyd", "startd"));
self.0
self.bins
.insert("embassy-init", |_| deprecated::removed("embassy-init"));
self
}
pub fn enable_start_cli(&mut self) -> &mut Self {
self.0.insert("start-cli", start_cli::main);
self.0.insert("embassy-cli", |_| {
self.bins.insert("start-cli", start_cli::main);
self.bins.insert("embassy-cli", |_| {
deprecated::renamed("embassy-cli", "start-cli")
});
self.0
self.bins
.insert("embassy-sdk", |_| deprecated::removed("embassy-sdk"));
self
}
pub fn enable_start_container(&mut self) -> &mut Self {
self.0.insert("start-container", container_cli::main);
self.bins.insert("start-container", container_cli::main);
self
}
pub fn enable_start_registryd(&mut self) -> &mut Self {
self.0.insert("start-registryd", registry::main);
self.bins.insert("start-registryd", registry::main);
self
}
pub fn enable_start_registry(&mut self) -> &mut Self {
self.0.insert("start-registry", registry::cli);
self.bins.insert("start-registry", registry::cli);
self
}
pub fn enable_start_tunneld(&mut self) -> &mut Self {
self.0.insert("start-tunneld", tunnel::main);
self.bins.insert("start-tunneld", tunnel::main);
self
}
pub fn enable_start_tunnel(&mut self) -> &mut Self {
self.0.insert("start-tunnel", tunnel::cli);
self.bins.insert("start-tunnel", tunnel::cli);
self
}
pub fn set_default(&mut self, name: &str) -> &mut Self {
if let Some((name, _)) = self.bins.get_key_value(name) {
self.default = Some(*name);
} else {
panic!("{}", t!("bins.mod.does-not-exist", name = name));
}
self
}
fn select_executable(&self, name: &str) -> Option<fn(VecDeque<OsString>)> {
self.0.get(&name).copied()
self.bins.get(&name).copied()
}
pub fn execute(&self) {
set_locale_from_env();
let mut popped = Vec::with_capacity(2);
let mut args = std::env::args_os().collect::<VecDeque<_>>();
for _ in 0..2 {
if let Some(s) = args.pop_front() {
if let Some(name) = Path::new(&*s).file_name().and_then(|s| s.to_str()) {
if name == "--contents" {
for name in self.0.keys() {
for name in self.bins.keys() {
println!("{name}");
}
return;
}
if let Some(x) = self.select_executable(&name) {
args.push_front(s);
return x(args);
}
}
popped.push(s);
}
}
if let Some(default) = self.default {
while let Some(arg) = popped.pop() {
args.push_front(arg);
}
return self.bins[default](args);
}
let args = std::env::args().collect::<VecDeque<_>>();
eprintln!(
"unknown executable: {}",
args.get(1)
.or_else(|| args.get(0))
.map(|s| s.as_str())
.unwrap_or("N/A")
"{}",
t!(
"bins.mod.unknown-executable",
name = args
.get(1)
.or_else(|| args.get(0))
.map(|s| s.as_str())
.unwrap_or("N/A")
)
);
std::process::exit(1);
}

View File

@@ -3,6 +3,7 @@ use std::ffi::OsString;
use clap::Parser;
use futures::FutureExt;
use rpc_toolkit::CliApp;
use rust_i18n::t;
use tokio::signal::unix::signal;
use tracing::instrument;
@@ -77,7 +78,7 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
let rt = tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.expect("failed to initialize runtime");
.expect(&t!("bins.registry.failed-to-initialize-runtime"));
rt.block_on(inner_main(&config))
};
@@ -99,6 +100,7 @@ pub fn cli(args: impl IntoIterator<Item = OsString>) {
|cfg: ClientConfig| Ok(CliContext::init(cfg.load()?)?),
crate::registry::registry_api(),
)
.mutate_command(super::translate_cli)
.run(args)
{
match e.data {

View File

@@ -19,6 +19,7 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
|cfg: ClientConfig| Ok(CliContext::init(cfg.load()?)?),
crate::main_api(),
)
.mutate_command(super::translate_cli)
.run(args)
{
match e.data {

View File

@@ -1,17 +1,15 @@
use std::sync::Arc;
use tokio::process::Command;
use tracing::instrument;
use crate::context::config::ServerConfig;
use crate::context::rpc::InitRpcContextPhases;
use crate::context::{DiagnosticContext, InitContext, InstallContext, RpcContext, SetupContext};
use crate::context::{DiagnosticContext, InitContext, RpcContext, SetupContext};
use crate::disk::REPAIR_DISK_PATH;
use crate::disk::fsck::RepairStrategy;
use crate::disk::main::DEFAULT_PASSWORD;
use crate::firmware::{check_for_firmware_update, update_firmware};
use crate::init::{InitPhases, STANDBY_MODE_PATH};
use crate::net::gateway::UpgradableListener;
use crate::net::gateway::WildcardListener;
use crate::net::web_server::WebServer;
use crate::prelude::*;
use crate::progress::FullProgressTracker;
@@ -21,13 +19,19 @@ use crate::{DATA_DIR, PLATFORM};
#[instrument(skip_all)]
async fn setup_or_init(
server: &mut WebServer<UpgradableListener>,
server: &mut WebServer<WildcardListener>,
config: &ServerConfig,
) -> Result<Result<(RpcContext, FullProgressTracker), Shutdown>, Error> {
if let Some(firmware) = check_for_firmware_update()
.await
.map_err(|e| {
tracing::warn!("Error checking for firmware update: {e}");
tracing::warn!(
"{}",
t!(
"bins.start-init.error-checking-firmware",
error = e.to_string()
)
);
tracing::debug!("{e:?}");
})
.ok()
@@ -35,14 +39,21 @@ async fn setup_or_init(
{
let init_ctx = InitContext::init(config).await?;
let handle = &init_ctx.progress;
let mut update_phase = handle.add_phase("Updating Firmware".into(), Some(10));
let mut reboot_phase = handle.add_phase("Rebooting".into(), Some(1));
let mut update_phase =
handle.add_phase(t!("bins.start-init.updating-firmware").into(), Some(10));
let mut reboot_phase = handle.add_phase(t!("bins.start-init.rebooting").into(), Some(1));
server.serve_ui_for(init_ctx);
update_phase.start();
if let Err(e) = update_firmware(firmware).await {
tracing::warn!("Error performing firmware update: {e}");
tracing::warn!(
"{}",
t!(
"bins.start-init.error-firmware-update",
error = e.to_string()
)
);
tracing::debug!("{e:?}");
} else {
update_phase.complete();
@@ -79,40 +90,11 @@ async fn setup_or_init(
.invoke(crate::ErrorKind::OpenSsl)
.await?;
if tokio::fs::metadata("/run/live/medium").await.is_ok() {
Command::new("sed")
.arg("-i")
.arg("s/PasswordAuthentication no/PasswordAuthentication yes/g")
.arg("/etc/ssh/sshd_config")
.invoke(crate::ErrorKind::Filesystem)
.await?;
Command::new("systemctl")
.arg("reload")
.arg("ssh")
.invoke(crate::ErrorKind::OpenSsh)
.await?;
let ctx = InstallContext::init().await?;
server.serve_ui_for(ctx.clone());
ctx.shutdown
.subscribe()
.recv()
.await
.expect("context dropped");
return Ok(Err(Shutdown {
disk_guid: None,
restart: true,
}));
}
if tokio::fs::metadata("/media/startos/config/disk.guid")
.await
.is_err()
{
let ctx = SetupContext::init(server, config)?;
let ctx = SetupContext::init(server, config.clone())?;
server.serve_ui_for(ctx.clone());
@@ -127,7 +109,13 @@ async fn setup_or_init(
.invoke(ErrorKind::NotFound)
.await
{
tracing::error!("Failed to kill kiosk: {}", e);
tracing::error!(
"{}",
t!(
"bins.start-init.failed-to-kill-kiosk",
error = e.to_string()
)
);
tracing::debug!("{:?}", e);
}
@@ -136,7 +124,7 @@ async fn setup_or_init(
Some(Err(e)) => return Err(e.clone_output()),
None => {
return Err(Error::new(
eyre!("Setup mode exited before setup completed"),
eyre!("{}", t!("bins.start-init.setup-mode-exited")),
ErrorKind::Unknown,
));
}
@@ -146,7 +134,8 @@ async fn setup_or_init(
let handle = init_ctx.progress.clone();
let err_channel = init_ctx.error.clone();
let mut disk_phase = handle.add_phase("Opening data drive".into(), Some(10));
let mut disk_phase =
handle.add_phase(t!("bins.start-init.opening-data-drive").into(), Some(10));
let init_phases = InitPhases::new(&handle);
let rpc_ctx_phases = InitRpcContextPhases::new(&handle);
@@ -156,9 +145,9 @@ async fn setup_or_init(
disk_phase.start();
let guid_string = tokio::fs::read_to_string("/media/startos/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?;
let disk_guid = Arc::new(String::from(guid_string.trim()));
let disk_guid = InternedString::intern(guid_string.trim());
let requires_reboot = crate::disk::main::import(
&**disk_guid,
&*disk_guid,
DATA_DIR,
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
RepairStrategy::Aggressive
@@ -178,11 +167,12 @@ async fn setup_or_init(
.with_ctx(|_| (crate::ErrorKind::Filesystem, REPAIR_DISK_PATH))?;
}
disk_phase.complete();
tracing::info!("Loaded Disk");
tracing::info!("{}", t!("bins.start-init.loaded-disk"));
if requires_reboot.0 {
tracing::info!("Rebooting...");
let mut reboot_phase = handle.add_phase("Rebooting".into(), Some(1));
tracing::info!("{}", t!("bins.start-init.rebooting"));
let mut reboot_phase =
handle.add_phase(t!("bins.start-init.rebooting").into(), Some(1));
reboot_phase.start();
return Ok(Err(Shutdown {
disk_guid: Some(disk_guid),
@@ -214,7 +204,7 @@ async fn setup_or_init(
#[instrument(skip_all)]
pub async fn main(
server: &mut WebServer<UpgradableListener>,
server: &mut WebServer<WildcardListener>,
config: &ServerConfig,
) -> Result<Result<(RpcContext, FullProgressTracker), Shutdown>, Error> {
if &*PLATFORM == "raspberrypi" && tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() {
@@ -236,11 +226,10 @@ pub async fn main(
.await
.is_ok()
{
Some(Arc::new(
Some(InternedString::intern(
tokio::fs::read_to_string("/media/startos/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?
.trim()
.to_owned(),
.trim(),
))
} else {
None


@@ -1,29 +1,29 @@
use std::cmp::max;
use std::ffi::OsString;
use std::sync::Arc;
use std::time::Duration;
use clap::Parser;
use color_eyre::eyre::eyre;
use futures::{FutureExt, TryFutureExt};
use rust_i18n::t;
use tokio::signal::unix::signal;
use tracing::instrument;
use crate::context::config::ServerConfig;
use crate::context::rpc::InitRpcContextPhases;
use crate::context::{DiagnosticContext, InitContext, RpcContext};
use crate::net::gateway::{BindTcp, SelfContainedNetworkInterfaceListener, UpgradableListener};
use crate::net::gateway::WildcardListener;
use crate::net::static_server::refresher;
use crate::net::web_server::{Acceptor, WebServer};
use crate::prelude::*;
use crate::shutdown::Shutdown;
use crate::system::launch_metrics_task;
use crate::util::io::append_file;
use crate::util::logger::LOGGER;
use crate::{Error, ErrorKind, ResultExt};
#[instrument(skip_all)]
async fn inner_main(
server: &mut WebServer<UpgradableListener>,
server: &mut WebServer<WildcardListener>,
config: &ServerConfig,
) -> Result<Option<Shutdown>, Error> {
let rpc_ctx = if !tokio::fs::metadata("/run/startos/initialized")
@@ -53,11 +53,10 @@ async fn inner_main(
let ctx = RpcContext::init(
&server.acceptor_setter(),
config,
Arc::new(
InternedString::intern(
tokio::fs::read_to_string("/media/startos/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?
.trim()
.to_owned(),
.trim(),
),
None,
rpc_ctx_phases,
@@ -114,11 +113,11 @@ async fn inner_main(
metrics_task
.map_err(|e| {
Error::new(
eyre!("{}", e).wrap_err("Metrics daemon panicked!"),
eyre!("{}", e).wrap_err(t!("bins.startd.metrics-daemon-panicked").to_string()),
ErrorKind::Unknown,
)
})
.map_ok(|_| tracing::debug!("Metrics daemon Shutdown"))
.map_ok(|_| tracing::debug!("{}", t!("bins.startd.metrics-daemon-shutdown")))
.await?;
let shutdown = shutdown_recv
@@ -146,10 +145,10 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
.worker_threads(max(1, num_cpus::get()))
.enable_all()
.build()
.expect("failed to initialize runtime");
.expect(&t!("bins.startd.failed-to-initialize-runtime"));
let res = rt.block_on(async {
let mut server = WebServer::new(
Acceptor::bind_upgradable(SelfContainedNetworkInterfaceListener::bind(BindTcp, 80)),
Acceptor::new(WildcardListener::new(80)?),
refresher(),
);
match inner_main(&mut server, &config).await {
@@ -167,11 +166,10 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
.await
.is_ok()
{
Some(Arc::new(
Some(InternedString::intern(
tokio::fs::read_to_string("/media/startos/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?
.trim()
.to_owned(),
.trim(),
))
} else {
None


@@ -6,13 +6,14 @@ use std::time::Duration;
use clap::Parser;
use futures::FutureExt;
use rpc_toolkit::CliApp;
use rust_i18n::t;
use tokio::signal::unix::signal;
use tracing::instrument;
use visit_rs::Visit;
use crate::context::CliContext;
use crate::context::config::ClientConfig;
use crate::net::gateway::{Bind, BindTcp};
use tokio::net::TcpListener;
use crate::net::tls::TlsListener;
use crate::net::web_server::{Accept, Acceptor, MetadataVisitor, WebServer};
use crate::prelude::*;
@@ -56,7 +57,12 @@ async fn inner_main(config: &TunnelConfig) -> Result<(), Error> {
if !a.contains_key(&key) {
match (|| {
Ok::<_, Error>(TlsListener::new(
BindTcp.bind(addr)?,
TcpListener::from_std(
mio::net::TcpListener::bind(addr)
.with_kind(ErrorKind::Network)?
.into(),
)
.with_kind(ErrorKind::Network)?,
TunnelCertHandler {
db: https_db.clone(),
crypto_provider: Arc::new(tokio_rustls::rustls::crypto::ring::default_provider()),
@@ -70,7 +76,7 @@ async fn inner_main(config: &TunnelConfig) -> Result<(), Error> {
true
}
Err(e) => {
tracing::error!("error adding ssl listener: {e}");
tracing::error!("{}", t!("bins.tunnel.error-adding-ssl-listener", error = e.to_string()));
tracing::debug!("{e:?}");
false
@@ -92,7 +98,7 @@ async fn inner_main(config: &TunnelConfig) -> Result<(), Error> {
}
.await
{
tracing::error!("error updating webserver bind: {e}");
tracing::error!("{}", t!("bins.tunnel.error-updating-webserver-bind", error = e.to_string()));
tracing::debug!("{e:?}");
tokio::time::sleep(Duration::from_secs(5)).await;
}
@@ -157,7 +163,7 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
let rt = tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.expect("failed to initialize runtime");
.expect(&t!("bins.tunnel.failed-to-initialize-runtime"));
rt.block_on(inner_main(&config))
};
@@ -179,6 +185,7 @@ pub fn cli(args: impl IntoIterator<Item = OsString>) {
|cfg: ClientConfig| Ok(CliContext::init(cfg.load()?)?),
crate::tunnel::api::tunnel_api(),
)
.mutate_command(super::translate_cli)
.run(args)
{
match e.data {


@@ -7,6 +7,7 @@ use std::sync::Arc;
use cookie::{Cookie, Expiration, SameSite};
use cookie_store::CookieStore;
use http::HeaderMap;
use imbl::OrdMap;
use imbl_value::InternedString;
use josekit::jwk::Jwk;
use once_cell::sync::OnceCell;
@@ -22,7 +23,7 @@ use tracing::instrument;
use super::setup::CURRENT_SECRET;
use crate::context::config::{ClientConfig, local_config_path};
use crate::context::{DiagnosticContext, InitContext, InstallContext, RpcContext, SetupContext};
use crate::context::{DiagnosticContext, InitContext, RpcContext, SetupContext};
use crate::developer::{OS_DEVELOPER_KEY_PATH, default_developer_key_path};
use crate::middleware::auth::local::LocalAuthContext;
use crate::prelude::*;
@@ -37,6 +38,8 @@ pub struct CliContextSeed {
pub registry_url: Option<Url>,
pub registry_hostname: Vec<InternedString>,
pub registry_listen: Option<SocketAddr>,
pub s9pk_s3base: Option<Url>,
pub s9pk_s3bucket: Option<InternedString>,
pub tunnel_addr: Option<SocketAddr>,
pub tunnel_listen: Option<SocketAddr>,
pub client: Client,
@@ -128,6 +131,8 @@ impl CliContext {
.transpose()?,
registry_hostname: config.registry_hostname.unwrap_or_default(),
registry_listen: config.registry_listen,
s9pk_s3base: config.s9pk_s3base,
s9pk_s3bucket: config.s9pk_s3bucket,
tunnel_addr: config.tunnel,
tunnel_listen: config.tunnel_listen,
client: {
@@ -159,21 +164,23 @@ impl CliContext {
if !path.exists() {
continue;
}
let pair = <ed25519::KeypairBytes as ed25519::pkcs8::DecodePrivateKey>::from_pkcs8_pem(
&std::fs::read_to_string(path)?,
)
.with_kind(crate::ErrorKind::Pem)?;
let secret = ed25519_dalek::SecretKey::try_from(&pair.secret_key[..]).map_err(|_| {
Error::new(
eyre!("pkcs8 key is of incorrect length"),
ErrorKind::OpenSsl,
let pair =
<ed25519::KeypairBytes as ed25519::pkcs8::DecodePrivateKey>::from_pkcs8_pem(
&std::fs::read_to_string(path)?,
)
})?;
return Ok(secret.into())
.with_kind(crate::ErrorKind::Pem)?;
let secret =
ed25519_dalek::SecretKey::try_from(&pair.secret_key[..]).map_err(|_| {
Error::new(
eyre!("{}", t!("context.cli.pkcs8-key-incorrect-length")),
ErrorKind::OpenSsl,
)
})?;
return Ok(secret.into());
}
Err(Error::new(
eyre!("Developer Key does not exist! Please run `start-cli init-key` before running this command."),
crate::ErrorKind::Uninitialized
eyre!("{}", t!("context.cli.developer-key-does-not-exist")),
crate::ErrorKind::Uninitialized,
))
})
}
@@ -188,14 +195,18 @@ impl CliContext {
"http" => "ws",
_ => {
return Err(Error::new(
eyre!("Cannot parse scheme from base URL"),
eyre!("{}", t!("context.cli.cannot-parse-scheme-from-base-url")),
crate::ErrorKind::ParseUrl,
)
.into());
}
};
url.set_scheme(ws_scheme)
.map_err(|_| Error::new(eyre!("Cannot set URL scheme"), crate::ErrorKind::ParseUrl))?;
url.set_scheme(ws_scheme).map_err(|_| {
Error::new(
eyre!("{}", t!("context.cli.cannot-set-url-scheme")),
crate::ErrorKind::ParseUrl,
)
})?;
url.path_segments_mut()
.map_err(|_| eyre!("Url cannot be base"))
.with_kind(crate::ErrorKind::ParseUrl)?
@@ -238,10 +249,16 @@ impl CliContext {
where
Self: CallRemote<RemoteContext>,
{
<Self as CallRemote<RemoteContext, Empty>>::call_remote(&self, method, params, Empty {})
.await
.map_err(Error::from)
.with_ctx(|e| (e.kind, method))
<Self as CallRemote<RemoteContext, Empty>>::call_remote(
&self,
method,
OrdMap::new(),
params,
Empty {},
)
.await
.map_err(Error::from)
.with_ctx(|e| (e.kind, method))
}
pub async fn call_remote_with<RemoteContext, T>(
&self,
@@ -252,10 +269,16 @@ impl CliContext {
where
Self: CallRemote<RemoteContext, T>,
{
<Self as CallRemote<RemoteContext, T>>::call_remote(&self, method, params, extra)
.await
.map_err(Error::from)
.with_ctx(|e| (e.kind, method))
<Self as CallRemote<RemoteContext, T>>::call_remote(
&self,
method,
OrdMap::new(),
params,
extra,
)
.await
.map_err(Error::from)
.with_ctx(|e| (e.kind, method))
}
}
impl AsRef<Jwk> for CliContext {
@@ -292,7 +315,13 @@ impl AsRef<Client> for CliContext {
}
impl CallRemote<RpcContext> for CliContext {
async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
async fn call_remote(
&self,
method: &str,
_: OrdMap<&'static str, Value>,
params: Value,
_: Empty,
) -> Result<Value, RpcError> {
if let Ok(local) = read_file_to_string(RpcContext::LOCAL_AUTH_COOKIE_PATH).await {
self.cookie_store
.lock()
@@ -319,7 +348,13 @@ impl CallRemote<RpcContext> for CliContext {
}
}
impl CallRemote<DiagnosticContext> for CliContext {
async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
async fn call_remote(
&self,
method: &str,
_: OrdMap<&'static str, Value>,
params: Value,
_: Empty,
) -> Result<Value, RpcError> {
crate::middleware::auth::signature::call_remote(
self,
self.rpc_url.clone(),
@@ -332,7 +367,13 @@ impl CallRemote<DiagnosticContext> for CliContext {
}
}
impl CallRemote<InitContext> for CliContext {
async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
async fn call_remote(
&self,
method: &str,
_: OrdMap<&'static str, Value>,
params: Value,
_: Empty,
) -> Result<Value, RpcError> {
crate::middleware::auth::signature::call_remote(
self,
self.rpc_url.clone(),
@@ -345,20 +386,13 @@ impl CallRemote<InitContext> for CliContext {
}
}
impl CallRemote<SetupContext> for CliContext {
async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
crate::middleware::auth::signature::call_remote(
self,
self.rpc_url.clone(),
HeaderMap::new(),
self.rpc_url.host_str(),
method,
params,
)
.await
}
}
impl CallRemote<InstallContext> for CliContext {
async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
async fn call_remote(
&self,
method: &str,
_: OrdMap<&'static str, Value>,
params: Value,
_: Empty,
) -> Result<Value, RpcError> {
crate::middleware::auth::signature::call_remote(
self,
self.rpc_url.clone(),


@@ -58,27 +58,31 @@ pub trait ContextConfig: DeserializeOwned + Default {
#[command(rename_all = "kebab-case")]
#[command(version = crate::version::Current::default().semver().to_string())]
pub struct ClientConfig {
#[arg(short = 'c', long)]
#[arg(short = 'c', long, help = "help.arg.config-file-path")]
pub config: Option<PathBuf>,
#[arg(short = 'H', long)]
#[arg(short = 'H', long, help = "help.arg.host-url")]
pub host: Option<Url>,
#[arg(short = 'r', long)]
#[arg(short = 'r', long, help = "help.arg.registry-url")]
pub registry: Option<Url>,
#[arg(long)]
#[arg(long, help = "help.arg.registry-hostname")]
pub registry_hostname: Option<Vec<InternedString>>,
#[arg(skip)]
pub registry_listen: Option<SocketAddr>,
#[arg(short = 't', long)]
#[arg(long, help = "help.s9pk-s3base")]
pub s9pk_s3base: Option<Url>,
#[arg(long, help = "help.s9pk-s3bucket")]
pub s9pk_s3bucket: Option<InternedString>,
#[arg(short = 't', long, help = "help.arg.tunnel-address")]
pub tunnel: Option<SocketAddr>,
#[arg(skip)]
pub tunnel_listen: Option<SocketAddr>,
#[arg(short = 'p', long)]
#[arg(short = 'p', long, help = "help.arg.proxy-url")]
pub proxy: Option<Url>,
#[arg(skip)]
pub socks_listen: Option<SocketAddr>,
#[arg(long)]
#[arg(long, help = "help.arg.cookie-path")]
pub cookie_path: Option<PathBuf>,
#[arg(long)]
#[arg(long, help = "help.arg.developer-key-path")]
pub developer_key_path: Option<PathBuf>,
}
impl ContextConfig for ClientConfig {
@@ -89,8 +93,13 @@ impl ContextConfig for ClientConfig {
self.host = self.host.take().or(other.host);
self.registry = self.registry.take().or(other.registry);
self.registry_hostname = self.registry_hostname.take().or(other.registry_hostname);
self.registry_listen = self.registry_listen.take().or(other.registry_listen);
self.s9pk_s3base = self.s9pk_s3base.take().or(other.s9pk_s3base);
self.s9pk_s3bucket = self.s9pk_s3bucket.take().or(other.s9pk_s3bucket);
self.tunnel = self.tunnel.take().or(other.tunnel);
self.tunnel_listen = self.tunnel_listen.take().or(other.tunnel_listen);
self.proxy = self.proxy.take().or(other.proxy);
self.socks_listen = self.socks_listen.take().or(other.socks_listen);
self.cookie_path = self.cookie_path.take().or(other.cookie_path);
self.developer_key_path = self.developer_key_path.take().or(other.developer_key_path);
}
@@ -109,21 +118,19 @@ impl ClientConfig {
#[serde(rename_all = "kebab-case")]
#[command(rename_all = "kebab-case")]
pub struct ServerConfig {
#[arg(short, long)]
#[arg(short, long, help = "help.arg.config-file-path")]
pub config: Option<PathBuf>,
#[arg(long)]
pub ethernet_interface: Option<String>,
#[arg(skip)]
pub os_partitions: Option<OsPartitionInfo>,
#[arg(long)]
#[arg(long, help = "help.arg.socks-listen-address")]
pub socks_listen: Option<SocketAddr>,
#[arg(long)]
#[arg(long, help = "help.arg.revision-cache-size")]
pub revision_cache_size: Option<usize>,
#[arg(long)]
#[arg(long, help = "help.arg.disable-encryption")]
pub disable_encryption: Option<bool>,
#[arg(long)]
#[arg(long, help = "help.arg.multi-arch-s9pks")]
pub multi_arch_s9pks: Option<bool>,
#[arg(long)]
#[arg(long, help = "help.arg.developer-key-path")]
pub developer_key_path: Option<PathBuf>,
}
impl ContextConfig for ServerConfig {
@@ -131,7 +138,6 @@ impl ContextConfig for ServerConfig {
self.config.take()
}
fn merge_with(&mut self, other: Self) {
self.ethernet_interface = self.ethernet_interface.take().or(other.ethernet_interface);
self.os_partitions = self.os_partitions.take().or(other.os_partitions);
self.socks_listen = self.socks_listen.take().or(other.socks_listen);
self.revision_cache_size = self


@@ -6,15 +6,15 @@ use rpc_toolkit::yajrc::RpcError;
use tokio::sync::broadcast::Sender;
use tracing::instrument;
use crate::Error;
use crate::context::config::ServerConfig;
use crate::prelude::*;
use crate::rpc_continuations::RpcContinuations;
use crate::shutdown::Shutdown;
pub struct DiagnosticContextSeed {
pub shutdown: Sender<Shutdown>,
pub error: Arc<RpcError>,
pub disk_guid: Option<Arc<String>>,
pub disk_guid: Option<InternedString>,
pub rpc_continuations: RpcContinuations,
}
@@ -24,10 +24,13 @@ impl DiagnosticContext {
#[instrument(skip_all)]
pub fn init(
_config: &ServerConfig,
disk_guid: Option<Arc<String>>,
disk_guid: Option<InternedString>,
error: Error,
) -> Result<Self, Error> {
tracing::error!("Error: {}: Starting diagnostic UI", error);
tracing::error!(
"{}",
t!("context.diagnostic.starting-diagnostic-ui", error = error)
);
tracing::debug!("{:?}", error);
let (shutdown, _) = tokio::sync::broadcast::channel(1);


@@ -1,44 +0,0 @@
use std::ops::Deref;
use std::sync::Arc;
use rpc_toolkit::Context;
use tokio::sync::broadcast::Sender;
use tracing::instrument;
use crate::Error;
use crate::net::utils::find_eth_iface;
use crate::rpc_continuations::RpcContinuations;
pub struct InstallContextSeed {
pub ethernet_interface: String,
pub shutdown: Sender<()>,
pub rpc_continuations: RpcContinuations,
}
#[derive(Clone)]
pub struct InstallContext(Arc<InstallContextSeed>);
impl InstallContext {
#[instrument(skip_all)]
pub async fn init() -> Result<Self, Error> {
let (shutdown, _) = tokio::sync::broadcast::channel(1);
Ok(Self(Arc::new(InstallContextSeed {
ethernet_interface: find_eth_iface().await?,
shutdown,
rpc_continuations: RpcContinuations::new(),
})))
}
}
impl AsRef<RpcContinuations> for InstallContext {
fn as_ref(&self) -> &RpcContinuations {
&self.rpc_continuations
}
}
impl Context for InstallContext {}
impl Deref for InstallContext {
type Target = InstallContextSeed;
fn deref(&self) -> &Self::Target {
&*self.0
}
}


@@ -2,13 +2,11 @@ pub mod cli;
pub mod config;
pub mod diagnostic;
pub mod init;
pub mod install;
pub mod rpc;
pub mod setup;
pub use cli::CliContext;
pub use diagnostic::DiagnosticContext;
pub use init::InitContext;
pub use install::InstallContext;
pub use rpc::RpcContext;
pub use setup::SetupContext;


@@ -15,6 +15,7 @@ use josekit::jwk::Jwk;
use reqwest::{Client, Proxy};
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{CallRemote, Context, Empty};
use tokio::process::Command;
use tokio::sync::{RwLock, broadcast, oneshot, watch};
use tokio::time::Instant;
use tracing::instrument;
@@ -26,10 +27,14 @@ use crate::context::config::ServerConfig;
use crate::db::model::Database;
use crate::db::model::package::TaskSeverity;
use crate::disk::OsPartitionInfo;
use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::block_dev::BlockDev;
use crate::disk::mount::filesystem::{FileSystem, ReadOnly};
use crate::disk::mount::guard::MountGuard;
use crate::init::{InitResult, check_time_is_synchronized};
use crate::install::PKG_ARCHIVE_DIR;
use crate::lxc::LxcManager;
use crate::net::gateway::UpgradableListener;
use crate::net::gateway::WildcardListener;
use crate::net::net_controller::{NetController, NetService};
use crate::net::socks::DEFAULT_SOCKS_LISTEN;
use crate::net::utils::{find_eth_iface, find_wifi_iface};
@@ -41,19 +46,21 @@ use crate::rpc_continuations::{Guid, OpenAuthedContinuations, RpcContinuations};
use crate::service::ServiceMap;
use crate::service::action::update_tasks;
use crate::service::effects::callbacks::ServiceCallbacks;
use crate::service::effects::subcontainer::NVIDIA_OVERLAY_PATH;
use crate::shutdown::Shutdown;
use crate::util::Invoke;
use crate::util::future::NonDetachingJoinHandle;
use crate::util::io::delete_file;
use crate::util::io::{TmpDir, delete_file};
use crate::util::lshw::LshwDevice;
use crate::util::sync::{SyncMutex, SyncRwLock, Watch};
use crate::{ActionId, DATA_DIR, PackageId};
use crate::{ActionId, DATA_DIR, PLATFORM, PackageId};
pub struct RpcContextSeed {
is_closed: AtomicBool,
pub os_partitions: OsPartitionInfo,
pub wifi_interface: Option<String>,
pub ethernet_interface: String,
pub disk_guid: Arc<String>,
pub disk_guid: InternedString,
pub ephemeral_sessions: SyncMutex<Sessions>,
pub db: TypedPatchDb<Database>,
pub sync_db: watch::Sender<u64>,
@@ -77,7 +84,7 @@ pub struct RpcContextSeed {
}
impl Drop for RpcContextSeed {
fn drop(&mut self) {
tracing::info!("RpcContext is dropped");
tracing::info!("{}", t!("context.rpc.rpc-context-dropped"));
}
}
@@ -125,9 +132,9 @@ pub struct RpcContext(Arc<RpcContextSeed>);
impl RpcContext {
#[instrument(skip_all)]
pub async fn init(
webserver: &WebServerAcceptorSetter<UpgradableListener>,
webserver: &WebServerAcceptorSetter<WildcardListener>,
config: &ServerConfig,
disk_guid: Arc<String>,
disk_guid: InternedString,
init_result: Option<InitResult>,
InitRpcContextPhases {
mut load_db,
@@ -148,7 +155,7 @@ impl RpcContext {
let peek = db.peek().await;
let account = AccountInfo::load(&peek)?;
load_db.complete();
tracing::info!("Opened PatchDB");
tracing::info!("{}", t!("context.rpc.opened-patchdb"));
init_net_ctrl.start();
let (net_controller, os_net_service) = if let Some(InitResult {
@@ -160,12 +167,130 @@ impl RpcContext {
} else {
let net_ctrl =
Arc::new(NetController::init(db.clone(), &account.hostname, socks_proxy).await?);
webserver.try_upgrade(|a| net_ctrl.net_iface.watcher.upgrade_listener(a))?;
webserver.send_modify(|wl| wl.set_ip_info(net_ctrl.net_iface.watcher.subscribe()));
let os_net_service = net_ctrl.os_bindings().await?;
(net_ctrl, os_net_service)
};
init_net_ctrl.complete();
tracing::info!("Initialized Net Controller");
tracing::info!("{}", t!("context.rpc.initialized-net-controller"));
if PLATFORM.ends_with("-nonfree") {
if let Err(e) = Command::new("nvidia-smi")
.invoke(ErrorKind::ParseSysInfo)
.await
{
tracing::warn!("{}", t!("context.rpc.nvidia-smi-error", error = e));
tracing::info!("{}", t!("context.rpc.nvidia-warning-can-be-ignored"));
} else {
async {
let version: InternedString = String::from_utf8(
Command::new("modinfo")
.arg("-F")
.arg("version")
.arg("nvidia")
.invoke(ErrorKind::ParseSysInfo)
.await?,
)?
.trim()
.into();
let nvidia_dir =
Path::new("/media/startos/data/package-data/nvidia").join(&*version);
// Generate single squashfs with both debian and generic overlays
let sqfs = nvidia_dir.join("container-overlay.squashfs");
if tokio::fs::metadata(&sqfs).await.is_err() {
let tmp = TmpDir::new().await?;
// Generate debian overlay (libs in /usr/lib/aarch64-linux-gnu/)
let debian_dir = tmp.join("debian");
tokio::fs::create_dir_all(&debian_dir).await?;
// Create /etc/debian_version to trigger debian path detection
tokio::fs::create_dir_all(debian_dir.join("etc")).await?;
tokio::fs::write(debian_dir.join("etc/debian_version"), "").await?;
let procfs = MountGuard::mount(
&Bind::new("/proc"),
debian_dir.join("proc"),
ReadOnly,
)
.await?;
Command::new("nvidia-container-cli")
.arg("configure")
.arg("--no-devbind")
.arg("--no-cgroups")
.arg("--utility")
.arg("--compute")
.arg("--graphics")
.arg("--video")
.arg(&debian_dir)
.invoke(ErrorKind::Unknown)
.await?;
procfs.unmount(true).await?;
// Run ldconfig to create proper symlinks for all NVIDIA libraries
Command::new("ldconfig")
.arg("-r")
.arg(&debian_dir)
.invoke(ErrorKind::Unknown)
.await?;
// Remove /etc/debian_version - it was only needed for nvidia-container-cli detection
tokio::fs::remove_file(debian_dir.join("etc/debian_version")).await?;
// Generate generic overlay (libs in /usr/lib64/)
let generic_dir = tmp.join("generic");
tokio::fs::create_dir_all(&generic_dir).await?;
// No /etc/debian_version - will use generic /usr/lib64 paths
let procfs = MountGuard::mount(
&Bind::new("/proc"),
generic_dir.join("proc"),
ReadOnly,
)
.await?;
Command::new("nvidia-container-cli")
.arg("configure")
.arg("--no-devbind")
.arg("--no-cgroups")
.arg("--utility")
.arg("--compute")
.arg("--graphics")
.arg("--video")
.arg(&generic_dir)
.invoke(ErrorKind::Unknown)
.await?;
procfs.unmount(true).await?;
// Run ldconfig to create proper symlinks for all NVIDIA libraries
Command::new("ldconfig")
.arg("-r")
.arg(&generic_dir)
.invoke(ErrorKind::Unknown)
.await?;
// Create squashfs with UID/GID mapping (avoids chown on readonly mounts)
if let Some(p) = sqfs.parent() {
tokio::fs::create_dir_all(p)
.await
.with_ctx(|_| (ErrorKind::Filesystem, format!("mkdir -p {p:?}")))?;
}
Command::new("mksquashfs")
.arg(&*tmp)
.arg(&sqfs)
.arg("-force-uid")
.arg("100000")
.arg("-force-gid")
.arg("100000")
.invoke(ErrorKind::Filesystem)
.await?;
// tmp.unmount_and_delete().await?;
}
BlockDev::new(&sqfs)
.mount(NVIDIA_OVERLAY_PATH, ReadOnly)
.await?;
Ok::<_, Error>(())
}
.await
.log_err();
}
}
let services = ServiceMap::default();
let metrics_cache = Watch::<Option<crate::system::Metrics>>::new(None);
@@ -210,16 +335,12 @@ impl RpcContext {
is_closed: AtomicBool::new(false),
os_partitions: config.os_partitions.clone().ok_or_else(|| {
Error::new(
eyre!("OS Partition Information Missing"),
eyre!("{}", t!("context.rpc.os-partition-info-missing")),
ErrorKind::Filesystem,
)
})?,
wifi_interface: wifi_interface.clone(),
ethernet_interface: if let Some(eth) = config.ethernet_interface.clone() {
eth
} else {
find_eth_iface().await?
},
ethernet_interface: find_eth_iface().await?,
disk_guid,
ephemeral_sessions: SyncMutex::new(Sessions::new()),
sync_db: watch::Sender::new(db.sequence().await),
@@ -244,9 +365,9 @@ impl RpcContext {
current_secret: Arc::new(
Jwk::generate_ec_key(josekit::jwk::alg::ec::EcCurve::P256).map_err(|e| {
tracing::debug!("{:?}", e);
tracing::error!("Couldn't generate ec key");
tracing::error!("{}", t!("context.rpc.couldnt-generate-ec-key"));
Error::new(
color_eyre::eyre::eyre!("Couldn't generate ec key"),
color_eyre::eyre::eyre!("{}", t!("context.rpc.couldnt-generate-ec-key")),
crate::ErrorKind::Unknown,
)
})?,
@@ -261,10 +382,10 @@ impl RpcContext {
let res = Self(seed.clone());
res.cleanup_and_initialize(cleanup_init).await?;
tracing::info!("Cleaned up transient states");
tracing::info!("{}", t!("context.rpc.cleaned-up-transient-states"));
crate::version::post_init(&res, run_migrations).await?;
tracing::info!("Completed migrations");
tracing::info!("{}", t!("context.rpc.completed-migrations"));
Ok(res)
}
@@ -273,7 +394,7 @@ impl RpcContext {
self.crons.mutate(|c| std::mem::take(c));
self.services.shutdown_all().await?;
self.is_closed.store(true, Ordering::SeqCst);
tracing::info!("RpcContext is shutdown");
tracing::info!("{}", t!("context.rpc.rpc-context-shutdown"));
Ok(())
}
@@ -342,7 +463,10 @@ impl RpcContext {
.await
.result
{
tracing::error!("Error in session cleanup cron: {e}");
tracing::error!(
"{}",
t!("context.rpc.error-in-session-cleanup-cron", error = e)
);
tracing::debug!("{e:?}");
}
}
@@ -455,24 +579,33 @@ impl RpcContext {
pub async fn call_remote<RemoteContext>(
&self,
method: &str,
metadata: OrdMap<&'static str, Value>,
params: Value,
) -> Result<Value, RpcError>
where
Self: CallRemote<RemoteContext>,
{
<Self as CallRemote<RemoteContext, Empty>>::call_remote(&self, method, params, Empty {})
.await
<Self as CallRemote<RemoteContext, Empty>>::call_remote(
&self,
method,
metadata,
params,
Empty {},
)
.await
}
pub async fn call_remote_with<RemoteContext, T>(
&self,
method: &str,
metadata: OrdMap<&'static str, Value>,
params: Value,
extra: T,
) -> Result<Value, RpcError>
where
Self: CallRemote<RemoteContext, T>,
{
<Self as CallRemote<RemoteContext, T>>::call_remote(&self, method, params, extra).await
<Self as CallRemote<RemoteContext, T>>::call_remote(&self, method, metadata, params, extra)
.await
}
}
impl AsRef<Client> for RpcContext {


@@ -6,6 +6,7 @@ use std::time::Duration;
use futures::{Future, StreamExt};
use imbl_value::InternedString;
use josekit::jwk::Jwk;
use openssl::x509::X509;
use patch_db::PatchDb;
use rpc_toolkit::Context;
use serde::{Deserialize, Serialize};
@@ -15,24 +16,26 @@ use tracing::instrument;
use ts_rs::TS;
use crate::MAIN_DATA;
use crate::account::AccountInfo;
use crate::context::RpcContext;
use crate::context::config::ServerConfig;
use crate::disk::OsPartitionInfo;
use crate::disk::mount::guard::{MountGuard, TmpMountGuard};
use crate::hostname::Hostname;
use crate::net::gateway::UpgradableListener;
use crate::net::gateway::WildcardListener;
use crate::net::web_server::{WebServer, WebServerAcceptorSetter};
use crate::prelude::*;
use crate::progress::FullProgressTracker;
use crate::rpc_continuations::{Guid, RpcContinuation, RpcContinuations};
use crate::setup::SetupProgress;
use crate::shutdown::Shutdown;
use crate::system::KeyboardOptions;
use crate::util::future::NonDetachingJoinHandle;
use crate::util::serde::Pem;
use crate::util::sync::SyncMutex;
lazy_static::lazy_static! {
pub static ref CURRENT_SECRET: Jwk = Jwk::generate_ec_key(josekit::jwk::alg::ec::EcCurve::P256).unwrap_or_else(|e| {
tracing::debug!("{:?}", e);
tracing::error!("Couldn't generate ec key");
tracing::error!("{}", t!("context.setup.couldnt-generate-ec-key"));
panic!("Couldn't generate ec key")
});
}
@@ -41,40 +44,25 @@ lazy_static::lazy_static! {
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct SetupResult {
pub tor_addresses: Vec<String>,
#[ts(type = "string")]
pub hostname: Hostname,
#[ts(type = "string")]
pub lan_address: InternedString,
pub root_ca: String,
}
impl TryFrom<&AccountInfo> for SetupResult {
type Error = Error;
fn try_from(value: &AccountInfo) -> Result<Self, Self::Error> {
Ok(Self {
tor_addresses: value
.tor_keys
.iter()
.map(|tor_key| format!("https://{}", tor_key.onion_address()))
.collect(),
hostname: value.hostname.clone(),
lan_address: value.hostname.lan_address(),
root_ca: String::from_utf8(value.root_ca_cert.to_pem()?)?,
})
}
pub root_ca: Pem<X509>,
pub needs_restart: bool,
}
pub struct SetupContextSeed {
pub webserver: WebServerAcceptorSetter<UpgradableListener>,
pub config: ServerConfig,
pub os_partitions: OsPartitionInfo,
pub webserver: WebServerAcceptorSetter<WildcardListener>,
pub config: SyncMutex<ServerConfig>,
pub disable_encryption: bool,
pub progress: FullProgressTracker,
pub task: OnceCell<NonDetachingJoinHandle<()>>,
pub result: OnceCell<Result<(SetupResult, RpcContext), Error>>,
pub disk_guid: OnceCell<Arc<String>>,
pub disk_guid: OnceCell<InternedString>,
pub shutdown: Sender<Option<Shutdown>>,
pub rpc_continuations: RpcContinuations,
pub install_rootfs: SyncMutex<Option<(TmpMountGuard, MountGuard)>>,
pub keyboard: SyncMutex<Option<KeyboardOptions>>,
pub language: SyncMutex<Option<InternedString>>,
}
#[derive(Clone)]
@@ -82,28 +70,25 @@ pub struct SetupContext(Arc<SetupContextSeed>);
impl SetupContext {
#[instrument(skip_all)]
pub fn init(
webserver: &WebServer<UpgradableListener>,
config: &ServerConfig,
webserver: &WebServer<WildcardListener>,
config: ServerConfig,
) -> Result<Self, Error> {
let (shutdown, _) = tokio::sync::broadcast::channel(1);
let mut progress = FullProgressTracker::new();
progress.enable_logging(true);
Ok(Self(Arc::new(SetupContextSeed {
webserver: webserver.acceptor_setter(),
config: config.clone(),
os_partitions: config.os_partitions.clone().ok_or_else(|| {
Error::new(
eyre!("missing required configuration: `os-partitions`"),
ErrorKind::NotFound,
)
})?,
disable_encryption: config.disable_encryption.unwrap_or(false),
config: SyncMutex::new(config),
progress,
task: OnceCell::new(),
result: OnceCell::new(),
disk_guid: OnceCell::new(),
shutdown,
rpc_continuations: RpcContinuations::new(),
install_rootfs: SyncMutex::new(None),
language: SyncMutex::new(None),
keyboard: SyncMutex::new(None),
})))
}
#[instrument(skip_all)]
@@ -129,11 +114,14 @@ impl SetupContext {
.get_or_init(|| async {
match f().await {
Ok(res) => {
tracing::info!("Setup complete!");
tracing::info!("{}", t!("context.setup.setup-complete"));
Ok(res)
}
Err(e) => {
tracing::error!("Setup failed: {e}");
tracing::error!(
"{}",
t!("context.setup.setup-failed", error = e)
);
tracing::debug!("{e:?}");
Err(e)
}
@@ -146,10 +134,13 @@ impl SetupContext {
)
.map_err(|_| {
if self.result.initialized() {
Error::new(eyre!("Setup already complete"), ErrorKind::InvalidRequest)
Error::new(
eyre!("{}", t!("context.setup.setup-already-complete")),
ErrorKind::InvalidRequest,
)
} else {
Error::new(
eyre!("Setup already in progress"),
eyre!("{}", t!("context.setup.setup-already-in-progress")),
ErrorKind::InvalidRequest,
)
}
@@ -199,7 +190,7 @@ impl SetupContext {
}
.await
{
tracing::error!("Error in setup progress websocket: {e}");
tracing::error!("{}", t!("context.setup.error-in-setup-progress-websocket", error = e));
tracing::debug!("{e:?}");
}
},


@@ -8,9 +8,11 @@ use crate::prelude::*;
use crate::{Error, PackageId};
#[derive(Deserialize, Serialize, Parser, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct ControlParams {
#[arg(help = "help.arg.package-id")]
pub id: PackageId,
}


@@ -54,7 +54,7 @@ pub fn db<C: Context>() -> ParentHandler<C> {
"dump",
from_fn_async(cli_dump)
.with_display_serializable()
.with_about("Filter/query db to display tables and records"),
.with_about("about.filter-query-db"),
)
.subcommand("dump", from_fn_async(dump).no_cli())
.subcommand(
@@ -65,13 +65,13 @@ pub fn db<C: Context>() -> ParentHandler<C> {
)
.subcommand(
"put",
put::<C>().with_about("Command for adding UI record to db"),
put::<C>().with_about("about.command-add-ui-record-db"),
)
.subcommand(
"apply",
from_fn_async(cli_apply)
.no_display()
.with_about("Update a db record"),
.with_about("about.update-db-record"),
)
.subcommand("apply", from_fn_async(apply).no_cli())
}
@@ -87,9 +87,14 @@ pub enum RevisionsRes {
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct CliDumpParams {
#[arg(long = "include-private", short = 'p')]
#[arg(
long = "include-private",
short = 'p',
help = "help.arg.include-private-data"
)]
#[serde(default)]
include_private: bool,
#[arg(help = "help.arg.db-path")]
path: Option<PathBuf>,
}
@@ -258,9 +263,11 @@ pub async fn subscribe(
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct CliApplyParams {
#[arg(long)]
#[arg(long, help = "help.arg.allow-model-mismatch")]
allow_model_mismatch: bool,
#[arg(help = "help.arg.db-apply-expr")]
expr: String,
#[arg(help = "help.arg.db-path")]
path: Option<PathBuf>,
}
@@ -327,6 +334,7 @@ async fn cli_apply(
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct ApplyParams {
#[arg(help = "help.arg.db-apply-expr")]
expr: String,
}
@@ -358,7 +366,7 @@ pub fn put<C: Context>() -> ParentHandler<C> {
"ui",
from_fn_async(ui)
.with_display_serializable()
.with_about("Add path and value to db")
.with_about("about.add-path-value-db")
.with_call_remote::<CliContext>(),
)
}
@@ -366,8 +374,10 @@ pub fn put<C: Context>() -> ParentHandler<C> {
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct UiParams {
#[arg(help = "help.arg.json-pointer")]
#[ts(type = "string")]
pointer: JsonPointer,
#[arg(help = "help.arg.json-value")]
#[ts(type = "any")]
value: Value,
}


@@ -14,6 +14,7 @@ use crate::notifications::Notifications;
use crate::prelude::*;
use crate::sign::AnyVerifyingKey;
use crate::ssh::SshKeys;
use crate::system::KeyboardOptions;
use crate::util::serde::Pem;
pub mod package;
@@ -28,9 +29,14 @@ pub struct Database {
pub private: Private,
}
impl Database {
pub fn init(account: &AccountInfo, kiosk: Option<bool>) -> Result<Self, Error> {
pub fn init(
account: &AccountInfo,
kiosk: Option<bool>,
language: Option<InternedString>,
keyboard: Option<KeyboardOptions>,
) -> Result<Self, Error> {
Ok(Self {
public: Public::init(account, kiosk)?,
public: Public::init(account, kiosk, language, keyboard)?,
private: Private {
key_store: KeyStore::new(account)?,
password: account.password.clone(),


@@ -14,11 +14,11 @@ use crate::net::host::Hosts;
use crate::net::service_interface::ServiceInterface;
use crate::prelude::*;
use crate::progress::FullProgress;
use crate::s9pk::manifest::Manifest;
use crate::s9pk::manifest::{LocaleString, Manifest};
use crate::status::StatusInfo;
use crate::util::DataUrl;
use crate::util::serde::{Pem, is_partial_of};
use crate::{ActionId, HealthCheckId, HostId, PackageId, ReplayId, ServiceInterfaceId};
use crate::{ActionId, GatewayId, HealthCheckId, HostId, PackageId, ReplayId, ServiceInterfaceId};
#[derive(Debug, Default, Deserialize, Serialize, TS)]
#[ts(export)]
@@ -381,6 +381,9 @@ pub struct PackageDataEntry {
pub hosts: Hosts,
#[ts(type = "string[]")]
pub store_exposed_dependents: Vec<JsonPointer>,
#[serde(default)]
#[ts(type = "string | null")]
pub outbound_gateway: Option<GatewayId>,
}
impl AsRef<PackageDataEntry> for PackageDataEntry {
fn as_ref(&self) -> &PackageDataEntry {
@@ -417,8 +420,7 @@ impl Map for CurrentDependencies {
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
pub struct CurrentDependencyInfo {
#[ts(type = "string | null")]
pub title: Option<InternedString>,
pub title: Option<LocaleString>,
pub icon: Option<DataUrl<'static>>,
#[serde(flatten)]
pub kind: CurrentDependencyKind,


@@ -20,12 +20,13 @@ use crate::db::model::Database;
use crate::db::model::package::AllPackageData;
use crate::net::acme::AcmeProvider;
use crate::net::host::Host;
use crate::net::host::binding::{AddSslOptions, BindInfo, BindOptions, NetInfo};
use crate::net::utils::ipv6_is_local;
use crate::net::host::binding::{
AddSslOptions, BindInfo, BindOptions, Bindings, DerivedAddressInfo, NetInfo,
};
use crate::net::vhost::AlpnInfo;
use crate::prelude::*;
use crate::progress::FullProgress;
use crate::system::SmtpValue;
use crate::system::{KeyboardOptions, SmtpValue};
use crate::util::cpupower::Governor;
use crate::util::lshw::LshwDevice;
use crate::util::serde::MaybeUtf8String;
@@ -45,7 +46,12 @@ pub struct Public {
pub ui: Value,
}
impl Public {
pub fn init(account: &AccountInfo, kiosk: Option<bool>) -> Result<Self, Error> {
pub fn init(
account: &AccountInfo,
kiosk: Option<bool>,
language: Option<InternedString>,
keyboard: Option<KeyboardOptions>,
) -> Result<Self, Error> {
Ok(Self {
server_info: ServerInfo {
arch: get_arch(),
@@ -58,36 +64,36 @@ impl Public {
post_init_migration_todos: BTreeMap::new(),
network: NetworkInfo {
host: Host {
bindings: [(
80,
BindInfo {
enabled: false,
options: BindOptions {
preferred_external_port: 80,
add_ssl: Some(AddSslOptions {
preferred_external_port: 443,
add_x_forwarded_headers: false,
alpn: Some(AlpnInfo::Specified(vec![
MaybeUtf8String("h2".into()),
MaybeUtf8String("http/1.1".into()),
])),
}),
secure: None,
bindings: Bindings(
[(
80,
BindInfo {
enabled: false,
options: BindOptions {
preferred_external_port: 80,
add_ssl: Some(AddSslOptions {
preferred_external_port: 443,
add_x_forwarded_headers: false,
alpn: Some(AlpnInfo::Specified(vec![
MaybeUtf8String("h2".into()),
MaybeUtf8String("http/1.1".into()),
])),
}),
secure: None,
},
net: NetInfo {
assigned_port: None,
assigned_ssl_port: Some(443),
},
addresses: DerivedAddressInfo::default(),
},
net: NetInfo {
assigned_port: None,
assigned_ssl_port: Some(443),
private_disabled: OrdSet::new(),
public_enabled: OrdSet::new(),
},
},
)]
.into_iter()
.collect(),
onions: account.tor_keys.iter().map(|k| k.onion_address()).collect(),
)]
.into_iter()
.collect(),
),
public_domains: BTreeMap::new(),
private_domains: BTreeSet::new(),
hostname_info: BTreeMap::new(),
private_domains: BTreeMap::new(),
port_forwards: BTreeSet::new(),
},
wifi: WifiInfo {
enabled: true,
@@ -112,6 +118,7 @@ impl Public {
acme
},
dns: Default::default(),
default_outbound: None,
},
status_info: ServerStatus {
backup_progress: None,
@@ -139,6 +146,8 @@ impl Public {
ram: 0,
devices: Vec::new(),
kiosk,
language,
keyboard,
},
package_data: AllPackageData::default(),
ui: serde_json::from_str(*DB_UI_SEED_CELL.get().unwrap_or(&"null"))
@@ -195,6 +204,8 @@ pub struct ServerInfo {
pub ram: u64,
pub devices: Vec<LshwDevice>,
pub kiosk: Option<bool>,
pub language: Option<InternedString>,
pub keyboard: Option<KeyboardOptions>,
}
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
@@ -211,6 +222,9 @@ pub struct NetworkInfo {
pub acme: BTreeMap<AcmeProvider, AcmeSettings>,
#[serde(default)]
pub dns: DnsSettings,
#[serde(default)]
#[ts(type = "string | null")]
pub default_outbound: Option<GatewayId>,
}
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
@@ -230,41 +244,12 @@ pub struct DnsSettings {
#[ts(export)]
pub struct NetworkInterfaceInfo {
pub name: Option<InternedString>,
pub public: Option<bool>,
pub secure: Option<bool>,
pub ip_info: Option<Arc<IpInfo>>,
#[serde(default, rename = "type")]
pub gateway_type: Option<GatewayType>,
}
impl NetworkInterfaceInfo {
pub fn public(&self) -> bool {
self.public.unwrap_or_else(|| {
!self.ip_info.as_ref().map_or(true, |ip_info| {
let ip4s = ip_info
.subnets
.iter()
.filter_map(|ipnet| {
if let IpAddr::V4(ip4) = ipnet.addr() {
Some(ip4)
} else {
None
}
})
.collect::<BTreeSet<_>>();
if !ip4s.is_empty() {
return ip4s
.iter()
.all(|ip4| ip4.is_loopback() || ip4.is_private() || ip4.is_link_local());
}
ip_info.subnets.iter().all(|ipnet| {
if let IpAddr::V6(ip6) = ipnet.addr() {
ipv6_is_local(ip6)
} else {
true
}
})
})
})
}
pub fn secure(&self) -> bool {
self.secure.unwrap_or(false)
}
@@ -301,6 +286,28 @@ pub enum NetworkInterfaceType {
Loopback,
}
#[derive(
Clone,
Copy,
Debug,
Default,
PartialEq,
Eq,
PartialOrd,
Ord,
Deserialize,
Serialize,
TS,
clap::ValueEnum,
)]
#[ts(export)]
#[serde(rename_all = "kebab-case")]
pub enum GatewayType {
#[default]
InboundOutbound,
OutboundOnly,
}
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]


@@ -416,6 +416,51 @@ impl<T: Map> Model<T> {
}
}
impl<T: Map> Model<T>
where
T::Key: FromStr,
Error: From<<T::Key as FromStr>::Err>,
{
/// Retains only the elements specified by the predicate.
/// The predicate can mutate the values and returns whether to keep each entry.
pub fn retain<F>(&mut self, mut f: F) -> Result<(), Error>
where
F: FnMut(&T::Key, &mut Model<T::Value>) -> Result<bool, Error>,
{
let mut to_remove = Vec::new();
match &mut self.value {
Value::Object(o) => {
for (k, v) in o.iter_mut() {
let key = T::Key::from_str(&**k)?;
if !f(&key, patch_db::ModelExt::value_as_mut(v))? {
to_remove.push(k.clone());
}
}
}
v => {
use serde::de::Error;
return Err(patch_db::value::Error {
source: patch_db::value::ErrorSource::custom(format!(
"expected object found {v}"
)),
kind: patch_db::value::ErrorKind::Deserialization,
}
.into());
}
}
// Remove entries that didn't pass the filter
if let Value::Object(o) = &mut self.value {
for k in to_remove {
o.remove(&k);
}
}
Ok(())
}
}
#[repr(transparent)]
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct JsonKey<T>(pub T);
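
For reference, a minimal standalone sketch of the retain pattern that Model::retain above implements: walk an object map, let a predicate mutate each value and decide whether to keep it, then drop the rejected keys in a second pass. This sketch uses serde_json instead of the crate's patch_db model types, so the helper name and the sample data are illustrative assumptions, not the real API.

use serde_json::{json, Map, Value};

/// Keep only entries for which the predicate returns Ok(true).
/// The predicate may mutate the value before deciding.
fn retain_object<F>(obj: &mut Map<String, Value>, mut f: F) -> Result<(), String>
where
    F: FnMut(&str, &mut Value) -> Result<bool, String>,
{
    let mut to_remove = Vec::new();
    for (k, v) in obj.iter_mut() {
        if !f(k.as_str(), v)? {
            to_remove.push(k.clone());
        }
    }
    // Remove entries that did not pass the predicate.
    for k in to_remove {
        obj.remove(&k);
    }
    Ok(())
}

fn main() -> Result<(), String> {
    let mut bindings = json!({ "80": { "enabled": false }, "443": { "enabled": true } });
    if let Value::Object(o) = &mut bindings {
        // Drop disabled bindings, mirroring how retain can prune map entries.
        retain_object(o, |_port, info| {
            Ok(info.get("enabled").and_then(Value::as_bool).unwrap_or(false))
        })?;
    }
    assert_eq!(bindings, json!({ "443": { "enabled": true } }));
    Ok(())
}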


@@ -1,11 +1,11 @@
use std::collections::BTreeMap;
use std::path::Path;
use imbl_value::InternedString;
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use crate::prelude::*;
use crate::s9pk::manifest::LocaleString;
use crate::util::PathOrUrl;
use crate::{Error, PackageId};
@@ -28,7 +28,7 @@ impl Map for Dependencies {
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
pub struct DepInfo {
pub description: Option<String>,
pub description: Option<LocaleString>,
pub optional: bool,
#[serde(flatten)]
pub metadata: Option<MetadataSrc>,
@@ -73,7 +73,7 @@ pub enum MetadataSrc {
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct Metadata {
pub title: InternedString,
pub title: LocaleString,
pub icon: PathOrUrl,
}
@@ -82,5 +82,5 @@ pub struct Metadata {
#[model = "Model<Self>"]
pub struct DependencyMetadata {
#[ts(type = "string")]
pub title: InternedString,
pub title: LocaleString,
}


@@ -17,45 +17,46 @@ pub fn diagnostic<C: Context>() -> ParentHandler<C> {
.subcommand(
"error",
from_fn(error)
.with_about("Display diagnostic error")
.with_about("about.display-diagnostic-error")
.with_call_remote::<CliContext>(),
)
.subcommand(
"logs",
crate::system::logs::<DiagnosticContext>().with_about("Display OS logs"),
crate::system::logs::<DiagnosticContext>().with_about("about.display-os-logs"),
)
.subcommand(
"logs",
from_fn_async(crate::logs::cli_logs::<DiagnosticContext, Empty>)
.no_display()
.with_about("Display OS logs"),
.with_about("about.display-os-logs"),
)
.subcommand(
"kernel-logs",
crate::system::kernel_logs::<DiagnosticContext>().with_about("Display kernel logs"),
crate::system::kernel_logs::<DiagnosticContext>()
.with_about("about.display-kernel-logs"),
)
.subcommand(
"kernel-logs",
from_fn_async(crate::logs::cli_logs::<DiagnosticContext, Empty>)
.no_display()
.with_about("Display kernal logs"),
.with_about("about.display-kernel-logs"),
)
.subcommand(
"restart",
from_fn(restart)
.no_display()
.with_about("Restart the server")
.with_about("about.restart-server")
.with_call_remote::<CliContext>(),
)
.subcommand(
"disk",
disk::<C>().with_about("Command to remove disk from filesystem"),
disk::<C>().with_about("about.command-remove-disk-filesystem"),
)
.subcommand(
"rebuild",
from_fn_async(rebuild)
.no_display()
.with_about("Teardown and rebuild service containers")
.with_about("about.teardown-rebuild-containers")
.with_call_remote::<CliContext>(),
)
}
@@ -89,16 +90,16 @@ pub fn disk<C: Context>() -> ParentHandler<C> {
from_fn_async(forget_disk::<RpcContext>).no_display(),
)
.no_display()
.with_about("Remove disk from filesystem"),
.with_about("about.remove-disk-filesystem"),
)
.subcommand("repair", from_fn_async(|_: C| repair()).no_cli())
.subcommand(
"repair",
CallRemoteHandler::<CliContext, _, _>::new(
from_fn_async(|_: RpcContext| repair())
.no_display()
.with_about("Repair disk in the event of corruption"),
),
from_fn_async(|_: RpcContext| repair()).no_display(),
)
.no_display()
.with_about("about.repair-disk-corruption"),
)
}


@@ -4,6 +4,7 @@ use std::path::Path;
use color_eyre::eyre::eyre;
use futures::FutureExt;
use futures::future::BoxFuture;
use rust_i18n::t;
use tokio::process::Command;
use tracing::instrument;
@@ -62,33 +63,39 @@ async fn e2fsck_runner(
let e2fsck_stderr = String::from_utf8(e2fsck_out.stderr)?;
let code = e2fsck_out.status.code().ok_or_else(|| {
Error::new(
eyre!("e2fsck: process terminated by signal"),
eyre!("{}", t!("disk.fsck.process-terminated-by-signal")),
crate::ErrorKind::DiskManagement,
)
})?;
if code & 4 != 0 {
tracing::error!(
"some filesystem errors NOT corrected on {}:\n{}",
logicalname.as_ref().display(),
e2fsck_stderr,
"{}",
t!(
"disk.fsck.errors-not-corrected",
device = logicalname.as_ref().display(),
stderr = e2fsck_stderr
),
);
} else if code & 1 != 0 {
tracing::warn!(
"filesystem errors corrected on {}:\n{}",
logicalname.as_ref().display(),
e2fsck_stderr,
"{}",
t!(
"disk.fsck.errors-corrected",
device = logicalname.as_ref().display(),
stderr = e2fsck_stderr
),
);
}
if code < 8 {
if code & 2 != 0 {
tracing::warn!("reboot required");
tracing::warn!("{}", t!("disk.fsck.reboot-required"));
Ok(RequiresReboot(true))
} else {
Ok(RequiresReboot(false))
}
} else {
Err(Error::new(
eyre!("e2fsck: {}", e2fsck_stderr),
eyre!("{}", t!("disk.fsck.e2fsck-error", stderr = e2fsck_stderr)),
crate::ErrorKind::DiskManagement,
))
}


@@ -2,6 +2,8 @@ use std::collections::BTreeMap;
use std::path::{Path, PathBuf};
use color_eyre::eyre::eyre;
use imbl_value::InternedString;
use rust_i18n::t;
use tokio::process::Command;
use tracing::instrument;
@@ -20,10 +22,10 @@ pub const MAIN_FS_SIZE: FsSize = FsSize::Gigabytes(8);
#[instrument(skip_all)]
pub async fn create<I, P>(
disks: &I,
pvscan: &BTreeMap<PathBuf, Option<String>>,
pvscan: &BTreeMap<PathBuf, Option<InternedString>>,
datadir: impl AsRef<Path>,
password: Option<&str>,
) -> Result<String, Error>
) -> Result<InternedString, Error>
where
for<'a> &'a I: IntoIterator<Item = &'a P>,
P: AsRef<Path>,
@@ -37,9 +39,9 @@ where
#[instrument(skip_all)]
pub async fn create_pool<I, P>(
disks: &I,
pvscan: &BTreeMap<PathBuf, Option<String>>,
pvscan: &BTreeMap<PathBuf, Option<InternedString>>,
encrypted: bool,
) -> Result<String, Error>
) -> Result<InternedString, Error>
where
for<'a> &'a I: IntoIterator<Item = &'a P>,
P: AsRef<Path>,
@@ -79,7 +81,7 @@ where
cmd.arg(disk.as_ref());
}
cmd.invoke(crate::ErrorKind::DiskManagement).await?;
Ok(guid)
Ok(guid.into())
}
#[derive(Debug, Clone, Copy)]
@@ -224,7 +226,7 @@ pub async fn import<P: AsRef<Path>>(
.is_none()
{
return Err(Error::new(
eyre!("StartOS disk not found."),
eyre!("{}", t!("disk.main.disk-not-found")),
crate::ErrorKind::DiskNotAvailable,
));
}
@@ -234,7 +236,7 @@ pub async fn import<P: AsRef<Path>>(
.any(|id| id == guid)
{
return Err(Error::new(
eyre!("A StartOS disk was found, but it is not the correct disk for this device."),
eyre!("{}", t!("disk.main.incorrect-disk")),
crate::ErrorKind::IncorrectDisk,
));
}


@@ -25,6 +25,8 @@ pub struct OsPartitionInfo {
pub bios: Option<PathBuf>,
pub boot: PathBuf,
pub root: PathBuf,
#[serde(skip)] // internal use only
pub data: Option<PathBuf>,
}
impl OsPartitionInfo {
pub fn contains(&self, logicalname: impl AsRef<Path>) -> bool {
@@ -49,7 +51,7 @@ pub fn disk<C: Context>() -> ParentHandler<C> {
from_fn_async(list)
.with_display_serializable()
.with_custom_display_fn(|handle, result| display_disk_info(handle.params, result))
.with_about("List disk info")
.with_about("about.list-disk-info")
.with_call_remote::<CliContext>(),
)
.subcommand("repair", from_fn_async(|_: C| repair()).no_cli())
@@ -58,7 +60,7 @@ pub fn disk<C: Context>() -> ParentHandler<C> {
CallRemoteHandler::<CliContext, _, _>::new(
from_fn_async(|_: RpcContext| repair())
.no_display()
.with_about("Repair disk in the event of corruption"),
.with_about("about.repair-disk-corruption"),
),
)
}


@@ -29,25 +29,31 @@ impl Default for FileType {
pub struct Bind<Src: AsRef<Path>> {
src: Src,
filetype: FileType,
recursive: bool,
}
impl<Src: AsRef<Path>> Bind<Src> {
pub fn new(src: Src) -> Self {
Self {
src,
filetype: FileType::Directory,
recursive: false,
}
}
pub fn with_type(mut self, filetype: FileType) -> Self {
self.filetype = filetype;
self
}
pub fn recursive(mut self, recursive: bool) -> Self {
self.recursive = recursive;
self
}
}
impl<Src: AsRef<Path> + Send + Sync> FileSystem for Bind<Src> {
async fn source(&self) -> Result<Option<impl AsRef<Path>>, Error> {
Ok(Some(&self.src))
}
fn extra_args(&self) -> impl IntoIterator<Item = impl AsRef<std::ffi::OsStr>> {
["--bind"]
[if self.recursive { "--rbind" } else { "--bind" }]
}
async fn pre_mount(&self, mountpoint: &Path, mount_type: MountType) -> Result<(), Error> {
let from_meta = tokio::fs::metadata(&self.src).await.ok();


@@ -4,6 +4,7 @@ use std::path::Path;
use digest::generic_array::GenericArray;
use digest::{Digest, OutputSizeUser};
use itertools::Itertools;
use sha2::Sha256;
use crate::disk::mount::filesystem::{FileSystem, MountType, ReadWrite};
@@ -12,12 +13,13 @@ use crate::prelude::*;
use crate::util::io::TmpDir;
pub struct OverlayFs<P0: AsRef<Path>, P1: AsRef<Path>, P2: AsRef<Path>> {
lower: P0,
lower: Vec<P0>,
upper: P1,
work: P2,
}
impl<P0: AsRef<Path>, P1: AsRef<Path>, P2: AsRef<Path>> OverlayFs<P0, P1, P2> {
pub fn new(lower: P0, upper: P1, work: P2) -> Self {
/// layers are top to bottom
pub fn new(lower: Vec<P0>, upper: P1, work: P2) -> Self {
Self { lower, upper, work }
}
}
@@ -32,8 +34,10 @@ impl<P0: AsRef<Path> + Send + Sync, P1: AsRef<Path> + Send + Sync, P2: AsRef<Pat
}
fn mount_options(&self) -> impl IntoIterator<Item = impl Display> {
[
Box::new(lazy_format!("lowerdir={}", self.lower.as_ref().display()))
as Box<dyn Display>,
Box::new(lazy_format!(
"lowerdir={}",
self.lower.iter().map(|p| p.as_ref().display()).join(":")
)) as Box<dyn Display>,
Box::new(lazy_format!("upperdir={}", self.upper.as_ref().display())),
Box::new(lazy_format!("workdir={}", self.work.as_ref().display())),
]
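
The hunk above switches OverlayFs to a list of lower layers joined into a single lowerdir= option. For reference, overlayfs accepts multiple colon-separated lower directories, ordered from topmost (highest priority) to bottom; below is a minimal standalone sketch of that option construction, using placeholder paths rather than the crate's real mount plumbing.

use std::path::Path;

/// Build overlayfs mount options from stacked lower layers (ordered top to bottom),
/// a writable upper layer, and its work directory.
fn overlay_options(lower: &[&Path], upper: &Path, work: &Path) -> String {
    let lowerdir = lower
        .iter()
        .map(|p| p.display().to_string())
        .collect::<Vec<_>>()
        .join(":");
    format!(
        "lowerdir={},upperdir={},workdir={}",
        lowerdir,
        upper.display(),
        work.display()
    )
}

fn main() {
    let opts = overlay_options(
        &[Path::new("/tmp/nvidia-overlay"), Path::new("/tmp/rootfs")],
        Path::new("/tmp/upper"),
        Path::new("/tmp/work"),
    );
    assert_eq!(
        opts,
        "lowerdir=/tmp/nvidia-overlay:/tmp/rootfs,upperdir=/tmp/upper,workdir=/tmp/work"
    );
}
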
@@ -51,18 +55,21 @@ impl<P0: AsRef<Path> + Send + Sync, P1: AsRef<Path> + Send + Sync, P2: AsRef<Pat
tokio::fs::create_dir_all(self.work.as_ref()).await?;
let mut sha = Sha256::new();
sha.update("OverlayFs");
sha.update(
tokio::fs::canonicalize(self.lower.as_ref())
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
self.lower.as_ref().display().to_string(),
)
})?
.as_os_str()
.as_bytes(),
);
for lower in &self.lower {
sha.update(
tokio::fs::canonicalize(lower.as_ref())
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
lower.as_ref().display().to_string(),
)
})?
.as_os_str()
.as_bytes(),
);
sha.update(b"\0");
}
sha.update(
tokio::fs::canonicalize(self.upper.as_ref())
.await
@@ -75,6 +82,7 @@ impl<P0: AsRef<Path> + Send + Sync, P1: AsRef<Path> + Send + Sync, P2: AsRef<Pat
.as_os_str()
.as_bytes(),
);
sha.update(b"\0");
sha.update(
tokio::fs::canonicalize(self.work.as_ref())
.await
@@ -87,6 +95,7 @@ impl<P0: AsRef<Path> + Send + Sync, P1: AsRef<Path> + Send + Sync, P2: AsRef<Pat
.as_os_str()
.as_bytes(),
);
sha.update(b"\0");
Ok(sha.finalize())
}
}
@@ -98,11 +107,20 @@ pub struct OverlayGuard<G: GenericMountGuard> {
inner_guard: MountGuard,
}
impl<G: GenericMountGuard> OverlayGuard<G> {
pub async fn mount(lower: G, mountpoint: impl AsRef<Path>) -> Result<Self, Error> {
pub async fn mount_layers<P: AsRef<Path>>(
pre: &[P],
guard: G,
post: &[P],
mountpoint: impl AsRef<Path>,
) -> Result<Self, Error> {
let upper = TmpDir::new().await?;
let inner_guard = MountGuard::mount(
&OverlayFs::new(
lower.path(),
std::iter::empty()
.chain(pre.into_iter().map(|p| p.as_ref()))
.chain([guard.path()])
.chain(post.into_iter().map(|p| p.as_ref()))
.collect(),
upper.as_ref().join("upper"),
upper.as_ref().join("work"),
),
@@ -111,11 +129,14 @@ impl<G: GenericMountGuard> OverlayGuard<G> {
)
.await?;
Ok(Self {
lower: Some(lower),
lower: Some(guard),
upper: Some(upper),
inner_guard,
})
}
pub async fn mount(lower: G, mountpoint: impl AsRef<Path>) -> Result<Self, Error> {
Self::mount_layers::<&Path>(&[], lower, &[], mountpoint).await
}
pub async fn unmount(mut self, delete_mountpoint: bool) -> Result<(), Error> {
self.inner_guard.take().unmount(delete_mountpoint).await?;
if let Some(lower) = self.lower.take() {


@@ -3,6 +3,7 @@ use std::path::Path;
use tracing::instrument;
use crate::Error;
use crate::prelude::*;
use crate::util::Invoke;
pub async fn is_mountpoint(path: impl AsRef<Path>) -> Result<bool, Error> {
@@ -22,9 +23,12 @@ pub async fn bind<P0: AsRef<Path>, P1: AsRef<Path>>(
read_only: bool,
) -> Result<(), Error> {
tracing::info!(
"Binding {} to {}",
src.as_ref().display(),
dst.as_ref().display()
"{}",
t!(
"disk.mount.binding",
src = src.as_ref().display(),
dst = dst.as_ref().display()
)
);
if is_mountpoint(&dst).await? {
unmount(dst.as_ref(), true).await?;
@@ -56,3 +60,42 @@ pub async fn unmount<P: AsRef<Path>>(mountpoint: P, lazy: bool) -> Result<(), Er
.await?;
Ok(())
}
/// Unmounts all mountpoints under (and including) the given path, in reverse
/// depth order so that nested mounts are unmounted before their parents.
#[instrument(skip_all)]
pub async fn unmount_all_under<P: AsRef<Path>>(path: P, lazy: bool) -> Result<(), Error> {
let path = path.as_ref();
let canonical_path = tokio::fs::canonicalize(path)
.await
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("canonicalize {path:?}")))?;
let mounts_content = tokio::fs::read_to_string("/proc/mounts")
.await
.with_ctx(|_| (ErrorKind::Filesystem, "read /proc/mounts"))?;
// Collect all mountpoints under our path
let mut mountpoints: Vec<&str> = mounts_content
.lines()
.filter_map(|line| {
let mountpoint = line.split_whitespace().nth(1)?;
// Check if this mountpoint is under our target path
let mp_path = Path::new(mountpoint);
if mp_path.starts_with(&canonical_path) {
Some(mountpoint)
} else {
None
}
})
.collect();
// Sort by path length descending so we unmount deepest first
mountpoints.sort_by(|a, b| b.len().cmp(&a.len()));
for mountpoint in mountpoints {
tracing::debug!("Unmounting nested mountpoint: {}", mountpoint);
unmount(mountpoint, lazy).await?;
}
Ok(())
}
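
For reference, a self-contained sketch of the ordering logic unmount_all_under relies on: parse /proc/mounts, keep the mountpoints under the target path, and sort them deepest first so nested mounts come before their parents. The unmount call itself is elided, and the sample /proc/mounts content below is made up.

use std::path::Path;

/// Return the mountpoints under `root`, deepest first, from /proc/mounts-style content.
fn nested_mountpoints<'a>(mounts: &'a str, root: &Path) -> Vec<&'a str> {
    let mut mps: Vec<&'a str> = mounts
        .lines()
        .filter_map(|line| line.split_whitespace().nth(1))
        .filter(|mp| Path::new(mp).starts_with(root))
        .collect();
    // A nested mountpoint's path is strictly longer than its parent's,
    // so sorting by length descending orders children before parents.
    mps.sort_by(|a, b| b.len().cmp(&a.len()));
    mps
}

fn main() {
    let mounts = "\
overlay /media/startos/data overlay rw 0 0
proc /media/startos/data/pkg/proc proc rw 0 0
tmpfs /run tmpfs rw 0 0";
    let order = nested_mountpoints(mounts, Path::new("/media/startos/data"));
    assert_eq!(
        order,
        vec!["/media/startos/data/pkg/proc", "/media/startos/data"]
    );
}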


@@ -20,9 +20,9 @@ use super::mount::guard::TmpMountGuard;
use crate::disk::OsPartitionInfo;
use crate::disk::mount::guard::GenericMountGuard;
use crate::hostname::Hostname;
use crate::prelude::*;
use crate::util::Invoke;
use crate::util::serde::IoFormat;
use crate::{Error, ResultExt as _};
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
@@ -40,25 +40,31 @@ pub struct DiskInfo {
pub model: Option<String>,
pub partitions: Vec<PartitionInfo>,
pub capacity: u64,
pub guid: Option<String>,
pub guid: Option<InternedString>,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
#[derive(Clone, Debug, Deserialize, Serialize, ts_rs::TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct PartitionInfo {
pub logicalname: PathBuf,
pub label: Option<String>,
#[ts(type = "number")]
pub capacity: u64,
#[ts(type = "number | null")]
pub used: Option<u64>,
pub start_os: BTreeMap<String, StartOsRecoveryInfo>,
pub guid: Option<String>,
pub guid: Option<InternedString>,
}
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[derive(Clone, Debug, Default, Deserialize, Serialize, ts_rs::TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct StartOsRecoveryInfo {
pub hostname: Hostname,
#[ts(type = "string")]
pub version: exver::Version,
#[ts(type = "string")]
pub timestamp: DateTime<Utc>,
pub password_hash: Option<String>,
pub wrapped_key: Option<String>,
@@ -95,7 +101,7 @@ pub async fn get_vendor<P: AsRef<Path>>(path: P) -> Result<Option<String>, Error
Path::new(SYS_BLOCK_PATH)
.join(path.as_ref().strip_prefix("/dev").map_err(|_| {
Error::new(
eyre!("not a canonical block device"),
eyre!("{}", t!("disk.util.not-canonical-block-device")),
crate::ErrorKind::BlockDevice,
)
})?)
@@ -118,7 +124,7 @@ pub async fn get_model<P: AsRef<Path>>(path: P) -> Result<Option<String>, Error>
Path::new(SYS_BLOCK_PATH)
.join(path.as_ref().strip_prefix("/dev").map_err(|_| {
Error::new(
eyre!("not a canonical block device"),
eyre!("{}", t!("disk.util.not-canonical-block-device")),
crate::ErrorKind::BlockDevice,
)
})?)
@@ -215,7 +221,7 @@ pub async fn get_percentage<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
}
#[instrument(skip_all)]
pub async fn pvscan() -> Result<BTreeMap<PathBuf, Option<String>>, Error> {
pub async fn pvscan() -> Result<BTreeMap<PathBuf, Option<InternedString>>, Error> {
let pvscan_out = Command::new("pvscan")
.invoke(crate::ErrorKind::DiskManagement)
.await?;
@@ -259,6 +265,31 @@ pub async fn recovery_info(
Ok(res)
}
/// Returns the canonical path of the source device for a given mount point,
/// or None if the mount point doesn't exist or isn't mounted.
#[instrument(skip_all)]
pub async fn get_mount_source(mountpoint: impl AsRef<Path>) -> Result<Option<PathBuf>, Error> {
let mounts_content = tokio::fs::read_to_string("/proc/mounts")
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, "/proc/mounts"))?;
let mountpoint = mountpoint.as_ref();
for line in mounts_content.lines() {
let mut parts = line.split_whitespace();
let source = parts.next();
let mount = parts.next();
if let (Some(source), Some(mount)) = (source, mount) {
if Path::new(mount) == mountpoint {
// Try to canonicalize the source path
if let Ok(canonical) = tokio::fs::canonicalize(source).await {
return Ok(Some(canonical));
}
}
}
}
Ok(None)
}
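A small usage sketch for the helper above; the mountpoint and device path are hypothetical:

// Hypothetical check that a data directory is backed by the expected block device.
async fn data_dir_on_expected_device() -> Result<bool, Error> {
    Ok(match get_mount_source("/media/startos/data").await? {
        Some(source) => source.as_path() == Path::new("/dev/mapper/EMBASSY_01-main"),
        None => false, // not mounted, or the mountpoint does not exist
    })
}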
#[instrument(skip_all)]
pub async fn list(os: &OsPartitionInfo) -> Result<Vec<DiskInfo>, Error> {
struct DiskIndex {
@@ -374,23 +405,53 @@ async fn disk_info(disk: PathBuf) -> DiskInfo {
.await
.map_err(|e| {
tracing::warn!(
"Could not get partition table of {}: {}",
disk.display(),
e.source
"{}",
t!(
"disk.util.could-not-get-partition-table",
disk = disk.display(),
error = e.source
)
)
})
.unwrap_or_default();
let vendor = get_vendor(&disk)
.await
.map_err(|e| tracing::warn!("Could not get vendor of {}: {}", disk.display(), e.source))
.map_err(|e| {
tracing::warn!(
"{}",
t!(
"disk.util.could-not-get-vendor",
disk = disk.display(),
error = e.source
)
)
})
.unwrap_or_default();
let model = get_model(&disk)
.await
.map_err(|e| tracing::warn!("Could not get model of {}: {}", disk.display(), e.source))
.map_err(|e| {
tracing::warn!(
"{}",
t!(
"disk.util.could-not-get-model",
disk = disk.display(),
error = e.source
)
)
})
.unwrap_or_default();
let capacity = get_capacity(&disk)
.await
.map_err(|e| tracing::warn!("Could not get capacity of {}: {}", disk.display(), e.source))
.map_err(|e| {
tracing::warn!(
"{}",
t!(
"disk.util.could-not-get-capacity",
disk = disk.display(),
error = e.source
)
)
})
.unwrap_or_default();
DiskInfo {
logicalname: disk,
@@ -407,21 +468,49 @@ async fn part_info(part: PathBuf) -> PartitionInfo {
let mut start_os = BTreeMap::new();
let label = get_label(&part)
.await
.map_err(|e| tracing::warn!("Could not get label of {}: {}", part.display(), e.source))
.map_err(|e| {
tracing::warn!(
"{}",
t!(
"disk.util.could-not-get-label",
part = part.display(),
error = e.source
)
)
})
.unwrap_or_default();
let capacity = get_capacity(&part)
.await
.map_err(|e| tracing::warn!("Could not get capacity of {}: {}", part.display(), e.source))
.map_err(|e| {
tracing::warn!(
"{}",
t!(
"disk.util.could-not-get-capacity-part",
part = part.display(),
error = e.source
)
)
})
.unwrap_or_default();
let mut used = None;
match TmpMountGuard::mount(&BlockDev::new(&part), ReadOnly).await {
Err(e) => tracing::warn!("Could not collect usage information: {}", e.source),
Err(e) => tracing::warn!(
"{}",
t!("disk.util.could-not-collect-usage-info", error = e.source)
),
Ok(mount_guard) => {
used = get_used(mount_guard.path())
.await
.map_err(|e| {
tracing::warn!("Could not get usage of {}: {}", part.display(), e.source)
tracing::warn!(
"{}",
t!(
"disk.util.could-not-get-usage",
part = part.display(),
error = e.source
)
)
})
.ok();
match recovery_info(mount_guard.path()).await {
@@ -429,11 +518,21 @@ async fn part_info(part: PathBuf) -> PartitionInfo {
start_os = a;
}
Err(e) => {
tracing::error!("Error fetching unencrypted backup metadata: {}", e);
tracing::error!(
"{}",
t!("disk.util.error-fetching-backup-metadata", error = e)
);
}
}
if let Err(e) = mount_guard.unmount().await {
tracing::error!("Error unmounting partition {}: {}", part.display(), e);
tracing::error!(
"{}",
t!(
"disk.util.error-unmounting-partition",
part = part.display(),
error = e
)
);
}
}
}
@@ -448,7 +547,7 @@ async fn part_info(part: PathBuf) -> PartitionInfo {
}
}
fn parse_pvscan_output(pvscan_output: &str) -> BTreeMap<PathBuf, Option<String>> {
fn parse_pvscan_output(pvscan_output: &str) -> BTreeMap<PathBuf, Option<InternedString>> {
fn parse_line(line: &str) -> IResult<&str, (&str, Option<&str>)> {
let pv_parse = preceded(
tag(" PV "),
@@ -471,10 +570,10 @@ fn parse_pvscan_output(pvscan_output: &str) -> BTreeMap<PathBuf, Option<String>>
for entry in entries {
match parse_line(entry) {
Ok((_, (pv, vg))) => {
ret.insert(PathBuf::from(pv), vg.map(|s| s.to_owned()));
ret.insert(PathBuf::from(pv), vg.map(InternedString::intern));
}
Err(_) => {
tracing::warn!("Failed to parse pvscan output line: {}", entry);
tracing::warn!("{}", t!("disk.util.failed-to-parse-pvscan", line = entry));
}
}
}
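The log messages in this file now go through rust_i18n's t! macro instead of inline format strings: the first argument is a translation key, and named arguments are interpolated into the localized text. A minimal sketch of one locale entry and its call site, assuming the usual %{name} placeholder syntax; the English string is illustrative, matching the message it replaces:

// locales/en.yml (illustrative):
//   disk.util.could-not-get-vendor: "Could not get vendor of %{disk}: %{error}"
//
// Call site, mirroring the pattern above:
tracing::warn!(
    "{}",
    t!(
        "disk.util.could-not-get-vendor",
        disk = disk.display(),
        error = e.source
    )
);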

View File

@@ -4,17 +4,19 @@ use axum::http::StatusCode;
use axum::http::uri::InvalidUri;
use color_eyre::eyre::eyre;
use num_enum::TryFromPrimitive;
use patch_db::Revision;
use patch_db::Value;
use rpc_toolkit::reqwest;
use rpc_toolkit::yajrc::{
INVALID_PARAMS_ERROR, INVALID_REQUEST_ERROR, METHOD_NOT_FOUND_ERROR, PARSE_ERROR, RpcError,
};
use rust_i18n::t;
use serde::{Deserialize, Serialize};
use tokio::task::JoinHandle;
use tokio_rustls::rustls;
use ts_rs::TS;
use crate::InvalidId;
use crate::prelude::to_value;
#[derive(Debug, Clone, Copy, PartialEq, Eq, TryFromPrimitive)]
#[repr(i32)]
@@ -40,11 +42,11 @@ pub enum ErrorKind {
ParseUrl = 19,
DiskNotAvailable = 20,
BlockDevice = 21,
InvalidOnionAddress = 22,
// InvalidOnionAddress = 22,
Pack = 23,
ValidateS9pk = 24,
DiskCorrupted = 25, // Remove
Tor = 26,
// Tor = 26,
ConfigGen = 27,
ParseNumber = 28,
Database = 29,
@@ -97,95 +99,98 @@ pub enum ErrorKind {
InstallFailed = 76,
UpdateFailed = 77,
Smtp = 78,
SetSysInfo = 79,
}
impl ErrorKind {
pub fn as_str(&self) -> &'static str {
pub fn as_str(&self) -> String {
use ErrorKind::*;
match self {
Unknown => "Unknown Error",
Filesystem => "Filesystem I/O Error",
Docker => "Docker Error",
ConfigSpecViolation => "Config Spec Violation",
ConfigRulesViolation => "Config Rules Violation",
NotFound => "Not Found",
IncorrectPassword => "Incorrect Password",
VersionIncompatible => "Version Incompatible",
Network => "Network Error",
Registry => "Registry Error",
Serialization => "Serialization Error",
Deserialization => "Deserialization Error",
Utf8 => "UTF-8 Parse Error",
ParseVersion => "Version Parsing Error",
IncorrectDisk => "Incorrect Disk",
// Nginx => "Nginx Error",
Dependency => "Dependency Error",
ParseS9pk => "S9PK Parsing Error",
ParseUrl => "URL Parsing Error",
DiskNotAvailable => "Disk Not Available",
BlockDevice => "Block Device Error",
InvalidOnionAddress => "Invalid Onion Address",
Pack => "Pack Error",
ValidateS9pk => "S9PK Validation Error",
DiskCorrupted => "Disk Corrupted", // Remove
Tor => "Tor Daemon Error",
ConfigGen => "Config Generation Error",
ParseNumber => "Number Parsing Error",
Database => "Database Error",
InvalidId => "Invalid ID",
InvalidSignature => "Invalid Signature",
Backup => "Backup Error",
Restore => "Restore Error",
Authorization => "Unauthorized",
AutoConfigure => "Auto-Configure Error",
Action => "Action Failed",
RateLimited => "Rate Limited",
InvalidRequest => "Invalid Request",
MigrationFailed => "Migration Failed",
Uninitialized => "Uninitialized",
ParseNetAddress => "Net Address Parsing Error",
ParseSshKey => "SSH Key Parsing Error",
SoundError => "Sound Interface Error",
ParseTimestamp => "Timestamp Parsing Error",
ParseSysInfo => "System Info Parsing Error",
Wifi => "WiFi Internal Error",
Journald => "Journald Error",
DiskManagement => "Disk Management Error",
OpenSsl => "OpenSSL Internal Error",
PasswordHashGeneration => "Password Hash Generation Error",
DiagnosticMode => "Server is in Diagnostic Mode",
ParseDbField => "Database Field Parse Error",
Duplicate => "Duplication Error",
MultipleErrors => "Multiple Errors",
Incoherent => "Incoherent",
InvalidBackupTargetId => "Invalid Backup Target ID",
ProductKeyMismatch => "Incompatible Product Keys",
LanPortConflict => "Incompatible LAN Port Configuration",
Javascript => "Javascript Engine Error",
Pem => "PEM Encoding Error",
TLSInit => "TLS Backend Initialization Error",
Ascii => "ASCII Parse Error",
MissingHeader => "Missing Header",
Grub => "Grub Error",
Systemd => "Systemd Error",
OpenSsh => "OpenSSH Error",
Zram => "Zram Error",
Lshw => "LSHW Error",
CpuSettings => "CPU Settings Error",
Firmware => "Firmware Error",
Timeout => "Timeout Error",
Lxc => "LXC Error",
Cancelled => "Cancelled",
Git => "Git Error",
DBus => "DBus Error",
InstallFailed => "Install Failed",
UpdateFailed => "Update Failed",
Smtp => "SMTP Error",
Unknown => t!("error.unknown"),
Filesystem => t!("error.filesystem"),
Docker => t!("error.docker"),
ConfigSpecViolation => t!("error.config-spec-violation"),
ConfigRulesViolation => t!("error.config-rules-violation"),
NotFound => t!("error.not-found"),
IncorrectPassword => t!("error.incorrect-password"),
VersionIncompatible => t!("error.version-incompatible"),
Network => t!("error.network"),
Registry => t!("error.registry"),
Serialization => t!("error.serialization"),
Deserialization => t!("error.deserialization"),
Utf8 => t!("error.utf8"),
ParseVersion => t!("error.parse-version"),
IncorrectDisk => t!("error.incorrect-disk"),
// Nginx => t!("error.nginx"),
Dependency => t!("error.dependency"),
ParseS9pk => t!("error.parse-s9pk"),
ParseUrl => t!("error.parse-url"),
DiskNotAvailable => t!("error.disk-not-available"),
BlockDevice => t!("error.block-device"),
// InvalidOnionAddress => t!("error.invalid-onion-address"),
Pack => t!("error.pack"),
ValidateS9pk => t!("error.validate-s9pk"),
DiskCorrupted => t!("error.disk-corrupted"), // Remove
// Tor => t!("error.tor"),
ConfigGen => t!("error.config-gen"),
ParseNumber => t!("error.parse-number"),
Database => t!("error.database"),
InvalidId => t!("error.invalid-id"),
InvalidSignature => t!("error.invalid-signature"),
Backup => t!("error.backup"),
Restore => t!("error.restore"),
Authorization => t!("error.authorization"),
AutoConfigure => t!("error.auto-configure"),
Action => t!("error.action"),
RateLimited => t!("error.rate-limited"),
InvalidRequest => t!("error.invalid-request"),
MigrationFailed => t!("error.migration-failed"),
Uninitialized => t!("error.uninitialized"),
ParseNetAddress => t!("error.parse-net-address"),
ParseSshKey => t!("error.parse-ssh-key"),
SoundError => t!("error.sound-error"),
ParseTimestamp => t!("error.parse-timestamp"),
ParseSysInfo => t!("error.parse-sys-info"),
Wifi => t!("error.wifi"),
Journald => t!("error.journald"),
DiskManagement => t!("error.disk-management"),
OpenSsl => t!("error.openssl"),
PasswordHashGeneration => t!("error.password-hash-generation"),
DiagnosticMode => t!("error.diagnostic-mode"),
ParseDbField => t!("error.parse-db-field"),
Duplicate => t!("error.duplicate"),
MultipleErrors => t!("error.multiple-errors"),
Incoherent => t!("error.incoherent"),
InvalidBackupTargetId => t!("error.invalid-backup-target-id"),
ProductKeyMismatch => t!("error.product-key-mismatch"),
LanPortConflict => t!("error.lan-port-conflict"),
Javascript => t!("error.javascript"),
Pem => t!("error.pem"),
TLSInit => t!("error.tls-init"),
Ascii => t!("error.ascii"),
MissingHeader => t!("error.missing-header"),
Grub => t!("error.grub"),
Systemd => t!("error.systemd"),
OpenSsh => t!("error.openssh"),
Zram => t!("error.zram"),
Lshw => t!("error.lshw"),
CpuSettings => t!("error.cpu-settings"),
Firmware => t!("error.firmware"),
Timeout => t!("error.timeout"),
Lxc => t!("error.lxc"),
Cancelled => t!("error.cancelled"),
Git => t!("error.git"),
DBus => t!("error.dbus"),
InstallFailed => t!("error.install-failed"),
UpdateFailed => t!("error.update-failed"),
Smtp => t!("error.smtp"),
SetSysInfo => t!("error.set-sys-info"),
}
.to_string()
}
}
impl Display for ErrorKind {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.as_str())
write!(f, "{}", &self.as_str())
}
}
@@ -193,13 +198,13 @@ pub struct Error {
pub source: color_eyre::eyre::Error,
pub debug: Option<color_eyre::eyre::Error>,
pub kind: ErrorKind,
pub revision: Option<Revision>,
pub info: Value,
pub task: Option<JoinHandle<()>>,
}
impl Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}: {:#}", self.kind.as_str(), self.source)
write!(f, "{}: {:#}", &self.kind.as_str(), self.source)
}
}
impl Debug for Error {
@@ -207,7 +212,7 @@ impl Debug for Error {
write!(
f,
"{}: {:?}",
self.kind.as_str(),
&self.kind.as_str(),
self.debug.as_ref().unwrap_or(&self.source)
)
}
@@ -224,7 +229,7 @@ impl Error {
source: source.into(),
debug,
kind,
revision: None,
info: Value::Null,
task: None,
}
}
@@ -233,7 +238,7 @@ impl Error {
source: eyre!("{}", self.source),
debug: self.debug.as_ref().map(|e| eyre!("{e}")),
kind: self.kind,
revision: self.revision.clone(),
info: self.info.clone(),
task: None,
}
}
@@ -241,6 +246,10 @@ impl Error {
self.task = Some(task);
self
}
pub fn with_info(mut self, info: Value) -> Self {
self.info = info;
self
}
pub async fn wait(mut self) -> Self {
if let Some(task) = &mut self.task {
task.await.log_err();
@@ -361,17 +370,6 @@ impl From<reqwest::Error> for Error {
Error::new(e, kind)
}
}
#[cfg(feature = "arti")]
impl From<arti_client::Error> for Error {
fn from(e: arti_client::Error) -> Self {
Error::new(e, ErrorKind::Tor)
}
}
impl From<torut::control::ConnError> for Error {
fn from(e: torut::control::ConnError) -> Self {
Error::new(e, ErrorKind::Tor)
}
}
impl From<zbus::Error> for Error {
fn from(e: zbus::Error) -> Self {
Error::new(e, ErrorKind::DBus)
@@ -419,6 +417,8 @@ impl From<patch_db::value::Error> for Error {
pub struct ErrorData {
pub details: String,
pub debug: String,
#[serde(default)]
pub info: Value,
}
impl Display for ErrorData {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
@@ -436,6 +436,7 @@ impl From<Error> for ErrorData {
Self {
details: value.to_string(),
debug: format!("{:?}", value),
info: value.info,
}
}
}
@@ -466,47 +467,40 @@ impl From<&RpcError> for ErrorData {
.or_else(|| d.as_str().map(|s| s.to_owned()))
})
.unwrap_or_else(|| value.message.clone().into_owned()),
info: to_value(
&value
.data
.as_ref()
.and_then(|d| d.as_object().and_then(|d| d.get("info"))),
)
.unwrap_or_default(),
}
}
}
impl From<Error> for RpcError {
fn from(e: Error) -> Self {
let mut data_object = serde_json::Map::with_capacity(3);
data_object.insert("details".to_owned(), format!("{}", e.source).into());
data_object.insert("debug".to_owned(), format!("{:?}", e.source).into());
data_object.insert(
"revision".to_owned(),
match serde_json::to_value(&e.revision) {
let kind = e.kind;
let data = ErrorData::from(e);
RpcError {
code: kind as i32,
message: kind.as_str().into(),
data: Some(match serde_json::to_value(&data) {
Ok(a) => a,
Err(e) => {
tracing::warn!("Error serializing revision for Error object: {}", e);
tracing::warn!("Error serializing ErrorData object: {}", e);
serde_json::Value::Null
}
},
);
RpcError {
code: e.kind as i32,
message: e.kind.as_str().into(),
data: Some(
match serde_json::to_value(&ErrorData {
details: format!("{}", e.source),
debug: format!("{:?}", e.source),
}) {
Ok(a) => a,
Err(e) => {
tracing::warn!("Error serializing revision for Error object: {}", e);
serde_json::Value::Null
}
},
),
}),
}
}
}
impl From<RpcError> for Error {
fn from(e: RpcError) -> Self {
let data = ErrorData::from(&e);
let info = data.info.clone();
Error::new(
ErrorData::from(&e),
data,
if let Ok(kind) = e.code.try_into() {
kind
} else if e.code == METHOD_NOT_FOUND_ERROR.code {
@@ -520,6 +514,7 @@ impl From<RpcError> for Error {
ErrorKind::Unknown
},
)
.with_info(info)
}
}
@@ -602,7 +597,7 @@ where
kind,
source,
debug,
revision: None,
info: Value::Null,
task: None,
}
})
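With `revision` replaced by `info`, structured context now survives the round trip between `Error` and `RpcError`. A minimal sketch, assuming `to_value` from the crate prelude and an illustrative key name:

// Hypothetical: attach structured info to an error before it crosses the RPC boundary.
fn disk_missing() -> Result<(), Error> {
    Err(Error::new(eyre!("disk not found"), ErrorKind::DiskNotAvailable)
        .with_info(to_value(&serde_json::json!({ "disk": "/dev/sda" }))?))
}

// On the wire, the RpcError `data` payload carries `details`, `debug`, and `info`;
// converting back with `Error::from(rpc_error)` restores `info` via `.with_info(...)`.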

View File

@@ -6,7 +6,8 @@ use tracing::instrument;
use crate::util::Invoke;
use crate::{Error, ErrorKind};
#[derive(Clone, Debug, Default, serde::Deserialize, serde::Serialize)]
#[derive(Clone, Debug, Default, serde::Deserialize, serde::Serialize, ts_rs::TS)]
#[ts(type = "string")]
pub struct Hostname(pub InternedString);
lazy_static::lazy_static! {

Some files were not shown because too many files have changed in this diff.