Compare commits


74 Commits

Author SHA1 Message Date
Aiden McClelland
4ba55860dd Merge branch 'feat/preferred-port-design' into sdk-comments 2026-02-24 14:28:56 -07:00
Aiden McClelland
3974c09369 feat(core): refactor hostname to ServerHostnameInfo with name/hostname pair
- Rename Hostname to ServerHostnameInfo, add name + hostname fields
- Add set_hostname_rpc for changing hostname at runtime
- Migrate alpha_20: generate serverInfo.name from hostname, delete ui.name
- Extract gateway.rs helpers to fix rustfmt nesting depth issue
- Add i18n key for hostname validation error
- Update SDK bindings
2026-02-24 14:18:53 -07:00
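A minimal sketch of the renamed type, inferred from the bullet points above; the serde derives and field types are assumptions, not the actual core definition.

```rust
use serde::{Deserialize, Serialize};

// Sketch only: replaces the old `Hostname` newtype per the commit above.
#[derive(Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct ServerHostnameInfo {
    /// User-facing display name, generated from the hostname by the
    /// alpha_20 migration (previously stored as `ui.name`).
    pub name: String,
    /// The machine hostname, changeable at runtime via the set_hostname RPC.
    pub hostname: String,
}
```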
Matt Hill
f0b41a3a4c Merge branch 'feat/preferred-port-design' of github.com:Start9Labs/start-os into sdk-comments 2026-02-24 10:28:00 -07:00
Matt Hill
86ecc4cc99 frontend support for setting and changing hostname 2026-02-24 10:27:22 -07:00
Matt Hill
d1162272f0 clean up prefill flow 2026-02-24 07:19:56 -07:00
Matt Hill
13ac469ed0 Merge branch 'feat/preferred-port-design' of github.com:Start9Labs/start-os into sdk-comments 2026-02-23 18:14:06 -07:00
Matt Hill
5294e8f444 minor cleanup from patch-db audit 2026-02-23 18:13:33 -07:00
Aiden McClelland
b7da7cd59f fix(core): preserve plugin URLs across binding updates
BindInfo::update was replacing addresses with a new DerivedAddressInfo
that cleared the available set, wiping plugin-exported URLs whenever
bind() was called. Also simplify update_addresses plugin preservation
to use retain in place rather than collecting into a separate set.
2026-02-23 17:56:15 -07:00
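A minimal sketch of the retain-in-place simplification mentioned above; the `Address` enum is a hypothetical stand-in for the real BindInfo address types.

```rust
use std::collections::BTreeSet;

// Hypothetical stand-in for the real address type.
#[derive(PartialEq, Eq, PartialOrd, Ord)]
enum Address {
    Derived(String),
    Plugin(String), // plugin-exported URL that must survive bind() updates
}

fn update_addresses(available: &mut BTreeSet<Address>, derived: BTreeSet<Address>) {
    // Keep plugin-exported URLs in place instead of collecting them into a
    // separate set, then merge in the freshly derived addresses.
    available.retain(|a| matches!(a, Address::Plugin(_)));
    available.extend(derived);
}
```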
Matt Hill
c196f250f6 Merge branch 'feat/preferred-port-design' of github.com:Start9Labs/start-os into sdk-comments 2026-02-23 17:00:23 -07:00
Matt Hill
bee8a0f9d8 send prefill for tasks and hide operations to hidden fields 2026-02-23 16:59:21 -07:00
Matt Hill
8213e45b85 Merge branch 'feat/preferred-port-design' of github.com:Start9Labs/start-os into sdk-comments 2026-02-23 14:26:49 -07:00
Matt Hill
e9b9925c0e rework smtp 2026-02-23 14:25:51 -07:00
Aiden McClelland
0724989792 feat(core): allow setting server hostname 2026-02-23 13:35:34 -07:00
Matt Hill
804560d43c add comments to everything potentially consumer facing 2026-02-21 20:40:45 -07:00
Aiden McClelland
31352a72c3 chore: migrate from ts-matches to zod across all TypeScript packages 2026-02-20 16:24:35 -07:00
Aiden McClelland
c7a4f0f9cb feat: tor hidden service key migration 2026-02-20 16:10:42 -07:00
Aiden McClelland
7879668c40 chore: remove completed TODO 2026-02-20 16:09:19 -07:00
Matt Hill
6a01b5eab1 frontend start-tunnel updates 2026-02-20 13:33:18 -07:00
Aiden McClelland
80cb2d9ba5 feat: add getOutboundGateway effect and simplify VersionGraph init/uninit
Add getOutboundGateway effect across core, container-runtime, and SDK
to let services query their effective outbound gateway with callback
support. Remove preInstall/uninstall hooks from VersionGraph as they
are no longer needed.
2026-02-20 13:26:45 -07:00
Aiden McClelland
8c1a452742 chore: replace OTA updates TODO with UI TODO for MattDHill 2026-02-19 23:28:20 -07:00
Aiden McClelland
135afd0251 fix: publish script dpkg-name, s3cfg fallback, and --reinstall for apply 2026-02-19 23:26:16 -07:00
Aiden McClelland
35f3274f29 feat: OTA updates for start-tunnel via apt repository (untested)
- Add apt repo publish script (build/apt/publish-deb.sh) for S3-hosted repo
- Add apt source config and GPG key placeholder (apt/)
- Add tunnel.update.check and tunnel.update.apply RPC endpoints
- Wire up update API in tunnel frontend (api service + mock)
- Uses systemd-run --scope to survive service restart during update
2026-02-19 22:38:39 -07:00
Aiden McClelland
9af5b87c92 chore: remove completed URL plugins TODO 2026-02-19 21:40:36 -07:00
Aiden McClelland
66b5bc1897 fix: propagate host locale into LXC containers and write locale.conf 2026-02-19 21:39:37 -07:00
Aiden McClelland
7909941b70 feat: builder-style InputSpec API, prefill plumbing, and port forward fix
- Add addKey() and add() builder methods to InputSpec with InputSpecTools
- Move OuterType to last generic param on Value, List, and all dynamic methods
- Plumb prefill through getActionInput end-to-end (core → container-runtime → SDK)
- Filter port_forwards to enabled addresses only
- Bump SDK to 0.4.0-beta.50
2026-02-19 16:44:44 -07:00
Aiden McClelland
4527046f2e feat: NAT hairpinning, DNS static servers, clear service error on install
- Add POSTROUTING MASQUERADE rules for container and host hairpin NAT
- Allow bridge subnet containers to reach private forwards via LAN IPs
- Pass bridge_subnet env var from forward.rs to forward-port script
- Use DB-configured static DNS servers in resolver with DB watcher
- Fall back to resolv.conf servers when no static servers configured
- Clear service error state when install/update completes successfully
- Remove completed TODO items
2026-02-19 15:27:52 -07:00
Matt Hill
5a292e6e2a show table even when no addresses 2026-02-19 12:01:34 -07:00
Matt Hill
84149be3c1 touch up URL plugins table 2026-02-19 11:41:41 -07:00
Aiden McClelland
d562466fc4 feat: split row_actions into remove_action and overflow_actions for URL plugins 2026-02-18 18:18:53 -07:00
Aiden McClelland
9c3053f103 feat: implement URL plugins with table/row actions and prefill support
- Add URL plugin effects (register, export_url, clear_urls) in core
- Add PluginHostnameInfo, HostnameMetadata::Plugin, and plugin registration types
- Implement plugin URL table in web UI with tableAction button and rowAction overflow menus
- Thread urlPluginMetadata (packageId, hostId, interfaceId, internalPort) as prefill to actions
- Add prefill support to PackageActionData so metadata passes through form dialogs
- Add i18n translations for plugin error messages
- Clean up plugin URLs on package uninstall
2026-02-18 17:51:13 -07:00
Matt Hill
dce975410f interface row clickable again, but now with a chevron! 2026-02-18 17:11:57 -07:00
Matt Hill
783ce4b3b6 version instead of os query param 2026-02-18 14:41:03 -07:00
Aiden McClelland
675a03bdc5 chore: add TODOs for URL plugins, NAT hairpinning, and start-tunnel OTA updates 2026-02-17 23:41:39 -07:00
Matt Hill
485fced691 round out dns check, dns server check, port forward check, and gateway port forwards 2026-02-17 23:31:47 -07:00
Aiden McClelland
a22707c1cb chore: add TODO to clear service error state on install/update 2026-02-17 19:05:19 -07:00
Aiden McClelland
74e10ec473 chore: add createTask decoupling TODO 2026-02-17 19:03:35 -07:00
Aiden McClelland
e25e0f0c12 chore: bump sdk version to 0.4.0-beta.49 2026-02-17 18:59:41 -07:00
Aiden McClelland
4cae00cb33 refactor: rename manifest metadata fields and improve error display
Rename wrapperRepo→packageRepo, marketingSite→marketingUrl,
docsUrl→docsUrls (array), remove supportSite. Add display_src/display_dbg
helpers to Error. Fix DepInfo description type to LocaleString. Update
web UI, SDK bindings, tests, and fixtures to match. Clean up cli_attach
error handling and remove dead commented code.
2026-02-17 18:40:50 -07:00
Aiden McClelland
313b2df540 feat: add check-dns gateway endpoint and fix per-interface routing tables
Add a `check-dns` RPC endpoint that verifies whether a gateway's DNS
is properly configured for private domain resolution. Uses a three-tier
check: direct match (DNS == server IP), TXT challenge probe (DNS on
LAN), or failure (DNS off-subnet).

Fix per-interface routing tables to clone all non-default routes from
the main table instead of only the interface's own subnets. This
preserves LAN reachability when the priority-75 catch-all overrides
default routing. Filter out status-only flags (linkdown, dead) that
are invalid for `ip route add`.
2026-02-17 16:22:24 -07:00
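A rough sketch of the three-tier decision described above; the inputs (resolved address, TXT challenge result) and the names below are assumptions about how the real check-dns endpoint is wired, not its actual code.

```rust
use std::net::IpAddr;

// Sketch of the check-dns decision tree; not the real RPC handler.
enum DnsStatus {
    Direct,        // gateway DNS already resolves the domain to the server IP
    LanResolver,   // resolver is on the LAN and answered the TXT challenge probe
    Misconfigured, // resolver is off-subnet and cannot serve the private domain
}

fn classify_dns(resolved: Option<IpAddr>, server_ip: IpAddr, txt_probe_ok: bool) -> DnsStatus {
    match resolved {
        Some(ip) if ip == server_ip => DnsStatus::Direct,
        _ if txt_probe_ok => DnsStatus::LanResolver,
        _ => DnsStatus::Misconfigured,
    }
}
```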
Aiden McClelland
5fbc73755d fix: replace .status() with .invoke() for iptables/ip commands
Using .status() leaks stderr directly to system logs, causing noisy
iptables error messages. Switch all networking CLI invocations to use
.invoke() which captures stderr properly. For check-then-act patterns
(iptables -C), use .invoke().await.is_err() instead of
.status().await.map_or(false, |s| s.success()).
2026-02-17 14:12:29 -07:00
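A sketch of the check-then-act shape described above, using the standard tokio `output()` call as a stand-in for the project's internal `.invoke()` helper (which is not shown here); the point is that stderr is captured instead of inherited.

```rust
use tokio::process::Command;

// Sketch only: `output()` captures stdout/stderr rather than leaking them to
// the system log, standing in for the internal `.invoke()` extension.
async fn iptables_rule_missing(chain: &str, rule: &[&str]) -> bool {
    // `iptables -C` exits non-zero when the rule is absent, so a failure here
    // means "go ahead and add it" rather than an error worth logging.
    Command::new("iptables")
        .arg("-C")
        .arg(chain)
        .args(rule)
        .output()
        .await
        .map(|out| !out.status.success())
        .unwrap_or(true)
}
```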
Aiden McClelland
bc4478b0b9 refactor: manifest wraps PackageMetadata, move dependency_metadata to PackageVersionInfo
Manifest now embeds PackageMetadata via #[serde(flatten)] instead of
duplicating ~14 fields. icon and dependency_metadata moved from
PackageMetadata to PackageVersionInfo since they are registry-enrichment
data loaded from the S9PK archive. merge_with now returns errors on
metadata/icon/dependency_metadata mismatches instead of silently ignoring
them.
2026-02-17 14:12:14 -07:00
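The embedding described above is the standard `#[serde(flatten)]` pattern; the field names below are illustrative, not the actual manifest schema.

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct PackageMetadata {
    title: String,
    description: String,
    // ...the ~14 shared metadata fields
}

#[derive(Serialize, Deserialize)]
struct Manifest {
    id: String,
    version: String,
    // Embedded rather than duplicated: PackageMetadata's fields serialize at
    // the top level of the manifest document.
    #[serde(flatten)]
    metadata: PackageMetadata,
}
```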
Aiden McClelland
68141112b7 feat: per-service and default outbound gateway routing
Add set-outbound-gateway RPC for packages and set-default-outbound RPC
for the server, with policy routing enforcement via ip rules. Fix
connmark restore to skip packets with existing fwmarks, add bridge
subnet routes to per-interface tables, and fix squashfs path in
update-image-local.sh.
2026-02-17 12:52:24 -07:00
Aiden McClelland
ccafb599a6 chore: update bindings and use typed params for outbound gateway API 2026-02-17 12:31:35 -07:00
Aiden McClelland
52272feb3e fix: switch BackgroundJobRunner from Vec to FuturesUnordered
BackgroundJobRunner stored active jobs in a Vec<BoxFuture> and polled
ALL of them on every wakeup — O(n) per poll. Since this runs in the
same tokio::select! as the WebServer accept loop, polling overhead from
active connections directly delayed acceptance of new connections.

FuturesUnordered only polls woken futures — O(woken) instead of O(n).
2026-02-16 22:02:59 -07:00
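A minimal sketch of the swap described above, assuming a boxed-future job type; the real BackgroundJobRunner has more bookkeeping.

```rust
use futures::future::BoxFuture;
use futures::stream::{FuturesUnordered, StreamExt};

struct BackgroundJobRunner {
    // Only futures whose wakers fire get polled, so polling cost tracks the
    // number of woken jobs rather than the total number of active jobs.
    jobs: FuturesUnordered<BoxFuture<'static, ()>>,
}

impl BackgroundJobRunner {
    fn add_job(&mut self, job: BoxFuture<'static, ()>) {
        self.jobs.push(job);
    }

    async fn drive(&mut self) {
        // Suitable for use inside the same tokio::select! as the accept loop.
        while self.jobs.next().await.is_some() {}
    }
}
```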
Aiden McClelland
1abad93646 fix: add TLS handshake timeout and fix accept loop deadlock
Two issues in TlsListener::poll_accept:

1. No timeout on TLS handshakes: LazyConfigAcceptor waits indefinitely
   for ClientHello. Attackers that complete TCP handshake but never send
   TLS data create zombie futures in `in_progress` that never complete.
   Fix: wrap the entire handshake in tokio::time::timeout(15s).

2. Missing waker on new-connection pending path: when a TCP connection
   is accepted and the TLS handshake is pending, poll_accept returned
   Pending without calling wake_by_ref(). Since the TcpListener returned
   Ready (not Pending), no waker was registered for it. With edge-
   triggered epoll and no other wakeup source, the task sleeps forever
   and remaining connections in the kernel accept queue are never
   drained. Fix: add cx.waker().wake_by_ref() so the task immediately
   re-polls and continues draining the accept queue.
2026-02-16 21:52:12 -07:00
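A sketch of the two fixes, not the real TlsListener: the handshake function is a placeholder for the rustls LazyConfigAcceptor path, and error handling is elided.

```rust
use std::task::{Context, Poll};
use std::time::Duration;

use futures::future::BoxFuture;
use futures::stream::FuturesUnordered;
use tokio::net::{TcpListener, TcpStream};

struct TlsListenerSketch {
    tcp: TcpListener,
    in_progress: FuturesUnordered<BoxFuture<'static, std::io::Result<()>>>,
}

impl TlsListenerSketch {
    fn poll_accept(&mut self, cx: &mut Context<'_>) -> Poll<std::io::Result<()>> {
        if let Poll::Ready(Ok((stream, _addr))) = self.tcp.poll_accept(cx) {
            // Fix 1: bound the handshake so a client that never sends a
            // ClientHello cannot leave a zombie future in `in_progress`.
            self.in_progress.push(Box::pin(async move {
                tokio::time::timeout(Duration::from_secs(15), tls_handshake(stream))
                    .await
                    .map_err(|_| std::io::Error::from(std::io::ErrorKind::TimedOut))?
            }));
            // Fix 2: the TcpListener returned Ready, so no waker is registered
            // for it; wake ourselves so the accept queue keeps draining.
            cx.waker().wake_by_ref();
            return Poll::Pending;
        }
        Poll::Pending
    }
}

// Placeholder for the real TLS handshake future.
async fn tls_handshake(_stream: TcpStream) -> std::io::Result<()> {
    Ok(())
}
```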
Aiden McClelland
c9468dda02 fix: include public gateways for IP-based addresses in vhost targets
The server hostname vhost construction only collected private IPs,
always setting public to empty. Public IP addresses (Ipv4/Ipv6 metadata
with public=true) were never added to the vhost target's public gateway
set, causing the vhost filter to reject public traffic for IP-based
addresses.
2026-02-16 19:45:10 -07:00
Aiden McClelland
6a1b1627c5 chore: reserialize db on equal version, update bindings and docs
- Run de/ser roundtrip in pre_init even when db version matches, ensuring
  all #[serde(default)] fields are populated before any typed access
- Add patchdb.md documentation for TypedDbWatch patterns
- Update TS bindings for CheckPortParams, CheckPortRes, ifconfigUrl
- Update CLAUDE.md docs with patchdb and component-level references
2026-02-16 19:27:48 -07:00
Aiden McClelland
cfbace1d91 fix: add CONNMARK restore-mark to mangle OUTPUT chain
The CONNMARK --restore-mark rule was only in PREROUTING, which handles
forwarded packets. Locally-bound listeners (e.g. vhost) generate replies
through the OUTPUT chain, where the fwmark was never restored. This
caused response packets to route via the default table instead of back
through the originating interface.
2026-02-16 19:22:07 -07:00
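A sketch of the rule placement described above, invoking iptables via plain `tokio::process::Command` (the project's own command wrapper is not shown); the same restore rule has to exist in both mangle chains because PREROUTING only sees forwarded traffic.

```rust
use tokio::process::Command;

// Sketch only: install the CONNMARK restore rule in a given mangle chain.
async fn add_restore_mark(chain: &str) -> std::io::Result<()> {
    let out = Command::new("iptables")
        .args(["-t", "mangle", "-A", chain, "-j", "CONNMARK", "--restore-mark"])
        .output()
        .await?;
    if !out.status.success() {
        return Err(std::io::Error::new(
            std::io::ErrorKind::Other,
            "iptables -A failed",
        ));
    }
    Ok(())
}

// PREROUTING covers forwarded packets; OUTPUT covers replies generated by
// locally bound listeners such as vhost, so both are needed:
// add_restore_mark("PREROUTING").await?;
// add_restore_mark("OUTPUT").await?;
```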
Matt Hill
d97ab59bab update bindings for API types, add ARCHITECTURE (#3124)
* update binding for API types, add ARCHITECTURE

* translations
2026-02-16 16:23:28 +01:00
Aiden McClelland
3518eccc87 feat: add port_forwards field to Host for tracking gateway forwarding rules 2026-02-14 16:40:21 -07:00
Matt Hill
2f19188dae looking good 2026-02-14 16:37:04 -07:00
Aiden McClelland
3a63f3b840 feat: add mdns hostname metadata variant and fix vhost routing
- Add HostnameMetadata::Mdns variant to distinguish mDNS from private domains
- Mark mDNS addresses as private (public: false) since mDNS is local-only
- Fall back to null SNI entry when hostname not found in vhost mapping
- Simplify public detection in ProxyTarget filter
- Pass hostname to update_addresses for mDNS domain name generation
2026-02-14 15:34:48 -07:00
Matt Hill
098d9275f4 new service interface page 2026-02-14 12:24:16 -07:00
Matt Hill
d5c74bc22e re-arrange (#3123) 2026-02-14 08:15:50 -07:00
Aiden McClelland
49d4da03ca feat: refactor NetService to watch DB and reconcile network state
- NetService sync task now uses PatchDB DbWatch instead of being called
  directly after DB mutations
- Read gateways from DB instead of network interface context when
  updating host addresses
- gateway sync updates all host addresses in the DB
- Add Watch<u64> channel for callers to wait on sync completion
- Fix ts-rs codegen bug with #[ts(skip)] on flattened Plugin field
- Update SDK getServiceInterface.ts for new HostnameInfo shape
- Remove unnecessary HTTPS redirect in static_server.rs
- Fix tunnel/api.rs to filter for WAN IPv4 address
2026-02-13 16:21:57 -07:00
Aiden McClelland
3765465618 chore: update ts bindings for preferred port design 2026-02-13 14:23:48 -07:00
Aiden McClelland
61f820d09e Merge branch 'feat/preferred-port-design' of github.com:Start9Labs/start-os into feat/preferred-port-design 2026-02-13 13:39:25 -07:00
Aiden McClelland
db7f3341ac wip refactor 2026-02-12 14:51:33 -07:00
Matt Hill
4decf9335c fix license display in marketplace 2026-02-12 13:07:19 -07:00
Matt Hill
339e5f799a build ts types and fix i18n 2026-02-12 11:32:29 -07:00
Aiden McClelland
89d3e0cf35 Merge branch 'feat/preferred-port-design' of github.com:Start9Labs/start-os into feat/preferred-port-design 2026-02-12 10:51:32 -07:00
Aiden McClelland
638ed27599 feat: replace SourceFilter with IpNet, add policy routing, remove MASQUERADE 2026-02-12 10:51:26 -07:00
Matt Hill
da75b8498e Merge branch 'next/major' of github.com:Start9Labs/start-os into feat/preferred-port-design 2026-02-12 08:28:36 -07:00
Matt Hill
8ef4ecf5ac outbound gateway support (#3120)
* Multiple (#3111)

* fix alerts i18n, fix status display, better, remove usb media, hide shutdown for install complete

* trigger change detection for localize pipe and round out implementing localize pipe for consistency even though not needed

* Fix PackageInfoShort to handle LocaleString on releaseNotes (#3112)

* Fix PackageInfoShort to handle LocaleString on releaseNotes

* fix: filter by target_version in get_matching_models and pass otherVersions from install

* chore: add exver documentation for ai agents

* frontend plus some be types

---------

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
2026-02-12 08:27:09 -07:00
Aiden McClelland
2a54625f43 feat: replace InterfaceFilter with ForwardRequirements, add WildcardListener, complete alpha.20 bump
- Replace DynInterfaceFilter with ForwardRequirements for per-IP forward
  precision with source-subnet iptables filtering for private forwards
- Add WildcardListener (binds [::]:port) to replace the per-gateway
  NetworkInterfaceListener/SelfContainedNetworkInterfaceListener/
  UpgradableListener infrastructure
- Update forward-port script with src_subnet and excluded_src env vars
- Remove unused filter types and listener infrastructure from gateway.rs
- Add availablePorts migration (IdPool -> BTreeMap<u16, bool>) to alpha.20
- Complete version bump to 0.4.0-alpha.20 in SDK and web
2026-02-11 18:10:27 -07:00
Aiden McClelland
4e638fb58e feat: implement preferred port allocation and per-address enable/disable
- Add AvailablePorts::try_alloc() with SSL tracking (BTreeMap<u16, bool>)
- Add DerivedAddressInfo on BindInfo with private_disabled/public_enabled/possible sets
- Add Bindings wrapper with Map impl for patchdb indexed access
- Flatten HostAddress from single-variant enum to struct
- Replace set-gateway-enabled RPC with set-address-enabled
- Remove hostname_info from Host; computed addresses now in BindInfo.addresses.possible
- Compute possible addresses inline in NetServiceData::update()
- Update DB migration, SDK types, frontend, and container-runtime
2026-02-10 17:38:51 -07:00
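A loose sketch of preferred-port allocation under the stated `BTreeMap<u16, bool>` shape; whether the map tracks allocated or free ports, and what the bool covers beyond an SSL flag, are assumptions here, not the real AvailablePorts semantics.

```rust
use std::collections::BTreeMap;

// Sketch only: assumes the map records allocated ports with their SSL flag.
struct AvailablePorts {
    allocated: BTreeMap<u16, bool>,
}

impl AvailablePorts {
    /// Try the preferred port first, then fall back to the lowest free
    /// non-privileged port, recording whether the binding is SSL.
    fn try_alloc(&mut self, preferred: Option<u16>, ssl: bool) -> Option<u16> {
        let port = preferred
            .filter(|p| !self.allocated.contains_key(p))
            .or_else(|| (1024..=u16::MAX).find(|p| !self.allocated.contains_key(p)))?;
        self.allocated.insert(port, ssl);
        Some(port)
    }
}
```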
Aiden McClelland
73274ef6e0 docs: update TODO.md with DerivedAddressInfo design, remove completed tor task 2026-02-10 14:45:50 -07:00
Aiden McClelland
e1915bf497 chore: format RPCSpec.md markdown table 2026-02-10 13:38:40 -07:00
Aiden McClelland
8204074bdf chore: flatten HostnameInfo from enum to struct
HostnameInfo only had one variant (Ip) after removing Tor. Flatten it
into a plain struct with fields gateway, public, hostname. Remove all
kind === 'ip' type guards and narrowing across SDK, frontend, and
container runtime. Update DB migration to strip the kind field.
2026-02-10 13:38:12 -07:00
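The before/after shape implied by the commit above; field types are assumptions taken from the listed field names.

```rust
// Before: a single-variant enum left over after removing Tor, forcing
// `kind === 'ip'` narrowing in every consumer.
enum HostnameInfoOld {
    Ip { gateway: String, public: bool, hostname: String },
}

// After: a plain struct with the same fields; the `kind` tag disappears and
// the DB migration strips it from stored data.
struct HostnameInfo {
    gateway: String,
    public: bool,
    hostname: String,
}
```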
Aiden McClelland
2ee403e7de chore: remove tor from startos core
Tor is being moved from a built-in OS feature to a service. This removes
the Arti-based Tor client, onion address management, hidden service
creation, and all related code from the core backend, frontend, and SDK.

- Delete core/src/net/tor/ module (~2060 lines)
- Remove OnionAddress, TorSecretKey, TorController from all consumers
- Remove HostnameInfo::Onion and HostAddress::Onion variants
- Remove onion CRUD RPC endpoints and tor subcommand
- Remove tor key handling from account and backup/restore
- Remove ~12 tor-related Cargo dependencies (arti-client, torut, etc.)
- Remove tor UI components, API methods, mock data, and routes
- Remove OnionHostname and tor patterns/regexes from SDK
- Add v0_4_0_alpha_20 database migration to strip onion data
- Bump version to 0.4.0-alpha.20
2026-02-10 13:28:24 -07:00
Aiden McClelland
1974dfd66f docs: move address enable/disable to overflow menu, add SSL indicator, defer UI placement decisions 2026-02-09 13:29:49 -07:00
Aiden McClelland
2e03a95e47 docs: overhaul interfaces page design with view/manage split and per-address controls 2026-02-09 13:10:57 -07:00
Aiden McClelland
8f809dab21 docs: add user-controlled public/private and port forward mapping to design 2026-02-08 11:17:43 -07:00
Aiden McClelland
c0b2cbe1c8 docs: update preferred external port design in TODO 2026-02-06 09:30:35 -07:00
521 changed files with 17045 additions and 13233 deletions

View File

@@ -54,11 +54,11 @@ runs:
- name: Set up Python
if: inputs.setup-python == 'true'
uses: actions/setup-python@v6
uses: actions/setup-python@v5
with:
python-version: "3.x"
- uses: actions/setup-node@v6
- uses: actions/setup-node@v4
with:
node-version: ${{ inputs.nodejs-version }}
cache: npm
@@ -66,15 +66,15 @@ runs:
- name: Set up Docker QEMU
if: inputs.setup-docker == 'true'
uses: docker/setup-qemu-action@v4
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
if: inputs.setup-docker == 'true'
uses: docker/setup-buildx-action@v4
uses: docker/setup-buildx-action@v3
- name: Configure sccache
if: inputs.setup-sccache == 'true'
uses: actions/github-script@v8
uses: actions/github-script@v7
with:
script: |
core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');

View File

@@ -68,7 +68,7 @@ jobs:
- name: Mount tmpfs
if: ${{ github.event.inputs.runner == 'fast' }}
run: sudo mount -t tmpfs tmpfs .
- uses: actions/checkout@v6
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: ./.github/actions/setup-build
@@ -82,7 +82,7 @@ jobs:
SCCACHE_GHA_ENABLED: on
SCCACHE_GHA_VERSION: 0
- uses: actions/upload-artifact@v7
- uses: actions/upload-artifact@v4
with:
name: start-cli_${{ matrix.triple }}
path: core/target/${{ matrix.triple }}/release/start-cli

View File

@@ -64,7 +64,7 @@ jobs:
- name: Mount tmpfs
if: ${{ github.event.inputs.runner == 'fast' }}
run: sudo mount -t tmpfs tmpfs .
- uses: actions/checkout@v6
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: ./.github/actions/setup-build
@@ -78,7 +78,7 @@ jobs:
SCCACHE_GHA_ENABLED: on
SCCACHE_GHA_VERSION: 0
- uses: actions/upload-artifact@v7
- uses: actions/upload-artifact@v4
with:
name: start-registry_${{ matrix.arch }}.deb
path: results/start-registry-*_${{ matrix.arch }}.deb
@@ -102,13 +102,13 @@ jobs:
if: ${{ github.event.inputs.runner == 'fast' }}
- name: Set up docker QEMU
uses: docker/setup-qemu-action@v4
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v4
uses: docker/setup-buildx-action@v3
- name: "Login to GitHub Container Registry"
uses: docker/login-action@v4
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{github.actor}}
@@ -116,14 +116,14 @@ jobs:
- name: Docker meta
id: meta
uses: docker/metadata-action@v6
uses: docker/metadata-action@v5
with:
images: ghcr.io/Start9Labs/startos-registry
tags: |
type=raw,value=${{ github.ref_name }}
- name: Download debian package
uses: actions/download-artifact@v8
uses: actions/download-artifact@v4
with:
pattern: start-registry_*.deb
@@ -162,7 +162,7 @@ jobs:
ADD *.deb .
RUN apt-get update && apt-get install -y ./*_$(uname -m).deb && rm -rf *.deb /var/lib/apt/lists/*
RUN apt-get install -y ./*_$(uname -m).deb && rm *.deb
VOLUME /var/lib/startos

View File

@@ -64,7 +64,7 @@ jobs:
- name: Mount tmpfs
if: ${{ github.event.inputs.runner == 'fast' }}
run: sudo mount -t tmpfs tmpfs .
- uses: actions/checkout@v6
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: ./.github/actions/setup-build
@@ -78,7 +78,7 @@ jobs:
SCCACHE_GHA_ENABLED: on
SCCACHE_GHA_VERSION: 0
- uses: actions/upload-artifact@v7
- uses: actions/upload-artifact@v4
with:
name: start-tunnel_${{ matrix.arch }}.deb
path: results/start-tunnel-*_${{ matrix.arch }}.deb

View File

@@ -25,13 +25,10 @@ on:
- ALL
- x86_64
- x86_64-nonfree
- x86_64-nvidia
- aarch64
- aarch64-nonfree
- aarch64-nvidia
# - raspberrypi
- riscv64
- riscv64-nonfree
deploy:
type: choice
description: Deploy
@@ -68,13 +65,10 @@ jobs:
fromJson('{
"x86_64": ["x86_64"],
"x86_64-nonfree": ["x86_64"],
"x86_64-nvidia": ["x86_64"],
"aarch64": ["aarch64"],
"aarch64-nonfree": ["aarch64"],
"aarch64-nvidia": ["aarch64"],
"raspberrypi": ["aarch64"],
"riscv64": ["riscv64"],
"riscv64-nonfree": ["riscv64"],
"ALL": ["x86_64", "aarch64", "riscv64"]
}')[github.event.inputs.platform || 'ALL']
}}
@@ -100,7 +94,7 @@ jobs:
- name: Mount tmpfs
if: ${{ github.event.inputs.runner == 'fast' }}
run: sudo mount -t tmpfs tmpfs .
- uses: actions/checkout@v6
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: ./.github/actions/setup-build
@@ -114,7 +108,7 @@ jobs:
SCCACHE_GHA_ENABLED: on
SCCACHE_GHA_VERSION: 0
- uses: actions/upload-artifact@v7
- uses: actions/upload-artifact@v4
with:
name: compiled-${{ matrix.arch }}.tar
path: compiled-${{ matrix.arch }}.tar
@@ -124,13 +118,14 @@ jobs:
strategy:
fail-fast: false
matrix:
# TODO: re-add "raspberrypi" to the platform list below
platform: >-
${{
fromJson(
format(
'[
["{0}"],
["x86_64", "x86_64-nonfree", "x86_64-nvidia", "aarch64", "aarch64-nonfree", "aarch64-nvidia", "raspberrypi", "riscv64", "riscv64-nonfree"]
["x86_64", "x86_64-nonfree", "aarch64", "aarch64-nonfree", "riscv64"]
]',
github.event.inputs.platform || 'ALL'
)
@@ -144,24 +139,18 @@ jobs:
fromJson('{
"x86_64": "ubuntu-latest",
"x86_64-nonfree": "ubuntu-latest",
"x86_64-nvidia": "ubuntu-latest",
"aarch64": "ubuntu-24.04-arm",
"aarch64-nonfree": "ubuntu-24.04-arm",
"aarch64-nvidia": "ubuntu-24.04-arm",
"raspberrypi": "ubuntu-24.04-arm",
"riscv64": "ubuntu-24.04-arm",
"riscv64-nonfree": "ubuntu-24.04-arm",
}')[matrix.platform],
fromJson('{
"x86_64": "buildjet-8vcpu-ubuntu-2204",
"x86_64-nonfree": "buildjet-8vcpu-ubuntu-2204",
"x86_64-nvidia": "buildjet-8vcpu-ubuntu-2204",
"aarch64": "buildjet-8vcpu-ubuntu-2204-arm",
"aarch64-nonfree": "buildjet-8vcpu-ubuntu-2204-arm",
"aarch64-nvidia": "buildjet-8vcpu-ubuntu-2204-arm",
"raspberrypi": "buildjet-8vcpu-ubuntu-2204-arm",
"riscv64": "buildjet-8vcpu-ubuntu-2204",
"riscv64-nonfree": "buildjet-8vcpu-ubuntu-2204",
}')[matrix.platform]
)
)[github.event.inputs.runner == 'fast']
@@ -172,13 +161,10 @@ jobs:
fromJson('{
"x86_64": "x86_64",
"x86_64-nonfree": "x86_64",
"x86_64-nvidia": "x86_64",
"aarch64": "aarch64",
"aarch64-nonfree": "aarch64",
"aarch64-nvidia": "aarch64",
"raspberrypi": "aarch64",
"riscv64": "riscv64",
"riscv64-nonfree": "riscv64",
}')[matrix.platform]
}}
steps:
@@ -208,14 +194,14 @@ jobs:
run: sudo mkdir -p /opt/hostedtoolcache && sudo chown $USER:$USER /opt/hostedtoolcache
- name: Set up docker QEMU
uses: docker/setup-qemu-action@v4
uses: docker/setup-qemu-action@v3
- uses: actions/checkout@v6
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Download compiled artifacts
uses: actions/download-artifact@v8
uses: actions/download-artifact@v4
with:
name: compiled-${{ env.ARCH }}.tar
@@ -252,18 +238,18 @@ jobs:
run: PLATFORM=${{ matrix.platform }} make img
if: ${{ matrix.platform == 'raspberrypi' }}
- uses: actions/upload-artifact@v7
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.platform }}.squashfs
path: results/*.squashfs
- uses: actions/upload-artifact@v7
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.platform }}.iso
path: results/*.iso
if: ${{ matrix.platform != 'raspberrypi' }}
- uses: actions/upload-artifact@v7
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.platform }}.img
path: results/*.img

View File

@@ -24,7 +24,7 @@ jobs:
if: github.event.pull_request.draft != true
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v6
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: ./.github/actions/setup-build

.gitignore
View File

@@ -22,4 +22,3 @@ secrets.db
tmp
web/.i18n-checked
docs/USER.md
*.s9pk

View File

@@ -5,7 +5,7 @@ StartOS is an open-source Linux distribution for running personal servers. It ma
## Tech Stack
- Backend: Rust (async/Tokio, Axum web framework)
- Frontend: Angular 21 + TypeScript + Taiga UI 5
- Frontend: Angular 20 + TypeScript + TaigaUI
- Container runtime: Node.js/TypeScript with LXC
- Database/State: Patch-DB (git submodule) - storage layer with reactive frontend sync
- API: JSON-RPC via rpc-toolkit (see `core/rpc-toolkit.md`)
@@ -30,7 +30,7 @@ StartOS is an open-source Linux distribution for running personal servers. It ma
- **`core/`** — Rust backend daemon. Produces a single binary `startbox` that is symlinked as `startd` (main daemon), `start-cli` (CLI), `start-container` (runs inside LXC containers), `registrybox` (package registry), and `tunnelbox` (VPN/tunnel). Handles all backend logic: RPC API, service lifecycle, networking (DNS, ACME, WiFi, Tor, WireGuard), backups, and database state management. See [core/ARCHITECTURE.md](core/ARCHITECTURE.md).
- **`web/`** — Angular 21 + TypeScript workspace using Taiga UI 5. Contains three applications (admin UI, setup wizard, VPN management) and two shared libraries (common components/services, marketplace). Communicates with the backend exclusively via JSON-RPC. See [web/ARCHITECTURE.md](web/ARCHITECTURE.md).
- **`web/`** — Angular 20 + TypeScript workspace using Taiga UI. Contains three applications (admin UI, setup wizard, VPN management) and two shared libraries (common components/services, marketplace). Communicates with the backend exclusively via JSON-RPC. See [web/ARCHITECTURE.md](web/ARCHITECTURE.md).
- **`container-runtime/`** — Node.js runtime that runs inside each service's LXC container. Loads the service's JavaScript from its S9PK package and manages subcontainers. Communicates with the host daemon via JSON-RPC over Unix socket. See [container-runtime/CLAUDE.md](container-runtime/CLAUDE.md).

View File

@@ -11,14 +11,12 @@ Each major component has its own `CLAUDE.md` with detailed guidance: `core/`, `w
## Build & Development
See [CONTRIBUTING.md](CONTRIBUTING.md) for:
- Environment setup and requirements
- Build commands and make targets
- Testing and formatting commands
- Environment variables
**Quick reference:**
```bash
. ./devmode.sh # Enable dev mode
make update-startbox REMOTE=start9@<ip> # Fastest iteration (binary + UI)
@@ -30,8 +28,6 @@ make test-core # Run Rust tests
- Always verify cross-layer changes using the order described in [ARCHITECTURE.md](ARCHITECTURE.md#cross-layer-verification)
- Check component-level CLAUDE.md files for component-specific conventions. ALWAYS read it before operating on that component.
- Follow existing patterns before inventing new ones
- Always use `make` recipes when they exist for testing builds rather than manually invoking build commands
- **Commit signing:** Never push unsigned commits. Before pushing, check all unpushed commits for signatures with `git log --show-signature @{upstream}..HEAD`. If any are unsigned, prompt the user to sign them with `git rebase --exec 'git commit --amend -S --no-edit' @{upstream}`.
## Supplementary Documentation

View File

@@ -7,7 +7,7 @@ GIT_HASH_FILE := $(shell ./build/env/check-git-hash.sh)
VERSION_FILE := $(shell ./build/env/check-version.sh)
BASENAME := $(shell PROJECT=startos ./build/env/basename.sh)
PLATFORM := $(shell if [ -f $(PLATFORM_FILE) ]; then cat $(PLATFORM_FILE); else echo unknown; fi)
ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; elif [ "$(PLATFORM)" = "rockchip64" ]; then echo aarch64; else echo $(PLATFORM) | sed 's/-nonfree$$//g; s/-nvidia$$//g'; fi)
ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; else echo $(PLATFORM) | sed 's/-nonfree$$//g'; fi)
RUST_ARCH := $(shell if [ "$(ARCH)" = "riscv64" ]; then echo riscv64gc; else echo $(ARCH); fi)
REGISTRY_BASENAME := $(shell PROJECT=start-registry PLATFORM=$(ARCH) ./build/env/basename.sh)
TUNNEL_BASENAME := $(shell PROJECT=start-tunnel PLATFORM=$(ARCH) ./build/env/basename.sh)
@@ -15,8 +15,7 @@ IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo
WEB_UIS := web/dist/raw/ui/index.html web/dist/raw/setup-wizard/index.html
COMPRESSED_WEB_UIS := web/dist/static/ui/index.html web/dist/static/setup-wizard/index.html
FIRMWARE_ROMS := build/lib/firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./build/lib/firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json)
TOR_S9PK := build/lib/tor_$(ARCH).s9pk
BUILD_SRC := $(call ls-files, build/lib) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS) $(TOR_S9PK)
BUILD_SRC := $(call ls-files, build/lib) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS)
IMAGE_RECIPE_SRC := $(call ls-files, build/image-recipe/)
STARTD_SRC := core/startd.service $(BUILD_SRC)
CORE_SRC := $(call ls-files, core) $(shell git ls-files --recurse-submodules patch-db) $(GIT_HASH_FILE)
@@ -156,7 +155,7 @@ results/$(BASENAME).deb: debian/dpkg-build.sh $(call ls-files,debian/startos) $(
registry-deb: results/$(REGISTRY_BASENAME).deb
results/$(REGISTRY_BASENAME).deb: debian/dpkg-build.sh $(call ls-files,debian/start-registry) $(REGISTRY_TARGETS)
PROJECT=start-registry PLATFORM=$(ARCH) REQUIRES=debian DEPENDS=ca-certificates ./build/os-compat/run-compat.sh ./debian/dpkg-build.sh
PROJECT=start-registry PLATFORM=$(ARCH) REQUIRES=debian ./build/os-compat/run-compat.sh ./debian/dpkg-build.sh
tunnel-deb: results/$(TUNNEL_BASENAME).deb
@@ -189,9 +188,6 @@ install: $(STARTOS_TARGETS)
$(call mkdir,$(DESTDIR)/lib/systemd/system)
$(call cp,core/startd.service,$(DESTDIR)/lib/systemd/system/startd.service)
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then \
sed -i '/^Environment=/a Environment=RUST_BACKTRACE=full' $(DESTDIR)/lib/systemd/system/startd.service; \
fi
$(call mkdir,$(DESTDIR)/usr/lib)
$(call rm,$(DESTDIR)/usr/lib/startos)
@@ -287,10 +283,6 @@ core/bindings/index.ts: $(call ls-files, core) $(ENVIRONMENT_FILE)
rm -rf core/bindings
./core/build/build-ts.sh
ls core/bindings/*.ts | sed 's/core\/bindings\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' | grep -v '"./index"' | tee core/bindings/index.ts
if [ -d core/bindings/tunnel ]; then \
ls core/bindings/tunnel/*.ts | sed 's/core\/bindings\/tunnel\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' | grep -v '"./index"' > core/bindings/tunnel/index.ts; \
echo 'export * as Tunnel from "./tunnel";' >> core/bindings/index.ts; \
fi
npm --prefix sdk/base exec -- prettier --config=./sdk/base/package.json -w './core/bindings/**/*.ts'
touch core/bindings/index.ts
@@ -316,9 +308,6 @@ build/lib/depends build/lib/conflicts: $(ENVIRONMENT_FILE) $(PLATFORM_FILE) $(sh
$(FIRMWARE_ROMS): build/lib/firmware.json ./build/download-firmware.sh $(PLATFORM_FILE)
./build/download-firmware.sh $(PLATFORM)
$(TOR_S9PK): ./build/download-tor-s9pk.sh
./build/download-tor-s9pk.sh $(ARCH)
core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox: $(CORE_SRC) $(COMPRESSED_WEB_UIS) web/patchdb-ui-seed.json $(ENVIRONMENT_FILE)
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build/build-startbox.sh
touch core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox

View File

@@ -52,7 +52,7 @@ The easiest path. [Buy a server](https://store.start9.com) from Start9 and plug
### Build your own
Follow the [install guide](https://docs.start9.com/start-os/installing.html) to install StartOS on your own hardware. Reasons to go this route:
Install StartOS on your own hardware. Follow one of the [DIY guides](https://start9.com/latest/diy). Reasons to go this route:
1. You already have compatible hardware
2. You want to save on shipping costs

View File

@@ -1,14 +0,0 @@
#!/bin/bash
cd "$(dirname "${BASH_SOURCE[0]}")"
set -e
ARCH=$1
if [ -z "$ARCH" ]; then
>&2 echo "usage: $0 <ARCH>"
exit 1
fi
curl --fail -L -o "./lib/tor_${ARCH}.s9pk" "https://s9pks.nyc3.cdn.digitaloceanspaces.com/tor_${ARCH}.s9pk"

View File

@@ -11,7 +11,6 @@ cifs-utils
conntrack
cryptsetup
curl
dkms
dmidecode
dnsutils
dosfstools
@@ -37,7 +36,6 @@ lvm2
lxc
magic-wormhole
man-db
mokutil
ncdu
net-tools
network-manager

View File

@@ -12,10 +12,6 @@ fi
if [[ "$PLATFORM" =~ -nonfree$ ]]; then
FEATURES+=("nonfree")
fi
if [[ "$PLATFORM" =~ -nvidia$ ]]; then
FEATURES+=("nonfree")
FEATURES+=("nvidia")
fi
feature_file_checker='
/^#/ { next }

View File

@@ -4,4 +4,7 @@
+ firmware-iwlwifi
+ firmware-libertas
+ firmware-misc-nonfree
+ firmware-realtek
+ firmware-realtek
+ nvidia-container-toolkit
# + nvidia-driver
# + nvidia-kernel-dkms

View File

@@ -1 +0,0 @@
+ nvidia-container-toolkit

View File

@@ -1,6 +1,5 @@
+ gdisk
- grub-efi
+ parted
+ u-boot-rpi
+ raspberrypi-net-mods
+ raspberrypi-sys-mods
+ raspi-config

View File

@@ -23,8 +23,6 @@ RUN apt-get update && \
squashfs-tools \
rsync \
b3sum \
btrfs-progs \
gdisk \
dpkg-dev

View File

@@ -1,6 +1,7 @@
#!/bin/bash
set -e
MAX_IMG_LEN=$((4 * 1024 * 1024 * 1024)) # 4GB
echo "==== StartOS Image Build ===="
@@ -33,11 +34,11 @@ fi
IMAGE_BASENAME=startos-${VERSION_FULL}_${IB_TARGET_PLATFORM}
BOOTLOADERS=grub-efi
if [ "$IB_TARGET_PLATFORM" = "x86_64" ] || [ "$IB_TARGET_PLATFORM" = "x86_64-nonfree" ] || [ "$IB_TARGET_PLATFORM" = "x86_64-nvidia" ]; then
if [ "$IB_TARGET_PLATFORM" = "x86_64" ] || [ "$IB_TARGET_PLATFORM" = "x86_64-nonfree" ]; then
IB_TARGET_ARCH=amd64
QEMU_ARCH=x86_64
BOOTLOADERS=grub-efi,syslinux
elif [ "$IB_TARGET_PLATFORM" = "aarch64" ] || [ "$IB_TARGET_PLATFORM" = "aarch64-nonfree" ] || [ "$IB_TARGET_PLATFORM" = "aarch64-nvidia" ] || [ "$IB_TARGET_PLATFORM" = "raspberrypi" ] || [ "$IB_TARGET_PLATFORM" = "rockchip64" ]; then
elif [ "$IB_TARGET_PLATFORM" = "aarch64" ] || [ "$IB_TARGET_PLATFORM" = "aarch64-nonfree" ] || [ "$IB_TARGET_PLATFORM" = "raspberrypi" ] || [ "$IB_TARGET_PLATFORM" = "rockchip64" ]; then
IB_TARGET_ARCH=arm64
QEMU_ARCH=aarch64
elif [ "$IB_TARGET_PLATFORM" = "riscv64" ] || [ "$IB_TARGET_PLATFORM" = "riscv64-nonfree" ]; then
@@ -59,13 +60,9 @@ mkdir -p $prep_results_dir
cd $prep_results_dir
NON_FREE=
if [[ "${IB_TARGET_PLATFORM}" =~ -nonfree$ ]] || [[ "${IB_TARGET_PLATFORM}" =~ -nvidia$ ]] || [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
if [[ "${IB_TARGET_PLATFORM}" =~ -nonfree$ ]] || [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
NON_FREE=1
fi
NVIDIA=
if [[ "${IB_TARGET_PLATFORM}" =~ -nvidia$ ]]; then
NVIDIA=1
fi
IMAGE_TYPE=iso
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ] || [ "${IB_TARGET_PLATFORM}" = "rockchip64" ]; then
IMAGE_TYPE=img
@@ -104,7 +101,7 @@ lb config \
--iso-preparer "START9 LABS; HTTPS://START9.COM" \
--iso-publisher "START9 LABS; HTTPS://START9.COM" \
--backports true \
--bootappend-live "boot=live noautologin console=tty0" \
--bootappend-live "boot=live noautologin" \
--bootloaders $BOOTLOADERS \
--cache false \
--mirror-bootstrap "https://deb.debian.org/debian/" \
@@ -131,15 +128,6 @@ ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
EOT
if [[ "${IB_OS_ENV}" =~ (^|-)dev($|-) ]]; then
mkdir -p config/includes.chroot/etc/ssh/sshd_config.d
echo "PasswordAuthentication yes" > config/includes.chroot/etc/ssh/sshd_config.d/dev-password-auth.conf
fi
# Installer marker file (used by installed GRUB to detect the live USB)
mkdir -p config/includes.binary
touch config/includes.binary/.startos-installer
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
mkdir -p config/includes.chroot
git clone --depth=1 --branch=stable https://github.com/raspberrypi/rpi-firmware.git config/includes.chroot/boot
@@ -180,13 +168,7 @@ sed -i -e '2i set timeout=5' config/bootloaders/grub-pc/config.cfg
mkdir -p config/archives
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
# Fetch the keyring package (not the old raspberrypi.gpg.key, which has
# SHA1-only binding signatures that sqv on Trixie rejects).
KEYRING_DEB=$(mktemp)
curl -fsSL -o "$KEYRING_DEB" https://archive.raspberrypi.com/debian/pool/main/r/raspberrypi-archive-keyring/raspberrypi-archive-keyring_2025.1+rpt1_all.deb
dpkg-deb -x "$KEYRING_DEB" "$KEYRING_DEB.d"
cp "$KEYRING_DEB.d/usr/share/keyrings/raspberrypi-archive-keyring.gpg" config/archives/raspi.key
rm -rf "$KEYRING_DEB" "$KEYRING_DEB.d"
curl -fsSL https://archive.raspberrypi.com/debian/raspberrypi.gpg.key | gpg --dearmor -o config/archives/raspi.key
echo "deb [arch=${IB_TARGET_ARCH} signed-by=/etc/apt/trusted.gpg.d/raspi.key.gpg] https://archive.raspberrypi.com/debian/ ${IB_SUITE} main" > config/archives/raspi.list
fi
@@ -195,7 +177,7 @@ if [ "${IB_TARGET_PLATFORM}" = "rockchip64" ]; then
echo "deb https://apt.armbian.com/ ${IB_SUITE} main" > config/archives/armbian.list
fi
if [ "$NVIDIA" = 1 ]; then
if [ "$NON_FREE" = 1 ]; then
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | gpg --dearmor -o config/archives/nvidia-container-toolkit.key
curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list \
| sed 's#deb https://#deb [signed-by=/etc/apt/trusted.gpg.d/nvidia-container-toolkit.key.gpg] https://#g' \
@@ -223,15 +205,11 @@ cat > config/hooks/normal/9000-install-startos.hook.chroot << EOF
set -e
if [ "${IB_TARGET_PLATFORM}" != "raspberrypi" ]; then
/usr/lib/startos/scripts/enable-kiosk
fi
if [ "${NVIDIA}" = "1" ]; then
if [ "${NON_FREE}" = "1" ] && [ "${IB_TARGET_PLATFORM}" != "raspberrypi" ] && [ "${IB_TARGET_PLATFORM}" != "riscv64-nonfree" ]; then
# install a specific NVIDIA driver version
# ---------------- configuration ----------------
NVIDIA_DRIVER_VERSION="\${NVIDIA_DRIVER_VERSION:-580.126.09}"
NVIDIA_DRIVER_VERSION="\${NVIDIA_DRIVER_VERSION:-580.119.02}"
BASE_URL="https://download.nvidia.com/XFree86/Linux-${QEMU_ARCH}"
@@ -254,7 +232,7 @@ if [ "${NVIDIA}" = "1" ]; then
echo "[nvidia-hook] Target kernel version: \${KVER}" >&2
# Ensure kernel headers are present
TEMP_APT_DEPS=(build-essential pkg-config)
TEMP_APT_DEPS=(build-essential)
if [ ! -e "/lib/modules/\${KVER}/build" ]; then
TEMP_APT_DEPS+=(linux-headers-\${KVER})
fi
@@ -281,15 +259,12 @@ if [ "${NVIDIA}" = "1" ]; then
echo "[nvidia-hook] Running NVIDIA installer for kernel \${KVER}" >&2
if ! sh "\${RUN_PATH}" \
sh "\${RUN_PATH}" \
--silent \
--kernel-name="\${KVER}" \
--no-x-check \
--no-nouveau-check \
--no-runlevel-check; then
cat /var/log/nvidia-installer.log
exit 1
fi
--no-runlevel-check
# Rebuild module metadata
echo "[nvidia-hook] Running depmod for \${KVER}" >&2
@@ -297,32 +272,12 @@ if [ "${NVIDIA}" = "1" ]; then
echo "[nvidia-hook] NVIDIA \${NVIDIA_DRIVER_VERSION} installation complete for kernel \${KVER}" >&2
echo "[nvidia-hook] Removing .run installer..." >&2
rm -f "\${RUN_PATH}"
echo "[nvidia-hook] Blacklisting nouveau..." >&2
echo "blacklist nouveau" > /etc/modprobe.d/blacklist-nouveau.conf
echo "options nouveau modeset=0" >> /etc/modprobe.d/blacklist-nouveau.conf
echo "[nvidia-hook] Rebuilding initramfs..." >&2
update-initramfs -u -k "\${KVER}"
echo "[nvidia-hook] Removing build dependencies..." >&2
apt-get purge -y nvidia-depends
apt-get autoremove -y
echo "[nvidia-hook] Removed build dependencies." >&2
fi
# Install linux-kbuild for sign-file (Secure Boot module signing)
KVER_ALL="\$(ls -1t /boot/vmlinuz-* 2>/dev/null | head -n1 | sed 's|.*/vmlinuz-||')"
if [ -n "\${KVER_ALL}" ]; then
KBUILD_VER="\$(echo "\${KVER_ALL}" | grep -oP '^\d+\.\d+')"
if [ -n "\${KBUILD_VER}" ]; then
echo "[build] Installing linux-kbuild-\${KBUILD_VER} for Secure Boot support" >&2
apt-get install -y "linux-kbuild-\${KBUILD_VER}" || echo "[build] WARNING: linux-kbuild-\${KBUILD_VER} not available" >&2
fi
fi
cp /etc/resolv.conf /etc/resolv.conf.bak
if [ "${IB_SUITE}" = trixie ] && [ "${IB_TARGET_ARCH}" != riscv64 ]; then
@@ -336,10 +291,9 @@ fi
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
ln -sf /usr/bin/pi-beep /usr/local/bin/beep
sh /boot/firmware/config.sh > /boot/firmware/config.txt
KERNEL_VERSION=${RPI_KERNEL_VERSION} sh /boot/config.sh > /boot/config.txt
mkinitramfs -c gzip -o /boot/initrd.img-${RPI_KERNEL_VERSION}-rpi-v8 ${RPI_KERNEL_VERSION}-rpi-v8
mkinitramfs -c gzip -o /boot/initrd.img-${RPI_KERNEL_VERSION}-rpi-2712 ${RPI_KERNEL_VERSION}-rpi-2712
cp /usr/lib/u-boot/rpi_arm64/u-boot.bin /boot/firmware/u-boot.bin
fi
useradd --shell /bin/bash -G startos -m start9
@@ -349,14 +303,14 @@ usermod -aG systemd-journal start9
echo "start9 ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee "/etc/sudoers.d/010_start9-nopasswd"
if [ "${IB_TARGET_PLATFORM}" != "raspberrypi" ]; then
/usr/lib/startos/scripts/enable-kiosk
fi
if ! [[ "${IB_OS_ENV}" =~ (^|-)dev($|-) ]]; then
passwd -l start9
fi
mkdir -p /media/startos
chmod 750 /media/startos
chown root:startos /media/startos
EOF
SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(date '+%s')}"
@@ -409,85 +363,38 @@ if [ "${IMAGE_TYPE}" = iso ]; then
elif [ "${IMAGE_TYPE}" = img ]; then
SECTOR_LEN=512
FW_START=$((1024 * 1024)) # 1MiB (sector 2048) — Pi-specific
FW_LEN=$((128 * 1024 * 1024)) # 128MiB (Pi firmware + U-Boot + DTBs)
FW_END=$((FW_START + FW_LEN - 1))
ESP_START=$((FW_END + 1)) # 100MB EFI System Partition (matches os_install)
ESP_LEN=$((100 * 1024 * 1024))
ESP_END=$((ESP_START + ESP_LEN - 1))
BOOT_START=$((ESP_END + 1)) # 2GB /boot (matches os_install)
BOOT_LEN=$((2 * 1024 * 1024 * 1024))
BOOT_START=$((1024 * 1024)) # 1MiB
BOOT_LEN=$((512 * 1024 * 1024)) # 512MiB
BOOT_END=$((BOOT_START + BOOT_LEN - 1))
ROOT_START=$((BOOT_END + 1))
# Size root partition to fit the squashfs + 256MB overhead for btrfs
# metadata and config overlay, avoiding the need for btrfs resize
SQUASHFS_SIZE=$(stat -c %s $prep_results_dir/binary/live/filesystem.squashfs)
ROOT_LEN=$(( SQUASHFS_SIZE + 256 * 1024 * 1024 ))
# Align to sector boundary
ROOT_LEN=$(( (ROOT_LEN + SECTOR_LEN - 1) / SECTOR_LEN * SECTOR_LEN ))
# Total image: partitions + GPT backup header (34 sectors)
IMG_LEN=$((ROOT_START + ROOT_LEN + 34 * SECTOR_LEN))
# Fixed GPT partition UUIDs (deterministic, based on old MBR disk ID cb15ae4d)
FW_UUID=cb15ae4d-0001-4000-8000-000000000001
ESP_UUID=cb15ae4d-0002-4000-8000-000000000002
BOOT_UUID=cb15ae4d-0003-4000-8000-000000000003
ROOT_UUID=cb15ae4d-0004-4000-8000-000000000004
ROOT_LEN=$((MAX_IMG_LEN - ROOT_START))
ROOT_END=$((MAX_IMG_LEN - 1))
TARGET_NAME=$prep_results_dir/${IMAGE_BASENAME}.img
truncate -s $IMG_LEN $TARGET_NAME
truncate -s $MAX_IMG_LEN $TARGET_NAME
sfdisk $TARGET_NAME <<-EOF
label: gpt
label: dos
label-id: 0xcb15ae4d
unit: sectors
sector-size: 512
${TARGET_NAME}1 : start=$((FW_START / SECTOR_LEN)), size=$((FW_LEN / SECTOR_LEN)), type=EBD0A0A2-B9E5-4433-87C0-68B6B72699C7, uuid=${FW_UUID}, name="firmware"
${TARGET_NAME}2 : start=$((ESP_START / SECTOR_LEN)), size=$((ESP_LEN / SECTOR_LEN)), type=C12A7328-F81F-11D2-BA4B-00A0C93EC93B, uuid=${ESP_UUID}, name="efi"
${TARGET_NAME}3 : start=$((BOOT_START / SECTOR_LEN)), size=$((BOOT_LEN / SECTOR_LEN)), type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, uuid=${BOOT_UUID}, name="boot"
${TARGET_NAME}4 : start=$((ROOT_START / SECTOR_LEN)), size=$((ROOT_LEN / SECTOR_LEN)), type=B921B045-1DF0-41C3-AF44-4C6F280D3FAE, uuid=${ROOT_UUID}, name="root"
${TARGET_NAME}1 : start=$((BOOT_START / SECTOR_LEN)), size=$((BOOT_LEN / SECTOR_LEN)), type=c, bootable
${TARGET_NAME}2 : start=$((ROOT_START / SECTOR_LEN)), size=$((ROOT_LEN / SECTOR_LEN)), type=83
EOF
# Create named loop device nodes (high minor numbers to avoid conflicts)
# and detach any stale ones from previous failed builds
FW_DEV=/dev/startos-loop-fw
ESP_DEV=/dev/startos-loop-esp
BOOT_DEV=/dev/startos-loop-boot
ROOT_DEV=/dev/startos-loop-root
for dev in $FW_DEV:200 $ESP_DEV:201 $BOOT_DEV:202 $ROOT_DEV:203; do
name=${dev%:*}
minor=${dev#*:}
[ -e $name ] || mknod $name b 7 $minor
losetup -d $name 2>/dev/null || true
done
BOOT_DEV=$(losetup --show -f --offset $BOOT_START --sizelimit $BOOT_LEN $TARGET_NAME)
ROOT_DEV=$(losetup --show -f --offset $ROOT_START --sizelimit $ROOT_LEN $TARGET_NAME)
losetup $FW_DEV --offset $FW_START --sizelimit $FW_LEN $TARGET_NAME
losetup $ESP_DEV --offset $ESP_START --sizelimit $ESP_LEN $TARGET_NAME
losetup $BOOT_DEV --offset $BOOT_START --sizelimit $BOOT_LEN $TARGET_NAME
losetup $ROOT_DEV --offset $ROOT_START --sizelimit $ROOT_LEN $TARGET_NAME
mkfs.vfat -F32 -n firmware $FW_DEV
mkfs.vfat -F32 -n efi $ESP_DEV
mkfs.vfat -F32 -n boot $BOOT_DEV
mkfs.btrfs -f -L rootfs $ROOT_DEV
mkfs.vfat -F32 $BOOT_DEV
mkfs.ext4 $ROOT_DEV
TMPDIR=$(mktemp -d)
# Extract boot files from squashfs to staging area
BOOT_STAGING=$(mktemp -d)
unsquashfs -n -f -d $BOOT_STAGING $prep_results_dir/binary/live/filesystem.squashfs boot
# Mount partitions (nested: firmware and efi inside boot)
mkdir -p $TMPDIR/boot $TMPDIR/root
mount $BOOT_DEV $TMPDIR/boot
mkdir -p $TMPDIR/boot/firmware $TMPDIR/boot/efi
mount $FW_DEV $TMPDIR/boot/firmware
mount $ESP_DEV $TMPDIR/boot/efi
mount $ROOT_DEV $TMPDIR/root
# Copy boot files — nested mounts route firmware/* to the firmware partition
cp -a $BOOT_STAGING/boot/. $TMPDIR/boot/
rm -rf $BOOT_STAGING
mount $BOOT_DEV $TMPDIR/boot
unsquashfs -n -f -d $TMPDIR $prep_results_dir/binary/live/filesystem.squashfs boot
mkdir $TMPDIR/root/images $TMPDIR/root/config
B3SUM=$(b3sum $prep_results_dir/binary/live/filesystem.squashfs | head -c 16)
@@ -500,46 +407,40 @@ elif [ "${IMAGE_TYPE}" = img ]; then
mount -t overlay -o lowerdir=$TMPDIR/lower,workdir=$TMPDIR/root/config/work,upperdir=$TMPDIR/root/config/overlay overlay $TMPDIR/next
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
sed -i 's| boot=startos| boot=startos init=/usr/lib/startos/scripts/init_resize\.sh|' $TMPDIR/boot/cmdline.txt
rsync -a $SOURCE_DIR/raspberrypi/img/ $TMPDIR/next/
# Install GRUB: ESP at /boot/efi (Part 2), /boot (Part 3)
mkdir -p $TMPDIR/next/boot \
$TMPDIR/next/dev $TMPDIR/next/proc $TMPDIR/next/sys $TMPDIR/next/media/startos/root
mount --rbind $TMPDIR/boot $TMPDIR/next/boot
mount --bind /dev $TMPDIR/next/dev
mount -t proc proc $TMPDIR/next/proc
mount -t sysfs sysfs $TMPDIR/next/sys
mount --bind $TMPDIR/root $TMPDIR/next/media/startos/root
chroot $TMPDIR/next grub-install --target=arm64-efi --removable --efi-directory=/boot/efi --boot-directory=/boot --no-nvram
chroot $TMPDIR/next update-grub
umount $TMPDIR/next/media/startos/root
umount $TMPDIR/next/sys
umount $TMPDIR/next/proc
umount $TMPDIR/next/dev
umount -l $TMPDIR/next/boot
# Fix root= in grub.cfg: update-grub sees loop devices, but the
# real device uses a fixed GPT PARTUUID for root (Part 4).
sed -i "s|root=[^ ]*|root=PARTUUID=${ROOT_UUID}|g" $TMPDIR/boot/grub/grub.cfg
# Inject first-boot resize script into GRUB config
sed -i 's| boot=startos| boot=startos init=/usr/lib/startos/scripts/init_resize\.sh|' $TMPDIR/boot/grub/grub.cfg
fi
umount $TMPDIR/next
umount $TMPDIR/lower
umount $TMPDIR/boot/firmware
umount $TMPDIR/boot/efi
umount $TMPDIR/boot
umount $TMPDIR/root
e2fsck -fy $ROOT_DEV
resize2fs -M $ROOT_DEV
BLOCK_COUNT=$(dumpe2fs -h $ROOT_DEV | awk '/^Block count:/ { print $3 }')
BLOCK_SIZE=$(dumpe2fs -h $ROOT_DEV | awk '/^Block size:/ { print $3 }')
ROOT_LEN=$((BLOCK_COUNT * BLOCK_SIZE))
losetup -d $ROOT_DEV
losetup -d $BOOT_DEV
losetup -d $ESP_DEV
losetup -d $FW_DEV
# Recreate partition 2 with the new size using sfdisk
sfdisk $TARGET_NAME <<-EOF
label: dos
label-id: 0xcb15ae4d
unit: sectors
sector-size: 512
${TARGET_NAME}1 : start=$((BOOT_START / SECTOR_LEN)), size=$((BOOT_LEN / SECTOR_LEN)), type=c, bootable
${TARGET_NAME}2 : start=$((ROOT_START / SECTOR_LEN)), size=$((ROOT_LEN / SECTOR_LEN)), type=83
EOF
TARGET_SIZE=$((ROOT_START + ROOT_LEN))
truncate -s $TARGET_SIZE $TARGET_NAME
mv $TARGET_NAME $RESULTS_DIR/$IMAGE_BASENAME.img

View File

@@ -1,4 +1,2 @@
PARTUUID=cb15ae4d-0001-4000-8000-000000000001 /boot/firmware vfat umask=0077 0 2
PARTUUID=cb15ae4d-0002-4000-8000-000000000002 /boot/efi vfat umask=0077 0 1
PARTUUID=cb15ae4d-0003-4000-8000-000000000003 /boot vfat umask=0077 0 2
PARTUUID=cb15ae4d-0004-4000-8000-000000000004 / btrfs defaults 0 1
/dev/mmcblk0p1 /boot vfat umask=0077 0 2
/dev/mmcblk0p2 / ext4 defaults 0 1

View File

@@ -12,16 +12,15 @@ get_variables () {
BOOT_DEV_NAME=$(echo /sys/block/*/"${BOOT_PART_NAME}" | cut -d "/" -f 4)
BOOT_PART_NUM=$(cat "/sys/block/${BOOT_DEV_NAME}/${BOOT_PART_NAME}/partition")
ROOT_DEV_SIZE=$(cat "/sys/block/${ROOT_DEV_NAME}/size")
# GPT backup header/entries occupy last 33 sectors
USABLE_END=$((ROOT_DEV_SIZE - 34))
OLD_DISKID=$(fdisk -l "$ROOT_DEV" | sed -n 's/Disk identifier: 0x\([^ ]*\)/\1/p')
if [ "$USABLE_END" -le 67108864 ]; then
TARGET_END=$USABLE_END
ROOT_DEV_SIZE=$(cat "/sys/block/${ROOT_DEV_NAME}/size")
if [ "$ROOT_DEV_SIZE" -le 67108864 ]; then
TARGET_END=$((ROOT_DEV_SIZE - 1))
else
TARGET_END=$((33554432 - 1))
DATA_PART_START=33554432
DATA_PART_END=$USABLE_END
DATA_PART_END=$((ROOT_DEV_SIZE - 1))
fi
PARTITION_TABLE=$(parted -m "$ROOT_DEV" unit s print | tr -d 's')
@@ -58,30 +57,37 @@ check_variables () {
main () {
get_variables
# Fix GPT backup header first — the image was built with a tight root
# partition, so the backup GPT is not at the end of the SD card. parted
# will prompt interactively if this isn't fixed before we use it.
sgdisk -e "$ROOT_DEV" 2>/dev/null || true
if ! check_variables; then
return 1
fi
# if [ "$ROOT_PART_END" -eq "$TARGET_END" ]; then
# reboot_pi
# fi
if ! echo Yes | parted -m --align=optimal "$ROOT_DEV" ---pretend-input-tty u s resizepart "$ROOT_PART_NUM" "$TARGET_END" ; then
FAIL_REASON="Root partition resize failed"
return 1
fi
if [ -n "$DATA_PART_START" ]; then
if ! parted -ms --align=optimal "$ROOT_DEV" u s mkpart data "$DATA_PART_START" "$DATA_PART_END"; then
if ! parted -ms --align=optimal "$ROOT_DEV" u s mkpart primary "$DATA_PART_START" "$DATA_PART_END"; then
FAIL_REASON="Data partition creation failed"
return 1
fi
fi
(
echo x
echo i
echo "0xcb15ae4d"
echo r
echo w
) | fdisk $ROOT_DEV
mount / -o remount,rw
btrfs filesystem resize max /media/startos/root
resize2fs $ROOT_PART_DEV
if ! systemd-machine-id-setup --root=/media/startos/config/overlay/; then
FAIL_REASON="systemd-machine-id-setup failed"
@@ -105,7 +111,7 @@ mount / -o remount,ro
beep
if main; then
sed -i 's| init=/usr/lib/startos/scripts/init_resize\.sh||' /boot/grub/grub.cfg
sed -i 's| init=/usr/lib/startos/scripts/init_resize\.sh||' /boot/cmdline.txt
echo "Resized root filesystem. Rebooting in 5 seconds..."
sleep 5
else

View File

@@ -0,0 +1 @@
usb-storage.quirks=152d:0562:u,14cd:121c:u,0781:cfcb:u console=serial0,115200 console=tty1 root=PARTUUID=cb15ae4d-02 rootfstype=ext4 fsck.repair=yes rootwait cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory boot=startos

View File

@@ -27,18 +27,20 @@ disable_overscan=1
# (e.g. for USB device mode) or if USB support is not required.
otg_mode=1
[all]
[pi4]
# Run as fast as firmware / board allows
arm_boost=1
kernel=vmlinuz-${KERNEL_VERSION}-rpi-v8
initramfs initrd.img-${KERNEL_VERSION}-rpi-v8 followkernel
[pi5]
kernel=vmlinuz-${KERNEL_VERSION}-rpi-2712
initramfs initrd.img-${KERNEL_VERSION}-rpi-2712 followkernel
[all]
gpu_mem=16
dtoverlay=pwm-2chan,disable-bt
# Enable UART for U-Boot and serial console
enable_uart=1
# Load U-Boot as the bootloader (GRUB is chainloaded from U-Boot)
kernel=u-boot.bin
EOF
EOF

View File

@@ -84,8 +84,4 @@ arm_boost=1
gpu_mem=16
dtoverlay=pwm-2chan,disable-bt
# Enable UART for U-Boot and serial console
enable_uart=1
# Load U-Boot as the bootloader (GRUB is chainloaded from U-Boot)
kernel=u-boot.bin
auto_initramfs=1

View File

@@ -1,4 +0,0 @@
# Raspberry Pi-specific GRUB overrides
# Overrides GRUB_CMDLINE_LINUX from /etc/default/grub with Pi-specific
# console devices and hardware quirks.
GRUB_CMDLINE_LINUX="boot=startos console=serial0,115200 console=tty1 usb-storage.quirks=152d:0562:u,14cd:121c:u,0781:cfcb:u cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory"

View File

@@ -1,3 +1,6 @@
os-partitions:
boot: /dev/mmcblk0p1
root: /dev/mmcblk0p2
ethernet-interface: end0
wifi-interface: wlan0
disable-encryption: true

View File

@@ -34,7 +34,7 @@ set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters
if [ -z "$NO_SYNC" ]; then
echo 'Syncing...'
umount -l /media/startos/next 2> /dev/null
umount -R /media/startos/next 2> /dev/null
umount /media/startos/upper 2> /dev/null
rm -rf /media/startos/upper /media/startos/next
mkdir /media/startos/upper
@@ -58,13 +58,13 @@ mkdir -p /media/startos/next/media/startos/root
mount --bind /run /media/startos/next/run
mount --bind /tmp /media/startos/next/tmp
mount --bind /dev /media/startos/next/dev
mount -t sysfs sysfs /media/startos/next/sys
mount -t proc proc /media/startos/next/proc
mount --bind /sys /media/startos/next/sys
mount --bind /proc /media/startos/next/proc
mount --bind /boot /media/startos/next/boot
mount --bind /media/startos/root /media/startos/next/media/startos/root
if mountpoint /sys/firmware/efi/efivars 2>&1 > /dev/null; then
mount -t efivarfs efivarfs /media/startos/next/sys/firmware/efi/efivars
mount --bind /sys/firmware/efi/efivars /media/startos/next/sys/firmware/efi/efivars
fi
if [ -z "$*" ]; then
@@ -111,6 +111,6 @@ if [ "$CHROOT_RES" -eq 0 ]; then
reboot
fi
umount -l /media/startos/next
umount -l /media/startos/upper
umount /media/startos/next
umount /media/startos/upper
rm -rf /media/startos/upper /media/startos/next

View File

@@ -1,76 +0,0 @@
#!/bin/bash
# sign-unsigned-modules [--source <dir> --dest <dir>] [--sign-file <path>]
# [--mok-key <path>] [--mok-pub <path>]
#
# Signs all unsigned kernel modules using the DKMS MOK key.
#
# Default (install) mode:
# Run inside a chroot. Finds and signs unsigned modules in /lib/modules in-place.
# sign-file and MOK key are auto-detected from standard paths.
#
# Overlay mode (--source/--dest):
# Finds unsigned modules in <source>, copies to <dest>, signs the copies.
# Clears old signed modules in <dest> first. Used during upgrades where the
# overlay upper is tmpfs and writes would be lost.
set -e
SOURCE=""
DEST=""
SIGN_FILE=""
MOK_KEY="/var/lib/dkms/mok.key"
MOK_PUB="/var/lib/dkms/mok.pub"
while [[ $# -gt 0 ]]; do
case $1 in
--source) SOURCE="$2"; shift 2;;
--dest) DEST="$2"; shift 2;;
--sign-file) SIGN_FILE="$2"; shift 2;;
--mok-key) MOK_KEY="$2"; shift 2;;
--mok-pub) MOK_PUB="$2"; shift 2;;
*) echo "Unknown option: $1" >&2; exit 1;;
esac
done
# Auto-detect sign-file if not specified
if [ -z "$SIGN_FILE" ]; then
SIGN_FILE="$(ls -1 /usr/lib/linux-kbuild-*/scripts/sign-file 2>/dev/null | head -1)"
fi
if [ -z "$SIGN_FILE" ] || [ ! -x "$SIGN_FILE" ]; then
exit 0
fi
if [ ! -f "$MOK_KEY" ] || [ ! -f "$MOK_PUB" ]; then
exit 0
fi
COUNT=0
if [ -n "$SOURCE" ] && [ -n "$DEST" ]; then
# Overlay mode: find unsigned in source, copy to dest, sign in dest
rm -rf "${DEST}"/lib/modules
for ko in $(find "${SOURCE}"/lib/modules -name '*.ko' 2>/dev/null); do
if ! modinfo "$ko" 2>/dev/null | grep -q '^sig_id:'; then
rel_path="${ko#${SOURCE}}"
mkdir -p "${DEST}$(dirname "$rel_path")"
cp "$ko" "${DEST}${rel_path}"
"$SIGN_FILE" sha256 "$MOK_KEY" "$MOK_PUB" "${DEST}${rel_path}"
COUNT=$((COUNT + 1))
fi
done
else
# In-place mode: sign modules directly
for ko in $(find /lib/modules -name '*.ko' 2>/dev/null); do
if ! modinfo "$ko" 2>/dev/null | grep -q '^sig_id:'; then
"$SIGN_FILE" sha256 "$MOK_KEY" "$MOK_PUB" "$ko"
COUNT=$((COUNT + 1))
fi
done
fi
if [ $COUNT -gt 0 ]; then
echo "[sign-modules] Signed $COUNT unsigned kernel modules"
fi

View File

@@ -104,7 +104,6 @@ local_mount_root()
-olowerdir=/startos/config/overlay:/lower,upperdir=/upper/data,workdir=/upper/work \
overlay ${rootmnt}
mkdir -m 750 -p ${rootmnt}/media/startos
mkdir -p ${rootmnt}/media/startos/config
mount --bind /startos/config ${rootmnt}/media/startos/config
mkdir -p ${rootmnt}/media/startos/images

View File

@@ -24,7 +24,7 @@ fi
unsquashfs -f -d / $1 boot
umount -l /media/startos/next 2> /dev/null || true
umount -R /media/startos/next 2> /dev/null || true
umount /media/startos/upper 2> /dev/null || true
umount /media/startos/lower 2> /dev/null || true
@@ -45,13 +45,18 @@ mkdir -p /media/startos/next/media/startos/root
mount --bind /run /media/startos/next/run
mount --bind /tmp /media/startos/next/tmp
mount --bind /dev /media/startos/next/dev
mount -t sysfs sysfs /media/startos/next/sys
mount -t proc proc /media/startos/next/proc
mount --rbind /boot /media/startos/next/boot
mount --bind /sys /media/startos/next/sys
mount --bind /proc /media/startos/next/proc
mount --bind /boot /media/startos/next/boot
mount --bind /media/startos/root /media/startos/next/media/startos/root
if mountpoint /boot/efi 2>&1 > /dev/null; then
mkdir -p /media/startos/next/boot/efi
mount --bind /boot/efi /media/startos/next/boot/efi
fi
if mountpoint /sys/firmware/efi/efivars 2>&1 > /dev/null; then
mount -t efivarfs efivarfs /media/startos/next/sys/firmware/efi/efivars
mount --bind /sys/firmware/efi/efivars /media/startos/next/sys/firmware/efi/efivars
fi
chroot /media/startos/next bash -e << "EOF"
@@ -63,18 +68,9 @@ fi
EOF
# Sign unsigned kernel modules for Secure Boot
SIGN_FILE="$(ls -1 /media/startos/next/usr/lib/linux-kbuild-*/scripts/sign-file 2>/dev/null | head -1)"
/media/startos/next/usr/lib/startos/scripts/sign-unsigned-modules \
--source /media/startos/lower \
--dest /media/startos/config/overlay \
--sign-file "$SIGN_FILE" \
--mok-key /media/startos/config/overlay/var/lib/dkms/mok.key \
--mok-pub /media/startos/config/overlay/var/lib/dkms/mok.pub
sync
umount -l /media/startos/next
umount -Rl /media/startos/next
umount /media/startos/upper
umount /media/startos/lower

View File

@@ -1,367 +0,0 @@
#!/bin/bash
set -e
REPO="Start9Labs/start-os"
REGISTRY="https://alpha-registry-x.start9.com"
S3_BUCKET="s3://startos-images"
S3_CDN="https://startos-images.nyc3.cdn.digitaloceanspaces.com"
START9_GPG_KEY="2D63C217"
ARCHES="aarch64 aarch64-nonfree aarch64-nvidia riscv64 riscv64-nonfree x86_64 x86_64-nonfree x86_64-nvidia"
CLI_ARCHES="aarch64 riscv64 x86_64"
parse_run_id() {
local val="$1"
if [[ "$val" =~ /actions/runs/([0-9]+) ]]; then
echo "${BASH_REMATCH[1]}"
else
echo "$val"
fi
}
require_version() {
if [ -z "${VERSION:-}" ]; then
read -rp "VERSION: " VERSION
if [ -z "$VERSION" ]; then
>&2 echo '$VERSION required'
exit 2
fi
fi
}
release_dir() {
echo "$HOME/Downloads/v$VERSION"
}
ensure_release_dir() {
local dir
dir=$(release_dir)
if [ "$CLEAN" = "1" ]; then
rm -rf "$dir"
fi
mkdir -p "$dir"
cd "$dir"
}
enter_release_dir() {
local dir
dir=$(release_dir)
if [ ! -d "$dir" ]; then
>&2 echo "Release directory $dir does not exist. Run 'download' or 'pull' first."
exit 1
fi
cd "$dir"
}
cli_target_for() {
local arch=$1 os=$2
local pair="${arch}-${os}"
if [ "$pair" = "riscv64-linux" ]; then
echo "riscv64gc-unknown-linux-musl"
elif [ "$pair" = "riscv64-macos" ]; then
return 1
elif [ "$os" = "linux" ]; then
echo "${arch}-unknown-linux-musl"
elif [ "$os" = "macos" ]; then
echo "${arch}-apple-darwin"
fi
}
release_files() {
for file in *.iso *.squashfs *.deb; do
[ -f "$file" ] && echo "$file"
done
for file in start-cli_*; do
[[ "$file" == *.asc ]] && continue
[ -f "$file" ] && echo "$file"
done
}
resolve_gh_user() {
GH_USER=${GH_USER:-$(gh api user -q .login 2>/dev/null || true)}
GH_GPG_KEY=$(git config user.signingkey 2>/dev/null || true)
}
# --- Subcommands ---
cmd_download() {
require_version
if [ -z "${RUN_ID:-}" ]; then
read -rp "RUN_ID (OS images, leave blank to skip): " RUN_ID
fi
RUN_ID=$(parse_run_id "${RUN_ID:-}")
if [ -z "${ST_RUN_ID:-}" ]; then
read -rp "ST_RUN_ID (start-tunnel, leave blank to skip): " ST_RUN_ID
fi
ST_RUN_ID=$(parse_run_id "${ST_RUN_ID:-}")
if [ -z "${CLI_RUN_ID:-}" ]; then
read -rp "CLI_RUN_ID (start-cli, leave blank to skip): " CLI_RUN_ID
fi
CLI_RUN_ID=$(parse_run_id "${CLI_RUN_ID:-}")
ensure_release_dir
if [ -n "$RUN_ID" ]; then
for arch in $ARCHES; do
while ! gh run download -R $REPO "$RUN_ID" -n "$arch.squashfs" -D "$(pwd)"; do sleep 1; done
done
for arch in $ARCHES; do
while ! gh run download -R $REPO "$RUN_ID" -n "$arch.iso" -D "$(pwd)"; do sleep 1; done
done
fi
if [ -n "$ST_RUN_ID" ]; then
for arch in $CLI_ARCHES; do
while ! gh run download -R $REPO "$ST_RUN_ID" -n "start-tunnel_$arch.deb" -D "$(pwd)"; do sleep 1; done
done
fi
if [ -n "$CLI_RUN_ID" ]; then
for arch in $CLI_ARCHES; do
for os in linux macos; do
local target
target=$(cli_target_for "$arch" "$os") || continue
while ! gh run download -R $REPO "$CLI_RUN_ID" -n "start-cli_$target" -D "$(pwd)"; do sleep 1; done
mv start-cli "start-cli_${arch}-${os}"
done
done
fi
}
cmd_pull() {
require_version
ensure_release_dir
echo "Downloading release assets from tag v$VERSION..."
# Download debs and CLI binaries from the GH release
for file in $(gh release view -R $REPO "v$VERSION" --json assets -q '.assets[].name' | grep -E '\.(deb)$|^start-cli_'); do
gh release download -R $REPO "v$VERSION" -p "$file" -D "$(pwd)" --clobber
done
# Download ISOs and squashfs from S3 CDN
for arch in $ARCHES; do
for ext in squashfs iso; do
# Get the actual filename from the GH release asset list or body
local filename
filename=$(gh release view -R $REPO "v$VERSION" --json assets -q ".assets[].name" | grep "_${arch}\\.${ext}$" || true)
if [ -z "$filename" ]; then
filename=$(gh release view -R $REPO "v$VERSION" --json body -q .body | grep -oP "[^ ]*_${arch}\\.${ext}" | head -1 || true)
fi
if [ -n "$filename" ]; then
echo "Downloading $filename from S3..."
curl -fSL -o "$filename" "$S3_CDN/v$VERSION/$filename"
fi
done
done
}
cmd_register() {
require_version
enter_release_dir
start-cli --registry=$REGISTRY registry os version add "$VERSION" "v$VERSION" '' ">=0.3.5 <=$VERSION"
}
cmd_upload() {
require_version
enter_release_dir
for file in $(release_files); do
case "$file" in
*.iso|*.squashfs)
s3cmd put -P "$file" "$S3_BUCKET/v$VERSION/$file"
;;
*)
gh release upload -R $REPO "v$VERSION" "$file"
;;
esac
done
}
cmd_index() {
require_version
enter_release_dir
for arch in $ARCHES; do
for file in *_"$arch".squashfs *_"$arch".iso; do
start-cli --registry=$REGISTRY registry os asset add --platform="$arch" --version="$VERSION" "$file" "$S3_CDN/v$VERSION/$file"
done
done
}
cmd_sign() {
require_version
enter_release_dir
resolve_gh_user
mkdir -p signatures
for file in $(release_files); do
gpg -u $START9_GPG_KEY --detach-sign --armor -o "signatures/${file}.start9.asc" "$file"
if [ -n "$GH_USER" ] && [ -n "$GH_GPG_KEY" ]; then
gpg -u "$GH_GPG_KEY" --detach-sign --armor -o "signatures/${file}.${GH_USER}.asc" "$file"
fi
done
gpg --export -a $START9_GPG_KEY > signatures/start9.key.asc
if [ -n "$GH_USER" ] && [ -n "$GH_GPG_KEY" ]; then
gpg --export -a "$GH_GPG_KEY" > "signatures/${GH_USER}.key.asc"
else
>&2 echo 'Warning: could not determine GitHub user or GPG signing key, skipping personal signature'
fi
tar -czvf signatures.tar.gz -C signatures .
gh release upload -R $REPO "v$VERSION" signatures.tar.gz --clobber
}
cmd_cosign() {
require_version
enter_release_dir
resolve_gh_user
if [ -z "$GH_USER" ] || [ -z "$GH_GPG_KEY" ]; then
>&2 echo 'Error: could not determine GitHub user or GPG signing key'
>&2 echo "Set GH_USER and/or configure git user.signingkey"
exit 1
fi
echo "Downloading existing signatures..."
gh release download -R $REPO "v$VERSION" -p "signatures.tar.gz" -D "$(pwd)" --clobber
mkdir -p signatures
tar -xzf signatures.tar.gz -C signatures
echo "Adding personal signatures as $GH_USER..."
for file in $(release_files); do
gpg -u "$GH_GPG_KEY" --detach-sign --armor -o "signatures/${file}.${GH_USER}.asc" "$file"
done
gpg --export -a "$GH_GPG_KEY" > "signatures/${GH_USER}.key.asc"
echo "Re-packing signatures..."
tar -czvf signatures.tar.gz -C signatures .
gh release upload -R $REPO "v$VERSION" signatures.tar.gz --clobber
echo "Done. Personal signatures for $GH_USER added to v$VERSION."
}
cmd_notes() {
require_version
enter_release_dir
cat << EOF
# ISO Downloads
- [x86_64/AMD64]($S3_CDN/v$VERSION/$(ls *_x86_64-nonfree.iso))
- [x86_64/AMD64 + NVIDIA]($S3_CDN/v$VERSION/$(ls *_x86_64-nvidia.iso))
- [x86_64/AMD64-slim (FOSS-only)]($S3_CDN/v$VERSION/$(ls *_x86_64.iso) "Without proprietary software or drivers")
- [aarch64/ARM64]($S3_CDN/v$VERSION/$(ls *_aarch64-nonfree.iso))
- [aarch64/ARM64 + NVIDIA]($S3_CDN/v$VERSION/$(ls *_aarch64-nvidia.iso))
- [aarch64/ARM64-slim (FOSS-Only)]($S3_CDN/v$VERSION/$(ls *_aarch64.iso) "Without proprietary software or drivers")
- [RISCV64 (RVA23)]($S3_CDN/v$VERSION/$(ls *_riscv64-nonfree.iso))
- [RISCV64 (RVA23)-slim (FOSS-only)]($S3_CDN/v$VERSION/$(ls *_riscv64.iso) "Without proprietary software or drivers")
EOF
cat << 'EOF'
# StartOS Checksums
## SHA-256
```
EOF
sha256sum *.iso *.squashfs
cat << 'EOF'
```
## BLAKE-3
```
EOF
b3sum *.iso *.squashfs
cat << 'EOF'
```
# Start-Tunnel Checksums
## SHA-256
```
EOF
sha256sum start-tunnel*.deb
cat << 'EOF'
```
## BLAKE-3
```
EOF
b3sum start-tunnel*.deb
cat << 'EOF'
```
# start-cli Checksums
## SHA-256
```
EOF
release_files | grep '^start-cli_' | xargs sha256sum
cat << 'EOF'
```
## BLAKE-3
```
EOF
release_files | grep '^start-cli_' | xargs b3sum
cat << 'EOF'
```
EOF
}
cmd_full_release() {
cmd_download
cmd_register
cmd_upload
cmd_index
cmd_sign
cmd_notes
}
usage() {
cat << 'EOF'
Usage: manage-release.sh <subcommand>
Subcommands:
download Download artifacts from GitHub Actions runs
Requires: RUN_ID, ST_RUN_ID, CLI_RUN_ID (any combination)
pull Download an existing release from the GH tag and S3
register Register the version in the Start9 registry
upload Upload artifacts to GitHub Releases and S3
index Add assets to the registry index
sign Sign all artifacts with Start9 org key (+ personal key if available)
and upload signatures.tar.gz
cosign Add personal GPG signature to an existing release's signatures
(requires 'pull' first so you can verify assets before signing)
notes Print release notes with download links and checksums
full-release Run: download → register → upload → index → sign → notes
Environment variables:
VERSION (required) Release version
RUN_ID GitHub Actions run ID for OS images (download subcommand)
ST_RUN_ID GitHub Actions run ID for start-tunnel (download subcommand)
CLI_RUN_ID GitHub Actions run ID for start-cli (download subcommand)
GH_USER Override GitHub username (default: autodetected via gh cli)
CLEAN Set to 1 to wipe and recreate the release directory
EOF
}
case "${1:-}" in
download) cmd_download ;;
pull) cmd_pull ;;
register) cmd_register ;;
upload) cmd_upload ;;
index) cmd_index ;;
sign) cmd_sign ;;
cosign) cmd_cosign ;;
notes) cmd_notes ;;
full-release) cmd_full_release ;;
*) usage; exit 1 ;;
esac

142
build/upload-ota.sh Executable file
View File

@@ -0,0 +1,142 @@
#!/bin/bash
if [ -z "$VERSION" ]; then
>&2 echo '$VERSION required'
exit 2
fi
set -e
if [ "$SKIP_DL" != "1" ]; then
if [ "$SKIP_CLEAN" != "1" ]; then
rm -rf ~/Downloads/v$VERSION
mkdir ~/Downloads/v$VERSION
cd ~/Downloads/v$VERSION
fi
if [ -n "$RUN_ID" ]; then
for arch in aarch64 aarch64-nonfree riscv64 x86_64 x86_64-nonfree; do
while ! gh run download -R Start9Labs/start-os $RUN_ID -n $arch.squashfs -D $(pwd); do sleep 1; done
done
for arch in aarch64 aarch64-nonfree riscv64 x86_64 x86_64-nonfree; do
while ! gh run download -R Start9Labs/start-os $RUN_ID -n $arch.iso -D $(pwd); do sleep 1; done
done
fi
if [ -n "$ST_RUN_ID" ]; then
for arch in aarch64 riscv64 x86_64; do
while ! gh run download -R Start9Labs/start-os $ST_RUN_ID -n start-tunnel_$arch.deb -D $(pwd); do sleep 1; done
done
fi
if [ -n "$CLI_RUN_ID" ]; then
for arch in aarch64 riscv64 x86_64; do
for os in linux macos; do
pair=${arch}-${os}
if [ "${pair}" = "riscv64-linux" ]; then
target=riscv64gc-unknown-linux-musl
elif [ "${pair}" = "riscv64-macos" ]; then
continue
elif [ "${os}" = "linux" ]; then
target="${arch}-unknown-linux-musl"
elif [ "${os}" = "macos" ]; then
target="${arch}-apple-darwin"
fi
while ! gh run download -R Start9Labs/start-os $CLI_RUN_ID -n start-cli_$target -D $(pwd); do sleep 1; done
mv start-cli "start-cli_${pair}"
done
done
fi
else
cd ~/Downloads/v$VERSION
fi
start-cli --registry=https://alpha-registry-x.start9.com registry os version add $VERSION "v$VERSION" '' ">=0.3.5 <=$VERSION"
if [ "$SKIP_UL" = "2" ]; then
exit 2
elif [ "$SKIP_UL" != "1" ]; then
for file in *.deb start-cli_*; do
gh release upload -R Start9Labs/start-os v$VERSION $file
done
for file in *.iso *.squashfs; do
s3cmd put -P $file s3://startos-images/v$VERSION/$file
done
fi
if [ "$SKIP_INDEX" != "1" ]; then
for arch in aarch64 aarch64-nonfree riscv64 x86_64 x86_64-nonfree; do
for file in *_$arch.squashfs *_$arch.iso; do
start-cli --registry=https://alpha-registry-x.start9.com registry os asset add --platform=$arch --version=$VERSION $file https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$file
done
done
fi
for file in *.iso *.squashfs *.deb start-cli_*; do
gpg -u 7CFFDA41CA66056A --detach-sign --armor -o "${file}.asc" "$file"
done
gpg --export -a 7CFFDA41CA66056A > dr-bonez.key.asc
tar -czvf signatures.tar.gz *.asc
gh release upload -R Start9Labs/start-os v$VERSION signatures.tar.gz
cat << EOF
# ISO Downloads
- [x86_64/AMD64](https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$(ls *_x86_64-nonfree.iso))
- [x86_64/AMD64-slim (FOSS-only)](https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$(ls *_x86_64.iso) "Without proprietary software or drivers")
- [aarch64/ARM64](https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$(ls *_aarch64-nonfree.iso))
- [aarch64/ARM64-slim (FOSS-Only)](https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$(ls *_aarch64.iso) "Without proprietary software or drivers")
- [RISCV64 (RVA23)](https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$(ls *_riscv64.iso))
EOF
cat << 'EOF'
# StartOS Checksums
## SHA-256
```
EOF
sha256sum *.iso *.squashfs
cat << 'EOF'
```
## BLAKE-3
```
EOF
b3sum *.iso *.squashfs
cat << 'EOF'
```
# Start-Tunnel Checksums
## SHA-256
```
EOF
sha256sum start-tunnel*.deb
cat << 'EOF'
```
## BLAKE-3
```
EOF
b3sum start-tunnel*.deb
cat << 'EOF'
```
# start-cli Checksums
## SHA-256
```
EOF
sha256sum start-cli_*
cat << 'EOF'
```
## BLAKE-3
```
EOF
b3sum start-cli_*
cat << 'EOF'
```
EOF

View File

@@ -1,30 +0,0 @@
// Mock for ESM-only mime package — Jest's module loader doesn't support require(esm)
const types = {
".png": "image/png",
".jpg": "image/jpeg",
".jpeg": "image/jpeg",
".gif": "image/gif",
".svg": "image/svg+xml",
".webp": "image/webp",
".ico": "image/x-icon",
".json": "application/json",
".js": "application/javascript",
".html": "text/html",
".css": "text/css",
".txt": "text/plain",
".md": "text/markdown",
}
module.exports = {
default: {
getType(path) {
const ext = "." + path.split(".").pop()
return types[ext] || null
},
getExtension(type) {
const entry = Object.entries(types).find(([, v]) => v === type)
return entry ? entry[0].slice(1) : null
},
},
__esModule: true,
}

View File

@@ -5,7 +5,7 @@ OnFailure=container-runtime-failure.service
[Service]
Type=simple
Environment=RUST_LOG=startos=debug
ExecStart=/usr/bin/node --experimental-detect-module --trace-warnings /usr/lib/startos/init/index.js
ExecStart=/usr/bin/node --experimental-detect-module --trace-warnings --unhandled-rejections=warn /usr/lib/startos/init/index.js
Restart=no
[Install]

View File

@@ -5,7 +5,4 @@ module.exports = {
testEnvironment: "node",
rootDir: "./src/",
modulePathIgnorePatterns: ["./dist/"],
moduleNameMapper: {
"^mime$": "<rootDir>/../__mocks__/mime.js",
},
}

View File

@@ -37,7 +37,7 @@
},
"../sdk/dist": {
"name": "@start9labs/start-sdk",
"version": "0.4.0-beta.61",
"version": "0.4.0-beta.51",
"license": "MIT",
"dependencies": {
"@iarna/toml": "^3.0.0",
@@ -49,8 +49,7 @@
"isomorphic-fetch": "^3.0.0",
"mime": "^4.0.7",
"yaml": "^2.7.1",
"zod": "^4.3.6",
"zod-deep-partial": "^1.2.0"
"zod": "^4.3.6"
},
"devDependencies": {
"@types/jest": "^29.4.0",

View File

@@ -187,10 +187,9 @@ export function makeEffects(context: EffectContext): Effects {
getServiceManifest(
...[options]: Parameters<T.Effects["getServiceManifest"]>
) {
return rpcRound("get-service-manifest", {
...options,
callback: context.callbacks?.addCallback(options.callback) || null,
}) as ReturnType<T.Effects["getServiceManifest"]>
return rpcRound("get-service-manifest", options) as ReturnType<
T.Effects["getServiceManifest"]
>
},
subcontainer: {
createFs(options: { imageId: string; name: string }) {
@@ -212,10 +211,9 @@ export function makeEffects(context: EffectContext): Effects {
>
}) as Effects["exportServiceInterface"],
getContainerIp(...[options]: Parameters<T.Effects["getContainerIp"]>) {
return rpcRound("get-container-ip", {
...options,
callback: context.callbacks?.addCallback(options.callback) || null,
}) as ReturnType<T.Effects["getContainerIp"]>
return rpcRound("get-container-ip", options) as ReturnType<
T.Effects["getContainerIp"]
>
},
getOsIp(...[]: Parameters<T.Effects["getOsIp"]>) {
return rpcRound("get-os-ip", {}) as ReturnType<T.Effects["getOsIp"]>
@@ -246,10 +244,9 @@ export function makeEffects(context: EffectContext): Effects {
>
},
getSslCertificate(options: Parameters<T.Effects["getSslCertificate"]>[0]) {
return rpcRound("get-ssl-certificate", {
...options,
callback: context.callbacks?.addCallback(options.callback) || null,
}) as ReturnType<T.Effects["getSslCertificate"]>
return rpcRound("get-ssl-certificate", options) as ReturnType<
T.Effects["getSslCertificate"]
>
},
getSslKey(options: Parameters<T.Effects["getSslKey"]>[0]) {
return rpcRound("get-ssl-key", options) as ReturnType<
@@ -311,10 +308,7 @@ export function makeEffects(context: EffectContext): Effects {
},
getStatus(...[o]: Parameters<T.Effects["getStatus"]>) {
return rpcRound("get-status", {
...o,
callback: context.callbacks?.addCallback(o.callback) || null,
}) as ReturnType<T.Effects["getStatus"]>
return rpcRound("get-status", o) as ReturnType<T.Effects["getStatus"]>
},
/// DEPRECATED
setMainStatus(o: { status: "running" | "stopped" }): Promise<null> {

View File

@@ -298,10 +298,13 @@ export class RpcListener {
}
case "stop": {
const { id } = stopType.parse(input)
this.callbacks?.removeChild("main")
return handleRpc(
id,
this.system.stop().then((result) => ({ result })),
this.system.stop().then((result) => {
this.callbacks?.removeChild("main")
return { result }
}),
)
}
case "exit": {

View File

@@ -42,74 +42,6 @@ function todo(): never {
throw new Error("Not implemented")
}
function getStatus(
effects: Effects,
options: Omit<Parameters<Effects["getStatus"]>[0], "callback"> = {},
) {
async function* watch(abort?: AbortSignal) {
const resolveCell = { resolve: () => {} }
effects.onLeaveContext(() => {
resolveCell.resolve()
})
abort?.addEventListener("abort", () => resolveCell.resolve())
while (effects.isInContext && !abort?.aborted) {
let callback: () => void = () => {}
const waitForNext = new Promise<void>((resolve) => {
callback = resolve
resolveCell.resolve = resolve
})
yield await effects.getStatus({ ...options, callback })
await waitForNext
}
}
return {
const: () =>
effects.getStatus({
...options,
callback:
effects.constRetry &&
(() => effects.constRetry && effects.constRetry()),
}),
once: () => effects.getStatus(options),
watch: (abort?: AbortSignal) => {
const ctrl = new AbortController()
abort?.addEventListener("abort", () => ctrl.abort())
return watch(ctrl.signal)
},
onChange: (
callback: (
value: T.StatusInfo | null,
error?: Error,
) => { cancel: boolean } | Promise<{ cancel: boolean }>,
) => {
;(async () => {
const ctrl = new AbortController()
for await (const value of watch(ctrl.signal)) {
try {
const res = await callback(value)
if (res.cancel) {
ctrl.abort()
break
}
} catch (e) {
console.error(
"callback function threw an error @ getStatus.onChange",
e,
)
}
}
})()
.catch((e) => callback(null, e as Error))
.catch((e) =>
console.error(
"callback function threw an error @ getStatus.onChange",
e,
),
)
},
}
}
/**
* Local type for procedure values from the manifest.
* The manifest's zod schemas use ZodTypeAny casts that produce `unknown` in zod v4.
@@ -1114,26 +1046,16 @@ export class SystemForEmbassy implements System {
timeoutMs: number | null,
): Promise<void> {
// TODO: docker
const status = await getStatus(effects, { packageId: id }).const()
if (!status) return
try {
await effects.mount({
location: `/media/embassy/${id}`,
target: {
packageId: id,
volumeId: "embassy",
subpath: null,
readonly: true,
idmap: [],
},
})
} catch (e) {
console.error(
`Failed to mount dependency volume for ${id}, skipping autoconfig:`,
e,
)
return
}
await effects.mount({
location: `/media/embassy/${id}`,
target: {
packageId: id,
volumeId: "embassy",
subpath: null,
readonly: true,
idmap: [],
},
})
configFile
.withPath(`/media/embassy/${id}/config.json`)
.read()
@@ -1282,11 +1204,6 @@ async function updateConfig(
if (specValue.target === "config") {
const jp = require("jsonpath")
const depId = specValue["package-id"]
const depStatus = await getStatus(effects, { packageId: depId }).const()
if (!depStatus) {
mutConfigValue[key] = null
continue
}
await effects.mount({
location: `/media/embassy/${depId}`,
target: {

View File

@@ -10,11 +10,6 @@ const matchJsProcedure = z.object({
const matchProcedure = z.union([matchDockerProcedure, matchJsProcedure])
export type Procedure = z.infer<typeof matchProcedure>
const healthCheckFields = {
name: z.string(),
"success-message": z.string().nullable().optional(),
}
const matchAction = z.object({
name: z.string(),
description: z.string(),
@@ -37,10 +32,13 @@ export const matchManifest = z.object({
.optional(),
"health-checks": z.record(
z.string(),
z.union([
matchDockerProcedure.extend(healthCheckFields),
matchJsProcedure.extend(healthCheckFields),
]),
z.intersection(
matchProcedure,
z.object({
name: z.string(),
"success-message": z.string().nullable().optional(),
}),
),
),
config: z
.object({

View File

@@ -71,7 +71,7 @@ export class SystemForStartOs implements System {
this.starting = true
effects.constRetry = utils.once(() => {
console.debug(".const() triggered")
if (effects.isInContext) effects.restart()
effects.restart()
})
let mainOnTerm: () => Promise<void> | undefined
const daemons = await (

View File

@@ -1,4 +1,5 @@
import { RpcListener } from "./Adapters/RpcListener"
import { SystemForEmbassy } from "./Adapters/Systems/SystemForEmbassy"
import { AllGetDependencies } from "./Interfaces/AllGetDependencies"
import { getSystem } from "./Adapters/Systems"
@@ -6,18 +7,6 @@ const getDependencies: AllGetDependencies = {
system: getSystem,
}
process.on("unhandledRejection", (reason) => {
if (
reason instanceof Error &&
"muteUnhandled" in reason &&
reason.muteUnhandled
) {
// mute
} else {
console.error("Unhandled promise rejection", reason)
}
})
for (let s of ["SIGTERM", "SIGINT", "SIGHUP"]) {
process.on(s, (s) => {
console.log(`Caught ${s}`)

View File

@@ -22,7 +22,6 @@ cd sdk && make baseDist dist # Rebuild SDK after ts-bindings
- Always run `cargo check -p start-os` after modifying Rust code
- When adding RPC endpoints, follow the patterns in [rpc-toolkit.md](rpc-toolkit.md)
- When modifying `#[ts(export)]` types, regenerate bindings and rebuild the SDK (see [ARCHITECTURE.md](../ARCHITECTURE.md#build-pipeline))
- **i18n is mandatory** — any user-facing string must go in `core/locales/i18n.yaml` with all 5 locales (`en_US`, `de_DE`, `es_ES`, `fr_FR`, `pl_PL`). This includes CLI subcommand descriptions (`about.<name>`), CLI arg help (`help.arg.<name>`), error messages (`error.<name>`), notifications, setup messages, and any other text shown to users. Entries are alphabetically ordered within their section. See [i18n-patterns.md](i18n-patterns.md)
- When adding i18n keys, add all 5 locales in `core/locales/i18n.yaml` (see [i18n-patterns.md](i18n-patterns.md))
- When using DB watches, follow the `TypedDbWatch<T>` patterns in [patchdb.md](patchdb.md)
- **Always use `.invoke(ErrorKind::...)` instead of `.status()` when running CLI commands** via `tokio::process::Command`. The `Invoke` trait (from `crate::util::Invoke`) captures stdout/stderr and checks exit codes properly. Using `.status()` leaks stderr directly to system logs, creating noise. For check-then-act patterns (e.g. `iptables -C`), use `.invoke(...).await.is_ok()` / `.is_err()` instead of `.status().await.map_or(false, |s| s.success())`.
- Always use file utils in util::io instead of tokio::fs when available
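As a hedged illustration of the `.invoke(...)` check-then-act convention in the list above (a sketch, not code from the repository): the specific `iptables` rule and the `ErrorKind::Network` variant are placeholders, and `crate::prelude::*` / `crate::util::Invoke` are assumed to bring `Error`, `ErrorKind`, and the `Invoke` trait into scope, as the guideline describes.

```rust
// Hypothetical sketch of the check-then-act pattern described above.
// The iptables rule and ErrorKind::Network are illustrative placeholders.
use tokio::process::Command;

use crate::prelude::*; // assumed to provide Error and ErrorKind
use crate::util::Invoke; // the Invoke trait referenced in the guideline

async fn ensure_rule() -> Result<(), Error> {
    // Check: `.invoke(...)` captures stdout/stderr and checks the exit code,
    // so a failing `-C` probe does not leak noise into the system logs.
    let exists = Command::new("iptables")
        .args(["-C", "INPUT", "-p", "tcp", "--dport", "5959", "-j", "ACCEPT"])
        .invoke(ErrorKind::Network)
        .await
        .is_ok();
    // Act: only add the rule if the probe failed.
    if !exists {
        Command::new("iptables")
            .args(["-A", "INPUT", "-p", "tcp", "--dport", "5959", "-j", "ACCEPT"])
            .invoke(ErrorKind::Network)
            .await?;
    }
    Ok(())
}
```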

39
core/Cargo.lock generated
View File

@@ -3376,15 +3376,6 @@ dependencies = [
"serde_json",
]
[[package]]
name = "keccak"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "cb26cec98cce3a3d96cbb7bced3c4b16e3d13f27ec56dbd62cbc8f39cfb9d653"
dependencies = [
"cpufeatures",
]
[[package]]
name = "kv"
version = "0.24.0"
@@ -4364,7 +4355,7 @@ dependencies = [
"nix 0.30.1",
"patch-db-macro",
"serde",
"serde_cbor_2",
"serde_cbor 0.11.1",
"thiserror 2.0.18",
"tokio",
"tracing",
@@ -5386,7 +5377,7 @@ dependencies = [
"pin-project",
"reqwest",
"serde",
"serde_cbor",
"serde_cbor 0.11.2",
"serde_json",
"thiserror 2.0.18",
"tokio",
@@ -5794,20 +5785,19 @@ dependencies = [
[[package]]
name = "serde_cbor"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5"
version = "0.11.1"
dependencies = [
"half 1.8.3",
"serde",
]
[[package]]
name = "serde_cbor_2"
version = "0.13.0"
source = "git+https://github.com/dr-bonez/cbor.git#2ce7fe5a5ca5700aa095668b5ba67154b7f213a4"
name = "serde_cbor"
version = "0.11.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2bef2ebfde456fb76bbcf9f59315333decc4fda0b2b44b420243c11e0f5ec1f5"
dependencies = [
"half 2.7.1",
"half 1.8.3",
"serde",
]
@@ -5994,16 +5984,6 @@ dependencies = [
"digest 0.10.7",
]
[[package]]
name = "sha3"
version = "0.10.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75872d278a8f37ef87fa0ddbda7802605cb18344497949862c0d4dcb291eba60"
dependencies = [
"digest 0.10.7",
"keccak",
]
[[package]]
name = "sharded-slab"
version = "0.1.7"
@@ -6434,7 +6414,7 @@ dependencies = [
[[package]]
name = "start-os"
version = "0.4.0-alpha.21"
version = "0.4.0-alpha.20"
dependencies = [
"aes",
"async-acme",
@@ -6538,7 +6518,6 @@ dependencies = [
"serde_yml",
"sha-crypt",
"sha2 0.10.9",
"sha3",
"signal-hook",
"socket2 0.6.2",
"socks5-impl",

View File

@@ -15,7 +15,7 @@ license = "MIT"
name = "start-os"
readme = "README.md"
repository = "https://github.com/Start9Labs/start-os"
version = "0.4.0-alpha.21" # VERSION_BUMP
version = "0.4.0-alpha.20" # VERSION_BUMP
[lib]
name = "startos"
@@ -170,7 +170,9 @@ once_cell = "1.19.0"
openssh-keys = "0.6.2"
openssl = { version = "0.10.57", features = ["vendored"] }
p256 = { version = "0.13.2", features = ["pem"] }
patch-db = { version = "*", path = "../patch-db/core", features = ["trace"] }
patch-db = { version = "*", path = "../patch-db/patch-db", features = [
"trace",
] }
pbkdf2 = "0.12.2"
pin-project = "1.1.3"
pkcs8 = { version = "0.10.2", features = ["std"] }
@@ -200,7 +202,6 @@ serde_toml = { package = "toml", version = "0.9.9+spec-1.0.0" }
serde_yaml = { package = "serde_yml", version = "0.0.12" }
sha-crypt = "0.5.0"
sha2 = "0.10.2"
sha3 = "0.10"
signal-hook = "0.3.17"
socket2 = { version = "0.6.0", features = ["all"] }
socks5-impl = { version = "0.7.2", features = ["client", "server"] }

View File

@@ -67,10 +67,6 @@ if [[ "${ENVIRONMENT:-}" =~ (^|-)console($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
if [[ "${ENVIRONMENT:-}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="$RUSTFLAGS -C debuginfo=1"
fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin start-cli --target=$TARGET

View File

@@ -38,10 +38,6 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="$RUSTFLAGS -C debuginfo=1"
fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin registrybox --target=$RUST_ARCH-unknown-linux-musl

View File

@@ -38,10 +38,6 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="$RUSTFLAGS -C debuginfo=1"
fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin start-container --target=$RUST_ARCH-unknown-linux-musl

View File

@@ -38,10 +38,6 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="$RUSTFLAGS -C debuginfo=1"
fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin startbox --target=$RUST_ARCH-unknown-linux-musl

View File

@@ -38,10 +38,6 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="$RUSTFLAGS -C debuginfo=1"
fi
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin tunnelbox --target=$RUST_ARCH-unknown-linux-musl

View File

@@ -197,13 +197,6 @@ setup.transferring-data:
fr_FR: "Transfert de données"
pl_PL: "Przesyłanie danych"
setup.password-required:
en_US: "Password is required for fresh setup"
de_DE: "Passwort ist für die Ersteinrichtung erforderlich"
es_ES: "Se requiere contraseña para la configuración inicial"
fr_FR: "Le mot de passe est requis pour la première configuration"
pl_PL: "Hasło jest wymagane do nowej konfiguracji"
# system.rs
system.governor-not-available:
en_US: "Governor %{governor} not available"
@@ -857,13 +850,6 @@ error.set-sys-info:
fr_FR: "Erreur de Définition des Infos Système"
pl_PL: "Błąd Ustawiania Informacji o Systemie"
error.bios:
en_US: "BIOS/UEFI Error"
de_DE: "BIOS/UEFI-Fehler"
es_ES: "Error de BIOS/UEFI"
fr_FR: "Erreur BIOS/UEFI"
pl_PL: "Błąd BIOS/UEFI"
# disk/main.rs
disk.main.disk-not-found:
en_US: "StartOS disk not found."
@@ -1255,13 +1241,6 @@ backup.bulk.leaked-reference:
fr_FR: "référence fuitée vers BackupMountGuard"
pl_PL: "wyciekła referencja do BackupMountGuard"
backup.bulk.service-not-ready:
en_US: "Cannot create a backup of a service that is still initializing or in an error state"
de_DE: "Es kann keine Sicherung eines Dienstes erstellt werden, der noch initialisiert wird oder sich im Fehlerzustand befindet"
es_ES: "No se puede crear una copia de seguridad de un servicio que aún se está inicializando o está en estado de error"
fr_FR: "Impossible de créer une sauvegarde d'un service encore en cours d'initialisation ou en état d'erreur"
pl_PL: "Nie można utworzyć kopii zapasowej usługi, która jest jeszcze inicjalizowana lub znajduje się w stanie błędu"
# backup/restore.rs
backup.restore.package-error:
en_US: "Error restoring package %{id}: %{error}"
@@ -1386,21 +1365,6 @@ net.tor.client-error:
fr_FR: "Erreur du client Tor : %{error}"
pl_PL: "Błąd klienta Tor: %{error}"
# net/tunnel.rs
net.tunnel.timeout-waiting-for-add:
en_US: "timed out waiting for gateway %{gateway} to appear in database"
de_DE: "Zeitüberschreitung beim Warten auf das Erscheinen von Gateway %{gateway} in der Datenbank"
es_ES: "se agotó el tiempo esperando que la puerta de enlace %{gateway} aparezca en la base de datos"
fr_FR: "délai d'attente dépassé pour l'apparition de la passerelle %{gateway} dans la base de données"
pl_PL: "upłynął limit czasu oczekiwania na pojawienie się bramy %{gateway} w bazie danych"
net.tunnel.timeout-waiting-for-remove:
en_US: "timed out waiting for gateway %{gateway} to be removed from database"
de_DE: "Zeitüberschreitung beim Warten auf das Entfernen von Gateway %{gateway} aus der Datenbank"
es_ES: "se agotó el tiempo esperando que la puerta de enlace %{gateway} sea eliminada de la base de datos"
fr_FR: "délai d'attente dépassé pour la suppression de la passerelle %{gateway} de la base de données"
pl_PL: "upłynął limit czasu oczekiwania na usunięcie bramy %{gateway} z bazy danych"
# net/wifi.rs
net.wifi.ssid-no-special-characters:
en_US: "SSID may not have special characters"
@@ -1614,13 +1578,6 @@ net.gateway.cannot-delete-without-connection:
fr_FR: "Impossible de supprimer l'appareil sans connexion active"
pl_PL: "Nie można usunąć urządzenia bez aktywnego połączenia"
net.gateway.no-configured-echoip-urls:
en_US: "No configured echoip URLs"
de_DE: "Keine konfigurierten EchoIP-URLs"
es_ES: "No hay URLs de echoip configuradas"
fr_FR: "Aucune URL echoip configurée"
pl_PL: "Brak skonfigurowanych adresów URL echoip"
# net/dns.rs
net.dns.timeout-updating-catalog:
en_US: "timed out waiting to update dns catalog"
@@ -2782,13 +2739,6 @@ help.arg.download-directory:
fr_FR: "Chemin du répertoire de téléchargement"
pl_PL: "Ścieżka katalogu do pobrania"
help.arg.echoip-urls:
en_US: "Echo IP service URLs for external IP detection"
de_DE: "Echo-IP-Dienst-URLs zur externen IP-Erkennung"
es_ES: "URLs del servicio Echo IP para detección de IP externa"
fr_FR: "URLs du service Echo IP pour la détection d'IP externe"
pl_PL: "Adresy URL usługi Echo IP do wykrywania zewnętrznego IP"
help.arg.emulate-missing-arch:
en_US: "Emulate missing architecture using this one"
de_DE: "Fehlende Architektur mit dieser emulieren"
@@ -2957,13 +2907,6 @@ help.arg.log-limit:
fr_FR: "Nombre maximum d'entrées de journal"
pl_PL: "Maksymalna liczba wpisów logu"
help.arg.merge:
en_US: "Merge with existing version range instead of replacing"
de_DE: "Mit vorhandenem Versionsbereich zusammenführen statt ersetzen"
es_ES: "Combinar con el rango de versiones existente en lugar de reemplazar"
fr_FR: "Fusionner avec la plage de versions existante au lieu de remplacer"
pl_PL: "Połącz z istniejącym zakresem wersji zamiast zastępować"
help.arg.mirror-url:
en_US: "URL of the mirror"
de_DE: "URL des Spiegels"
@@ -3734,13 +3677,6 @@ help.arg.s9pk-file-path:
fr_FR: "Chemin vers le fichier de paquet s9pk"
pl_PL: "Ścieżka do pliku pakietu s9pk"
help.arg.s9pk-file-paths:
en_US: "Paths to s9pk package files"
de_DE: "Pfade zu s9pk-Paketdateien"
es_ES: "Rutas a los archivos de paquete s9pk"
fr_FR: "Chemins vers les fichiers de paquet s9pk"
pl_PL: "Ścieżki do plików pakietów s9pk"
help.arg.session-ids:
en_US: "Session identifiers"
de_DE: "Sitzungskennungen"
@@ -5037,13 +4973,6 @@ about.publish-s9pk:
fr_FR: "Publier s9pk dans le bucket S3 et indexer dans le registre"
pl_PL: "Opublikuj s9pk do bucketu S3 i zindeksuj w rejestrze"
about.select-s9pk-for-device:
en_US: "Select the best compatible s9pk for a target device"
de_DE: "Das beste kompatible s9pk für ein Zielgerät auswählen"
es_ES: "Seleccionar el s9pk más compatible para un dispositivo destino"
fr_FR: "Sélectionner le meilleur s9pk compatible pour un appareil cible"
pl_PL: "Wybierz najlepiej kompatybilny s9pk dla urządzenia docelowego"
about.rebuild-service-container:
en_US: "Rebuild service container"
de_DE: "Dienst-Container neu erstellen"
@@ -5254,12 +5183,12 @@ about.reset-user-interface-password:
fr_FR: "Réinitialiser le mot de passe de l'interface utilisateur"
pl_PL: "Zresetuj hasło interfejsu użytkownika"
about.uninitialize-webserver:
en_US: "Uninitialize the webserver"
de_DE: "Den Webserver deinitialisieren"
es_ES: "Desinicializar el servidor web"
fr_FR: "Désinitialiser le serveur web"
pl_PL: "Zdezinicjalizuj serwer internetowy"
about.reset-webserver:
en_US: "Reset the webserver"
de_DE: "Den Webserver zurücksetzen"
es_ES: "Restablecer el servidor web"
fr_FR: "initialiser le serveur web"
pl_PL: "Zresetuj serwer internetowy"
about.restart-server:
en_US: "Restart the server"
@@ -5296,13 +5225,6 @@ about.set-country:
fr_FR: "Définir le pays"
pl_PL: "Ustaw kraj"
about.set-echoip-urls:
en_US: "Set the Echo IP service URLs"
de_DE: "Die Echo-IP-Dienst-URLs festlegen"
es_ES: "Establecer las URLs del servicio Echo IP"
fr_FR: "Définir les URLs du service Echo IP"
pl_PL: "Ustaw adresy URL usługi Echo IP"
about.set-hostname:
en_US: "Set the server hostname"
de_DE: "Den Server-Hostnamen festlegen"

View File

@@ -21,14 +21,6 @@ pub async fn my_handler(ctx: RpcContext, params: MyParams) -> Result<MyResponse,
from_fn_async(my_handler)
```
If a handler takes no params, simply omit the params argument entirely (no need for `_: Empty`):
```rust
pub async fn no_params_handler(ctx: RpcContext) -> Result<MyResponse, Error> {
// ...
}
```
### `from_fn_async_local` - Non-thread-safe async handlers
For async functions that are not `Send` (cannot be safely moved between threads). Use when working with non-thread-safe types.
@@ -189,9 +181,9 @@ pub struct MyParams {
### Adding a New RPC Endpoint
1. Define params struct with `Deserialize, Serialize, Parser, TS` (skip if no params needed)
1. Define params struct with `Deserialize, Serialize, Parser, TS`
2. Choose handler type based on sync/async and thread-safety
3. Write handler function taking `(Context, Params) -> Result<Response, Error>` (omit Params if none needed)
3. Write handler function taking `(Context, Params) -> Result<Response, Error>`
4. Add to parent handler with appropriate extensions (display modifiers before `with_about`)
5. TypeScript types auto-generated via `make ts-bindings`
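For context, a minimal sketch of the handler shape the steps above describe, assuming `RpcContext` and `Error` are in scope as in the doc's own example; `MyParams` and `MyResponse` are placeholder names, not real start-os types.

```rust
// Illustrative only: follows the signature shown above. Crate-internal types
// (RpcContext, Error) are assumed to be available via the crate prelude.
use clap::Parser;
use serde::{Deserialize, Serialize};
use ts_rs::TS;

#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
pub struct MyParams {
    /// Value echoed back in the response (placeholder field).
    pub name: String,
}

#[derive(Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
pub struct MyResponse {
    pub greeting: String,
}

pub async fn my_handler(ctx: RpcContext, params: MyParams) -> Result<MyResponse, Error> {
    // A real handler would use `ctx` (db, services, etc.); this one only echoes.
    let _ = ctx;
    Ok(MyResponse {
        greeting: format!("hello, {}", params.name),
    })
}

// Registered on a ParentHandler with `from_fn_async(my_handler)`, applying
// display modifiers before `with_about` as noted in step 4.
```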

View File

@@ -300,15 +300,6 @@ async fn perform_backup(
error: backup_result,
},
);
} else {
backup_report.insert(
id.clone(),
PackageBackupReport {
error: Some(
t!("backup.bulk.service-not-ready").to_string(),
),
},
);
}
}
@@ -332,7 +323,9 @@ async fn perform_backup(
os_backup_file.save().await?;
let luks_folder_old = backup_guard.path().join("luks.old");
crate::util::io::delete_dir(&luks_folder_old).await?;
if tokio::fs::metadata(&luks_folder_old).await.is_ok() {
tokio::fs::remove_dir_all(&luks_folder_old).await?;
}
let luks_folder_bak = backup_guard.path().join("luks");
if tokio::fs::metadata(&luks_folder_bak).await.is_ok() {
tokio::fs::rename(&luks_folder_bak, &luks_folder_old).await?;

View File

@@ -10,7 +10,6 @@ use tracing::instrument;
use ts_rs::TS;
use super::target::BackupTargetId;
use crate::PackageId;
use crate::backup::os::OsBackup;
use crate::context::setup::SetupResult;
use crate::context::{RpcContext, SetupContext};
@@ -27,6 +26,7 @@ use crate::service::service_map::DownloadInstallFuture;
use crate::setup::SetupExecuteProgress;
use crate::system::{save_language, sync_kiosk};
use crate::util::serde::{IoFormat, Pem};
use crate::{PLATFORM, PackageId};
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
@@ -86,11 +86,11 @@ pub async fn restore_packages_rpc(
pub async fn recover_full_server(
ctx: &SetupContext,
disk_guid: InternedString,
password: Option<String>,
password: String,
recovery_source: TmpMountGuard,
server_id: &str,
recovery_password: &str,
kiosk: bool,
kiosk: Option<bool>,
hostname: Option<ServerHostnameInfo>,
SetupExecuteProgress {
init_phases,
@@ -110,19 +110,18 @@ pub async fn recover_full_server(
.with_ctx(|_| (ErrorKind::Filesystem, os_backup_path.display().to_string()))?,
)?;
if let Some(password) = password {
os_backup.account.password = argon2::hash_encoded(
password.as_bytes(),
&rand::random::<[u8; 16]>()[..],
&argon2::Config::rfc9106_low_mem(),
)
.with_kind(ErrorKind::PasswordHashGeneration)?;
}
os_backup.account.password = argon2::hash_encoded(
password.as_bytes(),
&rand::random::<[u8; 16]>()[..],
&argon2::Config::rfc9106_low_mem(),
)
.with_kind(ErrorKind::PasswordHashGeneration)?;
if let Some(h) = hostname {
os_backup.account.hostname = h;
}
let kiosk = Some(kiosk.unwrap_or(true)).filter(|_| &*PLATFORM != "raspberrypi");
sync_kiosk(kiosk).await?;
let language = ctx.language.peek(|a| a.clone());

View File

@@ -7,6 +7,10 @@ use crate::service::cli::{ContainerCliContext, ContainerClientConfig};
use crate::util::logger::LOGGER;
use crate::version::{Current, VersionT};
lazy_static::lazy_static! {
static ref VERSION_STRING: String = Current::default().semver().to_string();
}
pub fn main(args: impl IntoIterator<Item = OsString>) {
LOGGER.enable();
if let Err(e) = CliApp::new(
@@ -14,10 +18,6 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
crate::service::effects::handler(),
)
.mutate_command(super::translate_cli)
.mutate_command(|cmd| {
cmd.name("start-container")
.version(Current::default().semver().to_string())
})
.run(args)
{
match e.data {

View File

@@ -149,11 +149,6 @@ impl MultiExecutable {
}
pub fn execute(&self) {
#[cfg(feature = "backtrace-on-stack-overflow")]
unsafe {
backtrace_on_stack_overflow::enable()
};
set_locale_from_env();
let mut popped = Vec::with_capacity(2);

View File

@@ -8,7 +8,6 @@ use tokio::signal::unix::signal;
use tracing::instrument;
use crate::context::CliContext;
use crate::version::{Current, VersionT};
use crate::context::config::ClientConfig;
use crate::net::web_server::{Acceptor, WebServer};
use crate::prelude::*;
@@ -102,10 +101,6 @@ pub fn cli(args: impl IntoIterator<Item = OsString>) {
crate::registry::registry_api(),
)
.mutate_command(super::translate_cli)
.mutate_command(|cmd| {
cmd.name("start-registry")
.version(Current::default().semver().to_string())
})
.run(args)
{
match e.data {

View File

@@ -8,6 +8,10 @@ use crate::context::config::ClientConfig;
use crate::util::logger::LOGGER;
use crate::version::{Current, VersionT};
lazy_static::lazy_static! {
static ref VERSION_STRING: String = Current::default().semver().to_string();
}
pub fn main(args: impl IntoIterator<Item = OsString>) {
LOGGER.enable();
@@ -16,10 +20,6 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
crate::main_api(),
)
.mutate_command(super::translate_cli)
.mutate_command(|cmd| {
cmd.name("start-cli")
.version(Current::default().semver().to_string())
})
.run(args)
{
match e.data {

View File

@@ -190,7 +190,7 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
}
}
});
rt.shutdown_timeout(Duration::from_millis(100));
rt.shutdown_timeout(Duration::from_secs(60));
res
};

View File

@@ -13,7 +13,6 @@ use tracing::instrument;
use visit_rs::Visit;
use crate::context::CliContext;
use crate::version::{Current, VersionT};
use crate::context::config::ClientConfig;
use crate::net::tls::TlsListener;
use crate::net::web_server::{Accept, Acceptor, MetadataVisitor, WebServer};
@@ -187,10 +186,6 @@ pub fn cli(args: impl IntoIterator<Item = OsString>) {
crate::tunnel::api::tunnel_api(),
)
.mutate_command(super::translate_cli)
.mutate_command(|cmd| {
cmd.name("start-tunnel")
.version(Current::default().semver().to_string())
})
.run(args)
{
match e.data {

View File

@@ -9,6 +9,7 @@ use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use crate::MAIN_DATA;
use crate::disk::OsPartitionInfo;
use crate::prelude::*;
use crate::util::serde::IoFormat;
use crate::version::VersionT;
@@ -119,6 +120,8 @@ impl ClientConfig {
pub struct ServerConfig {
#[arg(short, long, help = "help.arg.config-file-path")]
pub config: Option<PathBuf>,
#[arg(skip)]
pub os_partitions: Option<OsPartitionInfo>,
#[arg(long, help = "help.arg.socks-listen-address")]
pub socks_listen: Option<SocketAddr>,
#[arg(long, help = "help.arg.revision-cache-size")]
@@ -135,6 +138,7 @@ impl ContextConfig for ServerConfig {
self.config.take()
}
fn merge_with(&mut self, other: Self) {
self.os_partitions = self.os_partitions.take().or(other.os_partitions);
self.socks_listen = self.socks_listen.take().or(other.socks_listen);
self.revision_cache_size = self
.revision_cache_size

View File

@@ -39,7 +39,7 @@ impl DiagnosticContext {
shutdown,
disk_guid,
error: Arc::new(error.into()),
rpc_continuations: RpcContinuations::new(None),
rpc_continuations: RpcContinuations::new(),
})))
}
}

View File

@@ -32,7 +32,7 @@ impl InitContext {
error: watch::channel(None).0,
progress,
shutdown,
rpc_continuations: RpcContinuations::new(None),
rpc_continuations: RpcContinuations::new(),
})))
}
}

View File

@@ -10,6 +10,7 @@ use std::time::Duration;
use chrono::{TimeDelta, Utc};
use imbl::OrdMap;
use imbl_value::InternedString;
use itertools::Itertools;
use josekit::jwk::Jwk;
use reqwest::{Client, Proxy};
use rpc_toolkit::yajrc::RpcError;
@@ -24,6 +25,7 @@ use crate::account::AccountInfo;
use crate::auth::Sessions;
use crate::context::config::ServerConfig;
use crate::db::model::Database;
use crate::db::model::package::TaskSeverity;
use crate::disk::OsPartitionInfo;
use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::block_dev::BlockDev;
@@ -42,6 +44,7 @@ use crate::prelude::*;
use crate::progress::{FullProgressTracker, PhaseProgressTrackerHandle};
use crate::rpc_continuations::{Guid, OpenAuthedContinuations, RpcContinuations};
use crate::service::ServiceMap;
use crate::service::action::update_tasks;
use crate::service::effects::callbacks::ServiceCallbacks;
use crate::service::effects::subcontainer::NVIDIA_OVERLAY_PATH;
use crate::shutdown::Shutdown;
@@ -50,7 +53,7 @@ use crate::util::future::NonDetachingJoinHandle;
use crate::util::io::{TmpDir, delete_file};
use crate::util::lshw::LshwDevice;
use crate::util::sync::{SyncMutex, SyncRwLock, Watch};
use crate::{DATA_DIR, PLATFORM, PackageId};
use crate::{ActionId, DATA_DIR, PLATFORM, PackageId};
pub struct RpcContextSeed {
is_closed: AtomicBool,
@@ -62,8 +65,8 @@ pub struct RpcContextSeed {
pub db: TypedPatchDb<Database>,
pub sync_db: watch::Sender<u64>,
pub account: SyncRwLock<AccountInfo>,
pub os_net_service: NetService,
pub net_controller: Arc<NetController>,
pub os_net_service: NetService,
pub s9pk_arch: Option<&'static str>,
pub services: ServiceMap,
pub cancellable_installs: SyncMutex<BTreeMap<PackageId, oneshot::Sender<()>>>,
@@ -111,6 +114,7 @@ pub struct CleanupInitPhases {
cleanup_sessions: PhaseProgressTrackerHandle,
init_services: PhaseProgressTrackerHandle,
prune_s9pks: PhaseProgressTrackerHandle,
check_tasks: PhaseProgressTrackerHandle,
}
impl CleanupInitPhases {
pub fn new(handle: &FullProgressTracker) -> Self {
@@ -118,6 +122,7 @@ impl CleanupInitPhases {
cleanup_sessions: handle.add_phase("Cleaning up sessions".into(), Some(1)),
init_services: handle.add_phase("Initializing services".into(), Some(10)),
prune_s9pks: handle.add_phase("Pruning S9PKs".into(), Some(1)),
check_tasks: handle.add_phase("Checking action requests".into(), Some(1)),
}
}
}
@@ -168,7 +173,7 @@ impl RpcContext {
init_net_ctrl.complete();
tracing::info!("{}", t!("context.rpc.initialized-net-controller"));
if PLATFORM.ends_with("-nvidia") {
if PLATFORM.ends_with("-nonfree") {
if let Err(e) = Command::new("nvidia-smi")
.invoke(ErrorKind::ParseSysInfo)
.await
@@ -327,7 +332,12 @@ impl RpcContext {
let seed = Arc::new(RpcContextSeed {
is_closed: AtomicBool::new(false),
os_partitions: OsPartitionInfo::from_fstab().await?,
os_partitions: config.os_partitions.clone().ok_or_else(|| {
Error::new(
eyre!("{}", t!("context.rpc.os-partition-info-missing")),
ErrorKind::Filesystem,
)
})?,
wifi_interface: wifi_interface.clone(),
ethernet_interface: find_eth_iface().await?,
disk_guid,
@@ -346,10 +356,10 @@ impl RpcContext {
services,
cancellable_installs: SyncMutex::new(BTreeMap::new()),
metrics_cache,
rpc_continuations: RpcContinuations::new(Some(shutdown.clone())),
shutdown,
lxc_manager: Arc::new(LxcManager::new()),
open_authed_continuations: OpenAuthedContinuations::new(),
rpc_continuations: RpcContinuations::new(),
wifi_manager: Arc::new(RwLock::new(wifi_interface.clone().map(|i| WpaCli::init(i)))),
current_secret: Arc::new(
Jwk::generate_ec_key(josekit::jwk::alg::ec::EcCurve::P256).map_err(|e| {
@@ -401,6 +411,7 @@ impl RpcContext {
mut cleanup_sessions,
mut init_services,
mut prune_s9pks,
mut check_tasks,
}: CleanupInitPhases,
) -> Result<(), Error> {
cleanup_sessions.start();
@@ -492,6 +503,76 @@ impl RpcContext {
}
prune_s9pks.complete();
check_tasks.start();
let mut action_input: OrdMap<PackageId, BTreeMap<ActionId, Value>> = OrdMap::new();
let tasks: BTreeSet<_> = peek
.as_public()
.as_package_data()
.as_entries()?
.into_iter()
.map(|(_, pde)| {
Ok(pde
.as_tasks()
.as_entries()?
.into_iter()
.map(|(_, r)| {
let t = r.as_task();
Ok::<_, Error>(if t.as_input().transpose_ref().is_some() {
Some((t.as_package_id().de()?, t.as_action_id().de()?))
} else {
None
})
})
.filter_map_ok(|a| a))
})
.flatten_ok()
.map(|a| a.and_then(|a| a))
.try_collect()?;
let procedure_id = Guid::new();
for (package_id, action_id) in tasks {
if let Some(service) = self.services.get(&package_id).await.as_ref() {
if let Some(input) = service
.get_action_input(procedure_id.clone(), action_id.clone(), Value::Null)
.await
.log_err()
.flatten()
.and_then(|i| i.value)
{
action_input
.entry(package_id)
.or_default()
.insert(action_id, input);
}
}
}
self.db
.mutate(|db| {
for (package_id, action_input) in &action_input {
for (action_id, input) in action_input {
for (_, pde) in db.as_public_mut().as_package_data_mut().as_entries_mut()? {
pde.as_tasks_mut().mutate(|tasks| {
Ok(update_tasks(tasks, package_id, action_id, input, false))
})?;
}
}
}
for (_, pde) in db.as_public_mut().as_package_data_mut().as_entries_mut()? {
if pde
.as_tasks()
.de()?
.into_iter()
.any(|(_, t)| t.active && t.task.severity == TaskSeverity::Critical)
{
pde.as_status_info_mut().stop()?;
}
}
Ok(())
})
.await
.result?;
check_tasks.complete();
Ok(())
}
pub async fn call_remote<RemoteContext>(

View File

@@ -85,7 +85,7 @@ impl SetupContext {
result: OnceCell::new(),
disk_guid: OnceCell::new(),
shutdown,
rpc_continuations: RpcContinuations::new(None),
rpc_continuations: RpcContinuations::new(),
install_rootfs: SyncMutex::new(None),
language: SyncMutex::new(None),
keyboard: SyncMutex::new(None),

View File

@@ -31,7 +31,7 @@ pub struct Database {
impl Database {
pub fn init(
account: &AccountInfo,
kiosk: bool,
kiosk: Option<bool>,
language: Option<InternedString>,
keyboard: Option<KeyboardOptions>,
) -> Result<Self, Error> {

View File

@@ -24,7 +24,7 @@ use crate::net::host::Host;
use crate::net::host::binding::{
AddSslOptions, BindInfo, BindOptions, Bindings, DerivedAddressInfo, NetInfo,
};
use crate::net::vhost::{AlpnInfo, PassthroughInfo};
use crate::net::vhost::AlpnInfo;
use crate::prelude::*;
use crate::progress::FullProgress;
use crate::system::{KeyboardOptions, SmtpValue};
@@ -49,7 +49,7 @@ pub struct Public {
impl Public {
pub fn init(
account: &AccountInfo,
kiosk: bool,
kiosk: Option<bool>,
language: Option<InternedString>,
keyboard: Option<KeyboardOptions>,
) -> Result<Self, Error> {
@@ -121,7 +121,6 @@ impl Public {
},
dns: Default::default(),
default_outbound: None,
passthroughs: Vec::new(),
},
status_info: ServerStatus {
backup_progress: None,
@@ -146,10 +145,10 @@ impl Public {
zram: true,
governor: None,
smtp: None,
echoip_urls: default_echoip_urls(),
ifconfig_url: default_ifconfig_url(),
ram: 0,
devices: Vec::new(),
kiosk: Some(kiosk).filter(|_| &*PLATFORM != "raspberrypi"),
kiosk,
language,
keyboard,
},
@@ -168,11 +167,8 @@ fn get_platform() -> InternedString {
(&*PLATFORM).into()
}
pub fn default_echoip_urls() -> Vec<Url> {
vec![
"https://ipconfig.io".parse().unwrap(),
"https://ifconfig.co".parse().unwrap(),
]
pub fn default_ifconfig_url() -> Url {
"https://ifconfig.co".parse().unwrap()
}
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
@@ -209,9 +205,9 @@ pub struct ServerInfo {
pub zram: bool,
pub governor: Option<Governor>,
pub smtp: Option<SmtpValue>,
#[serde(default = "default_echoip_urls")]
#[ts(type = "string[]")]
pub echoip_urls: Vec<Url>,
#[serde(default = "default_ifconfig_url")]
#[ts(type = "string")]
pub ifconfig_url: Url,
#[ts(type = "number")]
pub ram: u64,
pub devices: Vec<LshwDevice>,
@@ -237,8 +233,6 @@ pub struct NetworkInfo {
#[serde(default)]
#[ts(type = "string | null")]
pub default_outbound: Option<GatewayId>,
#[serde(default)]
pub passthroughs: Vec<PassthroughInfo>,
}
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]

View File

@@ -25,28 +25,20 @@ pub enum RepairStrategy {
Preen,
Aggressive,
}
/// Detects the filesystem type of a block device using `grub-probe`.
/// Returns e.g. `"ext2"` (for ext4), `"btrfs"`, etc.
pub async fn detect_filesystem(
logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<String, Error> {
Ok(String::from_utf8(
Command::new("grub-probe")
.arg("-d")
.arg(logicalname.as_ref())
.invoke(crate::ErrorKind::DiskManagement)
.await?,
)?
.trim()
.to_owned())
}
impl RepairStrategy {
pub async fn fsck(
&self,
logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> {
match &*detect_filesystem(&logicalname).await? {
match &*String::from_utf8(
Command::new("grub-probe")
.arg("-d")
.arg(logicalname.as_ref())
.invoke(crate::ErrorKind::DiskManagement)
.await?,
)?
.trim()
{
"ext2" => self.e2fsck(logicalname).await,
"btrfs" => self.btrfs_check(logicalname).await,
fs => {

View File

@@ -7,7 +7,7 @@ use rust_i18n::t;
use tokio::process::Command;
use tracing::instrument;
use super::fsck::{RepairStrategy, RequiresReboot, detect_filesystem};
use super::fsck::{RepairStrategy, RequiresReboot};
use super::util::pvscan;
use crate::disk::mount::filesystem::block_dev::BlockDev;
use crate::disk::mount::filesystem::{FileSystem, ReadWrite};
@@ -301,37 +301,6 @@ pub async fn mount_fs<P: AsRef<Path>>(
.with_ctx(|_| (crate::ErrorKind::Filesystem, PASSWORD_PATH))?;
blockdev_path = Path::new("/dev/mapper").join(&full_name);
}
// Convert ext4 → btrfs on the package-data partition if needed
let fs_type = detect_filesystem(&blockdev_path).await?;
if fs_type == "ext2" {
tracing::info!("Running e2fsck before converting {name} from ext4 to btrfs");
Command::new("e2fsck")
.arg("-fy")
.arg(&blockdev_path)
.invoke(ErrorKind::DiskManagement)
.await?;
tracing::info!("Converting {name} from ext4 to btrfs");
Command::new("btrfs-convert")
.arg("--no-progress")
.arg(&blockdev_path)
.invoke(ErrorKind::DiskManagement)
.await?;
// Defragment after conversion for optimal performance
let tmp_mount = datadir.as_ref().join(format!("{name}.convert-tmp"));
tokio::fs::create_dir_all(&tmp_mount).await?;
BlockDev::new(&blockdev_path)
.mount(&tmp_mount, ReadWrite)
.await?;
Command::new("btrfs")
.args(["filesystem", "defragment", "-r"])
.arg(&tmp_mount)
.invoke(ErrorKind::DiskManagement)
.await?;
unmount(&tmp_mount, false).await?;
tokio::fs::remove_dir(&tmp_mount).await?;
}
let reboot = repair.fsck(&blockdev_path).await?;
if !guid.ends_with("_UNENC") {
@@ -373,99 +342,3 @@ pub async fn mount_all_fs<P: AsRef<Path>>(
reboot |= mount_fs(guid, &datadir, "package-data", repair, password).await?;
Ok(reboot)
}
/// Temporarily activates a VG and opens LUKS to probe the `package-data`
/// filesystem type. Returns `None` if probing fails (e.g. LV doesn't exist).
#[instrument(skip_all)]
pub async fn probe_package_data_fs(guid: &str) -> Result<Option<String>, Error> {
// Import and activate the VG
match Command::new("vgimport")
.arg(guid)
.invoke(ErrorKind::DiskManagement)
.await
{
Ok(_) => {}
Err(e)
if format!("{}", e.source)
.lines()
.any(|l| l.trim() == format!("Volume group \"{}\" is not exported", guid)) =>
{
// Already imported, that's fine
}
Err(e) => {
tracing::warn!("Could not import VG {guid} for filesystem probe: {e}");
return Ok(None);
}
}
if let Err(e) = Command::new("vgchange")
.arg("-ay")
.arg(guid)
.invoke(ErrorKind::DiskManagement)
.await
{
tracing::warn!("Could not activate VG {guid} for filesystem probe: {e}");
return Ok(None);
}
let mut opened_luks = false;
let result = async {
let lv_path = Path::new("/dev").join(guid).join("package-data");
if tokio::fs::metadata(&lv_path).await.is_err() {
return Ok(None);
}
let blockdev_path = if !guid.ends_with("_UNENC") {
let full_name = format!("{guid}_package-data");
let password = DEFAULT_PASSWORD;
if let Some(parent) = Path::new(PASSWORD_PATH).parent() {
tokio::fs::create_dir_all(parent).await?;
}
tokio::fs::write(PASSWORD_PATH, password)
.await
.with_ctx(|_| (ErrorKind::Filesystem, PASSWORD_PATH))?;
Command::new("cryptsetup")
.arg("-q")
.arg("luksOpen")
.arg("--allow-discards")
.arg(format!("--key-file={PASSWORD_PATH}"))
.arg(format!("--keyfile-size={}", password.len()))
.arg(&lv_path)
.arg(&full_name)
.invoke(ErrorKind::DiskManagement)
.await?;
let _ = tokio::fs::remove_file(PASSWORD_PATH).await;
opened_luks = true;
PathBuf::from(format!("/dev/mapper/{full_name}"))
} else {
lv_path.clone()
};
detect_filesystem(&blockdev_path).await.map(Some)
}
.await;
// Always clean up: close LUKS, deactivate VG, export VG
if opened_luks {
let full_name = format!("{guid}_package-data");
Command::new("cryptsetup")
.arg("-q")
.arg("luksClose")
.arg(&full_name)
.invoke(ErrorKind::DiskManagement)
.await
.log_err();
}
Command::new("vgchange")
.arg("-an")
.arg(guid)
.invoke(ErrorKind::DiskManagement)
.await
.log_err();
Command::new("vgexport")
.arg(guid)
.invoke(ErrorKind::DiskManagement)
.await
.log_err();
result
}
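The probe helper above pairs with the ext-to-btrfs conversion earlier in this file: a caller can check the package-data filesystem type without leaving anything mounted, then decide whether the next mount will run a conversion pass. A minimal sketch of such a caller (the `needs_conversion` helper is hypothetical, not part of this changeset):
// Hypothetical caller: reports whether the next mount_fs of package-data
// would take the conversion path (which fires when detect_filesystem
// returns "ext2").
async fn needs_conversion(guid: &str) -> Result<bool, Error> {
    // probe_package_data_fs activates the VG, opens LUKS if needed, reads the
    // filesystem type, then closes and exports everything again.
    Ok(probe_package_data_fs(guid).await?.as_deref() == Some("ext2"))
}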

View File

@@ -1,17 +1,13 @@
use std::collections::BTreeMap;
use std::path::{Path, PathBuf};
use itertools::Itertools;
use lazy_format::lazy_format;
use rpc_toolkit::{CallRemoteHandler, Context, Empty, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize};
use tokio::process::Command;
use crate::{Error, ErrorKind};
use crate::Error;
use crate::context::{CliContext, RpcContext};
use crate::disk::util::DiskInfo;
use crate::prelude::*;
use crate::util::Invoke;
use crate::util::serde::{HandlerExtSerde, WithIoFormat, display_serializable};
pub mod fsck;
@@ -25,143 +21,27 @@ pub const REPAIR_DISK_PATH: &str = "/media/startos/config/repair-disk";
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct OsPartitionInfo {
pub efi: Option<PathBuf>,
pub bios: Option<PathBuf>,
pub boot: PathBuf,
pub root: PathBuf,
#[serde(default)]
pub extra_boot: BTreeMap<String, PathBuf>,
#[serde(skip)]
#[serde(skip)] // internal use only
pub data: Option<PathBuf>,
}
impl OsPartitionInfo {
pub fn contains(&self, logicalname: impl AsRef<Path>) -> bool {
let p = logicalname.as_ref();
self.bios.as_deref() == Some(p)
|| p == &*self.boot
|| p == &*self.root
|| self.extra_boot.values().any(|v| v == p)
self.efi
.as_ref()
.map(|p| p == logicalname.as_ref())
.unwrap_or(false)
|| self
.bios
.as_ref()
.map(|p| p == logicalname.as_ref())
.unwrap_or(false)
|| &*self.boot == logicalname.as_ref()
|| &*self.root == logicalname.as_ref()
}
/// Build partition info by parsing /etc/fstab and resolving device specs,
/// then discovering the BIOS boot partition (which is never mounted).
pub async fn from_fstab() -> Result<Self, Error> {
let fstab = tokio::fs::read_to_string("/etc/fstab")
.await
.with_ctx(|_| (ErrorKind::Filesystem, "/etc/fstab"))?;
let mut boot = None;
let mut root = None;
let mut extra_boot = BTreeMap::new();
for line in fstab.lines() {
let line = line.trim();
if line.is_empty() || line.starts_with('#') {
continue;
}
let mut fields = line.split_whitespace();
let Some(source) = fields.next() else {
continue;
};
let Some(target) = fields.next() else {
continue;
};
let dev = match resolve_fstab_source(source).await {
Ok(d) => d,
Err(e) => {
tracing::warn!("Failed to resolve fstab source {source}: {e}");
continue;
}
};
match target {
"/" => root = Some(dev),
"/boot" => boot = Some(dev),
t if t.starts_with("/boot/") => {
if let Some(name) = t.strip_prefix("/boot/") {
extra_boot.insert(name.to_string(), dev);
}
}
_ => {}
}
}
let boot = boot.unwrap_or_default();
let bios = if !boot.as_os_str().is_empty() {
find_bios_boot_partition(&boot).await.ok().flatten()
} else {
None
};
Ok(Self {
bios,
boot,
root: root.unwrap_or_default(),
extra_boot,
data: None,
})
}
}
const BIOS_BOOT_TYPE_GUID: &str = "21686148-6449-6e6f-744e-656564726548";
/// Find the BIOS boot partition on the same disk as `known_part`.
async fn find_bios_boot_partition(known_part: &Path) -> Result<Option<PathBuf>, Error> {
let output = Command::new("lsblk")
.args(["-n", "-l", "-o", "NAME,PKNAME,PARTTYPE"])
.arg(known_part)
.invoke(ErrorKind::DiskManagement)
.await?;
let text = String::from_utf8(output)?;
let parent_disk = text.lines().find_map(|line| {
let mut fields = line.split_whitespace();
let _name = fields.next()?;
let pkname = fields.next()?;
(!pkname.is_empty()).then(|| pkname.to_string())
});
let Some(parent_disk) = parent_disk else {
return Ok(None);
};
let output = Command::new("lsblk")
.args(["-n", "-l", "-o", "NAME,PARTTYPE"])
.arg(format!("/dev/{parent_disk}"))
.invoke(ErrorKind::DiskManagement)
.await?;
let text = String::from_utf8(output)?;
for line in text.lines() {
let mut fields = line.split_whitespace();
let Some(name) = fields.next() else { continue };
let Some(parttype) = fields.next() else {
continue;
};
if parttype.eq_ignore_ascii_case(BIOS_BOOT_TYPE_GUID) {
return Ok(Some(PathBuf::from(format!("/dev/{name}"))));
}
}
Ok(None)
}
/// Resolve an fstab device spec (e.g. /dev/sda1, PARTUUID=..., UUID=...) to a
/// canonical device path.
async fn resolve_fstab_source(source: &str) -> Result<PathBuf, Error> {
if source.starts_with('/') {
return Ok(
tokio::fs::canonicalize(source)
.await
.unwrap_or_else(|_| PathBuf::from(source)),
);
}
// PARTUUID=, UUID=, LABEL= — resolve via blkid
let output = Command::new("blkid")
.args(["-o", "device", "-t", source])
.invoke(ErrorKind::DiskManagement)
.await?;
Ok(PathBuf::from(String::from_utf8(output)?.trim()))
}
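For reference, a sketch of the fstab shapes this parser handles; the device names and UUIDs below are illustrative:
// Example /etc/fstab entries understood by from_fstab / resolve_fstab_source:
//   /dev/sda3            /          ext4  defaults   0 1   -> root
//   UUID=1234-ABCD       /boot      vfat  umask=0077 0 2   -> boot
//   PARTUUID=deadbeef-01 /boot/efi  vfat  umask=0077 0 1   -> extra_boot["efi"]
// Absolute paths are canonicalized directly; UUID= / PARTUUID= / LABEL= specs
// are resolved to a device path via `blkid -o device -t <spec>`.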
pub fn disk<C: Context>() -> ParentHandler<C> {

View File

@@ -53,7 +53,9 @@ impl<G: GenericMountGuard> BackupMountGuard<G> {
})?,
)?
} else {
crate::util::io::delete_dir(&crypt_path).await?;
if tokio::fs::metadata(&crypt_path).await.is_ok() {
tokio::fs::remove_dir_all(&crypt_path).await?;
}
Default::default()
};
let enc_key = if let (Some(hash), Some(wrapped_key)) = (

View File

@@ -52,19 +52,13 @@ pub async fn bind<P0: AsRef<Path>, P1: AsRef<Path>>(
pub async fn unmount<P: AsRef<Path>>(mountpoint: P, lazy: bool) -> Result<(), Error> {
tracing::debug!("Unmounting {}.", mountpoint.as_ref().display());
let mut cmd = tokio::process::Command::new("umount");
cmd.env("LANG", "C.UTF-8");
if lazy {
cmd.arg("-l");
}
match cmd
.arg(mountpoint.as_ref())
cmd.arg(mountpoint.as_ref())
.invoke(crate::ErrorKind::Filesystem)
.await
{
Ok(_) => Ok(()),
Err(e) if e.to_string().contains("not mounted") => Ok(()),
Err(e) => Err(e),
}
.await?;
Ok(())
}
/// Unmounts all mountpoints under (and including) the given path, in reverse

View File

@@ -41,7 +41,6 @@ pub struct DiskInfo {
pub partitions: Vec<PartitionInfo>,
pub capacity: u64,
pub guid: Option<InternedString>,
pub filesystem: Option<String>,
}
#[derive(Clone, Debug, Deserialize, Serialize, ts_rs::TS)]
@@ -56,7 +55,6 @@ pub struct PartitionInfo {
pub used: Option<u64>,
pub start_os: BTreeMap<String, StartOsRecoveryInfo>,
pub guid: Option<InternedString>,
pub filesystem: Option<String>,
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, ts_rs::TS)]
@@ -376,15 +374,6 @@ pub async fn list(os: &OsPartitionInfo) -> Result<Vec<DiskInfo>, Error> {
disk_info.capacity = part_info.capacity;
if let Some(g) = disk_guids.get(&disk_info.logicalname) {
disk_info.guid = g.clone();
if let Some(guid) = g {
disk_info.filesystem =
crate::disk::main::probe_package_data_fs(guid)
.await
.unwrap_or_else(|e| {
tracing::warn!("Failed to probe filesystem for {guid}: {e}");
None
});
}
} else {
disk_info.partitions = vec![part_info];
}
@@ -395,31 +384,11 @@ pub async fn list(os: &OsPartitionInfo) -> Result<Vec<DiskInfo>, Error> {
disk_info.partitions = Vec::with_capacity(index.parts.len());
if let Some(g) = disk_guids.get(&disk_info.logicalname) {
disk_info.guid = g.clone();
if let Some(guid) = g {
disk_info.filesystem =
crate::disk::main::probe_package_data_fs(guid)
.await
.unwrap_or_else(|e| {
tracing::warn!("Failed to probe filesystem for {guid}: {e}");
None
});
}
} else {
for part in index.parts {
let mut part_info = part_info(part).await;
if let Some(g) = disk_guids.get(&part_info.logicalname) {
part_info.guid = g.clone();
if let Some(guid) = g {
part_info.filesystem =
crate::disk::main::probe_package_data_fs(guid)
.await
.unwrap_or_else(|e| {
tracing::warn!(
"Failed to probe filesystem for {guid}: {e}"
);
None
});
}
}
disk_info.partitions.push(part_info);
}
@@ -492,7 +461,6 @@ async fn disk_info(disk: PathBuf) -> DiskInfo {
partitions: Vec::new(),
capacity,
guid: None,
filesystem: None,
}
}
@@ -576,7 +544,6 @@ async fn part_info(part: PathBuf) -> PartitionInfo {
used,
start_os,
guid: None,
filesystem: None,
}
}

View File

@@ -101,7 +101,6 @@ pub enum ErrorKind {
UpdateFailed = 77,
Smtp = 78,
SetSysInfo = 79,
Bios = 80,
}
impl ErrorKind {
pub fn as_str(&self) -> String {
@@ -186,7 +185,6 @@ impl ErrorKind {
UpdateFailed => t!("error.update-failed"),
Smtp => t!("error.smtp"),
SetSysInfo => t!("error.set-sys-info"),
Bios => t!("error.bios"),
}
.to_string()
}

View File

@@ -251,35 +251,18 @@ pub async fn set_hostname_rpc(
ctx: RpcContext,
SetServerHostnameParams { name, hostname }: SetServerHostnameParams,
) -> Result<(), Error> {
let name = name.filter(|n| !n.is_empty());
let hostname = hostname
.filter(|h| !h.is_empty())
.map(ServerHostname::new)
.transpose()?;
if name.is_none() && hostname.is_none() {
let Some(hostname) = ServerHostnameInfo::new_opt(name, hostname)? else {
return Err(Error::new(
eyre!("{}", t!("hostname.must-provide-name-or-hostname")),
ErrorKind::InvalidRequest,
));
};
let info = ctx
.db
.mutate(|db| {
let server_info = db.as_public_mut().as_server_info_mut();
if let Some(name) = name {
server_info.as_name_mut().ser(&name)?;
}
if let Some(hostname) = &hostname {
hostname.save(server_info)?;
}
ServerHostnameInfo::load(server_info)
})
ctx.db
.mutate(|db| hostname.save(db.as_public_mut().as_server_info_mut()))
.await
.result?;
ctx.account.mutate(|a| a.hostname = info.clone());
if let Some(h) = hostname {
sync_hostname(&h).await?;
}
ctx.account.mutate(|a| a.hostname = hostname.clone());
sync_hostname(&hostname.hostname).await?;
Ok(())
}
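Both sides of this hunk enforce the same precondition, summarized here for quick reference:
// SetServerHostnameParams accepts: name only, hostname only, or both.
// Empty strings are treated as absent; if neither field is provided the
// handler rejects the call with ErrorKind::InvalidRequest.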

View File

@@ -173,13 +173,6 @@ pub async fn init(
RpcContext::init_auth_cookie().await?;
local_auth.complete();
// Re-enroll MOK on every boot if Secure Boot key exists but isn't enrolled yet
if let Err(e) =
crate::util::mok::enroll_mok(std::path::Path::new(crate::util::mok::DKMS_MOK_PUB)).await
{
tracing::warn!("MOK enrollment failed: {e}");
}
load_database.start();
let db = cfg.db().await?;
crate::version::Current::default().pre_init(&db).await?;
@@ -298,15 +291,21 @@ pub async fn init(
init_tmp.start();
let tmp_dir = Path::new(PACKAGE_DATA).join("tmp");
crate::util::io::delete_dir(&tmp_dir).await?;
if tokio::fs::metadata(&tmp_dir).await.is_ok() {
tokio::fs::remove_dir_all(&tmp_dir).await?;
}
if tokio::fs::metadata(&tmp_dir).await.is_err() {
tokio::fs::create_dir_all(&tmp_dir).await?;
}
let tmp_var = Path::new(PACKAGE_DATA).join("tmp/var");
crate::util::io::delete_dir(&tmp_var).await?;
if tokio::fs::metadata(&tmp_var).await.is_ok() {
tokio::fs::remove_dir_all(&tmp_var).await?;
}
crate::disk::mount::util::bind(&tmp_var, "/var/tmp", false).await?;
let downloading = Path::new(PACKAGE_DATA).join("archive/downloading");
crate::util::io::delete_dir(&downloading).await?;
if tokio::fs::metadata(&downloading).await.is_ok() {
tokio::fs::remove_dir_all(&downloading).await?;
}
let tmp_docker = Path::new(PACKAGE_DATA).join("tmp").join(*CONTAINER_TOOL);
crate::disk::mount::util::bind(&tmp_docker, *CONTAINER_DATADIR, false).await?;
init_tmp.complete();
@@ -371,7 +370,7 @@ pub async fn init(
enable_zram.complete();
update_server_info.start();
sync_kiosk(server_info.as_kiosk().de()?.unwrap_or(false)).await?;
sync_kiosk(server_info.as_kiosk().de()?).await?;
let ram = get_mem_info().await?.total.0 as u64 * 1024 * 1024;
let devices = lshw().await?;
let status_info = ServerStatus {

View File

@@ -25,9 +25,6 @@ pub fn platform_to_arch(platform: &str) -> &str {
if let Some(arch) = platform.strip_suffix("-nonfree") {
return arch;
}
if let Some(arch) = platform.strip_suffix("-nvidia") {
return arch;
}
match platform {
"raspberrypi" | "rockchip64" => "aarch64",
_ => platform,
@@ -271,18 +268,6 @@ pub fn server<C: Context>() -> ParentHandler<C> {
.with_about("about.display-time-uptime")
.with_call_remote::<CliContext>(),
)
.subcommand(
"device-info",
ParentHandler::<C, WithIoFormat<Empty>>::new().root_handler(
from_fn_async(system::device_info)
.with_display_serializable()
.with_custom_display_fn(|handle, result| {
system::display_device_info(handle.params, result)
})
.with_about("about.get-device-info")
.with_call_remote::<CliContext>(),
),
)
.subcommand(
"experimental",
system::experimental::<C>().with_about("about.commands-experimental"),
@@ -400,10 +385,10 @@ pub fn server<C: Context>() -> ParentHandler<C> {
.with_call_remote::<CliContext>(),
)
.subcommand(
"set-echoip-urls",
from_fn_async(system::set_echoip_urls)
"set-ifconfig-url",
from_fn_async(system::set_ifconfig_url)
.no_display()
.with_about("about.set-echoip-urls")
.with_about("about.set-ifconfig-url")
.with_call_remote::<CliContext>(),
)
.subcommand(

View File

@@ -20,6 +20,9 @@ use crate::context::RpcContext;
use crate::middleware::auth::DbContext;
use crate::prelude::*;
use crate::rpc_continuations::OpenAuthedContinuations;
use crate::util::Invoke;
use crate::util::io::{create_file_mod, read_file_to_string};
use crate::util::serde::{BASE64, const_true};
use crate::util::sync::SyncMutex;
pub trait SessionAuthContext: DbContext {

View File

@@ -27,7 +27,7 @@ use crate::db::model::public::AcmeSettings;
use crate::db::{DbAccess, DbAccessByKey, DbAccessMut};
use crate::error::ErrorData;
use crate::net::ssl::should_use_cert;
use crate::net::tls::{SingleCertResolver, TlsHandler, TlsHandlerAction};
use crate::net::tls::{SingleCertResolver, TlsHandler};
use crate::net::web_server::Accept;
use crate::prelude::*;
use crate::util::FromStrParser;
@@ -173,7 +173,7 @@ where
&'a mut self,
hello: &'a ClientHello<'a>,
_: &'a <A as Accept>::Metadata,
) -> Option<TlsHandlerAction> {
) -> Option<ServerConfig> {
let domain = hello.server_name()?;
if hello
.alpn()
@@ -207,20 +207,20 @@ where
cfg.alpn_protocols = vec![ACME_TLS_ALPN_NAME.to_vec()];
tracing::info!("performing ACME auth challenge");
return Some(TlsHandlerAction::Tls(cfg));
return Some(cfg);
}
let domains: BTreeSet<InternedString> = [domain.into()].into_iter().collect();
let crypto_provider = self.crypto_provider.clone();
if let Some(cert) = self.get_cert(&domains).await {
return Some(TlsHandlerAction::Tls(
return Some(
ServerConfig::builder_with_provider(crypto_provider)
.with_safe_default_protocol_versions()
.log_err()?
.with_no_client_auth()
.with_cert_resolver(Arc::new(SingleCertResolver(Arc::new(cert)))),
));
);
}
None

View File

@@ -32,7 +32,6 @@ use crate::context::{CliContext, RpcContext};
use crate::db::model::Database;
use crate::db::model::public::NetworkInterfaceInfo;
use crate::net::gateway::NetworkInterfaceWatcher;
use crate::net::utils::is_private_ip;
use crate::prelude::*;
use crate::util::future::NonDetachingJoinHandle;
use crate::util::io::file_string_stream;
@@ -401,18 +400,6 @@ impl Resolver {
})
}) {
return Some(res);
} else if is_private_ip(src) {
// Source is a private IP not in any known subnet (e.g. VPN on a different VLAN).
// Return all private IPs from all interfaces.
let res: Vec<IpAddr> = self.net_iface.peek(|i| {
i.values()
.filter_map(|i| i.ip_info.as_ref())
.flat_map(|ip_info| ip_info.subnets.iter().map(|s| s.addr()))
.collect()
});
if !res.is_empty() {
return Some(res);
}
} else {
tracing::warn!(
"{}",

View File

@@ -174,38 +174,28 @@ async fn set_name(
#[derive(Debug, Clone, Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct CheckPortParams {
struct CheckPortParams {
#[arg(help = "help.arg.port")]
pub port: u16,
port: u16,
#[arg(help = "help.arg.gateway-id")]
pub gateway: GatewayId,
gateway: GatewayId,
}
#[derive(Debug, Clone, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct CheckPortRes {
pub ip: Ipv4Addr,
pub port: u16,
pub open_externally: bool,
pub open_internally: bool,
pub hairpinning: bool,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct IfconfigPortRes {
pub ip: Ipv4Addr,
pub port: u16,
pub reachable: bool,
}
pub async fn check_port(
async fn check_port(
ctx: RpcContext,
CheckPortParams { port, gateway }: CheckPortParams,
) -> Result<CheckPortRes, Error> {
let db = ctx.db.peek().await;
let base_urls = db.as_public().as_server_info().as_echoip_urls().de()?;
let base_url = db.as_public().as_server_info().as_ifconfig_url().de()?;
let gateways = db
.as_public()
.as_server_info()
@@ -221,86 +211,35 @@ pub async fn check_port(
ErrorKind::NotFound,
)
})?;
let internal_ips = ip_info
.subnets
.iter()
.map(|i| i.addr())
.filter(|a| a.is_ipv4())
.map(|a| SocketAddr::new(a, port))
.collect::<Vec<_>>();
let open_internally = tokio::time::timeout(
Duration::from_secs(5),
tokio::net::TcpStream::connect(&*internal_ips),
)
.await
.map_or(false, |r| r.is_ok());
let iface = &*ip_info.name;
let client = reqwest::Client::builder();
#[cfg(target_os = "linux")]
let client = client.interface(gateway.as_str());
let client = client.build()?;
let mut res = None;
for base_url in base_urls {
let url = base_url
.join(&format!("/port/{port}"))
.with_kind(ErrorKind::ParseUrl)?;
res = Some(
async {
client
.get(url)
.timeout(Duration::from_secs(5))
.send()
.await?
.error_for_status()?
.json()
.await
}
.await,
);
if res.as_ref().map_or(false, |r| r.is_ok()) {
break;
}
}
let Some(IfconfigPortRes {
ip,
port,
reachable: open_externally,
}) = res.transpose()?
else {
return Err(Error::new(
eyre!("{}", t!("net.gateway.no-configured-echoip-urls")),
ErrorKind::Network,
));
};
let hairpinning = tokio::time::timeout(
Duration::from_secs(5),
tokio::net::TcpStream::connect(SocketAddr::new(ip.into(), port)),
)
.await
.map_or(false, |r| r.is_ok());
Ok(CheckPortRes {
ip,
port,
open_externally,
open_internally,
hairpinning,
})
let client = client.interface(iface);
let url = base_url
.join(&format!("/port/{port}"))
.with_kind(ErrorKind::ParseUrl)?;
let res: CheckPortRes = client
.build()?
.get(url)
.timeout(Duration::from_secs(10))
.send()
.await?
.error_for_status()?
.json()
.await?;
Ok(res)
}
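On the side of this diff that distinguishes internal, external, and hairpin reachability, the result reads as a small decision table; a sketch of how a caller might surface it (the `describe` helper is hypothetical):
// Hypothetical helper interpreting the richer CheckPortRes variant:
fn describe(res: &CheckPortRes) -> &'static str {
    match (res.open_externally, res.open_internally, res.hairpinning) {
        (true, true, true) => "reachable from the internet and the LAN; NAT hairpinning works",
        (true, true, false) => "reachable from outside, but LAN clients must use the internal address",
        (false, true, _) => "service is listening locally, but the port is not reachable from the internet",
        (false, false, _) => "nothing reachable on this port, internally or externally",
        (true, false, _) => "reachable from the internet, but not via the internal address",
    }
}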
#[derive(Debug, Clone, Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct CheckDnsParams {
struct CheckDnsParams {
#[arg(help = "help.arg.gateway-id")]
pub gateway: GatewayId,
gateway: GatewayId,
}
pub async fn check_dns(
async fn check_dns(
ctx: RpcContext,
CheckDnsParams { gateway }: CheckDnsParams,
) -> Result<bool, Error> {
@@ -780,7 +719,7 @@ async fn get_wan_ipv4(iface: &str, base_url: &Url) -> Result<Option<Ipv4Addr>, E
let text = client
.build()?
.get(url)
.timeout(Duration::from_secs(5))
.timeout(Duration::from_secs(10))
.send()
.await?
.error_for_status()?
@@ -876,7 +815,7 @@ async fn watch_ip(
.fuse()
});
let mut echoip_ratelimit_state: BTreeMap<Url, Instant> = BTreeMap::new();
let mut prev_attempt: Option<Instant> = None;
loop {
until
@@ -986,7 +925,7 @@ async fn watch_ip(
&dhcp4_proxy,
&policy_guard,
&iface,
&mut echoip_ratelimit_state,
&mut prev_attempt,
db,
write_to,
device_type,
@@ -1018,16 +957,18 @@ async fn apply_policy_routing(
})
.copied();
// Rebuild per-interface routing table using `ip route replace` to avoid
// the connectivity gap that a flush+add cycle would create. We replace
// every desired route in-place (each replace is atomic in the kernel),
// then delete any stale routes that are no longer in the desired set.
// Collect the set of desired non-default route prefixes (the first
// whitespace-delimited token of each `ip route show` line is the
// destination prefix, e.g. "192.168.1.0/24" or "10.0.0.0/8").
let mut desired_prefixes = BTreeSet::<String>::new();
// Flush and rebuild per-interface routing table.
// Clone all non-default routes from the main table so that LAN IPs on
// other subnets remain reachable when the priority-75 catch-all overrides
// default routing, then replace the default route with this interface's.
Command::new("ip")
.arg("route")
.arg("flush")
.arg("table")
.arg(&table_str)
.invoke(ErrorKind::Network)
.await
.log_err();
if let Ok(main_routes) = Command::new("ip")
.arg("route")
.arg("show")
@@ -1042,14 +983,11 @@ async fn apply_policy_routing(
if line.is_empty() || line.starts_with("default") {
continue;
}
if let Some(prefix) = line.split_whitespace().next() {
desired_prefixes.insert(prefix.to_owned());
}
let mut cmd = Command::new("ip");
cmd.arg("route").arg("replace");
cmd.arg("route").arg("add");
for part in line.split_whitespace() {
// Skip status flags that appear in route output but
// are not valid for `ip route replace`.
// are not valid for `ip route add`.
if part == "linkdown" || part == "dead" {
continue;
}
@@ -1059,11 +997,10 @@ async fn apply_policy_routing(
cmd.invoke(ErrorKind::Network).await.log_err();
}
}
// Replace the default route via this interface's gateway.
// Add default route via this interface's gateway
{
let mut cmd = Command::new("ip");
cmd.arg("route").arg("replace").arg("default");
cmd.arg("route").arg("add").arg("default");
if let Some(gw) = ipv4_gateway {
cmd.arg("via").arg(gw.to_string());
}
@@ -1077,40 +1014,6 @@ async fn apply_policy_routing(
cmd.invoke(ErrorKind::Network).await.log_err();
}
// Delete stale routes: any non-default route in the per-interface table
// whose prefix is not in the desired set.
if let Ok(existing_routes) = Command::new("ip")
.arg("route")
.arg("show")
.arg("table")
.arg(&table_str)
.invoke(ErrorKind::Network)
.await
.and_then(|b| String::from_utf8(b).with_kind(ErrorKind::Utf8))
{
for line in existing_routes.lines() {
let line = line.trim();
if line.is_empty() || line.starts_with("default") {
continue;
}
let Some(prefix) = line.split_whitespace().next() else {
continue;
};
if desired_prefixes.contains(prefix) {
continue;
}
Command::new("ip")
.arg("route")
.arg("del")
.arg(prefix)
.arg("table")
.arg(&table_str)
.invoke(ErrorKind::Network)
.await
.log_err();
}
}
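The replace-based variant above reduces to three kinds of `ip` invocations; the equivalent shell form, with the table number and prefixes as illustrative placeholders:
// Equivalent shell commands for the replace-based strategy (illustrative values):
//   ip route replace 192.168.1.0/24 dev eth0 table <N>   # one per desired non-default route
//   ip route replace default via 192.168.1.1 dev eth0 table <N>
//   ip route del 10.0.0.0/8 table <N>                    # only prefixes no longer in the desired set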
// Ensure global CONNMARK restore rules in mangle PREROUTING (forwarded
// packets) and OUTPUT (locally-generated replies). Both are needed:
// PREROUTING handles DNAT-forwarded traffic, OUTPUT handles replies from
@@ -1229,7 +1132,7 @@ async fn poll_ip_info(
dhcp4_proxy: &Option<Dhcp4ConfigProxy<'_>>,
policy_guard: &Option<PolicyRoutingCleanup>,
iface: &GatewayId,
echoip_ratelimit_state: &mut BTreeMap<Url, Instant>,
prev_attempt: &mut Option<Instant>,
db: Option<&TypedPatchDb<Database>>,
write_to: &Watch<OrdMap<GatewayId, NetworkInterfaceInfo>>,
device_type: Option<NetworkInterfaceType>,
@@ -1276,49 +1179,42 @@ async fn poll_ip_info(
apply_policy_routing(guard, iface, &lan_ip).await?;
}
let echoip_urls = if let Some(db) = db {
let ifconfig_url = if let Some(db) = db {
db.peek()
.await
.as_public()
.as_server_info()
.as_echoip_urls()
.as_ifconfig_url()
.de()
.unwrap_or_else(|_| crate::db::model::public::default_echoip_urls())
.unwrap_or_else(|_| crate::db::model::public::default_ifconfig_url())
} else {
crate::db::model::public::default_echoip_urls()
crate::db::model::public::default_ifconfig_url()
};
let mut wan_ip = None;
for echoip_url in echoip_urls {
let wan_ip = if echoip_ratelimit_state
.get(&echoip_url)
.map_or(true, |i| i.elapsed() > Duration::from_secs(300))
&& !subnets.is_empty()
&& !matches!(
device_type,
Some(NetworkInterfaceType::Bridge | NetworkInterfaceType::Loopback)
) {
match get_wan_ipv4(iface.as_str(), &echoip_url).await {
Ok(a) => {
wan_ip = a;
}
Err(e) => {
tracing::error!(
"{}",
t!(
"net.gateway.failed-to-determine-wan-ip",
iface = iface.to_string(),
error = e.to_string()
)
);
tracing::debug!("{e:?}");
}
};
echoip_ratelimit_state.insert(echoip_url, Instant::now());
if wan_ip.is_some() {
break;
let wan_ip = if prev_attempt.map_or(true, |i| i.elapsed() > Duration::from_secs(300))
&& !subnets.is_empty()
&& !matches!(
device_type,
Some(NetworkInterfaceType::Bridge | NetworkInterfaceType::Loopback)
) {
*prev_attempt = Some(Instant::now());
match get_wan_ipv4(iface.as_str(), &ifconfig_url).await {
Ok(a) => a,
Err(e) => {
tracing::error!(
"{}",
t!(
"net.gateway.failed-to-determine-wan-ip",
iface = iface.to_string(),
error = e.to_string()
)
);
tracing::debug!("{e:?}");
None
}
};
}
}
} else {
None
};
let mut ip_info = IpInfo {
name: name.clone(),
scope_id,

View File

@@ -12,7 +12,6 @@ use crate::context::{CliContext, RpcContext};
use crate::db::model::DatabaseModel;
use crate::hostname::ServerHostname;
use crate::net::acme::AcmeProvider;
use crate::net::gateway::{CheckDnsParams, CheckPortParams, CheckPortRes, check_dns, check_port};
use crate::net::host::{HostApiKind, all_hosts};
use crate::prelude::*;
use crate::util::serde::{HandlerExtSerde, display_serializable};
@@ -161,7 +160,6 @@ pub fn address_api<C: Context, Kind: HostApiKind>()
}
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct AddPublicDomainParams {
#[arg(help = "help.arg.fqdn")]
@@ -170,17 +168,6 @@ pub struct AddPublicDomainParams {
pub acme: Option<AcmeProvider>,
#[arg(help = "help.arg.gateway-id")]
pub gateway: GatewayId,
#[arg(help = "help.arg.internal-port")]
pub internal_port: u16,
}
#[derive(Debug, Clone, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct AddPublicDomainRes {
#[ts(type = "string | null")]
pub dns: Option<Ipv4Addr>,
pub port: CheckPortRes,
}
pub async fn add_public_domain<Kind: HostApiKind>(
@@ -189,12 +176,10 @@ pub async fn add_public_domain<Kind: HostApiKind>(
fqdn,
acme,
gateway,
internal_port,
}: AddPublicDomainParams,
inheritance: Kind::Inheritance,
) -> Result<AddPublicDomainRes, Error> {
let ext_port = ctx
.db
) -> Result<Option<Ipv4Addr>, Error> {
ctx.db
.mutate(|db| {
if let Some(acme) = &acme {
if !db
@@ -210,92 +195,22 @@ pub async fn add_public_domain<Kind: HostApiKind>(
Kind::host_for(&inheritance, db)?
.as_public_domains_mut()
.insert(
&fqdn,
&PublicDomainConfig {
acme,
gateway: gateway.clone(),
},
)?;
.insert(&fqdn, &PublicDomainConfig { acme, gateway })?;
handle_duplicates(db)?;
let hostname = ServerHostname::load(db.as_public().as_server_info())?;
let gateways = db
.as_public()
.as_server_info()
.as_network()
.as_gateways()
.de()?;
let available_ports = db.as_private().as_available_ports().de()?;
let host = Kind::host_for(&inheritance, db)?;
host.update_addresses(&hostname, &gateways, &available_ports)?;
// Find the external port for the target binding
let bindings = host.as_bindings().de()?;
let target_bind = bindings
.get(&internal_port)
.ok_or_else(|| Error::new(eyre!("binding not found for internal port {internal_port}"), ErrorKind::NotFound))?;
let ext_port = target_bind
.addresses
.available
.iter()
.find(|a| a.public && a.hostname == fqdn)
.and_then(|a| a.port)
.ok_or_else(|| Error::new(eyre!("no public address found for {fqdn} on port {internal_port}"), ErrorKind::NotFound))?;
// Disable the domain on all other bindings
host.as_bindings_mut().mutate(|b| {
for (&port, bind) in b.iter_mut() {
if port == internal_port {
continue;
}
let has_addr = bind
.addresses
.available
.iter()
.any(|a| a.public && a.hostname == fqdn);
if has_addr {
let other_ext = bind
.addresses
.available
.iter()
.find(|a| a.public && a.hostname == fqdn)
.and_then(|a| a.port)
.unwrap_or(ext_port);
bind.addresses.disabled.insert((fqdn.clone(), other_ext));
}
}
Ok(())
})?;
Ok(ext_port)
let gateways = db.as_public().as_server_info().as_network().as_gateways().de()?;
let ports = db.as_private().as_available_ports().de()?;
Kind::host_for(&inheritance, db)?.update_addresses(&hostname, &gateways, &ports)
})
.await
.result?;
Kind::sync_host(&ctx, inheritance).await?;
let ctx2 = ctx.clone();
let fqdn2 = fqdn.clone();
let (dns_result, port_result) = tokio::join!(
async {
tokio::task::spawn_blocking(move || {
crate::net::dns::query_dns(ctx2, crate::net::dns::QueryDnsParams { fqdn: fqdn2 })
})
.await
.with_kind(ErrorKind::Unknown)?
},
check_port(
ctx.clone(),
CheckPortParams {
port: ext_port,
gateway: gateway.clone(),
},
)
);
Ok(AddPublicDomainRes {
dns: dns_result?,
port: port_result?,
tokio::task::spawn_blocking(|| {
crate::net::dns::query_dns(ctx, crate::net::dns::QueryDnsParams { fqdn })
})
.await
.with_kind(ErrorKind::Unknown)?
}
#[derive(Deserialize, Serialize, Parser, TS)]
@@ -327,6 +242,7 @@ pub async fn remove_public_domain<Kind: HostApiKind>(
})
.await
.result?;
Kind::sync_host(&ctx, inheritance).await?;
Ok(())
}
@@ -343,13 +259,13 @@ pub async fn add_private_domain<Kind: HostApiKind>(
ctx: RpcContext,
AddPrivateDomainParams { fqdn, gateway }: AddPrivateDomainParams,
inheritance: Kind::Inheritance,
) -> Result<bool, Error> {
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
Kind::host_for(&inheritance, db)?
.as_private_domains_mut()
.upsert(&fqdn, || Ok(BTreeSet::new()))?
.mutate(|d| Ok(d.insert(gateway.clone())))?;
.mutate(|d| Ok(d.insert(gateway)))?;
handle_duplicates(db)?;
let hostname = ServerHostname::load(db.as_public().as_server_info())?;
let gateways = db
@@ -363,8 +279,9 @@ pub async fn add_private_domain<Kind: HostApiKind>(
})
.await
.result?;
Kind::sync_host(&ctx, inheritance).await?;
check_dns(ctx, CheckDnsParams { gateway }).await
Ok(())
}
pub async fn remove_private_domain<Kind: HostApiKind>(
@@ -389,6 +306,7 @@ pub async fn remove_private_domain<Kind: HostApiKind>(
})
.await
.result?;
Kind::sync_host(&ctx, inheritance).await?;
Ok(())
}

View File

@@ -358,5 +358,5 @@ pub async fn set_address_enabled<Kind: HostApiKind>(
})
.await
.result?;
Ok(())
Kind::sync_host(&ctx, inheritance).await
}

View File

@@ -1,4 +1,5 @@
use std::collections::{BTreeMap, BTreeSet};
use std::future::Future;
use std::net::{IpAddr, SocketAddrV4};
use std::panic::RefUnwindSafe;
@@ -181,26 +182,15 @@ impl Model<Host> {
opt.secure
.map_or(true, |s| !(s.ssl && opt.add_ssl.is_some()))
}) {
let mdns_gateways = if opt.secure.is_some() {
mdns_gateways.clone()
} else {
mdns_gateways
.iter()
.filter(|g| gateways.get(*g).map_or(false, |g| g.secure()))
.cloned()
.collect()
};
if !mdns_gateways.is_empty() {
available.insert(HostnameInfo {
ssl: opt.secure.map_or(false, |s| s.ssl),
public: false,
hostname: mdns_host.clone(),
port: Some(port),
metadata: HostnameMetadata::Mdns {
gateways: mdns_gateways,
},
});
}
available.insert(HostnameInfo {
ssl: opt.secure.map_or(false, |s| s.ssl),
public: false,
hostname: mdns_host.clone(),
port: Some(port),
metadata: HostnameMetadata::Mdns {
gateways: mdns_gateways.clone(),
},
});
}
if let Some(port) = net.assigned_ssl_port {
available.insert(HostnameInfo {
@@ -249,20 +239,6 @@ impl Model<Host> {
port: Some(port),
metadata,
});
} else if opt.secure.map_or(false, |s| s.ssl)
&& opt.add_ssl.is_none()
&& available_ports.is_ssl(opt.preferred_external_port)
&& net.assigned_port != Some(opt.preferred_external_port)
{
// Service handles its own TLS and the preferred port is
// allocated as SSL — add an address for passthrough vhost.
available.insert(HostnameInfo {
ssl: true,
public: true,
hostname: domain,
port: Some(opt.preferred_external_port),
metadata,
});
}
}
@@ -283,7 +259,7 @@ impl Model<Host> {
};
available.insert(HostnameInfo {
ssl: opt.secure.map_or(false, |s| s.ssl),
public: false,
public: true,
hostname: domain.clone(),
port: Some(port),
metadata: HostnameMetadata::PrivateDomain { gateways },
@@ -300,27 +276,13 @@ impl Model<Host> {
}
available.insert(HostnameInfo {
ssl: true,
public: false,
public: true,
hostname: domain,
port: Some(port),
metadata: HostnameMetadata::PrivateDomain {
gateways: domain_gateways,
},
});
} else if opt.secure.map_or(false, |s| s.ssl)
&& opt.add_ssl.is_none()
&& available_ports.is_ssl(opt.preferred_external_port)
&& net.assigned_port != Some(opt.preferred_external_port)
{
available.insert(HostnameInfo {
ssl: true,
public: false,
hostname: domain,
port: Some(opt.preferred_external_port),
metadata: HostnameMetadata::PrivateDomain {
gateways: domain_gateways,
},
});
}
}
bind.as_addresses_mut().as_available_mut().ser(&available)?;
@@ -467,6 +429,10 @@ pub trait HostApiKind: 'static {
inheritance: &Self::Inheritance,
db: &'a mut DatabaseModel,
) -> Result<&'a mut Model<Host>, Error>;
fn sync_host(
ctx: &RpcContext,
inheritance: Self::Inheritance,
) -> impl Future<Output = Result<(), Error>> + Send;
}
pub struct ForPackage;
impl HostApiKind for ForPackage {
@@ -485,6 +451,12 @@ impl HostApiKind for ForPackage {
) -> Result<&'a mut Model<Host>, Error> {
host_for(db, Some(package), host)
}
async fn sync_host(ctx: &RpcContext, (package, host): Self::Inheritance) -> Result<(), Error> {
let service = ctx.services.get(&package).await;
let service_ref = service.as_ref().or_not_found(&package)?;
service_ref.sync_host(host).await?;
Ok(())
}
}
pub struct ForServer;
impl HostApiKind for ForServer {
@@ -500,6 +472,9 @@ impl HostApiKind for ForServer {
) -> Result<&'a mut Model<Host>, Error> {
host_for(db, None, &HostId::default())
}
async fn sync_host(ctx: &RpcContext, _: Self::Inheritance) -> Result<(), Error> {
ctx.os_net_service.sync_host(HostId::default()).await
}
}
pub fn host_api<C: Context>() -> ParentHandler<C, RequiresPackageId> {

View File

@@ -76,22 +76,9 @@ impl NetController {
],
)
.await?;
let passthroughs = db
.peek()
.await
.as_public()
.as_server_info()
.as_network()
.as_passthroughs()
.de()?;
Ok(Self {
db: db.clone(),
vhost: VHostController::new(
db.clone(),
net_iface.clone(),
crypto_provider,
passthroughs,
),
vhost: VHostController::new(db.clone(), net_iface.clone(), crypto_provider),
tls_client_config,
dns: DnsController::init(db, &net_iface.watcher).await?,
forward: InterfacePortForwardController::new(net_iface.watcher.subscribe()),
@@ -250,7 +237,6 @@ impl NetServiceData {
connect_ssl: connect_ssl
.clone()
.map(|_| ctrl.tls_client_config.clone()),
passthrough: false,
},
);
}
@@ -267,9 +253,7 @@ impl NetServiceData {
_ => continue,
}
let domain = &addr_info.hostname;
let Some(domain_ssl_port) = addr_info.port else {
continue;
};
let domain_ssl_port = addr_info.port.unwrap_or(443);
let key = (Some(domain.clone()), domain_ssl_port);
let target = vhosts.entry(key).or_insert_with(|| ProxyTarget {
public: BTreeSet::new(),
@@ -282,7 +266,6 @@ impl NetServiceData {
addr,
add_x_forwarded_headers: ssl.add_x_forwarded_headers,
connect_ssl: connect_ssl.clone().map(|_| ctrl.tls_client_config.clone()),
passthrough: false,
});
if addr_info.public {
for gw in addr_info.metadata.gateways() {
@@ -334,53 +317,6 @@ impl NetServiceData {
),
);
}
// Passthrough vhosts: if the service handles its own TLS
// (secure.ssl && no add_ssl) and a domain address is enabled on
// an SSL port different from assigned_port, add a passthrough
// vhost so the service's TLS endpoint is reachable on that port.
if bind.options.secure.map_or(false, |s| s.ssl) && bind.options.add_ssl.is_none() {
let assigned = bind.net.assigned_port;
for addr_info in &enabled_addresses {
if !addr_info.ssl {
continue;
}
let Some(pt_port) = addr_info.port.filter(|p| assigned != Some(*p)) else {
continue;
};
match &addr_info.metadata {
HostnameMetadata::PublicDomain { .. }
| HostnameMetadata::PrivateDomain { .. } => {}
_ => continue,
}
let domain = &addr_info.hostname;
let key = (Some(domain.clone()), pt_port);
let target = vhosts.entry(key).or_insert_with(|| ProxyTarget {
public: BTreeSet::new(),
private: BTreeSet::new(),
acme: None,
addr,
add_x_forwarded_headers: false,
connect_ssl: Err(AlpnInfo::Reflect),
passthrough: true,
});
if addr_info.public {
for gw in addr_info.metadata.gateways() {
target.public.insert(gw.clone());
}
} else {
for gw in addr_info.metadata.gateways() {
if let Some(info) = net_ifaces.get(gw) {
if let Some(ip_info) = &info.ip_info {
for subnet in &ip_info.subnets {
target.private.insert(subnet.addr());
}
}
}
}
}
}
}
}
// ── Phase 3: Reconcile ──
@@ -789,6 +725,13 @@ impl NetService {
.result
}
pub async fn sync_host(&self, _id: HostId) -> Result<(), Error> {
let current = self.synced.peek(|v| *v);
let mut w = self.synced.clone();
w.wait_for(|v| *v > current).await;
Ok(())
}
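// Note on the wait above: `synced` appears to be a generation counter that
// the reconcile loop bumps after each completed pass, so waiting for any
// value greater than the snapshot taken at entry guarantees at least one
// full reconcile has run since sync_host was called.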
pub async fn remove_all(mut self) -> Result<(), Error> {
if Weak::upgrade(&self.data.lock().await.controller).is_none() {
self.shutdown = true;
@@ -820,6 +763,7 @@ impl NetService {
break;
}
}
self.shutdown = true;
Ok(())
}
@@ -831,7 +775,6 @@ impl NetService {
impl Drop for NetService {
fn drop(&mut self) {
if !self.shutdown {
self.shutdown = true;
let svc = std::mem::replace(self, Self::dummy());
tokio::spawn(async move { svc.remove_all().await.log_err() });
}

View File

@@ -145,10 +145,9 @@ pub struct GatewayInfo {
pub public: bool,
}
#[derive(Clone, Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct ServiceInterface {
pub id: ServiceInterfaceId,
pub name: String,

View File

@@ -36,7 +36,7 @@ use crate::db::{DbAccess, DbAccessMut};
use crate::hostname::ServerHostname;
use crate::init::check_time_is_synchronized;
use crate::net::gateway::GatewayInfo;
use crate::net::tls::{TlsHandler, TlsHandlerAction};
use crate::net::tls::TlsHandler;
use crate::net::web_server::{Accept, ExtractVisitor, TcpMetadata, extract};
use crate::prelude::*;
use crate::util::serde::Pem;
@@ -188,7 +188,7 @@ lazy_static::lazy_static! {
}
fn asn1_time_to_system_time(time: &Asn1TimeRef) -> Result<SystemTime, Error> {
let diff = ASN1_UNIX_EPOCH.diff(time)?;
let diff = time.diff(&**ASN1_UNIX_EPOCH)?;
let mut res = UNIX_EPOCH;
if diff.days >= 0 {
res += Duration::from_secs(diff.days as u64 * 86400);
@@ -620,7 +620,7 @@ where
&mut self,
hello: &ClientHello<'_>,
metadata: &<A as Accept>::Metadata,
) -> Option<TlsHandlerAction> {
) -> Option<ServerConfig> {
let hostnames: BTreeSet<InternedString> = hello
.server_name()
.map(InternedString::from)
@@ -684,6 +684,5 @@ where
)
}
.log_err()
.map(TlsHandlerAction::Tls)
}
}

View File

@@ -9,7 +9,7 @@ use async_compression::tokio::bufread::GzipEncoder;
use axum::Router;
use axum::body::Body;
use axum::extract::{self as x, Request};
use axum::response::Response;
use axum::response::{IntoResponse, Response};
use axum::routing::{any, get};
use base64::display::Base64Display;
use digest::Digest;

View File

@@ -16,14 +16,6 @@ use tokio_rustls::rustls::sign::CertifiedKey;
use tokio_rustls::rustls::{ClientConfig, RootCertStore, ServerConfig};
use visit_rs::{Visit, VisitFields};
/// Result of a TLS handler's decision about how to handle a connection.
pub enum TlsHandlerAction {
/// Complete the TLS handshake with this ServerConfig.
Tls(ServerConfig),
/// Don't complete TLS — rewind the BackTrackingIO and return the raw stream.
Passthrough,
}
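// How Passthrough is consumed (see the accept loop later in this diff): the
// listener buffers the ClientHello through a BackTrackingIO, and when a
// handler returns Passthrough it rewinds the buffered bytes and hands the
// raw TCP stream on unmodified, so the backend service can terminate TLS
// itself rather than the proxy completing the handshake.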
use crate::net::http::handle_http_on_https;
use crate::net::web_server::{Accept, AcceptStream, MetadataVisitor};
use crate::prelude::*;
@@ -58,7 +50,7 @@ pub trait TlsHandler<'a, A: Accept> {
&'a mut self,
hello: &'a ClientHello<'a>,
metadata: &'a A::Metadata,
) -> impl Future<Output = Option<TlsHandlerAction>> + Send + 'a;
) -> impl Future<Output = Option<ServerConfig>> + Send + 'a;
}
#[derive(Clone)]
@@ -74,7 +66,7 @@ where
&'a mut self,
hello: &'a ClientHello<'a>,
metadata: &'a <A as Accept>::Metadata,
) -> Option<TlsHandlerAction> {
) -> Option<ServerConfig> {
if let Some(config) = self.0.get_config(hello, metadata).await {
return Some(config);
}
@@ -94,7 +86,7 @@ pub trait WrapTlsHandler<A: Accept> {
prev: ServerConfig,
hello: &'a ClientHello<'a>,
metadata: &'a <A as Accept>::Metadata,
) -> impl Future<Output = Option<TlsHandlerAction>> + Send + 'a
) -> impl Future<Output = Option<ServerConfig>> + Send + 'a
where
Self: 'a;
}
@@ -110,12 +102,9 @@ where
&'a mut self,
hello: &'a ClientHello<'a>,
metadata: &'a <A as Accept>::Metadata,
) -> Option<TlsHandlerAction> {
let action = self.inner.get_config(hello, metadata).await?;
match action {
TlsHandlerAction::Tls(cfg) => self.wrapper.wrap(cfg, hello, metadata).await,
other => Some(other),
}
) -> Option<ServerConfig> {
let prev = self.inner.get_config(hello, metadata).await?;
self.wrapper.wrap(prev, hello, metadata).await
}
}
@@ -214,56 +203,34 @@ where
}
};
let hello = mid.client_hello();
let sni = hello.server_name().map(InternedString::intern);
match tls_handler.get_config(&hello, &metadata).await {
Some(TlsHandlerAction::Tls(cfg)) => {
let buffered = mid.io.stop_buffering();
mid.io
.write_all(&buffered)
.await
.with_kind(ErrorKind::Network)?;
return Ok(match mid.into_stream(Arc::new(cfg)).await {
Ok(stream) => {
let s = stream.get_ref().1;
Some((
TlsMetadata {
inner: metadata,
tls_info: TlsHandshakeInfo {
sni: s
.server_name()
.map(InternedString::intern),
alpn: s
.alpn_protocol()
.map(|a| MaybeUtf8String(a.to_vec())),
},
if let Some(cfg) = tls_handler.get_config(&hello, &metadata).await {
let buffered = mid.io.stop_buffering();
mid.io
.write_all(&buffered)
.await
.with_kind(ErrorKind::Network)?;
return Ok(match mid.into_stream(Arc::new(cfg)).await {
Ok(stream) => {
let s = stream.get_ref().1;
Some((
TlsMetadata {
inner: metadata,
tls_info: TlsHandshakeInfo {
sni: s.server_name().map(InternedString::intern),
alpn: s
.alpn_protocol()
.map(|a| MaybeUtf8String(a.to_vec())),
},
Box::pin(stream) as AcceptStream,
))
}
Err(e) => {
tracing::trace!("Error completing TLS handshake: {e}");
tracing::trace!("{e:?}");
None
}
});
}
Some(TlsHandlerAction::Passthrough) => {
let (dummy, _drop) = tokio::io::duplex(1);
let mut bt = std::mem::replace(
&mut mid.io,
BackTrackingIO::new(Box::pin(dummy) as AcceptStream),
);
drop(mid);
bt.rewind();
return Ok(Some((
TlsMetadata {
inner: metadata,
tls_info: TlsHandshakeInfo { sni, alpn: None },
},
Box::pin(bt) as AcceptStream,
)));
}
None => {}
},
Box::pin(stream) as AcceptStream,
))
}
Err(e) => {
tracing::trace!("Error completing TLS handshake: {e}");
tracing::trace!("{e:?}");
None
}
});
}
Ok(None)

View File

@@ -1,5 +1,3 @@
use std::time::Duration;
use clap::Parser;
use imbl_value::InternedString;
use patch_db::json_ptr::JsonPointer;
@@ -10,9 +8,7 @@ use ts_rs::TS;
use crate::GatewayId;
use crate::context::{CliContext, RpcContext};
use crate::db::model::public::{
GatewayType, NetworkInfo, NetworkInterfaceInfo, NetworkInterfaceType,
};
use crate::db::model::public::{GatewayType, NetworkInterfaceInfo, NetworkInterfaceType};
use crate::net::host::all_hosts;
use crate::prelude::*;
use crate::util::Invoke;
@@ -143,34 +139,6 @@ pub async fn add_tunnel(
.result?;
}
// Wait for the sync loop to fully commit gateway state (addresses, hosts)
// to the database, with a 15-second timeout.
if tokio::time::timeout(Duration::from_secs(15), async {
let mut watch = ctx
.db
.watch("/public/serverInfo/network".parse::<JsonPointer>().unwrap())
.await
.typed::<NetworkInfo>();
loop {
if watch
.peek()?
.as_gateways()
.as_idx(&iface)
.and_then(|g| g.as_ip_info().transpose_ref())
.is_some()
{
break;
}
watch.changed().await?;
}
Ok::<_, Error>(())
})
.await
.is_err()
{
tracing::warn!("{}", t!("net.tunnel.timeout-waiting-for-add", gateway = iface.as_str()));
}
Ok(iface)
}
@@ -256,27 +224,5 @@ pub async fn remove_tunnel(
.await
.result?;
// Wait for the sync loop to fully commit gateway removal to the database,
// with a 15-second timeout.
if tokio::time::timeout(Duration::from_secs(15), async {
let mut watch = ctx
.db
.watch("/public/serverInfo/network".parse::<JsonPointer>().unwrap())
.await
.typed::<NetworkInfo>();
loop {
if watch.peek()?.as_gateways().as_idx(&id).is_none() {
break;
}
watch.changed().await?;
}
Ok::<_, Error>(())
})
.await
.is_err()
{
tracing::warn!("{}", t!("net.tunnel.timeout-waiting-for-remove", gateway = id.as_str()));
}
Ok(())
}

View File

@@ -66,13 +66,6 @@ pub fn ipv6_is_local(addr: Ipv6Addr) -> bool {
addr.is_loopback() || (addr.segments()[0] & 0xfe00) == 0xfc00 || ipv6_is_link_local(addr)
}
pub fn is_private_ip(addr: IpAddr) -> bool {
match addr {
IpAddr::V4(v4) => v4.is_private() || v4.is_loopback() || v4.is_link_local(),
IpAddr::V6(v6) => ipv6_is_local(v6),
}
}
fn parse_iface_ip(output: &str) -> Result<Vec<&str>, Error> {
let output = output.trim();
if output.is_empty() {

View File

@@ -6,13 +6,12 @@ use std::sync::{Arc, Weak};
use std::task::{Poll, ready};
use async_acme::acme::ACME_TLS_ALPN_NAME;
use clap::Parser;
use color_eyre::eyre::eyre;
use futures::FutureExt;
use futures::future::BoxFuture;
use imbl::OrdMap;
use imbl_value::{InOMap, InternedString};
use rpc_toolkit::{Context, HandlerArgs, HandlerExt, ParentHandler, from_fn, from_fn_async};
use rpc_toolkit::{Context, HandlerArgs, HandlerExt, ParentHandler, from_fn};
use serde::{Deserialize, Serialize};
use tokio::net::{TcpListener, TcpStream};
use tokio_rustls::TlsConnector;
@@ -36,9 +35,9 @@ use crate::net::gateway::{
};
use crate::net::ssl::{CertStore, RootCaTlsHandler};
use crate::net::tls::{
ChainedHandler, TlsHandlerAction, TlsHandlerWrapper, TlsListener, TlsMetadata, WrapTlsHandler,
ChainedHandler, TlsHandlerWrapper, TlsListener, TlsMetadata, WrapTlsHandler,
};
use crate::net::utils::{ipv6_is_link_local, is_private_ip};
use crate::net::utils::ipv6_is_link_local;
use crate::net::web_server::{Accept, AcceptStream, ExtractVisitor, TcpMetadata, extract};
use crate::prelude::*;
use crate::util::collections::EqSet;
@@ -47,228 +46,68 @@ use crate::util::serde::{HandlerExtSerde, MaybeUtf8String, display_serializable}
use crate::util::sync::{SyncMutex, Watch};
use crate::{GatewayId, ResultExt};
#[derive(Debug, Clone, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct PassthroughInfo {
#[ts(type = "string")]
pub hostname: InternedString,
pub listen_port: u16,
#[ts(type = "string")]
pub backend: SocketAddr,
#[ts(type = "string[]")]
pub public_gateways: BTreeSet<GatewayId>,
#[ts(type = "string[]")]
pub private_ips: BTreeSet<IpAddr>,
}
#[derive(Debug, Clone, Deserialize, Serialize, Parser)]
#[serde(rename_all = "kebab-case")]
struct AddPassthroughParams {
#[arg(long)]
pub hostname: InternedString,
#[arg(long)]
pub listen_port: u16,
#[arg(long)]
pub backend: SocketAddr,
#[arg(long)]
pub public_gateway: Vec<GatewayId>,
#[arg(long)]
pub private_ip: Vec<IpAddr>,
}
#[derive(Debug, Clone, Deserialize, Serialize, Parser)]
#[serde(rename_all = "kebab-case")]
struct RemovePassthroughParams {
#[arg(long)]
pub hostname: InternedString,
#[arg(long)]
pub listen_port: u16,
}
pub fn vhost_api<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"dump-table",
from_fn(dump_table)
.with_display_serializable()
.with_custom_display_fn(|HandlerArgs { params, .. }, res| {
use prettytable::*;
ParentHandler::new().subcommand(
"dump-table",
from_fn(|ctx: RpcContext| Ok(ctx.net_controller.vhost.dump_table()))
.with_display_serializable()
.with_custom_display_fn(|HandlerArgs { params, .. }, res| {
use prettytable::*;
if let Some(format) = params.format {
display_serializable(format, res)?;
return Ok::<_, Error>(());
}
if let Some(format) = params.format {
display_serializable(format, res)?;
return Ok::<_, Error>(());
}
let mut table = Table::new();
table.add_row(row![bc => "FROM", "TO", "ACTIVE"]);
let mut table = Table::new();
table.add_row(row![bc => "FROM", "TO", "ACTIVE"]);
for (external, targets) in res {
for (host, targets) in targets {
for (idx, target) in targets.into_iter().enumerate() {
table.add_row(row![
format!(
"{}:{}",
host.as_ref().map(|s| &**s).unwrap_or("*"),
external.0
),
target,
idx == 0
]);
}
for (external, targets) in res {
for (host, targets) in targets {
for (idx, target) in targets.into_iter().enumerate() {
table.add_row(row![
format!(
"{}:{}",
host.as_ref().map(|s| &**s).unwrap_or("*"),
external.0
),
target,
idx == 0
]);
}
}
}
table.print_tty(false)?;
table.print_tty(false)?;
Ok(())
})
.with_call_remote::<CliContext>(),
)
.subcommand(
"add-passthrough",
from_fn_async(add_passthrough)
.no_display()
.with_call_remote::<CliContext>(),
)
.subcommand(
"remove-passthrough",
from_fn_async(remove_passthrough)
.no_display()
.with_call_remote::<CliContext>(),
)
.subcommand(
"list-passthrough",
from_fn(list_passthrough)
.with_display_serializable()
.with_call_remote::<CliContext>(),
)
}
fn dump_table(
ctx: RpcContext,
) -> Result<BTreeMap<JsonKey<u16>, BTreeMap<JsonKey<Option<InternedString>>, EqSet<String>>>, Error>
{
Ok(ctx.net_controller.vhost.dump_table())
}
async fn add_passthrough(
ctx: RpcContext,
AddPassthroughParams {
hostname,
listen_port,
backend,
public_gateway,
private_ip,
}: AddPassthroughParams,
) -> Result<(), Error> {
let public_gateways: BTreeSet<GatewayId> = public_gateway.into_iter().collect();
let private_ips: BTreeSet<IpAddr> = private_ip.into_iter().collect();
ctx.net_controller.vhost.add_passthrough(
hostname.clone(),
listen_port,
backend,
public_gateways.clone(),
private_ips.clone(),
)?;
ctx.db
.mutate(|db| {
let pts = db
.as_public_mut()
.as_server_info_mut()
.as_network_mut()
.as_passthroughs_mut();
let mut vec: Vec<PassthroughInfo> = pts.de()?;
vec.retain(|p| !(p.hostname == hostname && p.listen_port == listen_port));
vec.push(PassthroughInfo {
hostname,
listen_port,
backend,
public_gateways,
private_ips,
});
pts.ser(&vec)
})
.await
.result?;
Ok(())
}
async fn remove_passthrough(
ctx: RpcContext,
RemovePassthroughParams {
hostname,
listen_port,
}: RemovePassthroughParams,
) -> Result<(), Error> {
ctx.net_controller
.vhost
.remove_passthrough(&hostname, listen_port);
ctx.db
.mutate(|db| {
let pts = db
.as_public_mut()
.as_server_info_mut()
.as_network_mut()
.as_passthroughs_mut();
let mut vec: Vec<PassthroughInfo> = pts.de()?;
vec.retain(|p| !(p.hostname == hostname && p.listen_port == listen_port));
pts.ser(&vec)
})
.await
.result?;
Ok(())
}
fn list_passthrough(ctx: RpcContext) -> Result<Vec<PassthroughInfo>, Error> {
Ok(ctx.net_controller.vhost.list_passthrough())
Ok(())
})
.with_call_remote::<CliContext>(),
)
}
// not allowed: <=1024, >=32768, 5355, 5432, 9050, 6010, 9051, 5353
struct PassthroughHandle {
_rc: Arc<()>,
backend: SocketAddr,
public: BTreeSet<GatewayId>,
private: BTreeSet<IpAddr>,
}
pub struct VHostController {
db: TypedPatchDb<Database>,
interfaces: Arc<NetworkInterfaceController>,
crypto_provider: Arc<CryptoProvider>,
acme_cache: AcmeTlsAlpnCache,
servers: SyncMutex<BTreeMap<u16, VHostServer<VHostBindListener>>>,
passthrough_handles: SyncMutex<BTreeMap<(InternedString, u16), PassthroughHandle>>,
}
impl VHostController {
pub fn new(
db: TypedPatchDb<Database>,
interfaces: Arc<NetworkInterfaceController>,
crypto_provider: Arc<CryptoProvider>,
passthroughs: Vec<PassthroughInfo>,
) -> Self {
let controller = Self {
Self {
db,
interfaces,
crypto_provider,
acme_cache: Arc::new(SyncMutex::new(BTreeMap::new())),
servers: SyncMutex::new(BTreeMap::new()),
passthrough_handles: SyncMutex::new(BTreeMap::new()),
};
for pt in passthroughs {
if let Err(e) = controller.add_passthrough(
pt.hostname,
pt.listen_port,
pt.backend,
pt.public_gateways,
pt.private_ips,
) {
tracing::warn!("failed to restore passthrough: {e}");
}
}
controller
}
#[instrument(skip_all)]
pub fn add(
@@ -281,7 +120,20 @@ impl VHostController {
let server = if let Some(server) = writable.remove(&external) {
server
} else {
self.create_server(external)
let bind_reqs = Watch::new(VHostBindRequirements::default());
let listener = VHostBindListener {
ip_info: self.interfaces.watcher.subscribe(),
port: external,
bind_reqs: bind_reqs.clone_unseen(),
listeners: BTreeMap::new(),
};
VHostServer::new(
listener,
bind_reqs,
self.db.clone(),
self.crypto_provider.clone(),
self.acme_cache.clone(),
)
};
let rc = server.add(hostname, target);
writable.insert(external, server);
@@ -289,75 +141,6 @@ impl VHostController {
})
}
fn create_server(&self, port: u16) -> VHostServer<VHostBindListener> {
let bind_reqs = Watch::new(VHostBindRequirements::default());
let listener = VHostBindListener {
ip_info: self.interfaces.watcher.subscribe(),
port,
bind_reqs: bind_reqs.clone_unseen(),
listeners: BTreeMap::new(),
};
VHostServer::new(
listener,
bind_reqs,
self.db.clone(),
self.crypto_provider.clone(),
self.acme_cache.clone(),
)
}
pub fn add_passthrough(
&self,
hostname: InternedString,
port: u16,
backend: SocketAddr,
public: BTreeSet<GatewayId>,
private: BTreeSet<IpAddr>,
) -> Result<(), Error> {
let target = ProxyTarget {
public: public.clone(),
private: private.clone(),
acme: None,
addr: backend,
add_x_forwarded_headers: false,
connect_ssl: Err(AlpnInfo::Reflect),
passthrough: true,
};
let rc = self.add(Some(hostname.clone()), port, DynVHostTarget::new(target))?;
self.passthrough_handles.mutate(|h| {
h.insert(
(hostname, port),
PassthroughHandle {
_rc: rc,
backend,
public,
private,
},
);
});
Ok(())
}
pub fn remove_passthrough(&self, hostname: &InternedString, port: u16) {
self.passthrough_handles
.mutate(|h| h.remove(&(hostname.clone(), port)));
self.gc(Some(hostname.clone()), port);
}
pub fn list_passthrough(&self) -> Vec<PassthroughInfo> {
self.passthrough_handles.peek(|h| {
h.iter()
.map(|((hostname, port), handle)| PassthroughInfo {
hostname: hostname.clone(),
listen_port: *port,
backend: handle.backend,
public_gateways: handle.public.clone(),
private_ips: handle.private.clone(),
})
.collect()
})
}
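// Persistence model for passthroughs on the side of this diff that keeps
// them: add_passthrough / remove_passthrough update both the in-memory
// handle map (whose Arc keeps the vhost binding alive) and the passthroughs
// list under serverInfo.network in the database, which VHostController::new
// replays at startup so passthroughs survive a reboot.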
pub fn dump_table(
&self,
) -> BTreeMap<JsonKey<u16>, BTreeMap<JsonKey<Option<InternedString>>, EqSet<String>>> {
@@ -547,9 +330,6 @@ pub trait VHostTarget<A: Accept>: std::fmt::Debug + Eq {
fn bind_requirements(&self) -> (BTreeSet<GatewayId>, BTreeSet<IpAddr>) {
(BTreeSet::new(), BTreeSet::new())
}
fn is_passthrough(&self) -> bool {
false
}
fn preprocess<'a>(
&'a self,
prev: ServerConfig,
@@ -569,7 +349,6 @@ pub trait DynVHostTargetT<A: Accept>: std::fmt::Debug + Any {
fn filter(&self, metadata: &<A as Accept>::Metadata) -> bool;
fn acme(&self) -> Option<&AcmeProvider>;
fn bind_requirements(&self) -> (BTreeSet<GatewayId>, BTreeSet<IpAddr>);
fn is_passthrough(&self) -> bool;
fn preprocess<'a>(
&'a self,
prev: ServerConfig,
@@ -594,9 +373,6 @@ impl<A: Accept, T: VHostTarget<A> + 'static> DynVHostTargetT<A> for T {
fn acme(&self) -> Option<&AcmeProvider> {
VHostTarget::acme(self)
}
fn is_passthrough(&self) -> bool {
VHostTarget::is_passthrough(self)
}
fn bind_requirements(&self) -> (BTreeSet<GatewayId>, BTreeSet<IpAddr>) {
VHostTarget::bind_requirements(self)
}
@@ -683,7 +459,6 @@ pub struct ProxyTarget {
pub addr: SocketAddr,
pub add_x_forwarded_headers: bool,
pub connect_ssl: Result<Arc<ClientConfig>, AlpnInfo>, // Ok: connect to the backend over TLS and pass through the client's ALPN; Err: connect over plain TCP and use the provided ALPN strategy
pub passthrough: bool,
}
impl PartialEq for ProxyTarget {
fn eq(&self, other: &Self) -> bool {
@@ -691,7 +466,6 @@ impl PartialEq for ProxyTarget {
&& self.private == other.private
&& self.acme == other.acme
&& self.addr == other.addr
&& self.passthrough == other.passthrough
&& self.connect_ssl.as_ref().map(Arc::as_ptr)
== other.connect_ssl.as_ref().map(Arc::as_ptr)
}
@@ -706,7 +480,6 @@ impl fmt::Debug for ProxyTarget {
.field("addr", &self.addr)
.field("add_x_forwarded_headers", &self.add_x_forwarded_headers)
.field("connect_ssl", &self.connect_ssl.as_ref().map(|_| ()))
.field("passthrough", &self.passthrough)
.finish()
}
}
@@ -732,9 +505,8 @@ where
};
let src = tcp.peer_addr.ip();
// Private: source is in a known subnet or is a private IP (e.g. VPN on a different VLAN)
let is_public =
!ip_info.subnets.iter().any(|s| s.contains(&src)) && !is_private_ip(src);
// Public: source is outside all known subnets (direct internet)
let is_public = !ip_info.subnets.iter().any(|s| s.contains(&src));
if is_public {
self.public.contains(&gw.id)
@@ -752,9 +524,6 @@ where
fn bind_requirements(&self) -> (BTreeSet<GatewayId>, BTreeSet<IpAddr>) {
(self.public.clone(), self.private.clone())
}
fn is_passthrough(&self) -> bool {
self.passthrough
}
async fn preprocess<'a>(
&'a self,
mut prev: ServerConfig,
@@ -908,7 +677,7 @@ where
prev: ServerConfig,
hello: &'a ClientHello<'a>,
metadata: &'a <A as Accept>::Metadata,
) -> Option<TlsHandlerAction>
) -> Option<ServerConfig>
where
Self: 'a,
{
@@ -918,7 +687,7 @@ where
.flatten()
.any(|a| a == ACME_TLS_ALPN_NAME)
{
return Some(TlsHandlerAction::Tls(prev));
return Some(prev);
}
let (target, rc) = self.0.peek(|m| {
@@ -931,16 +700,11 @@ where
.map(|(t, rc)| (t.clone(), rc.clone()))
})?;
let is_pt = target.0.is_passthrough();
let (prev, store) = target.into_preprocessed(rc, prev, hello, metadata).await?;
self.1 = Some(store);
if is_pt {
Some(TlsHandlerAction::Passthrough)
} else {
Some(TlsHandlerAction::Tls(prev))
}
Some(prev)
}
}

View File

@@ -509,7 +509,7 @@ where
drop(queue_cell.replace(None));
if !runner.is_empty() {
tokio::time::timeout(Duration::from_millis(100), runner)
tokio::time::timeout(Duration::from_secs(60), runner)
.await
.log_err();
}

View File

@@ -1,3 +1,3 @@
{boot} /boot vfat umask=0077 0 2
{efi} /boot/efi vfat umask=0077 0 1
{root} / btrfs defaults 0 1
{root} / ext4 defaults 0 1

View File

@@ -197,19 +197,11 @@ pub async fn partition(
.invoke(crate::ErrorKind::DiskManagement)
.await?;
let mut extra_boot = std::collections::BTreeMap::new();
let bios;
if efi {
extra_boot.insert("efi".to_string(), partition_for(&disk_path, 1));
bios = None;
} else {
bios = Some(partition_for(&disk_path, 1));
}
Ok(OsPartitionInfo {
bios,
efi: efi.then(|| partition_for(&disk_path, 1)),
bios: (!efi).then(|| partition_for(&disk_path, 1)),
boot: partition_for(&disk_path, 2),
root: partition_for(&disk_path, 3),
extra_boot,
data: data_part,
})
}

View File

@@ -164,10 +164,10 @@ pub async fn partition(
.await?;
Ok(OsPartitionInfo {
efi: None,
bios: None,
boot: partition_for(&disk_path, 1),
root: partition_for(&disk_path, 2),
extra_boot: Default::default(),
data: data_part,
})
}

View File

@@ -21,7 +21,7 @@ use crate::prelude::*;
use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile;
use crate::setup::SetupInfo;
use crate::util::Invoke;
use crate::util::io::{TmpDir, delete_dir, delete_file, open_file, write_file_atomic};
use crate::util::io::{TmpDir, delete_file, open_file, write_file_atomic};
use crate::util::serde::IoFormat;
mod gpt;
@@ -125,7 +125,6 @@ struct DataDrive {
pub struct InstallOsResult {
pub part_info: OsPartitionInfo,
pub rootfs: TmpMountGuard,
pub mok_enrolled: bool,
}
pub async fn install_os_to(
@@ -143,7 +142,7 @@ pub async fn install_os_to(
let part_info = partition(disk_path, capacity, partition_table, protect, use_efi).await?;
if let Some(efi) = part_info.extra_boot.get("efi") {
if let Some(efi) = &part_info.efi {
Command::new("mkfs.vfat")
.arg(efi)
.invoke(crate::ErrorKind::DiskManagement)
@@ -174,7 +173,6 @@ pub async fn install_os_to(
delete_file(guard.path().join("config/upgrade")).await?;
delete_file(guard.path().join("config/overlay/etc/hostname")).await?;
delete_file(guard.path().join("config/disk.guid")).await?;
delete_dir(guard.path().join("config/lib/modules")).await?;
Command::new("cp")
.arg("-r")
.arg(guard.path().join("config"))
@@ -210,7 +208,9 @@ pub async fn install_os_to(
let config_path = rootfs.path().join("config");
if tokio::fs::metadata("/tmp/config.bak").await.is_ok() {
crate::util::io::delete_dir(&config_path).await?;
if tokio::fs::metadata(&config_path).await.is_ok() {
tokio::fs::remove_dir_all(&config_path).await?;
}
Command::new("cp")
.arg("-r")
.arg("/tmp/config.bak")
@@ -260,7 +260,10 @@ pub async fn install_os_to(
tokio::fs::write(
rootfs.path().join("config/config.yaml"),
IoFormat::Yaml.to_vec(&ServerConfig::default())?,
IoFormat::Yaml.to_vec(&ServerConfig {
os_partitions: Some(part_info.clone()),
..Default::default()
})?,
)
.await?;
@@ -279,7 +282,7 @@ pub async fn install_os_to(
ReadWrite,
)
.await?;
let efi = if let Some(efi) = part_info.extra_boot.get("efi") {
let efi = if let Some(efi) = &part_info.efi {
Some(
MountGuard::mount(
&BlockDev::new(efi),
@@ -320,8 +323,8 @@ pub async fn install_os_to(
include_str!("fstab.template"),
boot = part_info.boot.display(),
efi = part_info
.extra_boot
.get("efi")
.efi
.as_ref()
.map(|p| p.display().to_string())
.unwrap_or_else(|| "# N/A".to_owned()),
root = part_info.root.display(),
@@ -342,28 +345,6 @@ pub async fn install_os_to(
.invoke(crate::ErrorKind::OpenSsh)
.await?;
// Secure Boot: generate MOK key, sign unsigned modules, enroll MOK
let mut mok_enrolled = false;
if use_efi && crate::util::mok::is_secure_boot_enabled().await {
let new_key = crate::util::mok::ensure_dkms_key(overlay.path()).await?;
tracing::info!(
"DKMS MOK key: {}",
if new_key {
"generated"
} else {
"already exists"
}
);
crate::util::mok::sign_unsigned_modules(overlay.path()).await?;
let mok_pub = overlay.path().join(crate::util::mok::DKMS_MOK_PUB.trim_start_matches('/'));
match crate::util::mok::enroll_mok(&mok_pub).await {
Ok(enrolled) => mok_enrolled = enrolled,
Err(e) => tracing::warn!("MOK enrollment failed: {e}"),
}
}
let mut install = Command::new("chroot");
install.arg(overlay.path()).arg("grub-install");
if !use_efi {
@@ -405,11 +386,7 @@ pub async fn install_os_to(
tokio::fs::remove_dir_all(&work).await?;
lower.unmount().await?;
Ok(InstallOsResult {
part_info,
rootfs,
mok_enrolled,
})
Ok(InstallOsResult { part_info, rootfs })
}
pub async fn install_os(
@@ -451,12 +428,7 @@ pub async fn install_os(
});
let use_efi = tokio::fs::metadata("/sys/firmware/efi").await.is_ok();
let InstallOsResult {
part_info,
rootfs,
mok_enrolled,
} = install_os_to(
let InstallOsResult { part_info, rootfs } = install_os_to(
"/run/live/medium/live/filesystem.squashfs",
&disk.logicalname,
disk.capacity,
@@ -467,8 +439,10 @@ pub async fn install_os(
)
.await?;
ctx.config
.mutate(|c| c.os_partitions = Some(part_info.clone()));
let mut setup_info = SetupInfo::default();
setup_info.mok_enrolled = mok_enrolled;
if let Some(data_drive) = data_drive {
let mut logicalname = &*data_drive.logicalname;
@@ -552,11 +526,7 @@ pub async fn cli_install_os(
let use_efi = efi.unwrap_or_else(|| !matches!(partition_table, Some(PartitionTable::Mbr)));
let InstallOsResult {
part_info,
rootfs,
mok_enrolled: _,
} = install_os_to(
let InstallOsResult { part_info, rootfs } = install_os_to(
&squashfs,
&disk,
capacity,
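
Among the install_os_to changes above, the fstab hunk fills the {boot}/{efi}/{root} placeholders from fstab.template using format! with named arguments, falling back to a commented-out "# N/A" entry when there is no EFI partition. A self-contained sketch of that rendering, with the template inlined and simplified and with hypothetical device paths:

use std::path::PathBuf;

fn main() {
    // Hypothetical partition paths; the real values come from OsPartitionInfo.
    let boot = PathBuf::from("/dev/sda2");
    let efi: Option<PathBuf> = None; // Some("/dev/sda1".into()) on EFI installs
    let root = PathBuf::from("/dev/sda3");

    // The real code loads the template with include_str!("fstab.template").
    let fstab = format!(
        "{boot} /boot vfat umask=0077 0 2\n\
         {efi} /boot/efi vfat umask=0077 0 1\n\
         {root} / ext4 defaults 0 1\n",
        boot = boot.display(),
        efi = efi
            .as_ref()
            .map(|p| p.display().to_string())
            // keeps the template's shape: the line becomes a comment
            .unwrap_or_else(|| "# N/A".to_owned()),
        root = root.display(),
    );
    print!("{fstab}");
}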

View File

@@ -141,7 +141,7 @@ impl RegistryContext {
listen: config.registry_listen.unwrap_or(DEFAULT_REGISTRY_LISTEN),
db,
datadir,
rpc_continuations: RpcContinuations::new(None),
rpc_continuations: RpcContinuations::new(),
client: Client::builder()
.proxy(Proxy::custom(move |url| {
if url.host_str().map_or(false, |h| h.ends_with(".onion")) {
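
The Proxy::custom closure above singles out .onion hosts; the proxy it ultimately returns lies outside this hunk's context. A sketch of how such a selector is typically wired up with reqwest, assuming a local Tor SOCKS endpoint on 127.0.0.1:9050 and a reqwest build with its socks feature enabled:

use reqwest::{Client, Proxy, Url};

fn build_client() -> reqwest::Result<Client> {
    // Assumed Tor SOCKS proxy address; the real target is not shown in the diff.
    let tor = Url::parse("socks5h://127.0.0.1:9050").expect("static url");
    Client::builder()
        .proxy(Proxy::custom(move |url: &Url| {
            if url.host_str().map_or(false, |h| h.ends_with(".onion")) {
                Some(tor.clone()) // route hidden services through Tor
            } else {
                None // everything else connects directly
            }
        }))
        .build()
}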

View File

@@ -255,7 +255,30 @@ impl Model<PackageVersionInfo> {
}
if let Some(hw) = &device_info.hardware {
self.as_s9pks_mut().mutate(|s9pks| {
s9pks.retain(|(hw_req, _)| hw_req.is_compatible(hw));
s9pks.retain(|(hw_req, _)| {
if let Some(arch) = &hw_req.arch {
if !arch.contains(&hw.arch) {
return false;
}
}
if let Some(ram) = hw_req.ram {
if hw.ram < ram {
return false;
}
}
if let Some(dev) = &hw.devices {
for device_filter in &hw_req.device {
if !dev
.iter()
.filter(|d| d.class() == &*device_filter.class)
.any(|d| device_filter.matches(d))
{
return false;
}
}
}
true
});
if hw.devices.is_some() {
s9pks.sort_by_key(|(req, _)| req.specificity_desc());
} else {
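
The expanded retain closure above spells out what the registry's hardware gate has to cover: allowed CPU architectures, minimum RAM, and at least one matching device per required device class. A condensed sketch of the same checks as a standalone predicate, using simplified stand-in types (the real requirement and device-info types differ):

// Simplified stand-in types for illustration only.
struct Requirements {
    arch: Option<Vec<String>>, // allowed architectures, None = any
    ram: Option<u64>,          // minimum RAM, None = any
    device: Vec<DeviceFilter>, // one entry per required device class
}
struct DeviceFilter {
    class: String, // real filters also carry a matching pattern
}
struct Device {
    class: String,
}
struct Hardware {
    arch: String,
    ram: u64,
    devices: Option<Vec<Device>>,
}

// Mirrors the retain closure: reject on the first unmet requirement.
fn is_compatible(req: &Requirements, hw: &Hardware) -> bool {
    if let Some(arch) = &req.arch {
        if !arch.contains(&hw.arch) {
            return false;
        }
    }
    if let Some(min_ram) = req.ram {
        if hw.ram < min_ram {
            return false;
        }
    }
    if let Some(devices) = &hw.devices {
        for filter in &req.device {
            // every required class needs at least one device of that class
            if !devices.iter().any(|d| d.class == filter.class) {
                return false;
            }
        }
    }
    true
}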

View File

@@ -58,8 +58,6 @@ pub struct AddPackageSignerParams {
#[arg(long, help = "help.arg.version-range")]
#[ts(type = "string | null")]
pub versions: Option<VersionRange>,
#[arg(long, help = "help.arg.merge")]
pub merge: bool,
}
pub async fn add_package_signer(
@@ -68,7 +66,6 @@ pub async fn add_package_signer(
id,
signer,
versions,
merge,
}: AddPackageSignerParams,
) -> Result<(), Error> {
ctx.db
@@ -79,22 +76,13 @@ pub async fn add_package_signer(
"unknown signer {signer}"
);
let versions = versions.unwrap_or_default();
db.as_index_mut()
.as_package_mut()
.as_packages_mut()
.as_idx_mut(&id)
.or_not_found(&id)?
.as_authorized_mut()
.upsert(&signer, || Ok(VersionRange::None))?
.mutate(|existing| {
*existing = if merge {
VersionRange::or(existing.clone(), versions)
} else {
versions
};
Ok(())
})?;
.insert(&signer, &versions.unwrap_or_default())?;
Ok(())
})
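
The last hunk drops the --merge flag from add_package_signer: with it, the submitted version range was OR'ed into the signer's existing authorization; without it, the new range simply replaces whatever was there. A toy illustration of that difference, using a stand-in for VersionRange (the real type's variants differ):

#[derive(Clone, Debug)]
enum VersionRange {
    Anchor(String),                           // e.g. ">=1.0 <2.0"
    Or(Box<VersionRange>, Box<VersionRange>), // union of two ranges
}

impl VersionRange {
    fn or(a: VersionRange, b: VersionRange) -> VersionRange {
        VersionRange::Or(Box::new(a), Box::new(b))
    }
}

fn main() {
    let existing = VersionRange::Anchor(">=1.0 <2.0".into());
    let submitted = VersionRange::Anchor(">=2.0".into());

    // --merge behavior (removed): widen the existing authorization.
    let merged = VersionRange::or(existing.clone(), submitted.clone());

    // current behavior: overwrite the existing authorization.
    let overwritten = submitted;

    println!("merged:      {merged:?}");
    println!("overwritten: {overwritten:?}");
}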

Some files were not shown because too many files have changed in this diff.