Compare commits

..

26 Commits

Author SHA1 Message Date
Aiden McClelland
b7f7202e25 chore: bump version to 0.4.0-alpha.23 2026-03-25 23:27:57 -06:00
Aiden McClelland
0719c227ee fix: deduplicate tor keys using BTreeMap in v0_3_6 migration 2026-03-25 23:24:44 -06:00
Aiden McClelland
621da47990 fix: import data drive before setup if not mounted 2026-03-25 23:24:35 -06:00
Aiden McClelland
9fa81a0c9d feat: add deploy job to startos-iso workflow 2026-03-25 23:24:25 -06:00
Matt Hill
2dac2bb2b3 restart after server name change 2026-03-25 21:15:07 -06:00
Matt Hill
58f1dc5025 mask password in ST 2026-03-25 17:29:19 -06:00
Matt Hill
cc89023bbd fix spinner alignment 2026-03-25 13:39:13 -06:00
Matt Hill
7e35ad57e7 tor http is secure 2026-03-25 13:31:53 -06:00
Aiden McClelland
010e439d1d fix: guard against null startCursor in logs component 2026-03-25 13:24:52 -06:00
Aiden McClelland
cdbb512cca fix: trim whitespace from package data version file 2026-03-25 13:24:35 -06:00
Aiden McClelland
bb2e69299e fix: only log WAN IP error when all echoip URLs fail 2026-03-25 13:24:18 -06:00
Aiden McClelland
fd0dc9a5b8 fix: silence journalctl setup error in init 2026-03-25 13:24:02 -06:00
Aiden McClelland
e2e88f774e chore: add i18n entries for new CLI args and commands 2026-03-25 13:22:47 -06:00
Aiden McClelland
4bebcafdde fix: tolerate setsid EPERM in subcontainer pre_exec
In TTY mode, pty_process already calls setsid() on the child before
our pre_exec runs. The second setsid() fails with EPERM since the
process is already a session leader. This is harmless — ignore it.
2026-03-25 10:31:29 -06:00
Aiden McClelland
2bb1463f4f fix: mitigate tokio I/O driver starvation (tokio-rs/tokio#4730)
Tokio's multi-thread scheduler has a long-standing unfixed bug where all
worker threads can end up parked on condvars with no worker driving the
I/O reactor.  Condvar-parked workers have no timeout and sleep
indefinitely, so once in this state the runtime never recovers.

This was observed on a box migrating from 0.3.5.1: after heavy task
churn (package reinstalls, container operations, logging) all 16 workers
ended up on futex_wait with no thread on epoll_wait.  The web server
listened on both HTTP and HTTPS but never replied.  The box was stuck
for 7+ hours with 0% CPU.

Two mitigations:

1. Watchdog OS thread (startd.rs): a plain std::thread that every 30s
   injects a no-op task via Handle::spawn.  This forces a condvar-parked
   worker to wake, cycle through park, and grab the driver TryLock —
   breaking the stall regardless of what triggered it.

2. block_in_place in the logger (logger.rs): the TeeWriter holds a
   std::sync::Mutex across blocking file + stderr writes on worker
   threads.  Wrapping in block_in_place tells tokio to hand off driver
   duties before the worker blocks, reducing the window for starvation.
   Guarded by runtime_flavor() to avoid panicking on current-thread
   runtimes used by the CLI.
2026-03-25 10:14:03 -06:00
Aiden McClelland
f20ece44a1 chore: bump sdk version in container-runtime lockfile 2026-03-24 19:26:56 -06:00
Aiden McClelland
9fddcb957f chore: bump direct_io buffer from 256KiB to 1MiB 2026-03-24 19:26:56 -06:00
Aiden McClelland
fd502cfb99 fix: probe active block device before vg import cycle
When the target VG is already active (e.g. the running system's own
VG), probe the block device directly instead of going through the
full import/activate/open/cleanup sequence.
2026-03-24 19:26:56 -06:00
Aiden McClelland
ee95eef395 fix: mark backup progress complete unconditionally
Remove the backup_succeeded gate so the progress indicator updates
regardless of the backup outcome — the status field already captures
success/failure separately.
2026-03-24 19:26:56 -06:00
Aiden McClelland
aaa43ce6af fix: network error resilience and wifi state tracking
- Demote transient route-replace errors (vanishing interfaces) to trace
- Tolerate errors during policy routing cleanup on drop
- Use join_all instead of try_join_all for gateway watcher jobs
- Simplify wifi interface detection to always use find_wifi_iface()
- Write wifi enabled state to db instead of interface name
2026-03-24 19:26:55 -06:00
Aiden McClelland
e0f27281d1 feat: load bundled migration images and log progress during os migration
Load pre-saved container images from /usr/lib/startos/migration-images
before migrating packages, removing the need for internet access during
the v1→v2 s9pk conversion.  Add a periodic progress logger so the user
can see which package is being migrated.
2026-03-24 19:26:55 -06:00
Aiden McClelland
ecc4703ae7 build: add migration image bundling to build pipeline
Bundle start9/compat, start9/utils, and tonistiigi/binfmt container
images into the OS image so the v1→v2 s9pk migration can run without
internet access.
2026-03-24 19:26:55 -06:00
Aiden McClelland
d478911311 fix: restore chown on /proc/self/fd/* for subcontainer exec
The pipe-wrap binary guarantees FDs are always pipes (not sockets),
making the chown safe. The chown is still needed because anonymous
pipes have mode 0600 — without it, non-root users cannot re-open
/dev/stderr via /proc/self/fd/2.
2026-03-24 19:26:55 -06:00
Matt Hill
23fe6fb663 align checkbox 2026-03-24 18:57:19 -06:00
Matt Hill
186925065d sdk db backups, wifi ux, release notes, minor copy 2026-03-24 16:39:31 -06:00
Aiden McClelland
53dff95365 revert: remove websocket shutdown signal from RpcContinuations 2026-03-24 11:13:59 -06:00
69 changed files with 918 additions and 3688 deletions

View File

@@ -268,3 +268,108 @@ jobs:
name: ${{ matrix.platform }}.img
path: results/*.img
if: ${{ matrix.platform == 'raspberrypi' }}
deploy:
name: Deploy
needs: [image]
if: github.event_name == 'workflow_dispatch' && github.event.inputs.deploy != 'NONE'
runs-on: ubuntu-latest
env:
REGISTRY: >-
${{
fromJson('{
"alpha": "https://alpha-registry-x.start9.com",
"beta": "https://beta-registry.start9.com"
}')[github.event.inputs.deploy]
}}
S3_BUCKET: s3://startos-images
S3_CDN: https://startos-images.nyc3.cdn.digitaloceanspaces.com
steps:
- uses: actions/checkout@v6
with:
sparse-checkout: web/package.json
- name: Determine version
id: version
run: |
VERSION=$(sed -n 's/.*"version": *"\([^"]*\)".*/\1/p' web/package.json | head -1)
echo "version=$VERSION" >> "$GITHUB_OUTPUT"
echo "Version: $VERSION"
- name: Download squashfs artifacts
uses: actions/download-artifact@v8
with:
pattern: "*.squashfs"
path: artifacts/
merge-multiple: true
- name: Download ISO artifacts
uses: actions/download-artifact@v8
with:
pattern: "*.iso"
path: artifacts/
merge-multiple: true
- name: Install start-cli
run: |
ARCH=$(uname -m)
OS=$(uname -s | tr '[:upper:]' '[:lower:]')
ASSET_NAME="start-cli_${ARCH}-${OS}"
DOWNLOAD_URL=$(curl -fsS \
-H "Authorization: token ${{ github.token }}" \
https://api.github.com/repos/Start9Labs/start-os/releases \
| jq -r '[.[].assets[] | select(.name=="'"$ASSET_NAME"'")] | first | .browser_download_url')
curl -fsSL \
-H "Authorization: token ${{ github.token }}" \
-H "Accept: application/octet-stream" \
"$DOWNLOAD_URL" -o /tmp/start-cli
sudo install -m 755 /tmp/start-cli /usr/local/bin/start-cli
echo "start-cli: $(start-cli --version)"
- name: Configure S3
run: |
sudo apt-get install -y -qq s3cmd > /dev/null
cat > ~/.s3cfg <<EOF
[default]
access_key = ${{ secrets.S3_ACCESS_KEY }}
secret_key = ${{ secrets.S3_SECRET_KEY }}
host_base = nyc3.digitaloceanspaces.com
host_bucket = %(bucket)s.nyc3.digitaloceanspaces.com
use_https = True
EOF
- name: Set up developer key
run: |
mkdir -p ~/.startos
printf '%s' "${{ secrets.DEV_KEY }}" > ~/.startos/developer.key.pem
- name: Upload to S3
run: |
VERSION="${{ steps.version.outputs.version }}"
cd artifacts
for file in *.iso *.squashfs; do
[ -f "$file" ] || continue
echo "Uploading $file..."
s3cmd put -P "$file" "${{ env.S3_BUCKET }}/v${VERSION}/$file"
done
- name: Register OS version
run: |
VERSION="${{ steps.version.outputs.version }}"
start-cli --registry="${{ env.REGISTRY }}" registry os version add \
"$VERSION" "v${VERSION}" '' ">=0.3.5 <=${VERSION}"
- name: Index assets in registry
run: |
VERSION="${{ steps.version.outputs.version }}"
cd artifacts
for file in *.squashfs *.iso; do
[ -f "$file" ] || continue
PLATFORM=$(echo "$file" | sed 's/.*_\([^.]*\)\.\(squashfs\|iso\)$/\1/')
echo "Indexing $file for platform $PLATFORM..."
start-cli --registry="${{ env.REGISTRY }}" registry os asset add \
--platform="$PLATFORM" \
--version="$VERSION" \
"$file" \
"${{ env.S3_CDN }}/v${VERSION}/$file"
done

1
.gitignore vendored
View File

@@ -23,3 +23,4 @@ tmp
web/.i18n-checked
docs/USER.md
*.s9pk
/build/lib/migration-images

View File

@@ -8,7 +8,7 @@ StartOS is an open-source Linux distribution for running personal servers. It ma
- Frontend: Angular 21 + TypeScript + Taiga UI 5
- Container runtime: Node.js/TypeScript with LXC
- Database/State: Patch-DB (git submodule) - storage layer with reactive frontend sync
- API: JSON-RPC via rpc-toolkit (see `core/rpc-toolkit.md`), MCP for LLM agents (see `core/mcp/ARCHITECTURE.md`)
- API: JSON-RPC via rpc-toolkit (see `core/rpc-toolkit.md`)
- Auth: Password + session cookie, public/private key signatures, local authcookie (see `core/src/middleware/auth/`)
## Project Structure
@@ -28,7 +28,7 @@ StartOS is an open-source Linux distribution for running personal servers. It ma
## Components
- **`core/`** — Rust backend daemon. Produces a single binary `startbox` that is symlinked as `startd` (main daemon), `start-cli` (CLI), `start-container` (runs inside LXC containers), `registrybox` (package registry), and `tunnelbox` (VPN/tunnel). Handles all backend logic: RPC API, MCP server for LLM agents, service lifecycle, networking (DNS, ACME, WiFi, Tor, WireGuard), backups, and database state management. See [core/ARCHITECTURE.md](core/ARCHITECTURE.md).
- **`core/`** — Rust backend daemon. Produces a single binary `startbox` that is symlinked as `startd` (main daemon), `start-cli` (CLI), `start-container` (runs inside LXC containers), `registrybox` (package registry), and `tunnelbox` (VPN/tunnel). Handles all backend logic: RPC API, service lifecycle, networking (DNS, ACME, WiFi, Tor, WireGuard), backups, and database state management. See [core/ARCHITECTURE.md](core/ARCHITECTURE.md).
- **`web/`** — Angular 21 + TypeScript workspace using Taiga UI 5. Contains three applications (admin UI, setup wizard, VPN management) and two shared libraries (common components/services, marketplace). Communicates with the backend exclusively via JSON-RPC. See [web/ARCHITECTURE.md](web/ARCHITECTURE.md).
@@ -53,13 +53,13 @@ Rust (core/)
Key make targets along this chain:
| Step | Command | What it does |
| ---- | --------------------------------------- | --------------------------------- |
| 1 | `cargo check -p start-os` | Verify Rust compiles |
| 2 | `make ts-bindings` | Export ts-rs types → rsync to SDK |
| 3 | `cd sdk && make baseDist dist` | Build SDK packages |
| 4 | `cd web && npm run check` | Type-check Angular projects |
| 5 | `cd container-runtime && npm run check` | Type-check runtime |
| Step | Command | What it does |
|---|---|---|
| 1 | `cargo check -p start-os` | Verify Rust compiles |
| 2 | `make ts-bindings` | Export ts-rs types → rsync to SDK |
| 3 | `cd sdk && make baseDist dist` | Build SDK packages |
| 4 | `cd web && npm run check` | Type-check Angular projects |
| 5 | `cd container-runtime && npm run check` | Type-check runtime |
**Important**: Editing `sdk/base/lib/osBindings/*.ts` alone is NOT sufficient — you must rebuild the SDK bundle (step 3) before web/container-runtime can see the changes.
@@ -90,17 +90,6 @@ StartOS uses Patch-DB for reactive state synchronization:
This means the UI is always eventually consistent with the backend — after any mutating API call, the frontend waits for the corresponding PatchDB diff before resolving, so the UI reflects the result immediately.
## MCP Server (LLM Agent Interface)
StartOS includes an [MCP](https://modelcontextprotocol.io/) (Model Context Protocol) server at `/mcp`, enabling LLM agents to discover and invoke the same operations available through the UI and CLI. The MCP server runs inside the StartOS server process alongside the RPC API.
- **Tools**: Every RPC method is exposed as an MCP tool with LLM-optimized descriptions and JSON Schema inputs. Agents call `tools/list` to discover what's available and `tools/call` to invoke operations.
- **Resources**: System state is exposed via MCP resources backed by Patch-DB. Agents subscribe to `startos:///public` and receive debounced revision diffs over SSE, maintaining a local state cache without polling.
- **Auth**: Same session cookie auth as the UI — no separate credentials.
- **Transport**: MCP Streamable HTTP — POST for requests, GET for SSE notification stream, DELETE for session teardown.
See [core/ARCHITECTURE.md](core/ARCHITECTURE.md#mcp-server) for implementation details.
## Further Reading
- [core/ARCHITECTURE.md](core/ARCHITECTURE.md) — Rust backend architecture

View File

@@ -15,7 +15,7 @@ IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo
WEB_UIS := web/dist/raw/ui/index.html web/dist/raw/setup-wizard/index.html
COMPRESSED_WEB_UIS := web/dist/static/ui/index.html web/dist/static/setup-wizard/index.html
FIRMWARE_ROMS := build/lib/firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./build/lib/firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json)
BUILD_SRC := $(call ls-files, build/lib) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS)
BUILD_SRC := $(call ls-files, build/lib) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS) build/lib/migration-images/.done
IMAGE_RECIPE_SRC := $(call ls-files, build/image-recipe/)
STARTD_SRC := core/startd.service $(BUILD_SRC)
CORE_SRC := $(call ls-files, core) $(shell git ls-files --recurse-submodules patch-db) $(GIT_HASH_FILE)
@@ -89,6 +89,7 @@ clean:
rm -rf container-runtime/node_modules
rm -f container-runtime/*.squashfs
(cd sdk && make clean)
rm -rf build/lib/migration-images
rm -f env/*.txt
format:
@@ -105,6 +106,10 @@ test-sdk: $(call ls-files, sdk) sdk/base/lib/osBindings/index.ts
test-container-runtime: container-runtime/node_modules/.package-lock.json $(call ls-files, container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json
cd container-runtime && npm test
build/lib/migration-images/.done: build/save-migration-images.sh
ARCH=$(ARCH) ./build/save-migration-images.sh build/lib/migration-images
touch $@
install-cli: $(GIT_HASH_FILE)
./core/build/build-cli.sh --install

33
build/save-migration-images.sh Executable file
View File

@@ -0,0 +1,33 @@
#!/bin/bash
# Save Docker images needed by the 0.3.6-alpha.0 migration as tarballs
# so they can be bundled into the OS and loaded without internet access.
#
# Usage: ARCH=<x86_64|aarch64|...> ./save-migration-images.sh [DESTDIR]
#   ARCH    - target CPU architecture (default: x86_64); mapped to a
#             docker --platform value below
#   DESTDIR - output directory (default: build/lib/migration-images)
#
# Idempotent: images whose tarball already exists in DESTDIR are skipped.
set -euo pipefail
ARCH="${ARCH:-x86_64}"
DESTDIR="${1:-build/lib/migration-images}"
# Translate a uname-style arch into a docker platform string.
case "$ARCH" in
x86_64) DOCKER_PLATFORM="linux/amd64" ;;
aarch64) DOCKER_PLATFORM="linux/arm64" ;;
*) DOCKER_PLATFORM="linux/$ARCH" ;;
esac
IMAGES=("start9/compat:latest" "start9/utils:latest" "tonistiigi/binfmt:latest")
mkdir -p "$DESTDIR"
for IMAGE in "${IMAGES[@]}"; do
# "start9/compat:latest" -> "start9_compat_latest.tar"
FILENAME="${IMAGE//\//_}"
FILENAME="${FILENAME//:/_}.tar"
if [ -f "$DESTDIR/$FILENAME" ]; then
echo "Skipping $IMAGE (already saved)"
continue
fi
echo "Pulling $IMAGE for $DOCKER_PLATFORM..."
docker pull --platform "$DOCKER_PLATFORM" "$IMAGE"
echo "Saving $IMAGE to $DESTDIR/$FILENAME..."
# Save to a temp file and rename atomically so an interrupted save
# cannot leave a truncated tarball that the existence check above
# would wrongly treat as complete on the next run.
docker save "$IMAGE" -o "$DESTDIR/$FILENAME.tmp"
mv "$DESTDIR/$FILENAME.tmp" "$DESTDIR/$FILENAME"
done
echo "Migration images saved to $DESTDIR"

View File

@@ -37,7 +37,7 @@
},
"../sdk/dist": {
"name": "@start9labs/start-sdk",
"version": "0.4.0-beta.64",
"version": "0.4.0-beta.66",
"license": "MIT",
"dependencies": {
"@iarna/toml": "^3.0.0",

View File

@@ -23,7 +23,6 @@ The crate produces a single binary `startbox` that is symlinked under different
- `src/context/` — Context types (RpcContext, CliContext, InitContext, DiagnosticContext)
- `src/service/` — Service lifecycle management with actor pattern (`service_actor.rs`)
- `src/db/model/` — Patch-DB models (`public.rs` synced to frontend, `private.rs` backend-only)
- `src/mcp/` — MCP server for LLM agents (see [MCP Server](#mcp-server) below)
- `src/net/` — Networking (DNS, ACME, WiFi, Tor via Arti, WireGuard)
- `src/s9pk/` — S9PK package format (merkle archive)
- `src/registry/` — Package registry management
@@ -39,19 +38,16 @@ See [rpc-toolkit.md](rpc-toolkit.md) for full handler patterns and configuration
Patch-DB provides diff-based state synchronization. Changes to `db/model/public.rs` automatically sync to the frontend.
**Key patterns:**
- `db.peek().await` — Get a read-only snapshot of the database state
- `db.mutate(|db| { ... }).await` — Apply mutations atomically, returns `MutateResult`
- `#[derive(HasModel)]` — Derive macro for types stored in the database, generates typed accessors
**Generated accessor types** (from `HasModel` derive):
- `as_field()` — Immutable reference: `&Model<T>`
- `as_field_mut()` — Mutable reference: `&mut Model<T>`
- `into_field()` — Owned value: `Model<T>`
**`Model<T>` APIs** (from `db/prelude.rs`):
- `.de()` — Deserialize to `T`
- `.ser(&value)` — Serialize from `T`
- `.mutate(|v| ...)` — Deserialize, mutate, reserialize
@@ -67,12 +63,6 @@ See [i18n-patterns.md](i18n-patterns.md) for internationalization key convention
See [core-rust-patterns.md](core-rust-patterns.md) for common utilities (Invoke trait, Guard pattern, mount guards, Apply trait, etc.).
## MCP Server
The MCP (Model Context Protocol) server at `src/mcp/` exposes the StartOS RPC API to LLM agents via the Streamable HTTP transport at `/mcp`. Tools wrap the existing RPC handlers; resources expose Patch-DB state with debounced SSE subscriptions; auth reuses the UI session cookie.
See [src/mcp/ARCHITECTURE.md](src/mcp/ARCHITECTURE.md) for transport details, session lifecycle, tool dispatch, resource subscriptions, CORS, and body size limits.
## Related Documentation
- [rpc-toolkit.md](rpc-toolkit.md) — JSON-RPC handler patterns

2
core/Cargo.lock generated
View File

@@ -6439,7 +6439,7 @@ dependencies = [
[[package]]
name = "start-os"
version = "0.4.0-alpha.22"
version = "0.4.0-alpha.23"
dependencies = [
"aes",
"async-acme",

View File

@@ -15,7 +15,7 @@ license = "MIT"
name = "start-os"
readme = "README.md"
repository = "https://github.com/Start9Labs/start-os"
version = "0.4.0-alpha.22" # VERSION_BUMP
version = "0.4.0-alpha.23" # VERSION_BUMP
[lib]
name = "startos"

View File

@@ -1721,6 +1721,14 @@ lxc.mod.cleaned-up-containers:
fr_FR: "Conteneurs LXC orphelins nettoyés avec succès"
pl_PL: "Pomyślnie wyczyszczono wiszące kontenery LXC"
# version/v0_3_6_alpha_0.rs
migration.migrating-package:
en_US: "Migrating package %{package}..."
de_DE: "Paket %{package} wird migriert..."
es_ES: "Migrando paquete %{package}..."
fr_FR: "Migration du paquet %{package}..."
pl_PL: "Migracja pakietu %{package}..."
# registry/admin.rs
registry.admin.unknown-signer:
en_US: "Unknown signer"
@@ -2649,6 +2657,13 @@ help.arg.add-signer-key:
fr_FR: "Ajouter une clé publique au signataire"
pl_PL: "Dodaj klucz publiczny do sygnatariusza"
help.arg.address:
en_US: "Network address"
de_DE: "Netzwerkadresse"
es_ES: "Dirección de red"
fr_FR: "Adresse réseau"
pl_PL: "Adres sieciowy"
help.arg.allow-model-mismatch:
en_US: "Allow database model mismatch"
de_DE: "Datenbankmodell-Abweichung erlauben"
@@ -3090,6 +3105,13 @@ help.arg.platform:
fr_FR: "Identifiant de la plateforme cible"
pl_PL: "Identyfikator platformy docelowej"
help.arg.port:
en_US: "Port number"
de_DE: "Portnummer"
es_ES: "Número de puerto"
fr_FR: "Numéro de port"
pl_PL: "Numer portu"
help.arg.postgres-connection-url:
en_US: "PostgreSQL connection URL"
de_DE: "PostgreSQL-Verbindungs-URL"
@@ -3174,6 +3196,13 @@ help.arg.server-id:
fr_FR: "Identifiant unique du serveur"
pl_PL: "Unikalny identyfikator serwera"
help.arg.set-as-default-outbound:
en_US: "Set as the default outbound gateway"
de_DE: "Als Standard-Ausgangs-Gateway festlegen"
es_ES: "Establecer como puerta de enlace de salida predeterminada"
fr_FR: "Définir comme passerelle de sortie par défaut"
pl_PL: "Ustaw jako domyślną bramę wychodzącą"
help.arg.set-signer-name:
en_US: "Set the signer name"
de_DE: "Unterzeichnernamen festlegen"
@@ -3538,6 +3567,13 @@ help.arg.gateway-name:
fr_FR: "Nom de la passerelle"
pl_PL: "Nazwa bramy"
help.arg.gateway-type:
en_US: "Type of gateway"
de_DE: "Typ des Gateways"
es_ES: "Tipo de puerta de enlace"
fr_FR: "Type de passerelle"
pl_PL: "Typ bramy"
help.arg.governor-name:
en_US: "CPU governor name"
de_DE: "CPU-Governor-Name"
@@ -4526,6 +4562,13 @@ about.display-s9pk-manifest:
fr_FR: "Afficher le manifeste s9pk"
pl_PL: "Wyświetl manifest s9pk"
about.display-s9pk-root-sighash-and-maxsize:
en_US: "Display the s9pk root signature hash and max size"
de_DE: "Den s9pk-Root-Signaturhash und die maximale Größe anzeigen"
es_ES: "Mostrar el hash de firma raíz y el tamaño máximo del s9pk"
fr_FR: "Afficher le hachage de signature racine et la taille maximale du s9pk"
pl_PL: "Wyświetl hash podpisu głównego i maksymalny rozmiar s9pk"
about.display-server-metrics:
en_US: "Display server metrics"
de_DE: "Server-Metriken anzeigen"

View File

@@ -148,6 +148,15 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
.build()
.expect(&t!("bins.startd.failed-to-initialize-runtime"));
let res = rt.block_on(async {
// Periodically wake a worker thread from a non-tokio OS thread to
// prevent tokio I/O driver starvation (all workers parked on
// condvar with no driver). See tokio-rs/tokio#4730.
let rt_handle = tokio::runtime::Handle::current();
std::thread::spawn(move || loop {
std::thread::sleep(Duration::from_secs(30));
rt_handle.spawn(async {});
});
let mut server = WebServer::new(Acceptor::new(WildcardListener::new(80)?), refresher());
match inner_main(&mut server, &config).await {
Ok(a) => {

View File

@@ -39,7 +39,7 @@ impl DiagnosticContext {
shutdown,
disk_guid,
error: Arc::new(error.into()),
rpc_continuations: RpcContinuations::new(None),
rpc_continuations: RpcContinuations::new(),
})))
}
}

View File

@@ -32,7 +32,7 @@ impl InitContext {
error: watch::channel(None).0,
progress,
shutdown,
rpc_continuations: RpcContinuations::new(None),
rpc_continuations: RpcContinuations::new(),
})))
}
}

View File

@@ -339,7 +339,7 @@ impl RpcContext {
services,
cancellable_installs: SyncMutex::new(BTreeMap::new()),
metrics_cache,
rpc_continuations: RpcContinuations::new(Some(shutdown.clone())),
rpc_continuations: RpcContinuations::new(),
shutdown,
lxc_manager: Arc::new(LxcManager::new()),
open_authed_continuations: OpenAuthedContinuations::new(),

View File

@@ -85,7 +85,7 @@ impl SetupContext {
result: OnceCell::new(),
disk_guid: OnceCell::new(),
shutdown,
rpc_continuations: RpcContinuations::new(None),
rpc_continuations: RpcContinuations::new(),
install_rootfs: SyncMutex::new(None),
language: SyncMutex::new(None),
keyboard: SyncMutex::new(None),

View File

@@ -164,13 +164,13 @@ pub struct SubscribeRes {
pub guid: Guid,
}
pub(crate) struct DbSubscriber {
pub(crate) rev: u64,
pub(crate) sub: UnboundedReceiver<Revision>,
pub(crate) sync_db: watch::Receiver<u64>,
struct DbSubscriber {
rev: u64,
sub: UnboundedReceiver<Revision>,
sync_db: watch::Receiver<u64>,
}
impl DbSubscriber {
pub(crate) async fn recv(&mut self) -> Option<Revision> {
async fn recv(&mut self) -> Option<Revision> {
loop {
tokio::select! {
rev = self.sub.recv() => {

View File

@@ -409,6 +409,18 @@ pub async fn mount_all_fs<P: AsRef<Path>>(
/// filesystem type. Returns `None` if probing fails (e.g. LV doesn't exist).
#[instrument(skip_all)]
pub async fn probe_package_data_fs(guid: &str) -> Result<Option<String>, Error> {
// If the target block device is already accessible (e.g. this is the
// currently active system VG), probe it directly without any
// import/activate/open/cleanup steps.
let blockdev_path = if !guid.ends_with("_UNENC") {
PathBuf::from(format!("/dev/mapper/{guid}_package-data"))
} else {
Path::new("/dev").join(guid).join("package-data")
};
if tokio::fs::metadata(&blockdev_path).await.is_ok() {
return detect_filesystem(&blockdev_path).await.map(Some);
}
// Import and activate the VG
match Command::new("vgimport")
.arg(guid)

View File

@@ -258,7 +258,7 @@ pub async fn init(
.arg("journalctl")
.invoke(crate::ErrorKind::Journald)
.await
.log_err();
.ok();
mount_logs.complete();
tokio::io::copy(
&mut open_file("/run/startos/init.log").await?,

View File

@@ -21,7 +21,6 @@ use tracing::instrument;
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::registry::asset::BufferedHttpSource;
use crate::db::model::package::{ManifestPreference, PackageStateMatchModelRef};
use crate::prelude::*;
use crate::progress::{FullProgress, FullProgressTracker, PhasedProgressBar};
@@ -286,57 +285,6 @@ pub async fn sideload(
Ok(SideloadResponse { upload, progress })
}
#[derive(Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct SideloadUrlParams {
#[ts(type = "string")]
url: Url,
}
#[instrument(skip_all)]
pub async fn sideload_url(
ctx: RpcContext,
SideloadUrlParams { url }: SideloadUrlParams,
) -> Result<(), Error> {
if !matches!(url.scheme(), "http" | "https") {
return Err(Error::new(
eyre!("URL scheme must be http or https, got: {}", url.scheme()),
ErrorKind::InvalidRequest,
));
}
let progress_tracker = FullProgressTracker::new();
let download_progress = progress_tracker.add_phase("Downloading".into(), Some(100));
let client = ctx.client.clone();
let db = ctx.db.clone();
let pt_ref = progress_tracker.clone();
let download = ctx
.services
.install(
ctx.clone(),
|| async move {
let source = BufferedHttpSource::new(client, url, download_progress).await?;
let key = db.peek().await.into_private().into_developer_key();
crate::s9pk::load(source, || Ok(key.de()?.0), Some(&pt_ref)).await
},
None,
None::<Never>,
Some(progress_tracker),
)
.await?;
tokio::spawn(async move {
if let Err(e) = async { download.await?.await }.await {
tracing::error!("Error sideloading package from URL: {e}");
tracing::debug!("{e:?}");
}
});
Ok(())
}
#[derive(Debug, Clone, Deserialize, Serialize, Parser, TS)]
#[group(skip)]
#[ts(export)]

View File

@@ -63,7 +63,6 @@ pub mod init;
pub mod install;
pub mod logs;
pub mod lxc;
pub mod mcp;
pub mod middleware;
pub mod net;
pub mod notifications;
@@ -442,12 +441,6 @@ pub fn package<C: Context>() -> ParentHandler<C> {
.with_metadata("get_session", Value::Bool(true))
.no_cli(),
)
.subcommand(
"sideload-url",
from_fn_async(install::sideload_url)
.with_metadata("sync_db", Value::Bool(true))
.no_cli(),
)
.subcommand(
"install",
from_fn_async_local(install::cli_install)

View File

@@ -1,193 +0,0 @@
# MCP Server Architecture
The Model Context Protocol server embedded in StartOS (`core/src/mcp/`).
## Transport: Streamable HTTP (MCP 2025-03-26)
The server implements the **Streamable HTTP** transport from the MCP spec, not the older stdio or SSE-only transports. A single route (`/mcp`) handles all three HTTP methods:
| Method | Purpose |
| ----------- | -------------------------------------------------------------------------------- |
| **POST** | JSON-RPC 2.0 requests from client (initialize, tools/call, resources/read, etc.) |
| **GET** | Opens an SSE stream for server→client notifications (resource change events) |
| **DELETE** | Explicitly ends a session |
| **OPTIONS** | CORS preflight |
A discovery endpoint at `/.well-known/mcp` returns `{"mcp_endpoint":"/mcp"}`.
## Authentication
Every HTTP method (POST, GET, DELETE) validates the caller's session cookie via `ValidSessionToken::from_header` before processing. This reuses the same auth infrastructure as the main StartOS web UI — MCP clients must present a valid session cookie obtained through the normal login flow. Unauthenticated requests get a 401.
## Session Lifecycle
1. **Create**: Client sends `initialize` via POST. Server generates a UUID session ID, creates an `McpSession` with a bounded mpsc channel (256 messages), and returns the ID in the `Mcp-Session-Id` response header.
2. **Connect SSE**: Client opens a GET with the session ID header. The server takes the receiver half of the notification channel (`take_notification_rx`) and streams it as SSE events. Only one GET connection per session is allowed (the rx is moved, not cloned).
3. **Use**: Client sends tool calls, resource reads, subscriptions via POST. All subsequent POST requests (everything after `initialize`) must include a valid session ID header — the server validates it against the session map before processing.
4. **Teardown**: Three paths:
- Client sends DELETE -> session is removed, subscription tasks are aborted.
- SSE stream disconnects -> `CleanupStream`'s `PinnedDrop` impl removes the session.
- Session is never connected -> background sweep task (every 30s) removes sessions older than 60s that never had a GET stream attached.
## Module Structure
```
core/src/mcp/
├── mod.rs — HTTP handlers, routing, MCP method dispatch, shell execution, CORS
├── protocol.rs — JSON-RPC 2.0 types, MCP request/response structs, error codes
├── session.rs — Session map, create/remove/sweep, resource subscriptions with debounce
└── tools.rs — Tool registry (89 tools), HashMap<String, ToolEntry> mapping names → RPC methods + schemas
```
## Tool Dispatch
`tool_registry()` returns a `HashMap<String, ToolEntry>`, each mapping:
- An MCP tool name (e.g. `"package.start"`)
- A JSON Schema for input validation (sent to clients via `tools/list`)
- A backing RPC method name (usually identical to the tool name)
- Flags: `sync_db` (whether to flush DB sequence after success), `needs_session` (whether to inject `__Auth_session`)
When `tools/call` arrives:
1. Look up the tool by name via HashMap O(1) lookup.
2. Convert arguments from `serde_json::Value` to `imbl_value::Value`.
3. **Special-case**: If `rpc_method` is `"__package_shell__"`, dispatch to `handle_package_shell_exec` directly (no RPC handler). Sets `kill_on_drop(true)` to ensure timed-out processes are terminated.
4. Otherwise, optionally inject `__Auth_session` into params, then call `server.handle_command(rpc_method, params)`.
5. On success: if `sync_db` is true, flush the DB sequence. Return the result pretty-printed as a text content block.
6. On error: return the error as a text content block with `is_error: true`, using `McpResponse::ok` (MCP spec: tool errors are results, not JSON-RPC errors).
## Shell Execution
One shell tool bypasses the RPC layer entirely:
- **`package.shell`** (`__package_shell__`): Resolves the target package's subcontainer via `Service::resolve_subcontainer`, then runs `/bin/sh -c <command>` inside it via `lxc-attach` with `kill_on_drop(true)`. 30s default timeout, 300s max. Host-level shell access (`system.shell`) is intentionally excluded — agents operate within package containers only.
## Resource Subscriptions
Four resources are exposed:
- `startos:///public` — full public DB tree
- `startos:///public/serverInfo` — server metadata
- `startos:///public/packageData` — installed packages
- `startos:///mcp/system-prompt` — curated AI assistant context (text/plain)
Resource URIs are validated to only allow `/public/**` subtrees and the special `/mcp/system-prompt` path. Attempts to access non-public paths (e.g. `startos:///private/...`) are rejected.
`resources/read` parses the URI into a `JsonPointer`, calls `ctx.db.dump(&pointer)`, and returns the JSON. The system prompt resource is handled as a special case, returning server info and version.
`resources/subscribe` creates a `DbSubscriber` that watches the patch-db for changes at the given pointer. Changes are **debounced** (500ms window): the subscriber collects multiple revisions and merges their `DiffPatch`es before sending a single `notifications/resources/updated` notification over the SSE channel. The subscription task runs as a spawned tokio task; its `JoinHandle` is stored in the session so it can be aborted on unsubscribe or session teardown. Re-subscribing to the same URI aborts the prior subscription first.
## CORS
- Preflight (OPTIONS): reflects the request's `Origin`, `Access-Control-Request-Method`, and `Access-Control-Request-Headers` back. Sets `Allow-Credentials: true` and caches for 24h.
- Normal responses (`apply_cors`): reflects the request's `Origin` header when present, falls back to `*` when absent. Exposes the `Mcp-Session-Id` header. This matches the behavior of the rpc-toolkit `Cors` middleware used by the main UI.
- CORS headers are applied to all response types: POST JSON-RPC, GET SSE, DELETE, and error responses.
## Excluded RPC Methods
Of the ~195 RPC methods registered in the StartOS backend, 88 are exposed as MCP tools (plus 1 MCP-only tool: `package.shell`). The remaining ~107 are excluded for the following reasons.
### Wrong context — Setup / Init / Diagnostic modes
These methods belong to the setup wizard, initial install, or diagnostic recovery mode — entirely different server states that are not reachable during normal operation when the MCP server is running.
| Method | Reason |
|--------|--------|
| `setup.*` (15 methods) | Setup wizard only runs during initial OS configuration |
| `init.*` (14 methods) | Initial disk/install flow, not reachable post-boot |
| `diagnostic.*` (7 methods) | Diagnostic recovery mode, separate HTTP server |
| `flash-os` | Bare-metal OS flashing |
### Wrong context — CLI / Developer tooling
These are developer-facing commands invoked via the CLI, not the web UI. They operate on local files or require local filesystem access.
| Method | Reason |
|--------|--------|
| `s9pk.*` (9 methods) | Package building/inspection — CLI tool for developers |
| `util.b3sum` | BLAKE3 checksum utility — CLI helper |
| `init-key`, `pubkey` | Key management — CLI operations |
### Wrong context — Registry administration
These manage the package registry (a separate server-side component), not the local StartOS instance.
| Method | Reason |
|--------|--------|
| `registry.*` (20 methods) | Registry server administration, not local device management |
### Wrong context — Tunnel management
These configure the Start9 tunnel service, which has its own management interface.
| Method | Reason |
|--------|--------|
| `tunnel.*` (12 methods) | Tunnel server management, separate from local OS control |
### Replaced by MCP-native functionality
| Method | Reason |
|--------|--------|
| `db.subscribe` | Replaced by MCP `resources/subscribe` which calls `ctx.db.dump_and_sub()` directly with 500ms debounce |
| `server.metrics.follow` | WebSocket continuation for streaming metrics — use `server.metrics` (polling) instead |
### Requires middleware injection not available via MCP dispatch
| Method | Reason |
|--------|--------|
| `package.sideload` | Requires multipart file upload via REST continuation, not JSON-RPC params. Use `package.sideload-by-url` MCP tool (backed by `package.sideload-url` RPC) which accepts a URL instead |
### Security — host-level shell access excluded
| Method | Reason |
|--------|--------|
| `system.shell` | Arbitrary host-level command execution is too broad a privilege for MCP agents. Agents can execute commands inside package subcontainers via `package.shell`, which is scoped to the service's filesystem and processes |
### Auth methods — intentionally excluded
| Method | Reason |
|--------|--------|
| `auth.login` | MCP clients authenticate via session cookie before reaching the MCP server — login is a prerequisite, not an MCP operation |
| `auth.logout` | Logging out the session that the MCP client is using would break the connection. Clients should disconnect (DELETE) instead |
### Internal / low-value
| Method | Reason |
|--------|--------|
| `echo` | Debug echo — no agent value |
| `git-info` | Build metadata — available via `server.device-info` |
| `state` | Returns server state enum — available via DB resources |
| `notification.create` | Internal: creates notifications from backend code, not user-facing |
| `db.apply` | Bulk DB mutation — CLI-specific params (`apply_receipt`) not suitable for MCP |
| `kiosk.set` | Kiosk mode toggle — physical display setting, not agent-relevant |
### Deep host/binding management — not yet exposed
These methods manage individual domain bindings and address assignments at a granular level. The list (`server.host.address.list`, `server.host.binding.list`, `package.host.list`) and read operations are exposed; the mutation operations below are deferred until agent workflows demonstrate a need.
| Method | Reason |
|--------|--------|
| `server.host.address.domain.public.add` | Granular domain management — deferred |
| `server.host.address.domain.public.remove` | Granular domain management — deferred |
| `server.host.address.domain.private.add` | Granular domain management — deferred |
| `server.host.address.domain.private.remove` | Granular domain management — deferred |
| `server.host.binding.set-address-enabled` | Granular binding management — deferred |
| `package.host.address.domain.public.add` | Granular domain management — deferred |
| `package.host.address.domain.public.remove` | Granular domain management — deferred |
| `package.host.address.domain.private.add` | Granular domain management — deferred |
| `package.host.address.domain.private.remove` | Granular domain management — deferred |
| `package.host.address.list` | Per-package address listing — deferred |
| `package.host.binding.list` | Per-package binding listing — deferred |
| `package.host.binding.set-address-enabled` | Granular binding management — deferred |
| `net.gateway.set-default-outbound` | Gateway default route — deferred |
## Body Size Limits
POST request bodies are limited to 1 MiB:
1. `Content-Length` header is checked **before** reading the body (rejects oversized requests immediately).
2. After reading, the actual body size is re-checked as defense-in-depth for chunked transfers that lack `Content-Length`.

File diff suppressed because it is too large Load Diff

View File

@@ -1,211 +0,0 @@
use serde::{Deserialize, Serialize};
use serde_json::Value as JsonValue;
// JSON-RPC 2.0 error codes (standard codes from the JSON-RPC 2.0 spec, §5.1)
pub const PARSE_ERROR: i32 = -32700;
pub const INVALID_REQUEST: i32 = -32600;
pub const METHOD_NOT_FOUND: i32 = -32601;
pub const INVALID_PARAMS: i32 = -32602;
pub const INTERNAL_ERROR: i32 = -32603;
/// MCP protocol revision implemented by this server (Streamable HTTP transport).
pub const PROTOCOL_VERSION: &str = "2025-03-26";
// === JSON-RPC 2.0 envelope ===
/// Incoming JSON-RPC 2.0 request. `id` is `None` for notifications (no reply expected).
#[derive(Deserialize)]
pub struct McpRequest {
pub jsonrpc: String,
pub id: Option<JsonValue>,
pub method: String,
#[serde(default)]
pub params: Option<JsonValue>,
}
/// Outgoing JSON-RPC 2.0 response. Exactly one of `result`/`error` is set —
/// enforced by the `ok`/`error` constructors, not by the type itself.
#[derive(Serialize)]
pub struct McpResponse {
pub jsonrpc: &'static str,
#[serde(skip_serializing_if = "Option::is_none")]
pub id: Option<JsonValue>,
#[serde(skip_serializing_if = "Option::is_none")]
pub result: Option<JsonValue>,
#[serde(skip_serializing_if = "Option::is_none")]
pub error: Option<McpError>,
}
impl McpResponse {
pub fn ok(id: Option<JsonValue>, result: JsonValue) -> Self {
Self {
jsonrpc: "2.0",
id,
result: Some(result),
error: None,
}
}
pub fn error(id: Option<JsonValue>, code: i32, message: String, data: Option<JsonValue>) -> Self {
Self {
jsonrpc: "2.0",
id,
result: None,
error: Some(McpError {
code,
message,
data,
}),
}
}
}
/// JSON-RPC 2.0 error object (code / message / optional data payload).
#[derive(Serialize)]
pub struct McpError {
pub code: i32,
pub message: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub data: Option<JsonValue>,
}
// === initialize ===
/// Client → server `initialize` parameters.
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct InitializeParams {
/// Protocol revision the client speaks.
pub protocol_version: String,
#[serde(default)]
pub capabilities: JsonValue,
#[serde(default)]
pub client_info: Option<ClientInfo>,
}
/// Identity of the connecting MCP client.
#[derive(Deserialize)]
pub struct ClientInfo {
pub name: String,
#[serde(default)]
pub version: Option<String>,
}
/// Server → client `initialize` result.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct InitializeResult {
pub protocol_version: &'static str,
pub capabilities: ServerCapabilities,
pub server_info: ServerInfo,
}
/// Capabilities advertised to the client: tools plus subscribable resources.
#[derive(Serialize)]
pub struct ServerCapabilities {
pub tools: ToolsCapability,
pub resources: ResourcesCapability,
}
/// Marker: the server supports `tools/*` (no sub-options).
#[derive(Serialize)]
pub struct ToolsCapability {}
/// Resource capability; `subscribe` advertises `resources/subscribe` support.
#[derive(Serialize)]
pub struct ResourcesCapability {
pub subscribe: bool,
}
/// Server identity reported during initialize.
#[derive(Serialize)]
pub struct ServerInfo {
pub name: &'static str,
pub version: String,
}
// === tools/list ===
/// One entry in the `tools/list` response.
#[derive(Serialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ToolDefinition {
pub name: String,
pub description: String,
/// JSON Schema describing the tool's expected arguments.
pub input_schema: JsonValue,
}
#[derive(Serialize)]
pub struct ToolsListResult {
pub tools: Vec<ToolDefinition>,
}
// === tools/call ===
/// Parameters of a `tools/call` request.
#[derive(Deserialize)]
pub struct ToolsCallParams {
pub name: String,
#[serde(default)]
pub arguments: JsonValue,
}
/// Result of a `tools/call`. Tool failures are reported via `is_error`,
/// not as JSON-RPC errors (per the MCP spec).
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ToolsCallResult {
pub content: Vec<ContentBlock>,
#[serde(skip_serializing_if = "Option::is_none")]
pub is_error: Option<bool>,
}
/// Content payload variant; serialized with a `"type"` tag.
#[derive(Serialize)]
#[serde(tag = "type")]
pub enum ContentBlock {
#[serde(rename = "text")]
Text { text: String },
}
// === resources/list ===
/// One entry in the `resources/list` response.
#[derive(Serialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ResourceDefinition {
pub uri: String,
pub name: String,
pub description: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub mime_type: Option<String>,
}
#[derive(Serialize)]
pub struct ResourcesListResult {
pub resources: Vec<ResourceDefinition>,
}
// === resources/read ===
/// Parameters of a `resources/read` request.
#[derive(Deserialize)]
pub struct ResourcesReadParams {
pub uri: String,
}
#[derive(Serialize)]
pub struct ResourcesReadResult {
pub contents: Vec<ResourceContent>,
}
/// A single resource payload returned from `resources/read`.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct ResourceContent {
pub uri: String,
#[serde(skip_serializing_if = "Option::is_none")]
pub mime_type: Option<String>,
#[serde(skip_serializing_if = "Option::is_none")]
pub text: Option<String>,
}
// === resources/subscribe + unsubscribe ===
/// Parameters of a `resources/subscribe` request.
#[derive(Deserialize)]
pub struct ResourcesSubscribeParams {
pub uri: String,
}
/// Parameters of a `resources/unsubscribe` request.
#[derive(Deserialize)]
pub struct ResourcesUnsubscribeParams {
pub uri: String,
}
// === Server→client notification ===
/// JSON-RPC notification pushed over the SSE stream (no `id`, so no reply).
#[derive(Serialize)]
pub struct McpNotification {
pub jsonrpc: &'static str,
pub method: &'static str,
pub params: serde_json::Value,
}

View File

@@ -1,232 +0,0 @@
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant};
use patch_db::json_ptr::JsonPointer;
use patch_db::DiffPatch;
use serde_json::Value as JsonValue;
use tokio::sync::mpsc;
use tokio::task::JoinHandle;
use uuid::Uuid;
use crate::context::RpcContext;
use crate::db::DbSubscriber;
use crate::prelude::*;
use crate::util::sync::SyncMutex;
use super::protocol::McpNotification;
/// Shared map of live MCP sessions, keyed by the UUID session id.
pub(crate) type SessionMap = Arc<SyncMutex<HashMap<String, McpSession>>>;
/// Maximum time a session can exist without a GET stream before being cleaned up.
const SESSION_STALE_TIMEOUT: Duration = Duration::from_secs(60);
/// Maximum buffered notifications before backpressure kicks in.
const NOTIFICATION_CHANNEL_BOUND: usize = 256;
/// Per-session state. `notification_rx` holds the receiver until a GET (SSE)
/// stream claims it via `take_notification_rx`, so `Some` here means "no SSE
/// stream has connected yet" — the stale-session sweep relies on that.
pub(crate) struct McpSession {
pub notification_tx: mpsc::Sender<JsonValue>,
pub notification_rx: Option<mpsc::Receiver<JsonValue>>,
// Active resource-subscription tasks, keyed by resource URI; aborted on teardown.
pub subscriptions: HashMap<String, JoinHandle<()>>,
pub created_at: Instant,
}
/// Allocate a fresh MCP session: generate a UUID id, create the bounded
/// notification channel, and register the session in the shared map.
/// Returns the new session id (sent to the client as `Mcp-Session-Id`).
pub(crate) fn create_session(sessions: &SessionMap) -> String {
    let (sender, receiver) = mpsc::channel(NOTIFICATION_CHANNEL_BOUND);
    let session_id = Uuid::new_v4().to_string();
    sessions.mutate(|map| {
        let session = McpSession {
            notification_tx: sender,
            notification_rx: Some(receiver),
            subscriptions: HashMap::new(),
            created_at: Instant::now(),
        };
        map.insert(session_id.clone(), session);
    });
    session_id
}
/// Sweep stale sessions. Call this from any frequent code path (POST handler, create_session).
pub(crate) fn sweep_stale_sessions_if_needed(sessions: &SessionMap) {
    sessions.mutate(|session_map| sweep_stale_sessions(session_map));
}
/// Remove sessions that were created but never connected a GET stream within the timeout.
fn sweep_stale_sessions(map: &mut HashMap<String, McpSession>) {
    // Phase 1: collect the ids of stale sessions without mutating the map.
    // A session is stale when its rx is still present (no GET ever connected)
    // and it has outlived the timeout.
    let mut stale_ids = Vec::new();
    for (id, session) in map.iter() {
        let never_connected = session.notification_rx.is_some();
        if never_connected && session.created_at.elapsed() > SESSION_STALE_TIMEOUT {
            stale_ids.push(id.clone());
        }
    }
    // Phase 2: drop each stale session, aborting its subscription tasks.
    for id in stale_ids {
        tracing::info!(
            target: "mcp_audit",
            session_id = %id,
            "Sweeping stale MCP session (no GET stream connected)"
        );
        if let Some(session) = map.remove(&id) {
            for (_, handle) in session.subscriptions {
                handle.abort();
            }
        }
    }
}
/// Tear down a session by id: drop it from the map and abort every
/// subscription task it owns. No-op if the id is unknown.
pub(crate) fn remove_session(sessions: &SessionMap, id: &str) {
    sessions.mutate(|map| {
        let Some(session) = map.remove(id) else {
            return;
        };
        for (_, handle) in session.subscriptions {
            handle.abort();
        }
    });
}
/// Take the notification receiver from a session (for use by the SSE stream).
/// Returns None if the session doesn't exist or the rx was already taken.
pub(crate) fn take_notification_rx(
    sessions: &SessionMap,
    id: &str,
) -> Option<mpsc::Receiver<JsonValue>> {
    sessions.mutate(|map| {
        map.get_mut(id)
            .and_then(|session| session.notification_rx.take())
    })
}
/// Check whether the given session ID exists in the session map.
pub(crate) fn session_exists(sessions: &SessionMap, id: &str) -> bool {
    sessions.peek(|map| map.get(id).is_some())
}
/// Parse a `startos:///...` URI into a JsonPointer.
fn parse_resource_uri(uri: &str) -> Result<JsonPointer, Error> {
let path = uri.strip_prefix("startos://").ok_or_else(|| {
Error::new(
eyre!("Invalid resource URI: must start with startos://"),
ErrorKind::InvalidRequest,
)
})?;
path.parse::<JsonPointer>()
.with_kind(ErrorKind::InvalidRequest)
}
/// Subscribe an MCP session to patch-db changes under the resource `uri`.
///
/// Parses the `startos://` URI into a JSON pointer, takes an initial dump and
/// subscription from the DB, then spawns a task that forwards debounced
/// (500ms window) change notifications into the session's SSE channel.
/// Re-subscribing to the same URI aborts the prior task first.
pub(crate) async fn subscribe_resource(
ctx: &RpcContext,
sessions: &SessionMap,
session_id: &str,
uri: &str,
) -> Result<(), Error> {
let pointer = parse_resource_uri(uri)?;
// Initial dump provides the starting revision id; `sub` yields later revisions.
let (dump, sub) = ctx.db.dump_and_sub(pointer).await;
let mut db_sub = DbSubscriber {
rev: dump.id,
sub,
sync_db: ctx.sync_db.subscribe(),
};
// Grab the SSE sender up front; error out if the session vanished meanwhile.
let tx = sessions
.peek(|map| map.get(session_id).map(|s| s.notification_tx.clone()))
.ok_or_else(|| Error::new(eyre!("Session not found"), ErrorKind::InvalidRequest))?;
let uri_owned = uri.to_string();
let handle = tokio::spawn(async move {
loop {
// Wait for first revision
let first = match db_sub.recv().await {
Some(rev) => rev,
None => break,
};
// Debounce: collect more revisions for up to 500ms
let mut merged_id = first.id;
let mut merged_patch = first.patch;
let debounce = tokio::time::sleep(Duration::from_millis(500));
tokio::pin!(debounce);
loop {
tokio::select! {
_ = &mut debounce => break,
rev = db_sub.recv() => {
match rev {
Some(rev) => {
// Fold each additional revision into the pending patch.
merged_id = rev.id;
merged_patch.append(rev.patch);
}
None => {
// Subscriber closed — send what we have and exit
let _ = send_notification(&tx, &uri_owned, merged_id, &merged_patch);
return;
}
}
}
}
}
// One merged notification per debounce window.
if send_notification(&tx, &uri_owned, merged_id, &merged_patch).is_err() {
break; // SSE stream closed or channel full
}
}
});
// Store the task handle, aborting any prior subscription for the same URI
sessions.mutate(|map| {
if let Some(session) = map.get_mut(session_id) {
if let Some(old_handle) = session.subscriptions.remove(uri) {
tracing::info!(
target: "mcp_audit",
uri = %uri,
session_id = %session_id,
"Aborting prior subscription for re-subscribed URI"
);
old_handle.abort();
}
session.subscriptions.insert(uri.to_string(), handle);
}
});
Ok(())
}
/// Serialize a `notifications/resources/updated` message and push it onto the
/// session's SSE channel without blocking. Returns Err(()) (after logging) if
/// the channel is full or the receiving stream has gone away.
fn send_notification(
    tx: &mpsc::Sender<JsonValue>,
    uri: &str,
    id: u64,
    patch: &DiffPatch,
) -> Result<(), ()> {
    let params = serde_json::json!({
        "uri": uri,
        "revision": {
            "id": id,
            "patch": patch,
        }
    });
    let message = McpNotification {
        jsonrpc: "2.0",
        method: "notifications/resources/updated",
        params,
    };
    // Best-effort serialization: a failure degrades to JSON null rather than panicking.
    let payload = serde_json::to_value(&message).unwrap_or_default();
    match tx.try_send(payload) {
        Ok(()) => Ok(()),
        Err(e) => {
            tracing::warn!(
                target: "mcp_audit",
                uri = %uri,
                "Notification channel full or closed, dropping notification: {e}"
            );
            Err(())
        }
    }
}
/// Cancel the subscription task for `uri` on the given session, if one exists.
/// Unknown sessions or URIs are a silent no-op.
pub(crate) fn unsubscribe_resource(sessions: &SessionMap, session_id: &str, uri: &str) {
    sessions.mutate(|map| {
        let task = map
            .get_mut(session_id)
            .and_then(|session| session.subscriptions.remove(uri));
        if let Some(task) = task {
            task.abort();
        }
    });
}

File diff suppressed because it is too large Load Diff

View File

@@ -765,7 +765,9 @@ async fn watcher(
}
changed
});
futures::future::try_join_all(jobs).await?;
for result in futures::future::join_all(jobs).await {
result.log_err();
}
Ok::<_, Error>(())
})
@@ -824,7 +826,7 @@ impl Drop for PolicyRoutingCleanup {
.arg("50")
.invoke(ErrorKind::Network)
.await
.log_err();
.ok();
Command::new("ip")
.arg("route")
.arg("flush")
@@ -832,7 +834,7 @@ impl Drop for PolicyRoutingCleanup {
.arg(&table_str)
.invoke(ErrorKind::Network)
.await
.log_err();
.ok();
Command::new("iptables")
.arg("-t")
.arg("mangle")
@@ -850,7 +852,7 @@ impl Drop for PolicyRoutingCleanup {
.arg(&table_str)
.invoke(ErrorKind::Network)
.await
.log_err();
.ok();
});
}
}
@@ -1067,7 +1069,17 @@ async fn apply_policy_routing(
cmd.arg(part);
}
cmd.arg("table").arg(&table_str);
cmd.invoke(ErrorKind::Network).await.log_err();
if let Err(e) = cmd.invoke(ErrorKind::Network).await {
// Transient interfaces (podman, wg-quick, etc.) may
// vanish between reading the main table and replaying
// the route — demote to debug to avoid log noise.
if e.source.to_string().contains("No such file or directory") {
tracing::trace!("ip route replace (transient device): {e}");
} else {
tracing::error!("{e}");
tracing::debug!("{e:?}");
}
}
}
}
@@ -1299,6 +1311,7 @@ async fn poll_ip_info(
crate::db::model::public::default_echoip_urls()
};
let mut wan_ip = None;
let mut err = None;
for echoip_url in echoip_urls {
if echoip_ratelimit_state
.get(&echoip_url)
@@ -1314,15 +1327,7 @@ async fn poll_ip_info(
wan_ip = a;
}
Err(e) => {
tracing::error!(
"{}",
t!(
"net.gateway.failed-to-determine-wan-ip",
iface = iface.to_string(),
error = e.to_string()
)
);
tracing::debug!("{e:?}");
err = Some(e);
}
};
echoip_ratelimit_state.insert(echoip_url, Instant::now());
@@ -1331,6 +1336,19 @@ async fn poll_ip_info(
}
};
}
if wan_ip.is_none()
&& let Some(e) = err
{
tracing::error!(
"{}",
t!(
"net.gateway.failed-to-determine-wan-ip",
iface = iface.to_string(),
error = e.to_string()
)
);
tracing::debug!("{e:?}");
}
let mut ip_info = IpInfo {
name: name.clone(),
scope_id,
@@ -1470,22 +1488,12 @@ impl NetworkInterfaceController {
) -> Result<(), Error> {
tracing::debug!("syncronizing {info:?} to db");
let mut wifi_iface = info
.iter()
.find(|(_, info)| {
info.ip_info.as_ref().map_or(false, |i| {
i.device_type == Some(NetworkInterfaceType::Wireless)
})
})
.map(|(id, _)| id.clone());
if wifi_iface.is_none() {
wifi_iface = find_wifi_iface()
.await
.ok()
.and_then(|a| a)
.map(InternedString::from)
.map(GatewayId::from);
}
let wifi_iface = find_wifi_iface()
.await
.ok()
.and_then(|a| a)
.map(InternedString::from)
.map(GatewayId::from);
db.mutate(|db| {
let network = db.as_public_mut().as_server_info_mut().as_network_mut();

View File

@@ -100,17 +100,6 @@ impl UiContext for RpcContext {
})
})
.nest("/s9pk", s9pk_router(self.clone()))
.route("/mcp", crate::mcp::mcp_router(self.clone()))
.route(
"/.well-known/mcp",
get(|| async {
Response::builder()
.status(StatusCode::OK)
.header(CONTENT_TYPE, "application/json")
.body(Body::from(r#"{"mcp_endpoint":"/mcp"}"#))
.unwrap()
}),
)
.route(
"/static/local-root-ca.crt",
get(move || {

View File

@@ -180,19 +180,14 @@ pub async fn set_enabled(
.invoke(ErrorKind::Wifi)
.await?;
}
let iface = if let Some(man) = ctx.wifi_manager.read().await.as_ref().filter(|_| enabled) {
Some(man.interface.clone())
} else {
None
};
ctx.db
.mutate(|d| {
d.as_public_mut()
.as_server_info_mut()
.as_network_mut()
.as_wifi_mut()
.as_interface_mut()
.ser(&iface)
.as_enabled_mut()
.ser(&enabled)
})
.await
.result?;

View File

@@ -142,7 +142,7 @@ impl RegistryContext {
listen: config.registry_listen.unwrap_or(DEFAULT_REGISTRY_LISTEN),
db,
datadir,
rpc_continuations: RpcContinuations::new(None),
rpc_continuations: RpcContinuations::new(),
client: Client::builder()
.proxy(Proxy::custom(move |url| {
if url.host_str().map_or(false, |h| h.ends_with(".onion")) {

View File

@@ -17,7 +17,6 @@ use ts_rs::TS;
#[allow(unused_imports)]
use crate::prelude::*;
use crate::shutdown::Shutdown;
use crate::util::future::TimedResource;
use crate::util::net::WebSocket;
use crate::util::{FromStrParser, new_guid};
@@ -99,15 +98,12 @@ pub type RestHandler = Box<dyn FnOnce(Request) -> RestFuture + Send>;
pub struct WebSocketFuture {
kill: Option<broadcast::Receiver<()>>,
shutdown: Option<broadcast::Receiver<Option<Shutdown>>>,
fut: BoxFuture<'static, ()>,
}
impl Future for WebSocketFuture {
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
if self.kill.as_ref().map_or(false, |k| !k.is_empty())
|| self.shutdown.as_ref().map_or(false, |s| !s.is_empty())
{
if self.kill.as_ref().map_or(false, |k| !k.is_empty()) {
Poll::Ready(())
} else {
self.fut.poll_unpin(cx)
@@ -142,7 +138,6 @@ impl RpcContinuation {
RpcContinuation::WebSocket(TimedResource::new(
Box::new(|ws| WebSocketFuture {
kill: None,
shutdown: None,
fut: handler(ws.into()).boxed(),
}),
timeout,
@@ -175,7 +170,6 @@ impl RpcContinuation {
RpcContinuation::WebSocket(TimedResource::new(
Box::new(|ws| WebSocketFuture {
kill,
shutdown: None,
fut: handler(ws.into()).boxed(),
}),
timeout,
@@ -189,21 +183,15 @@ impl RpcContinuation {
}
}
pub struct RpcContinuations {
continuations: AsyncMutex<BTreeMap<Guid, RpcContinuation>>,
shutdown: Option<broadcast::Sender<Option<Shutdown>>>,
}
pub struct RpcContinuations(AsyncMutex<BTreeMap<Guid, RpcContinuation>>);
impl RpcContinuations {
pub fn new(shutdown: Option<broadcast::Sender<Option<Shutdown>>>) -> Self {
RpcContinuations {
continuations: AsyncMutex::new(BTreeMap::new()),
shutdown,
}
pub fn new() -> Self {
RpcContinuations(AsyncMutex::new(BTreeMap::new()))
}
#[instrument(skip_all)]
pub async fn clean(&self) {
let mut continuations = self.continuations.lock().await;
let mut continuations = self.0.lock().await;
let mut to_remove = Vec::new();
for (guid, cont) in &*continuations {
if cont.is_timed_out() {
@@ -218,28 +206,23 @@ impl RpcContinuations {
#[instrument(skip_all)]
pub async fn add(&self, guid: Guid, handler: RpcContinuation) {
self.clean().await;
self.continuations.lock().await.insert(guid, handler);
self.0.lock().await.insert(guid, handler);
}
pub async fn get_ws_handler(&self, guid: &Guid) -> Option<WebSocketHandler> {
let mut continuations = self.continuations.lock().await;
let mut continuations = self.0.lock().await;
if !matches!(continuations.get(guid), Some(RpcContinuation::WebSocket(_))) {
return None;
}
let Some(RpcContinuation::WebSocket(x)) = continuations.remove(guid) else {
return None;
};
let handler = x.get().await?;
let shutdown = self.shutdown.as_ref().map(|s| s.subscribe());
Some(Box::new(move |ws| {
let mut fut = handler(ws);
fut.shutdown = shutdown;
fut
}))
x.get().await
}
pub async fn get_rest_handler(&self, guid: &Guid) -> Option<RestHandler> {
let mut continuations = self.continuations.lock().await;
let mut continuations: tokio::sync::MutexGuard<'_, BTreeMap<Guid, RpcContinuation>> =
self.0.lock().await;
if !matches!(continuations.get(guid), Some(RpcContinuation::Rest(_))) {
return None;
}

View File

@@ -269,6 +269,13 @@ impl ExecParams {
std::os::unix::fs::chroot(chroot)
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("chroot {chroot:?}")))?;
if let Ok(uid) = uid {
if uid != 0 {
std::os::unix::fs::chown("/proc/self/fd/0", Some(uid), gid.ok()).ok();
std::os::unix::fs::chown("/proc/self/fd/1", Some(uid), gid.ok()).ok();
std::os::unix::fs::chown("/proc/self/fd/2", Some(uid), gid.ok()).ok();
}
}
// Handle credential changes in pre_exec to control the order:
// setgroups must happen before setgid/setuid (requires CAP_SETGID)
{
@@ -276,10 +283,16 @@ impl ExecParams {
let set_gid = gid.ok();
unsafe {
cmd.pre_exec(move || {
// Create a new process group so entrypoint scripts that do
// Create a new session so entrypoint scripts that do
// kill(0, SIGTERM) don't cascade to other subcontainers.
nix::unistd::setsid()
.map_err(|e| std::io::Error::from_raw_os_error(e as i32))?;
// EPERM means we're already a session leader (e.g. pty_process
// called setsid() for us), which is fine.
match nix::unistd::setsid() {
Ok(_) | Err(Errno::EPERM) => {}
Err(e) => {
return Err(std::io::Error::from_raw_os_error(e as i32));
}
}
if !groups.is_empty() {
nix::unistd::setgroups(&groups)
.map_err(|e| std::io::Error::from_raw_os_error(e as i32))?;

View File

@@ -91,20 +91,11 @@ pub async fn get_data_version(id: &PackageId) -> Result<Option<String>, Error> {
.join(id)
.join("data")
.join(".version");
maybe_read_file_to_string(&path).await
let s = maybe_read_file_to_string(&path).await?;
Ok(s.map(|s| s.trim().to_string()))
}
pub(crate) struct RootCommand(pub String);
/// Resolved subcontainer info, ready for command construction.
pub(crate) struct ResolvedSubcontainer {
pub container_id: ContainerId,
pub subcontainer_id: Guid,
pub image_id: ImageId,
// User to run as: explicit request, else the image's "user" field, else "root".
pub user: InternedString,
// Working directory taken from the image metadata's "workdir" field, if any.
pub workdir: Option<String>,
pub root_command: RootCommand,
}
struct RootCommand(pub String);
#[derive(Clone, Debug, Serialize, Deserialize, Default, TS)]
pub struct MiB(pub u64);
@@ -735,158 +726,6 @@ impl Service {
.clone();
Ok(container_id)
}
/// Resolve a subcontainer by optional filters (guid, name, or imageId).
/// If no filter is provided and there is exactly one subcontainer, it is returned.
/// Errors if no match found or multiple matches found (with the list in error info).
///
/// NOTE: filters are consulted in priority order — guid substring, else name
/// substring, else image-id substring (all case-insensitive) — so only the
/// first *provided* filter is applied, not the conjunction of all of them.
pub(crate) async fn resolve_subcontainer(
&self,
subcontainer: Option<InternedString>,
name: Option<InternedString>,
image_id: Option<ImageId>,
user: Option<InternedString>,
) -> Result<ResolvedSubcontainer, Error> {
let id = &self.seed.id;
let container = &self.seed.persistent_container;
// The LXC container must be running to have a rootfs we can inspect.
let root_dir = container
.lxc_container
.get()
.map(|x| x.rootfs_dir().to_owned())
.or_not_found(format!("container for {id}"))?;
// Uppercase all filters once for case-insensitive substring matching.
let subcontainer_upper = subcontainer.as_ref().map(|x| AsRef::<str>::as_ref(x).to_uppercase());
let name_upper = name.as_ref().map(|x| AsRef::<str>::as_ref(x).to_uppercase());
let image_id_upper = image_id.as_ref().map(|x| AsRef::<Path>::as_ref(x).to_string_lossy().to_uppercase());
let subcontainers = container.subcontainers.lock().await;
let matches: Vec<_> = subcontainers
.iter()
.filter(|(x, wrapper)| {
if let Some(sc) = subcontainer_upper.as_ref() {
AsRef::<str>::as_ref(x).contains(sc.as_str())
} else if let Some(n) = name_upper.as_ref() {
AsRef::<str>::as_ref(&wrapper.name)
.to_uppercase()
.contains(n.as_str())
} else if let Some(img) = image_id_upper.as_ref() {
let Some(wrapper_image_id) = AsRef::<Path>::as_ref(&wrapper.image_id).to_str()
else {
return false;
};
wrapper_image_id.to_uppercase().contains(img.as_str())
} else {
// No filter: every subcontainer matches (OK only when there is exactly one).
true
}
})
.collect();
let Some((subcontainer_id, matched_image_id)) = matches
.first()
.map::<(Guid, ImageId), _>(|&x| (x.0.clone(), x.1.image_id.clone()))
else {
// No match: release the lock, then re-lock to build the full listing
// for the error payload so the caller can see what exists.
drop(subcontainers);
let info = container
.subcontainers
.lock()
.await
.iter()
.map(|(g, s)| SubcontainerInfo {
id: g.clone(),
name: s.name.clone(),
image_id: s.image_id.clone(),
})
.collect::<Vec<_>>();
return Err(Error::new(
eyre!("{}", t!("service.mod.no-matching-subcontainers", id = id)),
ErrorKind::NotFound,
)
.with_info(to_value(&info)?));
};
// Ambiguous filter: report all candidates so the caller can disambiguate.
if matches.len() > 1 {
let info = matches
.into_iter()
.map(|(g, s)| SubcontainerInfo {
id: g.clone(),
name: s.name.clone(),
image_id: s.image_id.clone(),
})
.collect::<Vec<_>>();
return Err(Error::new(
eyre!("{}", t!("service.mod.multiple-subcontainers-found", id = id,)),
ErrorKind::InvalidRequest,
)
.with_info(to_value(&info)?));
}
// Path to the subcontainer's /etc/passwd as seen from the host rootfs.
let passwd = root_dir
.join("media/startos/subcontainers")
.join(subcontainer_id.as_ref())
.join("etc")
.join("passwd");
// Image metadata JSON (user/workdir defaults) lives beside the image.
let image_meta = serde_json::from_str::<Value>(
&tokio::fs::read_to_string(
root_dir
.join("media/startos/images/")
.join(&matched_image_id)
.with_extension("json"),
)
.await?,
)
.with_kind(ErrorKind::Deserialization)?;
// Precedence: explicit user > image metadata "user" > "root".
let resolved_user = user
.or_else(|| image_meta["user"].as_str().map(InternedString::intern))
.unwrap_or_else(|| InternedString::intern("root"));
let root_command = get_passwd_command(passwd, &*resolved_user).await;
let workdir = image_meta["workdir"].as_str().map(|s| s.to_owned());
Ok(ResolvedSubcontainer {
container_id: self.container_id()?,
subcontainer_id,
image_id: matched_image_id,
user: resolved_user,
workdir,
root_command,
})
}
/// Build a `Command` for executing inside a resolved subcontainer (non-interactive).
///
/// The command attaches to the service's LXC container and delegates to
/// `start-container subcontainer exec`, passing the image env file, the
/// resolved user, the optional workdir, and the subcontainer rootfs path.
/// When `command` is empty, the resolved root command is run instead.
pub(crate) fn build_subcontainer_command(
    resolved: &ResolvedSubcontainer,
    command: &[&str],
) -> Command {
    // Paths as seen from inside the LXC container.
    let subcontainer_root =
        Path::new("/media/startos/subcontainers").join(resolved.subcontainer_id.as_ref());
    let env_file = Path::new("/media/startos/images")
        .join(&resolved.image_id)
        .with_extension("env");
    let mut attach = Command::new("lxc-attach");
    // Don't leave the attached process running if the handle is dropped.
    attach.kill_on_drop(true);
    attach
        .arg(&*resolved.container_id)
        .arg("--")
        .arg("start-container")
        .arg("subcontainer")
        .arg("exec")
        .arg("--env-file")
        .arg(env_file)
        .arg("--user")
        .arg(&*resolved.user);
    if let Some(workdir) = resolved.workdir.as_deref() {
        attach.arg("--workdir").arg(workdir);
    }
    attach.arg(&subcontainer_root).arg("--");
    match command {
        // No explicit command: fall back to the resolved root command.
        [] => {
            attach.arg(&resolved.root_command.0);
        }
        args => {
            attach.args(args);
        }
    }
    attach
}
#[instrument(skip_all)]
pub async fn stats(&self) -> Result<ServiceStats, Error> {
let container = &self.seed.persistent_container;
@@ -981,26 +820,124 @@ pub async fn attach(
user,
}: AttachParams,
) -> Result<Guid, Error> {
let resolved = {
let service = ctx.services.get(&id).await;
let service_ref = service.as_ref().or_not_found(&id)?;
service_ref
.resolve_subcontainer(
subcontainer.map(|g| InternedString::intern(g.as_ref())),
name,
image_id,
user,
let (container_id, subcontainer_id, image_id, user, workdir, root_command) = {
let id = &id;
let service = ctx.services.get(id).await;
let service_ref = service.as_ref().or_not_found(id)?;
let container = &service_ref.seed.persistent_container;
let root_dir = container
.lxc_container
.get()
.map(|x| x.rootfs_dir().to_owned())
.or_not_found(format!("container for {id}"))?;
let subcontainer = subcontainer.map(|x| AsRef::<str>::as_ref(&x).to_uppercase());
let name = name.map(|x| AsRef::<str>::as_ref(&x).to_uppercase());
let image_id = image_id.map(|x| AsRef::<Path>::as_ref(&x).to_string_lossy().to_uppercase());
let subcontainers = container.subcontainers.lock().await;
let subcontainer_ids: Vec<_> = subcontainers
.iter()
.filter(|(x, wrapper)| {
if let Some(subcontainer) = subcontainer.as_ref() {
AsRef::<str>::as_ref(x).contains(AsRef::<str>::as_ref(subcontainer))
} else if let Some(name) = name.as_ref() {
AsRef::<str>::as_ref(&wrapper.name)
.to_uppercase()
.contains(AsRef::<str>::as_ref(name))
} else if let Some(image_id) = image_id.as_ref() {
let Some(wrapper_image_id) = AsRef::<Path>::as_ref(&wrapper.image_id).to_str()
else {
return false;
};
wrapper_image_id
.to_uppercase()
.contains(AsRef::<str>::as_ref(&image_id))
} else {
true
}
})
.collect();
let Some((subcontainer_id, image_id)) = subcontainer_ids
.first()
.map::<(Guid, ImageId), _>(|&x| (x.0.clone(), x.1.image_id.clone()))
else {
drop(subcontainers);
let subcontainers = container
.subcontainers
.lock()
.await
.iter()
.map(|(g, s)| SubcontainerInfo {
id: g.clone(),
name: s.name.clone(),
image_id: s.image_id.clone(),
})
.collect::<Vec<_>>();
return Err(Error::new(
eyre!("{}", t!("service.mod.no-matching-subcontainers", id = id)),
ErrorKind::NotFound,
)
.await?
.with_info(to_value(&subcontainers)?));
};
let passwd = root_dir
.join("media/startos/subcontainers")
.join(subcontainer_id.as_ref())
.join("etc")
.join("passwd");
let image_meta = serde_json::from_str::<Value>(
&tokio::fs::read_to_string(
root_dir
.join("media/startos/images/")
.join(&image_id)
.with_extension("json"),
)
.await?,
)
.with_kind(ErrorKind::Deserialization)?;
let user = user
.clone()
.or_else(|| image_meta["user"].as_str().map(InternedString::intern))
.unwrap_or_else(|| InternedString::intern("root"));
let root_command = get_passwd_command(passwd, &*user).await;
let workdir = image_meta["workdir"].as_str().map(|s| s.to_owned());
if subcontainer_ids.len() > 1 {
let subcontainers = subcontainer_ids
.into_iter()
.map(|(g, s)| SubcontainerInfo {
id: g.clone(),
name: s.name.clone(),
image_id: s.image_id.clone(),
})
.collect::<Vec<_>>();
return Err(Error::new(
eyre!(
"{}",
t!("service.mod.multiple-subcontainers-found", id = id,)
),
ErrorKind::InvalidRequest,
)
.with_info(to_value(&subcontainers)?));
}
(
service_ref.container_id()?,
subcontainer_id,
image_id,
user.into(),
workdir,
root_command,
)
};
let ResolvedSubcontainer {
container_id,
subcontainer_id,
image_id,
user,
workdir,
root_command,
} = resolved;
let guid = Guid::new();
async fn handler(

View File

@@ -30,7 +30,6 @@ impl ServiceActorSeed {
ErrorKind::Cancelled,
))
};
let backup_succeeded = res.is_ok();
let id = &self.id;
self.ctx
.db
@@ -52,16 +51,14 @@ impl ServiceActorSeed {
x => x,
})
})?;
if backup_succeeded {
if let Some(progress) = db
.as_public_mut()
.as_server_info_mut()
.as_status_info_mut()
.as_backup_progress_mut()
.transpose_mut()
{
progress.insert(id, &BackupProgress { complete: true })?;
}
if let Some(progress) = db
.as_public_mut()
.as_server_info_mut()
.as_status_info_mut()
.as_backup_progress_mut()
.transpose_mut()
{
progress.insert(id, &BackupProgress { complete: true })?;
}
Ok(())
})

View File

@@ -557,6 +557,39 @@ pub async fn execute_inner(
hostname: Option<ServerHostnameInfo>,
) -> Result<(SetupResult, RpcContext), Error> {
let progress = &ctx.progress;
if !crate::disk::mount::util::is_mountpoint(Path::new(DATA_DIR).join("main")).await? {
let mut disk_phase =
progress.add_phase(t!("setup.opening-data-drive").into(), Some(10));
disk_phase.start();
let requires_reboot = crate::disk::main::import(
&*guid,
DATA_DIR,
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
RepairStrategy::Aggressive
} else {
RepairStrategy::Preen
},
if guid.ends_with("_UNENC") {
None
} else {
Some(DEFAULT_PASSWORD)
},
Some(progress),
)
.await?;
let _ = ctx.disk_guid.set(guid.clone());
crate::util::io::delete_file(REPAIR_DISK_PATH).await?;
if requires_reboot.0 {
crate::disk::main::export(&*guid, DATA_DIR).await?;
return Err(Error::new(
eyre!("{}", t!("setup.disk-errors-corrected-restart-required")),
ErrorKind::DiskManagement,
));
}
disk_phase.complete();
}
let restore_phase = match recovery_source.as_ref() {
Some(RecoverySource::Backup { .. }) => {
Some(progress.add_phase(t!("setup.restoring-backup").into(), Some(100)))

View File

@@ -202,7 +202,7 @@ impl TunnelContext {
listen,
db,
datadir,
rpc_continuations: RpcContinuations::new(None),
rpc_continuations: RpcContinuations::new(),
open_authed_continuations: OpenAuthedContinuations::new(),
ephemeral_sessions: SyncMutex::new(Sessions::new()),
net_iface,

View File

@@ -521,7 +521,7 @@ pub async fn init_web(ctx: CliContext) -> Result<(), Error> {
.or_not_found("certificate in chain")?;
println!("📝 Root CA:");
print!("{cert}\n");
println!("Follow instructions to trust your Root CA (recommended): https://docs.start9.com/start-tunnel/installing/index.html#trust-your-root-ca");
println!("Follow instructions to trust your Root CA (recommended): https://docs.start9.com/start-tunnel/installing.html#trust-your-root-ca");
return Ok(());
}

View File

@@ -8,7 +8,7 @@ use tokio::io::AsyncWrite;
use tokio::task::JoinHandle;
const BLOCK_SIZE: usize = 4096;
const BUF_CAP: usize = 256 * 1024; // 256KB
const BUF_CAP: usize = 1024 * 1024; // 1MiB
/// Aligned buffer for O_DIRECT I/O.
struct AlignedBuf {

View File

@@ -26,6 +26,30 @@ impl<'a> MakeWriter<'a> for LogFile {
struct TeeWriter<'a>(MutexGuard<'a, Option<File>>);
impl<'a> Write for TeeWriter<'a> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
// Blocking file+stderr I/O on a tokio worker thread can
// starve the I/O driver (tokio-rs/tokio#4730).
// block_in_place tells the runtime to hand off driver
// duties before we block. Only available on the
// multi-thread runtime; falls back to a direct write on
// current-thread runtimes (CLI) or outside a runtime.
if matches!(
tokio::runtime::Handle::try_current().map(|h| h.runtime_flavor()),
Ok(tokio::runtime::RuntimeFlavor::MultiThread),
) {
tokio::task::block_in_place(|| self.write_inner(buf))
} else {
self.write_inner(buf)
}
}
fn flush(&mut self) -> io::Result<()> {
if let Some(f) = &mut *self.0 {
f.flush()?;
}
Ok(())
}
}
impl<'a> TeeWriter<'a> {
fn write_inner(&mut self, buf: &[u8]) -> io::Result<usize> {
let n = if let Some(f) = &mut *self.0 {
f.write(buf)?
} else {
@@ -34,12 +58,6 @@ impl<'a> MakeWriter<'a> for LogFile {
io::stderr().write_all(&buf[..n])?;
Ok(n)
}
fn flush(&mut self) -> io::Result<()> {
if let Some(f) = &mut *self.0 {
f.flush()?;
}
Ok(())
}
}
Box::new(TeeWriter(f))
} else {

View File

@@ -62,8 +62,9 @@ mod v0_4_0_alpha_19;
mod v0_4_0_alpha_20;
mod v0_4_0_alpha_21;
mod v0_4_0_alpha_22;
mod v0_4_0_alpha_23;
pub type Current = v0_4_0_alpha_22::Version; // VERSION_BUMP
pub type Current = v0_4_0_alpha_23::Version; // VERSION_BUMP
impl Current {
#[instrument(skip(self, db))]
@@ -193,7 +194,8 @@ enum Version {
V0_4_0_alpha_19(Wrapper<v0_4_0_alpha_19::Version>),
V0_4_0_alpha_20(Wrapper<v0_4_0_alpha_20::Version>),
V0_4_0_alpha_21(Wrapper<v0_4_0_alpha_21::Version>),
V0_4_0_alpha_22(Wrapper<v0_4_0_alpha_22::Version>), // VERSION_BUMP
V0_4_0_alpha_22(Wrapper<v0_4_0_alpha_22::Version>),
V0_4_0_alpha_23(Wrapper<v0_4_0_alpha_23::Version>), // VERSION_BUMP
Other(exver::Version),
}
@@ -258,7 +260,8 @@ impl Version {
Self::V0_4_0_alpha_19(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_20(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_21(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_22(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP
Self::V0_4_0_alpha_22(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_23(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP
Self::Other(v) => {
return Err(Error::new(
eyre!("unknown version {v}"),
@@ -315,7 +318,8 @@ impl Version {
Version::V0_4_0_alpha_19(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_20(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_21(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_22(Wrapper(x)) => x.semver(), // VERSION_BUMP
Version::V0_4_0_alpha_22(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_23(Wrapper(x)) => x.semver(), // VERSION_BUMP
Version::Other(x) => x.clone(),
}
}

View File

@@ -8,108 +8,120 @@ Previous backups are incompatible with v0.4.0. It is strongly recommended that y
A server is not a toy. It is a critical component of the computing paradigm, and its failure can be catastrophic, resulting in downtime or loss of data. From the beginning, Start9 has taken a "security and reliability first" approach to the development of StartOS, favoring soundness over speed, and prioritizing essential features such as encrypted network connections, simple backups, and a reliable container runtime over nice-to-haves like custom theming and more services.
Start9 is paving new ground with StartOS, trying to create what most developers and IT professionals thought impossible; namely, an OS and user experience that affords a normal person the same independent control over their data and communications as an experienced Linux sysadmin.
Start9 is paving new ground with StartOS, trying to create what most developers and IT professionals thought impossible: an OS and user experience that affords a normal person the same independent control over their data and communications as an experienced Linux sysadmin.
The difficulty of our endeavor requires making mistakes; and our integrity and dedication to excellence require that we correct them. This means a willingness to discard bad ideas and broken parts, and if absolutely necessary, to tear it all down and start over. That is exactly what we did with StartOS v0.2.0 in 2020. It is what we did with StartOS v0.3.0 in 2022. And we are doing it now with StartOS v0.4.0 in 2026.
The difficulty of our endeavor requires making mistakes, and our integrity and dedication to excellence require that we correct them. This means a willingness to discard bad ideas and broken parts, and if absolutely necessary, to tear it all down and start over. That is exactly what we did with StartOS v0.2.0 in 2020. It is what we did with StartOS v0.3.0 in 2022. And we are doing it now with StartOS v0.4.0 in 2026.
v0.4.0 is a complete rewrite of StartOS, almost nothing survived. After nearly six years of building StartOS, we believe that we have finally arrived at the correct architecture and foundation that will allow us to deliver on the promise of sovereign computing.
v0.4.0 is a complete rewrite of StartOS — almost nothing survived. After nearly six years of building StartOS, we believe we have finally arrived at the correct architecture and foundation to deliver on the promise of sovereign computing.
## Changelog
### New User interface
### User Experience
We re-wrote the StartOS UI to be more performant, more intuitive, and better looking on both mobile and desktop. Enjoy.
#### New User Interface
### Translations
The StartOS UI has been rewritten to be more performant, more intuitive, and better looking on both mobile and desktop.
StartOS v0.4.0 supports multiple languages and also makes it easy to add more later on.
#### Internationalization
### LXC Container Runtime
StartOS v0.4.0 and available services now support multiple languages and keyboard layouts.
Neither Docker nor Podman offer the reliability and flexibility needed for StartOS. Instead, v0.4.0 uses a nested container paradigm based on LXC for the outer container and Linux namespaces for sub containers. This architecture naturally supports multi container setups.
#### Improved Actions
### Hardware Acceleration
Actions accept arbitrary form input and return arbitrary responses, replacing the old "Config" and "Properties" concepts, which have been removed. The new Actions API gives package developers the ability to break configuration and properties into smaller, more specific forms — or to exclude them entirely without polluting the UI. Improved form design and new input types round out the experience.
Services can take advantage of (and require) the presence of certain hardware modules, such as Nvidia GPUs, for transcoding or inference purposes. For example, StartOS and Ollama can run natively on The Nvidia DGX Spark and take full advantage of the hardware/firmware stack to perform local inference against open source models.
#### Progress Reporting
### New S9PK archive format
A new progress reporting API enables package developers to define custom phases and provide real-time progress updates for operations such as installing, updating, or backing up a service.
The S9PK archive format has been overhauled to allow for signature verification of partial downloads, and allow direct mounting of container images without unpacking the s9pk.
#### Email Notifications via SMTP
### Improved Actions
You can now add your Gmail, SES, or other SMTP credentials to StartOS to deliver email notifications from StartOS and from installed services that support SMTP.
Actions take arbitrary form input and return arbitrary responses, thus satisfying the needs of both "Config" and "Properties", which have now been removed. The new actions API gives package developers the ability to break up Config and Properties into smaller, more specific formats, or to exclude them entirely without polluting the UI. Improved form design and new input types round out the new actions experience.
### Networking & Connectivity
### Squashfs Images for OS Updates
#### LAN Port Forwarding
StartOS now uses squashfs images instead of rsync for OS updates. This allows for better update verification and improved reliability.
Perhaps the biggest complaint with prior versions of StartOS was the use of unique `.local` URLs for service interfaces. This has been corrected. Service interfaces are now available on unique ports, supporting non-HTTP traffic on the LAN as well as remote access via VPN.
### Typescript Package API and SDK
#### Gateways
Package developers can now take advantage of StartOS APIs using the new start-sdk, available in Typescript. A barebones StartOS package (s9pk) can be produced in minutes with minimal knowledge or skill. More advanced developers can use the SDK to create highly customized user experiences for their service.
Gateways connect your server to the Internet, facilitating inbound and outbound traffic. It is now possible to add Wireguard VPN gateways to your server to control how devices outside the LAN connect to your server and how your server connects out to the Internet. Outbound traffic can also be overridden on a per-service basis.
### Removed PostgresSQL
#### Private Domains
StartOS itself has miniscule data persistence needs. PostgresSQL was overkill and has been removed in favor of lightweight PatchDB.
A private domain is like your server's `.local` address, except it also works over VPN, and it can be _anything_ — a real domain you control, a made-up domain, or even a domain controlled by someone else.
### Sending Emails via SMTP
Like your local domain, private domains can only be accessed when connected to the same LAN as your server, either physically or via VPN, and they require trusting your server's Root CA.
You can now add your Gmail, SES, or other SMTP credentials to StartOS in order to deliver email notifications from StartOS and from installed services that support SMTP.
#### Public Domains (Clearnet)
### SSH password auth
It is now easy to expose service interfaces to the public Internet on a domain you control. There are two options:
1. **Open ports on your router.** This option is free and supported by all routers. The drawback is that your home IP address is revealed to anyone accessing an exposed interface.
2. **Use a Wireguard reverse tunnel**, such as [StartTunnel](#start-tunnel), to proxy web traffic. This option requires renting a $5–$10/month VPS and installing StartTunnel (or similar). The result is a virtual router in the cloud that you can use to expose service interfaces instead of your real router, hiding your IP address from visitors.
#### Let's Encrypt
StartOS now supports Let's Encrypt to automatically obtain SSL/TLS certificates for public domains. Visitors to your public websites and APIs will no longer need to download and trust your server's Root CA.
#### Internal DNS Server
StartOS runs its own DNS server and automatically adds records for your private domains. You can configure your router or other gateway to use the StartOS DNS server to resolve these domains locally.
#### Static DNS Servers
By default, StartOS uses the DNS servers it receives via DHCP from its gateway(s). It is now possible to override these with custom, static DNS servers.
#### Tor as a Plugin
With the expanded networking capabilities of StartOS v0.4.0, Tor is now an optional plugin that can be installed from the Marketplace. Users can run their own Tor relay, route outbound connections through Tor, and generate hidden service URLs for any service interface, including vanity addresses.
#### Tor Address Management
StartOS v0.4.0 supports adding and removing Tor addresses for both StartOS itself and all service interfaces. You can even provide your own private key instead of using one auto-generated by StartOS, enabling vanity addresses.
### System & Infrastructure
#### LXC Container Runtime
Neither Docker nor Podman offer the reliability and flexibility needed for StartOS. Instead, v0.4.0 uses a nested container paradigm based on LXC for the outer container and Linux namespaces for sub-containers. This architecture naturally supports multi-container setups.
#### Hardware Acceleration
Services can take advantage of — and require — the presence of certain hardware modules, such as Nvidia GPUs, for transcoding or inference. For example, StartOS and Ollama can run natively on the Nvidia DGX Spark and take full advantage of its hardware and firmware stack to perform local inference against open source models.
#### Squashfs Images for OS Updates
StartOS now uses squashfs images instead of rsync for OS updates, enabling better update verification and improved reliability.
#### Replaced PostgreSQL with PatchDB
StartOS itself has minimal data persistence needs. PostgreSQL was overkill and has been replaced with the lightweight PatchDB.
#### Improved Backups
The new `start-fs` FUSE module unifies filesystem expectations across platforms, enabling more reliable backups. The system now defaults to rsync differential backups instead of incremental backups, which is both faster and more space-efficient — files deleted from the server are also deleted from the backup.
#### SSH Password Authentication
You can now SSH into your server using your master password. SSH public key authentication is still supported as well.
### Tor Address Management
### Developer Experience
StartOS v0.4.0 supports adding and removing Tor addresses for StartOS and all service interfaces. You can even provide your own private key instead of using one auto-generated by StartOS. This has the added benefit of permitting vanity addresses.
#### New S9PK Archive Format
### Progress Reporting
The S9PK archive format has been overhauled to support signature verification of partial downloads and direct mounting of container images without unpacking the archive.
A new progress reporting API enables package developers to create unique phases and provide real-time progress reporting for actions such as installing, updating, or backing up a service.
#### TypeScript Package API and SDK
### Registry Protocol
Package developers can now interact with StartOS APIs using the new `start-sdk`, available in TypeScript. A barebones StartOS package (S9PK) can be produced in minutes with minimal knowledge or skill. More advanced developers can use the SDK to create highly customized user experiences for their services.
The new registry protocol bifurcates package indexing (listing/validating) and package hosting (downloading). Registries are now simple indexes of packages that reference binaries hosted in arbitrary locations, locally or externally. For example, when someone visits the Start9 Registry, the curated list of packages comes from Start9. But when someone installs a listed service, the package binary is being downloaded from GitHub. The registry also validates the binary. This makes it much easier to host a custom registry, since it is just a curated list of services that reference package binaries hosted on GitHub or elsewhere.
#### Registry Protocol
### LAN port forwarding
The new registry protocol separates package indexing (listing and validation) from package hosting (downloading). Registries are now simple indexes that reference binaries hosted in arbitrary locations, locally or externally. For example, when someone visits the Start9 Registry, the curated list of packages comes from Start9, but when they install a service, the binary is downloaded from GitHub. The registry also validates the binary. This makes it much easier to host a custom registry, since it is just a curated list of services that reference package binaries hosted on GitHub or elsewhere.
Perhaps the biggest complaint with prior versions of StartOS was the use of unique .local URLs for service interfaces. This has been corrected. Service interfaces are now available on unique ports, allowing for non-HTTP traffic on the LAN as well as remote access via VPN.
#### Exver and Service Flavors
### Improved Backups
The new start-fs fuse module unifies file system expectations for various platforms, enabling more reliable backups. The new system also defaults to using rsync differential backups instead of incremental backups, which is faster and saves on disk space by also deleting from the backup files that were deleted from the server.
### Exver
StartOS now uses Extended Versioning (Exver), which consists of three parts: (1) a Semver-compliant upstream version, (2) a Semver-compliant wrapper version, and (3) an optional "flavor" prefix. Flavors can be thought of as alternative implementations of services, where a user would only want one or the other installed, and data can feasibly be migrating between the two. Another common characteristic of flavors is that they satisfy the same API requirement of dependents, though this is not strictly necessary. A valid Exver looks something like this: `#knots:29.0:1.0-beta.1`. This would translate to "the first beta release of StartOS wrapper version 1.0 of Bitcoin Knots version 29.0".
### Let's Encrypt
StartOS now supports Let's Encrypt to automatically obtain SSL/TLS certificates for public domains. This means people visiting your public websites and APIs will not need to download and trust your server's Root CA.
### Gateways
Gateways connect your server to the Internet, facilitating inbound and outbound traffic. Your router is a gateway. It is now possible to add Wireguard VPN gateways to your server to control how devices outside the LAN connect to your server and how your server connects out to the Internet.
### Static DNS Servers
By default, StartOS uses the DNS servers it receives via DHCP from its gateway(s). It is now possible to override these DNS servers with custom, static ones.
### Internal DNS Server
StartOS runs its own DNS server and automatically adds records for your private domains. You can update your router or other gateway to use StartOS DNS server in order to resolve these domains locally.
### Private Domains
A private domain is like your server's .local, except it also works for VPN connectivity, and it can be _anything_. It can be a real domain you control, a made-up domain, or even a domain controlled by someone else.
Similar to your local domain, private domains can only be accessed when connected to the same LAN as your server, either physically or via VPN, and they require trusting your server's Root CA.
### Public Domains (Clearnet)
It is now easy to expose service interfaces to the public Internet on a public domain you control. There are two options, both of which are easy to accomplish:
1. Open ports on your router. This option is free and supported by all routers. The drawback is that your home IP address is revealed to anyone accessing an exposed interface.
2. Use a Wireguard reverse tunnel, such as [StartTunnel](#start-tunnel) to proxy web traffic. This option requires renting a $5-$10/month VPS and installing StartTunnel (or similar). The result is a new gateway, a virtual router in the cloud, that you can use to expose service interfaces instead of your real router, thereby hiding your IP address from visitors.
StartOS now uses Extended Versioning (Exver), which consists of three parts: (1) a semver-compliant upstream version, (2) a semver-compliant wrapper version, and (3) an optional "flavor" prefix. Flavors are alternative implementations of a service where a user would typically want only one installed, and data can be migrated between them. Flavors commonly satisfy the same dependency API for downstream packages, though this is not strictly required. A valid Exver looks like: `#knots:29.0:1.0-beta.1` — the first beta release of StartOS wrapper version 1.0 of Bitcoin Knots version 29.0.

View File

@@ -27,6 +27,7 @@ use crate::net::keys::KeyStore;
use crate::notifications::Notifications;
use crate::prelude::*;
use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile;
use crate::s9pk::v2::pack::CONTAINER_TOOL;
use crate::ssh::{SshKeys, SshPubKey};
use crate::util::Invoke;
use crate::util::serde::Pem;
@@ -143,12 +144,11 @@ pub struct Version;
impl VersionT for Version {
type Previous = v0_3_5_2::Version;
/// (package_id, host_id, expanded_key)
type PreUpRes = (
AccountInfo,
SshKeys,
CifsTargets,
Vec<(String, String, [u8; 64])>,
BTreeMap<(String, String), [u8; 64]>,
);
fn semver(self) -> exver::Version {
V0_3_6_alpha_0.clone()
@@ -250,7 +250,7 @@ impl VersionT for Version {
let mut onion_map: Value = json!({});
let onion_obj = onion_map.as_object_mut().unwrap();
let mut tor_migration = imbl::Vector::<Value>::new();
for (package_id, host_id, key_bytes) in &tor_keys {
for ((package_id, host_id), key_bytes) in &tor_keys {
let onion_addr = onion_address_from_key(key_bytes);
let encoded_key =
base64::Engine::encode(&crate::util::serde::BASE64, key_bytes);
@@ -326,7 +326,41 @@ impl VersionT for Version {
.await?;
}
// Load bundled migration images (start9/compat, start9/utils,
// tonistiigi/binfmt) so the v1->v2 s9pk conversion doesn't need
// internet access.
let migration_images_dir = Path::new("/usr/lib/startos/migration-images");
if let Ok(mut entries) = tokio::fs::read_dir(migration_images_dir).await {
while let Some(entry) = entries.next_entry().await? {
let path = entry.path();
if path.extension() == Some(OsStr::new("tar")) {
tracing::info!("Loading migration image: {}", path.display());
Command::new(*CONTAINER_TOOL)
.arg("load")
.arg("-i")
.arg(&path)
.invoke(crate::ErrorKind::Docker)
.await?;
}
}
}
// Should be the name of the package
let current_package: std::sync::Arc<tokio::sync::watch::Sender<Option<PackageId>>> =
std::sync::Arc::new(tokio::sync::watch::channel(None).0);
let progress_logger = {
let current_package = current_package.clone();
tokio::spawn(async move {
let mut interval = tokio::time::interval(std::time::Duration::from_secs(30));
interval.tick().await; // skip immediate first tick
loop {
interval.tick().await;
if let Some(ref id) = *current_package.borrow() {
tracing::info!("{}", t!("migration.migrating-package", package = id.to_string()));
}
}
})
};
let mut paths = tokio::fs::read_dir(path).await?;
while let Some(path) = paths.next_entry().await? {
let Ok(id) = path.file_name().to_string_lossy().parse::<PackageId>() else {
@@ -367,6 +401,9 @@ impl VersionT for Version {
false
};
tracing::info!("{}", t!("migration.migrating-package", package = id.to_string()));
current_package.send_replace(Some(id.clone()));
if let Err(e) = async {
let package_s9pk = tokio::fs::File::open(path).await?;
let file = MultiCursorFile::open(&package_s9pk).await?;
@@ -411,6 +448,7 @@ impl VersionT for Version {
}
}
}
progress_logger.abort();
Ok(())
}
}
@@ -534,13 +572,16 @@ async fn previous_ssh_keys(pg: &sqlx::Pool<sqlx::Postgres>) -> Result<SshKeys, E
Ok(ssh_keys)
}
/// Returns `Vec<(package_id, host_id, expanded_key)>`.
/// Returns deduplicated map of `(package_id, host_id) -> expanded_key`.
/// Server key uses `("STARTOS", "STARTOS")`.
/// When the same (package, interface) exists in both the `network_keys` and
/// `tor` tables, the `tor` table entry wins because it contains the actual
/// expanded key that was used by tor.
#[tracing::instrument(skip_all)]
async fn previous_tor_keys(
pg: &sqlx::Pool<sqlx::Postgres>,
) -> Result<Vec<(String, String, [u8; 64])>, Error> {
let mut keys = Vec::new();
) -> Result<BTreeMap<(String, String), [u8; 64]>, Error> {
let mut keys = BTreeMap::new();
// Server tor key from the account table.
// Older installs have tor_key (64 bytes). Newer installs (post-NetworkKeys migration)
@@ -551,15 +592,14 @@ async fn previous_tor_keys(
.with_kind(ErrorKind::Database)?;
if let Ok(tor_key) = row.try_get::<Vec<u8>, _>("tor_key") {
if let Ok(key) = <[u8; 64]>::try_from(tor_key) {
keys.push(("STARTOS".to_owned(), "STARTOS".to_owned(), key));
keys.insert(("STARTOS".to_owned(), "STARTOS".to_owned()), key);
}
} else if let Ok(net_key) = row.try_get::<Vec<u8>, _>("network_key") {
if let Ok(seed) = <[u8; 32]>::try_from(net_key) {
keys.push((
"STARTOS".to_owned(),
"STARTOS".to_owned(),
keys.insert(
("STARTOS".to_owned(), "STARTOS".to_owned()),
crate::util::crypto::ed25519_expand_key(&seed),
));
);
}
}
@@ -579,16 +619,17 @@ async fn previous_tor_keys(
continue;
};
if let Ok(seed) = <[u8; 32]>::try_from(key_bytes) {
keys.push((
package,
interface,
keys.insert(
(package, interface),
crate::util::crypto::ed25519_expand_key(&seed),
));
);
}
}
}
// Package tor keys from the tor table (already 64-byte expanded keys)
// Package tor keys from the tor table (already 64-byte expanded keys).
// These overwrite network_keys entries for the same (package, interface)
// because the tor table has the actual expanded key used by tor.
if let Ok(rows) = sqlx::query(r#"SELECT package, interface, key FROM tor"#)
.fetch_all(pg)
.await
@@ -604,7 +645,7 @@ async fn previous_tor_keys(
continue;
};
if let Ok(key) = <[u8; 64]>::try_from(key_bytes) {
keys.push((package, interface, key));
keys.insert((package, interface), key);
}
}
}

View File

@@ -0,0 +1,37 @@
use exver::{PreReleaseSegment, VersionRange};

use super::v0_3_5::V0_3_0_COMPAT;
use super::{VersionT, v0_4_0_alpha_22};
use crate::prelude::*;

lazy_static::lazy_static! {
    /// The exver version identifier for this release: `0.4.0-alpha.23`.
    static ref V0_4_0_alpha_23: exver::Version = exver::Version::new(
        [0, 4, 0],
        [PreReleaseSegment::String("alpha".into()), 23.into()]
    );
}

/// Migration marker for 0.4.0-alpha.23. This release contains no database
/// schema changes, so both `up` and `down` are no-ops; the version exists
/// only to advance the recorded version number in the migration chain.
#[derive(Clone, Copy, Debug, Default)]
pub struct Version;

impl VersionT for Version {
    // Links this version into the migration chain after alpha.22.
    type Previous = v0_4_0_alpha_22::Version;
    type PreUpRes = ();

    /// No preparation work is needed before `up` for this version.
    async fn pre_up(self) -> Result<Self::PreUpRes, Error> {
        Ok(())
    }

    fn semver(self) -> exver::Version {
        V0_4_0_alpha_23.clone()
    }

    fn compat(self) -> &'static VersionRange {
        // Remains backward-compatible with the 0.3.0 range.
        &V0_3_0_COMPAT
    }

    /// No database transformation for this release; returns `Value::Null`
    /// (no replacement document).
    #[instrument(skip_all)]
    fn up(self, _db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
        Ok(Value::Null)
    }

    /// Downgrade is likewise a no-op.
    fn down(self, _db: &mut Value) -> Result<(), Error> {
        Ok(())
    }
}

View File

@@ -1,5 +1,12 @@
# Changelog
## 0.4.0-beta.66 (2026-03-24)
- **Breaking:** `withPgDump()` replaces `pgdata` with required `mountpoint` + `pgdataPath`
- Passwordless/trust auth support for `withPgDump()` and `withMysqlDump()`
- New options: `pgOptions` for postgres, `mysqldOptions` for mysql/mariadb
- Fixed MariaDB backup/restore support
## 0.4.0-beta.65 (2026-03-23)
### Added

View File

@@ -70,7 +70,7 @@ import { createVolumes } from './util/Volume'
import { getDataVersion, setDataVersion } from './version'
/** The minimum StartOS version required by this SDK release */
export const OSVersion = testTypeVersion('0.4.0-alpha.22')
export const OSVersion = testTypeVersion('0.4.0-alpha.23')
// prettier-ignore
type AnyNeverCond<T extends any[], Then, Else> =

View File

@@ -10,9 +10,10 @@ const BACKUP_HOST_PATH = '/media/startos/backup'
const BACKUP_CONTAINER_MOUNT = '/backup-target'
/** A password value, or a function that returns one. Functions are resolved lazily (only during restore). */
export type LazyPassword = string | (() => string | Promise<string>)
export type LazyPassword = string | (() => string | Promise<string>) | null
async function resolvePassword(pw: LazyPassword): Promise<string> {
async function resolvePassword(pw: LazyPassword): Promise<string | null> {
if (pw === null) return null
return typeof pw === 'function' ? pw() : pw
}
@@ -22,16 +23,20 @@ export type PgDumpConfig<M extends T.SDKManifest> = {
imageId: keyof M['images'] & T.ImageId
/** Volume ID containing the PostgreSQL data directory */
dbVolume: M['volumes'][number]
/** Path to PGDATA within the container (e.g. '/var/lib/postgresql/data') */
pgdata: string
/** Volume mountpoint (e.g. '/var/lib/postgresql') */
mountpoint: string
/** Subpath from mountpoint to PGDATA (e.g. '/data', '/18/docker') */
pgdataPath: string
/** PostgreSQL database name to dump */
database: string
/** PostgreSQL user */
user: string
/** PostgreSQL password (for restore). Can be a string or a function that returns one — functions are resolved lazily after volumes are restored. */
/** PostgreSQL password (for restore). Can be a string, a function that returns one (resolved lazily after volumes are restored), or null for trust auth. */
password: LazyPassword
/** Additional initdb arguments (e.g. ['--data-checksums']) */
initdbArgs?: string[]
/** Additional options passed to `pg_ctl start -o` (e.g. '-c shared_preload_libraries=vectorchord'). Appended after `-c listen_addresses=`. */
pgOptions?: string
}
/** Configuration for MySQL/MariaDB dump-based backup */
@@ -52,6 +57,8 @@ export type MysqlDumpConfig<M extends T.SDKManifest> = {
engine: 'mysql' | 'mariadb'
/** Custom readiness check command (default: ['mysqladmin', 'ping', ...]) */
readyCommand?: string[]
/** Additional options passed to `mysqld` on startup (e.g. '--innodb-buffer-pool-size=256M'). Appended after `--bind-address=127.0.0.1`. */
mysqldOptions?: string[]
}
/** Bind-mount the backup target into a SubContainer's rootfs */
@@ -154,19 +161,21 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
const {
imageId,
dbVolume,
pgdata,
mountpoint,
pgdataPath,
database,
user,
password,
initdbArgs = [],
pgOptions,
} = config
const pgdata = `${mountpoint}${pgdataPath}`
const dumpFile = `${BACKUP_CONTAINER_MOUNT}/${database}-db.dump`
const pgMountpoint = pgdata.replace(/\/data$/, '') || pgdata
function dbMounts() {
return Mounts.of<M>().mountVolume({
volumeId: dbVolume,
mountpoint: pgMountpoint,
mountpoint: mountpoint,
readonly: false,
subpath: null,
})
@@ -193,10 +202,12 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
user: 'root',
})
console.log(`[${label}] starting postgres`)
await sub.execFail(
['pg_ctl', 'start', '-D', pgdata, '-o', '-c listen_addresses='],
{ user: 'postgres' },
)
const pgStartOpts = pgOptions
? `-c listen_addresses= ${pgOptions}`
: '-c listen_addresses='
await sub.execFail(['pg_ctl', 'start', '-D', pgdata, '-o', pgStartOpts], {
user: 'postgres',
})
for (let i = 0; i < 60; i++) {
const { exitCode } = await sub.exec(['pg_isready', '-U', user], {
user: 'postgres',
@@ -249,7 +260,7 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
async (sub) => {
await mountBackupTarget(sub.rootfs)
await sub.execFail(
['chown', '-R', 'postgres:postgres', pgMountpoint],
['chown', '-R', 'postgres:postgres', mountpoint],
{ user: 'root' },
)
await sub.execFail(
@@ -274,18 +285,20 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
{ user: 'postgres' },
null,
)
await sub.execFail(
[
'psql',
'-U',
user,
'-d',
database,
'-c',
`ALTER USER ${user} WITH PASSWORD '${resolvedPassword}'`,
],
{ user: 'postgres' },
)
if (resolvedPassword !== null) {
await sub.execFail(
[
'psql',
'-U',
user,
'-d',
database,
'-c',
`ALTER USER ${user} WITH PASSWORD '${resolvedPassword}'`,
],
{ user: 'postgres' },
)
}
await sub.execFail(['pg_ctl', 'stop', '-D', pgdata, '-w'], {
user: 'postgres',
})
@@ -318,6 +331,7 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
password,
engine,
readyCommand,
mysqldOptions = [],
} = config
const dumpFile = `${BACKUP_CONTAINER_MOUNT}/${database}-db.dump`
@@ -342,6 +356,42 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
throw new Error('MySQL/MariaDB failed to become ready within 30 seconds')
}
/**
 * Start the database server inside the subcontainer, using the strategy
 * appropriate for the configured engine (closes over `engine`, `datadir`,
 * and `mysqldOptions` from the enclosing backup config).
 *
 * - mariadb: `mysqld` has no `--daemonize` flag, so the exec is launched
 *   fire-and-forget and errors are only logged.
 * - mysql: `--daemonize` is passed and the call is awaited (with a null
 *   timeout, i.e. no exec timeout) so the server is backgrounded cleanly.
 *
 * NOTE(review): callers are expected to follow this with a readiness poll
 * (e.g. waitForMysql) before issuing queries — this function does not wait
 * for the server to accept connections in the mariadb case.
 */
async function startMysql(sub: {
  exec(cmd: string[], opts?: any): Promise<{ exitCode: number | null }>
  execFail(cmd: string[], opts?: any, timeout?: number | null): Promise<any>
}) {
  if (engine === 'mariadb') {
    // MariaDB doesn't support --daemonize; fire-and-forget the exec
    sub
      .exec(
        [
          'mysqld',
          '--user=mysql',
          `--datadir=${datadir}`,
          '--bind-address=127.0.0.1',
          ...mysqldOptions,
        ],
        { user: 'root' },
      )
      .catch((e) =>
        console.error('[mysql-backup] mysqld exited unexpectedly:', e),
      )
  } else {
    await sub.execFail(
      [
        'mysqld',
        '--user=mysql',
        `--datadir=${datadir}`,
        '--bind-address=127.0.0.1',
        '--daemonize',
        ...mysqldOptions,
      ],
      { user: 'root' },
      // null disables the exec timeout while mysqld daemonizes
      null,
    )
  }
}
return new Backups<M>()
.setPreBackup(async (effects) => {
const pw = await resolvePassword(password)
@@ -350,7 +400,7 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
'ping',
'-u',
user,
`-p${pw}`,
...(pw !== null ? [`-p${pw}`] : []),
'--silent',
]
await SubContainerRc.withTemp<M, void, BackupEffects>(
@@ -371,24 +421,14 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
user: 'root',
})
}
await sub.execFail(
[
'mysqld',
'--user=mysql',
`--datadir=${datadir}`,
'--skip-networking',
'--daemonize',
],
{ user: 'root' },
null,
)
await startMysql(sub)
await waitForMysql(sub, readyCmd)
await sub.execFail(
[
'mysqldump',
'-u',
user,
`-p${pw}`,
...(pw !== null ? [`-p${pw}`] : []),
'--single-transaction',
`--result-file=${dumpFile}`,
database,
@@ -396,9 +436,15 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
{ user: 'root' },
null,
)
// Graceful shutdown via SIGTERM; wait for exit
await sub.execFail(
['mysqladmin', '-u', user, `-p${pw}`, 'shutdown'],
[
'sh',
'-c',
'PID=$(cat /var/run/mysqld/mysqld.pid) && kill $PID && tail --pid=$PID -f /dev/null',
],
{ user: 'root' },
null,
)
},
)
@@ -435,17 +481,7 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
{ user: 'root' },
)
}
await sub.execFail(
[
'mysqld',
'--user=mysql',
`--datadir=${datadir}`,
'--skip-networking',
'--daemonize',
],
{ user: 'root' },
null,
)
await startMysql(sub)
// After fresh init, root has no password
await waitForMysql(sub, [
'mysqladmin',
@@ -455,29 +491,32 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
'--silent',
])
// Create database, user, and set password
await sub.execFail(
[
'mysql',
'-u',
'root',
'-e',
`CREATE DATABASE IF NOT EXISTS \`${database}\`; CREATE USER IF NOT EXISTS '${user}'@'localhost' IDENTIFIED BY '${pw}'; GRANT ALL ON \`${database}\`.* TO '${user}'@'localhost'; ALTER USER 'root'@'localhost' IDENTIFIED BY '${pw}'; FLUSH PRIVILEGES;`,
],
{ user: 'root' },
)
const grantSql =
pw !== null
? `CREATE DATABASE IF NOT EXISTS \`${database}\`; CREATE USER IF NOT EXISTS '${user}'@'localhost' IDENTIFIED BY '${pw}'; GRANT ALL ON \`${database}\`.* TO '${user}'@'localhost'; ALTER USER 'root'@'localhost' IDENTIFIED BY '${pw}'; FLUSH PRIVILEGES;`
: `CREATE DATABASE IF NOT EXISTS \`${database}\`; CREATE USER IF NOT EXISTS '${user}'@'localhost'; GRANT ALL ON \`${database}\`.* TO '${user}'@'localhost'; FLUSH PRIVILEGES;`
await sub.execFail(['mysql', '-u', 'root', '-e', grantSql], {
user: 'root',
})
// Restore from dump
await sub.execFail(
[
'sh',
'-c',
`mysql -u root -p'${pw}' \`${database}\` < ${dumpFile}`,
`mysql -u root ${pw !== null ? `-p'${pw}'` : ''} ${database} < ${dumpFile}`,
],
{ user: 'root' },
null,
)
// Graceful shutdown via SIGTERM; wait for exit
await sub.execFail(
['mysqladmin', '-u', 'root', `-p${password}`, 'shutdown'],
[
'sh',
'-c',
'PID=$(cat /var/run/mysqld/mysqld.pid) && kill $PID && tail --pid=$PID -f /dev/null',
],
{ user: 'root' },
null,
)
},
)

View File

@@ -1,15 +1,15 @@
import { z } from 'zod'
import * as YAML from 'yaml'
import * as TOML from '@iarna/toml'
import * as INI from 'ini'
import {
XMLParser,
XMLBuilder,
XMLParser,
type X2jOptions,
type XmlBuilderOptions,
} from 'fast-xml-parser'
import * as T from '../../../base/lib/types'
import * as INI from 'ini'
import * as fs from 'node:fs/promises'
import * as YAML from 'yaml'
import { z } from 'zod'
import * as T from '../../../base/lib/types'
import { asError, deepEqual } from '../../../base/lib/util'
import { Watchable } from '../../../base/lib/util/Watchable'
import { PathBase } from './Volume'
@@ -382,7 +382,7 @@ export class FileHelper<A> {
const mergeData = this.validate(fileMerge({}, fileData, data))
const toWrite = this.writeData(mergeData)
if (toWrite !== fileDataRaw) {
this.writeFile(mergeData)
await this.writeFile(mergeData)
if (!options.allowWriteAfterConst && effects.constRetry) {
const records = this.consts.filter(([c]) => c === effects.constRetry)
for (const record of records) {

View File

@@ -1,12 +1,12 @@
{
"name": "@start9labs/start-sdk",
"version": "0.4.0-beta.65",
"version": "0.4.0-beta.66",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@start9labs/start-sdk",
"version": "0.4.0-beta.65",
"version": "0.4.0-beta.66",
"license": "MIT",
"dependencies": {
"@iarna/toml": "^3.0.0",

View File

@@ -1,6 +1,6 @@
{
"name": "@start9labs/start-sdk",
"version": "0.4.0-beta.65",
"version": "0.4.0-beta.66",
"description": "Software development kit to facilitate packaging services for StartOS",
"main": "./package/lib/index.js",
"types": "./package/lib/index.d.ts",

4
web/package-lock.json generated
View File

@@ -1,12 +1,12 @@
{
"name": "startos-ui",
"version": "0.4.0-alpha.22",
"version": "0.4.0-alpha.23",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "startos-ui",
"version": "0.4.0-alpha.22",
"version": "0.4.0-alpha.23",
"license": "MIT",
"dependencies": {
"@angular/cdk": "^21.2.1",

View File

@@ -1,6 +1,6 @@
{
"name": "startos-ui",
"version": "0.4.0-alpha.22",
"version": "0.4.0-alpha.23",
"author": "Start9 Labs, Inc",
"homepage": "https://start9.com/",
"license": "MIT",

View File

@@ -3,6 +3,7 @@ import { FormsModule } from '@angular/forms'
import { i18nPipe } from '@start9labs/shared'
import {
TuiButton,
TuiCell,
TuiCheckbox,
TuiDialogContext,
TuiNotification,
@@ -19,6 +20,7 @@ export interface PreserveOverwriteData {
imports: [
FormsModule,
TuiButton,
TuiCell,
TuiCheckbox,
TuiHeader,
TuiNotification,
@@ -49,9 +51,9 @@ export interface PreserveOverwriteData {
| i18n
}}
</p>
<label>
<label tuiCell>
<input tuiCheckbox type="checkbox" [(ngModel)]="backupAck" />
{{ 'I have a backup of my data' | i18n }}
<span tuiTitle>{{ 'I have a backup of my data' | i18n }}</span>
</label>
}
<footer>

View File

@@ -680,7 +680,7 @@ export default {
755: 'Schnittstelle(n)',
756: 'Keine Portweiterleitungsregeln',
757: 'Portweiterleitungsregeln am Gateway erforderlich',
763: 'Sie sind derzeit über Ihre .local-Adresse verbunden. Das Ändern des Hostnamens erfordert einen Wechsel zur neuen .local-Adresse.',
763: 'Sie sind derzeit über Ihre .local-Adresse verbunden. Das Ändern des Hostnamens erfordert einen Wechsel zur neuen .local-Adresse. Ein Neustart des Servers ist ebenfalls erforderlich.',
764: 'Hostname geändert',
765: 'Neue Adresse öffnen',
766: 'Ihr Server ist jetzt erreichbar unter',
@@ -722,4 +722,6 @@ export default {
804: 'Ich habe ein Backup meiner Daten',
805: 'Öffentliche Domain hinzufügen',
806: 'Ergebnis',
807: 'Nach dem Öffnen der neuen Adresse werden Sie zum Neustart aufgefordert.',
808: 'Ein Neustart ist erforderlich, damit die Dienstschnittstellen den neuen Hostnamen verwenden.',
} satisfies i18n

View File

@@ -680,7 +680,7 @@ export const ENGLISH: Record<string, number> = {
'Interface(s)': 755,
'No port forwarding rules': 756,
'Port forwarding rules required on gateway': 757,
'You are currently connected via your .local address. Changing the hostname will require you to switch to the new .local address.': 763,
'You are currently connected via your .local address. Changing the hostname will require you to switch to the new .local address. A server restart will also be needed.': 763,
'Hostname Changed': 764,
'Open new address': 765,
'Your server is now reachable at': 766,
@@ -723,4 +723,6 @@ export const ENGLISH: Record<string, number> = {
'I have a backup of my data': 804,
'Add Public Domain': 805,
'Result': 806,
'After opening the new address, you will be prompted to restart.': 807,
'A restart is required for service interfaces to use the new hostname.': 808,
}

View File

@@ -680,7 +680,7 @@ export default {
755: 'Interfaz/Interfaces',
756: 'Sin reglas de redirección de puertos',
757: 'Reglas de redirección de puertos requeridas en la puerta de enlace',
763: 'Actualmente está conectado a través de su dirección .local. Cambiar el nombre de host requerirá que cambie a la nueva dirección .local.',
763: 'Actualmente está conectado a través de su dirección .local. Cambiar el nombre de host requerirá que cambie a la nueva dirección .local. También será necesario reiniciar el servidor.',
764: 'Nombre de host cambiado',
765: 'Abrir nueva dirección',
766: 'Su servidor ahora es accesible en',
@@ -722,4 +722,6 @@ export default {
804: 'Tengo una copia de seguridad de mis datos',
805: 'Agregar dominio público',
806: 'Resultado',
807: 'Después de abrir la nueva dirección, se le pedirá que reinicie.',
808: 'Se requiere un reinicio para que las interfaces de servicio utilicen el nuevo nombre de host.',
} satisfies i18n

View File

@@ -680,7 +680,7 @@ export default {
755: 'Interface(s)',
756: 'Aucune règle de redirection de port',
757: 'Règles de redirection de ports requises sur la passerelle',
763: "Vous êtes actuellement connecté via votre adresse .local. Changer le nom d'hôte nécessitera de passer à la nouvelle adresse .local.",
763: "Vous êtes actuellement connecté via votre adresse .local. Changer le nom d'hôte nécessitera de passer à la nouvelle adresse .local. Un redémarrage du serveur sera également nécessaire.",
764: "Nom d'hôte modifié",
765: 'Ouvrir la nouvelle adresse',
766: 'Votre serveur est maintenant accessible à',
@@ -722,4 +722,6 @@ export default {
804: "J'ai une sauvegarde de mes données",
805: 'Ajouter un domaine public',
806: 'Résultat',
807: 'Après avoir ouvert la nouvelle adresse, vous serez invité à redémarrer.',
808: "Un redémarrage est nécessaire pour que les interfaces de service utilisent le nouveau nom d'hôte.",
} satisfies i18n

View File

@@ -680,7 +680,7 @@ export default {
755: 'Interfejs(y)',
756: 'Brak reguł przekierowania portów',
757: 'Reguły przekierowania portów wymagane na bramce',
763: 'Jesteś obecnie połączony przez adres .local. Zmiana nazwy hosta będzie wymagać przełączenia na nowy adres .local.',
763: 'Jesteś obecnie połączony przez adres .local. Zmiana nazwy hosta będzie wymagać przełączenia na nowy adres .local. Konieczne będzie również ponowne uruchomienie serwera.',
764: 'Nazwa hosta zmieniona',
765: 'Otwórz nowy adres',
766: 'Twój serwer jest teraz dostępny pod adresem',
@@ -722,4 +722,6 @@ export default {
804: 'Mam kopię zapasową moich danych',
805: 'Dodaj domenę publiczną',
806: 'Wynik',
807: 'Po otwarciu nowego adresu zostaniesz poproszony o ponowne uruchomienie.',
808: 'Ponowne uruchomienie jest wymagane, aby interfejsy usług używały nowej nazwy hosta.',
} satisfies i18n

View File

@@ -1,4 +1,5 @@
export type AccessType =
| 'tor'
| 'mdns'
| 'localhost'
| 'ipv4'

View File

@@ -17,11 +17,13 @@ import {
TuiButton,
TuiDialogContext,
TuiError,
TuiNotificationService,
TuiIcon,
TuiInput,
TuiNotificationService,
TuiTextfield,
tuiValidationErrorsProvider,
} from '@taiga-ui/core'
import { TuiButtonLoading } from '@taiga-ui/kit'
import { TuiButtonLoading, TuiPassword } from '@taiga-ui/kit'
import { TuiForm } from '@taiga-ui/layout'
import { injectContext, PolymorpheusComponent } from '@taiga-ui/polymorpheus'
import { map } from 'rxjs'
@@ -32,16 +34,24 @@ import { ApiService } from 'src/app/services/api/api.service'
<form tuiForm [formGroup]="form">
<tui-textfield>
<label tuiLabel>New password</label>
<input tuiInput tuiAutoFocus formControlName="password" />
<input
tuiInput
tuiAutoFocus
type="password"
formControlName="password"
/>
<tui-icon tuiPassword />
</tui-textfield>
<tui-error formControlName="password" />
<tui-textfield>
<label tuiLabel>Confirm new password</label>
<input
tuiInput
type="password"
formControlName="confirm"
[tuiValidator]="matchValidator()"
/>
<tui-icon tuiPassword />
</tui-textfield>
<tui-error formControlName="confirm" />
<footer>
@@ -72,7 +82,10 @@ import { ApiService } from 'src/app/services/api/api.service'
TuiButtonLoading,
TuiError,
TuiForm,
TuiIcon,
TuiInput,
TuiPassword,
TuiTextfield,
TuiValidator,
],
})

View File

@@ -1,8 +1,5 @@
@if (config.isLanHttp()) {
<!-- Local HTTP -->
<ca-wizard />
} @else {
<!-- not Local HTTP -->
@if (config.isSecureContext()) {
<!-- Secure context -->
<div tuiCardLarge class="card">
<img alt="StartOS Icon" class="logo" src="assets/img/icon.png" />
<h1 class="header">{{ 'Login to StartOS' | i18n }}</h1>
@@ -23,4 +20,7 @@
<button tuiButton class="button">{{ 'Login' | i18n }}</button>
</form>
</div>
} @else {
<!-- Insecure context -->
<ca-wizard />
}

View File

@@ -24,7 +24,11 @@ export class LogsFetchDirective {
}),
),
),
tap(res => this.component.setCursor(res.startCursor)),
tap(res => {
if (res.startCursor) {
this.component.setCursor(res.startCursor)
}
}),
map(({ entries }) => convertAnsi(entries)),
catchError(e => {
this.errors.handleError(e)

View File

@@ -52,7 +52,7 @@ export class LogsComponent {
@Input({ required: true }) context!: string
scrollTop = 0
startCursor?: string | null
startCursor?: string
scroll = true
loading = false
previous: readonly string[] = []

View File

@@ -45,7 +45,11 @@ export class LogsPipe implements PipeTransform {
map(() => this.getMessage(true)),
),
defer(() => followLogs(this.options)).pipe(
tap(r => this.logs.setCursor(r.startCursor)),
tap(r => {
if (r.startCursor) {
this.logs.setCursor(r.startCursor)
}
}),
switchMap(r =>
this.api.openWebsocket$<T.LogEntry>(r.guid, {
openObserver: {

View File

@@ -27,10 +27,15 @@ import { TuiIcon, TuiLoader } from '@taiga-ui/core'
styles: `
span {
display: flex;
align-items: center;
align-items: start;
gap: 0.5rem;
}
tui-loader,
tui-icon {
flex-shrink: 0;
}
.name {
width: 9.5rem;
}

View File

@@ -202,7 +202,7 @@ export class ActionInputModal {
const message = `${this.i18n.transform('As a result of this change, the following services will no longer work properly and may crash')}:<ul>`
const content =
`${message}${breakages.map(id => `<li><b>${getManifest(packages[id]!).title}</b></li>`)}</ul>` as i18nKey
`${message}${breakages.map(id => `<li><b>${getManifest(packages[id]!).title}</b></li>`).join('')}</ul>` as i18nKey
return firstValueFrom(
this.dialog

View File

@@ -4,10 +4,11 @@ import {
Component,
inject,
INJECTOR,
OnInit,
} from '@angular/core'
import { toSignal } from '@angular/core/rxjs-interop'
import { FormsModule } from '@angular/forms'
import { RouterLink } from '@angular/router'
import { ActivatedRoute, Router, RouterLink } from '@angular/router'
import { WA_WINDOW } from '@ng-web-apis/common'
import {
DialogService,
@@ -277,7 +278,7 @@ import { UPDATE } from './update.component'
TuiAnimated,
],
})
export default class SystemGeneralComponent {
export default class SystemGeneralComponent implements OnInit {
private readonly dialogs = inject(TuiResponsiveDialogService)
private readonly loader = inject(TuiNotificationMiddleService)
private readonly errorService = inject(ErrorService)
@@ -287,6 +288,20 @@ export default class SystemGeneralComponent {
private readonly i18n = inject(i18nPipe)
private readonly injector = inject(INJECTOR)
private readonly win = inject(WA_WINDOW)
private readonly route = inject(ActivatedRoute)
private readonly router = inject(Router)
ngOnInit() {
this.route.queryParams
.pipe(filter(params => params['restart'] === 'hostname'))
.subscribe(async () => {
await this.router.navigate([], {
relativeTo: this.route,
queryParams: {},
})
this.promptHostnameRestart()
})
}
count = 0
@@ -399,7 +414,7 @@ export default class SystemGeneralComponent {
label: 'Warning',
data: {
content:
'You are currently connected via your .local address. Changing the hostname will require you to switch to the new .local address.',
'You are currently connected via your .local address. Changing the hostname will require you to switch to the new .local address. A server restart will also be needed.',
yes: 'Save',
no: 'Cancel',
},
@@ -419,20 +434,23 @@ export default class SystemGeneralComponent {
if (wasLocal) {
const { protocol, port } = this.win.location
const newUrl = `${protocol}//${hostname}.local${port ? ':' + port : ''}`
const portSuffix = port ? ':' + port : ''
const newUrl = `${protocol}//${hostname}.local${portSuffix}/system/general?restart=hostname`
this.dialog
.openConfirm({
label: 'Hostname Changed',
data: {
content:
`${this.i18n.transform('Your server is now reachable at')} ${hostname}.local` as i18nKey,
`${this.i18n.transform('Your server is now reachable at')} ${hostname}.local. ${this.i18n.transform('After opening the new address, you will be prompted to restart.')}` as i18nKey,
yes: 'Open new address',
no: 'Dismiss',
},
})
.pipe(filter(Boolean))
.subscribe(() => this.win.open(newUrl, '_blank'))
} else {
this.promptHostnameRestart()
}
} catch (e: any) {
this.errorService.handleError(e)
@@ -563,6 +581,21 @@ export default class SystemGeneralComponent {
.subscribe(() => this.restart())
}
/**
 * Ask the user to restart the server so service interfaces pick up the
 * newly applied hostname. Confirming ("Restart now") triggers `restart()`;
 * declining ("Later") dismisses with no action (filter(Boolean) drops the
 * falsy dismissal).
 */
private promptHostnameRestart() {
  this.dialog
    .openConfirm({
      label: 'Restart to apply',
      data: {
        content:
          'A restart is required for service interfaces to use the new hostname.',
        yes: 'Restart now',
        no: 'Later',
      },
    })
    .pipe(filter(Boolean))
    .subscribe(() => this.restart())
}
private promptLanguageRestart() {
this.dialog
.openConfirm({

View File

@@ -26,6 +26,7 @@ import { PatchDB } from 'patch-db-client'
import {
catchError,
defer,
exhaustMap,
finalize,
first,
map,
@@ -34,7 +35,10 @@ import {
of,
Subject,
switchMap,
takeUntil,
takeWhile,
tap,
timer,
} from 'rxjs'
import {
FormComponent,
@@ -184,7 +188,7 @@ export default class SystemWifiComponent {
),
this.refresh$.pipe(
tap(() => this.refreshing.set(true)),
switchMap(() =>
exhaustMap(() =>
this.getWifi$().pipe(finalize(() => this.refreshing.set(false))),
),
),
@@ -205,6 +209,10 @@ export default class SystemWifiComponent {
try {
await this.api.enableWifi({ enabled: enable })
if (enable) {
this.update$.next({ known: [], available: [] })
this.pollForNetworks()
}
} catch (e: any) {
this.errorService.handleError(e)
} finally {
@@ -259,6 +267,15 @@ export default class SystemWifiComponent {
}
}
/**
 * After enabling WiFi, repeatedly trigger a refresh until available
 * networks appear. Emits immediately and then every 500ms; stops as soon
 * as `wifi().available` is non-empty (takeWhile) or after 5 seconds
 * elapse (takeUntil), whichever comes first.
 */
private pollForNetworks(): void {
  timer(0, 500)
    .pipe(
      takeWhile(() => !this.wifi()?.available?.length),
      takeUntil(timer(5000)),
    )
    .subscribe(() => this.refresh$.next())
}
private async confirmWifi(ssid: string): Promise<void> {
const maxAttempts = 5
let attempts = 0

View File

@@ -1201,7 +1201,8 @@ export class MockApiService extends ApiService {
'chain-state': {
name: 'Chain State',
result: 'loading',
message: 'Bitcoin is syncing from genesis',
message:
'Bitcoin is syncing from genesis. Downloading block headers and verifying chain integrity, please wait',
},
'p2p-interface': {
name: 'P2P Interface',

View File

@@ -32,6 +32,7 @@ export class ConfigService {
private getAccessType = utils.once(() => {
if (useMocks) return mocks.maskAs
if (this.hostname === 'localhost') return 'localhost'
if (this.hostname.endsWith('.onion')) return 'tor'
if (this.hostname.endsWith('.local')) return 'mdns'
let ip = null
try {
@@ -49,11 +50,11 @@ export class ConfigService {
return this.getAccessType()
}
isLanHttp(): boolean {
return !this.isHttps() && this.accessType !== 'localhost'
}
isHttps(): boolean {
return useMocks ? mocks.maskAsHttps : this.protocol === 'https:'
isSecureContext(): boolean {
return useMocks
? mocks.maskAsHttps ||
mocks.maskAs === 'localhost' ||
mocks.maskAs === 'tor'
: window.isSecureContext
}
}

View File

@@ -71,13 +71,16 @@ export function getInstalledPrimaryStatus({
tasks,
statusInfo,
}: T.PackageDataEntry): PrimaryStatus {
const base = getInstalledBaseStatus(statusInfo)
if (
!INACTIVE_STATUSES.includes(base) &&
Object.values(tasks).some(t => t.active && t.task.severity === 'critical')
) {
return 'task-required'
}
return getInstalledBaseStatus(statusInfo)
return base
}
function getHealthStatus(statusInfo: T.StatusInfo): T.HealthStatus | null {