mirror of
https://github.com/Start9Labs/start-os.git
synced 2026-03-31 20:43:41 +00:00
Compare commits
5 Commits
v0.4.0-alp
...
mcp
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
ca6a17eaad | ||
|
|
f6db176f6d | ||
|
|
d8f8759dea | ||
|
|
a9b0b43005 | ||
|
|
2b70b67824 |
2
.github/actions/setup-build/action.yml
vendored
2
.github/actions/setup-build/action.yml
vendored
@@ -47,7 +47,7 @@ runs:
|
||||
sudo rm -rf /usr/share/swift
|
||||
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
|
||||
|
||||
# Some runners lack /opt/hostedtoolcache, which setup-python and setup-qemu expect
|
||||
# BuildJet runners lack /opt/hostedtoolcache, which setup-python and setup-qemu expect
|
||||
- name: Ensure hostedtoolcache exists
|
||||
shell: bash
|
||||
run: sudo mkdir -p /opt/hostedtoolcache && sudo chown $USER:$USER /opt/hostedtoolcache
|
||||
|
||||
2
.github/workflows/start-cli.yaml
vendored
2
.github/workflows/start-cli.yaml
vendored
@@ -63,7 +63,7 @@ jobs:
|
||||
"ALL": ["x86_64-unknown-linux-musl", "x86_64-apple-darwin", "aarch64-unknown-linux-musl", "aarch64-apple-darwin", "riscv64gc-unknown-linux-musl"]
|
||||
}')[github.event.inputs.platform || 'ALL']
|
||||
}}
|
||||
runs-on: ${{ fromJson('["ubuntu-latest", "ubuntu-24.04-32-cores"]')[github.event.inputs.runner == 'fast'] }}
|
||||
runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
|
||||
steps:
|
||||
- name: Mount tmpfs
|
||||
if: ${{ github.event.inputs.runner == 'fast' }}
|
||||
|
||||
4
.github/workflows/start-registry.yaml
vendored
4
.github/workflows/start-registry.yaml
vendored
@@ -59,7 +59,7 @@ jobs:
|
||||
"ALL": ["x86_64", "aarch64", "riscv64"]
|
||||
}')[github.event.inputs.platform || 'ALL']
|
||||
}}
|
||||
runs-on: ${{ fromJson('["ubuntu-latest", "ubuntu-24.04-32-cores"]')[github.event.inputs.runner == 'fast'] }}
|
||||
runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
|
||||
steps:
|
||||
- name: Mount tmpfs
|
||||
if: ${{ github.event.inputs.runner == 'fast' }}
|
||||
@@ -89,7 +89,7 @@ jobs:
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
runs-on: ${{ fromJson('["ubuntu-latest", "ubuntu-24.04-32-cores"]')[github.event.inputs.runner == 'fast'] }}
|
||||
runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
|
||||
steps:
|
||||
- name: Cleaning up unnecessary files
|
||||
run: |
|
||||
|
||||
2
.github/workflows/start-tunnel.yaml
vendored
2
.github/workflows/start-tunnel.yaml
vendored
@@ -59,7 +59,7 @@ jobs:
|
||||
"ALL": ["x86_64", "aarch64", "riscv64"]
|
||||
}')[github.event.inputs.platform || 'ALL']
|
||||
}}
|
||||
runs-on: ${{ fromJson('["ubuntu-latest", "ubuntu-24.04-32-cores"]')[github.event.inputs.runner == 'fast'] }}
|
||||
runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
|
||||
steps:
|
||||
- name: Mount tmpfs
|
||||
if: ${{ github.event.inputs.runner == 'fast' }}
|
||||
|
||||
148
.github/workflows/startos-iso.yaml
vendored
148
.github/workflows/startos-iso.yaml
vendored
@@ -29,7 +29,7 @@ on:
|
||||
- aarch64
|
||||
- aarch64-nonfree
|
||||
- aarch64-nvidia
|
||||
- raspberrypi
|
||||
# - raspberrypi
|
||||
- riscv64
|
||||
- riscv64-nonfree
|
||||
deploy:
|
||||
@@ -89,9 +89,9 @@ jobs:
|
||||
"riscv64": "ubuntu-latest"
|
||||
}')[matrix.arch],
|
||||
fromJson('{
|
||||
"x86_64": "amd64-fast",
|
||||
"aarch64": "aarch64-fast",
|
||||
"riscv64": "amd64-fast"
|
||||
"x86_64": "buildjet-32vcpu-ubuntu-2204",
|
||||
"aarch64": "buildjet-32vcpu-ubuntu-2204-arm",
|
||||
"riscv64": "buildjet-32vcpu-ubuntu-2204"
|
||||
}')[matrix.arch]
|
||||
)
|
||||
)[github.event.inputs.runner == 'fast']
|
||||
@@ -153,15 +153,15 @@ jobs:
|
||||
"riscv64-nonfree": "ubuntu-24.04-arm",
|
||||
}')[matrix.platform],
|
||||
fromJson('{
|
||||
"x86_64": "amd64-fast",
|
||||
"x86_64-nonfree": "amd64-fast",
|
||||
"x86_64-nvidia": "amd64-fast",
|
||||
"aarch64": "aarch64-fast",
|
||||
"aarch64-nonfree": "aarch64-fast",
|
||||
"aarch64-nvidia": "aarch64-fast",
|
||||
"raspberrypi": "aarch64-fast",
|
||||
"riscv64": "amd64-fast",
|
||||
"riscv64-nonfree": "amd64-fast",
|
||||
"x86_64": "buildjet-8vcpu-ubuntu-2204",
|
||||
"x86_64-nonfree": "buildjet-8vcpu-ubuntu-2204",
|
||||
"x86_64-nvidia": "buildjet-8vcpu-ubuntu-2204",
|
||||
"aarch64": "buildjet-8vcpu-ubuntu-2204-arm",
|
||||
"aarch64-nonfree": "buildjet-8vcpu-ubuntu-2204-arm",
|
||||
"aarch64-nvidia": "buildjet-8vcpu-ubuntu-2204-arm",
|
||||
"raspberrypi": "buildjet-8vcpu-ubuntu-2204-arm",
|
||||
"riscv64": "buildjet-8vcpu-ubuntu-2204",
|
||||
"riscv64-nonfree": "buildjet-8vcpu-ubuntu-2204",
|
||||
}')[matrix.platform]
|
||||
)
|
||||
)[github.event.inputs.runner == 'fast']
|
||||
@@ -203,7 +203,7 @@ jobs:
|
||||
sudo rm -rf "$AGENT_TOOLSDIRECTORY" # Pre-cached tool cache (Go, Node, etc.)
|
||||
if: ${{ github.event.inputs.runner != 'fast' }}
|
||||
|
||||
# Some runners lack /opt/hostedtoolcache, which setup-qemu expects
|
||||
# BuildJet runners lack /opt/hostedtoolcache, which setup-qemu expects
|
||||
- name: Ensure hostedtoolcache exists
|
||||
run: sudo mkdir -p /opt/hostedtoolcache && sudo chown $USER:$USER /opt/hostedtoolcache
|
||||
|
||||
@@ -268,123 +268,3 @@ jobs:
|
||||
name: ${{ matrix.platform }}.img
|
||||
path: results/*.img
|
||||
if: ${{ matrix.platform == 'raspberrypi' }}
|
||||
|
||||
deploy:
|
||||
name: Deploy
|
||||
needs: [image]
|
||||
if: github.event_name == 'workflow_dispatch' && github.event.inputs.deploy != 'NONE'
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
REGISTRY: >-
|
||||
${{
|
||||
fromJson('{
|
||||
"alpha": "https://alpha-registry-x.start9.com",
|
||||
"beta": "https://beta-registry.start9.com"
|
||||
}')[github.event.inputs.deploy]
|
||||
}}
|
||||
S3_BUCKET: s3://startos-images
|
||||
S3_CDN: https://startos-images.nyc3.cdn.digitaloceanspaces.com
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
sparse-checkout: web/package.json
|
||||
|
||||
- name: Determine version
|
||||
id: version
|
||||
run: |
|
||||
VERSION=$(sed -n 's/.*"version": *"\([^"]*\)".*/\1/p' web/package.json | head -1)
|
||||
echo "version=$VERSION" >> "$GITHUB_OUTPUT"
|
||||
echo "Version: $VERSION"
|
||||
|
||||
- name: Determine platforms
|
||||
id: platforms
|
||||
run: |
|
||||
INPUT="${{ github.event.inputs.platform }}"
|
||||
if [ "$INPUT" = "ALL" ]; then
|
||||
PLATFORMS="x86_64 x86_64-nonfree x86_64-nvidia aarch64 aarch64-nonfree aarch64-nvidia riscv64 riscv64-nonfree"
|
||||
else
|
||||
PLATFORMS="$INPUT"
|
||||
fi
|
||||
echo "list=$PLATFORMS" >> "$GITHUB_OUTPUT"
|
||||
echo "Platforms: $PLATFORMS"
|
||||
|
||||
- name: Download squashfs artifacts
|
||||
uses: actions/download-artifact@v8
|
||||
with:
|
||||
pattern: "*.squashfs"
|
||||
path: artifacts/
|
||||
merge-multiple: true
|
||||
|
||||
- name: Download ISO artifacts
|
||||
uses: actions/download-artifact@v8
|
||||
with:
|
||||
pattern: "*.iso"
|
||||
path: artifacts/
|
||||
merge-multiple: true
|
||||
|
||||
- name: Install start-cli
|
||||
run: |
|
||||
ARCH=$(uname -m)
|
||||
OS=$(uname -s | tr '[:upper:]' '[:lower:]')
|
||||
ASSET_NAME="start-cli_${ARCH}-${OS}"
|
||||
DOWNLOAD_URL=$(curl -fsS \
|
||||
-H "Authorization: token ${{ github.token }}" \
|
||||
https://api.github.com/repos/Start9Labs/start-os/releases \
|
||||
| jq -r '[.[].assets[] | select(.name=="'"$ASSET_NAME"'")] | first | .browser_download_url')
|
||||
curl -fsSL \
|
||||
-H "Authorization: token ${{ github.token }}" \
|
||||
-H "Accept: application/octet-stream" \
|
||||
"$DOWNLOAD_URL" -o /tmp/start-cli
|
||||
sudo install -m 755 /tmp/start-cli /usr/local/bin/start-cli
|
||||
echo "start-cli: $(start-cli --version)"
|
||||
|
||||
- name: Configure S3
|
||||
run: |
|
||||
sudo apt-get install -y -qq s3cmd > /dev/null
|
||||
cat > ~/.s3cfg <<EOF
|
||||
[default]
|
||||
access_key = ${{ secrets.S3_ACCESS_KEY }}
|
||||
secret_key = ${{ secrets.S3_SECRET_KEY }}
|
||||
host_base = nyc3.digitaloceanspaces.com
|
||||
host_bucket = %(bucket)s.nyc3.digitaloceanspaces.com
|
||||
use_https = True
|
||||
EOF
|
||||
|
||||
- name: Set up developer key
|
||||
run: |
|
||||
mkdir -p ~/.startos
|
||||
printf '%s' "${{ secrets.DEV_KEY }}" > ~/.startos/developer.key.pem
|
||||
|
||||
- name: Upload to S3
|
||||
run: |
|
||||
VERSION="${{ steps.version.outputs.version }}"
|
||||
cd artifacts
|
||||
for PLATFORM in ${{ steps.platforms.outputs.list }}; do
|
||||
for file in *_${PLATFORM}.squashfs *_${PLATFORM}.iso; do
|
||||
[ -f "$file" ] || continue
|
||||
echo "Uploading $file..."
|
||||
s3cmd put -P "$file" "${{ env.S3_BUCKET }}/v${VERSION}/$file"
|
||||
done
|
||||
done
|
||||
|
||||
- name: Register OS version
|
||||
run: |
|
||||
VERSION="${{ steps.version.outputs.version }}"
|
||||
start-cli --registry="${{ env.REGISTRY }}" registry os version add \
|
||||
"$VERSION" "v${VERSION}" '' ">=0.3.5 <=${VERSION}"
|
||||
|
||||
- name: Index assets in registry
|
||||
run: |
|
||||
VERSION="${{ steps.version.outputs.version }}"
|
||||
cd artifacts
|
||||
for PLATFORM in ${{ steps.platforms.outputs.list }}; do
|
||||
for file in *_${PLATFORM}.squashfs *_${PLATFORM}.iso; do
|
||||
[ -f "$file" ] || continue
|
||||
echo "Indexing $file for platform $PLATFORM..."
|
||||
start-cli --registry="${{ env.REGISTRY }}" registry os asset add \
|
||||
--platform="$PLATFORM" \
|
||||
--version="$VERSION" \
|
||||
"$file" \
|
||||
"${{ env.S3_CDN }}/v${VERSION}/$file"
|
||||
done
|
||||
done
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -23,4 +23,3 @@ tmp
|
||||
web/.i18n-checked
|
||||
docs/USER.md
|
||||
*.s9pk
|
||||
/build/lib/migration-images
|
||||
|
||||
@@ -8,7 +8,7 @@ StartOS is an open-source Linux distribution for running personal servers. It ma
|
||||
- Frontend: Angular 21 + TypeScript + Taiga UI 5
|
||||
- Container runtime: Node.js/TypeScript with LXC
|
||||
- Database/State: Patch-DB (git submodule) - storage layer with reactive frontend sync
|
||||
- API: JSON-RPC via rpc-toolkit (see `core/rpc-toolkit.md`)
|
||||
- API: JSON-RPC via rpc-toolkit (see `core/rpc-toolkit.md`), MCP for LLM agents (see `core/mcp/ARCHITECTURE.md`)
|
||||
- Auth: Password + session cookie, public/private key signatures, local authcookie (see `core/src/middleware/auth/`)
|
||||
|
||||
## Project Structure
|
||||
@@ -28,7 +28,7 @@ StartOS is an open-source Linux distribution for running personal servers. It ma
|
||||
|
||||
## Components
|
||||
|
||||
- **`core/`** — Rust backend daemon. Produces a single binary `startbox` that is symlinked as `startd` (main daemon), `start-cli` (CLI), `start-container` (runs inside LXC containers), `registrybox` (package registry), and `tunnelbox` (VPN/tunnel). Handles all backend logic: RPC API, service lifecycle, networking (DNS, ACME, WiFi, Tor, WireGuard), backups, and database state management. See [core/ARCHITECTURE.md](core/ARCHITECTURE.md).
|
||||
- **`core/`** — Rust backend daemon. Produces a single binary `startbox` that is symlinked as `startd` (main daemon), `start-cli` (CLI), `start-container` (runs inside LXC containers), `registrybox` (package registry), and `tunnelbox` (VPN/tunnel). Handles all backend logic: RPC API, MCP server for LLM agents, service lifecycle, networking (DNS, ACME, WiFi, Tor, WireGuard), backups, and database state management. See [core/ARCHITECTURE.md](core/ARCHITECTURE.md).
|
||||
|
||||
- **`web/`** — Angular 21 + TypeScript workspace using Taiga UI 5. Contains three applications (admin UI, setup wizard, VPN management) and two shared libraries (common components/services, marketplace). Communicates with the backend exclusively via JSON-RPC. See [web/ARCHITECTURE.md](web/ARCHITECTURE.md).
|
||||
|
||||
@@ -53,13 +53,13 @@ Rust (core/)
|
||||
|
||||
Key make targets along this chain:
|
||||
|
||||
| Step | Command | What it does |
|
||||
|---|---|---|
|
||||
| 1 | `cargo check -p start-os` | Verify Rust compiles |
|
||||
| 2 | `make ts-bindings` | Export ts-rs types → rsync to SDK |
|
||||
| 3 | `cd sdk && make baseDist dist` | Build SDK packages |
|
||||
| 4 | `cd web && npm run check` | Type-check Angular projects |
|
||||
| 5 | `cd container-runtime && npm run check` | Type-check runtime |
|
||||
| Step | Command | What it does |
|
||||
| ---- | --------------------------------------- | --------------------------------- |
|
||||
| 1 | `cargo check -p start-os` | Verify Rust compiles |
|
||||
| 2 | `make ts-bindings` | Export ts-rs types → rsync to SDK |
|
||||
| 3 | `cd sdk && make baseDist dist` | Build SDK packages |
|
||||
| 4 | `cd web && npm run check` | Type-check Angular projects |
|
||||
| 5 | `cd container-runtime && npm run check` | Type-check runtime |
|
||||
|
||||
**Important**: Editing `sdk/base/lib/osBindings/*.ts` alone is NOT sufficient — you must rebuild the SDK bundle (step 3) before web/container-runtime can see the changes.
|
||||
|
||||
@@ -90,6 +90,17 @@ StartOS uses Patch-DB for reactive state synchronization:
|
||||
|
||||
This means the UI is always eventually consistent with the backend — after any mutating API call, the frontend waits for the corresponding PatchDB diff before resolving, so the UI reflects the result immediately.
|
||||
|
||||
## MCP Server (LLM Agent Interface)
|
||||
|
||||
StartOS includes an [MCP](https://modelcontextprotocol.io/) (Model Context Protocol) server at `/mcp`, enabling LLM agents to discover and invoke the same operations available through the UI and CLI. The MCP server runs inside the StartOS server process alongside the RPC API.
|
||||
|
||||
- **Tools**: Every RPC method is exposed as an MCP tool with LLM-optimized descriptions and JSON Schema inputs. Agents call `tools/list` to discover what's available and `tools/call` to invoke operations.
|
||||
- **Resources**: System state is exposed via MCP resources backed by Patch-DB. Agents subscribe to `startos:///public` and receive debounced revision diffs over SSE, maintaining a local state cache without polling.
|
||||
- **Auth**: Same session cookie auth as the UI — no separate credentials.
|
||||
- **Transport**: MCP Streamable HTTP — POST for requests, GET for SSE notification stream, DELETE for session teardown.
|
||||
|
||||
See [core/ARCHITECTURE.md](core/ARCHITECTURE.md#mcp-server) for implementation details.
|
||||
|
||||
## Further Reading
|
||||
|
||||
- [core/ARCHITECTURE.md](core/ARCHITECTURE.md) — Rust backend architecture
|
||||
|
||||
7
Makefile
7
Makefile
@@ -15,7 +15,7 @@ IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo
|
||||
WEB_UIS := web/dist/raw/ui/index.html web/dist/raw/setup-wizard/index.html
|
||||
COMPRESSED_WEB_UIS := web/dist/static/ui/index.html web/dist/static/setup-wizard/index.html
|
||||
FIRMWARE_ROMS := build/lib/firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./build/lib/firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json)
|
||||
BUILD_SRC := $(call ls-files, build/lib) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS) build/lib/migration-images/.done
|
||||
BUILD_SRC := $(call ls-files, build/lib) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS)
|
||||
IMAGE_RECIPE_SRC := $(call ls-files, build/image-recipe/)
|
||||
STARTD_SRC := core/startd.service $(BUILD_SRC)
|
||||
CORE_SRC := $(call ls-files, core) $(shell git ls-files --recurse-submodules patch-db) $(GIT_HASH_FILE)
|
||||
@@ -89,7 +89,6 @@ clean:
|
||||
rm -rf container-runtime/node_modules
|
||||
rm -f container-runtime/*.squashfs
|
||||
(cd sdk && make clean)
|
||||
rm -rf build/lib/migration-images
|
||||
rm -f env/*.txt
|
||||
|
||||
format:
|
||||
@@ -106,10 +105,6 @@ test-sdk: $(call ls-files, sdk) sdk/base/lib/osBindings/index.ts
|
||||
test-container-runtime: container-runtime/node_modules/.package-lock.json $(call ls-files, container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json
|
||||
cd container-runtime && npm test
|
||||
|
||||
build/lib/migration-images/.done: build/save-migration-images.sh
|
||||
ARCH=$(ARCH) ./build/save-migration-images.sh build/lib/migration-images
|
||||
touch $@
|
||||
|
||||
install-cli: $(GIT_HASH_FILE)
|
||||
./core/build/build-cli.sh --install
|
||||
|
||||
|
||||
@@ -58,18 +58,15 @@ iptables -t nat -A ${NAME}_OUTPUT -d "$sip" -p udp --dport "$sport" -j DNAT --to
|
||||
iptables -A ${NAME}_FORWARD -d $dip -p tcp --dport $dport -m state --state NEW -j ACCEPT
|
||||
iptables -A ${NAME}_FORWARD -d $dip -p udp --dport $dport -m state --state NEW -j ACCEPT
|
||||
|
||||
# NAT hairpin: masquerade so replies route back through this host for proper
|
||||
# NAT reversal instead of taking a direct path that bypasses conntrack.
|
||||
# Host-to-target hairpin: locally-originated packets whose original destination
|
||||
# was sip (before OUTPUT DNAT rewrote it to dip). Using --ctorigdst ties the
|
||||
# rule to this specific sip, so multiple WAN IPs forwarding the same port to
|
||||
# different targets each get their own masquerade.
|
||||
iptables -t nat -A ${NAME}_POSTROUTING -m addrtype --src-type LOCAL -m conntrack --ctorigdst "$sip" -d "$dip" -p tcp --dport "$dport" -j MASQUERADE
|
||||
iptables -t nat -A ${NAME}_POSTROUTING -m addrtype --src-type LOCAL -m conntrack --ctorigdst "$sip" -d "$dip" -p udp --dport "$dport" -j MASQUERADE
|
||||
# Same-subnet hairpin: when traffic originates from the same subnet as the DNAT
|
||||
# target (e.g. a container reaching another container, or a WireGuard peer
|
||||
# connecting to itself via the tunnel's public IP).
|
||||
iptables -t nat -A ${NAME}_POSTROUTING -s "$dip/$dprefix" -d "$dip" -p tcp --dport "$dport" -j MASQUERADE
|
||||
iptables -t nat -A ${NAME}_POSTROUTING -s "$dip/$dprefix" -d "$dip" -p udp --dport "$dport" -j MASQUERADE
|
||||
# NAT hairpin: masquerade traffic from the bridge subnet or host to the DNAT
|
||||
# target, so replies route back through the host for proper NAT reversal.
|
||||
# Container-to-container hairpin (source is on the bridge subnet)
|
||||
if [ -n "$bridge_subnet" ]; then
|
||||
iptables -t nat -A ${NAME}_POSTROUTING -s "$bridge_subnet" -d "$dip" -p tcp --dport "$dport" -j MASQUERADE
|
||||
iptables -t nat -A ${NAME}_POSTROUTING -s "$bridge_subnet" -d "$dip" -p udp --dport "$dport" -j MASQUERADE
|
||||
fi
|
||||
# Host-to-container hairpin (host connects to its own gateway IP, source is sip)
|
||||
iptables -t nat -A ${NAME}_POSTROUTING -s "$sip" -d "$dip" -p tcp --dport "$dport" -j MASQUERADE
|
||||
iptables -t nat -A ${NAME}_POSTROUTING -s "$sip" -d "$dip" -p udp --dport "$dport" -j MASQUERADE
|
||||
|
||||
exit $err
|
||||
|
||||
@@ -1,36 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Save Docker images needed by the 0.3.6-alpha.0 migration as tarballs
|
||||
# so they can be bundled into the OS and loaded without internet access.
|
||||
set -e
|
||||
|
||||
ARCH="${ARCH:-x86_64}"
|
||||
DESTDIR="${1:-build/lib/migration-images}"
|
||||
|
||||
if [ "$ARCH" = "x86_64" ]; then
|
||||
DOCKER_PLATFORM="linux/amd64"
|
||||
elif [ "$ARCH" = "aarch64" ]; then
|
||||
DOCKER_PLATFORM="linux/arm64"
|
||||
else
|
||||
DOCKER_PLATFORM="linux/$ARCH"
|
||||
fi
|
||||
|
||||
IMAGES=("tonistiigi/binfmt:latest")
|
||||
if [ "$ARCH" != "riscv64" ]; then
|
||||
IMAGES=("start9/compat:latest" "start9/utils:latest" "${IMAGES[@]}")
|
||||
fi
|
||||
|
||||
mkdir -p "$DESTDIR"
|
||||
|
||||
for IMAGE in "${IMAGES[@]}"; do
|
||||
FILENAME=$(echo "$IMAGE" | sed 's|/|_|g; s/:/_/g').tar
|
||||
if [ -f "$DESTDIR/$FILENAME" ]; then
|
||||
echo "Skipping $IMAGE (already saved)"
|
||||
continue
|
||||
fi
|
||||
echo "Pulling $IMAGE for $DOCKER_PLATFORM..."
|
||||
docker pull --platform "$DOCKER_PLATFORM" "$IMAGE"
|
||||
echo "Saving $IMAGE to $DESTDIR/$FILENAME..."
|
||||
docker save "$IMAGE" -o "$DESTDIR/$FILENAME"
|
||||
done
|
||||
|
||||
echo "Migration images saved to $DESTDIR"
|
||||
2
container-runtime/package-lock.json
generated
2
container-runtime/package-lock.json
generated
@@ -37,7 +37,7 @@
|
||||
},
|
||||
"../sdk/dist": {
|
||||
"name": "@start9labs/start-sdk",
|
||||
"version": "0.4.0-beta.66",
|
||||
"version": "0.4.0-beta.64",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@iarna/toml": "^3.0.0",
|
||||
|
||||
@@ -494,7 +494,7 @@ export class SystemForEmbassy implements System {
|
||||
const host = new MultiHost({ effects, id })
|
||||
const internalPorts = new Set(
|
||||
Object.values(interfaceValue["tor-config"]?.["port-mapping"] ?? {})
|
||||
.map((v) => parseInt(v))
|
||||
.map(Number.parseInt)
|
||||
.concat(
|
||||
...Object.values(interfaceValue["lan-config"] ?? {}).map(
|
||||
(c) => c.internal,
|
||||
|
||||
@@ -23,6 +23,7 @@ The crate produces a single binary `startbox` that is symlinked under different
|
||||
- `src/context/` — Context types (RpcContext, CliContext, InitContext, DiagnosticContext)
|
||||
- `src/service/` — Service lifecycle management with actor pattern (`service_actor.rs`)
|
||||
- `src/db/model/` — Patch-DB models (`public.rs` synced to frontend, `private.rs` backend-only)
|
||||
- `src/mcp/` — MCP server for LLM agents (see [MCP Server](#mcp-server) below)
|
||||
- `src/net/` — Networking (DNS, ACME, WiFi, Tor via Arti, WireGuard)
|
||||
- `src/s9pk/` — S9PK package format (merkle archive)
|
||||
- `src/registry/` — Package registry management
|
||||
@@ -38,16 +39,19 @@ See [rpc-toolkit.md](rpc-toolkit.md) for full handler patterns and configuration
|
||||
Patch-DB provides diff-based state synchronization. Changes to `db/model/public.rs` automatically sync to the frontend.
|
||||
|
||||
**Key patterns:**
|
||||
|
||||
- `db.peek().await` — Get a read-only snapshot of the database state
|
||||
- `db.mutate(|db| { ... }).await` — Apply mutations atomically, returns `MutateResult`
|
||||
- `#[derive(HasModel)]` — Derive macro for types stored in the database, generates typed accessors
|
||||
|
||||
**Generated accessor types** (from `HasModel` derive):
|
||||
|
||||
- `as_field()` — Immutable reference: `&Model<T>`
|
||||
- `as_field_mut()` — Mutable reference: `&mut Model<T>`
|
||||
- `into_field()` — Owned value: `Model<T>`
|
||||
|
||||
**`Model<T>` APIs** (from `db/prelude.rs`):
|
||||
|
||||
- `.de()` — Deserialize to `T`
|
||||
- `.ser(&value)` — Serialize from `T`
|
||||
- `.mutate(|v| ...)` — Deserialize, mutate, reserialize
|
||||
@@ -63,6 +67,12 @@ See [i18n-patterns.md](i18n-patterns.md) for internationalization key convention
|
||||
|
||||
See [core-rust-patterns.md](core-rust-patterns.md) for common utilities (Invoke trait, Guard pattern, mount guards, Apply trait, etc.).
|
||||
|
||||
## MCP Server
|
||||
|
||||
The MCP (Model Context Protocol) server at `src/mcp/` exposes the StartOS RPC API to LLM agents via the Streamable HTTP transport at `/mcp`. Tools wrap the existing RPC handlers; resources expose Patch-DB state with debounced SSE subscriptions; auth reuses the UI session cookie.
|
||||
|
||||
See [src/mcp/ARCHITECTURE.md](src/mcp/ARCHITECTURE.md) for transport details, session lifecycle, tool dispatch, resource subscriptions, CORS, and body size limits.
|
||||
|
||||
## Related Documentation
|
||||
|
||||
- [rpc-toolkit.md](rpc-toolkit.md) — JSON-RPC handler patterns
|
||||
|
||||
2
core/Cargo.lock
generated
2
core/Cargo.lock
generated
@@ -6439,7 +6439,7 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "start-os"
|
||||
version = "0.4.0-alpha.23"
|
||||
version = "0.4.0-alpha.22"
|
||||
dependencies = [
|
||||
"aes",
|
||||
"async-acme",
|
||||
|
||||
@@ -15,7 +15,7 @@ license = "MIT"
|
||||
name = "start-os"
|
||||
readme = "README.md"
|
||||
repository = "https://github.com/Start9Labs/start-os"
|
||||
version = "0.4.0-alpha.23" # VERSION_BUMP
|
||||
version = "0.4.0-alpha.22" # VERSION_BUMP
|
||||
|
||||
[lib]
|
||||
name = "startos"
|
||||
|
||||
@@ -1721,14 +1721,6 @@ lxc.mod.cleaned-up-containers:
|
||||
fr_FR: "Conteneurs LXC orphelins nettoyés avec succès"
|
||||
pl_PL: "Pomyślnie wyczyszczono wiszące kontenery LXC"
|
||||
|
||||
# version/v0_3_6_alpha_0.rs
|
||||
migration.migrating-package:
|
||||
en_US: "Migrating package %{package}..."
|
||||
de_DE: "Paket %{package} wird migriert..."
|
||||
es_ES: "Migrando paquete %{package}..."
|
||||
fr_FR: "Migration du paquet %{package}..."
|
||||
pl_PL: "Migracja pakietu %{package}..."
|
||||
|
||||
# registry/admin.rs
|
||||
registry.admin.unknown-signer:
|
||||
en_US: "Unknown signer"
|
||||
@@ -2657,13 +2649,6 @@ help.arg.add-signer-key:
|
||||
fr_FR: "Ajouter une clé publique au signataire"
|
||||
pl_PL: "Dodaj klucz publiczny do sygnatariusza"
|
||||
|
||||
help.arg.address:
|
||||
en_US: "Network address"
|
||||
de_DE: "Netzwerkadresse"
|
||||
es_ES: "Dirección de red"
|
||||
fr_FR: "Adresse réseau"
|
||||
pl_PL: "Adres sieciowy"
|
||||
|
||||
help.arg.allow-model-mismatch:
|
||||
en_US: "Allow database model mismatch"
|
||||
de_DE: "Datenbankmodell-Abweichung erlauben"
|
||||
@@ -2818,13 +2803,6 @@ help.arg.echoip-urls:
|
||||
fr_FR: "URLs du service Echo IP pour la détection d'IP externe"
|
||||
pl_PL: "Adresy URL usługi Echo IP do wykrywania zewnętrznego IP"
|
||||
|
||||
help.arg.ed25519:
|
||||
en_US: "Use Ed25519 instead of NIST P-256"
|
||||
de_DE: "Ed25519 anstelle von NIST P-256 verwenden"
|
||||
es_ES: "Usar Ed25519 en lugar de NIST P-256"
|
||||
fr_FR: "Utiliser Ed25519 au lieu de NIST P-256"
|
||||
pl_PL: "Użyj Ed25519 zamiast NIST P-256"
|
||||
|
||||
help.arg.emulate-missing-arch:
|
||||
en_US: "Emulate missing architecture using this one"
|
||||
de_DE: "Fehlende Architektur mit dieser emulieren"
|
||||
@@ -2909,13 +2887,6 @@ help.arg.host-url:
|
||||
fr_FR: "URL du serveur StartOS"
|
||||
pl_PL: "URL serwera StartOS"
|
||||
|
||||
help.arg.hostnames:
|
||||
en_US: "Hostnames to include in the certificate"
|
||||
de_DE: "Hostnamen, die in das Zertifikat aufgenommen werden sollen"
|
||||
es_ES: "Nombres de host para incluir en el certificado"
|
||||
fr_FR: "Noms d'hôtes à inclure dans le certificat"
|
||||
pl_PL: "Nazwy hostów do uwzględnienia w certyfikacie"
|
||||
|
||||
help.arg.icon-path:
|
||||
en_US: "Path to service icon file"
|
||||
de_DE: "Pfad zur Service-Icon-Datei"
|
||||
@@ -3119,13 +3090,6 @@ help.arg.platform:
|
||||
fr_FR: "Identifiant de la plateforme cible"
|
||||
pl_PL: "Identyfikator platformy docelowej"
|
||||
|
||||
help.arg.port:
|
||||
en_US: "Port number"
|
||||
de_DE: "Portnummer"
|
||||
es_ES: "Número de puerto"
|
||||
fr_FR: "Numéro de port"
|
||||
pl_PL: "Numer portu"
|
||||
|
||||
help.arg.postgres-connection-url:
|
||||
en_US: "PostgreSQL connection URL"
|
||||
de_DE: "PostgreSQL-Verbindungs-URL"
|
||||
@@ -3210,13 +3174,6 @@ help.arg.server-id:
|
||||
fr_FR: "Identifiant unique du serveur"
|
||||
pl_PL: "Unikalny identyfikator serwera"
|
||||
|
||||
help.arg.set-as-default-outbound:
|
||||
en_US: "Set as the default outbound gateway"
|
||||
de_DE: "Als Standard-Ausgangs-Gateway festlegen"
|
||||
es_ES: "Establecer como puerta de enlace de salida predeterminada"
|
||||
fr_FR: "Définir comme passerelle de sortie par défaut"
|
||||
pl_PL: "Ustaw jako domyślną bramę wychodzącą"
|
||||
|
||||
help.arg.set-signer-name:
|
||||
en_US: "Set the signer name"
|
||||
de_DE: "Unterzeichnernamen festlegen"
|
||||
@@ -3581,13 +3538,6 @@ help.arg.gateway-name:
|
||||
fr_FR: "Nom de la passerelle"
|
||||
pl_PL: "Nazwa bramy"
|
||||
|
||||
help.arg.gateway-type:
|
||||
en_US: "Type of gateway"
|
||||
de_DE: "Typ des Gateways"
|
||||
es_ES: "Tipo de puerta de enlace"
|
||||
fr_FR: "Type de passerelle"
|
||||
pl_PL: "Typ bramy"
|
||||
|
||||
help.arg.governor-name:
|
||||
en_US: "CPU governor name"
|
||||
de_DE: "CPU-Governor-Name"
|
||||
@@ -4114,13 +4064,6 @@ about.add-version-signer:
|
||||
fr_FR: "Ajouter un signataire de version"
|
||||
pl_PL: "Dodaj sygnatariusza wersji"
|
||||
|
||||
about.add-vhost-passthrough:
|
||||
en_US: "Add vhost passthrough"
|
||||
de_DE: "Vhost-Passthrough hinzufügen"
|
||||
es_ES: "Agregar passthrough de vhost"
|
||||
fr_FR: "Ajouter un passthrough vhost"
|
||||
pl_PL: "Dodaj passthrough vhost"
|
||||
|
||||
about.add-wifi-ssid-password:
|
||||
en_US: "Add wifi ssid and password"
|
||||
de_DE: "WLAN-SSID und Passwort hinzufügen"
|
||||
@@ -4170,13 +4113,6 @@ about.check-for-updates:
|
||||
fr_FR: "Vérifier les mises à jour disponibles"
|
||||
pl_PL: "Sprawdź dostępne aktualizacje"
|
||||
|
||||
about.check-port-reachability:
|
||||
en_US: "Check if a port is reachable from the WAN"
|
||||
de_DE: "Prüfen, ob ein Port vom WAN erreichbar ist"
|
||||
es_ES: "Comprobar si un puerto es accesible desde la WAN"
|
||||
fr_FR: "Vérifier si un port est accessible depuis le WAN"
|
||||
pl_PL: "Sprawdź, czy port jest osiągalny z WAN"
|
||||
|
||||
about.check-update-startos:
|
||||
en_US: "Check a given registry for StartOS updates and update if available"
|
||||
de_DE: "Ein bestimmtes Registry auf StartOS-Updates prüfen und bei Verfügbarkeit aktualisieren"
|
||||
@@ -4275,13 +4211,6 @@ about.commands-authentication:
|
||||
fr_FR: "Commandes liées à l'authentification, comme connexion, déconnexion"
|
||||
pl_PL: "Polecenia związane z uwierzytelnianiem, np. logowanie, wylogowanie"
|
||||
|
||||
about.commands-authorized-keys:
|
||||
en_US: "Commands for managing authorized keys"
|
||||
de_DE: "Befehle zur Verwaltung autorisierter Schlüssel"
|
||||
es_ES: "Comandos para gestionar claves autorizadas"
|
||||
fr_FR: "Commandes pour gérer les clés autorisées"
|
||||
pl_PL: "Polecenia do zarządzania autoryzowanymi kluczami"
|
||||
|
||||
about.commands-backup:
|
||||
en_US: "Commands related to backup creation and backup targets"
|
||||
de_DE: "Befehle zur Backup-Erstellung und Backup-Zielen"
|
||||
@@ -4345,41 +4274,6 @@ about.commands-experimental:
|
||||
fr_FR: "Commandes liées à la configuration d'options expérimentales comme zram et le gouverneur CPU"
|
||||
pl_PL: "Polecenia konfiguracji opcji eksperymentalnych jak zram i regulator CPU"
|
||||
|
||||
about.commands-host-address-domain:
|
||||
en_US: "Commands for managing host address domains"
|
||||
de_DE: "Befehle zur Verwaltung von Host-Adressdomänen"
|
||||
es_ES: "Comandos para gestionar dominios de direcciones del host"
|
||||
fr_FR: "Commandes pour gérer les domaines d'adresses de l'hôte"
|
||||
pl_PL: "Polecenia do zarządzania domenami adresów hosta"
|
||||
|
||||
about.commands-host-addresses:
|
||||
en_US: "Commands for managing host addresses"
|
||||
de_DE: "Befehle zur Verwaltung von Host-Adressen"
|
||||
es_ES: "Comandos para gestionar direcciones del host"
|
||||
fr_FR: "Commandes pour gérer les adresses de l'hôte"
|
||||
pl_PL: "Polecenia do zarządzania adresami hosta"
|
||||
|
||||
about.commands-host-bindings:
|
||||
en_US: "Commands for managing host bindings"
|
||||
de_DE: "Befehle zur Verwaltung von Host-Bindungen"
|
||||
es_ES: "Comandos para gestionar vínculos del host"
|
||||
fr_FR: "Commandes pour gérer les liaisons de l'hôte"
|
||||
pl_PL: "Polecenia do zarządzania powiązaniami hosta"
|
||||
|
||||
about.commands-host-private-domain:
|
||||
en_US: "Commands for managing private domains for a host"
|
||||
de_DE: "Befehle zur Verwaltung privater Domänen für einen Host"
|
||||
es_ES: "Comandos para gestionar dominios privados de un host"
|
||||
fr_FR: "Commandes pour gérer les domaines privés d'un hôte"
|
||||
pl_PL: "Polecenia do zarządzania prywatnymi domenami hosta"
|
||||
|
||||
about.commands-host-public-domain:
|
||||
en_US: "Commands for managing public domains for a host"
|
||||
de_DE: "Befehle zur Verwaltung öffentlicher Domänen für einen Host"
|
||||
es_ES: "Comandos para gestionar dominios públicos de un host"
|
||||
fr_FR: "Commandes pour gérer les domaines publics d'un hôte"
|
||||
pl_PL: "Polecenia do zarządzania publicznymi domenami hosta"
|
||||
|
||||
about.commands-host-system-ui:
|
||||
en_US: "Commands for modifying the host for the system ui"
|
||||
de_DE: "Befehle zum Ändern des Hosts für die System-UI"
|
||||
@@ -4436,13 +4330,6 @@ about.commands-packages:
|
||||
fr_FR: "Commandes liées aux paquets"
|
||||
pl_PL: "Polecenia związane z pakietami"
|
||||
|
||||
about.commands-port-forward:
|
||||
en_US: "Commands for managing port forwards"
|
||||
de_DE: "Befehle zur Verwaltung von Portweiterleitungen"
|
||||
es_ES: "Comandos para gestionar reenvíos de puertos"
|
||||
fr_FR: "Commandes pour gérer les redirections de ports"
|
||||
pl_PL: "Polecenia do zarządzania przekierowaniami portów"
|
||||
|
||||
about.commands-registry:
|
||||
en_US: "Commands related to the registry"
|
||||
de_DE: "Befehle zum Registry"
|
||||
@@ -4457,13 +4344,6 @@ about.commands-registry-db:
|
||||
fr_FR: "Commandes pour interagir avec la base de données, comme dump et apply"
|
||||
pl_PL: "Polecenia interakcji z bazą danych, np. dump i apply"
|
||||
|
||||
about.commands-registry-info:
|
||||
en_US: "View or edit registry information"
|
||||
de_DE: "Registry-Informationen anzeigen oder bearbeiten"
|
||||
es_ES: "Ver o editar información del registro"
|
||||
fr_FR: "Afficher ou modifier les informations du registre"
|
||||
pl_PL: "Wyświetl lub edytuj informacje rejestru"
|
||||
|
||||
about.commands-restore-backup:
|
||||
en_US: "Commands for restoring package(s) from backup"
|
||||
de_DE: "Befehle zum Wiederherstellen von Paketen aus dem Backup"
|
||||
@@ -4506,20 +4386,6 @@ about.commands-tunnel:
|
||||
fr_FR: "Commandes liées à StartTunnel"
|
||||
pl_PL: "Polecenia związane z StartTunnel"
|
||||
|
||||
about.commands-tunnel-update:
|
||||
en_US: "Commands for checking and applying tunnel updates"
|
||||
de_DE: "Befehle zum Prüfen und Anwenden von Tunnel-Updates"
|
||||
es_ES: "Comandos para verificar y aplicar actualizaciones del túnel"
|
||||
fr_FR: "Commandes pour vérifier et appliquer les mises à jour du tunnel"
|
||||
pl_PL: "Polecenia do sprawdzania i stosowania aktualizacji tunelu"
|
||||
|
||||
about.commands-tunnel-web:
|
||||
en_US: "Commands for managing the tunnel web interface"
|
||||
de_DE: "Befehle zur Verwaltung der Tunnel-Weboberfläche"
|
||||
es_ES: "Comandos para gestionar la interfaz web del túnel"
|
||||
fr_FR: "Commandes pour gérer l'interface web du tunnel"
|
||||
pl_PL: "Polecenia do zarządzania interfejsem webowym tunelu"
|
||||
|
||||
about.commands-wifi:
|
||||
en_US: "Commands related to wifi networks i.e. add, connect, delete"
|
||||
de_DE: "Befehle zu WLAN-Netzwerken, z.B. hinzufügen, verbinden, löschen"
|
||||
@@ -4660,13 +4526,6 @@ about.display-s9pk-manifest:
|
||||
fr_FR: "Afficher le manifeste s9pk"
|
||||
pl_PL: "Wyświetl manifest s9pk"
|
||||
|
||||
about.display-s9pk-root-sighash-and-maxsize:
|
||||
en_US: "Display the s9pk root signature hash and max size"
|
||||
de_DE: "Den s9pk-Root-Signaturhash und die maximale Größe anzeigen"
|
||||
es_ES: "Mostrar el hash de firma raíz y el tamaño máximo del s9pk"
|
||||
fr_FR: "Afficher le hachage de signature racine et la taille maximale du s9pk"
|
||||
pl_PL: "Wyświetl hash podpisu głównego i maksymalny rozmiar s9pk"
|
||||
|
||||
about.display-server-metrics:
|
||||
en_US: "Display server metrics"
|
||||
de_DE: "Server-Metriken anzeigen"
|
||||
@@ -4730,20 +4589,6 @@ about.dump-address-resolution-table:
|
||||
fr_FR: "Exporter la table de résolution d'adresses"
|
||||
pl_PL: "Zrzuć tabelę rozpoznawania adresów"
|
||||
|
||||
about.dump-port-forward-table:
|
||||
en_US: "Dump port forward table"
|
||||
de_DE: "Portweiterleitungstabelle ausgeben"
|
||||
es_ES: "Volcar tabla de reenvío de puertos"
|
||||
fr_FR: "Exporter la table de redirection de ports"
|
||||
pl_PL: "Zrzuć tabelę przekierowań portów"
|
||||
|
||||
about.dump-vhost-proxy-table:
|
||||
en_US: "Dump vhost proxy table"
|
||||
de_DE: "Vhost-Proxy-Tabelle ausgeben"
|
||||
es_ES: "Volcar tabla de proxy vhost"
|
||||
fr_FR: "Exporter la table de proxy vhost"
|
||||
pl_PL: "Zrzuć tabelę proxy vhost"
|
||||
|
||||
about.echo-message:
|
||||
en_US: "Echo a message back"
|
||||
de_DE: "Eine Nachricht zurückgeben"
|
||||
@@ -4779,13 +4624,6 @@ about.enable-kiosk-mode:
|
||||
fr_FR: "Activer le mode kiosque"
|
||||
pl_PL: "Włącz tryb kiosku"
|
||||
|
||||
about.enable-or-disable-port-forward:
|
||||
en_US: "Enable or disable a port forward"
|
||||
de_DE: "Portweiterleitung aktivieren oder deaktivieren"
|
||||
es_ES: "Habilitar o deshabilitar un reenvío de puerto"
|
||||
fr_FR: "Activer ou désactiver une redirection de port"
|
||||
pl_PL: "Włącz lub wyłącz przekierowanie portu"
|
||||
|
||||
about.enable-webserver:
|
||||
en_US: "Enable the webserver"
|
||||
de_DE: "Webserver aktivieren"
|
||||
@@ -4877,13 +4715,6 @@ about.get-developer-pubkey:
|
||||
fr_FR: "Obtenir la clé publique du développeur"
|
||||
pl_PL: "Pobierz klucz publiczny dewelopera"
|
||||
|
||||
about.get-device-info:
|
||||
en_US: "Display device information"
|
||||
de_DE: "Geräteinformationen anzeigen"
|
||||
es_ES: "Mostrar información del dispositivo"
|
||||
fr_FR: "Afficher les informations de l'appareil"
|
||||
pl_PL: "Wyświetl informacje o urządzeniu"
|
||||
|
||||
about.get-initialization-progress:
|
||||
en_US: "Get initialization progress"
|
||||
de_DE: "Initialisierungsfortschritt abrufen"
|
||||
@@ -5073,13 +4904,6 @@ about.list-paths-of-package-ingredients:
|
||||
fr_FR: "Lister les chemins des composants du package"
|
||||
pl_PL: "Wyświetl ścieżki składników pakietu"
|
||||
|
||||
about.list-registry-categories:
|
||||
en_US: "List registry categories"
|
||||
de_DE: "Registry-Kategorien auflisten"
|
||||
es_ES: "Listar categorías del registro"
|
||||
fr_FR: "Lister les catégories du registre"
|
||||
pl_PL: "Wyświetl kategorie rejestru"
|
||||
|
||||
about.list-registry-info-packages:
|
||||
en_US: "List registry info and packages"
|
||||
de_DE: "Registry-Informationen und Pakete auflisten"
|
||||
@@ -5108,13 +4932,6 @@ about.list-version-signers:
|
||||
fr_FR: "Lister les signataires de versions"
|
||||
pl_PL: "Wyświetl sygnatariuszy wersji"
|
||||
|
||||
about.list-vhost-passthrough:
|
||||
en_US: "List vhost passthroughs"
|
||||
de_DE: "Vhost-Passthroughs auflisten"
|
||||
es_ES: "Listar passthroughs de vhost"
|
||||
fr_FR: "Lister les passthroughs vhost"
|
||||
pl_PL: "Wyświetl passthrough vhost"
|
||||
|
||||
about.list-wifi-info:
|
||||
en_US: "List wifi information"
|
||||
de_DE: "WLAN-Informationen auflisten"
|
||||
@@ -5164,13 +4981,6 @@ about.manage-query-dns:
|
||||
fr_FR: "Gérer et interroger le DNS"
|
||||
pl_PL: "Zarządzaj i odpytuj DNS"
|
||||
|
||||
about.manage-ssl-certificates:
|
||||
en_US: "Manage SSL certificates"
|
||||
de_DE: "SSL-Zertifikate verwalten"
|
||||
es_ES: "Gestionar certificados SSL"
|
||||
fr_FR: "Gérer les certificats SSL"
|
||||
pl_PL: "Zarządzaj certyfikatami SSL"
|
||||
|
||||
about.manage-ssl-vhost-proxy:
|
||||
en_US: "Manage SSL vhost proxy"
|
||||
de_DE: "SSL-vhost-Proxy verwalten"
|
||||
@@ -5416,13 +5226,6 @@ about.remove-version-signer:
|
||||
fr_FR: "Supprimer le signataire de version"
|
||||
pl_PL: "Usuń sygnatariusza wersji"
|
||||
|
||||
about.remove-vhost-passthrough:
|
||||
en_US: "Remove vhost passthrough"
|
||||
de_DE: "Vhost-Passthrough entfernen"
|
||||
es_ES: "Eliminar passthrough de vhost"
|
||||
fr_FR: "Supprimer un passthrough vhost"
|
||||
pl_PL: "Usuń passthrough vhost"
|
||||
|
||||
about.remove-wifi-network:
|
||||
en_US: "Remove a wifi network"
|
||||
de_DE: "Ein WLAN-Netzwerk entfernen"
|
||||
@@ -5507,13 +5310,6 @@ about.run-service-action:
|
||||
fr_FR: "Exécuter une action de service"
|
||||
pl_PL: "Uruchom akcję usługi"
|
||||
|
||||
about.set-address-enabled-for-binding:
|
||||
en_US: "Set a gateway address enabled for a binding"
|
||||
de_DE: "Gateway-Adresse für eine Bindung aktivieren"
|
||||
es_ES: "Establecer una dirección de gateway habilitada para un vínculo"
|
||||
fr_FR: "Définir une adresse de passerelle activée pour une liaison"
|
||||
pl_PL: "Ustaw adres bramy jako włączony dla powiązania"
|
||||
|
||||
about.set-country:
|
||||
en_US: "Set the country"
|
||||
de_DE: "Das Land festlegen"
|
||||
@@ -5521,13 +5317,6 @@ about.set-country:
|
||||
fr_FR: "Définir le pays"
|
||||
pl_PL: "Ustaw kraj"
|
||||
|
||||
about.set-default-outbound-gateway:
|
||||
en_US: "Set the default outbound gateway"
|
||||
de_DE: "Standard-Ausgangs-Gateway festlegen"
|
||||
es_ES: "Establecer la puerta de enlace de salida predeterminada"
|
||||
fr_FR: "Définir la passerelle sortante par défaut"
|
||||
pl_PL: "Ustaw domyślną bramę wychodzącą"
|
||||
|
||||
about.set-echoip-urls:
|
||||
en_US: "Set the Echo IP service URLs"
|
||||
de_DE: "Die Echo-IP-Dienst-URLs festlegen"
|
||||
@@ -5570,13 +5359,6 @@ about.set-listen-address-for-webserver:
|
||||
fr_FR: "Définir l'adresse d'écoute du serveur web"
|
||||
pl_PL: "Ustaw adres nasłuchiwania serwera internetowego"
|
||||
|
||||
about.set-outbound-gateway-package:
|
||||
en_US: "Set the outbound gateway for a package"
|
||||
de_DE: "Ausgangs-Gateway für ein Paket festlegen"
|
||||
es_ES: "Establecer la puerta de enlace de salida para un paquete"
|
||||
fr_FR: "Définir la passerelle sortante pour un package"
|
||||
pl_PL: "Ustaw bramę wychodzącą dla pakietu"
|
||||
|
||||
about.set-registry-icon:
|
||||
en_US: "Set the registry icon"
|
||||
de_DE: "Das Registry-Symbol festlegen"
|
||||
@@ -5675,13 +5457,6 @@ about.stop-service:
|
||||
fr_FR: "Arrêter un service"
|
||||
pl_PL: "Zatrzymaj usługę"
|
||||
|
||||
about.ssl-generate-certificate:
|
||||
en_US: "Generate an SSL certificate from the system root CA"
|
||||
de_DE: "SSL-Zertifikat von der System-Root-CA generieren"
|
||||
es_ES: "Generar un certificado SSL desde la CA raíz del sistema"
|
||||
fr_FR: "Générer un certificat SSL depuis l'autorité racine du système"
|
||||
pl_PL: "Wygeneruj certyfikat SSL z głównego CA systemu"
|
||||
|
||||
about.teardown-rebuild-containers:
|
||||
en_US: "Teardown and rebuild containers"
|
||||
de_DE: "Container abbauen und neu erstellen"
|
||||
@@ -5752,13 +5527,6 @@ about.update-firmware:
|
||||
fr_FR: "Mettre à jour le firmware"
|
||||
pl_PL: "Zaktualizuj oprogramowanie układowe"
|
||||
|
||||
about.update-port-forward-label:
|
||||
en_US: "Update the label of a port forward"
|
||||
de_DE: "Bezeichnung einer Portweiterleitung aktualisieren"
|
||||
es_ES: "Actualizar la etiqueta de un reenvío de puerto"
|
||||
fr_FR: "Mettre à jour le libellé d'une redirection de port"
|
||||
pl_PL: "Zaktualizuj etykietę przekierowania portu"
|
||||
|
||||
about.view-edit-gateway-configs:
|
||||
en_US: "View and edit gateway configurations"
|
||||
de_DE: "Gateway-Konfigurationen anzeigen und bearbeiten"
|
||||
|
||||
@@ -148,15 +148,6 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
|
||||
.build()
|
||||
.expect(&t!("bins.startd.failed-to-initialize-runtime"));
|
||||
let res = rt.block_on(async {
|
||||
// Periodically wake a worker thread from a non-tokio OS thread to
|
||||
// prevent tokio I/O driver starvation (all workers parked on
|
||||
// condvar with no driver). See tokio-rs/tokio#4730.
|
||||
let rt_handle = tokio::runtime::Handle::current();
|
||||
std::thread::spawn(move || loop {
|
||||
std::thread::sleep(Duration::from_secs(30));
|
||||
rt_handle.spawn(async {});
|
||||
});
|
||||
|
||||
let mut server = WebServer::new(Acceptor::new(WildcardListener::new(80)?), refresher());
|
||||
match inner_main(&mut server, &config).await {
|
||||
Ok(a) => {
|
||||
|
||||
@@ -39,7 +39,7 @@ impl DiagnosticContext {
|
||||
shutdown,
|
||||
disk_guid,
|
||||
error: Arc::new(error.into()),
|
||||
rpc_continuations: RpcContinuations::new(),
|
||||
rpc_continuations: RpcContinuations::new(None),
|
||||
})))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -32,7 +32,7 @@ impl InitContext {
|
||||
error: watch::channel(None).0,
|
||||
progress,
|
||||
shutdown,
|
||||
rpc_continuations: RpcContinuations::new(),
|
||||
rpc_continuations: RpcContinuations::new(None),
|
||||
})))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -339,7 +339,7 @@ impl RpcContext {
|
||||
services,
|
||||
cancellable_installs: SyncMutex::new(BTreeMap::new()),
|
||||
metrics_cache,
|
||||
rpc_continuations: RpcContinuations::new(),
|
||||
rpc_continuations: RpcContinuations::new(Some(shutdown.clone())),
|
||||
shutdown,
|
||||
lxc_manager: Arc::new(LxcManager::new()),
|
||||
open_authed_continuations: OpenAuthedContinuations::new(),
|
||||
|
||||
@@ -85,7 +85,7 @@ impl SetupContext {
|
||||
result: OnceCell::new(),
|
||||
disk_guid: OnceCell::new(),
|
||||
shutdown,
|
||||
rpc_continuations: RpcContinuations::new(),
|
||||
rpc_continuations: RpcContinuations::new(None),
|
||||
install_rootfs: SyncMutex::new(None),
|
||||
language: SyncMutex::new(None),
|
||||
keyboard: SyncMutex::new(None),
|
||||
|
||||
@@ -164,13 +164,13 @@ pub struct SubscribeRes {
|
||||
pub guid: Guid,
|
||||
}
|
||||
|
||||
struct DbSubscriber {
|
||||
rev: u64,
|
||||
sub: UnboundedReceiver<Revision>,
|
||||
sync_db: watch::Receiver<u64>,
|
||||
pub(crate) struct DbSubscriber {
|
||||
pub(crate) rev: u64,
|
||||
pub(crate) sub: UnboundedReceiver<Revision>,
|
||||
pub(crate) sync_db: watch::Receiver<u64>,
|
||||
}
|
||||
impl DbSubscriber {
|
||||
async fn recv(&mut self) -> Option<Revision> {
|
||||
pub(crate) async fn recv(&mut self) -> Option<Revision> {
|
||||
loop {
|
||||
tokio::select! {
|
||||
rev = self.sub.recv() => {
|
||||
|
||||
@@ -125,10 +125,10 @@ impl Public {
|
||||
},
|
||||
status_info: ServerStatus {
|
||||
backup_progress: None,
|
||||
updated: false,
|
||||
update_progress: None,
|
||||
shutting_down: false,
|
||||
restarting: false,
|
||||
restart: None,
|
||||
},
|
||||
unread_notification_count: 0,
|
||||
password_hash: account.password.clone(),
|
||||
@@ -220,16 +220,6 @@ pub struct ServerInfo {
|
||||
pub keyboard: Option<KeyboardOptions>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Deserialize, Serialize, TS)]
|
||||
#[serde(rename_all = "lowercase")]
|
||||
#[ts(export)]
|
||||
pub enum RestartReason {
|
||||
Mdns,
|
||||
Language,
|
||||
Kiosk,
|
||||
Update,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[model = "Model<Self>"]
|
||||
@@ -374,13 +364,12 @@ pub struct BackupProgress {
|
||||
#[ts(export)]
|
||||
pub struct ServerStatus {
|
||||
pub backup_progress: Option<BTreeMap<PackageId, BackupProgress>>,
|
||||
pub updated: bool,
|
||||
pub update_progress: Option<FullProgress>,
|
||||
#[serde(default)]
|
||||
pub shutting_down: bool,
|
||||
#[serde(default)]
|
||||
pub restarting: bool,
|
||||
#[serde(default)]
|
||||
pub restart: Option<RestartReason>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
|
||||
|
||||
@@ -344,17 +344,12 @@ pub async fn mount_fs<P: AsRef<Path>>(
|
||||
.arg(&blockdev_path)
|
||||
.invoke(ErrorKind::DiskManagement)
|
||||
.await?;
|
||||
// Delete ext2_saved subvolume and defragment after conversion
|
||||
// Defragment after conversion for optimal performance
|
||||
let tmp_mount = datadir.as_ref().join(format!("{name}.convert-tmp"));
|
||||
tokio::fs::create_dir_all(&tmp_mount).await?;
|
||||
BlockDev::new(&blockdev_path)
|
||||
.mount(&tmp_mount, ReadWrite)
|
||||
.await?;
|
||||
Command::new("btrfs")
|
||||
.args(["subvolume", "delete"])
|
||||
.arg(tmp_mount.join("ext2_saved"))
|
||||
.invoke(ErrorKind::DiskManagement)
|
||||
.await?;
|
||||
Command::new("btrfs")
|
||||
.args(["filesystem", "defragment", "-r"])
|
||||
.arg(&tmp_mount)
|
||||
@@ -414,18 +409,6 @@ pub async fn mount_all_fs<P: AsRef<Path>>(
|
||||
/// filesystem type. Returns `None` if probing fails (e.g. LV doesn't exist).
|
||||
#[instrument(skip_all)]
|
||||
pub async fn probe_package_data_fs(guid: &str) -> Result<Option<String>, Error> {
|
||||
// If the target block device is already accessible (e.g. this is the
|
||||
// currently active system VG), probe it directly without any
|
||||
// import/activate/open/cleanup steps.
|
||||
let blockdev_path = if !guid.ends_with("_UNENC") {
|
||||
PathBuf::from(format!("/dev/mapper/{guid}_package-data"))
|
||||
} else {
|
||||
Path::new("/dev").join(guid).join("package-data")
|
||||
};
|
||||
if tokio::fs::metadata(&blockdev_path).await.is_ok() {
|
||||
return detect_filesystem(&blockdev_path).await.map(Some);
|
||||
}
|
||||
|
||||
// Import and activate the VG
|
||||
match Command::new("vgimport")
|
||||
.arg(guid)
|
||||
|
||||
@@ -103,7 +103,7 @@ impl OsPartitionInfo {
|
||||
}
|
||||
}
|
||||
|
||||
const BIOS_BOOT_TYPE_GUID: &str = "21686148-6449-6E6F-744E-656564454649";
|
||||
const BIOS_BOOT_TYPE_GUID: &str = "21686148-6449-6e6f-744e-656564726548";
|
||||
|
||||
/// Find the BIOS boot partition on the same disk as `known_part`.
|
||||
async fn find_bios_boot_partition(known_part: &Path) -> Result<Option<PathBuf>, Error> {
|
||||
|
||||
@@ -7,7 +7,7 @@ use tracing::instrument;
|
||||
use ts_rs::TS;
|
||||
|
||||
use crate::context::RpcContext;
|
||||
use crate::db::model::public::{RestartReason, ServerInfo};
|
||||
use crate::db::model::public::ServerInfo;
|
||||
use crate::prelude::*;
|
||||
use crate::util::Invoke;
|
||||
|
||||
@@ -272,7 +272,6 @@ pub async fn set_hostname_rpc(
|
||||
}
|
||||
if let Some(hostname) = &hostname {
|
||||
hostname.save(server_info)?;
|
||||
server_info.as_status_info_mut().as_restart_mut().ser(&Some(RestartReason::Mdns))?;
|
||||
}
|
||||
ServerHostnameInfo::load(server_info)
|
||||
})
|
||||
|
||||
@@ -258,7 +258,7 @@ pub async fn init(
|
||||
.arg("journalctl")
|
||||
.invoke(crate::ErrorKind::Journald)
|
||||
.await
|
||||
.ok();
|
||||
.log_err();
|
||||
mount_logs.complete();
|
||||
tokio::io::copy(
|
||||
&mut open_file("/run/startos/init.log").await?,
|
||||
@@ -371,11 +371,11 @@ pub async fn init(
|
||||
let ram = get_mem_info().await?.total.0 as u64 * 1024 * 1024;
|
||||
let devices = lshw().await?;
|
||||
let status_info = ServerStatus {
|
||||
updated: false,
|
||||
update_progress: None,
|
||||
backup_progress: None,
|
||||
shutting_down: false,
|
||||
restarting: false,
|
||||
restart: None,
|
||||
};
|
||||
db.mutate(|v| {
|
||||
let server_info = v.as_public_mut().as_server_info_mut();
|
||||
|
||||
@@ -21,6 +21,7 @@ use tracing::instrument;
|
||||
use ts_rs::TS;
|
||||
|
||||
use crate::context::{CliContext, RpcContext};
|
||||
use crate::registry::asset::BufferedHttpSource;
|
||||
use crate::db::model::package::{ManifestPreference, PackageStateMatchModelRef};
|
||||
use crate::prelude::*;
|
||||
use crate::progress::{FullProgress, FullProgressTracker, PhasedProgressBar};
|
||||
@@ -285,6 +286,57 @@ pub async fn sideload(
|
||||
Ok(SideloadResponse { upload, progress })
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export)]
|
||||
pub struct SideloadUrlParams {
|
||||
#[ts(type = "string")]
|
||||
url: Url,
|
||||
}
|
||||
|
||||
#[instrument(skip_all)]
|
||||
pub async fn sideload_url(
|
||||
ctx: RpcContext,
|
||||
SideloadUrlParams { url }: SideloadUrlParams,
|
||||
) -> Result<(), Error> {
|
||||
if !matches!(url.scheme(), "http" | "https") {
|
||||
return Err(Error::new(
|
||||
eyre!("URL scheme must be http or https, got: {}", url.scheme()),
|
||||
ErrorKind::InvalidRequest,
|
||||
));
|
||||
}
|
||||
|
||||
let progress_tracker = FullProgressTracker::new();
|
||||
let download_progress = progress_tracker.add_phase("Downloading".into(), Some(100));
|
||||
let client = ctx.client.clone();
|
||||
let db = ctx.db.clone();
|
||||
let pt_ref = progress_tracker.clone();
|
||||
|
||||
let download = ctx
|
||||
.services
|
||||
.install(
|
||||
ctx.clone(),
|
||||
|| async move {
|
||||
let source = BufferedHttpSource::new(client, url, download_progress).await?;
|
||||
let key = db.peek().await.into_private().into_developer_key();
|
||||
crate::s9pk::load(source, || Ok(key.de()?.0), Some(&pt_ref)).await
|
||||
},
|
||||
None,
|
||||
None::<Never>,
|
||||
Some(progress_tracker),
|
||||
)
|
||||
.await?;
|
||||
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = async { download.await?.await }.await {
|
||||
tracing::error!("Error sideloading package from URL: {e}");
|
||||
tracing::debug!("{e:?}");
|
||||
}
|
||||
});
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, Parser, TS)]
|
||||
#[group(skip)]
|
||||
#[ts(export)]
|
||||
|
||||
@@ -63,6 +63,7 @@ pub mod init;
|
||||
pub mod install;
|
||||
pub mod logs;
|
||||
pub mod lxc;
|
||||
pub mod mcp;
|
||||
pub mod middleware;
|
||||
pub mod net;
|
||||
pub mod notifications;
|
||||
@@ -441,6 +442,12 @@ pub fn package<C: Context>() -> ParentHandler<C> {
|
||||
.with_metadata("get_session", Value::Bool(true))
|
||||
.no_cli(),
|
||||
)
|
||||
.subcommand(
|
||||
"sideload-url",
|
||||
from_fn_async(install::sideload_url)
|
||||
.with_metadata("sync_db", Value::Bool(true))
|
||||
.no_cli(),
|
||||
)
|
||||
.subcommand(
|
||||
"install",
|
||||
from_fn_async_local(install::cli_install)
|
||||
@@ -572,9 +579,7 @@ pub fn package<C: Context>() -> ParentHandler<C> {
|
||||
)
|
||||
.subcommand(
|
||||
"attach",
|
||||
from_fn_async_local(service::cli_attach)
|
||||
.no_display()
|
||||
.with_about("about.execute-commands-container"),
|
||||
from_fn_async_local(service::cli_attach).no_display(),
|
||||
)
|
||||
.subcommand(
|
||||
"host",
|
||||
|
||||
193
core/src/mcp/ARCHITECTURE.md
Normal file
193
core/src/mcp/ARCHITECTURE.md
Normal file
@@ -0,0 +1,193 @@
|
||||
# MCP Server Architecture
|
||||
|
||||
The Model Context Protocol server embedded in StartOS (`core/src/mcp/`).
|
||||
|
||||
## Transport: Streamable HTTP (MCP 2025-03-26)
|
||||
|
||||
The server implements the **Streamable HTTP** transport from the MCP spec, not the older stdio or SSE-only transports. A single route (`/mcp`) handles all three HTTP methods:
|
||||
|
||||
| Method | Purpose |
|
||||
| ----------- | -------------------------------------------------------------------------------- |
|
||||
| **POST** | JSON-RPC 2.0 requests from client (initialize, tools/call, resources/read, etc.) |
|
||||
| **GET** | Opens an SSE stream for server→client notifications (resource change events) |
|
||||
| **DELETE** | Explicitly ends a session |
|
||||
| **OPTIONS** | CORS preflight |
|
||||
|
||||
A discovery endpoint at `/.well-known/mcp` returns `{"mcp_endpoint":"/mcp"}`.
|
||||
|
||||
## Authentication
|
||||
|
||||
Every HTTP method (POST, GET, DELETE) validates the caller's session cookie via `ValidSessionToken::from_header` before processing. This reuses the same auth infrastructure as the main StartOS web UI — MCP clients must present a valid session cookie obtained through the normal login flow. Unauthenticated requests get a 401.
|
||||
|
||||
## Session Lifecycle
|
||||
|
||||
1. **Create**: Client sends `initialize` via POST. Server generates a UUID session ID, creates an `McpSession` with a bounded mpsc channel (256 messages), and returns the ID in the `Mcp-Session-Id` response header.
|
||||
|
||||
2. **Connect SSE**: Client opens a GET with the session ID header. The server takes the receiver half of the notification channel (`take_notification_rx`) and streams it as SSE events. Only one GET connection per session is allowed (the rx is moved, not cloned).
|
||||
|
||||
3. **Use**: Client sends tool calls, resource reads, subscriptions via POST. All POST requests must include a valid session ID header — the server validates it against the session map before processing.
|
||||
|
||||
4. **Teardown**: Three paths:
|
||||
- Client sends DELETE -> session is removed, subscription tasks are aborted.
|
||||
- SSE stream disconnects -> `CleanupStream`'s `PinnedDrop` impl removes the session.
|
||||
- Session is never connected -> background sweep task (every 30s) removes sessions older than 60s that never had a GET stream attached.
|
||||
|
||||
## Module Structure
|
||||
|
||||
```
|
||||
core/src/mcp/
|
||||
├── mod.rs — HTTP handlers, routing, MCP method dispatch, shell execution, CORS
|
||||
├── protocol.rs — JSON-RPC 2.0 types, MCP request/response structs, error codes
|
||||
├── session.rs — Session map, create/remove/sweep, resource subscriptions with debounce
|
||||
└── tools.rs — Tool registry (89 tools), HashMap<String, ToolEntry> mapping names → RPC methods + schemas
|
||||
```
|
||||
|
||||
## Tool Dispatch
|
||||
|
||||
`tool_registry()` returns a `HashMap<String, ToolEntry>`, each mapping:
|
||||
|
||||
- An MCP tool name (e.g. `"package.start"`)
|
||||
- A JSON Schema for input validation (sent to clients via `tools/list`)
|
||||
- A backing RPC method name (usually identical to the tool name)
|
||||
- Flags: `sync_db` (whether to flush DB sequence after success), `needs_session` (whether to inject `__Auth_session`)
|
||||
|
||||
When `tools/call` arrives:
|
||||
|
||||
1. Look up the tool by name via HashMap O(1) lookup.
|
||||
2. Convert arguments from `serde_json::Value` to `imbl_value::Value`.
|
||||
3. **Special-case**: If `rpc_method` is `"__package_shell__"`, dispatch to `handle_package_shell_exec` directly (no RPC handler). Sets `kill_on_drop(true)` to ensure timed-out processes are terminated.
|
||||
4. Otherwise, optionally inject `__Auth_session` into params, then call `server.handle_command(rpc_method, params)`.
|
||||
5. On success: if `sync_db` is true, flush the DB sequence. Return the result pretty-printed as a text content block.
|
||||
6. On error: return the error as a text content block with `is_error: true`, using `McpResponse::ok` (MCP spec: tool errors are results, not JSON-RPC errors).
|
||||
|
||||
## Shell Execution
|
||||
|
||||
One shell tool bypasses the RPC layer entirely:
|
||||
|
||||
- **`package.shell`** (`__package_shell__`): Resolves the target package's subcontainer via `Service::resolve_subcontainer`, then runs `/bin/sh -c <command>` inside it via `lxc-attach` with `kill_on_drop(true)`. 30s default timeout, 300s max. Host-level shell access (`system.shell`) is intentionally excluded — agents operate within package containers only.
|
||||
|
||||
## Resource Subscriptions
|
||||
|
||||
Four resources are exposed:
|
||||
|
||||
- `startos:///public` — full public DB tree
|
||||
- `startos:///public/serverInfo` — server metadata
|
||||
- `startos:///public/packageData` — installed packages
|
||||
- `startos:///mcp/system-prompt` — curated AI assistant context (text/plain)
|
||||
|
||||
Resource URIs are validated to only allow `/public/**` subtrees and the special `/mcp/system-prompt` path. Attempts to access non-public paths (e.g. `startos:///private/...`) are rejected.
|
||||
|
||||
`resources/read` parses the URI into a `JsonPointer`, calls `ctx.db.dump(&pointer)`, and returns the JSON. The system prompt resource is handled as a special case, returning server info and version.
|
||||
|
||||
`resources/subscribe` creates a `DbSubscriber` that watches the patch-db for changes at the given pointer. Changes are **debounced** (500ms window): the subscriber collects multiple revisions and merges their `DiffPatch`es before sending a single `notifications/resources/updated` notification over the SSE channel. The subscription task runs as a spawned tokio task; its `JoinHandle` is stored in the session so it can be aborted on unsubscribe or session teardown. Re-subscribing to the same URI aborts the prior subscription first.
|
||||
|
||||
## CORS
|
||||
|
||||
- Preflight (OPTIONS): reflects the request's `Origin`, `Access-Control-Request-Method`, and `Access-Control-Request-Headers` back. Sets `Allow-Credentials: true` and caches for 24h.
|
||||
- Normal responses (`apply_cors`): reflects the request's `Origin` header when present, falls back to `*` when absent. Exposes the `Mcp-Session-Id` header. This matches the behavior of the rpc-toolkit `Cors` middleware used by the main UI.
|
||||
- CORS headers are applied to all response types: POST JSON-RPC, GET SSE, DELETE, and error responses.
|
||||
|
||||
## Excluded RPC Methods
|
||||
|
||||
Of the ~195 RPC methods registered in the StartOS backend, 88 are exposed as MCP tools (plus 1 MCP-only tool: `package.shell`). The remaining 105 are excluded for the following reasons.
|
||||
|
||||
### Wrong context — Setup / Init / Diagnostic modes
|
||||
|
||||
These methods belong to the setup wizard, initial install, or diagnostic recovery mode — entirely different server states that are not reachable during normal operation when the MCP server is running.
|
||||
|
||||
| Method | Reason |
|
||||
|--------|--------|
|
||||
| `setup.*` (15 methods) | Setup wizard only runs during initial OS configuration |
|
||||
| `init.*` (14 methods) | Initial disk/install flow, not reachable post-boot |
|
||||
| `diagnostic.*` (7 methods) | Diagnostic recovery mode, separate HTTP server |
|
||||
| `flash-os` | Bare-metal OS flashing |
|
||||
|
||||
### Wrong context — CLI / Developer tooling
|
||||
|
||||
These are developer-facing commands invoked via the CLI, not the web UI. They operate on local files or require local filesystem access.
|
||||
|
||||
| Method | Reason |
|
||||
|--------|--------|
|
||||
| `s9pk.*` (9 methods) | Package building/inspection — CLI tool for developers |
|
||||
| `util.b3sum` | BLAKE3 checksum utility — CLI helper |
|
||||
| `init-key`, `pubkey` | Key management — CLI operations |
|
||||
|
||||
### Wrong context — Registry administration
|
||||
|
||||
These manage the package registry (a separate server-side component), not the local StartOS instance.
|
||||
|
||||
| Method | Reason |
|
||||
|--------|--------|
|
||||
| `registry.*` (20 methods) | Registry server administration, not local device management |
|
||||
|
||||
### Wrong context — Tunnel management
|
||||
|
||||
These configure the Start9 tunnel service, which has its own management interface.
|
||||
|
||||
| Method | Reason |
|
||||
|--------|--------|
|
||||
| `tunnel.*` (12 methods) | Tunnel server management, separate from local OS control |
|
||||
|
||||
### Replaced by MCP-native functionality
|
||||
|
||||
| Method | Reason |
|
||||
|--------|--------|
|
||||
| `db.subscribe` | Replaced by MCP `resources/subscribe` which calls `ctx.db.dump_and_sub()` directly with 500ms debounce |
|
||||
| `server.metrics.follow` | WebSocket continuation for streaming metrics — use `server.metrics` (polling) instead |
|
||||
|
||||
### Requires middleware injection not available via MCP dispatch
|
||||
|
||||
| Method | Reason |
|
||||
|--------|--------|
|
||||
| `package.sideload` | Requires multipart file upload via REST continuation, not JSON-RPC params. Use `package.sideload-by-url` MCP tool (backed by `package.sideload-url` RPC) which accepts a URL instead |
|
||||
|
||||
### Security — host-level shell access excluded
|
||||
|
||||
| Method | Reason |
|
||||
|--------|--------|
|
||||
| `system.shell` | Arbitrary host-level command execution is too broad a privilege for MCP agents. Agents can execute commands inside package subcontainers via `package.shell`, which is scoped to the service's filesystem and processes |
|
||||
|
||||
### Auth methods — intentionally excluded
|
||||
|
||||
| Method | Reason |
|
||||
|--------|--------|
|
||||
| `auth.login` | MCP clients authenticate via session cookie before reaching the MCP server — login is a prerequisite, not an MCP operation |
|
||||
| `auth.logout` | Logging out the session that the MCP client is using would break the connection. Clients should disconnect (DELETE) instead |
|
||||
|
||||
### Internal / low-value
|
||||
|
||||
| Method | Reason |
|
||||
|--------|--------|
|
||||
| `echo` | Debug echo — no agent value |
|
||||
| `git-info` | Build metadata — available via `server.device-info` |
|
||||
| `state` | Returns server state enum — available via DB resources |
|
||||
| `notification.create` | Internal: creates notifications from backend code, not user-facing |
|
||||
| `db.apply` | Bulk DB mutation — CLI-specific params (`apply_receipt`) not suitable for MCP |
|
||||
| `kiosk.set` | Kiosk mode toggle — physical display setting, not agent-relevant |
|
||||
|
||||
### Deep host/binding management — not yet exposed
|
||||
|
||||
These methods manage individual domain bindings and address assignments at a granular level. The list (`server.host.address.list`, `server.host.binding.list`, `package.host.list`) and read operations are exposed; the mutation operations below are deferred until agent workflows demonstrate a need.
|
||||
|
||||
| Method | Reason |
|
||||
|--------|--------|
|
||||
| `server.host.address.domain.public.add` | Granular domain management — deferred |
|
||||
| `server.host.address.domain.public.remove` | Granular domain management — deferred |
|
||||
| `server.host.address.domain.private.add` | Granular domain management — deferred |
|
||||
| `server.host.address.domain.private.remove` | Granular domain management — deferred |
|
||||
| `server.host.binding.set-address-enabled` | Granular binding management — deferred |
|
||||
| `package.host.address.domain.public.add` | Granular domain management — deferred |
|
||||
| `package.host.address.domain.public.remove` | Granular domain management — deferred |
|
||||
| `package.host.address.domain.private.add` | Granular domain management — deferred |
|
||||
| `package.host.address.domain.private.remove` | Granular domain management — deferred |
|
||||
| `package.host.address.list` | Per-package address listing — deferred |
|
||||
| `package.host.binding.list` | Per-package binding listing — deferred |
|
||||
| `package.host.binding.set-address-enabled` | Granular binding management — deferred |
|
||||
| `net.gateway.set-default-outbound` | Gateway default route — deferred |
|
||||
|
||||
## Body Size Limits
|
||||
|
||||
POST request bodies are limited to 1 MiB:
|
||||
|
||||
1. `Content-Length` header is checked **before** reading the body (rejects oversized requests immediately).
|
||||
2. After reading, the actual body size is re-checked as defense-in-depth for chunked transfers that lack `Content-Length`.
|
||||
1087
core/src/mcp/mod.rs
Normal file
1087
core/src/mcp/mod.rs
Normal file
File diff suppressed because it is too large
Load Diff
211
core/src/mcp/protocol.rs
Normal file
211
core/src/mcp/protocol.rs
Normal file
@@ -0,0 +1,211 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value as JsonValue;
|
||||
|
||||
// JSON-RPC 2.0 error codes (standard codes from the JSON-RPC 2.0 spec, §5.1).
pub const PARSE_ERROR: i32 = -32700;
pub const INVALID_REQUEST: i32 = -32600;
pub const METHOD_NOT_FOUND: i32 = -32601;
pub const INVALID_PARAMS: i32 = -32602;
pub const INTERNAL_ERROR: i32 = -32603;

/// MCP protocol revision implemented by this server (date-versioned string).
pub const PROTOCOL_VERSION: &str = "2025-03-26";
|
||||
|
||||
// === JSON-RPC 2.0 envelope ===

/// Incoming JSON-RPC 2.0 request or notification.
#[derive(Deserialize)]
pub struct McpRequest {
    /// JSON-RPC version field; expected to be the literal string "2.0".
    pub jsonrpc: String,
    /// Request id. Per JSON-RPC 2.0, a missing id marks a notification
    /// (no response expected).
    pub id: Option<JsonValue>,
    /// Method name, e.g. "initialize", "tools/call".
    pub method: String,
    /// Method parameters; defaults to `None` when absent from the payload.
    #[serde(default)]
    pub params: Option<JsonValue>,
}
|
||||
|
||||
/// Outgoing JSON-RPC 2.0 response. Exactly one of `result` / `error` is
/// populated — enforced by the `ok` / `error` constructors below.
#[derive(Serialize)]
pub struct McpResponse {
    pub jsonrpc: &'static str,
    /// Echo of the request id; omitted from the wire when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub id: Option<JsonValue>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub result: Option<JsonValue>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<McpError>,
}

impl McpResponse {
    /// Build a success response carrying `result`, echoing the request `id`.
    pub fn ok(id: Option<JsonValue>, result: JsonValue) -> Self {
        Self {
            jsonrpc: "2.0",
            id,
            result: Some(result),
            error: None,
        }
    }

    /// Build an error response with a JSON-RPC error `code` and optional
    /// structured `data` payload.
    pub fn error(id: Option<JsonValue>, code: i32, message: String, data: Option<JsonValue>) -> Self {
        Self {
            jsonrpc: "2.0",
            id,
            result: None,
            error: Some(McpError {
                code,
                message,
                data,
            }),
        }
    }
}
|
||||
|
||||
/// JSON-RPC 2.0 error object (`code` / `message` / optional `data`).
#[derive(Serialize)]
pub struct McpError {
    /// One of the error-code constants above, or an application-defined code.
    pub code: i32,
    pub message: String,
    /// Optional structured detail; omitted from the wire when `None`.
    #[serde(skip_serializing_if = "Option::is_none")]
    pub data: Option<JsonValue>,
}
|
||||
|
||||
// === initialize ===

/// Parameters of the `initialize` request a client sends on connect.
#[derive(Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct InitializeParams {
    /// Protocol revision the client speaks.
    pub protocol_version: String,
    /// Client capability advertisement; kept as raw JSON (not modeled here).
    #[serde(default)]
    pub capabilities: JsonValue,
    #[serde(default)]
    pub client_info: Option<ClientInfo>,
}

/// Optional identifying information a client may send in `initialize`.
#[derive(Deserialize)]
pub struct ClientInfo {
    pub name: String,
    #[serde(default)]
    pub version: Option<String>,
}
|
||||
|
||||
/// Result of `initialize`: the protocol revision, capabilities, and identity
/// of this server.
#[derive(Serialize)]
#[serde(rename_all = "camelCase")]
pub struct InitializeResult {
    pub protocol_version: &'static str,
    pub capabilities: ServerCapabilities,
    pub server_info: ServerInfo,
}

/// Capability advertisement: this server exposes tools and resources.
#[derive(Serialize)]
pub struct ServerCapabilities {
    pub tools: ToolsCapability,
    pub resources: ResourcesCapability,
}

/// Marker object — serializes as `{}`.
#[derive(Serialize)]
pub struct ToolsCapability {}

/// Resources capability; `subscribe` advertises support for
/// `resources/subscribe`.
#[derive(Serialize)]
pub struct ResourcesCapability {
    pub subscribe: bool,
}

/// Server identity returned in the `initialize` result.
#[derive(Serialize)]
pub struct ServerInfo {
    pub name: &'static str,
    pub version: String,
}
|
||||
|
||||
// === tools/list ===

/// A single tool entry in the `tools/list` response.
#[derive(Serialize, Clone)]
#[serde(rename_all = "camelCase")]
pub struct ToolDefinition {
    pub name: String,
    pub description: String,
    /// JSON Schema describing the tool's arguments, kept as raw JSON.
    pub input_schema: JsonValue,
}

/// Envelope for the `tools/list` result.
#[derive(Serialize)]
pub struct ToolsListResult {
    pub tools: Vec<ToolDefinition>,
}
|
||||
|
||||
// === tools/call ===
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct ToolsCallParams {
|
||||
pub name: String,
|
||||
#[serde(default)]
|
||||
pub arguments: JsonValue,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ToolsCallResult {
|
||||
pub content: Vec<ContentBlock>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub is_error: Option<bool>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(tag = "type")]
|
||||
pub enum ContentBlock {
|
||||
#[serde(rename = "text")]
|
||||
Text { text: String },
|
||||
}
|
||||
|
||||
// === resources/list ===
|
||||
|
||||
#[derive(Serialize, Clone)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ResourceDefinition {
|
||||
pub uri: String,
|
||||
pub name: String,
|
||||
pub description: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub mime_type: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub struct ResourcesListResult {
|
||||
pub resources: Vec<ResourceDefinition>,
|
||||
}
|
||||
|
||||
// === resources/read ===
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct ResourcesReadParams {
|
||||
pub uri: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub struct ResourcesReadResult {
|
||||
pub contents: Vec<ResourceContent>,
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct ResourceContent {
|
||||
pub uri: String,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub mime_type: Option<String>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub text: Option<String>,
|
||||
}
|
||||
|
||||
// === resources/subscribe + unsubscribe ===
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct ResourcesSubscribeParams {
|
||||
pub uri: String,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
pub struct ResourcesUnsubscribeParams {
|
||||
pub uri: String,
|
||||
}
|
||||
|
||||
// === Server→client notification ===
|
||||
|
||||
#[derive(Serialize)]
|
||||
pub struct McpNotification {
|
||||
pub jsonrpc: &'static str,
|
||||
pub method: &'static str,
|
||||
pub params: serde_json::Value,
|
||||
}
|
||||
232
core/src/mcp/session.rs
Normal file
232
core/src/mcp/session.rs
Normal file
@@ -0,0 +1,232 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
|
||||
use patch_db::json_ptr::JsonPointer;
|
||||
use patch_db::DiffPatch;
|
||||
use serde_json::Value as JsonValue;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::task::JoinHandle;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::context::RpcContext;
|
||||
use crate::db::DbSubscriber;
|
||||
use crate::prelude::*;
|
||||
use crate::util::sync::SyncMutex;
|
||||
|
||||
use super::protocol::McpNotification;
|
||||
|
||||
/// Shared, mutex-guarded map of active MCP sessions, keyed by session id.
pub(crate) type SessionMap = Arc<SyncMutex<HashMap<String, McpSession>>>;

/// Maximum time a session can exist without a GET stream before being cleaned up.
const SESSION_STALE_TIMEOUT: Duration = Duration::from_secs(60);

/// Maximum buffered notifications before backpressure kicks in.
const NOTIFICATION_CHANNEL_BOUND: usize = 256;

/// Per-client MCP session state.
pub(crate) struct McpSession {
    /// Producer side used by subscription watcher tasks to push notifications.
    pub notification_tx: mpsc::Sender<JsonValue>,
    /// Consumer side; taken exactly once by the SSE GET stream via
    /// `take_notification_rx`. `Some` means no GET stream has attached yet.
    pub notification_rx: Option<mpsc::Receiver<JsonValue>>,
    /// Active `resources/subscribe` watcher tasks, keyed by resource URI.
    pub subscriptions: HashMap<String, JoinHandle<()>>,
    /// Creation time, used by the stale-session sweeper.
    pub created_at: Instant,
}
|
||||
|
||||
pub(crate) fn create_session(sessions: &SessionMap) -> String {
|
||||
let id = Uuid::new_v4().to_string();
|
||||
let (tx, rx) = mpsc::channel(NOTIFICATION_CHANNEL_BOUND);
|
||||
sessions.mutate(|map| {
|
||||
map.insert(
|
||||
id.clone(),
|
||||
McpSession {
|
||||
notification_tx: tx,
|
||||
notification_rx: Some(rx),
|
||||
subscriptions: HashMap::new(),
|
||||
created_at: Instant::now(),
|
||||
},
|
||||
);
|
||||
});
|
||||
id
|
||||
}
|
||||
|
||||
/// Sweep stale sessions. Call this from any frequent code path (POST handler, create_session).
///
/// Sweeping is a single pass over the map, so "if needed" amounts to
/// "every time this is called" — no dedicated timer task exists.
pub(crate) fn sweep_stale_sessions_if_needed(sessions: &SessionMap) {
    sessions.mutate(|map| sweep_stale_sessions(map));
}
|
||||
|
||||
/// Remove sessions that were created but never connected a GET stream within the timeout.
|
||||
fn sweep_stale_sessions(map: &mut HashMap<String, McpSession>) {
|
||||
let stale: Vec<String> = map
|
||||
.iter()
|
||||
.filter(|(_, session)| {
|
||||
// Session is stale if rx is still present (no GET connected) and it's old
|
||||
session.notification_rx.is_some()
|
||||
&& session.created_at.elapsed() > SESSION_STALE_TIMEOUT
|
||||
})
|
||||
.map(|(id, _)| id.clone())
|
||||
.collect();
|
||||
for id in stale {
|
||||
tracing::info!(
|
||||
target: "mcp_audit",
|
||||
session_id = %id,
|
||||
"Sweeping stale MCP session (no GET stream connected)"
|
||||
);
|
||||
if let Some(session) = map.remove(&id) {
|
||||
for (_, handle) in session.subscriptions {
|
||||
handle.abort();
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) fn remove_session(sessions: &SessionMap, id: &str) {
|
||||
sessions.mutate(|map| {
|
||||
if let Some(session) = map.remove(id) {
|
||||
for (_, handle) in session.subscriptions {
|
||||
handle.abort();
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
/// Take the notification receiver from a session (for use by the SSE stream).
|
||||
/// Returns None if the session doesn't exist or the rx was already taken.
|
||||
pub(crate) fn take_notification_rx(
|
||||
sessions: &SessionMap,
|
||||
id: &str,
|
||||
) -> Option<mpsc::Receiver<JsonValue>> {
|
||||
sessions.mutate(|map| map.get_mut(id)?.notification_rx.take())
|
||||
}
|
||||
|
||||
/// Check whether the given session ID exists in the session map.
|
||||
pub(crate) fn session_exists(sessions: &SessionMap, id: &str) -> bool {
|
||||
sessions.peek(|map| map.contains_key(id))
|
||||
}
|
||||
|
||||
/// Parse a `startos:///...` URI into a JsonPointer.
|
||||
fn parse_resource_uri(uri: &str) -> Result<JsonPointer, Error> {
|
||||
let path = uri.strip_prefix("startos://").ok_or_else(|| {
|
||||
Error::new(
|
||||
eyre!("Invalid resource URI: must start with startos://"),
|
||||
ErrorKind::InvalidRequest,
|
||||
)
|
||||
})?;
|
||||
path.parse::<JsonPointer>()
|
||||
.with_kind(ErrorKind::InvalidRequest)
|
||||
}
|
||||
|
||||
/// Subscribe the given session to change notifications for a resource URI.
///
/// Dumps the current DB state at the URI's JSON pointer, then spawns a
/// watcher task that forwards debounced patch-db revisions to the session's
/// notification channel as `notifications/resources/updated` messages.
/// Re-subscribing to the same URI aborts and replaces the previous watcher.
pub(crate) async fn subscribe_resource(
    ctx: &RpcContext,
    sessions: &SessionMap,
    session_id: &str,
    uri: &str,
) -> Result<(), Error> {
    let pointer = parse_resource_uri(uri)?;

    let (dump, sub) = ctx.db.dump_and_sub(pointer).await;
    let mut db_sub = DbSubscriber {
        rev: dump.id,
        sub,
        sync_db: ctx.sync_db.subscribe(),
    };

    // Clone the sender up front; fail fast if the session is unknown.
    let tx = sessions
        .peek(|map| map.get(session_id).map(|s| s.notification_tx.clone()))
        .ok_or_else(|| Error::new(eyre!("Session not found"), ErrorKind::InvalidRequest))?;

    let uri_owned = uri.to_string();

    // NOTE(review): if the session is removed between the `peek` above and
    // the `mutate` below, the spawned task's handle is dropped un-aborted;
    // the task then exits on its next send attempt (channel closed) —
    // presumably on the next DB change. Confirm this window is acceptable.
    let handle = tokio::spawn(async move {
        loop {
            // Wait for first revision
            let first = match db_sub.recv().await {
                Some(rev) => rev,
                None => break,
            };

            // Debounce: collect more revisions for up to 500ms
            let mut merged_id = first.id;
            let mut merged_patch = first.patch;

            let debounce = tokio::time::sleep(Duration::from_millis(500));
            tokio::pin!(debounce);

            loop {
                tokio::select! {
                    _ = &mut debounce => break,
                    rev = db_sub.recv() => {
                        match rev {
                            Some(rev) => {
                                // Fold the newer revision into the pending batch;
                                // the latest id wins, patches are concatenated.
                                merged_id = rev.id;
                                merged_patch.append(rev.patch);
                            }
                            None => {
                                // Subscriber closed — send what we have and exit
                                let _ = send_notification(&tx, &uri_owned, merged_id, &merged_patch);
                                return;
                            }
                        }
                    }
                }
            }

            if send_notification(&tx, &uri_owned, merged_id, &merged_patch).is_err() {
                break; // SSE stream closed or channel full
            }
        }
    });

    // Store the task handle, aborting any prior subscription for the same URI
    sessions.mutate(|map| {
        if let Some(session) = map.get_mut(session_id) {
            if let Some(old_handle) = session.subscriptions.remove(uri) {
                tracing::info!(
                    target: "mcp_audit",
                    uri = %uri,
                    session_id = %session_id,
                    "Aborting prior subscription for re-subscribed URI"
                );
                old_handle.abort();
            }
            session.subscriptions.insert(uri.to_string(), handle);
        }
    });

    Ok(())
}
|
||||
|
||||
fn send_notification(
|
||||
tx: &mpsc::Sender<JsonValue>,
|
||||
uri: &str,
|
||||
id: u64,
|
||||
patch: &DiffPatch,
|
||||
) -> Result<(), ()> {
|
||||
let notification = McpNotification {
|
||||
jsonrpc: "2.0",
|
||||
method: "notifications/resources/updated",
|
||||
params: serde_json::json!({
|
||||
"uri": uri,
|
||||
"revision": {
|
||||
"id": id,
|
||||
"patch": patch,
|
||||
}
|
||||
}),
|
||||
};
|
||||
tx.try_send(serde_json::to_value(¬ification).unwrap_or_default())
|
||||
.map_err(|e| {
|
||||
tracing::warn!(
|
||||
target: "mcp_audit",
|
||||
uri = %uri,
|
||||
"Notification channel full or closed, dropping notification: {e}"
|
||||
);
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) fn unsubscribe_resource(sessions: &SessionMap, session_id: &str, uri: &str) {
|
||||
sessions.mutate(|map| {
|
||||
if let Some(session) = map.get_mut(session_id) {
|
||||
if let Some(handle) = session.subscriptions.remove(uri) {
|
||||
handle.abort();
|
||||
}
|
||||
}
|
||||
});
|
||||
}
|
||||
1382
core/src/mcp/tools.rs
Normal file
1382
core/src/mcp/tools.rs
Normal file
File diff suppressed because it is too large
Load Diff
@@ -117,7 +117,6 @@ pub fn forward_api<C: Context>() -> ParentHandler<C> {
|
||||
|
||||
Ok(())
|
||||
})
|
||||
.with_about("about.dump-port-forward-table")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -241,19 +241,11 @@ pub async fn check_port(
|
||||
.await
|
||||
.map_or(false, |r| r.is_ok());
|
||||
|
||||
let local_ipv4 = ip_info
|
||||
.subnets
|
||||
.iter()
|
||||
.find_map(|s| match s.addr() {
|
||||
IpAddr::V4(v4) => Some(v4),
|
||||
_ => None,
|
||||
})
|
||||
.unwrap_or(Ipv4Addr::UNSPECIFIED);
|
||||
let client = reqwest::Client::builder();
|
||||
#[cfg(target_os = "linux")]
|
||||
let client = client
|
||||
.interface(gateway.as_str())
|
||||
.local_address(IpAddr::V4(local_ipv4));
|
||||
.local_address(IpAddr::V4(Ipv4Addr::UNSPECIFIED));
|
||||
let client = client.build()?;
|
||||
|
||||
let mut res = None;
|
||||
@@ -290,7 +282,12 @@ pub async fn check_port(
|
||||
));
|
||||
};
|
||||
|
||||
let hairpinning = check_hairpin(gateway, local_ipv4, ip, port).await;
|
||||
let hairpinning = tokio::time::timeout(
|
||||
Duration::from_secs(5),
|
||||
tokio::net::TcpStream::connect(SocketAddr::new(ip.into(), port)),
|
||||
)
|
||||
.await
|
||||
.map_or(false, |r| r.is_ok());
|
||||
|
||||
Ok(CheckPortRes {
|
||||
ip,
|
||||
@@ -301,30 +298,6 @@ pub async fn check_port(
|
||||
})
|
||||
}
|
||||
|
||||
#[cfg(target_os = "linux")]
|
||||
async fn check_hairpin(gateway: GatewayId, local_ipv4: Ipv4Addr, ip: Ipv4Addr, port: u16) -> bool {
|
||||
let hairpinning = tokio::time::timeout(Duration::from_secs(5), async {
|
||||
let dest = SocketAddr::new(ip.into(), port);
|
||||
let socket = socket2::Socket::new(socket2::Domain::IPV4, socket2::Type::STREAM, None)?;
|
||||
socket.bind_device(Some(gateway.as_str().as_bytes()))?;
|
||||
socket.bind(&SocketAddr::new(IpAddr::V4(local_ipv4), 0).into())?;
|
||||
socket.set_nonblocking(true)?;
|
||||
let socket = unsafe {
|
||||
use std::os::fd::{FromRawFd, IntoRawFd};
|
||||
tokio::net::TcpSocket::from_raw_fd(socket.into_raw_fd())
|
||||
};
|
||||
socket.connect(dest).await.map(|_| ())
|
||||
})
|
||||
.await
|
||||
.map_or(false, |r| r.is_ok());
|
||||
hairpinning
|
||||
}
|
||||
|
||||
#[cfg(not(target_os = "linux"))]
|
||||
async fn check_hairpin(_: GatewayId, _: Ipv4Addr, _: Ipv4Addr, _: u16) -> bool {
|
||||
false
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, Parser, TS)]
|
||||
#[group(skip)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
@@ -792,10 +765,7 @@ async fn watcher(
|
||||
}
|
||||
changed
|
||||
});
|
||||
gc_policy_routing(&ifaces).await;
|
||||
for result in futures::future::join_all(jobs).await {
|
||||
result.log_err();
|
||||
}
|
||||
futures::future::try_join_all(jobs).await?;
|
||||
|
||||
Ok::<_, Error>(())
|
||||
})
|
||||
@@ -811,16 +781,12 @@ async fn watcher(
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_wan_ipv4(
|
||||
iface: &str,
|
||||
base_url: &Url,
|
||||
local_ipv4: Ipv4Addr,
|
||||
) -> Result<Option<Ipv4Addr>, Error> {
|
||||
async fn get_wan_ipv4(iface: &str, base_url: &Url) -> Result<Option<Ipv4Addr>, Error> {
|
||||
let client = reqwest::Client::builder();
|
||||
#[cfg(target_os = "linux")]
|
||||
let client = client
|
||||
.interface(iface)
|
||||
.local_address(IpAddr::V4(local_ipv4));
|
||||
.local_address(IpAddr::V4(Ipv4Addr::UNSPECIFIED));
|
||||
let url = base_url.join("/ip").with_kind(ErrorKind::ParseUrl)?;
|
||||
let text = client
|
||||
.build()?
|
||||
@@ -838,43 +804,15 @@ async fn get_wan_ipv4(
|
||||
Ok(Some(trimmed.parse()?))
|
||||
}
|
||||
|
||||
struct PolicyRoutingGuard {
|
||||
struct PolicyRoutingCleanup {
|
||||
table_id: u32,
|
||||
iface: String,
|
||||
}
|
||||
|
||||
/// Remove stale per-interface policy-routing state (fwmark rules, routing
|
||||
/// tables, iptables CONNMARK rules) for interfaces that no longer exist.
|
||||
async fn gc_policy_routing(active_ifaces: &BTreeSet<GatewayId>) {
|
||||
let active_tables: BTreeSet<u32> = active_ifaces
|
||||
.iter()
|
||||
.filter_map(|iface| if_nametoindex(iface.as_str()).ok().map(|idx| 1000 + idx))
|
||||
.collect();
|
||||
|
||||
// GC fwmark ip rules at priority 50 and their routing tables.
|
||||
if let Ok(rules) = Command::new("ip")
|
||||
.arg("rule")
|
||||
.arg("show")
|
||||
.invoke(ErrorKind::Network)
|
||||
.await
|
||||
.and_then(|b| String::from_utf8(b).with_kind(ErrorKind::Utf8))
|
||||
{
|
||||
for line in rules.lines() {
|
||||
let line = line.trim();
|
||||
if !line.starts_with("50:") {
|
||||
continue;
|
||||
}
|
||||
let Some(pos) = line.find("lookup ") else {
|
||||
continue;
|
||||
};
|
||||
let token = line[pos + 7..].split_whitespace().next().unwrap_or("");
|
||||
let Ok(table_id) = token.parse::<u32>() else {
|
||||
continue;
|
||||
};
|
||||
if table_id < 1000 || active_tables.contains(&table_id) {
|
||||
continue;
|
||||
}
|
||||
let table_str = table_id.to_string();
|
||||
tracing::debug!("gc_policy_routing: removing stale table {table_id}");
|
||||
impl Drop for PolicyRoutingCleanup {
|
||||
fn drop(&mut self) {
|
||||
let table_str = self.table_id.to_string();
|
||||
let iface = std::mem::take(&mut self.iface);
|
||||
tokio::spawn(async move {
|
||||
Command::new("ip")
|
||||
.arg("rule")
|
||||
.arg("del")
|
||||
@@ -886,7 +824,7 @@ async fn gc_policy_routing(active_ifaces: &BTreeSet<GatewayId>) {
|
||||
.arg("50")
|
||||
.invoke(ErrorKind::Network)
|
||||
.await
|
||||
.ok();
|
||||
.log_err();
|
||||
Command::new("ip")
|
||||
.arg("route")
|
||||
.arg("flush")
|
||||
@@ -894,47 +832,26 @@ async fn gc_policy_routing(active_ifaces: &BTreeSet<GatewayId>) {
|
||||
.arg(&table_str)
|
||||
.invoke(ErrorKind::Network)
|
||||
.await
|
||||
.ok();
|
||||
}
|
||||
}
|
||||
|
||||
// GC iptables CONNMARK set-mark rules for defunct interfaces.
|
||||
if let Ok(rules) = Command::new("iptables")
|
||||
.arg("-t")
|
||||
.arg("mangle")
|
||||
.arg("-S")
|
||||
.arg("PREROUTING")
|
||||
.invoke(ErrorKind::Network)
|
||||
.await
|
||||
.and_then(|b| String::from_utf8(b).with_kind(ErrorKind::Utf8))
|
||||
{
|
||||
// Rules look like:
|
||||
// -A PREROUTING -i wg0 -m conntrack --ctstate NEW -j CONNMARK --set-mark 1005
|
||||
for line in rules.lines() {
|
||||
let parts: Vec<&str> = line.split_whitespace().collect();
|
||||
if parts.first() != Some(&"-A") {
|
||||
continue;
|
||||
}
|
||||
if !parts.contains(&"--set-mark") {
|
||||
continue;
|
||||
}
|
||||
let Some(iface_idx) = parts.iter().position(|&p| p == "-i") else {
|
||||
continue;
|
||||
};
|
||||
let Some(&iface) = parts.get(iface_idx + 1) else {
|
||||
continue;
|
||||
};
|
||||
if active_ifaces.contains(&GatewayId::from(InternedString::intern(iface))) {
|
||||
continue;
|
||||
}
|
||||
tracing::debug!("gc_policy_routing: removing stale iptables rule for {iface}");
|
||||
let mut cmd = Command::new("iptables");
|
||||
cmd.arg("-t").arg("mangle").arg("-D");
|
||||
for &arg in &parts[1..] {
|
||||
cmd.arg(arg);
|
||||
}
|
||||
cmd.invoke(ErrorKind::Network).await.ok();
|
||||
}
|
||||
.log_err();
|
||||
Command::new("iptables")
|
||||
.arg("-t")
|
||||
.arg("mangle")
|
||||
.arg("-D")
|
||||
.arg("PREROUTING")
|
||||
.arg("-i")
|
||||
.arg(&iface)
|
||||
.arg("-m")
|
||||
.arg("conntrack")
|
||||
.arg("--ctstate")
|
||||
.arg("NEW")
|
||||
.arg("-j")
|
||||
.arg("CONNMARK")
|
||||
.arg("--set-mark")
|
||||
.arg(&table_str)
|
||||
.invoke(ErrorKind::Network)
|
||||
.await
|
||||
.log_err();
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1066,8 +983,11 @@ async fn watch_ip(
|
||||
} else {
|
||||
None
|
||||
};
|
||||
let policy_guard: Option<PolicyRoutingGuard> =
|
||||
policy_table_id.map(|t| PolicyRoutingGuard { table_id: t });
|
||||
let policy_guard: Option<PolicyRoutingCleanup> =
|
||||
policy_table_id.map(|t| PolicyRoutingCleanup {
|
||||
table_id: t,
|
||||
iface: iface.as_str().to_owned(),
|
||||
});
|
||||
|
||||
loop {
|
||||
until
|
||||
@@ -1094,7 +1014,7 @@ async fn watch_ip(
|
||||
}
|
||||
|
||||
async fn apply_policy_routing(
|
||||
guard: &PolicyRoutingGuard,
|
||||
guard: &PolicyRoutingCleanup,
|
||||
iface: &GatewayId,
|
||||
lan_ip: &OrdSet<IpAddr>,
|
||||
) -> Result<(), Error> {
|
||||
@@ -1147,17 +1067,7 @@ async fn apply_policy_routing(
|
||||
cmd.arg(part);
|
||||
}
|
||||
cmd.arg("table").arg(&table_str);
|
||||
if let Err(e) = cmd.invoke(ErrorKind::Network).await {
|
||||
// Transient interfaces (podman, wg-quick, etc.) may
|
||||
// vanish between reading the main table and replaying
|
||||
// the route — demote to debug to avoid log noise.
|
||||
if e.source.to_string().contains("No such file or directory") {
|
||||
tracing::trace!("ip route replace (transient device): {e}");
|
||||
} else {
|
||||
tracing::error!("{e}");
|
||||
tracing::debug!("{e:?}");
|
||||
}
|
||||
}
|
||||
cmd.invoke(ErrorKind::Network).await.log_err();
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1328,7 +1238,7 @@ async fn poll_ip_info(
|
||||
ip4_proxy: &Ip4ConfigProxy<'_>,
|
||||
ip6_proxy: &Ip6ConfigProxy<'_>,
|
||||
dhcp4_proxy: &Option<Dhcp4ConfigProxy<'_>>,
|
||||
policy_guard: &Option<PolicyRoutingGuard>,
|
||||
policy_guard: &Option<PolicyRoutingCleanup>,
|
||||
iface: &GatewayId,
|
||||
echoip_ratelimit_state: &mut BTreeMap<Url, Instant>,
|
||||
db: Option<&TypedPatchDb<Database>>,
|
||||
@@ -1377,18 +1287,57 @@ async fn poll_ip_info(
|
||||
apply_policy_routing(guard, iface, &lan_ip).await?;
|
||||
}
|
||||
|
||||
// Write IP info to the watch immediately so the gateway appears in the
|
||||
// DB without waiting for the (slow) WAN IP fetch. The echoip HTTP
|
||||
// request has a 5-second timeout per URL and is easily cancelled by
|
||||
// D-Bus signals via the Until mechanism, which would prevent the
|
||||
// gateway from ever appearing if we waited.
|
||||
let echoip_urls = if let Some(db) = db {
|
||||
db.peek()
|
||||
.await
|
||||
.as_public()
|
||||
.as_server_info()
|
||||
.as_echoip_urls()
|
||||
.de()
|
||||
.unwrap_or_else(|_| crate::db::model::public::default_echoip_urls())
|
||||
} else {
|
||||
crate::db::model::public::default_echoip_urls()
|
||||
};
|
||||
let mut wan_ip = None;
|
||||
for echoip_url in echoip_urls {
|
||||
if echoip_ratelimit_state
|
||||
.get(&echoip_url)
|
||||
.map_or(true, |i| i.elapsed() > Duration::from_secs(300))
|
||||
&& !subnets.is_empty()
|
||||
&& !matches!(
|
||||
device_type,
|
||||
Some(NetworkInterfaceType::Bridge | NetworkInterfaceType::Loopback)
|
||||
)
|
||||
{
|
||||
match get_wan_ipv4(iface.as_str(), &echoip_url).await {
|
||||
Ok(a) => {
|
||||
wan_ip = a;
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!(
|
||||
"{}",
|
||||
t!(
|
||||
"net.gateway.failed-to-determine-wan-ip",
|
||||
iface = iface.to_string(),
|
||||
error = e.to_string()
|
||||
)
|
||||
);
|
||||
tracing::debug!("{e:?}");
|
||||
}
|
||||
};
|
||||
echoip_ratelimit_state.insert(echoip_url, Instant::now());
|
||||
if wan_ip.is_some() {
|
||||
break;
|
||||
}
|
||||
};
|
||||
}
|
||||
let mut ip_info = IpInfo {
|
||||
name: name.clone(),
|
||||
scope_id,
|
||||
device_type,
|
||||
subnets: subnets.clone(),
|
||||
subnets,
|
||||
lan_ip,
|
||||
wan_ip: None,
|
||||
wan_ip,
|
||||
ntp_servers,
|
||||
dns_servers,
|
||||
};
|
||||
@@ -1403,7 +1352,7 @@ async fn poll_ip_info(
|
||||
i.ip_info.as_ref().and_then(|i| i.wan_ip),
|
||||
)
|
||||
});
|
||||
ip_info.wan_ip = prev_wan_ip;
|
||||
ip_info.wan_ip = ip_info.wan_ip.or(prev_wan_ip);
|
||||
let ip_info = Arc::new(ip_info);
|
||||
m.insert(
|
||||
iface.clone(),
|
||||
@@ -1418,85 +1367,6 @@ async fn poll_ip_info(
|
||||
.is_none()
|
||||
});
|
||||
|
||||
// Now fetch the WAN IP in a second pass. Even if this is slow or
|
||||
// gets cancelled, the gateway already has valid ip_info above.
|
||||
let echoip_urls = if let Some(db) = db {
|
||||
db.peek()
|
||||
.await
|
||||
.as_public()
|
||||
.as_server_info()
|
||||
.as_echoip_urls()
|
||||
.de()
|
||||
.unwrap_or_else(|_| crate::db::model::public::default_echoip_urls())
|
||||
} else {
|
||||
crate::db::model::public::default_echoip_urls()
|
||||
};
|
||||
let mut wan_ip = None;
|
||||
let mut err = None;
|
||||
for echoip_url in echoip_urls {
|
||||
if echoip_ratelimit_state
|
||||
.get(&echoip_url)
|
||||
.map_or(true, |i| i.elapsed() > Duration::from_secs(300))
|
||||
&& !subnets.is_empty()
|
||||
&& !matches!(
|
||||
device_type,
|
||||
Some(NetworkInterfaceType::Bridge | NetworkInterfaceType::Loopback)
|
||||
)
|
||||
{
|
||||
let local_ipv4 = subnets
|
||||
.iter()
|
||||
.find_map(|s| match s.addr() {
|
||||
IpAddr::V4(v4) => Some(v4),
|
||||
_ => None,
|
||||
})
|
||||
.unwrap_or(Ipv4Addr::UNSPECIFIED);
|
||||
match get_wan_ipv4(iface.as_str(), &echoip_url, local_ipv4).await {
|
||||
Ok(a) => {
|
||||
wan_ip = a;
|
||||
}
|
||||
Err(e) => {
|
||||
err = Some(e);
|
||||
}
|
||||
};
|
||||
echoip_ratelimit_state.insert(echoip_url, Instant::now());
|
||||
if wan_ip.is_some() {
|
||||
break;
|
||||
}
|
||||
};
|
||||
}
|
||||
if wan_ip.is_none()
|
||||
&& let Some(e) = err
|
||||
{
|
||||
tracing::error!(
|
||||
"{}",
|
||||
t!(
|
||||
"net.gateway.failed-to-determine-wan-ip",
|
||||
iface = iface.to_string(),
|
||||
error = e.to_string()
|
||||
)
|
||||
);
|
||||
tracing::debug!("{e:?}");
|
||||
}
|
||||
|
||||
// Update with WAN IP if we obtained one
|
||||
if wan_ip.is_some() {
|
||||
write_to.send_if_modified(|m: &mut OrdMap<GatewayId, NetworkInterfaceInfo>| {
|
||||
let Some(entry) = m.get_mut(iface) else {
|
||||
return false;
|
||||
};
|
||||
let Some(ref existing_ip) = entry.ip_info else {
|
||||
return false;
|
||||
};
|
||||
if existing_ip.wan_ip == wan_ip {
|
||||
return false;
|
||||
}
|
||||
let mut updated = (**existing_ip).clone();
|
||||
updated.wan_ip = wan_ip;
|
||||
entry.ip_info = Some(Arc::new(updated));
|
||||
true
|
||||
});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1600,12 +1470,22 @@ impl NetworkInterfaceController {
|
||||
) -> Result<(), Error> {
|
||||
tracing::debug!("syncronizing {info:?} to db");
|
||||
|
||||
let wifi_iface = find_wifi_iface()
|
||||
.await
|
||||
.ok()
|
||||
.and_then(|a| a)
|
||||
.map(InternedString::from)
|
||||
.map(GatewayId::from);
|
||||
let mut wifi_iface = info
|
||||
.iter()
|
||||
.find(|(_, info)| {
|
||||
info.ip_info.as_ref().map_or(false, |i| {
|
||||
i.device_type == Some(NetworkInterfaceType::Wireless)
|
||||
})
|
||||
})
|
||||
.map(|(id, _)| id.clone());
|
||||
if wifi_iface.is_none() {
|
||||
wifi_iface = find_wifi_iface()
|
||||
.await
|
||||
.ok()
|
||||
.and_then(|a| a)
|
||||
.map(InternedString::from)
|
||||
.map(GatewayId::from);
|
||||
}
|
||||
|
||||
db.mutate(|db| {
|
||||
let network = db.as_public_mut().as_server_info_mut().as_network_mut();
|
||||
|
||||
@@ -14,7 +14,6 @@ use crate::hostname::ServerHostname;
|
||||
use crate::net::acme::AcmeProvider;
|
||||
use crate::net::gateway::{CheckDnsParams, CheckPortParams, CheckPortRes, check_dns, check_port};
|
||||
use crate::net::host::{HostApiKind, all_hosts};
|
||||
use crate::net::service_interface::HostnameMetadata;
|
||||
use crate::prelude::*;
|
||||
use crate::util::serde::{HandlerExtSerde, display_serializable};
|
||||
|
||||
@@ -109,7 +108,6 @@ pub fn address_api<C: Context, Kind: HostApiKind>()
|
||||
.with_about("about.remove-public-domain-from-host")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
.with_about("about.commands-host-public-domain")
|
||||
.with_inherited(|_, a| a),
|
||||
)
|
||||
.subcommand(
|
||||
@@ -133,10 +131,8 @@ pub fn address_api<C: Context, Kind: HostApiKind>()
|
||||
.with_about("about.remove-private-domain-from-host")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
.with_about("about.commands-host-private-domain")
|
||||
.with_inherited(|_, a| a),
|
||||
)
|
||||
.with_about("about.commands-host-address-domain")
|
||||
.with_inherited(Kind::inheritance),
|
||||
)
|
||||
.subcommand(
|
||||
@@ -247,50 +243,8 @@ pub async fn add_public_domain<Kind: HostApiKind>(
|
||||
.and_then(|a| a.port)
|
||||
.ok_or_else(|| Error::new(eyre!("no public address found for {fqdn} on port {internal_port}"), ErrorKind::NotFound))?;
|
||||
|
||||
// On the target binding, enable the WAN IPv4 and all
|
||||
// public domains on the same gateway+port (no SNI without SSL).
|
||||
// Disable the domain on all other bindings
|
||||
host.as_bindings_mut().mutate(|b| {
|
||||
if let Some(bind) = b.get_mut(&internal_port) {
|
||||
let non_ssl_port = bind.addresses.available.iter().find_map(|a| {
|
||||
if a.ssl || !a.public || a.hostname != fqdn {
|
||||
return None;
|
||||
}
|
||||
if let HostnameMetadata::PublicDomain { gateway: gw } = &a.metadata {
|
||||
if *gw == gateway {
|
||||
return a.port;
|
||||
}
|
||||
}
|
||||
None
|
||||
});
|
||||
if let Some(dp) = non_ssl_port {
|
||||
for a in &bind.addresses.available {
|
||||
if a.ssl || !a.public {
|
||||
continue;
|
||||
}
|
||||
if let HostnameMetadata::Ipv4 { gateway: gw } = &a.metadata {
|
||||
if *gw == gateway {
|
||||
if let Some(sa) = a.to_socket_addr() {
|
||||
if sa.port() == dp {
|
||||
bind.addresses.enabled.insert(sa);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
for a in &bind.addresses.available {
|
||||
if a.ssl {
|
||||
continue;
|
||||
}
|
||||
if let HostnameMetadata::PublicDomain { gateway: gw } = &a.metadata {
|
||||
if *gw == gateway && a.port == Some(dp) {
|
||||
bind.addresses.disabled.remove(&(a.hostname.clone(), dp));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Disable the domain on all other bindings
|
||||
for (&port, bind) in b.iter_mut() {
|
||||
if port == internal_port {
|
||||
continue;
|
||||
|
||||
@@ -13,7 +13,7 @@ use crate::context::{CliContext, RpcContext};
|
||||
use crate::db::prelude::Map;
|
||||
use crate::net::forward::AvailablePorts;
|
||||
use crate::net::host::HostApiKind;
|
||||
use crate::net::service_interface::{HostnameInfo, HostnameMetadata};
|
||||
use crate::net::service_interface::HostnameInfo;
|
||||
use crate::net::vhost::AlpnInfo;
|
||||
use crate::prelude::*;
|
||||
use crate::util::FromStrParser;
|
||||
@@ -344,41 +344,6 @@ pub async fn set_address_enabled<Kind: HostApiKind>(
|
||||
} else {
|
||||
bind.addresses.enabled.remove(&sa);
|
||||
}
|
||||
// Non-SSL Ipv4: cascade to PublicDomains on same gateway
|
||||
if !address.ssl {
|
||||
if let HostnameMetadata::Ipv4 { gateway } =
|
||||
&address.metadata
|
||||
{
|
||||
let port = sa.port();
|
||||
for a in &bind.addresses.available {
|
||||
if a.ssl {
|
||||
continue;
|
||||
}
|
||||
if let HostnameMetadata::PublicDomain {
|
||||
gateway: gw,
|
||||
} = &a.metadata
|
||||
{
|
||||
if gw == gateway
|
||||
&& a.port.unwrap_or(80) == port
|
||||
{
|
||||
let k = (
|
||||
a.hostname.clone(),
|
||||
a.port.unwrap_or(80),
|
||||
);
|
||||
if enabled {
|
||||
bind.addresses
|
||||
.disabled
|
||||
.remove(&k);
|
||||
} else {
|
||||
bind.addresses
|
||||
.disabled
|
||||
.insert(k);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// Domains and private IPs: toggle via (host, port) in `disabled` set
|
||||
let port = address.port.unwrap_or(if address.ssl { 443 } else { 80 });
|
||||
@@ -388,61 +353,6 @@ pub async fn set_address_enabled<Kind: HostApiKind>(
|
||||
} else {
|
||||
bind.addresses.disabled.insert(key);
|
||||
}
|
||||
// Non-SSL PublicDomain: cascade to Ipv4 + other PublicDomains on same gateway
|
||||
if !address.ssl {
|
||||
if let HostnameMetadata::PublicDomain { gateway } =
|
||||
&address.metadata
|
||||
{
|
||||
for a in &bind.addresses.available {
|
||||
if a.ssl {
|
||||
continue;
|
||||
}
|
||||
match &a.metadata {
|
||||
HostnameMetadata::Ipv4 { gateway: gw }
|
||||
if a.public
|
||||
&& gw == gateway =>
|
||||
{
|
||||
if let Some(sa) =
|
||||
a.to_socket_addr()
|
||||
{
|
||||
if sa.port() == port {
|
||||
if enabled {
|
||||
bind.addresses
|
||||
.enabled
|
||||
.insert(sa);
|
||||
} else {
|
||||
bind.addresses
|
||||
.enabled
|
||||
.remove(&sa);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
HostnameMetadata::PublicDomain {
|
||||
gateway: gw,
|
||||
} if gw == gateway => {
|
||||
let dp = a.port.unwrap_or(80);
|
||||
if dp == port {
|
||||
let k = (
|
||||
a.hostname.clone(),
|
||||
dp,
|
||||
);
|
||||
if enabled {
|
||||
bind.addresses
|
||||
.disabled
|
||||
.remove(&k);
|
||||
} else {
|
||||
bind.addresses
|
||||
.disabled
|
||||
.insert(k);
|
||||
}
|
||||
}
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
|
||||
@@ -521,27 +521,18 @@ pub fn host_api<C: Context>() -> ParentHandler<C, RequiresPackageId> {
|
||||
.subcommand(
|
||||
"address",
|
||||
address_api::<C, ForPackage>()
|
||||
.with_inherited(|RequiresPackageId { package }, _| package)
|
||||
.with_about("about.commands-host-addresses"),
|
||||
.with_inherited(|RequiresPackageId { package }, _| package),
|
||||
)
|
||||
.subcommand(
|
||||
"binding",
|
||||
binding::<C, ForPackage>()
|
||||
.with_inherited(|RequiresPackageId { package }, _| package)
|
||||
.with_about("about.commands-host-bindings"),
|
||||
binding::<C, ForPackage>().with_inherited(|RequiresPackageId { package }, _| package),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn server_host_api<C: Context>() -> ParentHandler<C> {
|
||||
ParentHandler::<C>::new()
|
||||
.subcommand(
|
||||
"address",
|
||||
address_api::<C, ForServer>().with_about("about.commands-host-addresses"),
|
||||
)
|
||||
.subcommand(
|
||||
"binding",
|
||||
binding::<C, ForServer>().with_about("about.commands-host-bindings"),
|
||||
)
|
||||
.subcommand("address", address_api::<C, ForServer>())
|
||||
.subcommand("binding", binding::<C, ForServer>())
|
||||
}
|
||||
|
||||
pub async fn list_hosts(
|
||||
|
||||
@@ -42,10 +42,6 @@ pub fn net_api<C: Context>() -> ParentHandler<C> {
|
||||
"tunnel",
|
||||
tunnel::tunnel_api::<C>().with_about("about.manage-tunnels"),
|
||||
)
|
||||
.subcommand(
|
||||
"ssl",
|
||||
ssl::ssl_api::<C>().with_about("about.manage-ssl-certificates"),
|
||||
)
|
||||
.subcommand(
|
||||
"vhost",
|
||||
vhost::vhost_api::<C>().with_about("about.manage-ssl-vhost-proxy"),
|
||||
|
||||
@@ -5,7 +5,6 @@ use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, SystemTime, UNIX_EPOCH};
|
||||
|
||||
use clap::Parser;
|
||||
use futures::FutureExt;
|
||||
use imbl_value::InternedString;
|
||||
use libc::time_t;
|
||||
@@ -22,19 +21,16 @@ use openssl::x509::extension::{
|
||||
use openssl::x509::{X509, X509Builder, X509NameBuilder, X509Ref};
|
||||
use openssl::*;
|
||||
use patch_db::HasModel;
|
||||
use rpc_toolkit::{Context, HandlerExt, ParentHandler, from_fn_async};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio_rustls::rustls::ServerConfig;
|
||||
use tokio_rustls::rustls::crypto::CryptoProvider;
|
||||
use tokio_rustls::rustls::pki_types::{PrivateKeyDer, PrivatePkcs8KeyDer};
|
||||
use tokio_rustls::rustls::server::ClientHello;
|
||||
use tracing::instrument;
|
||||
use ts_rs::TS;
|
||||
use visit_rs::Visit;
|
||||
|
||||
use crate::SOURCE_DATE;
|
||||
use crate::account::AccountInfo;
|
||||
use crate::context::{CliContext, RpcContext};
|
||||
use crate::db::model::Database;
|
||||
use crate::db::{DbAccess, DbAccessMut};
|
||||
use crate::hostname::ServerHostname;
|
||||
@@ -43,7 +39,7 @@ use crate::net::gateway::GatewayInfo;
|
||||
use crate::net::tls::{TlsHandler, TlsHandlerAction};
|
||||
use crate::net::web_server::{Accept, ExtractVisitor, TcpMetadata, extract};
|
||||
use crate::prelude::*;
|
||||
use crate::util::serde::{HandlerExtSerde, Pem};
|
||||
use crate::util::serde::Pem;
|
||||
|
||||
pub fn should_use_cert(cert: &X509Ref) -> Result<bool, ErrorStack> {
|
||||
Ok(cert
|
||||
@@ -596,85 +592,6 @@ pub fn make_self_signed(applicant: (&PKey<Private>, &SANInfo)) -> Result<X509, E
|
||||
Ok(cert)
|
||||
}
|
||||
|
||||
pub fn ssl_api<C: Context>() -> ParentHandler<C> {
|
||||
ParentHandler::new().subcommand(
|
||||
"generate-certificate",
|
||||
from_fn_async(generate_certificate)
|
||||
.with_display_serializable()
|
||||
.with_custom_display_fn(|_, res: GenerateCertificateResponse| {
|
||||
println!("Private Key:");
|
||||
print!("{}", res.key);
|
||||
println!("\nCertificate Chain:");
|
||||
print!("{}", res.fullchain);
|
||||
Ok(())
|
||||
})
|
||||
.with_about("about.ssl-generate-certificate")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, Parser, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[command(rename_all = "kebab-case")]
|
||||
#[group(skip)]
|
||||
#[ts(export)]
|
||||
pub struct GenerateCertificateParams {
|
||||
#[arg(help = "help.arg.hostnames")]
|
||||
pub hostnames: Vec<String>,
|
||||
#[arg(long, help = "help.arg.ed25519")]
|
||||
#[serde(default)]
|
||||
pub ed25519: bool,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Deserialize, Serialize, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export)]
|
||||
pub struct GenerateCertificateResponse {
|
||||
pub key: String,
|
||||
pub fullchain: String,
|
||||
}
|
||||
|
||||
pub async fn generate_certificate(
|
||||
ctx: RpcContext,
|
||||
GenerateCertificateParams { hostnames, ed25519 }: GenerateCertificateParams,
|
||||
) -> Result<GenerateCertificateResponse, Error> {
|
||||
let peek = ctx.db.peek().await;
|
||||
let cert_store = peek.as_private().as_key_store().as_local_certs();
|
||||
let int_key = cert_store.as_int_key().de()?.0;
|
||||
let int_cert = cert_store.as_int_cert().de()?.0;
|
||||
let root_cert = cert_store.as_root_cert().de()?.0;
|
||||
drop(peek);
|
||||
|
||||
let hostnames: BTreeSet<InternedString> = hostnames.into_iter().map(InternedString::from).collect();
|
||||
let san_info = SANInfo::new(&hostnames);
|
||||
|
||||
let (key, cert) = if ed25519 {
|
||||
let key = PKey::generate_ed25519()?;
|
||||
let cert = make_leaf_cert((&int_key, &int_cert), (&key, &san_info))?;
|
||||
(key, cert)
|
||||
} else {
|
||||
let key = gen_nistp256()?;
|
||||
let cert = make_leaf_cert((&int_key, &int_cert), (&key, &san_info))?;
|
||||
(key, cert)
|
||||
};
|
||||
|
||||
let key_pem =
|
||||
String::from_utf8(key.private_key_to_pem_pkcs8()?).with_kind(ErrorKind::Utf8)?;
|
||||
let fullchain_pem = String::from_utf8(
|
||||
[&cert, &int_cert, &root_cert]
|
||||
.into_iter()
|
||||
.map(|c| c.to_pem())
|
||||
.collect::<Result<Vec<_>, _>>()?
|
||||
.concat(),
|
||||
)
|
||||
.with_kind(ErrorKind::Utf8)?;
|
||||
|
||||
Ok(GenerateCertificateResponse {
|
||||
key: key_pem,
|
||||
fullchain: fullchain_pem,
|
||||
})
|
||||
}
|
||||
|
||||
pub struct RootCaTlsHandler<M: HasModel> {
|
||||
pub db: TypedPatchDb<M>,
|
||||
pub crypto_provider: Arc<CryptoProvider>,
|
||||
|
||||
@@ -100,6 +100,17 @@ impl UiContext for RpcContext {
|
||||
})
|
||||
})
|
||||
.nest("/s9pk", s9pk_router(self.clone()))
|
||||
.route("/mcp", crate::mcp::mcp_router(self.clone()))
|
||||
.route(
|
||||
"/.well-known/mcp",
|
||||
get(|| async {
|
||||
Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header(CONTENT_TYPE, "application/json")
|
||||
.body(Body::from(r#"{"mcp_endpoint":"/mcp"}"#))
|
||||
.unwrap()
|
||||
}),
|
||||
)
|
||||
.route(
|
||||
"/static/local-root-ca.crt",
|
||||
get(move || {
|
||||
|
||||
@@ -126,28 +126,24 @@ pub fn vhost_api<C: Context>() -> ParentHandler<C> {
|
||||
|
||||
Ok(())
|
||||
})
|
||||
.with_about("about.dump-vhost-proxy-table")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
.subcommand(
|
||||
"add-passthrough",
|
||||
from_fn_async(add_passthrough)
|
||||
.no_display()
|
||||
.with_about("about.add-vhost-passthrough")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
.subcommand(
|
||||
"remove-passthrough",
|
||||
from_fn_async(remove_passthrough)
|
||||
.no_display()
|
||||
.with_about("about.remove-vhost-passthrough")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
.subcommand(
|
||||
"list-passthrough",
|
||||
from_fn(list_passthrough)
|
||||
.with_display_serializable()
|
||||
.with_about("about.list-vhost-passthrough")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -180,14 +180,19 @@ pub async fn set_enabled(
|
||||
.invoke(ErrorKind::Wifi)
|
||||
.await?;
|
||||
}
|
||||
let iface = if let Some(man) = ctx.wifi_manager.read().await.as_ref().filter(|_| enabled) {
|
||||
Some(man.interface.clone())
|
||||
} else {
|
||||
None
|
||||
};
|
||||
ctx.db
|
||||
.mutate(|d| {
|
||||
d.as_public_mut()
|
||||
.as_server_info_mut()
|
||||
.as_network_mut()
|
||||
.as_wifi_mut()
|
||||
.as_enabled_mut()
|
||||
.ser(&enabled)
|
||||
.as_interface_mut()
|
||||
.ser(&iface)
|
||||
})
|
||||
.await
|
||||
.result?;
|
||||
|
||||
@@ -123,7 +123,7 @@ impl RegistryContext {
|
||||
.tor_proxy
|
||||
.clone()
|
||||
.map(Ok)
|
||||
.unwrap_or_else(|| "socks5h://tor.startos:9050".parse())?;
|
||||
.unwrap_or_else(|| "socks5h://localhost:9050".parse())?;
|
||||
let pool: Option<PgPool> = match &config.pg_connection_url {
|
||||
Some(url) => match PgPool::connect(url.as_str()).await {
|
||||
Ok(pool) => Some(pool),
|
||||
@@ -142,7 +142,7 @@ impl RegistryContext {
|
||||
listen: config.registry_listen.unwrap_or(DEFAULT_REGISTRY_LISTEN),
|
||||
db,
|
||||
datadir,
|
||||
rpc_continuations: RpcContinuations::new(),
|
||||
rpc_continuations: RpcContinuations::new(None),
|
||||
client: Client::builder()
|
||||
.proxy(Proxy::custom(move |url| {
|
||||
if url.host_str().map_or(false, |h| h.ends_with(".onion")) {
|
||||
|
||||
@@ -79,10 +79,7 @@ pub fn registry_api<C: Context>() -> ParentHandler<C> {
|
||||
.with_about("about.list-registry-info-packages")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
.subcommand(
|
||||
"info",
|
||||
info::info_api::<C>().with_about("about.commands-registry-info"),
|
||||
)
|
||||
.subcommand("info", info::info_api::<C>())
|
||||
// set info and categories
|
||||
.subcommand(
|
||||
"os",
|
||||
|
||||
@@ -56,7 +56,6 @@ pub fn category_api<C: Context>() -> ParentHandler<C> {
|
||||
.with_custom_display_fn(|params, categories| {
|
||||
display_categories(params.params, categories)
|
||||
})
|
||||
.with_about("about.list-registry-categories")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
}
|
||||
|
||||
@@ -615,7 +615,6 @@ fn check_matching_info_short() {
|
||||
sdk_version: None,
|
||||
hardware_acceleration: false,
|
||||
plugins: BTreeSet::new(),
|
||||
satisfies: BTreeSet::new(),
|
||||
},
|
||||
icon: DataUrl::from_vec("image/png", vec![]),
|
||||
dependency_metadata: BTreeMap::new(),
|
||||
|
||||
@@ -110,8 +110,6 @@ pub struct PackageMetadata {
|
||||
pub hardware_acceleration: bool,
|
||||
#[serde(default)]
|
||||
pub plugins: BTreeSet<PluginId>,
|
||||
#[serde(default)]
|
||||
pub satisfies: BTreeSet<VersionString>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
|
||||
|
||||
@@ -17,6 +17,7 @@ use ts_rs::TS;
|
||||
|
||||
#[allow(unused_imports)]
|
||||
use crate::prelude::*;
|
||||
use crate::shutdown::Shutdown;
|
||||
use crate::util::future::TimedResource;
|
||||
use crate::util::net::WebSocket;
|
||||
use crate::util::{FromStrParser, new_guid};
|
||||
@@ -98,12 +99,15 @@ pub type RestHandler = Box<dyn FnOnce(Request) -> RestFuture + Send>;
|
||||
|
||||
pub struct WebSocketFuture {
|
||||
kill: Option<broadcast::Receiver<()>>,
|
||||
shutdown: Option<broadcast::Receiver<Option<Shutdown>>>,
|
||||
fut: BoxFuture<'static, ()>,
|
||||
}
|
||||
impl Future for WebSocketFuture {
|
||||
type Output = ();
|
||||
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
|
||||
if self.kill.as_ref().map_or(false, |k| !k.is_empty()) {
|
||||
if self.kill.as_ref().map_or(false, |k| !k.is_empty())
|
||||
|| self.shutdown.as_ref().map_or(false, |s| !s.is_empty())
|
||||
{
|
||||
Poll::Ready(())
|
||||
} else {
|
||||
self.fut.poll_unpin(cx)
|
||||
@@ -138,6 +142,7 @@ impl RpcContinuation {
|
||||
RpcContinuation::WebSocket(TimedResource::new(
|
||||
Box::new(|ws| WebSocketFuture {
|
||||
kill: None,
|
||||
shutdown: None,
|
||||
fut: handler(ws.into()).boxed(),
|
||||
}),
|
||||
timeout,
|
||||
@@ -170,6 +175,7 @@ impl RpcContinuation {
|
||||
RpcContinuation::WebSocket(TimedResource::new(
|
||||
Box::new(|ws| WebSocketFuture {
|
||||
kill,
|
||||
shutdown: None,
|
||||
fut: handler(ws.into()).boxed(),
|
||||
}),
|
||||
timeout,
|
||||
@@ -183,15 +189,21 @@ impl RpcContinuation {
|
||||
}
|
||||
}
|
||||
|
||||
pub struct RpcContinuations(AsyncMutex<BTreeMap<Guid, RpcContinuation>>);
|
||||
pub struct RpcContinuations {
|
||||
continuations: AsyncMutex<BTreeMap<Guid, RpcContinuation>>,
|
||||
shutdown: Option<broadcast::Sender<Option<Shutdown>>>,
|
||||
}
|
||||
impl RpcContinuations {
|
||||
pub fn new() -> Self {
|
||||
RpcContinuations(AsyncMutex::new(BTreeMap::new()))
|
||||
pub fn new(shutdown: Option<broadcast::Sender<Option<Shutdown>>>) -> Self {
|
||||
RpcContinuations {
|
||||
continuations: AsyncMutex::new(BTreeMap::new()),
|
||||
shutdown,
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip_all)]
|
||||
pub async fn clean(&self) {
|
||||
let mut continuations = self.0.lock().await;
|
||||
let mut continuations = self.continuations.lock().await;
|
||||
let mut to_remove = Vec::new();
|
||||
for (guid, cont) in &*continuations {
|
||||
if cont.is_timed_out() {
|
||||
@@ -206,23 +218,28 @@ impl RpcContinuations {
|
||||
#[instrument(skip_all)]
|
||||
pub async fn add(&self, guid: Guid, handler: RpcContinuation) {
|
||||
self.clean().await;
|
||||
self.0.lock().await.insert(guid, handler);
|
||||
self.continuations.lock().await.insert(guid, handler);
|
||||
}
|
||||
|
||||
pub async fn get_ws_handler(&self, guid: &Guid) -> Option<WebSocketHandler> {
|
||||
let mut continuations = self.0.lock().await;
|
||||
let mut continuations = self.continuations.lock().await;
|
||||
if !matches!(continuations.get(guid), Some(RpcContinuation::WebSocket(_))) {
|
||||
return None;
|
||||
}
|
||||
let Some(RpcContinuation::WebSocket(x)) = continuations.remove(guid) else {
|
||||
return None;
|
||||
};
|
||||
x.get().await
|
||||
let handler = x.get().await?;
|
||||
let shutdown = self.shutdown.as_ref().map(|s| s.subscribe());
|
||||
Some(Box::new(move |ws| {
|
||||
let mut fut = handler(ws);
|
||||
fut.shutdown = shutdown;
|
||||
fut
|
||||
}))
|
||||
}
|
||||
|
||||
pub async fn get_rest_handler(&self, guid: &Guid) -> Option<RestHandler> {
|
||||
let mut continuations: tokio::sync::MutexGuard<'_, BTreeMap<Guid, RpcContinuation>> =
|
||||
self.0.lock().await;
|
||||
let mut continuations = self.continuations.lock().await;
|
||||
if !matches!(continuations.get(guid), Some(RpcContinuation::Rest(_))) {
|
||||
return None;
|
||||
}
|
||||
|
||||
@@ -197,6 +197,7 @@ impl TryFrom<ManifestV1> for Manifest {
|
||||
Ok(Self {
|
||||
id: value.id,
|
||||
version: version.into(),
|
||||
satisfies: BTreeSet::new(),
|
||||
can_migrate_from: VersionRange::any(),
|
||||
can_migrate_to: VersionRange::none(),
|
||||
metadata: PackageMetadata {
|
||||
@@ -218,7 +219,6 @@ impl TryFrom<ManifestV1> for Manifest {
|
||||
PackageProcedure::Script(_) => false,
|
||||
},
|
||||
plugins: BTreeSet::new(),
|
||||
satisfies: BTreeSet::new(),
|
||||
},
|
||||
images: BTreeMap::new(),
|
||||
volumes: value
|
||||
|
||||
@@ -32,6 +32,7 @@ pub(crate) fn current_version() -> Version {
|
||||
pub struct Manifest {
|
||||
pub id: PackageId,
|
||||
pub version: VersionString,
|
||||
pub satisfies: BTreeSet<VersionString>,
|
||||
#[ts(type = "string")]
|
||||
pub can_migrate_to: VersionRange,
|
||||
#[ts(type = "string")]
|
||||
|
||||
@@ -358,7 +358,7 @@ pub async fn check_dependencies(
|
||||
};
|
||||
let manifest = package.as_state_info().as_manifest(ManifestPreference::New);
|
||||
let installed_version = manifest.as_version().de()?.into_version();
|
||||
let satisfies = manifest.as_metadata().as_satisfies().de()?;
|
||||
let satisfies = manifest.as_satisfies().de()?;
|
||||
let installed_version = Some(installed_version.clone().into());
|
||||
let is_running = package
|
||||
.as_status_info()
|
||||
|
||||
@@ -134,7 +134,8 @@ pub async fn list_service_interfaces(
|
||||
.expect("valid json pointer");
|
||||
let mut watch = context.seed.ctx.db.watch(ptr).await;
|
||||
|
||||
let res = from_value(watch.peek_and_mark_seen()?)?;
|
||||
let res = imbl_value::from_value(watch.peek_and_mark_seen()?)
|
||||
.unwrap_or_default();
|
||||
|
||||
if let Some(callback) = callback {
|
||||
let callback = callback.register(&context.seed.persistent_container);
|
||||
@@ -173,7 +174,9 @@ pub async fn clear_service_interfaces(
|
||||
.as_idx_mut(&package_id)
|
||||
.or_not_found(&package_id)?
|
||||
.as_service_interfaces_mut()
|
||||
.mutate(|s| Ok(s.retain(|id, _| except.contains(id))))
|
||||
.mutate(|s| {
|
||||
Ok(s.retain(|id, _| except.contains(id)))
|
||||
})
|
||||
})
|
||||
.await
|
||||
.result?;
|
||||
|
||||
@@ -269,13 +269,6 @@ impl ExecParams {
|
||||
|
||||
std::os::unix::fs::chroot(chroot)
|
||||
.with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("chroot {chroot:?}")))?;
|
||||
if let Ok(uid) = uid {
|
||||
if uid != 0 {
|
||||
std::os::unix::fs::chown("/proc/self/fd/0", Some(uid), gid.ok()).ok();
|
||||
std::os::unix::fs::chown("/proc/self/fd/1", Some(uid), gid.ok()).ok();
|
||||
std::os::unix::fs::chown("/proc/self/fd/2", Some(uid), gid.ok()).ok();
|
||||
}
|
||||
}
|
||||
// Handle credential changes in pre_exec to control the order:
|
||||
// setgroups must happen before setgid/setuid (requires CAP_SETGID)
|
||||
{
|
||||
@@ -283,16 +276,10 @@ impl ExecParams {
|
||||
let set_gid = gid.ok();
|
||||
unsafe {
|
||||
cmd.pre_exec(move || {
|
||||
// Create a new session so entrypoint scripts that do
|
||||
// Create a new process group so entrypoint scripts that do
|
||||
// kill(0, SIGTERM) don't cascade to other subcontainers.
|
||||
// EPERM means we're already a session leader (e.g. pty_process
|
||||
// called setsid() for us), which is fine.
|
||||
match nix::unistd::setsid() {
|
||||
Ok(_) | Err(Errno::EPERM) => {}
|
||||
Err(e) => {
|
||||
return Err(std::io::Error::from_raw_os_error(e as i32));
|
||||
}
|
||||
}
|
||||
nix::unistd::setsid()
|
||||
.map_err(|e| std::io::Error::from_raw_os_error(e as i32))?;
|
||||
if !groups.is_empty() {
|
||||
nix::unistd::setgroups(&groups)
|
||||
.map_err(|e| std::io::Error::from_raw_os_error(e as i32))?;
|
||||
|
||||
@@ -91,11 +91,20 @@ pub async fn get_data_version(id: &PackageId) -> Result<Option<String>, Error> {
|
||||
.join(id)
|
||||
.join("data")
|
||||
.join(".version");
|
||||
let s = maybe_read_file_to_string(&path).await?;
|
||||
Ok(s.map(|s| s.trim().to_string()))
|
||||
maybe_read_file_to_string(&path).await
|
||||
}
|
||||
|
||||
struct RootCommand(pub String);
|
||||
pub(crate) struct RootCommand(pub String);
|
||||
|
||||
/// Resolved subcontainer info, ready for command construction.
|
||||
pub(crate) struct ResolvedSubcontainer {
|
||||
pub container_id: ContainerId,
|
||||
pub subcontainer_id: Guid,
|
||||
pub image_id: ImageId,
|
||||
pub user: InternedString,
|
||||
pub workdir: Option<String>,
|
||||
pub root_command: RootCommand,
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize, Default, TS)]
|
||||
pub struct MiB(pub u64);
|
||||
@@ -726,6 +735,158 @@ impl Service {
|
||||
.clone();
|
||||
Ok(container_id)
|
||||
}
|
||||
|
||||
/// Resolve a subcontainer by optional filters (guid, name, or imageId).
|
||||
/// If no filter is provided and there is exactly one subcontainer, it is returned.
|
||||
/// Errors if no match found or multiple matches found (with the list in error info).
|
||||
pub(crate) async fn resolve_subcontainer(
|
||||
&self,
|
||||
subcontainer: Option<InternedString>,
|
||||
name: Option<InternedString>,
|
||||
image_id: Option<ImageId>,
|
||||
user: Option<InternedString>,
|
||||
) -> Result<ResolvedSubcontainer, Error> {
|
||||
let id = &self.seed.id;
|
||||
let container = &self.seed.persistent_container;
|
||||
let root_dir = container
|
||||
.lxc_container
|
||||
.get()
|
||||
.map(|x| x.rootfs_dir().to_owned())
|
||||
.or_not_found(format!("container for {id}"))?;
|
||||
|
||||
let subcontainer_upper = subcontainer.as_ref().map(|x| AsRef::<str>::as_ref(x).to_uppercase());
|
||||
let name_upper = name.as_ref().map(|x| AsRef::<str>::as_ref(x).to_uppercase());
|
||||
let image_id_upper = image_id.as_ref().map(|x| AsRef::<Path>::as_ref(x).to_string_lossy().to_uppercase());
|
||||
|
||||
let subcontainers = container.subcontainers.lock().await;
|
||||
let matches: Vec<_> = subcontainers
|
||||
.iter()
|
||||
.filter(|(x, wrapper)| {
|
||||
if let Some(sc) = subcontainer_upper.as_ref() {
|
||||
AsRef::<str>::as_ref(x).contains(sc.as_str())
|
||||
} else if let Some(n) = name_upper.as_ref() {
|
||||
AsRef::<str>::as_ref(&wrapper.name)
|
||||
.to_uppercase()
|
||||
.contains(n.as_str())
|
||||
} else if let Some(img) = image_id_upper.as_ref() {
|
||||
let Some(wrapper_image_id) = AsRef::<Path>::as_ref(&wrapper.image_id).to_str()
|
||||
else {
|
||||
return false;
|
||||
};
|
||||
wrapper_image_id.to_uppercase().contains(img.as_str())
|
||||
} else {
|
||||
true
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
|
||||
let Some((subcontainer_id, matched_image_id)) = matches
|
||||
.first()
|
||||
.map::<(Guid, ImageId), _>(|&x| (x.0.clone(), x.1.image_id.clone()))
|
||||
else {
|
||||
drop(subcontainers);
|
||||
let info = container
|
||||
.subcontainers
|
||||
.lock()
|
||||
.await
|
||||
.iter()
|
||||
.map(|(g, s)| SubcontainerInfo {
|
||||
id: g.clone(),
|
||||
name: s.name.clone(),
|
||||
image_id: s.image_id.clone(),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
return Err(Error::new(
|
||||
eyre!("{}", t!("service.mod.no-matching-subcontainers", id = id)),
|
||||
ErrorKind::NotFound,
|
||||
)
|
||||
.with_info(to_value(&info)?));
|
||||
};
|
||||
|
||||
if matches.len() > 1 {
|
||||
let info = matches
|
||||
.into_iter()
|
||||
.map(|(g, s)| SubcontainerInfo {
|
||||
id: g.clone(),
|
||||
name: s.name.clone(),
|
||||
image_id: s.image_id.clone(),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
return Err(Error::new(
|
||||
eyre!("{}", t!("service.mod.multiple-subcontainers-found", id = id,)),
|
||||
ErrorKind::InvalidRequest,
|
||||
)
|
||||
.with_info(to_value(&info)?));
|
||||
}
|
||||
|
||||
let passwd = root_dir
|
||||
.join("media/startos/subcontainers")
|
||||
.join(subcontainer_id.as_ref())
|
||||
.join("etc")
|
||||
.join("passwd");
|
||||
|
||||
let image_meta = serde_json::from_str::<Value>(
|
||||
&tokio::fs::read_to_string(
|
||||
root_dir
|
||||
.join("media/startos/images/")
|
||||
.join(&matched_image_id)
|
||||
.with_extension("json"),
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
.with_kind(ErrorKind::Deserialization)?;
|
||||
|
||||
let resolved_user = user
|
||||
.or_else(|| image_meta["user"].as_str().map(InternedString::intern))
|
||||
.unwrap_or_else(|| InternedString::intern("root"));
|
||||
|
||||
let root_command = get_passwd_command(passwd, &*resolved_user).await;
|
||||
let workdir = image_meta["workdir"].as_str().map(|s| s.to_owned());
|
||||
|
||||
Ok(ResolvedSubcontainer {
|
||||
container_id: self.container_id()?,
|
||||
subcontainer_id,
|
||||
image_id: matched_image_id,
|
||||
user: resolved_user,
|
||||
workdir,
|
||||
root_command,
|
||||
})
|
||||
}
|
||||
|
||||
/// Build a `Command` for executing inside a resolved subcontainer (non-interactive).
|
||||
pub(crate) fn build_subcontainer_command(
|
||||
resolved: &ResolvedSubcontainer,
|
||||
command: &[&str],
|
||||
) -> Command {
|
||||
let root_path =
|
||||
Path::new("/media/startos/subcontainers").join(resolved.subcontainer_id.as_ref());
|
||||
let mut cmd = Command::new("lxc-attach");
|
||||
cmd.kill_on_drop(true);
|
||||
cmd.arg(&*resolved.container_id)
|
||||
.arg("--")
|
||||
.arg("start-container")
|
||||
.arg("subcontainer")
|
||||
.arg("exec")
|
||||
.arg("--env-file")
|
||||
.arg(
|
||||
Path::new("/media/startos/images")
|
||||
.join(&resolved.image_id)
|
||||
.with_extension("env"),
|
||||
)
|
||||
.arg("--user")
|
||||
.arg(&*resolved.user);
|
||||
if let Some(ref workdir) = resolved.workdir {
|
||||
cmd.arg("--workdir").arg(workdir);
|
||||
}
|
||||
cmd.arg(&root_path).arg("--");
|
||||
if command.is_empty() {
|
||||
cmd.arg(&resolved.root_command.0);
|
||||
} else {
|
||||
cmd.args(command);
|
||||
}
|
||||
cmd
|
||||
}
|
||||
|
||||
#[instrument(skip_all)]
|
||||
pub async fn stats(&self) -> Result<ServiceStats, Error> {
|
||||
let container = &self.seed.persistent_container;
|
||||
@@ -820,124 +981,26 @@ pub async fn attach(
|
||||
user,
|
||||
}: AttachParams,
|
||||
) -> Result<Guid, Error> {
|
||||
let (container_id, subcontainer_id, image_id, user, workdir, root_command) = {
|
||||
let id = &id;
|
||||
|
||||
let service = ctx.services.get(id).await;
|
||||
|
||||
let service_ref = service.as_ref().or_not_found(id)?;
|
||||
|
||||
let container = &service_ref.seed.persistent_container;
|
||||
let root_dir = container
|
||||
.lxc_container
|
||||
.get()
|
||||
.map(|x| x.rootfs_dir().to_owned())
|
||||
.or_not_found(format!("container for {id}"))?;
|
||||
|
||||
let subcontainer = subcontainer.map(|x| AsRef::<str>::as_ref(&x).to_uppercase());
|
||||
let name = name.map(|x| AsRef::<str>::as_ref(&x).to_uppercase());
|
||||
let image_id = image_id.map(|x| AsRef::<Path>::as_ref(&x).to_string_lossy().to_uppercase());
|
||||
|
||||
let subcontainers = container.subcontainers.lock().await;
|
||||
let subcontainer_ids: Vec<_> = subcontainers
|
||||
.iter()
|
||||
.filter(|(x, wrapper)| {
|
||||
if let Some(subcontainer) = subcontainer.as_ref() {
|
||||
AsRef::<str>::as_ref(x).contains(AsRef::<str>::as_ref(subcontainer))
|
||||
} else if let Some(name) = name.as_ref() {
|
||||
AsRef::<str>::as_ref(&wrapper.name)
|
||||
.to_uppercase()
|
||||
.contains(AsRef::<str>::as_ref(name))
|
||||
} else if let Some(image_id) = image_id.as_ref() {
|
||||
let Some(wrapper_image_id) = AsRef::<Path>::as_ref(&wrapper.image_id).to_str()
|
||||
else {
|
||||
return false;
|
||||
};
|
||||
wrapper_image_id
|
||||
.to_uppercase()
|
||||
.contains(AsRef::<str>::as_ref(&image_id))
|
||||
} else {
|
||||
true
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
let Some((subcontainer_id, image_id)) = subcontainer_ids
|
||||
.first()
|
||||
.map::<(Guid, ImageId), _>(|&x| (x.0.clone(), x.1.image_id.clone()))
|
||||
else {
|
||||
drop(subcontainers);
|
||||
let subcontainers = container
|
||||
.subcontainers
|
||||
.lock()
|
||||
.await
|
||||
.iter()
|
||||
.map(|(g, s)| SubcontainerInfo {
|
||||
id: g.clone(),
|
||||
name: s.name.clone(),
|
||||
image_id: s.image_id.clone(),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
return Err(Error::new(
|
||||
eyre!("{}", t!("service.mod.no-matching-subcontainers", id = id)),
|
||||
ErrorKind::NotFound,
|
||||
let resolved = {
|
||||
let service = ctx.services.get(&id).await;
|
||||
let service_ref = service.as_ref().or_not_found(&id)?;
|
||||
service_ref
|
||||
.resolve_subcontainer(
|
||||
subcontainer.map(|g| InternedString::intern(g.as_ref())),
|
||||
name,
|
||||
image_id,
|
||||
user,
|
||||
)
|
||||
.with_info(to_value(&subcontainers)?));
|
||||
};
|
||||
|
||||
let passwd = root_dir
|
||||
.join("media/startos/subcontainers")
|
||||
.join(subcontainer_id.as_ref())
|
||||
.join("etc")
|
||||
.join("passwd");
|
||||
|
||||
let image_meta = serde_json::from_str::<Value>(
|
||||
&tokio::fs::read_to_string(
|
||||
root_dir
|
||||
.join("media/startos/images/")
|
||||
.join(&image_id)
|
||||
.with_extension("json"),
|
||||
)
|
||||
.await?,
|
||||
)
|
||||
.with_kind(ErrorKind::Deserialization)?;
|
||||
|
||||
let user = user
|
||||
.clone()
|
||||
.or_else(|| image_meta["user"].as_str().map(InternedString::intern))
|
||||
.unwrap_or_else(|| InternedString::intern("root"));
|
||||
|
||||
let root_command = get_passwd_command(passwd, &*user).await;
|
||||
|
||||
let workdir = image_meta["workdir"].as_str().map(|s| s.to_owned());
|
||||
|
||||
if subcontainer_ids.len() > 1 {
|
||||
let subcontainers = subcontainer_ids
|
||||
.into_iter()
|
||||
.map(|(g, s)| SubcontainerInfo {
|
||||
id: g.clone(),
|
||||
name: s.name.clone(),
|
||||
image_id: s.image_id.clone(),
|
||||
})
|
||||
.collect::<Vec<_>>();
|
||||
return Err(Error::new(
|
||||
eyre!(
|
||||
"{}",
|
||||
t!("service.mod.multiple-subcontainers-found", id = id,)
|
||||
),
|
||||
ErrorKind::InvalidRequest,
|
||||
)
|
||||
.with_info(to_value(&subcontainers)?));
|
||||
}
|
||||
|
||||
(
|
||||
service_ref.container_id()?,
|
||||
subcontainer_id,
|
||||
image_id,
|
||||
user.into(),
|
||||
workdir,
|
||||
root_command,
|
||||
)
|
||||
.await?
|
||||
};
|
||||
let ResolvedSubcontainer {
|
||||
container_id,
|
||||
subcontainer_id,
|
||||
image_id,
|
||||
user,
|
||||
workdir,
|
||||
root_command,
|
||||
} = resolved;
|
||||
|
||||
let guid = Guid::new();
|
||||
async fn handler(
|
||||
|
||||
@@ -30,6 +30,7 @@ impl ServiceActorSeed {
|
||||
ErrorKind::Cancelled,
|
||||
))
|
||||
};
|
||||
let backup_succeeded = res.is_ok();
|
||||
let id = &self.id;
|
||||
self.ctx
|
||||
.db
|
||||
@@ -51,14 +52,16 @@ impl ServiceActorSeed {
|
||||
x => x,
|
||||
})
|
||||
})?;
|
||||
if let Some(progress) = db
|
||||
.as_public_mut()
|
||||
.as_server_info_mut()
|
||||
.as_status_info_mut()
|
||||
.as_backup_progress_mut()
|
||||
.transpose_mut()
|
||||
{
|
||||
progress.insert(id, &BackupProgress { complete: true })?;
|
||||
if backup_succeeded {
|
||||
if let Some(progress) = db
|
||||
.as_public_mut()
|
||||
.as_server_info_mut()
|
||||
.as_status_info_mut()
|
||||
.as_backup_progress_mut()
|
||||
.transpose_mut()
|
||||
{
|
||||
progress.insert(id, &BackupProgress { complete: true })?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
|
||||
@@ -557,39 +557,6 @@ pub async fn execute_inner(
|
||||
hostname: Option<ServerHostnameInfo>,
|
||||
) -> Result<(SetupResult, RpcContext), Error> {
|
||||
let progress = &ctx.progress;
|
||||
|
||||
if !crate::disk::mount::util::is_mountpoint(Path::new(DATA_DIR).join("main")).await? {
|
||||
let mut disk_phase =
|
||||
progress.add_phase(t!("setup.opening-data-drive").into(), Some(10));
|
||||
disk_phase.start();
|
||||
let requires_reboot = crate::disk::main::import(
|
||||
&*guid,
|
||||
DATA_DIR,
|
||||
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
|
||||
RepairStrategy::Aggressive
|
||||
} else {
|
||||
RepairStrategy::Preen
|
||||
},
|
||||
if guid.ends_with("_UNENC") {
|
||||
None
|
||||
} else {
|
||||
Some(DEFAULT_PASSWORD)
|
||||
},
|
||||
Some(progress),
|
||||
)
|
||||
.await?;
|
||||
let _ = ctx.disk_guid.set(guid.clone());
|
||||
crate::util::io::delete_file(REPAIR_DISK_PATH).await?;
|
||||
if requires_reboot.0 {
|
||||
crate::disk::main::export(&*guid, DATA_DIR).await?;
|
||||
return Err(Error::new(
|
||||
eyre!("{}", t!("setup.disk-errors-corrected-restart-required")),
|
||||
ErrorKind::DiskManagement,
|
||||
));
|
||||
}
|
||||
disk_phase.complete();
|
||||
}
|
||||
|
||||
let restore_phase = match recovery_source.as_ref() {
|
||||
Some(RecoverySource::Backup { .. }) => {
|
||||
Some(progress.add_phase(t!("setup.restoring-backup").into(), Some(100)))
|
||||
|
||||
@@ -16,7 +16,6 @@ use ts_rs::TS;
|
||||
|
||||
use crate::bins::set_locale;
|
||||
use crate::context::{CliContext, RpcContext};
|
||||
use crate::db::model::public::RestartReason;
|
||||
use crate::disk::util::{get_available, get_used};
|
||||
use crate::logs::{LogSource, LogsParams, SYSTEM_UNIT};
|
||||
use crate::prelude::*;
|
||||
@@ -352,9 +351,10 @@ pub fn kiosk<C: Context>() -> ParentHandler<C> {
|
||||
from_fn_async(|ctx: RpcContext| async move {
|
||||
ctx.db
|
||||
.mutate(|db| {
|
||||
let server_info = db.as_public_mut().as_server_info_mut();
|
||||
server_info.as_kiosk_mut().ser(&Some(true))?;
|
||||
server_info.as_status_info_mut().as_restart_mut().ser(&Some(RestartReason::Kiosk))
|
||||
db.as_public_mut()
|
||||
.as_server_info_mut()
|
||||
.as_kiosk_mut()
|
||||
.ser(&Some(true))
|
||||
})
|
||||
.await
|
||||
.result?;
|
||||
@@ -369,9 +369,10 @@ pub fn kiosk<C: Context>() -> ParentHandler<C> {
|
||||
from_fn_async(|ctx: RpcContext| async move {
|
||||
ctx.db
|
||||
.mutate(|db| {
|
||||
let server_info = db.as_public_mut().as_server_info_mut();
|
||||
server_info.as_kiosk_mut().ser(&Some(false))?;
|
||||
server_info.as_status_info_mut().as_restart_mut().ser(&Some(RestartReason::Kiosk))
|
||||
db.as_public_mut()
|
||||
.as_server_info_mut()
|
||||
.as_kiosk_mut()
|
||||
.ser(&Some(false))
|
||||
})
|
||||
.await
|
||||
.result?;
|
||||
@@ -1366,11 +1367,10 @@ pub async fn set_language(
|
||||
save_language(&*language).await?;
|
||||
ctx.db
|
||||
.mutate(|db| {
|
||||
let server_info = db.as_public_mut().as_server_info_mut();
|
||||
server_info
|
||||
db.as_public_mut()
|
||||
.as_server_info_mut()
|
||||
.as_language_mut()
|
||||
.ser(&Some(language.clone()))?;
|
||||
server_info.as_status_info_mut().as_restart_mut().ser(&Some(RestartReason::Language))
|
||||
.ser(&Some(language.clone()))
|
||||
})
|
||||
.await
|
||||
.result?;
|
||||
|
||||
@@ -18,10 +18,7 @@ use crate::util::serde::{HandlerExtSerde, display_serializable};
|
||||
|
||||
pub fn tunnel_api<C: Context>() -> ParentHandler<C> {
|
||||
ParentHandler::new()
|
||||
.subcommand(
|
||||
"web",
|
||||
super::web::web_api::<C>().with_about("about.commands-tunnel-web"),
|
||||
)
|
||||
.subcommand("web", super::web::web_api::<C>())
|
||||
.subcommand(
|
||||
"db",
|
||||
super::db::db_api::<C>().with_about("about.commands-interact-with-db-dump-apply"),
|
||||
@@ -72,8 +69,7 @@ pub fn tunnel_api<C: Context>() -> ParentHandler<C> {
|
||||
.no_display()
|
||||
.with_about("about.enable-or-disable-port-forward")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
.with_about("about.commands-port-forward"),
|
||||
),
|
||||
)
|
||||
.subcommand(
|
||||
"restart",
|
||||
@@ -98,8 +94,7 @@ pub fn tunnel_api<C: Context>() -> ParentHandler<C> {
|
||||
.with_display_serializable()
|
||||
.with_about("about.apply-available-update")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
.with_about("about.commands-tunnel-update"),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -178,8 +178,7 @@ pub fn auth_api<C: Context>() -> ParentHandler<C> {
|
||||
})
|
||||
.with_about("about.list-authorized-keys")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
.with_about("about.commands-authorized-keys"),
|
||||
),
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -202,7 +202,7 @@ impl TunnelContext {
|
||||
listen,
|
||||
db,
|
||||
datadir,
|
||||
rpc_continuations: RpcContinuations::new(),
|
||||
rpc_continuations: RpcContinuations::new(None),
|
||||
open_authed_continuations: OpenAuthedContinuations::new(),
|
||||
ephemeral_sessions: SyncMutex::new(Sessions::new()),
|
||||
net_iface,
|
||||
|
||||
@@ -521,7 +521,7 @@ pub async fn init_web(ctx: CliContext) -> Result<(), Error> {
|
||||
.or_not_found("certificate in chain")?;
|
||||
println!("📝 Root CA:");
|
||||
print!("{cert}\n");
|
||||
println!("Follow instructions to trust your Root CA (recommended): https://docs.start9.com/start-tunnel/installing.html#trust-your-root-ca");
|
||||
println!("Follow instructions to trust your Root CA (recommended): https://docs.start9.com/start-tunnel/installing/index.html#trust-your-root-ca");
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
@@ -19,7 +19,6 @@ use ts_rs::TS;
|
||||
|
||||
use crate::PLATFORM;
|
||||
use crate::context::{CliContext, RpcContext};
|
||||
use crate::db::model::public::RestartReason;
|
||||
use crate::notifications::{NotificationLevel, notify};
|
||||
use crate::prelude::*;
|
||||
use crate::progress::{
|
||||
@@ -82,9 +81,8 @@ pub async fn update_system(
|
||||
.into_public()
|
||||
.into_server_info()
|
||||
.into_status_info()
|
||||
.into_restart()
|
||||
.into_updated()
|
||||
.de()?
|
||||
== Some(RestartReason::Update)
|
||||
{
|
||||
return Err(Error::new(
|
||||
eyre!("{}", t!("update.already-updated-restart-required")),
|
||||
@@ -283,18 +281,10 @@ async fn maybe_do_update(
|
||||
|
||||
let start_progress = progress.snapshot();
|
||||
|
||||
ctx.db
|
||||
let status = ctx
|
||||
.db
|
||||
.mutate(|db| {
|
||||
let server_info = db.as_public_mut().as_server_info_mut();
|
||||
|
||||
if server_info.as_status_info().as_restart().de()?.is_some() {
|
||||
return Err(Error::new(
|
||||
eyre!("{}", t!("update.already-updated-restart-required")),
|
||||
crate::ErrorKind::InvalidRequest,
|
||||
));
|
||||
}
|
||||
|
||||
let mut status = server_info.as_status_info().de()?;
|
||||
let mut status = peeked.as_public().as_server_info().as_status_info().de()?;
|
||||
if status.update_progress.is_some() {
|
||||
return Err(Error::new(
|
||||
eyre!("{}", t!("update.already-updating")),
|
||||
@@ -303,12 +293,22 @@ async fn maybe_do_update(
|
||||
}
|
||||
|
||||
status.update_progress = Some(start_progress);
|
||||
server_info.as_status_info_mut().ser(&status)?;
|
||||
Ok(())
|
||||
db.as_public_mut()
|
||||
.as_server_info_mut()
|
||||
.as_status_info_mut()
|
||||
.ser(&status)?;
|
||||
Ok(status)
|
||||
})
|
||||
.await
|
||||
.result?;
|
||||
|
||||
if status.updated {
|
||||
return Err(Error::new(
|
||||
eyre!("{}", t!("update.already-updated-restart-required")),
|
||||
crate::ErrorKind::InvalidRequest,
|
||||
));
|
||||
}
|
||||
|
||||
let progress_task = NonDetachingJoinHandle::from(tokio::spawn(progress.clone().sync_to_db(
|
||||
ctx.db.clone(),
|
||||
|db| {
|
||||
@@ -338,15 +338,10 @@ async fn maybe_do_update(
|
||||
Ok(()) => {
|
||||
ctx.db
|
||||
.mutate(|db| {
|
||||
let server_info = db.as_public_mut().as_server_info_mut();
|
||||
server_info
|
||||
.as_status_info_mut()
|
||||
.as_update_progress_mut()
|
||||
.ser(&None)?;
|
||||
server_info
|
||||
.as_status_info_mut()
|
||||
.as_restart_mut()
|
||||
.ser(&Some(RestartReason::Update))
|
||||
let status_info =
|
||||
db.as_public_mut().as_server_info_mut().as_status_info_mut();
|
||||
status_info.as_update_progress_mut().ser(&None)?;
|
||||
status_info.as_updated_mut().ser(&true)
|
||||
})
|
||||
.await
|
||||
.result?;
|
||||
|
||||
@@ -8,7 +8,7 @@ use tokio::io::AsyncWrite;
|
||||
use tokio::task::JoinHandle;
|
||||
|
||||
const BLOCK_SIZE: usize = 4096;
|
||||
const BUF_CAP: usize = 1024 * 1024; // 1MiB
|
||||
const BUF_CAP: usize = 256 * 1024; // 256KB
|
||||
|
||||
/// Aligned buffer for O_DIRECT I/O.
|
||||
struct AlignedBuf {
|
||||
|
||||
@@ -26,30 +26,6 @@ impl<'a> MakeWriter<'a> for LogFile {
|
||||
struct TeeWriter<'a>(MutexGuard<'a, Option<File>>);
|
||||
impl<'a> Write for TeeWriter<'a> {
|
||||
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
// Blocking file+stderr I/O on a tokio worker thread can
|
||||
// starve the I/O driver (tokio-rs/tokio#4730).
|
||||
// block_in_place tells the runtime to hand off driver
|
||||
// duties before we block. Only available on the
|
||||
// multi-thread runtime; falls back to a direct write on
|
||||
// current-thread runtimes (CLI) or outside a runtime.
|
||||
if matches!(
|
||||
tokio::runtime::Handle::try_current().map(|h| h.runtime_flavor()),
|
||||
Ok(tokio::runtime::RuntimeFlavor::MultiThread),
|
||||
) {
|
||||
tokio::task::block_in_place(|| self.write_inner(buf))
|
||||
} else {
|
||||
self.write_inner(buf)
|
||||
}
|
||||
}
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
if let Some(f) = &mut *self.0 {
|
||||
f.flush()?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
impl<'a> TeeWriter<'a> {
|
||||
fn write_inner(&mut self, buf: &[u8]) -> io::Result<usize> {
|
||||
let n = if let Some(f) = &mut *self.0 {
|
||||
f.write(buf)?
|
||||
} else {
|
||||
@@ -58,6 +34,12 @@ impl<'a> MakeWriter<'a> for LogFile {
|
||||
io::stderr().write_all(&buf[..n])?;
|
||||
Ok(n)
|
||||
}
|
||||
fn flush(&mut self) -> io::Result<()> {
|
||||
if let Some(f) = &mut *self.0 {
|
||||
f.flush()?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
Box::new(TeeWriter(f))
|
||||
} else {
|
||||
|
||||
@@ -62,9 +62,8 @@ mod v0_4_0_alpha_19;
|
||||
mod v0_4_0_alpha_20;
|
||||
mod v0_4_0_alpha_21;
|
||||
mod v0_4_0_alpha_22;
|
||||
mod v0_4_0_alpha_23;
|
||||
|
||||
pub type Current = v0_4_0_alpha_23::Version; // VERSION_BUMP
|
||||
pub type Current = v0_4_0_alpha_22::Version; // VERSION_BUMP
|
||||
|
||||
impl Current {
|
||||
#[instrument(skip(self, db))]
|
||||
@@ -194,8 +193,7 @@ enum Version {
|
||||
V0_4_0_alpha_19(Wrapper<v0_4_0_alpha_19::Version>),
|
||||
V0_4_0_alpha_20(Wrapper<v0_4_0_alpha_20::Version>),
|
||||
V0_4_0_alpha_21(Wrapper<v0_4_0_alpha_21::Version>),
|
||||
V0_4_0_alpha_22(Wrapper<v0_4_0_alpha_22::Version>),
|
||||
V0_4_0_alpha_23(Wrapper<v0_4_0_alpha_23::Version>), // VERSION_BUMP
|
||||
V0_4_0_alpha_22(Wrapper<v0_4_0_alpha_22::Version>), // VERSION_BUMP
|
||||
Other(exver::Version),
|
||||
}
|
||||
|
||||
@@ -260,8 +258,7 @@ impl Version {
|
||||
Self::V0_4_0_alpha_19(v) => DynVersion(Box::new(v.0)),
|
||||
Self::V0_4_0_alpha_20(v) => DynVersion(Box::new(v.0)),
|
||||
Self::V0_4_0_alpha_21(v) => DynVersion(Box::new(v.0)),
|
||||
Self::V0_4_0_alpha_22(v) => DynVersion(Box::new(v.0)),
|
||||
Self::V0_4_0_alpha_23(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP
|
||||
Self::V0_4_0_alpha_22(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP
|
||||
Self::Other(v) => {
|
||||
return Err(Error::new(
|
||||
eyre!("unknown version {v}"),
|
||||
@@ -318,8 +315,7 @@ impl Version {
|
||||
Version::V0_4_0_alpha_19(Wrapper(x)) => x.semver(),
|
||||
Version::V0_4_0_alpha_20(Wrapper(x)) => x.semver(),
|
||||
Version::V0_4_0_alpha_21(Wrapper(x)) => x.semver(),
|
||||
Version::V0_4_0_alpha_22(Wrapper(x)) => x.semver(),
|
||||
Version::V0_4_0_alpha_23(Wrapper(x)) => x.semver(), // VERSION_BUMP
|
||||
Version::V0_4_0_alpha_22(Wrapper(x)) => x.semver(), // VERSION_BUMP
|
||||
Version::Other(x) => x.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -8,120 +8,108 @@ Previous backups are incompatible with v0.4.0. It is strongly recommended that y
|
||||
|
||||
A server is not a toy. It is a critical component of the computing paradigm, and its failure can be catastrophic, resulting in downtime or loss of data. From the beginning, Start9 has taken a "security and reliability first" approach to the development of StartOS, favoring soundness over speed, and prioritizing essential features such as encrypted network connections, simple backups, and a reliable container runtime over nice-to-haves like custom theming and more services.
|
||||
|
||||
Start9 is paving new ground with StartOS, trying to create what most developers and IT professionals thought impossible: an OS and user experience that affords a normal person the same independent control over their data and communications as an experienced Linux sysadmin.
|
||||
Start9 is paving new ground with StartOS, trying to create what most developers and IT professionals thought impossible; namely, an OS and user experience that affords a normal person the same independent control over their data and communications as an experienced Linux sysadmin.
|
||||
|
||||
The difficulty of our endeavor requires making mistakes, and our integrity and dedication to excellence require that we correct them. This means a willingness to discard bad ideas and broken parts, and if absolutely necessary, to tear it all down and start over. That is exactly what we did with StartOS v0.2.0 in 2020. It is what we did with StartOS v0.3.0 in 2022. And we are doing it now with StartOS v0.4.0 in 2026.
|
||||
The difficulty of our endeavor requires making mistakes; and our integrity and dedication to excellence require that we correct them. This means a willingness to discard bad ideas and broken parts, and if absolutely necessary, to tear it all down and start over. That is exactly what we did with StartOS v0.2.0 in 2020. It is what we did with StartOS v0.3.0 in 2022. And we are doing it now with StartOS v0.4.0 in 2026.
|
||||
|
||||
v0.4.0 is a complete rewrite of StartOS — almost nothing survived. After nearly six years of building StartOS, we believe we have finally arrived at the correct architecture and foundation to deliver on the promise of sovereign computing.
|
||||
v0.4.0 is a complete rewrite of StartOS, almost nothing survived. After nearly six years of building StartOS, we believe that we have finally arrived at the correct architecture and foundation that will allow us to deliver on the promise of sovereign computing.
|
||||
|
||||
## Changelog
|
||||
|
||||
### User Experience
|
||||
### New User interface
|
||||
|
||||
#### New User Interface
|
||||
We re-wrote the StartOS UI to be more performant, more intuitive, and better looking on both mobile and desktop. Enjoy.
|
||||
|
||||
The StartOS UI has been rewritten to be more performant, more intuitive, and better looking on both mobile and desktop.
|
||||
### Translations
|
||||
|
||||
#### Internationalization
|
||||
StartOS v0.4.0 supports multiple languages and also makes it easy to add more later on.
|
||||
|
||||
StartOS v0.4.0 and available services now support multiple languages and keyboard layouts.
|
||||
### LXC Container Runtime
|
||||
|
||||
#### Improved Actions
|
||||
Neither Docker nor Podman offer the reliability and flexibility needed for StartOS. Instead, v0.4.0 uses a nested container paradigm based on LXC for the outer container and Linux namespaces for sub containers. This architecture naturally supports multi container setups.
|
||||
|
||||
Actions accept arbitrary form input and return arbitrary responses, replacing the old "Config" and "Properties" concepts, which have been removed. The new Actions API gives package developers the ability to break configuration and properties into smaller, more specific forms — or to exclude them entirely without polluting the UI. Improved form design and new input types round out the experience.
|
||||
### Hardware Acceleration
|
||||
|
||||
#### Progress Reporting
|
||||
Services can take advantage of (and require) the presence of certain hardware modules, such as Nvidia GPUs, for transcoding or inference purposes. For example, StartOS and Ollama can run natively on The Nvidia DGX Spark and take full advantage of the hardware/firmware stack to perform local inference against open source models.
|
||||
|
||||
A new progress reporting API enables package developers to define custom phases and provide real-time progress updates for operations such as installing, updating, or backing up a service.
|
||||
### New S9PK archive format
|
||||
|
||||
#### Email Notifications via SMTP
|
||||
The S9PK archive format has been overhauled to allow for signature verification of partial downloads, and allow direct mounting of container images without unpacking the s9pk.
|
||||
|
||||
You can now add your Gmail, SES, or other SMTP credentials to StartOS to deliver email notifications from StartOS and from installed services that support SMTP.
|
||||
### Improved Actions
|
||||
|
||||
### Networking & Connectivity
|
||||
Actions take arbitrary form input and return arbitrary responses, thus satisfying the needs of both "Config" and "Properties", which have now been removed. The new actions API gives package developers the ability to break up Config and Properties into smaller, more specific formats, or to exclude them entirely without polluting the UI. Improved form design and new input types round out the new actions experience.
|
||||
|
||||
#### LAN Port Forwarding
|
||||
### Squashfs Images for OS Updates
|
||||
|
||||
Perhaps the biggest complaint with prior versions of StartOS was the use of unique `.local` URLs for service interfaces. This has been corrected. Service interfaces are now available on unique ports, supporting non-HTTP traffic on the LAN as well as remote access via VPN.
|
||||
StartOS now uses squashfs images instead of rsync for OS updates. This allows for better update verification and improved reliability.
|
||||
|
||||
#### Gateways
|
||||
### Typescript Package API and SDK
|
||||
|
||||
Gateways connect your server to the Internet, facilitating inbound and outbound traffic. It is now possible to add Wireguard VPN gateways to your server to control how devices outside the LAN connect to your server and how your server connects out to the Internet. Outbound traffic can also be overridden on a per-service basis.
|
||||
Package developers can now take advantage of StartOS APIs using the new start-sdk, available in Typescript. A barebones StartOS package (s9pk) can be produced in minutes with minimal knowledge or skill. More advanced developers can use the SDK to create highly customized user experiences for their service.
|
||||
|
||||
#### Private Domains
|
||||
### Removed PostgresSQL
|
||||
|
||||
A private domain is like your server's `.local` address, except it also works over VPN, and it can be _anything_ — a real domain you control, a made-up domain, or even a domain controlled by someone else.
|
||||
StartOS itself has miniscule data persistence needs. PostgresSQL was overkill and has been removed in favor of lightweight PatchDB.
|
||||
|
||||
Like your local domain, private domains can only be accessed when connected to the same LAN as your server, either physically or via VPN, and they require trusting your server's Root CA.
|
||||
### Sending Emails via SMTP
|
||||
|
||||
#### Public Domains (Clearnet)
|
||||
You can now add your Gmail, SES, or other SMTP credentials to StartOS in order to send deliver email notifications from StartOS and from installed services that support SMTP.
|
||||
|
||||
It is now easy to expose service interfaces to the public Internet on a domain you control. There are two options:
|
||||
|
||||
1. **Open ports on your router.** This option is free and supported by all routers. The drawback is that your home IP address is revealed to anyone accessing an exposed interface.
|
||||
|
||||
2. **Use a Wireguard reverse tunnel**, such as [StartTunnel](#start-tunnel), to proxy web traffic. This option requires renting a $5–$10/month VPS and installing StartTunnel (or similar). The result is a virtual router in the cloud that you can use to expose service interfaces instead of your real router, hiding your IP address from visitors.
|
||||
|
||||
#### Let's Encrypt
|
||||
|
||||
StartOS now supports Let's Encrypt to automatically obtain SSL/TLS certificates for public domains. Visitors to your public websites and APIs will no longer need to download and trust your server's Root CA.
|
||||
|
||||
#### Internal DNS Server
|
||||
|
||||
StartOS runs its own DNS server and automatically adds records for your private domains. You can configure your router or other gateway to use the StartOS DNS server to resolve these domains locally.
|
||||
|
||||
#### Static DNS Servers
|
||||
|
||||
By default, StartOS uses the DNS servers it receives via DHCP from its gateway(s). It is now possible to override these with custom, static DNS servers.
|
||||
|
||||
#### Tor as a Plugin
|
||||
|
||||
With the expanded networking capabilities of StartOS v0.4.0, Tor is now an optional plugin that can be installed from the Marketplace. Users can run their own Tor relay, route outbound connections through Tor, and generate hidden service URLs for any service interface, including vanity addresses.
|
||||
|
||||
#### Tor Address Management
|
||||
|
||||
StartOS v0.4.0 supports adding and removing Tor addresses for both StartOS itself and all service interfaces. You can even provide your own private key instead of using one auto-generated by StartOS, enabling vanity addresses.
|
||||
|
||||
### System & Infrastructure
|
||||
|
||||
#### LXC Container Runtime
|
||||
|
||||
Neither Docker nor Podman offer the reliability and flexibility needed for StartOS. Instead, v0.4.0 uses a nested container paradigm based on LXC for the outer container and Linux namespaces for sub-containers. This architecture naturally supports multi-container setups.
|
||||
|
||||
#### Hardware Acceleration
|
||||
|
||||
Services can take advantage of — and require — the presence of certain hardware modules, such as Nvidia GPUs, for transcoding or inference. For example, StartOS and Ollama can run natively on the Nvidia DGX Spark and take full advantage of its hardware and firmware stack to perform local inference against open source models.
|
||||
|
||||
#### Squashfs Images for OS Updates
|
||||
|
||||
StartOS now uses squashfs images instead of rsync for OS updates, enabling better update verification and improved reliability.
|
||||
|
||||
#### Replaced PostgreSQL with PatchDB
|
||||
|
||||
StartOS itself has minimal data persistence needs. PostgreSQL was overkill and has been replaced with the lightweight PatchDB.
|
||||
|
||||
#### Improved Backups
|
||||
|
||||
The new `start-fs` FUSE module unifies filesystem expectations across platforms, enabling more reliable backups. The system now defaults to rsync differential backups instead of incremental backups, which is both faster and more space-efficient — files deleted from the server are also deleted from the backup.
|
||||
|
||||
#### SSH Password Authentication
|
||||
### SSH password auth
|
||||
|
||||
You can now SSH into your server using your master password. SSH public key authentication is still supported as well.
|
||||
|
||||
### Developer Experience
|
||||
### Tor Address Management
|
||||
|
||||
#### New S9PK Archive Format
|
||||
StartOS v0.4.0 supports adding and removing Tor addresses for StartOS and all service interfaces. You can even provide your own private key instead of using one auto-generated by StartOS. This has the added benefit of permitting vanity addresses.
|
||||
|
||||
The S9PK archive format has been overhauled to support signature verification of partial downloads and direct mounting of container images without unpacking the archive.
|
||||
### Progress Reporting
|
||||
|
||||
#### TypeScript Package API and SDK
|
||||
A new progress reporting API enabled package developers to create unique phases and provide real-time progress reporting for actions such as installing, updating, or backing up a service.
|
||||
|
||||
Package developers can now interact with StartOS APIs using the new `start-sdk`, available in TypeScript. A barebones StartOS package (S9PK) can be produced in minutes with minimal knowledge or skill. More advanced developers can use the SDK to create highly customized user experiences for their services.
|
||||
### Registry Protocol
|
||||
|
||||
#### Registry Protocol
|
||||
The new registry protocol bifurcates package indexing (listing/validating) and package hosting (downloading). Registries are now simple indexes of packages that reference binaries hosted in arbitrary locations, locally or externally. For example, when someone visits the Start9 Registry, the curated list of packages comes from Start9. But when someone installs a listed service, the package binary is being downloaded from Github. The registry also validates the binary. This makes it much easier to host a custom registry, since it is just a curated list of services tat reference package binaries hosted on Github or elsewhere.
|
||||
|
||||
The new registry protocol separates package indexing (listing and validation) from package hosting (downloading). Registries are now simple indexes that reference binaries hosted in arbitrary locations, locally or externally. For example, when someone visits the Start9 Registry, the curated list of packages comes from Start9, but when they install a service, the binary is downloaded from GitHub. The registry also validates the binary. This makes it much easier to host a custom registry, since it is just a curated list of services that reference package binaries hosted on GitHub or elsewhere.
|
||||
### LAN port forwarding
|
||||
|
||||
#### Exver and Service Flavors
|
||||
Perhaps the biggest complaint with prior version of StartOS was use of unique .local URLs for service interfaces. This has been corrected. Service interfaces are now available on unique ports, allowing for non-http traffic on the LAN as well as remote access via VPN.
|
||||
|
||||
StartOS now uses Extended Versioning (Exver), which consists of three parts: (1) a semver-compliant upstream version, (2) a semver-compliant wrapper version, and (3) an optional "flavor" prefix. Flavors are alternative implementations of a service where a user would typically want only one installed, and data can be migrated between them. Flavors commonly satisfy the same dependency API for downstream packages, though this is not strictly required. A valid Exver looks like: `#knots:29.0:1.0-beta.1` — the first beta release of StartOS wrapper version 1.0 of Bitcoin Knots version 29.0.
|
||||
### Improved Backups
|
||||
|
||||
The new start-fs fuse module unifies file system expectations for various platforms, enabling more reliable backups. The new system also defaults to using rsync differential backups instead of incremental backups, which is faster and saves on disk space by also deleting from the backup files that were deleted from the server.
|
||||
|
||||
### Exver
|
||||
|
||||
StartOS now uses Extended Versioning (Exver), which consists of three parts: (1) a Semver-compliant upstream version, (2) a Semver-compliant wrapper version, and (3) an optional "flavor" prefix. Flavors can be thought of as alternative implementations of services, where a user would only want one or the other installed, and data can feasibly be migrating between the two. Another common characteristic of flavors is that they satisfy the same API requirement of dependents, though this is not strictly necessary. A valid Exver looks something like this: `#knots:29.0:1.0-beta.1`. This would translate to "the first beta release of StartOS wrapper version 1.0 of Bitcoin Knots version 29.0".
|
||||
|
||||
### Let's Encrypt
|
||||
|
||||
StartOS now supports Let's Encrypt to automatically obtain SSL/TLS certificates for public domains. This means people visiting your public websites and APIs will not need to download and trust your server's Root CA.
|
||||
|
||||
### Gateways
|
||||
|
||||
Gateways connect your server to the Internet, facilitating inbound and outbound traffic. Your router is a gateway. It is now possible to add Wireguard VPN gateways to your server to control how devices outside the LAN connect to your server and how your server connects out to the Internet.
|
||||
|
||||
### Static DNS Servers
|
||||
|
||||
By default, StartOS uses the DNS servers it receives via DHCP from its gateway(s). It is now possible to override these DNS servers with custom, static ones.
|
||||
|
||||
### Internal DNS Server
|
||||
|
||||
StartOS runs its own DNS server and automatically adds records for your private domains. You can update your router or other gateway to use StartOS DNS server in order to resolve these domains locally.
|
||||
|
||||
### Private Domains
|
||||
|
||||
A private domain is like to your server's .local, except it also works for VPN connectivity, and it can be _anything_. It can be a real domain you control, a made up domain, or even a domain controlled by someone else.
|
||||
|
||||
Similar to your local domain, private domains can only be accessed when connected to the same LAN as your server, either physically or via VPN, and they require trusting your server's Root CA.
|
||||
|
||||
### Public Domains (Clearnet)
|
||||
|
||||
It is now easy to expose service interfaces to the public Internet on a public domain you control. There are two options, both of which are easy to accomplish:
|
||||
|
||||
1. Open ports on your router. This option is free and supported by all routers. The drawback is that your home IP address is revealed to anyone accessing an exposed interface.
|
||||
|
||||
2. Use a Wireguard reverse tunnel, such as [StartTunnel](#start-tunnel) to proxy web traffic. This option requires renting a $5-$10/month VPS and installing StartTunnel (or similar). The result is a new gateway, a virtual router in the cloud, that you can use to expose service interfaces instead of your real router, thereby hiding your IP address from visitors.
|
||||
|
||||
@@ -27,7 +27,6 @@ use crate::net::keys::KeyStore;
|
||||
use crate::notifications::Notifications;
|
||||
use crate::prelude::*;
|
||||
use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile;
|
||||
use crate::s9pk::v2::pack::CONTAINER_TOOL;
|
||||
use crate::ssh::{SshKeys, SshPubKey};
|
||||
use crate::util::Invoke;
|
||||
use crate::util::serde::Pem;
|
||||
@@ -40,102 +39,6 @@ lazy_static::lazy_static! {
|
||||
);
|
||||
}
|
||||
|
||||
/// Detect the LC_COLLATE / LC_CTYPE the cluster was created with and generate
|
||||
/// those locales if they are missing from the running system. Older installs
|
||||
/// may have been initialized with a locale (e.g. en_GB.UTF-8) that the current
|
||||
/// image does not ship. Without it PostgreSQL starts but refuses
|
||||
/// connections, breaking the migration.
|
||||
async fn ensure_cluster_locale(pg_version: u32) -> Result<(), Error> {
|
||||
let cluster_dir = format!("/var/lib/postgresql/{pg_version}/main");
|
||||
let pg_controldata = format!("/usr/lib/postgresql/{pg_version}/bin/pg_controldata");
|
||||
|
||||
let output = Command::new(&pg_controldata)
|
||||
.arg(&cluster_dir)
|
||||
.kill_on_drop(true)
|
||||
.stdout(std::process::Stdio::piped())
|
||||
.stderr(std::process::Stdio::piped())
|
||||
.output()
|
||||
.await
|
||||
.with_kind(crate::ErrorKind::Database)?;
|
||||
if !output.status.success() {
|
||||
let stderr = String::from_utf8_lossy(&output.stderr);
|
||||
tracing::warn!("pg_controldata failed, skipping locale check: {stderr}");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let stdout = String::from_utf8_lossy(&output.stdout);
|
||||
let mut locales_needed = Vec::new();
|
||||
for line in stdout.lines() {
|
||||
let locale = if let Some(rest) = line.strip_prefix("LC_COLLATE:") {
|
||||
rest.trim()
|
||||
} else if let Some(rest) = line.strip_prefix("LC_CTYPE:") {
|
||||
rest.trim()
|
||||
} else {
|
||||
continue;
|
||||
};
|
||||
if !locale.is_empty() && locale != "C" && locale != "POSIX" {
|
||||
locales_needed.push(locale.to_owned());
|
||||
}
|
||||
}
|
||||
locales_needed.sort();
|
||||
locales_needed.dedup();
|
||||
|
||||
if locales_needed.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Check which locales are already available.
|
||||
let available = Command::new("locale")
|
||||
.arg("-a")
|
||||
.kill_on_drop(true)
|
||||
.stdout(std::process::Stdio::piped())
|
||||
.stderr(std::process::Stdio::null())
|
||||
.output()
|
||||
.await
|
||||
.map(|o| String::from_utf8_lossy(&o.stdout).to_string())
|
||||
.unwrap_or_default();
|
||||
|
||||
let mut need_gen = false;
|
||||
for locale in &locales_needed {
|
||||
// locale -a normalizes e.g. "en_GB.UTF-8" → "en_GB.utf8"
|
||||
let normalized = locale.replace("-", "").to_lowercase();
|
||||
if available.lines().any(|l| l.replace("-", "").to_lowercase() == normalized) {
|
||||
continue;
|
||||
}
|
||||
// Debian's locale-gen ignores positional args — the locale must be
|
||||
// uncommented in /etc/locale.gen or appended to it.
|
||||
tracing::info!("Enabling missing locale for PostgreSQL cluster: {locale}");
|
||||
let locale_gen_path = Path::new("/etc/locale.gen");
|
||||
let contents = tokio::fs::read_to_string(locale_gen_path)
|
||||
.await
|
||||
.unwrap_or_default();
|
||||
// Try to uncomment an existing entry first, otherwise append.
|
||||
let entry = format!("{locale} UTF-8");
|
||||
let commented = format!("# {entry}");
|
||||
if contents.contains(&commented) {
|
||||
let updated = contents.replace(&commented, &entry);
|
||||
tokio::fs::write(locale_gen_path, updated).await?;
|
||||
} else if !contents.contains(&entry) {
|
||||
use tokio::io::AsyncWriteExt;
|
||||
let mut f = tokio::fs::OpenOptions::new()
|
||||
.create(true)
|
||||
.append(true)
|
||||
.open(locale_gen_path)
|
||||
.await?;
|
||||
f.write_all(format!("\n{entry}\n").as_bytes()).await?;
|
||||
}
|
||||
need_gen = true;
|
||||
}
|
||||
|
||||
if need_gen {
|
||||
Command::new("locale-gen")
|
||||
.invoke(crate::ErrorKind::Database)
|
||||
.await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip_all)]
|
||||
async fn init_postgres(datadir: impl AsRef<Path>) -> Result<PgPool, Error> {
|
||||
let db_dir = datadir.as_ref().join("main/postgresql");
|
||||
@@ -187,12 +90,6 @@ async fn init_postgres(datadir: impl AsRef<Path>) -> Result<PgPool, Error> {
|
||||
|
||||
crate::disk::mount::util::bind(&db_dir, "/var/lib/postgresql", false).await?;
|
||||
|
||||
// The cluster may have been created with a locale not present on the
|
||||
// current image (e.g. en_GB.UTF-8 on a server that predates the trixie
|
||||
// image). Detect and generate it before starting PostgreSQL, otherwise
|
||||
// PG will start but refuse connections.
|
||||
ensure_cluster_locale(pg_version).await?;
|
||||
|
||||
Command::new("systemctl")
|
||||
.arg("start")
|
||||
.arg(format!("postgresql@{pg_version}-main.service"))
|
||||
@@ -246,11 +143,12 @@ pub struct Version;
|
||||
|
||||
impl VersionT for Version {
|
||||
type Previous = v0_3_5_2::Version;
|
||||
/// (package_id, host_id, expanded_key)
|
||||
type PreUpRes = (
|
||||
AccountInfo,
|
||||
SshKeys,
|
||||
CifsTargets,
|
||||
BTreeMap<(String, String), [u8; 64]>,
|
||||
Vec<(String, String, [u8; 64])>,
|
||||
);
|
||||
fn semver(self) -> exver::Version {
|
||||
V0_3_6_alpha_0.clone()
|
||||
@@ -352,7 +250,7 @@ impl VersionT for Version {
|
||||
let mut onion_map: Value = json!({});
|
||||
let onion_obj = onion_map.as_object_mut().unwrap();
|
||||
let mut tor_migration = imbl::Vector::<Value>::new();
|
||||
for ((package_id, host_id), key_bytes) in &tor_keys {
|
||||
for (package_id, host_id, key_bytes) in &tor_keys {
|
||||
let onion_addr = onion_address_from_key(key_bytes);
|
||||
let encoded_key =
|
||||
base64::Engine::encode(&crate::util::serde::BASE64, key_bytes);
|
||||
@@ -428,41 +326,7 @@ impl VersionT for Version {
|
||||
.await?;
|
||||
}
|
||||
|
||||
// Load bundled migration images (start9/compat, start9/utils,
|
||||
// tonistiigi/binfmt) so the v1->v2 s9pk conversion doesn't need
|
||||
// internet access.
|
||||
let migration_images_dir = Path::new("/usr/lib/startos/migration-images");
|
||||
if let Ok(mut entries) = tokio::fs::read_dir(migration_images_dir).await {
|
||||
while let Some(entry) = entries.next_entry().await? {
|
||||
let path = entry.path();
|
||||
if path.extension() == Some(OsStr::new("tar")) {
|
||||
tracing::info!("Loading migration image: {}", path.display());
|
||||
Command::new(*CONTAINER_TOOL)
|
||||
.arg("load")
|
||||
.arg("-i")
|
||||
.arg(&path)
|
||||
.invoke(crate::ErrorKind::Docker)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Should be the name of the package
|
||||
let current_package: std::sync::Arc<tokio::sync::watch::Sender<Option<PackageId>>> =
|
||||
std::sync::Arc::new(tokio::sync::watch::channel(None).0);
|
||||
let progress_logger = {
|
||||
let current_package = current_package.clone();
|
||||
tokio::spawn(async move {
|
||||
let mut interval = tokio::time::interval(std::time::Duration::from_secs(30));
|
||||
interval.tick().await; // skip immediate first tick
|
||||
loop {
|
||||
interval.tick().await;
|
||||
if let Some(ref id) = *current_package.borrow() {
|
||||
tracing::info!("{}", t!("migration.migrating-package", package = id.to_string()));
|
||||
}
|
||||
}
|
||||
})
|
||||
};
|
||||
let mut paths = tokio::fs::read_dir(path).await?;
|
||||
while let Some(path) = paths.next_entry().await? {
|
||||
let Ok(id) = path.file_name().to_string_lossy().parse::<PackageId>() else {
|
||||
@@ -503,9 +367,6 @@ impl VersionT for Version {
|
||||
false
|
||||
};
|
||||
|
||||
tracing::info!("{}", t!("migration.migrating-package", package = id.to_string()));
|
||||
current_package.send_replace(Some(id.clone()));
|
||||
|
||||
if let Err(e) = async {
|
||||
let package_s9pk = tokio::fs::File::open(path).await?;
|
||||
let file = MultiCursorFile::open(&package_s9pk).await?;
|
||||
@@ -550,7 +411,6 @@ impl VersionT for Version {
|
||||
}
|
||||
}
|
||||
}
|
||||
progress_logger.abort();
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -674,16 +534,13 @@ async fn previous_ssh_keys(pg: &sqlx::Pool<sqlx::Postgres>) -> Result<SshKeys, E
|
||||
Ok(ssh_keys)
|
||||
}
|
||||
|
||||
/// Returns deduplicated map of `(package_id, host_id) -> expanded_key`.
|
||||
/// Returns `Vec<(package_id, host_id, expanded_key)>`.
|
||||
/// Server key uses `("STARTOS", "STARTOS")`.
|
||||
/// When the same (package, interface) exists in both the `network_keys` and
|
||||
/// `tor` tables, the `tor` table entry wins because it contains the actual
|
||||
/// expanded key that was used by tor.
|
||||
#[tracing::instrument(skip_all)]
|
||||
async fn previous_tor_keys(
|
||||
pg: &sqlx::Pool<sqlx::Postgres>,
|
||||
) -> Result<BTreeMap<(String, String), [u8; 64]>, Error> {
|
||||
let mut keys = BTreeMap::new();
|
||||
) -> Result<Vec<(String, String, [u8; 64])>, Error> {
|
||||
let mut keys = Vec::new();
|
||||
|
||||
// Server tor key from the account table.
|
||||
// Older installs have tor_key (64 bytes). Newer installs (post-NetworkKeys migration)
|
||||
@@ -694,14 +551,15 @@ async fn previous_tor_keys(
|
||||
.with_kind(ErrorKind::Database)?;
|
||||
if let Ok(tor_key) = row.try_get::<Vec<u8>, _>("tor_key") {
|
||||
if let Ok(key) = <[u8; 64]>::try_from(tor_key) {
|
||||
keys.insert(("STARTOS".to_owned(), "STARTOS".to_owned()), key);
|
||||
keys.push(("STARTOS".to_owned(), "STARTOS".to_owned(), key));
|
||||
}
|
||||
} else if let Ok(net_key) = row.try_get::<Vec<u8>, _>("network_key") {
|
||||
if let Ok(seed) = <[u8; 32]>::try_from(net_key) {
|
||||
keys.insert(
|
||||
("STARTOS".to_owned(), "STARTOS".to_owned()),
|
||||
keys.push((
|
||||
"STARTOS".to_owned(),
|
||||
"STARTOS".to_owned(),
|
||||
crate::util::crypto::ed25519_expand_key(&seed),
|
||||
);
|
||||
));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -721,17 +579,16 @@ async fn previous_tor_keys(
|
||||
continue;
|
||||
};
|
||||
if let Ok(seed) = <[u8; 32]>::try_from(key_bytes) {
|
||||
keys.insert(
|
||||
(package, interface),
|
||||
keys.push((
|
||||
package,
|
||||
interface,
|
||||
crate::util::crypto::ed25519_expand_key(&seed),
|
||||
);
|
||||
));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Package tor keys from the tor table (already 64-byte expanded keys).
|
||||
// These overwrite network_keys entries for the same (package, interface)
|
||||
// because the tor table has the actual expanded key used by tor.
|
||||
// Package tor keys from the tor table (already 64-byte expanded keys)
|
||||
if let Ok(rows) = sqlx::query(r#"SELECT package, interface, key FROM tor"#)
|
||||
.fetch_all(pg)
|
||||
.await
|
||||
@@ -747,7 +604,7 @@ async fn previous_tor_keys(
|
||||
continue;
|
||||
};
|
||||
if let Ok(key) = <[u8; 64]>::try_from(key_bytes) {
|
||||
keys.insert((package, interface), key);
|
||||
keys.push((package, interface, key));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,44 +0,0 @@
|
||||
use exver::{PreReleaseSegment, VersionRange};
|
||||
|
||||
use super::v0_3_5::V0_3_0_COMPAT;
|
||||
use super::{VersionT, v0_4_0_alpha_22};
|
||||
use crate::prelude::*;
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
static ref V0_4_0_alpha_23: exver::Version = exver::Version::new(
|
||||
[0, 4, 0],
|
||||
[PreReleaseSegment::String("alpha".into()), 23.into()]
|
||||
);
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Default)]
|
||||
pub struct Version;
|
||||
|
||||
impl VersionT for Version {
|
||||
type Previous = v0_4_0_alpha_22::Version;
|
||||
type PreUpRes = ();
|
||||
|
||||
async fn pre_up(self) -> Result<Self::PreUpRes, Error> {
|
||||
Ok(())
|
||||
}
|
||||
fn semver(self) -> exver::Version {
|
||||
V0_4_0_alpha_23.clone()
|
||||
}
|
||||
fn compat(self) -> &'static VersionRange {
|
||||
&V0_3_0_COMPAT
|
||||
}
|
||||
#[instrument(skip_all)]
|
||||
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
|
||||
let status_info = db["public"]["serverInfo"]["statusInfo"]
|
||||
.as_object_mut();
|
||||
if let Some(m) = status_info {
|
||||
m.remove("updated");
|
||||
m.insert("restart".into(), Value::Null);
|
||||
}
|
||||
|
||||
Ok(Value::Null)
|
||||
}
|
||||
fn down(self, _db: &mut Value) -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -1,12 +1,5 @@
|
||||
# Changelog
|
||||
|
||||
## 0.4.0-beta.66 (2026-03-24)
|
||||
|
||||
- **Breaking:** `withPgDump()` replaces `pgdata` with required `mountpoint` + `pgdataPath`
|
||||
- Passwordless/trust auth support for `withPgDump()` and `withMysqlDump()`
|
||||
- New options: `pgOptions` for postgres, `mysqldOptions` for mysql/mariadb
|
||||
- Fixed MariaDB backup/restore support
|
||||
|
||||
## 0.4.0-beta.65 (2026-03-23)
|
||||
|
||||
### Added
|
||||
|
||||
@@ -1,6 +0,0 @@
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type GenerateCertificateParams = {
|
||||
hostnames: Array<string>
|
||||
ed25519: boolean
|
||||
}
|
||||
@@ -1,3 +0,0 @@
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type GenerateCertificateResponse = { key: string; fullchain: string }
|
||||
@@ -15,6 +15,7 @@ import type { VolumeId } from './VolumeId'
|
||||
export type Manifest = {
|
||||
id: PackageId
|
||||
version: Version
|
||||
satisfies: Array<Version>
|
||||
canMigrateTo: string
|
||||
canMigrateFrom: string
|
||||
images: { [key: ImageId]: ImageConfig }
|
||||
@@ -36,5 +37,4 @@ export type Manifest = {
|
||||
sdkVersion: string | null
|
||||
hardwareAcceleration: boolean
|
||||
plugins: Array<PluginId>
|
||||
satisfies: Array<Version>
|
||||
}
|
||||
|
||||
@@ -10,7 +10,6 @@ import type { MerkleArchiveCommitment } from './MerkleArchiveCommitment'
|
||||
import type { PackageId } from './PackageId'
|
||||
import type { PluginId } from './PluginId'
|
||||
import type { RegistryAsset } from './RegistryAsset'
|
||||
import type { Version } from './Version'
|
||||
|
||||
export type PackageVersionInfo = {
|
||||
icon: DataUrl
|
||||
@@ -32,5 +31,4 @@ export type PackageVersionInfo = {
|
||||
sdkVersion: string | null
|
||||
hardwareAcceleration: boolean
|
||||
plugins: Array<PluginId>
|
||||
satisfies: Array<Version>
|
||||
}
|
||||
|
||||
@@ -1,3 +0,0 @@
|
||||
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
|
||||
|
||||
export type RestartReason = 'mdns' | 'language' | 'kiosk' | 'update'
|
||||
@@ -2,12 +2,11 @@
|
||||
import type { BackupProgress } from './BackupProgress'
|
||||
import type { FullProgress } from './FullProgress'
|
||||
import type { PackageId } from './PackageId'
|
||||
import type { RestartReason } from './RestartReason'
|
||||
|
||||
export type ServerStatus = {
|
||||
backupProgress: { [key: PackageId]: BackupProgress } | null
|
||||
updated: boolean
|
||||
updateProgress: FullProgress | null
|
||||
shuttingDown: boolean
|
||||
restarting: boolean
|
||||
restart: RestartReason | null
|
||||
}
|
||||
|
||||
@@ -106,8 +106,6 @@ export { FullProgress } from './FullProgress'
|
||||
export { GatewayId } from './GatewayId'
|
||||
export { GatewayInfo } from './GatewayInfo'
|
||||
export { GatewayType } from './GatewayType'
|
||||
export { GenerateCertificateParams } from './GenerateCertificateParams'
|
||||
export { GenerateCertificateResponse } from './GenerateCertificateResponse'
|
||||
export { GetActionInputParams } from './GetActionInputParams'
|
||||
export { GetContainerIpParams } from './GetContainerIpParams'
|
||||
export { GetHostInfoParams } from './GetHostInfoParams'
|
||||
@@ -236,7 +234,6 @@ export { RenameGatewayParams } from './RenameGatewayParams'
|
||||
export { ReplayId } from './ReplayId'
|
||||
export { RequestCommitment } from './RequestCommitment'
|
||||
export { ResetPasswordParams } from './ResetPasswordParams'
|
||||
export { RestartReason } from './RestartReason'
|
||||
export { RestorePackageParams } from './RestorePackageParams'
|
||||
export { RunActionParams } from './RunActionParams'
|
||||
export { Security } from './Security'
|
||||
|
||||
@@ -1,10 +1,7 @@
|
||||
/**
|
||||
* Performs a deep structural equality check across all provided arguments.
|
||||
* Returns true only if every argument is deeply equal to every other argument.
|
||||
* Handles primitives, arrays, and plain objects (JSON-like) recursively.
|
||||
*
|
||||
* Non-plain objects (Set, Map, Date, etc.) are compared by reference only,
|
||||
* since Object.keys() does not enumerate their contents.
|
||||
* Handles primitives, arrays, and plain objects recursively.
|
||||
*
|
||||
* @param args - Two or more values to compare for deep equality
|
||||
* @returns True if all arguments are deeply equal
|
||||
@@ -26,18 +23,6 @@ export function deepEqual(...args: unknown[]) {
|
||||
}
|
||||
if (objects.length !== args.length) return false
|
||||
if (objects.some(Array.isArray) && !objects.every(Array.isArray)) return false
|
||||
if (
|
||||
objects.some(
|
||||
(x) => !Array.isArray(x) && Object.getPrototypeOf(x) !== Object.prototype,
|
||||
)
|
||||
) {
|
||||
return (
|
||||
objects.reduce<object | null>(
|
||||
(a, b) => (a === b ? a : null),
|
||||
objects[0],
|
||||
) !== null
|
||||
)
|
||||
}
|
||||
const allKeys = new Set(objects.flatMap((x) => Object.keys(x)))
|
||||
for (const key of allKeys) {
|
||||
for (const x of objects) {
|
||||
|
||||
@@ -70,7 +70,7 @@ import { createVolumes } from './util/Volume'
|
||||
import { getDataVersion, setDataVersion } from './version'
|
||||
|
||||
/** The minimum StartOS version required by this SDK release */
|
||||
export const OSVersion = testTypeVersion('0.4.0-alpha.23')
|
||||
export const OSVersion = testTypeVersion('0.4.0-alpha.22')
|
||||
|
||||
// prettier-ignore
|
||||
type AnyNeverCond<T extends any[], Then, Else> =
|
||||
|
||||
@@ -10,10 +10,9 @@ const BACKUP_HOST_PATH = '/media/startos/backup'
|
||||
const BACKUP_CONTAINER_MOUNT = '/backup-target'
|
||||
|
||||
/** A password value, or a function that returns one. Functions are resolved lazily (only during restore). */
|
||||
export type LazyPassword = string | (() => string | Promise<string>) | null
|
||||
export type LazyPassword = string | (() => string | Promise<string>)
|
||||
|
||||
async function resolvePassword(pw: LazyPassword): Promise<string | null> {
|
||||
if (pw === null) return null
|
||||
async function resolvePassword(pw: LazyPassword): Promise<string> {
|
||||
return typeof pw === 'function' ? pw() : pw
|
||||
}
|
||||
|
||||
@@ -23,20 +22,16 @@ export type PgDumpConfig<M extends T.SDKManifest> = {
|
||||
imageId: keyof M['images'] & T.ImageId
|
||||
/** Volume ID containing the PostgreSQL data directory */
|
||||
dbVolume: M['volumes'][number]
|
||||
/** Volume mountpoint (e.g. '/var/lib/postgresql') */
|
||||
mountpoint: string
|
||||
/** Subpath from mountpoint to PGDATA (e.g. '/data', '/18/docker') */
|
||||
pgdataPath: string
|
||||
/** Path to PGDATA within the container (e.g. '/var/lib/postgresql/data') */
|
||||
pgdata: string
|
||||
/** PostgreSQL database name to dump */
|
||||
database: string
|
||||
/** PostgreSQL user */
|
||||
user: string
|
||||
/** PostgreSQL password (for restore). Can be a string, a function that returns one (resolved lazily after volumes are restored), or null for trust auth. */
|
||||
/** PostgreSQL password (for restore). Can be a string or a function that returns one — functions are resolved lazily after volumes are restored. */
|
||||
password: LazyPassword
|
||||
/** Additional initdb arguments (e.g. ['--data-checksums']) */
|
||||
initdbArgs?: string[]
|
||||
/** Additional options passed to `pg_ctl start -o` (e.g. '-c shared_preload_libraries=vectorchord'). Appended after `-c listen_addresses=`. */
|
||||
pgOptions?: string
|
||||
}
|
||||
|
||||
/** Configuration for MySQL/MariaDB dump-based backup */
|
||||
@@ -57,8 +52,6 @@ export type MysqlDumpConfig<M extends T.SDKManifest> = {
|
||||
engine: 'mysql' | 'mariadb'
|
||||
/** Custom readiness check command (default: ['mysqladmin', 'ping', ...]) */
|
||||
readyCommand?: string[]
|
||||
/** Additional options passed to `mysqld` on startup (e.g. '--innodb-buffer-pool-size=256M'). Appended after `--bind-address=127.0.0.1`. */
|
||||
mysqldOptions?: string[]
|
||||
}
|
||||
|
||||
/** Bind-mount the backup target into a SubContainer's rootfs */
|
||||
@@ -161,21 +154,19 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
|
||||
const {
|
||||
imageId,
|
||||
dbVolume,
|
||||
mountpoint,
|
||||
pgdataPath,
|
||||
pgdata,
|
||||
database,
|
||||
user,
|
||||
password,
|
||||
initdbArgs = [],
|
||||
pgOptions,
|
||||
} = config
|
||||
const pgdata = `${mountpoint}${pgdataPath}`
|
||||
const dumpFile = `${BACKUP_CONTAINER_MOUNT}/${database}-db.dump`
|
||||
const pgMountpoint = pgdata.replace(/\/data$/, '') || pgdata
|
||||
|
||||
function dbMounts() {
|
||||
return Mounts.of<M>().mountVolume({
|
||||
volumeId: dbVolume,
|
||||
mountpoint: mountpoint,
|
||||
mountpoint: pgMountpoint,
|
||||
readonly: false,
|
||||
subpath: null,
|
||||
})
|
||||
@@ -202,12 +193,10 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
|
||||
user: 'root',
|
||||
})
|
||||
console.log(`[${label}] starting postgres`)
|
||||
const pgStartOpts = pgOptions
|
||||
? `-c listen_addresses= ${pgOptions}`
|
||||
: '-c listen_addresses='
|
||||
await sub.execFail(['pg_ctl', 'start', '-D', pgdata, '-o', pgStartOpts], {
|
||||
user: 'postgres',
|
||||
})
|
||||
await sub.execFail(
|
||||
['pg_ctl', 'start', '-D', pgdata, '-o', '-c listen_addresses='],
|
||||
{ user: 'postgres' },
|
||||
)
|
||||
for (let i = 0; i < 60; i++) {
|
||||
const { exitCode } = await sub.exec(['pg_isready', '-U', user], {
|
||||
user: 'postgres',
|
||||
@@ -260,7 +249,7 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
|
||||
async (sub) => {
|
||||
await mountBackupTarget(sub.rootfs)
|
||||
await sub.execFail(
|
||||
['chown', '-R', 'postgres:postgres', mountpoint],
|
||||
['chown', '-R', 'postgres:postgres', pgMountpoint],
|
||||
{ user: 'root' },
|
||||
)
|
||||
await sub.execFail(
|
||||
@@ -285,20 +274,18 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
|
||||
{ user: 'postgres' },
|
||||
null,
|
||||
)
|
||||
if (resolvedPassword !== null) {
|
||||
await sub.execFail(
|
||||
[
|
||||
'psql',
|
||||
'-U',
|
||||
user,
|
||||
'-d',
|
||||
database,
|
||||
'-c',
|
||||
`ALTER USER ${user} WITH PASSWORD '${resolvedPassword}'`,
|
||||
],
|
||||
{ user: 'postgres' },
|
||||
)
|
||||
}
|
||||
await sub.execFail(
|
||||
[
|
||||
'psql',
|
||||
'-U',
|
||||
user,
|
||||
'-d',
|
||||
database,
|
||||
'-c',
|
||||
`ALTER USER ${user} WITH PASSWORD '${resolvedPassword}'`,
|
||||
],
|
||||
{ user: 'postgres' },
|
||||
)
|
||||
await sub.execFail(['pg_ctl', 'stop', '-D', pgdata, '-w'], {
|
||||
user: 'postgres',
|
||||
})
|
||||
@@ -331,7 +318,6 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
|
||||
password,
|
||||
engine,
|
||||
readyCommand,
|
||||
mysqldOptions = [],
|
||||
} = config
|
||||
const dumpFile = `${BACKUP_CONTAINER_MOUNT}/${database}-db.dump`
|
||||
|
||||
@@ -356,42 +342,6 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
|
||||
throw new Error('MySQL/MariaDB failed to become ready within 30 seconds')
|
||||
}
|
||||
|
||||
async function startMysql(sub: {
|
||||
exec(cmd: string[], opts?: any): Promise<{ exitCode: number | null }>
|
||||
execFail(cmd: string[], opts?: any, timeout?: number | null): Promise<any>
|
||||
}) {
|
||||
if (engine === 'mariadb') {
|
||||
// MariaDB doesn't support --daemonize; fire-and-forget the exec
|
||||
sub
|
||||
.exec(
|
||||
[
|
||||
'mysqld',
|
||||
'--user=mysql',
|
||||
`--datadir=${datadir}`,
|
||||
'--bind-address=127.0.0.1',
|
||||
...mysqldOptions,
|
||||
],
|
||||
{ user: 'root' },
|
||||
)
|
||||
.catch((e) =>
|
||||
console.error('[mysql-backup] mysqld exited unexpectedly:', e),
|
||||
)
|
||||
} else {
|
||||
await sub.execFail(
|
||||
[
|
||||
'mysqld',
|
||||
'--user=mysql',
|
||||
`--datadir=${datadir}`,
|
||||
'--bind-address=127.0.0.1',
|
||||
'--daemonize',
|
||||
...mysqldOptions,
|
||||
],
|
||||
{ user: 'root' },
|
||||
null,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return new Backups<M>()
|
||||
.setPreBackup(async (effects) => {
|
||||
const pw = await resolvePassword(password)
|
||||
@@ -400,7 +350,7 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
|
||||
'ping',
|
||||
'-u',
|
||||
user,
|
||||
...(pw !== null ? [`-p${pw}`] : []),
|
||||
`-p${pw}`,
|
||||
'--silent',
|
||||
]
|
||||
await SubContainerRc.withTemp<M, void, BackupEffects>(
|
||||
@@ -421,14 +371,24 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
|
||||
user: 'root',
|
||||
})
|
||||
}
|
||||
await startMysql(sub)
|
||||
await sub.execFail(
|
||||
[
|
||||
'mysqld',
|
||||
'--user=mysql',
|
||||
`--datadir=${datadir}`,
|
||||
'--skip-networking',
|
||||
'--daemonize',
|
||||
],
|
||||
{ user: 'root' },
|
||||
null,
|
||||
)
|
||||
await waitForMysql(sub, readyCmd)
|
||||
await sub.execFail(
|
||||
[
|
||||
'mysqldump',
|
||||
'-u',
|
||||
user,
|
||||
...(pw !== null ? [`-p${pw}`] : []),
|
||||
`-p${pw}`,
|
||||
'--single-transaction',
|
||||
`--result-file=${dumpFile}`,
|
||||
database,
|
||||
@@ -436,15 +396,9 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
|
||||
{ user: 'root' },
|
||||
null,
|
||||
)
|
||||
// Graceful shutdown via SIGTERM; wait for exit
|
||||
await sub.execFail(
|
||||
[
|
||||
'sh',
|
||||
'-c',
|
||||
'PID=$(cat /var/run/mysqld/mysqld.pid) && kill $PID && tail --pid=$PID -f /dev/null',
|
||||
],
|
||||
['mysqladmin', '-u', user, `-p${pw}`, 'shutdown'],
|
||||
{ user: 'root' },
|
||||
null,
|
||||
)
|
||||
},
|
||||
)
|
||||
@@ -481,7 +435,17 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
|
||||
{ user: 'root' },
|
||||
)
|
||||
}
|
||||
await startMysql(sub)
|
||||
await sub.execFail(
|
||||
[
|
||||
'mysqld',
|
||||
'--user=mysql',
|
||||
`--datadir=${datadir}`,
|
||||
'--skip-networking',
|
||||
'--daemonize',
|
||||
],
|
||||
{ user: 'root' },
|
||||
null,
|
||||
)
|
||||
// After fresh init, root has no password
|
||||
await waitForMysql(sub, [
|
||||
'mysqladmin',
|
||||
@@ -491,32 +455,29 @@ export class Backups<M extends T.SDKManifest> implements InitScript {
|
||||
'--silent',
|
||||
])
|
||||
// Create database, user, and set password
|
||||
const grantSql =
|
||||
pw !== null
|
||||
? `CREATE DATABASE IF NOT EXISTS \`${database}\`; CREATE USER IF NOT EXISTS '${user}'@'localhost' IDENTIFIED BY '${pw}'; GRANT ALL ON \`${database}\`.* TO '${user}'@'localhost'; ALTER USER 'root'@'localhost' IDENTIFIED BY '${pw}'; FLUSH PRIVILEGES;`
|
||||
: `CREATE DATABASE IF NOT EXISTS \`${database}\`; CREATE USER IF NOT EXISTS '${user}'@'localhost'; GRANT ALL ON \`${database}\`.* TO '${user}'@'localhost'; FLUSH PRIVILEGES;`
|
||||
await sub.execFail(['mysql', '-u', 'root', '-e', grantSql], {
|
||||
user: 'root',
|
||||
})
|
||||
await sub.execFail(
|
||||
[
|
||||
'mysql',
|
||||
'-u',
|
||||
'root',
|
||||
'-e',
|
||||
`CREATE DATABASE IF NOT EXISTS \`${database}\`; CREATE USER IF NOT EXISTS '${user}'@'localhost' IDENTIFIED BY '${pw}'; GRANT ALL ON \`${database}\`.* TO '${user}'@'localhost'; ALTER USER 'root'@'localhost' IDENTIFIED BY '${pw}'; FLUSH PRIVILEGES;`,
|
||||
],
|
||||
{ user: 'root' },
|
||||
)
|
||||
// Restore from dump
|
||||
await sub.execFail(
|
||||
[
|
||||
'sh',
|
||||
'-c',
|
||||
`mysql -u root ${pw !== null ? `-p'${pw}'` : ''} ${database} < ${dumpFile}`,
|
||||
`mysql -u root -p'${pw}' \`${database}\` < ${dumpFile}`,
|
||||
],
|
||||
{ user: 'root' },
|
||||
null,
|
||||
)
|
||||
// Graceful shutdown via SIGTERM; wait for exit
|
||||
await sub.execFail(
|
||||
[
|
||||
'sh',
|
||||
'-c',
|
||||
'PID=$(cat /var/run/mysqld/mysqld.pid) && kill $PID && tail --pid=$PID -f /dev/null',
|
||||
],
|
||||
['mysqladmin', '-u', 'root', `-p${password}`, 'shutdown'],
|
||||
{ user: 'root' },
|
||||
null,
|
||||
)
|
||||
},
|
||||
)
|
||||
|
||||
@@ -1,15 +1,15 @@
|
||||
import { z } from 'zod'
|
||||
import * as YAML from 'yaml'
|
||||
import * as TOML from '@iarna/toml'
|
||||
import * as INI from 'ini'
|
||||
import {
|
||||
XMLBuilder,
|
||||
XMLParser,
|
||||
XMLBuilder,
|
||||
type X2jOptions,
|
||||
type XmlBuilderOptions,
|
||||
} from 'fast-xml-parser'
|
||||
import * as INI from 'ini'
|
||||
import * as fs from 'node:fs/promises'
|
||||
import * as YAML from 'yaml'
|
||||
import { z } from 'zod'
|
||||
import * as T from '../../../base/lib/types'
|
||||
import * as fs from 'node:fs/promises'
|
||||
import { asError, deepEqual } from '../../../base/lib/util'
|
||||
import { Watchable } from '../../../base/lib/util/Watchable'
|
||||
import { PathBase } from './Volume'
|
||||
@@ -382,7 +382,7 @@ export class FileHelper<A> {
|
||||
const mergeData = this.validate(fileMerge({}, fileData, data))
|
||||
const toWrite = this.writeData(mergeData)
|
||||
if (toWrite !== fileDataRaw) {
|
||||
await this.writeFile(mergeData)
|
||||
this.writeFile(mergeData)
|
||||
if (!options.allowWriteAfterConst && effects.constRetry) {
|
||||
const records = this.consts.filter(([c]) => c === effects.constRetry)
|
||||
for (const record of records) {
|
||||
|
||||
4
sdk/package/package-lock.json
generated
4
sdk/package/package-lock.json
generated
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "@start9labs/start-sdk",
|
||||
"version": "0.4.0-beta.66",
|
||||
"version": "0.4.0-beta.65",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "@start9labs/start-sdk",
|
||||
"version": "0.4.0-beta.66",
|
||||
"version": "0.4.0-beta.65",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@iarna/toml": "^3.0.0",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "@start9labs/start-sdk",
|
||||
"version": "0.4.0-beta.66",
|
||||
"version": "0.4.0-beta.65",
|
||||
"description": "Software development kit to facilitate packaging services for StartOS",
|
||||
"main": "./package/lib/index.js",
|
||||
"types": "./package/lib/index.d.ts",
|
||||
|
||||
4
web/package-lock.json
generated
4
web/package-lock.json
generated
@@ -1,12 +1,12 @@
|
||||
{
|
||||
"name": "startos-ui",
|
||||
"version": "0.4.0-alpha.23",
|
||||
"version": "0.4.0-alpha.22",
|
||||
"lockfileVersion": 3,
|
||||
"requires": true,
|
||||
"packages": {
|
||||
"": {
|
||||
"name": "startos-ui",
|
||||
"version": "0.4.0-alpha.23",
|
||||
"version": "0.4.0-alpha.22",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@angular/cdk": "^21.2.1",
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
{
|
||||
"name": "startos-ui",
|
||||
"version": "0.4.0-alpha.23",
|
||||
"version": "0.4.0-alpha.22",
|
||||
"author": "Start9 Labs, Inc",
|
||||
"homepage": "https://start9.com/",
|
||||
"license": "MIT",
|
||||
|
||||
@@ -3,7 +3,6 @@ import { FormsModule } from '@angular/forms'
|
||||
import { i18nPipe } from '@start9labs/shared'
|
||||
import {
|
||||
TuiButton,
|
||||
TuiCell,
|
||||
TuiCheckbox,
|
||||
TuiDialogContext,
|
||||
TuiNotification,
|
||||
@@ -20,7 +19,6 @@ export interface PreserveOverwriteData {
|
||||
imports: [
|
||||
FormsModule,
|
||||
TuiButton,
|
||||
TuiCell,
|
||||
TuiCheckbox,
|
||||
TuiHeader,
|
||||
TuiNotification,
|
||||
@@ -51,9 +49,9 @@ export interface PreserveOverwriteData {
|
||||
| i18n
|
||||
}}
|
||||
</p>
|
||||
<label tuiCell>
|
||||
<label>
|
||||
<input tuiCheckbox type="checkbox" [(ngModel)]="backupAck" />
|
||||
<span tuiTitle>{{ 'I have a backup of my data' | i18n }}</span>
|
||||
{{ 'I have a backup of my data' | i18n }}
|
||||
</label>
|
||||
}
|
||||
<footer>
|
||||
|
||||
@@ -4,16 +4,8 @@ import {
|
||||
HostListener,
|
||||
inject,
|
||||
} from '@angular/core'
|
||||
import {
|
||||
AbstractControl,
|
||||
FormControl,
|
||||
FormGroup,
|
||||
ReactiveFormsModule,
|
||||
ValidatorFn,
|
||||
Validators,
|
||||
} from '@angular/forms'
|
||||
import { Router } from '@angular/router'
|
||||
import { WA_IS_MOBILE } from '@ng-web-apis/platform'
|
||||
import { FormsModule } from '@angular/forms'
|
||||
import {
|
||||
DialogService,
|
||||
DiskInfo,
|
||||
@@ -22,14 +14,13 @@ import {
|
||||
i18nPipe,
|
||||
toGuid,
|
||||
} from '@start9labs/shared'
|
||||
import { TuiMapperPipe, TuiValidator } from '@taiga-ui/cdk'
|
||||
import { WA_IS_MOBILE } from '@ng-web-apis/platform'
|
||||
import {
|
||||
TuiButton,
|
||||
TuiError,
|
||||
TuiIcon,
|
||||
TuiLoader,
|
||||
TuiInput,
|
||||
TuiNotification,
|
||||
TUI_VALIDATION_ERRORS,
|
||||
TuiTitle,
|
||||
} from '@taiga-ui/core'
|
||||
import {
|
||||
@@ -38,55 +29,49 @@ import {
|
||||
TuiSelect,
|
||||
TuiTooltip,
|
||||
} from '@taiga-ui/kit'
|
||||
import { TuiCardLarge, TuiForm, TuiHeader } from '@taiga-ui/layout'
|
||||
import { distinctUntilChanged, filter, Subscription } from 'rxjs'
|
||||
import { PRESERVE_OVERWRITE } from '../components/preserve-overwrite.dialog'
|
||||
import { TuiCardLarge, TuiHeader } from '@taiga-ui/layout'
|
||||
import { PolymorpheusComponent } from '@taiga-ui/polymorpheus'
|
||||
import { filter, Subscription } from 'rxjs'
|
||||
import { ApiService } from '../services/api.service'
|
||||
import { StateService } from '../services/state.service'
|
||||
import { PRESERVE_OVERWRITE } from '../components/preserve-overwrite.dialog'
|
||||
|
||||
@Component({
|
||||
template: `
|
||||
@if (!shuttingDown) {
|
||||
@if (loading) {
|
||||
<section tuiCardLarge="compact">
|
||||
<header tuiHeader>
|
||||
<h2 tuiTitle>{{ 'Select Drives' | i18n }}</h2>
|
||||
</header>
|
||||
<section tuiCardLarge="compact">
|
||||
<header tuiHeader>
|
||||
<h2 tuiTitle>{{ 'Select Drives' | i18n }}</h2>
|
||||
</header>
|
||||
|
||||
@if (loading) {
|
||||
<tui-loader />
|
||||
</section>
|
||||
} @else if (drives.length === 0) {
|
||||
<section tuiCardLarge="compact">
|
||||
<header tuiHeader>
|
||||
<h2 tuiTitle>{{ 'Select Drives' | i18n }}</h2>
|
||||
</header>
|
||||
} @else if (drives.length === 0) {
|
||||
<p tuiNotification size="m" appearance="warning">
|
||||
{{
|
||||
'No drives found. Please connect a drive and click Refresh.'
|
||||
| i18n
|
||||
}}
|
||||
</p>
|
||||
<footer>
|
||||
<button tuiButton appearance="secondary" (click)="refresh()">
|
||||
{{ 'Refresh' | i18n }}
|
||||
</button>
|
||||
</footer>
|
||||
</section>
|
||||
} @else {
|
||||
<form tuiCardLarge="compact" tuiForm [formGroup]="form">
|
||||
<header tuiHeader>
|
||||
<h2 tuiTitle>{{ 'Select Drives' | i18n }}</h2>
|
||||
</header>
|
||||
|
||||
<tui-textfield [stringify]="stringify">
|
||||
} @else {
|
||||
<tui-textfield
|
||||
[stringify]="stringify"
|
||||
[disabledItemHandler]="osDisabled"
|
||||
>
|
||||
<label tuiLabel>{{ 'OS Drive' | i18n }}</label>
|
||||
@if (mobile) {
|
||||
<select
|
||||
tuiSelect
|
||||
formControlName="osDrive"
|
||||
[ngModel]="selectedOsDrive"
|
||||
(ngModelChange)="onOsDriveChange($event)"
|
||||
[items]="drives"
|
||||
></select>
|
||||
} @else {
|
||||
<input tuiSelect formControlName="osDrive" />
|
||||
<input
|
||||
tuiSelect
|
||||
[ngModel]="selectedOsDrive"
|
||||
(ngModelChange)="onOsDriveChange($event)"
|
||||
/>
|
||||
}
|
||||
@if (!mobile) {
|
||||
<tui-data-list-wrapper
|
||||
@@ -97,28 +82,24 @@ import { StateService } from '../services/state.service'
|
||||
}
|
||||
<tui-icon [tuiTooltip]="osDriveTooltip" />
|
||||
</tui-textfield>
|
||||
@if (form.controls.osDrive.touched && form.controls.osDrive.invalid) {
|
||||
<tui-error formControlName="osDrive" />
|
||||
}
|
||||
|
||||
<tui-textfield [stringify]="stringify">
|
||||
<tui-textfield
|
||||
[stringify]="stringify"
|
||||
[disabledItemHandler]="dataDisabled"
|
||||
>
|
||||
<label tuiLabel>{{ 'Data Drive' | i18n }}</label>
|
||||
@if (mobile) {
|
||||
<select
|
||||
tuiSelect
|
||||
formControlName="dataDrive"
|
||||
[(ngModel)]="selectedDataDrive"
|
||||
(ngModelChange)="onDataDriveChange($event)"
|
||||
[items]="drives"
|
||||
[tuiValidator]="
|
||||
form.controls.osDrive.value | tuiMapper: dataValidator
|
||||
"
|
||||
></select>
|
||||
} @else {
|
||||
<input
|
||||
tuiSelect
|
||||
formControlName="dataDrive"
|
||||
[tuiValidator]="
|
||||
form.controls.osDrive.value | tuiMapper: dataValidator
|
||||
"
|
||||
[(ngModel)]="selectedDataDrive"
|
||||
(ngModelChange)="onDataDriveChange($event)"
|
||||
/>
|
||||
}
|
||||
@if (!mobile) {
|
||||
@@ -136,11 +117,6 @@ import { StateService } from '../services/state.service'
|
||||
}
|
||||
<tui-icon [tuiTooltip]="dataDriveTooltip" />
|
||||
</tui-textfield>
|
||||
@if (
|
||||
form.controls.dataDrive.touched && form.controls.dataDrive.invalid
|
||||
) {
|
||||
<tui-error formControlName="dataDrive" />
|
||||
}
|
||||
|
||||
<ng-template #driveContent let-drive>
|
||||
<span tuiTitle>
|
||||
@@ -150,14 +126,24 @@ import { StateService } from '../services/state.service'
|
||||
</span>
|
||||
</span>
|
||||
</ng-template>
|
||||
}
|
||||
|
||||
<footer>
|
||||
<button tuiButton [disabled]="form.invalid" (click)="continue()">
|
||||
<footer>
|
||||
@if (drives.length === 0) {
|
||||
<button tuiButton appearance="secondary" (click)="refresh()">
|
||||
{{ 'Refresh' | i18n }}
|
||||
</button>
|
||||
} @else {
|
||||
<button
|
||||
tuiButton
|
||||
[disabled]="!selectedOsDrive || !selectedDataDrive"
|
||||
(click)="continue()"
|
||||
>
|
||||
{{ 'Continue' | i18n }}
|
||||
</button>
|
||||
</footer>
|
||||
</form>
|
||||
}
|
||||
}
|
||||
</footer>
|
||||
</section>
|
||||
}
|
||||
`,
|
||||
styles: `
|
||||
@@ -166,34 +152,20 @@ import { StateService } from '../services/state.service'
|
||||
}
|
||||
`,
|
||||
imports: [
|
||||
ReactiveFormsModule,
|
||||
FormsModule,
|
||||
TuiCardLarge,
|
||||
TuiForm,
|
||||
TuiButton,
|
||||
TuiError,
|
||||
TuiIcon,
|
||||
TuiLoader,
|
||||
TuiInput,
|
||||
TuiNotification,
|
||||
TuiSelect,
|
||||
TuiDataListWrapper,
|
||||
TuiTooltip,
|
||||
TuiValidator,
|
||||
TuiMapperPipe,
|
||||
TuiHeader,
|
||||
TuiTitle,
|
||||
i18nPipe,
|
||||
],
|
||||
providers: [
|
||||
{
|
||||
provide: TUI_VALIDATION_ERRORS,
|
||||
useFactory: () => {
|
||||
const i18n = inject(i18nPipe)
|
||||
return {
|
||||
required: i18n.transform('Required'),
|
||||
}
|
||||
},
|
||||
},
|
||||
],
|
||||
})
|
||||
export default class DrivesPage {
|
||||
private readonly api = inject(ApiService)
|
||||
@@ -216,63 +188,29 @@ export default class DrivesPage {
|
||||
}
|
||||
|
||||
readonly osDriveTooltip = this.i18n.transform(
|
||||
'The drive where the StartOS operating system will be installed. Minimum 18 GB.',
|
||||
'The drive where the StartOS operating system will be installed.',
|
||||
)
|
||||
readonly dataDriveTooltip = this.i18n.transform(
|
||||
'The drive where your StartOS data (services, settings, etc.) will be stored. This can be the same as the OS drive or a separate drive. Minimum 20 GB, or 38 GB if using a single drive for both OS and data.',
|
||||
'The drive where your StartOS data (services, settings, etc.) will be stored. This can be the same as the OS drive or a separate drive.',
|
||||
)
|
||||
|
||||
private readonly MIN_OS = 18 * 2 ** 30 // 18 GiB
|
||||
private readonly MIN_DATA = 20 * 2 ** 30 // 20 GiB
|
||||
private readonly MIN_BOTH = 38 * 2 ** 30 // 38 GiB
|
||||
|
||||
private readonly osCapacityValidator: ValidatorFn = ({
|
||||
value,
|
||||
}: AbstractControl) => {
|
||||
if (!value) return null
|
||||
return value.capacity < this.MIN_OS
|
||||
? {
|
||||
tooSmallOs: this.i18n.transform('OS drive must be at least 18 GB'),
|
||||
}
|
||||
: null
|
||||
}
|
||||
|
||||
readonly form = new FormGroup({
|
||||
osDrive: new FormControl<DiskInfo | null>(null, [
|
||||
Validators.required,
|
||||
this.osCapacityValidator,
|
||||
]),
|
||||
dataDrive: new FormControl<DiskInfo | null>(null, [Validators.required]),
|
||||
})
|
||||
|
||||
readonly dataValidator =
|
||||
(osDrive: DiskInfo | null): ValidatorFn =>
|
||||
({ value }: AbstractControl) => {
|
||||
if (!value) return null
|
||||
const sameAsOs = osDrive && value.logicalname === osDrive.logicalname
|
||||
const min = sameAsOs ? this.MIN_BOTH : this.MIN_DATA
|
||||
if (value.capacity < min) {
|
||||
return sameAsOs
|
||||
? {
|
||||
tooSmallBoth: this.i18n.transform(
|
||||
'OS + data combined require at least 38 GB',
|
||||
),
|
||||
}
|
||||
: {
|
||||
tooSmallData: this.i18n.transform(
|
||||
'Data drive must be at least 20 GB',
|
||||
),
|
||||
}
|
||||
}
|
||||
return null
|
||||
}
|
||||
|
||||
drives: DiskInfo[] = []
|
||||
loading = true
|
||||
shuttingDown = false
|
||||
private dialogSub?: Subscription
|
||||
selectedOsDrive: DiskInfo | null = null
|
||||
selectedDataDrive: DiskInfo | null = null
|
||||
preserveData: boolean | null = null
|
||||
|
||||
readonly osDisabled = (drive: DiskInfo): boolean =>
|
||||
drive.capacity < this.MIN_OS
|
||||
|
||||
dataDisabled = (drive: DiskInfo): boolean => drive.capacity < this.MIN_DATA
|
||||
|
||||
readonly driveName = (drive: DiskInfo): string =>
|
||||
[drive.vendor, drive.model].filter(Boolean).join(' ') ||
|
||||
this.i18n.transform('Unknown Drive')
|
||||
@@ -290,40 +228,51 @@ export default class DrivesPage {
|
||||
|
||||
async ngOnInit() {
|
||||
await this.loadDrives()
|
||||
|
||||
this.form.controls.osDrive.valueChanges.subscribe(drive => {
|
||||
if (drive) {
|
||||
this.form.controls.osDrive.markAsTouched()
|
||||
}
|
||||
})
|
||||
|
||||
this.form.controls.dataDrive.valueChanges
|
||||
.pipe(distinctUntilChanged())
|
||||
.subscribe(drive => {
|
||||
this.preserveData = null
|
||||
if (drive) {
|
||||
this.form.controls.dataDrive.markAsTouched()
|
||||
if (toGuid(drive)) {
|
||||
this.showPreserveOverwriteDialog()
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
async refresh() {
|
||||
this.loading = true
|
||||
this.form.reset()
|
||||
this.selectedOsDrive = null
|
||||
this.selectedDataDrive = null
|
||||
this.preserveData = null
|
||||
await this.loadDrives()
|
||||
}
|
||||
|
||||
continue() {
|
||||
const osDrive = this.form.controls.osDrive.value
|
||||
const dataDrive = this.form.controls.dataDrive.value
|
||||
if (!osDrive || !dataDrive) return
|
||||
onOsDriveChange(osDrive: DiskInfo | null) {
|
||||
this.selectedOsDrive = osDrive
|
||||
this.dataDisabled = (drive: DiskInfo) => {
|
||||
if (osDrive && drive.logicalname === osDrive.logicalname) {
|
||||
return drive.capacity < this.MIN_BOTH
|
||||
}
|
||||
return drive.capacity < this.MIN_DATA
|
||||
}
|
||||
|
||||
const sameDevice = osDrive.logicalname === dataDrive.logicalname
|
||||
const dataHasStartOS = !!toGuid(dataDrive)
|
||||
// Clear data drive if it's now invalid
|
||||
if (this.selectedDataDrive && this.dataDisabled(this.selectedDataDrive)) {
|
||||
this.selectedDataDrive = null
|
||||
this.preserveData = null
|
||||
}
|
||||
}
|
||||
|
||||
onDataDriveChange(drive: DiskInfo | null) {
|
||||
this.preserveData = null
|
||||
|
||||
if (!drive) {
|
||||
return
|
||||
}
|
||||
|
||||
const hasStartOSData = !!toGuid(drive)
|
||||
if (hasStartOSData) {
|
||||
this.showPreserveOverwriteDialog()
|
||||
}
|
||||
}
|
||||
|
||||
continue() {
|
||||
if (!this.selectedOsDrive || !this.selectedDataDrive) return
|
||||
|
||||
const sameDevice =
|
||||
this.selectedOsDrive.logicalname === this.selectedDataDrive.logicalname
|
||||
const dataHasStartOS = !!toGuid(this.selectedDataDrive)
|
||||
|
||||
// Scenario 1: Same drive, has StartOS data, preserving → no warning
|
||||
if (sameDevice && dataHasStartOS && this.preserveData) {
|
||||
@@ -343,7 +292,7 @@ export default class DrivesPage {
|
||||
|
||||
private showPreserveOverwriteDialog() {
|
||||
let selectionMade = false
|
||||
const drive = this.form.controls.dataDrive.value
|
||||
const drive = this.selectedDataDrive
|
||||
const filesystem =
|
||||
drive?.filesystem ||
|
||||
drive?.partitions.find(p => p.guid)?.filesystem ||
|
||||
@@ -355,20 +304,20 @@ export default class DrivesPage {
|
||||
data: { isExt4 },
|
||||
})
|
||||
.subscribe({
|
||||
next: preserve => {
|
||||
selectionMade = true
|
||||
this.preserveData = preserve
|
||||
next: preserve => {
|
||||
selectionMade = true
|
||||
this.preserveData = preserve
|
||||
this.cdr.markForCheck()
|
||||
},
|
||||
complete: () => {
|
||||
if (!selectionMade) {
|
||||
// Dialog was dismissed without selection - clear the data drive
|
||||
this.selectedDataDrive = null
|
||||
this.preserveData = null
|
||||
this.cdr.markForCheck()
|
||||
},
|
||||
complete: () => {
|
||||
if (!selectionMade) {
|
||||
// Dialog was dismissed without selection - clear the data drive
|
||||
this.form.controls.dataDrive.reset()
|
||||
this.preserveData = null
|
||||
this.cdr.markForCheck()
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
private showOsDriveWarning() {
|
||||
@@ -411,15 +360,13 @@ export default class DrivesPage {
|
||||
}
|
||||
|
||||
private async installOs(wipe: boolean) {
|
||||
const osDrive = this.form.controls.osDrive.value!
|
||||
const dataDrive = this.form.controls.dataDrive.value!
|
||||
const loader = this.loader.open('Installing StartOS').subscribe()
|
||||
|
||||
try {
|
||||
const result = await this.api.installOs({
|
||||
osDrive: osDrive.logicalname,
|
||||
osDrive: this.selectedOsDrive!.logicalname,
|
||||
dataDrive: {
|
||||
logicalname: dataDrive.logicalname,
|
||||
logicalname: this.selectedDataDrive!.logicalname,
|
||||
wipe,
|
||||
},
|
||||
})
|
||||
|
||||
@@ -104,7 +104,15 @@ import { StateService } from '../services/state.service'
|
||||
{{ 'Skip' | i18n }}
|
||||
</button>
|
||||
}
|
||||
<button tuiButton size="m" [disabled]="form.invalid">
|
||||
<button
|
||||
tuiButton
|
||||
size="m"
|
||||
[disabled]="
|
||||
isFresh
|
||||
? form.invalid
|
||||
: form.controls.password.value && form.invalid
|
||||
"
|
||||
>
|
||||
{{ 'Finish' | i18n }}
|
||||
</button>
|
||||
</footer>
|
||||
|
||||
@@ -485,6 +485,7 @@ export default {
|
||||
512: 'Der Kiosk-Modus ist auf diesem Gerät nicht verfügbar',
|
||||
513: 'Aktivieren',
|
||||
514: 'Deaktivieren',
|
||||
515: 'Diese Änderung wird nach dem nächsten Neustart wirksam',
|
||||
516: 'Empfohlen',
|
||||
517: 'Möchten Sie diese Aufgabe wirklich verwerfen?',
|
||||
518: 'Verwerfen',
|
||||
@@ -628,8 +629,8 @@ export default {
|
||||
697: 'Geben Sie das Passwort ein, das zum Verschlüsseln dieses Backups verwendet wurde.',
|
||||
698: 'Mehrere Backups gefunden. Wählen Sie aus, welches wiederhergestellt werden soll.',
|
||||
699: 'Backups',
|
||||
700: 'Das Laufwerk, auf dem das StartOS-Betriebssystem installiert wird. Mindestens 18 GB.',
|
||||
701: 'Das Laufwerk, auf dem Ihre StartOS-Daten (Dienste, Einstellungen usw.) gespeichert werden. Dies kann dasselbe wie das OS-Laufwerk oder ein separates Laufwerk sein. Mindestens 20 GB, oder 38 GB bei Verwendung eines einzelnen Laufwerks für OS und Daten.',
|
||||
700: 'Das Laufwerk, auf dem das StartOS-Betriebssystem installiert wird.',
|
||||
701: 'Das Laufwerk, auf dem Ihre StartOS-Daten (Dienste, Einstellungen usw.) gespeichert werden. Dies kann dasselbe wie das OS-Laufwerk oder ein separates Laufwerk sein.',
|
||||
702: 'Versuchen Sie nach der Datenübertragung von diesem Laufwerk nicht, erneut als Start9-Server davon zu booten. Dies kann zu Fehlfunktionen von Diensten, Datenbeschädigung oder Geldverlust führen.',
|
||||
703: 'Muss mindestens 12 Zeichen lang sein',
|
||||
704: 'Darf höchstens 64 Zeichen lang sein',
|
||||
@@ -679,7 +680,7 @@ export default {
|
||||
755: 'Schnittstelle(n)',
|
||||
756: 'Keine Portweiterleitungsregeln',
|
||||
757: 'Portweiterleitungsregeln am Gateway erforderlich',
|
||||
763: 'Sie sind derzeit über Ihre .local-Adresse verbunden. Das Ändern des Hostnamens erfordert einen Wechsel zur neuen .local-Adresse. Ein Neustart des Servers ist ebenfalls erforderlich.',
|
||||
763: 'Sie sind derzeit über Ihre .local-Adresse verbunden. Das Ändern des Hostnamens erfordert einen Wechsel zur neuen .local-Adresse.',
|
||||
764: 'Hostname geändert',
|
||||
765: 'Neue Adresse öffnen',
|
||||
766: 'Ihr Server ist jetzt erreichbar unter',
|
||||
@@ -716,15 +717,9 @@ export default {
|
||||
799: 'Nach Klick auf "Enroll MOK":',
|
||||
800: 'Geben Sie bei Aufforderung Ihr StartOS-Passwort ein',
|
||||
801: 'Ihr System hat Secure Boot aktiviert, was erfordert, dass alle Kernel-Module mit einem vertrauenswürdigen Schlüssel signiert sind. Einige Hardware-Treiber \u2014 wie die für NVIDIA-GPUs \u2014 sind nicht mit dem Standard-Distributionsschlüssel signiert. Die Registrierung des StartOS-Signaturschlüssels ermöglicht es Ihrer Firmware, diesen Modulen zu vertrauen, damit Ihre Hardware vollständig genutzt werden kann.',
|
||||
802: 'Die Übersetzungen auf Betriebssystemebene sind bereits aktiv. Ein Neustart ist erforderlich, damit die Übersetzungen auf Dienstebene wirksam werden.',
|
||||
803: 'Dieses Laufwerk verwendet ext4 und wird automatisch in btrfs konvertiert. Ein Backup wird dringend empfohlen, bevor Sie fortfahren.',
|
||||
804: 'Ich habe ein Backup meiner Daten',
|
||||
805: 'Öffentliche Domain hinzufügen',
|
||||
806: 'Ergebnis',
|
||||
807: 'Download abgeschlossen. Neustart zum Anwenden.',
|
||||
808: 'Hostname geändert, Neustart damit installierte Dienste die neue Adresse verwenden',
|
||||
809: 'Sprache geändert, Neustart damit installierte Dienste die neue Sprache verwenden',
|
||||
810: 'Kioskmodus geändert, Neustart zum Anwenden',
|
||||
811: 'OS-Laufwerk muss mindestens 18 GB groß sein',
|
||||
812: 'Datenlaufwerk muss mindestens 20 GB groß sein',
|
||||
813: 'OS + Daten zusammen erfordern mindestens 38 GB',
|
||||
} satisfies i18n
|
||||
|
||||
@@ -484,6 +484,7 @@ export const ENGLISH: Record<string, number> = {
|
||||
'Kiosk Mode is unavailable on this device': 512,
|
||||
'Enable': 513,
|
||||
'Disable': 514,
|
||||
'This change will take effect after the next boot': 515,
|
||||
'Recommended': 516, // as in, we recommend this
|
||||
'Are you sure you want to dismiss this task?': 517,
|
||||
'Dismiss': 518, // as in, dismiss or delete a task
|
||||
@@ -628,8 +629,8 @@ export const ENGLISH: Record<string, number> = {
|
||||
'Enter the password that was used to encrypt this backup.': 697,
|
||||
'Multiple backups found. Select which one to restore.': 698,
|
||||
'Backups': 699,
|
||||
'The drive where the StartOS operating system will be installed. Minimum 18 GB.': 700,
|
||||
'The drive where your StartOS data (services, settings, etc.) will be stored. This can be the same as the OS drive or a separate drive. Minimum 20 GB, or 38 GB if using a single drive for both OS and data.': 701,
|
||||
'The drive where the StartOS operating system will be installed.': 700,
|
||||
'The drive where your StartOS data (services, settings, etc.) will be stored. This can be the same as the OS drive or a separate drive.': 701,
|
||||
'After transferring data from this drive, do not attempt to boot into it again as a Start9 Server. This may result in services malfunctioning, data corruption, or loss of funds.': 702,
|
||||
'Must be 12 characters or greater': 703,
|
||||
'Must be 64 character or less': 704,
|
||||
@@ -679,7 +680,7 @@ export const ENGLISH: Record<string, number> = {
|
||||
'Interface(s)': 755,
|
||||
'No port forwarding rules': 756,
|
||||
'Port forwarding rules required on gateway': 757,
|
||||
'You are currently connected via your .local address. Changing the hostname will require you to switch to the new .local address. A server restart will also be needed.': 763,
|
||||
'You are currently connected via your .local address. Changing the hostname will require you to switch to the new .local address.': 763,
|
||||
'Hostname Changed': 764,
|
||||
'Open new address': 765,
|
||||
'Your server is now reachable at': 766,
|
||||
@@ -717,15 +718,9 @@ export const ENGLISH: Record<string, number> = {
|
||||
'After clicking "Enroll MOK":': 799,
|
||||
'When prompted, enter your StartOS password': 800,
|
||||
'Your system has Secure Boot enabled, which requires all kernel modules to be signed with a trusted key. Some hardware drivers \u2014 such as those for NVIDIA GPUs \u2014 are not signed by the default distribution key. Enrolling the StartOS signing key allows your firmware to trust these modules so your hardware can be fully utilized.': 801,
|
||||
'OS-level translations are already in effect. A restart is required for service-level translations to take effect.': 802,
|
||||
'This drive uses ext4 and will be automatically converted to btrfs. A backup is strongly recommended before proceeding.': 803,
|
||||
'I have a backup of my data': 804,
|
||||
'Add Public Domain': 805,
|
||||
'Result': 806,
|
||||
'Download complete. Restart to apply.': 807,
|
||||
'Hostname changed, restart for installed services to use the new address': 808,
|
||||
'Language changed, restart for installed services to use the new language': 809,
|
||||
'Kiosk mode changed, restart to apply': 810,
|
||||
'OS drive must be at least 18 GB': 811,
|
||||
'Data drive must be at least 20 GB': 812,
|
||||
'OS + data combined require at least 38 GB': 813,
|
||||
}
|
||||
|
||||
@@ -485,6 +485,7 @@ export default {
|
||||
512: 'El modo quiosco no está disponible en este dispositivo',
|
||||
513: 'Activar',
|
||||
514: 'Desactivar',
|
||||
515: 'Este cambio tendrá efecto después del próximo inicio',
|
||||
516: 'Recomendado',
|
||||
517: '¿Estás seguro de que deseas descartar esta tarea?',
|
||||
518: 'Descartar',
|
||||
@@ -628,8 +629,8 @@ export default {
|
||||
697: 'Introduzca la contraseña que se utilizó para cifrar esta copia de seguridad.',
|
||||
698: 'Se encontraron varias copias de seguridad. Seleccione cuál restaurar.',
|
||||
699: 'Copias de seguridad',
|
||||
700: 'La unidad donde se instalará el sistema operativo StartOS. Mínimo 18 GB.',
|
||||
701: 'La unidad donde se almacenarán sus datos de StartOS (servicios, ajustes, etc.). Puede ser la misma que la unidad del sistema operativo o una unidad separada. Mínimo 20 GB, o 38 GB si se usa una sola unidad para el sistema operativo y los datos.',
|
||||
700: 'La unidad donde se instalará el sistema operativo StartOS.',
|
||||
701: 'La unidad donde se almacenarán sus datos de StartOS (servicios, ajustes, etc.). Puede ser la misma que la unidad del sistema operativo o una unidad separada.',
|
||||
702: 'Después de transferir datos desde esta unidad, no intente arrancar desde ella nuevamente como un servidor Start9. Esto puede provocar fallos en los servicios, corrupción de datos o pérdida de fondos.',
|
||||
703: 'Debe tener 12 caracteres o más',
|
||||
704: 'Debe tener 64 caracteres o menos',
|
||||
@@ -679,7 +680,7 @@ export default {
|
||||
755: 'Interfaz/Interfaces',
|
||||
756: 'Sin reglas de redirección de puertos',
|
||||
757: 'Reglas de redirección de puertos requeridas en la puerta de enlace',
|
||||
763: 'Actualmente está conectado a través de su dirección .local. Cambiar el nombre de host requerirá que cambie a la nueva dirección .local. También será necesario reiniciar el servidor.',
|
||||
763: 'Actualmente está conectado a través de su dirección .local. Cambiar el nombre de host requerirá que cambie a la nueva dirección .local.',
|
||||
764: 'Nombre de host cambiado',
|
||||
765: 'Abrir nueva dirección',
|
||||
766: 'Su servidor ahora es accesible en',
|
||||
@@ -716,15 +717,9 @@ export default {
|
||||
799: 'Después de hacer clic en "Enroll MOK":',
|
||||
800: 'Cuando se le solicite, ingrese su contraseña de StartOS',
|
||||
801: 'Su sistema tiene Secure Boot habilitado, lo que requiere que todos los módulos del kernel estén firmados con una clave de confianza. Algunos controladores de hardware \u2014 como los de las GPU NVIDIA \u2014 no están firmados con la clave de distribución predeterminada. Registrar la clave de firma de StartOS permite que su firmware confíe en estos módulos para que su hardware pueda utilizarse completamente.',
|
||||
802: 'Las traducciones a nivel del sistema operativo ya están en vigor. Se requiere un reinicio para que las traducciones a nivel de servicio surtan efecto.',
|
||||
803: 'Esta unidad usa ext4 y se convertirá automáticamente a btrfs. Se recomienda encarecidamente hacer una copia de seguridad antes de continuar.',
|
||||
804: 'Tengo una copia de seguridad de mis datos',
|
||||
805: 'Agregar dominio público',
|
||||
806: 'Resultado',
|
||||
807: 'Descarga completa. Reiniciar para aplicar.',
|
||||
808: 'Nombre de host cambiado, reiniciar para que los servicios instalados usen la nueva dirección',
|
||||
809: 'Idioma cambiado, reiniciar para que los servicios instalados usen el nuevo idioma',
|
||||
810: 'Modo kiosco cambiado, reiniciar para aplicar',
|
||||
811: 'La unidad del SO debe tener al menos 18 GB',
|
||||
812: 'La unidad de datos debe tener al menos 20 GB',
|
||||
813: 'SO + datos combinados requieren al menos 38 GB',
|
||||
} satisfies i18n
|
||||
|
||||
@@ -485,6 +485,7 @@ export default {
|
||||
512: 'Le mode kiosque n’est pas disponible sur cet appareil',
|
||||
513: 'Activer',
|
||||
514: 'Désactiver',
|
||||
515: 'Ce changement va prendre effet après le prochain démarrage',
|
||||
516: 'Recommandé',
|
||||
517: 'Êtes-vous sûr de vouloir ignorer cette tâche ?',
|
||||
518: 'Ignorer',
|
||||
@@ -628,8 +629,8 @@ export default {
|
||||
697: 'Saisissez le mot de passe utilisé pour chiffrer cette sauvegarde.',
|
||||
698: 'Plusieurs sauvegardes trouvées. Sélectionnez celle à restaurer.',
|
||||
699: 'Sauvegardes',
|
||||
700: 'Le disque sur lequel le système d’exploitation StartOS sera installé. Minimum 18 Go.',
|
||||
701: 'Le disque sur lequel vos données StartOS (services, paramètres, etc.) seront stockées. Il peut s’agir du même disque que le système ou d’un disque séparé. Minimum 20 Go, ou 38 Go si un seul disque est utilisé pour le système et les données.',
|
||||
700: 'Le disque sur lequel le système d’exploitation StartOS sera installé.',
|
||||
701: 'Le disque sur lequel vos données StartOS (services, paramètres, etc.) seront stockées. Il peut s’agir du même disque que le système ou d’un disque séparé.',
|
||||
702: 'Après le transfert des données depuis ce disque, n’essayez pas de démarrer dessus à nouveau en tant que serveur Start9. Cela peut entraîner des dysfonctionnements des services, une corruption des données ou une perte de fonds.',
|
||||
703: 'Doit comporter au moins 12 caractères',
|
||||
704: 'Doit comporter au maximum 64 caractères',
|
||||
@@ -679,7 +680,7 @@ export default {
|
||||
755: 'Interface(s)',
|
||||
756: 'Aucune règle de redirection de port',
|
||||
757: 'Règles de redirection de ports requises sur la passerelle',
|
||||
763: "Vous êtes actuellement connecté via votre adresse .local. Changer le nom d'hôte nécessitera de passer à la nouvelle adresse .local. Un redémarrage du serveur sera également nécessaire.",
|
||||
763: "Vous êtes actuellement connecté via votre adresse .local. Changer le nom d'hôte nécessitera de passer à la nouvelle adresse .local.",
|
||||
764: "Nom d'hôte modifié",
|
||||
765: 'Ouvrir la nouvelle adresse',
|
||||
766: 'Votre serveur est maintenant accessible à',
|
||||
@@ -716,15 +717,9 @@ export default {
|
||||
799: 'Après avoir cliqué sur "Enroll MOK" :',
|
||||
800: 'Lorsque vous y êtes invité, entrez votre mot de passe StartOS',
|
||||
801: "Votre système a Secure Boot activé, ce qui exige que tous les modules du noyau soient signés avec une clé de confiance. Certains pilotes matériels \u2014 comme ceux des GPU NVIDIA \u2014 ne sont pas signés par la clé de distribution par défaut. L'enregistrement de la clé de signature StartOS permet à votre firmware de faire confiance à ces modules afin que votre matériel puisse être pleinement utilisé.",
|
||||
802: "Les traductions au niveau du système d'exploitation sont déjà en vigueur. Un redémarrage est nécessaire pour que les traductions au niveau des services prennent effet.",
|
||||
803: 'Ce disque utilise ext4 et sera automatiquement converti en btrfs. Il est fortement recommandé de faire une sauvegarde avant de continuer.',
|
||||
804: "J'ai une sauvegarde de mes données",
|
||||
805: 'Ajouter un domaine public',
|
||||
806: 'Résultat',
|
||||
807: 'Téléchargement terminé. Redémarrer pour appliquer.',
|
||||
808: "Nom d'hôte modifié, redémarrer pour que les services installés utilisent la nouvelle adresse",
|
||||
809: 'Langue modifiée, redémarrer pour que les services installés utilisent la nouvelle langue',
|
||||
810: 'Mode kiosque modifié, redémarrer pour appliquer',
|
||||
811: 'Le disque système doit faire au moins 18 Go',
|
||||
812: 'Le disque de données doit faire au moins 20 Go',
|
||||
813: 'Système + données combinés nécessitent au moins 38 Go',
|
||||
} satisfies i18n
|
||||
|
||||
@@ -485,6 +485,7 @@ export default {
|
||||
512: 'Tryb kiosku jest niedostępny na tym urządzeniu',
|
||||
513: 'Włącz',
|
||||
514: 'Wyłącz',
|
||||
515: 'Ta zmiana zacznie obowiązywać po następnym uruchomieniu',
|
||||
516: 'Zalecane',
|
||||
517: 'Czy na pewno chcesz odrzucić to zadanie?',
|
||||
518: 'Odrzuć',
|
||||
@@ -628,8 +629,8 @@ export default {
|
||||
697: 'Wprowadź hasło użyte do zaszyfrowania tej kopii zapasowej.',
|
||||
698: 'Znaleziono wiele kopii zapasowych. Wybierz, którą przywrócić.',
|
||||
699: 'Kopie zapasowe',
|
||||
700: 'Dysk, na którym zostanie zainstalowany system operacyjny StartOS. Minimum 18 GB.',
|
||||
701: 'Dysk, na którym będą przechowywane dane StartOS (usługi, ustawienia itp.). Może to być ten sam dysk co systemowy lub oddzielny dysk. Minimum 20 GB lub 38 GB w przypadku jednego dysku na system i dane.',
|
||||
700: 'Dysk, na którym zostanie zainstalowany system operacyjny StartOS.',
|
||||
701: 'Dysk, na którym będą przechowywane dane StartOS (usługi, ustawienia itp.). Może to być ten sam dysk co systemowy lub oddzielny dysk.',
|
||||
702: 'Po przeniesieniu danych z tego dysku nie próbuj ponownie uruchamiać z niego systemu jako serwer Start9. Może to spowodować nieprawidłowe działanie usług, uszkodzenie danych lub utratę środków.',
|
||||
703: 'Musi mieć co najmniej 12 znaków',
|
||||
704: 'Musi mieć maksymalnie 64 znaki',
|
||||
@@ -679,7 +680,7 @@ export default {
|
||||
755: 'Interfejs(y)',
|
||||
756: 'Brak reguł przekierowania portów',
|
||||
757: 'Reguły przekierowania portów wymagane na bramce',
|
||||
763: 'Jesteś obecnie połączony przez adres .local. Zmiana nazwy hosta będzie wymagać przełączenia na nowy adres .local. Konieczne będzie również ponowne uruchomienie serwera.',
|
||||
763: 'Jesteś obecnie połączony przez adres .local. Zmiana nazwy hosta będzie wymagać przełączenia na nowy adres .local.',
|
||||
764: 'Nazwa hosta zmieniona',
|
||||
765: 'Otwórz nowy adres',
|
||||
766: 'Twój serwer jest teraz dostępny pod adresem',
|
||||
@@ -716,15 +717,9 @@ export default {
|
||||
799: 'Po kliknięciu "Enroll MOK":',
|
||||
800: 'Po wyświetleniu monitu wprowadź swoje hasło StartOS',
|
||||
801: 'Twój system ma włączony Secure Boot, co wymaga, aby wszystkie moduły jądra były podpisane zaufanym kluczem. Niektóre sterowniki sprzętowe \u2014 takie jak te dla GPU NVIDIA \u2014 nie są podpisane domyślnym kluczem dystrybucji. Zarejestrowanie klucza podpisu StartOS pozwala firmware ufać tym modułom, aby sprzęt mógł być w pełni wykorzystany.',
|
||||
802: 'Tłumaczenia na poziomie systemu operacyjnego są już aktywne. Wymagane jest ponowne uruchomienie, aby tłumaczenia na poziomie usług zaczęły obowiązywać.',
|
||||
803: 'Ten dysk używa ext4 i zostanie automatycznie skonwertowany na btrfs. Zdecydowanie zaleca się wykonanie kopii zapasowej przed kontynuowaniem.',
|
||||
804: 'Mam kopię zapasową moich danych',
|
||||
805: 'Dodaj domenę publiczną',
|
||||
806: 'Wynik',
|
||||
807: 'Pobieranie zakończone. Uruchom ponownie, aby zastosować.',
|
||||
808: 'Nazwa hosta zmieniona, uruchom ponownie, aby zainstalowane usługi używały nowego adresu',
|
||||
809: 'Język zmieniony, uruchom ponownie, aby zainstalowane usługi używały nowego języka',
|
||||
810: 'Tryb kiosku zmieniony, uruchom ponownie, aby zastosować',
|
||||
811: 'Dysk systemowy musi mieć co najmniej 18 GB',
|
||||
812: 'Dysk danych musi mieć co najmniej 20 GB',
|
||||
813: 'System + dane łącznie wymagają co najmniej 38 GB',
|
||||
} satisfies i18n
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
export type AccessType =
|
||||
| 'tor'
|
||||
| 'mdns'
|
||||
| 'localhost'
|
||||
| 'ipv4'
|
||||
|
||||
@@ -17,13 +17,11 @@ import {
|
||||
TuiButton,
|
||||
TuiDialogContext,
|
||||
TuiError,
|
||||
TuiIcon,
|
||||
TuiInput,
|
||||
TuiNotificationService,
|
||||
TuiTextfield,
|
||||
TuiInput,
|
||||
tuiValidationErrorsProvider,
|
||||
} from '@taiga-ui/core'
|
||||
import { TuiButtonLoading, TuiPassword } from '@taiga-ui/kit'
|
||||
import { TuiButtonLoading } from '@taiga-ui/kit'
|
||||
import { TuiForm } from '@taiga-ui/layout'
|
||||
import { injectContext, PolymorpheusComponent } from '@taiga-ui/polymorpheus'
|
||||
import { map } from 'rxjs'
|
||||
@@ -34,24 +32,16 @@ import { ApiService } from 'src/app/services/api/api.service'
|
||||
<form tuiForm [formGroup]="form">
|
||||
<tui-textfield>
|
||||
<label tuiLabel>New password</label>
|
||||
<input
|
||||
tuiInput
|
||||
tuiAutoFocus
|
||||
type="password"
|
||||
formControlName="password"
|
||||
/>
|
||||
<tui-icon tuiPassword />
|
||||
<input tuiInput tuiAutoFocus formControlName="password" />
|
||||
</tui-textfield>
|
||||
<tui-error formControlName="password" />
|
||||
<tui-textfield>
|
||||
<label tuiLabel>Confirm new password</label>
|
||||
<input
|
||||
tuiInput
|
||||
type="password"
|
||||
formControlName="confirm"
|
||||
[tuiValidator]="matchValidator()"
|
||||
/>
|
||||
<tui-icon tuiPassword />
|
||||
</tui-textfield>
|
||||
<tui-error formControlName="confirm" />
|
||||
<footer>
|
||||
@@ -82,10 +72,7 @@ import { ApiService } from 'src/app/services/api/api.service'
|
||||
TuiButtonLoading,
|
||||
TuiError,
|
||||
TuiForm,
|
||||
TuiIcon,
|
||||
TuiInput,
|
||||
TuiPassword,
|
||||
TuiTextfield,
|
||||
TuiValidator,
|
||||
],
|
||||
})
|
||||
|
||||
@@ -50,32 +50,45 @@ import { CHANGE_PASSWORD } from './change-password'
|
||||
</button>
|
||||
}
|
||||
</div>
|
||||
</div>
|
||||
<div tuiCardLarge [style.align-items]="'start'">
|
||||
<button tuiButton size="s" (click)="onChangePassword()">
|
||||
Change password
|
||||
</button>
|
||||
<button
|
||||
tuiButton
|
||||
size="s"
|
||||
iconStart="@tui.rotate-cw"
|
||||
[loading]="restarting()"
|
||||
(click)="onRestart()"
|
||||
>
|
||||
Reboot VPS
|
||||
</button>
|
||||
<button tuiButton size="s" iconStart="@tui.log-out" (click)="onLogout()">
|
||||
Logout
|
||||
</button>
|
||||
<div tuiCell>
|
||||
<span tuiTitle>
|
||||
<strong>Change password</strong>
|
||||
</span>
|
||||
<button tuiButton size="s" (click)="onChangePassword()">Change</button>
|
||||
</div>
|
||||
<div tuiCell>
|
||||
<span tuiTitle>
|
||||
<strong>Restart</strong>
|
||||
<span tuiSubtitle>Restart the VPS</span>
|
||||
</span>
|
||||
<button
|
||||
tuiButton
|
||||
size="s"
|
||||
appearance="secondary"
|
||||
iconStart="@tui.rotate-cw"
|
||||
[loading]="restarting()"
|
||||
(click)="onRestart()"
|
||||
>
|
||||
Restart
|
||||
</button>
|
||||
</div>
|
||||
<div tuiCell>
|
||||
<span tuiTitle>
|
||||
<strong>Logout</strong>
|
||||
</span>
|
||||
<button
|
||||
tuiButton
|
||||
size="s"
|
||||
appearance="secondary-destructive"
|
||||
iconStart="@tui.log-out"
|
||||
(click)="onLogout()"
|
||||
>
|
||||
Logout
|
||||
</button>
|
||||
</div>
|
||||
</div>
|
||||
`,
|
||||
styles: `
|
||||
:host {
|
||||
display: flex;
|
||||
flex-direction: column;
|
||||
gap: 1rem;
|
||||
}
|
||||
|
||||
[tuiCardLarge] {
|
||||
background: var(--tui-background-neutral-1);
|
||||
|
||||
@@ -135,9 +148,9 @@ export default class Settings {
|
||||
await this.api.restart()
|
||||
this.dialogs
|
||||
.open(
|
||||
'The VPS is rebooting. Please wait 1\u20132 minutes, then refresh the page.',
|
||||
'The VPS is restarting. Please wait 1\u20132 minutes, then refresh the page.',
|
||||
{
|
||||
label: 'Rebooting',
|
||||
label: 'Restarting',
|
||||
},
|
||||
)
|
||||
.subscribe()
|
||||
|
||||
@@ -14,7 +14,7 @@ body {
|
||||
isolation: isolate;
|
||||
overflow-x: hidden;
|
||||
background:
|
||||
linear-gradient(var(--tui-background-base, #171717), var(--tui-background-base, #171717)),
|
||||
conic-gradient(var(--tui-background-base)),
|
||||
radial-gradient(circle at top right, #5240a8, transparent 40%),
|
||||
radial-gradient(circle at bottom right, #9236c9, transparent),
|
||||
radial-gradient(circle at 25% 100%, #5b65d5, transparent 30%),
|
||||
|
||||
@@ -1,5 +1,8 @@
|
||||
@if (config.isSecureContext()) {
|
||||
<!-- Secure context -->
|
||||
@if (config.isLanHttp()) {
|
||||
<!-- Local HTTP -->
|
||||
<ca-wizard />
|
||||
} @else {
|
||||
<!-- not Local HTTP -->
|
||||
<div tuiCardLarge class="card">
|
||||
<img alt="StartOS Icon" class="logo" src="assets/img/icon.png" />
|
||||
<h1 class="header">{{ 'Login to StartOS' | i18n }}</h1>
|
||||
@@ -20,7 +23,4 @@
|
||||
<button tuiButton class="button">{{ 'Login' | i18n }}</button>
|
||||
</form>
|
||||
</div>
|
||||
} @else {
|
||||
<!-- Insecure context -->
|
||||
<ca-wizard />
|
||||
}
|
||||
|
||||
@@ -24,11 +24,7 @@ export class LogsFetchDirective {
|
||||
}),
|
||||
),
|
||||
),
|
||||
tap(res => {
|
||||
if (res.startCursor) {
|
||||
this.component.setCursor(res.startCursor)
|
||||
}
|
||||
}),
|
||||
tap(res => this.component.setCursor(res.startCursor)),
|
||||
map(({ entries }) => convertAnsi(entries)),
|
||||
catchError(e => {
|
||||
this.errors.handleError(e)
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user