Compare commits

..

1 Commits

Author SHA1 Message Date
Matt Hill
fe28a812a4 fix ssh, undeprecate wifi 2026-02-10 13:41:54 -07:00
138 changed files with 15166 additions and 12315 deletions

View File

@@ -1,6 +1,5 @@
{ {
"attribution": { "attribution": {
"commit": "", "commit": ""
"pr": ""
} }
} }

2
.gitignore vendored
View File

@@ -21,4 +21,4 @@ secrets.db
/build/lib/firmware /build/lib/firmware
tmp tmp
web/.i18n-checked web/.i18n-checked
docs/USER.md agents/USER.md

127
CLAUDE.md
View File

@@ -11,7 +11,7 @@ StartOS is an open-source Linux distribution for running personal servers. It ma
- Frontend: Angular 20 + TypeScript + TaigaUI - Frontend: Angular 20 + TypeScript + TaigaUI
- Container runtime: Node.js/TypeScript with LXC - Container runtime: Node.js/TypeScript with LXC
- Database/State: Patch-DB (git submodule) - storage layer with reactive frontend sync - Database/State: Patch-DB (git submodule) - storage layer with reactive frontend sync
- API: JSON-RPC via rpc-toolkit (see `core/rpc-toolkit.md`) - API: JSON-RPC via rpc-toolkit (see `agents/rpc-toolkit.md`)
- Auth: Password + session cookie, public/private key signatures, local authcookie (see `core/src/middleware/auth/`) - Auth: Password + session cookie, public/private key signatures, local authcookie (see `core/src/middleware/auth/`)
## Build & Development ## Build & Development
@@ -29,52 +29,115 @@ make update-startbox REMOTE=start9@<ip> # Fastest iteration (binary + UI)
make test-core # Run Rust tests make test-core # Run Rust tests
``` ```
### Verifying code changes
When making changes across multiple layers (Rust, SDK, web, container-runtime), verify in this order:
1. **Rust**: `cargo check -p start-os` — verifies core compiles
2. **TS bindings**: `make ts-bindings` — regenerates TypeScript types from Rust `#[ts(export)]` structs
- Runs `./core/build/build-ts.sh` to export ts-rs types to `core/bindings/`
- Syncs `core/bindings/` → `sdk/base/lib/osBindings/` via rsync
- If you manually edit files in `sdk/base/lib/osBindings/`, you must still rebuild the SDK (step 3)
3. **SDK bundle**: `cd sdk && make baseDist dist` — compiles SDK source into packages
- `baseDist/` is consumed by `/web` (via `@start9labs/start-sdk-base`)
- `dist/` is consumed by `/container-runtime` (via `@start9labs/start-sdk`)
- Web and container-runtime reference the **built** SDK, not source files
4. **Web type check**: `cd web && npm run check` — type-checks all Angular projects
5. **Container runtime type check**: `cd container-runtime && npm run check` — type-checks the runtime
**Important**: Editing `sdk/base/lib/osBindings/*.ts` alone is NOT sufficient — you must rebuild the SDK bundle (step 3) before web/container-runtime can see the changes.
## Architecture ## Architecture
Each major component has its own `CLAUDE.md` with detailed guidance. ### Core (`/core`)
The Rust backend daemon. Main binaries:
- `startbox` - Main daemon (runs as `startd`)
- `start-cli` - CLI interface
- `start-container` - Runs inside LXC containers; communicates with host and manages subcontainers
- `registrybox` - Registry daemon
- `tunnelbox` - VPN/tunnel daemon
- **`core/`** — Rust backend daemon (startbox, start-cli, start-container, registrybox, tunnelbox) **Key modules:**
- **`web/`** — Angular frontend workspace (admin UI, setup wizard, marketplace, shared library) - `src/context/` - Context types (RpcContext, CliContext, InitContext, DiagnosticContext)
- **`container-runtime/`** — Node.js runtime managing service containers via JSON-RPC - `src/service/` - Service lifecycle management with actor pattern (`service_actor.rs`)
- **`sdk/`** — TypeScript SDK for packaging services (`@start9labs/start-sdk`) - `src/db/model/` - Patch-DB models (`public.rs` synced to frontend, `private.rs` backend-only)
- **`patch-db/`** — Git submodule providing diff-based state synchronization - `src/net/` - Networking (DNS, ACME, WiFi, Tor via Arti, WireGuard)
- `src/s9pk/` - S9PK package format (merkle archive)
- `src/registry/` - Package registry management
**RPC Pattern:** See `agents/rpc-toolkit.md`
### Web (`/web`)
Angular projects sharing common code:
- `projects/ui/` - Main admin interface
- `projects/setup-wizard/` - Initial setup
- `projects/start-tunnel/` - VPN management UI
- `projects/shared/` - Common library (API clients, components)
- `projects/marketplace/` - Service discovery
**Development:**
```bash
cd web
npm ci
npm run start:ui # Dev server with mocks
npm run build:ui # Production build
npm run check # Type check all projects
```
### Container Runtime (`/container-runtime`)
Node.js runtime that manages service containers via RPC. See `RPCSpec.md` for protocol.
**Container Architecture:**
```
LXC Container (uniform base for all services)
└── systemd
└── container-runtime.service
└── Loads /usr/lib/startos/package/index.js (from s9pk javascript.squashfs)
└── Package JS launches subcontainers (from images in s9pk)
```
The container runtime communicates with the host via JSON-RPC over Unix socket. Package JavaScript must export functions conforming to the `ABI` type defined in `sdk/base/lib/types.ts`.
**`/media/startos/` directory (mounted by host into container):**
| Path | Description |
|------|-------------|
| `volumes/<name>/` | Package data volumes (id-mapped, persistent) |
| `assets/` | Read-only assets from s9pk `assets.squashfs` |
| `images/<name>/` | Container images (squashfs, used for subcontainers) |
| `images/<name>.env` | Environment variables for image |
| `images/<name>.json` | Image metadata |
| `backup/` | Backup mount point (mounted during backup operations) |
| `rpc/service.sock` | RPC socket (container runtime listens here) |
| `rpc/host.sock` | Host RPC socket (for effects callbacks to host) |
**S9PK Structure:** See `agents/s9pk-structure.md`
### SDK (`/sdk`)
TypeScript SDK for packaging services (`@start9labs/start-sdk`).
- `base/` - Core types, ABI definitions, effects interface (`@start9labs/start-sdk-base`)
- `package/` - Full SDK for package developers, re-exports base
### Patch-DB (`/patch-db`)
Git submodule providing diff-based state synchronization. Changes to `db/model/public.rs` automatically sync to the frontend.
**Key patterns:**
- `db.peek().await` - Get a read-only snapshot of the database state
- `db.mutate(|db| { ... }).await` - Apply mutations atomically, returns `MutateResult`
- `#[derive(HasModel)]` - Derive macro for types stored in the database, generates typed accessors
**Generated accessor types** (from `HasModel` derive):
- `as_field()` - Immutable reference: `&Model<T>`
- `as_field_mut()` - Mutable reference: `&mut Model<T>`
- `into_field()` - Owned value: `Model<T>`
**`Model<T>` APIs** (from `db/prelude.rs`):
- `.de()` - Deserialize to `T`
- `.ser(&value)` - Serialize from `T`
- `.mutate(|v| ...)` - Deserialize, mutate, reserialize
- For maps: `.keys()`, `.as_idx(&key)`, `.as_idx_mut(&key)`, `.insert()`, `.remove()`, `.contains_key()`
## Supplementary Documentation ## Supplementary Documentation
The `docs/` directory contains cross-cutting documentation for AI assistants: The `agents/` directory contains detailed documentation for AI assistants:
- `TODO.md` - Pending tasks for AI agents (check this first, remove items when completed) - `TODO.md` - Pending tasks for AI agents (check this first, remove items when completed)
- `USER.md` - Current user identifier (gitignored, see below) - `USER.md` - Current user identifier (gitignored, see below)
- `exver.md` - Extended versioning format (used across core, sdk, and web) - `rpc-toolkit.md` - JSON-RPC patterns and handler configuration
- `VERSION_BUMP.md` - Guide for bumping the StartOS version across the codebase - `core-rust-patterns.md` - Common utilities and patterns for Rust code in `/core` (guard pattern, mount guards, etc.)
- `s9pk-structure.md` - S9PK package format structure
Component-specific docs live alongside their code (e.g., `core/rpc-toolkit.md`, `core/i18n-patterns.md`). - `i18n-patterns.md` - Internationalization key conventions and usage in `/core`
### Session Startup ### Session Startup
On startup: On startup:
1. **Check for `docs/USER.md`** - If it doesn't exist, prompt the user for their name/identifier and create it. This file is gitignored since it varies per developer. 1. **Check for `agents/USER.md`** - If it doesn't exist, prompt the user for their name/identifier and create it. This file is gitignored since it varies per developer.
2. **Check `docs/TODO.md` for relevant tasks** - Show TODOs that either: 2. **Check `agents/TODO.md` for relevant tasks** - Show TODOs that either:
- Have no `@username` tag (relevant to everyone) - Have no `@username` tag (relevant to everyone)
- Are tagged with the current user's identifier - Are tagged with the current user's identifier

View File

@@ -1,10 +1,11 @@
# Contributing to StartOS # Contributing to StartOS
This guide is for contributing to the StartOS. If you are interested in packaging a service for StartOS, visit the [service packaging guide](https://github.com/Start9Labs/ai-service-packaging). If you are interested in promoting, providing technical support, creating tutorials, or helping in other ways, please visit the [Start9 website](https://start9.com/contribute). This guide is for contributing to the StartOS. If you are interested in packaging a service for StartOS, visit the [service packaging guide](https://docs.start9.com/latest/packaging-guide/). If you are interested in promoting, providing technical support, creating tutorials, or helping in other ways, please visit the [Start9 website](https://start9.com/contribute).
## Collaboration ## Collaboration
- [Matrix](https://matrix.to/#/#dev-startos:matrix.start9labs.com) - [Matrix](https://matrix.to/#/#community-dev:matrix.start9labs.com)
- [Telegram](https://t.me/start9_labs/47471)
## Project Structure ## Project Structure
@@ -22,7 +23,6 @@ This guide is for contributing to the StartOS. If you are interested in packagin
``` ```
See component READMEs for details: See component READMEs for details:
- [`core`](core/README.md) - [`core`](core/README.md)
- [`web`](web/README.md) - [`web`](web/README.md)
- [`build`](build/README.md) - [`build`](build/README.md)
@@ -30,36 +30,8 @@ See component READMEs for details:
## Environment Setup ## Environment Setup
### Installing Dependencies (Debian/Ubuntu)
> Debian/Ubuntu is the only officially supported build environment.
> MacOS has limited build capabilities and Windows requires [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install).
```sh ```sh
sudo apt update git clone https://github.com/Start9Labs/start-os.git --recurse-submodules
sudo apt install -y ca-certificates curl gpg build-essential
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg-architecture -q DEB_HOST_ARCH) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian bookworm stable" | sudo tee /etc/apt/sources.list.d/docker.list
sudo apt update
sudo apt install -y sed grep gawk jq gzip brotli containerd.io docker-ce docker-ce-cli docker-compose-plugin qemu-user-static binfmt-support squashfs-tools git debspawn rsync b3sum
sudo mkdir -p /etc/debspawn/
echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
sudo usermod -aG docker $USER
sudo su $USER
docker run --privileged --rm tonistiigi/binfmt --install all
docker buildx create --use
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh # proceed with default installation
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/master/install.sh | bash
source ~/.bashrc
nvm install 24
nvm use 24
nvm alias default 24 # this prevents your machine from reverting back to another version
```
### Cloning the Repository
```sh
git clone --recursive https://github.com/Start9Labs/start-os.git --branch next/major
cd start-os cd start-os
``` ```
@@ -91,21 +63,19 @@ This project uses [GNU Make](https://www.gnu.org/software/make/) to build its co
### Environment Variables ### Environment Variables
| Variable | Description | | Variable | Description |
| -------------------- | --------------------------------------------------------------------------------------------------- | |----------|-------------|
| `PLATFORM` | Target platform: `x86_64`, `x86_64-nonfree`, `aarch64`, `aarch64-nonfree`, `riscv64`, `raspberrypi` | | `PLATFORM` | Target platform: `x86_64`, `x86_64-nonfree`, `aarch64`, `aarch64-nonfree`, `riscv64`, `raspberrypi` |
| `ENVIRONMENT` | Hyphen-separated feature flags (see below) | | `ENVIRONMENT` | Hyphen-separated feature flags (see below) |
| `PROFILE` | Build profile: `release` (default) or `dev` | | `PROFILE` | Build profile: `release` (default) or `dev` |
| `GIT_BRANCH_AS_HASH` | Set to `1` to use git branch name as version hash (avoids rebuilds) | | `GIT_BRANCH_AS_HASH` | Set to `1` to use git branch name as version hash (avoids rebuilds) |
**ENVIRONMENT flags:** **ENVIRONMENT flags:**
- `dev` - Enables password SSH before setup, skips frontend compression - `dev` - Enables password SSH before setup, skips frontend compression
- `unstable` - Enables assertions and debugging with performance penalty - `unstable` - Enables assertions and debugging with performance penalty
- `console` - Enables tokio-console for async debugging - `console` - Enables tokio-console for async debugging
**Platform notes:** **Platform notes:**
- `-nonfree` variants include proprietary firmware and drivers - `-nonfree` variants include proprietary firmware and drivers
- `raspberrypi` includes non-free components by necessity - `raspberrypi` includes non-free components by necessity
- Platform is remembered between builds if not specified - Platform is remembered between builds if not specified
@@ -114,72 +84,46 @@ This project uses [GNU Make](https://www.gnu.org/software/make/) to build its co
#### Building #### Building
| Target | Description | | Target | Description |
| ------------- | ---------------------------------------------- | |--------|-------------|
| `iso` | Create full `.iso` image (not for raspberrypi) | | `iso` | Create full `.iso` image (not for raspberrypi) |
| `img` | Create full `.img` image (raspberrypi only) | | `img` | Create full `.img` image (raspberrypi only) |
| `deb` | Build Debian package | | `deb` | Build Debian package |
| `all` | Build all Rust binaries | | `all` | Build all Rust binaries |
| `uis` | Build all web UIs | | `uis` | Build all web UIs |
| `ui` | Build main UI only | | `ui` | Build main UI only |
| `ts-bindings` | Generate TypeScript bindings from Rust types | | `ts-bindings` | Generate TypeScript bindings from Rust types |
#### Deploying to Device #### Deploying to Device
For devices on the same network: For devices on the same network:
| Target | Description | | Target | Description |
| ------------------------------------ | ----------------------------------------------- | |--------|-------------|
| `update-startbox REMOTE=start9@<ip>` | Deploy binary + UI only (fastest) | | `update-startbox REMOTE=start9@<ip>` | Deploy binary + UI only (fastest) |
| `update-deb REMOTE=start9@<ip>` | Deploy full Debian package | | `update-deb REMOTE=start9@<ip>` | Deploy full Debian package |
| `update REMOTE=start9@<ip>` | OTA-style update | | `update REMOTE=start9@<ip>` | OTA-style update |
| `reflash REMOTE=start9@<ip>` | Reflash as if using live ISO | | `reflash REMOTE=start9@<ip>` | Reflash as if using live ISO |
| `update-overlay REMOTE=start9@<ip>` | Deploy to in-memory overlay (reverts on reboot) | | `update-overlay REMOTE=start9@<ip>` | Deploy to in-memory overlay (reverts on reboot) |
For devices on different networks (uses [magic-wormhole](https://github.com/magic-wormhole/magic-wormhole)): For devices on different networks (uses [magic-wormhole](https://github.com/magic-wormhole/magic-wormhole)):
| Target | Description | | Target | Description |
| ------------------- | -------------------- | |--------|-------------|
| `wormhole` | Send startbox binary | | `wormhole` | Send startbox binary |
| `wormhole-deb` | Send Debian package | | `wormhole-deb` | Send Debian package |
| `wormhole-squashfs` | Send squashfs image | | `wormhole-squashfs` | Send squashfs image |
### Creating a VM
Install virt-manager:
```sh
sudo apt update
sudo apt install -y virt-manager
sudo usermod -aG libvirt $USER
sudo su $USER
virt-manager
```
Follow the screenshot walkthrough in [`assets/create-vm/`](assets/create-vm/) to create a new virtual machine. Key steps:
1. Create a new virtual machine
2. Browse for the ISO — create a storage pool pointing to your `results/` directory
3. Select "Generic or unknown OS"
4. Set memory and CPUs
5. Create a disk and name the VM
Build an ISO first:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make iso
```
#### Other #### Other
| Target | Description | | Target | Description |
| ------------------------ | ------------------------------------------- | |--------|-------------|
| `format` | Run code formatting (Rust nightly required) | | `format` | Run code formatting (Rust nightly required) |
| `test` | Run all automated tests | | `test` | Run all automated tests |
| `test-core` | Run Rust tests | | `test-core` | Run Rust tests |
| `test-sdk` | Run SDK tests | | `test-sdk` | Run SDK tests |
| `test-container-runtime` | Run container runtime tests | | `test-container-runtime` | Run container runtime tests |
| `clean` | Delete all compiled artifacts | | `clean` | Delete all compiled artifacts |
## Testing ## Testing
@@ -212,18 +156,15 @@ Run the formatters before committing. Configuration is handled by `rustfmt.toml`
### Documentation & Comments ### Documentation & Comments
**Rust:** **Rust:**
- Add doc comments (`///`) to public APIs, structs, and non-obvious functions - Add doc comments (`///`) to public APIs, structs, and non-obvious functions
- Use `//` comments sparingly for complex logic that isn't self-evident - Use `//` comments sparingly for complex logic that isn't self-evident
- Prefer self-documenting code (clear naming, small functions) over comments - Prefer self-documenting code (clear naming, small functions) over comments
**TypeScript:** **TypeScript:**
- Document exported functions and complex types with JSDoc - Document exported functions and complex types with JSDoc
- Keep comments focused on "why" rather than "what" - Keep comments focused on "why" rather than "what"
**General:** **General:**
- Don't add comments that just restate the code - Don't add comments that just restate the code
- Update or remove comments when code changes - Update or remove comments when code changes
- TODOs should include context: `// TODO(username): reason` - TODOs should include context: `// TODO(username): reason`
@@ -241,7 +182,6 @@ Use [Conventional Commits](https://www.conventionalcommits.org/):
``` ```
**Types:** **Types:**
- `feat` - New feature - `feat` - New feature
- `fix` - Bug fix - `fix` - Bug fix
- `docs` - Documentation only - `docs` - Documentation only
@@ -251,10 +191,10 @@ Use [Conventional Commits](https://www.conventionalcommits.org/):
- `chore` - Build process, dependencies, etc. - `chore` - Build process, dependencies, etc.
**Examples:** **Examples:**
``` ```
feat(web): add dark mode toggle feat(web): add dark mode toggle
fix(core): resolve race condition in service startup fix(core): resolve race condition in service startup
docs: update CONTRIBUTING.md with style guidelines docs: update CONTRIBUTING.md with style guidelines
refactor(sdk): simplify package validation logic refactor(sdk): simplify package validation logic
``` ```

134
DEVELOPMENT.md Normal file
View File

@@ -0,0 +1,134 @@
# Setting up your development environment on Debian/Ubuntu
A step-by-step guide
> This is the only officially supported build environment.
> MacOS has limited build capabilities and Windows requires [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install).
## Installing dependencies
Run the following commands one at a time:
```sh
sudo apt update
sudo apt install -y ca-certificates curl gpg build-essential
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg-architecture -q DEB_HOST_ARCH) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian bookworm stable" | sudo tee /etc/apt/sources.list.d/docker.list
sudo apt update
sudo apt install -y sed grep gawk jq gzip brotli containerd.io docker-ce docker-ce-cli docker-compose-plugin qemu-user-static binfmt-support squashfs-tools git debspawn rsync b3sum
sudo mkdir -p /etc/debspawn/
echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
sudo usermod -aG docker $USER
sudo su $USER
docker run --privileged --rm tonistiigi/binfmt --install all
docker buildx create --use
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh # proceed with default installation
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/master/install.sh | bash
source ~/.bashrc
nvm install 24
nvm use 24
nvm alias default 24 # this prevents your machine from reverting back to another version
```
## Cloning the repository
```sh
git clone --recursive https://github.com/Start9Labs/start-os.git --branch next/major
cd start-os
```
## Building an ISO
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make iso
```
This will build an ISO for your current architecture. If you are building to run on an architecture other than the one you are currently on, replace `$(uname -m)` with the correct platform for the device (one of `aarch64`, `aarch64-nonfree`, `x86_64`, `x86_64-nonfree`, `raspberrypi`)
## Creating a VM
### Install virt-manager
```sh
sudo apt update
sudo apt install -y virt-manager
sudo usermod -aG libvirt $USER
sudo su $USER
```
### Launch virt-manager
```sh
virt-manager
```
### Create new virtual machine
![Select "Create a new virtual machine"](assets/create-vm/step-1.png)
![Click "Forward"](assets/create-vm/step-2.png)
![Click "Browse"](assets/create-vm/step-3.png)
![Click "+"](assets/create-vm/step-4.png)
#### Make sure to set "Target Path" to the path to your results directory in start-os
![Create storage pool](assets/create-vm/step-5.png)
![Select storage pool](assets/create-vm/step-6.png)
![Select ISO](assets/create-vm/step-7.png)
![Select "Generic or unknown OS" and click "Forward"](assets/create-vm/step-8.png)
![Set Memory and CPUs](assets/create-vm/step-9.png)
![Create disk](assets/create-vm/step-10.png)
![Name VM](assets/create-vm/step-11.png)
![Create network](assets/create-vm/step-12.png)
## Updating a VM
The fastest way to update a VM to your latest code depends on what you changed:
### UI or startd:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-startbox REMOTE=start9@<VM IP>
```
### Container runtime or debian dependencies:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-deb REMOTE=start9@<VM IP>
```
### Image recipe:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-squashfs REMOTE=start9@<VM IP>
```
---
If the device you are building for is not available via SSH, it is also possible to use `magic-wormhole` to send the relevant files.
### Prerequisites:
```sh
sudo apt update
sudo apt install -y magic-wormhole
```
As before, the fastest way to update a VM to your latest code depends on what you changed. Each of the following commands will return a command to paste into the shell of the device you would like to upgrade.
### UI or startd:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole
```
### Container runtime or debian dependencies:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole-deb
```
### Image recipe:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole-squashfs
```

View File

@@ -236,9 +236,9 @@ update-startbox: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox
update-deb: results/$(BASENAME).deb # better than update, but only available from debian update-deb: results/$(BASENAME).deb # better than update, but only available from debian
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi @if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create') $(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
$(call mkdir,/media/startos/next/var/tmp/startos-deb) $(call mkdir,/media/startos/next/tmp/startos-deb)
$(call cp,results/$(BASENAME).deb,/media/startos/next/var/tmp/startos-deb/$(BASENAME).deb) $(call cp,results/$(BASENAME).deb,/media/startos/next/tmp/startos-deb/$(BASENAME).deb)
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y --reinstall /var/tmp/startos-deb/$(BASENAME).deb"') $(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y --reinstall /tmp/startos-deb/$(BASENAME).deb"')
update-squashfs: results/$(BASENAME).squashfs update-squashfs: results/$(BASENAME).squashfs
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi @if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi

View File

@@ -7,64 +7,76 @@
<a href="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml"> <a href="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml">
<img src="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml/badge.svg"> <img src="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml/badge.svg">
</a> </a>
<a href="https://heyapollo.com/product/startos"> <a href="https://heyapollo.com/product/startos">
<img alt="Static Badge" src="https://img.shields.io/badge/apollo-review%20%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%20-slateblue"> <img alt="Static Badge" src="https://img.shields.io/badge/apollo-review%20%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%20-slateblue">
</a> </a>
<a href="https://twitter.com/start9labs"> <a href="https://twitter.com/start9labs">
<img alt="X (formerly Twitter) Follow" src="https://img.shields.io/twitter/follow/start9labs"> <img alt="X (formerly Twitter) Follow" src="https://img.shields.io/twitter/follow/start9labs">
</a> </a>
<a href="https://matrix.to/#/#community:matrix.start9labs.com">
<img alt="Static Badge" src="https://img.shields.io/badge/community-matrix-yellow?logo=matrix">
</a>
<a href="https://t.me/start9_labs">
<img alt="Static Badge" src="https://img.shields.io/badge/community-telegram-blue?logo=telegram">
</a>
<a href="https://docs.start9.com"> <a href="https://docs.start9.com">
<img alt="Static Badge" src="https://img.shields.io/badge/docs-orange?label=%F0%9F%91%A4%20support"> <img alt="Static Badge" src="https://img.shields.io/badge/docs-orange?label=%F0%9F%91%A4%20support">
</a> </a>
<a href="https://matrix.to/#/#dev-startos:matrix.start9labs.com"> <a href="https://matrix.to/#/#community-dev:matrix.start9labs.com">
<img alt="Static Badge" src="https://img.shields.io/badge/developer-matrix-darkcyan?logo=matrix"> <img alt="Static Badge" src="https://img.shields.io/badge/developer-matrix-darkcyan?logo=matrix">
</a> </a>
<a href="https://start9.com"> <a href="https://start9.com">
<img alt="Website" src="https://img.shields.io/website?up_message=online&down_message=offline&url=https%3A%2F%2Fstart9.com&logo=website&label=%F0%9F%8C%90%20website"> <img alt="Website" src="https://img.shields.io/website?up_message=online&down_message=offline&url=https%3A%2F%2Fstart9.com&logo=website&label=%F0%9F%8C%90%20website">
</a> </a>
</div> </div>
<br />
<div align="center">
<h3>
Welcome to the era of Sovereign Computing
</h3>
<p>
StartOS is an open source Linux distribution optimized for running a personal server. It facilitates the discovery, installation, network configuration, service configuration, data backup, dependency management, and health monitoring of self-hosted software services.
</p>
</div>
<br />
<p align="center">
<img src="assets/StartOS.png" alt="StartOS" width="85%">
</p>
<br />
## What is StartOS? ## Running StartOS
> [!WARNING]
> StartOS is in beta. It lacks features. It doesn't always work perfectly. Start9 servers are not plug and play. Using them properly requires some effort and patience. Please do not use StartOS or purchase a server if you are unable or unwilling to follow instructions and learn new concepts.
StartOS is an open-source Linux distribution for running a personal server. It handles discovery, installation, network configuration, data backup, dependency management, and health monitoring of self-hosted services. ### 💰 Buy a Start9 server
This is the most convenient option. Simply [buy a server](https://store.start9.com) from Start9 and plug it in.
**Tech stack:** Rust backend (Tokio/Axum), Angular frontend, Node.js container runtime with LXC, and a custom diff-based database ([Patch-DB](https://github.com/Start9Labs/patch-db)) for reactive state synchronization. ### 👷 Build your own server
This option is easier than you might imagine, and there are 4 reasons why you might prefer it:
1. You already have hardware
1. You want to save on shipping costs
1. You prefer not to divulge your physical address
1. You just like building things
Services run in isolated LXC containers, packaged as [S9PKs](https://github.com/Start9Labs/start-os/blob/master/core/s9pk-structure.md) — a signed, merkle-archived format that supports partial downloads and cryptographic verification. To pursue this option, follow one of our [DIY guides](https://start9.com/latest/diy).
## What can you do with it? ## ❤️ Contributing
There are multiple ways to contribute: work directly on StartOS, package a service for the marketplace, or help with documentation and guides. To learn more about contributing, see [here](https://start9.com/contribute/).
StartOS lets you self-host services that would otherwise depend on third-party cloud providers — giving you full ownership of your data and infrastructure. To report security issues, please email our security team - security@start9.com.
Browse available services on the [Start9 Marketplace](https://marketplace.start9.com/), including: ## 🌎 Marketplace
There are dozens of services available for StartOS, and new ones are being added all the time. Check out the full list of available services [here](https://marketplace.start9.com/marketplace). To read more about the Marketplace ecosystem, check out this [blog post](https://blog.start9.com/start9-marketplace-strategy/)
- **Bitcoin & Lightning** — Run a full Bitcoin node, Lightning node, BTCPay Server, and other payment infrastructure ## 🖥️ User Interface Screenshots
- **Communication** — Self-host Matrix, SimpleX, or other messaging platforms
- **Cloud Storage** — Run Nextcloud, Vaultwarden, and other productivity tools
Services are added by the community. If a service you want isn't available, you can [package it yourself](https://github.com/Start9Labs/ai-service-packaging/). <p align="center">
<img src="assets/registry.png" alt="StartOS Marketplace" width="49%">
## Getting StartOS <img src="assets/community.png" alt="StartOS Community Registry" width="49%">
<img src="assets/c-lightning.png" alt="StartOS NextCloud Service" width="49%">
### Buy a Start9 server <img src="assets/btcpay.png" alt="StartOS BTCPay Service" width="49%">
<img src="assets/nextcloud.png" alt="StartOS System Settings" width="49%">
The easiest path. [Buy a server](https://store.start9.com) from Start9 and plug it in. <img src="assets/system.png" alt="StartOS System Settings" width="49%">
<img src="assets/welcome.png" alt="StartOS System Settings" width="49%">
### Build your own <img src="assets/logs.png" alt="StartOS System Settings" width="49%">
</p>
Install StartOS on your own hardware. Follow one of the [DIY guides](https://start9.com/latest/diy). Reasons to go this route:
1. You already have compatible hardware
2. You want to save on shipping costs
3. You prefer not to share your physical address
4. You enjoy building things
### Build from source
See [CONTRIBUTING.md](CONTRIBUTING.md) for environment setup, build instructions, and development workflow.
## Contributing
There are multiple ways to contribute: work directly on StartOS, package a service for the marketplace, or help with documentation and guides. See [CONTRIBUTING.md](CONTRIBUTING.md) or visit [start9.com/contribute](https://start9.com/contribute/).
To report security issues, email [security@start9.com](mailto:security@start9.com).

261
TODO.md
View File

@@ -1,261 +0,0 @@
# AI Agent TODOs
Pending tasks for AI agents. Remove items when completed.
## Unreviewed CLAUDE.md Sections
- [ ] Architecture - Web (`/web`) - @MattDHill
## Features
- [ ] Support preferred external ports besides 443 - @dr-bonez
**Problem**: Currently, port 443 is the only preferred external port that is actually honored. When a
service requests `preferred_external_port: 8443` (or any non-443 value) for SSL, the system ignores
the preference and assigns a dynamic-range port (49152-65535). The `preferred_external_port` is only
used as a label for Tor mappings and as a trigger for the port-443 special case in `update()`.
**Goal**: Honor `preferred_external_port` for both SSL and non-SSL binds when the requested port is
available, with proper conflict resolution and fallback to dynamic-range allocation.
### Design
**Key distinction**: There are two separate concepts for SSL port usage:
1. **Port ownership** (`assigned_ssl_port`) — A port exclusively owned by a binding, allocated from
`AvailablePorts`. Used for server hostnames (`.local`, mDNS, etc.) and iptables forwards.
2. **Domain SSL port** — The port used for domain-based vhost entries. A binding does NOT need to own
a port to have a domain vhost on it. The VHostController already supports multiple hostnames on the
same port via SNI. Any binding can create a domain vhost entry on any SSL port that the
VHostController has a listener for, regardless of who "owns" that port.
For example: the OS owns port 443 as its `assigned_ssl_port`. A service with
`preferred_external_port: 443` won't get 443 as its `assigned_ssl_port` (it's taken), but it CAN
still have domain vhost entries on port 443 — SNI routes by hostname.
#### 1. Preferred Port Allocation for Ownership ✅ DONE
`AvailablePorts::try_alloc(port) -> Option<u16>` added to `forward.rs`. `BindInfo::new()` and
`BindInfo::update()` attempt the preferred port first, falling back to dynamic-range allocation.
#### 2. Per-Address Enable/Disable ✅ DONE
Gateway-level `private_disabled`/`public_enabled` on `NetInfo` replaced with per-address
`DerivedAddressInfo` on `BindInfo`. `hostname_info` removed from `Host` — computed addresses now
live in `BindInfo.addresses.possible`.
**`DerivedAddressInfo` struct** (on `BindInfo`):
```rust
pub struct DerivedAddressInfo {
pub private_disabled: BTreeSet<HostnameInfo>,
pub public_enabled: BTreeSet<HostnameInfo>,
pub possible: BTreeSet<HostnameInfo>, // COMPUTED by update()
}
```
`DerivedAddressInfo::enabled()` returns `possible` filtered by the two sets. `HostnameInfo` derives
`Ord` for `BTreeSet` usage. `AddressFilter` (implementing `InterfaceFilter`) derives enabled
gateway set from `DerivedAddressInfo` for vhost/forward filtering.
**RPC endpoint**: `set-gateway-enabled` replaced with `set-address-enabled` (on both
`server.host.binding` and `package.host.binding`).
**How disabling works per address type** (enforcement deferred to Section 3):
- **WAN/LAN IP:port**: Will be enforced via **source-IP gating** in the vhost layer (Section 3).
- **Hostname-based addresses** (`.local`, domains): Disabled by **not creating the vhost/SNI
entry** for that hostname.
#### 3. Eliminate the Port 5443 Hack: Source-IP-Based WAN Blocking (`vhost.rs`, `net_controller.rs`)
**Current problem**: The `if ssl.preferred_external_port == 443` branch (line 341 of
`net_controller.rs`) creates a bespoke dual-vhost setup: port 5443 for private-only access and port
443 for public (or public+private). This exists because both public and private traffic arrive on the
same port 443 listener, and the current `InterfaceFilter`/`PublicFilter` model distinguishes
public/private by which *network interface* the connection arrived on — which doesn't work when both
traffic types share a listener.
**Solution**: Determine public vs private based on **source IP** at the vhost level. Traffic arriving
from the gateway IP should be treated as public (the gateway may MASQUERADE/NAT internet traffic, so
anything from the gateway is potentially public). Traffic from LAN IPs is private.
This applies to **all** vhost targets, not just port 443:
- **Add a `public` field to `ProxyTarget`** (or an enum: `Public`, `Private`, `Both`) indicating
what traffic this target accepts, derived from the binding's user-controlled `public` field.
- **Modify `VHostTarget::filter()`** (`vhost.rs:342`): Instead of (or in addition to) checking the
network interface via `GatewayInfo`, check the source IP of the TCP connection against known gateway
IPs. If the source IP matches a gateway or IP outside the subnet, the connection is public;
otherwise it's private. Use this to gate against the target's `public` field.
- **Eliminate the 5443 port entirely**: A single vhost entry on port 443 (or any shared SSL port) can
serve both public and private traffic, with per-target source-IP gating determining which backend
handles which connections.
#### 4. Port Forward Mapping in Patch-DB
When a binding is marked `public = true`, StartOS must record the required port forwards in patch-db
so the frontend can display them to the user. The user then configures these on their router manually.
For each public binding, store:
- The external port the router should forward (the actual vhost port used for domains, or the
`assigned_port` / `assigned_ssl_port` for non-domain access)
- The protocol (TCP/UDP)
- The StartOS LAN IP as the forward target
- Which service/binding this forward is for (for display purposes)
This mapping should be in the public database model so the frontend can read and display it.
#### 5. Simplify `update()` Domain Vhost Logic (`net_controller.rs`)
With source-IP gating in the vhost controller:
- **Remove the `== 443` special case** and the 5443 secondary vhost.
- For **server hostnames** (`.local`, mDNS, embassy, startos, localhost): use `assigned_ssl_port`
(the port the binding owns).
- For **domain-based vhost entries**: attempt to use `preferred_external_port` as the vhost port.
This succeeds if the port is either unused or already has an SSL listener (SNI handles sharing).
It fails only if the port is already in use by a non-SSL binding, or is a restricted port. On
failure, fall back to `assigned_ssl_port`.
- The binding's `public` field determines the `ProxyTarget`'s public/private gating.
- Hostname info must exactly match the actual vhost port used: for server hostnames, report
`ssl_port: assigned_ssl_port`. For domains, report `ssl_port: preferred_external_port` if it was
successfully used for the domain vhost, otherwise report `ssl_port: assigned_ssl_port`.
#### 6. Frontend: Interfaces Page Overhaul (View/Manage Split)
The current interfaces page is a single page showing gateways (with toggle), addresses, public
domains, and private domains. It gets split into two pages: **View** and **Manage**.
**SDK**: `preferredExternalPort` is already exposed. No additional SDK changes needed.
##### View Page
Displays all computed addresses for the interface (from `BindInfo.addresses`) as a flat list. For each
address, show: URL, type (IPv4, IPv6, .local, domain), access level (public/private),
gateway name, SSL indicator, enable/disable state, port forward info for public addresses, and a test button
for reachability (see Section 7).
No gateway-level toggles. The old `gateways.component.ts` toggle UI is removed.
**Note**: Exact UI element placement (where toggles, buttons, info badges go) is sensitive.
Prompt the user for specific placement decisions during implementation.
##### Manage Page
Simple CRUD interface for configuring which addresses exist. Two sections:
- **Public domains**: Add/remove. Uses existing RPC endpoints:
- `{server,package}.host.address.domain.public.add`
- `{server,package}.host.address.domain.public.remove`
- **Private domains**: Add/remove. Uses existing RPC endpoints:
- `{server,package}.host.address.domain.private.add`
- `{server,package}.host.address.domain.private.remove`
##### Key Frontend Files to Modify
| File | Change |
|------|--------|
| `web/projects/ui/src/app/routes/portal/components/interfaces/` | Overhaul: split into view/manage |
| `web/projects/ui/src/app/routes/portal/components/interfaces/gateways.component.ts` | Remove (replaced by per-address toggles on View page) |
| `web/projects/ui/src/app/routes/portal/components/interfaces/interface.service.ts` | Update `MappedServiceInterface` to compute enabled addresses from `DerivedAddressInfo` |
| `web/projects/ui/src/app/routes/portal/components/interfaces/addresses/` | Refactor for View page with overflow menu (enable/disable) and test buttons |
| `web/projects/ui/src/app/routes/portal/routes/services/services.routes.ts` | Add routes for view/manage sub-pages |
| `web/projects/ui/src/app/routes/portal/routes/system/system.routes.ts` | Add routes for view/manage sub-pages |
#### 7. Reachability Test Endpoint
New RPC endpoint that tests whether an address is actually reachable, with diagnostic info on
failure.
**RPC endpoint** (`binding.rs` or new file):
- **`test-address`** — Test reachability of a specific address.
```ts
interface BindingTestAddressParams {
internalPort: number
address: HostnameInfo
}
```
The backend simply performs the raw checks and returns the results. The **frontend** owns all
interpretation — it already knows the address type, expected IP, expected port, etc. from the
`HostnameInfo` data, so it can compare against the backend results and construct fix messaging.
```ts
interface TestAddressResult {
dns: string[] | null // resolved IPs, null if not a domain address or lookup failed
portOpen: boolean | null // TCP connect result, null if not applicable
}
```
This yields two RPC methods:
- `server.host.binding.test-address`
- `package.host.binding.test-address`
The frontend already has the full `HostnameInfo` context (expected IP, domain, port, gateway,
public/private). It compares the backend's raw results against the expected state and constructs
localized fix instructions. For example:
- `dns` returned but doesn't contain the expected WAN IP → "Update DNS A record for {domain}
to {wanIp}"
- `dns` is `null` for a domain address → "DNS lookup failed for {domain}"
- `portOpen` is `false` → "Configure port forward on your router: external {port} TCP →
{lanIp}:{port}"
### Key Files
| File | Role |
|------|------|
| `core/src/net/forward.rs` | `AvailablePorts` — port pool allocation, `try_alloc()` for preferred ports |
| `core/src/net/host/binding.rs` | `Bindings` (Map wrapper for patchdb), `BindInfo`/`NetInfo`/`DerivedAddressInfo`/`AddressFilter` — per-address enable/disable, `set-address-enabled` RPC |
| `core/src/net/net_controller.rs:259` | `NetServiceData::update()` — computes `DerivedAddressInfo.possible`, vhost/forward/DNS reconciliation, 5443 hack removal |
| `core/src/net/vhost.rs` | `VHostController` / `ProxyTarget` — source-IP gating for public/private |
| `core/src/net/gateway.rs` | `InterfaceFilter` trait and filter types (`AddressFilter`, `PublicFilter`, etc.) |
| `core/src/net/service_interface.rs` | `HostnameInfo` — derives `Ord` for `BTreeSet` usage |
| `core/src/net/host/address.rs` | `HostAddress` (flattened struct), domain CRUD endpoints |
| `sdk/base/lib/interfaces/Host.ts` | SDK `MultiHost.bindPort()` — no changes needed |
| `core/src/db/model/public.rs` | Public DB model — port forward mapping |
- [ ] Extract TS-exported types into a lightweight sub-crate for fast binding generation
**Problem**: `make ts-bindings` compiles the entire `start-os` crate (with all dependencies: tokio,
axum, openssl, etc.) just to run test functions that serialize type definitions to `.ts` files.
Even in debug mode, this takes minutes. The generated output is pure type info — no runtime code
is needed.
**Goal**: Generate TS bindings in seconds by isolating exported types in a small crate with minimal
dependencies.
**Approach**: Create a `core/bindings-types/` sub-crate containing (or re-exporting) all 168
`#[ts(export)]` types. This crate depends only on `serde`, `ts-rs`, `exver`, and other type-only
crates — not on tokio, axum, openssl, etc. Then `build-ts.sh` runs `cargo test -p bindings-types`
instead of `cargo test -p start-os`.
**Challenge**: The exported types are scattered across `core/src/` and reference each other and
other crate types. Extracting them requires either moving the type definitions into the sub-crate
(and importing them back into `start-os`) or restructuring to share a common types crate.
- [ ] Use auto-generated RPC types in the frontend instead of manual duplicates
**Problem**: The web frontend manually defines ~755 lines of API request/response types in
`web/projects/ui/src/app/services/api/api.types.ts` that can drift from the actual Rust types.
**Current state**: The Rust backend already has `#[ts(export)]` on RPC param types (e.g.
`AddTunnelParams`, `SetWifiEnabledParams`, `LoginParams`), and they are generated into
`core/bindings/`. However, commit `71b83245b` ("Chore/unexport api ts #2585", April 2024)
deliberately stopped building them into the SDK and had the frontend maintain its own types.
**Goal**: Reverse that decision — pipe the generated RPC types through the SDK into the frontend
so `api.types.ts` can import them instead of duplicating them. This eliminates drift between
backend and frontend API contracts.
- [ ] Auto-configure port forwards via UPnP/NAT-PMP/PCP - @dr-bonez
**Blocked by**: "Support preferred external ports besides 443" (must be implemented and tested
end-to-end first).
**Goal**: When a binding is marked public, automatically configure port forwards on the user's router
using UPnP, NAT-PMP, or PCP, instead of requiring manual router configuration. Fall back to
displaying manual instructions (the port forward mapping from patch-db) when auto-configuration is
unavailable or fails.

9
agents/TODO.md Normal file
View File

@@ -0,0 +1,9 @@
# AI Agent TODOs
Pending tasks for AI agents. Remove items when completed.
## Unreviewed CLAUDE.md Sections
- [ ] Architecture - Web (`/web`) - @MattDHill

BIN
assets/StartOS.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.1 MiB

BIN
assets/btcpay.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 396 KiB

BIN
assets/c-lightning.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 402 KiB

BIN
assets/community.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 591 KiB

BIN
assets/logs.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.6 MiB

BIN
assets/nextcloud.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 319 KiB

BIN
assets/registry.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 521 KiB

BIN
assets/system.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 331 KiB

BIN
assets/welcome.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 402 KiB

View File

@@ -5,7 +5,7 @@ if [ -z "$sip" ] || [ -z "$dip" ] || [ -z "$dprefix" ] || [ -z "$sport" ] || [ -
exit 1 exit 1
fi fi
NAME="F$(echo "$sip:$sport -> $dip/$dprefix:$dport ${src_subnet:-any}" | sha256sum | head -c 15)" NAME="F$(echo "$sip:$sport -> $dip/$dprefix:$dport" | sha256sum | head -c 15)"
for kind in INPUT FORWARD ACCEPT; do for kind in INPUT FORWARD ACCEPT; do
if ! iptables -C $kind -j "${NAME}_${kind}" 2> /dev/null; then if ! iptables -C $kind -j "${NAME}_${kind}" 2> /dev/null; then
@@ -13,7 +13,7 @@ for kind in INPUT FORWARD ACCEPT; do
iptables -A $kind -j "${NAME}_${kind}" iptables -A $kind -j "${NAME}_${kind}"
fi fi
done done
for kind in PREROUTING OUTPUT; do for kind in PREROUTING INPUT OUTPUT POSTROUTING; do
if ! iptables -t nat -C $kind -j "${NAME}_${kind}" 2> /dev/null; then if ! iptables -t nat -C $kind -j "${NAME}_${kind}" 2> /dev/null; then
iptables -t nat -N "${NAME}_${kind}" 2> /dev/null iptables -t nat -N "${NAME}_${kind}" 2> /dev/null
iptables -t nat -A $kind -j "${NAME}_${kind}" iptables -t nat -A $kind -j "${NAME}_${kind}"
@@ -26,7 +26,7 @@ trap 'err=1' ERR
for kind in INPUT FORWARD ACCEPT; do for kind in INPUT FORWARD ACCEPT; do
iptables -F "${NAME}_${kind}" 2> /dev/null iptables -F "${NAME}_${kind}" 2> /dev/null
done done
for kind in PREROUTING OUTPUT; do for kind in PREROUTING INPUT OUTPUT POSTROUTING; do
iptables -t nat -F "${NAME}_${kind}" 2> /dev/null iptables -t nat -F "${NAME}_${kind}" 2> /dev/null
done done
if [ "$UNDO" = 1 ]; then if [ "$UNDO" = 1 ]; then
@@ -36,21 +36,20 @@ if [ "$UNDO" = 1 ]; then
fi fi
# DNAT: rewrite destination for incoming packets (external traffic) # DNAT: rewrite destination for incoming packets (external traffic)
# When src_subnet is set, only forward traffic from that subnet (private forwards) iptables -t nat -A ${NAME}_PREROUTING -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
if [ -n "$src_subnet" ]; then iptables -t nat -A ${NAME}_PREROUTING -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_PREROUTING -s "$src_subnet" -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_PREROUTING -s "$src_subnet" -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
else
iptables -t nat -A ${NAME}_PREROUTING -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_PREROUTING -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
fi
# DNAT: rewrite destination for locally-originated packets (hairpin from host itself) # DNAT: rewrite destination for locally-originated packets (hairpin from host itself)
iptables -t nat -A ${NAME}_OUTPUT -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport" iptables -t nat -A ${NAME}_OUTPUT -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_OUTPUT -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport" iptables -t nat -A ${NAME}_OUTPUT -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
# MASQUERADE: rewrite source for all forwarded traffic to the destination
# This ensures responses are routed back through the host regardless of source IP
iptables -t nat -A ${NAME}_POSTROUTING -d "$dip" -p tcp --dport "$dport" -j MASQUERADE
iptables -t nat -A ${NAME}_POSTROUTING -d "$dip" -p udp --dport "$dport" -j MASQUERADE
# Allow new connections to be forwarded to the destination # Allow new connections to be forwarded to the destination
iptables -A ${NAME}_FORWARD -d $dip -p tcp --dport $dport -m state --state NEW -j ACCEPT iptables -A ${NAME}_FORWARD -d $dip -p tcp --dport $dport -m state --state NEW -j ACCEPT
iptables -A ${NAME}_FORWARD -d $dip -p udp --dport $dport -m state --state NEW -j ACCEPT iptables -A ${NAME}_FORWARD -d $dip -p udp --dport $dport -m state --state NEW -j ACCEPT
exit $err exit $err

View File

@@ -1,32 +0,0 @@
# Container Runtime — Node.js Service Manager
Node.js runtime that manages service containers via JSON-RPC. See `RPCSpec.md` in this directory for the full RPC protocol.
## Architecture
```
LXC Container (uniform base for all services)
└── systemd
└── container-runtime.service
└── Loads /usr/lib/startos/package/index.js (from s9pk javascript.squashfs)
└── Package JS launches subcontainers (from images in s9pk)
```
The container runtime communicates with the host via JSON-RPC over Unix socket. Package JavaScript must export functions conforming to the `ABI` type defined in `sdk/base/lib/types.ts`.
## `/media/startos/` Directory (mounted by host into container)
| Path | Description |
|------|-------------|
| `volumes/<name>/` | Package data volumes (id-mapped, persistent) |
| `assets/` | Read-only assets from s9pk `assets.squashfs` |
| `images/<name>/` | Container images (squashfs, used for subcontainers) |
| `images/<name>.env` | Environment variables for image |
| `images/<name>.json` | Image metadata |
| `backup/` | Backup mount point (mounted during backup operations) |
| `rpc/service.sock` | RPC socket (container runtime listens here) |
| `rpc/host.sock` | Host RPC socket (for effects callbacks to host) |
## S9PK Structure
See `../core/s9pk-structure.md` for the S9PK package format.

View File

@@ -139,8 +139,8 @@ Evaluate a script in the runtime context. Used for debugging.
The `execute` and `sandbox` methods route to procedures based on the `procedure` path: The `execute` and `sandbox` methods route to procedures based on the `procedure` path:
| Procedure | Description | | Procedure | Description |
| -------------------------- | ---------------------------- | |-----------|-------------|
| `/backup/create` | Create a backup | | `/backup/create` | Create a backup |
| `/actions/{name}/getInput` | Get input spec for an action | | `/actions/{name}/getInput` | Get input spec for an action |
| `/actions/{name}/run` | Run an action with input | | `/actions/{name}/run` | Run an action with input |

View File

@@ -82,15 +82,18 @@ export class DockerProcedureContainer extends Drop {
}), }),
) )
} else if (volumeMount.type === "certificate") { } else if (volumeMount.type === "certificate") {
const hostInfo = await effects.getHostInfo({
hostId: volumeMount["interface-id"],
})
const hostnames = [ const hostnames = [
`${packageId}.embassy`, `${packageId}.embassy`,
...new Set( ...new Set(
Object.values(hostInfo?.bindings || {}) Object.values(
.flatMap((b) => b.addresses.possible) (
.map((h) => h.hostname.value), await effects.getHostInfo({
hostId: volumeMount["interface-id"],
})
)?.hostnameInfo || {},
)
.flatMap((h) => h)
.flatMap((h) => (h.kind === "onion" ? [h.hostname.value] : [])),
).values(), ).values(),
] ]
const certChain = await effects.getSslCertificate({ const certChain = await effects.getSslCertificate({

View File

@@ -1244,8 +1244,12 @@ async function updateConfig(
? "" ? ""
: catchFn( : catchFn(
() => () =>
filled.addressInfo!.filter({ kind: "mdns" })!.hostnames[0] (specValue.target === "lan-address"
.hostname.value, ? filled.addressInfo!.filter({ kind: "mdns" }) ||
filled.addressInfo!.onion
: filled.addressInfo!.onion ||
filled.addressInfo!.filter({ kind: "mdns" })
).hostnames[0].hostname.value,
) || "" ) || ""
mutConfigValue[key] = url mutConfigValue[key] = url
} }

View File

@@ -1,52 +0,0 @@
# Core — Rust Backend
The Rust backend daemon for StartOS.
## Binaries
- `startbox` — Main daemon (runs as `startd`)
- `start-cli` — CLI interface
- `start-container` — Runs inside LXC containers; communicates with host and manages subcontainers
- `registrybox` — Registry daemon
- `tunnelbox` — VPN/tunnel daemon
## Key Modules
- `src/context/` — Context types (RpcContext, CliContext, InitContext, DiagnosticContext)
- `src/service/` — Service lifecycle management with actor pattern (`service_actor.rs`)
- `src/db/model/` — Patch-DB models (`public.rs` synced to frontend, `private.rs` backend-only)
- `src/net/` — Networking (DNS, ACME, WiFi, Tor via Arti, WireGuard)
- `src/s9pk/` — S9PK package format (merkle archive)
- `src/registry/` — Package registry management
## RPC Pattern
See `rpc-toolkit.md` for JSON-RPC handler patterns and configuration.
## Patch-DB Patterns
Patch-DB provides diff-based state synchronization. Changes to `db/model/public.rs` automatically sync to the frontend.
**Key patterns:**
- `db.peek().await` — Get a read-only snapshot of the database state
- `db.mutate(|db| { ... }).await` — Apply mutations atomically, returns `MutateResult`
- `#[derive(HasModel)]` — Derive macro for types stored in the database, generates typed accessors
**Generated accessor types** (from `HasModel` derive):
- `as_field()` — Immutable reference: `&Model<T>`
- `as_field_mut()` — Mutable reference: `&mut Model<T>`
- `into_field()` — Owned value: `Model<T>`
**`Model<T>` APIs** (from `db/prelude.rs`):
- `.de()` — Deserialize to `T`
- `.ser(&value)` — Serialize from `T`
- `.mutate(|v| ...)` — Deserialize, mutate, reserialize
- For maps: `.keys()`, `.as_idx(&key)`, `.as_idx_mut(&key)`, `.insert()`, `.remove()`, `.contains_key()`
## i18n
See `i18n-patterns.md` for internationalization key conventions and the `t!()` macro.
## Rust Utilities & Patterns
See `core-rust-patterns.md` for common utilities (Invoke trait, Guard pattern, mount guards, Apply trait, etc.).

3395
core/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -15,7 +15,7 @@ license = "MIT"
name = "start-os" name = "start-os"
readme = "README.md" readme = "README.md"
repository = "https://github.com/Start9Labs/start-os" repository = "https://github.com/Start9Labs/start-os"
version = "0.4.0-alpha.20" # VERSION_BUMP version = "0.4.0-alpha.19" # VERSION_BUMP
[lib] [lib]
name = "startos" name = "startos"
@@ -42,6 +42,17 @@ name = "tunnelbox"
path = "src/main/tunnelbox.rs" path = "src/main/tunnelbox.rs"
[features] [features]
arti = [
"arti-client",
"safelog",
"tor-cell",
"tor-hscrypto",
"tor-hsservice",
"tor-keymgr",
"tor-llcrypto",
"tor-proto",
"tor-rtcompat",
]
beta = [] beta = []
console = ["console-subscriber", "tokio/tracing"] console = ["console-subscriber", "tokio/tracing"]
default = [] default = []
@@ -51,6 +62,16 @@ unstable = ["backtrace-on-stack-overflow"]
[dependencies] [dependencies]
aes = { version = "0.7.5", features = ["ctr"] } aes = { version = "0.7.5", features = ["ctr"] }
arti-client = { version = "0.33", features = [
"compression",
"ephemeral-keystore",
"experimental-api",
"onion-service-client",
"onion-service-service",
"rustls",
"static",
"tokio",
], default-features = false, git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit", optional = true }
async-acme = { version = "0.6.0", git = "https://github.com/dr-bonez/async-acme.git", features = [ async-acme = { version = "0.6.0", git = "https://github.com/dr-bonez/async-acme.git", features = [
"use_rustls", "use_rustls",
"use_tokio", "use_tokio",
@@ -79,6 +100,7 @@ console-subscriber = { version = "0.5.0", optional = true }
const_format = "0.2.34" const_format = "0.2.34"
cookie = "0.18.0" cookie = "0.18.0"
cookie_store = "0.22.0" cookie_store = "0.22.0"
curve25519-dalek = "4.1.3"
der = { version = "0.7.9", features = ["derive", "pem"] } der = { version = "0.7.9", features = ["derive", "pem"] }
digest = "0.10.7" digest = "0.10.7"
divrem = "1.0.0" divrem = "1.0.0"
@@ -194,6 +216,7 @@ rpassword = "7.2.0"
rust-argon2 = "3.0.0" rust-argon2 = "3.0.0"
rust-i18n = "3.1.5" rust-i18n = "3.1.5"
rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git" } rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git" }
safelog = { version = "0.4.8", git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit", optional = true }
semver = { version = "1.0.20", features = ["serde"] } semver = { version = "1.0.20", features = ["serde"] }
serde = { version = "1.0", features = ["derive", "rc"] } serde = { version = "1.0", features = ["derive", "rc"] }
serde_cbor = { package = "ciborium", version = "0.2.1" } serde_cbor = { package = "ciborium", version = "0.2.1" }
@@ -221,6 +244,23 @@ tokio-stream = { version = "0.1.14", features = ["io-util", "net", "sync"] }
tokio-tar = { git = "https://github.com/dr-bonez/tokio-tar.git" } tokio-tar = { git = "https://github.com/dr-bonez/tokio-tar.git" }
tokio-tungstenite = { version = "0.26.2", features = ["native-tls", "url"] } tokio-tungstenite = { version = "0.26.2", features = ["native-tls", "url"] }
tokio-util = { version = "0.7.9", features = ["io"] } tokio-util = { version = "0.7.9", features = ["io"] }
tor-cell = { version = "0.33", git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit", optional = true }
tor-hscrypto = { version = "0.33", features = [
"full",
], git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit", optional = true }
tor-hsservice = { version = "0.33", git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit", optional = true }
tor-keymgr = { version = "0.33", features = [
"ephemeral-keystore",
], git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit", optional = true }
tor-llcrypto = { version = "0.33", features = [
"full",
], git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit", optional = true }
tor-proto = { version = "0.33", git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit", optional = true }
tor-rtcompat = { version = "0.33", features = [
"rustls",
"tokio",
], git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit", optional = true }
torut = "0.2.1"
tower-service = "0.3.3" tower-service = "0.3.3"
tracing = "0.1.39" tracing = "0.1.39"
tracing-error = "0.2.0" tracing-error = "0.2.0"

View File

@@ -7,11 +7,11 @@ source ./builder-alias.sh
set -ea set -ea
shopt -s expand_aliases shopt -s expand_aliases
PROFILE=${PROFILE:-debug} PROFILE=${PROFILE:-release}
if [ "${PROFILE}" = "release" ]; then if [ "${PROFILE}" = "release" ]; then
BUILD_FLAGS="--release" BUILD_FLAGS="--release"
else else
if [ "$PROFILE" != "debug" ]; then if [ "$PROFILE" != "debug"]; then
>&2 echo "Unknown profile $PROFILE: falling back to debug..." >&2 echo "Unknown profile $PROFILE: falling back to debug..."
PROFILE=debug PROFILE=debug
fi fi
@@ -38,7 +38,7 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then
fi fi
echo "FEATURES=\"$FEATURES\"" echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\"" echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo test --manifest-path=./core/Cargo.toml --lib $BUILD_FLAGS --features test,$FEATURES --locked 'export_bindings_' rust-zig-builder cargo test --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features test,$FEATURES --locked 'export_bindings_'
if [ "$(ls -nd "core/bindings" | awk '{ print $3 }')" != "$UID" ]; then if [ "$(ls -nd "core/bindings" | awk '{ print $3 }')" != "$UID" ]; then
rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID core/bindings && chown -R $UID:$UID /usr/local/cargo" rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID core/bindings && chown -R $UID:$UID /usr/local/cargo"
fi fi

View File

@@ -8,6 +8,7 @@ use openssl::x509::X509;
use crate::db::model::DatabaseModel; use crate::db::model::DatabaseModel;
use crate::hostname::{Hostname, generate_hostname, generate_id}; use crate::hostname::{Hostname, generate_hostname, generate_id};
use crate::net::ssl::{gen_nistp256, make_root_cert}; use crate::net::ssl::{gen_nistp256, make_root_cert};
use crate::net::tor::TorSecretKey;
use crate::prelude::*; use crate::prelude::*;
use crate::util::serde::Pem; use crate::util::serde::Pem;
@@ -25,6 +26,7 @@ pub struct AccountInfo {
pub server_id: String, pub server_id: String,
pub hostname: Hostname, pub hostname: Hostname,
pub password: String, pub password: String,
pub tor_keys: Vec<TorSecretKey>,
pub root_ca_key: PKey<Private>, pub root_ca_key: PKey<Private>,
pub root_ca_cert: X509, pub root_ca_cert: X509,
pub ssh_key: ssh_key::PrivateKey, pub ssh_key: ssh_key::PrivateKey,
@@ -34,6 +36,7 @@ impl AccountInfo {
pub fn new(password: &str, start_time: SystemTime) -> Result<Self, Error> { pub fn new(password: &str, start_time: SystemTime) -> Result<Self, Error> {
let server_id = generate_id(); let server_id = generate_id();
let hostname = generate_hostname(); let hostname = generate_hostname();
let tor_key = vec![TorSecretKey::generate()];
let root_ca_key = gen_nistp256()?; let root_ca_key = gen_nistp256()?;
let root_ca_cert = make_root_cert(&root_ca_key, &hostname, start_time)?; let root_ca_cert = make_root_cert(&root_ca_key, &hostname, start_time)?;
let ssh_key = ssh_key::PrivateKey::from(ssh_key::private::Ed25519Keypair::random( let ssh_key = ssh_key::PrivateKey::from(ssh_key::private::Ed25519Keypair::random(
@@ -45,6 +48,7 @@ impl AccountInfo {
server_id, server_id,
hostname, hostname,
password: hash_password(password)?, password: hash_password(password)?,
tor_keys: tor_key,
root_ca_key, root_ca_key,
root_ca_cert, root_ca_cert,
ssh_key, ssh_key,
@@ -57,6 +61,17 @@ impl AccountInfo {
let hostname = Hostname(db.as_public().as_server_info().as_hostname().de()?); let hostname = Hostname(db.as_public().as_server_info().as_hostname().de()?);
let password = db.as_private().as_password().de()?; let password = db.as_private().as_password().de()?;
let key_store = db.as_private().as_key_store(); let key_store = db.as_private().as_key_store();
let tor_addrs = db
.as_public()
.as_server_info()
.as_network()
.as_host()
.as_onions()
.de()?;
let tor_keys = tor_addrs
.into_iter()
.map(|tor_addr| key_store.as_onion().get_key(&tor_addr))
.collect::<Result<_, _>>()?;
let cert_store = key_store.as_local_certs(); let cert_store = key_store.as_local_certs();
let root_ca_key = cert_store.as_root_key().de()?.0; let root_ca_key = cert_store.as_root_key().de()?.0;
let root_ca_cert = cert_store.as_root_cert().de()?.0; let root_ca_cert = cert_store.as_root_cert().de()?.0;
@@ -67,6 +82,7 @@ impl AccountInfo {
server_id, server_id,
hostname, hostname,
password, password,
tor_keys,
root_ca_key, root_ca_key,
root_ca_cert, root_ca_cert,
ssh_key, ssh_key,
@@ -81,6 +97,17 @@ impl AccountInfo {
server_info server_info
.as_pubkey_mut() .as_pubkey_mut()
.ser(&self.ssh_key.public_key().to_openssh()?)?; .ser(&self.ssh_key.public_key().to_openssh()?)?;
server_info
.as_network_mut()
.as_host_mut()
.as_onions_mut()
.ser(
&self
.tor_keys
.iter()
.map(|tor_key| tor_key.onion_address())
.collect(),
)?;
server_info.as_password_hash_mut().ser(&self.password)?; server_info.as_password_hash_mut().ser(&self.password)?;
db.as_private_mut().as_password_mut().ser(&self.password)?; db.as_private_mut().as_password_mut().ser(&self.password)?;
db.as_private_mut() db.as_private_mut()
@@ -90,6 +117,9 @@ impl AccountInfo {
.as_developer_key_mut() .as_developer_key_mut()
.ser(Pem::new_ref(&self.developer_key))?; .ser(Pem::new_ref(&self.developer_key))?;
let key_store = db.as_private_mut().as_key_store_mut(); let key_store = db.as_private_mut().as_key_store_mut();
for tor_key in &self.tor_keys {
key_store.as_onion_mut().insert_key(tor_key)?;
}
let cert_store = key_store.as_local_certs_mut(); let cert_store = key_store.as_local_certs_mut();
if cert_store.as_root_cert().de()?.0 != self.root_ca_cert { if cert_store.as_root_cert().de()?.0 != self.root_ca_cert {
cert_store cert_store
@@ -118,5 +148,11 @@ impl AccountInfo {
self.hostname.no_dot_host_name(), self.hostname.no_dot_host_name(),
self.hostname.local_domain_name(), self.hostname.local_domain_name(),
] ]
.into_iter()
.chain(
self.tor_keys
.iter()
.map(|k| InternedString::from_display(&k.onion_address())),
)
} }
} }

View File

@@ -7,7 +7,9 @@ use ssh_key::private::Ed25519Keypair;
use crate::account::AccountInfo; use crate::account::AccountInfo;
use crate::hostname::{Hostname, generate_hostname, generate_id}; use crate::hostname::{Hostname, generate_hostname, generate_id};
use crate::net::tor::TorSecretKey;
use crate::prelude::*; use crate::prelude::*;
use crate::util::crypto::ed25519_expand_key;
use crate::util::serde::{Base32, Base64, Pem}; use crate::util::serde::{Base32, Base64, Pem};
pub struct OsBackup { pub struct OsBackup {
@@ -83,6 +85,10 @@ impl OsBackupV0 {
&mut ssh_key::rand_core::OsRng::default(), &mut ssh_key::rand_core::OsRng::default(),
ssh_key::Algorithm::Ed25519, ssh_key::Algorithm::Ed25519,
)?, )?,
tor_keys: TorSecretKey::from_bytes(self.tor_key.0)
.ok()
.into_iter()
.collect(),
developer_key: ed25519_dalek::SigningKey::generate( developer_key: ed25519_dalek::SigningKey::generate(
&mut ssh_key::rand_core::OsRng::default(), &mut ssh_key::rand_core::OsRng::default(),
), ),
@@ -113,6 +119,10 @@ impl OsBackupV1 {
root_ca_key: self.root_ca_key.0, root_ca_key: self.root_ca_key.0,
root_ca_cert: self.root_ca_cert.0, root_ca_cert: self.root_ca_cert.0,
ssh_key: ssh_key::PrivateKey::from(Ed25519Keypair::from_seed(&self.net_key.0)), ssh_key: ssh_key::PrivateKey::from(Ed25519Keypair::from_seed(&self.net_key.0)),
tor_keys: TorSecretKey::from_bytes(ed25519_expand_key(&self.net_key.0))
.ok()
.into_iter()
.collect(),
developer_key: ed25519_dalek::SigningKey::from_bytes(&self.net_key), developer_key: ed25519_dalek::SigningKey::from_bytes(&self.net_key),
}, },
ui: self.ui, ui: self.ui,
@@ -130,6 +140,7 @@ struct OsBackupV2 {
root_ca_key: Pem<PKey<Private>>, // PEM Encoded OpenSSL Key root_ca_key: Pem<PKey<Private>>, // PEM Encoded OpenSSL Key
root_ca_cert: Pem<X509>, // PEM Encoded OpenSSL X509 Certificate root_ca_cert: Pem<X509>, // PEM Encoded OpenSSL X509 Certificate
ssh_key: Pem<ssh_key::PrivateKey>, // PEM Encoded OpenSSH Key ssh_key: Pem<ssh_key::PrivateKey>, // PEM Encoded OpenSSH Key
tor_keys: Vec<TorSecretKey>, // Base64 Encoded Ed25519 Expanded Secret Key
compat_s9pk_key: Pem<ed25519_dalek::SigningKey>, // PEM Encoded ED25519 Key compat_s9pk_key: Pem<ed25519_dalek::SigningKey>, // PEM Encoded ED25519 Key
ui: Value, // JSON Value ui: Value, // JSON Value
} }
@@ -143,6 +154,7 @@ impl OsBackupV2 {
root_ca_key: self.root_ca_key.0, root_ca_key: self.root_ca_key.0,
root_ca_cert: self.root_ca_cert.0, root_ca_cert: self.root_ca_cert.0,
ssh_key: self.ssh_key.0, ssh_key: self.ssh_key.0,
tor_keys: self.tor_keys,
developer_key: self.compat_s9pk_key.0, developer_key: self.compat_s9pk_key.0,
}, },
ui: self.ui, ui: self.ui,
@@ -155,6 +167,7 @@ impl OsBackupV2 {
root_ca_key: Pem(backup.account.root_ca_key.clone()), root_ca_key: Pem(backup.account.root_ca_key.clone()),
root_ca_cert: Pem(backup.account.root_ca_cert.clone()), root_ca_cert: Pem(backup.account.root_ca_cert.clone()),
ssh_key: Pem(backup.account.ssh_key.clone()), ssh_key: Pem(backup.account.ssh_key.clone()),
tor_keys: backup.account.tor_keys.clone(),
compat_s9pk_key: Pem(backup.account.developer_key.clone()), compat_s9pk_key: Pem(backup.account.developer_key.clone()),
ui: backup.ui.clone(), ui: backup.ui.clone(),
} }

View File

@@ -9,7 +9,7 @@ use crate::disk::fsck::RepairStrategy;
use crate::disk::main::DEFAULT_PASSWORD; use crate::disk::main::DEFAULT_PASSWORD;
use crate::firmware::{check_for_firmware_update, update_firmware}; use crate::firmware::{check_for_firmware_update, update_firmware};
use crate::init::{InitPhases, STANDBY_MODE_PATH}; use crate::init::{InitPhases, STANDBY_MODE_PATH};
use crate::net::gateway::WildcardListener; use crate::net::gateway::UpgradableListener;
use crate::net::web_server::WebServer; use crate::net::web_server::WebServer;
use crate::prelude::*; use crate::prelude::*;
use crate::progress::FullProgressTracker; use crate::progress::FullProgressTracker;
@@ -19,7 +19,7 @@ use crate::{DATA_DIR, PLATFORM};
#[instrument(skip_all)] #[instrument(skip_all)]
async fn setup_or_init( async fn setup_or_init(
server: &mut WebServer<WildcardListener>, server: &mut WebServer<UpgradableListener>,
config: &ServerConfig, config: &ServerConfig,
) -> Result<Result<(RpcContext, FullProgressTracker), Shutdown>, Error> { ) -> Result<Result<(RpcContext, FullProgressTracker), Shutdown>, Error> {
if let Some(firmware) = check_for_firmware_update() if let Some(firmware) = check_for_firmware_update()
@@ -204,7 +204,7 @@ async fn setup_or_init(
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn main( pub async fn main(
server: &mut WebServer<WildcardListener>, server: &mut WebServer<UpgradableListener>,
config: &ServerConfig, config: &ServerConfig,
) -> Result<Result<(RpcContext, FullProgressTracker), Shutdown>, Error> { ) -> Result<Result<(RpcContext, FullProgressTracker), Shutdown>, Error> {
if &*PLATFORM == "raspberrypi" && tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() { if &*PLATFORM == "raspberrypi" && tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() {

View File

@@ -12,7 +12,7 @@ use tracing::instrument;
use crate::context::config::ServerConfig; use crate::context::config::ServerConfig;
use crate::context::rpc::InitRpcContextPhases; use crate::context::rpc::InitRpcContextPhases;
use crate::context::{DiagnosticContext, InitContext, RpcContext}; use crate::context::{DiagnosticContext, InitContext, RpcContext};
use crate::net::gateway::WildcardListener; use crate::net::gateway::{BindTcp, SelfContainedNetworkInterfaceListener, UpgradableListener};
use crate::net::static_server::refresher; use crate::net::static_server::refresher;
use crate::net::web_server::{Acceptor, WebServer}; use crate::net::web_server::{Acceptor, WebServer};
use crate::prelude::*; use crate::prelude::*;
@@ -23,7 +23,7 @@ use crate::util::logger::LOGGER;
#[instrument(skip_all)] #[instrument(skip_all)]
async fn inner_main( async fn inner_main(
server: &mut WebServer<WildcardListener>, server: &mut WebServer<UpgradableListener>,
config: &ServerConfig, config: &ServerConfig,
) -> Result<Option<Shutdown>, Error> { ) -> Result<Option<Shutdown>, Error> {
let rpc_ctx = if !tokio::fs::metadata("/run/startos/initialized") let rpc_ctx = if !tokio::fs::metadata("/run/startos/initialized")
@@ -148,7 +148,7 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
.expect(&t!("bins.startd.failed-to-initialize-runtime")); .expect(&t!("bins.startd.failed-to-initialize-runtime"));
let res = rt.block_on(async { let res = rt.block_on(async {
let mut server = WebServer::new( let mut server = WebServer::new(
Acceptor::new(WildcardListener::new(80)?), Acceptor::bind_upgradable(SelfContainedNetworkInterfaceListener::bind(BindTcp, 80)),
refresher(), refresher(),
); );
match inner_main(&mut server, &config).await { match inner_main(&mut server, &config).await {

View File

@@ -13,7 +13,7 @@ use visit_rs::Visit;
use crate::context::CliContext; use crate::context::CliContext;
use crate::context::config::ClientConfig; use crate::context::config::ClientConfig;
use tokio::net::TcpListener; use crate::net::gateway::{Bind, BindTcp};
use crate::net::tls::TlsListener; use crate::net::tls::TlsListener;
use crate::net::web_server::{Accept, Acceptor, MetadataVisitor, WebServer}; use crate::net::web_server::{Accept, Acceptor, MetadataVisitor, WebServer};
use crate::prelude::*; use crate::prelude::*;
@@ -57,12 +57,7 @@ async fn inner_main(config: &TunnelConfig) -> Result<(), Error> {
if !a.contains_key(&key) { if !a.contains_key(&key) {
match (|| { match (|| {
Ok::<_, Error>(TlsListener::new( Ok::<_, Error>(TlsListener::new(
TcpListener::from_std( BindTcp.bind(addr)?,
mio::net::TcpListener::bind(addr)
.with_kind(ErrorKind::Network)?
.into(),
)
.with_kind(ErrorKind::Network)?,
TunnelCertHandler { TunnelCertHandler {
db: https_db.clone(), db: https_db.clone(),
crypto_provider: Arc::new(tokio_rustls::rustls::crypto::ring::default_provider()), crypto_provider: Arc::new(tokio_rustls::rustls::crypto::ring::default_provider()),

View File

@@ -34,7 +34,7 @@ use crate::disk::mount::guard::MountGuard;
use crate::init::{InitResult, check_time_is_synchronized}; use crate::init::{InitResult, check_time_is_synchronized};
use crate::install::PKG_ARCHIVE_DIR; use crate::install::PKG_ARCHIVE_DIR;
use crate::lxc::LxcManager; use crate::lxc::LxcManager;
use crate::net::gateway::WildcardListener; use crate::net::gateway::UpgradableListener;
use crate::net::net_controller::{NetController, NetService}; use crate::net::net_controller::{NetController, NetService};
use crate::net::socks::DEFAULT_SOCKS_LISTEN; use crate::net::socks::DEFAULT_SOCKS_LISTEN;
use crate::net::utils::{find_eth_iface, find_wifi_iface}; use crate::net::utils::{find_eth_iface, find_wifi_iface};
@@ -132,7 +132,7 @@ pub struct RpcContext(Arc<RpcContextSeed>);
impl RpcContext { impl RpcContext {
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn init( pub async fn init(
webserver: &WebServerAcceptorSetter<WildcardListener>, webserver: &WebServerAcceptorSetter<UpgradableListener>,
config: &ServerConfig, config: &ServerConfig,
disk_guid: InternedString, disk_guid: InternedString,
init_result: Option<InitResult>, init_result: Option<InitResult>,
@@ -167,7 +167,7 @@ impl RpcContext {
} else { } else {
let net_ctrl = let net_ctrl =
Arc::new(NetController::init(db.clone(), &account.hostname, socks_proxy).await?); Arc::new(NetController::init(db.clone(), &account.hostname, socks_proxy).await?);
webserver.send_modify(|wl| wl.set_ip_info(net_ctrl.net_iface.watcher.subscribe())); webserver.try_upgrade(|a| net_ctrl.net_iface.watcher.upgrade_listener(a))?;
let os_net_service = net_ctrl.os_bindings().await?; let os_net_service = net_ctrl.os_bindings().await?;
(net_ctrl, os_net_service) (net_ctrl, os_net_service)
}; };

View File

@@ -20,7 +20,7 @@ use crate::context::RpcContext;
use crate::context::config::ServerConfig; use crate::context::config::ServerConfig;
use crate::disk::mount::guard::{MountGuard, TmpMountGuard}; use crate::disk::mount::guard::{MountGuard, TmpMountGuard};
use crate::hostname::Hostname; use crate::hostname::Hostname;
use crate::net::gateway::WildcardListener; use crate::net::gateway::UpgradableListener;
use crate::net::web_server::{WebServer, WebServerAcceptorSetter}; use crate::net::web_server::{WebServer, WebServerAcceptorSetter};
use crate::prelude::*; use crate::prelude::*;
use crate::progress::FullProgressTracker; use crate::progress::FullProgressTracker;
@@ -51,7 +51,7 @@ pub struct SetupResult {
} }
pub struct SetupContextSeed { pub struct SetupContextSeed {
pub webserver: WebServerAcceptorSetter<WildcardListener>, pub webserver: WebServerAcceptorSetter<UpgradableListener>,
pub config: SyncMutex<ServerConfig>, pub config: SyncMutex<ServerConfig>,
pub disable_encryption: bool, pub disable_encryption: bool,
pub progress: FullProgressTracker, pub progress: FullProgressTracker,
@@ -70,7 +70,7 @@ pub struct SetupContext(Arc<SetupContextSeed>);
impl SetupContext { impl SetupContext {
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn init( pub fn init(
webserver: &WebServer<WildcardListener>, webserver: &WebServer<UpgradableListener>,
config: ServerConfig, config: ServerConfig,
) -> Result<Self, Error> { ) -> Result<Self, Error> {
let (shutdown, _) = tokio::sync::broadcast::channel(1); let (shutdown, _) = tokio::sync::broadcast::channel(1);

View File

@@ -18,7 +18,7 @@ use crate::s9pk::manifest::{LocaleString, Manifest};
use crate::status::StatusInfo; use crate::status::StatusInfo;
use crate::util::DataUrl; use crate::util::DataUrl;
use crate::util::serde::{Pem, is_partial_of}; use crate::util::serde::{Pem, is_partial_of};
use crate::{ActionId, GatewayId, HealthCheckId, HostId, PackageId, ReplayId, ServiceInterfaceId}; use crate::{ActionId, HealthCheckId, HostId, PackageId, ReplayId, ServiceInterfaceId};
#[derive(Debug, Default, Deserialize, Serialize, TS)] #[derive(Debug, Default, Deserialize, Serialize, TS)]
#[ts(export)] #[ts(export)]
@@ -381,9 +381,6 @@ pub struct PackageDataEntry {
pub hosts: Hosts, pub hosts: Hosts,
#[ts(type = "string[]")] #[ts(type = "string[]")]
pub store_exposed_dependents: Vec<JsonPointer>, pub store_exposed_dependents: Vec<JsonPointer>,
#[serde(default)]
#[ts(type = "string | null")]
pub outbound_gateway: Option<GatewayId>,
} }
impl AsRef<PackageDataEntry> for PackageDataEntry { impl AsRef<PackageDataEntry> for PackageDataEntry {
fn as_ref(&self) -> &PackageDataEntry { fn as_ref(&self) -> &PackageDataEntry {

View File

@@ -20,9 +20,8 @@ use crate::db::model::Database;
use crate::db::model::package::AllPackageData; use crate::db::model::package::AllPackageData;
use crate::net::acme::AcmeProvider; use crate::net::acme::AcmeProvider;
use crate::net::host::Host; use crate::net::host::Host;
use crate::net::host::binding::{ use crate::net::host::binding::{AddSslOptions, BindInfo, BindOptions, NetInfo};
AddSslOptions, BindInfo, BindOptions, Bindings, DerivedAddressInfo, NetInfo, use crate::net::utils::ipv6_is_local;
};
use crate::net::vhost::AlpnInfo; use crate::net::vhost::AlpnInfo;
use crate::prelude::*; use crate::prelude::*;
use crate::progress::FullProgress; use crate::progress::FullProgress;
@@ -64,35 +63,36 @@ impl Public {
post_init_migration_todos: BTreeMap::new(), post_init_migration_todos: BTreeMap::new(),
network: NetworkInfo { network: NetworkInfo {
host: Host { host: Host {
bindings: Bindings( bindings: [(
[( 80,
80, BindInfo {
BindInfo { enabled: false,
enabled: false, options: BindOptions {
options: BindOptions { preferred_external_port: 80,
preferred_external_port: 80, add_ssl: Some(AddSslOptions {
add_ssl: Some(AddSslOptions { preferred_external_port: 443,
preferred_external_port: 443, add_x_forwarded_headers: false,
add_x_forwarded_headers: false, alpn: Some(AlpnInfo::Specified(vec![
alpn: Some(AlpnInfo::Specified(vec![ MaybeUtf8String("h2".into()),
MaybeUtf8String("h2".into()), MaybeUtf8String("http/1.1".into()),
MaybeUtf8String("http/1.1".into()), ])),
])), }),
}), secure: None,
secure: None,
},
net: NetInfo {
assigned_port: None,
assigned_ssl_port: Some(443),
},
addresses: DerivedAddressInfo::default(),
}, },
)] net: NetInfo {
.into_iter() assigned_port: None,
.collect(), assigned_ssl_port: Some(443),
), private_disabled: OrdSet::new(),
public_enabled: OrdSet::new(),
},
},
)]
.into_iter()
.collect(),
onions: account.tor_keys.iter().map(|k| k.onion_address()).collect(),
public_domains: BTreeMap::new(), public_domains: BTreeMap::new(),
private_domains: BTreeMap::new(), private_domains: BTreeSet::new(),
hostname_info: BTreeMap::new(),
}, },
wifi: WifiInfo { wifi: WifiInfo {
enabled: true, enabled: true,
@@ -117,7 +117,6 @@ impl Public {
acme acme
}, },
dns: Default::default(), dns: Default::default(),
default_outbound: None,
}, },
status_info: ServerStatus { status_info: ServerStatus {
backup_progress: None, backup_progress: None,
@@ -221,9 +220,6 @@ pub struct NetworkInfo {
pub acme: BTreeMap<AcmeProvider, AcmeSettings>, pub acme: BTreeMap<AcmeProvider, AcmeSettings>,
#[serde(default)] #[serde(default)]
pub dns: DnsSettings, pub dns: DnsSettings,
#[serde(default)]
#[ts(type = "string | null")]
pub default_outbound: Option<GatewayId>,
} }
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)] #[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
@@ -243,12 +239,41 @@ pub struct DnsSettings {
#[ts(export)] #[ts(export)]
pub struct NetworkInterfaceInfo { pub struct NetworkInterfaceInfo {
pub name: Option<InternedString>, pub name: Option<InternedString>,
pub public: Option<bool>,
pub secure: Option<bool>, pub secure: Option<bool>,
pub ip_info: Option<Arc<IpInfo>>, pub ip_info: Option<Arc<IpInfo>>,
#[serde(default, rename = "type")]
pub gateway_type: Option<GatewayType>,
} }
impl NetworkInterfaceInfo { impl NetworkInterfaceInfo {
pub fn public(&self) -> bool {
self.public.unwrap_or_else(|| {
!self.ip_info.as_ref().map_or(true, |ip_info| {
let ip4s = ip_info
.subnets
.iter()
.filter_map(|ipnet| {
if let IpAddr::V4(ip4) = ipnet.addr() {
Some(ip4)
} else {
None
}
})
.collect::<BTreeSet<_>>();
if !ip4s.is_empty() {
return ip4s
.iter()
.all(|ip4| ip4.is_loopback() || ip4.is_private() || ip4.is_link_local());
}
ip_info.subnets.iter().all(|ipnet| {
if let IpAddr::V6(ip6) = ipnet.addr() {
ipv6_is_local(ip6)
} else {
true
}
})
})
})
}
pub fn secure(&self) -> bool { pub fn secure(&self) -> bool {
self.secure.unwrap_or(false) self.secure.unwrap_or(false)
} }
@@ -285,28 +310,6 @@ pub enum NetworkInterfaceType {
Loopback, Loopback,
} }
#[derive(
Clone,
Copy,
Debug,
Default,
PartialEq,
Eq,
PartialOrd,
Ord,
Deserialize,
Serialize,
TS,
clap::ValueEnum,
)]
#[ts(export)]
#[serde(rename_all = "kebab-case")]
pub enum GatewayType {
#[default]
InboundOutbound,
OutboundOnly,
}
#[derive(Debug, Deserialize, Serialize, HasModel, TS)] #[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
#[model = "Model<Self>"] #[model = "Model<Self>"]

View File

@@ -42,11 +42,11 @@ pub enum ErrorKind {
ParseUrl = 19, ParseUrl = 19,
DiskNotAvailable = 20, DiskNotAvailable = 20,
BlockDevice = 21, BlockDevice = 21,
// InvalidOnionAddress = 22, InvalidOnionAddress = 22,
Pack = 23, Pack = 23,
ValidateS9pk = 24, ValidateS9pk = 24,
DiskCorrupted = 25, // Remove DiskCorrupted = 25, // Remove
// Tor = 26, Tor = 26,
ConfigGen = 27, ConfigGen = 27,
ParseNumber = 28, ParseNumber = 28,
Database = 29, Database = 29,
@@ -126,11 +126,11 @@ impl ErrorKind {
ParseUrl => t!("error.parse-url"), ParseUrl => t!("error.parse-url"),
DiskNotAvailable => t!("error.disk-not-available"), DiskNotAvailable => t!("error.disk-not-available"),
BlockDevice => t!("error.block-device"), BlockDevice => t!("error.block-device"),
// InvalidOnionAddress => t!("error.invalid-onion-address"), InvalidOnionAddress => t!("error.invalid-onion-address"),
Pack => t!("error.pack"), Pack => t!("error.pack"),
ValidateS9pk => t!("error.validate-s9pk"), ValidateS9pk => t!("error.validate-s9pk"),
DiskCorrupted => t!("error.disk-corrupted"), // Remove DiskCorrupted => t!("error.disk-corrupted"), // Remove
// Tor => t!("error.tor"), Tor => t!("error.tor"),
ConfigGen => t!("error.config-gen"), ConfigGen => t!("error.config-gen"),
ParseNumber => t!("error.parse-number"), ParseNumber => t!("error.parse-number"),
Database => t!("error.database"), Database => t!("error.database"),
@@ -370,6 +370,17 @@ impl From<reqwest::Error> for Error {
Error::new(e, kind) Error::new(e, kind)
} }
} }
#[cfg(feature = "arti")]
impl From<arti_client::Error> for Error {
fn from(e: arti_client::Error) -> Self {
Error::new(e, ErrorKind::Tor)
}
}
impl From<torut::control::ConnError> for Error {
fn from(e: torut::control::ConnError) -> Self {
Error::new(e, ErrorKind::Tor)
}
}
impl From<zbus::Error> for Error { impl From<zbus::Error> for Error {
fn from(e: zbus::Error) -> Self { fn from(e: zbus::Error) -> Self {
Error::new(e, ErrorKind::DBus) Error::new(e, ErrorKind::DBus)

View File

@@ -20,7 +20,7 @@ use crate::db::model::public::ServerStatus;
use crate::developer::OS_DEVELOPER_KEY_PATH; use crate::developer::OS_DEVELOPER_KEY_PATH;
use crate::hostname::Hostname; use crate::hostname::Hostname;
use crate::middleware::auth::local::LocalAuthContext; use crate::middleware::auth::local::LocalAuthContext;
use crate::net::gateway::WildcardListener; use crate::net::gateway::UpgradableListener;
use crate::net::net_controller::{NetController, NetService}; use crate::net::net_controller::{NetController, NetService};
use crate::net::socks::DEFAULT_SOCKS_LISTEN; use crate::net::socks::DEFAULT_SOCKS_LISTEN;
use crate::net::utils::find_wifi_iface; use crate::net::utils::find_wifi_iface;
@@ -144,7 +144,7 @@ pub async fn run_script<P: AsRef<Path>>(path: P, mut progress: PhaseProgressTrac
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn init( pub async fn init(
webserver: &WebServerAcceptorSetter<WildcardListener>, webserver: &WebServerAcceptorSetter<UpgradableListener>,
cfg: &ServerConfig, cfg: &ServerConfig,
InitPhases { InitPhases {
preinit, preinit,
@@ -218,7 +218,7 @@ pub async fn init(
) )
.await?, .await?,
); );
webserver.send_modify(|wl| wl.set_ip_info(net_ctrl.net_iface.watcher.subscribe())); webserver.try_upgrade(|a| net_ctrl.net_iface.watcher.upgrade_listener(a))?;
let os_net_service = net_ctrl.os_bindings().await?; let os_net_service = net_ctrl.os_bindings().await?;
start_net.complete(); start_net.complete();

View File

@@ -71,7 +71,7 @@ impl SignatureAuthContext for RpcContext {
.as_network() .as_network()
.as_host() .as_host()
.as_private_domains() .as_private_domains()
.keys() .de()
.map(|k| k.into_iter()) .map(|k| k.into_iter())
.transpose(), .transpose(),
) )

View File

@@ -10,7 +10,7 @@ use color_eyre::eyre::eyre;
use futures::{FutureExt, StreamExt, TryStreamExt}; use futures::{FutureExt, StreamExt, TryStreamExt};
use hickory_server::authority::{AuthorityObject, Catalog, MessageResponseBuilder}; use hickory_server::authority::{AuthorityObject, Catalog, MessageResponseBuilder};
use hickory_server::proto::op::{Header, ResponseCode}; use hickory_server::proto::op::{Header, ResponseCode};
use hickory_server::proto::rr::{Name, Record, RecordType}; use hickory_server::proto::rr::{LowerName, Name, Record, RecordType};
use hickory_server::resolver::config::{ResolverConfig, ResolverOpts}; use hickory_server::resolver::config::{ResolverConfig, ResolverOpts};
use hickory_server::server::{Request, RequestHandler, ResponseHandler, ResponseInfo}; use hickory_server::server::{Request, RequestHandler, ResponseHandler, ResponseInfo};
use hickory_server::store::forwarder::{ForwardAuthority, ForwardConfig}; use hickory_server::store::forwarder::{ForwardAuthority, ForwardConfig};

View File

@@ -3,11 +3,9 @@ use std::net::{IpAddr, SocketAddrV4};
use std::sync::{Arc, Weak}; use std::sync::{Arc, Weak};
use std::time::Duration; use std::time::Duration;
use ipnet::IpNet;
use futures::channel::oneshot; use futures::channel::oneshot;
use id_pool::IdPool;
use iddqd::{IdOrdItem, IdOrdMap}; use iddqd::{IdOrdItem, IdOrdMap};
use rand::Rng;
use imbl::OrdMap; use imbl::OrdMap;
use rpc_toolkit::{Context, HandlerArgs, HandlerExt, ParentHandler, from_fn_async}; use rpc_toolkit::{Context, HandlerArgs, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@@ -17,6 +15,7 @@ use tokio::sync::mpsc;
use crate::GatewayId; use crate::GatewayId;
use crate::context::{CliContext, RpcContext}; use crate::context::{CliContext, RpcContext};
use crate::db::model::public::NetworkInterfaceInfo; use crate::db::model::public::NetworkInterfaceInfo;
use crate::net::gateway::{DynInterfaceFilter, InterfaceFilter};
use crate::prelude::*; use crate::prelude::*;
use crate::util::Invoke; use crate::util::Invoke;
use crate::util::future::NonDetachingJoinHandle; use crate::util::future::NonDetachingJoinHandle;
@@ -24,66 +23,25 @@ use crate::util::serde::{HandlerExtSerde, display_serializable};
use crate::util::sync::Watch; use crate::util::sync::Watch;
pub const START9_BRIDGE_IFACE: &str = "lxcbr0"; pub const START9_BRIDGE_IFACE: &str = "lxcbr0";
const EPHEMERAL_PORT_START: u16 = 49152; pub const FIRST_DYNAMIC_PRIVATE_PORT: u16 = 49152;
// vhost.rs:89 — not allowed: <=1024, >=32768, 5355, 5432, 9050, 6010, 9051, 5353
const RESTRICTED_PORTS: &[u16] = &[5353, 5355, 5432, 6010, 9050, 9051];
fn is_restricted(port: u16) -> bool {
port <= 1024 || RESTRICTED_PORTS.contains(&port)
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct ForwardRequirements {
pub public_gateways: BTreeSet<GatewayId>,
pub private_ips: BTreeSet<IpAddr>,
pub secure: bool,
}
impl std::fmt::Display for ForwardRequirements {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"ForwardRequirements {{ public: {:?}, private: {:?}, secure: {} }}",
self.public_gateways, self.private_ips, self.secure
)
}
}
#[derive(Debug, Deserialize, Serialize)] #[derive(Debug, Deserialize, Serialize)]
pub struct AvailablePorts(BTreeMap<u16, bool>); pub struct AvailablePorts(IdPool);
impl AvailablePorts { impl AvailablePorts {
pub fn new() -> Self { pub fn new() -> Self {
Self(BTreeMap::new()) Self(IdPool::new_ranged(FIRST_DYNAMIC_PRIVATE_PORT..u16::MAX))
} }
pub fn alloc(&mut self, ssl: bool) -> Result<u16, Error> { pub fn alloc(&mut self) -> Result<u16, Error> {
let mut rng = rand::rng(); self.0.request_id().ok_or_else(|| {
for _ in 0..1000 { Error::new(
let port = rng.random_range(EPHEMERAL_PORT_START..u16::MAX); eyre!("{}", t!("net.forward.no-dynamic-ports-available")),
if !self.0.contains_key(&port) { ErrorKind::Network,
self.0.insert(port, ssl); )
return Ok(port); })
}
}
Err(Error::new(
eyre!("{}", t!("net.forward.no-dynamic-ports-available")),
ErrorKind::Network,
))
}
/// Try to allocate a specific port. Returns Some(port) if available, None if taken/restricted.
pub fn try_alloc(&mut self, port: u16, ssl: bool) -> Option<u16> {
if is_restricted(port) || self.0.contains_key(&port) {
return None;
}
self.0.insert(port, ssl);
Some(port)
}
/// Returns whether a given allocated port is SSL.
pub fn is_ssl(&self, port: u16) -> bool {
self.0.get(&port).copied().unwrap_or(false)
} }
pub fn free(&mut self, ports: impl IntoIterator<Item = u16>) { pub fn free(&mut self, ports: impl IntoIterator<Item = u16>) {
for port in ports { for port in ports {
self.0.remove(&port); self.0.return_id(port).unwrap_or_default();
} }
} }
} }
@@ -103,10 +61,10 @@ pub fn forward_api<C: Context>() -> ParentHandler<C> {
} }
let mut table = Table::new(); let mut table = Table::new();
table.add_row(row![bc => "FROM", "TO", "REQS"]); table.add_row(row![bc => "FROM", "TO", "FILTER"]);
for (external, target) in res.0 { for (external, target) in res.0 {
table.add_row(row![external, target.target, target.reqs]); table.add_row(row![external, target.target, target.filter]);
} }
table.print_tty(false)?; table.print_tty(false)?;
@@ -121,7 +79,6 @@ struct ForwardMapping {
source: SocketAddrV4, source: SocketAddrV4,
target: SocketAddrV4, target: SocketAddrV4,
target_prefix: u8, target_prefix: u8,
src_filter: Option<IpNet>,
rc: Weak<()>, rc: Weak<()>,
} }
@@ -136,10 +93,9 @@ impl PortForwardState {
source: SocketAddrV4, source: SocketAddrV4,
target: SocketAddrV4, target: SocketAddrV4,
target_prefix: u8, target_prefix: u8,
src_filter: Option<IpNet>,
) -> Result<Arc<()>, Error> { ) -> Result<Arc<()>, Error> {
if let Some(existing) = self.mappings.get_mut(&source) { if let Some(existing) = self.mappings.get_mut(&source) {
if existing.target == target && existing.src_filter == src_filter { if existing.target == target {
if let Some(existing_rc) = existing.rc.upgrade() { if let Some(existing_rc) = existing.rc.upgrade() {
return Ok(existing_rc); return Ok(existing_rc);
} else { } else {
@@ -148,28 +104,21 @@ impl PortForwardState {
return Ok(rc); return Ok(rc);
} }
} else { } else {
// Different target or src_filter, need to remove old and add new // Different target, need to remove old and add new
if let Some(mapping) = self.mappings.remove(&source) { if let Some(mapping) = self.mappings.remove(&source) {
unforward( unforward(mapping.source, mapping.target, mapping.target_prefix).await?;
mapping.source,
mapping.target,
mapping.target_prefix,
mapping.src_filter.as_ref(),
)
.await?;
} }
} }
} }
let rc = Arc::new(()); let rc = Arc::new(());
forward(source, target, target_prefix, src_filter.as_ref()).await?; forward(source, target, target_prefix).await?;
self.mappings.insert( self.mappings.insert(
source, source,
ForwardMapping { ForwardMapping {
source, source,
target, target,
target_prefix, target_prefix,
src_filter,
rc: Arc::downgrade(&rc), rc: Arc::downgrade(&rc),
}, },
); );
@@ -187,13 +136,7 @@ impl PortForwardState {
for source in to_remove { for source in to_remove {
if let Some(mapping) = self.mappings.remove(&source) { if let Some(mapping) = self.mappings.remove(&source) {
unforward( unforward(mapping.source, mapping.target, mapping.target_prefix).await?;
mapping.source,
mapping.target,
mapping.target_prefix,
mapping.src_filter.as_ref(),
)
.await?;
} }
} }
Ok(()) Ok(())
@@ -214,14 +157,9 @@ impl Drop for PortForwardState {
let mappings = std::mem::take(&mut self.mappings); let mappings = std::mem::take(&mut self.mappings);
tokio::spawn(async move { tokio::spawn(async move {
for (_, mapping) in mappings { for (_, mapping) in mappings {
unforward( unforward(mapping.source, mapping.target, mapping.target_prefix)
mapping.source, .await
mapping.target, .log_err();
mapping.target_prefix,
mapping.src_filter.as_ref(),
)
.await
.log_err();
} }
}); });
} }
@@ -233,7 +171,6 @@ enum PortForwardCommand {
source: SocketAddrV4, source: SocketAddrV4,
target: SocketAddrV4, target: SocketAddrV4,
target_prefix: u8, target_prefix: u8,
src_filter: Option<IpNet>,
respond: oneshot::Sender<Result<Arc<()>, Error>>, respond: oneshot::Sender<Result<Arc<()>, Error>>,
}, },
Gc { Gc {
@@ -320,12 +257,9 @@ impl PortForwardController {
source, source,
target, target,
target_prefix, target_prefix,
src_filter,
respond, respond,
} => { } => {
let result = state let result = state.add_forward(source, target, target_prefix).await;
.add_forward(source, target, target_prefix, src_filter)
.await;
respond.send(result).ok(); respond.send(result).ok();
} }
PortForwardCommand::Gc { respond } => { PortForwardCommand::Gc { respond } => {
@@ -350,7 +284,6 @@ impl PortForwardController {
source: SocketAddrV4, source: SocketAddrV4,
target: SocketAddrV4, target: SocketAddrV4,
target_prefix: u8, target_prefix: u8,
src_filter: Option<IpNet>,
) -> Result<Arc<()>, Error> { ) -> Result<Arc<()>, Error> {
let (send, recv) = oneshot::channel(); let (send, recv) = oneshot::channel();
self.req self.req
@@ -358,7 +291,6 @@ impl PortForwardController {
source, source,
target, target,
target_prefix, target_prefix,
src_filter,
respond: send, respond: send,
}) })
.map_err(err_has_exited)?; .map_err(err_has_exited)?;
@@ -389,14 +321,14 @@ struct InterfaceForwardRequest {
external: u16, external: u16,
target: SocketAddrV4, target: SocketAddrV4,
target_prefix: u8, target_prefix: u8,
reqs: ForwardRequirements, filter: DynInterfaceFilter,
rc: Arc<()>, rc: Arc<()>,
} }
#[derive(Clone)] #[derive(Clone)]
struct InterfaceForwardEntry { struct InterfaceForwardEntry {
external: u16, external: u16,
targets: BTreeMap<ForwardRequirements, (SocketAddrV4, u8, Weak<()>)>, filter: BTreeMap<DynInterfaceFilter, (SocketAddrV4, u8, Weak<()>)>,
// Maps source SocketAddr -> strong reference for the forward created in PortForwardController // Maps source SocketAddr -> strong reference for the forward created in PortForwardController
forwards: BTreeMap<SocketAddrV4, Arc<()>>, forwards: BTreeMap<SocketAddrV4, Arc<()>>,
} }
@@ -414,7 +346,7 @@ impl InterfaceForwardEntry {
fn new(external: u16) -> Self { fn new(external: u16) -> Self {
Self { Self {
external, external,
targets: BTreeMap::new(), filter: BTreeMap::new(),
forwards: BTreeMap::new(), forwards: BTreeMap::new(),
} }
} }
@@ -426,38 +358,28 @@ impl InterfaceForwardEntry {
) -> Result<(), Error> { ) -> Result<(), Error> {
let mut keep = BTreeSet::<SocketAddrV4>::new(); let mut keep = BTreeSet::<SocketAddrV4>::new();
for (gw_id, info) in ip_info.iter() { for (iface, info) in ip_info.iter() {
if let Some(ip_info) = &info.ip_info { if let Some((target, target_prefix)) = self
for subnet in ip_info.subnets.iter() { .filter
if let IpAddr::V4(ip) = subnet.addr() { .iter()
let addr = SocketAddrV4::new(ip, self.external); .filter(|(_, (_, _, rc))| rc.strong_count() > 0)
if keep.contains(&addr) { .find(|(filter, _)| filter.filter(iface, info))
continue; .map(|(_, (target, target_prefix, _))| (*target, *target_prefix))
{
if let Some(ip_info) = &info.ip_info {
for addr in ip_info.subnets.iter().filter_map(|net| {
if let IpAddr::V4(ip) = net.addr() {
Some(SocketAddrV4::new(ip, self.external))
} else {
None
} }
}) {
for (reqs, (target, target_prefix, rc)) in self.targets.iter() { keep.insert(addr);
if rc.strong_count() == 0 { if !self.forwards.contains_key(&addr) {
continue; let rc = port_forward
} .add_forward(addr, target, target_prefix)
if !reqs.secure && !info.secure() {
continue;
}
let src_filter =
if reqs.public_gateways.contains(gw_id) {
None
} else if reqs.private_ips.contains(&IpAddr::V4(ip)) {
Some(subnet.trunc())
} else {
continue;
};
keep.insert(addr);
let fwd_rc = port_forward
.add_forward(addr, *target, *target_prefix, src_filter)
.await?; .await?;
self.forwards.insert(addr, fwd_rc); self.forwards.insert(addr, rc);
break;
} }
} }
} }
@@ -476,7 +398,7 @@ impl InterfaceForwardEntry {
external, external,
target, target,
target_prefix, target_prefix,
reqs, filter,
mut rc, mut rc,
}: InterfaceForwardRequest, }: InterfaceForwardRequest,
ip_info: &OrdMap<GatewayId, NetworkInterfaceInfo>, ip_info: &OrdMap<GatewayId, NetworkInterfaceInfo>,
@@ -490,8 +412,8 @@ impl InterfaceForwardEntry {
} }
let entry = self let entry = self
.targets .filter
.entry(reqs) .entry(filter)
.or_insert_with(|| (target, target_prefix, Arc::downgrade(&rc))); .or_insert_with(|| (target, target_prefix, Arc::downgrade(&rc)));
if entry.0 != target { if entry.0 != target {
entry.0 = target; entry.0 = target;
@@ -514,7 +436,7 @@ impl InterfaceForwardEntry {
ip_info: &OrdMap<GatewayId, NetworkInterfaceInfo>, ip_info: &OrdMap<GatewayId, NetworkInterfaceInfo>,
port_forward: &PortForwardController, port_forward: &PortForwardController,
) -> Result<(), Error> { ) -> Result<(), Error> {
self.targets.retain(|_, (_, _, rc)| rc.strong_count() > 0); self.filter.retain(|_, (_, _, rc)| rc.strong_count() > 0);
self.update(ip_info, port_forward).await self.update(ip_info, port_forward).await
} }
@@ -573,7 +495,7 @@ pub struct ForwardTable(pub BTreeMap<u16, ForwardTarget>);
pub struct ForwardTarget { pub struct ForwardTarget {
pub target: SocketAddrV4, pub target: SocketAddrV4,
pub target_prefix: u8, pub target_prefix: u8,
pub reqs: String, pub filter: String,
} }
impl From<&InterfaceForwardState> for ForwardTable { impl From<&InterfaceForwardState> for ForwardTable {
@@ -584,16 +506,16 @@ impl From<&InterfaceForwardState> for ForwardTable {
.iter() .iter()
.flat_map(|entry| { .flat_map(|entry| {
entry entry
.targets .filter
.iter() .iter()
.filter(|(_, (_, _, rc))| rc.strong_count() > 0) .filter(|(_, (_, _, rc))| rc.strong_count() > 0)
.map(|(reqs, (target, target_prefix, _))| { .map(|(filter, (target, target_prefix, _))| {
( (
entry.external, entry.external,
ForwardTarget { ForwardTarget {
target: *target, target: *target,
target_prefix: *target_prefix, target_prefix: *target_prefix,
reqs: format!("{reqs}"), filter: format!("{:#?}", filter),
}, },
) )
}) })
@@ -612,6 +534,16 @@ enum InterfaceForwardCommand {
DumpTable(oneshot::Sender<ForwardTable>), DumpTable(oneshot::Sender<ForwardTable>),
} }
#[test]
fn test() {
use crate::net::gateway::SecureFilter;
assert_ne!(
false.into_dyn(),
SecureFilter { secure: false }.into_dyn().into_dyn()
);
}
pub struct InterfacePortForwardController { pub struct InterfacePortForwardController {
req: mpsc::UnboundedSender<InterfaceForwardCommand>, req: mpsc::UnboundedSender<InterfaceForwardCommand>,
_thread: NonDetachingJoinHandle<()>, _thread: NonDetachingJoinHandle<()>,
@@ -661,7 +593,7 @@ impl InterfacePortForwardController {
pub async fn add( pub async fn add(
&self, &self,
external: u16, external: u16,
reqs: ForwardRequirements, filter: DynInterfaceFilter,
target: SocketAddrV4, target: SocketAddrV4,
target_prefix: u8, target_prefix: u8,
) -> Result<Arc<()>, Error> { ) -> Result<Arc<()>, Error> {
@@ -673,7 +605,7 @@ impl InterfacePortForwardController {
external, external,
target, target,
target_prefix, target_prefix,
reqs, filter,
rc, rc,
}, },
send, send,
@@ -705,18 +637,15 @@ async fn forward(
source: SocketAddrV4, source: SocketAddrV4,
target: SocketAddrV4, target: SocketAddrV4,
target_prefix: u8, target_prefix: u8,
src_filter: Option<&IpNet>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let mut cmd = Command::new("/usr/lib/startos/scripts/forward-port"); Command::new("/usr/lib/startos/scripts/forward-port")
cmd.env("sip", source.ip().to_string()) .env("sip", source.ip().to_string())
.env("dip", target.ip().to_string()) .env("dip", target.ip().to_string())
.env("dprefix", target_prefix.to_string()) .env("dprefix", target_prefix.to_string())
.env("sport", source.port().to_string()) .env("sport", source.port().to_string())
.env("dport", target.port().to_string()); .env("dport", target.port().to_string())
if let Some(subnet) = src_filter { .invoke(ErrorKind::Network)
cmd.env("src_subnet", subnet.to_string()); .await?;
}
cmd.invoke(ErrorKind::Network).await?;
Ok(()) Ok(())
} }
@@ -724,18 +653,15 @@ async fn unforward(
source: SocketAddrV4, source: SocketAddrV4,
target: SocketAddrV4, target: SocketAddrV4,
target_prefix: u8, target_prefix: u8,
src_filter: Option<&IpNet>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let mut cmd = Command::new("/usr/lib/startos/scripts/forward-port"); Command::new("/usr/lib/startos/scripts/forward-port")
cmd.env("UNDO", "1") .env("UNDO", "1")
.env("sip", source.ip().to_string()) .env("sip", source.ip().to_string())
.env("dip", target.ip().to_string()) .env("dip", target.ip().to_string())
.env("dprefix", target_prefix.to_string()) .env("dprefix", target_prefix.to_string())
.env("sport", source.port().to_string()) .env("sport", source.port().to_string())
.env("dport", target.port().to_string()); .env("dport", target.port().to_string())
if let Some(subnet) = src_filter { .invoke(ErrorKind::Network)
cmd.env("src_subnet", subnet.to_string()); .await?;
}
cmd.invoke(ErrorKind::Network).await?;
Ok(()) Ok(())
} }

File diff suppressed because it is too large Load Diff

View File

@@ -12,15 +12,23 @@ use crate::context::{CliContext, RpcContext};
use crate::db::model::DatabaseModel; use crate::db::model::DatabaseModel;
use crate::net::acme::AcmeProvider; use crate::net::acme::AcmeProvider;
use crate::net::host::{HostApiKind, all_hosts}; use crate::net::host::{HostApiKind, all_hosts};
use crate::net::tor::OnionAddress;
use crate::prelude::*; use crate::prelude::*;
use crate::util::serde::{HandlerExtSerde, display_serializable}; use crate::util::serde::{HandlerExtSerde, display_serializable};
#[derive(Clone, Debug, Deserialize, Serialize)] #[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "kebab-case")]
pub struct HostAddress { #[serde(rename_all_fields = "camelCase")]
pub address: InternedString, #[serde(tag = "kind")]
pub public: Option<PublicDomainConfig>, pub enum HostAddress {
pub private: Option<BTreeSet<GatewayId>>, Onion {
address: OnionAddress,
},
Domain {
address: InternedString,
public: Option<PublicDomainConfig>,
private: bool,
},
} }
#[derive(Debug, Clone, Deserialize, Serialize, TS)] #[derive(Debug, Clone, Deserialize, Serialize, TS)]
@@ -30,7 +38,18 @@ pub struct PublicDomainConfig {
} }
fn handle_duplicates(db: &mut DatabaseModel) -> Result<(), Error> { fn handle_duplicates(db: &mut DatabaseModel) -> Result<(), Error> {
let mut onions = BTreeSet::<OnionAddress>::new();
let mut domains = BTreeSet::<InternedString>::new(); let mut domains = BTreeSet::<InternedString>::new();
let check_onion = |onions: &mut BTreeSet<OnionAddress>, onion: OnionAddress| {
if onions.contains(&onion) {
return Err(Error::new(
eyre!("onion address {onion} is already in use"),
ErrorKind::InvalidRequest,
));
}
onions.insert(onion);
Ok(())
};
let check_domain = |domains: &mut BTreeSet<InternedString>, domain: InternedString| { let check_domain = |domains: &mut BTreeSet<InternedString>, domain: InternedString| {
if domains.contains(&domain) { if domains.contains(&domain) {
return Err(Error::new( return Err(Error::new(
@@ -49,27 +68,35 @@ fn handle_duplicates(db: &mut DatabaseModel) -> Result<(), Error> {
not_in_use.push(host); not_in_use.push(host);
continue; continue;
} }
for onion in host.as_onions().de()? {
check_onion(&mut onions, onion)?;
}
let public = host.as_public_domains().keys()?; let public = host.as_public_domains().keys()?;
for domain in &public { for domain in &public {
check_domain(&mut domains, domain.clone())?; check_domain(&mut domains, domain.clone())?;
} }
for domain in host.as_private_domains().keys()? { for domain in host.as_private_domains().de()? {
if !public.contains(&domain) { if !public.contains(&domain) {
check_domain(&mut domains, domain)?; check_domain(&mut domains, domain)?;
} }
} }
} }
for host in not_in_use { for host in not_in_use {
host.as_onions_mut()
.mutate(|o| Ok(o.retain(|o| !onions.contains(o))))?;
host.as_public_domains_mut() host.as_public_domains_mut()
.mutate(|d| Ok(d.retain(|d, _| !domains.contains(d))))?; .mutate(|d| Ok(d.retain(|d, _| !domains.contains(d))))?;
host.as_private_domains_mut() host.as_private_domains_mut()
.mutate(|d| Ok(d.retain(|d, _| !domains.contains(d))))?; .mutate(|d| Ok(d.retain(|d| !domains.contains(d))))?;
for onion in host.as_onions().de()? {
check_onion(&mut onions, onion)?;
}
let public = host.as_public_domains().keys()?; let public = host.as_public_domains().keys()?;
for domain in &public { for domain in &public {
check_domain(&mut domains, domain.clone())?; check_domain(&mut domains, domain.clone())?;
} }
for domain in host.as_private_domains().keys()? { for domain in host.as_private_domains().de()? {
if !public.contains(&domain) { if !public.contains(&domain) {
check_domain(&mut domains, domain)?; check_domain(&mut domains, domain)?;
} }
@@ -132,6 +159,29 @@ pub fn address_api<C: Context, Kind: HostApiKind>()
) )
.with_inherited(Kind::inheritance), .with_inherited(Kind::inheritance),
) )
.subcommand(
"onion",
ParentHandler::<C, Empty, Kind::Inheritance>::new()
.subcommand(
"add",
from_fn_async(add_onion::<Kind>)
.with_metadata("sync_db", Value::Bool(true))
.with_inherited(|_, a| a)
.no_display()
.with_about("about.add-address-to-host")
.with_call_remote::<CliContext>(),
)
.subcommand(
"remove",
from_fn_async(remove_onion::<Kind>)
.with_metadata("sync_db", Value::Bool(true))
.with_inherited(|_, a| a)
.no_display()
.with_about("about.remove-address-from-host")
.with_call_remote::<CliContext>(),
)
.with_inherited(Kind::inheritance),
)
.subcommand( .subcommand(
"list", "list",
from_fn_async(list_addresses::<Kind>) from_fn_async(list_addresses::<Kind>)
@@ -146,7 +196,35 @@ pub fn address_api<C: Context, Kind: HostApiKind>()
} }
let mut table = Table::new(); let mut table = Table::new();
todo!("find a good way to represent this"); table.add_row(row![bc => "ADDRESS", "PUBLIC", "ACME PROVIDER"]);
for address in &res {
match address {
HostAddress::Onion { address } => {
table.add_row(row![address, true, "N/A"]);
}
HostAddress::Domain {
address,
public: Some(PublicDomainConfig { gateway, acme }),
private,
} => {
table.add_row(row![
address,
&format!(
"{} ({gateway})",
if *private { "YES" } else { "ONLY" }
),
acme.as_ref().map(|a| a.0.as_str()).unwrap_or("NONE")
]);
}
HostAddress::Domain {
address,
public: None,
..
} => {
table.add_row(row![address, &format!("NO"), "N/A"]);
}
}
}
table.print_tty(false)?; table.print_tty(false)?;
@@ -193,10 +271,7 @@ pub async fn add_public_domain<Kind: HostApiKind>(
Kind::host_for(&inheritance, db)? Kind::host_for(&inheritance, db)?
.as_public_domains_mut() .as_public_domains_mut()
.insert(&fqdn, &PublicDomainConfig { acme, gateway })?; .insert(&fqdn, &PublicDomainConfig { acme, gateway })?;
handle_duplicates(db)?; handle_duplicates(db)
let gateways = db.as_public().as_server_info().as_network().as_gateways().de()?;
let ports = db.as_private().as_available_ports().de()?;
Kind::host_for(&inheritance, db)?.update_addresses(&gateways, &ports)
}) })
.await .await
.result?; .result?;
@@ -224,10 +299,7 @@ pub async fn remove_public_domain<Kind: HostApiKind>(
.mutate(|db| { .mutate(|db| {
Kind::host_for(&inheritance, db)? Kind::host_for(&inheritance, db)?
.as_public_domains_mut() .as_public_domains_mut()
.remove(&fqdn)?; .remove(&fqdn)
let gateways = db.as_public().as_server_info().as_network().as_gateways().de()?;
let ports = db.as_private().as_available_ports().de()?;
Kind::host_for(&inheritance, db)?.update_addresses(&gateways, &ports)
}) })
.await .await
.result?; .result?;
@@ -240,24 +312,19 @@ pub async fn remove_public_domain<Kind: HostApiKind>(
pub struct AddPrivateDomainParams { pub struct AddPrivateDomainParams {
#[arg(help = "help.arg.fqdn")] #[arg(help = "help.arg.fqdn")]
pub fqdn: InternedString, pub fqdn: InternedString,
pub gateway: GatewayId,
} }
pub async fn add_private_domain<Kind: HostApiKind>( pub async fn add_private_domain<Kind: HostApiKind>(
ctx: RpcContext, ctx: RpcContext,
AddPrivateDomainParams { fqdn, gateway }: AddPrivateDomainParams, AddPrivateDomainParams { fqdn }: AddPrivateDomainParams,
inheritance: Kind::Inheritance, inheritance: Kind::Inheritance,
) -> Result<(), Error> { ) -> Result<(), Error> {
ctx.db ctx.db
.mutate(|db| { .mutate(|db| {
Kind::host_for(&inheritance, db)? Kind::host_for(&inheritance, db)?
.as_private_domains_mut() .as_private_domains_mut()
.upsert(&fqdn, || Ok(BTreeSet::new()))? .mutate(|d| Ok(d.insert(fqdn)))?;
.mutate(|d| Ok(d.insert(gateway)))?; handle_duplicates(db)
handle_duplicates(db)?;
let gateways = db.as_public().as_server_info().as_network().as_gateways().de()?;
let ports = db.as_private().as_available_ports().de()?;
Kind::host_for(&inheritance, db)?.update_addresses(&gateways, &ports)
}) })
.await .await
.result?; .result?;
@@ -275,10 +342,7 @@ pub async fn remove_private_domain<Kind: HostApiKind>(
.mutate(|db| { .mutate(|db| {
Kind::host_for(&inheritance, db)? Kind::host_for(&inheritance, db)?
.as_private_domains_mut() .as_private_domains_mut()
.mutate(|d| Ok(d.remove(&domain)))?; .mutate(|d| Ok(d.remove(&domain)))
let gateways = db.as_public().as_server_info().as_network().as_gateways().de()?;
let ports = db.as_private().as_available_ports().de()?;
Kind::host_for(&inheritance, db)?.update_addresses(&gateways, &ports)
}) })
.await .await
.result?; .result?;
@@ -287,6 +351,55 @@ pub async fn remove_private_domain<Kind: HostApiKind>(
Ok(()) Ok(())
} }
#[derive(Deserialize, Serialize, Parser)]
pub struct OnionParams {
#[arg(help = "help.arg.onion-address")]
pub onion: String,
}
pub async fn add_onion<Kind: HostApiKind>(
ctx: RpcContext,
OnionParams { onion }: OnionParams,
inheritance: Kind::Inheritance,
) -> Result<(), Error> {
let onion = onion.parse::<OnionAddress>()?;
ctx.db
.mutate(|db| {
db.as_private().as_key_store().as_onion().get_key(&onion)?;
Kind::host_for(&inheritance, db)?
.as_onions_mut()
.mutate(|a| Ok(a.insert(onion)))?;
handle_duplicates(db)
})
.await
.result?;
Kind::sync_host(&ctx, inheritance).await?;
Ok(())
}
pub async fn remove_onion<Kind: HostApiKind>(
ctx: RpcContext,
OnionParams { onion }: OnionParams,
inheritance: Kind::Inheritance,
) -> Result<(), Error> {
let onion = onion.parse::<OnionAddress>()?;
ctx.db
.mutate(|db| {
Kind::host_for(&inheritance, db)?
.as_onions_mut()
.mutate(|a| Ok(a.remove(&onion)))
})
.await
.result?;
Kind::sync_host(&ctx, inheritance).await?;
Ok(())
}
pub async fn list_addresses<Kind: HostApiKind>( pub async fn list_addresses<Kind: HostApiKind>(
ctx: RpcContext, ctx: RpcContext,
_: Empty, _: Empty,

View File

@@ -1,23 +1,23 @@
use std::collections::{BTreeMap, BTreeSet}; use std::collections::{BTreeMap, BTreeSet};
use std::net::SocketAddr;
use std::str::FromStr; use std::str::FromStr;
use clap::Parser; use clap::Parser;
use clap::builder::ValueParserFactory; use clap::builder::ValueParserFactory;
use imbl::OrdSet;
use rpc_toolkit::{Context, Empty, HandlerArgs, HandlerExt, ParentHandler, from_fn_async}; use rpc_toolkit::{Context, Empty, HandlerArgs, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use ts_rs::TS; use ts_rs::TS;
use crate::HostId;
use crate::context::{CliContext, RpcContext}; use crate::context::{CliContext, RpcContext};
use crate::db::prelude::Map; use crate::db::model::public::NetworkInterfaceInfo;
use crate::net::forward::AvailablePorts; use crate::net::forward::AvailablePorts;
use crate::net::gateway::InterfaceFilter;
use crate::net::host::HostApiKind; use crate::net::host::HostApiKind;
use crate::net::service_interface::HostnameInfo;
use crate::net::vhost::AlpnInfo; use crate::net::vhost::AlpnInfo;
use crate::prelude::*; use crate::prelude::*;
use crate::util::FromStrParser; use crate::util::FromStrParser;
use crate::util::serde::{HandlerExtSerde, display_serializable}; use crate::util::serde::{HandlerExtSerde, display_serializable};
use crate::{GatewayId, HostId};
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, TS)] #[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, TS)]
#[ts(export)] #[ts(export)]
@@ -45,87 +45,25 @@ impl FromStr for BindId {
} }
} }
#[derive(Debug, Default, Clone, Deserialize, Serialize, TS, HasModel)] #[derive(Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
#[ts(export)] #[ts(export)]
#[model = "Model<Self>"]
pub struct DerivedAddressInfo {
/// User override: enable these addresses (only for public IP & port)
pub enabled: BTreeSet<SocketAddr>,
/// User override: disable these addresses (only for domains and private IP & port)
pub disabled: BTreeSet<(InternedString, u16)>,
/// COMPUTED: NetServiceData::update — all possible addresses for this binding
pub available: BTreeSet<HostnameInfo>,
}
impl DerivedAddressInfo {
/// Returns addresses that are currently enabled after applying overrides.
/// Default: public IPs are disabled, everything else is enabled.
/// Explicit `enabled`/`disabled` overrides take precedence.
pub fn enabled(&self) -> BTreeSet<&HostnameInfo> {
self.available
.iter()
.filter(|h| {
if h.public && h.metadata.is_ip() {
// Public IPs: disabled by default, explicitly enabled via SocketAddr
h.to_socket_addr().map_or(
true, // should never happen, but would rather see them if it does
|sa| self.enabled.contains(&sa),
)
} else {
!self
.disabled
.contains(&(h.host.clone(), h.port.unwrap_or_default())) // disablable addresses will always have a port
}
})
.collect()
}
}
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
#[model = "Model<Self>"]
#[ts(export)]
pub struct Bindings(pub BTreeMap<u16, BindInfo>);
impl Map for Bindings {
type Key = u16;
type Value = BindInfo;
fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error> {
Self::key_string(key)
}
fn key_string(key: &Self::Key) -> Result<InternedString, Error> {
Ok(InternedString::from_display(key))
}
}
impl std::ops::Deref for Bindings {
type Target = BTreeMap<u16, BindInfo>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl std::ops::DerefMut for Bindings {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct BindInfo { pub struct BindInfo {
pub enabled: bool, pub enabled: bool,
pub options: BindOptions, pub options: BindOptions,
pub net: NetInfo, pub net: NetInfo,
pub addresses: DerivedAddressInfo,
} }
#[derive(Clone, Debug, Deserialize, Serialize, TS, PartialEq, Eq, PartialOrd, Ord)] #[derive(Clone, Debug, Deserialize, Serialize, TS, PartialEq, Eq, PartialOrd, Ord)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
#[ts(export)] #[ts(export)]
pub struct NetInfo { pub struct NetInfo {
#[ts(as = "BTreeSet::<GatewayId>")]
#[serde(default)]
pub private_disabled: OrdSet<GatewayId>,
#[ts(as = "BTreeSet::<GatewayId>")]
#[serde(default)]
pub public_enabled: OrdSet<GatewayId>,
pub assigned_port: Option<u16>, pub assigned_port: Option<u16>,
pub assigned_ssl_port: Option<u16>, pub assigned_ssl_port: Option<u16>,
} }
@@ -133,28 +71,25 @@ impl BindInfo {
pub fn new(available_ports: &mut AvailablePorts, options: BindOptions) -> Result<Self, Error> { pub fn new(available_ports: &mut AvailablePorts, options: BindOptions) -> Result<Self, Error> {
let mut assigned_port = None; let mut assigned_port = None;
let mut assigned_ssl_port = None; let mut assigned_ssl_port = None;
if let Some(ssl) = &options.add_ssl { if options.add_ssl.is_some() {
assigned_ssl_port = available_ports assigned_ssl_port = Some(available_ports.alloc()?);
.try_alloc(ssl.preferred_external_port, true)
.or_else(|| Some(available_ports.alloc(true).ok()?));
} }
if options if options
.secure .secure
.map_or(true, |s| !(s.ssl && options.add_ssl.is_some())) .map_or(true, |s| !(s.ssl && options.add_ssl.is_some()))
{ {
assigned_port = available_ports assigned_port = Some(available_ports.alloc()?);
.try_alloc(options.preferred_external_port, false)
.or_else(|| Some(available_ports.alloc(false).ok()?));
} }
Ok(Self { Ok(Self {
enabled: true, enabled: true,
options, options,
net: NetInfo { net: NetInfo {
private_disabled: OrdSet::new(),
public_enabled: OrdSet::new(),
assigned_port, assigned_port,
assigned_ssl_port, assigned_ssl_port,
}, },
addresses: DerivedAddressInfo::default(),
}) })
} }
pub fn update( pub fn update(
@@ -162,11 +97,7 @@ impl BindInfo {
available_ports: &mut AvailablePorts, available_ports: &mut AvailablePorts,
options: BindOptions, options: BindOptions,
) -> Result<Self, Error> { ) -> Result<Self, Error> {
let Self { let Self { net: mut lan, .. } = self;
net: mut lan,
addresses,
..
} = self;
if options if options
.secure .secure
.map_or(true, |s| !(s.ssl && options.add_ssl.is_some())) .map_or(true, |s| !(s.ssl && options.add_ssl.is_some()))
@@ -174,26 +105,19 @@ impl BindInfo {
{ {
lan.assigned_port = if let Some(port) = lan.assigned_port.take() { lan.assigned_port = if let Some(port) = lan.assigned_port.take() {
Some(port) Some(port)
} else if let Some(port) =
available_ports.try_alloc(options.preferred_external_port, false)
{
Some(port)
} else { } else {
Some(available_ports.alloc(false)?) Some(available_ports.alloc()?)
}; };
} else { } else {
if let Some(port) = lan.assigned_port.take() { if let Some(port) = lan.assigned_port.take() {
available_ports.free([port]); available_ports.free([port]);
} }
} }
if let Some(ssl) = &options.add_ssl { if options.add_ssl.is_some() {
lan.assigned_ssl_port = if let Some(port) = lan.assigned_ssl_port.take() { lan.assigned_ssl_port = if let Some(port) = lan.assigned_ssl_port.take() {
Some(port) Some(port)
} else if let Some(port) = available_ports.try_alloc(ssl.preferred_external_port, true)
{
Some(port)
} else { } else {
Some(available_ports.alloc(true)?) Some(available_ports.alloc()?)
}; };
} else { } else {
if let Some(port) = lan.assigned_ssl_port.take() { if let Some(port) = lan.assigned_ssl_port.take() {
@@ -204,17 +128,22 @@ impl BindInfo {
enabled: true, enabled: true,
options, options,
net: lan, net: lan,
addresses: DerivedAddressInfo {
enabled: addresses.enabled,
disabled: addresses.disabled,
available: BTreeSet::new(),
},
}) })
} }
pub fn disable(&mut self) { pub fn disable(&mut self) {
self.enabled = false; self.enabled = false;
} }
} }
impl InterfaceFilter for NetInfo {
fn filter(&self, id: &GatewayId, info: &NetworkInterfaceInfo) -> bool {
info.ip_info.is_some()
&& if info.public() {
self.public_enabled.contains(id)
} else {
!self.private_disabled.contains(id)
}
}
}
#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize, TS)] #[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize, TS)]
#[ts(export)] #[ts(export)]
@@ -259,7 +188,7 @@ pub fn binding<C: Context, Kind: HostApiKind>()
let mut table = Table::new(); let mut table = Table::new();
table.add_row(row![bc => "INTERNAL PORT", "ENABLED", "EXTERNAL PORT", "EXTERNAL SSL PORT"]); table.add_row(row![bc => "INTERNAL PORT", "ENABLED", "EXTERNAL PORT", "EXTERNAL SSL PORT"]);
for (internal, info) in res.iter() { for (internal, info) in res {
table.add_row(row![ table.add_row(row![
internal, internal,
info.enabled, info.enabled,
@@ -284,12 +213,12 @@ pub fn binding<C: Context, Kind: HostApiKind>()
.with_call_remote::<CliContext>(), .with_call_remote::<CliContext>(),
) )
.subcommand( .subcommand(
"set-address-enabled", "set-gateway-enabled",
from_fn_async(set_address_enabled::<Kind>) from_fn_async(set_gateway_enabled::<Kind>)
.with_metadata("sync_db", Value::Bool(true)) .with_metadata("sync_db", Value::Bool(true))
.with_inherited(Kind::inheritance) .with_inherited(Kind::inheritance)
.no_display() .no_display()
.with_about("about.set-address-enabled-for-binding") .with_about("about.set-gateway-enabled-for-binding")
.with_call_remote::<CliContext>(), .with_call_remote::<CliContext>(),
) )
} }
@@ -298,7 +227,7 @@ pub async fn list_bindings<Kind: HostApiKind>(
ctx: RpcContext, ctx: RpcContext,
_: Empty, _: Empty,
inheritance: Kind::Inheritance, inheritance: Kind::Inheritance,
) -> Result<Bindings, Error> { ) -> Result<BTreeMap<u16, BindInfo>, Error> {
Kind::host_for(&inheritance, &mut ctx.db.peek().await)? Kind::host_for(&inheritance, &mut ctx.db.peek().await)?
.as_bindings() .as_bindings()
.de() .de()
@@ -307,54 +236,50 @@ pub async fn list_bindings<Kind: HostApiKind>(
#[derive(Deserialize, Serialize, Parser, TS)] #[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
#[ts(export)] #[ts(export)]
pub struct BindingSetAddressEnabledParams { pub struct BindingGatewaySetEnabledParams {
#[arg(help = "help.arg.internal-port")] #[arg(help = "help.arg.internal-port")]
internal_port: u16, internal_port: u16,
#[arg(long, help = "help.arg.address")] #[arg(help = "help.arg.gateway-id")]
address: String, gateway: GatewayId,
#[arg(long, help = "help.arg.binding-enabled")] #[arg(long, help = "help.arg.binding-enabled")]
enabled: Option<bool>, enabled: Option<bool>,
} }
pub async fn set_address_enabled<Kind: HostApiKind>( pub async fn set_gateway_enabled<Kind: HostApiKind>(
ctx: RpcContext, ctx: RpcContext,
BindingSetAddressEnabledParams { BindingGatewaySetEnabledParams {
internal_port, internal_port,
address, gateway,
enabled, enabled,
}: BindingSetAddressEnabledParams, }: BindingGatewaySetEnabledParams,
inheritance: Kind::Inheritance, inheritance: Kind::Inheritance,
) -> Result<(), Error> { ) -> Result<(), Error> {
let enabled = enabled.unwrap_or(true); let enabled = enabled.unwrap_or(true);
let address: HostnameInfo = let gateway_public = ctx
serde_json::from_str(&address).with_kind(ErrorKind::Deserialization)?; .net_controller
.net_iface
.watcher
.ip_info()
.get(&gateway)
.or_not_found(&gateway)?
.public();
ctx.db ctx.db
.mutate(|db| { .mutate(|db| {
Kind::host_for(&inheritance, db)? Kind::host_for(&inheritance, db)?
.as_bindings_mut() .as_bindings_mut()
.mutate(|b| { .mutate(|b| {
let bind = b.get_mut(&internal_port).or_not_found(internal_port)?; let net = &mut b.get_mut(&internal_port).or_not_found(internal_port)?.net;
if address.public && address.metadata.is_ip() { if gateway_public {
// Public IPs: toggle via SocketAddr in `enabled` set
let sa = address.to_socket_addr().ok_or_else(|| {
Error::new(
eyre!("cannot convert address to socket addr"),
ErrorKind::InvalidRequest,
)
})?;
if enabled { if enabled {
bind.addresses.enabled.insert(sa); net.public_enabled.insert(gateway);
} else { } else {
bind.addresses.enabled.remove(&sa); net.public_enabled.remove(&gateway);
} }
} else { } else {
// Domains and private IPs: toggle via (host, port) in `disabled` set
let port = address.port.unwrap_or(if address.ssl { 443 } else { 80 });
let key = (address.host.clone(), port);
if enabled { if enabled {
bind.addresses.disabled.remove(&key); net.private_disabled.remove(&gateway);
} else { } else {
bind.addresses.disabled.insert(key); net.private_disabled.insert(gateway);
} }
} }
Ok(()) Ok(())

View File

@@ -3,23 +3,21 @@ use std::future::Future;
use std::panic::RefUnwindSafe; use std::panic::RefUnwindSafe;
use clap::Parser; use clap::Parser;
use imbl::OrdMap;
use imbl_value::InternedString; use imbl_value::InternedString;
use itertools::Itertools; use itertools::Itertools;
use patch_db::DestructureMut;
use rpc_toolkit::{Context, Empty, HandlerExt, OrEmpty, ParentHandler, from_fn_async}; use rpc_toolkit::{Context, Empty, HandlerExt, OrEmpty, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use ts_rs::TS; use ts_rs::TS;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::db::model::DatabaseModel; use crate::db::model::DatabaseModel;
use crate::db::model::public::NetworkInterfaceInfo;
use crate::net::forward::AvailablePorts; use crate::net::forward::AvailablePorts;
use crate::net::host::address::{HostAddress, PublicDomainConfig, address_api}; use crate::net::host::address::{HostAddress, PublicDomainConfig, address_api};
use crate::net::host::binding::{BindInfo, BindOptions, Bindings, binding}; use crate::net::host::binding::{BindInfo, BindOptions, binding};
use crate::net::service_interface::{HostnameInfo, HostnameMetadata}; use crate::net::service_interface::HostnameInfo;
use crate::net::tor::OnionAddress;
use crate::prelude::*; use crate::prelude::*;
use crate::{GatewayId, HostId, PackageId}; use crate::{HostId, PackageId};
pub mod address; pub mod address;
pub mod binding; pub mod binding;
@@ -29,9 +27,13 @@ pub mod binding;
#[model = "Model<Self>"] #[model = "Model<Self>"]
#[ts(export)] #[ts(export)]
pub struct Host { pub struct Host {
pub bindings: Bindings, pub bindings: BTreeMap<u16, BindInfo>,
#[ts(type = "string[]")]
pub onions: BTreeSet<OnionAddress>,
pub public_domains: BTreeMap<InternedString, PublicDomainConfig>, pub public_domains: BTreeMap<InternedString, PublicDomainConfig>,
pub private_domains: BTreeMap<InternedString, BTreeSet<GatewayId>>, pub private_domains: BTreeSet<InternedString>,
/// COMPUTED: NetService::update
pub hostname_info: BTreeMap<u16, Vec<HostnameInfo>>, // internal port -> Hostnames
} }
impl AsRef<Host> for Host { impl AsRef<Host> for Host {
@@ -44,188 +46,31 @@ impl Host {
Self::default() Self::default()
} }
pub fn addresses<'a>(&'a self) -> impl Iterator<Item = HostAddress> + 'a { pub fn addresses<'a>(&'a self) -> impl Iterator<Item = HostAddress> + 'a {
self.public_domains self.onions
.iter() .iter()
.map(|(address, config)| HostAddress { .cloned()
address: address.clone(), .map(|address| HostAddress::Onion { address })
public: Some(config.clone()), .chain(
private: self.private_domains.get(address).cloned(), self.public_domains
}) .iter()
.map(|(address, config)| HostAddress::Domain {
address: address.clone(),
public: Some(config.clone()),
private: self.private_domains.contains(address),
}),
)
.chain( .chain(
self.private_domains self.private_domains
.iter() .iter()
.filter(|(domain, _)| !self.public_domains.contains_key(*domain)) .filter(|a| !self.public_domains.contains_key(*a))
.map(|(domain, gateways)| HostAddress { .map(|address| HostAddress::Domain {
address: domain.clone(), address: address.clone(),
public: None, public: None,
private: Some(gateways.clone()), private: true,
}), }),
) )
} }
} }
impl Model<Host> {
pub fn update_addresses(
&mut self,
gateways: &OrdMap<GatewayId, NetworkInterfaceInfo>,
available_ports: &AvailablePorts,
) -> Result<(), Error> {
let this = self.destructure_mut();
for (_, bind) in this.bindings.as_entries_mut()? {
let net = bind.as_net().de()?;
let opt = bind.as_options().de()?;
let mut available = BTreeSet::new();
for (gid, g) in gateways {
let Some(ip_info) = &g.ip_info else {
continue;
};
let gateway_secure = g.secure();
for subnet in &ip_info.subnets {
let host = InternedString::from_display(&subnet.addr());
let metadata = if subnet.addr().is_ipv4() {
HostnameMetadata::Ipv4 {
gateway: gid.clone(),
}
} else {
HostnameMetadata::Ipv6 {
gateway: gid.clone(),
scope_id: ip_info.scope_id,
}
};
if let Some(port) = net.assigned_port.filter(|_| {
opt.secure
.map_or(gateway_secure, |s| !(s.ssl && opt.add_ssl.is_some()))
}) {
available.insert(HostnameInfo {
ssl: opt.secure.map_or(false, |s| s.ssl),
public: false,
host: host.clone(),
port: Some(port),
metadata: metadata.clone(),
});
}
if let Some(port) = net.assigned_ssl_port {
available.insert(HostnameInfo {
ssl: true,
public: false,
host: host.clone(),
port: Some(port),
metadata,
});
}
}
if let Some(wan_ip) = &ip_info.wan_ip {
let host = InternedString::from_display(&wan_ip);
let metadata = HostnameMetadata::Ipv4 {
gateway: gid.clone(),
};
if let Some(port) = net.assigned_port.filter(|_| {
opt.secure.map_or(
false, // the public internet is never secure
|s| !(s.ssl && opt.add_ssl.is_some()),
)
}) {
available.insert(HostnameInfo {
ssl: opt.secure.map_or(false, |s| s.ssl),
public: true,
host: host.clone(),
port: Some(port),
metadata: metadata.clone(),
});
}
if let Some(port) = net.assigned_ssl_port {
available.insert(HostnameInfo {
ssl: true,
public: true,
host: host.clone(),
port: Some(port),
metadata,
});
}
}
}
for (domain, info) in this.public_domains.de()? {
let metadata = HostnameMetadata::PublicDomain {
gateway: info.gateway.clone(),
};
if let Some(port) = net.assigned_port.filter(|_| {
opt.secure.map_or(
false, // the public internet is never secure
|s| !(s.ssl && opt.add_ssl.is_some()),
)
}) {
available.insert(HostnameInfo {
ssl: opt.secure.map_or(false, |s| s.ssl),
public: true,
host: domain.clone(),
port: Some(port),
metadata: metadata.clone(),
});
}
if let Some(mut port) = net.assigned_ssl_port {
if let Some(preferred) = opt
.add_ssl
.as_ref()
.map(|s| s.preferred_external_port)
.filter(|p| available_ports.is_ssl(*p))
{
port = preferred;
}
available.insert(HostnameInfo {
ssl: true,
public: true,
host: domain.clone(),
port: Some(port),
metadata,
});
}
}
for (domain, domain_gateways) in this.private_domains.de()? {
if let Some(port) = net.assigned_port.filter(|_| {
opt.secure
.map_or(true, |s| !(s.ssl && opt.add_ssl.is_some()))
}) {
let gateways = if opt.secure.is_some() {
domain_gateways.clone()
} else {
domain_gateways
.iter()
.cloned()
.filter(|g| gateways.get(g).map_or(false, |g| g.secure()))
.collect()
};
available.insert(HostnameInfo {
ssl: opt.secure.map_or(false, |s| s.ssl),
public: true,
host: domain.clone(),
port: Some(port),
metadata: HostnameMetadata::PrivateDomain { gateways },
});
}
if let Some(mut port) = net.assigned_ssl_port {
if let Some(preferred) = opt
.add_ssl
.as_ref()
.map(|s| s.preferred_external_port)
.filter(|p| available_ports.is_ssl(*p))
{
port = preferred;
}
available.insert(HostnameInfo {
ssl: true,
public: true,
host: domain.clone(),
port: Some(port),
metadata: HostnameMetadata::PrivateDomain {
gateways: domain_gateways,
},
});
}
}
bind.as_addresses_mut().as_available_mut().ser(&available)?;
}
Ok(())
}
}
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)] #[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
#[model = "Model<Self>"] #[model = "Model<Self>"]
@@ -267,7 +112,22 @@ pub fn host_for<'a>(
.as_hosts_mut(), .as_hosts_mut(),
) )
} }
host_info(db, package_id)?.upsert(host_id, || Ok(Host::new())) let tor_key = if host_info(db, package_id)?.as_idx(host_id).is_none() {
Some(
db.as_private_mut()
.as_key_store_mut()
.as_onion_mut()
.new_key()?,
)
} else {
None
};
host_info(db, package_id)?.upsert(host_id, || {
let mut h = Host::new();
h.onions
.insert(tor_key.or_not_found("generated tor key")?.onion_address());
Ok(h)
})
} }
pub fn all_hosts(db: &mut DatabaseModel) -> impl Iterator<Item = Result<&mut Model<Host>, Error>> { pub fn all_hosts(db: &mut DatabaseModel) -> impl Iterator<Item = Result<&mut Model<Host>, Error>> {

View File

@@ -3,21 +3,28 @@ use serde::{Deserialize, Serialize};
use crate::account::AccountInfo; use crate::account::AccountInfo;
use crate::net::acme::AcmeCertStore; use crate::net::acme::AcmeCertStore;
use crate::net::ssl::CertStore; use crate::net::ssl::CertStore;
use crate::net::tor::OnionStore;
use crate::prelude::*; use crate::prelude::*;
#[derive(Debug, Deserialize, Serialize, HasModel)] #[derive(Debug, Deserialize, Serialize, HasModel)]
#[model = "Model<Self>"] #[model = "Model<Self>"]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub struct KeyStore { pub struct KeyStore {
pub onion: OnionStore,
pub local_certs: CertStore, pub local_certs: CertStore,
#[serde(default)] #[serde(default)]
pub acme: AcmeCertStore, pub acme: AcmeCertStore,
} }
impl KeyStore { impl KeyStore {
pub fn new(account: &AccountInfo) -> Result<Self, Error> { pub fn new(account: &AccountInfo) -> Result<Self, Error> {
Ok(Self { let mut res = Self {
onion: OnionStore::new(),
local_certs: CertStore::new(account)?, local_certs: CertStore::new(account)?,
acme: AcmeCertStore::new(), acme: AcmeCertStore::new(),
}) };
for tor_key in account.tor_keys.iter().cloned() {
res.onion.insert(tor_key);
}
Ok(res)
} }
} }

View File

@@ -14,6 +14,7 @@ pub mod socks;
pub mod ssl; pub mod ssl;
pub mod static_server; pub mod static_server;
pub mod tls; pub mod tls;
pub mod tor;
pub mod tunnel; pub mod tunnel;
pub mod utils; pub mod utils;
pub mod vhost; pub mod vhost;
@@ -22,6 +23,7 @@ pub mod wifi;
pub fn net_api<C: Context>() -> ParentHandler<C> { pub fn net_api<C: Context>() -> ParentHandler<C> {
ParentHandler::new() ParentHandler::new()
.subcommand("tor", tor::tor_api::<C>().with_about("about.tor-commands"))
.subcommand( .subcommand(
"acme", "acme",
acme::acme_api::<C>().with_about("about.setup-acme-certificate"), acme::acme_api::<C>().with_about("about.setup-acme-certificate"),

File diff suppressed because it is too large Load Diff

View File

@@ -1,79 +1,36 @@
use std::collections::BTreeSet; use std::net::{Ipv4Addr, Ipv6Addr};
use std::net::SocketAddr;
use imbl_value::{InOMap, InternedString}; use imbl_value::InternedString;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use ts_rs::TS; use ts_rs::TS;
use crate::prelude::*; use crate::{GatewayId, HostId, ServiceInterfaceId};
use crate::{GatewayId, HostId, PackageId, ServiceInterfaceId};
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize, TS)] #[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[ts(export)] #[ts(export)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub struct HostnameInfo {
pub ssl: bool,
pub public: bool,
pub host: InternedString,
pub port: Option<u16>,
pub metadata: HostnameMetadata,
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize, TS)]
#[ts(export)]
#[serde(rename_all = "kebab-case")]
#[serde(rename_all_fields = "camelCase")] #[serde(rename_all_fields = "camelCase")]
#[serde(tag = "kind")] #[serde(tag = "kind")]
pub enum HostnameMetadata { pub enum HostnameInfo {
Ipv4 { Ip {
gateway: GatewayId, gateway: GatewayInfo,
public: bool,
hostname: IpHostname,
}, },
Ipv6 { Onion {
gateway: GatewayId, hostname: OnionHostname,
scope_id: u32,
},
PrivateDomain {
gateways: BTreeSet<GatewayId>,
},
PublicDomain {
gateway: GatewayId,
},
Plugin {
package: PackageId,
#[serde(flatten)]
#[ts(skip)]
extra: InOMap<InternedString, Value>,
}, },
} }
impl HostnameInfo { impl HostnameInfo {
pub fn to_socket_addr(&self) -> Option<SocketAddr> {
let ip = self.host.parse().ok()?;
Some(SocketAddr::new(ip, self.port?))
}
pub fn to_san_hostname(&self) -> InternedString { pub fn to_san_hostname(&self) -> InternedString {
self.host.clone()
}
}
impl HostnameMetadata {
pub fn is_ip(&self) -> bool {
matches!(self, Self::Ipv4 { .. } | Self::Ipv6 { .. })
}
pub fn gateways(&self) -> Box<dyn Iterator<Item = &GatewayId> + '_> {
match self { match self {
Self::Ipv4 { gateway } Self::Ip { hostname, .. } => hostname.to_san_hostname(),
| Self::Ipv6 { gateway, .. } Self::Onion { hostname } => hostname.to_san_hostname(),
| Self::PublicDomain { gateway } => Box::new(std::iter::once(gateway)),
Self::PrivateDomain { gateways } => Box::new(gateways.iter()),
Self::Plugin { .. } => Box::new(std::iter::empty()),
} }
} }
} }
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize, TS)] #[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[ts(export)] #[ts(export)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
pub struct GatewayInfo { pub struct GatewayInfo {
@@ -82,6 +39,63 @@ pub struct GatewayInfo {
pub public: bool, pub public: bool,
} }
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct OnionHostname {
#[ts(type = "string")]
pub value: InternedString,
pub port: Option<u16>,
pub ssl_port: Option<u16>,
}
impl OnionHostname {
pub fn to_san_hostname(&self) -> InternedString {
self.value.clone()
}
}
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[ts(export)]
#[serde(rename_all = "camelCase")]
#[serde(rename_all_fields = "camelCase")]
#[serde(tag = "kind")]
pub enum IpHostname {
Ipv4 {
value: Ipv4Addr,
port: Option<u16>,
ssl_port: Option<u16>,
},
Ipv6 {
value: Ipv6Addr,
#[serde(default)]
scope_id: u32,
port: Option<u16>,
ssl_port: Option<u16>,
},
Local {
#[ts(type = "string")]
value: InternedString,
port: Option<u16>,
ssl_port: Option<u16>,
},
Domain {
#[ts(type = "string")]
value: InternedString,
port: Option<u16>,
ssl_port: Option<u16>,
},
}
impl IpHostname {
pub fn to_san_hostname(&self) -> InternedString {
match self {
Self::Ipv4 { value, .. } => InternedString::from_display(value),
Self::Ipv6 { value, .. } => InternedString::from_display(value),
Self::Local { value, .. } => value.clone(),
Self::Domain { value, .. } => value.clone(),
}
}
}
#[derive(Clone, Debug, Deserialize, Serialize, TS)] #[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[ts(export)] #[ts(export)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]

View File

@@ -8,6 +8,7 @@ use socks5_impl::server::{AuthAdaptor, ClientConnection, Server};
use tokio::net::{TcpListener, TcpStream}; use tokio::net::{TcpListener, TcpStream};
use crate::HOST_IP; use crate::HOST_IP;
use crate::net::tor::TorController;
use crate::prelude::*; use crate::prelude::*;
use crate::util::actor::background::BackgroundJobQueue; use crate::util::actor::background::BackgroundJobQueue;
use crate::util::future::NonDetachingJoinHandle; use crate::util::future::NonDetachingJoinHandle;
@@ -21,7 +22,7 @@ pub struct SocksController {
_thread: NonDetachingJoinHandle<()>, _thread: NonDetachingJoinHandle<()>,
} }
impl SocksController { impl SocksController {
pub fn new(listen: SocketAddr) -> Result<Self, Error> { pub fn new(listen: SocketAddr, tor: TorController) -> Result<Self, Error> {
Ok(Self { Ok(Self {
_thread: tokio::spawn(async move { _thread: tokio::spawn(async move {
let auth: AuthAdaptor<()> = Arc::new(NoAuth); let auth: AuthAdaptor<()> = Arc::new(NoAuth);
@@ -44,6 +45,7 @@ impl SocksController {
loop { loop {
match server.accept().await { match server.accept().await {
Ok((stream, _)) => { Ok((stream, _)) => {
let tor = tor.clone();
bg.add_job(async move { bg.add_job(async move {
if let Err(e) = async { if let Err(e) = async {
match stream match stream
@@ -55,6 +57,40 @@ impl SocksController {
.await .await
.with_kind(ErrorKind::Network)? .with_kind(ErrorKind::Network)?
{ {
ClientConnection::Connect(
reply,
Address::DomainAddress(domain, port),
) if domain.ends_with(".onion") => {
if let Ok(mut target) = tor
.connect_onion(&domain.parse()?, port)
.await
{
let mut sock = reply
.reply(
Reply::Succeeded,
Address::unspecified(),
)
.await
.with_kind(ErrorKind::Network)?;
tokio::io::copy_bidirectional(
&mut sock,
&mut target,
)
.await
.with_kind(ErrorKind::Network)?;
} else {
let mut sock = reply
.reply(
Reply::HostUnreachable,
Address::unspecified(),
)
.await
.with_kind(ErrorKind::Network)?;
sock.shutdown()
.await
.with_kind(ErrorKind::Network)?;
}
}
ClientConnection::Connect(reply, addr) => { ClientConnection::Connect(reply, addr) => {
if let Ok(mut target) = match addr { if let Ok(mut target) = match addr {
Address::DomainAddress(domain, port) => { Address::DomainAddress(domain, port) => {

View File

@@ -9,14 +9,14 @@ use async_compression::tokio::bufread::GzipEncoder;
use axum::Router; use axum::Router;
use axum::body::Body; use axum::body::Body;
use axum::extract::{self as x, Request}; use axum::extract::{self as x, Request};
use axum::response::{IntoResponse, Response}; use axum::response::{IntoResponse, Redirect, Response};
use axum::routing::{any, get}; use axum::routing::{any, get};
use base64::display::Base64Display; use base64::display::Base64Display;
use digest::Digest; use digest::Digest;
use futures::future::ready; use futures::future::ready;
use http::header::{ use http::header::{
ACCEPT_ENCODING, ACCEPT_RANGES, CACHE_CONTROL, CONNECTION, CONTENT_ENCODING, CONTENT_LENGTH, ACCEPT_ENCODING, ACCEPT_RANGES, CACHE_CONTROL, CONNECTION, CONTENT_ENCODING, CONTENT_LENGTH,
CONTENT_RANGE, CONTENT_TYPE, ETAG, RANGE, CONTENT_RANGE, CONTENT_TYPE, ETAG, HOST, RANGE,
}; };
use http::request::Parts as RequestParts; use http::request::Parts as RequestParts;
use http::{HeaderValue, Method, StatusCode}; use http::{HeaderValue, Method, StatusCode};
@@ -36,6 +36,8 @@ use crate::middleware::auth::Auth;
use crate::middleware::auth::session::ValidSessionToken; use crate::middleware::auth::session::ValidSessionToken;
use crate::middleware::cors::Cors; use crate::middleware::cors::Cors;
use crate::middleware::db::SyncDb; use crate::middleware::db::SyncDb;
use crate::net::gateway::GatewayInfo;
use crate::net::tls::TlsHandshakeInfo;
use crate::prelude::*; use crate::prelude::*;
use crate::rpc_continuations::{Guid, RpcContinuations}; use crate::rpc_continuations::{Guid, RpcContinuations};
use crate::s9pk::S9pk; use crate::s9pk::S9pk;
@@ -87,6 +89,30 @@ impl UiContext for RpcContext {
.middleware(SyncDb::new()) .middleware(SyncDb::new())
} }
fn extend_router(self, router: Router) -> Router { fn extend_router(self, router: Router) -> Router {
async fn https_redirect_if_public_http(
req: Request,
next: axum::middleware::Next,
) -> Response {
if req
.extensions()
.get::<GatewayInfo>()
.map_or(false, |p| p.info.public())
&& req.extensions().get::<TlsHandshakeInfo>().is_none()
{
Redirect::temporary(&format!(
"https://{}{}",
req.headers()
.get(HOST)
.and_then(|s| s.to_str().ok())
.unwrap_or("localhost"),
req.uri()
))
.into_response()
} else {
next.run(req).await
}
}
router router
.route("/proxy/{url}", { .route("/proxy/{url}", {
let ctx = self.clone(); let ctx = self.clone();
@@ -110,6 +136,7 @@ impl UiContext for RpcContext {
} }
}), }),
) )
.layer(axum::middleware::from_fn(https_redirect_if_public_http))
} }
} }

964
core/src/net/tor/arti.rs Normal file
View File

@@ -0,0 +1,964 @@
use std::borrow::Cow;
use std::collections::{BTreeMap, BTreeSet};
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::{Arc, Weak};
use std::time::{Duration, Instant};
use arti_client::config::onion_service::OnionServiceConfigBuilder;
use arti_client::{TorClient, TorClientConfig};
use base64::Engine;
use clap::Parser;
use color_eyre::eyre::eyre;
use futures::{FutureExt, StreamExt};
use imbl_value::InternedString;
use itertools::Itertools;
use rpc_toolkit::{Context, Empty, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
use tokio::net::TcpStream;
use tokio::sync::Notify;
use tor_cell::relaycell::msg::Connected;
use tor_hscrypto::pk::{HsId, HsIdKeypair};
use tor_hsservice::status::State as ArtiOnionServiceState;
use tor_hsservice::{HsNickname, RunningOnionService};
use tor_keymgr::config::ArtiKeystoreKind;
use tor_proto::client::stream::IncomingStreamRequest;
use tor_rtcompat::tokio::TokioRustlsRuntime;
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::prelude::*;
use crate::util::actor::background::BackgroundJobQueue;
use crate::util::future::{NonDetachingJoinHandle, Until};
use crate::util::io::ReadWriter;
use crate::util::serde::{
BASE64, Base64, HandlerExtSerde, WithIoFormat, deserialize_from_str, display_serializable,
serialize_display,
};
use crate::util::sync::{SyncMutex, SyncRwLock, Watch};
const BOOTSTRAP_PROGRESS_TIMEOUT: Duration = Duration::from_secs(300);
const HS_BOOTSTRAP_TIMEOUT: Duration = Duration::from_secs(300);
const RETRY_COOLDOWN: Duration = Duration::from_secs(15);
const HEALTH_CHECK_FAILURE_ALLOWANCE: usize = 5;
const HEALTH_CHECK_COOLDOWN: Duration = Duration::from_secs(120);
// A v3 onion address: newtype over arti's `HsId` (the service's public identity).
#[derive(Debug, Clone, Copy)]
pub struct OnionAddress(pub HsId);
impl std::fmt::Display for OnionAddress {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Deliberately print the full, unredacted address (HsId's default
        // display goes through safelog redaction).
        safelog::DisplayRedacted::fmt_unredacted(&self.0, f)
    }
}
impl FromStr for OnionAddress {
    type Err = Error;

    /// Parses an onion address, tolerating input with or without the
    /// `.onion` suffix. Invalid addresses map to `ErrorKind::Tor`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        let normalized: Cow<str> = if s.ends_with(".onion") {
            Cow::Borrowed(s)
        } else {
            Cow::Owned(format!("{s}.onion"))
        };
        let id = normalized.parse::<HsId>().with_kind(ErrorKind::Tor)?;
        Ok(Self(id))
    }
}
impl Serialize for OnionAddress {
    // Serialized as the unredacted display string (the `<addr>.onion` form).
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serialize_display(self, serializer)
    }
}
impl<'de> Deserialize<'de> for OnionAddress {
    // Deserialized via `FromStr`, so a missing `.onion` suffix is tolerated.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        deserialize_from_str(deserializer)
    }
}
// Equality compares the raw `HsId` bytes via `as_ref`.
impl PartialEq for OnionAddress {
    fn eq(&self, other: &Self) -> bool {
        self.0.as_ref() == other.0.as_ref()
    }
}
impl Eq for OnionAddress {}
impl PartialOrd for OnionAddress {
    // Delegate to `Ord` so the two orderings can never disagree
    // (clippy: `non_canonical_partial_cmp_impl`). The byte slices compared
    // in `cmp` are totally ordered, so this is behaviorally identical to
    // the previous direct delegation.
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for OnionAddress {
    // Total order over the raw `HsId` bytes.
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.0.as_ref().cmp(other.0.as_ref())
    }
}
// An onion service's long-term identity keypair (SECRET material).
pub struct TorSecretKey(pub HsIdKeypair);
impl TorSecretKey {
    // Derives the public `.onion` address from the keypair's public half.
    pub fn onion_address(&self) -> OnionAddress {
        OnionAddress(HsId::from(self.0.as_ref().public().to_bytes()))
    }
    // Reconstructs a key from its 64-byte expanded ed25519 secret-key
    // encoding; invalid bytes map to `ErrorKind::Tor`.
    pub fn from_bytes(bytes: [u8; 64]) -> Result<Self, Error> {
        Ok(Self(
            tor_llcrypto::pk::ed25519::ExpandedKeypair::from_secret_key_bytes(bytes)
                .ok_or_else(|| {
                    Error::new(
                        eyre!("{}", t!("net.tor.invalid-ed25519-key")),
                        ErrorKind::Tor,
                    )
                })?
                .into(),
        ))
    }
    // Generates a fresh random keypair.
    pub fn generate() -> Self {
        Self(
            tor_llcrypto::pk::ed25519::ExpandedKeypair::from(
                &tor_llcrypto::pk::ed25519::Keypair::generate(&mut rand::rng()),
            )
            .into(),
        )
    }
}
impl Clone for TorSecretKey {
    // `HsIdKeypair` is not `Clone`, so round-trip through the 64-byte
    // expanded-secret-key encoding. The bytes come from a valid keypair,
    // so re-parsing cannot fail; use `expect` (not a bare `unwrap`) so a
    // violated invariant produces a diagnosable panic message.
    fn clone(&self) -> Self {
        Self(HsIdKeypair::from(
            tor_llcrypto::pk::ed25519::ExpandedKeypair::from_secret_key_bytes(
                self.0.as_ref().to_secret_key_bytes(),
            )
            .expect("round-tripped expanded ed25519 secret key must be valid"),
        ))
    }
}
impl std::fmt::Display for TorSecretKey {
    // WARNING: renders the SECRET key material (base64 of the 64-byte
    // expanded secret key). Intended for key export/serialization — never
    // log this value (cf. the redacting `Debug` impl on `OnionStore`).
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(
            f,
            "{}",
            BASE64.encode(self.0.as_ref().to_secret_key_bytes())
        )
    }
}
impl FromStr for TorSecretKey {
    type Err = Error;
    // Inverse of `Display`: base64-decode, then validate via `from_bytes`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Self::from_bytes(Base64::<[u8; 64]>::from_str(s)?.0)
    }
}
impl Serialize for TorSecretKey {
    // Serialized as the base64 secret-key string (see `Display`) — this
    // persists SECRET material; only use in the private key store.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serialize_display(self, serializer)
    }
}
impl<'de> Deserialize<'de> for TorSecretKey {
    // Deserialized via `FromStr` (base64 → validated keypair).
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        deserialize_from_str(deserializer)
    }
}
// Store of onion-service secret keys, indexed by their public address.
#[derive(Default, Deserialize, Serialize)]
pub struct OnionStore(BTreeMap<OnionAddress, TorSecretKey>);
impl Map for OnionStore {
    type Key = OnionAddress;
    type Value = TorSecretKey;
    fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error> {
        Self::key_string(key)
    }
    // Patch-db keys are the display form of the address.
    fn key_string(key: &Self::Key) -> Result<imbl_value::InternedString, Error> {
        Ok(InternedString::from_display(key))
    }
}
impl OnionStore {
    pub fn new() -> Self {
        Self::default()
    }
    // Inserts under the address derived from the key itself, so the map can
    // never hold a mismatched (address, key) pair.
    pub fn insert(&mut self, key: TorSecretKey) {
        self.0.insert(key.onion_address(), key);
    }
}
impl Model<OnionStore> {
    // Generates a fresh key, persists it under its derived address, and
    // returns it to the caller.
    pub fn new_key(&mut self) -> Result<TorSecretKey, Error> {
        let key = TorSecretKey::generate();
        self.insert(&key.onion_address(), &key)?;
        Ok(key)
    }
    // Persists an existing key under its derived address.
    pub fn insert_key(&mut self, key: &TorSecretKey) -> Result<(), Error> {
        self.insert(&key.onion_address(), &key)
    }
    // Looks up the secret key for `address`; absence is a not-found error.
    pub fn get_key(&self, address: &OnionAddress) -> Result<TorSecretKey, Error> {
        self.as_idx(address)
            .or_not_found(lazy_format!("private key for {address}"))?
            .de()
    }
}
// Manual Debug impl so secret keys are never printed: each value is rendered
// as `KeyFor(<onion address>)` instead of the key material itself.
impl std::fmt::Debug for OnionStore {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        struct OnionStoreMap<'a>(&'a BTreeMap<OnionAddress, TorSecretKey>);
        impl<'a> std::fmt::Debug for OnionStoreMap<'a> {
            fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
                // Placeholder that shows only the (public) derived address.
                #[derive(Debug)]
                struct KeyFor(#[allow(unused)] OnionAddress);
                let mut map = f.debug_map();
                for (k, v) in self.0 {
                    map.key(k);
                    map.value(&KeyFor(v.onion_address()));
                }
                map.finish()
            }
        }
        f.debug_tuple("OnionStore")
            .field(&OnionStoreMap(&self.0))
            .finish()
    }
}
// RPC/CLI command tree for `net tor`: list-services, reset, and key management.
pub fn tor_api<C: Context>() -> ParentHandler<C> {
    ParentHandler::new()
        .subcommand(
            "list-services",
            from_fn_async(list_services)
                .with_display_serializable()
                .with_custom_display_fn(|handle, result| display_services(handle.params, result))
                .with_about("about.display-tor-v3-onion-addresses")
                .with_call_remote::<CliContext>(),
        )
        .subcommand(
            "reset",
            from_fn_async(reset)
                .no_display()
                .with_about("about.reset-tor-daemon")
                .with_call_remote::<CliContext>(),
        )
        .subcommand(
            "key",
            key::<C>().with_about("about.manage-onion-service-key-store"),
        )
}
// Subcommand tree for `net tor key`: generate, add, and list stored keys.
pub fn key<C: Context>() -> ParentHandler<C> {
    ParentHandler::new()
        .subcommand(
            "generate",
            from_fn_async(generate_key)
                .with_about("about.generate-onion-service-key-add-to-store")
                .with_call_remote::<CliContext>(),
        )
        .subcommand(
            "add",
            from_fn_async(add_key)
                .with_about("about.add-onion-service-key-to-store")
                .with_call_remote::<CliContext>(),
        )
        .subcommand(
            "list",
            from_fn_async(list_keys)
                .with_custom_display_fn(|_, res| {
                    // CLI display: one onion address per line.
                    for addr in res {
                        println!("{addr}");
                    }
                    Ok(())
                })
                .with_about("about.list-onion-services-with-keys-in-store")
                .with_call_remote::<CliContext>(),
        )
}
/// RPC handler: generates a fresh onion-service key, stores it in the
/// private key store, and returns its public `.onion` address.
pub async fn generate_key(ctx: RpcContext) -> Result<OnionAddress, Error> {
    let mutation = ctx.db.mutate(|db| {
        let key = db
            .as_private_mut()
            .as_key_store_mut()
            .as_onion_mut()
            .new_key()?;
        Ok(key.onion_address())
    });
    mutation.await.result
}
#[derive(Deserialize, Serialize, Parser)]
pub struct AddKeyParams {
    // Base64-encoded 64-byte expanded ed25519 secret key.
    #[arg(help = "help.arg.onion-secret-key")]
    pub key: Base64<[u8; 64]>,
}
// RPC handler: validates and stores a user-supplied onion-service key,
// returning its derived `.onion` address.
pub async fn add_key(
    ctx: RpcContext,
    AddKeyParams { key }: AddKeyParams,
) -> Result<OnionAddress, Error> {
    // Validate before touching the db so bad keys never get persisted.
    let key = TorSecretKey::from_bytes(key.0)?;
    ctx.db
        .mutate(|db| {
            db.as_private_mut()
                .as_key_store_mut()
                .as_onion_mut()
                .insert_key(&key)
        })
        .await
        .result?;
    Ok(key.onion_address())
}
/// RPC handler: returns the onion addresses of every key in the store
/// (addresses only — never the secret material).
pub async fn list_keys(ctx: RpcContext) -> Result<BTreeSet<OnionAddress>, Error> {
    let db = ctx.db.peek().await;
    let onion_store = db.into_private().into_key_store().into_onion();
    onion_store.keys()
}
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[command(rename_all = "kebab-case")]
pub struct ResetParams {
    // Presumably requests wiping persisted tor state as part of the reset
    // (see help.arg.wipe-tor-state) — TODO confirm intended semantics.
    #[arg(
        name = "wipe-state",
        short = 'w',
        long = "wipe-state",
        help = "help.arg.wipe-tor-state"
    )]
    wipe_state: bool,
}
// RPC handler: asks the tor controller to restart its client.
// NOTE(review): `wipe_state` is forwarded but `TorController::reset` below
// currently ignores it — confirm whether that is intentional.
pub async fn reset(ctx: RpcContext, ResetParams { wipe_state }: ResetParams) -> Result<(), Error> {
    ctx.net_controller.tor.reset(wipe_state).await
}
// CLI display for `net tor list-services`: either serializes in the requested
// IO format, or renders a table of ADDRESS / STATE / BINDINGS.
pub fn display_services(
    params: WithIoFormat<Empty>,
    services: BTreeMap<OnionAddress, OnionServiceInfo>,
) -> Result<(), Error> {
    use prettytable::*;
    // Explicit --format (json/yaml/...) takes precedence over the table view.
    if let Some(format) = params.format {
        return display_serializable(format, services);
    }
    let mut table = Table::new();
    table.add_row(row![bc => "ADDRESS", "STATE", "BINDINGS"]);
    for (service, info) in services {
        let row = row![
            &service.to_string(),
            &format!("{:?}", info.state),
            &info
                .bindings
                .into_iter()
                .map(|(port, addr)| lazy_format!("{port} -> {addr}"))
                .join("; ")
        ];
        table.add_row(row);
    }
    table.print_tty(false)?;
    Ok(())
}
// Serializable mirror of arti's `tor_hsservice::status::State`, so the RPC
// API does not expose the upstream type directly.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub enum OnionServiceState {
    Shutdown,
    Bootstrapping,
    DegradedReachable,
    DegradedUnreachable,
    Running,
    Recovering,
    Broken,
}
impl From<ArtiOnionServiceState> for OnionServiceState {
    fn from(value: ArtiOnionServiceState) -> Self {
        match value {
            ArtiOnionServiceState::Shutdown => Self::Shutdown,
            ArtiOnionServiceState::Bootstrapping => Self::Bootstrapping,
            ArtiOnionServiceState::DegradedReachable => Self::DegradedReachable,
            ArtiOnionServiceState::DegradedUnreachable => Self::DegradedUnreachable,
            ArtiOnionServiceState::Running => Self::Running,
            ArtiOnionServiceState::Recovering => Self::Recovering,
            ArtiOnionServiceState::Broken => Self::Broken,
            // NOTE(review): wildcard arm suggests the upstream enum is
            // non-exhaustive; this will panic if arti adds a variant —
            // consider mapping unknowns to a fallback instead.
            _ => unreachable!(),
        }
    }
}
// Status snapshot of one launched onion service: current state plus its
// virtual-port -> local-socket bindings.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct OnionServiceInfo {
    pub state: OnionServiceState,
    pub bindings: BTreeMap<u16, SocketAddr>,
}
// RPC handler: snapshots all currently-launched onion services.
pub async fn list_services(
    ctx: RpcContext,
    _: Empty,
) -> Result<BTreeMap<OnionAddress, OnionServiceInfo>, Error> {
    ctx.net_controller.tor.list_services().await
}
// Cheaply-cloneable shared handle to the embedded (arti) tor client and the
// onion services launched through it.
#[derive(Clone)]
pub struct TorController(Arc<TorControllerInner>);
struct TorControllerInner {
    // (epoch, client): the epoch increments each time the client is recycled,
    // letting observers distinguish a new client from a re-notification.
    client: Watch<(usize, TorClient<TokioRustlsRuntime>)>,
    // Owns the bootstrap / health-check / recycle loop for the controller's
    // lifetime.
    _bootstrapper: NonDetachingJoinHandle<()>,
    // Currently-launched onion services, keyed by address.
    services: SyncMutex<BTreeMap<OnionAddress, OnionService>>,
    // Signals the bootstrapper loop to abandon the current bootstrap attempt.
    reset: Arc<Notify>,
}
impl TorController {
pub fn new() -> Result<Self, Error> {
let mut config = TorClientConfig::builder();
config
.storage()
.keystore()
.primary()
.kind(ArtiKeystoreKind::Ephemeral.into());
let client = Watch::new((
0,
TorClient::with_runtime(TokioRustlsRuntime::current()?)
.config(config.build().with_kind(ErrorKind::Tor)?)
.local_resource_timeout(Duration::from_secs(0))
.create_unbootstrapped()?,
));
let reset = Arc::new(Notify::new());
let bootstrapper_reset = reset.clone();
let bootstrapper_client = client.clone();
let bootstrapper = tokio::spawn(async move {
loop {
let (epoch, client): (usize, _) = bootstrapper_client.read();
if let Err(e) = Until::new()
.with_async_fn(|| bootstrapper_reset.notified().map(Ok))
.run(async {
let mut events = client.bootstrap_events();
let bootstrap_fut =
client.bootstrap().map(|res| res.with_kind(ErrorKind::Tor));
let failure_fut = async {
let mut prev_frac = 0_f32;
let mut prev_inst = Instant::now();
while let Some(event) =
tokio::time::timeout(BOOTSTRAP_PROGRESS_TIMEOUT, events.next())
.await
.with_kind(ErrorKind::Tor)?
{
if event.ready_for_traffic() {
return Ok::<_, Error>(());
}
let frac = event.as_frac();
if frac == prev_frac {
if prev_inst.elapsed() > BOOTSTRAP_PROGRESS_TIMEOUT {
return Err(Error::new(
eyre!(
"{}",
t!(
"net.tor.bootstrap-no-progress",
duration = crate::util::serde::Duration::from(
BOOTSTRAP_PROGRESS_TIMEOUT
)
.to_string()
)
),
ErrorKind::Tor,
));
}
} else {
prev_frac = frac;
prev_inst = Instant::now();
}
}
futures::future::pending().await
};
if let Err::<(), Error>(e) = tokio::select! {
res = bootstrap_fut => res,
res = failure_fut => res,
} {
tracing::error!(
"{}",
t!("net.tor.bootstrap-error", error = e.to_string())
);
tracing::debug!("{e:?}");
} else {
bootstrapper_client.send_modify(|_| ());
for _ in 0..HEALTH_CHECK_FAILURE_ALLOWANCE {
if let Err::<(), Error>(e) = async {
loop {
let (bg, mut runner) = BackgroundJobQueue::new();
runner
.run_while(async {
const PING_BUF_LEN: usize = 8;
let key = TorSecretKey::generate();
let onion = key.onion_address();
let (hs, stream) = client
.launch_onion_service_with_hsid(
OnionServiceConfigBuilder::default()
.nickname(
onion
.to_string()
.trim_end_matches(".onion")
.parse::<HsNickname>()
.with_kind(ErrorKind::Tor)?,
)
.build()
.with_kind(ErrorKind::Tor)?,
key.clone().0,
)
.with_kind(ErrorKind::Tor)?;
bg.add_job(async move {
if let Err(e) = async {
let mut stream =
tor_hsservice::handle_rend_requests(
stream,
);
while let Some(req) = stream.next().await {
let mut stream = req
.accept(Connected::new_empty())
.await
.with_kind(ErrorKind::Tor)?;
let mut buf = [0; PING_BUF_LEN];
stream.read_exact(&mut buf).await?;
stream.write_all(&buf).await?;
stream.flush().await?;
stream.shutdown().await?;
}
Ok::<_, Error>(())
}
.await
{
tracing::error!(
"{}",
t!(
"net.tor.health-error",
error = e.to_string()
)
);
tracing::debug!("{e:?}");
}
});
tokio::time::timeout(HS_BOOTSTRAP_TIMEOUT, async {
let mut status = hs.status_events();
while let Some(status) = status.next().await {
if status.state().is_fully_reachable() {
return Ok(());
}
}
Err(Error::new(
eyre!(
"{}",
t!("net.tor.status-stream-ended")
),
ErrorKind::Tor,
))
})
.await
.with_kind(ErrorKind::Tor)??;
let mut stream = client
.connect((onion.to_string(), 8080))
.await?;
let mut ping_buf = [0; PING_BUF_LEN];
rand::fill(&mut ping_buf);
stream.write_all(&ping_buf).await?;
stream.flush().await?;
let mut ping_res = [0; PING_BUF_LEN];
stream.read_exact(&mut ping_res).await?;
ensure_code!(
ping_buf == ping_res,
ErrorKind::Tor,
"ping buffer mismatch"
);
stream.shutdown().await?;
Ok::<_, Error>(())
})
.await?;
tokio::time::sleep(HEALTH_CHECK_COOLDOWN).await;
}
}
.await
{
tracing::error!(
"{}",
t!("net.tor.client-health-error", error = e.to_string())
);
tracing::debug!("{e:?}");
}
}
tracing::error!(
"{}",
t!(
"net.tor.health-check-failed-recycling",
count = HEALTH_CHECK_FAILURE_ALLOWANCE
)
);
}
Ok(())
})
.await
{
tracing::error!(
"{}",
t!("net.tor.bootstrapper-error", error = e.to_string())
);
tracing::debug!("{e:?}");
}
if let Err::<(), Error>(e) = async {
tokio::time::sleep(RETRY_COOLDOWN).await;
bootstrapper_client.send((
epoch.wrapping_add(1),
TorClient::with_runtime(TokioRustlsRuntime::current()?)
.config(config.build().with_kind(ErrorKind::Tor)?)
.local_resource_timeout(Duration::from_secs(0))
.create_unbootstrapped_async()
.await?,
));
tracing::debug!("TorClient recycled");
Ok(())
}
.await
{
tracing::error!(
"{}",
t!("net.tor.client-creation-error", error = e.to_string())
);
tracing::debug!("{e:?}");
}
}
})
.into();
Ok(Self(Arc::new(TorControllerInner {
client,
_bootstrapper: bootstrapper,
services: SyncMutex::new(BTreeMap::new()),
reset,
})))
}
pub fn service(&self, key: TorSecretKey) -> Result<OnionService, Error> {
self.0.services.mutate(|s| {
use std::collections::btree_map::Entry;
let addr = key.onion_address();
match s.entry(addr) {
Entry::Occupied(e) => Ok(e.get().clone()),
Entry::Vacant(e) => Ok(e
.insert(OnionService::launch(self.0.client.clone(), key)?)
.clone()),
}
})
}
pub async fn gc(&self, addr: Option<OnionAddress>) -> Result<(), Error> {
if let Some(addr) = addr {
if let Some(s) = self.0.services.mutate(|s| {
let rm = if let Some(s) = s.get(&addr) {
!s.gc()
} else {
false
};
if rm { s.remove(&addr) } else { None }
}) {
s.shutdown().await
} else {
Ok(())
}
} else {
for s in self.0.services.mutate(|s| {
let mut rm = Vec::new();
s.retain(|_, s| {
if s.gc() {
true
} else {
rm.push(s.clone());
false
}
});
rm
}) {
s.shutdown().await?;
}
Ok(())
}
}
    /// Trigger a recycle of the underlying TorClient by waking the reset
    /// watcher; the bootstrapper task rebuilds the client on this signal.
    pub async fn reset(&self, wipe_state: bool) -> Result<(), Error> {
        // NOTE(review): `wipe_state` is currently ignored here — confirm
        // whether this backend has persistent state that should be wiped
        // when a reset is requested.
        self.0.reset.notify_waiters();
        Ok(())
    }
pub async fn list_services(&self) -> Result<BTreeMap<OnionAddress, OnionServiceInfo>, Error> {
Ok(self
.0
.services
.peek(|s| s.iter().map(|(a, s)| (a.clone(), s.info())).collect()))
}
pub async fn connect_onion(
&self,
addr: &OnionAddress,
port: u16,
) -> Result<Box<dyn ReadWriter + Unpin + Send + Sync + 'static>, Error> {
if let Some(target) = self.0.services.peek(|s| {
s.get(addr).and_then(|s| {
s.0.bindings.peek(|b| {
b.get(&port).and_then(|b| {
b.iter()
.find(|(_, rc)| rc.strong_count() > 0)
.map(|(a, _)| *a)
})
})
})
}) {
let tcp_stream = TcpStream::connect(target)
.await
.with_kind(ErrorKind::Network)?;
if let Err(e) = socket2::SockRef::from(&tcp_stream).set_keepalive(true) {
tracing::error!(
"{}",
t!("net.tor.failed-to-set-tcp-keepalive", error = e.to_string())
);
tracing::debug!("{e:?}");
}
Ok(Box::new(tcp_stream))
} else {
let mut client = self.0.client.clone();
client
.wait_for(|(_, c)| c.bootstrap_status().ready_for_traffic())
.await;
let stream = client
.read()
.1
.connect((addr.to_string(), port))
.await
.with_kind(ErrorKind::Tor)?;
Ok(Box::new(stream))
}
}
}
/// Handle to a single launched onion service. Cloning shares the same
/// underlying state via `Arc`.
#[derive(Clone)]
pub struct OnionService(Arc<OnionServiceData>);
struct OnionServiceData {
    // The running service handle, once launched (None before launch / after shutdown).
    service: Arc<SyncMutex<Option<Arc<RunningOnionService>>>>,
    // port -> (local target addr -> liveness token). A binding is considered
    // live while any Arc handed out by `proxy_all` is still held.
    bindings: Arc<SyncRwLock<BTreeMap<u16, BTreeMap<SocketAddr, Weak<()>>>>>,
    // Background task driving the service lifecycle and proxying streams.
    _thread: NonDetachingJoinHandle<()>,
}
impl OnionService {
    /// Spawn the background task that keeps this onion service published and
    /// proxies incoming rendezvous streams to the locally bound targets.
    ///
    /// The task re-launches the service whenever the shared `TorClient` is
    /// recycled, detected via the epoch counter paired with the client.
    fn launch(
        mut client: Watch<(usize, TorClient<TokioRustlsRuntime>)>,
        key: TorSecretKey,
    ) -> Result<Self, Error> {
        let service = Arc::new(SyncMutex::new(None));
        let bindings = Arc::new(SyncRwLock::new(BTreeMap::<
            u16,
            BTreeMap<SocketAddr, Weak<()>>,
        >::new()));
        Ok(Self(Arc::new(OnionServiceData {
            service: service.clone(),
            bindings: bindings.clone(),
            _thread: tokio::spawn(async move {
                let (bg, mut runner) = BackgroundJobQueue::new();
                runner
                    .run_while(async {
                        loop {
                            if let Err(e) = async {
                                // Wait until the Tor client can carry traffic, then
                                // record the epoch so a recycle can be detected below.
                                client.wait_for(|(_,c)| c.bootstrap_status().ready_for_traffic()).await;
                                let epoch = client.peek(|(e, c)| {
                                    ensure_code!(c.bootstrap_status().ready_for_traffic(), ErrorKind::Tor, "TorClient recycled");
                                    Ok::<_, Error>(*e)
                                })?;
                                let addr = key.onion_address();
                                // Launch the hidden service; the nickname is derived
                                // from the onion address (minus the ".onion" suffix).
                                let (new_service, stream) = client.peek(|(_, c)| {
                                    c.launch_onion_service_with_hsid(
                                        OnionServiceConfigBuilder::default()
                                            .nickname(
                                                addr
                                                    .to_string()
                                                    .trim_end_matches(".onion")
                                                    .parse::<HsNickname>()
                                                    .with_kind(ErrorKind::Tor)?,
                                            )
                                            .build()
                                            .with_kind(ErrorKind::Tor)?,
                                        key.clone().0,
                                    )
                                    .with_kind(ErrorKind::Tor)
                                })?;
                                // Log reachability transitions in a background job.
                                let mut status_stream = new_service.status_events();
                                let mut status = new_service.status();
                                if status.state().is_fully_reachable() {
                                    tracing::debug!("{addr} is fully reachable");
                                } else {
                                    tracing::debug!("{addr} is not fully reachable");
                                }
                                bg.add_job(async move {
                                    while let Some(new_status) = status_stream.next().await {
                                        if status.state().is_fully_reachable() && !new_status.state().is_fully_reachable() {
                                            tracing::debug!("{addr} is no longer fully reachable");
                                        } else if !status.state().is_fully_reachable() && new_status.state().is_fully_reachable() {
                                            tracing::debug!("{addr} is now fully reachable");
                                        }
                                        status = new_status;
                                        // TODO: health daemon?
                                    }
                                });
                                service.replace(Some(new_service));
                                // Accept rendezvous requests until the client epoch
                                // changes (client recycled), then loop and relaunch.
                                let mut stream = tor_hsservice::handle_rend_requests(stream);
                                while let Some(req) = tokio::select! {
                                    req = stream.next() => req,
                                    _ = client.wait_for(|(e, _)| *e != epoch) => None
                                } {
                                    bg.add_job({
                                        let bg = bg.clone();
                                        let bindings = bindings.clone();
                                        async move {
                                            if let Err(e) = async {
                                                // Only BEGIN requests are proxied; anything
                                                // else is rejected with END/DONE.
                                                let IncomingStreamRequest::Begin(begin) =
                                                    req.request()
                                                else {
                                                    return req
                                                        .reject(tor_cell::relaycell::msg::End::new_with_reason(
                                                            tor_cell::relaycell::msg::EndReason::DONE,
                                                        ))
                                                        .await
                                                        .with_kind(ErrorKind::Tor);
                                                };
                                                // Find a live local target for the requested
                                                // port; reject if none is bound.
                                                let Some(target) = bindings.peek(|b| {
                                                    b.get(&begin.port()).and_then(|a| {
                                                        a.iter()
                                                            .find(|(_, rc)| rc.strong_count() > 0)
                                                            .map(|(addr, _)| *addr)
                                                    })
                                                }) else {
                                                    return req
                                                        .reject(tor_cell::relaycell::msg::End::new_with_reason(
                                                            tor_cell::relaycell::msg::EndReason::DONE,
                                                        ))
                                                        .await
                                                        .with_kind(ErrorKind::Tor);
                                                };
                                                // Accept the stream and shuttle bytes between
                                                // the Tor circuit and the local TCP target.
                                                bg.add_job(async move {
                                                    if let Err(e) = async {
                                                        let mut outgoing =
                                                            TcpStream::connect(target)
                                                                .await
                                                                .with_kind(ErrorKind::Network)?;
                                                        // Keepalive failure is non-fatal.
                                                        if let Err(e) = socket2::SockRef::from(&outgoing).set_keepalive(true) {
                                                            tracing::error!("{}", t!("net.tor.failed-to-set-tcp-keepalive", error = e.to_string()));
                                                            tracing::debug!("{e:?}");
                                                        }
                                                        let mut incoming = req
                                                            .accept(Connected::new_empty())
                                                            .await
                                                            .with_kind(ErrorKind::Tor)?;
                                                        if let Err(e) =
                                                            tokio::io::copy_bidirectional(
                                                                &mut outgoing,
                                                                &mut incoming,
                                                            )
                                                            .await
                                                        {
                                                            tracing::trace!("Tor Stream Error: {e}");
                                                            tracing::trace!("{e:?}");
                                                        }
                                                        Ok::<_, Error>(())
                                                    }
                                                    .await
                                                    {
                                                        tracing::trace!("Tor Stream Error: {e}");
                                                        tracing::trace!("{e:?}");
                                                    }
                                                });
                                                Ok::<_, Error>(())
                                            }
                                            .await
                                            {
                                                tracing::trace!("Tor Request Error: {e}");
                                                tracing::trace!("{e:?}");
                                            }
                                        }
                                    });
                                }
                                Ok::<_, Error>(())
                            }
                            .await
                            {
                                tracing::error!("{}", t!("net.tor.client-error", error = e.to_string()));
                                tracing::debug!("{e:?}");
                            }
                        }
                    })
                    .await
            })
            .into(),
        })))
    }
pub async fn proxy_all<Rcs: FromIterator<Arc<()>>>(
&self,
bindings: impl IntoIterator<Item = (u16, SocketAddr)>,
) -> Result<Rcs, Error> {
Ok(self.0.bindings.mutate(|b| {
bindings
.into_iter()
.map(|(port, target)| {
let entry = b.entry(port).or_default().entry(target).or_default();
if let Some(rc) = entry.upgrade() {
rc
} else {
let rc = Arc::new(());
*entry = Arc::downgrade(&rc);
rc
}
})
.collect()
}))
}
pub fn gc(&self) -> bool {
self.0.bindings.mutate(|b| {
b.retain(|_, targets| {
targets.retain(|_, rc| rc.strong_count() > 0);
!targets.is_empty()
});
!b.is_empty()
})
}
pub async fn shutdown(self) -> Result<(), Error> {
self.0.service.replace(None);
self.0._thread.abort();
Ok(())
}
pub fn state(&self) -> OnionServiceState {
self.0
.service
.peek(|s| s.as_ref().map(|s| s.status().state().into()))
.unwrap_or(OnionServiceState::Bootstrapping)
}
pub fn info(&self) -> OnionServiceInfo {
OnionServiceInfo {
state: self.state(),
bindings: self.0.bindings.peek(|b| {
b.iter()
.filter_map(|(port, b)| {
b.iter()
.find(|(_, rc)| rc.strong_count() > 0)
.map(|(addr, _)| (*port, *addr))
})
.collect()
}),
}
}
}

1092
core/src/net/tor/ctor.rs Normal file

File diff suppressed because it is too large Load Diff

10
core/src/net/tor/mod.rs Normal file
View File

@@ -0,0 +1,10 @@
#[cfg(feature = "arti")]
mod arti;
#[cfg(not(feature = "arti"))]
mod ctor;
#[cfg(feature = "arti")]
pub use arti::{OnionAddress, OnionStore, TorController, TorSecretKey, tor_api};
#[cfg(not(feature = "arti"))]
pub use ctor::{OnionAddress, OnionStore, TorController, TorSecretKey, tor_api};

View File

@@ -8,7 +8,7 @@ use ts_rs::TS;
use crate::GatewayId; use crate::GatewayId;
use crate::context::{CliContext, RpcContext}; use crate::context::{CliContext, RpcContext};
use crate::db::model::public::{GatewayType, NetworkInterfaceInfo, NetworkInterfaceType}; use crate::db::model::public::{NetworkInterfaceInfo, NetworkInterfaceType};
use crate::net::host::all_hosts; use crate::net::host::all_hosts;
use crate::prelude::*; use crate::prelude::*;
use crate::util::Invoke; use crate::util::Invoke;
@@ -32,19 +32,14 @@ pub fn tunnel_api<C: Context>() -> ParentHandler<C> {
} }
#[derive(Debug, Clone, Deserialize, Serialize, Parser, TS)] #[derive(Debug, Clone, Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)] #[ts(export)]
pub struct AddTunnelParams { pub struct AddTunnelParams {
#[arg(help = "help.arg.tunnel-name")] #[arg(help = "help.arg.tunnel-name")]
name: InternedString, name: InternedString,
#[arg(help = "help.arg.wireguard-config")] #[arg(help = "help.arg.wireguard-config")]
config: String, config: String,
#[arg(help = "help.arg.gateway-type")] #[arg(help = "help.arg.is-public")]
#[serde(default, rename = "type")] public: bool,
gateway_type: Option<GatewayType>,
#[arg(help = "help.arg.set-as-default-outbound")]
#[serde(default)]
set_as_default_outbound: bool,
} }
fn sanitize_config(config: &str) -> String { fn sanitize_config(config: &str) -> String {
@@ -69,8 +64,7 @@ pub async fn add_tunnel(
AddTunnelParams { AddTunnelParams {
name, name,
config, config,
gateway_type, public,
set_as_default_outbound,
}: AddTunnelParams, }: AddTunnelParams,
) -> Result<GatewayId, Error> { ) -> Result<GatewayId, Error> {
let ifaces = ctx.net_controller.net_iface.watcher.subscribe(); let ifaces = ctx.net_controller.net_iface.watcher.subscribe();
@@ -82,9 +76,9 @@ pub async fn add_tunnel(
iface.clone(), iface.clone(),
NetworkInterfaceInfo { NetworkInterfaceInfo {
name: Some(name), name: Some(name),
public: Some(public),
secure: None, secure: None,
ip_info: None, ip_info: None,
gateway_type,
}, },
); );
return true; return true;
@@ -126,19 +120,6 @@ pub async fn add_tunnel(
sub.recv().await; sub.recv().await;
if set_as_default_outbound {
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_server_info_mut()
.as_network_mut()
.as_default_outbound_mut()
.ser(&Some(iface.clone()))
})
.await
.result?;
}
Ok(iface) Ok(iface)
} }
@@ -175,13 +156,10 @@ pub async fn remove_tunnel(
ctx.db ctx.db
.mutate(|db| { .mutate(|db| {
let gateways = db.as_public().as_server_info().as_network().as_gateways().de()?;
let ports = db.as_private().as_available_ports().de()?;
for host in all_hosts(db) { for host in all_hosts(db) {
let host = host?; let host = host?;
host.as_public_domains_mut() host.as_public_domains_mut()
.mutate(|p| Ok(p.retain(|_, v| v.gateway != id)))?; .mutate(|p| Ok(p.retain(|_, v| v.gateway != id)))?;
host.update_addresses(&gateways, &ports)?;
} }
Ok(()) Ok(())
@@ -193,18 +171,14 @@ pub async fn remove_tunnel(
ctx.db ctx.db
.mutate(|db| { .mutate(|db| {
let gateways = db.as_public().as_server_info().as_network().as_gateways().de()?;
let ports = db.as_private().as_available_ports().de()?;
for host in all_hosts(db) { for host in all_hosts(db) {
let host = host?; let host = host?;
host.as_private_domains_mut().mutate(|d| { host.as_bindings_mut().mutate(|b| {
for gateways in d.values_mut() { Ok(b.values_mut().for_each(|v| {
gateways.remove(&id); v.net.private_disabled.remove(&id);
} v.net.public_enabled.remove(&id);
d.retain(|_, gateways| !gateways.is_empty()); }))
Ok(())
})?; })?;
host.update_addresses(&gateways, &ports)?;
} }
Ok(()) Ok(())

View File

@@ -1,19 +1,19 @@
use std::any::Any; use std::any::Any;
use std::collections::{BTreeMap, BTreeSet}; use std::collections::{BTreeMap, BTreeSet};
use std::fmt; use std::fmt;
use std::net::{IpAddr, SocketAddr, SocketAddrV6}; use std::net::{IpAddr, SocketAddr};
use std::sync::{Arc, Weak}; use std::sync::{Arc, Weak};
use std::task::{Poll, ready}; use std::task::{Poll, ready};
use std::time::Duration;
use async_acme::acme::ACME_TLS_ALPN_NAME; use async_acme::acme::ACME_TLS_ALPN_NAME;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use futures::FutureExt; use futures::FutureExt;
use futures::future::BoxFuture; use futures::future::BoxFuture;
use imbl::OrdMap;
use imbl_value::{InOMap, InternedString}; use imbl_value::{InOMap, InternedString};
use rpc_toolkit::{Context, HandlerArgs, HandlerExt, ParentHandler, from_fn}; use rpc_toolkit::{Context, HandlerArgs, HandlerExt, ParentHandler, from_fn};
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use tokio::net::{TcpListener, TcpStream}; use tokio::net::TcpStream;
use tokio_rustls::TlsConnector; use tokio_rustls::TlsConnector;
use tokio_rustls::rustls::crypto::CryptoProvider; use tokio_rustls::rustls::crypto::CryptoProvider;
use tokio_rustls::rustls::pki_types::ServerName; use tokio_rustls::rustls::pki_types::ServerName;
@@ -23,28 +23,28 @@ use tracing::instrument;
use ts_rs::TS; use ts_rs::TS;
use visit_rs::Visit; use visit_rs::Visit;
use crate::ResultExt;
use crate::context::{CliContext, RpcContext}; use crate::context::{CliContext, RpcContext};
use crate::db::model::Database; use crate::db::model::Database;
use crate::db::model::public::{AcmeSettings, NetworkInterfaceInfo}; use crate::db::model::public::AcmeSettings;
use crate::db::{DbAccessByKey, DbAccessMut}; use crate::db::{DbAccessByKey, DbAccessMut};
use crate::net::acme::{ use crate::net::acme::{
AcmeCertStore, AcmeProvider, AcmeTlsAlpnCache, AcmeTlsHandler, GetAcmeProvider, AcmeCertStore, AcmeProvider, AcmeTlsAlpnCache, AcmeTlsHandler, GetAcmeProvider,
}; };
use crate::net::gateway::{ use crate::net::gateway::{
GatewayInfo, NetworkInterfaceController, NetworkInterfaceListenerAcceptMetadata, AnyFilter, BindTcp, DynInterfaceFilter, GatewayInfo, InterfaceFilter,
NetworkInterfaceController, NetworkInterfaceListener,
}; };
use crate::net::ssl::{CertStore, RootCaTlsHandler}; use crate::net::ssl::{CertStore, RootCaTlsHandler};
use crate::net::tls::{ use crate::net::tls::{
ChainedHandler, TlsHandlerWrapper, TlsListener, TlsMetadata, WrapTlsHandler, ChainedHandler, TlsHandlerWrapper, TlsListener, TlsMetadata, WrapTlsHandler,
}; };
use crate::net::utils::ipv6_is_link_local;
use crate::net::web_server::{Accept, AcceptStream, ExtractVisitor, TcpMetadata, extract}; use crate::net::web_server::{Accept, AcceptStream, ExtractVisitor, TcpMetadata, extract};
use crate::prelude::*; use crate::prelude::*;
use crate::util::collections::EqSet; use crate::util::collections::EqSet;
use crate::util::future::{NonDetachingJoinHandle, WeakFuture}; use crate::util::future::{NonDetachingJoinHandle, WeakFuture};
use crate::util::serde::{HandlerExtSerde, MaybeUtf8String, display_serializable}; use crate::util::serde::{HandlerExtSerde, MaybeUtf8String, display_serializable};
use crate::util::sync::{SyncMutex, Watch}; use crate::util::sync::{SyncMutex, Watch};
use crate::{GatewayId, ResultExt};
pub fn vhost_api<C: Context>() -> ParentHandler<C> { pub fn vhost_api<C: Context>() -> ParentHandler<C> {
ParentHandler::new().subcommand( ParentHandler::new().subcommand(
@@ -93,7 +93,7 @@ pub struct VHostController {
interfaces: Arc<NetworkInterfaceController>, interfaces: Arc<NetworkInterfaceController>,
crypto_provider: Arc<CryptoProvider>, crypto_provider: Arc<CryptoProvider>,
acme_cache: AcmeTlsAlpnCache, acme_cache: AcmeTlsAlpnCache,
servers: SyncMutex<BTreeMap<u16, VHostServer<VHostBindListener>>>, servers: SyncMutex<BTreeMap<u16, VHostServer<NetworkInterfaceListener>>>,
} }
impl VHostController { impl VHostController {
pub fn new( pub fn new(
@@ -114,22 +114,14 @@ impl VHostController {
&self, &self,
hostname: Option<InternedString>, hostname: Option<InternedString>,
external: u16, external: u16,
target: DynVHostTarget<VHostBindListener>, target: DynVHostTarget<NetworkInterfaceListener>,
) -> Result<Arc<()>, Error> { ) -> Result<Arc<()>, Error> {
self.servers.mutate(|writable| { self.servers.mutate(|writable| {
let server = if let Some(server) = writable.remove(&external) { let server = if let Some(server) = writable.remove(&external) {
server server
} else { } else {
let bind_reqs = Watch::new(VHostBindRequirements::default());
let listener = VHostBindListener {
ip_info: self.interfaces.watcher.subscribe(),
port: external,
bind_reqs: bind_reqs.clone_unseen(),
listeners: BTreeMap::new(),
};
VHostServer::new( VHostServer::new(
listener, self.interfaces.watcher.bind(BindTcp, external)?,
bind_reqs,
self.db.clone(), self.db.clone(),
self.crypto_provider.clone(), self.crypto_provider.clone(),
self.acme_cache.clone(), self.acme_cache.clone(),
@@ -181,143 +173,6 @@ impl VHostController {
} }
} }
/// Union of all ProxyTargets' bind requirements for a VHostServer.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct VHostBindRequirements {
pub public_gateways: BTreeSet<GatewayId>,
pub private_ips: BTreeSet<IpAddr>,
}
fn compute_bind_reqs<A: Accept + 'static>(mapping: &Mapping<A>) -> VHostBindRequirements {
let mut reqs = VHostBindRequirements::default();
for (_, targets) in mapping {
for (target, rc) in targets {
if rc.strong_count() > 0 {
let (pub_gw, priv_ip) = target.0.bind_requirements();
reqs.public_gateways.extend(pub_gw);
reqs.private_ips.extend(priv_ip);
}
}
}
reqs
}
/// Listener that manages its own TCP listeners with IP-level precision.
/// Binds ALL IPs of public gateways and ONLY matching private IPs.
pub struct VHostBindListener {
ip_info: Watch<OrdMap<GatewayId, NetworkInterfaceInfo>>,
port: u16,
bind_reqs: Watch<VHostBindRequirements>,
listeners: BTreeMap<SocketAddr, (TcpListener, GatewayInfo)>,
}
fn update_vhost_listeners(
listeners: &mut BTreeMap<SocketAddr, (TcpListener, GatewayInfo)>,
port: u16,
ip_info: &OrdMap<GatewayId, NetworkInterfaceInfo>,
reqs: &VHostBindRequirements,
) -> Result<(), Error> {
let mut keep = BTreeSet::<SocketAddr>::new();
for (gw_id, info) in ip_info {
if let Some(ip_info) = &info.ip_info {
for ipnet in &ip_info.subnets {
let ip = ipnet.addr();
let should_bind =
reqs.public_gateways.contains(gw_id) || reqs.private_ips.contains(&ip);
if should_bind {
let addr = match ip {
IpAddr::V6(ip6) => SocketAddrV6::new(
ip6,
port,
0,
if ipv6_is_link_local(ip6) {
ip_info.scope_id
} else {
0
},
)
.into(),
ip => SocketAddr::new(ip, port),
};
keep.insert(addr);
if let Some((_, existing_info)) = listeners.get_mut(&addr) {
*existing_info = GatewayInfo {
id: gw_id.clone(),
info: info.clone(),
};
} else {
let tcp = TcpListener::from_std(
mio::net::TcpListener::bind(addr)
.with_kind(ErrorKind::Network)?
.into(),
)
.with_kind(ErrorKind::Network)?;
listeners.insert(
addr,
(
tcp,
GatewayInfo {
id: gw_id.clone(),
info: info.clone(),
},
),
);
}
}
}
}
}
listeners.retain(|key, _| keep.contains(key));
Ok(())
}
impl Accept for VHostBindListener {
type Metadata = NetworkInterfaceListenerAcceptMetadata;
fn poll_accept(
&mut self,
cx: &mut std::task::Context<'_>,
) -> Poll<Result<(Self::Metadata, AcceptStream), Error>> {
// Update listeners when ip_info or bind_reqs change
while self.ip_info.poll_changed(cx).is_ready()
|| self.bind_reqs.poll_changed(cx).is_ready()
{
let reqs = self.bind_reqs.read_and_mark_seen();
let listeners = &mut self.listeners;
let port = self.port;
self.ip_info.peek_and_mark_seen(|ip_info| {
update_vhost_listeners(listeners, port, ip_info, &reqs)
})?;
}
// Poll each listener for incoming connections
for (&addr, (listener, gw_info)) in &self.listeners {
match listener.poll_accept(cx) {
Poll::Ready(Ok((stream, peer_addr))) => {
if let Err(e) = socket2::SockRef::from(&stream).set_keepalive(true) {
tracing::error!("Failed to set tcp keepalive: {e}");
tracing::debug!("{e:?}");
}
return Poll::Ready(Ok((
NetworkInterfaceListenerAcceptMetadata {
inner: TcpMetadata {
local_addr: addr,
peer_addr,
},
info: gw_info.clone(),
},
Box::pin(stream),
)));
}
Poll::Ready(Err(e)) => {
tracing::trace!("VHostBindListener accept error on {addr}: {e}");
}
Poll::Pending => {}
}
}
Poll::Pending
}
}
pub trait VHostTarget<A: Accept>: std::fmt::Debug + Eq { pub trait VHostTarget<A: Accept>: std::fmt::Debug + Eq {
type PreprocessRes: Send + 'static; type PreprocessRes: Send + 'static;
#[allow(unused_variables)] #[allow(unused_variables)]
@@ -327,10 +182,6 @@ pub trait VHostTarget<A: Accept>: std::fmt::Debug + Eq {
fn acme(&self) -> Option<&AcmeProvider> { fn acme(&self) -> Option<&AcmeProvider> {
None None
} }
/// Returns (public_gateways, private_ips) this target needs the listener to bind on.
fn bind_requirements(&self) -> (BTreeSet<GatewayId>, BTreeSet<IpAddr>) {
(BTreeSet::new(), BTreeSet::new())
}
fn preprocess<'a>( fn preprocess<'a>(
&'a self, &'a self,
prev: ServerConfig, prev: ServerConfig,
@@ -349,7 +200,6 @@ pub trait VHostTarget<A: Accept>: std::fmt::Debug + Eq {
pub trait DynVHostTargetT<A: Accept>: std::fmt::Debug + Any { pub trait DynVHostTargetT<A: Accept>: std::fmt::Debug + Any {
fn filter(&self, metadata: &<A as Accept>::Metadata) -> bool; fn filter(&self, metadata: &<A as Accept>::Metadata) -> bool;
fn acme(&self) -> Option<&AcmeProvider>; fn acme(&self) -> Option<&AcmeProvider>;
fn bind_requirements(&self) -> (BTreeSet<GatewayId>, BTreeSet<IpAddr>);
fn preprocess<'a>( fn preprocess<'a>(
&'a self, &'a self,
prev: ServerConfig, prev: ServerConfig,
@@ -374,9 +224,6 @@ impl<A: Accept, T: VHostTarget<A> + 'static> DynVHostTargetT<A> for T {
fn acme(&self) -> Option<&AcmeProvider> { fn acme(&self) -> Option<&AcmeProvider> {
VHostTarget::acme(self) VHostTarget::acme(self)
} }
fn bind_requirements(&self) -> (BTreeSet<GatewayId>, BTreeSet<IpAddr>) {
VHostTarget::bind_requirements(self)
}
fn preprocess<'a>( fn preprocess<'a>(
&'a self, &'a self,
prev: ServerConfig, prev: ServerConfig,
@@ -454,8 +301,7 @@ impl<A: Accept + 'static> Preprocessed<A> {
#[derive(Clone)] #[derive(Clone)]
pub struct ProxyTarget { pub struct ProxyTarget {
pub public: BTreeSet<GatewayId>, pub filter: DynInterfaceFilter,
pub private: BTreeSet<IpAddr>,
pub acme: Option<AcmeProvider>, pub acme: Option<AcmeProvider>,
pub addr: SocketAddr, pub addr: SocketAddr,
pub add_x_forwarded_headers: bool, pub add_x_forwarded_headers: bool,
@@ -463,8 +309,7 @@ pub struct ProxyTarget {
} }
impl PartialEq for ProxyTarget { impl PartialEq for ProxyTarget {
fn eq(&self, other: &Self) -> bool { fn eq(&self, other: &Self) -> bool {
self.public == other.public self.filter == other.filter
&& self.private == other.private
&& self.acme == other.acme && self.acme == other.acme
&& self.addr == other.addr && self.addr == other.addr
&& self.connect_ssl.as_ref().map(Arc::as_ptr) && self.connect_ssl.as_ref().map(Arc::as_ptr)
@@ -475,8 +320,7 @@ impl Eq for ProxyTarget {}
impl fmt::Debug for ProxyTarget { impl fmt::Debug for ProxyTarget {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("ProxyTarget") f.debug_struct("ProxyTarget")
.field("public", &self.public) .field("filter", &self.filter)
.field("private", &self.private)
.field("acme", &self.acme) .field("acme", &self.acme)
.field("addr", &self.addr) .field("addr", &self.addr)
.field("add_x_forwarded_headers", &self.add_x_forwarded_headers) .field("add_x_forwarded_headers", &self.add_x_forwarded_headers)
@@ -496,37 +340,16 @@ where
{ {
type PreprocessRes = AcceptStream; type PreprocessRes = AcceptStream;
fn filter(&self, metadata: &<A as Accept>::Metadata) -> bool { fn filter(&self, metadata: &<A as Accept>::Metadata) -> bool {
let gw = extract::<GatewayInfo, _>(metadata); let info = extract::<GatewayInfo, _>(metadata);
let tcp = extract::<TcpMetadata, _>(metadata); if info.is_none() {
let (Some(gw), Some(tcp)) = (gw, tcp) else { tracing::warn!("No GatewayInfo on metadata");
return false;
};
let Some(ip_info) = &gw.info.ip_info else {
return false;
};
let src = tcp.peer_addr.ip();
// Public if: source is a gateway/router IP (NAT'd internet),
// or source is outside all known subnets (direct internet)
let is_public = ip_info.lan_ip.contains(&src)
|| !ip_info.subnets.iter().any(|s| s.contains(&src));
if is_public {
self.public.contains(&gw.id)
} else {
// Private: accept if connection arrived on an interface with a matching IP
ip_info
.subnets
.iter()
.any(|s| self.private.contains(&s.addr()))
} }
info.as_ref()
.map_or(true, |i| self.filter.filter(&i.id, &i.info))
} }
fn acme(&self) -> Option<&AcmeProvider> { fn acme(&self) -> Option<&AcmeProvider> {
self.acme.as_ref() self.acme.as_ref()
} }
fn bind_requirements(&self) -> (BTreeSet<GatewayId>, BTreeSet<IpAddr>) {
(self.public.clone(), self.private.clone())
}
async fn preprocess<'a>( async fn preprocess<'a>(
&'a self, &'a self,
mut prev: ServerConfig, mut prev: ServerConfig,
@@ -811,15 +634,28 @@ where
struct VHostServer<A: Accept + 'static> { struct VHostServer<A: Accept + 'static> {
mapping: Watch<Mapping<A>>, mapping: Watch<Mapping<A>>,
bind_reqs: Watch<VHostBindRequirements>,
_thread: NonDetachingJoinHandle<()>, _thread: NonDetachingJoinHandle<()>,
} }
impl<'a> From<&'a BTreeMap<Option<InternedString>, BTreeMap<ProxyTarget, Weak<()>>>> for AnyFilter {
fn from(value: &'a BTreeMap<Option<InternedString>, BTreeMap<ProxyTarget, Weak<()>>>) -> Self {
Self(
value
.iter()
.flat_map(|(_, v)| {
v.iter()
.filter(|(_, r)| r.strong_count() > 0)
.map(|(t, _)| t.filter.clone())
})
.collect(),
)
}
}
impl<A: Accept> VHostServer<A> { impl<A: Accept> VHostServer<A> {
#[instrument(skip_all)] #[instrument(skip_all)]
fn new<M: HasModel>( fn new<M: HasModel>(
listener: A, listener: A,
bind_reqs: Watch<VHostBindRequirements>,
db: TypedPatchDb<M>, db: TypedPatchDb<M>,
crypto_provider: Arc<CryptoProvider>, crypto_provider: Arc<CryptoProvider>,
acme_cache: AcmeTlsAlpnCache, acme_cache: AcmeTlsAlpnCache,
@@ -843,7 +679,6 @@ impl<A: Accept> VHostServer<A> {
let mapping = Watch::new(BTreeMap::new()); let mapping = Watch::new(BTreeMap::new());
Self { Self {
mapping: mapping.clone(), mapping: mapping.clone(),
bind_reqs,
_thread: tokio::spawn(async move { _thread: tokio::spawn(async move {
let mut listener = VHostListener(TlsListener::new( let mut listener = VHostListener(TlsListener::new(
listener, listener,
@@ -894,9 +729,6 @@ impl<A: Accept> VHostServer<A> {
targets.insert(target, Arc::downgrade(&rc)); targets.insert(target, Arc::downgrade(&rc));
writable.insert(hostname, targets); writable.insert(hostname, targets);
res = Ok(rc); res = Ok(rc);
if changed {
self.update_bind_reqs(writable);
}
changed changed
}); });
if self.mapping.watcher_count() > 1 { if self.mapping.watcher_count() > 1 {
@@ -920,23 +752,9 @@ impl<A: Accept> VHostServer<A> {
if !targets.is_empty() { if !targets.is_empty() {
writable.insert(hostname, targets); writable.insert(hostname, targets);
} }
if pre != post {
self.update_bind_reqs(writable);
}
pre == post pre == post
}); });
} }
fn update_bind_reqs(&self, mapping: &Mapping<A>) {
let new_reqs = compute_bind_reqs(mapping);
self.bind_reqs.send_if_modified(|reqs| {
if *reqs != new_reqs {
*reqs = new_reqs;
true
} else {
false
}
});
}
fn is_empty(&self) -> bool { fn is_empty(&self) -> bool {
self.mapping.peek(|m| m.is_empty()) self.mapping.peek(|m| m.is_empty())
} }

View File

@@ -366,6 +366,28 @@ where
pub struct WebServerAcceptorSetter<A: Accept> { pub struct WebServerAcceptorSetter<A: Accept> {
acceptor: Watch<A>, acceptor: Watch<A>,
} }
impl<A, B> WebServerAcceptorSetter<Option<Either<A, B>>>
where
A: Accept,
B: Accept<Metadata = A::Metadata>,
{
pub fn try_upgrade<F: FnOnce(A) -> Result<B, Error>>(&self, f: F) -> Result<(), Error> {
let mut res = Ok(());
self.acceptor.send_modify(|a| {
*a = match a.take() {
Some(Either::Left(a)) => match f(a) {
Ok(b) => Some(Either::Right(b)),
Err(e) => {
res = Err(e);
None
}
},
x => x,
}
});
res
}
}
impl<A: Accept> Deref for WebServerAcceptorSetter<A> { impl<A: Accept> Deref for WebServerAcceptorSetter<A> {
type Target = Watch<A>; type Target = Watch<A>;
fn deref(&self) -> &Self::Target { fn deref(&self) -> &Self::Target {

View File

@@ -11,9 +11,6 @@ use serde::{Deserialize, Serialize};
use tracing::warn; use tracing::warn;
use ts_rs::TS; use ts_rs::TS;
use patch_db::json_ptr::JsonPointer;
use crate::db::model::Database;
use crate::net::ssl::FullchainCertData; use crate::net::ssl::FullchainCertData;
use crate::prelude::*; use crate::prelude::*;
use crate::service::effects::context::EffectContext; use crate::service::effects::context::EffectContext;
@@ -32,7 +29,7 @@ struct ServiceCallbackMap {
get_service_interface: BTreeMap<(PackageId, ServiceInterfaceId), Vec<CallbackHandler>>, get_service_interface: BTreeMap<(PackageId, ServiceInterfaceId), Vec<CallbackHandler>>,
list_service_interfaces: BTreeMap<PackageId, Vec<CallbackHandler>>, list_service_interfaces: BTreeMap<PackageId, Vec<CallbackHandler>>,
get_system_smtp: Vec<CallbackHandler>, get_system_smtp: Vec<CallbackHandler>,
get_host_info: BTreeMap<(PackageId, HostId), (NonDetachingJoinHandle<()>, Vec<CallbackHandler>)>, get_host_info: BTreeMap<(PackageId, HostId), Vec<CallbackHandler>>,
get_ssl_certificate: EqMap< get_ssl_certificate: EqMap<
(BTreeSet<InternedString>, FullchainCertData, Algorithm), (BTreeSet<InternedString>, FullchainCertData, Algorithm),
(NonDetachingJoinHandle<()>, Vec<CallbackHandler>), (NonDetachingJoinHandle<()>, Vec<CallbackHandler>),
@@ -60,7 +57,7 @@ impl ServiceCallbacks {
}); });
this.get_system_smtp this.get_system_smtp
.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0); .retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
this.get_host_info.retain(|_, (_, v)| { this.get_host_info.retain(|_, v| {
v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0); v.retain(|h| h.handle.is_active() && h.seed.strong_count() > 0);
!v.is_empty() !v.is_empty()
}); });
@@ -144,57 +141,29 @@ impl ServiceCallbacks {
} }
pub(super) fn add_get_host_info( pub(super) fn add_get_host_info(
self: &Arc<Self>, &self,
db: &TypedPatchDb<Database>,
package_id: PackageId, package_id: PackageId,
host_id: HostId, host_id: HostId,
handler: CallbackHandler, handler: CallbackHandler,
) { ) {
self.mutate(|this| { self.mutate(|this| {
this.get_host_info this.get_host_info
.entry((package_id.clone(), host_id.clone())) .entry((package_id, host_id))
.or_insert_with(|| { .or_default()
let ptr: JsonPointer = format!(
"/public/packageData/{}/hosts/{}",
package_id, host_id
)
.parse()
.expect("valid json pointer");
let db = db.clone();
let callbacks = Arc::clone(self);
let key = (package_id, host_id);
(
tokio::spawn(async move {
let mut sub = db.subscribe(ptr).await;
while sub.recv().await.is_some() {
if let Some(cbs) = callbacks.mutate(|this| {
this.get_host_info
.remove(&key)
.map(|(_, handlers)| CallbackHandlers(handlers))
.filter(|cb| !cb.0.is_empty())
}) {
if let Err(e) = cbs.call(vector![]).await {
tracing::error!(
"Error in host info callback: {e}"
);
tracing::debug!("{e:?}");
}
}
// entry was removed when we consumed handlers,
// so stop watching — a new subscription will be
// created if the service re-registers
break;
}
})
.into(),
Vec::new(),
)
})
.1
.push(handler); .push(handler);
}) })
} }
#[must_use]
pub fn get_host_info(&self, id: &(PackageId, HostId)) -> Option<CallbackHandlers> {
self.mutate(|this| {
Some(CallbackHandlers(
this.get_host_info.remove(id).unwrap_or_default(),
))
.filter(|cb| !cb.0.is_empty())
})
}
pub(super) fn add_get_ssl_certificate( pub(super) fn add_get_ssl_certificate(
&self, &self,
ctx: EffectContext, ctx: EffectContext,

View File

@@ -29,7 +29,6 @@ pub async fn get_host_info(
if let Some(callback) = callback { if let Some(callback) = callback {
let callback = callback.register(&context.seed.persistent_container); let callback = callback.register(&context.seed.persistent_container);
context.seed.ctx.callbacks.add_get_host_info( context.seed.ctx.callbacks.add_get_host_info(
&context.seed.ctx.db,
package_id.clone(), package_id.clone(),
host_id.clone(), host_id.clone(),
CallbackHandler::new(&context, callback), CallbackHandler::new(&context, callback),

View File

@@ -55,18 +55,20 @@ pub async fn get_ssl_certificate(
.map(|(_, m)| m.as_hosts().as_entries()) .map(|(_, m)| m.as_hosts().as_entries())
.flatten_ok() .flatten_ok()
.map_ok(|(_, m)| { .map_ok(|(_, m)| {
Ok(m.as_public_domains() Ok(m.as_onions()
.keys()? .de()?
.into_iter() .iter()
.chain(m.as_private_domains().keys()?) .map(InternedString::from_display)
.chain(m.as_public_domains().keys()?)
.chain(m.as_private_domains().de()?)
.chain( .chain(
m.as_bindings() m.as_hostname_info()
.de()? .de()?
.values() .values()
.flat_map(|b| b.addresses.available.iter().cloned()) .flatten()
.map(|h| h.to_san_hostname()), .map(|h| h.to_san_hostname()),
) )
.collect::<Vec<InternedString>>()) .collect::<Vec<_>>())
}) })
.map(|a| a.and_then(|a| a)) .map(|a| a.and_then(|a| a))
.flatten_ok() .flatten_ok()
@@ -179,18 +181,20 @@ pub async fn get_ssl_key(
.map(|m| m.as_hosts().as_entries()) .map(|m| m.as_hosts().as_entries())
.flatten_ok() .flatten_ok()
.map_ok(|(_, m)| { .map_ok(|(_, m)| {
Ok(m.as_public_domains() Ok(m.as_onions()
.keys()? .de()?
.into_iter() .iter()
.chain(m.as_private_domains().keys()?) .map(InternedString::from_display)
.chain(m.as_public_domains().keys()?)
.chain(m.as_private_domains().de()?)
.chain( .chain(
m.as_bindings() m.as_hostname_info()
.de()? .de()?
.values() .values()
.flat_map(|b| b.addresses.available.iter().cloned()) .flatten()
.map(|h| h.to_san_hostname()), .map(|h| h.to_san_hostname()),
) )
.collect::<Vec<InternedString>>()) .collect::<Vec<_>>())
}) })
.map(|a| a.and_then(|a| a)) .map(|a| a.and_then(|a| a))
.flatten_ok() .flatten_ok()

View File

@@ -259,7 +259,6 @@ impl ServiceMap {
service_interfaces: Default::default(), service_interfaces: Default::default(),
hosts: Default::default(), hosts: Default::default(),
store_exposed_dependents: Default::default(), store_exposed_dependents: Default::default(),
outbound_gateway: None,
}, },
)?; )?;
}; };

View File

@@ -414,11 +414,14 @@ pub async fn show_config(
i.iter().find_map(|(_, info)| { i.iter().find_map(|(_, info)| {
info.ip_info info.ip_info
.as_ref() .as_ref()
.and_then(|ip_info| ip_info.wan_ip) .filter(|_| info.public())
.map(IpAddr::from) .iter()
.find_map(|info| info.subnets.iter().next())
.copied()
}) })
}) })
.or_not_found("a public IP address")? .or_not_found("a public IP address")?
.addr()
}; };
Ok(client Ok(client
.client_config( .client_config(
@@ -456,7 +459,7 @@ pub async fn add_forward(
}) })
.map(|s| s.prefix_len()) .map(|s| s.prefix_len())
.unwrap_or(32); .unwrap_or(32);
let rc = ctx.forward.add_forward(source, target, prefix, None).await?; let rc = ctx.forward.add_forward(source, target, prefix).await?;
ctx.active_forwards.mutate(|m| { ctx.active_forwards.mutate(|m| {
m.insert(source, rc); m.insert(source, rc);
}); });

View File

@@ -199,7 +199,7 @@ impl TunnelContext {
}) })
.map(|s| s.prefix_len()) .map(|s| s.prefix_len())
.unwrap_or(32); .unwrap_or(32);
active_forwards.insert(from, forward.add_forward(from, to, prefix, None).await?); active_forwards.insert(from, forward.add_forward(from, to, prefix).await?);
} }
Ok(Self(Arc::new(TunnelContextSeed { Ok(Self(Arc::new(TunnelContextSeed {

View File

@@ -523,27 +523,27 @@ pub async fn init_web(ctx: CliContext) -> Result<(), Error> {
println!(concat!( println!(concat!(
"To access your Web URL securely, trust your Root CA (displayed above) on your client device(s):\n", "To access your Web URL securely, trust your Root CA (displayed above) on your client device(s):\n",
" - MacOS\n", " - MacOS\n",
" 1. Open the Terminal app\n", " 1. Open the Terminal app\n",
" 2. Paste the following command (**DO NOT** click Return): pbcopy < ~/Desktop/ca.crt\n", " 2. Paste the following command (**DO NOT** click Return): pbcopy < ~/Desktop/ca.crt\n",
" 3. Copy your Root CA (including -----BEGIN CERTIFICATE----- and -----END CERTIFICATE-----)\n", " 3. Copy your Root CA (including -----BEGIN CERTIFICATE----- and -----END CERTIFICATE-----)\n",
" 4. Back in Terminal, click Return. ca.crt is saved to your Desktop\n", " 4. Back in Terminal, click Return. ca.crt is saved to your Desktop\n",
" 5. Complete by trusting your Root CA: https://docs.start9.com/device-guides/mac/ca.html\n", " 5. Complete by trusting your Root CA: https://staging.docs.start9.com/device-guides/mac/ca.html\n",
" - Linux\n", " - Linux\n",
" 1. Open gedit, nano, or any editor\n", " 1. Open gedit, nano, or any editor\n",
" 2. Copy/paste your Root CA (including -----BEGIN CERTIFICATE----- and -----END CERTIFICATE-----)\n", " 2. Copy/paste your Root CA (including -----BEGIN CERTIFICATE----- and -----END CERTIFICATE-----)\n",
" 3. Name the file ca.crt and save as plaintext\n", " 3. Name the file ca.crt and save as plaintext\n",
" 4. Complete by trusting your Root CA: https://docs.start9.com/device-guides/linux/ca.html\n", " 5. Complete by trusting your Root CA: https://staging.docs.start9.com/device-guides/linux/ca.html\n",
" - Windows\n", " - Windows\n",
" 1. Open the Notepad app\n", " 1. Open the Notepad app\n",
" 2. Copy/paste your Root CA (including -----BEGIN CERTIFICATE----- and -----END CERTIFICATE-----)\n", " 2. Copy/paste your Root CA (including -----BEGIN CERTIFICATE----- and -----END CERTIFICATE-----)\n",
" 3. Name the file ca.crt and save as plaintext\n", " 3. Name the file ca.crt and save as plaintext\n",
" 4. Complete by trusting your Root CA: https://docs.start9.com/device-guides/windows/ca.html\n", " 5. Complete by trusting your Root CA: https://staging.docs.start9.com/device-guides/windows/ca.html\n",
" - Android/Graphene\n", " - Android/Graphene\n",
" 1. Send the ca.crt file (created above) to yourself\n", " 1. Send the ca.crt file (created above) to yourself\n",
" 2. Complete by trusting your Root CA: https://docs.start9.com/device-guides/android/ca.html\n", " 2. Complete by trusting your Root CA: https://staging.docs.start9.com/device-guides/android/ca.html\n",
" - iOS\n", " - iOS\n",
" 1. Send the ca.crt file (created above) to yourself\n", " 1. Send the ca.crt file (created above) to yourself\n",
" 2. Complete by trusting your Root CA: https://docs.start9.com/device-guides/ios/ca.html\n", " 2. Complete by trusting your Root CA: https://staging.docs.start9.com/device-guides/ios/ca.html\n",
)); ));
return Ok(()); return Ok(());

View File

@@ -59,9 +59,8 @@ mod v0_4_0_alpha_16;
mod v0_4_0_alpha_17; mod v0_4_0_alpha_17;
mod v0_4_0_alpha_18; mod v0_4_0_alpha_18;
mod v0_4_0_alpha_19; mod v0_4_0_alpha_19;
mod v0_4_0_alpha_20;
pub type Current = v0_4_0_alpha_20::Version; // VERSION_BUMP pub type Current = v0_4_0_alpha_19::Version; // VERSION_BUMP
impl Current { impl Current {
#[instrument(skip(self, db))] #[instrument(skip(self, db))]
@@ -182,8 +181,7 @@ enum Version {
V0_4_0_alpha_16(Wrapper<v0_4_0_alpha_16::Version>), V0_4_0_alpha_16(Wrapper<v0_4_0_alpha_16::Version>),
V0_4_0_alpha_17(Wrapper<v0_4_0_alpha_17::Version>), V0_4_0_alpha_17(Wrapper<v0_4_0_alpha_17::Version>),
V0_4_0_alpha_18(Wrapper<v0_4_0_alpha_18::Version>), V0_4_0_alpha_18(Wrapper<v0_4_0_alpha_18::Version>),
V0_4_0_alpha_19(Wrapper<v0_4_0_alpha_19::Version>), V0_4_0_alpha_19(Wrapper<v0_4_0_alpha_19::Version>), // VERSION_BUMP
V0_4_0_alpha_20(Wrapper<v0_4_0_alpha_20::Version>), // VERSION_BUMP
Other(exver::Version), Other(exver::Version),
} }
@@ -245,8 +243,7 @@ impl Version {
Self::V0_4_0_alpha_16(v) => DynVersion(Box::new(v.0)), Self::V0_4_0_alpha_16(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_17(v) => DynVersion(Box::new(v.0)), Self::V0_4_0_alpha_17(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_18(v) => DynVersion(Box::new(v.0)), Self::V0_4_0_alpha_18(v) => DynVersion(Box::new(v.0)),
Self::V0_4_0_alpha_19(v) => DynVersion(Box::new(v.0)), Self::V0_4_0_alpha_19(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP
Self::V0_4_0_alpha_20(v) => DynVersion(Box::new(v.0)), // VERSION_BUMP
Self::Other(v) => { Self::Other(v) => {
return Err(Error::new( return Err(Error::new(
eyre!("unknown version {v}"), eyre!("unknown version {v}"),
@@ -300,8 +297,7 @@ impl Version {
Version::V0_4_0_alpha_16(Wrapper(x)) => x.semver(), Version::V0_4_0_alpha_16(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_17(Wrapper(x)) => x.semver(), Version::V0_4_0_alpha_17(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_18(Wrapper(x)) => x.semver(), Version::V0_4_0_alpha_18(Wrapper(x)) => x.semver(),
Version::V0_4_0_alpha_19(Wrapper(x)) => x.semver(), Version::V0_4_0_alpha_19(Wrapper(x)) => x.semver(), // VERSION_BUMP
Version::V0_4_0_alpha_20(Wrapper(x)) => x.semver(), // VERSION_BUMP
Version::Other(x) => x.clone(), Version::Other(x) => x.clone(),
} }
} }

View File

@@ -10,13 +10,13 @@ A server is not a toy. It is a critical component of the computing paradigm, and
Start9 is paving new ground with StartOS, trying to create what most developers and IT professionals thought impossible; namely, an OS and user experience that affords a normal person the same independent control over their data and communications as an experienced Linux sysadmin. Start9 is paving new ground with StartOS, trying to create what most developers and IT professionals thought impossible; namely, an OS and user experience that affords a normal person the same independent control over their data and communications as an experienced Linux sysadmin.
The difficulty of our endeavor requires making mistakes; and our integrity and dedication to excellence require that we correct them. This means a willingness to discard bad ideas and broken parts, and if absolutely necessary, to tear it all down and start over. That is exactly what we did with StartOS v0.2.0 in 2020. It is what we did with StartOS v0.3.0 in 2022. And we are doing it now with StartOS v0.4.0 in 2026. The difficulty of our endeavor requires making mistakes; and our integrity and dedication to excellence require that we correct them. This means a willingness to discard bad ideas and broken parts, and if absolutely necessary, to tear it all down and start over. That is exactly what we did with StartOS v0.2.0 in 2020. It is what we did with StartOS v0.3.0 in 2022. And we are doing it now with StartOS v0.4.0 in 2025.
v0.4.0 is a complete rewrite of StartOS, almost nothing survived. After nearly six years of building StartOS, we believe that we have finally arrived at the correct architecture and foundation that will allow us to deliver on the promise of sovereign computing. v0.4.0 is a complete rewrite of StartOS, almost nothing survived. After nearly six years of building StartOS, we believe that we have finally arrived at the correct architecture and foundation that will allow us to deliver on the promise of sovereign computing.
## Changelog ## Changelog
### New User interface ### Improved User interface
We re-wrote the StartOS UI to be more performant, more intuitive, and better looking on both mobile and desktop. Enjoy. We re-wrote the StartOS UI to be more performant, more intuitive, and better looking on both mobile and desktop. Enjoy.
@@ -28,10 +28,6 @@ StartOS v0.4.0 supports multiple languages and also makes it easy to add more la
Neither Docker nor Podman offer the reliability and flexibility needed for StartOS. Instead, v0.4.0 uses a nested container paradigm based on LXC for the outer container and Linux namespaces for sub containers. This architecture naturally supports multi container setups. Neither Docker nor Podman offer the reliability and flexibility needed for StartOS. Instead, v0.4.0 uses a nested container paradigm based on LXC for the outer container and Linux namespaces for sub containers. This architecture naturally supports multi container setups.
### Hardware Acceleration
Services can take advantage of (and require) the presence of certain hardware modules, such as Nvidia GPUs, for transcoding or inference purposes. For example, StartOS and Ollama can run natively on The Nvidia DGX Spark and take full advantage of the hardware/firmware stack to perform local inference against open source models.
### New S9PK archive format ### New S9PK archive format
The S9PK archive format has been overhauled to allow for signature verification of partial downloads, and allow direct mounting of container images without unpacking the s9pk. The S9PK archive format has been overhauled to allow for signature verification of partial downloads, and allow direct mounting of container images without unpacking the s9pk.
@@ -84,13 +80,13 @@ The new start-fs fuse module unifies file system expectations for various platfo
StartOS now uses Extended Versioning (Exver), which consists of three parts: (1) a Semver-compliant upstream version, (2) a Semver-compliant wrapper version, and (3) an optional "flavor" prefix. Flavors can be thought of as alternative implementations of services, where a user would only want one or the other installed, and data can feasibly be migrating between the two. Another common characteristic of flavors is that they satisfy the same API requirement of dependents, though this is not strictly necessary. A valid Exver looks something like this: `#knots:29.0:1.0-beta.1`. This would translate to "the first beta release of StartOS wrapper version 1.0 of Bitcoin Knots version 29.0". StartOS now uses Extended Versioning (Exver), which consists of three parts: (1) a Semver-compliant upstream version, (2) a Semver-compliant wrapper version, and (3) an optional "flavor" prefix. Flavors can be thought of as alternative implementations of services, where a user would only want one or the other installed, and data can feasibly be migrating between the two. Another common characteristic of flavors is that they satisfy the same API requirement of dependents, though this is not strictly necessary. A valid Exver looks something like this: `#knots:29.0:1.0-beta.1`. This would translate to "the first beta release of StartOS wrapper version 1.0 of Bitcoin Knots version 29.0".
### Let's Encrypt ### ACME
StartOS now supports Let's Encrypt to automatically obtain SSL/TLS certificates for public domains. This means people visiting your public websites and APIs will not need to download and trust your server's Root CA. StartOS now supports using ACME protocol to automatically obtain SSL/TLS certificates from widely trusted certificate authorities, such as Let's Encrypt, for your public domains. This means people visiting your public websites and APIs will not need to download and trust your server's Root CA.
### Gateways ### Gateways
Gateways connect your server to the Internet, facilitating inbound and outbound traffic. Your router is a gateway. It is now possible to add Wireguard VPN gateways to your server to control how devices outside the LAN connect to your server and how your server connects out to the Internet. Gateways connect your server to the Internet. They process outbound traffic, and under certain conditions, they also permit inbound traffic. For example, your router is a gateway. It is now possible add gateways to StartOS, such as StartTunnel, in order to more granularly control how your installed services are exposed to the Internet.
### Static DNS Servers ### Static DNS Servers

View File

@@ -1,4 +1,4 @@
use std::collections::BTreeMap; use std::collections::{BTreeMap, BTreeSet};
use std::ffi::OsStr; use std::ffi::OsStr;
use std::path::Path; use std::path::Path;
@@ -23,14 +23,17 @@ use crate::disk::mount::filesystem::cifs::Cifs;
use crate::disk::mount::util::unmount; use crate::disk::mount::util::unmount;
use crate::hostname::Hostname; use crate::hostname::Hostname;
use crate::net::forward::AvailablePorts; use crate::net::forward::AvailablePorts;
use crate::net::host::Host;
use crate::net::keys::KeyStore; use crate::net::keys::KeyStore;
use crate::net::tor::{OnionAddress, TorSecretKey};
use crate::notifications::Notifications; use crate::notifications::Notifications;
use crate::prelude::*; use crate::prelude::*;
use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile; use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile;
use crate::ssh::{SshKeys, SshPubKey}; use crate::ssh::{SshKeys, SshPubKey};
use crate::util::Invoke; use crate::util::Invoke;
use crate::util::crypto::ed25519_expand_key;
use crate::util::serde::Pem; use crate::util::serde::Pem;
use crate::{DATA_DIR, PACKAGE_DATA, PackageId, ReplayId}; use crate::{DATA_DIR, HostId, Id, PACKAGE_DATA, PackageId, ReplayId};
lazy_static::lazy_static! { lazy_static::lazy_static! {
static ref V0_3_6_alpha_0: exver::Version = exver::Version::new( static ref V0_3_6_alpha_0: exver::Version = exver::Version::new(
@@ -143,7 +146,12 @@ pub struct Version;
impl VersionT for Version { impl VersionT for Version {
type Previous = v0_3_5_2::Version; type Previous = v0_3_5_2::Version;
type PreUpRes = (AccountInfo, SshKeys, CifsTargets); type PreUpRes = (
AccountInfo,
SshKeys,
CifsTargets,
BTreeMap<PackageId, BTreeMap<HostId, TorSecretKey>>,
);
fn semver(self) -> exver::Version { fn semver(self) -> exver::Version {
V0_3_6_alpha_0.clone() V0_3_6_alpha_0.clone()
} }
@@ -158,18 +166,20 @@ impl VersionT for Version {
let cifs = previous_cifs(&pg).await?; let cifs = previous_cifs(&pg).await?;
let tor_keys = previous_tor_keys(&pg).await?;
Command::new("systemctl") Command::new("systemctl")
.arg("stop") .arg("stop")
.arg("postgresql@*.service") .arg("postgresql@*.service")
.invoke(crate::ErrorKind::Database) .invoke(crate::ErrorKind::Database)
.await?; .await?;
Ok((account, ssh_keys, cifs)) Ok((account, ssh_keys, cifs, tor_keys))
} }
fn up( fn up(
self, self,
db: &mut Value, db: &mut Value,
(account, ssh_keys, cifs): Self::PreUpRes, (account, ssh_keys, cifs, tor_keys): Self::PreUpRes,
) -> Result<Value, Error> { ) -> Result<Value, Error> {
let prev_package_data = db["package-data"].clone(); let prev_package_data = db["package-data"].clone();
@@ -232,7 +242,11 @@ impl VersionT for Version {
"ui": db["ui"], "ui": db["ui"],
}); });
let keystore = KeyStore::new(&account)?; let mut keystore = KeyStore::new(&account)?;
for key in tor_keys.values().flat_map(|v| v.values()) {
assert!(key.is_valid());
keystore.onion.insert(key.clone());
}
let private = { let private = {
let mut value = json!({}); let mut value = json!({});
@@ -336,6 +350,20 @@ impl VersionT for Version {
false false
}; };
let onions = input[&*id]["installed"]["interface-addresses"]
.as_object()
.into_iter()
.flatten()
.filter_map(|(id, addrs)| {
addrs["tor-address"].as_str().map(|addr| {
Ok((
HostId::from(Id::try_from(id.clone())?),
addr.parse::<OnionAddress>()?,
))
})
})
.collect::<Result<BTreeMap<_, _>, Error>>()?;
if let Err(e) = async { if let Err(e) = async {
let package_s9pk = tokio::fs::File::open(path).await?; let package_s9pk = tokio::fs::File::open(path).await?;
let file = MultiCursorFile::open(&package_s9pk).await?; let file = MultiCursorFile::open(&package_s9pk).await?;
@@ -353,8 +381,11 @@ impl VersionT for Version {
.await? .await?
.await?; .await?;
ctx.db let to_sync = ctx
.db
.mutate(|db| { .mutate(|db| {
let mut to_sync = BTreeSet::new();
let package = db let package = db
.as_public_mut() .as_public_mut()
.as_package_data_mut() .as_package_data_mut()
@@ -365,11 +396,29 @@ impl VersionT for Version {
.as_tasks_mut() .as_tasks_mut()
.remove(&ReplayId::from("needs-config"))?; .remove(&ReplayId::from("needs-config"))?;
} }
Ok(()) for (id, onion) in onions {
package
.as_hosts_mut()
.upsert(&id, || Ok(Host::new()))?
.as_onions_mut()
.mutate(|o| {
o.clear();
o.insert(onion);
Ok(())
})?;
to_sync.insert(id);
}
Ok(to_sync)
}) })
.await .await
.result?; .result?;
if let Some(service) = &*ctx.services.get(&id).await {
for host_id in to_sync {
service.sync_host(host_id.clone()).await?;
}
}
Ok::<_, Error>(()) Ok::<_, Error>(())
} }
.await .await
@@ -432,6 +481,33 @@ async fn previous_account_info(pg: &sqlx::Pool<sqlx::Postgres>) -> Result<Accoun
password: account_query password: account_query
.try_get("password") .try_get("password")
.with_ctx(|_| (ErrorKind::Database, "password"))?, .with_ctx(|_| (ErrorKind::Database, "password"))?,
tor_keys: vec![TorSecretKey::from_bytes(
if let Some(bytes) = account_query
.try_get::<Option<Vec<u8>>, _>("tor_key")
.with_ctx(|_| (ErrorKind::Database, "tor_key"))?
{
<[u8; 64]>::try_from(bytes).map_err(|e| {
Error::new(
eyre!("expected vec of len 64, got len {}", e.len()),
ErrorKind::ParseDbField,
)
})?
} else {
ed25519_expand_key(
&<[u8; 32]>::try_from(
account_query
.try_get::<Vec<u8>, _>("network_key")
.with_kind(ErrorKind::Database)?,
)
.map_err(|e| {
Error::new(
eyre!("expected vec of len 32, got len {}", e.len()),
ErrorKind::ParseDbField,
)
})?,
)
},
)?],
server_id: account_query server_id: account_query
.try_get("server_id") .try_get("server_id")
.with_ctx(|_| (ErrorKind::Database, "server_id"))?, .with_ctx(|_| (ErrorKind::Database, "server_id"))?,
@@ -503,3 +579,68 @@ async fn previous_ssh_keys(pg: &sqlx::Pool<sqlx::Postgres>) -> Result<SshKeys, E
Ok(ssh_keys) Ok(ssh_keys)
} }
#[tracing::instrument(skip_all)]
async fn previous_tor_keys(
pg: &sqlx::Pool<sqlx::Postgres>,
) -> Result<BTreeMap<PackageId, BTreeMap<HostId, TorSecretKey>>, Error> {
let mut res = BTreeMap::<PackageId, BTreeMap<HostId, TorSecretKey>>::new();
let net_key_query = sqlx::query(r#"SELECT * FROM network_keys"#)
.fetch_all(pg)
.await
.with_kind(ErrorKind::Database)?;
for row in net_key_query {
let package_id: PackageId = row
.try_get::<String, _>("package")
.with_ctx(|_| (ErrorKind::Database, "network_keys::package"))?
.parse()?;
let interface_id: HostId = row
.try_get::<String, _>("interface")
.with_ctx(|_| (ErrorKind::Database, "network_keys::interface"))?
.parse()?;
let key = TorSecretKey::from_bytes(ed25519_expand_key(
&<[u8; 32]>::try_from(
row.try_get::<Vec<u8>, _>("key")
.with_ctx(|_| (ErrorKind::Database, "network_keys::key"))?,
)
.map_err(|e| {
Error::new(
eyre!("expected vec of len 32, got len {}", e.len()),
ErrorKind::ParseDbField,
)
})?,
))?;
res.entry(package_id).or_default().insert(interface_id, key);
}
let tor_key_query = sqlx::query(r#"SELECT * FROM tor"#)
.fetch_all(pg)
.await
.with_kind(ErrorKind::Database)?;
for row in tor_key_query {
let package_id: PackageId = row
.try_get::<String, _>("package")
.with_ctx(|_| (ErrorKind::Database, "tor::package"))?
.parse()?;
let interface_id: HostId = row
.try_get::<String, _>("interface")
.with_ctx(|_| (ErrorKind::Database, "tor::interface"))?
.parse()?;
let key = TorSecretKey::from_bytes(
<[u8; 64]>::try_from(
row.try_get::<Vec<u8>, _>("key")
.with_ctx(|_| (ErrorKind::Database, "tor::key"))?,
)
.map_err(|e| {
Error::new(
eyre!("expected vec of len 64, got len {}", e.len()),
ErrorKind::ParseDbField,
)
})?,
)?;
res.entry(package_id).or_default().insert(interface_id, key);
}
Ok(res)
}

View File

@@ -8,6 +8,7 @@ use super::v0_3_5::V0_3_0_COMPAT;
use super::{VersionT, v0_3_6_alpha_9}; use super::{VersionT, v0_3_6_alpha_9};
use crate::GatewayId; use crate::GatewayId;
use crate::net::host::address::PublicDomainConfig; use crate::net::host::address::PublicDomainConfig;
use crate::net::tor::OnionAddress;
use crate::prelude::*; use crate::prelude::*;
lazy_static::lazy_static! { lazy_static::lazy_static! {
@@ -21,7 +22,7 @@ lazy_static::lazy_static! {
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
#[serde(tag = "kind")] #[serde(tag = "kind")]
enum HostAddress { enum HostAddress {
Onion { address: String }, Onion { address: OnionAddress },
Domain { address: InternedString }, Domain { address: InternedString },
} }

View File

@@ -1,7 +1,11 @@
use std::collections::BTreeSet;
use exver::{PreReleaseSegment, VersionRange}; use exver::{PreReleaseSegment, VersionRange};
use imbl_value::InternedString;
use super::v0_3_5::V0_3_0_COMPAT; use super::v0_3_5::V0_3_0_COMPAT;
use super::{VersionT, v0_4_0_alpha_11}; use super::{VersionT, v0_4_0_alpha_11};
use crate::net::tor::TorSecretKey;
use crate::prelude::*; use crate::prelude::*;
lazy_static::lazy_static! { lazy_static::lazy_static! {
@@ -29,6 +33,48 @@ impl VersionT for Version {
} }
#[instrument(skip_all)] #[instrument(skip_all)]
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> { fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
let mut err = None;
let onion_store = db["private"]["keyStore"]["onion"]
.as_object_mut()
.or_not_found("private.keyStore.onion")?;
onion_store.retain(|o, v| match from_value::<TorSecretKey>(v.clone()) {
Ok(k) => k.is_valid() && &InternedString::from_display(&k.onion_address()) == o,
Err(e) => {
err = Some(e);
true
}
});
if let Some(e) = err {
return Err(e);
}
let allowed_addresses = onion_store.keys().cloned().collect::<BTreeSet<_>>();
let fix_host = |host: &mut Value| {
Ok::<_, Error>(
host["onions"]
.as_array_mut()
.or_not_found("host.onions")?
.retain(|addr| {
addr.as_str()
.map(|s| allowed_addresses.contains(s))
.unwrap_or(false)
}),
)
};
for (_, pde) in db["public"]["packageData"]
.as_object_mut()
.or_not_found("public.packageData")?
.iter_mut()
{
for (_, host) in pde["hosts"]
.as_object_mut()
.or_not_found("public.packageData[].hosts")?
.iter_mut()
{
fix_host(host)?;
}
}
fix_host(&mut db["public"]["serverInfo"]["network"]["host"])?;
if db["private"]["keyStore"]["localCerts"].is_null() { if db["private"]["keyStore"]["localCerts"].is_null() {
db["private"]["keyStore"]["localCerts"] = db["private"]["keyStore"]["localCerts"] =
db["private"]["keyStore"]["local_certs"].clone(); db["private"]["keyStore"]["local_certs"].clone();

View File

@@ -1,205 +0,0 @@
use exver::{PreReleaseSegment, VersionRange};
use super::v0_3_5::V0_3_0_COMPAT;
use super::{VersionT, v0_4_0_alpha_19};
use crate::prelude::*;
lazy_static::lazy_static! {
static ref V0_4_0_alpha_20: exver::Version = exver::Version::new(
[0, 4, 0],
[PreReleaseSegment::String("alpha".into()), 20.into()]
);
}
#[derive(Clone, Copy, Debug, Default)]
pub struct Version;
impl VersionT for Version {
type Previous = v0_4_0_alpha_19::Version;
type PreUpRes = ();
async fn pre_up(self) -> Result<Self::PreUpRes, Error> {
Ok(())
}
fn semver(self) -> exver::Version {
V0_4_0_alpha_20.clone()
}
fn compat(self) -> &'static VersionRange {
&V0_3_0_COMPAT
}
#[instrument(skip_all)]
fn up(self, db: &mut Value, _: Self::PreUpRes) -> Result<Value, Error> {
// Remove onions and tor-related fields from server host
if let Some(host) = db
.get_mut("public")
.and_then(|p| p.get_mut("serverInfo"))
.and_then(|s| s.get_mut("network"))
.and_then(|n| n.get_mut("host"))
.and_then(|h| h.as_object_mut())
{
host.remove("onions");
}
// Remove onions from all package hosts
if let Some(packages) = db
.get_mut("public")
.and_then(|p| p.get_mut("packageData"))
.and_then(|p| p.as_object_mut())
{
for (_, package) in packages.iter_mut() {
if let Some(hosts) = package.get_mut("hosts").and_then(|h| h.as_object_mut()) {
for (_, host) in hosts.iter_mut() {
if let Some(host_obj) = host.as_object_mut() {
host_obj.remove("onions");
}
}
}
}
}
// Remove onion store from private keyStore
if let Some(key_store) = db
.get_mut("private")
.and_then(|p| p.get_mut("keyStore"))
.and_then(|k| k.as_object_mut())
{
key_store.remove("onion");
}
// Migrate server host: remove hostnameInfo, add addresses to bindings, clean net
migrate_host(
db.get_mut("public")
.and_then(|p| p.get_mut("serverInfo"))
.and_then(|s| s.get_mut("network"))
.and_then(|n| n.get_mut("host")),
);
// Migrate all package hosts
if let Some(packages) = db
.get_mut("public")
.and_then(|p| p.get_mut("packageData"))
.and_then(|p| p.as_object_mut())
{
for (_, package) in packages.iter_mut() {
if let Some(hosts) = package.get_mut("hosts").and_then(|h| h.as_object_mut()) {
for (_, host) in hosts.iter_mut() {
migrate_host(Some(host));
}
}
}
}
// Migrate availablePorts from IdPool format to BTreeMap<u16, bool>
// Rebuild from actual assigned ports in all bindings
migrate_available_ports(db);
Ok(Value::Null)
}
fn down(self, _db: &mut Value) -> Result<(), Error> {
Ok(())
}
}
fn collect_ports_from_host(host: Option<&Value>, ports: &mut Value) {
let Some(bindings) = host
.and_then(|h| h.get("bindings"))
.and_then(|b| b.as_object())
else {
return;
};
for (_, binding) in bindings.iter() {
if let Some(net) = binding.get("net") {
if let Some(port) = net.get("assignedPort").and_then(|p| p.as_u64()) {
if let Some(obj) = ports.as_object_mut() {
obj.insert(port.to_string().into(), Value::from(false));
}
}
if let Some(port) = net.get("assignedSslPort").and_then(|p| p.as_u64()) {
if let Some(obj) = ports.as_object_mut() {
obj.insert(port.to_string().into(), Value::from(true));
}
}
}
}
}
fn migrate_available_ports(db: &mut Value) {
let mut new_ports: Value = serde_json::json!({}).into();
// Collect from server host
let server_host = db
.get("public")
.and_then(|p| p.get("serverInfo"))
.and_then(|s| s.get("network"))
.and_then(|n| n.get("host"))
.cloned();
collect_ports_from_host(server_host.as_ref(), &mut new_ports);
// Collect from all package hosts
if let Some(packages) = db
.get("public")
.and_then(|p| p.get("packageData"))
.and_then(|p| p.as_object())
{
for (_, package) in packages.iter() {
if let Some(hosts) = package.get("hosts").and_then(|h| h.as_object()) {
for (_, host) in hosts.iter() {
collect_ports_from_host(Some(host), &mut new_ports);
}
}
}
}
// Replace private.availablePorts
if let Some(private) = db.get_mut("private").and_then(|p| p.as_object_mut()) {
private.insert("availablePorts".into(), new_ports);
}
}
fn migrate_host(host: Option<&mut Value>) {
let Some(host) = host.and_then(|h| h.as_object_mut()) else {
return;
};
// Remove hostnameInfo from host
host.remove("hostnameInfo");
// Migrate privateDomains from array to object (BTreeSet -> BTreeMap<_, BTreeSet<GatewayId>>)
if let Some(private_domains) = host.get("privateDomains").and_then(|v| v.as_array()).cloned() {
let mut new_pd: Value = serde_json::json!({}).into();
for domain in private_domains {
if let Some(d) = domain.as_str() {
if let Some(obj) = new_pd.as_object_mut() {
obj.insert(d.into(), serde_json::json!([]).into());
}
}
}
host.insert("privateDomains".into(), new_pd);
}
// For each binding: add "addresses" field, remove gateway-level fields from "net"
if let Some(bindings) = host.get_mut("bindings").and_then(|b| b.as_object_mut()) {
for (_, binding) in bindings.iter_mut() {
if let Some(binding_obj) = binding.as_object_mut() {
// Add addresses if not present
if !binding_obj.contains_key("addresses") {
binding_obj.insert(
"addresses".into(),
serde_json::json!({
"enabled": [],
"disabled": [],
"available": []
})
.into(),
);
}
// Remove gateway-level privateDisabled/publicEnabled from net
if let Some(net) = binding_obj.get_mut("net").and_then(|n| n.as_object_mut()) {
net.remove("privateDisabled");
net.remove("publicEnabled");
}
}
}
}
}

View File

@@ -23,7 +23,7 @@ if [ "${PROJECT}" = "startos" ]; then
else else
INSTALL_TARGET="install-${PROJECT#start-}" INSTALL_TARGET="install-${PROJECT#start-}"
fi fi
make "${INSTALL_TARGET}" DESTDIR=dpkg-workdir/$BASENAME REMOTE= make "${INSTALL_TARGET}" DESTDIR=dpkg-workdir/$BASENAME
if [ -f dpkg-workdir/$BASENAME/usr/lib/$PROJECT/depends ]; then if [ -f dpkg-workdir/$BASENAME/usr/lib/$PROJECT/depends ]; then
if [ -n "$DEPENDS" ]; then if [ -n "$DEPENDS" ]; then

View File

@@ -1,8 +0,0 @@
# SDK — TypeScript Service Packaging
TypeScript SDK for packaging services for StartOS (`@start9labs/start-sdk`).
## Structure
- `base/` — Core types, ABI definitions, effects interface (`@start9labs/start-sdk-base`)
- `package/` — Full SDK for package developers, re-exports base

File diff suppressed because it is too large Load Diff

View File

@@ -1,9 +1,3 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { GatewayType } from './GatewayType'
export type AddTunnelParams = { export type AddTunnelParams = { name: string; config: string; public: boolean }
name: string
config: string
type: GatewayType | null
setAsDefaultOutbound: boolean
}

View File

@@ -1,11 +1,5 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { BindOptions } from './BindOptions' import type { BindOptions } from './BindOptions'
import type { DerivedAddressInfo } from './DerivedAddressInfo'
import type { NetInfo } from './NetInfo' import type { NetInfo } from './NetInfo'
export type BindInfo = { export type BindInfo = { enabled: boolean; options: BindOptions; net: NetInfo }
enabled: boolean
options: BindOptions
net: NetInfo
addresses: DerivedAddressInfo
}

View File

@@ -1,7 +1,8 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { GatewayId } from './GatewayId'
export type BindingSetAddressEnabledParams = { export type BindingGatewaySetEnabledParams = {
internalPort: number internalPort: number
address: string gateway: GatewayId
enabled: boolean | null enabled: boolean | null
} }

View File

@@ -1,17 +0,0 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { HostnameInfo } from './HostnameInfo'
export type DerivedAddressInfo = {
/**
* User override: enable these addresses (only for public IP & port)
*/
enabled: Array<string>
/**
* User override: disable these addresses (only for domains and private IP & port)
*/
disabled: Array<[string, number]>
/**
* COMPUTED: NetServiceData::update — all possible addresses for this binding
*/
available: Array<HostnameInfo>
}

View File

@@ -1,3 +1,3 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type ErrorData = { details: string; debug: string; info: unknown } export type ErrorData = { details: string; debug: string }

View File

@@ -1,3 +0,0 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type GatewayType = 'inbound-outbound' | 'outbound-only'

View File

@@ -1,10 +1,15 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { Bindings } from './Bindings' import type { BindInfo } from './BindInfo'
import type { GatewayId } from './GatewayId' import type { HostnameInfo } from './HostnameInfo'
import type { PublicDomainConfig } from './PublicDomainConfig' import type { PublicDomainConfig } from './PublicDomainConfig'
export type Host = { export type Host = {
bindings: Bindings bindings: { [key: number]: BindInfo }
onions: string[]
publicDomains: { [key: string]: PublicDomainConfig } publicDomains: { [key: string]: PublicDomainConfig }
privateDomains: { [key: string]: Array<GatewayId> } privateDomains: Array<string>
/**
* COMPUTED: NetService::update
*/
hostnameInfo: { [key: number]: Array<HostnameInfo> }
} }

View File

@@ -1,10 +1,8 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { HostnameMetadata } from './HostnameMetadata' import type { GatewayInfo } from './GatewayInfo'
import type { IpHostname } from './IpHostname'
import type { OnionHostname } from './OnionHostname'
export type HostnameInfo = { export type HostnameInfo =
ssl: boolean | { kind: 'ip'; gateway: GatewayInfo; public: boolean; hostname: IpHostname }
public: boolean | { kind: 'onion'; hostname: OnionHostname }
host: string
port: number | null
metadata: HostnameMetadata
}

View File

@@ -1,10 +0,0 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { GatewayId } from './GatewayId'
import type { PackageId } from './PackageId'
export type HostnameMetadata =
| { kind: 'ipv4'; gateway: GatewayId }
| { kind: 'ipv6'; gateway: GatewayId; scopeId: number }
| { kind: 'private-domain'; gateways: Array<GatewayId> }
| { kind: 'public-domain'; gateway: GatewayId }
| { kind: 'plugin'; package: PackageId }

View File

@@ -0,0 +1,23 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type IpHostname =
| { kind: 'ipv4'; value: string; port: number | null; sslPort: number | null }
| {
kind: 'ipv6'
value: string
scopeId: number
port: number | null
sslPort: number | null
}
| {
kind: 'local'
value: string
port: number | null
sslPort: number | null
}
| {
kind: 'domain'
value: string
port: number | null
sslPort: number | null
}

View File

@@ -1,6 +1,9 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { GatewayId } from './GatewayId'
export type NetInfo = { export type NetInfo = {
privateDisabled: Array<GatewayId>
publicEnabled: Array<GatewayId>
assignedPort: number | null assignedPort: number | null
assignedSslPort: number | null assignedSslPort: number | null
} }

View File

@@ -13,5 +13,4 @@ export type NetworkInfo = {
gateways: { [key: GatewayId]: NetworkInterfaceInfo } gateways: { [key: GatewayId]: NetworkInterfaceInfo }
acme: { [key: AcmeProvider]: AcmeSettings } acme: { [key: AcmeProvider]: AcmeSettings }
dns: DnsSettings dns: DnsSettings
defaultOutbound: string | null
} }

View File

@@ -1,10 +1,9 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { GatewayType } from './GatewayType'
import type { IpInfo } from './IpInfo' import type { IpInfo } from './IpInfo'
export type NetworkInterfaceInfo = { export type NetworkInterfaceInfo = {
name: string | null name: string | null
public: boolean | null
secure: boolean | null secure: boolean | null
ipInfo: IpInfo | null ipInfo: IpInfo | null
type: GatewayType | null
} }

View File

@@ -1,4 +1,7 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { BindInfo } from './BindInfo'
export type Bindings = { [key: number]: BindInfo } export type OnionHostname = {
value: string
port: number | null
sslPort: number | null
}

View File

@@ -25,5 +25,4 @@ export type PackageDataEntry = {
serviceInterfaces: { [key: ServiceInterfaceId]: ServiceInterface } serviceInterfaces: { [key: ServiceInterfaceId]: ServiceInterface }
hosts: Hosts hosts: Hosts
storeExposedDependents: string[] storeExposedDependents: string[]
outboundGateway: string | null
} }

View File

@@ -1,4 +1,3 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually. // This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { LocaleString } from './LocaleString'
export type PackageInfoShort = { releaseNotes: LocaleString } export type PackageInfoShort = { releaseNotes: string }

View File

@@ -36,8 +36,7 @@ export { BackupTargetFS } from './BackupTargetFS'
export { Base64 } from './Base64' export { Base64 } from './Base64'
export { BindId } from './BindId' export { BindId } from './BindId'
export { BindInfo } from './BindInfo' export { BindInfo } from './BindInfo'
export { BindingSetAddressEnabledParams } from './BindingSetAddressEnabledParams' export { BindingGatewaySetEnabledParams } from './BindingGatewaySetEnabledParams'
export { Bindings } from './Bindings'
export { BindOptions } from './BindOptions' export { BindOptions } from './BindOptions'
export { BindParams } from './BindParams' export { BindParams } from './BindParams'
export { Blake3Commitment } from './Blake3Commitment' export { Blake3Commitment } from './Blake3Commitment'
@@ -65,7 +64,6 @@ export { Dependencies } from './Dependencies'
export { DependencyMetadata } from './DependencyMetadata' export { DependencyMetadata } from './DependencyMetadata'
export { DependencyRequirement } from './DependencyRequirement' export { DependencyRequirement } from './DependencyRequirement'
export { DepInfo } from './DepInfo' export { DepInfo } from './DepInfo'
export { DerivedAddressInfo } from './DerivedAddressInfo'
export { Description } from './Description' export { Description } from './Description'
export { DesiredStatus } from './DesiredStatus' export { DesiredStatus } from './DesiredStatus'
export { DestroySubcontainerFsParams } from './DestroySubcontainerFsParams' export { DestroySubcontainerFsParams } from './DestroySubcontainerFsParams'
@@ -85,7 +83,6 @@ export { FullIndex } from './FullIndex'
export { FullProgress } from './FullProgress' export { FullProgress } from './FullProgress'
export { GatewayId } from './GatewayId' export { GatewayId } from './GatewayId'
export { GatewayInfo } from './GatewayInfo' export { GatewayInfo } from './GatewayInfo'
export { GatewayType } from './GatewayType'
export { GetActionInputParams } from './GetActionInputParams' export { GetActionInputParams } from './GetActionInputParams'
export { GetContainerIpParams } from './GetContainerIpParams' export { GetContainerIpParams } from './GetContainerIpParams'
export { GetHostInfoParams } from './GetHostInfoParams' export { GetHostInfoParams } from './GetHostInfoParams'
@@ -109,7 +106,6 @@ export { HardwareRequirements } from './HardwareRequirements'
export { HealthCheckId } from './HealthCheckId' export { HealthCheckId } from './HealthCheckId'
export { HostId } from './HostId' export { HostId } from './HostId'
export { HostnameInfo } from './HostnameInfo' export { HostnameInfo } from './HostnameInfo'
export { HostnameMetadata } from './HostnameMetadata'
export { Hosts } from './Hosts' export { Hosts } from './Hosts'
export { Host } from './Host' export { Host } from './Host'
export { IdMap } from './IdMap' export { IdMap } from './IdMap'
@@ -123,6 +119,7 @@ export { InstalledVersionParams } from './InstalledVersionParams'
export { InstallingInfo } from './InstallingInfo' export { InstallingInfo } from './InstallingInfo'
export { InstallingState } from './InstallingState' export { InstallingState } from './InstallingState'
export { InstallParams } from './InstallParams' export { InstallParams } from './InstallParams'
export { IpHostname } from './IpHostname'
export { IpInfo } from './IpInfo' export { IpInfo } from './IpInfo'
export { KeyboardOptions } from './KeyboardOptions' export { KeyboardOptions } from './KeyboardOptions'
export { ListPackageSignersParams } from './ListPackageSignersParams' export { ListPackageSignersParams } from './ListPackageSignersParams'
@@ -152,6 +149,7 @@ export { NetInfo } from './NetInfo'
export { NetworkInfo } from './NetworkInfo' export { NetworkInfo } from './NetworkInfo'
export { NetworkInterfaceInfo } from './NetworkInterfaceInfo' export { NetworkInterfaceInfo } from './NetworkInterfaceInfo'
export { NetworkInterfaceType } from './NetworkInterfaceType' export { NetworkInterfaceType } from './NetworkInterfaceType'
export { OnionHostname } from './OnionHostname'
export { OsIndex } from './OsIndex' export { OsIndex } from './OsIndex'
export { OsVersionInfoMap } from './OsVersionInfoMap' export { OsVersionInfoMap } from './OsVersionInfoMap'
export { OsVersionInfo } from './OsVersionInfo' export { OsVersionInfo } from './OsVersionInfo'

View File

@@ -1,12 +1,6 @@
import { PackageId, ServiceInterfaceId, ServiceInterfaceType } from '../types' import { PackageId, ServiceInterfaceId, ServiceInterfaceType } from '../types'
import { knownProtocols } from '../interfaces/Host' import { knownProtocols } from '../interfaces/Host'
import { import { AddressInfo, Host, Hostname, HostnameInfo } from '../types'
AddressInfo,
DerivedAddressInfo,
Host,
Hostname,
HostnameInfo,
} from '../types'
import { Effects } from '../Effects' import { Effects } from '../Effects'
import { DropGenerator, DropPromise } from './Drop' import { DropGenerator, DropPromise } from './Drop'
import { IpAddress, IPV6_LINK_LOCAL } from './ip' import { IpAddress, IPV6_LINK_LOCAL } from './ip'
@@ -26,6 +20,7 @@ export const getHostname = (url: string): Hostname | null => {
} }
type FilterKinds = type FilterKinds =
| 'onion'
| 'mdns' | 'mdns'
| 'domain' | 'domain'
| 'ip' | 'ip'
@@ -47,26 +42,27 @@ type VisibilityFilter<V extends 'public' | 'private'> = V extends 'public'
| (HostnameInfo & { public: false }) | (HostnameInfo & { public: false })
| VisibilityFilter<Exclude<V, 'private'>> | VisibilityFilter<Exclude<V, 'private'>>
: never : never
type KindFilter<K extends FilterKinds> = K extends 'mdns' type KindFilter<K extends FilterKinds> = K extends 'onion'
? ? (HostnameInfo & { kind: 'onion' }) | KindFilter<Exclude<K, 'onion'>>
| (HostnameInfo & { metadata: { kind: 'private-domain' } }) : K extends 'mdns'
| KindFilter<Exclude<K, 'mdns'>>
: K extends 'domain'
? ?
| (HostnameInfo & { metadata: { kind: 'private-domain' } }) | (HostnameInfo & { kind: 'ip'; hostname: { kind: 'local' } })
| (HostnameInfo & { metadata: { kind: 'public-domain' } }) | KindFilter<Exclude<K, 'mdns'>>
| KindFilter<Exclude<K, 'domain'>> : K extends 'domain'
: K extends 'ipv4'
? ?
| (HostnameInfo & { metadata: { kind: 'ipv4' } }) | (HostnameInfo & { kind: 'ip'; hostname: { kind: 'domain' } })
| KindFilter<Exclude<K, 'ipv4'>> | KindFilter<Exclude<K, 'domain'>>
: K extends 'ipv6' : K extends 'ipv4'
? ?
| (HostnameInfo & { metadata: { kind: 'ipv6' } }) | (HostnameInfo & { kind: 'ip'; hostname: { kind: 'ipv4' } })
| KindFilter<Exclude<K, 'ipv6'>> | KindFilter<Exclude<K, 'ipv4'>>
: K extends 'ip' : K extends 'ipv6'
? KindFilter<Exclude<K, 'ip'> | 'ipv4' | 'ipv6'> ?
: never | (HostnameInfo & { kind: 'ip'; hostname: { kind: 'ipv6' } })
| KindFilter<Exclude<K, 'ipv6'>>
: K extends 'ip'
? KindFilter<Exclude<K, 'ip'> | 'ipv4' | 'ipv6'>
: never
type FilterReturnTy<F extends Filter> = F extends { type FilterReturnTy<F extends Filter> = F extends {
visibility: infer V extends 'public' | 'private' visibility: infer V extends 'public' | 'private'
@@ -94,6 +90,10 @@ const nonLocalFilter = {
const publicFilter = { const publicFilter = {
visibility: 'public', visibility: 'public',
} as const } as const
const onionFilter = {
kind: 'onion',
} as const
type Formats = 'hostname-info' | 'urlstring' | 'url' type Formats = 'hostname-info' | 'urlstring' | 'url'
type FormatReturnTy< type FormatReturnTy<
F extends Filter, F extends Filter,
@@ -109,7 +109,10 @@ type FormatReturnTy<
export type Filled<F extends Filter = {}> = { export type Filled<F extends Filter = {}> = {
hostnames: HostnameInfo[] hostnames: HostnameInfo[]
toUrl: (h: HostnameInfo) => UrlString toUrls: (h: HostnameInfo) => {
url: UrlString | null
sslUrl: UrlString | null
}
format: <Format extends Formats = 'urlstring'>( format: <Format extends Formats = 'urlstring'>(
format?: Format, format?: Format,
@@ -121,6 +124,7 @@ export type Filled<F extends Filter = {}> = {
nonLocal: Filled<typeof nonLocalFilter & Filter> nonLocal: Filled<typeof nonLocalFilter & Filter>
public: Filled<typeof publicFilter & Filter> public: Filled<typeof publicFilter & Filter>
onion: Filled<typeof onionFilter & Filter>
} }
export type FilledAddressInfo = AddressInfo & Filled export type FilledAddressInfo = AddressInfo & Filled
export type ServiceInterfaceFilled = { export type ServiceInterfaceFilled = {
@@ -150,29 +154,41 @@ const unique = <A>(values: A[]) => Array.from(new Set(values))
export const addressHostToUrl = ( export const addressHostToUrl = (
{ scheme, sslScheme, username, suffix }: AddressInfo, { scheme, sslScheme, username, suffix }: AddressInfo,
hostname: HostnameInfo, hostname: HostnameInfo,
): UrlString => { ): { url: UrlString | null; sslUrl: UrlString | null } => {
const effectiveScheme = hostname.ssl ? sslScheme : scheme const res = []
let host: string const fmt = (scheme: string | null, host: HostnameInfo, port: number) => {
if (hostname.metadata.kind === 'ipv6') {
host = IPV6_LINK_LOCAL.contains(hostname.host)
? `[${hostname.host}%${hostname.metadata.scopeId}]`
: `[${hostname.host}]`
} else {
host = hostname.host
}
let portStr = ''
if (hostname.port !== null) {
const excludePort = const excludePort =
effectiveScheme && scheme &&
effectiveScheme in knownProtocols && scheme in knownProtocols &&
hostname.port === port === knownProtocols[scheme as keyof typeof knownProtocols].defaultPort
knownProtocols[effectiveScheme as keyof typeof knownProtocols] let hostname
.defaultPort if (host.kind === 'onion') {
if (!excludePort) portStr = `:${hostname.port}` hostname = host.hostname.value
} else if (host.kind === 'ip') {
if (host.hostname.kind === 'domain') {
hostname = host.hostname.value
} else if (host.hostname.kind === 'ipv6') {
hostname = IPV6_LINK_LOCAL.contains(host.hostname.value)
? `[${host.hostname.value}%${host.hostname.scopeId}]`
: `[${host.hostname.value}]`
} else {
hostname = host.hostname.value
}
}
return `${scheme ? `${scheme}://` : ''}${
username ? `${username}@` : ''
}${hostname}${excludePort ? '' : `:${port}`}${suffix}`
} }
return `${effectiveScheme ? `${effectiveScheme}://` : ''}${ let url = null
username ? `${username}@` : '' if (hostname.hostname.port !== null) {
}${host}${portStr}${suffix}` url = fmt(scheme, hostname, hostname.hostname.port)
}
let sslUrl = null
if (hostname.hostname.sslPort !== null) {
sslUrl = fmt(sslScheme, hostname, hostname.hostname.sslPort)
}
return { url, sslUrl }
} }
function filterRec( function filterRec(
@@ -185,9 +201,13 @@ function filterRec(
hostnames = hostnames.filter((h) => invert !== pred(h)) hostnames = hostnames.filter((h) => invert !== pred(h))
} }
if (filter.visibility === 'public') if (filter.visibility === 'public')
hostnames = hostnames.filter((h) => invert !== h.public) hostnames = hostnames.filter(
(h) => invert !== (h.kind === 'onion' || h.public),
)
if (filter.visibility === 'private') if (filter.visibility === 'private')
hostnames = hostnames.filter((h) => invert !== !h.public) hostnames = hostnames.filter(
(h) => invert !== (h.kind !== 'onion' && !h.public),
)
if (filter.kind) { if (filter.kind) {
const kind = new Set( const kind = new Set(
Array.isArray(filter.kind) ? filter.kind : [filter.kind], Array.isArray(filter.kind) ? filter.kind : [filter.kind],
@@ -199,19 +219,21 @@ function filterRec(
hostnames = hostnames.filter( hostnames = hostnames.filter(
(h) => (h) =>
invert !== invert !==
((kind.has('mdns') && ((kind.has('onion') && h.kind === 'onion') ||
h.metadata.kind === 'private-domain' && (kind.has('mdns') &&
h.host.endsWith('.local')) || h.kind === 'ip' &&
h.hostname.kind === 'local') ||
(kind.has('domain') && (kind.has('domain') &&
(h.metadata.kind === 'private-domain' || h.kind === 'ip' &&
h.metadata.kind === 'public-domain')) || h.hostname.kind === 'domain') ||
(kind.has('ipv4') && h.metadata.kind === 'ipv4') || (kind.has('ipv4') && h.kind === 'ip' && h.hostname.kind === 'ipv4') ||
(kind.has('ipv6') && h.metadata.kind === 'ipv6') || (kind.has('ipv6') && h.kind === 'ip' && h.hostname.kind === 'ipv6') ||
(kind.has('localhost') && (kind.has('localhost') &&
['localhost', '127.0.0.1', '::1'].includes(h.host)) || ['localhost', '127.0.0.1', '::1'].includes(h.hostname.value)) ||
(kind.has('link-local') && (kind.has('link-local') &&
h.metadata.kind === 'ipv6' && h.kind === 'ip' &&
IPV6_LINK_LOCAL.contains(IpAddress.parse(h.host)))), h.hostname.kind === 'ipv6' &&
IPV6_LINK_LOCAL.contains(IpAddress.parse(h.hostname.value)))),
) )
} }
@@ -220,36 +242,16 @@ function filterRec(
return hostnames return hostnames
} }
function isPublicIp(h: HostnameInfo): boolean {
return h.public && (h.metadata.kind === 'ipv4' || h.metadata.kind === 'ipv6')
}
function enabledAddresses(addr: DerivedAddressInfo): HostnameInfo[] {
return addr.available.filter((h) => {
if (isPublicIp(h)) {
// Public IPs: disabled by default, explicitly enabled via SocketAddr string
if (h.port === null) return true
const sa =
h.metadata.kind === 'ipv6'
? `[${h.host}]:${h.port}`
: `${h.host}:${h.port}`
return addr.enabled.includes(sa)
} else {
// Everything else: enabled by default, explicitly disabled via [host, port] tuple
return !addr.disabled.some(
([host, port]) => host === h.host && port === (h.port ?? 0),
)
}
})
}
export const filledAddress = ( export const filledAddress = (
host: Host, host: Host,
addressInfo: AddressInfo, addressInfo: AddressInfo,
): FilledAddressInfo => { ): FilledAddressInfo => {
const toUrl = addressHostToUrl.bind(null, addressInfo) const toUrls = addressHostToUrl.bind(null, addressInfo)
const binding = host.bindings[addressInfo.internalPort] const toUrlArray = (h: HostnameInfo) => {
const hostnames = binding ? enabledAddresses(binding.addresses) : [] const u = toUrls(h)
return [u.url, u.sslUrl].filter((u) => u !== null)
}
const hostnames = host.hostnameInfo[addressInfo.internalPort] ?? []
function filledAddressFromHostnames<F extends Filter>( function filledAddressFromHostnames<F extends Filter>(
hostnames: HostnameInfo[], hostnames: HostnameInfo[],
@@ -264,14 +266,19 @@ export const filledAddress = (
filterRec(hostnames, publicFilter, false), filterRec(hostnames, publicFilter, false),
), ),
) )
const getOnion = once(() =>
filledAddressFromHostnames<typeof onionFilter & F>(
filterRec(hostnames, onionFilter, false),
),
)
return { return {
...addressInfo, ...addressInfo,
hostnames, hostnames,
toUrl, toUrls,
format: <Format extends Formats = 'urlstring'>(format?: Format) => { format: <Format extends Formats = 'urlstring'>(format?: Format) => {
let res: FormatReturnTy<{}, Format>[] = hostnames as any let res: FormatReturnTy<{}, Format>[] = hostnames as any
if (format === 'hostname-info') return res if (format === 'hostname-info') return res
const urls = hostnames.map(toUrl) const urls = hostnames.flatMap(toUrlArray)
if (format === 'url') res = urls.map((u) => new URL(u)) as any if (format === 'url') res = urls.map((u) => new URL(u)) as any
else res = urls as any else res = urls as any
return res return res
@@ -287,6 +294,9 @@ export const filledAddress = (
get public(): Filled<typeof publicFilter & F> { get public(): Filled<typeof publicFilter & F> {
return getPublic() return getPublic()
}, },
get onion(): Filled<typeof onionFilter & F> {
return getOnion()
},
} }
} }

View File

@@ -21,6 +21,11 @@ export const localHostname: Pattern = {
description: 'Must be a valid ".local" hostname', description: 'Must be a valid ".local" hostname',
} }
export const torHostname: Pattern = {
regex: regexes.torHostname.matches(),
description: 'Must be a valid Tor (".onion") hostname',
}
export const url: Pattern = { export const url: Pattern = {
regex: regexes.url.matches(), regex: regexes.url.matches(),
description: 'Must be a valid URL', description: 'Must be a valid URL',
@@ -31,6 +36,11 @@ export const localUrl: Pattern = {
description: 'Must be a valid ".local" URL', description: 'Must be a valid ".local" URL',
} }
export const torUrl: Pattern = {
regex: regexes.torUrl.matches(),
description: 'Must be a valid Tor (".onion") URL',
}
export const ascii: Pattern = { export const ascii: Pattern = {
regex: regexes.ascii.matches(), regex: regexes.ascii.matches(),
description: description:

View File

@@ -39,6 +39,10 @@ export const localHostname = new ComposableRegex(
/[-a-zA-Z0-9@:%._\+~#=]{1,256}\.local/, /[-a-zA-Z0-9@:%._\+~#=]{1,256}\.local/,
) )
export const torHostname = new ComposableRegex(
/[-a-zA-Z0-9@:%._\+~#=]{1,256}\.onion/,
)
// https://ihateregex.io/expr/url/ // https://ihateregex.io/expr/url/
export const url = new ComposableRegex( export const url = new ComposableRegex(
/https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()!@:%_\+.~#?&\/\/=]*)/, /https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.[a-zA-Z0-9()]{1,6}\b([-a-zA-Z0-9()!@:%_\+.~#?&\/\/=]*)/,
@@ -48,6 +52,10 @@ export const localUrl = new ComposableRegex(
/https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.local\b([-a-zA-Z0-9()!@:%_\+.~#?&\/\/=]*)/, /https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.local\b([-a-zA-Z0-9()!@:%_\+.~#?&\/\/=]*)/,
) )
export const torUrl = new ComposableRegex(
/https?:\/\/(www\.)?[-a-zA-Z0-9@:%._\+~#=]{1,256}\.onion\b([-a-zA-Z0-9()!@:%_\+.~#?&\/\/=]*)/,
)
// https://ihateregex.io/expr/ascii/ // https://ihateregex.io/expr/ascii/
export const ascii = new ComposableRegex(/[ -~]*/) export const ascii = new ComposableRegex(/[ -~]*/)

View File

@@ -67,7 +67,7 @@ import {
import { getOwnServiceInterfaces } from '../../base/lib/util/getServiceInterfaces' import { getOwnServiceInterfaces } from '../../base/lib/util/getServiceInterfaces'
import { Volumes, createVolumes } from './util/Volume' import { Volumes, createVolumes } from './util/Volume'
export const OSVersion = testTypeVersion('0.4.0-alpha.20') export const OSVersion = testTypeVersion('0.4.0-alpha.19')
// prettier-ignore // prettier-ignore
type AnyNeverCond<T extends any[], Then, Else> = type AnyNeverCond<T extends any[], Then, Else> =

Some files were not shown because too many files have changed in this diff Show More