Compare commits

..

74 Commits

Author SHA1 Message Date
Aiden McClelland
4ba55860dd Merge branch 'feat/preferred-port-design' into sdk-comments 2026-02-24 14:28:56 -07:00
Aiden McClelland
3974c09369 feat(core): refactor hostname to ServerHostnameInfo with name/hostname pair
- Rename Hostname to ServerHostnameInfo, add name + hostname fields
- Add set_hostname_rpc for changing hostname at runtime
- Migrate alpha_20: generate serverInfo.name from hostname, delete ui.name
- Extract gateway.rs helpers to fix rustfmt nesting depth issue
- Add i18n key for hostname validation error
- Update SDK bindings
2026-02-24 14:18:53 -07:00
Matt Hill
f0b41a3a4c Merge branch 'feat/preferred-port-design' of github.com:Start9Labs/start-os into sdk-comments 2026-02-24 10:28:00 -07:00
Matt Hill
86ecc4cc99 frontend support for setting and changing hostname 2026-02-24 10:27:22 -07:00
Matt Hill
d1162272f0 clean up prefill flow 2026-02-24 07:19:56 -07:00
Matt Hill
13ac469ed0 Merge branch 'feat/preferred-port-design' of github.com:Start9Labs/start-os into sdk-comments 2026-02-23 18:14:06 -07:00
Matt Hill
5294e8f444 minor cleanup from patch-db audit 2026-02-23 18:13:33 -07:00
Aiden McClelland
b7da7cd59f fix(core): preserve plugin URLs across binding updates
BindInfo::update was replacing addresses with a new DerivedAddressInfo
that cleared the available set, wiping plugin-exported URLs whenever
bind() was called. Also simplify update_addresses plugin preservation
to use retain in place rather than collecting into a separate set.
2026-02-23 17:56:15 -07:00
Matt Hill
c196f250f6 Merge branch 'feat/preferred-port-design' of github.com:Start9Labs/start-os into sdk-comments 2026-02-23 17:00:23 -07:00
Matt Hill
bee8a0f9d8 send prefill for tasks and hide operations to hidden fields 2026-02-23 16:59:21 -07:00
Matt Hill
8213e45b85 Merge branch 'feat/preferred-port-design' of github.com:Start9Labs/start-os into sdk-comments 2026-02-23 14:26:49 -07:00
Matt Hill
e9b9925c0e rework smtp 2026-02-23 14:25:51 -07:00
Aiden McClelland
0724989792 feat(core): allow setting server hostname 2026-02-23 13:35:34 -07:00
Matt Hill
804560d43c add comments to everything potentially consumer facing 2026-02-21 20:40:45 -07:00
Aiden McClelland
31352a72c3 chore: migrate from ts-matches to zod across all TypeScript packages 2026-02-20 16:24:35 -07:00
Aiden McClelland
c7a4f0f9cb feat: tor hidden service key migration 2026-02-20 16:10:42 -07:00
Aiden McClelland
7879668c40 chore: remove completed TODO 2026-02-20 16:09:19 -07:00
Matt Hill
6a01b5eab1 frontend start-tunnel updates 2026-02-20 13:33:18 -07:00
Aiden McClelland
80cb2d9ba5 feat: add getOutboundGateway effect and simplify VersionGraph init/uninit
Add getOutboundGateway effect across core, container-runtime, and SDK
to let services query their effective outbound gateway with callback
support. Remove preInstall/uninstall hooks from VersionGraph as they
are no longer needed.
2026-02-20 13:26:45 -07:00
Aiden McClelland
8c1a452742 chore: replace OTA updates TODO with UI TODO for MattDHill 2026-02-19 23:28:20 -07:00
Aiden McClelland
135afd0251 fix: publish script dpkg-name, s3cfg fallback, and --reinstall for apply 2026-02-19 23:26:16 -07:00
Aiden McClelland
35f3274f29 feat: OTA updates for start-tunnel via apt repository (untested)
- Add apt repo publish script (build/apt/publish-deb.sh) for S3-hosted repo
- Add apt source config and GPG key placeholder (apt/)
- Add tunnel.update.check and tunnel.update.apply RPC endpoints
- Wire up update API in tunnel frontend (api service + mock)
- Uses systemd-run --scope to survive service restart during update
2026-02-19 22:38:39 -07:00
Aiden McClelland
9af5b87c92 chore: remove completed URL plugins TODO 2026-02-19 21:40:36 -07:00
Aiden McClelland
66b5bc1897 fix: propagate host locale into LXC containers and write locale.conf 2026-02-19 21:39:37 -07:00
Aiden McClelland
7909941b70 feat: builder-style InputSpec API, prefill plumbing, and port forward fix
- Add addKey() and add() builder methods to InputSpec with InputSpecTools
- Move OuterType to last generic param on Value, List, and all dynamic methods
- Plumb prefill through getActionInput end-to-end (core → container-runtime → SDK)
- Filter port_forwards to enabled addresses only
- Bump SDK to 0.4.0-beta.50
2026-02-19 16:44:44 -07:00
Aiden McClelland
4527046f2e feat: NAT hairpinning, DNS static servers, clear service error on install
- Add POSTROUTING MASQUERADE rules for container and host hairpin NAT
- Allow bridge subnet containers to reach private forwards via LAN IPs
- Pass bridge_subnet env var from forward.rs to forward-port script
- Use DB-configured static DNS servers in resolver with DB watcher
- Fall back to resolv.conf servers when no static servers configured
- Clear service error state when install/update completes successfully
- Remove completed TODO items
2026-02-19 15:27:52 -07:00
Matt Hill
5a292e6e2a show table even when no addresses 2026-02-19 12:01:34 -07:00
Matt Hill
84149be3c1 touch up URL plugins table 2026-02-19 11:41:41 -07:00
Aiden McClelland
d562466fc4 feat: split row_actions into remove_action and overflow_actions for URL plugins 2026-02-18 18:18:53 -07:00
Aiden McClelland
9c3053f103 feat: implement URL plugins with table/row actions and prefill support
- Add URL plugin effects (register, export_url, clear_urls) in core
- Add PluginHostnameInfo, HostnameMetadata::Plugin, and plugin registration types
- Implement plugin URL table in web UI with tableAction button and rowAction overflow menus
- Thread urlPluginMetadata (packageId, hostId, interfaceId, internalPort) as prefill to actions
- Add prefill support to PackageActionData so metadata passes through form dialogs
- Add i18n translations for plugin error messages
- Clean up plugin URLs on package uninstall
2026-02-18 17:51:13 -07:00
Matt Hill
dce975410f interface row clickable again, but now with a chevron! 2026-02-18 17:11:57 -07:00
Matt Hill
783ce4b3b6 version instead of os query param 2026-02-18 14:41:03 -07:00
Aiden McClelland
675a03bdc5 chore: add TODOs for URL plugins, NAT hairpinning, and start-tunnel OTA updates 2026-02-17 23:41:39 -07:00
Matt Hill
485fced691 round out dns check, dns server check, port forward check, and gateway port forwards 2026-02-17 23:31:47 -07:00
Aiden McClelland
a22707c1cb chore: add TODO to clear service error state on install/update 2026-02-17 19:05:19 -07:00
Aiden McClelland
74e10ec473 chore: add createTask decoupling TODO 2026-02-17 19:03:35 -07:00
Aiden McClelland
e25e0f0c12 chore: bump sdk version to 0.4.0-beta.49 2026-02-17 18:59:41 -07:00
Aiden McClelland
4cae00cb33 refactor: rename manifest metadata fields and improve error display
Rename wrapperRepo→packageRepo, marketingSite→marketingUrl,
docsUrl→docsUrls (array), remove supportSite. Add display_src/display_dbg
helpers to Error. Fix DepInfo description type to LocaleString. Update
web UI, SDK bindings, tests, and fixtures to match. Clean up cli_attach
error handling and remove dead commented code.
2026-02-17 18:40:50 -07:00
Aiden McClelland
313b2df540 feat: add check-dns gateway endpoint and fix per-interface routing tables
Add a `check-dns` RPC endpoint that verifies whether a gateway's DNS
is properly configured for private domain resolution. Uses a three-tier
check: direct match (DNS == server IP), TXT challenge probe (DNS on
LAN), or failure (DNS off-subnet).

Fix per-interface routing tables to clone all non-default routes from
the main table instead of only the interface's own subnets. This
preserves LAN reachability when the priority-75 catch-all overrides
default routing. Filter out status-only flags (linkdown, dead) that
are invalid for `ip route add`.
2026-02-17 16:22:24 -07:00
Aiden McClelland
5fbc73755d fix: replace .status() with .invoke() for iptables/ip commands
Using .status() leaks stderr directly to system logs, causing noisy
iptables error messages. Switch all networking CLI invocations to use
.invoke() which captures stderr properly. For check-then-act patterns
(iptables -C), use .invoke().await.is_err() instead of
.status().await.map_or(false, |s| s.success()).
2026-02-17 14:12:29 -07:00
Aiden McClelland
bc4478b0b9 refactor: manifest wraps PackageMetadata, move dependency_metadata to PackageVersionInfo
Manifest now embeds PackageMetadata via #[serde(flatten)] instead of
duplicating ~14 fields. icon and dependency_metadata moved from
PackageMetadata to PackageVersionInfo since they are registry-enrichment
data loaded from the S9PK archive. merge_with now returns errors on
metadata/icon/dependency_metadata mismatches instead of silently ignoring
them.
2026-02-17 14:12:14 -07:00
Aiden McClelland
68141112b7 feat: per-service and default outbound gateway routing
Add set-outbound-gateway RPC for packages and set-default-outbound RPC
for the server, with policy routing enforcement via ip rules. Fix
connmark restore to skip packets with existing fwmarks, add bridge
subnet routes to per-interface tables, and fix squashfs path in
update-image-local.sh.
2026-02-17 12:52:24 -07:00
Aiden McClelland
ccafb599a6 chore: update bindings and use typed params for outbound gateway API 2026-02-17 12:31:35 -07:00
Aiden McClelland
52272feb3e fix: switch BackgroundJobRunner from Vec to FuturesUnordered
BackgroundJobRunner stored active jobs in a Vec<BoxFuture> and polled
ALL of them on every wakeup — O(n) per poll. Since this runs in the
same tokio::select! as the WebServer accept loop, polling overhead from
active connections directly delayed acceptance of new connections.

FuturesUnordered only polls woken futures — O(woken) instead of O(n).
2026-02-16 22:02:59 -07:00
Aiden McClelland
1abad93646 fix: add TLS handshake timeout and fix accept loop deadlock
Two issues in TlsListener::poll_accept:

1. No timeout on TLS handshakes: LazyConfigAcceptor waits indefinitely
   for ClientHello. Attackers that complete TCP handshake but never send
   TLS data create zombie futures in `in_progress` that never complete.
   Fix: wrap the entire handshake in tokio::time::timeout(15s).

2. Missing waker on new-connection pending path: when a TCP connection
   is accepted and the TLS handshake is pending, poll_accept returned
   Pending without calling wake_by_ref(). Since the TcpListener returned
   Ready (not Pending), no waker was registered for it. With edge-
   triggered epoll and no other wakeup source, the task sleeps forever
   and remaining connections in the kernel accept queue are never
   drained. Fix: add cx.waker().wake_by_ref() so the task immediately
   re-polls and continues draining the accept queue.
2026-02-16 21:52:12 -07:00
Aiden McClelland
c9468dda02 fix: include public gateways for IP-based addresses in vhost targets
The server hostname vhost construction only collected private IPs,
always setting public to empty. Public IP addresses (Ipv4/Ipv6 metadata
with public=true) were never added to the vhost target's public gateway
set, causing the vhost filter to reject public traffic for IP-based
addresses.
2026-02-16 19:45:10 -07:00
Aiden McClelland
6a1b1627c5 chore: reserialize db on equal version, update bindings and docs
- Run de/ser roundtrip in pre_init even when db version matches, ensuring
  all #[serde(default)] fields are populated before any typed access
- Add patchdb.md documentation for TypedDbWatch patterns
- Update TS bindings for CheckPortParams, CheckPortRes, ifconfigUrl
- Update CLAUDE.md docs with patchdb and component-level references
2026-02-16 19:27:48 -07:00
Aiden McClelland
cfbace1d91 fix: add CONNMARK restore-mark to mangle OUTPUT chain
The CONNMARK --restore-mark rule was only in PREROUTING, which handles
forwarded packets. Locally-bound listeners (e.g. vhost) generate replies
through the OUTPUT chain, where the fwmark was never restored. This
caused response packets to route via the default table instead of back
through the originating interface.
2026-02-16 19:22:07 -07:00
Matt Hill
d97ab59bab update bindings for API types, add ARCHITECTURE (#3124)
* update binding for API types, add ARCHITECTURE

* translations
2026-02-16 16:23:28 +01:00
Aiden McClelland
3518eccc87 feat: add port_forwards field to Host for tracking gateway forwarding rules 2026-02-14 16:40:21 -07:00
Matt Hill
2f19188dae looking good 2026-02-14 16:37:04 -07:00
Aiden McClelland
3a63f3b840 feat: add mdns hostname metadata variant and fix vhost routing
- Add HostnameMetadata::Mdns variant to distinguish mDNS from private domains
- Mark mDNS addresses as private (public: false) since mDNS is local-only
- Fall back to null SNI entry when hostname not found in vhost mapping
- Simplify public detection in ProxyTarget filter
- Pass hostname to update_addresses for mDNS domain name generation
2026-02-14 15:34:48 -07:00
Matt Hill
098d9275f4 new service interface page 2026-02-14 12:24:16 -07:00
Matt Hill
d5c74bc22e re-arrange (#3123) 2026-02-14 08:15:50 -07:00
Aiden McClelland
49d4da03ca feat: refactor NetService to watch DB and reconcile network state
- NetService sync task now uses PatchDB DbWatch instead of being called
  directly after DB mutations
- Read gateways from DB instead of network interface context when
  updating host addresses
- gateway sync updates all host addresses in the DB
- Add Watch<u64> channel for callers to wait on sync completion
- Fix ts-rs codegen bug with #[ts(skip)] on flattened Plugin field
- Update SDK getServiceInterface.ts for new HostnameInfo shape
- Remove unnecessary HTTPS redirect in static_server.rs
- Fix tunnel/api.rs to filter for WAN IPv4 address
2026-02-13 16:21:57 -07:00
Aiden McClelland
3765465618 chore: update ts bindings for preferred port design 2026-02-13 14:23:48 -07:00
Aiden McClelland
61f820d09e Merge branch 'feat/preferred-port-design' of github.com:Start9Labs/start-os into feat/preferred-port-design 2026-02-13 13:39:25 -07:00
Aiden McClelland
db7f3341ac wip refactor 2026-02-12 14:51:33 -07:00
Matt Hill
4decf9335c fix license display in marketplace 2026-02-12 13:07:19 -07:00
Matt Hill
339e5f799a build ts types and fix i18n 2026-02-12 11:32:29 -07:00
Aiden McClelland
89d3e0cf35 Merge branch 'feat/preferred-port-design' of github.com:Start9Labs/start-os into feat/preferred-port-design 2026-02-12 10:51:32 -07:00
Aiden McClelland
638ed27599 feat: replace SourceFilter with IpNet, add policy routing, remove MASQUERADE 2026-02-12 10:51:26 -07:00
Matt Hill
da75b8498e Merge branch 'next/major' of github.com:Start9Labs/start-os into feat/preferred-port-design 2026-02-12 08:28:36 -07:00
Matt Hill
8ef4ecf5ac outbound gateway support (#3120)
* Multiple (#3111)

* fix alerts i18n, fix status display, better, remove usb media, hide shutdown for install complete

* trigger change detection for localize pipe and round out implementing localize pipe for consistency even though not needed

* Fix PackageInfoShort to handle LocaleString on releaseNotes (#3112)

* Fix PackageInfoShort to handle LocaleString on releaseNotes

* fix: filter by target_version in get_matching_models and pass otherVersions from install

* chore: add exver documentation for ai agents

* frontend plus some be types

---------

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
2026-02-12 08:27:09 -07:00
Aiden McClelland
2a54625f43 feat: replace InterfaceFilter with ForwardRequirements, add WildcardListener, complete alpha.20 bump
- Replace DynInterfaceFilter with ForwardRequirements for per-IP forward
  precision with source-subnet iptables filtering for private forwards
- Add WildcardListener (binds [::]:port) to replace the per-gateway
  NetworkInterfaceListener/SelfContainedNetworkInterfaceListener/
  UpgradableListener infrastructure
- Update forward-port script with src_subnet and excluded_src env vars
- Remove unused filter types and listener infrastructure from gateway.rs
- Add availablePorts migration (IdPool -> BTreeMap<u16, bool>) to alpha.20
- Complete version bump to 0.4.0-alpha.20 in SDK and web
2026-02-11 18:10:27 -07:00
Aiden McClelland
4e638fb58e feat: implement preferred port allocation and per-address enable/disable
- Add AvailablePorts::try_alloc() with SSL tracking (BTreeMap<u16, bool>)
- Add DerivedAddressInfo on BindInfo with private_disabled/public_enabled/possible sets
- Add Bindings wrapper with Map impl for patchdb indexed access
- Flatten HostAddress from single-variant enum to struct
- Replace set-gateway-enabled RPC with set-address-enabled
- Remove hostname_info from Host; computed addresses now in BindInfo.addresses.possible
- Compute possible addresses inline in NetServiceData::update()
- Update DB migration, SDK types, frontend, and container-runtime
2026-02-10 17:38:51 -07:00
Aiden McClelland
73274ef6e0 docs: update TODO.md with DerivedAddressInfo design, remove completed tor task 2026-02-10 14:45:50 -07:00
Aiden McClelland
e1915bf497 chore: format RPCSpec.md markdown table 2026-02-10 13:38:40 -07:00
Aiden McClelland
8204074bdf chore: flatten HostnameInfo from enum to struct
HostnameInfo only had one variant (Ip) after removing Tor. Flatten it
into a plain struct with fields gateway, public, hostname. Remove all
kind === 'ip' type guards and narrowing across SDK, frontend, and
container runtime. Update DB migration to strip the kind field.
2026-02-10 13:38:12 -07:00
Aiden McClelland
2ee403e7de chore: remove tor from startos core
Tor is being moved from a built-in OS feature to a service. This removes
the Arti-based Tor client, onion address management, hidden service
creation, and all related code from the core backend, frontend, and SDK.

- Delete core/src/net/tor/ module (~2060 lines)
- Remove OnionAddress, TorSecretKey, TorController from all consumers
- Remove HostnameInfo::Onion and HostAddress::Onion variants
- Remove onion CRUD RPC endpoints and tor subcommand
- Remove tor key handling from account and backup/restore
- Remove ~12 tor-related Cargo dependencies (arti-client, torut, etc.)
- Remove tor UI components, API methods, mock data, and routes
- Remove OnionHostname and tor patterns/regexes from SDK
- Add v0_4_0_alpha_20 database migration to strip onion data
- Bump version to 0.4.0-alpha.20
2026-02-10 13:28:24 -07:00
Aiden McClelland
1974dfd66f docs: move address enable/disable to overflow menu, add SSL indicator, defer UI placement decisions 2026-02-09 13:29:49 -07:00
Aiden McClelland
2e03a95e47 docs: overhaul interfaces page design with view/manage split and per-address controls 2026-02-09 13:10:57 -07:00
Aiden McClelland
8f809dab21 docs: add user-controlled public/private and port forward mapping to design 2026-02-08 11:17:43 -07:00
Aiden McClelland
c0b2cbe1c8 docs: update preferred external port design in TODO 2026-02-06 09:30:35 -07:00
257 changed files with 12801 additions and 6982 deletions

View File

@@ -25,13 +25,10 @@ on:
- ALL
- x86_64
- x86_64-nonfree
- x86_64-nvidia
- aarch64
- aarch64-nonfree
- aarch64-nvidia
# - raspberrypi
- riscv64
- riscv64-nonfree
deploy:
type: choice
description: Deploy
@@ -68,13 +65,10 @@ jobs:
fromJson('{
"x86_64": ["x86_64"],
"x86_64-nonfree": ["x86_64"],
"x86_64-nvidia": ["x86_64"],
"aarch64": ["aarch64"],
"aarch64-nonfree": ["aarch64"],
"aarch64-nvidia": ["aarch64"],
"raspberrypi": ["aarch64"],
"riscv64": ["riscv64"],
"riscv64-nonfree": ["riscv64"],
"ALL": ["x86_64", "aarch64", "riscv64"]
}')[github.event.inputs.platform || 'ALL']
}}
@@ -131,7 +125,7 @@ jobs:
format(
'[
["{0}"],
["x86_64", "x86_64-nonfree", "x86_64-nvidia", "aarch64", "aarch64-nonfree", "aarch64-nvidia", "riscv64", "riscv64-nonfree"]
["x86_64", "x86_64-nonfree", "aarch64", "aarch64-nonfree", "riscv64"]
]',
github.event.inputs.platform || 'ALL'
)
@@ -145,24 +139,18 @@ jobs:
fromJson('{
"x86_64": "ubuntu-latest",
"x86_64-nonfree": "ubuntu-latest",
"x86_64-nvidia": "ubuntu-latest",
"aarch64": "ubuntu-24.04-arm",
"aarch64-nonfree": "ubuntu-24.04-arm",
"aarch64-nvidia": "ubuntu-24.04-arm",
"raspberrypi": "ubuntu-24.04-arm",
"riscv64": "ubuntu-24.04-arm",
"riscv64-nonfree": "ubuntu-24.04-arm",
}')[matrix.platform],
fromJson('{
"x86_64": "buildjet-8vcpu-ubuntu-2204",
"x86_64-nonfree": "buildjet-8vcpu-ubuntu-2204",
"x86_64-nvidia": "buildjet-8vcpu-ubuntu-2204",
"aarch64": "buildjet-8vcpu-ubuntu-2204-arm",
"aarch64-nonfree": "buildjet-8vcpu-ubuntu-2204-arm",
"aarch64-nvidia": "buildjet-8vcpu-ubuntu-2204-arm",
"raspberrypi": "buildjet-8vcpu-ubuntu-2204-arm",
"riscv64": "buildjet-8vcpu-ubuntu-2204",
"riscv64-nonfree": "buildjet-8vcpu-ubuntu-2204",
}')[matrix.platform]
)
)[github.event.inputs.runner == 'fast']
@@ -173,13 +161,10 @@ jobs:
fromJson('{
"x86_64": "x86_64",
"x86_64-nonfree": "x86_64",
"x86_64-nvidia": "x86_64",
"aarch64": "aarch64",
"aarch64-nonfree": "aarch64",
"aarch64-nvidia": "aarch64",
"raspberrypi": "aarch64",
"riscv64": "riscv64",
"riscv64-nonfree": "riscv64",
}')[matrix.platform]
}}
steps:

View File

@@ -11,14 +11,12 @@ Each major component has its own `CLAUDE.md` with detailed guidance: `core/`, `w
## Build & Development
See [CONTRIBUTING.md](CONTRIBUTING.md) for:
- Environment setup and requirements
- Build commands and make targets
- Testing and formatting commands
- Environment variables
**Quick reference:**
```bash
. ./devmode.sh # Enable dev mode
make update-startbox REMOTE=start9@<ip> # Fastest iteration (binary + UI)
@@ -30,7 +28,6 @@ make test-core # Run Rust tests
- Always verify cross-layer changes using the order described in [ARCHITECTURE.md](ARCHITECTURE.md#cross-layer-verification)
- Check component-level CLAUDE.md files for component-specific conventions. ALWAYS read it before operating on that component.
- Follow existing patterns before inventing new ones
- Always use `make` recipes when they exist for testing builds rather than manually invoking build commands
## Supplementary Documentation
@@ -50,7 +47,6 @@ On startup:
1. **Check for `docs/USER.md`** - If it doesn't exist, prompt the user for their name/identifier and create it. This file is gitignored since it varies per developer.
2. **Check `docs/TODO.md` for relevant tasks** - Show TODOs that either:
- Have no `@username` tag (relevant to everyone)
- Are tagged with the current user's identifier

View File

@@ -7,7 +7,7 @@ GIT_HASH_FILE := $(shell ./build/env/check-git-hash.sh)
VERSION_FILE := $(shell ./build/env/check-version.sh)
BASENAME := $(shell PROJECT=startos ./build/env/basename.sh)
PLATFORM := $(shell if [ -f $(PLATFORM_FILE) ]; then cat $(PLATFORM_FILE); else echo unknown; fi)
ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; elif [ "$(PLATFORM)" = "rockchip64" ]; then echo aarch64; else echo $(PLATFORM) | sed 's/-nonfree$$//g; s/-nvidia$$//g'; fi)
ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; else echo $(PLATFORM) | sed 's/-nonfree$$//g'; fi)
RUST_ARCH := $(shell if [ "$(ARCH)" = "riscv64" ]; then echo riscv64gc; else echo $(ARCH); fi)
REGISTRY_BASENAME := $(shell PROJECT=start-registry PLATFORM=$(ARCH) ./build/env/basename.sh)
TUNNEL_BASENAME := $(shell PROJECT=start-tunnel PLATFORM=$(ARCH) ./build/env/basename.sh)

View File

@@ -52,7 +52,7 @@ The easiest path. [Buy a server](https://store.start9.com) from Start9 and plug
### Build your own
Follow the [install guide](https://docs.start9.com/start-os/installing.html) to install StartOS on your own hardware. . Reasons to go this route:
Install StartOS on your own hardware. Follow one of the [DIY guides](https://start9.com/latest/diy). Reasons to go this route:
1. You already have compatible hardware
2. You want to save on shipping costs

View File

@@ -12,10 +12,6 @@ fi
if [[ "$PLATFORM" =~ -nonfree$ ]]; then
FEATURES+=("nonfree")
fi
if [[ "$PLATFORM" =~ -nvidia$ ]]; then
FEATURES+=("nonfree")
FEATURES+=("nvidia")
fi
feature_file_checker='
/^#/ { next }

View File

@@ -4,4 +4,7 @@
+ firmware-iwlwifi
+ firmware-libertas
+ firmware-misc-nonfree
+ firmware-realtek
+ firmware-realtek
+ nvidia-container-toolkit
# + nvidia-driver
# + nvidia-kernel-dkms

View File

@@ -1 +0,0 @@
+ nvidia-container-toolkit

View File

@@ -34,11 +34,11 @@ fi
IMAGE_BASENAME=startos-${VERSION_FULL}_${IB_TARGET_PLATFORM}
BOOTLOADERS=grub-efi
if [ "$IB_TARGET_PLATFORM" = "x86_64" ] || [ "$IB_TARGET_PLATFORM" = "x86_64-nonfree" ] || [ "$IB_TARGET_PLATFORM" = "x86_64-nvidia" ]; then
if [ "$IB_TARGET_PLATFORM" = "x86_64" ] || [ "$IB_TARGET_PLATFORM" = "x86_64-nonfree" ]; then
IB_TARGET_ARCH=amd64
QEMU_ARCH=x86_64
BOOTLOADERS=grub-efi,syslinux
elif [ "$IB_TARGET_PLATFORM" = "aarch64" ] || [ "$IB_TARGET_PLATFORM" = "aarch64-nonfree" ] || [ "$IB_TARGET_PLATFORM" = "aarch64-nvidia" ] || [ "$IB_TARGET_PLATFORM" = "raspberrypi" ] || [ "$IB_TARGET_PLATFORM" = "rockchip64" ]; then
elif [ "$IB_TARGET_PLATFORM" = "aarch64" ] || [ "$IB_TARGET_PLATFORM" = "aarch64-nonfree" ] || [ "$IB_TARGET_PLATFORM" = "raspberrypi" ] || [ "$IB_TARGET_PLATFORM" = "rockchip64" ]; then
IB_TARGET_ARCH=arm64
QEMU_ARCH=aarch64
elif [ "$IB_TARGET_PLATFORM" = "riscv64" ] || [ "$IB_TARGET_PLATFORM" = "riscv64-nonfree" ]; then
@@ -60,13 +60,9 @@ mkdir -p $prep_results_dir
cd $prep_results_dir
NON_FREE=
if [[ "${IB_TARGET_PLATFORM}" =~ -nonfree$ ]] || [[ "${IB_TARGET_PLATFORM}" =~ -nvidia$ ]] || [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
if [[ "${IB_TARGET_PLATFORM}" =~ -nonfree$ ]] || [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
NON_FREE=1
fi
NVIDIA=
if [[ "${IB_TARGET_PLATFORM}" =~ -nvidia$ ]]; then
NVIDIA=1
fi
IMAGE_TYPE=iso
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ] || [ "${IB_TARGET_PLATFORM}" = "rockchip64" ]; then
IMAGE_TYPE=img
@@ -105,7 +101,7 @@ lb config \
--iso-preparer "START9 LABS; HTTPS://START9.COM" \
--iso-publisher "START9 LABS; HTTPS://START9.COM" \
--backports true \
--bootappend-live "boot=live noautologin console=tty0" \
--bootappend-live "boot=live noautologin" \
--bootloaders $BOOTLOADERS \
--cache false \
--mirror-bootstrap "https://deb.debian.org/debian/" \
@@ -181,7 +177,7 @@ if [ "${IB_TARGET_PLATFORM}" = "rockchip64" ]; then
echo "deb https://apt.armbian.com/ ${IB_SUITE} main" > config/archives/armbian.list
fi
if [ "$NVIDIA" = 1 ]; then
if [ "$NON_FREE" = 1 ]; then
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | gpg --dearmor -o config/archives/nvidia-container-toolkit.key
curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list \
| sed 's#deb https://#deb [signed-by=/etc/apt/trusted.gpg.d/nvidia-container-toolkit.key.gpg] https://#g' \
@@ -209,11 +205,11 @@ cat > config/hooks/normal/9000-install-startos.hook.chroot << EOF
set -e
if [ "${NVIDIA}" = "1" ]; then
if [ "${NON_FREE}" = "1" ] && [ "${IB_TARGET_PLATFORM}" != "raspberrypi" ] && [ "${IB_TARGET_PLATFORM}" != "riscv64-nonfree" ]; then
# install a specific NVIDIA driver version
# ---------------- configuration ----------------
NVIDIA_DRIVER_VERSION="\${NVIDIA_DRIVER_VERSION:-580.126.09}"
NVIDIA_DRIVER_VERSION="\${NVIDIA_DRIVER_VERSION:-580.119.02}"
BASE_URL="https://download.nvidia.com/XFree86/Linux-${QEMU_ARCH}"
@@ -263,15 +259,12 @@ if [ "${NVIDIA}" = "1" ]; then
echo "[nvidia-hook] Running NVIDIA installer for kernel \${KVER}" >&2
if ! sh "\${RUN_PATH}" \
sh "\${RUN_PATH}" \
--silent \
--kernel-name="\${KVER}" \
--no-x-check \
--no-nouveau-check \
--no-runlevel-check; then
cat /var/log/nvidia-installer.log
exit 1
fi
--no-runlevel-check
# Rebuild module metadata
echo "[nvidia-hook] Running depmod for \${KVER}" >&2

View File

@@ -68,21 +68,6 @@ fi
EOF
# Promote the USB installer boot entry back to first in EFI boot order.
# The entry number was saved during initial OS install.
if [ -d /sys/firmware/efi ] && [ -f /media/startos/config/efi-installer-entry ]; then
USB_ENTRY=$(cat /media/startos/config/efi-installer-entry)
if [ -n "$USB_ENTRY" ]; then
CURRENT_ORDER=$(efibootmgr | grep BootOrder | sed 's/BootOrder: //')
OTHER_ENTRIES=$(echo "$CURRENT_ORDER" | tr ',' '\n' | grep -v "$USB_ENTRY" | tr '\n' ',' | sed 's/,$//')
if [ -n "$OTHER_ENTRIES" ]; then
efibootmgr -o "$USB_ENTRY,$OTHER_ENTRIES"
else
efibootmgr -o "$USB_ENTRY"
fi
fi
fi
sync
umount -Rl /media/startos/next

View File

@@ -1,364 +0,0 @@
#!/bin/bash
set -e
# Release-management constants: source repo, Start9 registry endpoint,
# S3 bucket used for uploads and its public CDN mirror used in links.
REPO="Start9Labs/start-os"
REGISTRY="https://alpha-registry-x.start9.com"
S3_BUCKET="s3://startos-images"
S3_CDN="https://startos-images.nyc3.cdn.digitaloceanspaces.com"
# Start9 org GPG key ID used to sign release artifacts in cmd_sign.
START9_GPG_KEY="2D63C217"
# Platforms OS images are built for; CLI binaries are per plain arch only.
ARCHES="aarch64 aarch64-nonfree aarch64-nvidia riscv64 riscv64-nonfree x86_64 x86_64-nonfree x86_64-nvidia"
CLI_ARCHES="aarch64 riscv64 x86_64"
# Extract the numeric run ID from a GitHub Actions run URL.
# Anything that does not contain "/actions/runs/<digits>" is echoed
# back unchanged (assumed to already be a bare run ID).
parse_run_id() {
    local candidate="$1"
    local run_url_re='/actions/runs/([0-9]+)'
    if [[ "$candidate" =~ $run_url_re ]]; then
        candidate="${BASH_REMATCH[1]}"
    fi
    echo "$candidate"
}
# Ensure $VERSION is set, prompting interactively when it is not.
# Exits with status 2 if no version can be obtained.
require_version() {
if [ -z "${VERSION:-}" ]; then
read -rp "VERSION: " VERSION
if [ -z "$VERSION" ]; then
>&2 echo '$VERSION required'
exit 2
fi
fi
}
# Print the working directory used for release v$VERSION artifacts.
release_dir() {
    printf '%s\n' "$HOME/Downloads/v$VERSION"
}
# Create the release working directory (wiping any previous contents
# first when CLEAN=1) and make it the current directory.
ensure_release_dir() {
    local workdir
    workdir=$(release_dir)
    [ "$CLEAN" = "1" ] && rm -rf "$workdir"
    mkdir -p "$workdir"
    cd "$workdir"
}
# cd into the release directory for $VERSION; it must already exist
# (created by 'download' or 'pull'), otherwise abort with an error.
enter_release_dir() {
    local dir
    dir=$(release_dir)
    if ! [ -d "$dir" ]; then
        >&2 echo "Release directory $dir does not exist. Run 'download' or 'pull' first."
        exit 1
    fi
    cd "$dir"
}
# Map an (arch, os) pair to the Rust target triple used for start-cli
# release artifacts, printing the triple on stdout.
# Returns non-zero for combinations that are not built (riscv64 on macOS,
# or any os other than linux/macos) so callers can `|| continue`.
cli_target_for() {
    local arch=$1 os=$2
    local pair="${arch}-${os}"
    if [ "$pair" = "riscv64-linux" ]; then
        echo "riscv64gc-unknown-linux-musl"
    elif [ "$pair" = "riscv64-macos" ]; then
        return 1
    elif [ "$os" = "linux" ]; then
        echo "${arch}-unknown-linux-musl"
    elif [ "$os" = "macos" ]; then
        echo "${arch}-apple-darwin"
    else
        # Previously fell through with success and empty output, which let
        # callers proceed with an empty target name; fail explicitly instead.
        return 1
    fi
}
# Print the names of all release artifacts in the current directory,
# one per line: images and packages (*.iso, *.squashfs, *.deb) first,
# then start-cli binaries, skipping signature files (*.asc).
release_files() {
    local f
    for f in *.iso *.squashfs *.deb; do
        [ -f "$f" ] && echo "$f"
    done
    for f in start-cli_*; do
        case "$f" in
            *.asc) continue ;;
        esac
        [ -f "$f" ] && echo "$f"
    done
}
# Populate GH_USER (GitHub login, autodetected via `gh api user` unless
# already set) and GH_GPG_KEY (git's configured signing key). Either may
# end up empty; lookup failures are deliberately swallowed with `|| true`.
resolve_gh_user() {
GH_USER=${GH_USER:-$(gh api user -q .login 2>/dev/null || true)}
GH_GPG_KEY=$(git config user.signingkey 2>/dev/null || true)
}
# --- Subcommands ---
# Fetch freshly-built artifacts from GitHub Actions runs. Each run ID is
# prompted for when not supplied via the environment and may be a bare ID
# or a full run URL (normalized by parse_run_id); a blank entry skips
# that artifact set.
cmd_download() {
require_version
if [ -z "${RUN_ID:-}" ]; then
read -rp "RUN_ID (OS images, leave blank to skip): " RUN_ID
fi
RUN_ID=$(parse_run_id "${RUN_ID:-}")
if [ -z "${ST_RUN_ID:-}" ]; then
read -rp "ST_RUN_ID (start-tunnel, leave blank to skip): " ST_RUN_ID
fi
ST_RUN_ID=$(parse_run_id "${ST_RUN_ID:-}")
if [ -z "${CLI_RUN_ID:-}" ]; then
read -rp "CLI_RUN_ID (start-cli, leave blank to skip): " CLI_RUN_ID
fi
CLI_RUN_ID=$(parse_run_id "${CLI_RUN_ID:-}")
ensure_release_dir
if [ -n "$RUN_ID" ]; then
# Each download is retried with a 1s delay until gh succeeds.
for arch in $ARCHES; do
while ! gh run download -R $REPO "$RUN_ID" -n "$arch.squashfs" -D "$(pwd)"; do sleep 1; done
done
for arch in $ARCHES; do
while ! gh run download -R $REPO "$RUN_ID" -n "$arch.iso" -D "$(pwd)"; do sleep 1; done
done
fi
if [ -n "$ST_RUN_ID" ]; then
for arch in $CLI_ARCHES; do
while ! gh run download -R $REPO "$ST_RUN_ID" -n "start-tunnel_$arch.deb" -D "$(pwd)"; do sleep 1; done
done
fi
if [ -n "$CLI_RUN_ID" ]; then
for arch in $CLI_ARCHES; do
for os in linux macos; do
local target
# cli_target_for fails for combinations that are not built (skip them).
target=$(cli_target_for "$arch" "$os") || continue
while ! gh run download -R $REPO "$CLI_RUN_ID" -n "start-cli_$target" -D "$(pwd)"; do sleep 1; done
# The artifact extracts as plain 'start-cli'; rename it per arch/os.
mv start-cli "start-cli_${arch}-${os}"
done
done
fi
}
# Re-download an already-published release: debs and CLI binaries from
# the GitHub release assets, ISOs/squashfs images from the S3 CDN (their
# filenames are resolved from the release's asset list, falling back to
# scraping the release body).
cmd_pull() {
require_version
ensure_release_dir
echo "Downloading release assets from tag v$VERSION..."
# Download debs and CLI binaries from the GH release
for file in $(gh release view -R $REPO "v$VERSION" --json assets -q '.assets[].name' | grep -E '\.(deb)$|^start-cli_'); do
gh release download -R $REPO "v$VERSION" -p "$file" -D "$(pwd)" --clobber
done
# Download ISOs and squashfs from S3 CDN
for arch in $ARCHES; do
for ext in squashfs iso; do
# Get the actual filename from the GH release asset list or body
local filename
filename=$(gh release view -R $REPO "v$VERSION" --json assets -q ".assets[].name" | grep "_${arch}\\.${ext}$" || true)
if [ -z "$filename" ]; then
# Fallback: grep the filename out of the markdown release notes.
filename=$(gh release view -R $REPO "v$VERSION" --json body -q .body | grep -oP "[^ ]*_${arch}\\.${ext}" | head -1 || true)
fi
if [ -n "$filename" ]; then
echo "Downloading $filename from S3..."
curl -fSL -o "$filename" "$S3_CDN/v$VERSION/$filename"
fi
done
done
}
# Register $VERSION with the Start9 OS registry, advertising it as an
# upgrade target for any installed version in the range >=0.3.5.
cmd_register() {
require_version
enter_release_dir
start-cli --registry=$REGISTRY registry os version add "$VERSION" "v$VERSION" '' ">=0.3.5 <=$VERSION"
}
# Upload all release artifacts: images (*.iso, *.squashfs) go to the S3
# bucket with a public ACL (-P); everything else (debs, CLI binaries)
# is attached to the GitHub release.
cmd_upload() {
require_version
enter_release_dir
for file in $(release_files); do
case "$file" in
*.iso|*.squashfs)
s3cmd put -P "$file" "$S3_BUCKET/v$VERSION/$file"
;;
*)
gh release upload -R $REPO "v$VERSION" "$file"
;;
esac
done
}
# Add every image artifact's CDN URL to the registry index, keyed by
# platform and version.
# NOTE(review): if a glob matches nothing, the literal pattern is passed
# to start-cli — confirm artifacts exist for every arch before running.
cmd_index() {
require_version
enter_release_dir
for arch in $ARCHES; do
for file in *_"$arch".squashfs *_"$arch".iso; do
start-cli --registry=$REGISTRY registry os asset add --platform="$arch" --version="$VERSION" "$file" "$S3_CDN/v$VERSION/$file"
done
done
}
# Detach-sign every release artifact with the Start9 org key, plus the
# operator's personal key when GH_USER/GH_GPG_KEY can be resolved, then
# bundle all *.asc files (and exported public keys) into
# signatures.tar.gz and attach it to the GitHub release.
cmd_sign() {
require_version
enter_release_dir
resolve_gh_user
for file in $(release_files); do
gpg -u $START9_GPG_KEY --detach-sign --armor -o "${file}.start9.asc" "$file"
if [ -n "$GH_USER" ] && [ -n "$GH_GPG_KEY" ]; then
gpg -u "$GH_GPG_KEY" --detach-sign --armor -o "${file}.${GH_USER}.asc" "$file"
fi
done
# Export public keys alongside the signatures for verification.
gpg --export -a $START9_GPG_KEY > start9.key.asc
if [ -n "$GH_USER" ] && [ -n "$GH_GPG_KEY" ]; then
gpg --export -a "$GH_GPG_KEY" > "${GH_USER}.key.asc"
else
>&2 echo 'Warning: could not determine GitHub user or GPG signing key, skipping personal signature'
fi
tar -czvf signatures.tar.gz *.asc
gh release upload -R $REPO "v$VERSION" signatures.tar.gz --clobber
}
# Add a personal GPG signature to an already-published release: pull the
# existing signatures.tar.gz, sign every artifact with the caller's key,
# and re-upload the combined bundle. Requires GH_USER and a configured
# git signing key; run 'pull' first so the artifacts being signed are
# the ones actually released.
cmd_cosign() {
require_version
enter_release_dir
resolve_gh_user
if [ -z "$GH_USER" ] || [ -z "$GH_GPG_KEY" ]; then
>&2 echo 'Error: could not determine GitHub user or GPG signing key'
>&2 echo "Set GH_USER and/or configure git user.signingkey"
exit 1
fi
echo "Downloading existing signatures..."
gh release download -R $REPO "v$VERSION" -p "signatures.tar.gz" -D "$(pwd)" --clobber
tar -xzf signatures.tar.gz
echo "Adding personal signatures as $GH_USER..."
for file in $(release_files); do
gpg -u "$GH_GPG_KEY" --detach-sign --armor -o "${file}.${GH_USER}.asc" "$file"
done
gpg --export -a "$GH_GPG_KEY" > "${GH_USER}.key.asc"
echo "Re-packing signatures..."
tar -czvf signatures.tar.gz *.asc
gh release upload -R $REPO "v$VERSION" signatures.tar.gz --clobber
echo "Done. Personal signatures for $GH_USER added to v$VERSION."
}
# Print markdown release notes to stdout: CDN download links for each
# ISO flavor, then SHA-256 and BLAKE-3 checksums for images, the
# start-tunnel debs, and the start-cli binaries. Heredoc contents are
# the literal markdown emitted; quoted 'EOF' blocks suppress expansion.
cmd_notes() {
require_version
enter_release_dir
cat << EOF
# ISO Downloads
- [x86_64/AMD64]($S3_CDN/v$VERSION/$(ls *_x86_64-nonfree.iso))
- [x86_64/AMD64 + NVIDIA]($S3_CDN/v$VERSION/$(ls *_x86_64-nvidia.iso))
- [x86_64/AMD64-slim (FOSS-only)]($S3_CDN/v$VERSION/$(ls *_x86_64.iso) "Without proprietary software or drivers")
- [aarch64/ARM64]($S3_CDN/v$VERSION/$(ls *_aarch64-nonfree.iso))
- [aarch64/ARM64 + NVIDIA]($S3_CDN/v$VERSION/$(ls *_aarch64-nvidia.iso))
- [aarch64/ARM64-slim (FOSS-Only)]($S3_CDN/v$VERSION/$(ls *_aarch64.iso) "Without proprietary software or drivers")
- [RISCV64 (RVA23)]($S3_CDN/v$VERSION/$(ls *_riscv64-nonfree.iso))
- [RISCV64 (RVA23)-slim (FOSS-only)]($S3_CDN/v$VERSION/$(ls *_riscv64.iso) "Without proprietary software or drivers")
EOF
cat << 'EOF'
# StartOS Checksums
## SHA-256
```
EOF
sha256sum *.iso *.squashfs
cat << 'EOF'
```
## BLAKE-3
```
EOF
b3sum *.iso *.squashfs
cat << 'EOF'
```
# Start-Tunnel Checksums
## SHA-256
```
EOF
sha256sum start-tunnel*.deb
cat << 'EOF'
```
## BLAKE-3
```
EOF
b3sum start-tunnel*.deb
cat << 'EOF'
```
# start-cli Checksums
## SHA-256
```
EOF
release_files | grep '^start-cli_' | xargs sha256sum
cat << 'EOF'
```
## BLAKE-3
```
EOF
release_files | grep '^start-cli_' | xargs b3sum
cat << 'EOF'
```
EOF
}
# Run the full release pipeline end to end, in the order documented by
# usage(): download -> register -> upload -> index -> sign -> notes.
cmd_full_release() {
cmd_download
cmd_register
cmd_upload
cmd_index
cmd_sign
cmd_notes
}
# Print CLI help: available subcommands and recognized environment
# variables. The quoted heredoc is emitted verbatim.
usage() {
cat << 'EOF'
Usage: manage-release.sh <subcommand>
Subcommands:
download Download artifacts from GitHub Actions runs
Requires: RUN_ID, ST_RUN_ID, CLI_RUN_ID (any combination)
pull Download an existing release from the GH tag and S3
register Register the version in the Start9 registry
upload Upload artifacts to GitHub Releases and S3
index Add assets to the registry index
sign Sign all artifacts with Start9 org key (+ personal key if available)
and upload signatures.tar.gz
cosign Add personal GPG signature to an existing release's signatures
(requires 'pull' first so you can verify assets before signing)
notes Print release notes with download links and checksums
full-release Run: download → register → upload → index → sign → notes
Environment variables:
VERSION (required) Release version
RUN_ID GitHub Actions run ID for OS images (download subcommand)
ST_RUN_ID GitHub Actions run ID for start-tunnel (download subcommand)
CLI_RUN_ID GitHub Actions run ID for start-cli (download subcommand)
GH_USER Override GitHub username (default: autodetected via gh cli)
CLEAN Set to 1 to wipe and recreate the release directory
EOF
}
# Dispatch the first CLI argument to its cmd_* function; an unknown or
# missing subcommand prints usage and exits non-zero.
case "${1:-}" in
download) cmd_download ;;
pull) cmd_pull ;;
register) cmd_register ;;
upload) cmd_upload ;;
index) cmd_index ;;
sign) cmd_sign ;;
cosign) cmd_cosign ;;
notes) cmd_notes ;;
full-release) cmd_full_release ;;
*) usage; exit 1 ;;
esac

142
build/upload-ota.sh Executable file
View File

@@ -0,0 +1,142 @@
#!/bin/bash
# One-shot release upload script: downloads CI artifacts, registers the
# version with the registry, uploads to GitHub/S3, indexes, signs, and
# prints markdown release notes. Phases are individually skippable via
# SKIP_DL / SKIP_CLEAN / SKIP_UL / SKIP_INDEX environment variables.
if [ -z "$VERSION" ]; then
>&2 echo '$VERSION required'
exit 2
fi
set -e
# Phase 1: download artifacts from GitHub Actions runs (SKIP_DL=1 skips;
# SKIP_CLEAN=1 reuses an existing ~/Downloads/v$VERSION directory).
if [ "$SKIP_DL" != "1" ]; then
if [ "$SKIP_CLEAN" != "1" ]; then
rm -rf ~/Downloads/v$VERSION
mkdir ~/Downloads/v$VERSION
cd ~/Downloads/v$VERSION
fi
if [ -n "$RUN_ID" ]; then
# Each download is retried with a 1s delay until gh succeeds.
for arch in aarch64 aarch64-nonfree riscv64 x86_64 x86_64-nonfree; do
while ! gh run download -R Start9Labs/start-os $RUN_ID -n $arch.squashfs -D $(pwd); do sleep 1; done
done
for arch in aarch64 aarch64-nonfree riscv64 x86_64 x86_64-nonfree; do
while ! gh run download -R Start9Labs/start-os $RUN_ID -n $arch.iso -D $(pwd); do sleep 1; done
done
fi
if [ -n "$ST_RUN_ID" ]; then
for arch in aarch64 riscv64 x86_64; do
while ! gh run download -R Start9Labs/start-os $ST_RUN_ID -n start-tunnel_$arch.deb -D $(pwd); do sleep 1; done
done
fi
if [ -n "$CLI_RUN_ID" ]; then
for arch in aarch64 riscv64 x86_64; do
for os in linux macos; do
pair=${arch}-${os}
# Map arch/os to the Rust target triple; riscv64 has no macOS build.
if [ "${pair}" = "riscv64-linux" ]; then
target=riscv64gc-unknown-linux-musl
elif [ "${pair}" = "riscv64-macos" ]; then
continue
elif [ "${os}" = "linux" ]; then
target="${arch}-unknown-linux-musl"
elif [ "${os}" = "macos" ]; then
target="${arch}-apple-darwin"
fi
while ! gh run download -R Start9Labs/start-os $CLI_RUN_ID -n start-cli_$target -D $(pwd); do sleep 1; done
# The artifact extracts as plain 'start-cli'; rename it per arch/os.
mv start-cli "start-cli_${pair}"
done
done
fi
else
cd ~/Downloads/v$VERSION
fi
# Register the version with the registry as an upgrade target for >=0.3.5.
start-cli --registry=https://alpha-registry-x.start9.com registry os version add $VERSION "v$VERSION" '' ">=0.3.5 <=$VERSION"
# Phase 2: upload (SKIP_UL=1 skips uploads, SKIP_UL=2 aborts here).
if [ "$SKIP_UL" = "2" ]; then
exit 2
elif [ "$SKIP_UL" != "1" ]; then
for file in *.deb start-cli_*; do
gh release upload -R Start9Labs/start-os v$VERSION $file
done
for file in *.iso *.squashfs; do
s3cmd put -P $file s3://startos-images/v$VERSION/$file
done
fi
# Phase 3: index image assets in the registry by platform.
if [ "$SKIP_INDEX" != "1" ]; then
for arch in aarch64 aarch64-nonfree riscv64 x86_64 x86_64-nonfree; do
for file in *_$arch.squashfs *_$arch.iso; do
start-cli --registry=https://alpha-registry-x.start9.com registry os asset add --platform=$arch --version=$VERSION $file https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$file
done
done
fi
# Phase 4: detach-sign every artifact, export the public key, and attach
# the signature bundle to the GitHub release.
for file in *.iso *.squashfs *.deb start-cli_*; do
gpg -u 7CFFDA41CA66056A --detach-sign --armor -o "${file}.asc" "$file"
done
gpg --export -a 7CFFDA41CA66056A > dr-bonez.key.asc
tar -czvf signatures.tar.gz *.asc
gh release upload -R Start9Labs/start-os v$VERSION signatures.tar.gz
# Phase 5: emit markdown release notes (download links + checksums).
cat << EOF
# ISO Downloads
- [x86_64/AMD64](https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$(ls *_x86_64-nonfree.iso))
- [x86_64/AMD64-slim (FOSS-only)](https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$(ls *_x86_64.iso) "Without proprietary software or drivers")
- [aarch64/ARM64](https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$(ls *_aarch64-nonfree.iso))
- [aarch64/ARM64-slim (FOSS-Only)](https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$(ls *_aarch64.iso) "Without proprietary software or drivers")
- [RISCV64 (RVA23)](https://startos-images.nyc3.cdn.digitaloceanspaces.com/v$VERSION/$(ls *_riscv64.iso))
EOF
cat << 'EOF'
# StartOS Checksums
## SHA-256
```
EOF
sha256sum *.iso *.squashfs
cat << 'EOF'
```
## BLAKE-3
```
EOF
b3sum *.iso *.squashfs
cat << 'EOF'
```
# Start-Tunnel Checksums
## SHA-256
```
EOF
sha256sum start-tunnel*.deb
cat << 'EOF'
```
## BLAKE-3
```
EOF
b3sum start-tunnel*.deb
cat << 'EOF'
```
# start-cli Checksums
## SHA-256
```
EOF
sha256sum start-cli_*
cat << 'EOF'
```
## BLAKE-3
```
EOF
b3sum start-cli_*
cat << 'EOF'
```
EOF

View File

@@ -1,30 +0,0 @@
// Mock for ESM-only mime package — Jest's module loader doesn't support require(esm)
const types = {
".png": "image/png",
".jpg": "image/jpeg",
".jpeg": "image/jpeg",
".gif": "image/gif",
".svg": "image/svg+xml",
".webp": "image/webp",
".ico": "image/x-icon",
".json": "application/json",
".js": "application/javascript",
".html": "text/html",
".css": "text/css",
".txt": "text/plain",
".md": "text/markdown",
}
module.exports = {
default: {
getType(path) {
const ext = "." + path.split(".").pop()
return types[ext] || null
},
getExtension(type) {
const entry = Object.entries(types).find(([, v]) => v === type)
return entry ? entry[0].slice(1) : null
},
},
__esModule: true,
}

View File

@@ -5,7 +5,4 @@ module.exports = {
testEnvironment: "node",
rootDir: "./src/",
modulePathIgnorePatterns: ["./dist/"],
moduleNameMapper: {
"^mime$": "<rootDir>/../__mocks__/mime.js",
},
}

View File

@@ -37,7 +37,7 @@
},
"../sdk/dist": {
"name": "@start9labs/start-sdk",
"version": "0.4.0-beta.58",
"version": "0.4.0-beta.51",
"license": "MIT",
"dependencies": {
"@iarna/toml": "^3.0.0",
@@ -49,8 +49,7 @@
"isomorphic-fetch": "^3.0.0",
"mime": "^4.0.7",
"yaml": "^2.7.1",
"zod": "^4.3.6",
"zod-deep-partial": "^1.2.0"
"zod": "^4.3.6"
},
"devDependencies": {
"@types/jest": "^29.4.0",

View File

@@ -1,4 +1,5 @@
import { RpcListener } from "./Adapters/RpcListener"
import { SystemForEmbassy } from "./Adapters/Systems/SystemForEmbassy"
import { AllGetDependencies } from "./Interfaces/AllGetDependencies"
import { getSystem } from "./Adapters/Systems"
@@ -6,18 +7,6 @@ const getDependencies: AllGetDependencies = {
system: getSystem,
}
process.on("unhandledRejection", (reason) => {
if (
reason instanceof Error &&
"muteUnhandled" in reason &&
reason.muteUnhandled
) {
// mute
} else {
console.error("Unhandled promise rejection", reason)
}
})
for (let s of ["SIGTERM", "SIGINT", "SIGHUP"]) {
process.on(s, (s) => {
console.log(`Caught ${s}`)

View File

@@ -25,4 +25,3 @@ cd sdk && make baseDist dist # Rebuild SDK after ts-bindings
- When adding i18n keys, add all 5 locales in `core/locales/i18n.yaml` (see [i18n-patterns.md](i18n-patterns.md))
- When using DB watches, follow the `TypedDbWatch<T>` patterns in [patchdb.md](patchdb.md)
- **Always use `.invoke(ErrorKind::...)` instead of `.status()` when running CLI commands** via `tokio::process::Command`. The `Invoke` trait (from `crate::util::Invoke`) captures stdout/stderr and checks exit codes properly. Using `.status()` leaks stderr directly to system logs, creating noise. For check-then-act patterns (e.g. `iptables -C`), use `.invoke(...).await.is_ok()` / `.is_err()` instead of `.status().await.map_or(false, |s| s.success())`.
- Always use file utils in util::io instead of tokio::fs when available

View File

@@ -197,13 +197,6 @@ setup.transferring-data:
fr_FR: "Transfert de données"
pl_PL: "Przesyłanie danych"
setup.password-required:
en_US: "Password is required for fresh setup"
de_DE: "Passwort ist für die Ersteinrichtung erforderlich"
es_ES: "Se requiere contraseña para la configuración inicial"
fr_FR: "Le mot de passe est requis pour la première configuration"
pl_PL: "Hasło jest wymagane do nowej konfiguracji"
# system.rs
system.governor-not-available:
en_US: "Governor %{governor} not available"
@@ -3684,13 +3677,6 @@ help.arg.s9pk-file-path:
fr_FR: "Chemin vers le fichier de paquet s9pk"
pl_PL: "Ścieżka do pliku pakietu s9pk"
help.arg.s9pk-file-paths:
en_US: "Paths to s9pk package files"
de_DE: "Pfade zu s9pk-Paketdateien"
es_ES: "Rutas a los archivos de paquete s9pk"
fr_FR: "Chemins vers les fichiers de paquet s9pk"
pl_PL: "Ścieżki do plików pakietów s9pk"
help.arg.session-ids:
en_US: "Session identifiers"
de_DE: "Sitzungskennungen"
@@ -4987,13 +4973,6 @@ about.publish-s9pk:
fr_FR: "Publier s9pk dans le bucket S3 et indexer dans le registre"
pl_PL: "Opublikuj s9pk do bucketu S3 i zindeksuj w rejestrze"
about.select-s9pk-for-device:
en_US: "Select the best compatible s9pk for a target device"
de_DE: "Das beste kompatible s9pk für ein Zielgerät auswählen"
es_ES: "Seleccionar el s9pk más compatible para un dispositivo destino"
fr_FR: "Sélectionner le meilleur s9pk compatible pour un appareil cible"
pl_PL: "Wybierz najlepiej kompatybilny s9pk dla urządzenia docelowego"
about.rebuild-service-container:
en_US: "Rebuild service container"
de_DE: "Dienst-Container neu erstellen"

View File

@@ -21,14 +21,6 @@ pub async fn my_handler(ctx: RpcContext, params: MyParams) -> Result<MyResponse,
from_fn_async(my_handler)
```
If a handler takes no params, simply omit the params argument entirely (no need for `_: Empty`):
```rust
pub async fn no_params_handler(ctx: RpcContext) -> Result<MyResponse, Error> {
// ...
}
```
### `from_fn_async_local` - Non-thread-safe async handlers
For async functions that are not `Send` (cannot be safely moved between threads). Use when working with non-thread-safe types.
@@ -189,9 +181,9 @@ pub struct MyParams {
### Adding a New RPC Endpoint
1. Define params struct with `Deserialize, Serialize, Parser, TS` (skip if no params needed)
1. Define params struct with `Deserialize, Serialize, Parser, TS`
2. Choose handler type based on sync/async and thread-safety
3. Write handler function taking `(Context, Params) -> Result<Response, Error>` (omit Params if none needed)
3. Write handler function taking `(Context, Params) -> Result<Response, Error>`
4. Add to parent handler with appropriate extensions (display modifiers before `with_about`)
5. TypeScript types auto-generated via `make ts-bindings`

View File

@@ -86,7 +86,7 @@ pub async fn restore_packages_rpc(
pub async fn recover_full_server(
ctx: &SetupContext,
disk_guid: InternedString,
password: Option<String>,
password: String,
recovery_source: TmpMountGuard,
server_id: &str,
recovery_password: &str,
@@ -110,14 +110,12 @@ pub async fn recover_full_server(
.with_ctx(|_| (ErrorKind::Filesystem, os_backup_path.display().to_string()))?,
)?;
if let Some(password) = password {
os_backup.account.password = argon2::hash_encoded(
password.as_bytes(),
&rand::random::<[u8; 16]>()[..],
&argon2::Config::rfc9106_low_mem(),
)
.with_kind(ErrorKind::PasswordHashGeneration)?;
}
os_backup.account.password = argon2::hash_encoded(
password.as_bytes(),
&rand::random::<[u8; 16]>()[..],
&argon2::Config::rfc9106_low_mem(),
)
.with_kind(ErrorKind::PasswordHashGeneration)?;
if let Some(h) = hostname {
os_backup.account.hostname = h;

View File

@@ -10,6 +10,7 @@ use std::time::Duration;
use chrono::{TimeDelta, Utc};
use imbl::OrdMap;
use imbl_value::InternedString;
use itertools::Itertools;
use josekit::jwk::Jwk;
use reqwest::{Client, Proxy};
use rpc_toolkit::yajrc::RpcError;
@@ -24,6 +25,7 @@ use crate::account::AccountInfo;
use crate::auth::Sessions;
use crate::context::config::ServerConfig;
use crate::db::model::Database;
use crate::db::model::package::TaskSeverity;
use crate::disk::OsPartitionInfo;
use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::block_dev::BlockDev;
@@ -42,6 +44,7 @@ use crate::prelude::*;
use crate::progress::{FullProgressTracker, PhaseProgressTrackerHandle};
use crate::rpc_continuations::{Guid, OpenAuthedContinuations, RpcContinuations};
use crate::service::ServiceMap;
use crate::service::action::update_tasks;
use crate::service::effects::callbacks::ServiceCallbacks;
use crate::service::effects::subcontainer::NVIDIA_OVERLAY_PATH;
use crate::shutdown::Shutdown;
@@ -50,7 +53,7 @@ use crate::util::future::NonDetachingJoinHandle;
use crate::util::io::{TmpDir, delete_file};
use crate::util::lshw::LshwDevice;
use crate::util::sync::{SyncMutex, SyncRwLock, Watch};
use crate::{DATA_DIR, PLATFORM, PackageId};
use crate::{ActionId, DATA_DIR, PLATFORM, PackageId};
pub struct RpcContextSeed {
is_closed: AtomicBool,
@@ -111,6 +114,7 @@ pub struct CleanupInitPhases {
cleanup_sessions: PhaseProgressTrackerHandle,
init_services: PhaseProgressTrackerHandle,
prune_s9pks: PhaseProgressTrackerHandle,
check_tasks: PhaseProgressTrackerHandle,
}
impl CleanupInitPhases {
pub fn new(handle: &FullProgressTracker) -> Self {
@@ -118,6 +122,7 @@ impl CleanupInitPhases {
cleanup_sessions: handle.add_phase("Cleaning up sessions".into(), Some(1)),
init_services: handle.add_phase("Initializing services".into(), Some(10)),
prune_s9pks: handle.add_phase("Pruning S9PKs".into(), Some(1)),
check_tasks: handle.add_phase("Checking action requests".into(), Some(1)),
}
}
}
@@ -168,7 +173,7 @@ impl RpcContext {
init_net_ctrl.complete();
tracing::info!("{}", t!("context.rpc.initialized-net-controller"));
if PLATFORM.ends_with("-nvidia") {
if PLATFORM.ends_with("-nonfree") {
if let Err(e) = Command::new("nvidia-smi")
.invoke(ErrorKind::ParseSysInfo)
.await
@@ -406,6 +411,7 @@ impl RpcContext {
mut cleanup_sessions,
mut init_services,
mut prune_s9pks,
mut check_tasks,
}: CleanupInitPhases,
) -> Result<(), Error> {
cleanup_sessions.start();
@@ -497,6 +503,76 @@ impl RpcContext {
}
prune_s9pks.complete();
check_tasks.start();
let mut action_input: OrdMap<PackageId, BTreeMap<ActionId, Value>> = OrdMap::new();
let tasks: BTreeSet<_> = peek
.as_public()
.as_package_data()
.as_entries()?
.into_iter()
.map(|(_, pde)| {
Ok(pde
.as_tasks()
.as_entries()?
.into_iter()
.map(|(_, r)| {
let t = r.as_task();
Ok::<_, Error>(if t.as_input().transpose_ref().is_some() {
Some((t.as_package_id().de()?, t.as_action_id().de()?))
} else {
None
})
})
.filter_map_ok(|a| a))
})
.flatten_ok()
.map(|a| a.and_then(|a| a))
.try_collect()?;
let procedure_id = Guid::new();
for (package_id, action_id) in tasks {
if let Some(service) = self.services.get(&package_id).await.as_ref() {
if let Some(input) = service
.get_action_input(procedure_id.clone(), action_id.clone(), Value::Null)
.await
.log_err()
.flatten()
.and_then(|i| i.value)
{
action_input
.entry(package_id)
.or_default()
.insert(action_id, input);
}
}
}
self.db
.mutate(|db| {
for (package_id, action_input) in &action_input {
for (action_id, input) in action_input {
for (_, pde) in db.as_public_mut().as_package_data_mut().as_entries_mut()? {
pde.as_tasks_mut().mutate(|tasks| {
Ok(update_tasks(tasks, package_id, action_id, input, false))
})?;
}
}
}
for (_, pde) in db.as_public_mut().as_package_data_mut().as_entries_mut()? {
if pde
.as_tasks()
.de()?
.into_iter()
.any(|(_, t)| t.active && t.task.severity == TaskSeverity::Critical)
{
pde.as_status_info_mut().stop()?;
}
}
Ok(())
})
.await
.result?;
check_tasks.complete();
Ok(())
}
pub async fn call_remote<RemoteContext>(

View File

@@ -24,7 +24,7 @@ use crate::net::host::Host;
use crate::net::host::binding::{
AddSslOptions, BindInfo, BindOptions, Bindings, DerivedAddressInfo, NetInfo,
};
use crate::net::vhost::{AlpnInfo, PassthroughInfo};
use crate::net::vhost::AlpnInfo;
use crate::prelude::*;
use crate::progress::FullProgress;
use crate::system::{KeyboardOptions, SmtpValue};
@@ -121,7 +121,6 @@ impl Public {
},
dns: Default::default(),
default_outbound: None,
passthroughs: Vec::new(),
},
status_info: ServerStatus {
backup_progress: None,
@@ -234,8 +233,6 @@ pub struct NetworkInfo {
#[serde(default)]
#[ts(type = "string | null")]
pub default_outbound: Option<GatewayId>,
#[serde(default)]
pub passthroughs: Vec<PassthroughInfo>,
}
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]

View File

@@ -251,35 +251,18 @@ pub async fn set_hostname_rpc(
ctx: RpcContext,
SetServerHostnameParams { name, hostname }: SetServerHostnameParams,
) -> Result<(), Error> {
let name = name.filter(|n| !n.is_empty());
let hostname = hostname
.filter(|h| !h.is_empty())
.map(ServerHostname::new)
.transpose()?;
if name.is_none() && hostname.is_none() {
let Some(hostname) = ServerHostnameInfo::new_opt(name, hostname)? else {
return Err(Error::new(
eyre!("{}", t!("hostname.must-provide-name-or-hostname")),
ErrorKind::InvalidRequest,
));
};
let info = ctx
.db
.mutate(|db| {
let server_info = db.as_public_mut().as_server_info_mut();
if let Some(name) = name {
server_info.as_name_mut().ser(&name)?;
}
if let Some(hostname) = &hostname {
hostname.save(server_info)?;
}
ServerHostnameInfo::load(server_info)
})
ctx.db
.mutate(|db| hostname.save(db.as_public_mut().as_server_info_mut()))
.await
.result?;
ctx.account.mutate(|a| a.hostname = info.clone());
if let Some(h) = hostname {
sync_hostname(&h).await?;
}
ctx.account.mutate(|a| a.hostname = hostname.clone());
sync_hostname(&hostname.hostname).await?;
Ok(())
}

View File

@@ -25,9 +25,6 @@ pub fn platform_to_arch(platform: &str) -> &str {
if let Some(arch) = platform.strip_suffix("-nonfree") {
return arch;
}
if let Some(arch) = platform.strip_suffix("-nvidia") {
return arch;
}
match platform {
"raspberrypi" | "rockchip64" => "aarch64",
_ => platform,
@@ -271,18 +268,6 @@ pub fn server<C: Context>() -> ParentHandler<C> {
.with_about("about.display-time-uptime")
.with_call_remote::<CliContext>(),
)
.subcommand(
"device-info",
ParentHandler::<C, WithIoFormat<Empty>>::new().root_handler(
from_fn_async(system::device_info)
.with_display_serializable()
.with_custom_display_fn(|handle, result| {
system::display_device_info(handle.params, result)
})
.with_about("about.get-device-info")
.with_call_remote::<CliContext>(),
),
)
.subcommand(
"experimental",
system::experimental::<C>().with_about("about.commands-experimental"),

View File

@@ -20,6 +20,9 @@ use crate::context::RpcContext;
use crate::middleware::auth::DbContext;
use crate::prelude::*;
use crate::rpc_continuations::OpenAuthedContinuations;
use crate::util::Invoke;
use crate::util::io::{create_file_mod, read_file_to_string};
use crate::util::serde::{BASE64, const_true};
use crate::util::sync::SyncMutex;
pub trait SessionAuthContext: DbContext {

View File

@@ -27,7 +27,7 @@ use crate::db::model::public::AcmeSettings;
use crate::db::{DbAccess, DbAccessByKey, DbAccessMut};
use crate::error::ErrorData;
use crate::net::ssl::should_use_cert;
use crate::net::tls::{SingleCertResolver, TlsHandler, TlsHandlerAction};
use crate::net::tls::{SingleCertResolver, TlsHandler};
use crate::net::web_server::Accept;
use crate::prelude::*;
use crate::util::FromStrParser;
@@ -173,7 +173,7 @@ where
&'a mut self,
hello: &'a ClientHello<'a>,
_: &'a <A as Accept>::Metadata,
) -> Option<TlsHandlerAction> {
) -> Option<ServerConfig> {
let domain = hello.server_name()?;
if hello
.alpn()
@@ -207,20 +207,20 @@ where
cfg.alpn_protocols = vec![ACME_TLS_ALPN_NAME.to_vec()];
tracing::info!("performing ACME auth challenge");
return Some(TlsHandlerAction::Tls(cfg));
return Some(cfg);
}
let domains: BTreeSet<InternedString> = [domain.into()].into_iter().collect();
let crypto_provider = self.crypto_provider.clone();
if let Some(cert) = self.get_cert(&domains).await {
return Some(TlsHandlerAction::Tls(
return Some(
ServerConfig::builder_with_provider(crypto_provider)
.with_safe_default_protocol_versions()
.log_err()?
.with_no_client_auth()
.with_cert_resolver(Arc::new(SingleCertResolver(Arc::new(cert)))),
));
);
}
None

View File

@@ -174,33 +174,23 @@ async fn set_name(
#[derive(Debug, Clone, Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct CheckPortParams {
struct CheckPortParams {
#[arg(help = "help.arg.port")]
pub port: u16,
port: u16,
#[arg(help = "help.arg.gateway-id")]
pub gateway: GatewayId,
gateway: GatewayId,
}
#[derive(Debug, Clone, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct CheckPortRes {
pub ip: Ipv4Addr,
pub port: u16,
pub open_externally: bool,
pub open_internally: bool,
pub hairpinning: bool,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct IfconfigPortRes {
pub ip: Ipv4Addr,
pub port: u16,
pub reachable: bool,
}
pub async fn check_port(
async fn check_port(
ctx: RpcContext,
CheckPortParams { port, gateway }: CheckPortParams,
) -> Result<CheckPortRes, Error> {
@@ -221,33 +211,15 @@ pub async fn check_port(
ErrorKind::NotFound,
)
})?;
let internal_ips = ip_info
.subnets
.iter()
.map(|i| i.addr())
.filter(|a| a.is_ipv4())
.map(|a| SocketAddr::new(a, port))
.collect::<Vec<_>>();
let open_internally = tokio::time::timeout(
Duration::from_secs(5),
tokio::net::TcpStream::connect(&*internal_ips),
)
.await
.map_or(false, |r| r.is_ok());
let iface = &*ip_info.name;
let client = reqwest::Client::builder();
#[cfg(target_os = "linux")]
let client = client.interface(gateway.as_str());
let client = client.interface(iface);
let url = base_url
.join(&format!("/port/{port}"))
.with_kind(ErrorKind::ParseUrl)?;
let IfconfigPortRes {
ip,
port,
reachable: open_externally,
} = client
let res: CheckPortRes = client
.build()?
.get(url)
.timeout(Duration::from_secs(10))
@@ -256,32 +228,18 @@ pub async fn check_port(
.error_for_status()?
.json()
.await?;
let hairpinning = tokio::time::timeout(
Duration::from_secs(5),
tokio::net::TcpStream::connect(SocketAddr::new(ip.into(), port)),
)
.await
.map_or(false, |r| r.is_ok());
Ok(CheckPortRes {
ip,
port,
open_externally,
open_internally,
hairpinning,
})
Ok(res)
}
#[derive(Debug, Clone, Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct CheckDnsParams {
struct CheckDnsParams {
#[arg(help = "help.arg.gateway-id")]
pub gateway: GatewayId,
gateway: GatewayId,
}
pub async fn check_dns(
async fn check_dns(
ctx: RpcContext,
CheckDnsParams { gateway }: CheckDnsParams,
) -> Result<bool, Error> {
@@ -1238,7 +1196,8 @@ async fn poll_ip_info(
device_type,
Some(NetworkInterfaceType::Bridge | NetworkInterfaceType::Loopback)
) {
let res = match get_wan_ipv4(iface.as_str(), &ifconfig_url).await {
*prev_attempt = Some(Instant::now());
match get_wan_ipv4(iface.as_str(), &ifconfig_url).await {
Ok(a) => a,
Err(e) => {
tracing::error!(
@@ -1252,9 +1211,7 @@ async fn poll_ip_info(
tracing::debug!("{e:?}");
None
}
};
*prev_attempt = Some(Instant::now());
res
}
} else {
None
};

View File

@@ -12,7 +12,6 @@ use crate::context::{CliContext, RpcContext};
use crate::db::model::DatabaseModel;
use crate::hostname::ServerHostname;
use crate::net::acme::AcmeProvider;
use crate::net::gateway::{CheckDnsParams, CheckPortParams, CheckPortRes, check_dns, check_port};
use crate::net::host::{HostApiKind, all_hosts};
use crate::prelude::*;
use crate::util::serde::{HandlerExtSerde, display_serializable};
@@ -161,7 +160,6 @@ pub fn address_api<C: Context, Kind: HostApiKind>()
}
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct AddPublicDomainParams {
#[arg(help = "help.arg.fqdn")]
@@ -170,17 +168,6 @@ pub struct AddPublicDomainParams {
pub acme: Option<AcmeProvider>,
#[arg(help = "help.arg.gateway-id")]
pub gateway: GatewayId,
#[arg(help = "help.arg.internal-port")]
pub internal_port: u16,
}
#[derive(Debug, Clone, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct AddPublicDomainRes {
#[ts(type = "string | null")]
pub dns: Option<Ipv4Addr>,
pub port: CheckPortRes,
}
pub async fn add_public_domain<Kind: HostApiKind>(
@@ -189,12 +176,10 @@ pub async fn add_public_domain<Kind: HostApiKind>(
fqdn,
acme,
gateway,
internal_port,
}: AddPublicDomainParams,
inheritance: Kind::Inheritance,
) -> Result<AddPublicDomainRes, Error> {
let ext_port = ctx
.db
) -> Result<Option<Ipv4Addr>, Error> {
ctx.db
.mutate(|db| {
if let Some(acme) = &acme {
if !db
@@ -210,92 +195,22 @@ pub async fn add_public_domain<Kind: HostApiKind>(
Kind::host_for(&inheritance, db)?
.as_public_domains_mut()
.insert(
&fqdn,
&PublicDomainConfig {
acme,
gateway: gateway.clone(),
},
)?;
.insert(&fqdn, &PublicDomainConfig { acme, gateway })?;
handle_duplicates(db)?;
let hostname = ServerHostname::load(db.as_public().as_server_info())?;
let gateways = db
.as_public()
.as_server_info()
.as_network()
.as_gateways()
.de()?;
let available_ports = db.as_private().as_available_ports().de()?;
let host = Kind::host_for(&inheritance, db)?;
host.update_addresses(&hostname, &gateways, &available_ports)?;
// Find the external port for the target binding
let bindings = host.as_bindings().de()?;
let target_bind = bindings
.get(&internal_port)
.ok_or_else(|| Error::new(eyre!("binding not found for internal port {internal_port}"), ErrorKind::NotFound))?;
let ext_port = target_bind
.addresses
.available
.iter()
.find(|a| a.public && a.hostname == fqdn)
.and_then(|a| a.port)
.ok_or_else(|| Error::new(eyre!("no public address found for {fqdn} on port {internal_port}"), ErrorKind::NotFound))?;
// Disable the domain on all other bindings
host.as_bindings_mut().mutate(|b| {
for (&port, bind) in b.iter_mut() {
if port == internal_port {
continue;
}
let has_addr = bind
.addresses
.available
.iter()
.any(|a| a.public && a.hostname == fqdn);
if has_addr {
let other_ext = bind
.addresses
.available
.iter()
.find(|a| a.public && a.hostname == fqdn)
.and_then(|a| a.port)
.unwrap_or(ext_port);
bind.addresses.disabled.insert((fqdn.clone(), other_ext));
}
}
Ok(())
})?;
Ok(ext_port)
let gateways = db.as_public().as_server_info().as_network().as_gateways().de()?;
let ports = db.as_private().as_available_ports().de()?;
Kind::host_for(&inheritance, db)?.update_addresses(&hostname, &gateways, &ports)
})
.await
.result?;
Kind::sync_host(&ctx, inheritance).await?;
let ctx2 = ctx.clone();
let fqdn2 = fqdn.clone();
let (dns_result, port_result) = tokio::join!(
async {
tokio::task::spawn_blocking(move || {
crate::net::dns::query_dns(ctx2, crate::net::dns::QueryDnsParams { fqdn: fqdn2 })
})
.await
.with_kind(ErrorKind::Unknown)?
},
check_port(
ctx.clone(),
CheckPortParams {
port: ext_port,
gateway: gateway.clone(),
},
)
);
Ok(AddPublicDomainRes {
dns: dns_result?,
port: port_result?,
tokio::task::spawn_blocking(|| {
crate::net::dns::query_dns(ctx, crate::net::dns::QueryDnsParams { fqdn })
})
.await
.with_kind(ErrorKind::Unknown)?
}
#[derive(Deserialize, Serialize, Parser, TS)]
@@ -327,6 +242,7 @@ pub async fn remove_public_domain<Kind: HostApiKind>(
})
.await
.result?;
Kind::sync_host(&ctx, inheritance).await?;
Ok(())
}
@@ -343,13 +259,13 @@ pub async fn add_private_domain<Kind: HostApiKind>(
ctx: RpcContext,
AddPrivateDomainParams { fqdn, gateway }: AddPrivateDomainParams,
inheritance: Kind::Inheritance,
) -> Result<bool, Error> {
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
Kind::host_for(&inheritance, db)?
.as_private_domains_mut()
.upsert(&fqdn, || Ok(BTreeSet::new()))?
.mutate(|d| Ok(d.insert(gateway.clone())))?;
.mutate(|d| Ok(d.insert(gateway)))?;
handle_duplicates(db)?;
let hostname = ServerHostname::load(db.as_public().as_server_info())?;
let gateways = db
@@ -363,8 +279,9 @@ pub async fn add_private_domain<Kind: HostApiKind>(
})
.await
.result?;
Kind::sync_host(&ctx, inheritance).await?;
check_dns(ctx, CheckDnsParams { gateway }).await
Ok(())
}
pub async fn remove_private_domain<Kind: HostApiKind>(
@@ -389,6 +306,7 @@ pub async fn remove_private_domain<Kind: HostApiKind>(
})
.await
.result?;
Kind::sync_host(&ctx, inheritance).await?;
Ok(())
}

View File

@@ -358,5 +358,5 @@ pub async fn set_address_enabled<Kind: HostApiKind>(
})
.await
.result?;
Ok(())
Kind::sync_host(&ctx, inheritance).await
}

View File

@@ -1,4 +1,5 @@
use std::collections::{BTreeMap, BTreeSet};
use std::future::Future;
use std::net::{IpAddr, SocketAddrV4};
use std::panic::RefUnwindSafe;
@@ -181,26 +182,15 @@ impl Model<Host> {
opt.secure
.map_or(true, |s| !(s.ssl && opt.add_ssl.is_some()))
}) {
let mdns_gateways = if opt.secure.is_some() {
mdns_gateways.clone()
} else {
mdns_gateways
.iter()
.filter(|g| gateways.get(*g).map_or(false, |g| g.secure()))
.cloned()
.collect()
};
if !mdns_gateways.is_empty() {
available.insert(HostnameInfo {
ssl: opt.secure.map_or(false, |s| s.ssl),
public: false,
hostname: mdns_host.clone(),
port: Some(port),
metadata: HostnameMetadata::Mdns {
gateways: mdns_gateways,
},
});
}
available.insert(HostnameInfo {
ssl: opt.secure.map_or(false, |s| s.ssl),
public: false,
hostname: mdns_host.clone(),
port: Some(port),
metadata: HostnameMetadata::Mdns {
gateways: mdns_gateways.clone(),
},
});
}
if let Some(port) = net.assigned_ssl_port {
available.insert(HostnameInfo {
@@ -249,20 +239,6 @@ impl Model<Host> {
port: Some(port),
metadata,
});
} else if opt.secure.map_or(false, |s| s.ssl)
&& opt.add_ssl.is_none()
&& available_ports.is_ssl(opt.preferred_external_port)
&& net.assigned_port != Some(opt.preferred_external_port)
{
// Service handles its own TLS and the preferred port is
// allocated as SSL — add an address for passthrough vhost.
available.insert(HostnameInfo {
ssl: true,
public: true,
hostname: domain,
port: Some(opt.preferred_external_port),
metadata,
});
}
}
@@ -307,20 +283,6 @@ impl Model<Host> {
gateways: domain_gateways,
},
});
} else if opt.secure.map_or(false, |s| s.ssl)
&& opt.add_ssl.is_none()
&& available_ports.is_ssl(opt.preferred_external_port)
&& net.assigned_port != Some(opt.preferred_external_port)
{
available.insert(HostnameInfo {
ssl: true,
public: true,
hostname: domain,
port: Some(opt.preferred_external_port),
metadata: HostnameMetadata::PrivateDomain {
gateways: domain_gateways,
},
});
}
}
bind.as_addresses_mut().as_available_mut().ser(&available)?;
@@ -467,6 +429,10 @@ pub trait HostApiKind: 'static {
inheritance: &Self::Inheritance,
db: &'a mut DatabaseModel,
) -> Result<&'a mut Model<Host>, Error>;
fn sync_host(
ctx: &RpcContext,
inheritance: Self::Inheritance,
) -> impl Future<Output = Result<(), Error>> + Send;
}
pub struct ForPackage;
impl HostApiKind for ForPackage {
@@ -485,6 +451,12 @@ impl HostApiKind for ForPackage {
) -> Result<&'a mut Model<Host>, Error> {
host_for(db, Some(package), host)
}
async fn sync_host(ctx: &RpcContext, (package, host): Self::Inheritance) -> Result<(), Error> {
let service = ctx.services.get(&package).await;
let service_ref = service.as_ref().or_not_found(&package)?;
service_ref.sync_host(host).await?;
Ok(())
}
}
pub struct ForServer;
impl HostApiKind for ForServer {
@@ -500,6 +472,9 @@ impl HostApiKind for ForServer {
) -> Result<&'a mut Model<Host>, Error> {
host_for(db, None, &HostId::default())
}
async fn sync_host(ctx: &RpcContext, _: Self::Inheritance) -> Result<(), Error> {
ctx.os_net_service.sync_host(HostId::default()).await
}
}
pub fn host_api<C: Context>() -> ParentHandler<C, RequiresPackageId> {

View File

@@ -76,22 +76,9 @@ impl NetController {
],
)
.await?;
let passthroughs = db
.peek()
.await
.as_public()
.as_server_info()
.as_network()
.as_passthroughs()
.de()?;
Ok(Self {
db: db.clone(),
vhost: VHostController::new(
db.clone(),
net_iface.clone(),
crypto_provider,
passthroughs,
),
vhost: VHostController::new(db.clone(), net_iface.clone(), crypto_provider),
tls_client_config,
dns: DnsController::init(db, &net_iface.watcher).await?,
forward: InterfacePortForwardController::new(net_iface.watcher.subscribe()),
@@ -250,7 +237,6 @@ impl NetServiceData {
connect_ssl: connect_ssl
.clone()
.map(|_| ctrl.tls_client_config.clone()),
passthrough: false,
},
);
}
@@ -267,9 +253,7 @@ impl NetServiceData {
_ => continue,
}
let domain = &addr_info.hostname;
let Some(domain_ssl_port) = addr_info.port else {
continue;
};
let domain_ssl_port = addr_info.port.unwrap_or(443);
let key = (Some(domain.clone()), domain_ssl_port);
let target = vhosts.entry(key).or_insert_with(|| ProxyTarget {
public: BTreeSet::new(),
@@ -282,7 +266,6 @@ impl NetServiceData {
addr,
add_x_forwarded_headers: ssl.add_x_forwarded_headers,
connect_ssl: connect_ssl.clone().map(|_| ctrl.tls_client_config.clone()),
passthrough: false,
});
if addr_info.public {
for gw in addr_info.metadata.gateways() {
@@ -334,53 +317,6 @@ impl NetServiceData {
),
);
}
// Passthrough vhosts: if the service handles its own TLS
// (secure.ssl && no add_ssl) and a domain address is enabled on
// an SSL port different from assigned_port, add a passthrough
// vhost so the service's TLS endpoint is reachable on that port.
if bind.options.secure.map_or(false, |s| s.ssl) && bind.options.add_ssl.is_none() {
let assigned = bind.net.assigned_port;
for addr_info in &enabled_addresses {
if !addr_info.ssl {
continue;
}
let Some(pt_port) = addr_info.port.filter(|p| assigned != Some(*p)) else {
continue;
};
match &addr_info.metadata {
HostnameMetadata::PublicDomain { .. }
| HostnameMetadata::PrivateDomain { .. } => {}
_ => continue,
}
let domain = &addr_info.hostname;
let key = (Some(domain.clone()), pt_port);
let target = vhosts.entry(key).or_insert_with(|| ProxyTarget {
public: BTreeSet::new(),
private: BTreeSet::new(),
acme: None,
addr,
add_x_forwarded_headers: false,
connect_ssl: Err(AlpnInfo::Reflect),
passthrough: true,
});
if addr_info.public {
for gw in addr_info.metadata.gateways() {
target.public.insert(gw.clone());
}
} else {
for gw in addr_info.metadata.gateways() {
if let Some(info) = net_ifaces.get(gw) {
if let Some(ip_info) = &info.ip_info {
for subnet in &ip_info.subnets {
target.private.insert(subnet.addr());
}
}
}
}
}
}
}
}
// ── Phase 3: Reconcile ──
@@ -789,6 +725,13 @@ impl NetService {
.result
}
pub async fn sync_host(&self, _id: HostId) -> Result<(), Error> {
let current = self.synced.peek(|v| *v);
let mut w = self.synced.clone();
w.wait_for(|v| *v > current).await;
Ok(())
}
pub async fn remove_all(mut self) -> Result<(), Error> {
if Weak::upgrade(&self.data.lock().await.controller).is_none() {
self.shutdown = true;

View File

@@ -36,7 +36,7 @@ use crate::db::{DbAccess, DbAccessMut};
use crate::hostname::ServerHostname;
use crate::init::check_time_is_synchronized;
use crate::net::gateway::GatewayInfo;
use crate::net::tls::{TlsHandler, TlsHandlerAction};
use crate::net::tls::TlsHandler;
use crate::net::web_server::{Accept, ExtractVisitor, TcpMetadata, extract};
use crate::prelude::*;
use crate::util::serde::Pem;
@@ -620,7 +620,7 @@ where
&mut self,
hello: &ClientHello<'_>,
metadata: &<A as Accept>::Metadata,
) -> Option<TlsHandlerAction> {
) -> Option<ServerConfig> {
let hostnames: BTreeSet<InternedString> = hello
.server_name()
.map(InternedString::from)
@@ -684,6 +684,5 @@ where
)
}
.log_err()
.map(TlsHandlerAction::Tls)
}
}

View File

@@ -9,7 +9,7 @@ use async_compression::tokio::bufread::GzipEncoder;
use axum::Router;
use axum::body::Body;
use axum::extract::{self as x, Request};
use axum::response::Response;
use axum::response::{IntoResponse, Response};
use axum::routing::{any, get};
use base64::display::Base64Display;
use digest::Digest;

View File

@@ -16,14 +16,6 @@ use tokio_rustls::rustls::sign::CertifiedKey;
use tokio_rustls::rustls::{ClientConfig, RootCertStore, ServerConfig};
use visit_rs::{Visit, VisitFields};
/// Result of a TLS handler's decision about how to handle a connection.
pub enum TlsHandlerAction {
/// Complete the TLS handshake with this ServerConfig.
Tls(ServerConfig),
/// Don't complete TLS — rewind the BackTrackingIO and return the raw stream.
Passthrough,
}
use crate::net::http::handle_http_on_https;
use crate::net::web_server::{Accept, AcceptStream, MetadataVisitor};
use crate::prelude::*;
@@ -58,7 +50,7 @@ pub trait TlsHandler<'a, A: Accept> {
&'a mut self,
hello: &'a ClientHello<'a>,
metadata: &'a A::Metadata,
) -> impl Future<Output = Option<TlsHandlerAction>> + Send + 'a;
) -> impl Future<Output = Option<ServerConfig>> + Send + 'a;
}
#[derive(Clone)]
@@ -74,7 +66,7 @@ where
&'a mut self,
hello: &'a ClientHello<'a>,
metadata: &'a <A as Accept>::Metadata,
) -> Option<TlsHandlerAction> {
) -> Option<ServerConfig> {
if let Some(config) = self.0.get_config(hello, metadata).await {
return Some(config);
}
@@ -94,7 +86,7 @@ pub trait WrapTlsHandler<A: Accept> {
prev: ServerConfig,
hello: &'a ClientHello<'a>,
metadata: &'a <A as Accept>::Metadata,
) -> impl Future<Output = Option<TlsHandlerAction>> + Send + 'a
) -> impl Future<Output = Option<ServerConfig>> + Send + 'a
where
Self: 'a;
}
@@ -110,12 +102,9 @@ where
&'a mut self,
hello: &'a ClientHello<'a>,
metadata: &'a <A as Accept>::Metadata,
) -> Option<TlsHandlerAction> {
let action = self.inner.get_config(hello, metadata).await?;
match action {
TlsHandlerAction::Tls(cfg) => self.wrapper.wrap(cfg, hello, metadata).await,
other => Some(other),
}
) -> Option<ServerConfig> {
let prev = self.inner.get_config(hello, metadata).await?;
self.wrapper.wrap(prev, hello, metadata).await
}
}
@@ -214,56 +203,34 @@ where
}
};
let hello = mid.client_hello();
let sni = hello.server_name().map(InternedString::intern);
match tls_handler.get_config(&hello, &metadata).await {
Some(TlsHandlerAction::Tls(cfg)) => {
let buffered = mid.io.stop_buffering();
mid.io
.write_all(&buffered)
.await
.with_kind(ErrorKind::Network)?;
return Ok(match mid.into_stream(Arc::new(cfg)).await {
Ok(stream) => {
let s = stream.get_ref().1;
Some((
TlsMetadata {
inner: metadata,
tls_info: TlsHandshakeInfo {
sni: s
.server_name()
.map(InternedString::intern),
alpn: s
.alpn_protocol()
.map(|a| MaybeUtf8String(a.to_vec())),
},
if let Some(cfg) = tls_handler.get_config(&hello, &metadata).await {
let buffered = mid.io.stop_buffering();
mid.io
.write_all(&buffered)
.await
.with_kind(ErrorKind::Network)?;
return Ok(match mid.into_stream(Arc::new(cfg)).await {
Ok(stream) => {
let s = stream.get_ref().1;
Some((
TlsMetadata {
inner: metadata,
tls_info: TlsHandshakeInfo {
sni: s.server_name().map(InternedString::intern),
alpn: s
.alpn_protocol()
.map(|a| MaybeUtf8String(a.to_vec())),
},
Box::pin(stream) as AcceptStream,
))
}
Err(e) => {
tracing::trace!("Error completing TLS handshake: {e}");
tracing::trace!("{e:?}");
None
}
});
}
Some(TlsHandlerAction::Passthrough) => {
let (dummy, _drop) = tokio::io::duplex(1);
let mut bt = std::mem::replace(
&mut mid.io,
BackTrackingIO::new(Box::pin(dummy) as AcceptStream),
);
drop(mid);
bt.rewind();
return Ok(Some((
TlsMetadata {
inner: metadata,
tls_info: TlsHandshakeInfo { sni, alpn: None },
},
Box::pin(bt) as AcceptStream,
)));
}
None => {}
},
Box::pin(stream) as AcceptStream,
))
}
Err(e) => {
tracing::trace!("Error completing TLS handshake: {e}");
tracing::trace!("{e:?}");
None
}
});
}
Ok(None)

View File

@@ -6,13 +6,12 @@ use std::sync::{Arc, Weak};
use std::task::{Poll, ready};
use async_acme::acme::ACME_TLS_ALPN_NAME;
use clap::Parser;
use color_eyre::eyre::eyre;
use futures::FutureExt;
use futures::future::BoxFuture;
use imbl::OrdMap;
use imbl_value::{InOMap, InternedString};
use rpc_toolkit::{Context, HandlerArgs, HandlerExt, ParentHandler, from_fn, from_fn_async};
use rpc_toolkit::{Context, HandlerArgs, HandlerExt, ParentHandler, from_fn};
use serde::{Deserialize, Serialize};
use tokio::net::{TcpListener, TcpStream};
use tokio_rustls::TlsConnector;
@@ -36,7 +35,7 @@ use crate::net::gateway::{
};
use crate::net::ssl::{CertStore, RootCaTlsHandler};
use crate::net::tls::{
ChainedHandler, TlsHandlerAction, TlsHandlerWrapper, TlsListener, TlsMetadata, WrapTlsHandler,
ChainedHandler, TlsHandlerWrapper, TlsListener, TlsMetadata, WrapTlsHandler,
};
use crate::net::utils::ipv6_is_link_local;
use crate::net::web_server::{Accept, AcceptStream, ExtractVisitor, TcpMetadata, extract};
@@ -47,228 +46,68 @@ use crate::util::serde::{HandlerExtSerde, MaybeUtf8String, display_serializable}
use crate::util::sync::{SyncMutex, Watch};
use crate::{GatewayId, ResultExt};
#[derive(Debug, Clone, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct PassthroughInfo {
#[ts(type = "string")]
pub hostname: InternedString,
pub listen_port: u16,
#[ts(type = "string")]
pub backend: SocketAddr,
#[ts(type = "string[]")]
pub public_gateways: BTreeSet<GatewayId>,
#[ts(type = "string[]")]
pub private_ips: BTreeSet<IpAddr>,
}
#[derive(Debug, Clone, Deserialize, Serialize, Parser)]
#[serde(rename_all = "kebab-case")]
struct AddPassthroughParams {
#[arg(long)]
pub hostname: InternedString,
#[arg(long)]
pub listen_port: u16,
#[arg(long)]
pub backend: SocketAddr,
#[arg(long)]
pub public_gateway: Vec<GatewayId>,
#[arg(long)]
pub private_ip: Vec<IpAddr>,
}
#[derive(Debug, Clone, Deserialize, Serialize, Parser)]
#[serde(rename_all = "kebab-case")]
struct RemovePassthroughParams {
#[arg(long)]
pub hostname: InternedString,
#[arg(long)]
pub listen_port: u16,
}
pub fn vhost_api<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"dump-table",
from_fn(dump_table)
.with_display_serializable()
.with_custom_display_fn(|HandlerArgs { params, .. }, res| {
use prettytable::*;
ParentHandler::new().subcommand(
"dump-table",
from_fn(|ctx: RpcContext| Ok(ctx.net_controller.vhost.dump_table()))
.with_display_serializable()
.with_custom_display_fn(|HandlerArgs { params, .. }, res| {
use prettytable::*;
if let Some(format) = params.format {
display_serializable(format, res)?;
return Ok::<_, Error>(());
}
if let Some(format) = params.format {
display_serializable(format, res)?;
return Ok::<_, Error>(());
}
let mut table = Table::new();
table.add_row(row![bc => "FROM", "TO", "ACTIVE"]);
let mut table = Table::new();
table.add_row(row![bc => "FROM", "TO", "ACTIVE"]);
for (external, targets) in res {
for (host, targets) in targets {
for (idx, target) in targets.into_iter().enumerate() {
table.add_row(row![
format!(
"{}:{}",
host.as_ref().map(|s| &**s).unwrap_or("*"),
external.0
),
target,
idx == 0
]);
}
for (external, targets) in res {
for (host, targets) in targets {
for (idx, target) in targets.into_iter().enumerate() {
table.add_row(row![
format!(
"{}:{}",
host.as_ref().map(|s| &**s).unwrap_or("*"),
external.0
),
target,
idx == 0
]);
}
}
}
table.print_tty(false)?;
table.print_tty(false)?;
Ok(())
})
.with_call_remote::<CliContext>(),
)
.subcommand(
"add-passthrough",
from_fn_async(add_passthrough)
.no_display()
.with_call_remote::<CliContext>(),
)
.subcommand(
"remove-passthrough",
from_fn_async(remove_passthrough)
.no_display()
.with_call_remote::<CliContext>(),
)
.subcommand(
"list-passthrough",
from_fn(list_passthrough)
.with_display_serializable()
.with_call_remote::<CliContext>(),
)
}
fn dump_table(
ctx: RpcContext,
) -> Result<BTreeMap<JsonKey<u16>, BTreeMap<JsonKey<Option<InternedString>>, EqSet<String>>>, Error>
{
Ok(ctx.net_controller.vhost.dump_table())
}
async fn add_passthrough(
ctx: RpcContext,
AddPassthroughParams {
hostname,
listen_port,
backend,
public_gateway,
private_ip,
}: AddPassthroughParams,
) -> Result<(), Error> {
let public_gateways: BTreeSet<GatewayId> = public_gateway.into_iter().collect();
let private_ips: BTreeSet<IpAddr> = private_ip.into_iter().collect();
ctx.net_controller.vhost.add_passthrough(
hostname.clone(),
listen_port,
backend,
public_gateways.clone(),
private_ips.clone(),
)?;
ctx.db
.mutate(|db| {
let pts = db
.as_public_mut()
.as_server_info_mut()
.as_network_mut()
.as_passthroughs_mut();
let mut vec: Vec<PassthroughInfo> = pts.de()?;
vec.retain(|p| !(p.hostname == hostname && p.listen_port == listen_port));
vec.push(PassthroughInfo {
hostname,
listen_port,
backend,
public_gateways,
private_ips,
});
pts.ser(&vec)
})
.await
.result?;
Ok(())
}
async fn remove_passthrough(
ctx: RpcContext,
RemovePassthroughParams {
hostname,
listen_port,
}: RemovePassthroughParams,
) -> Result<(), Error> {
ctx.net_controller
.vhost
.remove_passthrough(&hostname, listen_port);
ctx.db
.mutate(|db| {
let pts = db
.as_public_mut()
.as_server_info_mut()
.as_network_mut()
.as_passthroughs_mut();
let mut vec: Vec<PassthroughInfo> = pts.de()?;
vec.retain(|p| !(p.hostname == hostname && p.listen_port == listen_port));
pts.ser(&vec)
})
.await
.result?;
Ok(())
}
fn list_passthrough(ctx: RpcContext) -> Result<Vec<PassthroughInfo>, Error> {
Ok(ctx.net_controller.vhost.list_passthrough())
Ok(())
})
.with_call_remote::<CliContext>(),
)
}
// not allowed: <=1024, >=32768, 5355, 5432, 9050, 6010, 9051, 5353
struct PassthroughHandle {
_rc: Arc<()>,
backend: SocketAddr,
public: BTreeSet<GatewayId>,
private: BTreeSet<IpAddr>,
}
pub struct VHostController {
db: TypedPatchDb<Database>,
interfaces: Arc<NetworkInterfaceController>,
crypto_provider: Arc<CryptoProvider>,
acme_cache: AcmeTlsAlpnCache,
servers: SyncMutex<BTreeMap<u16, VHostServer<VHostBindListener>>>,
passthrough_handles: SyncMutex<BTreeMap<(InternedString, u16), PassthroughHandle>>,
}
impl VHostController {
pub fn new(
db: TypedPatchDb<Database>,
interfaces: Arc<NetworkInterfaceController>,
crypto_provider: Arc<CryptoProvider>,
passthroughs: Vec<PassthroughInfo>,
) -> Self {
let controller = Self {
Self {
db,
interfaces,
crypto_provider,
acme_cache: Arc::new(SyncMutex::new(BTreeMap::new())),
servers: SyncMutex::new(BTreeMap::new()),
passthrough_handles: SyncMutex::new(BTreeMap::new()),
};
for pt in passthroughs {
if let Err(e) = controller.add_passthrough(
pt.hostname,
pt.listen_port,
pt.backend,
pt.public_gateways,
pt.private_ips,
) {
tracing::warn!("failed to restore passthrough: {e}");
}
}
controller
}
#[instrument(skip_all)]
pub fn add(
@@ -281,7 +120,20 @@ impl VHostController {
let server = if let Some(server) = writable.remove(&external) {
server
} else {
self.create_server(external)
let bind_reqs = Watch::new(VHostBindRequirements::default());
let listener = VHostBindListener {
ip_info: self.interfaces.watcher.subscribe(),
port: external,
bind_reqs: bind_reqs.clone_unseen(),
listeners: BTreeMap::new(),
};
VHostServer::new(
listener,
bind_reqs,
self.db.clone(),
self.crypto_provider.clone(),
self.acme_cache.clone(),
)
};
let rc = server.add(hostname, target);
writable.insert(external, server);
@@ -289,75 +141,6 @@ impl VHostController {
})
}
fn create_server(&self, port: u16) -> VHostServer<VHostBindListener> {
let bind_reqs = Watch::new(VHostBindRequirements::default());
let listener = VHostBindListener {
ip_info: self.interfaces.watcher.subscribe(),
port,
bind_reqs: bind_reqs.clone_unseen(),
listeners: BTreeMap::new(),
};
VHostServer::new(
listener,
bind_reqs,
self.db.clone(),
self.crypto_provider.clone(),
self.acme_cache.clone(),
)
}
pub fn add_passthrough(
&self,
hostname: InternedString,
port: u16,
backend: SocketAddr,
public: BTreeSet<GatewayId>,
private: BTreeSet<IpAddr>,
) -> Result<(), Error> {
let target = ProxyTarget {
public: public.clone(),
private: private.clone(),
acme: None,
addr: backend,
add_x_forwarded_headers: false,
connect_ssl: Err(AlpnInfo::Reflect),
passthrough: true,
};
let rc = self.add(Some(hostname.clone()), port, DynVHostTarget::new(target))?;
self.passthrough_handles.mutate(|h| {
h.insert(
(hostname, port),
PassthroughHandle {
_rc: rc,
backend,
public,
private,
},
);
});
Ok(())
}
pub fn remove_passthrough(&self, hostname: &InternedString, port: u16) {
self.passthrough_handles
.mutate(|h| h.remove(&(hostname.clone(), port)));
self.gc(Some(hostname.clone()), port);
}
pub fn list_passthrough(&self) -> Vec<PassthroughInfo> {
self.passthrough_handles.peek(|h| {
h.iter()
.map(|((hostname, port), handle)| PassthroughInfo {
hostname: hostname.clone(),
listen_port: *port,
backend: handle.backend,
public_gateways: handle.public.clone(),
private_ips: handle.private.clone(),
})
.collect()
})
}
pub fn dump_table(
&self,
) -> BTreeMap<JsonKey<u16>, BTreeMap<JsonKey<Option<InternedString>>, EqSet<String>>> {
@@ -547,9 +330,6 @@ pub trait VHostTarget<A: Accept>: std::fmt::Debug + Eq {
fn bind_requirements(&self) -> (BTreeSet<GatewayId>, BTreeSet<IpAddr>) {
(BTreeSet::new(), BTreeSet::new())
}
fn is_passthrough(&self) -> bool {
false
}
fn preprocess<'a>(
&'a self,
prev: ServerConfig,
@@ -569,7 +349,6 @@ pub trait DynVHostTargetT<A: Accept>: std::fmt::Debug + Any {
fn filter(&self, metadata: &<A as Accept>::Metadata) -> bool;
fn acme(&self) -> Option<&AcmeProvider>;
fn bind_requirements(&self) -> (BTreeSet<GatewayId>, BTreeSet<IpAddr>);
fn is_passthrough(&self) -> bool;
fn preprocess<'a>(
&'a self,
prev: ServerConfig,
@@ -594,9 +373,6 @@ impl<A: Accept, T: VHostTarget<A> + 'static> DynVHostTargetT<A> for T {
fn acme(&self) -> Option<&AcmeProvider> {
VHostTarget::acme(self)
}
fn is_passthrough(&self) -> bool {
VHostTarget::is_passthrough(self)
}
fn bind_requirements(&self) -> (BTreeSet<GatewayId>, BTreeSet<IpAddr>) {
VHostTarget::bind_requirements(self)
}
@@ -683,7 +459,6 @@ pub struct ProxyTarget {
pub addr: SocketAddr,
pub add_x_forwarded_headers: bool,
pub connect_ssl: Result<Arc<ClientConfig>, AlpnInfo>, // Ok: yes, connect using ssl, pass through alpn; Err: connect tcp, use provided strategy for alpn
pub passthrough: bool,
}
impl PartialEq for ProxyTarget {
fn eq(&self, other: &Self) -> bool {
@@ -691,7 +466,6 @@ impl PartialEq for ProxyTarget {
&& self.private == other.private
&& self.acme == other.acme
&& self.addr == other.addr
&& self.passthrough == other.passthrough
&& self.connect_ssl.as_ref().map(Arc::as_ptr)
== other.connect_ssl.as_ref().map(Arc::as_ptr)
}
@@ -706,7 +480,6 @@ impl fmt::Debug for ProxyTarget {
.field("addr", &self.addr)
.field("add_x_forwarded_headers", &self.add_x_forwarded_headers)
.field("connect_ssl", &self.connect_ssl.as_ref().map(|_| ()))
.field("passthrough", &self.passthrough)
.finish()
}
}
@@ -751,9 +524,6 @@ where
fn bind_requirements(&self) -> (BTreeSet<GatewayId>, BTreeSet<IpAddr>) {
(self.public.clone(), self.private.clone())
}
fn is_passthrough(&self) -> bool {
self.passthrough
}
async fn preprocess<'a>(
&'a self,
mut prev: ServerConfig,
@@ -907,7 +677,7 @@ where
prev: ServerConfig,
hello: &'a ClientHello<'a>,
metadata: &'a <A as Accept>::Metadata,
) -> Option<TlsHandlerAction>
) -> Option<ServerConfig>
where
Self: 'a,
{
@@ -917,7 +687,7 @@ where
.flatten()
.any(|a| a == ACME_TLS_ALPN_NAME)
{
return Some(TlsHandlerAction::Tls(prev));
return Some(prev);
}
let (target, rc) = self.0.peek(|m| {
@@ -930,16 +700,11 @@ where
.map(|(t, rc)| (t.clone(), rc.clone()))
})?;
let is_pt = target.0.is_passthrough();
let (prev, store) = target.into_preprocessed(rc, prev, hello, metadata).await?;
self.1 = Some(store);
if is_pt {
Some(TlsHandlerAction::Passthrough)
} else {
Some(TlsHandlerAction::Tls(prev))
}
Some(prev)
}
}

View File

@@ -27,63 +27,6 @@ use crate::util::serde::IoFormat;
mod gpt;
mod mbr;
/// Get the EFI BootCurrent entry number (the entry firmware used to boot).
/// Returns None on non-EFI systems or if BootCurrent is not set.
async fn get_efi_boot_current() -> Result<Option<String>, Error> {
let efi_output = String::from_utf8(
Command::new("efibootmgr")
.invoke(ErrorKind::Grub)
.await?,
)
.map_err(|e| Error::new(eyre!("efibootmgr output not valid UTF-8: {e}"), ErrorKind::Grub))?;
Ok(efi_output
.lines()
.find(|line| line.starts_with("BootCurrent:"))
.and_then(|line| line.strip_prefix("BootCurrent:"))
.map(|s| s.trim().to_string()))
}
/// Promote a specific boot entry to first in the EFI boot order.
async fn promote_efi_entry(entry: &str) -> Result<(), Error> {
let efi_output = String::from_utf8(
Command::new("efibootmgr")
.invoke(ErrorKind::Grub)
.await?,
)
.map_err(|e| Error::new(eyre!("efibootmgr output not valid UTF-8: {e}"), ErrorKind::Grub))?;
let current_order = efi_output
.lines()
.find(|line| line.starts_with("BootOrder:"))
.and_then(|line| line.strip_prefix("BootOrder:"))
.map(|s| s.trim())
.unwrap_or("");
if current_order.is_empty() || current_order.starts_with(entry) {
return Ok(());
}
let other_entries: Vec<&str> = current_order
.split(',')
.filter(|e| e.trim() != entry)
.collect();
let new_order = if other_entries.is_empty() {
entry.to_string()
} else {
format!("{},{}", entry, other_entries.join(","))
};
Command::new("efibootmgr")
.arg("-o")
.arg(&new_order)
.invoke(ErrorKind::Grub)
.await?;
Ok(())
}
/// Probe a squashfs image to determine its target architecture
async fn probe_squashfs_arch(squashfs_path: &Path) -> Result<InternedString, Error> {
let output = String::from_utf8(
@@ -485,21 +428,6 @@ pub async fn install_os(
});
let use_efi = tokio::fs::metadata("/sys/firmware/efi").await.is_ok();
// Save the boot entry we booted from (the USB installer) before grub-install
// overwrites the boot order.
let boot_current = if use_efi {
match get_efi_boot_current().await {
Ok(entry) => entry,
Err(e) => {
tracing::warn!("Failed to get EFI BootCurrent: {e}");
None
}
}
} else {
None
};
let InstallOsResult { part_info, rootfs } = install_os_to(
"/run/live/medium/live/filesystem.squashfs",
&disk.logicalname,
@@ -511,20 +439,6 @@ pub async fn install_os(
)
.await?;
// grub-install prepends its new entry to the EFI boot order, overriding the
// USB-first priority. Promote the USB entry (identified by BootCurrent from
// when we booted the installer) back to first, and persist the entry number
// so the upgrade script can do the same.
if let Some(ref entry) = boot_current {
if let Err(e) = promote_efi_entry(entry).await {
tracing::warn!("Failed to restore EFI boot order: {e}");
}
let efi_entry_path = rootfs.path().join("config/efi-installer-entry");
if let Err(e) = tokio::fs::write(&efi_entry_path, entry).await {
tracing::warn!("Failed to save EFI installer entry number: {e}");
}
}
ctx.config
.mutate(|c| c.os_partitions = Some(part_info.clone()));

View File

@@ -255,7 +255,30 @@ impl Model<PackageVersionInfo> {
}
if let Some(hw) = &device_info.hardware {
self.as_s9pks_mut().mutate(|s9pks| {
s9pks.retain(|(hw_req, _)| hw_req.is_compatible(hw));
s9pks.retain(|(hw_req, _)| {
if let Some(arch) = &hw_req.arch {
if !arch.contains(&hw.arch) {
return false;
}
}
if let Some(ram) = hw_req.ram {
if hw.ram < ram {
return false;
}
}
if let Some(dev) = &hw.devices {
for device_filter in &hw_req.device {
if !dev
.iter()
.filter(|d| d.class() == &*device_filter.class)
.any(|d| device_filter.matches(d))
{
return false;
}
}
}
true
});
if hw.devices.is_some() {
s9pks.sort_by_key(|(req, _)| req.specificity_desc());
} else {

View File

@@ -58,9 +58,6 @@ pub struct AddPackageSignerParams {
#[arg(long, help = "help.arg.version-range")]
#[ts(type = "string | null")]
pub versions: Option<VersionRange>,
#[arg(long, help = "help.arg.merge")]
#[ts(optional)]
pub merge: Option<bool>,
}
pub async fn add_package_signer(
@@ -69,7 +66,6 @@ pub async fn add_package_signer(
id,
signer,
versions,
merge,
}: AddPackageSignerParams,
) -> Result<(), Error> {
ctx.db
@@ -80,22 +76,13 @@ pub async fn add_package_signer(
"unknown signer {signer}"
);
let versions = versions.unwrap_or_default();
db.as_index_mut()
.as_package_mut()
.as_packages_mut()
.as_idx_mut(&id)
.or_not_found(&id)?
.as_authorized_mut()
.upsert(&signer, || Ok(VersionRange::None))?
.mutate(|existing| {
*existing = if merge.unwrap_or(false) {
VersionRange::or(existing.clone(), versions)
} else {
versions
};
Ok(())
})?;
.insert(&signer, &versions.unwrap_or_default())?;
Ok(())
})

View File

@@ -3,17 +3,16 @@ use std::path::PathBuf;
use std::sync::Arc;
use clap::Parser;
use rpc_toolkit::{Empty, HandlerArgs, HandlerExt, ParentHandler, from_fn_async};
use rpc_toolkit::{Empty, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize};
use tokio::process::Command;
use ts_rs::TS;
use url::Url;
use crate::ImageId;
use crate::context::{CliContext, RpcContext};
use crate::context::CliContext;
use crate::prelude::*;
use crate::registry::device_info::DeviceInfo;
use crate::s9pk::manifest::{HardwareRequirements, Manifest};
use crate::s9pk::manifest::Manifest;
use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile;
use crate::s9pk::v2::SIG_CONTEXT;
use crate::s9pk::v2::pack::ImageConfig;
@@ -71,15 +70,6 @@ pub fn s9pk() -> ParentHandler<CliContext> {
.no_display()
.with_about("about.publish-s9pk"),
)
.subcommand(
"select",
from_fn_async(select)
.with_custom_display_fn(|_, path: PathBuf| {
println!("{}", path.display());
Ok(())
})
.with_about("about.select-s9pk-for-device"),
)
}
#[derive(Deserialize, Serialize, Parser)]
@@ -333,97 +323,3 @@ async fn publish(ctx: CliContext, S9pkPath { s9pk: s9pk_path }: S9pkPath) -> Res
.await?;
crate::registry::package::add::cli_add_package_impl(ctx, s9pk, vec![s3url], false).await
}
#[derive(Deserialize, Serialize, Parser)]
struct SelectParams {
#[arg(help = "help.arg.s9pk-file-paths")]
s9pks: Vec<PathBuf>,
}
async fn select(
HandlerArgs {
context,
params: SelectParams { s9pks },
..
}: HandlerArgs<CliContext, SelectParams>,
) -> Result<PathBuf, Error> {
// Resolve file list: use provided paths or scan cwd for *.s9pk
let paths = if s9pks.is_empty() {
let mut found = Vec::new();
let mut entries = tokio::fs::read_dir(".").await?;
while let Some(entry) = entries.next_entry().await? {
let path = entry.path();
if path.extension().and_then(|e| e.to_str()) == Some("s9pk") {
found.push(path);
}
}
if found.is_empty() {
return Err(Error::new(
eyre!("no .s9pk files found in current directory"),
ErrorKind::NotFound,
));
}
found
} else {
s9pks
};
// Fetch DeviceInfo from the target server
let device_info: DeviceInfo = from_value(
context
.call_remote::<RpcContext>("server.device-info", imbl_value::json!({}))
.await?,
)?;
// Filter and rank s9pk files by compatibility
let mut compatible: Vec<(PathBuf, HardwareRequirements)> = Vec::new();
for path in &paths {
let s9pk = match super::S9pk::open(path, None).await {
Ok(s9pk) => s9pk,
Err(e) => {
tracing::warn!("skipping {}: {e}", path.display());
continue;
}
};
let manifest = s9pk.as_manifest();
// OS version check: package's required OS version must be in server's compat range
if !manifest
.metadata
.os_version
.satisfies(&device_info.os.compat)
{
continue;
}
let hw_req = &manifest.hardware_requirements;
if let Some(hw) = &device_info.hardware {
if !hw_req.is_compatible(hw) {
continue;
}
}
compatible.push((path.clone(), hw_req.clone()));
}
if compatible.is_empty() {
return Err(Error::new(
eyre!(
"no compatible s9pk found for device (arch: {}, os: {})",
device_info
.hardware
.as_ref()
.map(|h| h.arch.to_string())
.unwrap_or_else(|| "unknown".into()),
device_info.os.version,
),
ErrorKind::NotFound,
));
}
// Sort by specificity (most specific first)
compatible.sort_by_key(|(_, req)| req.specificity_desc());
Ok(compatible.into_iter().next().unwrap().0)
}

View File

@@ -154,32 +154,6 @@ pub struct HardwareRequirements {
pub arch: Option<BTreeSet<InternedString>>,
}
impl HardwareRequirements {
/// Returns true if this s9pk's hardware requirements are satisfied by the given hardware.
pub fn is_compatible(&self, hw: &crate::registry::device_info::HardwareInfo) -> bool {
if let Some(arch) = &self.arch {
if !arch.contains(&hw.arch) {
return false;
}
}
if let Some(ram) = self.ram {
if hw.ram < ram {
return false;
}
}
if let Some(devices) = &hw.devices {
for device_filter in &self.device {
if !devices
.iter()
.filter(|d| d.class() == &*device_filter.class)
.any(|d| device_filter.matches(d))
{
return false;
}
}
}
true
}
/// returns a value that can be used as a sort key to get most specific requirements first
pub fn specificity_desc(&self) -> (u32, u32, u64) {
(

View File

@@ -251,12 +251,11 @@ async fn create_task(
.get(&task.package_id)
.await
.as_ref()
.filter(|s| s.is_initialized())
{
let prev = service
let Some(prev) = service
.get_action_input(procedure_id.clone(), task.action_id.clone(), Value::Null)
.await?;
let Some(prev) = prev else {
.await?
else {
return Err(Error::new(
eyre!(
"{}",
@@ -279,9 +278,7 @@ async fn create_task(
true
}
} else {
// Service not installed or not yet initialized — assume active.
// Will be retested when service init completes (Service::recheck_tasks).
true
true // update when service is installed
}
}
},

View File

@@ -52,7 +52,7 @@ use crate::util::serde::Pem;
use crate::util::sync::SyncMutex;
use crate::util::tui::choose;
use crate::volume::data_dir;
use crate::{ActionId, CAP_1_KiB, DATA_DIR, ImageId, PackageId};
use crate::{ActionId, CAP_1_KiB, DATA_DIR, HostId, ImageId, PackageId};
pub mod action;
pub mod cli;
@@ -215,84 +215,6 @@ pub struct Service {
seed: Arc<ServiceActorSeed>,
}
impl Service {
pub fn is_initialized(&self) -> bool {
self.seed.persistent_container.state.borrow().rt_initialized
}
/// Re-evaluate all tasks that reference this service's actions.
/// Called after every service init to update task active state.
#[instrument(skip_all)]
async fn recheck_tasks(&self) -> Result<(), Error> {
let service_id = &self.seed.id;
let peek = self.seed.ctx.db.peek().await;
let mut action_input: BTreeMap<ActionId, Value> = BTreeMap::new();
let tasks: BTreeSet<_> = peek
.as_public()
.as_package_data()
.as_entries()?
.into_iter()
.map(|(_, pde)| {
Ok(pde
.as_tasks()
.as_entries()?
.into_iter()
.map(|(_, r)| {
let t = r.as_task();
Ok::<_, Error>(
if t.as_package_id().de()? == *service_id
&& t.as_input().transpose_ref().is_some()
{
Some(t.as_action_id().de()?)
} else {
None
},
)
})
.filter_map_ok(|a| a))
})
.flatten_ok()
.map(|a| a.and_then(|a| a))
.try_collect()?;
let procedure_id = Guid::new();
for action_id in tasks {
if let Some(input) = self
.get_action_input(procedure_id.clone(), action_id.clone(), Value::Null)
.await
.log_err()
.flatten()
.and_then(|i| i.value)
{
action_input.insert(action_id, input);
}
}
self.seed
.ctx
.db
.mutate(|db| {
for (action_id, input) in &action_input {
for (_, pde) in db.as_public_mut().as_package_data_mut().as_entries_mut()? {
pde.as_tasks_mut().mutate(|tasks| {
Ok(update_tasks(tasks, service_id, action_id, input, false))
})?;
}
}
for (_, pde) in db.as_public_mut().as_package_data_mut().as_entries_mut()? {
if pde
.as_tasks()
.de()?
.into_iter()
.any(|(_, t)| t.active && t.task.severity == TaskSeverity::Critical)
{
pde.as_status_info_mut().stop()?;
}
}
Ok(())
})
.await
.result?;
Ok(())
}
#[instrument(skip_all)]
async fn new(
ctx: RpcContext,
@@ -341,7 +263,6 @@ impl Service {
.persistent_container
.init(service.weak(), procedure_id, init_kind)
.await?;
service.recheck_tasks().await?;
if let Some(recovery_guard) = recovery_guard {
recovery_guard.unmount(true).await?;
}
@@ -568,8 +489,70 @@ impl Service {
)
.await?;
if let Some(mut progress) = progress {
progress.finalization_progress.complete();
progress.progress.complete();
tokio::task::yield_now().await;
}
let peek = ctx.db.peek().await;
let mut action_input: BTreeMap<ActionId, Value> = BTreeMap::new();
let tasks: BTreeSet<_> = peek
.as_public()
.as_package_data()
.as_entries()?
.into_iter()
.map(|(_, pde)| {
Ok(pde
.as_tasks()
.as_entries()?
.into_iter()
.map(|(_, r)| {
let t = r.as_task();
Ok::<_, Error>(
if t.as_package_id().de()? == manifest.id
&& t.as_input().transpose_ref().is_some()
{
Some(t.as_action_id().de()?)
} else {
None
},
)
})
.filter_map_ok(|a| a))
})
.flatten_ok()
.map(|a| a.and_then(|a| a))
.try_collect()?;
for action_id in tasks {
if peek
.as_public()
.as_package_data()
.as_idx(&manifest.id)
.or_not_found(&manifest.id)?
.as_actions()
.contains_key(&action_id)?
{
if let Some(input) = service
.get_action_input(procedure_id.clone(), action_id.clone(), Value::Null)
.await
.log_err()
.flatten()
.and_then(|i| i.value)
{
action_input.insert(action_id, input);
}
}
}
ctx.db
.mutate(|db| {
for (action_id, input) in &action_input {
for (_, pde) in db.as_public_mut().as_package_data_mut().as_entries_mut()? {
pde.as_tasks_mut().mutate(|tasks| {
Ok(update_tasks(tasks, &manifest.id, action_id, input, false))
})?;
}
}
let entry = db
.as_public_mut()
.as_package_data_mut()
@@ -611,12 +594,6 @@ impl Service {
.await
.result?;
if let Some(mut progress) = progress {
progress.finalization_progress.complete();
progress.progress.complete();
tokio::task::yield_now().await;
}
// Trigger manifest callbacks after successful installation
let manifest = service.seed.persistent_container.s9pk.as_manifest();
if let Some(callbacks) = ctx.callbacks.get_service_manifest(&manifest.id) {
@@ -706,6 +683,14 @@ impl Service {
memory_usage: MiB::from_MiB(used),
})
}
pub async fn sync_host(&self, host_id: HostId) -> Result<(), Error> {
self.seed
.persistent_container
.net_service
.sync_host(host_id)
.await
}
}
struct ServiceActorSeed {

View File

@@ -176,6 +176,8 @@ pub struct AttachParams {
pub guid: InternedString,
#[ts(optional)]
pub kiosk: Option<bool>,
pub name: Option<InternedString>,
pub hostname: Option<InternedString>,
}
#[instrument(skip_all)]
@@ -185,6 +187,8 @@ pub async fn attach(
password,
guid: disk_guid,
kiosk,
name,
hostname,
}: AttachParams,
) -> Result<SetupProgress, Error> {
let setup_ctx = ctx.clone();
@@ -238,8 +242,10 @@ pub async fn attach(
}
disk_phase.complete();
let hostname = ServerHostnameInfo::new_opt(name, hostname)?;
let (account, net_ctrl) =
setup_init(&setup_ctx, password, kiosk, None, init_phases).await?;
setup_init(&setup_ctx, password, kiosk, hostname, init_phases).await?;
let rpc_ctx = RpcContext::init(
&setup_ctx.webserver,
@@ -408,7 +414,7 @@ pub async fn setup_data_drive(
#[ts(export)]
pub struct SetupExecuteParams {
guid: InternedString,
password: Option<EncryptedWire>,
password: EncryptedWire,
recovery_source: Option<RecoverySource<EncryptedWire>>,
#[ts(optional)]
kiosk: Option<bool>,
@@ -428,16 +434,15 @@ pub async fn execute(
hostname,
}: SetupExecuteParams,
) -> Result<SetupProgress, Error> {
let password = password
.map(|p| {
p.decrypt(&ctx).ok_or_else(|| {
Error::new(
color_eyre::eyre::eyre!("{}", t!("setup.couldnt-decode-startos-password")),
crate::ErrorKind::Unknown,
)
})
})
.transpose()?;
let password = match password.decrypt(&ctx) {
Some(a) => a,
None => {
return Err(Error::new(
color_eyre::eyre::eyre!("{}", t!("setup.couldnt-decode-startos-password")),
crate::ErrorKind::Unknown,
));
}
};
let recovery = match recovery_source {
Some(RecoverySource::Backup {
target,
@@ -546,7 +551,7 @@ pub async fn shutdown(ctx: SetupContext) -> Result<(), Error> {
pub async fn execute_inner(
ctx: SetupContext,
guid: InternedString,
password: Option<String>,
password: String,
recovery_source: Option<RecoverySource<String>>,
kiosk: Option<bool>,
hostname: Option<ServerHostnameInfo>,
@@ -592,22 +597,7 @@ pub async fn execute_inner(
Some(RecoverySource::Migrate { guid: old_guid }) => {
migrate(&ctx, guid, &old_guid, password, kiosk, hostname, progress).await
}
None => {
fresh_setup(
&ctx,
guid,
&password.ok_or_else(|| {
Error::new(
eyre!("{}", t!("setup.password-required")),
ErrorKind::InvalidRequest,
)
})?,
kiosk,
hostname,
progress,
)
.await
}
None => fresh_setup(&ctx, guid, &password, kiosk, hostname, progress).await,
}
}
@@ -678,7 +668,7 @@ async fn fresh_setup(
async fn recover(
ctx: &SetupContext,
guid: InternedString,
password: Option<String>,
password: String,
recovery_source: BackupTargetFS,
server_id: String,
recovery_password: String,
@@ -706,7 +696,7 @@ async fn migrate(
ctx: &SetupContext,
guid: InternedString,
old_guid: &str,
password: Option<String>,
password: String,
kiosk: Option<bool>,
hostname: Option<ServerHostnameInfo>,
SetupExecuteProgress {
@@ -787,7 +777,8 @@ async fn migrate(
crate::disk::main::export(&old_guid, "/media/startos/migrate").await?;
restore_phase.complete();
let (account, net_ctrl) = setup_init(&ctx, password, kiosk, hostname, init_phases).await?;
let (account, net_ctrl) =
setup_init(&ctx, Some(password), kiosk, hostname, init_phases).await?;
let rpc_ctx = RpcContext::init(
&ctx.webserver,

View File

@@ -20,7 +20,6 @@ use crate::context::{CliContext, RpcContext};
use crate::disk::util::{get_available, get_used};
use crate::logs::{LogSource, LogsParams, SYSTEM_UNIT};
use crate::prelude::*;
use crate::registry::device_info::DeviceInfo;
use crate::rpc_continuations::{Guid, RpcContinuation, RpcContinuations};
use crate::shutdown::Shutdown;
use crate::util::Invoke;
@@ -250,64 +249,6 @@ pub async fn time(ctx: RpcContext, _: Empty) -> Result<TimeInfo, Error> {
})
}
pub async fn device_info(ctx: RpcContext) -> Result<DeviceInfo, Error> {
DeviceInfo::load(&ctx).await
}
pub fn display_device_info(params: WithIoFormat<Empty>, info: DeviceInfo) -> Result<(), Error> {
use prettytable::*;
if let Some(format) = params.format {
return display_serializable(format, info);
}
let mut table = Table::new();
table.add_row(row![br -> "PLATFORM", &*info.os.platform]);
table.add_row(row![br -> "OS VERSION", info.os.version.to_string()]);
table.add_row(row![br -> "OS COMPAT", info.os.compat.to_string()]);
if let Some(lang) = &info.os.language {
table.add_row(row![br -> "LANGUAGE", &**lang]);
}
if let Some(hw) = &info.hardware {
table.add_row(row![br -> "ARCH", &*hw.arch]);
table.add_row(row![br -> "RAM", format_ram(hw.ram)]);
if let Some(devices) = &hw.devices {
for dev in devices {
let (class, desc) = match dev {
crate::util::lshw::LshwDevice::Processor(p) => (
"PROCESSOR",
p.product.as_deref().unwrap_or("unknown").to_string(),
),
crate::util::lshw::LshwDevice::Display(d) => (
"DISPLAY",
format!(
"{}{}",
d.product.as_deref().unwrap_or("unknown"),
d.driver
.as_deref()
.map(|drv| format!(" ({})", drv))
.unwrap_or_default()
),
),
};
table.add_row(row![br -> class, desc]);
}
}
}
table.print_tty(false)?;
Ok(())
}
fn format_ram(bytes: u64) -> String {
const GIB: u64 = 1024 * 1024 * 1024;
const MIB: u64 = 1024 * 1024;
if bytes >= GIB {
format!("{:.1} GiB", bytes as f64 / GIB as f64)
} else {
format!("{:.1} MiB", bytes as f64 / MIB as f64)
}
}
pub fn logs<C: Context + AsRef<RpcContinuations>>() -> ParentHandler<C, LogsParams> {
crate::logs::logs(|_: &C, _| async { Ok(LogSource::Unit(SYSTEM_UNIT)) })
}
@@ -1238,13 +1179,19 @@ pub async fn test_smtp(
.body("This is a test email sent from your StartOS Server".to_owned())?;
let transport = match security {
SmtpSecurity::Starttls => AsyncSmtpTransport::<Tokio1Executor>::starttls_relay(&host)?,
SmtpSecurity::Tls => AsyncSmtpTransport::<Tokio1Executor>::relay(&host)?,
}
.port(port)
.tls(Tls::Wrapper(TlsParameters::new(host.clone())?))
.credentials(creds)
.build();
SmtpSecurity::Starttls => AsyncSmtpTransport::<Tokio1Executor>::relay(&host)?
.port(port)
.credentials(creds)
.build(),
SmtpSecurity::Tls => {
let tls = TlsParameters::new(host.clone())?;
AsyncSmtpTransport::<Tokio1Executor>::relay(&host)?
.port(port)
.tls(Tls::Wrapper(tls))
.credentials(creds)
.build()
}
};
transport.send(message).await?;
Ok(())

View File

@@ -11,7 +11,6 @@ use crate::db::model::public::NetworkInterfaceType;
use crate::net::forward::add_iptables_rule;
use crate::prelude::*;
use crate::tunnel::context::TunnelContext;
use crate::tunnel::db::PortForwardEntry;
use crate::tunnel::wg::{WIREGUARD_INTERFACE_NAME, WgConfig, WgSubnetClients, WgSubnetConfig};
use crate::util::serde::{HandlerExtSerde, display_serializable};
@@ -52,22 +51,6 @@ pub fn tunnel_api<C: Context>() -> ParentHandler<C> {
.no_display()
.with_about("about.remove-port-forward")
.with_call_remote::<CliContext>(),
)
.subcommand(
"update-label",
from_fn_async(update_forward_label)
.with_metadata("sync_db", Value::Bool(true))
.no_display()
.with_about("about.update-port-forward-label")
.with_call_remote::<CliContext>(),
)
.subcommand(
"set-enabled",
from_fn_async(set_forward_enabled)
.with_metadata("sync_db", Value::Bool(true))
.no_display()
.with_about("about.enable-or-disable-port-forward")
.with_call_remote::<CliContext>(),
),
)
.subcommand(
@@ -470,17 +453,11 @@ pub async fn show_config(
pub struct AddPortForwardParams {
source: SocketAddrV4,
target: SocketAddrV4,
#[arg(long)]
label: String,
}
pub async fn add_forward(
ctx: TunnelContext,
AddPortForwardParams {
source,
target,
label,
}: AddPortForwardParams,
AddPortForwardParams { source, target }: AddPortForwardParams,
) -> Result<(), Error> {
let prefix = ctx
.net_iface
@@ -505,12 +482,10 @@ pub async fn add_forward(
m.insert(source, rc);
});
let entry = PortForwardEntry { target, label, enabled: true };
ctx.db
.mutate(|db| {
db.as_port_forwards_mut()
.insert(&source, &entry)
.insert(&source, &target)
.and_then(|replaced| {
if replaced.is_some() {
Err(Error::new(
@@ -548,92 +523,3 @@ pub async fn remove_forward(
}
Ok(())
}
#[derive(Deserialize, Serialize, Parser)]
#[serde(rename_all = "camelCase")]
pub struct UpdatePortForwardLabelParams {
source: SocketAddrV4,
label: String,
}
pub async fn update_forward_label(
ctx: TunnelContext,
UpdatePortForwardLabelParams { source, label }: UpdatePortForwardLabelParams,
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
db.as_port_forwards_mut().mutate(|pf| {
let entry = pf.0.get_mut(&source).ok_or_else(|| {
Error::new(
eyre!("Port forward from {source} not found"),
ErrorKind::NotFound,
)
})?;
entry.label = label.clone();
Ok(())
})
})
.await
.result
}
#[derive(Deserialize, Serialize, Parser)]
#[serde(rename_all = "camelCase")]
pub struct SetPortForwardEnabledParams {
source: SocketAddrV4,
enabled: bool,
}
pub async fn set_forward_enabled(
ctx: TunnelContext,
SetPortForwardEnabledParams { source, enabled }: SetPortForwardEnabledParams,
) -> Result<(), Error> {
let target = ctx
.db
.mutate(|db| {
db.as_port_forwards_mut().mutate(|pf| {
let entry = pf.0.get_mut(&source).ok_or_else(|| {
Error::new(
eyre!("Port forward from {source} not found"),
ErrorKind::NotFound,
)
})?;
entry.enabled = enabled;
Ok(entry.target)
})
})
.await
.result?;
if enabled {
let prefix = ctx
.net_iface
.peek(|i| {
i.iter()
.find_map(|(_, i)| {
i.ip_info.as_ref().and_then(|i| {
i.subnets
.iter()
.find(|s| s.contains(&IpAddr::from(*target.ip())))
})
})
.cloned()
})
.map(|s| s.prefix_len())
.unwrap_or(32);
let rc = ctx
.forward
.add_forward(source, target, prefix, None)
.await?;
ctx.active_forwards.mutate(|m| {
m.insert(source, rc);
});
} else {
if let Some(rc) = ctx.active_forwards.mutate(|m| m.remove(&source)) {
drop(rc);
ctx.forward.gc().await?;
}
}
Ok(())
}

View File

@@ -184,11 +184,7 @@ impl TunnelContext {
}
let mut active_forwards = BTreeMap::new();
for (from, entry) in peek.as_port_forwards().de()?.0 {
if !entry.enabled {
continue;
}
let to = entry.target;
for (from, to) in peek.as_port_forwards().de()?.0 {
let prefix = net_iface
.peek(|i| {
i.iter()

View File

@@ -53,7 +53,7 @@ impl Model<TunnelDatabase> {
}
self.as_port_forwards_mut().mutate(|pf| {
Ok(pf.0.retain(|k, v| {
if keep_targets.contains(v.target.ip()) {
if keep_targets.contains(v.ip()) {
keep_sources.insert(*k);
true
} else {
@@ -70,25 +70,11 @@ fn export_bindings_tunnel_db() {
TunnelDatabase::export_all_to("bindings/tunnel").unwrap();
}
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
pub struct PortForwardEntry {
pub target: SocketAddrV4,
#[serde(default)]
pub label: String,
#[serde(default = "default_true")]
pub enabled: bool,
}
fn default_true() -> bool {
true
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, TS)]
pub struct PortForwards(pub BTreeMap<SocketAddrV4, PortForwardEntry>);
pub struct PortForwards(pub BTreeMap<SocketAddrV4, SocketAddrV4>);
impl Map for PortForwards {
type Key = SocketAddrV4;
type Value = PortForwardEntry;
type Value = SocketAddrV4;
fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error> {
Self::key_string(key)
}

View File

@@ -20,7 +20,7 @@ use ts_rs::TS;
use crate::context::CliContext;
use crate::hostname::ServerHostname;
use crate::net::ssl::{SANInfo, root_ca_start_time};
use crate::net::tls::{TlsHandler, TlsHandlerAction};
use crate::net::tls::TlsHandler;
use crate::net::web_server::Accept;
use crate::prelude::*;
use crate::tunnel::auth::SetPasswordParams;
@@ -59,7 +59,7 @@ where
&'a mut self,
_: &'a ClientHello<'a>,
_: &'a <A as Accept>::Metadata,
) -> Option<TlsHandlerAction> {
) -> Option<ServerConfig> {
let cert_info = self
.db
.peek()
@@ -88,7 +88,7 @@ where
.log_err()?;
cfg.alpn_protocols
.extend([b"http/1.1".into(), b"h2".into()]);
Some(TlsHandlerAction::Tls(cfg))
Some(cfg)
}
}
@@ -524,26 +524,26 @@ pub async fn init_web(ctx: CliContext) -> Result<(), Error> {
"To access your Web URL securely, trust your Root CA (displayed above) on your client device(s):\n",
" - MacOS\n",
" 1. Open the Terminal app\n",
" 2. Type or copy/paste the following command (**DO NOT** click Enter/Return yet): pbpaste > ~/Desktop/tunnel-ca.crt\n",
" 2. Paste the following command (**DO NOT** click Return): pbcopy < ~/Desktop/ca.crt\n",
" 3. Copy your Root CA (including -----BEGIN CERTIFICATE----- and -----END CERTIFICATE-----)\n",
" 4. Back in Terminal, click Enter/Return. tunnel-ca.crt is saved to your Desktop\n",
" 5. Complete by trusting your Root CA: https://docs.start9.com/start-os/0.4.0.x/user-manual/trust-ca.html?platform=Mac\n",
" 4. Back in Terminal, click Return. ca.crt is saved to your Desktop\n",
" 5. Complete by trusting your Root CA: https://docs.start9.com/device-guides/mac/ca.html\n",
" - Linux\n",
" 1. Open gedit, nano, or any editor\n",
" 2. Copy/paste your Root CA (including -----BEGIN CERTIFICATE----- and -----END CERTIFICATE-----)\n",
" 3. Name the file tunnel-ca.crt and save as plaintext\n",
" 4. Complete by trusting your Root CA: https://docs.start9.com/start-os/0.4.0.x/user-manual/trust-ca.html?platform=Debian+%252F+Ubuntu\n",
" 3. Name the file ca.crt and save as plaintext\n",
" 4. Complete by trusting your Root CA: https://docs.start9.com/device-guides/linux/ca.html\n",
" - Windows\n",
" 1. Open the Notepad app\n",
" 2. Copy/paste your Root CA (including -----BEGIN CERTIFICATE----- and -----END CERTIFICATE-----)\n",
" 3. Name the file tunnel-ca.crt and save as plaintext\n",
" 4. Complete by trusting your Root CA: https://docs.start9.com/start-os/0.4.0.x/user-manual/trust-ca.html?platform=Windows\n",
" 3. Name the file ca.crt and save as plaintext\n",
" 4. Complete by trusting your Root CA: https://docs.start9.com/device-guides/windows/ca.html\n",
" - Android/Graphene\n",
" 1. Send the tunnel-ca.crt file (created above) to yourself\n",
" 2. Complete by trusting your Root CA: https://docs.start9.com/start-os/0.4.0.x/user-manual/trust-ca.html?platform=Android+%252F+Graphene\n",
" 1. Send the ca.crt file (created above) to yourself\n",
" 2. Complete by trusting your Root CA: https://docs.start9.com/device-guides/android/ca.html\n",
" - iOS\n",
" 1. Send the tunnel-ca.crt file (created above) to yourself\n",
" 2. Complete by trusting your Root CA: https://docs.start9.com/start-os/0.4.0.x/user-manual/trust-ca.html?platform=iOS\n",
" 1. Send the ca.crt file (created above) to yourself\n",
" 2. Complete by trusting your Root CA: https://docs.start9.com/device-guides/ios/ca.html\n",
));
return Ok(());

View File

@@ -168,7 +168,7 @@ impl VersionT for Version {
// Migrate SMTP: rename server->host, login->username, add security field
migrate_smtp(db);
// Delete ui.name (moved to serverInfo.name)
if let Some(ui) = db
.get_mut("public")

View File

@@ -7,12 +7,10 @@ cd "$(dirname "${BASH_SOURCE[0]}")/.."
PROJECT=${PROJECT:-"startos"}
BASENAME=${BASENAME:-"$(./build/env/basename.sh)"}
VERSION=${VERSION:-$(cat ./build/env/VERSION.txt)}
if [ "$PLATFORM" = "x86_64" ] || [ "$PLATFORM" = "x86_64-nonfree" ] || [ "$PLATFORM" = "x86_64-nvidia" ]; then
if [ "$PLATFORM" = "x86_64" ] || [ "$PLATFORM" = "x86_64-nonfree" ]; then
DEB_ARCH=amd64
elif [ "$PLATFORM" = "aarch64" ] || [ "$PLATFORM" = "aarch64-nonfree" ] || [ "$PLATFORM" = "aarch64-nvidia" ] || [ "$PLATFORM" = "raspberrypi" ] || [ "$PLATFORM" = "rockchip64" ]; then
elif [ "$PLATFORM" = "aarch64" ] || [ "$PLATFORM" = "aarch64-nonfree" ] || [ "$PLATFORM" = "raspberrypi" ]; then
DEB_ARCH=arm64
elif [ "$PLATFORM" = "riscv64" ] || [ "$PLATFORM" = "riscv64-nonfree" ]; then
DEB_ARCH=riscv64
else
DEB_ARCH="$PLATFORM"
fi

View File

@@ -23,6 +23,15 @@ Pending tasks for AI agents. Remove items when completed.
other crate types. Extracting them requires either moving the type definitions into the sub-crate
(and importing them back into `start-os`) or restructuring to share a common types crate.
- [ ] Make `SetupExecuteParams.password` optional in the backend - @dr-bonez
**Problem**: In `core/src/setup.rs`, `SetupExecuteParams` has `password: EncryptedWire` (non-nullable),
but the frontend needs to send `null` for restore/transfer flows where the user keeps their existing
password. The `AttachParams` type correctly uses `Option<EncryptedWire>` for this purpose.
**Fix**: Change `password: EncryptedWire` to `password: Option<EncryptedWire>` in `SetupExecuteParams`
and handle the `None` case in the `execute` handler (similar to how `attach` handles it).
- [ ] Auto-configure port forwards via UPnP/NAT-PMP/PCP - @dr-bonez
**Goal**: When a binding is marked public, automatically configure port forwards on the user's router
@@ -30,11 +39,10 @@ Pending tasks for AI agents. Remove items when completed.
displaying manual instructions (the port forward mapping from patch-db) when auto-configuration is
unavailable or fails.
- [ ] Use TLS-ALPN challenges for check-port when addSsl - @dr-bonez
- [ ] Decouple createTask from service running state - @dr-bonez
**Problem**: The `check_port` RPC in `core/src/net/gateway.rs` currently uses an external HTTP
service (`ifconfig_url`) to verify port reachability. This doesn't check whether the port is forwarded to the right place, just that it's open. there's nothing we can do about this if it's a raw forward, but if it goes through the ssl proxy we can do a better verification.
**Problem**: `createTask` currently depends on the service being in a running state.
**Goal**: When a binding has `addSsl` enabled, use TLS-ALPN-01 challenges to verify port
reachability instead of (or in addition to) the plain TCP check. This more accurately validates
that the SSL port is properly configured and reachable.
**Goal**: The `input-not-matches` handler in StartOS should queue the task, check it once the
service is ready, then clear it if it matches. This allows tasks to be created regardless of
whether the service is currently running.

View File

@@ -27,33 +27,16 @@ bundle: baseDist dist | test fmt
base/lib/exver/exver.ts: base/node_modules base/lib/exver/exver.pegjs
cd base && npm run peggy
baseDist: $(PACKAGE_TS_FILES) $(BASE_TS_FILES) base/package.json base/node_modules base/README.md base/LICENSE
baseDist: $(PACKAGE_TS_FILES) $(BASE_TS_FILES) base/package.json base/node_modules base/README.md base/LICENSE
(cd base && npm run tsc)
# Copy hand-written .js/.d.ts pairs (no corresponding .ts source) into the output.
cd base/lib && find . -name '*.js' | while read f; do \
base="$${f%.js}"; \
if [ -f "$$base.d.ts" ] && [ ! -f "$$base.ts" ]; then \
mkdir -p "../../baseDist/$$(dirname "$$f")"; \
cp "$$f" "../../baseDist/$$f"; \
cp "$$base.d.ts" "../../baseDist/$$base.d.ts"; \
fi; \
done
rsync -ac base/node_modules baseDist/
cp base/package.json baseDist/package.json
cp base/README.md baseDist/README.md
cp base/LICENSE baseDist/LICENSE
touch baseDist
dist: $(PACKAGE_TS_FILES) $(BASE_TS_FILES) package/package.json package/.npmignore package/node_modules package/README.md package/LICENSE
dist: $(PACKAGE_TS_FILES) $(BASE_TS_FILES) package/package.json package/.npmignore package/node_modules package/README.md package/LICENSE
(cd package && npm run tsc)
cd base/lib && find . -name '*.js' | while read f; do \
base="$${f%.js}"; \
if [ -f "$$base.d.ts" ] && [ ! -f "$$base.ts" ]; then \
mkdir -p "../../dist/base/lib/$$(dirname "$$f")"; \
cp "$$f" "../../dist/base/lib/$$f"; \
cp "$$base.d.ts" "../../dist/base/lib/$$base.d.ts"; \
fi; \
done
rsync -ac package/node_modules dist/
cp package/.npmignore dist/.npmignore
cp package/package.json dist/package.json
@@ -87,7 +70,7 @@ base/node_modules: base/package-lock.json
node_modules: package/node_modules base/node_modules
publish: bundle package/package.json package/README.md package/LICENSE
cd dist && npm publish --access=public --tag=latest
cd dist && npm publish --access=public
link: bundle
cd dist && npm link

View File

@@ -3,7 +3,6 @@ import { Value } from './value'
import { _ } from '../../../util'
import { Effects } from '../../../Effects'
import { z } from 'zod'
import { zodDeepPartial } from 'zod-deep-partial'
import { DeepPartial } from '../../../types'
import { InputSpecTools, createInputSpecTools } from './inputSpecTools'
@@ -22,57 +21,6 @@ export type LazyBuild<ExpectedOut, Type> = (
options: LazyBuildOptions<Type>,
) => Promise<ExpectedOut> | ExpectedOut
/**
* Defines which keys to keep when filtering an InputSpec.
* Use `true` to keep a field as-is, or a nested object to filter sub-fields of an object-typed field.
*/
export type FilterKeys<F> = {
[K in keyof F]?: F[K] extends Record<string, any>
? boolean | FilterKeys<F[K]>
: boolean
}
type RetainKey<T, F, Default extends boolean> = {
[K in keyof T]: K extends keyof F
? F[K] extends false
? never
: K
: Default extends true
? K
: never
}[keyof T]
/**
* Computes the resulting type after applying a {@link FilterKeys} shape to a type.
*/
export type ApplyFilter<T, F, Default extends boolean = false> = {
[K in RetainKey<T, F, Default>]: K extends keyof F
? true extends F[K]
? F[K] extends true
? T[K]
: T[K] | undefined
: T[K] extends Record<string, any>
? F[K] extends FilterKeys<T[K]>
? ApplyFilter<T[K], F[K]>
: undefined
: undefined
: Default extends true
? T[K]
: undefined
}
/**
* Computes the union of all valid key-path tuples through a nested type.
* Each tuple represents a path from root to a field, recursing into object-typed sub-fields.
*/
export type KeyPaths<T> = {
[K in keyof T & string]: T[K] extends any[]
? [K]
: T[K] extends Record<string, any>
? [K] | [K, ...KeyPaths<T[K]>]
: [K]
}[keyof T & string]
/** Extracts the runtime type from an {@link InputSpec}. */
// prettier-ignore
export type ExtractInputSpecType<A extends InputSpec<Record<string, any>, any>> =
@@ -163,8 +111,6 @@ export class InputSpec<
) {}
public _TYPE: Type = null as any as Type
public _PARTIAL: DeepPartial<Type> = null as any as DeepPartial<Type>
public readonly partialValidator: z.ZodType<DeepPartial<StaticValidatedAs>> =
zodDeepPartial(this.validator) as any
/**
* Builds the runtime form specification and combined Zod validator from this InputSpec's fields.
*
@@ -193,6 +139,35 @@ export class InputSpec<
}
}
/**
* Adds a single named field to this spec, returning a new `InputSpec` with the extended type.
*
* @param key - The field key name
* @param build - A {@link Value} instance, or a function receiving typed tools that returns one
*/
addKey<Key extends string, V extends Value<any, any, any>>(
key: Key,
build: V | ((tools: InputSpecTools<Type>) => V),
): InputSpec<
Type & { [K in Key]: V extends Value<infer T, any, any> ? T : never },
StaticValidatedAs & {
[K in Key]: V extends Value<any, infer S, any> ? S : never
}
> {
const value =
build instanceof Function ? build(createInputSpecTools<Type>()) : build
const newSpec = { ...this.spec, [key]: value } as any
const newValidator = z.object(
Object.fromEntries(
Object.entries(newSpec).map(([k, v]) => [
k,
(v as Value<any>).validator,
]),
),
)
return new InputSpec(newSpec, newValidator as any)
}
/**
* Adds multiple fields to this spec at once, returning a new `InputSpec` with extended types.
*
@@ -226,247 +201,6 @@ export class InputSpec<
return new InputSpec(newSpec, newValidator as any)
}
/**
* Returns a new InputSpec containing only the specified keys.
* Use `true` to keep a field as-is, or a nested object to filter sub-fields of object-typed fields.
*
* @example
* ```ts
* const full = InputSpec.of({
* name: Value.text({ name: 'Name', required: true, default: null }),
* settings: Value.object({ name: 'Settings' }, InputSpec.of({
* debug: Value.toggle({ name: 'Debug', default: false }),
* port: Value.number({ name: 'Port', required: true, default: 8080, integer: true }),
* })),
* })
* const filtered = full.filter({ name: true, settings: { debug: true } })
* ```
*/
filter<F extends FilterKeys<Type>, Default extends boolean = false>(
  keys: F,
  keepByDefault?: Default,
): InputSpec<
  ApplyFilter<Type, F, Default> & ApplyFilter<StaticValidatedAs, F, Default>,
  ApplyFilter<StaticValidatedAs, F, Default>
> {
  // Collect the surviving subset of fields, recursing into object-typed values.
  const kept: Record<string, Value<any>> = {}
  for (const [k, field] of Object.entries(
    this.spec as Record<string, Value<any> | undefined>,
  )) {
    if (!field) continue
    const selection = (keys as any)[k]
    if (selection === true) {
      kept[k] = field
    } else if (selection && typeof selection === 'object') {
      // Nested selector: filter the sub-spec when the field is object-typed,
      // otherwise keep the field unchanged.
      const meta = field._objectSpec
      kept[k] = meta
        ? Value.object(
            meta.params,
            meta.inputSpec.filter(selection, keepByDefault),
          )
        : field
    } else if (keepByDefault && selection !== false) {
      // Unselected fields are retained only in keep-by-default mode.
      kept[k] = field
    }
  }
  // Rebuild the zod validator from the surviving fields' validators.
  const validators = Object.fromEntries(
    Object.entries(kept).map(([k, v]) => [k, v.validator]),
  )
  return new InputSpec(kept as any, z.object(validators) as any) as any
}
/**
* Returns a new InputSpec with the specified keys disabled.
* Use `true` to disable a field, or a nested object to disable sub-fields of object-typed fields.
* All fields remain in the spec — disabled fields simply cannot be edited by the user.
*
* @param keys - Which fields to disable, using the same shape as {@link FilterKeys}
* @param message - The reason the fields are disabled, displayed to the user
*
* @example
* ```ts
* const spec = InputSpec.of({
* name: Value.text({ name: 'Name', required: true, default: null }),
* settings: Value.object({ name: 'Settings' }, InputSpec.of({
* debug: Value.toggle({ name: 'Debug', default: false }),
* port: Value.number({ name: 'Port', required: true, default: 8080, integer: true }),
* })),
* })
* const disabled = spec.disable({ name: true, settings: { debug: true } }, 'Managed by the system')
* ```
*/
disable(
  keys: FilterKeys<Type>,
  message: string,
): InputSpec<Type, StaticValidatedAs> {
  // Rebuild the spec, swapping selected fields for disabled clones.
  const updated: Record<string, Value<any>> = {}
  for (const k in this.spec) {
    const selection = (keys as any)[k]
    const field = (this.spec as any)[k] as Value<any>
    if (!selection) {
      // Not selected: the field stays editable.
      updated[k] = field
    } else if (selection === true) {
      updated[k] = field.withDisabled(message)
    } else if (typeof selection === 'object' && selection !== null) {
      // Nested selector: disable sub-fields of an object-typed value,
      // or disable the whole field when it has no sub-spec.
      const meta = field._objectSpec
      updated[k] = meta
        ? Value.object(meta.params, meta.inputSpec.disable(selection, message))
        : field.withDisabled(message)
    }
  }
  const validators = Object.fromEntries(
    Object.entries(updated).map(([k, v]) => [k, v.validator]),
  )
  return new InputSpec(updated as any, z.object(validators) as any) as any
}
/**
* Resolves a key path to its corresponding display name path.
* Each key is mapped to the `name` property of its built {@link ValueSpec}.
* Recurses into `Value.object` sub-specs for nested paths.
*
* @param path - Typed tuple of field keys (e.g. `["settings", "debug"]`)
* @param options - Build options providing effects and prefill data
* @returns Array of display names (e.g. `["Settings", "Debug"]`)
*/
async namePath<OuterType>(
  path: KeyPaths<Type>,
  options: LazyBuildOptions<OuterType>,
): Promise<string[]> {
  if (!path.length) return []
  const [head, ...tail] = path as [string, ...string[]]
  const field = (this.spec as any)[head] as Value<any> | undefined
  // Unknown keys resolve to an empty path.
  if (!field) return []
  // Build the field to read its display name; fall back to the raw key.
  const built = await field.build(options as any)
  const label =
    'name' in built.spec ? (built.spec as { name: string }).name : head
  if (!tail.length) return [label]
  const meta = field._objectSpec
  // Recurse into the object sub-spec for the remaining path segments;
  // a non-object field ends the resolution at its own label.
  if (meta) {
    const rest = await meta.inputSpec.namePath(tail as any, options)
    return [label, ...rest]
  }
  return [label]
}
/**
* Resolves a key path to the description of the target field.
* Recurses into `Value.object` sub-specs for nested paths.
*
* @param path - Typed tuple of field keys (e.g. `["settings", "debug"]`)
* @param options - Build options providing effects and prefill data
* @returns The description string, or `null` if the field has no description or was not found
*/
async description<OuterType>(
  path: KeyPaths<Type>,
  options: LazyBuildOptions<OuterType>,
): Promise<string | null> {
  if (!path.length) return null
  const [head, ...tail] = path as [string, ...string[]]
  const field = (this.spec as any)[head] as Value<any> | undefined
  if (!field) return null
  if (!tail.length) {
    // Leaf of the path: build the field and read its description, if any.
    const built = await field.build(options as any)
    if ('description' in built.spec) {
      return (built.spec as { description: string | null }).description
    }
    return null
  }
  // Recurse into the object sub-spec; non-object fields end the search.
  const meta = field._objectSpec
  return meta ? meta.inputSpec.description(tail as any, options) : null
}
/**
* Returns a new InputSpec filtered to only include keys present in the given partial object.
* For nested `Value.object` fields, recurses into the partial value to filter sub-fields.
*
* @param partial - A deep-partial object whose defined keys determine which fields to keep
*/
filterFromPartial(
  partial: DeepPartial<Type>,
): InputSpec<
  DeepPartial<Type> & DeepPartial<StaticValidatedAs>,
  DeepPartial<StaticValidatedAs>
> {
  // Keep only the fields named by the partial, recursing into object-typed
  // fields whose partial value is itself an object.
  const kept: Record<string, Value<any>> = {}
  for (const k of Object.keys(partial)) {
    const field = (this.spec as any)[k] as Value<any> | undefined
    if (!field) continue
    const meta = field._objectSpec
    const sub = (partial as any)[k]
    if (meta && typeof sub === 'object' && sub !== null) {
      kept[k] = Value.object(meta.params, meta.inputSpec.filterFromPartial(sub))
    } else {
      kept[k] = field
    }
  }
  const validators = Object.fromEntries(
    Object.entries(kept).map(([k, v]) => [k, v.validator]),
  )
  return new InputSpec(kept as any, z.object(validators) as any) as any
}
/**
* Returns a new InputSpec with fields disabled based on which keys are present in the given partial object.
* For nested `Value.object` fields, recurses into the partial value to disable sub-fields.
* All fields remain in the spec — disabled fields simply cannot be edited by the user.
*
* @param partial - A deep-partial object whose defined keys determine which fields to disable
* @param message - The reason the fields are disabled, displayed to the user
*/
disableFromPartial(
  partial: DeepPartial<Type>,
  message: string,
): InputSpec<Type, StaticValidatedAs> {
  // Every field stays in the spec; those named by the partial become
  // uneditable (recursively, for object-typed fields with nested partials).
  const updated: Record<string, Value<any>> = {}
  for (const k in this.spec) {
    const field = (this.spec as any)[k] as Value<any>
    if (!(k in (partial as any))) {
      // Keys absent from the partial stay editable.
      updated[k] = field
      continue
    }
    const meta = field._objectSpec
    const sub = (partial as any)[k]
    if (meta && typeof sub === 'object' && sub !== null) {
      // Nested partial on an object-typed field: disable only its sub-fields.
      updated[k] = Value.object(
        meta.params,
        meta.inputSpec.disableFromPartial(sub, message),
      )
    } else {
      updated[k] = field.withDisabled(message)
    }
  }
  const validators = Object.fromEntries(
    Object.entries(updated).map(([k, v]) => [k, v.validator]),
  )
  return new InputSpec(updated as any, z.object(validators) as any) as any
}
/**
* Creates an `InputSpec` from a plain record of {@link Value} entries.
*

View File

@@ -70,11 +70,6 @@ export class Value<
) {}
public _TYPE: Type = null as any as Type
public _PARTIAL: DeepPartial<Type> = null as any as DeepPartial<Type>
/** @internal Used by {@link InputSpec.filter} to support nested filtering of object-typed fields. */
_objectSpec?: {
inputSpec: InputSpec<any, any>
params: { name: string; description?: string | null }
}
/**
* @description Displays a boolean toggle to enable/disable
@@ -992,7 +987,7 @@ export class Value<
},
spec: InputSpec<Type, StaticValidatedAs>,
) {
const value = new Value<Type, StaticValidatedAs>(async (options) => {
return new Value<Type, StaticValidatedAs>(async (options) => {
const built = await spec.build(options as any)
return {
spec: {
@@ -1005,8 +1000,6 @@ export class Value<
validator: built.validator,
}
}, spec.validator)
value._objectSpec = { inputSpec: spec, params: a }
return value
}
/**
* Displays a file upload input field.
@@ -1340,25 +1333,6 @@ export class Value<
}, z.any())
}
/**
* Returns a new Value that produces the same field spec but with `disabled` set to the given message.
* The field remains in the form but cannot be edited by the user.
*
* @param message - The reason the field is disabled, displayed to the user
*/
withDisabled(message: string): Value<Type, StaticValidatedAs, OuterType> {
  // Wrap the original builder so the produced spec carries the disabled
  // message; validation is left untouched.
  const source = this
  const wrapped = new Value<Type, StaticValidatedAs, OuterType>(
    async (options) => {
      const { spec, validator } = await source.build(options)
      return {
        spec: { ...spec, disabled: message } as ValueSpec,
        validator,
      }
    },
    this.validator,
  )
  // Carry over the object metadata so nested filtering/disabling still works.
  wrapped._objectSpec = this._objectSpec
  return wrapped
}
/**
* Transforms the validated output value using a mapping function.
* The form field itself remains unchanged, but the value is transformed after validation.

View File

@@ -1,57 +1,41 @@
import { SmtpValue } from '../../types'
import { GetSystemSmtp, Patterns } from '../../util'
import { InputSpec } from './builder/inputSpec'
import { InputSpec, InputSpecOf } from './builder/inputSpec'
import { Value } from './builder/value'
import { Variants } from './builder/variants'
// Security-mode variants for the SMTP form: each variant constrains the
// port field according to the selected connection security.
const securityVariants = Variants.of({
  tls: {
    name: 'TLS',
    spec: InputSpec.of({
      // Port 465 is fixed for TLS; shown read-only via `disabled`.
      port: Value.dynamicText(async () => ({
        name: 'Port',
        required: true,
        default: '465',
        disabled: 'Fixed for TLS',
      })),
    }),
  },
  starttls: {
    name: 'STARTTLS',
    spec: InputSpec.of({
      // STARTTLS offers a fixed choice of ports, defaulting to 587.
      port: Value.select({
        name: 'Port',
        default: '587',
        values: { '25': '25', '587': '587', '2525': '2525' },
      }),
    }),
  },
})
/**
* Creates an SMTP field spec with provider-specific defaults pre-filled.
*/
function smtpFields(
defaults: {
host?: string
port?: number
security?: 'starttls' | 'tls'
hostDisabled?: boolean
} = {},
) {
const hostSpec = Value.text({
name: 'Host',
required: true,
default: defaults.host ?? null,
placeholder: 'smtp.example.com',
})
return InputSpec.of({
host: defaults.hostDisabled
? hostSpec.withDisabled('Fixed for this provider')
: hostSpec,
security: Value.union({
): InputSpec<SmtpValue> {
return InputSpec.of<InputSpecOf<SmtpValue>>({
host: Value.text({
name: 'Host',
required: true,
default: defaults.host ?? null,
placeholder: 'smtp.example.com',
}),
port: Value.number({
name: 'Port',
required: true,
default: defaults.port ?? 587,
min: 1,
max: 65535,
integer: true,
}),
security: Value.select({
name: 'Connection Security',
default: defaults.security ?? 'tls',
variants: securityVariants,
default: defaults.security ?? 'starttls',
values: {
starttls: 'STARTTLS',
tls: 'TLS',
},
}),
from: Value.text({
name: 'From Address',
@@ -88,39 +72,40 @@ export const smtpProviderVariants = Variants.of({
name: 'Gmail',
spec: smtpFields({
host: 'smtp.gmail.com',
security: 'tls',
hostDisabled: true,
port: 587,
security: 'starttls',
}),
},
ses: {
name: 'Amazon SES',
spec: smtpFields({
host: 'email-smtp.us-east-1.amazonaws.com',
security: 'tls',
port: 587,
security: 'starttls',
}),
},
sendgrid: {
name: 'SendGrid',
spec: smtpFields({
host: 'smtp.sendgrid.net',
security: 'tls',
hostDisabled: true,
port: 587,
security: 'starttls',
}),
},
mailgun: {
name: 'Mailgun',
spec: smtpFields({
host: 'smtp.mailgun.org',
security: 'tls',
hostDisabled: true,
port: 587,
security: 'starttls',
}),
},
protonmail: {
name: 'Proton Mail',
spec: smtpFields({
host: 'smtp.protonmail.ch',
security: 'tls',
hostDisabled: true,
port: 587,
security: 'starttls',
}),
},
other: {
@@ -136,7 +121,7 @@ export const smtpProviderVariants = Variants.of({
export const systemSmtpSpec = InputSpec.of({
provider: Value.union({
name: 'Provider',
default: 'gmail',
default: null as any,
variants: smtpProviderVariants,
}),
})

View File

@@ -16,12 +16,10 @@ export type GetInput<A extends Record<string, any>> = (options: {
prefill: T.DeepPartial<A> | null
}) => Promise<null | void | undefined | T.DeepPartial<A>>
export type MaybeFn<T, Opts = { effects: T.Effects }> =
| T
| ((options: Opts) => Promise<T>)
function callMaybeFn<T, Opts = { effects: T.Effects }>(
maybeFn: MaybeFn<T, Opts>,
options: Opts,
export type MaybeFn<T> = T | ((options: { effects: T.Effects }) => Promise<T>)
function callMaybeFn<T>(
maybeFn: MaybeFn<T>,
options: { effects: T.Effects },
): Promise<T> {
if (maybeFn instanceof Function) {
return maybeFn(options)
@@ -59,13 +57,7 @@ export class Action<Id extends T.ActionId, Type extends Record<string, any>>
private constructor(
readonly id: Id,
private readonly metadataFn: MaybeFn<T.ActionMetadata>,
private readonly inputSpec: MaybeFn<
MaybeInputSpec<Type>,
{
effects: T.Effects
prefill: unknown | null
}
>,
private readonly inputSpec: MaybeInputSpec<Type>,
private readonly getInputFn: GetInput<Type>,
private readonly runFn: Run<Type>,
) {}
@@ -75,13 +67,7 @@ export class Action<Id extends T.ActionId, Type extends Record<string, any>>
>(
id: Id,
metadata: MaybeFn<Omit<T.ActionMetadata, 'hasInput'>>,
inputSpec: MaybeFn<
InputSpecType,
{
effects: T.Effects
prefill: unknown | null
}
>,
inputSpec: InputSpecType,
getInput: GetInput<ExtractInputSpecType<InputSpecType>>,
run: Run<ExtractInputSpecType<InputSpecType>>,
): Action<Id, ExtractInputSpecType<InputSpecType>> {
@@ -125,12 +111,9 @@ export class Action<Id extends T.ActionId, Type extends Record<string, any>>
}): Promise<T.ActionInput> {
let spec = {}
if (this.inputSpec) {
const inputSpec = await callMaybeFn(this.inputSpec, options)
const built = await inputSpec?.build(options)
if (built) {
this.prevInputSpec[options.effects.eventId!] = built
spec = built.spec
}
const built = await this.inputSpec.build(options)
this.prevInputSpec[options.effects.eventId!] = built
spec = built.spec
}
return {
eventId: options.effects.eventId!,

View File

@@ -8,6 +8,6 @@ export * as types from './types'
export * as T from './types'
export * as yaml from 'yaml'
export * as inits from './inits'
export { z } from './zExport'
export { z } from 'zod'
export * as utils from './util'

View File

@@ -14,34 +14,28 @@ export const knownProtocols = {
defaultPort: 80,
withSsl: 'https',
alpn: { specified: ['http/1.1'] } as AlpnInfo,
addXForwardedHeaders: true,
},
https: {
secure: { ssl: true },
defaultPort: 443,
addXForwardedHeaders: true,
},
ws: {
secure: null,
defaultPort: 80,
withSsl: 'wss',
alpn: { specified: ['http/1.1'] } as AlpnInfo,
addXForwardedHeaders: true,
},
wss: {
secure: { ssl: true },
defaultPort: 443,
addXForwardedHeaders: true,
},
ssh: {
secure: { ssl: false },
defaultPort: 22,
addXForwardedHeaders: false,
},
dns: {
secure: { ssl: false },
defaultPort: 53,
addXForwardedHeaders: false,
},
} as const
@@ -142,7 +136,7 @@ export class MultiHost {
const sslProto = this.getSslProto(options)
const addSsl = sslProto
? {
addXForwardedHeaders: knownProtocols[sslProto].addXForwardedHeaders,
addXForwardedHeaders: false,
preferredExternalPort: knownProtocols[sslProto].defaultPort,
scheme: sslProto,
alpn: 'alpn' in protoInfo ? protoInfo.alpn : null,
@@ -154,7 +148,7 @@ export class MultiHost {
preferredExternalPort: 443,
scheme: sslProto,
alpn: null,
...options.addSsl,
...('addSsl' in options ? options.addSsl : null),
}
: null

View File

@@ -6,5 +6,4 @@ export type AddPackageSignerParams = {
id: PackageId
signer: Guid
versions: string | null
merge?: boolean
}

View File

@@ -6,5 +6,4 @@ export type AddPublicDomainParams = {
fqdn: string
acme: AcmeProvider | null
gateway: GatewayId
internalPort: number
}

View File

@@ -1,4 +0,0 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
import type { CheckPortRes } from './CheckPortRes'
export type AddPublicDomainRes = { dns: string | null; port: CheckPortRes }

View File

@@ -5,4 +5,6 @@ export type AttachParams = {
password: EncryptedWire | null
guid: string
kiosk?: boolean
name: string | null
hostname: string | null
}

View File

@@ -1,9 +1,3 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type CheckPortRes = {
ip: string
port: number
openExternally: boolean
openInternally: boolean
hairpinning: boolean
}
export type CheckPortRes = { ip: string; port: number; reachable: boolean }

View File

@@ -5,7 +5,6 @@ import type { DnsSettings } from './DnsSettings'
import type { GatewayId } from './GatewayId'
import type { Host } from './Host'
import type { NetworkInterfaceInfo } from './NetworkInterfaceInfo'
import type { PassthroughInfo } from './PassthroughInfo'
import type { WifiInfo } from './WifiInfo'
export type NetworkInfo = {
@@ -15,5 +14,4 @@ export type NetworkInfo = {
acme: { [key: AcmeProvider]: AcmeSettings }
dns: DnsSettings
defaultOutbound: string | null
passthroughs: Array<PassthroughInfo>
}

View File

@@ -1,9 +0,0 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type PassthroughInfo = {
hostname: string
listenPort: number
backend: string
publicGateways: string[]
privateIps: string[]
}

View File

@@ -4,7 +4,7 @@ import type { RecoverySource } from './RecoverySource'
export type SetupExecuteParams = {
guid: string
password: EncryptedWire | null
password: EncryptedWire
recoverySource: RecoverySource<EncryptedWire> | null
kiosk?: boolean
name: string | null

View File

@@ -19,7 +19,6 @@ export { AddPackageSignerParams } from './AddPackageSignerParams'
export { AddPackageToCategoryParams } from './AddPackageToCategoryParams'
export { AddPrivateDomainParams } from './AddPrivateDomainParams'
export { AddPublicDomainParams } from './AddPublicDomainParams'
export { AddPublicDomainRes } from './AddPublicDomainRes'
export { AddressInfo } from './AddressInfo'
export { AddSslOptions } from './AddSslOptions'
export { AddTunnelParams } from './AddTunnelParams'
@@ -202,7 +201,6 @@ export { PackagePlugin } from './PackagePlugin'
export { PackageState } from './PackageState'
export { PackageVersionInfo } from './PackageVersionInfo'
export { PartitionInfo } from './PartitionInfo'
export { PassthroughInfo } from './PassthroughInfo'
export { PasswordType } from './PasswordType'
export { PathOrUrl } from './PathOrUrl'
export { Pem } from './Pem'

View File

@@ -1,25 +1,24 @@
export * as inputSpecTypes from './actions/input/inputSpecTypes'
export {
CurrentDependenciesResult,
OptionalDependenciesOf as OptionalDependencies,
RequiredDependenciesOf as RequiredDependencies,
} from './dependencies/setupDependencies'
export * from './osBindings'
export { SDKManifest } from './types/ManifestTypes'
export { Effects }
import { InputSpec as InputSpecClass } from './actions/input/builder/inputSpec'
import {
DependencyRequirement,
NamedHealthCheckResult,
Manifest,
ServiceInterface,
ActionId,
} from './osBindings'
import { Affine, StringObject, ToKebab } from './util'
import { Action, Actions } from './actions/setupActions'
import { Effects } from './Effects'
import { ExtendedVersion, VersionRange } from './exver'
import {
ActionId,
DependencyRequirement,
Manifest,
NamedHealthCheckResult,
ServiceInterface,
} from './osBindings'
import { StringObject, ToKebab } from './util'
export { Effects }
export * from './osBindings'
export { SDKManifest } from './types/ManifestTypes'
export {
RequiredDependenciesOf as RequiredDependencies,
OptionalDependenciesOf as OptionalDependencies,
CurrentDependenciesResult,
} from './dependencies/setupDependencies'
/** An object that can be built into a terminable daemon process. */
export type DaemonBuildable = {
@@ -145,11 +144,7 @@ export function isUseEntrypoint(
* - An explicit argv array
* - A {@link UseEntrypoint} to use the container's built-in entrypoint
*/
export type CommandType =
| string
| [string, ...string[]]
| readonly [string, ...string[]]
| UseEntrypoint
export type CommandType = string | [string, ...string[]] | UseEntrypoint
/** The return type from starting a daemon — provides `wait()` and `term()` controls. */
export type DaemonReturned = {
@@ -272,8 +267,3 @@ export type AllowReadonly<T> =
| {
readonly [P in keyof T]: AllowReadonly<T[P]>
}
export type InputSpec<
Type extends StaticValidatedAs,
StaticValidatedAs extends Record<string, unknown> = Type,
> = InputSpecClass<Type, StaticValidatedAs>

View File

@@ -1,10 +0,0 @@
/**
 * Error thrown when an operation is aborted.
 * `muteUnhandled` marks instances as intentionally ignorable by
 * unhandled-rejection reporting.
 */
export class AbortedError extends Error {
  readonly muteUnhandled = true as const
  // Declared (not initialized) so the field only becomes an own property
  // when a cause is actually supplied.
  declare cause?: unknown

  constructor(message?: string, options?: { cause?: unknown }) {
    super(message)
    this.name = 'AbortedError'
    const cause = options?.cause
    if (cause !== undefined) {
      this.cause = cause
    }
  }
}

View File

@@ -1,5 +1,4 @@
import { Effects } from '../Effects'
import { AbortedError } from './AbortedError'
import { DropGenerator, DropPromise } from './Drop'
export class GetOutboundGateway {
@@ -39,7 +38,7 @@ export class GetOutboundGateway {
})
await waitForNext
}
return new Promise<never>((_, rej) => rej(new AbortedError()))
return new Promise<never>((_, rej) => rej(new Error('aborted')))
}
/**

View File

@@ -1,6 +1,5 @@
import { Effects } from '../Effects'
import * as T from '../types'
import { AbortedError } from './AbortedError'
import { DropGenerator, DropPromise } from './Drop'
export class GetSystemSmtp {
@@ -40,7 +39,7 @@ export class GetSystemSmtp {
})
await waitForNext
}
return new Promise<never>((_, rej) => rej(new AbortedError()))
return new Promise<never>((_, rej) => rej(new Error('aborted')))
}
/**

View File

@@ -8,7 +8,6 @@ import {
HostnameInfo,
} from '../types'
import { Effects } from '../Effects'
import { AbortedError } from './AbortedError'
import { DropGenerator, DropPromise } from './Drop'
import { IpAddress, IPV6_LINK_LOCAL } from './ip'
import { deepEqual } from './deepEqual'
@@ -26,18 +25,6 @@ export const getHostname = (url: string): Hostname | null => {
return last
}
/**
* The kinds of hostnames that can be filtered on.
*
* - `'mdns'` — mDNS / Bonjour `.local` hostnames
* - `'domain'` — any os-managed domain name (matches both `'private-domain'` and `'public-domain'` metadata kinds)
* - `'ip'` — shorthand for both `'ipv4'` and `'ipv6'`
* - `'ipv4'` — IPv4 addresses only
* - `'ipv6'` — IPv6 addresses only
* - `'localhost'` — loopback addresses (`localhost`, `127.0.0.1`, `::1`)
* - `'link-local'` — IPv6 link-local addresses (fe80::/10)
* - `'plugin'` — hostnames provided by a plugin package
*/
type FilterKinds =
| 'mdns'
| 'domain'
@@ -46,25 +33,10 @@ type FilterKinds =
| 'ipv6'
| 'localhost'
| 'link-local'
| 'plugin'
/**
* Describes which hostnames to include (or exclude) when filtering a `Filled` address.
*
* Every field is optional — omitted fields impose no constraint.
* Filters are composable: the `.filter()` method intersects successive filters,
* and the `exclude` field inverts a nested filter.
*/
export type Filter = {
/** Keep only hostnames with the given visibility. `'public'` = externally reachable, `'private'` = LAN-only. */
visibility?: 'public' | 'private'
/** Keep only hostnames whose metadata kind matches. A single kind or array of kinds. `'ip'` expands to `['ipv4','ipv6']`, `'domain'` matches both `'private-domain'` and `'public-domain'`. */
kind?: FilterKinds | FilterKinds[]
/** Arbitrary predicate — hostnames for which this returns `false` are excluded. */
predicate?: (h: HostnameInfo) => boolean
/** Keep only plugin hostnames provided by this package. Implies `kind: 'plugin'`. */
pluginId?: PackageId
/** A nested filter whose matches are *removed* from the result (logical NOT). */
exclude?: Filter
}
@@ -92,13 +64,9 @@ type KindFilter<K extends FilterKinds> = K extends 'mdns'
?
| (HostnameInfo & { metadata: { kind: 'ipv6' } })
| KindFilter<Exclude<K, 'ipv6'>>
: K extends 'plugin'
?
| (HostnameInfo & { metadata: { kind: 'plugin' } })
| KindFilter<Exclude<K, 'plugin'>>
: K extends 'ip'
? KindFilter<Exclude<K, 'ip'> | 'ipv4' | 'ipv6'>
: never
: K extends 'ip'
? KindFilter<Exclude<K, 'ip'> | 'ipv4' | 'ipv6'>
: never
type FilterReturnTy<F extends Filter> = F extends {
visibility: infer V extends 'public' | 'private'
@@ -138,62 +106,20 @@ type FormatReturnTy<
? UrlString | FormatReturnTy<F, Exclude<Format, 'urlstring'>>
: never
/**
* A resolved address with its hostnames already populated, plus helpers
* for filtering, formatting, and converting hostnames to URLs.
*
* Filters are chainable and each call returns a new `Filled` narrowed to the
* matching subset of hostnames:
*
* ```ts
* addresses.nonLocal // exclude localhost & link-local
* addresses.public // only publicly-reachable hostnames
* addresses.filter({ kind: 'domain' }) // only domain-name hostnames
* addresses.filter({ visibility: 'private' }) // only LAN-reachable hostnames
* addresses.nonLocal.filter({ kind: 'ip' }) // chainable — non-local IPs only
* ```
*/
export type Filled<F extends Filter = {}> = {
/** The hostnames that survived all applied filters. */
hostnames: HostnameInfo[]
/** Convert a single hostname into a fully-formed URL string, applying the address's scheme, username, and suffix. */
toUrl: (h: HostnameInfo) => UrlString
/**
* Return every hostname in the requested format.
*
* - `'urlstring'` (default) — formatted URL strings
* - `'url'` — `URL` objects
* - `'hostname-info'` — raw `HostnameInfo` objects
*/
format: <Format extends Formats = 'urlstring'>(
format?: Format,
) => FormatReturnTy<{}, Format>[]
/**
* Apply an arbitrary {@link Filter} and return a new `Filled` containing only
* the hostnames that match. Filters compose: calling `.filter()` on an
* already-filtered `Filled` intersects the constraints.
*/
filter: <NewFilter extends Filter>(
filter: NewFilter,
) => Filled<NewFilter & Filter>
/**
* Apply multiple filters and return hostnames that match **any** of them (union / OR).
*
* ```ts
* addresses.matchesAny([{ kind: 'domain' }, { kind: 'mdns' }])
* ```
*/
matchesAny: <NewFilters extends Filter[]>(
filters: [...NewFilters],
) => Filled<NewFilters[number] & F>
/** Shorthand filter that excludes `localhost` and IPv6 link-local addresses — keeps only network-reachable hostnames. */
nonLocal: Filled<typeof nonLocalFilter & Filter>
/** Shorthand filter that keeps only publicly-reachable hostnames (those with `public: true`). */
public: Filled<typeof publicFilter & Filter>
}
export type FilledAddressInfo = AddressInfo & Filled
@@ -283,16 +209,7 @@ function filterRec(
['localhost', '127.0.0.1', '::1'].includes(h.hostname)) ||
(kind.has('link-local') &&
h.metadata.kind === 'ipv6' &&
IPV6_LINK_LOCAL.contains(IpAddress.parse(h.hostname))) ||
(kind.has('plugin') && h.metadata.kind === 'plugin')),
)
}
if (filter.pluginId) {
const id = filter.pluginId
hostnames = hostnames.filter(
(h) =>
invert !==
(h.metadata.kind === 'plugin' && h.metadata.packageId === id),
IPV6_LINK_LOCAL.contains(IpAddress.parse(h.hostname)))),
)
}
@@ -324,14 +241,6 @@ function enabledAddresses(addr: DerivedAddressInfo): HostnameInfo[] {
})
}
/**
* Filters out localhost and IPv6 link-local hostnames from a list.
* Equivalent to the `nonLocal` filter on `Filled` addresses.
*/
export function filterNonLocal(hostnames: HostnameInfo[]): HostnameInfo[] {
  // Delegates to filterRec with the shared nonLocalFilter, non-inverted.
  return filterRec(hostnames, nonLocalFilter, false)
}
export const filledAddress = (
host: Host,
addressInfo: AddressInfo,
@@ -370,19 +279,6 @@ export const filledAddress = (
filterRec(hostnames, filter, false),
)
},
matchesAny: <NewFilters extends Filter[]>(filters: [...NewFilters]) => {
const seen = new Set<HostnameInfo>()
const union: HostnameInfo[] = []
for (const f of filters) {
for (const h of filterRec(hostnames, f, false)) {
if (!seen.has(h)) {
seen.add(h)
union.push(h)
}
}
}
return filledAddressFromHostnames<NewFilters[number] & F>(union)
},
get nonLocal(): Filled<typeof nonLocalFilter & F> {
return getNonLocal()
},
@@ -498,7 +394,7 @@ export class GetServiceInterface<Mapped = ServiceInterfaceFilled | null> {
}
await waitForNext
}
return new Promise<never>((_, rej) => rej(new AbortedError()))
return new Promise<never>((_, rej) => rej(new Error('aborted')))
}
/**

View File

@@ -1,6 +1,5 @@
import { Effects } from '../Effects'
import { PackageId } from '../osBindings'
import { AbortedError } from './AbortedError'
import { deepEqual } from './deepEqual'
import { DropGenerator, DropPromise } from './Drop'
import { ServiceInterfaceFilled, filledAddress } from './getServiceInterface'
@@ -106,7 +105,7 @@ export class GetServiceInterfaces<Mapped = ServiceInterfaceFilled[]> {
}
await waitForNext
}
return new Promise<never>((_, rej) => rej(new AbortedError()))
return new Promise<never>((_, rej) => rej(new Error('aborted')))
}
/**

View File

@@ -8,7 +8,6 @@ export {
GetServiceInterface,
getServiceInterface,
filledAddress,
filterNonLocal,
} from './getServiceInterface'
export { getServiceInterfaces } from './getServiceInterfaces'
export { once } from './once'
@@ -23,6 +22,5 @@ export { splitCommand } from './splitCommand'
export { nullIfEmpty } from './nullIfEmpty'
export { deepMerge, partialDiff } from './deepMerge'
export { deepEqual } from './deepEqual'
export { AbortedError } from './AbortedError'
export * as regexes from './regexes'
export { stringFromStdErrOut } from './stringFromStdErrOut'

View File

@@ -1,5 +1,3 @@
import { AllowReadonly } from '../types'
/**
* Normalizes a command into an argv-style string array.
* If given a string, wraps it as `["sh", "-c", command]`.
@@ -15,8 +13,8 @@ import { AllowReadonly } from '../types'
* ```
*/
export const splitCommand = (
command: string | AllowReadonly<[string, ...string[]]>,
command: string | [string, ...string[]],
): string[] => {
if (Array.isArray(command)) return command
return ['sh', '-c', command as string]
return ['sh', '-c', command]
}

View File

@@ -1,14 +0,0 @@
import { z as _z } from 'zod'
import type { DeepPartial } from './types'
// Signature of a helper that makes every property at every depth optional.
type ZodDeepPartial = <T>(a: _z.ZodType<T>) => _z.ZodType<DeepPartial<T>>
// Signature of a helper that preserves the schema's static type; per its
// name it relaxes strictness at runtime — implementation lives in the
// companion JS shim, TODO confirm exact semantics there.
type ZodDeepLoose = <T>(a: _z.ZodType<T>) => _z.ZodType<T>
// Module augmentation so the runtime-patched helpers (attached to `z` by
// the companion JS shim) are visible to the type checker.
declare module 'zod' {
  namespace z {
    const deepPartial: ZodDeepPartial
    const deepLoose: ZodDeepLoose
  }
}
// Re-export zod under the conventional name with the augmentations applied.
export { _z as z }

View File

@@ -1,92 +0,0 @@
"use strict";
Object.defineProperty(exports, "__esModule", { value: true });
const zod_1 = require("zod");
const zod_deep_partial_1 = require("zod-deep-partial");
// Recursively make all ZodObjects in a schema loose (preserve extra keys at every nesting level).
// Uses _zod.def.type duck-typing instead of instanceof to avoid issues with mismatched zod versions.
function deepLoose(schema) {
const def = schema._zod?.def;
if (!def) return schema;
let result;
switch (def.type) {
case "optional":
result = deepLoose(def.innerType).optional();
break;
case "nullable":
result = deepLoose(def.innerType).nullable();
break;
case "object": {
const newShape = {};
for (const key in schema.shape) {
newShape[key] = deepLoose(schema.shape[key]);
}
result = zod_1.z.looseObject(newShape);
break;
}
case "array":
result = zod_1.z.array(deepLoose(def.element));
break;
case "union":
result = zod_1.z.union(def.options.map((o) => deepLoose(o)));
break;
case "intersection":
result = zod_1.z.intersection(deepLoose(def.left), deepLoose(def.right));
break;
case "record":
result = zod_1.z.record(def.keyType, deepLoose(def.valueType));
break;
case "tuple":
result = zod_1.z.tuple(def.items.map((i) => deepLoose(i)));
break;
case "lazy":
result = zod_1.z.lazy(() => deepLoose(def.getter()));
break;
default:
return schema;
}
return result;
}
// Add deepPartial and deepLoose to z at runtime
zod_1.z.deepPartial = (a) =>
deepLoose((0, zod_deep_partial_1.zodDeepPartial)(a));
zod_1.z.deepLoose = deepLoose;
// Override z.object to produce loose objects by default (extra keys are preserved, not stripped).
const _origObject = zod_1.z.object;
const _patchedObject = (...args) => _origObject(...args).loose();
// In CJS (Node.js), patch the source module in require.cache where 'object' is a writable property;
// the CJS getter chain (index → external → schemas) then relays the patched version.
// We walk only the zod entry module's dependency tree and match by identity (=== origObject).
try {
const _zodModule = require.cache[require.resolve("zod")];
for (const child of _zodModule?.children ?? []) {
for (const grandchild of child.children ?? []) {
const desc = Object.getOwnPropertyDescriptor(
grandchild.exports,
"object",
);
if (desc?.value === _origObject && desc.writable) {
grandchild.exports.object = _patchedObject;
}
}
}
} catch (_) {
// Not in CJS/Node environment (e.g. browser) — require.cache unavailable
}
// z.object is a non-configurable getter on the zod namespace, so we can't override it directly.
// Shadow it by exporting a new object with _z as prototype and our patched object on the instance.
const z = Object.create(zod_1.z, {
object: {
value: _patchedObject,
writable: true,
configurable: true,
enumerable: true,
},
});
exports.z = z;

View File

@@ -14,8 +14,7 @@
"isomorphic-fetch": "^3.0.0",
"mime": "^4.0.7",
"yaml": "^2.7.1",
"zod": "^4.3.6",
"zod-deep-partial": "^1.2.0"
"zod": "^4.3.6"
},
"devDependencies": {
"@types/jest": "^29.4.0",
@@ -5007,19 +5006,9 @@
"resolved": "https://registry.npmjs.org/zod/-/zod-4.3.6.tgz",
"integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==",
"license": "MIT",
"peer": true,
"funding": {
"url": "https://github.com/sponsors/colinhacks"
}
},
"node_modules/zod-deep-partial": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/zod-deep-partial/-/zod-deep-partial-1.2.0.tgz",
"integrity": "sha512-dXfte+/YN0aFYs0kMGz6xfPQWEYNaKz/LsbfxrbwL+oY3l/aR9HOBTyWCpHZ5AJXMGWKSq+0X0oVPpRliUFcjQ==",
"license": "MIT",
"peerDependencies": {
"zod": "^4.1.13"
}
}
}
}

View File

@@ -28,8 +28,7 @@
"isomorphic-fetch": "^3.0.0",
"mime": "^4.0.7",
"yaml": "^2.7.1",
"zod": "^4.3.6",
"zod-deep-partial": "^1.2.0"
"zod": "^4.3.6"
},
"prettier": {
"trailingComma": "all",

View File

@@ -141,7 +141,6 @@ export class StartSdk<Manifest extends T.SDKManifest> {
| 'getSystemSmtp'
| 'getOutboundGateway'
| 'getContainerIp'
| 'getStatus'
| 'getDataVersion'
| 'setDataVersion'
| 'getServiceManifest'
@@ -165,6 +164,7 @@ export class StartSdk<Manifest extends T.SDKManifest> {
getSslKey: (effects, ...args) => effects.getSslKey(...args),
shutdown: (effects, ...args) => effects.shutdown(...args),
getDependencies: (effects, ...args) => effects.getDependencies(...args),
getStatus: (effects, ...args) => effects.getStatus(...args),
setHealth: (effects, ...args) => effects.setHealth(...args),
}
@@ -342,104 +342,6 @@ export class StartSdk<Manifest extends T.SDKManifest> {
}
},
/**
* Get the service's current status with reactive subscription support.
*
* Returns an object with multiple read strategies: `const()` for a value
* that retries on change, `once()` for a single read, `watch()` for an async
* generator, `onChange()` for a callback, and `waitFor()` to block until a predicate is met.
*
* @param effects - The effects context
* @param options - Optional filtering options (e.g. `packageId`)
*/
getStatus: (
effects: T.Effects,
options: Omit<Parameters<T.Effects['getStatus']>[0], 'callback'> = {},
) => {
async function* watch(abort?: AbortSignal) {
const resolveCell = { resolve: () => {} }
effects.onLeaveContext(() => {
resolveCell.resolve()
})
abort?.addEventListener('abort', () => resolveCell.resolve())
while (effects.isInContext && !abort?.aborted) {
let callback: () => void = () => {}
const waitForNext = new Promise<void>((resolve) => {
callback = resolve
resolveCell.resolve = resolve
})
yield await effects.getStatus({ ...options, callback })
await waitForNext
}
}
return {
const: () =>
effects.getStatus({
...options,
callback:
effects.constRetry &&
(() => effects.constRetry && effects.constRetry()),
}),
once: () => effects.getStatus(options),
watch: (abort?: AbortSignal) => {
const ctrl = new AbortController()
abort?.addEventListener('abort', () => ctrl.abort())
return DropGenerator.of(watch(ctrl.signal), () => ctrl.abort())
},
onChange: (
callback: (
value: T.StatusInfo | null,
error?: Error,
) => { cancel: boolean } | Promise<{ cancel: boolean }>,
) => {
;(async () => {
const ctrl = new AbortController()
for await (const value of watch(ctrl.signal)) {
try {
const res = await callback(value)
if (res.cancel) {
ctrl.abort()
break
}
} catch (e) {
console.error(
'callback function threw an error @ getStatus.onChange',
e,
)
}
}
})()
.catch((e) => callback(null, e))
.catch((e) =>
console.error(
'callback function threw an error @ getStatus.onChange',
e,
),
)
},
waitFor: async (pred: (value: T.StatusInfo | null) => boolean) => {
const resolveCell = { resolve: () => {} }
effects.onLeaveContext(() => {
resolveCell.resolve()
})
while (effects.isInContext) {
let callback: () => void = () => {}
const waitForNext = new Promise<void>((resolve) => {
callback = resolve
resolveCell.resolve = resolve
})
const res = await effects.getStatus({ ...options, callback })
if (pred(res)) {
resolveCell.resolve()
return res
}
await waitForNext
}
return null
},
}
},
MultiHost: {
/**
* Create a new MultiHost instance for binding ports and exporting interfaces.

View File

@@ -1,6 +1,5 @@
import { Effects } from '../../../base/lib/Effects'
import { Manifest, PackageId } from '../../../base/lib/osBindings'
import { AbortedError } from '../../../base/lib/util/AbortedError'
import { DropGenerator, DropPromise } from '../../../base/lib/util/Drop'
import { deepEqual } from '../../../base/lib/util/deepEqual'
@@ -65,7 +64,7 @@ export class GetServiceManifest<Mapped = Manifest> {
}
await waitForNext
}
return new Promise<never>((_, rej) => rej(new AbortedError()))
return new Promise<never>((_, rej) => rej(new Error('aborted')))
}
/**

View File

@@ -1,6 +1,5 @@
import { T } from '..'
import { Effects } from '../../../base/lib/Effects'
import { AbortedError } from '../../../base/lib/util/AbortedError'
import { DropGenerator, DropPromise } from '../../../base/lib/util/Drop'
export class GetSslCertificate {
@@ -51,7 +50,7 @@ export class GetSslCertificate {
})
await waitForNext
}
return new Promise<never>((_, rej) => rej(new AbortedError()))
return new Promise<never>((_, rej) => rej(new Error('aborted')))
}
/**

View File

@@ -4,7 +4,7 @@ import * as TOML from '@iarna/toml'
import * as INI from 'ini'
import * as T from '../../../base/lib/types'
import * as fs from 'node:fs/promises'
import { AbortedError, asError, deepEqual } from '../../../base/lib/util'
import { asError, deepEqual } from '../../../base/lib/util'
import { DropGenerator, DropPromise } from '../../../base/lib/util/Drop'
import { PathBase } from './Volume'
@@ -91,15 +91,11 @@ function filterUndefined<A>(a: A): A {
* @typeParam Raw - The native type the file format parses to (e.g. `Record<string, unknown>` for JSON)
* @typeParam Transformed - The application-level type after transformation
*/
export type Transformers<
Raw = unknown,
Transformed = unknown,
Validated extends Transformed = Transformed,
> = {
export type Transformers<Raw = unknown, Transformed = unknown> = {
/** Transform raw parsed data into the application type */
onRead: (value: Raw) => Transformed
/** Transform application data back into the raw format for writing */
onWrite: (value: Validated) => Raw
onWrite: (value: Transformed) => Raw
}
type ToPath = string | { base: PathBase; subpath: string }
@@ -289,7 +285,7 @@ export class FileHelper<A> {
await onCreated(this.path).catch((e) => console.error(asError(e)))
}
}
return new Promise<never>((_, rej) => rej(new AbortedError()))
return new Promise<never>((_, rej) => rej(new Error('aborted')))
}
private readOnChange<B>(
@@ -487,7 +483,7 @@ export class FileHelper<A> {
toFile: (dataIn: Raw) => string,
fromFile: (rawData: string) => Raw,
validate: (data: Transformed) => A,
transformers: Transformers<Raw, Transformed, A> | undefined,
transformers: Transformers<Raw, Transformed> | undefined,
) {
return FileHelper.raw<A>(
path,
@@ -497,12 +493,7 @@ export class FileHelper<A> {
}
return toFile(inData as any as Raw)
},
(fileData) => {
if (transformers) {
return transformers.onRead(fromFile(fileData))
}
return fromFile(fileData)
},
fromFile,
validate as (a: unknown) => A,
)
}
@@ -518,12 +509,12 @@ export class FileHelper<A> {
static string<A extends Transformed, Transformed = string>(
path: ToPath,
shape: Validator<Transformed, A>,
transformers: Transformers<string, Transformed, A>,
transformers: Transformers<string, Transformed>,
): FileHelper<A>
static string<A extends Transformed, Transformed = string>(
path: ToPath,
shape?: Validator<Transformed, A>,
transformers?: Transformers<string, Transformed, A>,
transformers?: Transformers<string, Transformed>,
) {
return FileHelper.rawTransformed<A, string, Transformed>(
path,
@@ -540,16 +531,10 @@ export class FileHelper<A> {
/**
* Create a File Helper for a .json file.
*/
static json<A>(path: ToPath, shape: Validator<unknown, A>): FileHelper<A>
static json<A extends Transformed, Transformed = unknown>(
static json<A>(
path: ToPath,
shape: Validator<unknown, A>,
transformers: Transformers<unknown, Transformed, A>,
): FileHelper<A>
static json<A extends Transformed, Transformed = unknown>(
path: ToPath,
shape: Validator<unknown, A>,
transformers?: Transformers<unknown, Transformed, A>,
transformers?: Transformers,
) {
return FileHelper.rawTransformed(
path,
@@ -570,12 +555,12 @@ export class FileHelper<A> {
static yaml<A extends Transformed, Transformed = Record<string, unknown>>(
path: ToPath,
shape: Validator<Transformed, A>,
transformers: Transformers<Record<string, unknown>, Transformed, A>,
transformers: Transformers<Record<string, unknown>, Transformed>,
): FileHelper<A>
static yaml<A extends Transformed, Transformed = Record<string, unknown>>(
path: ToPath,
shape: Validator<Transformed, A>,
transformers?: Transformers<Record<string, unknown>, Transformed, A>,
transformers?: Transformers<Record<string, unknown>, Transformed>,
) {
return FileHelper.rawTransformed<A, Record<string, unknown>, Transformed>(
path,
@@ -596,12 +581,12 @@ export class FileHelper<A> {
static toml<A extends Transformed, Transformed = Record<string, unknown>>(
path: ToPath,
shape: Validator<Transformed, A>,
transformers: Transformers<Record<string, unknown>, Transformed, A>,
transformers: Transformers<Record<string, unknown>, Transformed>,
): FileHelper<A>
static toml<A extends Transformed, Transformed = Record<string, unknown>>(
path: ToPath,
shape: Validator<Transformed, A>,
transformers?: Transformers<Record<string, unknown>, Transformed, A>,
transformers?: Transformers<Record<string, unknown>, Transformed>,
) {
return FileHelper.rawTransformed<A, Record<string, unknown>, Transformed>(
path,
@@ -626,13 +611,13 @@ export class FileHelper<A> {
path: ToPath,
shape: Validator<Transformed, A>,
options: INI.EncodeOptions & INI.DecodeOptions,
transformers: Transformers<Record<string, unknown>, Transformed, A>,
transformers: Transformers<Record<string, unknown>, Transformed>,
): FileHelper<A>
static ini<A extends Transformed, Transformed = Record<string, unknown>>(
path: ToPath,
shape: Validator<Transformed, A>,
options?: INI.EncodeOptions & INI.DecodeOptions,
transformers?: Transformers<Record<string, unknown>, Transformed, A>,
transformers?: Transformers<Record<string, unknown>, Transformed>,
): FileHelper<A> {
return FileHelper.rawTransformed<A, Record<string, unknown>, Transformed>(
path,
@@ -655,12 +640,12 @@ export class FileHelper<A> {
static env<A extends Transformed, Transformed = Record<string, string>>(
path: ToPath,
shape: Validator<Transformed, A>,
transformers: Transformers<Record<string, string>, Transformed, A>,
transformers: Transformers<Record<string, string>, Transformed>,
): FileHelper<A>
static env<A extends Transformed, Transformed = Record<string, string>>(
path: ToPath,
shape: Validator<Transformed, A>,
transformers?: Transformers<Record<string, string>, Transformed, A>,
transformers?: Transformers<Record<string, string>, Transformed>,
) {
return FileHelper.rawTransformed<A, Record<string, string>, Transformed>(
path,

View File

@@ -1,12 +1,12 @@
{
"name": "@start9labs/start-sdk",
"version": "0.4.0-beta.58",
"version": "0.4.0-beta.52",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "@start9labs/start-sdk",
"version": "0.4.0-beta.58",
"version": "0.4.0-beta.52",
"license": "MIT",
"dependencies": {
"@iarna/toml": "^3.0.0",
@@ -18,8 +18,7 @@
"isomorphic-fetch": "^3.0.0",
"mime": "^4.0.7",
"yaml": "^2.7.1",
"zod": "^4.3.6",
"zod-deep-partial": "^1.2.0"
"zod": "^4.3.6"
},
"devDependencies": {
"@types/jest": "^29.4.0",
@@ -5233,19 +5232,9 @@
"resolved": "https://registry.npmjs.org/zod/-/zod-4.3.6.tgz",
"integrity": "sha512-rftlrkhHZOcjDwkGlnUtZZkvaPHCsDATp4pGpuOOMDaTdDDXF91wuVDJoWoPsKX/3YPQ5fHuF3STjcYyKr+Qhg==",
"license": "MIT",
"peer": true,
"funding": {
"url": "https://github.com/sponsors/colinhacks"
}
},
"node_modules/zod-deep-partial": {
"version": "1.4.4",
"resolved": "https://registry.npmjs.org/zod-deep-partial/-/zod-deep-partial-1.4.4.tgz",
"integrity": "sha512-aWkPl7hVStgE01WzbbSxCgX4O+sSpgt8JOjvFUtMTF75VgL6MhWQbiZi+AWGN85SfSTtI9gsOtL1vInoqfDVaA==",
"license": "MIT",
"peerDependencies": {
"zod": "^4.1.13"
}
}
}
}

View File

@@ -1,6 +1,6 @@
{
"name": "@start9labs/start-sdk",
"version": "0.4.0-beta.58",
"version": "0.4.0-beta.52",
"description": "Software development kit to facilitate packaging services for StartOS",
"main": "./package/lib/index.js",
"types": "./package/lib/index.d.ts",
@@ -40,8 +40,7 @@
"isomorphic-fetch": "^3.0.0",
"mime": "^4.0.7",
"yaml": "^2.7.1",
"zod": "^4.3.6",
"zod-deep-partial": "^1.2.0"
"zod": "^4.3.6"
},
"prettier": {
"trailingComma": "all",

2505
web/package-lock.json generated

File diff suppressed because it is too large Load Diff

View File

@@ -33,33 +33,34 @@
"format:check": "prettier --check projects/"
},
"dependencies": {
"@angular/animations": "^21.2.1",
"@angular/cdk": "^21.2.1",
"@angular/common": "^21.2.1",
"@angular/compiler": "^21.2.1",
"@angular/core": "^21.2.1",
"@angular/forms": "^21.2.1",
"@angular/platform-browser": "^21.2.1",
"@angular/pwa": "^21.2.1",
"@angular/router": "^21.2.1",
"@angular/service-worker": "^21.2.1",
"@angular/animations": "^20.3.0",
"@angular/cdk": "^20.1.0",
"@angular/common": "^20.3.0",
"@angular/compiler": "^20.3.0",
"@angular/core": "^20.3.0",
"@angular/forms": "^20.3.0",
"@angular/platform-browser": "^20.3.0",
"@angular/platform-browser-dynamic": "^20.1.0",
"@angular/pwa": "^20.3.0",
"@angular/router": "^20.3.0",
"@angular/service-worker": "^20.3.0",
"@materia-ui/ngx-monaco-editor": "^6.0.0",
"@noble/curves": "^1.4.0",
"@noble/hashes": "^1.4.0",
"@start9labs/argon2": "^0.3.0",
"@start9labs/start-sdk": "file:../sdk/baseDist",
"@taiga-ui/addon-charts": "4.73.0",
"@taiga-ui/addon-commerce": "4.73.0",
"@taiga-ui/addon-mobile": "4.73.0",
"@taiga-ui/addon-table": "4.73.0",
"@taiga-ui/cdk": "4.73.0",
"@taiga-ui/core": "4.73.0",
"@taiga-ui/addon-charts": "4.66.0",
"@taiga-ui/addon-commerce": "4.66.0",
"@taiga-ui/addon-mobile": "4.66.0",
"@taiga-ui/addon-table": "4.66.0",
"@taiga-ui/cdk": "4.66.0",
"@taiga-ui/core": "4.66.0",
"@taiga-ui/dompurify": "4.1.11",
"@taiga-ui/event-plugins": "4.7.0",
"@taiga-ui/experimental": "4.73.0",
"@taiga-ui/icons": "4.73.0",
"@taiga-ui/kit": "4.73.0",
"@taiga-ui/layout": "4.73.0",
"@taiga-ui/experimental": "4.66.0",
"@taiga-ui/icons": "4.66.0",
"@taiga-ui/kit": "4.66.0",
"@taiga-ui/layout": "4.66.0",
"@taiga-ui/polymorpheus": "4.9.0",
"ansi-to-html": "^0.7.2",
"base64-js": "^1.5.1",
@@ -79,7 +80,7 @@
"mime": "^4.0.3",
"monaco-editor": "^0.33.0",
"mustache": "^4.2.0",
"ng-qrcode": "^21.0.0",
"ng-qrcode": "^20.0.0",
"node-jose": "^2.2.0",
"patch-db-client": "file:../patch-db/client",
"pbkdf2": "^3.1.2",
@@ -91,10 +92,10 @@
},
"devDependencies": {
"@angular-experts/hawkeye": "^1.7.2",
"@angular/build": "^21.2.1",
"@angular/cli": "^21.2.1",
"@angular/compiler-cli": "^21.2.1",
"@angular/language-service": "^21.2.1",
"@angular/build": "^20.1.0",
"@angular/cli": "^20.1.0",
"@angular/compiler-cli": "^20.1.0",
"@angular/language-service": "^20.1.0",
"@types/dompurify": "3.0.5",
"@types/estree": "^0.0.51",
"@types/js-yaml": "^4.0.5",
@@ -106,7 +107,7 @@
"@types/uuid": "^8.3.1",
"husky": "^4.3.8",
"lint-staged": "^13.2.0",
"ng-packagr": "^21.2.0",
"ng-packagr": "^20.1.0",
"node-html-parser": "^5.3.3",
"postcss": "^8.4.21",
"prettier": "^3.5.3",

View File

@@ -0,0 +1,42 @@
import { CommonModule } from '@angular/common'
import { NgModule } from '@angular/core'
import {
DocsLinkDirective,
i18nPipe,
SharedPipesModule,
} from '@start9labs/shared'
import {
TuiAppearance,
TuiButton,
TuiIcon,
TuiLoader,
TuiPopup,
} from '@taiga-ui/core'
import { TuiDrawer, TuiSkeleton } from '@taiga-ui/kit'
import { CategoriesModule } from '../../pages/list/categories/categories.module'
import { SearchModule } from '../../pages/list/search/search.module'
import { StoreIconComponentModule } from '../store-icon/store-icon.component.module'
import { MenuComponent } from './menu.component'
@NgModule({
imports: [
CommonModule,
SharedPipesModule,
SearchModule,
CategoriesModule,
TuiLoader,
TuiButton,
CategoriesModule,
StoreIconComponentModule,
TuiAppearance,
TuiIcon,
TuiSkeleton,
TuiDrawer,
TuiPopup,
i18nPipe,
DocsLinkDirective,
],
declarations: [MenuComponent],
exports: [MenuComponent],
})
export class MenuModule {}

View File

@@ -1,4 +1,3 @@
import { CommonModule } from '@angular/common'
import {
ChangeDetectionStrategy,
Component,
@@ -7,35 +6,16 @@ import {
OnDestroy,
signal,
} from '@angular/core'
import { DocsLinkDirective, i18nPipe } from '@start9labs/shared'
import { TuiAppearance, TuiButton, TuiIcon, TuiPopup } from '@taiga-ui/core'
import { TuiDrawer, TuiSkeleton } from '@taiga-ui/kit'
import { Subject, takeUntil } from 'rxjs'
import { CategoriesComponent } from '../../pages/list/categories/categories.component'
import { SearchComponent } from '../../pages/list/search/search.component'
import { AbstractCategoryService } from '../../services/category.service'
import { StoreDataWithUrl } from '../../types'
import { StoreIconComponent } from '../store-icon.component'
@Component({
selector: 'menu',
templateUrl: './menu.component.html',
styleUrls: ['./menu.component.scss'],
imports: [
CommonModule,
SearchComponent,
CategoriesComponent,
TuiButton,
StoreIconComponent,
TuiAppearance,
TuiIcon,
TuiSkeleton,
TuiDrawer,
TuiPopup,
i18nPipe,
DocsLinkDirective,
],
changeDetection: ChangeDetectionStrategy.OnPush,
standalone: false,
})
export class MenuComponent implements OnDestroy {
@Input({ required: true })

View File

@@ -1,6 +1,6 @@
import { ChangeDetectionStrategy, Component, Input } from '@angular/core'
import { TuiIcon, TuiTitle } from '@taiga-ui/core'
import { StoreIconComponent } from './store-icon.component'
import { StoreIconComponentModule } from './store-icon/store-icon.component.module'
@Component({
selector: '[registry]',
@@ -17,7 +17,7 @@ import { StoreIconComponent } from './store-icon.component'
}
`,
changeDetection: ChangeDetectionStrategy.OnPush,
imports: [StoreIconComponent, TuiIcon, TuiTitle],
imports: [StoreIconComponentModule, TuiIcon, TuiTitle],
})
export class MarketplaceRegistryComponent {
@Input()

View File

@@ -0,0 +1,10 @@
import { NgModule } from '@angular/core'
import { CommonModule } from '@angular/common'
import { StoreIconComponent } from './store-icon.component'
@NgModule({
declarations: [StoreIconComponent],
imports: [CommonModule],
exports: [StoreIconComponent],
})
export class StoreIconComponentModule {}

View File

@@ -21,6 +21,7 @@ import { knownRegistries, sameUrl } from '@start9labs/shared'
`,
styles: ':host { overflow: hidden; }',
changeDetection: ChangeDetectionStrategy.OnPush,
standalone: false,
})
export class StoreIconComponent {
@Input()

View File

@@ -1,4 +1,3 @@
import { CommonModule } from '@angular/common'
import {
ChangeDetectionStrategy,
Component,
@@ -6,11 +5,7 @@ import {
Input,
Output,
} from '@angular/core'
import { RouterModule } from '@angular/router'
import { LocalizePipe } from '@start9labs/shared'
import { T } from '@start9labs/start-sdk'
import { TuiAppearance, TuiIcon } from '@taiga-ui/core'
import { TuiSkeleton } from '@taiga-ui/kit'
const ICONS: Record<string, string> = {
all: '@tui.layout-grid',
@@ -31,15 +26,8 @@ const ICONS: Record<string, string> = {
selector: 'marketplace-categories',
templateUrl: 'categories.component.html',
styleUrls: ['categories.component.scss'],
imports: [
RouterModule,
CommonModule,
TuiAppearance,
TuiIcon,
TuiSkeleton,
LocalizePipe,
],
changeDetection: ChangeDetectionStrategy.OnPush,
standalone: false,
})
export class CategoriesComponent {
@Input()

View File

@@ -0,0 +1,15 @@
import { TuiIcon, TuiAppearance } from '@taiga-ui/core'
import { CommonModule } from '@angular/common'
import { NgModule } from '@angular/core'
import { TuiSkeleton } from '@taiga-ui/kit'
import { LocalizePipe } from '@start9labs/shared'
import { CategoriesComponent } from './categories.component'
import { RouterModule } from '@angular/router'
@NgModule({
imports: [RouterModule, CommonModule, TuiAppearance, TuiIcon, TuiSkeleton, LocalizePipe],
declarations: [CategoriesComponent],
exports: [CategoriesComponent],
})
export class CategoriesModule {}

View File

@@ -1,15 +1,12 @@
import { CommonModule } from '@angular/common'
import { ChangeDetectionStrategy, Component, Input } from '@angular/core'
import { RouterModule } from '@angular/router'
import { LocalizePipe, TickerComponent } from '@start9labs/shared'
import { MarketplacePkg } from '../../../types'
@Component({
selector: 'marketplace-item',
templateUrl: 'item.component.html',
styleUrls: ['item.component.scss'],
imports: [CommonModule, RouterModule, TickerComponent, LocalizePipe],
changeDetection: ChangeDetectionStrategy.OnPush,
standalone: false,
})
export class ItemComponent {
@Input({ required: true })

View File

@@ -0,0 +1,12 @@
import { CommonModule } from '@angular/common'
import { NgModule } from '@angular/core'
import { RouterModule } from '@angular/router'
import { LocalizePipe, SharedPipesModule, TickerComponent } from '@start9labs/shared'
import { ItemComponent } from './item.component'
@NgModule({
declarations: [ItemComponent],
exports: [ItemComponent],
imports: [CommonModule, RouterModule, SharedPipesModule, TickerComponent, LocalizePipe],
})
export class ItemModule {}

View File

@@ -1,4 +1,3 @@
import { CommonModule } from '@angular/common'
import {
ChangeDetectionStrategy,
Component,
@@ -6,15 +5,13 @@ import {
Input,
Output,
} from '@angular/core'
import { FormsModule } from '@angular/forms'
import { TuiIcon } from '@taiga-ui/core'
@Component({
selector: 'marketplace-search',
templateUrl: 'search.component.html',
styleUrls: ['search.component.scss'],
imports: [FormsModule, CommonModule, TuiIcon],
changeDetection: ChangeDetectionStrategy.OnPush,
standalone: false,
})
export class SearchComponent {
@Input()

View File

@@ -0,0 +1,12 @@
import { CommonModule } from '@angular/common'
import { NgModule } from '@angular/core'
import { FormsModule } from '@angular/forms'
import { TuiIcon } from '@taiga-ui/core'
import { SearchComponent } from './search.component'
@NgModule({
imports: [FormsModule, CommonModule, TuiIcon],
declarations: [SearchComponent],
exports: [SearchComponent],
})
export class SearchModule {}

View File

@@ -1,12 +1,7 @@
import { KeyValue } from '@angular/common'
import {
ChangeDetectionStrategy,
Component,
inject,
Input,
} from '@angular/core'
import { ChangeDetectionStrategy, Component, inject, Input } from '@angular/core'
import { RouterModule } from '@angular/router'
import { i18nPipe, i18nService } from '@start9labs/shared'
import { ExverPipesModule, i18nPipe, i18nService } from '@start9labs/shared'
import { T } from '@start9labs/start-sdk'
import { TuiAvatar, TuiLineClamp } from '@taiga-ui/kit'
import { MarketplacePkgBase } from '../../../types'
@@ -25,7 +20,9 @@ import { MarketplacePkgBase } from '../../../types'
<tui-line-clamp [linesLimit]="2" [content]="titleContent" />
<ng-template #titleContent>
<div class="title">
<span>{{ getTitle(dep.key) }}</span>
<span>
{{ getTitle(dep.key) }}
</span>
<p>
@if (dep.value.optional) {
<span>({{ 'Optional' | i18n }})</span>
@@ -40,7 +37,9 @@ import { MarketplacePkgBase } from '../../../types'
[content]="descContent"
class="description"
/>
<ng-template #descContent>{{ dep.value.description }}</ng-template>
<ng-template #descContent>
{{ dep.value.description }}
</ng-template>
</div>
</div>
`,
@@ -95,7 +94,7 @@ import { MarketplacePkgBase } from '../../../types'
}
`,
changeDetection: ChangeDetectionStrategy.OnPush,
imports: [RouterModule, TuiAvatar, TuiLineClamp, i18nPipe],
imports: [RouterModule, TuiAvatar, ExverPipesModule, TuiLineClamp, i18nPipe],
})
export class MarketplaceDepItemComponent {
private readonly i18nService = inject(i18nService)

View File

@@ -1,6 +1,6 @@
import { ChangeDetectionStrategy, Component, Input } from '@angular/core'
import { RouterLink } from '@angular/router'
import { i18nPipe, TrustUrlPipe } from '@start9labs/shared'
import { i18nPipe, SharedPipesModule } from '@start9labs/shared'
import { TuiTitle } from '@taiga-ui/core'
import { TuiAvatar } from '@taiga-ui/kit'
import { TuiCell } from '@taiga-ui/layout'
@@ -47,7 +47,14 @@ import { MarketplacePkg } from '../../types'
}
`,
changeDetection: ChangeDetectionStrategy.OnPush,
imports: [RouterLink, TuiCell, TuiTitle, TrustUrlPipe, TuiAvatar, i18nPipe],
imports: [
RouterLink,
TuiCell,
TuiTitle,
SharedPipesModule,
TuiAvatar,
i18nPipe,
],
})
export class MarketplaceFlavorsComponent {
@Input()

View File

@@ -1,5 +1,5 @@
import { ChangeDetectionStrategy, Component, Input } from '@angular/core'
import { TickerComponent } from '@start9labs/shared'
import { SharedPipesModule, TickerComponent } from '@start9labs/shared'
import { T } from '@start9labs/start-sdk'
@Component({
@@ -118,7 +118,7 @@ import { T } from '@start9labs/start-sdk'
}
`,
changeDetection: ChangeDetectionStrategy.OnPush,
imports: [TickerComponent],
imports: [SharedPipesModule, TickerComponent],
})
export class MarketplacePackageHeroComponent {
@Input({ required: true })

Some files were not shown because too many files have changed in this diff Show More