Compare commits


94 Commits

Author SHA1 Message Date
gStart9
6ec2feb230 Fix docs URLs in start-tunnel installer output 2026-03-11 13:36:22 -06:00
Alex Inkin
be921b7865 chore: update packages (#3132)
* chore: update packages

* start tunnel messaging

* chore: standalone

* pbpaste instead

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2026-03-09 09:53:47 -06:00
Matt Hill
a4bae73592 rpc/v1 for polling 2026-03-06 11:25:03 -07:00
Matt Hill
8b89f016ad task fix and keyboard fix (#3130)
* task fix and keyboard fix

* fixes for build scripts

* passthrough feature

* feat: inline domain health checks and improve address UX

- addPublicDomain returns DNS query + port check results (AddPublicDomainRes)
  so frontend skips separate API calls after adding a domain
- addPrivateDomain returns check_dns result for the gateway
- Support multiple ports per domain in validation modal (deduplicated)
- Run port checks concurrently via futures::future::join_all
- Add note to add-domain dialog showing other interfaces on same host
- Add addXForwardedHeaders to knownProtocols in SDK Host.ts
- Add plugin filter kind, pluginId filter, matchesAny, and docs to
  getServiceInterface.ts
- Add PassthroughInfo type and passthroughs field to NetworkInfo
- Pluralize "port forwarding rules" in i18n dictionaries

* feat: add shared host note to private domain dialog with i18n

* fix: scope public domain to single binding and return single port check

Accept internalPort in AddPublicDomainParams to target a specific
binding. Disable the domain on all other bindings. Return a single
CheckPortRes instead of Vec. Revert multi-port UI to singular port
display from 0f8a66b35.

* better shared hostname approach, and improve look and feel of addresses tables

* fix starttls

* preserve usb as top efi boot option

* fix race condition in wan ip check

* sdk beta.56

* various bugs, improve smtp

* multiple bugs, better outbound gateway UX

* remove non option from smtp for better package compat

* bump sdk

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2026-03-06 00:30:06 -07:00
Aiden McClelland
3320391fcc feat: support preferred external ports besides 443 (#3117)
* docs: update preferred external port design in TODO

* docs: add user-controlled public/private and port forward mapping to design

* docs: overhaul interfaces page design with view/manage split and per-address controls

* docs: move address enable/disable to overflow menu, add SSL indicator, defer UI placement decisions

* chore: remove tor from startos core

Tor is being moved from a built-in OS feature to a service. This removes
the Arti-based Tor client, onion address management, hidden service
creation, and all related code from the core backend, frontend, and SDK.

- Delete core/src/net/tor/ module (~2060 lines)
- Remove OnionAddress, TorSecretKey, TorController from all consumers
- Remove HostnameInfo::Onion and HostAddress::Onion variants
- Remove onion CRUD RPC endpoints and tor subcommand
- Remove tor key handling from account and backup/restore
- Remove ~12 tor-related Cargo dependencies (arti-client, torut, etc.)
- Remove tor UI components, API methods, mock data, and routes
- Remove OnionHostname and tor patterns/regexes from SDK
- Add v0_4_0_alpha_20 database migration to strip onion data
- Bump version to 0.4.0-alpha.20

* chore: flatten HostnameInfo from enum to struct

HostnameInfo only had one variant (Ip) after removing Tor. Flatten it
into a plain struct with fields gateway, public, hostname. Remove all
kind === 'ip' type guards and narrowing across SDK, frontend, and
container runtime. Update DB migration to strip the kind field.

* chore: format RPCSpec.md markdown table

* docs: update TODO.md with DerivedAddressInfo design, remove completed tor task

* feat: implement preferred port allocation and per-address enable/disable

- Add AvailablePorts::try_alloc() with SSL tracking (BTreeMap<u16, bool>)
- Add DerivedAddressInfo on BindInfo with private_disabled/public_enabled/possible sets
- Add Bindings wrapper with Map impl for patchdb indexed access
- Flatten HostAddress from single-variant enum to struct
- Replace set-gateway-enabled RPC with set-address-enabled
- Remove hostname_info from Host; computed addresses now in BindInfo.addresses.possible
- Compute possible addresses inline in NetServiceData::update()
- Update DB migration, SDK types, frontend, and container-runtime
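
A toy model of the allocator described in the list above; the field layout and the scan-upward fallback policy are assumptions, not the real `AvailablePorts`:

```rust
use std::collections::BTreeMap;

pub struct AvailablePorts {
    allocated: BTreeMap<u16, bool>, // port -> whether SSL terminates there
}

impl AvailablePorts {
    /// Try to claim `preferred`; otherwise scan upward for a free port.
    pub fn try_alloc(&mut self, preferred: u16, ssl: bool) -> Option<u16> {
        let mut candidate = preferred;
        while self.allocated.contains_key(&candidate) {
            candidate = candidate.checked_add(1)?; // exhausted at u16::MAX
        }
        self.allocated.insert(candidate, ssl);
        Some(candidate)
    }
}
```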

* feat: replace InterfaceFilter with ForwardRequirements, add WildcardListener, complete alpha.20 bump

- Replace DynInterfaceFilter with ForwardRequirements for per-IP forward
  precision with source-subnet iptables filtering for private forwards
- Add WildcardListener (binds [::]:port) to replace the per-gateway
  NetworkInterfaceListener/SelfContainedNetworkInterfaceListener/
  UpgradableListener infrastructure
- Update forward-port script with src_subnet and excluded_src env vars
- Remove unused filter types and listener infrastructure from gateway.rs
- Add availablePorts migration (IdPool -> BTreeMap<u16, bool>) to alpha.20
- Complete version bump to 0.4.0-alpha.20 in SDK and web
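
The WildcardListener idea above, reduced to its core: one dual-stack socket on `[::]:port` replaces the per-gateway listeners. A minimal sketch (on Linux, `[::]` also accepts IPv4-mapped connections as long as `bindv6only` is off):

```rust
use std::net::{Ipv6Addr, SocketAddr};
use tokio::net::TcpListener;

async fn wildcard_listener(port: u16) -> std::io::Result<TcpListener> {
    // one socket for all interfaces, v4 and v6
    TcpListener::bind(SocketAddr::from((Ipv6Addr::UNSPECIFIED, port))).await
}
```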

* outbound gateway support (#3120)

* Multiple (#3111)

* fix alerts i18n, fix status display, better, remove usb media, hide shutdown for install complete

* trigger change detection for localize pipe and round out implementing localize pipe for consistency even though not needed

* Fix PackageInfoShort to handle LocaleString on releaseNotes (#3112)

* Fix PackageInfoShort to handle LocaleString on releaseNotes

* fix: filter by target_version in get_matching_models and pass otherVersions from install

* chore: add exver documentation for ai agents

* frontend plus some be types

---------

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>

* feat: replace SourceFilter with IpNet, add policy routing, remove MASQUERADE

* build ts types and fix i18n

* fix license display in marketplace

* wip refactor

* chore: update ts bindings for preferred port design

* feat: refactor NetService to watch DB and reconcile network state

- NetService sync task now uses PatchDB DbWatch instead of being called
  directly after DB mutations
- Read gateways from DB instead of network interface context when
  updating host addresses
- gateway sync updates all host addresses in the DB
- Add Watch<u64> channel for callers to wait on sync completion
- Fix ts-rs codegen bug with #[ts(skip)] on flattened Plugin field
- Update SDK getServiceInterface.ts for new HostnameInfo shape
- Remove unnecessary HTTPS redirect in static_server.rs
- Fix tunnel/api.rs to filter for WAN IPv4 address

* re-arrange (#3123)

* new service interface page

* feat: add mdns hostname metadata variant and fix vhost routing

- Add HostnameMetadata::Mdns variant to distinguish mDNS from private domains
- Mark mDNS addresses as private (public: false) since mDNS is local-only
- Fall back to null SNI entry when hostname not found in vhost mapping
- Simplify public detection in ProxyTarget filter
- Pass hostname to update_addresses for mDNS domain name generation
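
A sketch of the null-SNI fallback described above, assuming the vhost map is keyed by `Option<String>` (the real mapping type is not shown in this log):

```rust
use std::collections::BTreeMap;

fn lookup_target<'a, T>(
    vhosts: &'a BTreeMap<Option<String>, T>,
    sni: Option<&str>,
) -> Option<&'a T> {
    // exact hostname match first, then the catch-all `None` entry
    sni.and_then(|name| vhosts.get(&Some(name.to_owned())))
        .or_else(|| vhosts.get(&None))
}
```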

* looking good

* feat: add port_forwards field to Host for tracking gateway forwarding rules

* update bindings for API types, add ARCHITECTURE (#3124)

* update binding for API types, add ARCHITECTURE

* translations

* fix: add CONNMARK restore-mark to mangle OUTPUT chain

The CONNMARK --restore-mark rule was only in PREROUTING, which handles
forwarded packets. Locally-bound listeners (e.g. vhost) generate replies
through the OUTPUT chain, where the fwmark was never restored. This
caused response packets to route via the default table instead of back
through the originating interface.
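
In effect the fix installs the same restore-mark rule in both chains; a sketch of that shape (the helper and its error handling are illustrative):

```rust
use tokio::process::Command;

async fn restore_connmark(chain: &str) -> std::io::Result<()> {
    // -t mangle: the fwmark must be restored before the routing decision
    let status = Command::new("iptables")
        .args(["-t", "mangle", "-A", chain, "-j", "CONNMARK", "--restore-mark"])
        .status()
        .await?;
    if !status.success() {
        return Err(std::io::Error::other(format!("iptables failed for {chain}")));
    }
    Ok(())
}

// PREROUTING covers forwarded packets; OUTPUT covers replies from
// locally-bound listeners such as vhost:
// restore_connmark("PREROUTING").await?;
// restore_connmark("OUTPUT").await?;
```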

* chore: reserialize db on equal version, update bindings and docs

- Run de/ser roundtrip in pre_init even when db version matches, ensuring
  all #[serde(default)] fields are populated before any typed access
- Add patchdb.md documentation for TypedDbWatch patterns
- Update TS bindings for CheckPortParams, CheckPortRes, ifconfigUrl
- Update CLAUDE.md docs with patchdb and component-level references

* fix: include public gateways for IP-based addresses in vhost targets

The server hostname vhost construction only collected private IPs,
always setting public to empty. Public IP addresses (Ipv4/Ipv6 metadata
with public=true) were never added to the vhost target's public gateway
set, causing the vhost filter to reject public traffic for IP-based
addresses.

* fix: add TLS handshake timeout and fix accept loop deadlock

Two issues in TlsListener::poll_accept:

1. No timeout on TLS handshakes: LazyConfigAcceptor waits indefinitely
   for ClientHello. Attackers that complete TCP handshake but never send
   TLS data create zombie futures in `in_progress` that never complete.
   Fix: wrap the entire handshake in tokio::time::timeout(15s).

2. Missing waker on new-connection pending path: when a TCP connection
   is accepted and the TLS handshake is pending, poll_accept returned
   Pending without calling wake_by_ref(). Since the TcpListener returned
   Ready (not Pending), no waker was registered for it. With edge-
   triggered epoll and no other wakeup source, the task sleeps forever
   and remaining connections in the kernel accept queue are never
   drained. Fix: add cx.waker().wake_by_ref() so the task immediately
   re-polls and continues draining the accept queue.
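
The shape of both fixes, illustratively (the real TlsListener state machine is more involved):

```rust
use std::task::{Context, Poll};
use std::time::Duration;
use tokio::net::TcpStream;

async fn handshake_with_timeout(tcp: TcpStream) -> std::io::Result<TcpStream> {
    // Fix 1: bound the whole handshake so a client that never sends a
    // ClientHello is dropped after 15s instead of leaking a zombie future.
    tokio::time::timeout(Duration::from_secs(15), async move {
        // ... drive the TLS handshake (LazyConfigAcceptor) here ...
        Ok(tcp)
    })
    .await
    .map_err(|_| std::io::Error::new(std::io::ErrorKind::TimedOut, "TLS handshake"))?
}

fn stash_pending_handshake(cx: &mut Context<'_>) -> Poll<std::io::Result<TcpStream>> {
    // Fix 2: the TcpListener just returned Ready, so no waker is registered
    // for it; wake ourselves so poll_accept runs again and keeps draining
    // the kernel accept queue.
    cx.waker().wake_by_ref();
    Poll::Pending
}
```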

* fix: switch BackgroundJobRunner from Vec to FuturesUnordered

BackgroundJobRunner stored active jobs in a Vec<BoxFuture> and polled
ALL of them on every wakeup — O(n) per poll. Since this runs in the
same tokio::select! as the WebServer accept loop, polling overhead from
active connections directly delayed acceptance of new connections.

FuturesUnordered only polls woken futures — O(woken) instead of O(n).
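
A minimal sketch of the swap, assuming boxed unit futures (the real BackgroundJobRunner also accepts new jobs dynamically):

```rust
use futures::future::BoxFuture;
use futures::stream::{FuturesUnordered, StreamExt};

async fn run_jobs(jobs: Vec<BoxFuture<'static, ()>>) {
    // FuturesUnordered registers a waker per future, so each turn of the
    // loop re-polls only the futures that were actually woken.
    let mut active: FuturesUnordered<BoxFuture<'static, ()>> = jobs.into_iter().collect();
    while active.next().await.is_some() {}
}
```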

* chore: update bindings and use typed params for outbound gateway API

* feat: per-service and default outbound gateway routing

Add set-outbound-gateway RPC for packages and set-default-outbound RPC
for the server, with policy routing enforcement via ip rules. Fix
connmark restore to skip packets with existing fwmarks, add bridge
subnet routes to per-interface tables, and fix squashfs path in
update-image-local.sh.

* refactor: manifest wraps PackageMetadata, move dependency_metadata to PackageVersionInfo

Manifest now embeds PackageMetadata via #[serde(flatten)] instead of
duplicating ~14 fields. icon and dependency_metadata moved from
PackageMetadata to PackageVersionInfo since they are registry-enrichment
data loaded from the S9PK archive. merge_with now returns errors on
metadata/icon/dependency_metadata mismatches instead of silently ignoring
them.
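
The flattening described above looks roughly like this (field sets abbreviated; the real structs carry many more fields):

```rust
use serde::{Deserialize, Serialize};

#[derive(Serialize, Deserialize)]
struct PackageMetadata {
    title: String,
    license: String,
    // ... the ~14 fields shared with the registry ...
}

#[derive(Serialize, Deserialize)]
struct Manifest {
    id: String,
    #[serde(flatten)]
    metadata: PackageMetadata, // embedded instead of duplicated
}
```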

* fix: replace .status() with .invoke() for iptables/ip commands

Using .status() leaks stderr directly to system logs, causing noisy
iptables error messages. Switch all networking CLI invocations to use
.invoke() which captures stderr properly. For check-then-act patterns
(iptables -C), use .invoke().await.is_err() instead of
.status().await.map_or(false, |s| s.success()).
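
`.invoke()` is StartOS-internal; the trait below is a hypothetical stand-in, written only to show why capturing output keeps the journal quiet:

```rust
use tokio::process::Command;

// Stand-in for the internal extension: capture output rather than
// inheriting stderr (which is what .status() does).
trait Invoke {
    async fn invoke(&mut self) -> std::io::Result<Vec<u8>>;
}

impl Invoke for Command {
    async fn invoke(&mut self) -> std::io::Result<Vec<u8>> {
        let out = self.output().await?; // stderr is captured, not leaked
        if out.status.success() {
            Ok(out.stdout)
        } else {
            Err(std::io::Error::other(
                String::from_utf8_lossy(&out.stderr).into_owned(),
            ))
        }
    }
}

async fn ensure_rule(rule: &[&str]) -> std::io::Result<()> {
    // check-then-act: `iptables -C` exits nonzero when the rule is absent
    if Command::new("iptables").arg("-C").args(rule).invoke().await.is_err() {
        Command::new("iptables").arg("-A").args(rule).invoke().await?;
    }
    Ok(())
}
```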

* feat: add check-dns gateway endpoint and fix per-interface routing tables

Add a `check-dns` RPC endpoint that verifies whether a gateway's DNS
is properly configured for private domain resolution. Uses a three-tier
check: direct match (DNS == server IP), TXT challenge probe (DNS on
LAN), or failure (DNS off-subnet).

Fix per-interface routing tables to clone all non-default routes from
the main table instead of only the interface's own subnets. This
preserves LAN reachability when the priority-75 catch-all overrides
default routing. Filter out status-only flags (linkdown, dead) that
are invalid for `ip route add`.
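
The three tiers reduce to a small decision; a sketch with the actual probing elided (`resolved` is what the gateway's DNS returned for the domain, `txt_probe_ok` whether the TXT challenge round-tripped):

```rust
use std::net::IpAddr;

enum DnsCheckResult {
    Direct,       // DNS already resolves to this server
    TxtChallenge, // resolver is on the LAN and answered our challenge
    Failed,       // resolver is off-subnet; private domains won't resolve
}

fn classify_dns(resolved: Option<IpAddr>, server_ip: IpAddr, txt_probe_ok: bool) -> DnsCheckResult {
    match resolved {
        Some(ip) if ip == server_ip => DnsCheckResult::Direct,
        _ if txt_probe_ok => DnsCheckResult::TxtChallenge,
        _ => DnsCheckResult::Failed,
    }
}
```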

* refactor: rename manifest metadata fields and improve error display

Rename wrapperRepo→packageRepo, marketingSite→marketingUrl,
docsUrl→docsUrls (array), remove supportSite. Add display_src/display_dbg
helpers to Error. Fix DepInfo description type to LocaleString. Update
web UI, SDK bindings, tests, and fixtures to match. Clean up cli_attach
error handling and remove dead commented code.

* chore: bump sdk version to 0.4.0-beta.49

* chore: add createTask decoupling TODO

* chore: add TODO to clear service error state on install/update

* round out dns check, dns server check, port forward check, and gateway port forwards

* chore: add TODOs for URL plugins, NAT hairpinning, and start-tunnel OTA updates

* version instead of os query param

* interface row clickable again, but now with a chevron!

* feat: implement URL plugins with table/row actions and prefill support

- Add URL plugin effects (register, export_url, clear_urls) in core
- Add PluginHostnameInfo, HostnameMetadata::Plugin, and plugin registration types
- Implement plugin URL table in web UI with tableAction button and rowAction overflow menus
- Thread urlPluginMetadata (packageId, hostId, interfaceId, internalPort) as prefill to actions
- Add prefill support to PackageActionData so metadata passes through form dialogs
- Add i18n translations for plugin error messages
- Clean up plugin URLs on package uninstall

* feat: split row_actions into remove_action and overflow_actions for URL plugins

* touch up URL plugins table

* show table even when no addresses

* feat: NAT hairpinning, DNS static servers, clear service error on install

- Add POSTROUTING MASQUERADE rules for container and host hairpin NAT
- Allow bridge subnet containers to reach private forwards via LAN IPs
- Pass bridge_subnet env var from forward.rs to forward-port script
- Use DB-configured static DNS servers in resolver with DB watcher
- Fall back to resolv.conf servers when no static servers configured
- Clear service error state when install/update completes successfully
- Remove completed TODO items
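
The DNS-server fallback described in the list above, as a small sketch (the real code watches the DB; here the static list is just a parameter):

```rust
use std::net::IpAddr;

fn upstream_servers(db_static: &[IpAddr]) -> Vec<IpAddr> {
    if !db_static.is_empty() {
        return db_static.to_vec(); // DB-configured static servers win
    }
    // otherwise fall back to the host's resolv.conf nameservers
    std::fs::read_to_string("/etc/resolv.conf")
        .unwrap_or_default()
        .lines()
        .filter_map(|line| line.strip_prefix("nameserver"))
        .filter_map(|rest| rest.trim().parse().ok())
        .collect()
}
```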

* feat: builder-style InputSpec API, prefill plumbing, and port forward fix

- Add addKey() and add() builder methods to InputSpec with InputSpecTools
- Move OuterType to last generic param on Value, List, and all dynamic methods
- Plumb prefill through getActionInput end-to-end (core → container-runtime → SDK)
- Filter port_forwards to enabled addresses only
- Bump SDK to 0.4.0-beta.50

* fix: propagate host locale into LXC containers and write locale.conf

* chore: remove completed URL plugins TODO

* feat: OTA updates for start-tunnel via apt repository (untested)

- Add apt repo publish script (build/apt/publish-deb.sh) for S3-hosted repo
- Add apt source config and GPG key placeholder (apt/)
- Add tunnel.update.check and tunnel.update.apply RPC endpoints
- Wire up update API in tunnel frontend (api service + mock)
- Uses systemd-run --scope to survive service restart during update

* fix: publish script dpkg-name, s3cfg fallback, and --reinstall for apply

* chore: replace OTA updates TODO with UI TODO for MattDHill

* feat: add getOutboundGateway effect and simplify VersionGraph init/uninit

Add getOutboundGateway effect across core, container-runtime, and SDK
to let services query their effective outbound gateway with callback
support. Remove preInstall/uninstall hooks from VersionGraph as they
are no longer needed.

* frontend start-tunnel updates

* chore: remove completed TODO

* feat: tor hidden service key migration

* chore: migrate from ts-matches to zod across all TypeScript packages

* feat(core): allow setting server hostname

* send prefill for tasks and hide operations to hidden fields

* fix(core): preserve plugin URLs across binding updates

BindInfo::update was replacing addresses with a new DerivedAddressInfo
that cleared the available set, wiping plugin-exported URLs whenever
bind() was called. Also simplify update_addresses plugin preservation
to use retain in place rather than collecting into a separate set.

* minor cleanup from patch-db audit

* clean up prefill flow

* frontend support for setting and changing hostname

* feat(core): refactor hostname to ServerHostnameInfo with name/hostname pair

- Rename Hostname to ServerHostnameInfo, add name + hostname fields
- Add set_hostname_rpc for changing hostname at runtime
- Migrate alpha_20: generate serverInfo.name from hostname, delete ui.name
- Extract gateway.rs helpers to fix rustfmt nesting depth issue
- Add i18n key for hostname validation error
- Update SDK bindings

* add comments to everything potentially consumer facing (#3127)

* add comments to everything potentially consumer facing

* rework smtp

---------

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>

* implement server name

* setup changes

* clean up copy around addresses table

* feat: add zod-deep-partial, partialValidator on InputSpec, and z.deepPartial re-export

* fix: header color in zoom (#3128)

* fix: merge version ranges when adding existing package signer (#3125)

* fix: merge version ranges when adding existing package signer

   Previously, add_package_signer unconditionally inserted the new
   version range, overwriting any existing authorization for that signer.
   Now it OR-merges the new range with the existing one, so running
   signer add multiple times accumulates permissions rather than
   replacing them.
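
In simplified form (a toy VersionRange; the real one comes from the exver crate), the upsert described above:

```rust
use std::collections::BTreeMap;

#[derive(Clone)]
enum VersionRange {
    None,
    Anchor(String), // stand-in for real range syntax like ">=0.4.0"
    Or(Box<VersionRange>, Box<VersionRange>),
}

fn add_package_signer(
    acl: &mut BTreeMap<String, VersionRange>,
    signer: String,
    range: VersionRange,
) {
    // upsert: a fresh signer starts at VersionRange::None, so the merge
    // below yields effectively `range`; an existing signer accumulates
    // `existing || range` instead of being overwritten.
    let entry = acl.entry(signer).or_insert(VersionRange::None);
    let existing = std::mem::replace(entry, VersionRange::None);
    *entry = VersionRange::Or(Box::new(existing), Box::new(range));
}
```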

* add --merge flag to registry package signer add

  Default behavior remains overwrite. When --merge is passed, the new
  version range is OR-merged with the existing one, allowing admins to
  accumulate permissions incrementally.

* add missing attribute to TS type

* make merge optional

* upsert instead of insert

* VersionRange::None on upsert

* fix: header color in zoom

---------

Co-authored-by: Dominion5254 <musashidisciple@proton.me>

* update snake and add about this server to system general

* chore: bump sdk to beta.53, wrap z.deepPartial with passthrough

* reset instead of reset defaults

* action failure show dialog

* chore: bump sdk to beta.54, add device-info RPC, improve SDK abort handling and InputSpec filtering

- Bump SDK version to 0.4.0-beta.54
- Add `server.device-info` RPC endpoint and `s9pk select` CLI command
- Extract `HardwareRequirements::is_compatible()` method, reuse in registry filtering
- Add `AbortedError` class with `muteUnhandled` flag, replace generic abort errors
- Handle unhandled promise rejections in container-runtime with mute support
- Improve `InputSpec.filter()` with `keepByDefault` param and boolean filter values
- Accept readonly tuples in `CommandType` and `splitCommand`
- Remove `sync_host` calls from host API handlers (binding/address changes)
- Filter mDNS hostnames by secure gateway availability
- Derive mDNS enabled state from LAN IPs in web UI
- Add "Open UI" action to address table, disable mDNS toggle
- Hide debug details in service error component
- Update rpc-toolkit docs for no-params handlers

* fix: add --no-nvram to efi grub-install to preserve built-in boot order

* update snake

* disable actions when in error state

* chore: split out nvidia variant

* misc bugfixes

* create manage-release script (untested)

* fix: preserve z namespace types for sdk consumers

* sdk version bump

* new checkPort types

* multiple bugs and better port forward ux

* fix link

* chore: todos and formatting

* fix build

---------

Co-authored-by: Matt Hill <MattDHill@users.noreply.github.com>
Co-authored-by: Matt Hill <mattnine@protonmail.com>
Co-authored-by: Alex Inkin <alexander@inkin.ru>
Co-authored-by: Dominion5254 <musashidisciple@proton.me>
2026-03-04 04:37:31 -07:00
Dominion5254
26a68afdef fix: merge version ranges when adding existing package signer (#3125)
* fix: merge version ranges when adding existing package signer

   Previously, add_package_signer unconditionally inserted the new
   version range, overwriting any existing authorization for that signer.
   Now it OR-merges the new range with the existing one, so running
   signer add multiple times accumulates permissions rather than
   replacing them.

* add --merge flag to registry package signer add

  Default behavior remains overwrite. When --merge is passed, the new
  version range is OR-merged with the existing one, allowing admins to
  accumulate permissions incrementally.

* add missing attribute to TS type

* make merge optional

* upsert instead of insert

* VersionRange::None on upsert
2026-02-18 13:21:33 -07:00
Matt Hill
0260c1532d fix ssh, undeprecate wifi (#3121) 2026-02-12 08:10:01 -07:00
Aiden McClelland
b6262c8e13 Fix PackageInfoShort to handle LocaleString on releaseNotes (#3112)
* Fix PackageInfoShort to handle LocaleString on releaseNotes

* fix: filter by target_version in get_matching_models and pass otherVersions from install

* chore: add exver documentation for ai agents
2026-02-09 19:42:03 +00:00
Matt Hill
ba740a9ee2 Multiple (#3111)
* fix alerts i18n, fix status display, better, remove usb media, hide shutdown for install complete

* trigger change detection for localize pipe and round out implementing localize pipe for consistency even though not needed
2026-02-09 12:41:29 -07:00
Aiden McClelland
f2142f0bb3 add documentation for ai agents (#3115)
* add documentation for ai agents

* docs: consolidate CLAUDE.md and CONTRIBUTING.md, add style guidelines

- Refactor CLAUDE.md to reference CONTRIBUTING.md for build/test/format info
- Expand CONTRIBUTING.md with comprehensive build targets, env vars, and testing
- Add code style guidelines section with conventional commits
- Standardize SDK prettier config to use single quotes (matching web)
- Add project-level Claude Code settings to disable co-author attribution

* style(sdk): apply prettier with single quotes

Run prettier across sdk/base and sdk/package to apply the
standardized quote style (single quotes matching web).

* docs: add USER.md for per-developer TODO filtering

- Add agents/USER.md to .gitignore (contains user identifier)
- Document session startup flow in CLAUDE.md:
  - Create USER.md if missing, prompting for identifier
  - Filter TODOs by @username tags
  - Offer relevant TODOs on session start

* docs: add i18n documentation task to agent TODOs

* docs: document i18n ID patterns in core/

Add agents/i18n-patterns.md covering rust-i18n setup, translation file
format, t!() macro usage, key naming conventions, and locale selection.
Remove completed TODO item and add reference in CLAUDE.md.

* chore: clarify that all builds work on any OS with Docker
2026-02-06 00:10:16 +01:00
gStart9
86ca23c093 Remove redundant https:// strings in start-tunnel installation output (#3114) 2026-02-05 23:22:31 +01:00
Dominion5254
463b6ca4ef propagate Error info (#3116) 2026-02-05 23:21:28 +01:00
Matt Hill
58e0b166cb move comment to safe place 2026-02-02 21:09:19 -07:00
Matt Hill
2a678bb017 fix warning and skip raspberrypi builds for now 2026-02-02 20:16:41 -07:00
Matt Hill
5664456b77 fix for buildjet 2026-02-02 18:51:11 -07:00
Matt Hill
3685b7e57e fix workflows 2026-02-02 18:37:13 -07:00
Matt Hill
989d5f73b1 fix --arch flag to fall back to emulation when native image unavailab… (#3108)
* fix --arch flag to fall back to emulation when native image unavailable, always infer hardware requirement for arch

* better handling of arch filter

* dont cancel in-progress commit workflows and abstract common setup

* cli improvements

fix group handling

* fix cli publish

* alpha.19

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2026-02-03 00:56:59 +00:00
Matt Hill
4f84073cb5 actually eliminate duplicate workflows 2026-01-30 11:28:18 -07:00
Matt Hill
c190295c34 cleaner, also eliminate duplicate workflows 2026-01-30 11:23:40 -07:00
Matt Hill
60875644a1 better i18n checks, better action disabled, fix cert download for ios 2026-01-30 10:59:27 -07:00
Matt Hill
113b09ad01 fix cert download issue in index html 2026-01-29 16:57:12 -07:00
Alex Inkin
2605d0e671 chore: make column shorter (#3107) 2026-01-29 09:54:42 -07:00
Aiden McClelland
d232b91d31 update ota script, rbind for dependency mounts, cli list-ingredients fix, and formatting 2026-01-28 16:09:37 -07:00
Aiden McClelland
c65db31fd9 Feature/consolidate setup (#3092)
* start consolidating

* add start-cli flash-os

* combine install and setup and refactor all

* use http

* undo mock

* fix translation

* translations

* use dialogservice wrapper

* better ST messaging on setup

* only warn on update if breakages (#3097)

* finish setup wizard and ui language-keyboard feature

* fix typo

* wip: localization

* remove start-tunnel readme

* switch to posix strings for language internal

* revert mock

* translate backend strings

* fix missing about text

* help text for args

* feat: add "Add new gateway" option (#3098)

* feat: add "Add new gateway" option

* Update web/projects/ui/src/app/routes/portal/components/form/controls/select.component.ts

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* add translation

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: Matt Hill <mattnine@protonmail.com>

* fix dns selection

* keyboard keymap also

* ability to shutdown after install

* revert mock

* working setup flow + manifest localization

* (mostly) redundant localization on frontend

* version bump

* omit live medium from disk list and better space management

* ignore missing package archive on 035 migration

* fix device migration

* add i18n helper to sdk

* fix install over 0.3.5.1

* fix grub config

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
Co-authored-by: Matt Hill <MattDHill@users.noreply.github.com>
Co-authored-by: Alex Inkin <alexander@inkin.ru>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2026-01-27 14:44:41 -08:00
Aiden McClelland
99871805bd hardware acceleration and support for NVIDIA cards on nonfree images (#3089)
* add nvidia packages

* add nvidia deps to nonfree

* gpu_acceleration flag & nvidia hacking

* fix gpu_config & /tmp/lxc.log

* implement hardware acceleration more dynamically

* refactor OpenUI

* use mknod

* registry updates for multi-hardware-requirements

* pluralize

* handle new registry types

* remove log

* migrations and driver fixes

* wip

* misc patches

* handle nvidia-container differently

* chore: comments (#3093)

* chore: comments

* revert some sizing

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>

* Revert "handle nvidia-container differently"

This reverts commit d708ae53df.

* fix debian containers

* cleanup

* feat: add empty array placeholder in forms (#3095)

* fixes from testing, client side device filtering for better fingerprinting resistance

* fix mac builds

---------

Co-authored-by: Sam Sartor <me@samsartor.com>
Co-authored-by: Matt Hill <mattnine@protonmail.com>
Co-authored-by: Alex Inkin <alexander@inkin.ru>
2026-01-15 11:42:17 -08:00
Aiden McClelland
e8ef39adad misc fixes for alpha.16 (#3091)
* port misc fixes from feature/nvidia

* switch back to official tor proxy on 9050

* refactor OpenUI

* fix typo

* fixes, plus getServiceManifest

* fix EffectCreator, bump to beta.47

* fixes
2026-01-10 12:58:17 -07:00
Remco Ros
466b9217b5 fix: allow (multiple) equal signs in env filehelper values (#3090) 2026-01-06 18:32:03 +00:00
Matt Hill
c9a7f519b9 Misc (#3087)
* help iOS download .crt and begin adding masking for addresses

* only require and show CA for public domain if addSsl

* fix type and revert i18n const

* feat: add address masking and adjust design (#3088)

* feat: add address masking and adjust design

* update lockfile

* chore: move eye button to actions

* chore: refresh notifications and handle action error

* static width for health check name

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>

* hide certificate authorities tab

* alpha.17

* add waiting health check status

* remove "on" from waiting message

* reject on abort in `.watch`

* id migration: nostr -> nostr-rs-relay

* health check waiting state

* use interface type for launch button

* better wording for masked

* cleaner

* sdk improvements

* fix type error

* fix notification badge issue

---------

Co-authored-by: Alex Inkin <alexander@inkin.ru>
Co-authored-by: Aiden McClelland <me@drbonez.dev>
2025-12-31 11:30:57 -07:00
Aiden McClelland
96ae532879 Refactor/project structure (#3085)
* refactor project structure

* environment-based default registry

* fix tests

* update build container

* use docker platform for iso build emulation

* simplify compat

* Fix docker platform spec in run-compat.sh

* handle riscv compat

* fix bug with dep error exists attr

* undo removal of sorting

* use qemu for iso stage

---------

Co-authored-by: Mariusz Kogen <k0gen@pm.me>
Co-authored-by: Matt Hill <mattnine@protonmail.com>
2025-12-22 13:39:38 -07:00
Alex Inkin
eda08d5b0f chore: update taiga (#3086)
* chore: update taiga

* chore: fix UI menu
2025-12-22 13:33:02 -07:00
Remco Ros
7c12b58bb5 fix: refactor dns to handle tcp connections: (#3083)
* fix: refactor dns to handle tcp connections:
- do not use long-lived tcp connections to upstream dns servers
- when incoming request is over tcp, force a tcp lookup instead of udp

This solves cases where large DNS records were not being resolved due to the UDP->TCP switch-over.

* use forwarding resolver for fallback

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2025-12-19 23:26:29 -07:00
Aiden McClelland
5446c89bc0 don't create src dir on readonly bind mount (#3084) 2025-12-19 23:26:15 -07:00
Matt Hill
2d0251e585 StartTunnel random subnet and also 80 to 5443 (#3082)
* random subnet and also 80 to 5443

* fix getNext

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2025-12-19 23:25:58 -07:00
Aiden McClelland
f41710c892 dynamic subnet in port forward 2025-12-18 20:19:08 -07:00
Aiden McClelland
df3f79f282 fix ws timeouts 2025-12-18 14:54:19 -07:00
Aiden McClelland
f8df692865 Merge pull request #3079 from Start9Labs/hotfix/alpha.16
hotfixes for alpha.16
2025-12-18 11:32:47 -07:00
Aiden McClelland
0c6d3b188d Merge branch 'hotfix/alpha.16' of github.com:Start9Labs/start-os into hotfix/alpha.16 2025-12-18 11:31:51 -07:00
Aiden McClelland
e7a38863ab fix registry auth 2025-12-18 11:31:30 -07:00
Alex Inkin
720e0fcdab fix: keep uptime width constant and service table DOM cached (#3078)
* fix: keep uptime width constant and service table DOM cached

* show error status and fix columns spacing

* revert const

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2025-12-18 07:51:34 -07:00
Aiden McClelland
bf8ff84522 inconsequential ssl changes 2025-12-18 05:56:51 -07:00
Aiden McClelland
5a9510238e add map & eq to getServiceInterface 2025-12-18 04:30:08 -07:00
Aiden McClelland
7b3c74179b add local auth to registry 2025-12-18 04:29:34 -07:00
Aiden McClelland
cd70fa4c32 hotfixes for alpha.16 2025-12-18 04:22:56 -07:00
Aiden McClelland
83133ced6a consolidate crates 2025-12-17 21:15:24 -07:00
Aiden McClelland
6c5179a179 handle flavor atom version range 2025-12-17 14:18:43 -07:00
Aiden McClelland
e33ab39b85 hotfix 2025-12-17 12:17:22 -07:00
Aiden McClelland
9567bcec1b randomize default start-tunnel subnet 2025-12-16 17:34:23 -07:00
Aiden McClelland
550b16dc0b fix build for cargo deps 2025-12-16 17:33:55 -07:00
Matt Hill
5d8331b7f7 Feature/tor logs (#3077)
* add tor logs, rework services page, other small things

* feat: sortable service table and mobile view

---------

Co-authored-by: waterplea <alexander@inkin.ru>
2025-12-16 12:47:43 -07:00
Aiden McClelland
e35b643e51 use arm runner for riscv 2025-12-15 16:19:06 -07:00
Aiden McClelland
bc6a92677b readd riscv target 2025-12-15 16:18:44 -07:00
Aiden McClelland
f52072e6ec sdk beta.45 2025-12-15 15:23:05 -07:00
Remco Ros
9c43c43a46 fix: shutdown order (#3073)
* fix: race condition in Daemon.stop()

* fix: do not stop Daemon on context leave

* fix: remove duplicate Daemons.term calls

* feat: honor dependency order when terminating Daemons

* fixes, and remove started

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2025-12-15 15:21:23 -07:00
Aiden McClelland
0430e0f930 alpha.16 (#3068)
* add support for idmapped mounts to start-sdk

* misc fixes

* misc fixes

* add default to textarea

* fix iptables masquerade rule

* fix textarea types

* more fixes

* better logging for rsync

* fix tty size

* fix wg conf generation for android

* disable file mounts on dependencies

* mostly there, some styling issues (#3069)

* mostly there, some styling issues

* fix: address comments (#3070)

* fix: address comments

* fix: fix

* show SSL for any address with secure protocol and ssl added

* better sorting and messaging

---------

Co-authored-by: Alex Inkin <alexander@inkin.ru>

* fixes for nextcloud

* allow sidebar navigation during service state transitions

* wip: x-forwarded headers

* implement x-forwarded-for proxy

* lowercase domain names and fix warning popover bug

* fix http2 websockets

* fix websocket retry behavior

* add arch filters to s9pk pack

* use docker for start-cli install

* add version range to package signer on registry

* fix rcs < 0

* fix user information parsing

* refactor service interface getters

* disable idmaps

* build fixes

* update docker login action

* streamline build

* add start-cli workflow

* rename

* riscv64gc

* fix ui packing

* no default features on cli

* make cli depend on GIT_HASH

* more build fixes

* more build fixes

* interpolate arch within dockerfile

* fix tests

* add launch ui to service page plus other small improvements (#3075)

* add launch ui to service page plus other small improvements

* revert translation disable

* add spinner to service list if service is healthy and loading

* chore: some visual tune up

* chore: update Taiga UI

---------

Co-authored-by: waterplea <alexander@inkin.ru>

* fix backups

* feat: use arm hosted runners and don't fail when apt package does not exist (#3076)

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
Co-authored-by: Shadowy Super Coder <musashidisciple@proton.me>
Co-authored-by: Matt Hill <MattDHill@users.noreply.github.com>
Co-authored-by: Alex Inkin <alexander@inkin.ru>
Co-authored-by: Remco Ros <remcoros@live.nl>
2025-12-15 13:30:50 -07:00
Mariusz Kogen
b945243d1a refactor(tor-check): improve proxy support, error handling ... (#3072)
refactor(tor-check): improve proxy support, error handling, and output formatting
2025-12-15 18:14:46 +01:00
Aiden McClelland
d8484a8b26 add docker build for start-registry (#3067)
* add docker build for start-registry

* login to docker

* add workflow dependency

* fix path

* fix add

* fix gh actions permissions

* use apt-get
2025-12-05 18:12:09 -07:00
Matt Hill
3c27499795 Refactor/status info (#3066)
* refactor status info

* wip fe

* frontend changes and version bump

* fix tests and motd

* add registry workflow

* better starttunnel instructions

* placeholders for starttunnel tables

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2025-12-02 23:31:02 +00:00
Remco Ros
7c772e873d fix: pass --allow-discards to luksOpen for trim support (#3064) 2025-12-02 22:00:10 +00:00
Matt Hill
db2fab245e better language and show wg config on device save (#3065)
* better language and show wg config on device save

* chore: fix

---------

Co-authored-by: waterplea <alexander@inkin.ru>
2025-12-02 21:42:14 +00:00
Matt Hill
a9c9917f1a better ST instructions 2025-12-01 18:03:07 -07:00
Matt Hill
23e2e9e9cc Update START-TUNNEL.md 2025-11-30 16:32:40 -07:00
Aiden McClelland
2369e92460 Update download link for StartTunnel installation 2025-11-28 14:28:54 -07:00
Aiden McClelland
a53b15f2a3 improve StartTunnel validation and GC (#3062)
* improve StartTunnel validation and GC

* update sdk formatting
2025-11-28 13:14:52 -07:00
Aiden McClelland
72eb8b1eb6 Update START-TUNNEL.md 2025-11-27 08:57:38 -07:00
Aiden McClelland
4db54f3b83 Update START-TUNNEL.md 2025-11-27 08:56:05 -07:00
Aiden McClelland
24eb27f005 minor bugfixes for alpha.14 (#3058)
* overwrite AllowedIPs in wg config
mute UnknownCA errors

* fix upgrade issues

* allow start9 user to access journal

* alpha.15

* sort actions lexicographically and show desc in marketplace details

* add registry package download cli command

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2025-11-26 16:23:08 -07:00
Matt Hill
009d76ea35 More FE fixes (#3056)
* tell user to restart server after kiosk change

* remove unused import

* don't show tor address on server setup

* chore: address comments

* revert mock

* chore: remove uptime block on mobile

* utiliser le futur proche

* chore: comments

* don't show loading on authorities tab

* chore: fix mobile unions

---------

Co-authored-by: waterplea <alexander@inkin.ru>
Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
2025-11-25 23:43:19 +00:00
Aiden McClelland
6e8a425eb1 overwrite AllowedIPs in wg config (#3055)
mute UnknownCA errors
2025-11-21 11:30:21 -07:00
Aiden McClelland
66188d791b fix start-tunnel artifact upload 2025-11-20 10:53:23 -07:00
Aiden McClelland
015ff02d71 fix build 2025-11-20 01:05:50 -07:00
Aiden McClelland
10bfaf5415 fix start-tunnel build 2025-11-20 00:30:33 -07:00
Aiden McClelland
e3e0b85e0c Bugfix/alpha.13 (#3053)
* bugfixes for alpha.13

* minor fixes

* version bump

* start-tunnel workflow

* sdk beta 44

* defaultFilter

* fix reset-password on tunnel auth

* explicitly rebuild types

* fix typo

* ubuntu-latest runner

* add cleanup steps

* fix env on attach
2025-11-19 22:48:49 -07:00
Matt Hill
ad0632892e Various (#3051)
* tell user to restart server after kiosk change

* remove unused import

* don't show tor address on server setup

* chore: address comments

* revert mock

* chore: remove uptime block on mobile

* utiliser le futur proche

---------

Co-authored-by: waterplea <alexander@inkin.ru>
Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
2025-11-19 10:35:07 -07:00
Aiden McClelland
f26791ba39 fix raspi fsck 2025-11-17 12:17:58 -07:00
Aiden McClelland
2fbaaebf44 Bugfixes for alpha.12 (#3049)
* squashfs-wip

* sdk fixes

* misc fixes

* bump sdk

* Include StartTunnel installation command

Added installation instructions for StartTunnel.

* CA instead of leaf for StartTunnel (#3046)

* updated docs for CA instead of cert

* generate ca instead of self-signed in start-tunnel

* Fix formatting in START-TUNNEL.md installation instructions

* Fix formatting in START-TUNNEL.md

* fix infinite loop

* add success message to install

* hide loopback and bridge gateways

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>

* prevent gateways from getting stuck empty

* fix set-password

* misc networking fixes

* build and efi fixes

* efi fixes

* alpha.13

* remove cross

* fix tests

* provide path to upgrade

* fix networkmanager issues

* remove squashfs before creating

---------

Co-authored-by: Matt Hill <MattDHill@users.noreply.github.com>
2025-11-15 22:33:03 -07:00
StuPleb
edb916338c minor typos and grammar (#3047)
* minor typos and grammar

* added missing word - compute
2025-11-14 12:48:41 -07:00
Aiden McClelland
f7e947d37d Fix installation command for StartTunnel (#3048) 2025-11-14 12:42:05 -07:00
Aiden McClelland
a9e3d1ed75 Revise StartTunnel installation and update commands
Updated installation and update instructions for StartTunnel.
2025-11-14 12:39:53 -07:00
Matt Hill
ce97827c42 CA instead of leaf for StartTunnel (#3046)
* updated docs for CA instead of cert

* generate ca instead of self-signed in start-tunnel

* Fix formatting in START-TUNNEL.md installation instructions

* Fix formatting in START-TUNNEL.md

* fix infinite loop

* add success message to install

* hide loopback and bridge gateways

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
2025-11-10 14:15:58 -07:00
Aiden McClelland
3efec07338 Include StartTunnel installation command
Added installation instructions for StartTunnel.
2025-11-07 20:00:31 +00:00
Aiden McClelland
68f401bfa3 Feature/start tunnel (#3037)
* fix live-build resolv.conf

* improved debuggability

* wip: start-tunnel

* fixes for trixie and tor

* non-free-firmware on trixie

* wip

* web server WIP

* wip: tls refactor

* FE patchdb, mocks, and most endpoints

* fix editing records and patch mocks

* refactor complete

* finish api

* build and formatter update

* minor change to viewing addresses and fix build

* fixes

* more providers

* endpoint for getting config

* fix tests

* api fixes

* wip: separate port forward controller into parts

* simplify iptables rules

* bump sdk

* misc fixes

* predict next subnet and ip, use wan ips, and form validation

* refactor: break big components apart and address todos (#3043)

* refactor: break big components apart and address todos

* starttunnel readme, fix pf mocks, fix adding tor domain in startos

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>

* better tui

* tui tweaks

* fix: address comments

* better regex for subnet

* fixes

* better validation

* handle rpc errors

* build fixes

* fix: address comments (#3044)

* fix: address comments

* fix unread notification mocks

* fix row click for notification

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>

* fix raspi build

* fix build

* fix build

* fix build

* fix build

* try to fix build

* fix tests

* fix tests

* fix rsync tests

* delete useless effectful test

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
Co-authored-by: Alex Inkin <alexander@inkin.ru>
2025-11-07 10:12:05 +00:00
Matt Hill
1ea525feaa make textarea rows configurable (#3042)
* make textarea rows configurable

* add comments

* better defaults
2025-10-31 11:46:49 -06:00
Alex Inkin
57c4a7527e fix: make CPU meter not go to 11 (#3038) 2025-10-30 16:13:39 -06:00
Aiden McClelland
5aa9c045e1 fix live-build resolv.conf (#3035)
* fix live-build resolv.conf

* improved debuggability
2025-09-24 22:44:25 -06:00
Matt Hill
6f1900f3bb limit adding gateway to StartTunnel, better copy around Tor SSL (#3033)
* limit adding gateway to StartTunnel, better copy around Tor SSL

* properly differentiate ssl

* exclude disconnected gateways

* better error handling

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2025-09-24 13:22:26 -06:00
Aiden McClelland
bc62de795e bugfixes for alpha.10 (#3032)
* bugfixes for alpha.10

* bump raspi kernel

* rpi kernel bump

* alpha.11
2025-09-23 22:42:17 +00:00
Alex Inkin
c62ca4b183 fix: make long dropdown options wrap (#3031) 2025-09-21 06:06:33 -06:00
Alex Inkin
876e5bc683 fix: fix overflowing interface table (#3027) 2025-09-20 07:10:58 -06:00
Alex Inkin
b99f3b73cd fix: make logs page take up all space (#3030) 2025-09-20 07:10:28 -06:00
Matt Hill
7eecf29449 fix dep error display, show starting if any health check starting, show disabled health check message, remove loader from service list, animated dots, better color (#3025)
* refactor addresses to not need gateways array

* fix dep error display, show starting if any health check starting, show disabled health check message, remove loader from service list, animated dots, better color

* fix: fix action results textfields

---------

Co-authored-by: waterplea <alexander@inkin.ru>
2025-09-17 10:32:20 -06:00
Mariusz Kogen
1d331d7810 Fix file permissions for developer key and auth cookie (#3024)
* fix permissions

* include read for group
2025-09-16 09:09:33 -06:00
Aiden McClelland
68414678d8 sdk updates; beta.39 (#3022)
* sdk updates; beta.39

* beta.40
2025-09-11 15:47:48 -06:00
Aiden McClelland
2f6b9dac26 Bugfix/dns recursion (#3023)
* fix dns recursion and localhost

* additional fix
2025-09-11 15:47:38 -06:00
Aiden McClelland
d1812d875b fix dns recursion and localhost (#3021) 2025-09-11 12:35:12 -06:00
1306 changed files with 65076 additions and 41855 deletions

.claude/settings.json (new file, +1 line)

@@ -0,0 +1 @@
{}

.github/actions/setup-build/action.yml (new file, +81 lines)

@@ -0,0 +1,81 @@
name: Setup Build Environment
description: Common build environment setup steps
inputs:
nodejs-version:
description: Node.js version
required: true
setup-python:
description: Set up Python
required: false
default: "false"
setup-docker:
description: Set up Docker QEMU and Buildx
required: false
default: "true"
setup-sccache:
description: Configure sccache for GitHub Actions
required: false
default: "true"
free-space:
description: Remove unnecessary packages to free disk space
required: false
default: "true"
runs:
using: composite
steps:
- name: Free disk space
if: inputs.free-space == 'true'
shell: bash
run: |
sudo apt-get remove --purge -y azure-cli || true
sudo apt-get remove --purge -y firefox || true
sudo apt-get remove --purge -y ghc-* || true
sudo apt-get remove --purge -y google-cloud-sdk || true
sudo apt-get remove --purge -y google-chrome-stable || true
sudo apt-get remove --purge -y powershell || true
sudo apt-get remove --purge -y php* || true
sudo apt-get remove --purge -y ruby* || true
sudo apt-get remove --purge -y mono-* || true
sudo apt-get autoremove -y
sudo apt-get clean
sudo rm -rf /usr/lib/jvm
sudo rm -rf /usr/local/.ghcup
sudo rm -rf /usr/local/lib/android
sudo rm -rf /usr/share/dotnet
sudo rm -rf /usr/share/swift
sudo rm -rf "$AGENT_TOOLSDIRECTORY"
# BuildJet runners lack /opt/hostedtoolcache, which setup-python and setup-qemu expect
- name: Ensure hostedtoolcache exists
shell: bash
run: sudo mkdir -p /opt/hostedtoolcache && sudo chown $USER:$USER /opt/hostedtoolcache
- name: Set up Python
if: inputs.setup-python == 'true'
uses: actions/setup-python@v5
with:
python-version: "3.x"
- uses: actions/setup-node@v4
with:
node-version: ${{ inputs.nodejs-version }}
cache: npm
cache-dependency-path: "**/package-lock.json"
- name: Set up Docker QEMU
if: inputs.setup-docker == 'true'
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
if: inputs.setup-docker == 'true'
uses: docker/setup-buildx-action@v3
- name: Configure sccache
if: inputs.setup-sccache == 'true'
uses: actions/github-script@v7
with:
script: |
core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');

.github/workflows/start-cli.yaml (new file, +88 lines)

@@ -0,0 +1,88 @@
name: start-cli
on:
workflow_call:
workflow_dispatch:
inputs:
environment:
type: choice
description: Environment
options:
- NONE
- dev
- unstable
- dev-unstable
runner:
type: choice
description: Runner
options:
- standard
- fast
arch:
type: choice
description: Architecture
options:
- ALL
- x86_64
- x86_64-apple
- aarch64
- aarch64-apple
- riscv64
push:
branches:
- master
- next/*
pull_request:
branches:
- master
- next/*
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
cancel-in-progress: true
env:
NODEJS_VERSION: "24.11.0"
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
jobs:
compile:
name: Build Debian Package
if: github.event.pull_request.draft != true
strategy:
fail-fast: true
matrix:
triple: >-
${{
fromJson('{
"x86_64": ["x86_64-unknown-linux-musl"],
"x86_64-apple": ["x86_64-apple-darwin"],
"aarch64": ["aarch64-unknown-linux-musl"],
"x86_64-apple": ["aarch64-apple-darwin"],
"riscv64": ["riscv64gc-unknown-linux-musl"],
"ALL": ["x86_64-unknown-linux-musl", "x86_64-apple-darwin", "aarch64-unknown-linux-musl", "aarch64-apple-darwin", "riscv64gc-unknown-linux-musl"]
}')[github.event.inputs.arch || 'ALL']
}}
runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
steps:
- name: Mount tmpfs
if: ${{ github.event.inputs.runner == 'fast' }}
run: sudo mount -t tmpfs tmpfs .
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: ./.github/actions/setup-build
with:
nodejs-version: ${{ env.NODEJS_VERSION }}
- name: Make
run: TARGET=${{ matrix.triple }} make cli
env:
PLATFORM: ${{ matrix.arch }}
SCCACHE_GHA_ENABLED: on
SCCACHE_GHA_VERSION: 0
- uses: actions/upload-artifact@v4
with:
name: start-cli_${{ matrix.triple }}
path: core/target/${{ matrix.triple }}/release/start-cli

.github/workflows/start-registry.yaml (new file, +173 lines)

@@ -0,0 +1,173 @@
name: start-registry
on:
workflow_call:
workflow_dispatch:
inputs:
environment:
type: choice
description: Environment
options:
- NONE
- dev
- unstable
- dev-unstable
runner:
type: choice
description: Runner
options:
- standard
- fast
arch:
type: choice
description: Architecture
options:
- ALL
- x86_64
- aarch64
- riscv64
push:
branches:
- master
- next/*
pull_request:
branches:
- master
- next/*
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
cancel-in-progress: true
env:
NODEJS_VERSION: "24.11.0"
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
jobs:
compile:
name: Build Debian Package
if: github.event.pull_request.draft != true
strategy:
fail-fast: true
matrix:
arch: >-
${{
fromJson('{
"x86_64": ["x86_64"],
"aarch64": ["aarch64"],
"riscv64": ["riscv64"],
"ALL": ["x86_64", "aarch64", "riscv64"]
}')[github.event.inputs.arch || 'ALL']
}}
runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
steps:
- name: Mount tmpfs
if: ${{ github.event.inputs.runner == 'fast' }}
run: sudo mount -t tmpfs tmpfs .
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: ./.github/actions/setup-build
with:
nodejs-version: ${{ env.NODEJS_VERSION }}
- name: Make
run: make registry-deb
env:
PLATFORM: ${{ matrix.arch }}
SCCACHE_GHA_ENABLED: on
SCCACHE_GHA_VERSION: 0
- uses: actions/upload-artifact@v4
with:
name: start-registry_${{ matrix.arch }}.deb
path: results/start-registry-*_${{ matrix.arch }}.deb
create-image:
name: Create Docker Image
needs: [compile]
permissions:
contents: read
packages: write
runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
steps:
- name: Cleaning up unnecessary files
run: |
sudo apt-get remove --purge -y google-chrome-stable firefox mono-devel
sudo apt-get autoremove -y
sudo apt-get clean
- run: |
sudo mount -t tmpfs tmpfs .
if: ${{ github.event.inputs.runner == 'fast' }}
- name: Set up docker QEMU
uses: docker/setup-qemu-action@v3
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: "Login to GitHub Container Registry"
uses: docker/login-action@v3
with:
registry: ghcr.io
username: ${{github.actor}}
password: ${{secrets.GITHUB_TOKEN}}
- name: Docker meta
id: meta
uses: docker/metadata-action@v5
with:
images: ghcr.io/Start9Labs/startos-registry
tags: |
type=raw,value=${{ github.ref_name }}
- name: Download debian package
uses: actions/download-artifact@v4
with:
pattern: start-registry_*.deb
- name: Map matrix.arch to docker platform
run: |
platforms=""
for deb in *.deb; do
filename=$(basename "$deb" .deb)
arch="${filename#*_}"
case "$arch" in
x86_64)
platform="linux/amd64"
;;
aarch64)
platform="linux/arm64"
;;
riscv64)
platform="linux/riscv64"
;;
*)
echo "Unknown architecture: $arch" >&2
exit 1
;;
esac
if [ -z "$platforms" ]; then
platforms="$platform"
else
platforms="$platforms,$platform"
fi
done
echo "DOCKER_PLATFORM=$platforms" >> "$GITHUB_ENV"
- run: |
cat | docker buildx build --platform "$DOCKER_PLATFORM" --push -t ${{ steps.meta.outputs.tags }} -f - . << 'EOF'
FROM debian:trixie
ADD *.deb .
RUN apt-get install -y ./*_$(uname -m).deb && rm *.deb
VOLUME /var/lib/startos
ENV RUST_LOG=startos=debug
ENTRYPOINT ["start-registryd"]
EOF

.github/workflows/start-tunnel.yaml (new file, +84 lines)

@@ -0,0 +1,84 @@
name: start-tunnel
on:
workflow_call:
workflow_dispatch:
inputs:
environment:
type: choice
description: Environment
options:
- NONE
- dev
- unstable
- dev-unstable
runner:
type: choice
description: Runner
options:
- standard
- fast
arch:
type: choice
description: Architecture
options:
- ALL
- x86_64
- aarch64
- riscv64
push:
branches:
- master
- next/*
pull_request:
branches:
- master
- next/*
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
cancel-in-progress: true
env:
NODEJS_VERSION: "24.11.0"
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
jobs:
compile:
name: Build Debian Package
if: github.event.pull_request.draft != true
strategy:
fail-fast: true
matrix:
arch: >-
${{
fromJson('{
"x86_64": ["x86_64"],
"aarch64": ["aarch64"],
"riscv64": ["riscv64"],
"ALL": ["x86_64", "aarch64", "riscv64"]
}')[github.event.inputs.arch || 'ALL']
}}
runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
steps:
- name: Mount tmpfs
if: ${{ github.event.inputs.runner == 'fast' }}
run: sudo mount -t tmpfs tmpfs .
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: ./.github/actions/setup-build
with:
nodejs-version: ${{ env.NODEJS_VERSION }}
- name: Make
run: make tunnel-deb
env:
PLATFORM: ${{ matrix.arch }}
SCCACHE_GHA_ENABLED: on
SCCACHE_GHA_VERSION: 0
- uses: actions/upload-artifact@v4
with:
name: start-tunnel_${{ matrix.arch }}.deb
path: results/start-tunnel-*_${{ matrix.arch }}.deb


@@ -25,9 +25,13 @@ on:
- ALL
- x86_64
- x86_64-nonfree
- x86_64-nvidia
- aarch64
- aarch64-nonfree
- raspberrypi
- aarch64-nvidia
# - raspberrypi
- riscv64
- riscv64-nonfree
deploy:
type: choice
description: Deploy
@@ -44,13 +48,18 @@ on:
- master
- next/*
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
cancel-in-progress: true
env:
NODEJS_VERSION: "22.17.1"
NODEJS_VERSION: "24.11.0"
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
jobs:
compile:
name: Compile Base Binaries
if: github.event.pull_request.draft != true
strategy:
fail-fast: true
matrix:
@@ -59,46 +68,45 @@ jobs:
fromJson('{
"x86_64": ["x86_64"],
"x86_64-nonfree": ["x86_64"],
"x86_64-nvidia": ["x86_64"],
"aarch64": ["aarch64"],
"aarch64-nonfree": ["aarch64"],
"aarch64-nvidia": ["aarch64"],
"raspberrypi": ["aarch64"],
"ALL": ["x86_64", "aarch64"]
"riscv64": ["riscv64"],
"riscv64-nonfree": ["riscv64"],
"ALL": ["x86_64", "aarch64", "riscv64"]
}')[github.event.inputs.platform || 'ALL']
}}
runs-on: ${{ fromJson('["ubuntu-22.04", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
runs-on: >-
${{
fromJson(
format(
'["{0}", "{1}"]',
fromJson('{
"x86_64": "ubuntu-latest",
"aarch64": "ubuntu-24.04-arm",
"riscv64": "ubuntu-latest"
}')[matrix.arch],
fromJson('{
"x86_64": "buildjet-32vcpu-ubuntu-2204",
"aarch64": "buildjet-32vcpu-ubuntu-2204-arm",
"riscv64": "buildjet-32vcpu-ubuntu-2204"
}')[matrix.arch]
)
)[github.event.inputs.runner == 'fast']
}}
steps:
- run: |
sudo mount -t tmpfs tmpfs .
- name: Mount tmpfs
if: ${{ github.event.inputs.runner == 'fast' }}
run: sudo mount -t tmpfs tmpfs .
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Set up Python
uses: actions/setup-python@v5
- uses: ./.github/actions/setup-build
with:
python-version: "3.x"
- uses: actions/setup-node@v4
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Set up docker QEMU
uses: docker/setup-qemu-action@v3
- name: Set up system dependencies
run: sudo apt-get update && sudo apt-get install -y qemu-user-static systemd-container squashfuse
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Configure sccache
uses: actions/github-script@v7
with:
script: |
core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
nodejs-version: ${{ env.NODEJS_VERSION }}
setup-python: "true"
- name: Make
run: make ARCH=${{ matrix.arch }} compiled-${{ matrix.arch }}.tar
@@ -116,13 +124,14 @@ jobs:
strategy:
fail-fast: false
matrix:
# TODO: re-add "raspberrypi" to the platform list below
platform: >-
${{
fromJson(
format(
'[
["{0}"],
["x86_64", "x86_64-nonfree", "aarch64", "aarch64-nonfree", "raspberrypi"]
["x86_64", "x86_64-nonfree", "x86_64-nvidia", "aarch64", "aarch64-nonfree", "aarch64-nvidia", "riscv64", "riscv64-nonfree"]
]',
github.event.inputs.platform || 'ALL'
)
@@ -132,13 +141,28 @@ jobs:
${{
fromJson(
format(
'["ubuntu-22.04", "{0}"]',
'["{0}", "{1}"]',
fromJson('{
"x86_64": "ubuntu-latest",
"x86_64-nonfree": "ubuntu-latest",
"x86_64-nvidia": "ubuntu-latest",
"aarch64": "ubuntu-24.04-arm",
"aarch64-nonfree": "ubuntu-24.04-arm",
"aarch64-nvidia": "ubuntu-24.04-arm",
"raspberrypi": "ubuntu-24.04-arm",
"riscv64": "ubuntu-24.04-arm",
"riscv64-nonfree": "ubuntu-24.04-arm",
}')[matrix.platform],
fromJson('{
"x86_64": "buildjet-8vcpu-ubuntu-2204",
"x86_64-nonfree": "buildjet-8vcpu-ubuntu-2204",
"x86_64-nvidia": "buildjet-8vcpu-ubuntu-2204",
"aarch64": "buildjet-8vcpu-ubuntu-2204-arm",
"aarch64-nonfree": "buildjet-8vcpu-ubuntu-2204-arm",
"aarch64-nvidia": "buildjet-8vcpu-ubuntu-2204-arm",
"raspberrypi": "buildjet-8vcpu-ubuntu-2204-arm",
"riscv64": "buildjet-8vcpu-ubuntu-2204",
"riscv64-nonfree": "buildjet-8vcpu-ubuntu-2204",
}')[matrix.platform]
)
)[github.event.inputs.runner == 'fast']
@@ -149,42 +173,48 @@ jobs:
fromJson('{
"x86_64": "x86_64",
"x86_64-nonfree": "x86_64",
"x86_64-nvidia": "x86_64",
"aarch64": "aarch64",
"aarch64-nonfree": "aarch64",
"aarch64-nvidia": "aarch64",
"raspberrypi": "aarch64",
"riscv64": "riscv64",
"riscv64-nonfree": "riscv64",
}')[matrix.platform]
}}
steps:
- name: Free space
run: rm -rf /opt/hostedtoolcache*
run: |
sudo apt-get remove --purge -y azure-cli || true
sudo apt-get remove --purge -y firefox || true
sudo apt-get remove --purge -y ghc-* || true
sudo apt-get remove --purge -y google-cloud-sdk || true
sudo apt-get remove --purge -y google-chrome-stable || true
sudo apt-get remove --purge -y powershell || true
sudo apt-get remove --purge -y php* || true
sudo apt-get remove --purge -y ruby* || true
sudo apt-get remove --purge -y mono-* || true
sudo apt-get autoremove -y
sudo apt-get clean
sudo rm -rf /usr/lib/jvm # All JDKs
sudo rm -rf /usr/local/.ghcup # Haskell toolchain
sudo rm -rf /usr/local/lib/android # Android SDK/NDK, emulator
sudo rm -rf /usr/share/dotnet # .NET SDKs
sudo rm -rf /usr/share/swift # Swift toolchain (if present)
sudo rm -rf "$AGENT_TOOLSDIRECTORY" # Pre-cached tool cache (Go, Node, etc.)
if: ${{ github.event.inputs.runner != 'fast' }}
# BuildJet runners lack /opt/hostedtoolcache, which setup-qemu expects
- name: Ensure hostedtoolcache exists
run: sudo mkdir -p /opt/hostedtoolcache && sudo chown $USER:$USER /opt/hostedtoolcache
- name: Set up docker QEMU
uses: docker/setup-qemu-action@v3
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.x"
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get install -y qemu-user-static
wget https://deb.debian.org/debian/pool/main/d/debspawn/debspawn_0.6.2-1_all.deb
sha256sum ./debspawn_0.6.2-1_all.deb | grep 37ef27458cb1e35e8bce4d4f639b06b4b3866fc0b9191ec6b9bd157afd06a817
sudo apt-get install -y ./debspawn_0.6.2-1_all.deb
- name: Configure debspawn
run: |
sudo mkdir -p /etc/debspawn/
echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
sudo mkdir -p /var/tmp/debspawn
- run: sudo mount -t tmpfs tmpfs /var/tmp/debspawn
if: ${{ github.event.inputs.runner == 'fast' && (matrix.platform == 'x86_64' || matrix.platform == 'x86_64-nonfree') }}
- name: Download compiled artifacts
uses: actions/download-artifact@v4
with:
@@ -197,22 +227,19 @@ jobs:
run: |
mkdir -p web/node_modules
mkdir -p web/dist/raw
mkdir -p core/startos/bindings
mkdir -p core/bindings
mkdir -p sdk/base/lib/osBindings
mkdir -p container-runtime/node_modules
mkdir -p container-runtime/dist
mkdir -p container-runtime/dist/node_modules
mkdir -p core/startos/bindings
mkdir -p sdk/dist
mkdir -p sdk/baseDist
mkdir -p patch-db/client/node_modules
mkdir -p patch-db/client/dist
mkdir -p web/.angular
mkdir -p web/dist/raw/ui
mkdir -p web/dist/raw/install-wizard
mkdir -p web/dist/raw/setup-wizard
mkdir -p web/dist/static/ui
mkdir -p web/dist/static/install-wizard
mkdir -p web/dist/static/setup-wizard
PLATFORM=${{ matrix.platform }} make -t compiled-${{ env.ARCH }}.tar
@@ -242,40 +269,3 @@ jobs:
name: ${{ matrix.platform }}.img
path: results/*.img
if: ${{ matrix.platform == 'raspberrypi' }}
- name: Upload OTA to registry
run: >-
PLATFORM=${{ matrix.platform }} make upload-ota TARGET="${{
fromJson('{
"alpha": "alpha-registry-x.start9.com",
"beta": "beta-registry.start9.com",
}')[github.event.inputs.deploy]
}}" KEY="${{
fromJson(
format('{{
"alpha": "{0}",
"beta": "{1}",
}}', secrets.ALPHA_INDEX_KEY, secrets.BETA_INDEX_KEY)
)[github.event.inputs.deploy]
}}"
if: ${{ github.event.inputs.deploy != '' && github.event.inputs.deploy != 'NONE' }}
index:
if: ${{ github.event.inputs.deploy != '' && github.event.inputs.deploy != 'NONE' }}
needs: [image]
runs-on: ubuntu-22.04
steps:
- run: >-
curl "https://${{
fromJson('{
"alpha": "alpha-registry-x.start9.com",
"beta": "beta-registry.start9.com",
}')[github.event.inputs.deploy]
}}:8443/resync.cgi?key=${{
fromJson(
format('{{
"alpha": "{0}",
"beta": "{1}",
}}', secrets.ALPHA_INDEX_KEY, secrets.BETA_INDEX_KEY)
)[github.event.inputs.deploy]
}}"


@@ -10,22 +10,29 @@ on:
- master
- next/*
concurrency:
group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
cancel-in-progress: true
env:
NODEJS_VERSION: "22.17.1"
NODEJS_VERSION: "24.11.0"
ENVIRONMENT: dev-unstable
jobs:
test:
name: Run Automated Tests
runs-on: ubuntu-22.04
if: github.event.pull_request.draft != true
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-node@v4
- uses: ./.github/actions/setup-build
with:
node-version: ${{ env.NODEJS_VERSION }}
nodejs-version: ${{ env.NODEJS_VERSION }}
free-space: "false"
setup-docker: "false"
setup-sccache: "false"
- name: Build And Run Tests
run: make test

.gitignore

@@ -1,28 +1,24 @@
.DS_Store
.idea
/*.img
/*.img.gz
/*.img.xz
/*-raspios-bullseye-arm64-lite.img
/*-raspios-bullseye-arm64-lite.zip
*.img
*.img.gz
*.img.xz
*.zip
/product_key.txt
/*_product_key.txt
.vscode/settings.json
deploy_web.sh
deploy_web.sh
secrets.db
.vscode/
/cargo-deps/**/*
/PLATFORM.txt
/ENVIRONMENT.txt
/GIT_HASH.txt
/VERSION.txt
/*.deb
/build/env/*.txt
*.deb
/target
/*.squashfs
*.squashfs
/results
/dpkg-workdir
/compiled.tar
/compiled-*.tar
/firmware
/tmp
/build/lib/firmware
tmp
web/.i18n-checked
docs/USER.md

ARCHITECTURE.md

@@ -0,0 +1,101 @@
# Architecture
StartOS is an open-source Linux distribution for running personal servers. It manages discovery, installation, network configuration, backups, and health monitoring of self-hosted services.
## Tech Stack
- Backend: Rust (async/Tokio, Axum web framework)
- Frontend: Angular 20 + TypeScript + TaigaUI
- Container runtime: Node.js/TypeScript with LXC
- Database/State: Patch-DB (git submodule) - storage layer with reactive frontend sync
- API: JSON-RPC via rpc-toolkit (see `core/rpc-toolkit.md`)
- Auth: Password + session cookie, public/private key signatures, local authcookie (see `core/src/middleware/auth/`)
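As a concrete illustration of the API layer, a JSON-RPC 2.0 call to the backend could look like the sketch below; the endpoint path and method name are hypothetical placeholders, not taken from this repo (the real handlers live in `core/` and are described in `core/rpc-toolkit.md`):
```bash
# Hedged sketch: JSON-RPC 2.0 over HTTP. The URL and method name are hypothetical.
curl -s http://server.local/rpc/v1 \
  -H 'Content-Type: application/json' \
  -d '{"jsonrpc":"2.0","id":1,"method":"server.info","params":{}}'
```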
## Project Structure
```bash
/
├── assets/ # Screenshots for README
├── build/ # Auxiliary files and scripts for deployed images
├── container-runtime/ # Node.js program managing package containers
├── core/ # Rust backend: API, daemon (startd), CLI (start-cli)
├── debian/ # Debian package maintainer scripts
├── image-recipe/ # Scripts for building StartOS images
├── patch-db/ # (submodule) Diff-based data store for frontend sync
├── sdk/ # TypeScript SDK for building StartOS packages
└── web/ # Web UIs (Angular)
```
## Components
- **`core/`** — Rust backend daemon. Produces a single binary `startbox` that is symlinked as `startd` (main daemon), `start-cli` (CLI), `start-container` (runs inside LXC containers), `registrybox` (package registry), and `tunnelbox` (VPN/tunnel). Handles all backend logic: RPC API, service lifecycle, networking (DNS, ACME, WiFi, Tor, WireGuard), backups, and database state management. See [core/ARCHITECTURE.md](core/ARCHITECTURE.md).
- **`web/`** — Angular 20 + TypeScript workspace using Taiga UI. Contains three applications (admin UI, setup wizard, VPN management) and two shared libraries (common components/services, marketplace). Communicates with the backend exclusively via JSON-RPC. See [web/ARCHITECTURE.md](web/ARCHITECTURE.md).
- **`container-runtime/`** — Node.js runtime that runs inside each service's LXC container. Loads the service's JavaScript from its S9PK package and manages subcontainers. Communicates with the host daemon via JSON-RPC over Unix socket. See [container-runtime/CLAUDE.md](container-runtime/CLAUDE.md).
- **`sdk/`** — TypeScript SDK for packaging services for StartOS (`@start9labs/start-sdk`). Split into `base/` (core types, ABI definitions, effects interface, consumed by web as `@start9labs/start-sdk-base`) and `package/` (full SDK for service developers, consumed by container-runtime as `@start9labs/start-sdk`).
- **`patch-db/`** — Git submodule providing diff-based state synchronization. Uses CBOR encoding. Backend mutations produce diffs that are pushed to the frontend via WebSocket, enabling reactive UI updates without polling. See [patch-db repo](https://github.com/Start9Labs/patch-db).
## Build Pipeline
Components have a strict dependency chain. Changes flow in one direction:
```
Rust (core/)
→ cargo test exports ts-rs types to core/bindings/
→ rsync copies to sdk/base/lib/osBindings/
→ SDK build produces baseDist/ and dist/
→ web/ consumes baseDist/ (via @start9labs/start-sdk-base)
→ container-runtime/ consumes dist/ (via @start9labs/start-sdk)
```
Key make targets along this chain:
| Step | Command | What it does |
|---|---|---|
| 1 | `cargo check -p start-os` | Verify Rust compiles |
| 2 | `make ts-bindings` | Export ts-rs types → rsync to SDK |
| 3 | `cd sdk && make baseDist dist` | Build SDK packages |
| 4 | `cd web && npm run check` | Type-check Angular projects |
| 5 | `cd container-runtime && npm run check` | Type-check runtime |
**Important**: Editing `sdk/base/lib/osBindings/*.ts` alone is NOT sufficient — you must rebuild the SDK bundle (step 3) before web/container-runtime can see the changes.
## Cross-Layer Verification
When making changes across multiple layers (Rust, SDK, web, container-runtime), verify in this order:
1. **Rust**: `cargo check -p start-os` — verifies core compiles
2. **TS bindings**: `make ts-bindings` — regenerates TypeScript types from Rust `#[ts(export)]` structs
- Runs `./core/build/build-ts.sh` to export ts-rs types to `core/bindings/`
- Syncs `core/bindings/` → `sdk/base/lib/osBindings/` via rsync
- If you manually edit files in `sdk/base/lib/osBindings/`, you must still rebuild the SDK (step 3)
3. **SDK bundle**: `cd sdk && make baseDist dist` — compiles SDK source into packages
- `baseDist/` is consumed by `/web` (via `@start9labs/start-sdk-base`)
- `dist/` is consumed by `/container-runtime` (via `@start9labs/start-sdk`)
- Web and container-runtime reference the **built** SDK, not source files
4. **Web type check**: `cd web && npm run check` — type-checks all Angular projects
5. **Container runtime type check**: `cd container-runtime && npm run check` — type-checks the runtime
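Taken together, the whole pass can be chained into one script; this is simply the five commands above run in dependency order (assuming `cargo check` is invoked from `core/`, as the other cargo commands in this repo are):
```bash
#!/usr/bin/env bash
set -euo pipefail
# Cross-layer verification in dependency order (steps 1-5 above).
(cd core && cargo check -p start-os)        # 1. Rust compiles
make ts-bindings                            # 2. regenerate TS bindings from Rust
(cd sdk && make baseDist dist)              # 3. rebuild the SDK bundles
(cd web && npm run check)                   # 4. type-check Angular projects
(cd container-runtime && npm run check)     # 5. type-check the container runtime
```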
## Data Flow: Backend to Frontend
StartOS uses Patch-DB for reactive state synchronization:
1. The backend mutates state via `db.mutate()`, producing CBOR diffs
2. Diffs are pushed to the frontend over a persistent WebSocket connection
3. The frontend applies diffs to its local state copy and notifies observers
4. Components watch specific database paths via `PatchDB.watch$()`, receiving updates reactively
This means the UI is always eventually consistent with the backend — after any mutating API call, the frontend waits for the corresponding PatchDB diff before resolving, so the UI reflects the result immediately.
## Further Reading
- [core/ARCHITECTURE.md](core/ARCHITECTURE.md) — Rust backend architecture
- [web/ARCHITECTURE.md](web/ARCHITECTURE.md) — Angular frontend architecture
- [container-runtime/CLAUDE.md](container-runtime/CLAUDE.md) — Container runtime details
- [core/rpc-toolkit.md](core/rpc-toolkit.md) — JSON-RPC handler patterns
- [core/s9pk-structure.md](core/s9pk-structure.md) — S9PK package format
- [docs/exver.md](docs/exver.md) — Extended versioning format
- [docs/VERSION_BUMP.md](docs/VERSION_BUMP.md) — Version bumping guide

CLAUDE.md

@@ -0,0 +1,59 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Architecture
See [ARCHITECTURE.md](ARCHITECTURE.md) for the full system architecture, component map, build pipeline, and cross-layer verification order.
Each major component has its own `CLAUDE.md` with detailed guidance: `core/`, `web/`, `container-runtime/`, `sdk/`.
## Build & Development
See [CONTRIBUTING.md](CONTRIBUTING.md) for:
- Environment setup and requirements
- Build commands and make targets
- Testing and formatting commands
- Environment variables
**Quick reference:**
```bash
. ./devmode.sh # Enable dev mode
make update-startbox REMOTE=start9@<ip> # Fastest iteration (binary + UI)
make test-core # Run Rust tests
```
## Operating Rules
- Always verify cross-layer changes using the order described in [ARCHITECTURE.md](ARCHITECTURE.md#cross-layer-verification)
- Check component-level CLAUDE.md files for component-specific conventions. ALWAYS read the relevant file before operating on that component.
- Follow existing patterns before inventing new ones
- Always use `make` recipes when they exist for testing builds rather than manually invoking build commands
## Supplementary Documentation
The `docs/` directory contains cross-cutting documentation for AI assistants:
- `TODO.md` - Pending tasks for AI agents (check this first, remove items when completed)
- `USER.md` - Current user identifier (gitignored, see below)
- `exver.md` - Extended versioning format (used across core, sdk, and web)
- `VERSION_BUMP.md` - Guide for bumping the StartOS version across the codebase
Component-specific docs live alongside their code (e.g., `core/rpc-toolkit.md`, `core/i18n-patterns.md`).
### Session Startup
On startup:
1. **Check for `docs/USER.md`** - If it doesn't exist, prompt the user for their name/identifier and create it. This file is gitignored since it varies per developer.
2. **Check `docs/TODO.md` for relevant tasks** - Show TODOs that either:
- Have no `@username` tag (relevant to everyone)
- Are tagged with the current user's identifier
Skip TODOs tagged with a different user.
3. **Ask "What would you like to do today?"** - Offer options for each relevant TODO item, plus "Something else" for other requests.

CONTRIBUTING.md

@@ -1,133 +1,240 @@
# Contributing to StartOS
This guide is for contributing to StartOS. If you are interested in packaging a service for StartOS, visit the [service packaging guide](https://docs.start9.com/latest/packaging-guide/). If you are interested in promoting, providing technical support, creating tutorials, or helping in other ways, please visit the [Start9 website](https://start9.com/contribute).
This guide is for contributing to StartOS. If you are interested in packaging a service for StartOS, visit the [service packaging guide](https://github.com/Start9Labs/ai-service-packaging). If you are interested in promoting, providing technical support, creating tutorials, or helping in other ways, please visit the [Start9 website](https://start9.com/contribute).
## Collaboration
- [Matrix](https://matrix.to/#/#community-dev:matrix.start9labs.com)
- [Telegram](https://t.me/start9_labs/47471)
- [Matrix](https://matrix.to/#/#dev-startos:matrix.start9labs.com)
## Project Structure
```bash
/
├── assets/
├── container-runtime/
├── core/
├── build/
├── debian/
├── web/
├── image-recipe/
├── patch-db
└── sdk/
```
#### assets
screenshots for the StartOS README
#### container-runtime
A NodeJS program that dynamically loads maintainer scripts and communicates with the OS to manage packages
#### core
An API, daemon (startd), and CLI (start-cli) that together provide the core functionality of StartOS.
#### build
Auxiliary files and scripts to include in deployed StartOS images
#### debian
Maintainer scripts for the StartOS Debian package
#### web
Web UIs served under various conditions and used to interact with StartOS APIs.
#### image-recipe
Scripts for building StartOS images
#### patch-db (submodule)
A diff based data store used to synchronize data between the web interfaces and server.
#### sdk
A typescript sdk for building start-os packages
For project structure and system architecture, see [ARCHITECTURE.md](ARCHITECTURE.md).
## Environment Setup
#### Clone the StartOS repository
### Installing Dependencies (Debian/Ubuntu)
> Debian/Ubuntu is the only officially supported build environment.
> MacOS has limited build capabilities and Windows requires [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install).
```sh
git clone https://github.com/Start9Labs/start-os.git --recurse-submodules
sudo apt update
sudo apt install -y ca-certificates curl gpg build-essential
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg-architecture -q DEB_HOST_ARCH) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian bookworm stable" | sudo tee /etc/apt/sources.list.d/docker.list
sudo apt update
sudo apt install -y sed grep gawk jq gzip brotli containerd.io docker-ce docker-ce-cli docker-compose-plugin qemu-user-static binfmt-support squashfs-tools git debspawn rsync b3sum
sudo mkdir -p /etc/debspawn/
echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
sudo usermod -aG docker $USER
sudo su $USER
docker run --privileged --rm tonistiigi/binfmt --install all
docker buildx create --use
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh # proceed with default installation
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/master/install.sh | bash
source ~/.bashrc
nvm install 24
nvm use 24
nvm alias default 24 # this prevents your machine from reverting back to another version
```
### Cloning the Repository
```sh
git clone --recursive https://github.com/Start9Labs/start-os.git --branch next/major
cd start-os
```
#### Continue to your project of interest for additional instructions:
### Development Mode
- [`core`](core/README.md)
- [`web-interfaces`](web-interfaces/README.md)
- [`build`](build/README.md)
- [`patch-db`](https://github.com/Start9Labs/patch-db)
For faster iteration during development:
```sh
. ./devmode.sh
```
This sets `ENVIRONMENT=dev` and `GIT_BRANCH_AS_HASH=1` to prevent rebuilds on every commit.
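If you'd rather not source the script, a rough manual equivalent is to export the same two variables yourself (a sketch; `devmode.sh` may do more than this):
```sh
# Approximate manual equivalent of `. ./devmode.sh` (assumption: the script may set more)
export ENVIRONMENT=dev
export GIT_BRANCH_AS_HASH=1
```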
## Building
This project uses [GNU Make](https://www.gnu.org/software/make/) to build its components. To build any specific component, simply run `make <TARGET>` replacing `<TARGET>` with the name of the target you'd like to build
All builds can be performed on any operating system that can run Docker.
This project uses [GNU Make](https://www.gnu.org/software/make/) to build its components.
### Requirements
- [GNU Make](https://www.gnu.org/software/make/)
- [Docker](https://docs.docker.com/get-docker/)
- [Docker](https://docs.docker.com/get-docker/) or [Podman](https://podman.io/)
- [NodeJS v20.16.0](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm)
- [sed](https://www.gnu.org/software/sed/)
- [grep](https://www.gnu.org/software/grep/)
- [awk](https://www.gnu.org/software/gawk/)
- [Rust](https://rustup.rs/) (nightly for formatting)
- [sed](https://www.gnu.org/software/sed/), [grep](https://www.gnu.org/software/grep/), [awk](https://www.gnu.org/software/gawk/)
- [jq](https://jqlang.github.io/jq/)
- [gzip](https://www.gnu.org/software/gzip/)
- [brotli](https://github.com/google/brotli)
- [gzip](https://www.gnu.org/software/gzip/), [brotli](https://github.com/google/brotli)
### Environment variables
### Environment Variables
- `PLATFORM`: which platform you would like to build for. Must be one of `x86_64`, `x86_64-nonfree`, `aarch64`, `aarch64-nonfree`, `raspberrypi`
- NOTE: `nonfree` images are for including `nonfree` firmware packages in the built ISO
- `ENVIRONMENT`: a hyphen separated set of feature flags to enable
- `dev`: enables password ssh (INSECURE!) and does not compress frontends
- `unstable`: enables assertions that will cause errors on unexpected inconsistencies that are undesirable in production use either for performance or reliability reasons
- `docker`: use `docker` instead of `podman`
- `GIT_BRANCH_AS_HASH`: set to `1` to use the current git branch name as the git hash so that the project does not need to be rebuilt on each commit
| Variable | Description |
| -------------------- | --------------------------------------------------------------------------------------------------- |
| `PLATFORM` | Target platform: `x86_64`, `x86_64-nonfree`, `aarch64`, `aarch64-nonfree`, `riscv64`, `raspberrypi` |
| `ENVIRONMENT` | Hyphen-separated feature flags (see below) |
| `PROFILE` | Build profile: `release` (default) or `dev` |
| `GIT_BRANCH_AS_HASH` | Set to `1` to use git branch name as version hash (avoids rebuilds) |
### Useful Make Targets
**ENVIRONMENT flags:**
- `iso`: Create a full `.iso` image
- Only possible from Debian
- Not available for `PLATFORM=raspberrypi`
- Additional Requirements:
- [debspawn](https://github.com/lkhq/debspawn)
- `img`: Create a full `.img` image
- Only possible from Debian
- Only available for `PLATFORM=raspberrypi`
- Additional Requirements:
- [debspawn](https://github.com/lkhq/debspawn)
- `format`: Run automatic code formatting for the project
- Additional Requirements:
- [rust](https://rustup.rs/)
- `test`: Run automated tests for the project
- Additional Requirements:
- [rust](https://rustup.rs/)
- `update`: Deploy the current working project to a device over ssh as if through an over-the-air update
- Requires an argument `REMOTE` which is the ssh address of the device, i.e. `start9@192.168.122.2`
- `reflash`: Deploy the current working project to a device over ssh as if using a live `iso` image to reflash it
- Requires an argument `REMOTE` which is the ssh address of the device, i.e. `start9@192.168.122.2`
- `update-overlay`: Deploy the current working project to a device over ssh to the in-memory overlay without restarting it
- WARNING: changes will be reverted after the device is rebooted
- WARNING: changes to `init` will not take effect as the device is already initialized
- Requires an argument `REMOTE` which is the ssh address of the device, i.e. `start9@192.168.122.2`
- `wormhole`: Deploy the `startbox` to a device using [magic-wormhole](https://github.com/magic-wormhole/magic-wormhole)
- When the build is complete, it emits a command to paste into the shell of the device to upgrade it
- Additional Requirements:
- [magic-wormhole](https://github.com/magic-wormhole/magic-wormhole)
- `clean`: Delete all compiled artifacts
- `dev` - Enables password SSH before setup, skips frontend compression
- `unstable` - Enables assertions and debugging with performance penalty
- `console` - Enables tokio-console for async debugging
**Platform notes:**
- `-nonfree` variants include proprietary firmware and drivers
- `raspberrypi` includes non-free components by necessity
- Platform is remembered between builds if not specified
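Putting the variables together, a typical invocation picks a platform and a hyphen-separated flag set, for example:
```sh
# ENVIRONMENT flags combine with hyphens; PLATFORM as listed in the table above.
PLATFORM=x86_64 ENVIRONMENT=dev-unstable make iso
```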
### Make Targets
#### Building
| Target | Description |
| ------------- | ---------------------------------------------- |
| `iso` | Create full `.iso` image (not for raspberrypi) |
| `img` | Create full `.img` image (raspberrypi only) |
| `deb` | Build Debian package |
| `all` | Build all Rust binaries |
| `uis` | Build all web UIs |
| `ui` | Build main UI only |
| `ts-bindings` | Generate TypeScript bindings from Rust types |
#### Deploying to Device
For devices on the same network:
| Target | Description |
| ------------------------------------ | ----------------------------------------------- |
| `update-startbox REMOTE=start9@<ip>` | Deploy binary + UI only (fastest) |
| `update-deb REMOTE=start9@<ip>` | Deploy full Debian package |
| `update REMOTE=start9@<ip>` | OTA-style update |
| `reflash REMOTE=start9@<ip>` | Reflash as if using live ISO |
| `update-overlay REMOTE=start9@<ip>` | Deploy to in-memory overlay (reverts on reboot) |
For devices on different networks (uses [magic-wormhole](https://github.com/magic-wormhole/magic-wormhole)):
| Target | Description |
| ------------------- | -------------------- |
| `wormhole` | Send startbox binary |
| `wormhole-deb` | Send Debian package |
| `wormhole-squashfs` | Send squashfs image |
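A typical fast iteration loop against a device on the same network then looks like this (the IP address is an example):
```sh
. ./devmode.sh                                    # dev environment, stable git hash
make update-startbox REMOTE=start9@192.168.122.2  # deploy binary + UI only (fastest)
```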
### Creating a VM
Install virt-manager:
```sh
sudo apt update
sudo apt install -y virt-manager
sudo usermod -aG libvirt $USER
sudo su $USER
virt-manager
```
Follow the screenshot walkthrough in [`assets/create-vm/`](assets/create-vm/) to create a new virtual machine. Key steps:
1. Create a new virtual machine
2. Browse for the ISO — create a storage pool pointing to your `results/` directory
3. Select "Generic or unknown OS"
4. Set memory and CPUs
5. Create a disk and name the VM
Build an ISO first:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make iso
```
#### Other
| Target | Description |
| ------------------------ | ------------------------------------------- |
| `format` | Run code formatting (Rust nightly required) |
| `test` | Run all automated tests |
| `test-core` | Run Rust tests |
| `test-sdk` | Run SDK tests |
| `test-container-runtime` | Run container runtime tests |
| `clean` | Delete all compiled artifacts |
## Testing
```bash
make test # All tests
make test-core # Rust tests (via ./core/run-tests.sh)
make test-sdk # SDK tests
make test-container-runtime # Container runtime tests
# Run specific Rust test
cd core && cargo test <test_name> --features=test
```
## Code Formatting
```bash
# Rust (requires nightly)
make format
# TypeScript/HTML/SCSS (web)
cd web && npm run format
```
## Code Style Guidelines
### Formatting
Run the formatters before committing. Configuration is handled by `rustfmt.toml` (Rust) and prettier configs (TypeScript).
### Documentation & Comments
**Rust:**
- Add doc comments (`///`) to public APIs, structs, and non-obvious functions
- Use `//` comments sparingly for complex logic that isn't self-evident
- Prefer self-documenting code (clear naming, small functions) over comments
**TypeScript:**
- Document exported functions and complex types with JSDoc
- Keep comments focused on "why" rather than "what"
**General:**
- Don't add comments that just restate the code
- Update or remove comments when code changes
- TODOs should include context: `// TODO(username): reason`
### Commit Messages
Use [Conventional Commits](https://www.conventionalcommits.org/):
```
<type>(<scope>): <description>
[optional body]
[optional footer]
```
**Types:**
- `feat` - New feature
- `fix` - Bug fix
- `docs` - Documentation only
- `style` - Formatting, no code change
- `refactor` - Code change that neither fixes a bug nor adds a feature
- `test` - Adding or updating tests
- `chore` - Build process, dependencies, etc.
**Examples:**
```
feat(web): add dark mode toggle
fix(core): resolve race condition in service startup
docs: update CONTRIBUTING.md with style guidelines
refactor(sdk): simplify package validation logic
```


@@ -1,134 +0,0 @@
# Setting up your development environment on Debian/Ubuntu
A step-by-step guide
> This is the only officially supported build environment.
> MacOS has limited build capabilities and Windows requires [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install)
## Installing dependencies
Run the following commands one at a time
```sh
sudo apt update
sudo apt install -y ca-certificates curl gpg build-essential
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg-architecture -q DEB_HOST_ARCH) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian bookworm stable" | sudo tee /etc/apt/sources.list.d/docker.list
sudo apt update
sudo apt install -y sed grep gawk jq gzip brotli containerd.io docker-ce docker-ce-cli docker-compose-plugin qemu-user-static binfmt-support squashfs-tools git debspawn rsync b3sum
sudo mkdir -p /etc/debspawn/
echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
sudo usermod -aG docker $USER
sudo su $USER
docker run --privileged --rm tonistiigi/binfmt --install all
docker buildx create --use
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh # proceed with default installation
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/master/install.sh | bash
source ~/.bashrc
nvm install 22
nvm use 22
nvm alias default 22 # this prevents your machine from reverting back to another version
```
## Cloning the repository
```sh
git clone --recursive https://github.com/Start9Labs/start-os.git --branch next/major
cd start-os
```
## Building an ISO
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make iso
```
This will build an ISO for your current architecture. If you are building to run on an architecture other than the one you are currently on, replace `$(uname -m)` with the correct platform for the device (one of `aarch64`, `aarch64-nonfree`, `x86_64`, `x86_64-nonfree`, `raspberrypi`)
## Creating a VM
### Install virt-manager
```sh
sudo apt update
sudo apt install -y virt-manager
sudo usermod -aG libvirt $USER
sudo su $USER
```
### Launch virt-manager
```sh
virt-manager
```
### Create new virtual machine
![Select "Create a new virtual machine"](assets/create-vm/step-1.png)
![Click "Forward"](assets/create-vm/step-2.png)
![Click "Browse"](assets/create-vm/step-3.png)
![Click "+"](assets/create-vm/step-4.png)
#### make sure to set "Target Path" to the path to your results directory in start-os
![Create storage pool](assets/create-vm/step-5.png)
![Select storage pool](assets/create-vm/step-6.png)
![Select ISO](assets/create-vm/step-7.png)
![Select "Generic or unknown OS" and click "Forward"](assets/create-vm/step-8.png)
![Set Memory and CPUs](assets/create-vm/step-9.png)
![Create disk](assets/create-vm/step-10.png)
![Name VM](assets/create-vm/step-11.png)
![Create network](assets/create-vm/step-12.png)
## Updating a VM
The fastest way to update a VM to your latest code depends on what you changed:
### UI or startd:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-startbox REMOTE=start9@<VM IP>
```
### Container runtime or debian dependencies:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-deb REMOTE=start9@<VM IP>
```
### Image recipe:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-squashfs REMOTE=start9@<VM IP>
```
---
If the device you are building for is not available via ssh, it is also possible to use `magic-wormhole` to send the relevant files.
### Prerequisites:
```sh
sudo apt update
sudo apt install -y magic-wormhole
```
As before, the fastest way to update a VM to your latest code depends on what you changed. Each of the following commands will return a command to paste into the shell of the device you would like to upgrade.
### UI or startd:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole
```
### Container runtime or debian dependencies:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole-deb
```
### Image recipe:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole-squashfs
```

Makefile

@@ -1,39 +1,44 @@
ls-files = $(shell git ls-files --cached --others --exclude-standard $1)
PROFILE = release
PLATFORM_FILE := $(shell ./check-platform.sh)
ENVIRONMENT_FILE := $(shell ./check-environment.sh)
GIT_HASH_FILE := $(shell ./check-git-hash.sh)
VERSION_FILE := $(shell ./check-version.sh)
BASENAME := $(shell ./basename.sh)
PLATFORM := $(shell if [ -f ./PLATFORM.txt ]; then cat ./PLATFORM.txt; else echo unknown; fi)
ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; else echo $(PLATFORM) | sed 's/-nonfree$$//g'; fi)
PLATFORM_FILE := $(shell ./build/env/check-platform.sh)
ENVIRONMENT_FILE := $(shell ./build/env/check-environment.sh)
GIT_HASH_FILE := $(shell ./build/env/check-git-hash.sh)
VERSION_FILE := $(shell ./build/env/check-version.sh)
BASENAME := $(shell PROJECT=startos ./build/env/basename.sh)
PLATFORM := $(shell if [ -f $(PLATFORM_FILE) ]; then cat $(PLATFORM_FILE); else echo unknown; fi)
ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; elif [ "$(PLATFORM)" = "rockchip64" ]; then echo aarch64; else echo $(PLATFORM) | sed 's/-nonfree$$//g; s/-nvidia$$//g'; fi)
RUST_ARCH := $(shell if [ "$(ARCH)" = "riscv64" ]; then echo riscv64gc; else echo $(ARCH); fi)
REGISTRY_BASENAME := $(shell PROJECT=start-registry PLATFORM=$(ARCH) ./build/env/basename.sh)
TUNNEL_BASENAME := $(shell PROJECT=start-tunnel PLATFORM=$(ARCH) ./build/env/basename.sh)
IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo iso; fi)
WEB_UIS := web/dist/raw/ui/index.html web/dist/raw/setup-wizard/index.html web/dist/raw/install-wizard/index.html
COMPRESSED_WEB_UIS := web/dist/static/ui/index.html web/dist/static/setup-wizard/index.html web/dist/static/install-wizard/index.html
FIRMWARE_ROMS := ./firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json)
BUILD_SRC := $(call ls-files, build) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS)
DEBIAN_SRC := $(call ls-files, debian/)
IMAGE_RECIPE_SRC := $(call ls-files, image-recipe/)
STARTD_SRC := core/startos/startd.service $(BUILD_SRC)
WEB_UIS := web/dist/raw/ui/index.html web/dist/raw/setup-wizard/index.html
COMPRESSED_WEB_UIS := web/dist/static/ui/index.html web/dist/static/setup-wizard/index.html
FIRMWARE_ROMS := build/lib/firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./build/lib/firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json)
BUILD_SRC := $(call ls-files, build/lib) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS)
IMAGE_RECIPE_SRC := $(call ls-files, build/image-recipe/)
STARTD_SRC := core/startd.service $(BUILD_SRC)
CORE_SRC := $(call ls-files, core) $(shell git ls-files --recurse-submodules patch-db) $(GIT_HASH_FILE)
WEB_SHARED_SRC := $(call ls-files, web/projects/shared) $(call ls-files, web/projects/marketplace) $(shell ls -p web/ | grep -v / | sed 's/^/web\//g') web/node_modules/.package-lock.json web/config.json patch-db/client/dist/index.js sdk/baseDist/package.json web/patchdb-ui-seed.json sdk/dist/package.json
WEB_UI_SRC := $(call ls-files, web/projects/ui)
WEB_SETUP_WIZARD_SRC := $(call ls-files, web/projects/setup-wizard)
WEB_INSTALL_WIZARD_SRC := $(call ls-files, web/projects/install-wizard)
WEB_START_TUNNEL_SRC := $(call ls-files, web/projects/start-tunnel)
PATCH_DB_CLIENT_SRC := $(shell git ls-files --recurse-submodules patch-db/client)
GZIP_BIN := $(shell which pigz || which gzip)
TAR_BIN := $(shell which gtar || which tar)
COMPILED_TARGETS := core/target/$(ARCH)-unknown-linux-musl/$(PROFILE)/startbox core/target/$(ARCH)-unknown-linux-musl/release/containerbox container-runtime/rootfs.$(ARCH).squashfs
ALL_TARGETS := $(STARTD_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) cargo-deps/$(ARCH)-unknown-linux-musl/release/startos-backup-fs $(PLATFORM_FILE) \
COMPILED_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox core/target/$(RUST_ARCH)-unknown-linux-musl/release/start-container container-runtime/rootfs.$(ARCH).squashfs
STARTOS_TARGETS := $(STARTD_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) target/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs $(PLATFORM_FILE) \
$(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then \
echo cargo-deps/aarch64-unknown-linux-musl/release/pi-beep; \
echo target/aarch64-unknown-linux-musl/release/pi-beep; \
fi) \
$(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]; then \
echo cargo-deps/$(ARCH)-unknown-linux-musl/release/tokio-console; \
echo cargo-deps/$(ARCH)-unknown-linux-musl/release/flamegraph; \
echo target/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph; \
fi') \
$(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)console($$|-) ]]; then \
echo target/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console; \
fi')
REBUILD_TYPES = 1
REGISTRY_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox core/start-registryd.service
TUNNEL_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox core/start-tunneld.service
ifeq ($(REMOTE),)
mkdir = mkdir -p $1
@@ -56,18 +61,18 @@ endif
.DELETE_ON_ERROR:
.PHONY: all metadata install clean format cli uis ui reflash deb $(IMAGE_TYPE) squashfs wormhole wormhole-deb test test-core test-sdk test-container-runtime registry
.PHONY: all metadata install clean format install-cli cli uis ui reflash deb $(IMAGE_TYPE) squashfs wormhole wormhole-deb test test-core test-sdk test-container-runtime registry install-registry tunnel install-tunnel ts-bindings
all: $(ALL_TARGETS)
all: $(STARTOS_TARGETS)
touch:
touch $(ALL_TARGETS)
touch $(STARTOS_TARGETS)
metadata: $(VERSION_FILE) $(PLATFORM_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE)
clean:
rm -rf core/target
rm -rf core/startos/bindings
rm -rf core/bindings
rm -rf web/.angular
rm -f web/config.json
rm -rf web/node_modules
@@ -75,7 +80,7 @@ clean:
rm -rf patch-db/client/node_modules
rm -rf patch-db/client/dist
rm -rf patch-db/target
rm -rf cargo-deps
rm -rf target
rm -rf dpkg-workdir
rm -rf image-recipe/deb
rm -rf results
@@ -83,14 +88,8 @@ clean:
rm -rf container-runtime/dist
rm -rf container-runtime/node_modules
rm -f container-runtime/*.squashfs
if [ -d container-runtime/tmp/combined ] && mountpoint container-runtime/tmp/combined; then sudo umount container-runtime/tmp/combined; fi
if [ -d container-runtime/tmp/lower ] && mountpoint container-runtime/tmp/lower; then sudo umount container-runtime/tmp/lower; fi
rm -rf container-runtime/tmp
(cd sdk && make clean)
rm -f ENVIRONMENT.txt
rm -f PLATFORM.txt
rm -f GIT_HASH.txt
rm -f VERSION.txt
rm -f env/*.txt
format:
cd core && cargo +nightly fmt
@@ -106,47 +105,89 @@ test-sdk: $(call ls-files, sdk) sdk/base/lib/osBindings/index.ts
test-container-runtime: container-runtime/node_modules/.package-lock.json $(call ls-files, container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json
cd container-runtime && npm test
cli:
./core/install-cli.sh
install-cli: $(GIT_HASH_FILE)
./core/build/build-cli.sh --install
registry:
./core/build-registrybox.sh
cli: $(GIT_HASH_FILE)
./core/build/build-cli.sh
tunnel:
./core/build-tunnelbox.sh
registry: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox
install-registry: $(REGISTRY_TARGETS)
$(call mkdir,$(DESTDIR)/usr/bin)
$(call cp,core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox,$(DESTDIR)/usr/bin/start-registrybox)
$(call ln,/usr/bin/start-registrybox,$(DESTDIR)/usr/bin/start-registryd)
$(call ln,/usr/bin/start-registrybox,$(DESTDIR)/usr/bin/start-registry)
$(call mkdir,$(DESTDIR)/lib/systemd/system)
$(call cp,core/start-registryd.service,$(DESTDIR)/lib/systemd/system/start-registryd.service)
core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox: $(CORE_SRC) $(ENVIRONMENT_FILE)
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build/build-registrybox.sh
tunnel: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox
install-tunnel: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox core/start-tunneld.service
$(call mkdir,$(DESTDIR)/usr/bin)
$(call cp,core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox,$(DESTDIR)/usr/bin/start-tunnelbox)
$(call ln,/usr/bin/start-tunnelbox,$(DESTDIR)/usr/bin/start-tunneld)
$(call ln,/usr/bin/start-tunnelbox,$(DESTDIR)/usr/bin/start-tunnel)
$(call mkdir,$(DESTDIR)/lib/systemd/system)
$(call cp,core/start-tunneld.service,$(DESTDIR)/lib/systemd/system/start-tunneld.service)
$(call mkdir,$(DESTDIR)/usr/lib/startos/scripts)
$(call cp,build/lib/scripts/forward-port,$(DESTDIR)/usr/lib/startos/scripts/forward-port)
$(call mkdir,$(DESTDIR)/etc/apt/sources.list.d)
$(call cp,apt/start9.list,$(DESTDIR)/etc/apt/sources.list.d/start9.list)
$(call mkdir,$(DESTDIR)/usr/share/keyrings)
$(call cp,apt/start9.gpg,$(DESTDIR)/usr/share/keyrings/start9.gpg)
core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox: $(CORE_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) web/dist/static/start-tunnel/index.html
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build/build-tunnelbox.sh
deb: results/$(BASENAME).deb
debian/control: build/lib/depends build/lib/conflicts
./debuild/control.sh
results/$(BASENAME).deb: debian/dpkg-build.sh $(call ls-files,debian/startos) $(STARTOS_TARGETS)
PLATFORM=$(PLATFORM) REQUIRES=debian ./build/os-compat/run-compat.sh ./debian/dpkg-build.sh
results/$(BASENAME).deb: dpkg-build.sh $(DEBIAN_SRC) $(ALL_TARGETS)
PLATFORM=$(PLATFORM) REQUIRES=debian ./build/os-compat/run-compat.sh ./dpkg-build.sh
registry-deb: results/$(REGISTRY_BASENAME).deb
results/$(REGISTRY_BASENAME).deb: debian/dpkg-build.sh $(call ls-files,debian/start-registry) $(REGISTRY_TARGETS)
PROJECT=start-registry PLATFORM=$(ARCH) REQUIRES=debian ./build/os-compat/run-compat.sh ./debian/dpkg-build.sh
tunnel-deb: results/$(TUNNEL_BASENAME).deb
results/$(TUNNEL_BASENAME).deb: debian/dpkg-build.sh $(call ls-files,debian/start-tunnel) $(TUNNEL_TARGETS) build/lib/scripts/forward-port
PROJECT=start-tunnel PLATFORM=$(ARCH) REQUIRES=debian DEPENDS=wireguard-tools,iptables,conntrack ./build/os-compat/run-compat.sh ./debian/dpkg-build.sh
$(IMAGE_TYPE): results/$(BASENAME).$(IMAGE_TYPE)
squashfs: results/$(BASENAME).squashfs
results/$(BASENAME).$(IMAGE_TYPE) results/$(BASENAME).squashfs: $(IMAGE_RECIPE_SRC) results/$(BASENAME).deb
REQUIRES=debian ./build/os-compat/run-compat.sh ./image-recipe/run-local-build.sh "results/$(BASENAME).deb"
ARCH=$(ARCH) ./build/image-recipe/run-local-build.sh "results/$(BASENAME).deb"
# For creating os images. DO NOT USE
install: $(ALL_TARGETS)
install: $(STARTOS_TARGETS)
$(call mkdir,$(DESTDIR)/usr/bin)
$(call mkdir,$(DESTDIR)/usr/sbin)
$(call cp,core/target/$(ARCH)-unknown-linux-musl/$(PROFILE)/startbox,$(DESTDIR)/usr/bin/startbox)
$(call cp,core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox,$(DESTDIR)/usr/bin/startbox)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/startd)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-cli)
if [ "$(PLATFORM)" = "raspberrypi" ]; then $(call cp,cargo-deps/aarch64-unknown-linux-musl/release/pi-beep,$(DESTDIR)/usr/bin/pi-beep); fi
if [ "$(PLATFORM)" = "raspberrypi" ]; then $(call cp,target/aarch64-unknown-linux-musl/release/pi-beep,$(DESTDIR)/usr/bin/pi-beep); fi
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then \
$(call cp,cargo-deps/$(ARCH)-unknown-linux-musl/release/tokio-console,$(DESTDIR)/usr/bin/tokio-console); \
$(call cp,cargo-deps/$(ARCH)-unknown-linux-musl/release/flamegraph,$(DESTDIR)/usr/bin/flamegraph); \
$(call cp,target/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph,$(DESTDIR)/usr/bin/flamegraph); \
fi
$(call cp,cargo-deps/$(ARCH)-unknown-linux-musl/release/startos-backup-fs,$(DESTDIR)/usr/bin/startos-backup-fs)
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)console($$|-) ]]'; then \
$(call cp,target/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console,$(DESTDIR)/usr/bin/tokio-console); \
fi
$(call cp,target/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs,$(DESTDIR)/usr/bin/startos-backup-fs)
$(call ln,/usr/bin/startos-backup-fs,$(DESTDIR)/usr/sbin/mount.backup-fs)
$(call mkdir,$(DESTDIR)/lib/systemd/system)
$(call cp,core/startos/startd.service,$(DESTDIR)/lib/systemd/system/startd.service)
$(call cp,core/startd.service,$(DESTDIR)/lib/systemd/system/startd.service)
$(call mkdir,$(DESTDIR)/usr/lib)
$(call rm,$(DESTDIR)/usr/lib/startos)
@@ -154,26 +195,24 @@ install: $(ALL_TARGETS)
$(call mkdir,$(DESTDIR)/usr/lib/startos/container-runtime)
$(call cp,container-runtime/rootfs.$(ARCH).squashfs,$(DESTDIR)/usr/lib/startos/container-runtime/rootfs.squashfs)
$(call cp,PLATFORM.txt,$(DESTDIR)/usr/lib/startos/PLATFORM.txt)
$(call cp,ENVIRONMENT.txt,$(DESTDIR)/usr/lib/startos/ENVIRONMENT.txt)
$(call cp,GIT_HASH.txt,$(DESTDIR)/usr/lib/startos/GIT_HASH.txt)
$(call cp,VERSION.txt,$(DESTDIR)/usr/lib/startos/VERSION.txt)
$(call cp,build/env/PLATFORM.txt,$(DESTDIR)/usr/lib/startos/PLATFORM.txt)
$(call cp,build/env/ENVIRONMENT.txt,$(DESTDIR)/usr/lib/startos/ENVIRONMENT.txt)
$(call cp,build/env/GIT_HASH.txt,$(DESTDIR)/usr/lib/startos/GIT_HASH.txt)
$(call cp,build/env/VERSION.txt,$(DESTDIR)/usr/lib/startos/VERSION.txt)
$(call cp,firmware/$(PLATFORM),$(DESTDIR)/usr/lib/startos/firmware)
update-overlay: $(ALL_TARGETS)
update-overlay: $(STARTOS_TARGETS)
@echo "\033[33m!!! THIS WILL ONLY REFLASH YOUR DEVICE IN MEMORY !!!\033[0m"
@echo "\033[33mALL CHANGES WILL BE REVERTED IF YOU RESTART THE DEVICE\033[0m"
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
@if [ "`ssh $(REMOTE) 'cat /usr/lib/startos/VERSION.txt'`" != "`cat ./VERSION.txt`" ]; then >&2 echo "StartOS requires migrations: update-overlay is unavailable." && false; fi
@if [ "`ssh $(REMOTE) 'cat /usr/lib/startos/VERSION.txt'`" != "`cat $(VERSION_FILE)`" ]; then >&2 echo "StartOS requires migrations: update-overlay is unavailable." && false; fi
$(call ssh,"sudo systemctl stop startd")
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) PLATFORM=$(PLATFORM)
$(call ssh,"sudo systemctl start startd")
wormhole: core/target/$(ARCH)-unknown-linux-musl/$(PROFILE)/startbox
wormhole: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox
@echo "Paste the following command into the shell of your StartOS server:"
@echo
@wormhole send core/target/$(ARCH)-unknown-linux-musl/$(PROFILE)/startbox 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo /usr/lib/startos/scripts/chroot-and-upgrade \"cd /usr/bin && rm startbox && wormhole receive --accept-file %s && chmod +x startbox\"\n", $$3 }'
@wormhole send core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo /usr/lib/startos/scripts/chroot-and-upgrade \"cd /usr/bin && rm startbox && wormhole receive --accept-file %s && chmod +x startbox\"\n", $$3 }'
wormhole-deb: results/$(BASENAME).deb
@echo "Paste the following command into the shell of your StartOS server:"
@@ -185,26 +224,26 @@ wormhole-squashfs: results/$(BASENAME).squashfs
$(eval SQFS_SIZE := $(shell du -s --bytes results/$(BASENAME).squashfs | awk '{print $$1}'))
@echo "Paste the following command into the shell of your StartOS server:"
@echo
@wormhole send results/$(BASENAME).squashfs 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo sh -c '"'"'/usr/lib/startos/scripts/prune-images $(SQFS_SIZE) && /usr/lib/startos/scripts/prune-boot && cd /media/startos/images && wormhole receive --accept-file %s && CHECKSUM=$(SQFS_SUM) /usr/lib/startos/scripts/use-img ./$(BASENAME).squashfs'"'"'\n", $$3 }'
@wormhole send results/$(BASENAME).squashfs 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo sh -c '"'"'/usr/lib/startos/scripts/prune-images $(SQFS_SIZE) && /usr/lib/startos/scripts/prune-boot && cd /media/startos/images && wormhole receive --accept-file %s && CHECKSUM=$(SQFS_SUM) /usr/lib/startos/scripts/upgrade ./$(BASENAME).squashfs'"'"'\n", $$3 }'
update: $(ALL_TARGETS)
update: $(STARTOS_TARGETS)
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/startos/next PLATFORM=$(PLATFORM)
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y $(shell cat ./build/lib/depends)"')
update-startbox: core/target/$(ARCH)-unknown-linux-musl/$(PROFILE)/startbox # only update binary (faster than full update)
update-startbox: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox # only update binary (faster than full update)
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
$(call cp,core/target/$(ARCH)-unknown-linux-musl/$(PROFILE)/startbox,/media/startos/next/usr/bin/startbox)
$(call cp,core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox,/media/startos/next/usr/bin/startbox)
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync true')
update-deb: results/$(BASENAME).deb # better than update, but only available from debian
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
$(call mkdir,/media/startos/next/tmp/startos-deb)
$(call cp,results/$(BASENAME).deb,/media/startos/next/tmp/startos-deb/$(BASENAME).deb)
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y --reinstall /tmp/startos-deb/$(BASENAME).deb"')
$(call mkdir,/media/startos/next/var/tmp/startos-deb)
$(call cp,results/$(BASENAME).deb,/media/startos/next/var/tmp/startos-deb/$(BASENAME).deb)
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y --reinstall /var/tmp/startos-deb/$(BASENAME).deb"')
update-squashfs: results/$(BASENAME).squashfs
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
@@ -213,9 +252,9 @@ update-squashfs: results/$(BASENAME).squashfs
$(call ssh,'/usr/lib/startos/scripts/prune-images $(SQFS_SIZE)')
$(call ssh,'/usr/lib/startos/scripts/prune-boot')
$(call cp,results/$(BASENAME).squashfs,/media/startos/images/next.rootfs)
$(call ssh,'sudo CHECKSUM=$(SQFS_SUM) /usr/lib/startos/scripts/use-img /media/startos/images/next.rootfs')
$(call ssh,'sudo CHECKSUM=$(SQFS_SUM) /usr/lib/startos/scripts/upgrade /media/startos/images/next.rootfs')
emulate-reflash: $(ALL_TARGETS)
emulate-reflash: $(STARTOS_TARGETS)
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/startos/next PLATFORM=$(PLATFORM)
@@ -223,7 +262,7 @@ emulate-reflash: $(ALL_TARGETS)
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y $(shell cat ./build/lib/depends)"')
upload-ota: results/$(BASENAME).squashfs
TARGET=$(TARGET) KEY=$(KEY) ./upload-ota.sh
TARGET=$(TARGET) KEY=$(KEY) ./build/upload-ota.sh
container-runtime/debian.$(ARCH).squashfs: ./container-runtime/download-base-image.sh
ARCH=$(ARCH) ./container-runtime/download-base-image.sh
@@ -236,17 +275,16 @@ container-runtime/node_modules/.package-lock.json: container-runtime/package-loc
npm --prefix container-runtime ci
touch container-runtime/node_modules/.package-lock.json
sdk/base/lib/osBindings/index.ts: $(shell if [ "$(REBUILD_TYPES)" -ne 0 ]; then echo core/startos/bindings/index.ts; fi)
ts-bindings: core/bindings/index.ts
mkdir -p sdk/base/lib/osBindings
rsync -ac --delete core/startos/bindings/ sdk/base/lib/osBindings/
touch sdk/base/lib/osBindings/index.ts
rsync -ac --delete core/bindings/ sdk/base/lib/osBindings/
core/startos/bindings/index.ts: $(call ls-files, core) $(ENVIRONMENT_FILE)
rm -rf core/startos/bindings
./core/build-ts.sh
ls core/startos/bindings/*.ts | sed 's/core\/startos\/bindings\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' | grep -v '"./index"' | tee core/startos/bindings/index.ts
npm --prefix sdk exec -- prettier --config ./sdk/base/package.json -w ./core/startos/bindings/*.ts
touch core/startos/bindings/index.ts
core/bindings/index.ts: $(call ls-files, core) $(ENVIRONMENT_FILE)
rm -rf core/bindings
./core/build/build-ts.sh
ls core/bindings/*.ts | sed 's/core\/bindings\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' | grep -v '"./index"' | tee core/bindings/index.ts
npm --prefix sdk/base exec -- prettier --config=./sdk/base/package.json -w './core/bindings/**/*.ts'
touch core/bindings/index.ts
sdk/dist/package.json sdk/baseDist/package.json: $(call ls-files, sdk) sdk/base/lib/osBindings/index.ts
(cd sdk && make bundle)
@@ -261,22 +299,22 @@ container-runtime/dist/node_modules/.package-lock.json container-runtime/dist/pa
./container-runtime/install-dist-deps.sh
touch container-runtime/dist/node_modules/.package-lock.json
container-runtime/rootfs.$(ARCH).squashfs: container-runtime/debian.$(ARCH).squashfs container-runtime/container-runtime.service container-runtime/update-image.sh container-runtime/deb-install.sh container-runtime/dist/index.js container-runtime/dist/node_modules/.package-lock.json core/target/$(ARCH)-unknown-linux-musl/release/containerbox
ARCH=$(ARCH) REQUIRES=linux ./build/os-compat/run-compat.sh ./container-runtime/update-image.sh
container-runtime/rootfs.$(ARCH).squashfs: container-runtime/debian.$(ARCH).squashfs container-runtime/container-runtime.service container-runtime/update-image.sh container-runtime/update-image-local.sh container-runtime/deb-install.sh container-runtime/dist/index.js container-runtime/dist/node_modules/.package-lock.json core/target/$(RUST_ARCH)-unknown-linux-musl/release/start-container
ARCH=$(ARCH) ./container-runtime/update-image-local.sh
build/lib/depends build/lib/conflicts: $(ENVIRONMENT_FILE) build/dpkg-deps/*
build/dpkg-deps/generate.sh
build/lib/depends build/lib/conflicts: $(ENVIRONMENT_FILE) $(PLATFORM_FILE) $(shell ls build/dpkg-deps/*)
PLATFORM=$(PLATFORM) ARCH=$(ARCH) build/dpkg-deps/generate.sh
$(FIRMWARE_ROMS): build/lib/firmware.json download-firmware.sh $(PLATFORM_FILE)
./download-firmware.sh $(PLATFORM)
$(FIRMWARE_ROMS): build/lib/firmware.json ./build/download-firmware.sh $(PLATFORM_FILE)
./build/download-firmware.sh $(PLATFORM)
core/target/$(ARCH)-unknown-linux-musl/$(PROFILE)/startbox: $(CORE_SRC) $(COMPRESSED_WEB_UIS) web/patchdb-ui-seed.json $(ENVIRONMENT_FILE)
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build-startbox.sh
touch core/target/$(ARCH)-unknown-linux-musl/$(PROFILE)/startbox
core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox: $(CORE_SRC) $(COMPRESSED_WEB_UIS) web/patchdb-ui-seed.json $(ENVIRONMENT_FILE)
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build/build-startbox.sh
touch core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox
core/target/$(ARCH)-unknown-linux-musl/release/containerbox: $(CORE_SRC) $(ENVIRONMENT_FILE)
ARCH=$(ARCH) ./core/build-containerbox.sh
touch core/target/$(ARCH)-unknown-linux-musl/release/containerbox
core/target/$(RUST_ARCH)-unknown-linux-musl/release/start-container: $(CORE_SRC) $(ENVIRONMENT_FILE)
ARCH=$(ARCH) ./core/build/build-start-container.sh
touch core/target/$(RUST_ARCH)-unknown-linux-musl/release/start-container
web/package-lock.json: web/package.json sdk/baseDist/package.json
npm --prefix web i
@@ -291,23 +329,27 @@ web/.angular/.updated: patch-db/client/dist/index.js sdk/baseDist/package.json w
mkdir -p web/.angular
touch web/.angular/.updated
web/dist/raw/ui/index.html: $(WEB_UI_SRC) $(WEB_SHARED_SRC) web/.angular/.updated
web/.i18n-checked: $(WEB_SHARED_SRC) $(WEB_UI_SRC) $(WEB_SETUP_WIZARD_SRC) $(WEB_START_TUNNEL_SRC)
npm --prefix web run check:i18n
touch web/.i18n-checked
web/dist/raw/ui/index.html: $(WEB_UI_SRC) $(WEB_SHARED_SRC) web/.angular/.updated web/.i18n-checked
npm --prefix web run build:ui
touch web/dist/raw/ui/index.html
web/dist/raw/setup-wizard/index.html: $(WEB_SETUP_WIZARD_SRC) $(WEB_SHARED_SRC) web/.angular/.updated
web/dist/raw/setup-wizard/index.html: $(WEB_SETUP_WIZARD_SRC) $(WEB_SHARED_SRC) web/.angular/.updated web/.i18n-checked
npm --prefix web run build:setup
touch web/dist/raw/setup-wizard/index.html
web/dist/raw/install-wizard/index.html: $(WEB_INSTALL_WIZARD_SRC) $(WEB_SHARED_SRC) web/.angular/.updated
npm --prefix web run build:install
touch web/dist/raw/install-wizard/index.html
web/dist/raw/start-tunnel/index.html: $(WEB_START_TUNNEL_SRC) $(WEB_SHARED_SRC) web/.angular/.updated web/.i18n-checked
npm --prefix web run build:tunnel
touch web/dist/raw/start-tunnel/index.html
$(COMPRESSED_WEB_UIS): $(WEB_UIS) $(ENVIRONMENT_FILE)
./compress-uis.sh
web/dist/static/%/index.html: web/dist/raw/%/index.html
./web/compress-uis.sh $*
web/config.json: $(GIT_HASH_FILE) web/config-sample.json
jq '.useMocks = false' web/config-sample.json | jq '.gitHash = "$(shell cat GIT_HASH.txt)"' > web/config.json
web/config.json: $(GIT_HASH_FILE) $(ENVIRONMENT_FILE) web/config-sample.json web/update-config.sh
./web/update-config.sh
patch-db/client/node_modules/.package-lock.json: patch-db/client/package.json
npm --prefix patch-db/client ci
@@ -328,14 +370,17 @@ uis: $(WEB_UIS)
# this is a convenience step to build the UI
ui: web/dist/raw/ui
cargo-deps/aarch64-unknown-linux-musl/release/pi-beep:
ARCH=aarch64 ./build-cargo-dep.sh pi-beep
target/aarch64-unknown-linux-musl/release/pi-beep: ./build/build-cargo-dep.sh
ARCH=aarch64 ./build/build-cargo-dep.sh pi-beep
cargo-deps/$(ARCH)-unknown-linux-musl/release/tokio-console:
ARCH=$(ARCH) PREINSTALL="apk add musl-dev pkgconfig" ./build-cargo-dep.sh tokio-console
target/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console: ./build/build-cargo-dep.sh
ARCH=$(ARCH) ./build/build-cargo-dep.sh tokio-console
touch $@
cargo-deps/$(ARCH)-unknown-linux-musl/release/startos-backup-fs:
ARCH=$(ARCH) PREINSTALL="apk add fuse3 fuse3-dev fuse3-static musl-dev pkgconfig" ./build-cargo-dep.sh --git https://github.com/Start9Labs/start-fs.git startos-backup-fs
target/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs: ./build/build-cargo-dep.sh
ARCH=$(ARCH) ./build/build-cargo-dep.sh --git https://github.com/Start9Labs/start-fs.git startos-backup-fs
touch $@
cargo-deps/$(ARCH)-unknown-linux-musl/release/flamegraph:
ARCH=$(ARCH) PREINSTALL="apk add musl-dev pkgconfig" ./build-cargo-dep.sh flamegraph
target/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph: ./build/build-cargo-dep.sh
ARCH=$(ARCH) ./build/build-cargo-dep.sh flamegraph
touch $@
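Note: with outputs moved from cargo-deps/ to target/ and the helper relocated to build/, a single dependency can be rebuilt directly; a sketch for an x86_64 host (where $(RUST_ARCH) matches $(ARCH)):

  ARCH=x86_64 make target/x86_64-unknown-linux-musl/release/tokio-console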


@@ -7,76 +7,64 @@
<a href="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml">
<img src="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml/badge.svg">
</a>
<a href="https://heyapollo.com/product/startos">
<a href="https://heyapollo.com/product/startos">
<img alt="Static Badge" src="https://img.shields.io/badge/apollo-review%20%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%20-slateblue">
</a>
<a href="https://twitter.com/start9labs">
<img alt="X (formerly Twitter) Follow" src="https://img.shields.io/twitter/follow/start9labs">
</a>
<a href="https://matrix.to/#/#community:matrix.start9labs.com">
<img alt="Static Badge" src="https://img.shields.io/badge/community-matrix-yellow?logo=matrix">
</a>
<a href="https://t.me/start9_labs">
<img alt="Static Badge" src="https://img.shields.io/badge/community-telegram-blue?logo=telegram">
</a>
<a href="https://docs.start9.com">
<img alt="Static Badge" src="https://img.shields.io/badge/docs-orange?label=%F0%9F%91%A4%20support">
</a>
<a href="https://matrix.to/#/#community-dev:matrix.start9labs.com">
<a href="https://matrix.to/#/#dev-startos:matrix.start9labs.com">
<img alt="Static Badge" src="https://img.shields.io/badge/developer-matrix-darkcyan?logo=matrix">
</a>
<a href="https://start9.com">
<img alt="Website" src="https://img.shields.io/website?up_message=online&down_message=offline&url=https%3A%2F%2Fstart9.com&logo=website&label=%F0%9F%8C%90%20website">
</a>
</div>
<br />
<div align="center">
<h3>
Welcome to the era of Sovereign Computing
</h3>
<p>
StartOS is an open source Linux distribution optimized for running a personal server. It facilitates the discovery, installation, network configuration, service configuration, data backup, dependency management, and health monitoring of self-hosted software services.
</p>
</div>
<br />
<p align="center">
<img src="assets/StartOS.png" alt="StartOS" width="85%">
</p>
<br />
## Running StartOS
> [!WARNING]
> StartOS is in beta. It lacks features. It doesn't always work perfectly. Start9 servers are not plug and play. Using them properly requires some effort and patience. Please do not use StartOS or purchase a server if you are unable or unwilling to follow instructions and learn new concepts.
## What is StartOS?
### 💰 Buy a Start9 server
This is the most convenient option. Simply [buy a server](https://store.start9.com) from Start9 and plug it in.
StartOS is an open-source Linux distribution for running a personal server. It handles discovery, installation, network configuration, data backup, dependency management, and health monitoring of self-hosted services.
### 👷 Build your own server
This option is easier than you might imagine, and there are 4 reasons why you might prefer it:
1. You already have hardware
1. You want to save on shipping costs
1. You prefer not to divulge your physical address
1. You just like building things
**Tech stack:** Rust backend (Tokio/Axum), Angular frontend, Node.js container runtime with LXC, and a custom diff-based database ([Patch-DB](https://github.com/Start9Labs/patch-db)) for reactive state synchronization.
To pursue this option, follow one of our [DIY guides](https://start9.com/latest/diy).
Services run in isolated LXC containers, packaged as [S9PKs](https://github.com/Start9Labs/start-os/blob/master/core/s9pk-structure.md) — a signed, merkle-archived format that supports partial downloads and cryptographic verification.
## ❤️ Contributing
There are multiple ways to contribute: work directly on StartOS, package a service for the marketplace, or help with documentation and guides. To learn more about contributing, see [here](https://start9.com/contribute/).
## What can you do with it?
To report security issues, please email our security team - security@start9.com.
StartOS lets you self-host services that would otherwise depend on third-party cloud providers — giving you full ownership of your data and infrastructure.
## 🌎 Marketplace
There are dozens of services available for StartOS, and new ones are being added all the time. Check out the full list of available services [here](https://marketplace.start9.com/marketplace). To read more about the Marketplace ecosystem, check out this [blog post](https://blog.start9.com/start9-marketplace-strategy/)
Browse available services on the [Start9 Marketplace](https://marketplace.start9.com/), including:
## 🖥️ User Interface Screenshots
- **Bitcoin & Lightning** — Run a full Bitcoin node, Lightning node, BTCPay Server, and other payment infrastructure
- **Communication** — Self-host Matrix, SimpleX, or other messaging platforms
- **Cloud Storage** — Run Nextcloud, Vaultwarden, and other productivity tools
<p align="center">
<img src="assets/registry.png" alt="StartOS Marketplace" width="49%">
<img src="assets/community.png" alt="StartOS Community Registry" width="49%">
<img src="assets/c-lightning.png" alt="StartOS NextCloud Service" width="49%">
<img src="assets/btcpay.png" alt="StartOS BTCPay Service" width="49%">
<img src="assets/nextcloud.png" alt="StartOS System Settings" width="49%">
<img src="assets/system.png" alt="StartOS System Settings" width="49%">
<img src="assets/welcome.png" alt="StartOS System Settings" width="49%">
<img src="assets/logs.png" alt="StartOS System Settings" width="49%">
</p>
Services are added by the community. If a service you want isn't available, you can [package it yourself](https://github.com/Start9Labs/ai-service-packaging/).
## Getting StartOS
### Buy a Start9 server
The easiest path. [Buy a server](https://store.start9.com) from Start9 and plug it in.
### Build your own
Follow the [install guide](https://docs.start9.com/start-os/installing.html) to install StartOS on your own hardware. Reasons to go this route:
1. You already have compatible hardware
2. You want to save on shipping costs
3. You prefer not to share your physical address
4. You enjoy building things
### Build from source
See [CONTRIBUTING.md](CONTRIBUTING.md) for environment setup, build instructions, and development workflow.
## Contributing
There are multiple ways to contribute: work directly on StartOS, package a service for the marketplace, or help with documentation and guides. See [CONTRIBUTING.md](CONTRIBUTING.md) or visit [start9.com/contribute](https://start9.com/contribute/).
To report security issues, email [security@start9.com](mailto:security@start9.com).

apt/start9.gpg Normal file (binary, not shown)

apt/start9.list Normal file

@@ -0,0 +1 @@
deb [arch=amd64,arm64,riscv64 signed-by=/usr/share/keyrings/start9.gpg] https://start9-debs.nyc3.cdn.digitaloceanspaces.com stable main
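Note: a minimal client-side sketch for consuming this repo, assuming the start9.gpg keyring added above is installed at the path the sources entry references:

  sudo cp apt/start9.gpg /usr/share/keyrings/start9.gpg
  sudo cp apt/start9.list /etc/apt/sources.list.d/start9.list
  sudo apt-get update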

(9 binary image assets deleted; contents not shown)

@@ -1,34 +0,0 @@
#!/bin/bash
set -e
shopt -s expand_aliases
if [ "$0" != "./build-cargo-dep.sh" ]; then
>&2 echo "Must be run from start-os directory"
exit 1
fi
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
if [ -z "$ARCH" ]; then
ARCH=$(uname -m)
fi
DOCKER_PLATFORM="linux/${ARCH}"
if [ "$ARCH" = aarch64 ] || [ "$ARCH" = arm64 ]; then
DOCKER_PLATFORM="linux/arm64"
elif [ "$ARCH" = x86_64 ]; then
DOCKER_PLATFORM="linux/amd64"
fi
mkdir -p cargo-deps
alias 'rust-musl-builder'='docker run $USE_TTY --platform=${DOCKER_PLATFORM} --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)"/cargo-deps:/home/rust/src -w /home/rust/src -P rust:alpine'
PREINSTALL=${PREINSTALL:-true}
rust-musl-builder sh -c "$PREINSTALL && cargo install $* --target-dir /home/rust/src --target=$ARCH-unknown-linux-musl"
sudo chown -R $USER cargo-deps
sudo chown -R $USER ~/.cargo

build/README.md Normal file

build/apt/publish-deb.sh Executable file

@@ -0,0 +1,138 @@
#!/bin/bash
#
# Publish .deb files to an S3-hosted apt repository.
#
# Usage: publish-deb.sh <deb-file-or-directory> [<deb-file-or-directory> ...]
#
# Environment variables:
# GPG_PRIVATE_KEY - Armored GPG private key (imported if set)
# GPG_KEY_ID - GPG key ID for signing
# S3_ACCESS_KEY - S3 access key
# S3_SECRET_KEY - S3 secret key
# S3_ENDPOINT - S3 endpoint (default: https://nyc3.digitaloceanspaces.com)
# S3_BUCKET - S3 bucket name (default: start9-debs)
# SUITE - Apt suite name (default: stable)
# COMPONENT - Apt component name (default: main)
set -e
if [ $# -eq 0 ]; then
echo "Usage: $0 <deb-file-or-directory> [...]" >&2
exit 1
fi
BUCKET="${S3_BUCKET:-start9-debs}"
ENDPOINT="${S3_ENDPOINT:-https://nyc3.digitaloceanspaces.com}"
SUITE="${SUITE:-stable}"
COMPONENT="${COMPONENT:-main}"
REPO_DIR="$(mktemp -d)"
cleanup() {
rm -rf "$REPO_DIR"
}
trap cleanup EXIT
# Import GPG key if provided
if [ -n "$GPG_PRIVATE_KEY" ]; then
echo "$GPG_PRIVATE_KEY" | gpg --batch --import 2>/dev/null
fi
# Configure s3cmd
if [ -n "$S3_ACCESS_KEY" ] && [ -n "$S3_SECRET_KEY" ]; then
S3CMD_CONFIG="$(mktemp)"
cat > "$S3CMD_CONFIG" <<EOF
[default]
access_key = ${S3_ACCESS_KEY}
secret_key = ${S3_SECRET_KEY}
host_base = $(echo "$ENDPOINT" | sed 's|https://||')
host_bucket = %(bucket)s.$(echo "$ENDPOINT" | sed 's|https://||')
use_https = True
EOF
s3() {
s3cmd -c "$S3CMD_CONFIG" "$@"
}
else
# Fall back to default ~/.s3cfg
S3CMD_CONFIG=""
s3() {
s3cmd "$@"
}
fi
# Sync existing repo from S3
echo "Syncing existing repo from s3://${BUCKET}/ ..."
s3 sync --no-mime-magic "s3://${BUCKET}/" "$REPO_DIR/" 2>/dev/null || true
# Collect all .deb files from arguments
DEB_FILES=()
for arg in "$@"; do
if [ -d "$arg" ]; then
while IFS= read -r -d '' f; do
DEB_FILES+=("$f")
done < <(find "$arg" -name '*.deb' -print0)
elif [ -f "$arg" ]; then
DEB_FILES+=("$arg")
else
echo "Warning: $arg is not a file or directory, skipping" >&2
fi
done
if [ ${#DEB_FILES[@]} -eq 0 ]; then
echo "No .deb files found" >&2
exit 1
fi
# Copy each deb to the pool, renaming to standard format
for deb in "${DEB_FILES[@]}"; do
PKG_NAME="$(dpkg-deb --field "$deb" Package)"
POOL_DIR="$REPO_DIR/pool/${COMPONENT}/${PKG_NAME:0:1}/${PKG_NAME}"
mkdir -p "$POOL_DIR"
cp "$deb" "$POOL_DIR/"
dpkg-name -o "$POOL_DIR/$(basename "$deb")" 2>/dev/null || true
echo "Added: $(basename "$deb") -> pool/${COMPONENT}/${PKG_NAME:0:1}/${PKG_NAME}/"
done
# Generate Packages indices for each architecture
for arch in amd64 arm64 riscv64; do
BINARY_DIR="$REPO_DIR/dists/${SUITE}/${COMPONENT}/binary-${arch}"
mkdir -p "$BINARY_DIR"
(
cd "$REPO_DIR"
dpkg-scanpackages --arch "$arch" pool/ > "$BINARY_DIR/Packages"
gzip -k -f "$BINARY_DIR/Packages"
)
echo "Generated Packages index for ${arch}"
done
# Generate Release file
(
cd "$REPO_DIR/dists/${SUITE}"
apt-ftparchive release \
-o "APT::FTPArchive::Release::Origin=Start9" \
-o "APT::FTPArchive::Release::Label=Start9" \
-o "APT::FTPArchive::Release::Suite=${SUITE}" \
-o "APT::FTPArchive::Release::Codename=${SUITE}" \
-o "APT::FTPArchive::Release::Architectures=amd64 arm64 riscv64" \
-o "APT::FTPArchive::Release::Components=${COMPONENT}" \
. > Release
)
echo "Generated Release file"
# Sign if GPG key is available
if [ -n "$GPG_KEY_ID" ]; then
(
cd "$REPO_DIR/dists/${SUITE}"
gpg --default-key "$GPG_KEY_ID" --batch --yes --detach-sign -o Release.gpg Release
gpg --default-key "$GPG_KEY_ID" --batch --yes --clearsign -o InRelease Release
)
echo "Signed Release file with key ${GPG_KEY_ID}"
else
echo "Warning: GPG_KEY_ID not set, Release file is unsigned" >&2
fi
# Upload to S3
echo "Uploading to s3://${BUCKET}/ ..."
s3 sync --acl-public --no-mime-magic "$REPO_DIR/" "s3://${BUCKET}/"
[ -n "$S3CMD_CONFIG" ] && rm -f "$S3CMD_CONFIG"
echo "Done."

build/build-cargo-dep.sh Executable file

@@ -0,0 +1,26 @@
#!/bin/bash
cd "$(dirname "${BASH_SOURCE[0]}")/.."
set -e
shopt -s expand_aliases
if [ -z "$ARCH" ]; then
ARCH=$(uname -m)
fi
RUST_ARCH="$ARCH"
if [ "$ARCH" = "riscv64" ]; then
RUST_ARCH="riscv64gc"
fi
mkdir -p target
source core/build/builder-alias.sh
RUSTFLAGS="-C target-feature=+crt-static"
rust-zig-builder cargo-zigbuild install $* --target-dir /workdir/target/ --target=$RUST_ARCH-unknown-linux-musl
if [ "$(ls -nd "target/$RUST_ARCH-unknown-linux-musl/release/${!#}" | awk '{ print $3 }')" != "$UID" ]; then
rust-zig-builder sh -c "chown -R $UID:$UID target && chown -R $UID:$UID /usr/local/cargo"
fi
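Note: the Makefile hunks above invoke this script as, e.g.:

  ARCH=aarch64 ./build/build-cargo-dep.sh pi-beep
  ARCH=x86_64 ./build/build-cargo-dep.sh --git https://github.com/Start9Labs/start-fs.git startos-backup-fs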


@@ -11,13 +11,13 @@ if [ -z "$PLATFORM" ]; then
exit 1
fi
rm -rf ./firmware/$PLATFORM
mkdir -p ./firmware/$PLATFORM
rm -rf ./lib/firmware/$PLATFORM
mkdir -p ./lib/firmware/$PLATFORM
cd ./firmware/$PLATFORM
cd ./lib/firmware/$PLATFORM
firmwares=()
while IFS= read -r line; do firmwares+=("$line"); done < <(jq -c ".[] | select(.platform[] | contains(\"$PLATFORM\"))" ../../build/lib/firmware.json)
while IFS= read -r line; do firmwares+=("$line"); done < <(jq -c ".[] | select(.platform[] | contains(\"$PLATFORM\"))" ../../firmware.json)
for firmware in "${firmwares[@]}"; do
if [ -n "$firmware" ]; then
id=$(echo "$firmware" | jq --raw-output '.id')


@@ -3,22 +3,25 @@ avahi-utils
b3sum
bash-completion
beep
binfmt-support
bmon
btrfs-progs
ca-certificates
cifs-utils
conntrack
cryptsetup
curl
dnsutils
dmidecode
dnsutils
dosfstools
e2fsprogs
ecryptfs-utils
equivs
exfatprogs
flashrom
fuse3
grub-common
grub-efi
htop
httpdirfs
iotop
@@ -41,9 +44,9 @@ nvme-cli
nyx
openssh-server
podman
postgresql
psmisc
qemu-guest-agent
qemu-user-static
rfkill
rsync
samba-common-bin
@@ -52,6 +55,7 @@ socat
sqlite3
squashfs-tools
squashfs-tools-ng
ssl-cert
sudo
systemd
systemd-resolved


@@ -0,0 +1 @@
+ nmap


@@ -5,11 +5,22 @@ set -e
cd "$(dirname "${BASH_SOURCE[0]}")"
IFS="-" read -ra FEATURES <<< "$ENVIRONMENT"
FEATURES+=("${ARCH}")
if [ "$ARCH" != "$PLATFORM" ]; then
FEATURES+=("${PLATFORM}")
fi
if [[ "$PLATFORM" =~ -nonfree$ ]]; then
FEATURES+=("nonfree")
fi
if [[ "$PLATFORM" =~ -nvidia$ ]]; then
FEATURES+=("nonfree")
FEATURES+=("nvidia")
fi
feature_file_checker='
/^#/ { next }
/^\+ [a-z0-9-]+$/ { next }
/^- [a-z0-9-]+$/ { next }
/^\+ [a-z0-9.-]+$/ { next }
/^- [a-z0-9.-]+$/ { next }
{ exit 1 }
'
@@ -30,8 +41,8 @@ for type in conflicts depends; do
for feature in ${FEATURES[@]}; do
file="$feature.$type"
if [ -f $file ]; then
if grep "^- $pkg$" $file; then
SKIP=1
if grep "^- $pkg$" $file > /dev/null; then
SKIP=yes
fi
fi
done


@@ -0,0 +1,7 @@
+ firmware-amd-graphics
+ firmware-atheros
+ firmware-brcm80211
+ firmware-iwlwifi
+ firmware-libertas
+ firmware-misc-nonfree
+ firmware-realtek


@@ -0,0 +1 @@
+ nvidia-container-toolkit


@@ -0,0 +1,10 @@
- grub-efi
+ parted
+ raspberrypi-net-mods
+ raspberrypi-sys-mods
+ raspi-config
+ raspi-firmware
+ raspi-utils
+ rpi-eeprom
+ rpi-update
+ rpi.gpio-common


@@ -0,0 +1 @@
+ grub-pc-bin


@@ -1,5 +1,7 @@
#!/bin/bash
PROJECT=${PROJECT:-"startos"}
cd "$(dirname "${BASH_SOURCE[0]}")"
PLATFORM="$(if [ -f ./PLATFORM.txt ]; then cat ./PLATFORM.txt; else echo unknown; fi)"
@@ -16,4 +18,4 @@ if [ -n "$STARTOS_ENV" ]; then
VERSION_FULL="$VERSION_FULL~${STARTOS_ENV}"
fi
echo -n "startos-${VERSION_FULL}_${PLATFORM}"
echo -n "${PROJECT}-${VERSION_FULL}_${PLATFORM}"


@@ -1,8 +1,10 @@
#!/bin/bash
cd "$(dirname "${BASH_SOURCE[0]}")"
if ! [ -f ./ENVIRONMENT.txt ] || [ "$(cat ./ENVIRONMENT.txt)" != "$ENVIRONMENT" ]; then
>&2 echo "Updating ENVIRONMENT.txt to \"$ENVIRONMENT\""
echo -n "$ENVIRONMENT" > ./ENVIRONMENT.txt
fi
echo -n ./ENVIRONMENT.txt
echo -n ./build/env/ENVIRONMENT.txt


@@ -1,5 +1,7 @@
#!/bin/bash
cd "$(dirname "${BASH_SOURCE[0]}")"
if [ "$GIT_BRANCH_AS_HASH" != 1 ]; then
GIT_HASH="$(git rev-parse HEAD)$(if ! git diff-index --quiet HEAD --; then echo '-modified'; fi)"
else
@@ -11,4 +13,4 @@ if ! [ -f ./GIT_HASH.txt ] || [ "$(cat ./GIT_HASH.txt)" != "$GIT_HASH" ]; then
echo -n "$GIT_HASH" > ./GIT_HASH.txt
fi
echo -n ./GIT_HASH.txt
echo -n ./build/env/GIT_HASH.txt


@@ -1,8 +1,10 @@
#!/bin/bash
cd "$(dirname "${BASH_SOURCE[0]}")"
if ! [ -f ./PLATFORM.txt ] || [ "$(cat ./PLATFORM.txt)" != "$PLATFORM" ] && [ -n "$PLATFORM" ]; then
>&2 echo "Updating PLATFORM.txt to \"$PLATFORM\""
echo -n "$PLATFORM" > ./PLATFORM.txt
fi
echo -n ./PLATFORM.txt
echo -n ./build/env/PLATFORM.txt


@@ -1,6 +1,8 @@
#!/bin/bash
FE_VERSION="$(cat web/package.json | grep '"version"' | sed 's/[ \t]*"version":[ \t]*"\([^"]*\)",/\1/')"
cd "$(dirname "${BASH_SOURCE[0]}")"
FE_VERSION="$(cat ../../web/package.json | grep '"version"' | sed 's/[ \t]*"version":[ \t]*"\([^"]*\)",/\1/')"
# TODO: Validate other version sources - backend/Cargo.toml, backend/src/version/mod.rs
@@ -10,4 +12,4 @@ if ! [ -f ./VERSION.txt ] || [ "$(cat ./VERSION.txt)" != "$VERSION" ]; then
echo -n "$VERSION" > ./VERSION.txt
fi
echo -n ./VERSION.txt
echo -n ./build/env/VERSION.txt


@@ -0,0 +1,35 @@
ARG SUITE=trixie
FROM debian:${SUITE}
ENV DEBIAN_FRONTEND=noninteractive
RUN apt-get update && \
apt-get install -yq \
live-build \
procps \
binfmt-support \
qemu-utils \
qemu-user-static \
xorriso \
isolinux \
ca-certificates \
curl \
wget \
gpg \
git \
fdisk \
dosfstools \
e2fsprogs \
squashfs-tools \
rsync \
b3sum \
dpkg-dev
COPY binary_grub-efi.patch /root/binary_grub-efi.patch
RUN patch /usr/lib/live/build/binary_grub-efi < /root/binary_grub-efi.patch && rm /root/binary_grub-efi.patch
RUN echo 'retry_connrefused = on' > /etc/wgetrc && \
echo 'tries = 100' >> /etc/wgetrc
WORKDIR /root


@@ -0,0 +1,47 @@
--- /usr/lib/live/build/binary_grub-efi 2024-05-25 05:22:52.000000000 -0600
+++ binary_grub-efi 2025-10-16 13:04:32.338740922 -0600
@@ -54,6 +54,8 @@
armhf)
Check_package chroot /usr/lib/grub/arm-efi/configfile.mod grub-efi-arm-bin
;;
+ riscv64)
+ Check_package chroot /usr/lib/grub/riscv64-efi/configfile.mod grub-efi-riscv64-bin
esac
Check_package chroot /usr/bin/grub-mkimage grub-common
Check_package chroot /usr/bin/mcopy mtools
@@ -136,7 +138,7 @@
esac
# Cleanup files that we generate
-rm -rf binary/boot/efi.img binary/boot/grub/i386-efi/ binary/boot/grub/x86_64-efi binary/boot/grub/arm64-efi binary/boot/grub/arm-efi
+rm -rf binary/boot/efi.img binary/boot/grub/i386-efi/ binary/boot/grub/x86_64-efi binary/boot/grub/arm64-efi binary/boot/grub/arm-efi binary/boot/grub/riscv64-efi
# This is workaround till both efi-image and grub-cpmodules are put into a binary package
case "${LB_BUILD_WITH_CHROOT}" in
@@ -243,6 +245,10 @@
gen_efi_boot_img "arm-efi" "arm" "debian-live/arm"
PATH="\${PRE_EFI_IMAGE_PATH}"
;;
+ riscv64)
+ gen_efi_boot_img "riscv64-efi" "riscv64" "debian-live/riscv64"
+ PATH="\${PRE_EFI_IMAGE_PATH}"
+ ;;
esac
@@ -324,6 +330,7 @@
rm -f chroot/grub-efi-temp/bootnetx64.efi
rm -f chroot/grub-efi-temp/bootnetaa64.efi
rm -f chroot/grub-efi-temp/bootnetarm.efi
+rm -f chroot/grub-efi-temp/bootnetriscv64.efi
mkdir -p binary
cp -a chroot/grub-efi-temp/* binary/
@@ -331,6 +338,7 @@
rm -rf chroot/grub-efi-temp-i386-efi
rm -rf chroot/grub-efi-temp-arm64-efi
rm -rf chroot/grub-efi-temp-arm-efi
+rm -rf chroot/grub-efi-temp-riscv64-efi
rm -rf chroot/grub-efi-temp-cfg
rm -rf chroot/grub-efi-temp

build/image-recipe/build.sh Executable file

@@ -0,0 +1,456 @@
#!/bin/bash
set -e
MAX_IMG_LEN=$((4 * 1024 * 1024 * 1024)) # 4GB
echo "==== StartOS Image Build ===="
echo "Building for architecture: $IB_TARGET_ARCH"
SOURCE_DIR="$(realpath $(dirname "${BASH_SOURCE[0]}"))"
base_dir="$(pwd -P)"
prep_results_dir="$base_dir/images-prep"
RESULTS_DIR="$base_dir/results"
echo "Saving results in: $RESULTS_DIR"
DEB_PATH="$base_dir/$1"
VERSION="$(dpkg-deb --fsys-tarfile $DEB_PATH | tar --to-stdout -xvf - ./usr/lib/startos/VERSION.txt)"
GIT_HASH="$(dpkg-deb --fsys-tarfile $DEB_PATH | tar --to-stdout -xvf - ./usr/lib/startos/GIT_HASH.txt)"
if [[ "$GIT_HASH" =~ ^@ ]]; then
GIT_HASH="unknown"
else
GIT_HASH="$(echo -n "$GIT_HASH" | head -c 7)"
fi
IB_OS_ENV="$(dpkg-deb --fsys-tarfile $DEB_PATH | tar --to-stdout -xvf - ./usr/lib/startos/ENVIRONMENT.txt)"
IB_TARGET_PLATFORM="$(dpkg-deb --fsys-tarfile $DEB_PATH | tar --to-stdout -xvf - ./usr/lib/startos/PLATFORM.txt)"
VERSION_FULL="${VERSION}-${GIT_HASH}"
if [ -n "$IB_OS_ENV" ]; then
VERSION_FULL="$VERSION_FULL~${IB_OS_ENV}"
fi
IMAGE_BASENAME=startos-${VERSION_FULL}_${IB_TARGET_PLATFORM}
BOOTLOADERS=grub-efi
if [ "$IB_TARGET_PLATFORM" = "x86_64" ] || [ "$IB_TARGET_PLATFORM" = "x86_64-nonfree" ] || [ "$IB_TARGET_PLATFORM" = "x86_64-nvidia" ]; then
IB_TARGET_ARCH=amd64
QEMU_ARCH=x86_64
BOOTLOADERS=grub-efi,syslinux
elif [ "$IB_TARGET_PLATFORM" = "aarch64" ] || [ "$IB_TARGET_PLATFORM" = "aarch64-nonfree" ] || [ "$IB_TARGET_PLATFORM" = "aarch64-nvidia" ] || [ "$IB_TARGET_PLATFORM" = "raspberrypi" ] || [ "$IB_TARGET_PLATFORM" = "rockchip64" ]; then
IB_TARGET_ARCH=arm64
QEMU_ARCH=aarch64
elif [ "$IB_TARGET_PLATFORM" = "riscv64" ] || [ "$IB_TARGET_PLATFORM" = "riscv64-nonfree" ]; then
IB_TARGET_ARCH=riscv64
QEMU_ARCH=riscv64
else
IB_TARGET_ARCH="$IB_TARGET_PLATFORM"
QEMU_ARCH="$IB_TARGET_PLATFORM"
fi
QEMU_ARGS=()
if [ "$QEMU_ARCH" != $(uname -m) ]; then
QEMU_ARGS+=(--bootstrap-qemu-arch ${IB_TARGET_ARCH})
QEMU_ARGS+=(--bootstrap-qemu-static /usr/bin/qemu-${QEMU_ARCH}-static)
fi
mkdir -p $prep_results_dir
cd $prep_results_dir
NON_FREE=
if [[ "${IB_TARGET_PLATFORM}" =~ -nonfree$ ]] || [[ "${IB_TARGET_PLATFORM}" =~ -nvidia$ ]] || [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
NON_FREE=1
fi
NVIDIA=
if [[ "${IB_TARGET_PLATFORM}" =~ -nvidia$ ]]; then
NVIDIA=1
fi
IMAGE_TYPE=iso
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ] || [ "${IB_TARGET_PLATFORM}" = "rockchip64" ]; then
IMAGE_TYPE=img
fi
ARCHIVE_AREAS="main contrib"
if [ "$NON_FREE" = 1 ]; then
if [ "$IB_SUITE" = "bullseye" ]; then
ARCHIVE_AREAS="$ARCHIVE_AREAS non-free"
else
ARCHIVE_AREAS="$ARCHIVE_AREAS non-free non-free-firmware"
fi
fi
PLATFORM_CONFIG_EXTRAS=()
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
PLATFORM_CONFIG_EXTRAS+=( --firmware-binary false )
PLATFORM_CONFIG_EXTRAS+=( --firmware-chroot false )
RPI_KERNEL_VERSION=6.12.47+rpt
PLATFORM_CONFIG_EXTRAS+=( --linux-packages linux-image-$RPI_KERNEL_VERSION )
PLATFORM_CONFIG_EXTRAS+=( --linux-flavours "rpi-v8 rpi-2712" )
elif [ "${IB_TARGET_PLATFORM}" = "rockchip64" ]; then
PLATFORM_CONFIG_EXTRAS+=( --linux-flavours rockchip64 )
elif [ "${IB_TARGET_ARCH}" = "riscv64" ]; then
PLATFORM_CONFIG_EXTRAS+=( --uefi-secure-boot=disable )
fi
cat > /etc/wgetrc << EOF
retry_connrefused = on
tries = 100
EOF
lb config \
--iso-application "StartOS v${VERSION_FULL} ${IB_TARGET_ARCH}" \
--iso-volume "StartOS v${VERSION} ${IB_TARGET_ARCH}" \
--iso-preparer "START9 LABS; HTTPS://START9.COM" \
--iso-publisher "START9 LABS; HTTPS://START9.COM" \
--backports true \
--bootappend-live "boot=live noautologin console=tty0" \
--bootloaders $BOOTLOADERS \
--cache false \
--mirror-bootstrap "https://deb.debian.org/debian/" \
--mirror-chroot "https://deb.debian.org/debian/" \
--mirror-chroot-security "https://security.debian.org/debian-security" \
-d ${IB_SUITE} \
-a ${IB_TARGET_ARCH} \
${QEMU_ARGS[@]} \
--archive-areas "${ARCHIVE_AREAS}" \
${PLATFORM_CONFIG_EXTRAS[@]}
# Overlays
mkdir -p config/packages.chroot/
cp $RESULTS_DIR/$IMAGE_BASENAME.deb config/packages.chroot/
dpkg-name config/packages.chroot/*.deb
mkdir -p config/includes.chroot/etc
echo start > config/includes.chroot/etc/hostname
cat > config/includes.chroot/etc/hosts << EOT
127.0.0.1 localhost start
::1 localhost start ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
EOT
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
mkdir -p config/includes.chroot
git clone --depth=1 --branch=stable https://github.com/raspberrypi/rpi-firmware.git config/includes.chroot/boot
rm -rf config/includes.chroot/boot/.git config/includes.chroot/boot/modules
rsync -rLp $SOURCE_DIR/raspberrypi/squashfs/ config/includes.chroot/
fi
# Bootloaders
rm -rf config/bootloaders
cp -r /usr/share/live/build/bootloaders config/bootloaders
cat > config/bootloaders/syslinux/syslinux.cfg << EOF
include menu.cfg
default vesamenu.c32
prompt 0
timeout 50
EOF
cat > config/bootloaders/isolinux/isolinux.cfg << EOF
include menu.cfg
default vesamenu.c32
prompt 0
timeout 50
EOF
# Extract splash.png from the deb package
dpkg-deb --fsys-tarfile $DEB_PATH | tar --to-stdout -xf - ./usr/lib/startos/splash.png > /tmp/splash.png
cp /tmp/splash.png config/bootloaders/syslinux_common/splash.png
cp /tmp/splash.png config/bootloaders/isolinux/splash.png
cp /tmp/splash.png config/bootloaders/grub-pc/splash.png
rm /tmp/splash.png
sed -i -e '2i set timeout=5' config/bootloaders/grub-pc/config.cfg
# Archives
mkdir -p config/archives
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
curl -fsSL https://archive.raspberrypi.com/debian/raspberrypi.gpg.key | gpg --dearmor -o config/archives/raspi.key
echo "deb [arch=${IB_TARGET_ARCH} signed-by=/etc/apt/trusted.gpg.d/raspi.key.gpg] https://archive.raspberrypi.com/debian/ ${IB_SUITE} main" > config/archives/raspi.list
fi
if [ "${IB_TARGET_PLATFORM}" = "rockchip64" ]; then
curl -fsSL https://apt.armbian.com/armbian.key | gpg --dearmor -o config/archives/armbian.key
echo "deb https://apt.armbian.com/ ${IB_SUITE} main" > config/archives/armbian.list
fi
if [ "$NVIDIA" = 1 ]; then
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | gpg --dearmor -o config/archives/nvidia-container-toolkit.key
curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list \
| sed 's#deb https://#deb [signed-by=/etc/apt/trusted.gpg.d/nvidia-container-toolkit.key.gpg] https://#g' \
> config/archives/nvidia-container-toolkit.list
fi
cat > config/archives/backports.pref <<-EOF
Package: linux-image-*
Pin: release n=${IB_SUITE}-backports
Pin-Priority: 500
Package: linux-headers-*
Pin: release n=${IB_SUITE}-backports
Pin-Priority: 500
Package: *nvidia*
Pin: release n=${IB_SUITE}-backports
Pin-Priority: 500
EOF
# Hooks
cat > config/hooks/normal/9000-install-startos.hook.chroot << EOF
#!/bin/bash
set -e
if [ "${NVIDIA}" = "1" ]; then
# install a specific NVIDIA driver version
# ---------------- configuration ----------------
NVIDIA_DRIVER_VERSION="\${NVIDIA_DRIVER_VERSION:-580.126.09}"
BASE_URL="https://download.nvidia.com/XFree86/Linux-${QEMU_ARCH}"
echo "[nvidia-hook] Using NVIDIA driver: \${NVIDIA_DRIVER_VERSION}" >&2
# ---------------- kernel version ----------------
# Determine target kernel version from newest /boot/vmlinuz-* in the chroot.
KVER="\$(
ls -1t /boot/vmlinuz-* 2>/dev/null \
| head -n1 \
| sed 's|.*/vmlinuz-||'
)"
if [ -z "\${KVER}" ]; then
echo "[nvidia-hook] ERROR: no /boot/vmlinuz-* found; cannot determine kernel version" >&2
exit 1
fi
echo "[nvidia-hook] Target kernel version: \${KVER}" >&2
# Ensure kernel headers are present
TEMP_APT_DEPS=(build-essential)
if [ ! -e "/lib/modules/\${KVER}/build" ]; then
TEMP_APT_DEPS+=(linux-headers-\${KVER})
fi
echo "[nvidia-hook] Installing build dependencies" >&2
/usr/lib/startos/scripts/install-equivs <<-EOF
Package: nvidia-depends
Version: \${NVIDIA_DRIVER_VERSION}
Section: unknown
Priority: optional
Depends: \${dep_list="\$(IFS=', '; echo "\${TEMP_APT_DEPS[*]}")"}
EOF
# ---------------- download and run installer ----------------
RUN_NAME="NVIDIA-Linux-${QEMU_ARCH}-\${NVIDIA_DRIVER_VERSION}.run"
RUN_PATH="/root/\${RUN_NAME}"
RUN_URL="\${BASE_URL}/\${NVIDIA_DRIVER_VERSION}/\${RUN_NAME}"
echo "[nvidia-hook] Downloading \${RUN_URL}" >&2
wget -O "\${RUN_PATH}" "\${RUN_URL}"
chmod +x "\${RUN_PATH}"
echo "[nvidia-hook] Running NVIDIA installer for kernel \${KVER}" >&2
if ! sh "\${RUN_PATH}" \
--silent \
--kernel-name="\${KVER}" \
--no-x-check \
--no-nouveau-check \
--no-runlevel-check; then
cat /var/log/nvidia-installer.log
exit 1
fi
# Rebuild module metadata
echo "[nvidia-hook] Running depmod for \${KVER}" >&2
depmod -a "\${KVER}"
echo "[nvidia-hook] NVIDIA \${NVIDIA_DRIVER_VERSION} installation complete for kernel \${KVER}" >&2
echo "[nvidia-hook] Removing build dependencies..." >&2
apt-get purge -y nvidia-depends
apt-get autoremove -y
echo "[nvidia-hook] Removed build dependencies." >&2
fi
cp /etc/resolv.conf /etc/resolv.conf.bak
if [ "${IB_SUITE}" = trixie ] && [ "${IB_TARGET_ARCH}" != riscv64 ]; then
echo 'deb https://deb.debian.org/debian/ bookworm main' > /etc/apt/sources.list.d/bookworm.list
apt-get update
apt-get install -y postgresql-15
rm /etc/apt/sources.list.d/bookworm.list
apt-get update
systemctl mask postgresql
fi
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
ln -sf /usr/bin/pi-beep /usr/local/bin/beep
KERNEL_VERSION=${RPI_KERNEL_VERSION} sh /boot/config.sh > /boot/config.txt
mkinitramfs -c gzip -o /boot/initrd.img-${RPI_KERNEL_VERSION}-rpi-v8 ${RPI_KERNEL_VERSION}-rpi-v8
mkinitramfs -c gzip -o /boot/initrd.img-${RPI_KERNEL_VERSION}-rpi-2712 ${RPI_KERNEL_VERSION}-rpi-2712
fi
useradd --shell /bin/bash -G startos -m start9
echo start9:embassy | chpasswd
usermod -aG sudo start9
usermod -aG systemd-journal start9
echo "start9 ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee "/etc/sudoers.d/010_start9-nopasswd"
if [ "${IB_TARGET_PLATFORM}" != "raspberrypi" ]; then
/usr/lib/startos/scripts/enable-kiosk
fi
if ! [[ "${IB_OS_ENV}" =~ (^|-)dev($|-) ]]; then
passwd -l start9
fi
EOF
SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(date '+%s')}"
if lb bootstrap; then
true
else
EXIT=$?
cat ./chroot/debootstrap/debootstrap.log
exit $EXIT
fi
lb chroot
lb installer
lb binary_chroot
lb chroot_prep install all mode-apt-install-binary mode-archives-chroot
mv chroot/chroot/etc/resolv.conf.bak chroot/chroot/etc/resolv.conf
lb binary_rootfs
cp $prep_results_dir/binary/live/filesystem.squashfs $RESULTS_DIR/$IMAGE_BASENAME.squashfs
if [ "${IMAGE_TYPE}" = iso ]; then
lb binary_manifest
lb binary_package-lists
lb binary_linux-image
lb binary_memtest
lb binary_grub-legacy
lb binary_grub-pc
lb binary_grub_cfg
lb binary_syslinux
lb binary_disk
lb binary_loadlin
lb binary_win32-loader
lb binary_includes
lb binary_grub-efi
lb binary_hooks
lb binary_checksums
find binary -newermt "$(date -d@${SOURCE_DATE_EPOCH} '+%Y-%m-%d %H:%M:%S')" -printf "%y %p\n" -exec touch '{}' -d@${SOURCE_DATE_EPOCH} --no-dereference ';' > binary.modified_timestamps
lb binary_iso
lb binary_onie
lb binary_netboot
lb binary_tar
lb binary_hdd
lb binary_zsync
lb chroot_prep remove all mode-archives-chroot
lb source
mv $prep_results_dir/live-image-${IB_TARGET_ARCH}.hybrid.iso $RESULTS_DIR/$IMAGE_BASENAME.iso
elif [ "${IMAGE_TYPE}" = img ]; then
SECTOR_LEN=512
BOOT_START=$((1024 * 1024)) # 1MiB
BOOT_LEN=$((512 * 1024 * 1024)) # 512MiB
BOOT_END=$((BOOT_START + BOOT_LEN - 1))
ROOT_START=$((BOOT_END + 1))
ROOT_LEN=$((MAX_IMG_LEN - ROOT_START))
ROOT_END=$((MAX_IMG_LEN - 1))
TARGET_NAME=$prep_results_dir/${IMAGE_BASENAME}.img
truncate -s $MAX_IMG_LEN $TARGET_NAME
sfdisk $TARGET_NAME <<-EOF
label: dos
label-id: 0xcb15ae4d
unit: sectors
sector-size: 512
${TARGET_NAME}1 : start=$((BOOT_START / SECTOR_LEN)), size=$((BOOT_LEN / SECTOR_LEN)), type=c, bootable
${TARGET_NAME}2 : start=$((ROOT_START / SECTOR_LEN)), size=$((ROOT_LEN / SECTOR_LEN)), type=83
EOF
BOOT_DEV=$(losetup --show -f --offset $BOOT_START --sizelimit $BOOT_LEN $TARGET_NAME)
ROOT_DEV=$(losetup --show -f --offset $ROOT_START --sizelimit $ROOT_LEN $TARGET_NAME)
mkfs.vfat -F32 $BOOT_DEV
mkfs.ext4 $ROOT_DEV
TMPDIR=$(mktemp -d)
mkdir -p $TMPDIR/boot $TMPDIR/root
mount $ROOT_DEV $TMPDIR/root
mount $BOOT_DEV $TMPDIR/boot
unsquashfs -n -f -d $TMPDIR $prep_results_dir/binary/live/filesystem.squashfs boot
mkdir $TMPDIR/root/images $TMPDIR/root/config
B3SUM=$(b3sum $prep_results_dir/binary/live/filesystem.squashfs | head -c 16)
cp $prep_results_dir/binary/live/filesystem.squashfs $TMPDIR/root/images/$B3SUM.rootfs
ln -rsf $TMPDIR/root/images/$B3SUM.rootfs $TMPDIR/root/config/current.rootfs
mkdir -p $TMPDIR/next $TMPDIR/lower $TMPDIR/root/config/work $TMPDIR/root/config/overlay
mount $TMPDIR/root/config/current.rootfs $TMPDIR/lower
mount -t overlay -o lowerdir=$TMPDIR/lower,workdir=$TMPDIR/root/config/work,upperdir=$TMPDIR/root/config/overlay overlay $TMPDIR/next
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
sed -i 's| boot=startos| boot=startos init=/usr/lib/startos/scripts/init_resize\.sh|' $TMPDIR/boot/cmdline.txt
rsync -a $SOURCE_DIR/raspberrypi/img/ $TMPDIR/next/
fi
umount $TMPDIR/next
umount $TMPDIR/lower
umount $TMPDIR/boot
umount $TMPDIR/root
e2fsck -fy $ROOT_DEV
resize2fs -M $ROOT_DEV
BLOCK_COUNT=$(dumpe2fs -h $ROOT_DEV | awk '/^Block count:/ { print $3 }')
BLOCK_SIZE=$(dumpe2fs -h $ROOT_DEV | awk '/^Block size:/ { print $3 }')
ROOT_LEN=$((BLOCK_COUNT * BLOCK_SIZE))
losetup -d $ROOT_DEV
losetup -d $BOOT_DEV
# Recreate partition 2 with the new size using sfdisk
sfdisk $TARGET_NAME <<-EOF
label: dos
label-id: 0xcb15ae4d
unit: sectors
sector-size: 512
${TARGET_NAME}1 : start=$((BOOT_START / SECTOR_LEN)), size=$((BOOT_LEN / SECTOR_LEN)), type=c, bootable
${TARGET_NAME}2 : start=$((ROOT_START / SECTOR_LEN)), size=$((ROOT_LEN / SECTOR_LEN)), type=83
EOF
TARGET_SIZE=$((ROOT_START + ROOT_LEN))
truncate -s $TARGET_SIZE $TARGET_NAME
mv $TARGET_NAME $RESULTS_DIR/$IMAGE_BASENAME.img
fi
chown $IB_UID:$IB_UID $RESULTS_DIR/$IMAGE_BASENAME.*


@@ -1 +1 @@
usb-storage.quirks=152d:0562:u,14cd:121c:u,0781:cfcb:u console=serial0,115200 console=tty1 root=PARTUUID=cb15ae4d-02 rootfstype=ext4 fsck.repair=yes rootwait cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory quiet boot=startos
usb-storage.quirks=152d:0562:u,14cd:121c:u,0781:cfcb:u console=serial0,115200 console=tty1 root=PARTUUID=cb15ae4d-02 rootfstype=ext4 fsck.repair=yes rootwait cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory boot=startos


@@ -0,0 +1,46 @@
#!/bin/sh
cat << EOF
# Enable audio (loads snd_bcm2835)
dtparam=audio=on
# Automatically load overlays for detected cameras
camera_auto_detect=1
# Automatically load overlays for detected DSI displays
display_auto_detect=1
# Enable DRM VC4 V3D driver
dtoverlay=vc4-kms-v3d
max_framebuffers=2
# Run in 64-bit mode
arm_64bit=1
# Disable compensation for displays with overscan
disable_overscan=1
[cm4]
# Enable host mode on the 2711 built-in XHCI USB controller.
# This line should be removed if the legacy DWC2 controller is required
# (e.g. for USB device mode) or if USB support is not required.
otg_mode=1
[all]
[pi4]
# Run as fast as firmware / board allows
arm_boost=1
kernel=vmlinuz-${KERNEL_VERSION}-rpi-v8
initramfs initrd.img-${KERNEL_VERSION}-rpi-v8 followkernel
[pi5]
kernel=vmlinuz-${KERNEL_VERSION}-rpi-2712
initramfs initrd.img-${KERNEL_VERSION}-rpi-2712 followkernel
[all]
gpu_mem=16
dtoverlay=pwm-2chan,disable-bt
EOF
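Note: build.sh above renders this template inside the raspberrypi chroot hook:

  KERNEL_VERSION=${RPI_KERNEL_VERSION} sh /boot/config.sh > /boot/config.txt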


@@ -0,0 +1,35 @@
#!/bin/bash
set -e
cd "$(dirname "${BASH_SOURCE[0]}")/../.."
BASEDIR="$(pwd -P)"
SUITE=trixie
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
dockerfile_hash=$(sha256sum ${BASEDIR}/build/image-recipe/Dockerfile | head -c 7)
docker_img_name="start9/build-iso:${SUITE}-${dockerfile_hash}"
platform=linux/${ARCH}
case $ARCH in
x86_64)
platform=linux/amd64;;
aarch64)
platform=linux/arm64;;
esac
if ! docker run --rm --platform=$platform "${docker_img_name}" true 2> /dev/null; then
docker buildx build --load --platform=$platform --build-arg=SUITE=${SUITE} -t "${docker_img_name}" ./build/image-recipe
fi
docker run $USE_TTY --rm --platform=$platform --privileged -v "$(pwd)/build/image-recipe:/root/image-recipe" -v "$(pwd)/results:/root/results" \
-e IB_SUITE="$SUITE" \
-e IB_UID="$UID" \
-e IB_INCLUDE \
"${docker_img_name}" /root/image-recipe/build.sh $@


@@ -0,0 +1,51 @@
desktop-image: "../splash.png"
title-color: "#ffffff"
title-font: "Unifont Regular 16"
title-text: "StartOS Boot Menu with GRUB"
message-font: "Unifont Regular 16"
terminal-font: "Unifont Regular 16"
#help bar at the bottom
+ label {
top = 100%-50
left = 0
width = 100%
height = 20
text = "@KEYMAP_SHORT@"
align = "center"
color = "#ffffff"
font = "Unifont Regular 16"
}
#boot menu
+ boot_menu {
left = 10%
width = 80%
top = 52%
height = 48%-80
item_color = "#a8a8a8"
item_font = "Unifont Regular 16"
selected_item_color= "#ffffff"
selected_item_font = "Unifont Regular 16"
item_height = 16
item_padding = 0
item_spacing = 4
icon_width = 0
icon_height = 0
item_icon_space = 0
}
#progress bar
+ progress_bar {
id = "__timeout__"
left = 15%
top = 100%-80
height = 16
width = 70%
font = "Unifont Regular 16"
text_color = "#000000"
fg_color = "#ffffff"
bg_color = "#a8a8a8"
border_color = "#ffffff"
text = "@TIMEOUT_NOTIFICATION_LONG@"
}


@@ -4,7 +4,7 @@ parse_essential_db_info() {
DB_DUMP="/tmp/startos_db.json"
if command -v start-cli >/dev/null 2>&1; then
start-cli db dump > "$DB_DUMP" 2>/dev/null || return 1
timeout 30 start-cli db dump > "$DB_DUMP" 2>/dev/null || return 1
else
return 1
fi
@@ -22,7 +22,7 @@ parse_essential_db_info() {
RAM_GB="unknown"
fi
RUNNING_SERVICES=$(jq -r '[.value.packageData[] | select(.status.main == "running")] | length' "$DB_DUMP" 2>/dev/null)
RUNNING_SERVICES=$(jq -r '[.value.packageData[] | select(.statusInfo.started != null)] | length' "$DB_DUMP" 2>/dev/null)
TOTAL_SERVICES=$(jq -r '.value.packageData | length' "$DB_DUMP" 2>/dev/null)
rm -f "$DB_DUMP"


@@ -10,24 +10,24 @@ fi
POSITIONAL_ARGS=()
while [[ $# -gt 0 ]]; do
case $1 in
--no-sync)
NO_SYNC=1
shift
;;
--create)
ONLY_CREATE=1
shift
;;
-*|--*)
echo "Unknown option $1"
exit 1
;;
*)
POSITIONAL_ARGS+=("$1") # save positional arg
shift # past argument
;;
esac
case $1 in
--no-sync)
NO_SYNC=1
shift
;;
--create)
ONLY_CREATE=1
shift
;;
-*|--*)
echo "Unknown option $1"
exit 1
;;
*)
POSITIONAL_ARGS+=("$1") # save positional arg
shift # past argument
;;
esac
done
set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters
@@ -35,7 +35,7 @@ set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters
if [ -z "$NO_SYNC" ]; then
echo 'Syncing...'
umount -R /media/startos/next 2> /dev/null
umount -R /media/startos/upper 2> /dev/null
umount /media/startos/upper 2> /dev/null
rm -rf /media/startos/upper /media/startos/next
mkdir /media/startos/upper
mount -t tmpfs tmpfs /media/startos/upper
@@ -43,8 +43,6 @@ if [ -z "$NO_SYNC" ]; then
mount -t overlay \
-olowerdir=/media/startos/current,upperdir=/media/startos/upper/data,workdir=/media/startos/upper/work \
overlay /media/startos/next
mkdir -p /media/startos/next/media/startos/root
mount --bind /media/startos/root /media/startos/next/media/startos/root
fi
if [ -n "$ONLY_CREATE" ]; then
@@ -56,12 +54,18 @@ mkdir -p /media/startos/next/dev
mkdir -p /media/startos/next/sys
mkdir -p /media/startos/next/proc
mkdir -p /media/startos/next/boot
mkdir -p /media/startos/next/media/startos/root
mount --bind /run /media/startos/next/run
mount --bind /tmp /media/startos/next/tmp
mount --bind /dev /media/startos/next/dev
mount --bind /sys /media/startos/next/sys
mount --bind /proc /media/startos/next/proc
mount --bind /boot /media/startos/next/boot
mount --bind /media/startos/root /media/startos/next/media/startos/root
if mountpoint /sys/firmware/efi/efivars 2>&1 > /dev/null; then
mount --bind /sys/firmware/efi/efivars /media/startos/next/sys/firmware/efi/efivars
fi
if [ -z "$*" ]; then
chroot /media/startos/next
@@ -71,6 +75,10 @@ else
CHROOT_RES=$?
fi
if mountpoint /media/startos/next/sys/firmware/efi/efivars 2>&1 > /dev/null; then
umount /media/startos/next/sys/firmware/efi/efivars
fi
umount /media/startos/next/run
umount /media/startos/next/tmp
umount /media/startos/next/dev
@@ -87,11 +95,12 @@ if [ "$CHROOT_RES" -eq 0 ]; then
echo 'Upgrading...'
rm -f /media/startos/images/next.squashfs
if ! time mksquashfs /media/startos/next /media/startos/images/next.squashfs -b 4096 -comp gzip; then
umount -R /media/startos/next
umount -R /media/startos/upper
rm -rf /media/startos/upper /media/startos/next
exit 1
umount -l /media/startos/next
umount -l /media/startos/upper
rm -rf /media/startos/upper /media/startos/next
exit 1
fi
hash=$(b3sum /media/startos/images/next.squashfs | head -c 32)
mv /media/startos/images/next.squashfs /media/startos/images/${hash}.rootfs
@@ -102,6 +111,6 @@ if [ "$CHROOT_RES" -eq 0 ]; then
reboot
fi
umount -R /media/startos/next
umount -R /media/startos/upper
umount /media/startos/next
umount /media/startos/upper
rm -rf /media/startos/upper /media/startos/next
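Note: this script chroots interactively when given no arguments and otherwise runs its arguments as a command; a sketch, with the script name assumed:

  ./chroot-and-upgrade                                 # interactive shell; squash and activate on clean exit
  ./chroot-and-upgrade --no-sync apt-get upgrade -y    # reuse the existing overlay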


@@ -64,9 +64,11 @@ user_pref("messaging-system.rsexperimentloader.enabled", false);
user_pref("network.allow-experiments", false);
user_pref("network.captive-portal-service.enabled", false);
user_pref("network.connectivity-service.enabled", false);
user_pref("network.proxy.autoconfig_url", "file:///usr/lib/startos/proxy.pac");
user_pref("network.proxy.socks", "10.0.3.1");
user_pref("network.proxy.socks_port", 9050);
user_pref("network.proxy.socks_version", 5);
user_pref("network.proxy.socks_remote_dns", true);
user_pref("network.proxy.type", 2);
user_pref("network.proxy.type", 1);
user_pref("privacy.resistFingerprinting", true);
//Enable letterboxing if we want the window size sent to the server to snap to common resolutions:
//user_pref("privacy.resistFingerprinting.letterboxing", true);

View File

@@ -1,26 +1,72 @@
#!/bin/bash
if [ -z "$iiface" ] || [ -z "$oiface" ] || [ -z "$sip" ] || [ -z "$dip" ] || [ -z "$sport" ] || [ -z "$dport" ]; then
if [ -z "$sip" ] || [ -z "$dip" ] || [ -z "$dprefix" ] || [ -z "$sport" ] || [ -z "$dport" ]; then
>&2 echo 'missing required env var'
exit 1
fi
kind="-A"
NAME="F$(echo "$sip:$sport -> $dip/$dprefix:$dport ${src_subnet:-any}" | sha256sum | head -c 15)"
for kind in INPUT FORWARD ACCEPT; do
if ! iptables -C $kind -j "${NAME}_${kind}" 2> /dev/null; then
iptables -N "${NAME}_${kind}" 2> /dev/null
iptables -A $kind -j "${NAME}_${kind}"
fi
done
for kind in PREROUTING OUTPUT POSTROUTING; do
if ! iptables -t nat -C $kind -j "${NAME}_${kind}" 2> /dev/null; then
iptables -t nat -N "${NAME}_${kind}" 2> /dev/null
iptables -t nat -A $kind -j "${NAME}_${kind}"
fi
done
err=0
trap 'err=1' ERR
for kind in INPUT FORWARD ACCEPT; do
iptables -F "${NAME}_${kind}" 2> /dev/null
done
for kind in PREROUTING OUTPUT POSTROUTING; do
iptables -t nat -F "${NAME}_${kind}" 2> /dev/null
done
if [ "$UNDO" = 1 ]; then
kind="-D"
conntrack -D -p tcp -d $sip --dport $sport || true # conntrack returns exit 1 if no connections are active
conntrack -D -p udp -d $sip --dport $sport || true # conntrack returns exit 1 if no connections are active
exit $err
fi
iptables -t nat "$kind" POSTROUTING -o $iiface -j MASQUERADE
iptables -t nat "$kind" PREROUTING -i $iiface -p tcp --dport $sport -j DNAT --to-destination $dip:$dport
iptables -t nat "$kind" PREROUTING -i $iiface -p udp --dport $sport -j DNAT --to-destination $dip:$dport
iptables -t nat "$kind" PREROUTING -i $oiface -s $dip/24 -d $sip -p tcp --dport $sport -j DNAT --to-destination $dip:$dport
iptables -t nat "$kind" PREROUTING -i $oiface -s $dip/24 -d $sip -p udp --dport $sport -j DNAT --to-destination $dip:$dport
iptables -t nat "$kind" POSTROUTING -o $oiface -s $dip/24 -d $dip/32 -p tcp --dport $dport -j SNAT --to-source $sip:$sport
iptables -t nat "$kind" POSTROUTING -o $oiface -s $dip/24 -d $dip/32 -p udp --dport $dport -j SNAT --to-source $sip:$sport
# DNAT: rewrite destination for incoming packets (external traffic)
# When src_subnet is set, only forward traffic from that subnet (private forwards)
if [ -n "$src_subnet" ]; then
iptables -t nat -A ${NAME}_PREROUTING -s "$src_subnet" -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_PREROUTING -s "$src_subnet" -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
# Also allow containers on the bridge subnet to reach this forward
if [ -n "$bridge_subnet" ]; then
iptables -t nat -A ${NAME}_PREROUTING -s "$bridge_subnet" -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_PREROUTING -s "$bridge_subnet" -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
fi
else
iptables -t nat -A ${NAME}_PREROUTING -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_PREROUTING -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
fi
# DNAT: rewrite destination for locally-originated packets (hairpin from host itself)
iptables -t nat -A ${NAME}_OUTPUT -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat -A ${NAME}_OUTPUT -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
iptables -t nat "$kind" PREROUTING -i $iiface -s $sip/32 -d $sip -p tcp --dport $sport -j DNAT --to-destination $dip:$dport
iptables -t nat "$kind" PREROUTING -i $iiface -s $sip/32 -d $sip -p udp --dport $sport -j DNAT --to-destination $dip:$dport
iptables -t nat "$kind" POSTROUTING -o $oiface -s $sip/32 -d $dip/32 -p tcp --dport $dport -j SNAT --to-source $sip:$sport
iptables -t nat "$kind" POSTROUTING -o $oiface -s $sip/32 -d $dip/32 -p udp --dport $dport -j SNAT --to-source $sip:$sport
# Allow new connections to be forwarded to the destination
iptables -A ${NAME}_FORWARD -d $dip -p tcp --dport $dport -m state --state NEW -j ACCEPT
iptables -A ${NAME}_FORWARD -d $dip -p udp --dport $dport -m state --state NEW -j ACCEPT
# NAT hairpin: masquerade traffic from the bridge subnet or host to the DNAT
# target, so replies route back through the host for proper NAT reversal.
# Container-to-container hairpin (source is on the bridge subnet)
if [ -n "$bridge_subnet" ]; then
iptables -t nat -A ${NAME}_POSTROUTING -s "$bridge_subnet" -d "$dip" -p tcp --dport "$dport" -j MASQUERADE
iptables -t nat -A ${NAME}_POSTROUTING -s "$bridge_subnet" -d "$dip" -p udp --dport "$dport" -j MASQUERADE
fi
# Host-to-container hairpin (host connects to its own gateway IP, source is sip)
iptables -t nat -A ${NAME}_POSTROUTING -s "$sip" -d "$dip" -p tcp --dport "$dport" -j MASQUERADE
iptables -t nat -A ${NAME}_POSTROUTING -s "$sip" -d "$dip" -p udp --dport "$dport" -j MASQUERADE
exit $err
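Note: the rules are parameterized entirely by environment variables (sip, dip, dprefix, sport, dport required; src_subnet and bridge_subnet optional); a hypothetical invocation, script name assumed:

  sip=10.0.3.1 dip=10.0.3.5 dprefix=24 sport=8080 dport=80 ./forward.sh
  # teardown must repeat the same vars so the hashed chain name matches
  UNDO=1 sip=10.0.3.1 dip=10.0.3.5 dprefix=24 sport=8080 dport=80 ./forward.sh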


@@ -0,0 +1,20 @@
#!/bin/bash
export DEBIAN_FRONTEND=noninteractive
export DEBCONF_NONINTERACTIVE_SEEN=true
TMP_DIR=$(mktemp -d)
(
set -e
cd $TMP_DIR
cat > control.equivs
equivs-build control.equivs
apt-get install -y ./*.deb < /dev/null
)
rm -rf $TMP_DIR
echo Install complete. >&2
exit 0
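Note: the image-recipe hook above drives this script with an equivs control file on stdin, e.g. (field values illustrative):

  /usr/lib/startos/scripts/install-equivs <<-EOF
  Package: nvidia-depends
  Version: 1.0
  Section: unknown
  Priority: optional
  Depends: build-essential
  EOF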


@@ -29,10 +29,13 @@ if [ -z "$needed" ]; then
exit 1
fi
MARGIN=${MARGIN:-1073741824}
target=$((needed + MARGIN))
if [ -h /media/startos/config/current.rootfs ] && [ -e /media/startos/config/current.rootfs ]; then
echo 'Pruning...'
current="$(readlink -f /media/startos/config/current.rootfs)"
while [[ "$(df -B1 --output=avail --sync /media/startos/images | tail -n1)" -lt "$needed" ]]; do
while [[ "$(df -B1 --output=avail --sync /media/startos/images | tail -n1)" -lt "$target" ]]; do
to_prune="$(ls -t1 /media/startos/images/*.rootfs /media/startos/images/*.squashfs 2> /dev/null | grep -v "$current" | tail -n1)"
if [ -e "$to_prune" ]; then
echo " Pruning $to_prune"


@@ -83,6 +83,7 @@ local_mount_root()
if [ -d "$image" ]; then
mount -r --bind $image /lower
elif [ -f "$image" ]; then
modprobe loop
modprobe squashfs
mount -r $image /lower
else


@@ -1,36 +1,64 @@
#!/bin/bash
fail=$(printf " [\033[31m fail \033[0m]")
pass=$(printf " [\033[32m pass \033[0m]")
# --- Config ---
# Colors (using printf to ensure compatibility)
GRAY=$(printf '\033[90m')
GREEN=$(printf '\033[32m')
RED=$(printf '\033[31m')
NC=$(printf '\033[0m') # No Color
# Proxies to test
proxies=(
"Host Tor|127.0.1.1:9050"
"Startd Tor|10.0.3.1:9050"
)
# Default URLs
onion_list=(
"The Tor Project|http://2gzyxa5ihm7nsggfxnu52rck2vv4rvmdlkiu3zzui5du4xyclen53wid.onion"
"Start9|http://privacy34kn4ez3y3nijweec6w4g54i3g54sdv7r5mr6soma3w4begyd.onion"
"Mempool|http://mempoolhqx4isw62xs7abwphsq7ldayuidyx2v2oethdhhj6mlo2r6ad.onion"
"DuckDuckGo|https://duckduckgogg42xjoc72x3sjasowoarfbgcmvfimaftt6twagswzczad.onion"
"Brave Search|https://search.brave4u7jddbv7cyviptqjc7jusxh72uik7zt6adtckl5f4nwy2v72qd.onion"
)
# Check if ~/.startos/tor-check.list exists and read its contents if available
if [ -f ~/.startos/tor-check.list ]; then
while IFS= read -r line; do
# Check if the line starts with a #
if [[ ! "$line" =~ ^# ]]; then
onion_list+=("$line")
# Load custom list
[ -f ~/.startos/tor-check.list ] && readarray -t custom_list < <(grep -v '^#' ~/.startos/tor-check.list) && onion_list+=("${custom_list[@]}")
# --- Functions ---
print_line() { printf "${GRAY}────────────────────────────────────────${NC}\n"; }
# --- Main ---
echo "Testing Onion Connections..."
for proxy_info in "${proxies[@]}"; do
proxy_name="${proxy_info%%|*}"
proxy_addr="${proxy_info#*|}"
print_line
printf "${GRAY}Proxy: %s (%s)${NC}\n" "$proxy_name" "$proxy_addr"
for data in "${onion_list[@]}"; do
name="${data%%|*}"
url="${data#*|}"
# Capture verbose output + http code.
# --no-progress-meter: Suppresses the "0 0 0" stats but keeps -v output
output=$(curl -v --no-progress-meter --max-time 15 --socks5-hostname "$proxy_addr" "$url" 2>&1)
exit_code=$?
if [ $exit_code -eq 0 ]; then
printf " ${GREEN}[pass]${NC} %s (%s)\n" "$name" "$url"
else
printf " ${RED}[fail]${NC} %s (%s)\n" "$name" "$url"
printf " ${RED}↳ Curl Error %s${NC}\n" "$exit_code"
# Print the last 4 lines of verbose log to show the specific handshake error
# We look for lines starting with '*' or '>' or '<' to filter out junk if any remains
echo "$output" | tail -n 4 | sed "s/^/ ${GRAY}/"
fi
done < ~/.startos/tor-check.list
fi
echo "Testing connection to Onion Pages ..."
for data in "${onion_list[@]}"; do
name="${data%%|*}"
url="${data#*|}"
if curl --socks5-hostname localhost:9050 "$url" > /dev/null 2>&1; then
echo " ${pass}: $name ($url) "
else
echo " ${fail}: $name ($url) "
fi
done
done
echo
echo "Done."
print_line
# Reset color just in case
printf "${NC}"

build/lib/scripts/upgrade Executable file

@@ -0,0 +1,97 @@
#!/bin/bash
set -e
SOURCE_DIR="$(dirname $(realpath "${BASH_SOURCE[0]}"))"
if [ "$UID" -ne 0 ]; then
>&2 echo 'Must be run as root'
exit 1
fi
if ! [ -f "$1" ]; then
>&2 echo "usage: $0 <SQUASHFS>"
exit 1
fi
echo 'Upgrading...'
hash=$(b3sum $1 | head -c 32)
if [ -n "$2" ] && [ "$hash" != "$CHECKSUM" ]; then
>&2 echo 'Checksum mismatch'
exit 2
fi
unsquashfs -f -d / $1 boot
umount -R /media/startos/next 2> /dev/null || true
umount /media/startos/upper 2> /dev/null || true
umount /media/startos/lower 2> /dev/null || true
mkdir -p /media/startos/upper
mount -t tmpfs tmpfs /media/startos/upper
mkdir -p /media/startos/lower /media/startos/upper/data /media/startos/upper/work /media/startos/next
mount $1 /media/startos/lower
mount -t overlay \
-olowerdir=/media/startos/lower,upperdir=/media/startos/upper/data,workdir=/media/startos/upper/work \
overlay /media/startos/next
mkdir -p /media/startos/next/run
mkdir -p /media/startos/next/dev
mkdir -p /media/startos/next/sys
mkdir -p /media/startos/next/proc
mkdir -p /media/startos/next/boot
mkdir -p /media/startos/next/media/startos/root
mount --bind /run /media/startos/next/run
mount --bind /tmp /media/startos/next/tmp
mount --bind /dev /media/startos/next/dev
mount --bind /sys /media/startos/next/sys
mount --bind /proc /media/startos/next/proc
mount --bind /boot /media/startos/next/boot
mount --bind /media/startos/root /media/startos/next/media/startos/root
if mountpoint /boot/efi 2>&1 > /dev/null; then
mkdir -p /media/startos/next/boot/efi
mount --bind /boot/efi /media/startos/next/boot/efi
fi
if mountpoint /sys/firmware/efi/efivars 2>&1 > /dev/null; then
mount --bind /sys/firmware/efi/efivars /media/startos/next/sys/firmware/efi/efivars
fi
chroot /media/startos/next bash -e << "EOF"
if [ -f /boot/grub/grub.cfg ]; then
grub-install /dev/$(eval $(lsblk -o MOUNTPOINT,PKNAME -P | grep 'MOUNTPOINT="/media/startos/root"') && echo $PKNAME)
update-grub
fi
EOF
# Promote the USB installer boot entry back to first in EFI boot order.
# The entry number was saved during initial OS install.
if [ -d /sys/firmware/efi ] && [ -f /media/startos/config/efi-installer-entry ]; then
USB_ENTRY=$(cat /media/startos/config/efi-installer-entry)
if [ -n "$USB_ENTRY" ]; then
CURRENT_ORDER=$(efibootmgr | grep BootOrder | sed 's/BootOrder: //')
OTHER_ENTRIES=$(echo "$CURRENT_ORDER" | tr ',' '\n' | grep -v "$USB_ENTRY" | tr '\n' ',' | sed 's/,$//')
if [ -n "$OTHER_ENTRIES" ]; then
efibootmgr -o "$USB_ENTRY,$OTHER_ENTRIES"
else
efibootmgr -o "$USB_ENTRY"
fi
fi
fi
sync
umount -Rl /media/startos/next
umount /media/startos/upper
umount /media/startos/lower
mv $1 /media/startos/images/${hash}.rootfs
ln -rsf /media/startos/images/${hash}.rootfs /media/startos/config/current.rootfs
sync
echo 'System upgrade complete. Reboot to apply changes...'
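Note: a sketch of the expected invocation (installed path assumed from the other scripts); the b3sum gate only engages when a second argument is passed, with the expected value read from $CHECKSUM:

  sudo CHECKSUM=<expected-b3sum> /usr/lib/startos/scripts/upgrade startos.squashfs check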


@@ -1,61 +0,0 @@
#!/bin/bash
set -e
if [ "$UID" -ne 0 ]; then
>&2 echo 'Must be run as root'
exit 1
fi
if [ -z "$1" ]; then
>&2 echo "usage: $0 <SQUASHFS>"
exit 1
fi
VERSION=$(unsquashfs -cat $1 /usr/lib/startos/VERSION.txt)
GIT_HASH=$(unsquashfs -cat $1 /usr/lib/startos/GIT_HASH.txt)
B3SUM=$(b3sum $1 | head -c 32)
if [ -n "$CHECKSUM" ] && [ "$CHECKSUM" != "$B3SUM" ]; then
>&2 echo "CHECKSUM MISMATCH"
exit 2
fi
mv $1 /media/startos/images/${B3SUM}.rootfs
ln -rsf /media/startos/images/${B3SUM}.rootfs /media/startos/config/current.rootfs
unsquashfs -n -f -d / /media/startos/images/${B3SUM}.rootfs boot
umount -R /media/startos/next 2> /dev/null || true
umount -R /media/startos/lower 2> /dev/null || true
umount -R /media/startos/upper 2> /dev/null || true
rm -rf /media/startos/lower /media/startos/upper /media/startos/next
mkdir /media/startos/upper
mount -t tmpfs tmpfs /media/startos/upper
mkdir -p /media/startos/lower /media/startos/upper/data /media/startos/upper/work /media/startos/next
mount /media/startos/images/${B3SUM}.rootfs /media/startos/lower
mount -t overlay \
-olowerdir=/media/startos/lower,upperdir=/media/startos/upper/data,workdir=/media/startos/upper/work \
overlay /media/startos/next
mkdir -p /media/startos/next/media/startos/root
mount --bind /media/startos/root /media/startos/next/media/startos/root
mkdir -p /media/startos/next/dev
mkdir -p /media/startos/next/sys
mkdir -p /media/startos/next/proc
mkdir -p /media/startos/next/boot
mount --bind /dev /media/startos/next/dev
mount --bind /sys /media/startos/next/sys
mount --bind /proc /media/startos/next/proc
mount --bind /boot /media/startos/next/boot
chroot /media/startos/next update-grub2
umount -R /media/startos/next
umount -R /media/startos/upper
umount -R /media/startos/lower
rm -rf /media/startos/lower /media/startos/upper /media/startos/next
sync
reboot

(binary image modified: 9.6 KiB before and after)

build/manage-release.sh Executable file

@@ -0,0 +1,364 @@
#!/bin/bash
set -e
REPO="Start9Labs/start-os"
REGISTRY="https://alpha-registry-x.start9.com"
S3_BUCKET="s3://startos-images"
S3_CDN="https://startos-images.nyc3.cdn.digitaloceanspaces.com"
START9_GPG_KEY="2D63C217"
ARCHES="aarch64 aarch64-nonfree aarch64-nvidia riscv64 riscv64-nonfree x86_64 x86_64-nonfree x86_64-nvidia"
CLI_ARCHES="aarch64 riscv64 x86_64"
parse_run_id() {
local val="$1"
if [[ "$val" =~ /actions/runs/([0-9]+) ]]; then
echo "${BASH_REMATCH[1]}"
else
echo "$val"
fi
}
require_version() {
if [ -z "${VERSION:-}" ]; then
read -rp "VERSION: " VERSION
if [ -z "$VERSION" ]; then
>&2 echo '$VERSION required'
exit 2
fi
fi
}
release_dir() {
echo "$HOME/Downloads/v$VERSION"
}
ensure_release_dir() {
local dir
dir=$(release_dir)
if [ "$CLEAN" = "1" ]; then
rm -rf "$dir"
fi
mkdir -p "$dir"
cd "$dir"
}
enter_release_dir() {
local dir
dir=$(release_dir)
if [ ! -d "$dir" ]; then
>&2 echo "Release directory $dir does not exist. Run 'download' or 'pull' first."
exit 1
fi
cd "$dir"
}
cli_target_for() {
local arch=$1 os=$2
local pair="${arch}-${os}"
if [ "$pair" = "riscv64-linux" ]; then
echo "riscv64gc-unknown-linux-musl"
elif [ "$pair" = "riscv64-macos" ]; then
return 1
elif [ "$os" = "linux" ]; then
echo "${arch}-unknown-linux-musl"
elif [ "$os" = "macos" ]; then
echo "${arch}-apple-darwin"
fi
}
release_files() {
for file in *.iso *.squashfs *.deb; do
[ -f "$file" ] && echo "$file"
done
for file in start-cli_*; do
[[ "$file" == *.asc ]] && continue
[ -f "$file" ] && echo "$file"
done
}
resolve_gh_user() {
GH_USER=${GH_USER:-$(gh api user -q .login 2>/dev/null || true)}
GH_GPG_KEY=$(git config user.signingkey 2>/dev/null || true)
}
# --- Subcommands ---
cmd_download() {
require_version
if [ -z "${RUN_ID:-}" ]; then
read -rp "RUN_ID (OS images, leave blank to skip): " RUN_ID
fi
RUN_ID=$(parse_run_id "${RUN_ID:-}")
if [ -z "${ST_RUN_ID:-}" ]; then
read -rp "ST_RUN_ID (start-tunnel, leave blank to skip): " ST_RUN_ID
fi
ST_RUN_ID=$(parse_run_id "${ST_RUN_ID:-}")
if [ -z "${CLI_RUN_ID:-}" ]; then
read -rp "CLI_RUN_ID (start-cli, leave blank to skip): " CLI_RUN_ID
fi
CLI_RUN_ID=$(parse_run_id "${CLI_RUN_ID:-}")
ensure_release_dir
if [ -n "$RUN_ID" ]; then
for arch in $ARCHES; do
while ! gh run download -R $REPO "$RUN_ID" -n "$arch.squashfs" -D "$(pwd)"; do sleep 1; done
done
for arch in $ARCHES; do
while ! gh run download -R $REPO "$RUN_ID" -n "$arch.iso" -D "$(pwd)"; do sleep 1; done
done
fi
if [ -n "$ST_RUN_ID" ]; then
for arch in $CLI_ARCHES; do
while ! gh run download -R $REPO "$ST_RUN_ID" -n "start-tunnel_$arch.deb" -D "$(pwd)"; do sleep 1; done
done
fi
if [ -n "$CLI_RUN_ID" ]; then
for arch in $CLI_ARCHES; do
for os in linux macos; do
local target
target=$(cli_target_for "$arch" "$os") || continue
while ! gh run download -R $REPO "$CLI_RUN_ID" -n "start-cli_$target" -D "$(pwd)"; do sleep 1; done
mv start-cli "start-cli_${arch}-${os}"
done
done
fi
}
cmd_pull() {
require_version
ensure_release_dir
echo "Downloading release assets from tag v$VERSION..."
# Download debs and CLI binaries from the GH release
for file in $(gh release view -R $REPO "v$VERSION" --json assets -q '.assets[].name' | grep -E '\.(deb)$|^start-cli_'); do
gh release download -R $REPO "v$VERSION" -p "$file" -D "$(pwd)" --clobber
done
# Download ISOs and squashfs from S3 CDN
for arch in $ARCHES; do
for ext in squashfs iso; do
# Get the actual filename from the GH release asset list or body
local filename
filename=$(gh release view -R $REPO "v$VERSION" --json assets -q ".assets[].name" | grep "_${arch}\\.${ext}$" || true)
if [ -z "$filename" ]; then
filename=$(gh release view -R $REPO "v$VERSION" --json body -q .body | grep -oP "[^ ]*_${arch}\\.${ext}" | head -1 || true)
fi
if [ -n "$filename" ]; then
echo "Downloading $filename from S3..."
curl -fSL -o "$filename" "$S3_CDN/v$VERSION/$filename"
fi
done
done
}
cmd_register() {
require_version
enter_release_dir
start-cli --registry=$REGISTRY registry os version add "$VERSION" "v$VERSION" '' ">=0.3.5 <=$VERSION"
}
cmd_upload() {
require_version
enter_release_dir
for file in $(release_files); do
case "$file" in
*.iso|*.squashfs)
s3cmd put -P "$file" "$S3_BUCKET/v$VERSION/$file"
;;
*)
gh release upload -R $REPO "v$VERSION" "$file"
;;
esac
done
}
cmd_index() {
require_version
enter_release_dir
for arch in $ARCHES; do
for file in *_"$arch".squashfs *_"$arch".iso; do
start-cli --registry=$REGISTRY registry os asset add --platform="$arch" --version="$VERSION" "$file" "$S3_CDN/v$VERSION/$file"
done
done
}
cmd_sign() {
require_version
enter_release_dir
resolve_gh_user
for file in $(release_files); do
gpg -u $START9_GPG_KEY --detach-sign --armor -o "${file}.start9.asc" "$file"
if [ -n "$GH_USER" ] && [ -n "$GH_GPG_KEY" ]; then
gpg -u "$GH_GPG_KEY" --detach-sign --armor -o "${file}.${GH_USER}.asc" "$file"
fi
done
gpg --export -a $START9_GPG_KEY > start9.key.asc
if [ -n "$GH_USER" ] && [ -n "$GH_GPG_KEY" ]; then
gpg --export -a "$GH_GPG_KEY" > "${GH_USER}.key.asc"
else
>&2 echo 'Warning: could not determine GitHub user or GPG signing key, skipping personal signature'
fi
tar -czvf signatures.tar.gz *.asc
gh release upload -R $REPO "v$VERSION" signatures.tar.gz --clobber
}
cmd_cosign() {
require_version
enter_release_dir
resolve_gh_user
if [ -z "$GH_USER" ] || [ -z "$GH_GPG_KEY" ]; then
>&2 echo 'Error: could not determine GitHub user or GPG signing key'
>&2 echo "Set GH_USER and/or configure git user.signingkey"
exit 1
fi
echo "Downloading existing signatures..."
gh release download -R $REPO "v$VERSION" -p "signatures.tar.gz" -D "$(pwd)" --clobber
tar -xzf signatures.tar.gz
echo "Adding personal signatures as $GH_USER..."
for file in $(release_files); do
gpg -u "$GH_GPG_KEY" --detach-sign --armor -o "${file}.${GH_USER}.asc" "$file"
done
gpg --export -a "$GH_GPG_KEY" > "${GH_USER}.key.asc"
echo "Re-packing signatures..."
tar -czvf signatures.tar.gz *.asc
gh release upload -R $REPO "v$VERSION" signatures.tar.gz --clobber
echo "Done. Personal signatures for $GH_USER added to v$VERSION."
}
cmd_notes() {
require_version
enter_release_dir
cat << EOF
# ISO Downloads
- [x86_64/AMD64]($S3_CDN/v$VERSION/$(ls *_x86_64-nonfree.iso))
- [x86_64/AMD64 + NVIDIA]($S3_CDN/v$VERSION/$(ls *_x86_64-nvidia.iso))
- [x86_64/AMD64-slim (FOSS-only)]($S3_CDN/v$VERSION/$(ls *_x86_64.iso) "Without proprietary software or drivers")
- [aarch64/ARM64]($S3_CDN/v$VERSION/$(ls *_aarch64-nonfree.iso))
- [aarch64/ARM64 + NVIDIA]($S3_CDN/v$VERSION/$(ls *_aarch64-nvidia.iso))
- [aarch64/ARM64-slim (FOSS-only)]($S3_CDN/v$VERSION/$(ls *_aarch64.iso) "Without proprietary software or drivers")
- [RISCV64 (RVA23)]($S3_CDN/v$VERSION/$(ls *_riscv64-nonfree.iso))
- [RISCV64 (RVA23)-slim (FOSS-only)]($S3_CDN/v$VERSION/$(ls *_riscv64.iso) "Without proprietary software or drivers")
EOF
cat << 'EOF'
# StartOS Checksums
## SHA-256
```
EOF
sha256sum *.iso *.squashfs
cat << 'EOF'
```
## BLAKE-3
```
EOF
b3sum *.iso *.squashfs
cat << 'EOF'
```
# Start-Tunnel Checksums
## SHA-256
```
EOF
sha256sum start-tunnel*.deb
cat << 'EOF'
```
## BLAKE-3
```
EOF
b3sum start-tunnel*.deb
cat << 'EOF'
```
# start-cli Checksums
## SHA-256
```
EOF
release_files | grep '^start-cli_' | xargs sha256sum
cat << 'EOF'
```
## BLAKE-3
```
EOF
release_files | grep '^start-cli_' | xargs b3sum
cat << 'EOF'
```
EOF
}
cmd_full_release() {
cmd_download
cmd_register
cmd_upload
cmd_index
cmd_sign
cmd_notes
}
usage() {
cat << 'EOF'
Usage: manage-release.sh <subcommand>
Subcommands:
download Download artifacts from GitHub Actions runs
Requires: RUN_ID, ST_RUN_ID, CLI_RUN_ID (any combination)
pull Download an existing release from the GH tag and S3
register Register the version in the Start9 registry
upload Upload artifacts to GitHub Releases and S3
index Add assets to the registry index
sign Sign all artifacts with Start9 org key (+ personal key if available)
and upload signatures.tar.gz
cosign Add personal GPG signature to an existing release's signatures
(requires 'pull' first so you can verify assets before signing)
notes Print release notes with download links and checksums
full-release Run: download → register → upload → index → sign → notes
Environment variables:
VERSION (required) Release version
RUN_ID GitHub Actions run ID for OS images (download subcommand)
ST_RUN_ID GitHub Actions run ID for start-tunnel (download subcommand)
CLI_RUN_ID GitHub Actions run ID for start-cli (download subcommand)
GH_USER Override GitHub username (default: autodetected via gh cli)
CLEAN Set to 1 to wipe and recreate the release directory
EOF
}
case "${1:-}" in
download) cmd_download ;;
pull) cmd_pull ;;
register) cmd_register ;;
upload) cmd_upload ;;
index) cmd_index ;;
sign) cmd_sign ;;
cosign) cmd_cosign ;;
notes) cmd_notes ;;
full-release) cmd_full_release ;;
*) usage; exit 1 ;;
esac


@@ -1,4 +1,4 @@
FROM debian:bookworm
FROM debian:trixie
RUN apt-get update && \
apt-get install -y \
@@ -12,45 +12,14 @@ RUN apt-get update && \
jq \
gzip \
brotli \
qemu-user-static \
binfmt-support \
squashfs-tools \
git \
debspawn \
rsync \
b3sum \
fuse-overlayfs \
sudo \
systemd \
systemd-container \
systemd-sysv \
dbus \
dbus-user-session
RUN systemctl mask \
systemd-firstboot.service \
systemd-udevd.service \
getty@tty1.service \
console-getty.service
nodejs
RUN git config --global --add safe.directory /root/start-os
RUN mkdir -p /etc/debspawn && \
echo "AllowUnsafePermissions=true" > /etc/debspawn/global.toml
ENV NVM_DIR=~/.nvm
ENV NODE_VERSION=22
RUN mkdir -p $NVM_DIR && \
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/master/install.sh | bash && \
. $NVM_DIR/nvm.sh \
nvm install $NODE_VERSION && \
nvm use $NODE_VERSION && \
nvm alias default $NODE_VERSION && \
ln -s $(which node) /usr/bin/node && \
ln -s $(which npm) /usr/bin/npm
RUN mkdir -p /root/start-os
WORKDIR /root/start-os
COPY docker-entrypoint.sh /docker-entrypoint.sh
ENTRYPOINT [ "/docker-entrypoint.sh" ]


@@ -1,3 +0,0 @@
#!/bin/bash
exec /lib/systemd/systemd --unit=multi-user.target --show-status=false --log-target=journal


@@ -1,27 +1,30 @@
#!/bin/bash
if [ "$FORCE_COMPAT" = 1 ] || ( [ "$REQUIRES" = "linux" ] && [ "$(uname -s)" != "Linux" ] ) || ( [ "$REQUIRES" = "debian" ] && ! which dpkg > /dev/null ); then
project_pwd="$(cd "$(dirname "${BASH_SOURCE[0]}")"/../.. && pwd)/"
pwd="$(pwd)/"
if ! [[ "$pwd" = "$project_pwd"* ]]; then
>&2 echo "Must be run from start-os project dir"
exit 1
fi
rel_pwd="${pwd#"$project_pwd"}"
pwd=$(pwd)
SYSTEMD_TTY="-P"
USE_TTY=
cd "$(dirname "${BASH_SOURCE[0]}")/../.."
set -e
rel_pwd="${pwd#"$(pwd)"}"
COMPAT_ARCH=$(uname -m)
platform=linux/$COMPAT_ARCH
case $COMPAT_ARCH in
x86_64)
platform=linux/amd64;;
aarch64)
platform=linux/arm64;;
esac
if [ "$FORCE_COMPAT" = 1 ] || ( [ "$REQUIRES" = "linux" ] && [ "$(uname -s)" != "Linux" ] ) || ( [ "$REQUIRES" = "debian" ] && ! which dpkg > /dev/null ); then
if tty -s; then
USE_TTY="-it"
SYSTEMD_TTY="-t"
fi
docker run -d --rm --name os-compat --privileged --security-opt apparmor=unconfined -v "${project_pwd}:/root/start-os" -v /lib/modules:/lib/modules:ro start9/build-env
while ! docker exec os-compat systemctl is-active --quiet multi-user.target 2> /dev/null; do sleep .5; done
docker exec -eARCH -eENVIRONMENT -ePLATFORM -eGIT_BRANCH_AS_HASH $USE_TTY -w "/root/start-os${rel_pwd}" os-compat $@
code=$?
docker stop os-compat
exit $code
docker run $USE_TTY --platform=$platform -eARCH -eENVIRONMENT -ePLATFORM -eGIT_BRANCH_AS_HASH -ePROJECT -eDEPENDS -eCONFLICTS -w "/root/start-os${rel_pwd}" --rm -v "$(pwd):/root/start-os" start9/build-env $@
else
exec $@
fi
fi


@@ -1,87 +0,0 @@
#!/bin/bash
set -e
function partition_for () {
if [[ "$1" =~ [0-9]+$ ]]; then
echo "$1p$2"
else
echo "$1$2"
fi
}
VERSION=$(cat VERSION.txt)
ENVIRONMENT=$(cat ENVIRONMENT.txt)
GIT_HASH=$(cat GIT_HASH.txt | head -c 7)
DATE=$(date +%Y%m%d)
ROOT_PART_END=7217792
VERSION_FULL="$VERSION-$GIT_HASH"
if [ -n "$ENVIRONMENT" ]; then
VERSION_FULL="$VERSION_FULL~$ENVIRONMENT"
fi
TARGET_NAME=startos-${VERSION_FULL}-${DATE}_raspberrypi.img
TARGET_SIZE=$[($ROOT_PART_END+1)*512]
rm -f $TARGET_NAME
truncate -s $TARGET_SIZE $TARGET_NAME
(
echo o
echo x
echo i
echo "0xcb15ae4d"
echo r
echo n
echo p
echo 1
echo 2048
echo 526335
echo t
echo c
echo n
echo p
echo 2
echo 526336
echo $ROOT_PART_END
echo a
echo 1
echo w
) | fdisk $TARGET_NAME
OUTPUT_DEVICE=$(sudo losetup --show -fP $TARGET_NAME)
sudo mkfs.ext4 `partition_for ${OUTPUT_DEVICE} 2`
sudo mkfs.vfat `partition_for ${OUTPUT_DEVICE} 1`
TMPDIR=$(mktemp -d)
sudo mount `partition_for ${OUTPUT_DEVICE} 2` $TMPDIR
sudo mkdir $TMPDIR/boot
sudo mount `partition_for ${OUTPUT_DEVICE} 1` $TMPDIR/boot
sudo unsquashfs -f -d $TMPDIR startos.raspberrypi.squashfs
REAL_GIT_HASH=$(cat $TMPDIR/usr/lib/startos/GIT_HASH.txt)
REAL_VERSION=$(cat $TMPDIR/usr/lib/startos/VERSION.txt)
REAL_ENVIRONMENT=$(cat $TMPDIR/usr/lib/startos/ENVIRONMENT.txt)
sudo sed -i 's| boot=startos| init=/usr/lib/startos/scripts/init_resize\.sh|' $TMPDIR/boot/cmdline.txt
sudo cp ./build/raspberrypi/fstab $TMPDIR/etc/
sudo cp ./build/raspberrypi/init_resize.sh $TMPDIR/usr/lib/startos/scripts/init_resize.sh
sudo umount $TMPDIR/boot
sudo umount $TMPDIR
sudo losetup -d $OUTPUT_DEVICE
if [ "$ALLOW_VERSION_MISMATCH" != 1 ]; then
if [ "$(cat GIT_HASH.txt)" != "$REAL_GIT_HASH" ]; then
>&2 echo "startos.raspberrypi.squashfs GIT_HASH.txt mismatch"
>&2 echo "expected $REAL_GIT_HASH (dpkg) found $(cat GIT_HASH.txt) (repo)"
exit 1
fi
if [ "$(cat VERSION.txt)" != "$REAL_VERSION" ]; then
>&2 echo "startos.raspberrypi.squashfs VERSION.txt mismatch"
exit 1
fi
if [ "$(cat ENVIRONMENT.txt)" != "$REAL_ENVIRONMENT" ]; then
>&2 echo "startos.raspberrypi.squashfs ENVIRONMENT.txt mismatch"
exit 1
fi
fi


@@ -0,0 +1,32 @@
# Container Runtime — Node.js Service Manager
Node.js runtime that manages service containers via JSON-RPC. See `RPCSpec.md` in this directory for the full RPC protocol.
## Architecture
```
LXC Container (uniform base for all services)
└── systemd
└── container-runtime.service
└── Loads /usr/lib/startos/package/index.js (from s9pk javascript.squashfs)
└── Package JS launches subcontainers (from images in s9pk)
```
The container runtime communicates with the host via JSON-RPC over a Unix socket. Package JavaScript must export functions conforming to the `ABI` type defined in `sdk/base/lib/types.ts`.
## `/media/startos/` Directory (mounted by host into container)
| Path | Description |
| -------------------- | ----------------------------------------------------- |
| `volumes/<name>/` | Package data volumes (id-mapped, persistent) |
| `assets/` | Read-only assets from s9pk `assets.squashfs` |
| `images/<name>/` | Container images (squashfs, used for subcontainers) |
| `images/<name>.env` | Environment variables for image |
| `images/<name>.json` | Image metadata |
| `backup/` | Backup mount point (mounted during backup operations) |
| `rpc/service.sock` | RPC socket (container runtime listens here) |
| `rpc/host.sock` | Host RPC socket (for effects callbacks to host) |
## S9PK Structure
See `../core/s9pk-structure.md` for the S9PK package format.
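As a rough sketch of the flow described above, the snippet below opens the service socket with Node's `net` module and issues an `init` call. The method and params follow `RPCSpec.md`; the framing (one JSON object per write, response read in a single chunk) is an assumption for illustration only.
```ts
import * as net from "net"

// Connect to the container runtime's JSON-RPC socket (path from the table above)
const sock = net.createConnection("/media/startos/rpc/service.sock")

sock.on("connect", () => {
  // `init` params per RPCSpec.md; single-write framing is assumed here
  sock.write(
    JSON.stringify({
      jsonrpc: "2.0",
      id: 1,
      method: "init",
      params: { id: "example-event", kind: null },
    }),
  )
})

sock.on("data", (buf) => {
  // assumes the full response arrives in one chunk (illustration only)
  console.log("response:", JSON.parse(buf.toString()))
  sock.end()
})
```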


@@ -1,16 +1,21 @@
# Container RPC SERVER Specification
# Container RPC Server Specification
The container runtime exposes a JSON-RPC server over a Unix socket at `/media/startos/rpc/service.sock`.
## Methods
### init
initialize runtime (mount `/proc`, `/sys`, `/dev`, and `/run` to each image in `/media/images`)
Initialize the runtime and system.
Called after the OS has mounted the package JS and images into the container.
#### params
#### args
`[]`
```ts
{
id: string,
kind: "install" | "update" | "restore" | null,
}
```
#### response
@@ -18,11 +23,16 @@ called after os has mounted js and images to the container
### exit
shutdown runtime
Shut down the runtime and optionally run exit hooks for a target version.
#### args
#### params
`[]`
```ts
{
id: string,
target: string | null, // ExtendedVersion or VersionRange
}
```
#### response
@@ -30,11 +40,11 @@ shutdown runtime
### start
run main method if not already running
Run main method if not already running.
#### args
#### params
`[]`
None
#### response
@@ -42,11 +52,11 @@ run main method if not already running
### stop
stop main method by sending SIGTERM to child processes, and SIGKILL after timeout
Stop main method by sending SIGTERM to child processes, and SIGKILL after timeout.
#### args
#### params
`{ timeout: millis }`
None
#### response
@@ -54,15 +64,16 @@ stop main method by sending SIGTERM to child processes, and SIGKILL after timeou
### execute
run a specific package procedure
Run a specific package procedure.
#### args
#### params
```ts
{
procedure: JsonPath,
input: any,
timeout: millis,
id: string, // event ID
procedure: string, // JSON path (e.g., "/backup/create", "/actions/{name}/run")
input: any,
timeout: number | null,
}
```
@@ -72,18 +83,64 @@ run a specific package procedure
### sandbox
run a specific package procedure in sandbox mode
Run a specific package procedure in sandbox mode. Same interface as `execute`.
#### args
UNIMPLEMENTED: this feature is planned but does not exist
#### params
```ts
{
procedure: JsonPath,
input: any,
timeout: millis,
id: string,
procedure: string,
input: any,
timeout: number | null,
}
```
#### response
`any`
### callback
Handle a callback from an effect.
#### params
```ts
{
id: number,
args: any[],
}
```
#### response
`null` (no response sent)
### eval
Evaluate a script in the runtime context. Used for debugging.
#### params
```ts
{
script: string,
}
```
#### response
`any`
## Procedures
The `execute` and `sandbox` methods route to procedures based on the `procedure` path:
| Procedure | Description |
| -------------------------- | ---------------------------- |
| `/backup/create` | Create a backup |
| `/actions/{name}/getInput` | Get input spec for an action |
| `/actions/{name}/run` | Run an action with input |
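As a worked example, a hypothetical `execute` request that runs an action could look like the following (the event ID and action name `config` are invented for illustration):
```ts
// Hypothetical `execute` request routed to /actions/{name}/run (see table above)
const request = {
  jsonrpc: "2.0",
  id: 42,
  method: "execute",
  params: {
    id: "event-1", // event ID
    procedure: "/actions/config/run", // JSON path; "config" is an invented action name
    input: { name: "value" },
    timeout: 30_000, // milliseconds, or null for no timeout
  },
}
```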


@@ -0,0 +1,30 @@
// Mock for ESM-only mime package — Jest's module loader doesn't support require(esm)
const types = {
".png": "image/png",
".jpg": "image/jpeg",
".jpeg": "image/jpeg",
".gif": "image/gif",
".svg": "image/svg+xml",
".webp": "image/webp",
".ico": "image/x-icon",
".json": "application/json",
".js": "application/javascript",
".html": "text/html",
".css": "text/css",
".txt": "text/plain",
".md": "text/markdown",
}
module.exports = {
default: {
getType(path) {
const ext = "." + path.split(".").pop()
return types[ext] || null
},
getExtension(type) {
const entry = Object.entries(types).find(([, v]) => v === type)
return entry ? entry[0].slice(1) : null
},
},
__esModule: true,
}
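With the Jest `moduleNameMapper` entry added later in this diff, test code that imports `mime` resolves to this mock, so calls behave roughly like:
```ts
import mime from "mime" // resolves to the mock above under Jest

mime.getType("icon.png") // "image/png"
mime.getType("unknown.xyz") // null (extension not in the mock's table)
mime.getExtension("text/html") // "html"
```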


@@ -4,6 +4,7 @@ OnFailure=container-runtime-failure.service
[Service]
Type=simple
Environment=RUST_LOG=startos=debug
ExecStart=/usr/bin/node --experimental-detect-module --trace-warnings --unhandled-rejections=warn /usr/lib/startos/init/index.js
Restart=no


@@ -2,17 +2,8 @@
set -e
mkdir -p /run/systemd/resolve
echo "nameserver 8.8.8.8" > /run/systemd/resolve/stub-resolv.conf
apt-get update
apt-get install -y curl rsync qemu-user-static
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash
source ~/.bashrc
nvm install 22
ln -s $(which node) /usr/bin/node
apt-get install -y curl rsync qemu-user-static nodejs
sed -i '/\(^\|#\)DNSStubListener=/c\DNSStubListener=no' /etc/systemd/resolved.conf
sed -i '/\(^\|#\)Storage=/c\Storage=persistent' /etc/systemd/journald.conf
@@ -22,7 +13,4 @@ sed -i '/\(^\|#\)ForwardToSyslog=/c\ForwardToSyslog=no' /etc/systemd/journald.co
systemctl enable container-runtime.service
rm -rf /run/systemd
rm /etc/resolv.conf
echo "nameserver 10.0.3.1" > /etc/resolv.conf


@@ -3,7 +3,7 @@ cd "$(dirname "${BASH_SOURCE[0]}")"
set -e
DISTRO=debian
VERSION=bookworm
VERSION=trixie
ARCH=${ARCH:-$(uname -m)}
FLAVOR=default


@@ -5,4 +5,7 @@ module.exports = {
testEnvironment: "node",
rootDir: "./src/",
modulePathIgnorePatterns: ["./dist/"],
moduleNameMapper: {
"^mime$": "<rootDir>/../__mocks__/mime.js",
},
}


@@ -1,28 +0,0 @@
#!/bin/bash
set -e
IMAGE=$1
if [ -z "$IMAGE" ]; then
>&2 echo "usage: $0 <image id>"
exit 1
fi
if ! [ -d "/media/images/$IMAGE" ]; then
>&2 echo "image does not exist"
exit 1
fi
container=$(mktemp -d)
mkdir -p $container/rootfs $container/upper $container/work
mount -t overlay -olowerdir=/media/images/$IMAGE,upperdir=$container/upper,workdir=$container/work overlay $container/rootfs
rootfs=$container/rootfs
for special in dev sys proc run; do
mkdir -p $rootfs/$special
mount --bind /$special $rootfs/$special
done
echo $rootfs


@@ -19,7 +19,6 @@
"lodash.merge": "^4.6.2",
"mime": "^4.0.7",
"node-fetch": "^3.1.0",
"ts-matches": "^6.3.2",
"tslib": "^2.5.3",
"typescript": "^5.1.3",
"yaml": "^2.3.1"
@@ -38,7 +37,7 @@
},
"../sdk/dist": {
"name": "@start9labs/start-sdk",
"version": "0.4.0-beta.38",
"version": "0.4.0-beta.58",
"license": "MIT",
"dependencies": {
"@iarna/toml": "^3.0.0",
@@ -49,8 +48,9 @@
"ini": "^5.0.0",
"isomorphic-fetch": "^3.0.0",
"mime": "^4.0.7",
"ts-matches": "^6.3.2",
"yaml": "^2.7.1"
"yaml": "^2.7.1",
"zod": "^4.3.6",
"zod-deep-partial": "^1.2.0"
},
"devDependencies": {
"@types/jest": "^29.4.0",
@@ -110,6 +110,7 @@
"integrity": "sha512-l+lkXCHS6tQEc5oUpK28xBOZ6+HwaH7YwoYQbLFiYb4nS2/l1tKnZEtEWkD0GuiYdvArf9qBS0XlQGXzPMsNqQ==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@ampproject/remapping": "^2.2.0",
"@babel/code-frame": "^7.26.2",
@@ -1200,6 +1201,7 @@
"dev": true,
"hasInstallScript": true,
"license": "Apache-2.0",
"peer": true,
"dependencies": {
"@swc/counter": "^0.1.3",
"@swc/types": "^0.1.17"
@@ -2143,6 +2145,7 @@
}
],
"license": "MIT",
"peer": true,
"dependencies": {
"caniuse-lite": "^1.0.30001688",
"electron-to-chromium": "^1.5.73",
@@ -3990,6 +3993,7 @@
"integrity": "sha512-NIy3oAFp9shda19hy4HK0HRTWKtPJmGdnvywu01nOqNC2vZg+Z+fvJDxpMQA88eb2I9EcafcdjYgsDthnYTvGw==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"@jest/core": "^29.7.0",
"@jest/types": "^29.6.3",
@@ -6490,12 +6494,6 @@
}
}
},
"node_modules/ts-matches": {
"version": "6.3.2",
"resolved": "https://registry.npmjs.org/ts-matches/-/ts-matches-6.3.2.tgz",
"integrity": "sha512-UhSgJymF8cLd4y0vV29qlKVCkQpUtekAaujXbQVc729FezS8HwqzepqvtjzQ3HboatIqN/Idor85O2RMwT7lIQ==",
"license": "MIT"
},
"node_modules/tslib": {
"version": "2.8.1",
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
@@ -6556,6 +6554,7 @@
"integrity": "sha512-84MVSjMEHP+FQRPy3pX9sTVV/INIex71s9TL2Gm5FG/WG1SqXeKyZ0k7/blY/4FdOzI12CBy1vGc4og/eus0fw==",
"dev": true,
"license": "Apache-2.0",
"peer": true,
"bin": {
"tsc": "bin/tsc",
"tsserver": "bin/tsserver"


@@ -28,7 +28,6 @@
"lodash.merge": "^4.6.2",
"mime": "^4.0.7",
"node-fetch": "^3.1.0",
"ts-matches": "^6.3.2",
"tslib": "^2.5.3",
"typescript": "^5.1.3",
"yaml": "^2.3.1"


@@ -1,12 +0,0 @@
#!/bin/bash
set -e
rootfs=$1
if [ -z "$rootfs" ]; then
>&2 echo "usage: $0 <container rootfs path>"
exit 1
fi
umount --recursive $rootfs
rm -rf $rootfs/..


@@ -3,33 +3,39 @@ import {
types as T,
utils,
VersionRange,
z,
} from "@start9labs/start-sdk"
import * as net from "net"
import { object, string, number, literals, some, unknown } from "ts-matches"
import { Effects } from "../Models/Effects"
import { CallbackHolder } from "../Models/CallbackHolder"
import { asError } from "@start9labs/start-sdk/base/lib/util"
const matchRpcError = object({
error: object({
code: number,
message: string,
data: some(
string,
object({
details: string,
debug: string.nullable().optional(),
}),
)
const matchRpcError = z.object({
error: z.object({
code: z.number(),
message: z.string(),
data: z
.union([
z.string(),
z.object({
details: z.string(),
debug: z.string().nullable().optional(),
}),
])
.nullable()
.optional(),
}),
})
const testRpcError = matchRpcError.test
const testRpcResult = object({
result: unknown,
}).test
type RpcError = typeof matchRpcError._TYPE
function testRpcError(v: unknown): v is RpcError {
return matchRpcError.safeParse(v).success
}
const matchRpcResult = z.object({
result: z.unknown(),
})
function testRpcResult(v: unknown): v is z.infer<typeof matchRpcResult> {
return matchRpcResult.safeParse(v).success
}
type RpcError = z.infer<typeof matchRpcError>
const SOCKET_PATH = "/media/startos/rpc/host.sock"
let hostSystemId = 0
@@ -71,7 +77,7 @@ const rpcRoundFor =
"Error in host RPC:",
utils.asError({ method, params, error: res.error }),
)
if (string.test(res.error.data)) {
if (typeof res.error.data === "string") {
message += ": " + res.error.data
console.error(`Details: ${res.error.data}`)
} else {
@@ -178,6 +184,13 @@ export function makeEffects(context: EffectContext): Effects {
T.Effects["getInstalledPackages"]
>
},
getServiceManifest(
...[options]: Parameters<T.Effects["getServiceManifest"]>
) {
return rpcRound("get-service-manifest", options) as ReturnType<
T.Effects["getServiceManifest"]
>
},
subcontainer: {
createFs(options: { imageId: string; name: string }) {
return rpcRound("subcontainer.create-fs", options) as ReturnType<
@@ -246,6 +259,14 @@ export function makeEffects(context: EffectContext): Effects {
callback: context.callbacks?.addCallback(options.callback) || null,
}) as ReturnType<T.Effects["getSystemSmtp"]>
},
getOutboundGateway(
...[options]: Parameters<T.Effects["getOutboundGateway"]>
) {
return rpcRound("get-outbound-gateway", {
...options,
callback: context.callbacks?.addCallback(options.callback) || null,
}) as ReturnType<T.Effects["getOutboundGateway"]>
},
listServiceInterfaces(
...[options]: Parameters<T.Effects["listServiceInterfaces"]>
) {
@@ -289,6 +310,7 @@ export function makeEffects(context: EffectContext): Effects {
getStatus(...[o]: Parameters<T.Effects["getStatus"]>) {
return rpcRound("get-status", o) as ReturnType<T.Effects["getStatus"]>
},
/// DEPRECATED
setMainStatus(o: { status: "running" | "stopped" }): Promise<null> {
return rpcRound("set-main-status", o) as ReturnType<
T.Effects["setHealth"]
@@ -308,9 +330,35 @@ export function makeEffects(context: EffectContext): Effects {
T.Effects["setDataVersion"]
>
},
plugin: {
url: {
register(
...[options]: Parameters<T.Effects["plugin"]["url"]["register"]>
) {
return rpcRound("plugin.url.register", options) as ReturnType<
T.Effects["plugin"]["url"]["register"]
>
},
exportUrl(
...[options]: Parameters<T.Effects["plugin"]["url"]["exportUrl"]>
) {
return rpcRound("plugin.url.export-url", options) as ReturnType<
T.Effects["plugin"]["url"]["exportUrl"]
>
},
clearUrls(
...[options]: Parameters<T.Effects["plugin"]["url"]["clearUrls"]>
) {
return rpcRound("plugin.url.clear-urls", options) as ReturnType<
T.Effects["plugin"]["url"]["clearUrls"]
>
},
},
},
}
if (context.callbacks?.onLeaveContext)
self.onLeaveContext(() => {
self.constRetry = undefined
self.isInContext = false
self.onLeaveContext = () => {
console.warn(


@@ -1,25 +1,13 @@
// @ts-check
import * as net from "net"
import {
object,
some,
string,
literal,
array,
number,
matches,
any,
shape,
anyOf,
literals,
} from "ts-matches"
import {
ExtendedVersion,
types as T,
utils,
VersionRange,
z,
} from "@start9labs/start-sdk"
import * as fs from "fs"
@@ -29,89 +17,92 @@ import { jsonPath, unNestPath } from "../Models/JsonPath"
import { System } from "../Interfaces/System"
import { makeEffects } from "./EffectCreator"
type MaybePromise<T> = T | Promise<T>
export const matchRpcResult = anyOf(
object({ result: any }),
object({
error: object({
code: number,
message: string,
data: object({
details: string.optional(),
debug: any.optional(),
})
export const matchRpcResult = z.union([
z.object({ result: z.any() }),
z.object({
error: z.object({
code: z.number(),
message: z.string(),
data: z
.object({
details: z.string().optional(),
debug: z.any().optional(),
})
.nullable()
.optional(),
}),
}),
)
])
export type RpcResult = typeof matchRpcResult._TYPE
export type RpcResult = z.infer<typeof matchRpcResult>
type SocketResponse = ({ jsonrpc: "2.0"; id: IdType } & RpcResult) | null
const SOCKET_PARENT = "/media/startos/rpc"
const SOCKET_PATH = "/media/startos/rpc/service.sock"
const jsonrpc = "2.0" as const
const isResult = object({ result: any }).test
const isResultSchema = z.object({ result: z.any() })
const isResult = (v: unknown): v is z.infer<typeof isResultSchema> =>
isResultSchema.safeParse(v).success
const idType = some(string, number, literal(null))
const idType = z.union([z.string(), z.number(), z.literal(null)])
type IdType = null | string | number | undefined
const runType = object({
const runType = z.object({
id: idType.optional(),
method: literal("execute"),
params: object({
id: string,
procedure: string,
input: any,
timeout: number.nullable().optional(),
method: z.literal("execute"),
params: z.object({
id: z.string(),
procedure: z.string(),
input: z.any(),
timeout: z.number().nullable().optional(),
}),
})
const sandboxRunType = object({
const sandboxRunType = z.object({
id: idType.optional(),
method: literal("sandbox"),
params: object({
id: string,
procedure: string,
input: any,
timeout: number.nullable().optional(),
method: z.literal("sandbox"),
params: z.object({
id: z.string(),
procedure: z.string(),
input: z.any(),
timeout: z.number().nullable().optional(),
}),
})
const callbackType = object({
method: literal("callback"),
params: object({
id: number,
args: array,
const callbackType = z.object({
method: z.literal("callback"),
params: z.object({
id: z.number(),
args: z.array(z.unknown()),
}),
})
const initType = object({
const initType = z.object({
id: idType.optional(),
method: literal("init"),
params: object({
id: string,
kind: literals("install", "update", "restore").nullable(),
method: z.literal("init"),
params: z.object({
id: z.string(),
kind: z.enum(["install", "update", "restore"]).nullable(),
}),
})
const startType = object({
const startType = z.object({
id: idType.optional(),
method: literal("start"),
method: z.literal("start"),
})
const stopType = object({
const stopType = z.object({
id: idType.optional(),
method: literal("stop"),
method: z.literal("stop"),
})
const exitType = object({
const exitType = z.object({
id: idType.optional(),
method: literal("exit"),
params: object({
id: string,
target: string.nullable(),
method: z.literal("exit"),
params: z.object({
id: z.string(),
target: z.string().nullable(),
}),
})
const evalType = object({
const evalType = z.object({
id: idType.optional(),
method: literal("eval"),
params: object({
script: string,
method: z.literal("eval"),
params: z.object({
script: z.string(),
}),
})
@@ -144,8 +135,11 @@ const handleRpc = (id: IdType, result: Promise<RpcResult>) =>
},
}))
const hasId = object({ id: idType }).test
const hasIdSchema = z.object({ id: idType })
const hasId = (v: unknown): v is z.infer<typeof hasIdSchema> =>
hasIdSchema.safeParse(v).success
export class RpcListener {
shouldExit = false
unixSocketServer = net.createServer(async (server) => {})
private _system: System | undefined
private callbacks: CallbackHolder | undefined
@@ -158,6 +152,8 @@ export class RpcListener {
this.unixSocketServer.listen(SOCKET_PATH)
console.log("Listening on %s", SOCKET_PATH)
this.unixSocketServer.on("connection", (s) => {
let id: IdType = null
const captureId = <X>(x: X) => {
@@ -208,6 +204,11 @@ export class RpcListener {
.catch(mapError)
.then(logData("response"))
.then(writeDataToSocket)
.then((_) => {
if (this.shouldExit) {
process.exit(0)
}
})
.catch((e) => {
console.error(`Major error in socket handling: ${e}`)
console.debug(`Data in: ${a.toString()}`)
@@ -238,40 +239,52 @@ export class RpcListener {
}
private dealWithInput(input: unknown): MaybePromise<SocketResponse> {
return matches(input)
.when(runType, async ({ id, params }) => {
const parsed = z.object({ method: z.string() }).safeParse(input)
if (!parsed.success) {
console.warn(
`Couldn't parse the following input ${JSON.stringify(input)}`,
)
return {
jsonrpc,
id: (input as any)?.id,
error: {
code: -32602,
message: "invalid params",
data: {
details: JSON.stringify(input),
},
},
}
}
switch (parsed.data.method) {
case "execute": {
const { id, params } = runType.parse(input)
const system = this.system
const procedure = jsonPath.unsafeCast(params.procedure)
const { input, timeout, id: eventId } = params
const result = this.getResult(
procedure,
system,
eventId,
timeout,
input,
)
const procedure = jsonPath.parse(params.procedure)
const { input: inp, timeout, id: eventId } = params
const result = this.getResult(procedure, system, eventId, timeout, inp)
return handleRpc(id, result)
})
.when(sandboxRunType, async ({ id, params }) => {
}
case "sandbox": {
const { id, params } = sandboxRunType.parse(input)
const system = this.system
const procedure = jsonPath.unsafeCast(params.procedure)
const { input, timeout, id: eventId } = params
const result = this.getResult(
procedure,
system,
eventId,
timeout,
input,
)
const procedure = jsonPath.parse(params.procedure)
const { input: inp, timeout, id: eventId } = params
const result = this.getResult(procedure, system, eventId, timeout, inp)
return handleRpc(id, result)
})
.when(callbackType, async ({ params: { id, args } }) => {
}
case "callback": {
const {
params: { id, args },
} = callbackType.parse(input)
this.callCallback(id, args)
return null
})
.when(startType, async ({ id }) => {
}
case "start": {
const { id } = startType.parse(input)
const callbacks =
this.callbacks?.getChild("main") || this.callbacks?.child("main")
const effects = makeEffects({
@@ -282,15 +295,20 @@ export class RpcListener {
id,
this.system.start(effects).then((result) => ({ result })),
)
})
.when(stopType, async ({ id }) => {
this.callbacks?.removeChild("main")
}
case "stop": {
const { id } = stopType.parse(input)
return handleRpc(
id,
this.system.stop().then((result) => ({ result })),
this.system.stop().then((result) => {
this.callbacks?.removeChild("main")
return { result }
}),
)
})
.when(exitType, async ({ id, params }) => {
}
case "exit": {
const { id, params } = exitType.parse(input)
return handleRpc(
id,
(async () => {
@@ -308,11 +326,13 @@ export class RpcListener {
}),
target,
)
this.shouldExit = true
}
})().then((result) => ({ result })),
)
})
.when(initType, async ({ id, params }) => {
}
case "init": {
const { id, params } = initType.parse(input)
return handleRpc(
id,
(async () => {
@@ -337,8 +357,9 @@ export class RpcListener {
}
})().then((result) => ({ result })),
)
})
.when(evalType, async ({ id, params }) => {
}
case "eval": {
const { id, params } = evalType.parse(input)
return handleRpc(
id,
(async () => {
@@ -363,41 +384,28 @@ export class RpcListener {
}
})(),
)
})
.when(
shape({ id: idType.optional(), method: string }),
({ id, method }) => ({
}
default: {
const { id, method } = z
.object({ id: idType.optional(), method: z.string() })
.passthrough()
.parse(input)
return {
jsonrpc,
id,
error: {
code: -32601,
message: `Method not found`,
message: "Method not found",
data: {
details: method,
},
},
}),
)
.defaultToLazy(() => {
console.warn(
`Couldn't parse the following input ${JSON.stringify(input)}`,
)
return {
jsonrpc,
id: (input as any)?.id,
error: {
code: -32602,
message: "invalid params",
data: {
details: JSON.stringify(input),
},
},
}
})
}
}
}
private getResult(
procedure: typeof jsonPath._TYPE,
procedure: z.infer<typeof jsonPath>,
system: System,
eventId: string,
timeout: number | null | undefined,
@@ -425,6 +433,7 @@ export class RpcListener {
return system.getActionInput(
effects,
procedures[2],
input?.prefill ?? null,
timeout || null,
)
case procedures[1] === "actions" && procedures[3] === "run":
@@ -436,26 +445,18 @@ export class RpcListener {
)
}
}
})().then(ensureResultTypeShape, (error) =>
matches(error)
.when(
object({
error: string,
code: number.defaultTo(0),
}),
(error) => ({
error: {
code: error.code,
message: error.error,
},
}),
)
.defaultToLazy(() => ({
error: {
code: 0,
message: String(error),
},
})),
)
})().then(ensureResultTypeShape, (error) => {
const errorSchema = z.object({
error: z.string(),
code: z.number().default(0),
})
const parsed = errorSchema.safeParse(error)
if (parsed.success) {
return {
error: { code: parsed.data.code, message: parsed.data.error },
}
}
return { error: { code: 0, message: String(error) } }
})
}
}


@@ -2,7 +2,7 @@ import * as fs from "fs/promises"
import * as cp from "child_process"
import { SubContainer, types as T } from "@start9labs/start-sdk"
import { promisify } from "util"
import { DockerProcedure, VolumeId } from "../../../Models/DockerProcedure"
import { DockerProcedure } from "../../../Models/DockerProcedure"
import { Volume } from "./matchVolume"
import {
CommandOptions,
@@ -28,7 +28,7 @@ export class DockerProcedureContainer extends Drop {
effects: T.Effects,
packageId: string,
data: DockerProcedure,
volumes: { [id: VolumeId]: Volume },
volumes: { [id: string]: Volume },
name: string,
options: { subcontainer?: SubContainer<SDKManifest> } = {},
) {
@@ -47,7 +47,7 @@ export class DockerProcedureContainer extends Drop {
effects: T.Effects,
packageId: string,
data: DockerProcedure,
volumes: { [id: VolumeId]: Volume },
volumes: { [id: string]: Volume },
name: string,
) {
const subcontainer = await SubContainerOwned.of(
@@ -64,7 +64,7 @@ export class DockerProcedureContainer extends Drop {
? `${subcontainer.rootfs}${mounts[mount]}`
: `${subcontainer.rootfs}/${mounts[mount]}`
await fs.mkdir(path, { recursive: true })
const volumeMount = volumes[mount]
const volumeMount: Volume = volumes[mount]
if (volumeMount.type === "data") {
await subcontainer.mount(
Mounts.of().mountVolume({
@@ -82,18 +82,15 @@ export class DockerProcedureContainer extends Drop {
}),
)
} else if (volumeMount.type === "certificate") {
const hostInfo = await effects.getHostInfo({
hostId: volumeMount["interface-id"],
})
const hostnames = [
`${packageId}.embassy`,
...new Set(
Object.values(
(
await effects.getHostInfo({
hostId: volumeMount["interface-id"],
})
)?.hostnameInfo || {},
)
.flatMap((h) => h)
.flatMap((h) => (h.kind === "onion" ? [h.hostname.value] : [])),
Object.values(hostInfo?.bindings || {})
.flatMap((b) => b.addresses.available)
.map((h) => h.hostname),
).values(),
]
const certChain = await effects.getSslCertificate({
@@ -118,7 +115,7 @@ export class DockerProcedureContainer extends Drop {
subpath: volumeMount.path,
readonly: volumeMount.readonly,
volumeId: volumeMount["volume-id"],
filetype: "directory",
idmap: [],
},
})
} else if (volumeMount.type === "backup") {


@@ -10,7 +10,6 @@ import { SDKManifest } from "@start9labs/start-sdk/base/lib/types"
import { SubContainerRc } from "@start9labs/start-sdk/package/lib/util/SubContainer"
const EMBASSY_HEALTH_INTERVAL = 15 * 1000
const EMBASSY_PROPERTIES_LOOP = 30 * 1000
/**
* We wanted something to represent what the main loop is doing, and
* in this case it used to run the properties, health, and the docker/ js main.
@@ -120,6 +119,7 @@ export class MainLoop {
? {
preferredExternalPort: lanConf.external,
alpn: { specified: ["http/1.1"] },
addXForwardedHeaders: false,
}
: null,
})
@@ -133,7 +133,7 @@ export class MainLoop {
delete this.mainEvent
delete this.healthLoops
await main?.daemon
.stop()
.term()
.catch((e: unknown) => console.error(`Main loop error`, utils.asError(e)))
this.effects.setMainStatus({ status: "stopped" })
if (healthLoops) healthLoops.forEach((x) => clearInterval(x.interval))


@@ -15,26 +15,11 @@ import { System } from "../../../Interfaces/System"
import { matchManifest, Manifest } from "./matchManifest"
import * as childProcess from "node:child_process"
import { DockerProcedureContainer } from "./DockerProcedureContainer"
import { DockerProcedure } from "../../../Models/DockerProcedure"
import { promisify } from "node:util"
import * as U from "./oldEmbassyTypes"
import { MainLoop } from "./MainLoop"
import {
matches,
boolean,
dictionary,
literal,
literals,
object,
string,
unknown,
any,
tuple,
number,
anyOf,
deferred,
Parser,
array,
} from "ts-matches"
import { z } from "@start9labs/start-sdk"
import { AddSslOptions } from "@start9labs/start-sdk/base/lib/osBindings"
import {
BindOptionsByProtocol,
@@ -50,40 +35,48 @@ import {
transformOldConfigToNew,
} from "./transformConfigSpec"
import { partialDiff } from "@start9labs/start-sdk/base/lib/util"
import { Volume } from "@start9labs/start-sdk/package/lib/util/Volume"
type Optional<A> = A | undefined | null
function todo(): never {
throw new Error("Not implemented")
}
/**
* Local type for procedure values from the manifest.
* The manifest's zod schemas use ZodTypeAny casts that produce `unknown` in zod v4.
* This type restores the expected shape for type-safe property access.
*/
type Procedure =
| (DockerProcedure & { type: "docker" })
| { type: "script"; args: unknown[] | null }
const MANIFEST_LOCATION = "/usr/lib/startos/package/embassyManifest.json"
export const EMBASSY_JS_LOCATION = "/usr/lib/startos/package/embassy.js"
const configFile = FileHelper.json(
{
volumeId: "embassy",
base: new Volume("embassy"),
subpath: "config.json",
},
matches.any,
z.any(),
)
const dependsOnFile = FileHelper.json(
{
volumeId: "embassy",
base: new Volume("embassy"),
subpath: "dependsOn.json",
},
dictionary([string, array(string)]),
z.record(z.string(), z.array(z.string())),
)
const matchResult = object({
result: any,
const matchResult = z.object({
result: z.any(),
})
const matchError = object({
error: string,
const matchError = z.object({
error: z.string(),
})
const matchErrorCode = object<{
"error-code": [number, string] | readonly [number, string]
}>({
"error-code": tuple(number, string),
const matchErrorCode = z.object({
"error-code": z.tuple([z.number(), z.string()]),
})
const assertNever = (
@@ -95,29 +88,34 @@ const assertNever = (
/**
Should be changing the type for specific properties, and this is mostly a transformation for the old return types to the newer one.
*/
function isMatchResult(a: unknown): a is z.infer<typeof matchResult> {
return matchResult.safeParse(a).success
}
function isMatchError(a: unknown): a is z.infer<typeof matchError> {
return matchError.safeParse(a).success
}
function isMatchErrorCode(a: unknown): a is z.infer<typeof matchErrorCode> {
return matchErrorCode.safeParse(a).success
}
const fromReturnType = <A>(a: U.ResultType<A>): A => {
if (matchResult.test(a)) {
if (isMatchResult(a)) {
return a.result
}
if (matchError.test(a)) {
if (isMatchError(a)) {
console.info({ passedErrorStack: new Error().stack, error: a.error })
throw { error: a.error }
}
if (matchErrorCode.test(a)) {
if (isMatchErrorCode(a)) {
const [code, message] = a["error-code"]
throw { error: message, code }
}
return assertNever(a)
return assertNever(a as never)
}
const matchSetResult = object({
"depends-on": dictionary([string, array(string)])
.nullable()
.optional(),
dependsOn: dictionary([string, array(string)])
.nullable()
.optional(),
signal: literals(
const matchSetResult = z.object({
"depends-on": z.record(z.string(), z.array(z.string())).nullable().optional(),
dependsOn: z.record(z.string(), z.array(z.string())).nullable().optional(),
signal: z.enum([
"SIGTERM",
"SIGHUP",
"SIGINT",
@@ -150,7 +148,7 @@ const matchSetResult = object({
"SIGPWR",
"SIGSYS",
"SIGINFO",
),
]),
})
type OldGetConfigRes = {
@@ -232,33 +230,29 @@ const asProperty = (x: PackagePropertiesV2): PropertiesReturn =>
Object.fromEntries(
Object.entries(x).map(([key, value]) => [key, asProperty_(value)]),
)
const [matchPackageProperties, setMatchPackageProperties] =
deferred<PackagePropertiesV2>()
const matchPackagePropertyObject: Parser<unknown, PackagePropertyObject> =
object({
value: matchPackageProperties,
type: literal("object"),
description: string,
})
const matchPackagePropertyObject: z.ZodType<PackagePropertyObject> = z.object({
value: z.lazy(() => matchPackageProperties),
type: z.literal("object"),
description: z.string(),
})
const matchPackagePropertyString: Parser<unknown, PackagePropertyString> =
object({
type: literal("string"),
description: string.nullable().optional(),
value: string,
copyable: boolean.nullable().optional(),
qr: boolean.nullable().optional(),
masked: boolean.nullable().optional(),
})
setMatchPackageProperties(
dictionary([
string,
anyOf(matchPackagePropertyObject, matchPackagePropertyString),
]),
const matchPackagePropertyString: z.ZodType<PackagePropertyString> = z.object({
type: z.literal("string"),
description: z.string().nullable().optional(),
value: z.string(),
copyable: z.boolean().nullable().optional(),
qr: z.boolean().nullable().optional(),
masked: z.boolean().nullable().optional(),
})
const matchPackageProperties: z.ZodType<PackagePropertiesV2> = z.lazy(() =>
z.record(
z.string(),
z.union([matchPackagePropertyObject, matchPackagePropertyString]),
),
)
const matchProperties = object({
version: literal(2),
const matchProperties = z.object({
version: z.literal(2),
data: matchPackageProperties,
})
@@ -287,7 +281,6 @@ function convertProperties(
}
}
const DEFAULT_REGISTRY = "https://registry.start9.com"
export class SystemForEmbassy implements System {
private version: ExtendedVersion
currentRunning: MainLoop | undefined
@@ -303,7 +296,7 @@ export class SystemForEmbassy implements System {
})
const manifestData = await fs.readFile(manifestLocation, "utf-8")
return new SystemForEmbassy(
matchManifest.unsafeCast(JSON.parse(manifestData)),
matchManifest.parse(JSON.parse(manifestData)),
moduleCode,
)
}
@@ -331,6 +324,10 @@ export class SystemForEmbassy implements System {
) {
this.version.upstream.prerelease = ["alpha"]
}
if (this.manifest.id === "nostr") {
this.manifest.id = "nostr-rs-relay"
}
}
async init(
@@ -385,7 +382,9 @@ export class SystemForEmbassy implements System {
delete this.currentRunning
if (currentRunning) {
await currentRunning.clean({
timeout: fromDuration(this.manifest.main["sigterm-timeout"] || "30s"),
timeout: fromDuration(
(this.manifest.main["sigterm-timeout"] as any) || "30s",
),
})
}
}
@@ -456,6 +455,7 @@ export class SystemForEmbassy implements System {
addSsl = {
preferredExternalPort: lanPortNum,
alpn: { specified: [] },
addXForwardedHeaders: false,
}
}
return [
@@ -505,6 +505,7 @@ export class SystemForEmbassy implements System {
async getActionInput(
effects: Effects,
actionId: string,
_prefill: Record<string, unknown> | null,
timeoutMs: number | null,
): Promise<T.ActionInput | null> {
if (actionId === "config") {
@@ -617,7 +618,7 @@ export class SystemForEmbassy implements System {
effects: Effects,
timeoutMs: number | null,
): Promise<void> {
const backup = this.manifest.backup.create
const backup = this.manifest.backup.create as Procedure
if (backup.type === "docker") {
const commands = [backup.entrypoint, ...backup.args]
const container = await DockerProcedureContainer.of(
@@ -650,7 +651,7 @@ export class SystemForEmbassy implements System {
encoding: "utf-8",
})
.catch((_) => null)
const restoreBackup = this.manifest.backup.restore
const restoreBackup = this.manifest.backup.restore as Procedure
if (restoreBackup.type === "docker") {
const commands = [restoreBackup.entrypoint, ...restoreBackup.args]
const container = await DockerProcedureContainer.of(
@@ -683,7 +684,7 @@ export class SystemForEmbassy implements System {
effects: Effects,
timeoutMs: number | null,
): Promise<OldGetConfigRes> {
const config = this.manifest.config?.get
const config = this.manifest.config?.get as Procedure | undefined
if (!config) return { spec: {} }
if (config.type === "docker") {
const commands = [config.entrypoint, ...config.args]
@@ -725,7 +726,7 @@ export class SystemForEmbassy implements System {
)
await updateConfig(effects, this.manifest, spec, newConfig)
await configFile.write(effects, newConfig)
const setConfigValue = this.manifest.config?.set
const setConfigValue = this.manifest.config?.set as Procedure | undefined
if (!setConfigValue) return
if (setConfigValue.type === "docker") {
const commands = [
@@ -740,7 +741,7 @@ export class SystemForEmbassy implements System {
this.manifest.volumes,
`Set Config - ${commands.join(" ")}`,
)
const answer = matchSetResult.unsafeCast(
const answer = matchSetResult.parse(
JSON.parse(
(await container.execFail(commands, timeoutMs)).stdout.toString(),
),
@@ -753,7 +754,7 @@ export class SystemForEmbassy implements System {
const method = moduleCode.setConfig
if (!method) throw new Error("Expecting that the method setConfig exists")
const answer = matchSetResult.unsafeCast(
const answer = matchSetResult.parse(
await method(
polyfillEffects(effects, this.manifest),
newConfig as U.Config,
@@ -782,7 +783,11 @@ export class SystemForEmbassy implements System {
const requiredDeps = {
...Object.fromEntries(
Object.entries(this.manifest.dependencies ?? {})
.filter(([k, v]) => v?.requirement.type === "required")
.filter(
([k, v]) =>
(v?.requirement as { type: string } | undefined)?.type ===
"required",
)
.map((x) => [x[0], []]) || [],
),
}
@@ -850,7 +855,7 @@ export class SystemForEmbassy implements System {
}
if (migration) {
const [_, procedure] = migration
const [_, procedure] = migration as readonly [unknown, Procedure]
if (procedure.type === "docker") {
const commands = [procedure.entrypoint, ...procedure.args]
const container = await DockerProcedureContainer.of(
@@ -888,8 +893,10 @@ export class SystemForEmbassy implements System {
effects: Effects,
timeoutMs: number | null,
): Promise<PropertiesReturn> {
// TODO BLU-J set the properties ever so often
const setConfigValue = this.manifest.properties
const setConfigValue = this.manifest.properties as
| Procedure
| null
| undefined
if (!setConfigValue) throw new Error("There is no properties")
if (setConfigValue.type === "docker") {
const commands = [setConfigValue.entrypoint, ...setConfigValue.args]
@@ -900,7 +907,7 @@ export class SystemForEmbassy implements System {
this.manifest.volumes,
`Properties - ${commands.join(" ")}`,
)
const properties = matchProperties.unsafeCast(
const properties = matchProperties.parse(
JSON.parse(
(await container.execFail(commands, timeoutMs)).stdout.toString(),
),
@@ -911,7 +918,7 @@ export class SystemForEmbassy implements System {
const method = moduleCode.properties
if (!method)
throw new Error("Expecting that the method properties exists")
const properties = matchProperties.unsafeCast(
const properties = matchProperties.parse(
await method(polyfillEffects(effects, this.manifest)).then(
fromReturnType,
),
@@ -926,7 +933,8 @@ export class SystemForEmbassy implements System {
formData: unknown,
timeoutMs: number | null,
): Promise<T.ActionResult> {
const actionProcedure = this.manifest.actions?.[actionId]?.implementation
const actionProcedure = this.manifest.actions?.[actionId]
?.implementation as Procedure | undefined
const toActionResult = ({
message,
value,
@@ -993,7 +1001,9 @@ export class SystemForEmbassy implements System {
oldConfig: unknown,
timeoutMs: number | null,
): Promise<object> {
const actionProcedure = this.manifest.dependencies?.[id]?.config?.check
const actionProcedure = this.manifest.dependencies?.[id]?.config?.check as
| Procedure
| undefined
if (!actionProcedure) return { message: "Action not found", value: null }
if (actionProcedure.type === "docker") {
const commands = [
@@ -1043,7 +1053,7 @@ export class SystemForEmbassy implements System {
volumeId: "embassy",
subpath: null,
readonly: true,
filetype: "directory",
idmap: [],
},
})
configFile
@@ -1085,40 +1095,50 @@ export class SystemForEmbassy implements System {
}
}
const matchPointer = object({
type: literal("pointer"),
const matchPointer = z.object({
type: z.literal("pointer"),
})
const matchPointerPackage = object({
subtype: literal("package"),
target: literals("tor-key", "tor-address", "lan-address"),
"package-id": string,
interface: string,
const matchPointerPackage = z.object({
subtype: z.literal("package"),
target: z.enum(["tor-key", "tor-address", "lan-address"]),
"package-id": z.string(),
interface: z.string(),
})
const matchPointerConfig = object({
subtype: literal("package"),
target: literals("config"),
"package-id": string,
selector: string,
multi: boolean,
const matchPointerConfig = z.object({
subtype: z.literal("package"),
target: z.enum(["config"]),
"package-id": z.string(),
selector: z.string(),
multi: z.boolean(),
})
const matchSpec = object({
spec: object,
const matchSpec = z.object({
spec: z.record(z.string(), z.unknown()),
})
const matchVariants = object({ variants: dictionary([string, unknown]) })
const matchVariants = z.object({ variants: z.record(z.string(), z.unknown()) })
function isMatchPointer(v: unknown): v is z.infer<typeof matchPointer> {
return matchPointer.safeParse(v).success
}
function isMatchSpec(v: unknown): v is z.infer<typeof matchSpec> {
return matchSpec.safeParse(v).success
}
function isMatchVariants(v: unknown): v is z.infer<typeof matchVariants> {
return matchVariants.safeParse(v).success
}
function cleanSpecOfPointers<T>(mutSpec: T): T {
if (!object.test(mutSpec)) return mutSpec
if (typeof mutSpec !== "object" || mutSpec === null) return mutSpec
for (const key in mutSpec) {
const value = mutSpec[key]
if (matchSpec.test(value)) value.spec = cleanSpecOfPointers(value.spec)
if (matchVariants.test(value))
if (isMatchSpec(value))
value.spec = cleanSpecOfPointers(value.spec) as Record<string, unknown>
if (isMatchVariants(value))
value.variants = Object.fromEntries(
Object.entries(value.variants).map(([key, value]) => [
key,
cleanSpecOfPointers(value),
]),
)
if (!matchPointer.test(value)) continue
if (!isMatchPointer(value)) continue
delete mutSpec[key]
// // if (value.target === )
}
@@ -1191,7 +1211,7 @@ async function updateConfig(
volumeId: "embassy",
subpath: null,
readonly: true,
filetype: "directory",
idmap: [],
},
})
const remoteConfig = configFile
@@ -1240,12 +1260,8 @@ async function updateConfig(
? ""
: catchFn(
() =>
(specValue.target === "lan-address"
? filled.addressInfo!.localHostnames[0] ||
filled.addressInfo!.onionHostnames[0]
: filled.addressInfo!.onionHostnames[0] ||
filled.addressInfo!.localHostnames[0]
).hostname.value,
filled.addressInfo!.filter({ kind: "mdns" })!.hostnames[0]
.hostname,
) || ""
mutConfigValue[key] = url
}
@@ -1268,7 +1284,7 @@ function extractServiceInterfaceId(manifest: Manifest, specInterface: string) {
}
async function convertToNewConfig(value: OldGetConfigRes) {
try {
const valueSpec: OldConfigSpec = matchOldConfigSpec.unsafeCast(value.spec)
const valueSpec: OldConfigSpec = matchOldConfigSpec.parse(value.spec)
const spec = transformConfigSpec(valueSpec)
if (!value.config) return { spec, config: null }
const config = transformOldConfigToNew(valueSpec, value.config) ?? null


@@ -4,9 +4,9 @@ import synapseManifest from "./__fixtures__/synapseManifest"
describe("matchManifest", () => {
test("gittea", () => {
matchManifest.unsafeCast(giteaManifest)
matchManifest.parse(giteaManifest)
})
test("synapse", () => {
matchManifest.unsafeCast(synapseManifest)
matchManifest.parse(synapseManifest)
})
})


@@ -1,126 +1,121 @@
import {
object,
literal,
string,
array,
boolean,
dictionary,
literals,
number,
unknown,
some,
every,
} from "ts-matches"
import { z } from "@start9labs/start-sdk"
import { matchVolume } from "./matchVolume"
import { matchDockerProcedure } from "../../../Models/DockerProcedure"
const matchJsProcedure = object({
type: literal("script"),
args: array(unknown).nullable().optional().defaultTo([]),
const matchJsProcedure = z.object({
type: z.literal("script"),
args: z.array(z.unknown()).nullable().optional().default([]),
})
const matchProcedure = some(matchDockerProcedure, matchJsProcedure)
export type Procedure = typeof matchProcedure._TYPE
const matchProcedure = z.union([matchDockerProcedure, matchJsProcedure])
export type Procedure = z.infer<typeof matchProcedure>
const matchAction = object({
name: string,
description: string,
warning: string.nullable().optional(),
const matchAction = z.object({
name: z.string(),
description: z.string(),
warning: z.string().nullable().optional(),
implementation: matchProcedure,
"allowed-statuses": array(literals("running", "stopped")),
"input-spec": unknown.nullable().optional(),
"allowed-statuses": z.array(z.enum(["running", "stopped"])),
"input-spec": z.unknown().nullable().optional(),
})
export const matchManifest = object({
id: string,
title: string,
version: string,
export const matchManifest = z.object({
id: z.string(),
title: z.string(),
version: z.string(),
main: matchDockerProcedure,
assets: object({
assets: string.nullable().optional(),
scripts: string.nullable().optional(),
})
assets: z
.object({
assets: z.string().nullable().optional(),
scripts: z.string().nullable().optional(),
})
.nullable()
.optional(),
"health-checks": dictionary([
string,
every(
"health-checks": z.record(
z.string(),
z.intersection(
matchProcedure,
object({
name: string,
["success-message"]: string.nullable().optional(),
z.object({
name: z.string(),
"success-message": z.string().nullable().optional(),
}),
),
]),
config: object({
get: matchProcedure,
set: matchProcedure,
})
),
config: z
.object({
get: matchProcedure,
set: matchProcedure,
})
.nullable()
.optional(),
properties: matchProcedure.nullable().optional(),
volumes: dictionary([string, matchVolume]),
interfaces: dictionary([
string,
object({
name: string,
description: string,
"tor-config": object({
"port-mapping": dictionary([string, string]),
})
volumes: z.record(z.string(), matchVolume),
interfaces: z.record(
z.string(),
z.object({
name: z.string(),
description: z.string(),
"tor-config": z
.object({
"port-mapping": z.record(z.string(), z.string()),
})
.nullable()
.optional(),
"lan-config": dictionary([
string,
object({
ssl: boolean,
internal: number,
}),
])
"lan-config": z
.record(
z.string(),
z.object({
ssl: z.boolean(),
internal: z.number(),
}),
)
.nullable()
.optional(),
ui: boolean,
protocols: array(string),
ui: z.boolean(),
protocols: z.array(z.string()),
}),
]),
backup: object({
),
backup: z.object({
create: matchProcedure,
restore: matchProcedure,
}),
migrations: object({
to: dictionary([string, matchProcedure]),
from: dictionary([string, matchProcedure]),
})
migrations: z
.object({
to: z.record(z.string(), matchProcedure),
from: z.record(z.string(), matchProcedure),
})
.nullable()
.optional(),
dependencies: dictionary([
string,
object({
version: string,
requirement: some(
object({
type: literal("opt-in"),
how: string,
}),
object({
type: literal("opt-out"),
how: string,
}),
object({
type: literal("required"),
}),
),
description: string.nullable().optional(),
config: object({
check: matchProcedure,
"auto-configure": matchProcedure,
dependencies: z.record(
z.string(),
z
.object({
version: z.string(),
requirement: z.union([
z.object({
type: z.literal("opt-in"),
how: z.string(),
}),
z.object({
type: z.literal("opt-out"),
how: z.string(),
}),
z.object({
type: z.literal("required"),
}),
]),
description: z.string().nullable().optional(),
config: z
.object({
check: matchProcedure,
"auto-configure": matchProcedure,
})
.nullable()
.optional(),
})
.nullable()
.optional(),
})
.nullable()
.optional(),
]),
),
actions: dictionary([string, matchAction]),
actions: z.record(z.string(), matchAction),
})
export type Manifest = typeof matchManifest._TYPE
export type Manifest = z.infer<typeof matchManifest>
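The same handful of correspondences cover this whole file: `object` → `z.object`, `some` → `z.union`, `dictionary([k, v])` → `z.record(k, v)`, `literals(...)` → `z.enum([...])`, `.defaultTo(x)` → `.default(x)`, and `typeof x._TYPE` → `z.infer<typeof x>`. A minimal self-contained sketch of the mapping (illustrative names, not from this file):

import { z } from "@start9labs/start-sdk"

// Hypothetical schema showing the ts-matches -> zod correspondences above.
const sketch = z.object({
  id: z.string(),                                   // string             -> z.string()
  args: z.array(z.unknown()).default([]),           // .defaultTo([])     -> .default([])
  env: z.record(z.string(), z.string()).optional(), // dictionary([k, v]) -> z.record(k, v)
  status: z.enum(["running", "stopped"]),           // literals(...)      -> z.enum([...])
})
type Sketch = z.infer<typeof sketch>                // typeof x._TYPE     -> z.infer<typeof x>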


@@ -1,32 +1,32 @@
import { object, literal, string, boolean, some } from "ts-matches"
import { z } from "@start9labs/start-sdk"
const matchDataVolume = object({
type: literal("data"),
readonly: boolean.optional(),
const matchDataVolume = z.object({
type: z.literal("data"),
readonly: z.boolean().optional(),
})
const matchAssetVolume = object({
type: literal("assets"),
const matchAssetVolume = z.object({
type: z.literal("assets"),
})
const matchPointerVolume = object({
type: literal("pointer"),
"package-id": string,
"volume-id": string,
path: string,
readonly: boolean,
const matchPointerVolume = z.object({
type: z.literal("pointer"),
"package-id": z.string(),
"volume-id": z.string(),
path: z.string(),
readonly: z.boolean(),
})
const matchCertificateVolume = object({
type: literal("certificate"),
"interface-id": string,
const matchCertificateVolume = z.object({
type: z.literal("certificate"),
"interface-id": z.string(),
})
const matchBackupVolume = object({
type: literal("backup"),
readonly: boolean,
const matchBackupVolume = z.object({
type: z.literal("backup"),
readonly: z.boolean(),
})
export const matchVolume = some(
export const matchVolume = z.union([
matchDataVolume,
matchAssetVolume,
matchPointerVolume,
matchCertificateVolume,
matchBackupVolume,
)
export type Volume = typeof matchVolume._TYPE
])
export type Volume = z.infer<typeof matchVolume>
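Every variant above is tagged by a distinct `type` literal, so `z.discriminatedUnion` would also fit here; it dispatches on the tag instead of trying each branch in turn and produces variant-aware error messages. A sketch of that alternative (not what this diff does):

// Hypothetical alternative: dispatch on the shared "type" discriminator.
const matchVolumeAlt = z.discriminatedUnion("type", [
  matchDataVolume,
  matchAssetVolume,
  matchPointerVolume,
  matchCertificateVolume,
  matchBackupVolume,
])

Plain `z.union` was presumably kept as the mechanical one-to-one translation of `some(...)`.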


@@ -12,43 +12,43 @@ import nostrConfig2 from "./__fixtures__/nostrConfig2"
describe("transformConfigSpec", () => {
test("matchOldConfigSpec(embassyPages.homepage.variants[web-page])", () => {
matchOldConfigSpec.unsafeCast(
matchOldConfigSpec.parse(
fixtureEmbassyPagesConfig.homepage.variants["web-page"],
)
})
test("matchOldConfigSpec(embassyPages)", () => {
matchOldConfigSpec.unsafeCast(fixtureEmbassyPagesConfig)
matchOldConfigSpec.parse(fixtureEmbassyPagesConfig)
})
test("transformConfigSpec(embassyPages)", () => {
const spec = matchOldConfigSpec.unsafeCast(fixtureEmbassyPagesConfig)
const spec = matchOldConfigSpec.parse(fixtureEmbassyPagesConfig)
expect(transformConfigSpec(spec)).toMatchSnapshot()
})
test("matchOldConfigSpec(RTL.nodes)", () => {
matchOldValueSpecList.unsafeCast(fixtureRTLConfig.nodes)
matchOldValueSpecList.parse(fixtureRTLConfig.nodes)
})
test("matchOldConfigSpec(RTL)", () => {
matchOldConfigSpec.unsafeCast(fixtureRTLConfig)
matchOldConfigSpec.parse(fixtureRTLConfig)
})
test("transformConfigSpec(RTL)", () => {
const spec = matchOldConfigSpec.unsafeCast(fixtureRTLConfig)
const spec = matchOldConfigSpec.parse(fixtureRTLConfig)
expect(transformConfigSpec(spec)).toMatchSnapshot()
})
test("transformConfigSpec(searNXG)", () => {
const spec = matchOldConfigSpec.unsafeCast(searNXG)
const spec = matchOldConfigSpec.parse(searNXG)
expect(transformConfigSpec(spec)).toMatchSnapshot()
})
test("transformConfigSpec(bitcoind)", () => {
const spec = matchOldConfigSpec.unsafeCast(bitcoind)
const spec = matchOldConfigSpec.parse(bitcoind)
expect(transformConfigSpec(spec)).toMatchSnapshot()
})
test("transformConfigSpec(nostr)", () => {
const spec = matchOldConfigSpec.unsafeCast(nostr)
const spec = matchOldConfigSpec.parse(nostr)
expect(transformConfigSpec(spec)).toMatchSnapshot()
})
test("transformConfigSpec(nostr2)", () => {
const spec = matchOldConfigSpec.unsafeCast(nostrConfig2)
const spec = matchOldConfigSpec.parse(nostrConfig2)
expect(transformConfigSpec(spec)).toMatchSnapshot()
})
})
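`unsafeCast` and `parse` both throw on mismatch, so the tests translate one-for-one. Where a non-throwing check is preferable, zod also offers `safeParse`; a sketch against one of the fixtures above:

// safeParse returns a discriminated result object instead of throwing a ZodError.
const result = matchOldConfigSpec.safeParse(fixtureRTLConfig)
if (result.success) {
  expect(transformConfigSpec(result.data)).toMatchSnapshot()
} else {
  console.error(result.error.issues)
}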


@@ -1,19 +1,4 @@
import { IST } from "@start9labs/start-sdk"
import {
dictionary,
object,
anyOf,
string,
literals,
array,
number,
boolean,
Parser,
deferred,
every,
nill,
literal,
} from "ts-matches"
import { IST, z } from "@start9labs/start-sdk"
export function transformConfigSpec(oldSpec: OldConfigSpec): IST.InputSpec {
return Object.entries(oldSpec).reduce((inputSpec, [key, oldVal]) => {
@@ -82,7 +67,7 @@ export function transformConfigSpec(oldSpec: OldConfigSpec): IST.InputSpec {
name: oldVal.name,
description: oldVal.description || null,
warning: oldVal.warning || null,
spec: transformConfigSpec(matchOldConfigSpec.unsafeCast(oldVal.spec)),
spec: transformConfigSpec(matchOldConfigSpec.parse(oldVal.spec)),
}
} else if (oldVal.type === "string") {
newVal = {
@@ -121,7 +106,7 @@ export function transformConfigSpec(oldSpec: OldConfigSpec): IST.InputSpec {
...obj,
[id]: {
name: oldVal.tag["variant-names"][id] || id,
spec: transformConfigSpec(matchOldConfigSpec.unsafeCast(spec)),
spec: transformConfigSpec(matchOldConfigSpec.parse(spec)),
},
}),
{} as Record<string, { name: string; spec: IST.InputSpec }>,
@@ -153,7 +138,7 @@ export function transformOldConfigToNew(
if (isObject(val)) {
newVal = transformOldConfigToNew(
matchOldConfigSpec.unsafeCast(val.spec),
matchOldConfigSpec.parse(val.spec),
config[key],
)
}
@@ -172,7 +157,7 @@ export function transformOldConfigToNew(
newVal = {
selection,
value: transformOldConfigToNew(
matchOldConfigSpec.unsafeCast(val.variants[selection]),
matchOldConfigSpec.parse(val.variants[selection]),
config[key],
),
}
@@ -183,10 +168,7 @@ export function transformOldConfigToNew(
if (isObjectList(val)) {
newVal = (config[key] as object[]).map((obj) =>
transformOldConfigToNew(
matchOldConfigSpec.unsafeCast(val.spec.spec),
obj,
),
transformOldConfigToNew(matchOldConfigSpec.parse(val.spec.spec), obj),
)
} else if (isUnionList(val)) return obj
}
@@ -212,7 +194,7 @@ export function transformNewConfigToOld(
if (isObject(val)) {
newVal = transformNewConfigToOld(
matchOldConfigSpec.unsafeCast(val.spec),
matchOldConfigSpec.parse(val.spec),
config[key],
)
}
@@ -221,7 +203,7 @@ export function transformNewConfigToOld(
newVal = {
[val.tag.id]: config[key].selection,
...transformNewConfigToOld(
matchOldConfigSpec.unsafeCast(val.variants[config[key].selection]),
matchOldConfigSpec.parse(val.variants[config[key].selection]),
config[key].value,
),
}
@@ -230,10 +212,7 @@ export function transformNewConfigToOld(
if (isList(val)) {
if (isObjectList(val)) {
newVal = (config[key] as object[]).map((obj) =>
transformNewConfigToOld(
matchOldConfigSpec.unsafeCast(val.spec.spec),
obj,
),
transformNewConfigToOld(matchOldConfigSpec.parse(val.spec.spec), obj),
)
} else if (isUnionList(val)) return obj
}
@@ -337,9 +316,7 @@ function getListSpec(
default: oldVal.default as Record<string, unknown>[],
spec: {
type: "object",
spec: transformConfigSpec(
matchOldConfigSpec.unsafeCast(oldVal.spec.spec),
),
spec: transformConfigSpec(matchOldConfigSpec.parse(oldVal.spec.spec)),
uniqueBy: oldVal.spec["unique-by"] || null,
displayAs: oldVal.spec["display-as"] || null,
},
@@ -393,211 +370,281 @@ function isUnionList(
}
export type OldConfigSpec = Record<string, OldValueSpec>
const [_matchOldConfigSpec, setMatchOldConfigSpec] = deferred<unknown>()
export const matchOldConfigSpec = _matchOldConfigSpec as Parser<
unknown,
OldConfigSpec
>
export const matchOldDefaultString = anyOf(
string,
object({ charset: string, len: number }),
export const matchOldConfigSpec: z.ZodType<OldConfigSpec> = z.lazy(() =>
z.record(z.string(), matchOldValueSpec),
)
type OldDefaultString = typeof matchOldDefaultString._TYPE
export const matchOldDefaultString = z.union([
z.string(),
z.object({ charset: z.string(), len: z.number() }),
])
type OldDefaultString = z.infer<typeof matchOldDefaultString>
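The `deferred`/`setMatchOldConfigSpec` two-step from ts-matches becomes `z.lazy` plus an explicit `z.ZodType` annotation, as in `matchOldConfigSpec` above: the thunk defers evaluation until parse time, and the annotation breaks the type-inference cycle a self-referential schema would otherwise create. The same pattern in isolation (hypothetical `Tree` type):

import { z } from "@start9labs/start-sdk"

// A schema may reference itself only through z.lazy, and TypeScript needs
// the explicit z.ZodType<Tree> annotation because it cannot infer the type
// of a value defined in terms of itself.
type Tree = { label: string; children: Tree[] }
const matchTree: z.ZodType<Tree> = z.lazy(() =>
  z.object({
    label: z.string(),
    children: z.array(matchTree),
  }),
)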
export const matchOldValueSpecString = object({
type: literals("string"),
name: string,
masked: boolean.nullable().optional(),
copyable: boolean.nullable().optional(),
nullable: boolean.nullable().optional(),
placeholder: string.nullable().optional(),
pattern: string.nullable().optional(),
"pattern-description": string.nullable().optional(),
export const matchOldValueSpecString = z.object({
type: z.enum(["string"]),
name: z.string(),
masked: z.boolean().nullable().optional(),
copyable: z.boolean().nullable().optional(),
nullable: z.boolean().nullable().optional(),
placeholder: z.string().nullable().optional(),
pattern: z.string().nullable().optional(),
"pattern-description": z.string().nullable().optional(),
default: matchOldDefaultString.nullable().optional(),
textarea: boolean.nullable().optional(),
description: string.nullable().optional(),
warning: string.nullable().optional(),
textarea: z.boolean().nullable().optional(),
description: z.string().nullable().optional(),
warning: z.string().nullable().optional(),
})
export const matchOldValueSpecNumber = object({
type: literals("number"),
nullable: boolean,
name: string,
range: string,
integral: boolean,
default: number.nullable().optional(),
description: string.nullable().optional(),
warning: string.nullable().optional(),
units: string.nullable().optional(),
placeholder: anyOf(number, string).nullable().optional(),
export const matchOldValueSpecNumber = z.object({
type: z.enum(["number"]),
nullable: z.boolean(),
name: z.string(),
range: z.string(),
integral: z.boolean(),
default: z.number().nullable().optional(),
description: z.string().nullable().optional(),
warning: z.string().nullable().optional(),
units: z.string().nullable().optional(),
placeholder: z.union([z.number(), z.string()]).nullable().optional(),
})
type OldValueSpecNumber = typeof matchOldValueSpecNumber._TYPE
type OldValueSpecNumber = z.infer<typeof matchOldValueSpecNumber>
export const matchOldValueSpecBoolean = object({
type: literals("boolean"),
default: boolean,
name: string,
description: string.nullable().optional(),
warning: string.nullable().optional(),
export const matchOldValueSpecBoolean = z.object({
type: z.enum(["boolean"]),
default: z.boolean(),
name: z.string(),
description: z.string().nullable().optional(),
warning: z.string().nullable().optional(),
})
type OldValueSpecBoolean = typeof matchOldValueSpecBoolean._TYPE
type OldValueSpecBoolean = z.infer<typeof matchOldValueSpecBoolean>
const matchOldValueSpecObject = object({
type: literals("object"),
spec: _matchOldConfigSpec,
name: string,
description: string.nullable().optional(),
warning: string.nullable().optional(),
type OldValueSpecObject = {
type: "object"
spec: OldConfigSpec
name: string
description?: string | null
warning?: string | null
}
const matchOldValueSpecObject: z.ZodType<OldValueSpecObject> = z.object({
type: z.enum(["object"]),
spec: z.lazy(() => matchOldConfigSpec),
name: z.string(),
description: z.string().nullable().optional(),
warning: z.string().nullable().optional(),
})
type OldValueSpecObject = typeof matchOldValueSpecObject._TYPE
const matchOldValueSpecEnum = object({
values: array(string),
"value-names": dictionary([string, string]),
type: literals("enum"),
default: string,
name: string,
description: string.nullable().optional(),
warning: string.nullable().optional(),
const matchOldValueSpecEnum = z.object({
values: z.array(z.string()),
"value-names": z.record(z.string(), z.string()),
type: z.enum(["enum"]),
default: z.string(),
name: z.string(),
description: z.string().nullable().optional(),
warning: z.string().nullable().optional(),
})
type OldValueSpecEnum = typeof matchOldValueSpecEnum._TYPE
type OldValueSpecEnum = z.infer<typeof matchOldValueSpecEnum>
const matchOldUnionTagSpec = object({
id: string, // The name of the field containing one of the union variants
"variant-names": dictionary([string, string]), // The name of each variant
name: string,
description: string.nullable().optional(),
warning: string.nullable().optional(),
const matchOldUnionTagSpec = z.object({
id: z.string(), // The name of the field containing one of the union variants
"variant-names": z.record(z.string(), z.string()), // The name of each variant
name: z.string(),
description: z.string().nullable().optional(),
warning: z.string().nullable().optional(),
})
const matchOldValueSpecUnion = object({
type: literals("union"),
type OldValueSpecUnion = {
type: "union"
tag: z.infer<typeof matchOldUnionTagSpec>
variants: Record<string, OldConfigSpec>
default: string
}
const matchOldValueSpecUnion: z.ZodType<OldValueSpecUnion> = z.object({
type: z.enum(["union"]),
tag: matchOldUnionTagSpec,
variants: dictionary([string, _matchOldConfigSpec]),
default: string,
variants: z.record(
z.string(),
z.lazy(() => matchOldConfigSpec),
),
default: z.string(),
})
type OldValueSpecUnion = typeof matchOldValueSpecUnion._TYPE
const [matchOldUniqueBy, setOldUniqueBy] = deferred<OldUniqueBy>()
type OldUniqueBy =
| null
| string
| { any: OldUniqueBy[] }
| { all: OldUniqueBy[] }
setOldUniqueBy(
anyOf(
nill,
string,
object({ any: array(matchOldUniqueBy) }),
object({ all: array(matchOldUniqueBy) }),
),
const matchOldUniqueBy: z.ZodType<OldUniqueBy> = z.lazy(() =>
z.union([
z.null(),
z.string(),
z.object({ any: z.array(matchOldUniqueBy) }),
z.object({ all: z.array(matchOldUniqueBy) }),
]),
)
const matchOldListValueSpecObject = object({
spec: _matchOldConfigSpec, // this is a mapped type of the config object at this level, replacing the object's values with specs on those values
"unique-by": matchOldUniqueBy.nullable().optional(), // indicates whether duplicates can be permitted in the list
"display-as": string.nullable().optional(), // this should be a handlebars template which can make use of the entire config which corresponds to 'spec'
})
const matchOldListValueSpecUnion = object({
type OldListValueSpecObject = {
spec: OldConfigSpec
"unique-by"?: OldUniqueBy | null
"display-as"?: string | null
}
const matchOldListValueSpecObject: z.ZodType<OldListValueSpecObject> = z.object(
{
spec: z.lazy(() => matchOldConfigSpec), // this is a mapped type of the config object at this level, replacing the object's values with specs on those values
"unique-by": matchOldUniqueBy.nullable().optional(), // indicates whether duplicates can be permitted in the list
"display-as": z.string().nullable().optional(), // this should be a handlebars template which can make use of the entire config which corresponds to 'spec'
},
)
type OldListValueSpecUnion = {
"unique-by"?: OldUniqueBy | null
"display-as"?: string | null
tag: z.infer<typeof matchOldUnionTagSpec>
variants: Record<string, OldConfigSpec>
}
const matchOldListValueSpecUnion: z.ZodType<OldListValueSpecUnion> = z.object({
"unique-by": matchOldUniqueBy.nullable().optional(),
"display-as": string.nullable().optional(),
"display-as": z.string().nullable().optional(),
tag: matchOldUnionTagSpec,
variants: dictionary([string, _matchOldConfigSpec]),
variants: z.record(
z.string(),
z.lazy(() => matchOldConfigSpec),
),
})
const matchOldListValueSpecString = object({
masked: boolean.nullable().optional(),
copyable: boolean.nullable().optional(),
pattern: string.nullable().optional(),
"pattern-description": string.nullable().optional(),
placeholder: string.nullable().optional(),
const matchOldListValueSpecString = z.object({
masked: z.boolean().nullable().optional(),
copyable: z.boolean().nullable().optional(),
pattern: z.string().nullable().optional(),
"pattern-description": z.string().nullable().optional(),
placeholder: z.string().nullable().optional(),
})
const matchOldListValueSpecEnum = object({
values: array(string),
"value-names": dictionary([string, string]),
const matchOldListValueSpecEnum = z.object({
values: z.array(z.string()),
"value-names": z.record(z.string(), z.string()),
})
const matchOldListValueSpecNumber = object({
range: string,
integral: boolean,
units: string.nullable().optional(),
placeholder: anyOf(number, string).nullable().optional(),
const matchOldListValueSpecNumber = z.object({
range: z.string(),
integral: z.boolean(),
units: z.string().nullable().optional(),
placeholder: z.union([z.number(), z.string()]).nullable().optional(),
})
type OldValueSpecListBase = {
type: "list"
range: string
default: string[] | number[] | OldDefaultString[] | Record<string, unknown>[]
name: string
description?: string | null
warning?: string | null
}
type OldValueSpecList = OldValueSpecListBase &
(
| { subtype: "string"; spec: z.infer<typeof matchOldListValueSpecString> }
| { subtype: "enum"; spec: z.infer<typeof matchOldListValueSpecEnum> }
| { subtype: "object"; spec: OldListValueSpecObject }
| { subtype: "number"; spec: z.infer<typeof matchOldListValueSpecNumber> }
| { subtype: "union"; spec: OldListValueSpecUnion }
)
// represents a spec for a list
export const matchOldValueSpecList = every(
object({
type: literals("list"),
range: string, // '[0,1]' (inclusive) OR '[0,*)' (right unbounded), normal math rules
default: anyOf(
array(string),
array(number),
array(matchOldDefaultString),
array(object),
),
name: string,
description: string.nullable().optional(),
warning: string.nullable().optional(),
}),
anyOf(
object({
subtype: literals("string"),
spec: matchOldListValueSpecString,
export const matchOldValueSpecList: z.ZodType<OldValueSpecList> =
z.intersection(
z.object({
type: z.enum(["list"]),
range: z.string(), // '[0,1]' (inclusive) OR '[0,*)' (right unbounded), normal math rules
default: z.union([
z.array(z.string()),
z.array(z.number()),
z.array(matchOldDefaultString),
z.array(z.object({}).passthrough()),
]),
name: z.string(),
description: z.string().nullable().optional(),
warning: z.string().nullable().optional(),
}),
object({
subtype: literals("enum"),
spec: matchOldListValueSpecEnum,
}),
object({
subtype: literals("object"),
spec: matchOldListValueSpecObject,
}),
object({
subtype: literals("number"),
spec: matchOldListValueSpecNumber,
}),
object({
subtype: literals("union"),
spec: matchOldListValueSpecUnion,
}),
),
)
type OldValueSpecList = typeof matchOldValueSpecList._TYPE
z.union([
z.object({
subtype: z.enum(["string"]),
spec: matchOldListValueSpecString,
}),
z.object({
subtype: z.enum(["enum"]),
spec: matchOldListValueSpecEnum,
}),
z.object({
subtype: z.enum(["object"]),
spec: matchOldListValueSpecObject,
}),
z.object({
subtype: z.enum(["number"]),
spec: matchOldListValueSpecNumber,
}),
z.object({
subtype: z.enum(["union"]),
spec: matchOldListValueSpecUnion,
}),
]),
) as unknown as z.ZodType<OldValueSpecList>
const matchOldValueSpecPointer = every(
object({
type: literal("pointer"),
}),
anyOf(
object({
subtype: literal("package"),
target: literals("tor-key", "tor-address", "lan-address"),
"package-id": string,
interface: string,
}),
object({
subtype: literal("package"),
target: literals("config"),
"package-id": string,
selector: string,
multi: boolean,
}),
),
type OldValueSpecPointer = {
type: "pointer"
} & (
| {
subtype: "package"
target: "tor-key" | "tor-address" | "lan-address"
"package-id": string
interface: string
}
| {
subtype: "package"
target: "config"
"package-id": string
selector: string
multi: boolean
}
)
type OldValueSpecPointer = typeof matchOldValueSpecPointer._TYPE
const matchOldValueSpecPointer: z.ZodType<OldValueSpecPointer> = z.intersection(
z.object({
type: z.literal("pointer"),
}),
z.union([
z.object({
subtype: z.literal("package"),
target: z.enum(["tor-key", "tor-address", "lan-address"]),
"package-id": z.string(),
interface: z.string(),
}),
z.object({
subtype: z.literal("package"),
target: z.enum(["config"]),
"package-id": z.string(),
selector: z.string(),
multi: z.boolean(),
}),
]),
) as unknown as z.ZodType<OldValueSpecPointer>
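The `as unknown as z.ZodType<...>` assertions on the intersection schemas paper over a compile-time mismatch between zod's inferred intersection/union types and the hand-written `OldValueSpecList` and `OldValueSpecPointer` shapes; runtime validation is untouched. For instance (the selector value below is illustrative):

// Both halves of the intersection are still enforced at parse time:
matchOldValueSpecPointer.parse({
  type: "pointer",
  subtype: "package",
  target: "config",
  "package-id": "bitcoind",
  selector: "$.rpc.username",
  multi: false,
}) // ok: matches the second union variant

matchOldValueSpecPointer.parse({ type: "pointer" }) // throws: no variant matches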
export const matchOldValueSpec = anyOf(
type OldValueSpecString = z.infer<typeof matchOldValueSpecString>
type OldValueSpec =
| OldValueSpecString
| OldValueSpecNumber
| OldValueSpecBoolean
| OldValueSpecObject
| OldValueSpecEnum
| OldValueSpecList
| OldValueSpecUnion
| OldValueSpecPointer
export const matchOldValueSpec: z.ZodType<OldValueSpec> = z.union([
matchOldValueSpecString,
matchOldValueSpecNumber,
matchOldValueSpecBoolean,
matchOldValueSpecObject,
matchOldValueSpecObject as z.ZodType<OldValueSpecObject>,
matchOldValueSpecEnum,
matchOldValueSpecList,
matchOldValueSpecUnion,
matchOldValueSpecPointer,
)
type OldValueSpec = typeof matchOldValueSpec._TYPE
setMatchOldConfigSpec(dictionary([string, matchOldValueSpec]))
matchOldValueSpecList as z.ZodType<OldValueSpecList>,
matchOldValueSpecUnion as z.ZodType<OldValueSpecUnion>,
matchOldValueSpecPointer as z.ZodType<OldValueSpecPointer>,
])
export class Range {
min?: number


@@ -47,11 +47,12 @@ export class SystemForStartOs implements System {
getActionInput(
effects: Effects,
id: string,
prefill: Record<string, unknown> | null,
timeoutMs: number | null,
): Promise<T.ActionInput | null> {
const action = this.abi.actions.get(id)
if (!action) throw new Error(`Action ${id} not found`)
return action.getInput({ effects })
return action.getInput({ effects, prefill })
}
runAction(
effects: Effects,
@@ -68,22 +69,18 @@ export class SystemForStartOs implements System {
try {
if (this.runningMain || this.starting) return
this.starting = true
effects.constRetry = utils.once(() => effects.restart())
effects.constRetry = utils.once(() => {
console.debug(".const() triggered")
effects.restart()
})
let mainOnTerm: (() => Promise<void>) | undefined
const started = async (onTerm: () => Promise<void>) => {
await effects.setMainStatus({ status: "running" })
mainOnTerm = onTerm
return null
}
const daemons = await (
await this.abi.main({
effects,
started,
})
).build()
this.runningMain = {
stop: async () => {
if (mainOnTerm) await mainOnTerm()
await daemons.term()
},
}
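`utils.once` guards `constRetry` so that however many `.const()` dependencies fire while the service is starting, only a single restart is requested. Assuming it is a conventional memoizing wrapper, the idea is roughly:

// Hypothetical sketch of a once() helper: the wrapped fn runs on the first
// call and its result is cached; later calls return the cached result.
function once<T>(fn: () => T): () => T {
  let called = false
  let value: T
  return () => {
    if (!called) {
      called = true
      value = fn()
    }
    return value
  }
}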


@@ -33,6 +33,7 @@ export type System = {
getActionInput(
effects: Effects,
actionId: string,
prefill: Record<string, unknown> | null,
timeoutMs: number | null,
): Promise<T.ActionInput | null>


@@ -1,41 +1,19 @@
import {
object,
literal,
string,
boolean,
array,
dictionary,
literals,
number,
Parser,
some,
} from "ts-matches"
import { z } from "@start9labs/start-sdk"
import { matchDuration } from "./Duration"
const VolumeId = string
const Path = string
export type VolumeId = string
export type Path = string
export const matchDockerProcedure = object({
type: literal("docker"),
image: string,
system: boolean.optional(),
entrypoint: string,
args: array(string).defaultTo([]),
mounts: dictionary([VolumeId, Path]).optional(),
"io-format": literals(
"json",
"json-pretty",
"yaml",
"cbor",
"toml",
"toml-pretty",
)
export const matchDockerProcedure = z.object({
type: z.literal("docker"),
image: z.string(),
system: z.boolean().optional(),
entrypoint: z.string(),
args: z.array(z.string()).default([]),
mounts: z.record(z.string(), z.string()).optional(),
"io-format": z
.enum(["json", "json-pretty", "yaml", "cbor", "toml", "toml-pretty"])
.nullable()
.optional(),
"sigterm-timeout": some(number, matchDuration).onMismatch(30),
inject: boolean.defaultTo(false),
"sigterm-timeout": z.union([z.number(), matchDuration]).catch(30),
inject: z.boolean().default(false),
})
export type DockerProcedure = typeof matchDockerProcedure._TYPE
export type DockerProcedure = z.infer<typeof matchDockerProcedure>
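`onMismatch(30)` from ts-matches maps onto zod's `.catch(30)`: when no union branch accepts the input, the fallback value is produced instead of a thrown error. Illustrated with the "sigterm-timeout" shape from above (the const name is illustrative):

const sigtermTimeout = z.union([z.number(), matchDuration]).catch(30)
sigtermTimeout.parse(15)    // 15
sigtermTimeout.parse("45s") // "45s" -- accepted by matchDuration
sigtermTimeout.parse({})    // 30 -- no branch matched, fallback applied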


@@ -1,11 +1,11 @@
import { string } from "ts-matches"
import { z } from "@start9labs/start-sdk"
export type TimeUnit = "d" | "h" | "s" | "ms" | "m" | "µs" | "ns"
export type Duration = `${number}${TimeUnit}`
const durationRegex = /^([0-9]*(\.[0-9]+)?)(ns|µs|ms|s|m|h|d)$/
export const matchDuration = string.refine(isDuration)
export const matchDuration = z.string().refine(isDuration)
export function isDuration(value: string): value is Duration {
return durationRegex.test(value)
}
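Because `isDuration` is written as a type predicate, zod's type-guard overload of `.refine` narrows the schema's output: `z.infer<typeof matchDuration>` is the `Duration` template-literal type rather than plain `string`.

// The refinement both validates at runtime and narrows at compile time:
const d: z.infer<typeof matchDuration> = matchDuration.parse("250ms") // d: Duration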


@@ -1,10 +1,10 @@
import { literals, some, string } from "ts-matches"
import { z } from "@start9labs/start-sdk"
type NestedPath<A extends string, B extends string> = `/${A}/${string}/${B}`
type NestedPaths = NestedPath<"actions", "run" | "getInput">
// prettier-ignore
type UnNestPaths<A> =
A extends `${infer A}/${infer B}` ? [...UnNestPaths<A>, ... UnNestPaths<B>] :
type UnNestPaths<A> =
A extends `${infer A}/${infer B}` ? [...UnNestPaths<A>, ... UnNestPaths<B>] :
[A]
export function unNestPath<A extends string>(a: A): UnNestPaths<A> {
@@ -17,14 +17,14 @@ function isNestedPath(path: string): path is NestedPaths {
return true
return false
}
export const jsonPath = some(
literals(
export const jsonPath = z.union([
z.enum([
"/packageInit",
"/packageUninit",
"/backup/create",
"/backup/restore",
),
string.refine(isNestedPath, "isNestedPath"),
)
]),
z.string().refine(isNestedPath),
])
export type JsonPath = typeof jsonPath._TYPE
export type JsonPath = z.infer<typeof jsonPath>
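In use, the enum branch admits the four fixed lifecycle paths and the refined string branch admits the nested action paths (assuming `isNestedPath` accepts exactly `/actions/<id>/run` and `/actions/<id>/getInput`):

jsonPath.parse("/backup/create")                 // ok: enum branch
jsonPath.parse("/actions/my-action/getInput")    // ok: refined branch
jsonPath.safeParse("/actions/my-action").success // false: neither branch matches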


@@ -1,5 +1,4 @@
import { RpcListener } from "./Adapters/RpcListener"
import { SystemForEmbassy } from "./Adapters/Systems/SystemForEmbassy"
import { AllGetDependencies } from "./Interfaces/AllGetDependencies"
import { getSystem } from "./Adapters/Systems"
@@ -7,6 +6,24 @@ const getDependencies: AllGetDependencies = {
system: getSystem,
}
process.on("unhandledRejection", (reason) => {
if (
reason instanceof Error &&
"muteUnhandled" in reason &&
reason.muteUnhandled
) {
// mute
} else {
console.error("Unhandled promise rejection", reason)
}
})
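The `muteUnhandled` escape hatch suggests that code expecting a rejection tags the error before letting it escape, so the global handler stays quiet for it; a hypothetical producer looks like:

// Hypothetical: mark an expected rejection so the handler above skips it.
const expected = Object.assign(new Error("connection closed during shutdown"), {
  muteUnhandled: true,
})
void Promise.reject(expected)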
for (let s of ["SIGTERM", "SIGINT", "SIGHUP"]) {
process.on(s, (s) => {
console.log(`Caught ${s}`)
})
}
new RpcListener(getDependencies)
/**


@@ -0,0 +1,21 @@
#!/bin/bash
# Rebuild the container-runtime rootfs for the target ARCH inside the
# start9/build-env image, then fix up ownership of the build output.
cd "$(dirname "${BASH_SOURCE[0]}")/.."
# Allocate a TTY only when the script itself is attached to one.
USE_TTY=
if tty -s; then
  USE_TTY="-it"
fi
# Map the ARCH environment variable onto docker's platform naming.
DOCKER_PLATFORM=linux/${ARCH}
case $ARCH in
  x86_64)
    DOCKER_PLATFORM=linux/amd64;;
  aarch64)
    DOCKER_PLATFORM=linux/arm64;;
esac
docker run --rm $USE_TTY --platform=$DOCKER_PLATFORM -eARCH --privileged -v "$(pwd):/root/start-os" start9/build-env /root/start-os/container-runtime/update-image.sh
# If the squashfs came out owned by another user (typically root), chown the
# container-runtime tree back to the invoking user.
if [ "$(ls -nd "container-runtime/rootfs.${ARCH}.squashfs" | awk '{ print $3 }')" != "$UID" ]; then
  docker run --rm $USE_TTY -v "$(pwd):/root/start-os" start9/build-env chown -R $UID:$UID /root/start-os/container-runtime
fi
