Compare commits


11 Commits

Author            SHA1        Message                                      Date
Aiden McClelland  bc9db9f2b7  efi fixes                                    2025-11-15 00:23:04 -07:00
Aiden McClelland  7210f43f50  build and efi fixes                          2025-11-14 19:00:33 -07:00
Aiden McClelland  df636b7a78  misc networking fixes                        2025-11-14 17:56:24 -07:00
Aiden McClelland  10c14b4d0a  fix set-password                             2025-11-13 16:36:17 -07:00
Aiden McClelland  1bf610a853  prevent gateways from getting stuck empty    2025-11-13 16:34:14 -07:00
Matt Hill         b4d82b82a9  CA instead of leaf for StartTunnel (#3046)   2025-11-13 15:32:49 -07:00

    * updated docs for CA instead of cert
    * generate ca instead of self-signed in start-tunnel
    * Fix formatting in START-TUNNEL.md installation instructions
    * Fix formatting in START-TUNNEL.md
    * fix infinite loop
    * add success message to install
    * hide loopback and bridge gateways

    Co-authored-by: Aiden McClelland <me@drbonez.dev>
    Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>

Aiden McClelland  7c5ba45f6a  Include StartTunnel installation command     2025-11-13 15:32:49 -07:00

    Added installation instructions for StartTunnel.

Aiden McClelland  f83df5682c  bump sdk                                     2025-11-07 18:07:25 -07:00
Aiden McClelland  bfdab897ab  misc fixes                                   2025-11-07 14:31:14 -07:00
Aiden McClelland  29c97fcbb0  sdk fixes                                    2025-11-07 11:20:13 -07:00
Aiden McClelland  e7847d0e88  squashfs-wip                                 2025-11-07 03:13:54 -07:00
1775 changed files with 44382 additions and 80617 deletions


@@ -1 +0,0 @@
{}


@@ -1,81 +0,0 @@
name: Setup Build Environment
description: Common build environment setup steps
inputs:
  nodejs-version:
    description: Node.js version
    required: true
  setup-python:
    description: Set up Python
    required: false
    default: "false"
  setup-docker:
    description: Set up Docker QEMU and Buildx
    required: false
    default: "true"
  setup-sccache:
    description: Configure sccache for GitHub Actions
    required: false
    default: "true"
  free-space:
    description: Remove unnecessary packages to free disk space
    required: false
    default: "true"
runs:
  using: composite
  steps:
    - name: Free disk space
      if: inputs.free-space == 'true'
      shell: bash
      run: |
        sudo apt-get remove --purge -y azure-cli || true
        sudo apt-get remove --purge -y firefox || true
        sudo apt-get remove --purge -y ghc-* || true
        sudo apt-get remove --purge -y google-cloud-sdk || true
        sudo apt-get remove --purge -y google-chrome-stable || true
        sudo apt-get remove --purge -y powershell || true
        sudo apt-get remove --purge -y php* || true
        sudo apt-get remove --purge -y ruby* || true
        sudo apt-get remove --purge -y mono-* || true
        sudo apt-get autoremove -y
        sudo apt-get clean
        sudo rm -rf /usr/lib/jvm
        sudo rm -rf /usr/local/.ghcup
        sudo rm -rf /usr/local/lib/android
        sudo rm -rf /usr/share/dotnet
        sudo rm -rf /usr/share/swift
        sudo rm -rf "$AGENT_TOOLSDIRECTORY"
    # Some runners lack /opt/hostedtoolcache, which setup-python and setup-qemu expect
    - name: Ensure hostedtoolcache exists
      shell: bash
      run: sudo mkdir -p /opt/hostedtoolcache && sudo chown $USER:$USER /opt/hostedtoolcache
    - name: Set up Python
      if: inputs.setup-python == 'true'
      uses: actions/setup-python@v6
      with:
        python-version: "3.x"
    - uses: actions/setup-node@v6
      with:
        node-version: ${{ inputs.nodejs-version }}
        cache: npm
        cache-dependency-path: "**/package-lock.json"
    - name: Set up Docker QEMU
      if: inputs.setup-docker == 'true'
      uses: docker/setup-qemu-action@v4
    - name: Set up Docker Buildx
      if: inputs.setup-docker == 'true'
      uses: docker/setup-buildx-action@v4
    - name: Configure sccache
      if: inputs.setup-sccache == 'true'
      uses: actions/github-script@v8
      with:
        script: |
          core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
          core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');


@@ -1,88 +0,0 @@
name: start-cli

on:
  workflow_call:
  workflow_dispatch:
    inputs:
      environment:
        type: choice
        description: Environment
        options:
          - NONE
          - dev
          - unstable
          - dev-unstable
      runner:
        type: choice
        description: Runner
        options:
          - standard
          - fast
      arch:
        type: choice
        description: Architecture
        options:
          - ALL
          - x86_64
          - x86_64-apple
          - aarch64
          - aarch64-apple
          - riscv64
  push:
    branches:
      - master
      - next/*
  pull_request:
    branches:
      - master
      - next/*

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
  cancel-in-progress: true

env:
  NODEJS_VERSION: "24.11.0"
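  # Boolean indexing into a two-element array (false = 0, true = 1): choosing 'NONE'
  # yields an empty ENVIRONMENT; any other choice (default 'dev') passes through.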
  ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'

jobs:
  compile:
    name: Build Debian Package
    if: github.event.pull_request.draft != true
    strategy:
      fail-fast: true
      matrix:
        triple: >-
          ${{
            fromJson('{
              "x86_64": ["x86_64-unknown-linux-musl"],
              "x86_64-apple": ["x86_64-apple-darwin"],
              "aarch64": ["aarch64-unknown-linux-musl"],
              "aarch64-apple": ["aarch64-apple-darwin"],
              "riscv64": ["riscv64gc-unknown-linux-musl"],
              "ALL": ["x86_64-unknown-linux-musl", "x86_64-apple-darwin", "aarch64-unknown-linux-musl", "aarch64-apple-darwin", "riscv64gc-unknown-linux-musl"]
            }')[github.event.inputs.arch || 'ALL']
          }}
    runs-on: ${{ fromJson('["ubuntu-latest", "ubuntu-24.04-32-cores"]')[github.event.inputs.runner == 'fast'] }}
    steps:
      - name: Mount tmpfs
        if: ${{ github.event.inputs.runner == 'fast' }}
        run: sudo mount -t tmpfs tmpfs .
      - uses: actions/checkout@v6
        with:
          submodules: recursive
      - uses: ./.github/actions/setup-build
        with:
          nodejs-version: ${{ env.NODEJS_VERSION }}
      - name: Make
        run: TARGET=${{ matrix.triple }} make cli
        env:
          PLATFORM: ${{ matrix.arch }}
          SCCACHE_GHA_ENABLED: on
          SCCACHE_GHA_VERSION: 0
      - uses: actions/upload-artifact@v7
        with:
          name: start-cli_${{ matrix.triple }}
          path: core/target/${{ matrix.triple }}/release/start-cli


@@ -1,173 +0,0 @@
name: start-registry

on:
  workflow_call:
  workflow_dispatch:
    inputs:
      environment:
        type: choice
        description: Environment
        options:
          - NONE
          - dev
          - unstable
          - dev-unstable
      runner:
        type: choice
        description: Runner
        options:
          - standard
          - fast
      arch:
        type: choice
        description: Architecture
        options:
          - ALL
          - x86_64
          - aarch64
          - riscv64
  push:
    branches:
      - master
      - next/*
  pull_request:
    branches:
      - master
      - next/*

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
  cancel-in-progress: true

env:
  NODEJS_VERSION: "24.11.0"
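  # Boolean indexing into a two-element array (false = 0, true = 1): choosing 'NONE'
  # yields an empty ENVIRONMENT; any other choice (default 'dev') passes through.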
  ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'

jobs:
  compile:
    name: Build Debian Package
    if: github.event.pull_request.draft != true
    strategy:
      fail-fast: true
      matrix:
        arch: >-
          ${{
            fromJson('{
              "x86_64": ["x86_64"],
              "aarch64": ["aarch64"],
              "riscv64": ["riscv64"],
              "ALL": ["x86_64", "aarch64", "riscv64"]
            }')[github.event.inputs.arch || 'ALL']
          }}
    runs-on: ${{ fromJson('["ubuntu-latest", "ubuntu-24.04-32-cores"]')[github.event.inputs.runner == 'fast'] }}
    steps:
      - name: Mount tmpfs
        if: ${{ github.event.inputs.runner == 'fast' }}
        run: sudo mount -t tmpfs tmpfs .
      - uses: actions/checkout@v6
        with:
          submodules: recursive
      - uses: ./.github/actions/setup-build
        with:
          nodejs-version: ${{ env.NODEJS_VERSION }}
      - name: Make
        run: make registry-deb
        env:
          PLATFORM: ${{ matrix.arch }}
          SCCACHE_GHA_ENABLED: on
          SCCACHE_GHA_VERSION: 0
      - uses: actions/upload-artifact@v7
        with:
          name: start-registry_${{ matrix.arch }}.deb
          path: results/start-registry-*_${{ matrix.arch }}.deb
  create-image:
    name: Create Docker Image
    needs: [compile]
    permissions:
      contents: read
      packages: write
    runs-on: ${{ fromJson('["ubuntu-latest", "ubuntu-24.04-32-cores"]')[github.event.inputs.runner == 'fast'] }}
    steps:
      - name: Cleaning up unnecessary files
        run: |
          sudo apt-get remove --purge -y google-chrome-stable firefox mono-devel
          sudo apt-get autoremove -y
          sudo apt-get clean
      - run: |
          sudo mount -t tmpfs tmpfs .
        if: ${{ github.event.inputs.runner == 'fast' }}
      - name: Set up docker QEMU
        uses: docker/setup-qemu-action@v4
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v4
      - name: "Login to GitHub Container Registry"
        uses: docker/login-action@v4
        with:
          registry: ghcr.io
          username: ${{ github.actor }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v6
        with:
          images: ghcr.io/Start9Labs/startos-registry
          tags: |
            type=raw,value=${{ github.ref_name }}
      - name: Download debian package
        uses: actions/download-artifact@v8
        with:
          pattern: start-registry_*.deb
      - name: Map matrix.arch to docker platform
        run: |
          platforms=""
          for deb in *.deb; do
            filename=$(basename "$deb" .deb)
            arch="${filename#*_}"
            case "$arch" in
              x86_64)
                platform="linux/amd64"
                ;;
              aarch64)
                platform="linux/arm64"
                ;;
              riscv64)
                platform="linux/riscv64"
                ;;
              *)
                echo "Unknown architecture: $arch" >&2
                exit 1
                ;;
            esac
            if [ -z "$platforms" ]; then
              platforms="$platform"
            else
              platforms="$platforms,$platform"
            fi
          done
          echo "DOCKER_PLATFORM=$platforms" >> "$GITHUB_ENV"
      - run: |
          cat | docker buildx build --platform "$DOCKER_PLATFORM" --push -t ${{ steps.meta.outputs.tags }} -f - . << 'EOF'
          FROM debian:trixie
          ADD *.deb ./
          RUN apt-get update && apt-get install -y ./*_$(uname -m).deb && rm -rf *.deb /var/lib/apt/lists/*
          VOLUME /var/lib/startos
          ENV RUST_LOG=startos=debug
          ENTRYPOINT ["start-registryd"]
          EOF


@@ -1,84 +0,0 @@
name: start-tunnel

on:
  workflow_call:
  workflow_dispatch:
    inputs:
      environment:
        type: choice
        description: Environment
        options:
          - NONE
          - dev
          - unstable
          - dev-unstable
      runner:
        type: choice
        description: Runner
        options:
          - standard
          - fast
      arch:
        type: choice
        description: Architecture
        options:
          - ALL
          - x86_64
          - aarch64
          - riscv64
  push:
    branches:
      - master
      - next/*
  pull_request:
    branches:
      - master
      - next/*

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
  cancel-in-progress: true

env:
  NODEJS_VERSION: "24.11.0"
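  # Boolean indexing into a two-element array (false = 0, true = 1): choosing 'NONE'
  # yields an empty ENVIRONMENT; any other choice (default 'dev') passes through.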
  ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'

jobs:
  compile:
    name: Build Debian Package
    if: github.event.pull_request.draft != true
    strategy:
      fail-fast: true
      matrix:
        arch: >-
          ${{
            fromJson('{
              "x86_64": ["x86_64"],
              "aarch64": ["aarch64"],
              "riscv64": ["riscv64"],
              "ALL": ["x86_64", "aarch64", "riscv64"]
            }')[github.event.inputs.arch || 'ALL']
          }}
    runs-on: ${{ fromJson('["ubuntu-latest", "ubuntu-24.04-32-cores"]')[github.event.inputs.runner == 'fast'] }}
    steps:
      - name: Mount tmpfs
        if: ${{ github.event.inputs.runner == 'fast' }}
        run: sudo mount -t tmpfs tmpfs .
      - uses: actions/checkout@v6
        with:
          submodules: recursive
      - uses: ./.github/actions/setup-build
        with:
          nodejs-version: ${{ env.NODEJS_VERSION }}
      - name: Make
        run: make tunnel-deb
        env:
          PLATFORM: ${{ matrix.arch }}
          SCCACHE_GHA_ENABLED: on
          SCCACHE_GHA_VERSION: 0
      - uses: actions/upload-artifact@v7
        with:
          name: start-tunnel_${{ matrix.arch }}.deb
          path: results/start-tunnel-*_${{ matrix.arch }}.deb


@@ -25,13 +25,10 @@ on:
           - ALL
           - x86_64
           - x86_64-nonfree
-          - x86_64-nvidia
           - aarch64
           - aarch64-nonfree
-          - aarch64-nvidia
-          # - raspberrypi
+          - raspberrypi
           - riscv64
-          - riscv64-nonfree
       deploy:
         type: choice
         description: Deploy
@@ -48,10 +45,6 @@ on:
       - master
       - next/*
-concurrency:
-  group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
-  cancel-in-progress: true
 env:
   NODEJS_VERSION: "24.11.0"
   ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
@@ -59,7 +52,6 @@ env:
 jobs:
   compile:
     name: Compile Base Binaries
-    if: github.event.pull_request.draft != true
     strategy:
       fail-fast: true
       matrix:
@@ -68,45 +60,53 @@ jobs:
           fromJson('{
             "x86_64": ["x86_64"],
             "x86_64-nonfree": ["x86_64"],
-            "x86_64-nvidia": ["x86_64"],
             "aarch64": ["aarch64"],
             "aarch64-nonfree": ["aarch64"],
-            "aarch64-nvidia": ["aarch64"],
             "raspberrypi": ["aarch64"],
             "riscv64": ["riscv64"],
-            "riscv64-nonfree": ["riscv64"],
-            "ALL": ["x86_64", "aarch64", "riscv64"]
+            "ALL": ["x86_64", "aarch64"]
           }')[github.event.inputs.platform || 'ALL']
         }}
-    runs-on: >-
-      ${{
-        fromJson(
-          format(
-            '["{0}", "{1}"]',
-            fromJson('{
-              "x86_64": "ubuntu-latest",
-              "aarch64": "ubuntu-24.04-arm",
-              "riscv64": "ubuntu-latest"
-            }')[matrix.arch],
-            fromJson('{
-              "x86_64": "amd64-fast",
-              "aarch64": "aarch64-fast",
-              "riscv64": "amd64-fast"
-            }')[matrix.arch]
-          )
-        )[github.event.inputs.runner == 'fast']
-      }}
+    runs-on: ${{ fromJson('["ubuntu-22.04", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
     steps:
-      - name: Mount tmpfs
+      - run: |
+          sudo mount -t tmpfs tmpfs .
         if: ${{ github.event.inputs.runner == 'fast' }}
-        run: sudo mount -t tmpfs tmpfs .
-      - uses: actions/checkout@v6
+      - uses: actions/checkout@v4
         with:
           submodules: recursive
-      - uses: ./.github/actions/setup-build
+      - name: Set up Python
+        uses: actions/setup-python@v5
         with:
-          nodejs-version: ${{ env.NODEJS_VERSION }}
-          setup-python: "true"
+          python-version: "3.x"
+      - uses: actions/setup-node@v4
+        with:
+          node-version: ${{ env.NODEJS_VERSION }}
+      - name: Set up docker QEMU
+        uses: docker/setup-qemu-action@v3
+      - name: Set up system dependencies
+        run: sudo apt-get update && sudo apt-get install -y qemu-user-static systemd-container squashfuse
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v3
+      - name: Configure sccache
+        uses: actions/github-script@v7
+        with:
+          script: |
+            core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
+            core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
+      - name: Use Beta Toolchain
+        run: rustup default beta
+      - name: Setup Cross
+        run: cargo install cross --git https://github.com/cross-rs/cross
       - name: Make
         run: make ARCH=${{ matrix.arch }} compiled-${{ matrix.arch }}.tar
@@ -114,7 +114,7 @@ jobs:
         SCCACHE_GHA_ENABLED: on
         SCCACHE_GHA_VERSION: 0
-      - uses: actions/upload-artifact@v7
+      - uses: actions/upload-artifact@v4
         with:
           name: compiled-${{ matrix.arch }}.tar
           path: compiled-${{ matrix.arch }}.tar
@@ -130,7 +130,7 @@ jobs:
         format(
           '[
             ["{0}"],
-            ["x86_64", "x86_64-nonfree", "x86_64-nvidia", "aarch64", "aarch64-nonfree", "aarch64-nvidia", "raspberrypi", "riscv64", "riscv64-nonfree"]
+            ["x86_64", "x86_64-nonfree", "aarch64", "aarch64-nonfree", "raspberrypi"]
           ]',
           github.event.inputs.platform || 'ALL'
         )
@@ -140,28 +140,14 @@ jobs:
       ${{
         fromJson(
           format(
-            '["{0}", "{1}"]',
+            '["ubuntu-22.04", "{0}"]',
             fromJson('{
-              "x86_64": "ubuntu-latest",
-              "x86_64-nonfree": "ubuntu-latest",
-              "x86_64-nvidia": "ubuntu-latest",
-              "aarch64": "ubuntu-24.04-arm",
-              "aarch64-nonfree": "ubuntu-24.04-arm",
-              "aarch64-nvidia": "ubuntu-24.04-arm",
-              "raspberrypi": "ubuntu-24.04-arm",
-              "riscv64": "ubuntu-24.04-arm",
-              "riscv64-nonfree": "ubuntu-24.04-arm",
-            }')[matrix.platform],
-            fromJson('{
-              "x86_64": "amd64-fast",
-              "x86_64-nonfree": "amd64-fast",
-              "x86_64-nvidia": "amd64-fast",
-              "aarch64": "aarch64-fast",
-              "aarch64-nonfree": "aarch64-fast",
-              "aarch64-nvidia": "aarch64-fast",
-              "raspberrypi": "aarch64-fast",
-              "riscv64": "amd64-fast",
-              "riscv64-nonfree": "amd64-fast",
+              "x86_64": "buildjet-8vcpu-ubuntu-2204",
+              "x86_64-nonfree": "buildjet-8vcpu-ubuntu-2204",
+              "aarch64": "buildjet-8vcpu-ubuntu-2204-arm",
+              "aarch64-nonfree": "buildjet-8vcpu-ubuntu-2204-arm",
+              "raspberrypi": "buildjet-8vcpu-ubuntu-2204-arm",
+              "riscv64": "buildjet-8vcpu-ubuntu-2204",
             }')[matrix.platform]
           )
         )[github.event.inputs.runner == 'fast']
@@ -172,50 +158,45 @@ jobs:
         fromJson('{
           "x86_64": "x86_64",
           "x86_64-nonfree": "x86_64",
-          "x86_64-nvidia": "x86_64",
           "aarch64": "aarch64",
          "aarch64-nonfree": "aarch64",
-          "aarch64-nvidia": "aarch64",
           "raspberrypi": "aarch64",
           "riscv64": "riscv64",
-          "riscv64-nonfree": "riscv64",
         }')[matrix.platform]
       }}
     steps:
       - name: Free space
-        run: |
-          sudo apt-get remove --purge -y azure-cli || true
-          sudo apt-get remove --purge -y firefox || true
-          sudo apt-get remove --purge -y ghc-* || true
-          sudo apt-get remove --purge -y google-cloud-sdk || true
-          sudo apt-get remove --purge -y google-chrome-stable || true
-          sudo apt-get remove --purge -y powershell || true
-          sudo apt-get remove --purge -y php* || true
-          sudo apt-get remove --purge -y ruby* || true
-          sudo apt-get remove --purge -y mono-* || true
-          sudo apt-get autoremove -y
-          sudo apt-get clean
-          sudo rm -rf /usr/lib/jvm # All JDKs
-          sudo rm -rf /usr/local/.ghcup # Haskell toolchain
-          sudo rm -rf /usr/local/lib/android # Android SDK/NDK, emulator
-          sudo rm -rf /usr/share/dotnet # .NET SDKs
-          sudo rm -rf /usr/share/swift # Swift toolchain (if present)
-          sudo rm -rf "$AGENT_TOOLSDIRECTORY" # Pre-cached tool cache (Go, Node, etc.)
+        run: rm -rf /opt/hostedtoolcache*
         if: ${{ github.event.inputs.runner != 'fast' }}
-      # Some runners lack /opt/hostedtoolcache, which setup-qemu expects
-      - name: Ensure hostedtoolcache exists
-        run: sudo mkdir -p /opt/hostedtoolcache && sudo chown $USER:$USER /opt/hostedtoolcache
-      - name: Set up docker QEMU
-        uses: docker/setup-qemu-action@v4
-      - uses: actions/checkout@v6
+      - uses: actions/checkout@v4
         with:
          submodules: recursive
+      - name: Set up Python
+        uses: actions/setup-python@v5
+        with:
+          python-version: "3.x"
+      - name: Install dependencies
+        run: |
+          sudo apt-get update
+          sudo apt-get install -y qemu-user-static
+          wget https://deb.debian.org/debian/pool/main/d/debspawn/debspawn_0.6.2-1_all.deb
+          sha256sum ./debspawn_0.6.2-1_all.deb | grep 37ef27458cb1e35e8bce4d4f639b06b4b3866fc0b9191ec6b9bd157afd06a817
+          sudo apt-get install -y ./debspawn_0.6.2-1_all.deb
+      - name: Configure debspawn
+        run: |
+          sudo mkdir -p /etc/debspawn/
+          echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
+          sudo mkdir -p /var/tmp/debspawn
+      - run: sudo mount -t tmpfs tmpfs /var/tmp/debspawn
+        if: ${{ github.event.inputs.runner == 'fast' && (matrix.platform == 'x86_64' || matrix.platform == 'x86_64-nonfree') }}
       - name: Download compiled artifacts
-        uses: actions/download-artifact@v8
+        uses: actions/download-artifact@v4
        with:
          name: compiled-${{ env.ARCH }}.tar
@@ -226,19 +207,22 @@ jobs:
         run: |
           mkdir -p web/node_modules
           mkdir -p web/dist/raw
-          mkdir -p core/bindings
+          mkdir -p core/startos/bindings
           mkdir -p sdk/base/lib/osBindings
           mkdir -p container-runtime/node_modules
           mkdir -p container-runtime/dist
           mkdir -p container-runtime/dist/node_modules
+          mkdir -p core/startos/bindings
           mkdir -p sdk/dist
           mkdir -p sdk/baseDist
           mkdir -p patch-db/client/node_modules
           mkdir -p patch-db/client/dist
           mkdir -p web/.angular
           mkdir -p web/dist/raw/ui
+          mkdir -p web/dist/raw/install-wizard
           mkdir -p web/dist/raw/setup-wizard
           mkdir -p web/dist/static/ui
+          mkdir -p web/dist/static/install-wizard
           mkdir -p web/dist/static/setup-wizard
           PLATFORM=${{ matrix.platform }} make -t compiled-${{ env.ARCH }}.tar
@@ -252,124 +236,56 @@ jobs:
         run: PLATFORM=${{ matrix.platform }} make img
         if: ${{ matrix.platform == 'raspberrypi' }}
-      - uses: actions/upload-artifact@v7
+      - uses: actions/upload-artifact@v4
         with:
           name: ${{ matrix.platform }}.squashfs
           path: results/*.squashfs
-      - uses: actions/upload-artifact@v7
+      - uses: actions/upload-artifact@v4
         with:
           name: ${{ matrix.platform }}.iso
           path: results/*.iso
         if: ${{ matrix.platform != 'raspberrypi' }}
-      - uses: actions/upload-artifact@v7
+      - uses: actions/upload-artifact@v4
         with:
           name: ${{ matrix.platform }}.img
           path: results/*.img
         if: ${{ matrix.platform == 'raspberrypi' }}
-  deploy:
-    name: Deploy
+      - name: Upload OTA to registry
+        run: >-
+          PLATFORM=${{ matrix.platform }} make upload-ota TARGET="${{
+            fromJson('{
+              "alpha": "alpha-registry-x.start9.com",
+              "beta": "beta-registry.start9.com",
+            }')[github.event.inputs.deploy]
+          }}" KEY="${{
+            fromJson(
+              format('{{
+                "alpha": "{0}",
+                "beta": "{1}",
+              }}', secrets.ALPHA_INDEX_KEY, secrets.BETA_INDEX_KEY)
+            )[github.event.inputs.deploy]
+          }}"
+        if: ${{ github.event.inputs.deploy != '' && github.event.inputs.deploy != 'NONE' }}
+  index:
+    if: ${{ github.event.inputs.deploy != '' && github.event.inputs.deploy != 'NONE' }}
     needs: [image]
-    if: github.event_name == 'workflow_dispatch' && github.event.inputs.deploy != 'NONE'
-    runs-on: ubuntu-latest
-    env:
-      REGISTRY: >-
-        ${{
-          fromJson('{
-            "alpha": "https://alpha-registry-x.start9.com",
-            "beta": "https://beta-registry.start9.com"
-          }')[github.event.inputs.deploy]
-        }}
-      S3_BUCKET: s3://startos-images
-      S3_CDN: https://startos-images.nyc3.cdn.digitaloceanspaces.com
+    runs-on: ubuntu-22.04
     steps:
-      - uses: actions/checkout@v6
-        with:
-          sparse-checkout: web/package.json
-      - name: Determine version
-        id: version
-        run: |
-          VERSION=$(sed -n 's/.*"version": *"\([^"]*\)".*/\1/p' web/package.json | head -1)
-          echo "version=$VERSION" >> "$GITHUB_OUTPUT"
-          echo "Version: $VERSION"
-      - name: Download squashfs artifacts
-        uses: actions/download-artifact@v8
-        with:
-          pattern: "*.squashfs"
-          path: artifacts/
-          merge-multiple: true
-      - name: Download ISO artifacts
-        uses: actions/download-artifact@v8
-        with:
-          pattern: "*.iso"
-          path: artifacts/
-          merge-multiple: true
-      - name: Install start-cli
-        run: |
-          ARCH=$(uname -m)
-          OS=$(uname -s | tr '[:upper:]' '[:lower:]')
-          ASSET_NAME="start-cli_${ARCH}-${OS}"
-          DOWNLOAD_URL=$(curl -fsS \
-            -H "Authorization: token ${{ github.token }}" \
-            https://api.github.com/repos/Start9Labs/start-os/releases \
-            | jq -r '[.[].assets[] | select(.name=="'"$ASSET_NAME"'")] | first | .browser_download_url')
-          curl -fsSL \
-            -H "Authorization: token ${{ github.token }}" \
-            -H "Accept: application/octet-stream" \
-            "$DOWNLOAD_URL" -o /tmp/start-cli
-          sudo install -m 755 /tmp/start-cli /usr/local/bin/start-cli
-          echo "start-cli: $(start-cli --version)"
-      - name: Configure S3
-        run: |
-          sudo apt-get install -y -qq s3cmd > /dev/null
-          cat > ~/.s3cfg <<EOF
-          [default]
-          access_key = ${{ secrets.S3_ACCESS_KEY }}
-          secret_key = ${{ secrets.S3_SECRET_KEY }}
-          host_base = nyc3.digitaloceanspaces.com
-          host_bucket = %(bucket)s.nyc3.digitaloceanspaces.com
-          use_https = True
-          EOF
-      - name: Set up developer key
-        run: |
-          mkdir -p ~/.startos
-          printf '%s' "${{ secrets.DEV_KEY }}" > ~/.startos/developer.key.pem
-      - name: Upload to S3
-        run: |
-          VERSION="${{ steps.version.outputs.version }}"
-          cd artifacts
-          for file in *.iso *.squashfs; do
-            [ -f "$file" ] || continue
-            echo "Uploading $file..."
-            s3cmd put -P "$file" "${{ env.S3_BUCKET }}/v${VERSION}/$file"
-          done
-      - name: Register OS version
-        run: |
-          VERSION="${{ steps.version.outputs.version }}"
-          start-cli --registry="${{ env.REGISTRY }}" registry os version add \
-            "$VERSION" "v${VERSION}" '' ">=0.3.5 <=${VERSION}"
-      - name: Index assets in registry
-        run: |
-          VERSION="${{ steps.version.outputs.version }}"
-          cd artifacts
-          for file in *.squashfs *.iso; do
-            [ -f "$file" ] || continue
-            PLATFORM=$(echo "$file" | sed 's/.*_\([^.]*\)\.\(squashfs\|iso\)$/\1/')
-            echo "Indexing $file for platform $PLATFORM..."
-            start-cli --registry="${{ env.REGISTRY }}" registry os asset add \
-              --platform="$PLATFORM" \
-              --version="$VERSION" \
-              "$file" \
-              "${{ env.S3_CDN }}/v${VERSION}/$file"
-          done
+      - run: >-
+          curl "https://${{
+            fromJson('{
+              "alpha": "alpha-registry-x.start9.com",
+              "beta": "beta-registry.start9.com",
+            }')[github.event.inputs.deploy]
+          }}:8443/resync.cgi?key=${{
+            fromJson(
+              format('{{
+                "alpha": "{0}",
+                "beta": "{1}",
+              }}', secrets.ALPHA_INDEX_KEY, secrets.BETA_INDEX_KEY)
+            )[github.event.inputs.deploy]
+          }}"


@@ -10,10 +10,6 @@ on:
       - master
       - next/*
-concurrency:
-  group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
-  cancel-in-progress: true
 env:
   NODEJS_VERSION: "24.11.0"
   ENVIRONMENT: dev-unstable
@@ -21,18 +17,21 @@ env:
 jobs:
   test:
     name: Run Automated Tests
-    if: github.event.pull_request.draft != true
-    runs-on: ubuntu-latest
+    runs-on: ubuntu-22.04
     steps:
-      - uses: actions/checkout@v6
+      - uses: actions/checkout@v4
         with:
           submodules: recursive
-      - uses: ./.github/actions/setup-build
+      - uses: actions/setup-node@v4
         with:
-          nodejs-version: ${{ env.NODEJS_VERSION }}
-          free-space: "false"
-          setup-docker: "false"
-          setup-sccache: "false"
+          node-version: ${{ env.NODEJS_VERSION }}
+      - name: Use Beta Toolchain
+        run: rustup default beta
+      - name: Setup Cross
+        run: cargo install cross --git https://github.com/cross-rs/cross
       - name: Build And Run Tests
         run: make test

.gitignore (vendored)

@@ -1,26 +1,28 @@
 .DS_Store
 .idea
-*.img
-*.img.gz
-*.img.xz
-*.zip
+/*.img
+/*.img.gz
+/*.img.xz
+/*-raspios-bullseye-arm64-lite.img
+/*-raspios-bullseye-arm64-lite.zip
 /product_key.txt
 /*_product_key.txt
 .vscode/settings.json
 deploy_web.sh
+deploy_web.sh
 secrets.db
 .vscode/
-/build/env/*.txt
-*.deb
+/cargo-deps/**/*
+/PLATFORM.txt
+/ENVIRONMENT.txt
+/GIT_HASH.txt
+/VERSION.txt
+/*.deb
 /target
-*.squashfs
+/*.squashfs
 /results
 /dpkg-workdir
 /compiled.tar
 /compiled-*.tar
-/build/lib/firmware
-tmp
-web/.i18n-checked
-docs/USER.md
-*.s9pk
-/build/lib/migration-images
+/firmware
+/tmp


@@ -1,101 +0,0 @@
# Architecture
StartOS is an open-source Linux distribution for running personal servers. It manages discovery, installation, network configuration, backups, and health monitoring of self-hosted services.
## Tech Stack
- Backend: Rust (async/Tokio, Axum web framework)
- Frontend: Angular 21 + TypeScript + Taiga UI 5
- Container runtime: Node.js/TypeScript with LXC
- Database/State: Patch-DB (git submodule) - storage layer with reactive frontend sync
- API: JSON-RPC via rpc-toolkit (see `core/rpc-toolkit.md`; a request sketch follows this list)
- Auth: Password + session cookie, public/private key signatures, local authcookie (see `core/src/middleware/auth/`)
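A minimal, hypothetical sketch of that JSON-RPC exchange. The endpoint path and method name below are illustrative assumptions, not taken from the codebase; only the JSON-RPC 2.0 envelope itself is standard:
```ts
// Sketch only: '/rpc/v1' and the method string are placeholders.
async function rpc<T>(method: string, params?: Record<string, unknown>): Promise<T> {
  const res = await fetch('/rpc/v1', {
    method: 'POST',
    headers: { 'content-type': 'application/json' },
    body: JSON.stringify({ jsonrpc: '2.0', id: Date.now(), method, params }),
  })
  const { result, error } = await res.json()
  if (error) throw new Error(`${error.code}: ${error.message}`)
  return result as T
}

// Usage (method name hypothetical):
// const info = await rpc<{ version: string }>('server.info')
```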
## Project Structure
```bash
/
├── assets/ # Screenshots for README
├── build/ # Auxiliary files and scripts for deployed images
├── container-runtime/ # Node.js program managing package containers
├── core/ # Rust backend: API, daemon (startd), CLI (start-cli)
├── debian/ # Debian package maintainer scripts
├── image-recipe/ # Scripts for building StartOS images
├── patch-db/ # (submodule) Diff-based data store for frontend sync
├── sdk/ # TypeScript SDK for building StartOS packages
└── web/ # Web UIs (Angular)
```
## Components
- **`core/`** — Rust backend daemon. Produces a single binary `startbox` that is symlinked as `startd` (main daemon), `start-cli` (CLI), `start-container` (runs inside LXC containers), `registrybox` (package registry), and `tunnelbox` (VPN/tunnel). Handles all backend logic: RPC API, service lifecycle, networking (DNS, ACME, WiFi, Tor, WireGuard), backups, and database state management. See [core/ARCHITECTURE.md](core/ARCHITECTURE.md).
- **`web/`** — Angular 21 + TypeScript workspace using Taiga UI 5. Contains three applications (admin UI, setup wizard, VPN management) and two shared libraries (common components/services, marketplace). Communicates with the backend exclusively via JSON-RPC. See [web/ARCHITECTURE.md](web/ARCHITECTURE.md).
- **`container-runtime/`** — Node.js runtime that runs inside each service's LXC container. Loads the service's JavaScript from its S9PK package and manages subcontainers. Communicates with the host daemon via JSON-RPC over Unix socket. See [container-runtime/CLAUDE.md](container-runtime/CLAUDE.md).
- **`sdk/`** — TypeScript SDK for packaging services for StartOS (`@start9labs/start-sdk`). Split into `base/` (core types, ABI definitions, effects interface, consumed by web as `@start9labs/start-sdk-base`) and `package/` (full SDK for service developers, consumed by container-runtime as `@start9labs/start-sdk`).
- **`patch-db/`** — Git submodule providing diff-based state synchronization. Uses CBOR encoding. Backend mutations produce diffs that are pushed to the frontend via WebSocket, enabling reactive UI updates without polling. See [patch-db repo](https://github.com/Start9Labs/patch-db).
## Build Pipeline
Components have a strict dependency chain. Changes flow in one direction:
```
Rust (core/)
→ cargo test exports ts-rs types to core/bindings/
→ rsync copies to sdk/base/lib/osBindings/
→ SDK build produces baseDist/ and dist/
→ web/ consumes baseDist/ (via @start9labs/start-sdk-base)
→ container-runtime/ consumes dist/ (via @start9labs/start-sdk)
```
Key make targets along this chain:
| Step | Command | What it does |
|---|---|---|
| 1 | `cargo check -p start-os` | Verify Rust compiles |
| 2 | `make ts-bindings` | Export ts-rs types → rsync to SDK |
| 3 | `cd sdk && make baseDist dist` | Build SDK packages |
| 4 | `cd web && npm run check` | Type-check Angular projects |
| 5 | `cd container-runtime && npm run check` | Type-check runtime |
**Important**: Editing `sdk/base/lib/osBindings/*.ts` alone is NOT sufficient — you must rebuild the SDK bundle (step 3) before web/container-runtime can see the changes.
## Cross-Layer Verification
When making changes across multiple layers (Rust, SDK, web, container-runtime), verify in this order:
1. **Rust**: `cargo check -p start-os` — verifies core compiles
2. **TS bindings**: `make ts-bindings` — regenerates TypeScript types from Rust `#[ts(export)]` structs
   - Runs `./core/build/build-ts.sh` to export ts-rs types to `core/bindings/`
   - Syncs `core/bindings/` → `sdk/base/lib/osBindings/` via rsync
   - If you manually edit files in `sdk/base/lib/osBindings/`, you must still rebuild the SDK (step 3)
3. **SDK bundle**: `cd sdk && make baseDist dist` — compiles SDK source into packages
   - `baseDist/` is consumed by `/web` (via `@start9labs/start-sdk-base`)
   - `dist/` is consumed by `/container-runtime` (via `@start9labs/start-sdk`)
   - Web and container-runtime reference the **built** SDK, not source files
4. **Web type check**: `cd web && npm run check` — type-checks all Angular projects
5. **Container runtime type check**: `cd container-runtime && npm run check` — type-checks the runtime
## Data Flow: Backend to Frontend
StartOS uses Patch-DB for reactive state synchronization:
1. The backend mutates state via `db.mutate()`, producing CBOR diffs
2. Diffs are pushed to the frontend over a persistent WebSocket connection
3. The frontend applies diffs to its local state copy and notifies observers
4. Components watch specific database paths via `PatchDB.watch$()`, receiving updates reactively
This means the UI is always eventually consistent with the backend — after any mutating API call, the frontend waits for the corresponding PatchDB diff before resolving, so the UI reflects the result immediately.
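A sketch of step 4, under the assumption that `watch$` accepts path segments and returns an RxJS `Observable` (the real signature and path names live in `patch-db/client` and the web code; treat these as placeholders):
```ts
import { Observable } from 'rxjs'

// Assumed shape for illustration only.
declare const patchDb: { watch$(...path: string[]): Observable<unknown> }

// Receives a new value every time a CBOR diff touching this path is applied;
// no polling is involved.
const sub = patchDb
  .watch$('packageData', 'my-package', 'status')
  .subscribe(status => console.log('status changed:', status))

// Unsubscribe when the watching component is destroyed.
sub.unsubscribe()
```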
## Further Reading
- [core/ARCHITECTURE.md](core/ARCHITECTURE.md) — Rust backend architecture
- [web/ARCHITECTURE.md](web/ARCHITECTURE.md) — Angular frontend architecture
- [container-runtime/CLAUDE.md](container-runtime/CLAUDE.md) — Container runtime details
- [core/rpc-toolkit.md](core/rpc-toolkit.md) — JSON-RPC handler patterns
- [core/s9pk-structure.md](core/s9pk-structure.md) — S9PK package format
- [docs/exver.md](docs/exver.md) — Extended versioning format
- [docs/VERSION_BUMP.md](docs/VERSION_BUMP.md) — Version bumping guide


@@ -1,59 +0,0 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Architecture
See [ARCHITECTURE.md](ARCHITECTURE.md) for the full system architecture, component map, build pipeline, and cross-layer verification order.
Each major component has its own `CLAUDE.md` with detailed guidance: `core/`, `web/`, `container-runtime/`, `sdk/`.
## Build & Development
See [CONTRIBUTING.md](CONTRIBUTING.md) for:
- Environment setup and requirements
- Build commands and make targets
- Testing and formatting commands
- Environment variables
**Quick reference:**
```bash
. ./devmode.sh # Enable dev mode
make update-startbox REMOTE=start9@<ip> # Fastest iteration (binary + UI)
make test-core # Run Rust tests
```
## Operating Rules
- Always verify cross-layer changes using the order described in [ARCHITECTURE.md](ARCHITECTURE.md#cross-layer-verification)
- Check component-level CLAUDE.md files for component-specific conventions. ALWAYS read the relevant file before operating on that component.
- Follow existing patterns before inventing new ones
- Always use `make` recipes when they exist for testing builds rather than manually invoking build commands
- **Commit signing:** Never push unsigned commits. Before pushing, check all unpushed commits for signatures with `git log --show-signature @{upstream}..HEAD`. If any are unsigned, prompt the user to sign them with `git rebase --exec 'git commit --amend -S --no-edit' @{upstream}`.
## Supplementary Documentation
The `docs/` directory contains cross-cutting documentation for AI assistants:
- `TODO.md` - Pending tasks for AI agents (check this first, remove items when completed)
- `USER.md` - Current user identifier (gitignored, see below)
- `exver.md` - Extended versioning format (used across core, sdk, and web)
- `VERSION_BUMP.md` - Guide for bumping the StartOS version across the codebase
Component-specific docs live alongside their code (e.g., `core/rpc-toolkit.md`, `core/i18n-patterns.md`).
### Session Startup
On startup:
1. **Check for `docs/USER.md`** - If it doesn't exist, prompt the user for their name/identifier and create it. This file is gitignored since it varies per developer.
2. **Check `docs/TODO.md` for relevant tasks** - Show TODOs that either:
   - Have no `@username` tag (relevant to everyone)
   - Are tagged with the current user's identifier
   Skip TODOs tagged with a different user.
3. **Ask "What would you like to do today?"** - Offer options for each relevant TODO item, plus "Something else" for other requests.


@@ -1,240 +1,133 @@
 # Contributing to StartOS
-This guide is for contributing to StartOS. If you are interested in packaging a service for StartOS, visit the [service packaging guide](https://github.com/Start9Labs/ai-service-packaging). If you are interested in promoting, providing technical support, creating tutorials, or helping in other ways, please visit the [Start9 website](https://start9.com/contribute).
+This guide is for contributing to StartOS. If you are interested in packaging a service for StartOS, visit the [service packaging guide](https://docs.start9.com/latest/packaging-guide/). If you are interested in promoting, providing technical support, creating tutorials, or helping in other ways, please visit the [Start9 website](https://start9.com/contribute).
 ## Collaboration
-- [Matrix](https://matrix.to/#/#dev-startos:matrix.start9labs.com)
-For project structure and system architecture, see [ARCHITECTURE.md](ARCHITECTURE.md).
+- [Matrix](https://matrix.to/#/#community-dev:matrix.start9labs.com)
+- [Telegram](https://t.me/start9_labs/47471)
+## Project Structure
+```bash
+/
+├── assets/
+├── container-runtime/
+├── core/
+├── build/
+├── debian/
+├── web/
+├── image-recipe/
+├── patch-db
+└── sdk/
+```
+#### assets
+Screenshots for the StartOS README
+#### container-runtime
+A NodeJS program that dynamically loads maintainer scripts and communicates with the OS to manage packages
+#### core
+An API, daemon (startd), and CLI (start-cli) that together provide the core functionality of StartOS.
+#### build
+Auxiliary files and scripts to include in deployed StartOS images
+#### debian
+Maintainer scripts for the StartOS Debian package
+#### web
+Web UIs served under various conditions and used to interact with StartOS APIs.
+#### image-recipe
+Scripts for building StartOS images
+#### patch-db (submodule)
+A diff-based data store used to synchronize data between the web interfaces and server.
+#### sdk
+A TypeScript SDK for building StartOS packages
 ## Environment Setup
-### Installing Dependencies (Debian/Ubuntu)
-> Debian/Ubuntu is the only officially supported build environment.
-> MacOS has limited build capabilities and Windows requires [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install).
+#### Clone the StartOS repository
 ```sh
-sudo apt update
-sudo apt install -y ca-certificates curl gpg build-essential
-curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
-echo "deb [arch=$(dpkg-architecture -q DEB_HOST_ARCH) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian bookworm stable" | sudo tee /etc/apt/sources.list.d/docker.list
-sudo apt update
-sudo apt install -y sed grep gawk jq gzip brotli containerd.io docker-ce docker-ce-cli docker-compose-plugin qemu-user-static binfmt-support squashfs-tools git debspawn rsync b3sum
-sudo mkdir -p /etc/debspawn/
-echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
-sudo usermod -aG docker $USER
-sudo su $USER
-docker run --privileged --rm tonistiigi/binfmt --install all
-docker buildx create --use
-curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh # proceed with default installation
-curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/master/install.sh | bash
-source ~/.bashrc
-nvm install 24
-nvm use 24
-nvm alias default 24 # this prevents your machine from reverting back to another version
-```
-### Cloning the Repository
-```sh
-git clone --recursive https://github.com/Start9Labs/start-os.git --branch next/major
+git clone https://github.com/Start9Labs/start-os.git --recurse-submodules
 cd start-os
 ```
-### Development Mode
-For faster iteration during development:
-```sh
-. ./devmode.sh
-```
-This sets `ENVIRONMENT=dev` and `GIT_BRANCH_AS_HASH=1` to prevent rebuilds on every commit.
+#### Continue to your project of interest for additional instructions:
+- [`core`](core/README.md)
+- [`web-interfaces`](web-interfaces/README.md)
+- [`build`](build/README.md)
+- [`patch-db`](https://github.com/Start9Labs/patch-db)
 ## Building
-All builds can be performed on any operating system that can run Docker.
-This project uses [GNU Make](https://www.gnu.org/software/make/) to build its components.
+This project uses [GNU Make](https://www.gnu.org/software/make/) to build its components. To build any specific component, simply run `make <TARGET>`, replacing `<TARGET>` with the name of the target you'd like to build.
 ### Requirements
 - [GNU Make](https://www.gnu.org/software/make/)
-- [Docker](https://docs.docker.com/get-docker/) or [Podman](https://podman.io/)
+- [Docker](https://docs.docker.com/get-docker/)
 - [NodeJS v20.16.0](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm)
-- [Rust](https://rustup.rs/) (nightly for formatting)
-- [sed](https://www.gnu.org/software/sed/), [grep](https://www.gnu.org/software/grep/), [awk](https://www.gnu.org/software/gawk/)
+- [sed](https://www.gnu.org/software/sed/)
+- [grep](https://www.gnu.org/software/grep/)
+- [awk](https://www.gnu.org/software/gawk/)
 - [jq](https://jqlang.github.io/jq/)
-- [gzip](https://www.gnu.org/software/gzip/), [brotli](https://github.com/google/brotli)
+- [gzip](https://www.gnu.org/software/gzip/)
+- [brotli](https://github.com/google/brotli)
-### Environment Variables
-| Variable             | Description |
-| -------------------- | ----------- |
-| `PLATFORM`           | Target platform: `x86_64`, `x86_64-nonfree`, `aarch64`, `aarch64-nonfree`, `riscv64`, `raspberrypi` |
-| `ENVIRONMENT`        | Hyphen-separated feature flags (see below) |
-| `PROFILE`            | Build profile: `release` (default) or `dev` |
-| `GIT_BRANCH_AS_HASH` | Set to `1` to use git branch name as version hash (avoids rebuilds) |
-**ENVIRONMENT flags:**
-- `dev` - Enables password SSH before setup, skips frontend compression
-- `unstable` - Enables assertions and debugging with performance penalty
-- `console` - Enables tokio-console for async debugging
-**Platform notes:**
-- `-nonfree` variants include proprietary firmware and drivers
-- `raspberrypi` includes non-free components by necessity
-- Platform is remembered between builds if not specified
-### Make Targets
-#### Building
-| Target        | Description |
-| ------------- | ---------------------------------------------- |
-| `iso`         | Create full `.iso` image (not for raspberrypi) |
-| `img`         | Create full `.img` image (raspberrypi only)    |
-| `deb`         | Build Debian package                           |
-| `all`         | Build all Rust binaries                        |
-| `uis`         | Build all web UIs                              |
-| `ui`          | Build main UI only                             |
-| `ts-bindings` | Generate TypeScript bindings from Rust types   |
-#### Deploying to Device
-For devices on the same network:
-| Target                               | Description |
-| ------------------------------------ | ----------------------------------------------- |
-| `update-startbox REMOTE=start9@<ip>` | Deploy binary + UI only (fastest)               |
-| `update-deb REMOTE=start9@<ip>`      | Deploy full Debian package                      |
-| `update REMOTE=start9@<ip>`          | OTA-style update                                |
-| `reflash REMOTE=start9@<ip>`         | Reflash as if using live ISO                    |
-| `update-overlay REMOTE=start9@<ip>`  | Deploy to in-memory overlay (reverts on reboot) |
-For devices on different networks (uses [magic-wormhole](https://github.com/magic-wormhole/magic-wormhole)):
-| Target              | Description |
-| ------------------- | -------------------- |
-| `wormhole`          | Send startbox binary |
-| `wormhole-deb`      | Send Debian package  |
-| `wormhole-squashfs` | Send squashfs image  |
-### Creating a VM
-Install virt-manager:
-```sh
-sudo apt update
-sudo apt install -y virt-manager
-sudo usermod -aG libvirt $USER
-sudo su $USER
-virt-manager
-```
-Follow the screenshot walkthrough in [`assets/create-vm/`](assets/create-vm/) to create a new virtual machine. Key steps:
-1. Create a new virtual machine
-2. Browse for the ISO — create a storage pool pointing to your `results/` directory
-3. Select "Generic or unknown OS"
-4. Set memory and CPUs
-5. Create a disk and name the VM
-Build an ISO first:
-```sh
-PLATFORM=$(uname -m) ENVIRONMENT=dev make iso
-```
-#### Other
-| Target                   | Description |
-| ------------------------ | ------------------------------------------- |
-| `format`                 | Run code formatting (Rust nightly required) |
-| `test`                   | Run all automated tests                     |
-| `test-core`              | Run Rust tests                              |
-| `test-sdk`               | Run SDK tests                               |
-| `test-container-runtime` | Run container runtime tests                 |
-| `clean`                  | Delete all compiled artifacts               |
-## Testing
-```bash
-make test                   # All tests
-make test-core              # Rust tests (via ./core/run-tests.sh)
-make test-sdk               # SDK tests
-make test-container-runtime # Container runtime tests
-# Run specific Rust test
-cd core && cargo test <test_name> --features=test
-```
-## Code Formatting
-```bash
-# Rust (requires nightly)
-make format
-# TypeScript/HTML/SCSS (web)
-cd web && npm run format
-```
-## Code Style Guidelines
-### Formatting
-Run the formatters before committing. Configuration is handled by `rustfmt.toml` (Rust) and prettier configs (TypeScript).
-### Documentation & Comments
-**Rust:**
-- Add doc comments (`///`) to public APIs, structs, and non-obvious functions
-- Use `//` comments sparingly for complex logic that isn't self-evident
-- Prefer self-documenting code (clear naming, small functions) over comments
-**TypeScript:**
-- Document exported functions and complex types with JSDoc
-- Keep comments focused on "why" rather than "what"
-**General:**
-- Don't add comments that just restate the code
-- Update or remove comments when code changes
-- TODOs should include context: `// TODO(username): reason`
-### Commit Messages
-Use [Conventional Commits](https://www.conventionalcommits.org/):
-```
-<type>(<scope>): <description>
-[optional body]
-[optional footer]
-```
-**Types:**
-- `feat` - New feature
-- `fix` - Bug fix
-- `docs` - Documentation only
-- `style` - Formatting, no code change
-- `refactor` - Code change that neither fixes a bug nor adds a feature
-- `test` - Adding or updating tests
-- `chore` - Build process, dependencies, etc.
-**Examples:**
-```
-feat(web): add dark mode toggle
-fix(core): resolve race condition in service startup
-docs: update CONTRIBUTING.md with style guidelines
-refactor(sdk): simplify package validation logic
-```
+### Environment variables
+- `PLATFORM`: which platform you would like to build for. Must be one of `x86_64`, `x86_64-nonfree`, `aarch64`, `aarch64-nonfree`, `raspberrypi`
+  - NOTE: `nonfree` images are for including `nonfree` firmware packages in the built ISO
+- `ENVIRONMENT`: a hyphen separated set of feature flags to enable
+  - `dev`: enables password ssh (INSECURE!) and does not compress frontends
+  - `unstable`: enables assertions that will cause errors on unexpected inconsistencies that are undesirable in production use either for performance or reliability reasons
+  - `docker`: use `docker` instead of `podman`
+- `GIT_BRANCH_AS_HASH`: set to `1` to use the current git branch name as the git hash so that the project does not need to be rebuilt on each commit
+### Useful Make Targets
+- `iso`: Create a full `.iso` image
+  - Only possible from Debian
+  - Not available for `PLATFORM=raspberrypi`
+  - Additional Requirements:
+    - [debspawn](https://github.com/lkhq/debspawn)
+- `img`: Create a full `.img` image
+  - Only possible from Debian
+  - Only available for `PLATFORM=raspberrypi`
+  - Additional Requirements:
+    - [debspawn](https://github.com/lkhq/debspawn)
+- `format`: Run automatic code formatting for the project
+  - Additional Requirements:
+    - [rust](https://rustup.rs/)
+- `test`: Run automated tests for the project
+  - Additional Requirements:
+    - [rust](https://rustup.rs/)
+- `update`: Deploy the current working project to a device over ssh as if through an over-the-air update
+  - Requires an argument `REMOTE` which is the ssh address of the device, i.e. `start9@192.168.122.2`
+- `reflash`: Deploy the current working project to a device over ssh as if using a live `iso` image to reflash it
+  - Requires an argument `REMOTE` which is the ssh address of the device, i.e. `start9@192.168.122.2`
+- `update-overlay`: Deploy the current working project to a device over ssh to the in-memory overlay without restarting it
+  - WARNING: changes will be reverted after the device is rebooted
+  - WARNING: changes to `init` will not take effect as the device is already initialized
+  - Requires an argument `REMOTE` which is the ssh address of the device, i.e. `start9@192.168.122.2`
+- `wormhole`: Deploy the `startbox` to a device using [magic-wormhole](https://github.com/magic-wormhole/magic-wormhole)
+  - When the build is complete, it will emit a command to paste into the shell of the device to upgrade it
+  - Additional Requirements:
+    - [magic-wormhole](https://github.com/magic-wormhole/magic-wormhole)
+- `clean`: Delete all compiled artifacts

DEVELOPMENT.md (new file)

@@ -0,0 +1,134 @@
# Setting up your development environment on Debian/Ubuntu
A step-by-step guide
> This is the only officially supported build environment.
> MacOS has limited build capabilities and Windows requires [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install)
## Installing dependencies
Run the following commands one at a time
```sh
sudo apt update
sudo apt install -y ca-certificates curl gpg build-essential
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg-architecture -q DEB_HOST_ARCH) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian bookworm stable" | sudo tee /etc/apt/sources.list.d/docker.list
sudo apt update
sudo apt install -y sed grep gawk jq gzip brotli containerd.io docker-ce docker-ce-cli docker-compose-plugin qemu-user-static binfmt-support squashfs-tools git debspawn rsync b3sum
sudo mkdir -p /etc/debspawn/
echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
sudo usermod -aG docker $USER
sudo su $USER
docker run --privileged --rm tonistiigi/binfmt --install all
docker buildx create --use
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh # proceed with default installation
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/master/install.sh | bash
source ~/.bashrc
nvm install 24
nvm use 24
nvm alias default 24 # this prevents your machine from reverting back to another version
```
## Cloning the repository
```sh
git clone --recursive https://github.com/Start9Labs/start-os.git --branch next/major
cd start-os
```
## Building an ISO
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make iso
```
This will build an ISO for your current architecture. If you are building to run on an architecture other than the one you are currently on, replace `$(uname -m)` with the correct platform for the device (one of `aarch64`, `aarch64-nonfree`, `x86_64`, `x86_64-nonfree`, `raspberrypi`)
## Creating a VM
### Install virt-manager
```sh
sudo apt update
sudo apt install -y virt-manager
sudo usermod -aG libvirt $USER
sudo su $USER
```
### Launch virt-manager
```sh
virt-manager
```
### Create new virtual machine
![Select "Create a new virtual machine"](assets/create-vm/step-1.png)
![Click "Forward"](assets/create-vm/step-2.png)
![Click "Browse"](assets/create-vm/step-3.png)
![Click "+"](assets/create-vm/step-4.png)
#### Make sure to set "Target Path" to the path of your `results/` directory in start-os
![Create storage pool](assets/create-vm/step-5.png)
![Select storage pool](assets/create-vm/step-6.png)
![Select ISO](assets/create-vm/step-7.png)
![Select "Generic or unknown OS" and click "Forward"](assets/create-vm/step-8.png)
![Set Memory and CPUs](assets/create-vm/step-9.png)
![Create disk](assets/create-vm/step-10.png)
![Name VM](assets/create-vm/step-11.png)
![Create network](assets/create-vm/step-12.png)
## Updating a VM
The fastest way to update a VM to your latest code depends on what you changed:
### UI or startd:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-startbox REMOTE=start9@<VM IP>
```
### Container runtime or debian dependencies:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-deb REMOTE=start9@<VM IP>
```
### Image recipe:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-squashfs REMOTE=start9@<VM IP>
```
---
If the device you are building for is not available via ssh, it is also possible to use `magic-wormhole` to send the relevant files.
### Prerequisites:
```sh
sudo apt update
sudo apt install -y magic-wormhole
```
As before, the fastest way to update a VM to your latest code depends on what you changed. Each of the following commands will return a command to paste into the shell of the device you would like to upgrade.
### UI or startd:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole
```
### Container runtime or debian dependencies:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole-deb
```
### Image recipe:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole-squashfs
```

Makefile

@@ -1,44 +1,46 @@
 ls-files = $(shell git ls-files --cached --others --exclude-standard $1)
 PROFILE = release
-PLATFORM_FILE := $(shell ./build/env/check-platform.sh)
-ENVIRONMENT_FILE := $(shell ./build/env/check-environment.sh)
-GIT_HASH_FILE := $(shell ./build/env/check-git-hash.sh)
-VERSION_FILE := $(shell ./build/env/check-version.sh)
-BASENAME := $(shell PROJECT=startos ./build/env/basename.sh)
-PLATFORM := $(shell if [ -f $(PLATFORM_FILE) ]; then cat $(PLATFORM_FILE); else echo unknown; fi)
-ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; elif [ "$(PLATFORM)" = "rockchip64" ]; then echo aarch64; else echo $(PLATFORM) | sed 's/-nonfree$$//g; s/-nvidia$$//g'; fi)
+PLATFORM_FILE := $(shell ./check-platform.sh)
+ENVIRONMENT_FILE := $(shell ./check-environment.sh)
+GIT_HASH_FILE := $(shell ./check-git-hash.sh)
+VERSION_FILE := $(shell ./check-version.sh)
+BASENAME := $(shell PROJECT=startos ./basename.sh)
+PLATFORM := $(shell if [ -f ./PLATFORM.txt ]; then cat ./PLATFORM.txt; else echo unknown; fi)
+ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; else echo $(PLATFORM) | sed 's/-nonfree$$//g'; fi)
 RUST_ARCH := $(shell if [ "$(ARCH)" = "riscv64" ]; then echo riscv64gc; else echo $(ARCH); fi)
-REGISTRY_BASENAME := $(shell PROJECT=start-registry PLATFORM=$(ARCH) ./build/env/basename.sh)
-TUNNEL_BASENAME := $(shell PROJECT=start-tunnel PLATFORM=$(ARCH) ./build/env/basename.sh)
+REGISTRY_BASENAME := $(shell PROJECT=start-registry PLATFORM=$(ARCH) ./basename.sh)
+TUNNEL_BASENAME := $(shell PROJECT=start-tunnel PLATFORM=$(ARCH) ./basename.sh)
 IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo iso; fi)
-WEB_UIS := web/dist/raw/ui/index.html web/dist/raw/setup-wizard/index.html
-COMPRESSED_WEB_UIS := web/dist/static/ui/index.html web/dist/static/setup-wizard/index.html
-FIRMWARE_ROMS := build/lib/firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./build/lib/firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json)
-BUILD_SRC := $(call ls-files, build/lib) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS) build/lib/migration-images/.done
-IMAGE_RECIPE_SRC := $(call ls-files, build/image-recipe/)
-STARTD_SRC := core/startd.service $(BUILD_SRC)
+WEB_UIS := web/dist/raw/ui/index.html web/dist/raw/setup-wizard/index.html web/dist/raw/install-wizard/index.html
+COMPRESSED_WEB_UIS := web/dist/static/ui/index.html web/dist/static/setup-wizard/index.html web/dist/static/install-wizard/index.html
+FIRMWARE_ROMS := ./firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json)
+BUILD_SRC := $(call ls-files, build) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS)
+IMAGE_RECIPE_SRC := $(call ls-files, image-recipe/)
+STARTD_SRC := core/startos/startd.service $(BUILD_SRC)
 CORE_SRC := $(call ls-files, core) $(shell git ls-files --recurse-submodules patch-db) $(GIT_HASH_FILE)
 WEB_SHARED_SRC := $(call ls-files, web/projects/shared) $(call ls-files, web/projects/marketplace) $(shell ls -p web/ | grep -v / | sed 's/^/web\//g') web/node_modules/.package-lock.json web/config.json patch-db/client/dist/index.js sdk/baseDist/package.json web/patchdb-ui-seed.json sdk/dist/package.json
 WEB_UI_SRC := $(call ls-files, web/projects/ui)
 WEB_SETUP_WIZARD_SRC := $(call ls-files, web/projects/setup-wizard)
+WEB_INSTALL_WIZARD_SRC := $(call ls-files, web/projects/install-wizard)
 WEB_START_TUNNEL_SRC := $(call ls-files, web/projects/start-tunnel)
 PATCH_DB_CLIENT_SRC := $(shell git ls-files --recurse-submodules patch-db/client)
 GZIP_BIN := $(shell which pigz || which gzip)
 TAR_BIN := $(shell which gtar || which tar)
-COMPILED_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox core/target/$(RUST_ARCH)-unknown-linux-musl/release/start-container container-runtime/rootfs.$(ARCH).squashfs
-STARTOS_TARGETS := $(STARTD_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) target/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs $(PLATFORM_FILE) \
+COMPILED_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox core/target/$(RUST_ARCH)-unknown-linux-musl/release/containerbox container-runtime/rootfs.$(ARCH).squashfs
+STARTOS_TARGETS := $(STARTD_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) cargo-deps/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs $(PLATFORM_FILE) \
 	$(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then \
-		echo target/aarch64-unknown-linux-musl/release/pi-beep; \
+		echo cargo-deps/aarch64-unknown-linux-musl/release/pi-beep; \
 	fi) \
 	$(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]; then \
-		echo target/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph; \
+		echo cargo-deps/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph; \
 	fi') \
 	$(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)console($$|-) ]]; then \
-		echo target/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console; \
+		echo cargo-deps/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console; \
 	fi')
-REGISTRY_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox core/start-registryd.service
-TUNNEL_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox core/start-tunneld.service
-REBUILD_TYPES = 1
+REGISTRY_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox core/startos/start-registryd.service
+TUNNEL_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox core/startos/start-tunneld.service
 ifeq ($(REMOTE),)
 	mkdir = mkdir -p $1
@@ -61,7 +63,7 @@ endif
 .DELETE_ON_ERROR:
-.PHONY: all metadata install clean format install-cli cli uis ui reflash deb $(IMAGE_TYPE) squashfs wormhole wormhole-deb test test-core test-sdk test-container-runtime registry install-registry tunnel install-tunnel ts-bindings
+.PHONY: all metadata install clean format cli uis ui reflash deb $(IMAGE_TYPE) squashfs wormhole wormhole-deb test test-core test-sdk test-container-runtime registry install-registry tunnel install-tunnel
 all: $(STARTOS_TARGETS)
@@ -72,7 +74,7 @@ metadata: $(VERSION_FILE) $(PLATFORM_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE)
 clean:
 	rm -rf core/target
-	rm -rf core/bindings
+	rm -rf core/startos/bindings
 	rm -rf web/.angular
 	rm -f web/config.json
 	rm -rf web/node_modules
@@ -80,7 +82,7 @@ clean:
 	rm -rf patch-db/client/node_modules
 	rm -rf patch-db/client/dist
 	rm -rf patch-db/target
-	rm -rf target
+	rm -rf cargo-deps
 	rm -rf dpkg-workdir
 	rm -rf image-recipe/deb
rm -rf results rm -rf results
@@ -88,9 +90,14 @@ clean:
rm -rf container-runtime/dist rm -rf container-runtime/dist
rm -rf container-runtime/node_modules rm -rf container-runtime/node_modules
rm -f container-runtime/*.squashfs rm -f container-runtime/*.squashfs
if [ -d container-runtime/tmp/combined ] && mountpoint container-runtime/tmp/combined; then sudo umount container-runtime/tmp/combined; fi
if [ -d container-runtime/tmp/lower ] && mountpoint container-runtime/tmp/lower; then sudo umount container-runtime/tmp/lower; fi
rm -rf container-runtime/tmp
(cd sdk && make clean) (cd sdk && make clean)
rm -rf build/lib/migration-images rm -f ENVIRONMENT.txt
rm -f env/*.txt rm -f PLATFORM.txt
rm -f GIT_HASH.txt
rm -f VERSION.txt
format: format:
cd core && cargo +nightly fmt cd core && cargo +nightly fmt
@@ -106,15 +113,8 @@ test-sdk: $(call ls-files, sdk) sdk/base/lib/osBindings/index.ts
test-container-runtime: container-runtime/node_modules/.package-lock.json $(call ls-files, container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json test-container-runtime: container-runtime/node_modules/.package-lock.json $(call ls-files, container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json
cd container-runtime && npm test cd container-runtime && npm test
build/lib/migration-images/.done: build/save-migration-images.sh cli:
ARCH=$(ARCH) ./build/save-migration-images.sh build/lib/migration-images ./core/install-cli.sh
touch $@
install-cli: $(GIT_HASH_FILE)
./core/build/build-cli.sh --install
cli: $(GIT_HASH_FILE)
./core/build/build-cli.sh
registry: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox registry: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox
@@ -125,54 +125,49 @@ install-registry: $(REGISTRY_TARGETS)
$(call ln,/usr/bin/start-registrybox,$(DESTDIR)/usr/bin/start-registry) $(call ln,/usr/bin/start-registrybox,$(DESTDIR)/usr/bin/start-registry)
$(call mkdir,$(DESTDIR)/lib/systemd/system) $(call mkdir,$(DESTDIR)/lib/systemd/system)
$(call cp,core/start-registryd.service,$(DESTDIR)/lib/systemd/system/start-registryd.service) $(call cp,core/startos/start-registryd.service,$(DESTDIR)/lib/systemd/system/start-registryd.service)
core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox: $(CORE_SRC) $(ENVIRONMENT_FILE) core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox: $(CORE_SRC) $(ENVIRONMENT_FILE)
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build/build-registrybox.sh ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build-registrybox.sh
tunnel: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox tunnel: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox
install-tunnel: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox core/start-tunneld.service install-tunnel: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox core/startos/start-tunneld.service
$(call mkdir,$(DESTDIR)/usr/bin) $(call mkdir,$(DESTDIR)/usr/bin)
$(call cp,core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox,$(DESTDIR)/usr/bin/start-tunnelbox) $(call cp,core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox,$(DESTDIR)/usr/bin/start-tunnelbox)
$(call ln,/usr/bin/start-tunnelbox,$(DESTDIR)/usr/bin/start-tunneld) $(call ln,/usr/bin/start-tunnelbox,$(DESTDIR)/usr/bin/start-tunneld)
$(call ln,/usr/bin/start-tunnelbox,$(DESTDIR)/usr/bin/start-tunnel) $(call ln,/usr/bin/start-tunnelbox,$(DESTDIR)/usr/bin/start-tunnel)
$(call mkdir,$(DESTDIR)/lib/systemd/system) $(call mkdir,$(DESTDIR)/lib/systemd/system)
$(call cp,core/start-tunneld.service,$(DESTDIR)/lib/systemd/system/start-tunneld.service) $(call cp,core/startos/start-tunneld.service,$(DESTDIR)/lib/systemd/system/start-tunneld.service)
$(call mkdir,$(DESTDIR)/usr/lib/startos/scripts) $(call mkdir,$(DESTDIR)/usr/lib/startos/scripts)
$(call cp,build/lib/scripts/forward-port,$(DESTDIR)/usr/lib/startos/scripts/forward-port) $(call cp,build/lib/scripts/forward-port,$(DESTDIR)/usr/lib/startos/scripts/forward-port)
$(call mkdir,$(DESTDIR)/etc/apt/sources.list.d)
$(call cp,apt/start9.list,$(DESTDIR)/etc/apt/sources.list.d/start9.list)
$(call mkdir,$(DESTDIR)/usr/share/keyrings)
$(call cp,apt/start9.gpg,$(DESTDIR)/usr/share/keyrings/start9.gpg)
core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox: $(CORE_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) web/dist/static/start-tunnel/index.html core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox: $(CORE_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) web/dist/static/start-tunnel/index.html
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build/build-tunnelbox.sh ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build-tunnelbox.sh
deb: results/$(BASENAME).deb deb: results/$(BASENAME).deb
results/$(BASENAME).deb: debian/dpkg-build.sh $(call ls-files,debian/startos) $(STARTOS_TARGETS) results/$(BASENAME).deb: dpkg-build.sh $(call ls-files,debian/startos) $(STARTOS_TARGETS)
PLATFORM=$(PLATFORM) REQUIRES=debian ./build/os-compat/run-compat.sh ./debian/dpkg-build.sh PLATFORM=$(PLATFORM) REQUIRES=debian ./build/os-compat/run-compat.sh ./dpkg-build.sh
registry-deb: results/$(REGISTRY_BASENAME).deb registry-deb: results/$(REGISTRY_BASENAME).deb
results/$(REGISTRY_BASENAME).deb: debian/dpkg-build.sh $(call ls-files,debian/start-registry) $(REGISTRY_TARGETS) results/$(REGISTRY_BASENAME).deb: dpkg-build.sh $(call ls-files,debian/start-registry) $(REGISTRY_TARGETS)
PROJECT=start-registry PLATFORM=$(ARCH) REQUIRES=debian DEPENDS=ca-certificates ./build/os-compat/run-compat.sh ./debian/dpkg-build.sh PROJECT=start-registry PLATFORM=$(ARCH) REQUIRES=debian ./build/os-compat/run-compat.sh ./dpkg-build.sh
tunnel-deb: results/$(TUNNEL_BASENAME).deb tunnel-deb: results/$(TUNNEL_BASENAME).deb
results/$(TUNNEL_BASENAME).deb: debian/dpkg-build.sh $(call ls-files,debian/start-tunnel) $(TUNNEL_TARGETS) build/lib/scripts/forward-port results/$(TUNNEL_BASENAME).deb: dpkg-build.sh $(call ls-files,debian/start-tunnel) $(TUNNEL_TARGETS)
PROJECT=start-tunnel PLATFORM=$(ARCH) REQUIRES=debian DEPENDS=wireguard-tools,iptables,conntrack ./build/os-compat/run-compat.sh ./debian/dpkg-build.sh PROJECT=start-tunnel PLATFORM=$(ARCH) REQUIRES=debian DEPENDS=wireguard-tools,iptables,conntrack ./build/os-compat/run-compat.sh ./dpkg-build.sh
$(IMAGE_TYPE): results/$(BASENAME).$(IMAGE_TYPE) $(IMAGE_TYPE): results/$(BASENAME).$(IMAGE_TYPE)
squashfs: results/$(BASENAME).squashfs squashfs: results/$(BASENAME).squashfs
results/$(BASENAME).$(IMAGE_TYPE) results/$(BASENAME).squashfs: $(IMAGE_RECIPE_SRC) results/$(BASENAME).deb results/$(BASENAME).$(IMAGE_TYPE) results/$(BASENAME).squashfs: $(IMAGE_RECIPE_SRC) results/$(BASENAME).deb
ARCH=$(ARCH) ./build/image-recipe/run-local-build.sh "results/$(BASENAME).deb" ./image-recipe/run-local-build.sh "results/$(BASENAME).deb"
# For creating os images. DO NOT USE # For creating os images. DO NOT USE
install: $(STARTOS_TARGETS) install: $(STARTOS_TARGETS)
@@ -181,21 +176,18 @@ install: $(STARTOS_TARGETS)
$(call cp,core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox,$(DESTDIR)/usr/bin/startbox) $(call cp,core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox,$(DESTDIR)/usr/bin/startbox)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/startd) $(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/startd)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-cli) $(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-cli)
if [ "$(PLATFORM)" = "raspberrypi" ]; then $(call cp,target/aarch64-unknown-linux-musl/release/pi-beep,$(DESTDIR)/usr/bin/pi-beep); fi if [ "$(PLATFORM)" = "raspberrypi" ]; then $(call cp,cargo-deps/aarch64-unknown-linux-musl/release/pi-beep,$(DESTDIR)/usr/bin/pi-beep); fi
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then \ if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then \
$(call cp,target/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph,$(DESTDIR)/usr/bin/flamegraph); \ $(call cp,cargo-deps/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph,$(DESTDIR)/usr/bin/flamegraph); \
fi fi
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)console($$|-) ]]'; then \ if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)console($$|-) ]]'; then \
$(call cp,target/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console,$(DESTDIR)/usr/bin/tokio-console); \ $(call cp,cargo-deps/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console,$(DESTDIR)/usr/bin/tokio-console); \
fi fi
$(call cp,target/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs,$(DESTDIR)/usr/bin/startos-backup-fs) $(call cp,cargo-deps/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs,$(DESTDIR)/usr/bin/startos-backup-fs)
$(call ln,/usr/bin/startos-backup-fs,$(DESTDIR)/usr/sbin/mount.backup-fs) $(call ln,/usr/bin/startos-backup-fs,$(DESTDIR)/usr/sbin/mount.backup-fs)
$(call mkdir,$(DESTDIR)/lib/systemd/system) $(call mkdir,$(DESTDIR)/lib/systemd/system)
$(call cp,core/startd.service,$(DESTDIR)/lib/systemd/system/startd.service) $(call cp,core/startos/startd.service,$(DESTDIR)/lib/systemd/system/startd.service)
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then \
sed -i '/^Environment=/a Environment=RUST_BACKTRACE=full' $(DESTDIR)/lib/systemd/system/startd.service; \
fi
$(call mkdir,$(DESTDIR)/usr/lib) $(call mkdir,$(DESTDIR)/usr/lib)
$(call rm,$(DESTDIR)/usr/lib/startos) $(call rm,$(DESTDIR)/usr/lib/startos)
@@ -203,16 +195,18 @@ install: $(STARTOS_TARGETS)
$(call mkdir,$(DESTDIR)/usr/lib/startos/container-runtime) $(call mkdir,$(DESTDIR)/usr/lib/startos/container-runtime)
$(call cp,container-runtime/rootfs.$(ARCH).squashfs,$(DESTDIR)/usr/lib/startos/container-runtime/rootfs.squashfs) $(call cp,container-runtime/rootfs.$(ARCH).squashfs,$(DESTDIR)/usr/lib/startos/container-runtime/rootfs.squashfs)
$(call cp,build/env/PLATFORM.txt,$(DESTDIR)/usr/lib/startos/PLATFORM.txt) $(call cp,PLATFORM.txt,$(DESTDIR)/usr/lib/startos/PLATFORM.txt)
$(call cp,build/env/ENVIRONMENT.txt,$(DESTDIR)/usr/lib/startos/ENVIRONMENT.txt) $(call cp,ENVIRONMENT.txt,$(DESTDIR)/usr/lib/startos/ENVIRONMENT.txt)
$(call cp,build/env/GIT_HASH.txt,$(DESTDIR)/usr/lib/startos/GIT_HASH.txt) $(call cp,GIT_HASH.txt,$(DESTDIR)/usr/lib/startos/GIT_HASH.txt)
$(call cp,build/env/VERSION.txt,$(DESTDIR)/usr/lib/startos/VERSION.txt) $(call cp,VERSION.txt,$(DESTDIR)/usr/lib/startos/VERSION.txt)
$(call cp,firmware/$(PLATFORM),$(DESTDIR)/usr/lib/startos/firmware)
update-overlay: $(STARTOS_TARGETS) update-overlay: $(STARTOS_TARGETS)
@echo "\033[33m!!! THIS WILL ONLY REFLASH YOUR DEVICE IN MEMORY !!!\033[0m" @echo "\033[33m!!! THIS WILL ONLY REFLASH YOUR DEVICE IN MEMORY !!!\033[0m"
@echo "\033[33mALL CHANGES WILL BE REVERTED IF YOU RESTART THE DEVICE\033[0m" @echo "\033[33mALL CHANGES WILL BE REVERTED IF YOU RESTART THE DEVICE\033[0m"
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi @if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
@if [ "`ssh $(REMOTE) 'cat /usr/lib/startos/VERSION.txt'`" != "`cat $(VERSION_FILE)`" ]; then >&2 echo "StartOS requires migrations: update-overlay is unavailable." && false; fi @if [ "`ssh $(REMOTE) 'cat /usr/lib/startos/VERSION.txt'`" != "`cat ./VERSION.txt`" ]; then >&2 echo "StartOS requires migrations: update-overlay is unavailable." && false; fi
$(call ssh,"sudo systemctl stop startd") $(call ssh,"sudo systemctl stop startd")
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) PLATFORM=$(PLATFORM) $(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) PLATFORM=$(PLATFORM)
$(call ssh,"sudo systemctl start startd") $(call ssh,"sudo systemctl start startd")
@@ -249,16 +243,16 @@ update-startbox: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox
update-deb: results/$(BASENAME).deb # better than update, but only available from debian update-deb: results/$(BASENAME).deb # better than update, but only available from debian
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi @if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create') $(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
$(call mkdir,/media/startos/next/var/tmp/startos-deb) $(call mkdir,/media/startos/next/tmp/startos-deb)
$(call cp,results/$(BASENAME).deb,/media/startos/next/var/tmp/startos-deb/$(BASENAME).deb) $(call cp,results/$(BASENAME).deb,/media/startos/next/tmp/startos-deb/$(BASENAME).deb)
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y --reinstall /var/tmp/startos-deb/$(BASENAME).deb"') $(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y --reinstall /tmp/startos-deb/$(BASENAME).deb"')
update-squashfs: results/$(BASENAME).squashfs update-squashfs: results/$(BASENAME).squashfs
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi @if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(eval SQFS_SUM := $(shell b3sum results/$(BASENAME).squashfs | head -c 32)) $(eval SQFS_SUM := $(shell b3sum results/$(BASENAME).squashfs))
$(eval SQFS_SIZE := $(shell du -s --bytes results/$(BASENAME).squashfs | awk '{print $$1}')) $(eval SQFS_SIZE := $(shell du -s --bytes results/$(BASENAME).squashfs | awk '{print $$1}'))
$(call ssh,'sudo /usr/lib/startos/scripts/prune-images $(SQFS_SIZE)') $(call ssh,'/usr/lib/startos/scripts/prune-images $(SQFS_SIZE)')
$(call ssh,'sudo /usr/lib/startos/scripts/prune-boot') $(call ssh,'/usr/lib/startos/scripts/prune-boot')
$(call cp,results/$(BASENAME).squashfs,/media/startos/images/next.rootfs) $(call cp,results/$(BASENAME).squashfs,/media/startos/images/next.rootfs)
$(call ssh,'sudo CHECKSUM=$(SQFS_SUM) /usr/lib/startos/scripts/upgrade /media/startos/images/next.rootfs') $(call ssh,'sudo CHECKSUM=$(SQFS_SUM) /usr/lib/startos/scripts/upgrade /media/startos/images/next.rootfs')
@@ -270,7 +264,7 @@ emulate-reflash: $(STARTOS_TARGETS)
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y $(shell cat ./build/lib/depends)"') $(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y $(shell cat ./build/lib/depends)"')
upload-ota: results/$(BASENAME).squashfs upload-ota: results/$(BASENAME).squashfs
TARGET=$(TARGET) KEY=$(KEY) ./build/upload-ota.sh TARGET=$(TARGET) KEY=$(KEY) ./upload-ota.sh
container-runtime/debian.$(ARCH).squashfs: ./container-runtime/download-base-image.sh container-runtime/debian.$(ARCH).squashfs: ./container-runtime/download-base-image.sh
ARCH=$(ARCH) ./container-runtime/download-base-image.sh ARCH=$(ARCH) ./container-runtime/download-base-image.sh
@@ -283,20 +277,17 @@ container-runtime/node_modules/.package-lock.json: container-runtime/package-loc
npm --prefix container-runtime ci npm --prefix container-runtime ci
touch container-runtime/node_modules/.package-lock.json touch container-runtime/node_modules/.package-lock.json
ts-bindings: core/bindings/index.ts sdk/base/lib/osBindings/index.ts: $(shell if [ "$(REBUILD_TYPES)" -ne 0 ]; then echo core/startos/bindings/index.ts; fi)
mkdir -p sdk/base/lib/osBindings mkdir -p sdk/base/lib/osBindings
rsync -ac --delete core/bindings/ sdk/base/lib/osBindings/ rsync -ac --delete core/startos/bindings/ sdk/base/lib/osBindings/
touch sdk/base/lib/osBindings/index.ts
core/bindings/index.ts: $(call ls-files, core) $(ENVIRONMENT_FILE) core/startos/bindings/index.ts: $(call ls-files, core) $(ENVIRONMENT_FILE)
rm -rf core/bindings rm -rf core/startos/bindings
./core/build/build-ts.sh ./core/build-ts.sh
ls core/bindings/*.ts | sed 's/core\/bindings\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' | grep -v '"./index"' | tee core/bindings/index.ts ls core/startos/bindings/*.ts | sed 's/core\/startos\/bindings\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' | grep -v '"./index"' | tee core/startos/bindings/index.ts
if [ -d core/bindings/tunnel ]; then \ npm --prefix sdk exec -- prettier --config ./sdk/base/package.json -w ./core/startos/bindings/*.ts
ls core/bindings/tunnel/*.ts | sed 's/core\/bindings\/tunnel\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' | grep -v '"./index"' > core/bindings/tunnel/index.ts; \ touch core/startos/bindings/index.ts
echo 'export * as Tunnel from "./tunnel";' >> core/bindings/index.ts; \
fi
npm --prefix sdk/base exec -- prettier --config=./sdk/base/package.json -w './core/bindings/**/*.ts'
touch core/bindings/index.ts
sdk/dist/package.json sdk/baseDist/package.json: $(call ls-files, sdk) sdk/base/lib/osBindings/index.ts sdk/dist/package.json sdk/baseDist/package.json: $(call ls-files, sdk) sdk/base/lib/osBindings/index.ts
(cd sdk && make bundle) (cd sdk && make bundle)
@@ -311,22 +302,22 @@ container-runtime/dist/node_modules/.package-lock.json container-runtime/dist/pa
./container-runtime/install-dist-deps.sh ./container-runtime/install-dist-deps.sh
touch container-runtime/dist/node_modules/.package-lock.json touch container-runtime/dist/node_modules/.package-lock.json
container-runtime/rootfs.$(ARCH).squashfs: container-runtime/debian.$(ARCH).squashfs container-runtime/container-runtime.service container-runtime/update-image.sh container-runtime/update-image-local.sh container-runtime/deb-install.sh container-runtime/dist/index.js container-runtime/dist/node_modules/.package-lock.json core/target/$(RUST_ARCH)-unknown-linux-musl/release/start-container container-runtime/rootfs.$(ARCH).squashfs: container-runtime/debian.$(ARCH).squashfs container-runtime/container-runtime.service container-runtime/update-image.sh container-runtime/deb-install.sh container-runtime/dist/index.js container-runtime/dist/node_modules/.package-lock.json core/target/$(RUST_ARCH)-unknown-linux-musl/release/containerbox
ARCH=$(ARCH) ./container-runtime/update-image-local.sh ARCH=$(ARCH) REQUIRES=linux ./build/os-compat/run-compat.sh ./container-runtime/update-image.sh
build/lib/depends build/lib/conflicts: $(ENVIRONMENT_FILE) $(PLATFORM_FILE) $(shell ls build/dpkg-deps/*) build/lib/depends build/lib/conflicts: $(ENVIRONMENT_FILE) $(PLATFORM_FILE) $(shell ls build/dpkg-deps/*)
PLATFORM=$(PLATFORM) ARCH=$(ARCH) build/dpkg-deps/generate.sh PLATFORM=$(PLATFORM) ARCH=$(ARCH) build/dpkg-deps/generate.sh
$(FIRMWARE_ROMS): build/lib/firmware.json ./build/download-firmware.sh $(PLATFORM_FILE) $(FIRMWARE_ROMS): build/lib/firmware.json download-firmware.sh $(PLATFORM_FILE)
./build/download-firmware.sh $(PLATFORM) ./download-firmware.sh $(PLATFORM)
core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox: $(CORE_SRC) $(COMPRESSED_WEB_UIS) web/patchdb-ui-seed.json $(ENVIRONMENT_FILE) core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox: $(CORE_SRC) $(COMPRESSED_WEB_UIS) web/patchdb-ui-seed.json $(ENVIRONMENT_FILE)
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build/build-startbox.sh ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build-startbox.sh
touch core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox touch core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox
core/target/$(RUST_ARCH)-unknown-linux-musl/release/start-container: $(CORE_SRC) $(ENVIRONMENT_FILE) core/target/$(RUST_ARCH)-unknown-linux-musl/release/containerbox: $(CORE_SRC) $(ENVIRONMENT_FILE)
ARCH=$(ARCH) ./core/build/build-start-container.sh ARCH=$(ARCH) ./core/build-containerbox.sh
touch core/target/$(RUST_ARCH)-unknown-linux-musl/release/start-container touch core/target/$(RUST_ARCH)-unknown-linux-musl/release/containerbox
web/package-lock.json: web/package.json sdk/baseDist/package.json web/package-lock.json: web/package.json sdk/baseDist/package.json
npm --prefix web i npm --prefix web i
@@ -341,27 +332,27 @@ web/.angular/.updated: patch-db/client/dist/index.js sdk/baseDist/package.json w
mkdir -p web/.angular mkdir -p web/.angular
touch web/.angular/.updated touch web/.angular/.updated
web/.i18n-checked: $(WEB_SHARED_SRC) $(WEB_UI_SRC) $(WEB_SETUP_WIZARD_SRC) $(WEB_START_TUNNEL_SRC) web/dist/raw/ui/index.html: $(WEB_UI_SRC) $(WEB_SHARED_SRC) web/.angular/.updated
npm --prefix web run check:i18n
touch web/.i18n-checked
web/dist/raw/ui/index.html: $(WEB_UI_SRC) $(WEB_SHARED_SRC) web/.angular/.updated web/.i18n-checked
npm --prefix web run build:ui npm --prefix web run build:ui
touch web/dist/raw/ui/index.html touch web/dist/raw/ui/index.html
web/dist/raw/setup-wizard/index.html: $(WEB_SETUP_WIZARD_SRC) $(WEB_SHARED_SRC) web/.angular/.updated web/.i18n-checked web/dist/raw/setup-wizard/index.html: $(WEB_SETUP_WIZARD_SRC) $(WEB_SHARED_SRC) web/.angular/.updated
npm --prefix web run build:setup npm --prefix web run build:setup
touch web/dist/raw/setup-wizard/index.html touch web/dist/raw/setup-wizard/index.html
web/dist/raw/start-tunnel/index.html: $(WEB_START_TUNNEL_SRC) $(WEB_SHARED_SRC) web/.angular/.updated web/.i18n-checked web/dist/raw/install-wizard/index.html: $(WEB_INSTALL_WIZARD_SRC) $(WEB_SHARED_SRC) web/.angular/.updated
npm --prefix web run build:install
touch web/dist/raw/install-wizard/index.html
web/dist/raw/start-tunnel/index.html: $(WEB_START_TUNNEL_SRC) $(WEB_SHARED_SRC) web/.angular/.updated
npm --prefix web run build:tunnel npm --prefix web run build:tunnel
touch web/dist/raw/start-tunnel/index.html touch web/dist/raw/start-tunnel/index.html
web/dist/static/%/index.html: web/dist/raw/%/index.html web/dist/static/%/index.html: web/dist/raw/%/index.html
./web/compress-uis.sh $* ./compress-uis.sh $*
web/config.json: $(GIT_HASH_FILE) $(ENVIRONMENT_FILE) web/config-sample.json web/update-config.sh web/config.json: $(GIT_HASH_FILE) web/config-sample.json
./web/update-config.sh jq '.useMocks = false' web/config-sample.json | jq '.gitHash = "$(shell cat GIT_HASH.txt)"' > web/config.json
patch-db/client/node_modules/.package-lock.json: patch-db/client/package.json patch-db/client/node_modules/.package-lock.json: patch-db/client/package.json
npm --prefix patch-db/client ci npm --prefix patch-db/client ci
@@ -382,17 +373,14 @@ uis: $(WEB_UIS)
# this is a convenience step to build the UI # this is a convenience step to build the UI
ui: web/dist/raw/ui ui: web/dist/raw/ui
target/aarch64-unknown-linux-musl/release/pi-beep: ./build/build-cargo-dep.sh cargo-deps/aarch64-unknown-linux-musl/release/pi-beep:
ARCH=aarch64 ./build/build-cargo-dep.sh pi-beep ARCH=aarch64 ./build-cargo-dep.sh pi-beep
target/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console: ./build/build-cargo-dep.sh cargo-deps/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console:
ARCH=$(ARCH) ./build/build-cargo-dep.sh tokio-console ARCH=$(ARCH) PREINSTALL="apk add musl-dev pkgconfig" ./build-cargo-dep.sh tokio-console
touch $@
target/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs: ./build/build-cargo-dep.sh cargo-deps/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs:
ARCH=$(ARCH) ./build/build-cargo-dep.sh --git https://github.com/Start9Labs/start-fs.git startos-backup-fs ARCH=$(ARCH) PREINSTALL="apk add fuse3 fuse3-dev fuse3-static musl-dev pkgconfig" ./build-cargo-dep.sh --git https://github.com/Start9Labs/start-fs.git startos-backup-fs
touch $@
target/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph: ./build/build-cargo-dep.sh cargo-deps/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph:
ARCH=$(ARCH) ./build/build-cargo-dep.sh flamegraph ARCH=$(ARCH) PREINSTALL="apk add musl-dev pkgconfig" ./build-cargo-dep.sh flamegraph
touch $@
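
For orientation, a few illustrative invocations of the targets defined above (a sketch only: `PLATFORM` drives `ARCH` and `IMAGE_TYPE` at the top of the Makefile, and `x86_64` is just an example value, not the only supported one):

```sh
# Build the StartOS .deb, then the ISO, for an example platform
make deb PLATFORM=x86_64
make iso PLATFORM=x86_64

# Build the standalone registry and tunnel packages
make registry-deb PLATFORM=x86_64
make tunnel-deb PLATFORM=x86_64
```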

README.md
@@ -7,64 +7,76 @@
<a href="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml">
<img src="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml/badge.svg">
</a>
<a href="https://heyapollo.com/product/startos">
<img alt="Static Badge" src="https://img.shields.io/badge/apollo-review%20%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%20-slateblue">
</a>
<a href="https://twitter.com/start9labs">
<img alt="X (formerly Twitter) Follow" src="https://img.shields.io/twitter/follow/start9labs">
</a>
+<a href="https://matrix.to/#/#community:matrix.start9labs.com">
+<img alt="Static Badge" src="https://img.shields.io/badge/community-matrix-yellow?logo=matrix">
+</a>
+<a href="https://t.me/start9_labs">
+<img alt="Static Badge" src="https://img.shields.io/badge/community-telegram-blue?logo=telegram">
+</a>
<a href="https://docs.start9.com">
<img alt="Static Badge" src="https://img.shields.io/badge/docs-orange?label=%F0%9F%91%A4%20support">
</a>
-<a href="https://matrix.to/#/#dev-startos:matrix.start9labs.com">
+<a href="https://matrix.to/#/#community-dev:matrix.start9labs.com">
<img alt="Static Badge" src="https://img.shields.io/badge/developer-matrix-darkcyan?logo=matrix">
</a>
<a href="https://start9.com">
<img alt="Website" src="https://img.shields.io/website?up_message=online&down_message=offline&url=https%3A%2F%2Fstart9.com&logo=website&label=%F0%9F%8C%90%20website">
</a>
</div>
+<br />
+<div align="center">
+<h3>
+Welcome to the era of Sovereign Computing
+</h3>
+<p>
+StartOS is an open source Linux distribution optimized for running a personal server. It facilitates the discovery, installation, network configuration, service configuration, data backup, dependency management, and health monitoring of self-hosted software services.
+</p>
+</div>
+<br />
+<p align="center">
+<img src="assets/StartOS.png" alt="StartOS" width="85%">
+</p>
+<br />
-## What is StartOS?
+## Running StartOS
+> [!WARNING]
+> StartOS is in beta. It lacks features. It doesn't always work perfectly. Start9 servers are not plug and play. Using them properly requires some effort and patience. Please do not use StartOS or purchase a server if you are unable or unwilling to follow instructions and learn new concepts.
-StartOS is an open-source Linux distribution for running a personal server. It handles discovery, installation, network configuration, data backup, dependency management, and health monitoring of self-hosted services.
+### 💰 Buy a Start9 server
+This is the most convenient option. Simply [buy a server](https://store.start9.com) from Start9 and plug it in.
-**Tech stack:** Rust backend (Tokio/Axum), Angular frontend, Node.js container runtime with LXC, and a custom diff-based database ([Patch-DB](https://github.com/Start9Labs/patch-db)) for reactive state synchronization.
+### 👷 Build your own server
+This option is easier than you might imagine, and there are 4 reasons why you might prefer it:
+1. You already have hardware
+1. You want to save on shipping costs
+1. You prefer not to divulge your physical address
+1. You just like building things
-Services run in isolated LXC containers, packaged as [S9PKs](https://github.com/Start9Labs/start-os/blob/master/core/s9pk-structure.md) — a signed, merkle-archived format that supports partial downloads and cryptographic verification.
+To pursue this option, follow one of our [DIY guides](https://start9.com/latest/diy).
-## What can you do with it?
+## ❤️ Contributing
+There are multiple ways to contribute: work directly on StartOS, package a service for the marketplace, or help with documentation and guides. To learn more about contributing, see [here](https://start9.com/contribute/).
-StartOS lets you self-host services that would otherwise depend on third-party cloud providers — giving you full ownership of your data and infrastructure.
+To report security issues, please email our security team: security@start9.com.
-Browse available services on the [Start9 Marketplace](https://marketplace.start9.com/), including:
+## 🌎 Marketplace
+There are dozens of services available for StartOS, and new ones are being added all the time. Check out the full list of available services [here](https://marketplace.start9.com/marketplace). To read more about the Marketplace ecosystem, check out this [blog post](https://blog.start9.com/start9-marketplace-strategy/).
-- **Bitcoin & Lightning** — Run a full Bitcoin node, Lightning node, BTCPay Server, and other payment infrastructure
-- **Communication** — Self-host Matrix, SimpleX, or other messaging platforms
-- **Cloud Storage** — Run Nextcloud, Vaultwarden, and other productivity tools
+## 🖥️ User Interface Screenshots
-Services are added by the community. If a service you want isn't available, you can [package it yourself](https://github.com/Start9Labs/ai-service-packaging/).
+<p align="center">
+<img src="assets/registry.png" alt="StartOS Marketplace" width="49%">
-## Getting StartOS
+<img src="assets/community.png" alt="StartOS Community Registry" width="49%">
+<img src="assets/c-lightning.png" alt="StartOS Core Lightning Service" width="49%">
-### Buy a Start9 server
+<img src="assets/btcpay.png" alt="StartOS BTCPay Service" width="49%">
+<img src="assets/nextcloud.png" alt="StartOS Nextcloud Service" width="49%">
-The easiest path. [Buy a server](https://store.start9.com) from Start9 and plug it in.
+<img src="assets/system.png" alt="StartOS System Settings" width="49%">
+<img src="assets/welcome.png" alt="StartOS Welcome Screen" width="49%">
-### Build your own
+<img src="assets/logs.png" alt="StartOS Logs" width="49%">
+</p>
-Follow the [install guide](https://docs.start9.com/start-os/installing.html) to install StartOS on your own hardware. Reasons to go this route:
-1. You already have compatible hardware
-2. You want to save on shipping costs
-3. You prefer not to share your physical address
-4. You enjoy building things
-### Build from source
-See [CONTRIBUTING.md](CONTRIBUTING.md) for environment setup, build instructions, and development workflow.
-## Contributing
-There are multiple ways to contribute: work directly on StartOS, package a service for the marketplace, or help with documentation and guides. See [CONTRIBUTING.md](CONTRIBUTING.md) or visit [start9.com/contribute](https://start9.com/contribute/).
-To report security issues, email [security@start9.com](mailto:security@start9.com).

START-TUNNEL.md Normal file
@@ -0,0 +1,63 @@
# StartTunnel
A self-hosted WireGuard VPN optimized for creating VLANs and reverse tunneling to personal servers.

You can think of StartTunnel as a "virtual router in the cloud".

Use it for private remote access to self-hosted services running on a personal server, or to expose self-hosted services to the public Internet without revealing the host server's IP address.
## Features
- **Create Subnets**: Each subnet creates a private, virtual local area network (VLAN), similar to the LAN created by a home router.
- **Add Devices**: When you add a device (server, phone, laptop) to a subnet, it receives a LAN IP address on that subnet as well as a unique WireGuard config that must be copied, downloaded, or scanned into the device (see the sketch below).
- **Forward Ports**: Forwarding a port creates a "reverse tunnel", exposing a specific port on a specific device to the public Internet.
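
As a concrete illustration of the device flow, the exported WireGuard config can be activated on a Linux client with the standard `wireguard-tools` utilities (the filename below is a placeholder, not something StartTunnel prescribes):

```sh
# Bring up the tunnel from the config exported by StartTunnel
# ("my-laptop.conf" is a hypothetical name for the downloaded file)
sudo wg-quick up ./my-laptop.conf

# Verify the interface, peer handshake, and assigned subnet address
sudo wg show
```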
## Installation
1. Rent a low-cost VPS. For most use cases, the cheapest option should be enough.
   - It must have a dedicated public IP address.
   - For compute (CPU), memory (RAM), and storage (disk), choose the minimum spec.
   - For transfer (bandwidth), it depends on (1) your use case and (2) your home Internet's _upload_ speed. Even if you intend to serve large files or stream content from your server, there is no reason to pay for speeds that exceed your home Internet's upload speed.
1. Provision the VPS with the latest version of Debian.
1. Access the VPS via SSH.
1. Install StartTunnel:
```sh
TMP_DIR=$(mktemp -d) && (cd $TMP_DIR && wget https://github.com/Start9Labs/start-os/releases/download/v0.4.0-alpha.12/start-tunnel-0.4.0-alpha.12-68f401b_$(uname -m).deb && apt-get install -y ./start-tunnel-0.4.0-alpha.12-68f401b_$(uname -m).deb) && rm -rf $TMP_DIR && systemctl start start-tunneld && echo "Installation Succeeded"
```
5. [Initialize the web interface](#web-interface) (recommended)
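
Before initializing the web interface, it is worth confirming that the service from the previous step actually came up. This uses only standard systemd tooling; the unit name comes from the install one-liner above:

```sh
# The .deb ships a systemd unit named start-tunneld
systemctl status start-tunneld

# If it failed to start, inspect its recent logs
journalctl -u start-tunneld -e
```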
## CLI
By default, StartTunnel is managed via the `start-tunnel` command line interface, which is self-documented.
```
start-tunnel --help
```
## Web Interface
If you choose to enable the web interface (recommended in most cases), StartTunnel can be accessed as a website from the browser or programmatically via its API.
1. Initialize the web interface:

       start-tunnel web init
1. When prompted, select the IP address at which to host the web interface. In many cases, there will be only one IP address.
1. When prompted, enter the port at which to host the web interface. The default is 8443, and we recommend using it. If you change the default, choose an uncommon port to avoid conflicts.
1. Select whether to autogenerate a self-signed certificate or provide your own certificate and key. If you choose to autogenerate, you will be asked to list all IP addresses and domains for which to sign the certificate. For example, if you intend to access your StartTunnel web UI at a domain, include the domain in the list.
1. You will receive a success message with 3 pieces of information:
- <https://IP:port>: the URL where you can reach your personal web interface.
- Password: an autogenerated password for your interface. If you lose or forget it, you can reset it using the CLI.
- Root Certificate Authority: the Root CA of your StartTunnel instance. If you have not already, trust it in your browser or system keychain.
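
On a Debian-based client, for example, the Root CA above can be added to the system trust store with the stock `ca-certificates` tooling (the filename is a placeholder for whatever you saved the CA as):

```sh
# System-wide trust on Debian/Ubuntu; the file must have a .crt extension
sudo cp start-tunnel-root-ca.crt /usr/local/share/ca-certificates/
sudo update-ca-certificates
```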

@@ -1 +0,0 @@
deb [arch=amd64,arm64,riscv64 signed-by=/usr/share/keyrings/start9.gpg] https://start9-debs.nyc3.cdn.digitaloceanspaces.com stable main

BIN assets/StartOS.png Normal file (2.1 MiB)
BIN assets/btcpay.png Normal file (396 KiB)
BIN assets/c-lightning.png Normal file (402 KiB)
BIN assets/community.png Normal file (591 KiB)
BIN assets/logs.png Normal file (1.6 MiB)
BIN assets/nextcloud.png Normal file (319 KiB)
BIN assets/registry.png Normal file (521 KiB)
BIN assets/system.png Normal file (331 KiB)
BIN assets/welcome.png Normal file (402 KiB)

build-cargo-dep.sh Executable file

@@ -0,0 +1,34 @@
#!/bin/bash
set -e
shopt -s expand_aliases
if [ "$0" != "./build-cargo-dep.sh" ]; then
>&2 echo "Must be run from start-os directory"
exit 1
fi
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
if [ -z "$ARCH" ]; then
ARCH=$(uname -m)
fi
DOCKER_PLATFORM="linux/${ARCH}"
if [ "$ARCH" = aarch64 ] || [ "$ARCH" = arm64 ]; then
DOCKER_PLATFORM="linux/arm64"
elif [ "$ARCH" = x86_64 ]; then
DOCKER_PLATFORM="linux/amd64"
fi
mkdir -p cargo-deps
alias 'rust-musl-builder'='docker run $USE_TTY --platform=${DOCKER_PLATFORM} --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)"/cargo-deps:/home/rust/src -w /home/rust/src -P rust:alpine'
PREINSTALL=${PREINSTALL:-true}
rust-musl-builder sh -c "$PREINSTALL && cargo install $* --target-dir /home/rust/src --target=$ARCH-unknown-linux-musl"
sudo chown -R $USER cargo-deps
sudo chown -R $USER ~/.cargo
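
The Makefile rules above double as usage examples for this script; two representative invocations (paths, ARCH values, and PREINSTALL packages as used there):

```sh
# Install a crate from crates.io, cross-built for aarch64
ARCH=aarch64 ./build-cargo-dep.sh pi-beep

# Install from a git repo, preinstalling Alpine build deps inside the container
ARCH=x86_64 PREINSTALL="apk add fuse3 fuse3-dev fuse3-static musl-dev pkgconfig" \
  ./build-cargo-dep.sh --git https://github.com/Start9Labs/start-fs.git startos-backup-fs
```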


@@ -1,139 +0,0 @@
#!/bin/bash
#
# Publish .deb files to an S3-hosted apt repository.
#
# Usage: publish-deb.sh <deb-file-or-directory> [<deb-file-or-directory> ...]
#
# Environment variables:
# GPG_PRIVATE_KEY - Armored GPG private key (imported if set)
# GPG_KEY_ID - GPG key ID for signing
# S3_ACCESS_KEY - S3 access key
# S3_SECRET_KEY - S3 secret key
# S3_ENDPOINT - S3 endpoint (default: https://nyc3.digitaloceanspaces.com)
# S3_BUCKET - S3 bucket name (default: start9-debs)
# SUITE - Apt suite name (default: stable)
# COMPONENT - Apt component name (default: main)
set -e
if [ $# -eq 0 ]; then
echo "Usage: $0 <deb-file-or-directory> [...]" >&2
exit 1
fi
BUCKET="${S3_BUCKET:-start9-debs}"
ENDPOINT="${S3_ENDPOINT:-https://nyc3.digitaloceanspaces.com}"
GPG_KEY_ID="${GPG_KEY_ID:-5259ADFC2D63C217}"
SUITE="${SUITE:-stable}"
COMPONENT="${COMPONENT:-main}"
REPO_DIR="$(mktemp -d)"
cleanup() {
rm -rf "$REPO_DIR"
}
trap cleanup EXIT
# Import GPG key if provided
if [ -n "$GPG_PRIVATE_KEY" ]; then
echo "$GPG_PRIVATE_KEY" | gpg --batch --import 2>/dev/null
fi
# Configure s3cmd
if [ -n "$S3_ACCESS_KEY" ] && [ -n "$S3_SECRET_KEY" ]; then
S3CMD_CONFIG="$(mktemp)"
cat > "$S3CMD_CONFIG" <<EOF
[default]
access_key = ${S3_ACCESS_KEY}
secret_key = ${S3_SECRET_KEY}
host_base = $(echo "$ENDPOINT" | sed 's|https://||')
host_bucket = %(bucket)s.$(echo "$ENDPOINT" | sed 's|https://||')
use_https = True
EOF
s3() {
s3cmd -c "$S3CMD_CONFIG" "$@"
}
else
# Fall back to default ~/.s3cfg
S3CMD_CONFIG=""
s3() {
s3cmd "$@"
}
fi
# Sync existing repo from S3
echo "Syncing existing repo from s3://${BUCKET}/ ..."
s3 sync --no-mime-magic "s3://${BUCKET}/" "$REPO_DIR/" 2>/dev/null || true
# Collect all .deb files from arguments
DEB_FILES=()
for arg in "$@"; do
if [ -d "$arg" ]; then
while IFS= read -r -d '' f; do
DEB_FILES+=("$f")
done < <(find "$arg" -name '*.deb' -print0)
elif [ -f "$arg" ]; then
DEB_FILES+=("$arg")
else
echo "Warning: $arg is not a file or directory, skipping" >&2
fi
done
if [ ${#DEB_FILES[@]} -eq 0 ]; then
echo "No .deb files found" >&2
exit 1
fi
# Copy each deb to the pool, renaming to standard format
for deb in "${DEB_FILES[@]}"; do
PKG_NAME="$(dpkg-deb --field "$deb" Package)"
POOL_DIR="$REPO_DIR/pool/${COMPONENT}/${PKG_NAME:0:1}/${PKG_NAME}"
mkdir -p "$POOL_DIR"
cp "$deb" "$POOL_DIR/"
dpkg-name -o "$POOL_DIR/$(basename "$deb")" 2>/dev/null || true
echo "Added: $(basename "$deb") -> pool/${COMPONENT}/${PKG_NAME:0:1}/${PKG_NAME}/"
done
# Generate Packages indices for each architecture
for arch in amd64 arm64 riscv64; do
BINARY_DIR="$REPO_DIR/dists/${SUITE}/${COMPONENT}/binary-${arch}"
mkdir -p "$BINARY_DIR"
(
cd "$REPO_DIR"
dpkg-scanpackages --multiversion --arch "$arch" pool/ > "$BINARY_DIR/Packages"
gzip -k -f "$BINARY_DIR/Packages"
)
echo "Generated Packages index for ${arch}"
done
# Generate Release file
(
cd "$REPO_DIR/dists/${SUITE}"
apt-ftparchive release \
-o "APT::FTPArchive::Release::Origin=Start9" \
-o "APT::FTPArchive::Release::Label=Start9" \
-o "APT::FTPArchive::Release::Suite=${SUITE}" \
-o "APT::FTPArchive::Release::Codename=${SUITE}" \
-o "APT::FTPArchive::Release::Architectures=amd64 arm64 riscv64" \
-o "APT::FTPArchive::Release::Components=${COMPONENT}" \
. > Release
)
echo "Generated Release file"
# Sign if GPG key is available
if [ -n "$GPG_KEY_ID" ]; then
(
cd "$REPO_DIR/dists/${SUITE}"
gpg --default-key "$GPG_KEY_ID" --batch --yes --detach-sign -o Release.gpg Release
gpg --default-key "$GPG_KEY_ID" --batch --yes --clearsign -o InRelease Release
)
echo "Signed Release file with key ${GPG_KEY_ID}"
else
echo "Warning: GPG_KEY_ID not set, Release file is unsigned" >&2
fi
# Upload to S3
echo "Uploading to s3://${BUCKET}/ ..."
s3 sync --acl-public --no-mime-magic "$REPO_DIR/" "s3://${BUCKET}/"
[ -n "$S3CMD_CONFIG" ] && rm -f "$S3CMD_CONFIG"
echo "Done."

@@ -1,26 +0,0 @@
#!/bin/bash
cd "$(dirname "${BASH_SOURCE[0]}")/.."
set -e
shopt -s expand_aliases
if [ -z "$ARCH" ]; then
ARCH=$(uname -m)
fi
RUST_ARCH="$ARCH"
if [ "$ARCH" = "riscv64" ]; then
RUST_ARCH="riscv64gc"
fi
mkdir -p target
source core/build/builder-alias.sh
RUSTFLAGS="-C target-feature=+crt-static"
rust-zig-builder cargo-zigbuild install $* --target-dir /workdir/target/ --target=$RUST_ARCH-unknown-linux-musl
if [ "$(ls -nd "target/$RUST_ARCH-unknown-linux-musl/release/${!#}" | awk '{ print $3 }')" != "$UID" ]; then
rust-zig-builder sh -c "chown -R $UID:$UID target && chown -R $UID:$UID /usr/local/cargo"
fi

@@ -3,7 +3,6 @@ avahi-utils
b3sum
bash-completion
beep
-binfmt-support
bmon
btrfs-progs
ca-certificates
@@ -11,13 +10,11 @@ cifs-utils
conntrack
cryptsetup
curl
-dkms
dmidecode
dnsutils
dosfstools
e2fsprogs
ecryptfs-utils
-equivs
exfatprogs
flashrom
fuse3
@@ -37,7 +34,6 @@ lvm2
lxc
magic-wormhole
man-db
-mokutil
ncdu
net-tools
network-manager
@@ -48,7 +44,6 @@ openssh-server
podman
psmisc
qemu-guest-agent
-qemu-user-static
rfkill
rsync
samba-common-bin
@@ -57,7 +52,6 @@ socat
sqlite3
squashfs-tools
squashfs-tools-ng
-ssl-cert
sudo
systemd
systemd-resolved

@@ -1 +0,0 @@
+ nmap

@@ -9,13 +9,6 @@ FEATURES+=("${ARCH}")
if [ "$ARCH" != "$PLATFORM" ]; then
FEATURES+=("${PLATFORM}")
fi
-if [[ "$PLATFORM" =~ -nonfree$ ]]; then
-FEATURES+=("nonfree")
-fi
-if [[ "$PLATFORM" =~ -nvidia$ ]]; then
-FEATURES+=("nonfree")
-FEATURES+=("nvidia")
-fi
feature_file_checker='
/^#/ { next }

@@ -1,7 +0,0 @@
+ firmware-amd-graphics
+ firmware-atheros
+ firmware-brcm80211
+ firmware-iwlwifi
+ firmware-libertas
+ firmware-misc-nonfree
+ firmware-realtek

@@ -1 +0,0 @@
+ nvidia-container-toolkit

@@ -1,6 +1,6 @@
-+ gdisk
+- grub-common
+- grub-efi
+ parted
-+ u-boot-rpi
+ raspberrypi-net-mods
+ raspberrypi-sys-mods
+ raspi-config

@@ -1,550 +0,0 @@
#!/bin/bash
set -e
echo "==== StartOS Image Build ===="
echo "Building for architecture: $IB_TARGET_ARCH"
SOURCE_DIR="$(realpath $(dirname "${BASH_SOURCE[0]}"))"
base_dir="$(pwd -P)"
prep_results_dir="$base_dir/images-prep"
RESULTS_DIR="$base_dir/results"
echo "Saving results in: $RESULTS_DIR"
DEB_PATH="$base_dir/$1"
VERSION="$(dpkg-deb --fsys-tarfile $DEB_PATH | tar --to-stdout -xvf - ./usr/lib/startos/VERSION.txt)"
GIT_HASH="$(dpkg-deb --fsys-tarfile $DEB_PATH | tar --to-stdout -xvf - ./usr/lib/startos/GIT_HASH.txt)"
if [[ "$GIT_HASH" =~ ^@ ]]; then
GIT_HASH="unknown"
else
GIT_HASH="$(echo -n "$GIT_HASH" | head -c 7)"
fi
IB_OS_ENV="$(dpkg-deb --fsys-tarfile $DEB_PATH | tar --to-stdout -xvf - ./usr/lib/startos/ENVIRONMENT.txt)"
IB_TARGET_PLATFORM="$(dpkg-deb --fsys-tarfile $DEB_PATH | tar --to-stdout -xvf - ./usr/lib/startos/PLATFORM.txt)"
VERSION_FULL="${VERSION}-${GIT_HASH}"
if [ -n "$IB_OS_ENV" ]; then
VERSION_FULL="$VERSION_FULL~${IB_OS_ENV}"
fi
IMAGE_BASENAME=startos-${VERSION_FULL}_${IB_TARGET_PLATFORM}
BOOTLOADERS=grub-efi
if [ "$IB_TARGET_PLATFORM" = "x86_64" ] || [ "$IB_TARGET_PLATFORM" = "x86_64-nonfree" ] || [ "$IB_TARGET_PLATFORM" = "x86_64-nvidia" ]; then
IB_TARGET_ARCH=amd64
QEMU_ARCH=x86_64
BOOTLOADERS=grub-efi,syslinux
elif [ "$IB_TARGET_PLATFORM" = "aarch64" ] || [ "$IB_TARGET_PLATFORM" = "aarch64-nonfree" ] || [ "$IB_TARGET_PLATFORM" = "aarch64-nvidia" ] || [ "$IB_TARGET_PLATFORM" = "raspberrypi" ] || [ "$IB_TARGET_PLATFORM" = "rockchip64" ]; then
IB_TARGET_ARCH=arm64
QEMU_ARCH=aarch64
elif [ "$IB_TARGET_PLATFORM" = "riscv64" ] || [ "$IB_TARGET_PLATFORM" = "riscv64-nonfree" ]; then
IB_TARGET_ARCH=riscv64
QEMU_ARCH=riscv64
else
IB_TARGET_ARCH="$IB_TARGET_PLATFORM"
QEMU_ARCH="$IB_TARGET_PLATFORM"
fi
QEMU_ARGS=()
if [ "$QEMU_ARCH" != $(uname -m) ]; then
QEMU_ARGS+=(--bootstrap-qemu-arch ${IB_TARGET_ARCH})
QEMU_ARGS+=(--bootstrap-qemu-static /usr/bin/qemu-${QEMU_ARCH}-static)
fi
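# Example (illustrative): building the arm64 image on an x86_64 host adds
#   --bootstrap-qemu-arch arm64 --bootstrap-qemu-static /usr/bin/qemu-aarch64-static
# while on a native arm64 host QEMU_ARGS stays empty and no emulation is needed.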
mkdir -p $prep_results_dir
cd $prep_results_dir
NON_FREE=
if [[ "${IB_TARGET_PLATFORM}" =~ -nonfree$ ]] || [[ "${IB_TARGET_PLATFORM}" =~ -nvidia$ ]] || [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
NON_FREE=1
fi
NVIDIA=
if [[ "${IB_TARGET_PLATFORM}" =~ -nvidia$ ]]; then
NVIDIA=1
fi
IMAGE_TYPE=iso
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ] || [ "${IB_TARGET_PLATFORM}" = "rockchip64" ]; then
IMAGE_TYPE=img
fi
ARCHIVE_AREAS="main contrib"
if [ "$NON_FREE" = 1 ]; then
if [ "$IB_SUITE" = "bullseye" ]; then
ARCHIVE_AREAS="$ARCHIVE_AREAS non-free"
else
ARCHIVE_AREAS="$ARCHIVE_AREAS non-free non-free-firmware"
fi
fi
PLATFORM_CONFIG_EXTRAS=()
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
PLATFORM_CONFIG_EXTRAS+=( --firmware-binary false )
PLATFORM_CONFIG_EXTRAS+=( --firmware-chroot false )
RPI_KERNEL_VERSION=6.12.47+rpt
PLATFORM_CONFIG_EXTRAS+=( --linux-packages linux-image-$RPI_KERNEL_VERSION )
PLATFORM_CONFIG_EXTRAS+=( --linux-flavours "rpi-v8 rpi-2712" )
elif [ "${IB_TARGET_PLATFORM}" = "rockchip64" ]; then
PLATFORM_CONFIG_EXTRAS+=( --linux-flavours rockchip64 )
elif [ "${IB_TARGET_ARCH}" = "riscv64" ]; then
PLATFORM_CONFIG_EXTRAS+=( --uefi-secure-boot=disable )
fi
cat > /etc/wgetrc << EOF
retry_connrefused = on
tries = 100
EOF
lb config \
--iso-application "StartOS v${VERSION_FULL} ${IB_TARGET_ARCH}" \
--iso-volume "StartOS v${VERSION} ${IB_TARGET_ARCH}" \
--iso-preparer "START9 LABS; HTTPS://START9.COM" \
--iso-publisher "START9 LABS; HTTPS://START9.COM" \
--backports true \
--bootappend-live "boot=live noautologin console=tty0" \
--bootloaders $BOOTLOADERS \
--cache false \
--mirror-bootstrap "https://deb.debian.org/debian/" \
--mirror-chroot "https://deb.debian.org/debian/" \
--mirror-chroot-security "https://security.debian.org/debian-security" \
-d ${IB_SUITE} \
-a ${IB_TARGET_ARCH} \
${QEMU_ARGS[@]} \
--archive-areas "${ARCHIVE_AREAS}" \
${PLATFORM_CONFIG_EXTRAS[@]}
# Overlays
mkdir -p config/packages.chroot/
cp $RESULTS_DIR/$IMAGE_BASENAME.deb config/packages.chroot/
dpkg-name config/packages.chroot/*.deb
mkdir -p config/includes.chroot/etc
echo start > config/includes.chroot/etc/hostname
cat > config/includes.chroot/etc/hosts << EOT
127.0.0.1 localhost start
::1 localhost start ip6-localhost ip6-loopback
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
EOT
if [[ "${IB_OS_ENV}" =~ (^|-)dev($|-) ]]; then
mkdir -p config/includes.chroot/etc/ssh/sshd_config.d
echo "PasswordAuthentication yes" > config/includes.chroot/etc/ssh/sshd_config.d/dev-password-auth.conf
fi
# Installer marker file (used by installed GRUB to detect the live USB)
mkdir -p config/includes.binary
touch config/includes.binary/.startos-installer
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
mkdir -p config/includes.chroot
git clone --depth=1 --branch=stable https://github.com/raspberrypi/rpi-firmware.git config/includes.chroot/boot
rm -rf config/includes.chroot/boot/.git config/includes.chroot/boot/modules
rsync -rLp $SOURCE_DIR/raspberrypi/squashfs/ config/includes.chroot/
fi
# Bootloaders
rm -rf config/bootloaders
cp -r /usr/share/live/build/bootloaders config/bootloaders
cat > config/bootloaders/syslinux/syslinux.cfg << EOF
include menu.cfg
default vesamenu.c32
prompt 0
timeout 50
EOF
cat > config/bootloaders/isolinux/isolinux.cfg << EOF
include menu.cfg
default vesamenu.c32
prompt 0
timeout 50
EOF
# Extract splash.png from the deb package
dpkg-deb --fsys-tarfile $DEB_PATH | tar --to-stdout -xf - ./usr/lib/startos/splash.png > /tmp/splash.png
cp /tmp/splash.png config/bootloaders/syslinux_common/splash.png
cp /tmp/splash.png config/bootloaders/isolinux/splash.png
cp /tmp/splash.png config/bootloaders/grub-pc/splash.png
rm /tmp/splash.png
sed -i -e '2i set timeout=5' config/bootloaders/grub-pc/config.cfg
# Archives
mkdir -p config/archives
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
# Fetch the keyring package (not the old raspberrypi.gpg.key, which has
# SHA1-only binding signatures that sqv on Trixie rejects).
KEYRING_DEB=$(mktemp)
curl -fsSL -o "$KEYRING_DEB" https://archive.raspberrypi.com/debian/pool/main/r/raspberrypi-archive-keyring/raspberrypi-archive-keyring_2025.1+rpt1_all.deb
dpkg-deb -x "$KEYRING_DEB" "$KEYRING_DEB.d"
cp "$KEYRING_DEB.d/usr/share/keyrings/raspberrypi-archive-keyring.gpg" config/archives/raspi.key
rm -rf "$KEYRING_DEB" "$KEYRING_DEB.d"
echo "deb [arch=${IB_TARGET_ARCH} signed-by=/etc/apt/trusted.gpg.d/raspi.key.gpg] https://archive.raspberrypi.com/debian/ ${IB_SUITE} main" > config/archives/raspi.list
fi
if [ "${IB_TARGET_PLATFORM}" = "rockchip64" ]; then
curl -fsSL https://apt.armbian.com/armbian.key | gpg --dearmor -o config/archives/armbian.key
echo "deb https://apt.armbian.com/ ${IB_SUITE} main" > config/archives/armbian.list
fi
if [ "$NVIDIA" = 1 ]; then
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | gpg --dearmor -o config/archives/nvidia-container-toolkit.key
curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list \
| sed 's#deb https://#deb [signed-by=/etc/apt/trusted.gpg.d/nvidia-container-toolkit.key.gpg] https://#g' \
> config/archives/nvidia-container-toolkit.list
fi
cat > config/archives/backports.pref <<-EOF
Package: linux-image-*
Pin: release n=${IB_SUITE}-backports
Pin-Priority: 500
Package: linux-headers-*
Pin: release n=${IB_SUITE}-backports
Pin-Priority: 500
Package: *nvidia*
Pin: release n=${IB_SUITE}-backports
Pin-Priority: 500
EOF
# Hooks
cat > config/hooks/normal/9000-install-startos.hook.chroot << EOF
#!/bin/bash
set -e
if [ "${IB_TARGET_PLATFORM}" != "raspberrypi" ]; then
/usr/lib/startos/scripts/enable-kiosk
fi
if [ "${NVIDIA}" = "1" ]; then
# install a specific NVIDIA driver version
# ---------------- configuration ----------------
NVIDIA_DRIVER_VERSION="\${NVIDIA_DRIVER_VERSION:-580.126.09}"
BASE_URL="https://download.nvidia.com/XFree86/Linux-${QEMU_ARCH}"
echo "[nvidia-hook] Using NVIDIA driver: \${NVIDIA_DRIVER_VERSION}" >&2
# ---------------- kernel version ----------------
# Determine target kernel version from newest /boot/vmlinuz-* in the chroot.
KVER="\$(
ls -1t /boot/vmlinuz-* 2>/dev/null \
| head -n1 \
| sed 's|.*/vmlinuz-||'
)"
if [ -z "\${KVER}" ]; then
echo "[nvidia-hook] ERROR: no /boot/vmlinuz-* found; cannot determine kernel version" >&2
exit 1
fi
echo "[nvidia-hook] Target kernel version: \${KVER}" >&2
# Ensure kernel headers are present
TEMP_APT_DEPS=(build-essential pkg-config)
if [ ! -e "/lib/modules/\${KVER}/build" ]; then
TEMP_APT_DEPS+=(linux-headers-\${KVER})
fi
echo "[nvidia-hook] Installing build dependencies" >&2
/usr/lib/startos/scripts/install-equivs <<-EOF
Package: nvidia-depends
Version: \${NVIDIA_DRIVER_VERSION}
Section: unknown
Priority: optional
Depends: \${dep_list="\$(IFS=', '; echo "\${TEMP_APT_DEPS[*]}")"}
EOF
# ---------------- download and run installer ----------------
RUN_NAME="NVIDIA-Linux-${QEMU_ARCH}-\${NVIDIA_DRIVER_VERSION}.run"
RUN_PATH="/root/\${RUN_NAME}"
RUN_URL="\${BASE_URL}/\${NVIDIA_DRIVER_VERSION}/\${RUN_NAME}"
echo "[nvidia-hook] Downloading \${RUN_URL}" >&2
wget -O "\${RUN_PATH}" "\${RUN_URL}"
chmod +x "\${RUN_PATH}"
echo "[nvidia-hook] Running NVIDIA installer for kernel \${KVER}" >&2
if ! sh "\${RUN_PATH}" \
--silent \
--kernel-name="\${KVER}" \
--no-x-check \
--no-nouveau-check \
--no-runlevel-check; then
cat /var/log/nvidia-installer.log
exit 1
fi
# Rebuild module metadata
echo "[nvidia-hook] Running depmod for \${KVER}" >&2
depmod -a "\${KVER}"
echo "[nvidia-hook] NVIDIA \${NVIDIA_DRIVER_VERSION} installation complete for kernel \${KVER}" >&2
echo "[nvidia-hook] Removing .run installer..." >&2
rm -f "\${RUN_PATH}"
echo "[nvidia-hook] Blacklisting nouveau..." >&2
echo "blacklist nouveau" > /etc/modprobe.d/blacklist-nouveau.conf
echo "options nouveau modeset=0" >> /etc/modprobe.d/blacklist-nouveau.conf
echo "[nvidia-hook] Rebuilding initramfs..." >&2
update-initramfs -u -k "\${KVER}"
echo "[nvidia-hook] Removing build dependencies..." >&2
apt-get purge -y nvidia-depends
apt-get autoremove -y
echo "[nvidia-hook] Removed build dependencies." >&2
fi
# Install linux-kbuild for sign-file (Secure Boot module signing)
KVER_ALL="\$(ls -1t /boot/vmlinuz-* 2>/dev/null | head -n1 | sed 's|.*/vmlinuz-||')"
if [ -n "\${KVER_ALL}" ]; then
KBUILD_VER="\$(echo "\${KVER_ALL}" | grep -oP '^\d+\.\d+')"
if [ -n "\${KBUILD_VER}" ]; then
echo "[build] Installing linux-kbuild-\${KBUILD_VER} for Secure Boot support" >&2
apt-get install -y "linux-kbuild-\${KBUILD_VER}" || echo "[build] WARNING: linux-kbuild-\${KBUILD_VER} not available" >&2
fi
fi
cp /etc/resolv.conf /etc/resolv.conf.bak
if [ "${IB_SUITE}" = trixie ] && [ "${IB_TARGET_ARCH}" != riscv64 ]; then
echo 'deb https://deb.debian.org/debian/ bookworm main' > /etc/apt/sources.list.d/bookworm.list
apt-get update
apt-get install -y postgresql-15
rm /etc/apt/sources.list.d/bookworm.list
apt-get update
systemctl mask postgresql
fi
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
ln -sf /usr/bin/pi-beep /usr/local/bin/beep
sh /boot/firmware/config.sh > /boot/firmware/config.txt
mkinitramfs -c gzip -o /boot/initrd.img-${RPI_KERNEL_VERSION}-rpi-v8 ${RPI_KERNEL_VERSION}-rpi-v8
mkinitramfs -c gzip -o /boot/initrd.img-${RPI_KERNEL_VERSION}-rpi-2712 ${RPI_KERNEL_VERSION}-rpi-2712
cp /usr/lib/u-boot/rpi_arm64/u-boot.bin /boot/firmware/u-boot.bin
fi
useradd --shell /bin/bash -G startos -m start9
echo start9:embassy | chpasswd
usermod -aG sudo start9
usermod -aG systemd-journal start9
echo "start9 ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee "/etc/sudoers.d/010_start9-nopasswd"
if ! [[ "${IB_OS_ENV}" =~ (^|-)dev($|-) ]]; then
passwd -l start9
fi
mkdir -p /media/startos
chmod 750 /media/startos
chown root:startos /media/startos
start-cli --registry=https://alpha-registry-x.start9.com registry package download tor -d /usr/lib/startos/tor_${QEMU_ARCH}.s9pk -a "${QEMU_ARCH}"
EOF
SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(date '+%s')}"
if lb bootstrap; then
true
else
EXIT=$?
cat ./chroot/debootstrap/debootstrap.log
exit $EXIT
fi
lb chroot
lb installer
lb binary_chroot
lb chroot_prep install all mode-apt-install-binary mode-archives-chroot
mv chroot/chroot/etc/resolv.conf.bak chroot/chroot/etc/resolv.conf
lb binary_rootfs
cp $prep_results_dir/binary/live/filesystem.squashfs $RESULTS_DIR/$IMAGE_BASENAME.squashfs
if [ "${IMAGE_TYPE}" = iso ]; then
lb binary_manifest
lb binary_package-lists
lb binary_linux-image
lb binary_memtest
lb binary_grub-legacy
lb binary_grub-pc
lb binary_grub_cfg
lb binary_syslinux
lb binary_disk
lb binary_loadlin
lb binary_win32-loader
lb binary_includes
lb binary_grub-efi
lb binary_hooks
lb binary_checksums
find binary -newermt "$(date -d@${SOURCE_DATE_EPOCH} '+%Y-%m-%d %H:%M:%S')" -printf "%y %p\n" -exec touch '{}' -d@${SOURCE_DATE_EPOCH} --no-dereference ';' > binary.modified_timestamps
lb binary_iso
lb binary_onie
lb binary_netboot
lb binary_tar
lb binary_hdd
lb binary_zsync
lb chroot_prep remove all mode-archives-chroot
lb source
mv $prep_results_dir/live-image-${IB_TARGET_ARCH}.hybrid.iso $RESULTS_DIR/$IMAGE_BASENAME.iso
elif [ "${IMAGE_TYPE}" = img ]; then
SECTOR_LEN=512
FW_START=$((1024 * 1024)) # 1MiB (sector 2048) — Pi-specific
FW_LEN=$((128 * 1024 * 1024)) # 128MiB (Pi firmware + U-Boot + DTBs)
FW_END=$((FW_START + FW_LEN - 1))
ESP_START=$((FW_END + 1)) # 100MB EFI System Partition (matches os_install)
ESP_LEN=$((100 * 1024 * 1024))
ESP_END=$((ESP_START + ESP_LEN - 1))
BOOT_START=$((ESP_END + 1)) # 2GB /boot (matches os_install)
BOOT_LEN=$((2 * 1024 * 1024 * 1024))
BOOT_END=$((BOOT_START + BOOT_LEN - 1))
ROOT_START=$((BOOT_END + 1))
# Size root partition to fit the squashfs + 256MB overhead for btrfs
# metadata and config overlay, avoiding the need for btrfs resize
SQUASHFS_SIZE=$(stat -c %s $prep_results_dir/binary/live/filesystem.squashfs)
ROOT_LEN=$(( SQUASHFS_SIZE + 256 * 1024 * 1024 ))
# Align to sector boundary
ROOT_LEN=$(( (ROOT_LEN + SECTOR_LEN - 1) / SECTOR_LEN * SECTOR_LEN ))
# Total image: partitions + GPT backup header (34 sectors)
IMG_LEN=$((ROOT_START + ROOT_LEN + 34 * SECTOR_LEN))
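	# Worked example (sizes illustrative): a 1.5 GiB squashfs gives
	# ROOT_LEN = 1610612736 + 268435456 = 1879048192 bytes (already 512-aligned),
	# and the GPT backup header adds 34 * 512 = 17408 bytes to IMG_LEN.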
# Fixed GPT partition UUIDs (deterministic, based on old MBR disk ID cb15ae4d)
FW_UUID=cb15ae4d-0001-4000-8000-000000000001
ESP_UUID=cb15ae4d-0002-4000-8000-000000000002
BOOT_UUID=cb15ae4d-0003-4000-8000-000000000003
ROOT_UUID=cb15ae4d-0004-4000-8000-000000000004
TARGET_NAME=$prep_results_dir/${IMAGE_BASENAME}.img
truncate -s $IMG_LEN $TARGET_NAME
sfdisk $TARGET_NAME <<-EOF
label: gpt
${TARGET_NAME}1 : start=$((FW_START / SECTOR_LEN)), size=$((FW_LEN / SECTOR_LEN)), type=EBD0A0A2-B9E5-4433-87C0-68B6B72699C7, uuid=${FW_UUID}, name="firmware"
${TARGET_NAME}2 : start=$((ESP_START / SECTOR_LEN)), size=$((ESP_LEN / SECTOR_LEN)), type=C12A7328-F81F-11D2-BA4B-00A0C93EC93B, uuid=${ESP_UUID}, name="efi"
${TARGET_NAME}3 : start=$((BOOT_START / SECTOR_LEN)), size=$((BOOT_LEN / SECTOR_LEN)), type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, uuid=${BOOT_UUID}, name="boot"
${TARGET_NAME}4 : start=$((ROOT_START / SECTOR_LEN)), size=$((ROOT_LEN / SECTOR_LEN)), type=B921B045-1DF0-41C3-AF44-4C6F280D3FAE, uuid=${ROOT_UUID}, name="root"
EOF
# Create named loop device nodes (high minor numbers to avoid conflicts)
# and detach any stale ones from previous failed builds
FW_DEV=/dev/startos-loop-fw
ESP_DEV=/dev/startos-loop-esp
BOOT_DEV=/dev/startos-loop-boot
ROOT_DEV=/dev/startos-loop-root
for dev in $FW_DEV:200 $ESP_DEV:201 $BOOT_DEV:202 $ROOT_DEV:203; do
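		# each entry is "<node>:<minor>": ${dev%:*} keeps the node path, ${dev#*:} the
		# minor number (e.g. /dev/startos-loop-fw:200 -> minor 200)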
name=${dev%:*}
minor=${dev#*:}
[ -e $name ] || mknod $name b 7 $minor
losetup -d $name 2>/dev/null || true
done
losetup $FW_DEV --offset $FW_START --sizelimit $FW_LEN $TARGET_NAME
losetup $ESP_DEV --offset $ESP_START --sizelimit $ESP_LEN $TARGET_NAME
losetup $BOOT_DEV --offset $BOOT_START --sizelimit $BOOT_LEN $TARGET_NAME
losetup $ROOT_DEV --offset $ROOT_START --sizelimit $ROOT_LEN $TARGET_NAME
mkfs.vfat -F32 -n firmware $FW_DEV
mkfs.vfat -F32 -n efi $ESP_DEV
mkfs.vfat -F32 -n boot $BOOT_DEV
mkfs.btrfs -f -L rootfs $ROOT_DEV
TMPDIR=$(mktemp -d)
# Extract boot files from squashfs to staging area
BOOT_STAGING=$(mktemp -d)
unsquashfs -n -f -d $BOOT_STAGING $prep_results_dir/binary/live/filesystem.squashfs boot
# Mount partitions (nested: firmware and efi inside boot)
mkdir -p $TMPDIR/boot $TMPDIR/root
mount $BOOT_DEV $TMPDIR/boot
mkdir -p $TMPDIR/boot/firmware $TMPDIR/boot/efi
mount $FW_DEV $TMPDIR/boot/firmware
mount $ESP_DEV $TMPDIR/boot/efi
mount $ROOT_DEV $TMPDIR/root
# Copy boot files — nested mounts route firmware/* to the firmware partition
cp -a $BOOT_STAGING/boot/. $TMPDIR/boot/
rm -rf $BOOT_STAGING
mkdir $TMPDIR/root/images $TMPDIR/root/config
B3SUM=$(b3sum $prep_results_dir/binary/live/filesystem.squashfs | head -c 16)
cp $prep_results_dir/binary/live/filesystem.squashfs $TMPDIR/root/images/$B3SUM.rootfs
ln -rsf $TMPDIR/root/images/$B3SUM.rootfs $TMPDIR/root/config/current.rootfs
mkdir -p $TMPDIR/next $TMPDIR/lower $TMPDIR/root/config/work $TMPDIR/root/config/overlay
mount $TMPDIR/root/config/current.rootfs $TMPDIR/lower
mount -t overlay -o lowerdir=$TMPDIR/lower,workdir=$TMPDIR/root/config/work,upperdir=$TMPDIR/root/config/overlay overlay $TMPDIR/next
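	# $TMPDIR/next now presents the squashfs root with config/overlay stacked on
	# top; writes below land in config/overlay and persist into the built image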
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
rsync -a $SOURCE_DIR/raspberrypi/img/ $TMPDIR/next/
# Install GRUB: ESP at /boot/efi (Part 2), /boot (Part 3)
mkdir -p $TMPDIR/next/boot \
$TMPDIR/next/dev $TMPDIR/next/proc $TMPDIR/next/sys $TMPDIR/next/media/startos/root
mount --rbind $TMPDIR/boot $TMPDIR/next/boot
mount --bind /dev $TMPDIR/next/dev
mount -t proc proc $TMPDIR/next/proc
mount -t sysfs sysfs $TMPDIR/next/sys
mount --bind $TMPDIR/root $TMPDIR/next/media/startos/root
chroot $TMPDIR/next grub-install --target=arm64-efi --removable --efi-directory=/boot/efi --boot-directory=/boot --no-nvram
chroot $TMPDIR/next update-grub
umount $TMPDIR/next/media/startos/root
umount $TMPDIR/next/sys
umount $TMPDIR/next/proc
umount $TMPDIR/next/dev
umount -l $TMPDIR/next/boot
# Fix root= in grub.cfg: update-grub sees loop devices, but the
# real device uses a fixed GPT PARTUUID for root (Part 4).
sed -i "s|root=[^ ]*|root=PARTUUID=${ROOT_UUID}|g" $TMPDIR/boot/grub/grub.cfg
# Inject first-boot resize script into GRUB config
sed -i 's| boot=startos| boot=startos init=/usr/lib/startos/scripts/init_resize\.sh|' $TMPDIR/boot/grub/grub.cfg
fi
umount $TMPDIR/next
umount $TMPDIR/lower
umount $TMPDIR/boot/firmware
umount $TMPDIR/boot/efi
umount $TMPDIR/boot
umount $TMPDIR/root
losetup -d $ROOT_DEV
losetup -d $BOOT_DEV
losetup -d $ESP_DEV
losetup -d $FW_DEV
mv $TARGET_NAME $RESULTS_DIR/$IMAGE_BASENAME.img
fi
chown $IB_UID:$IB_UID $RESULTS_DIR/$IMAGE_BASENAME.*

View File

@@ -1,4 +0,0 @@
PARTUUID=cb15ae4d-0001-4000-8000-000000000001 /boot/firmware vfat umask=0077 0 2
PARTUUID=cb15ae4d-0002-4000-8000-000000000002 /boot/efi vfat umask=0077 0 1
PARTUUID=cb15ae4d-0003-4000-8000-000000000003 /boot vfat umask=0077 0 2
PARTUUID=cb15ae4d-0004-4000-8000-000000000004 / btrfs defaults 0 1

View File

@@ -1,4 +0,0 @@
# Raspberry Pi-specific GRUB overrides
# Overrides GRUB_CMDLINE_LINUX from /etc/default/grub with Pi-specific
# console devices and hardware quirks.
GRUB_CMDLINE_LINUX="boot=startos console=serial0,115200 console=tty1 usb-storage.quirks=152d:0562:u,14cd:121c:u,0781:cfcb:u cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory"

View File

@@ -1,35 +0,0 @@
#!/bin/bash
set -e
cd "$(dirname "${BASH_SOURCE[0]}")/../.."
BASEDIR="$(pwd -P)"
SUITE=trixie
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
dockerfile_hash=$(sha256sum ${BASEDIR}/build/image-recipe/Dockerfile | head -c 7)
docker_img_name="start9/build-iso:${SUITE}-${dockerfile_hash}"
platform=linux/${ARCH}
case $ARCH in
x86_64)
platform=linux/amd64;;
aarch64)
platform=linux/arm64;;
esac
if ! docker run --rm --platform=$platform "${docker_img_name}" true 2> /dev/null; then
docker buildx build --load --platform=$platform --build-arg=SUITE=${SUITE} -t "${docker_img_name}" ./build/image-recipe
fi
docker run $USE_TTY --rm --platform=$platform --privileged -v "$(pwd)/build/image-recipe:/root/image-recipe" -v "$(pwd)/results:/root/results" \
-e IB_SUITE="$SUITE" \
-e IB_UID="$UID" \
-e IB_INCLUDE \
"${docker_img_name}" /root/image-recipe/build.sh $@

View File

@@ -1,51 +0,0 @@
desktop-image: "../splash.png"
title-color: "#ffffff"
title-font: "Unifont Regular 16"
title-text: "StartOS Boot Menu with GRUB"
message-font: "Unifont Regular 16"
terminal-font: "Unifont Regular 16"
#help bar at the bottom
+ label {
top = 100%-50
left = 0
width = 100%
height = 20
text = "@KEYMAP_SHORT@"
align = "center"
color = "#ffffff"
font = "Unifont Regular 16"
}
#boot menu
+ boot_menu {
left = 10%
width = 80%
top = 52%
height = 48%-80
item_color = "#a8a8a8"
item_font = "Unifont Regular 16"
selected_item_color = "#ffffff"
selected_item_font = "Unifont Regular 16"
item_height = 16
item_padding = 0
item_spacing = 4
icon_width = 0
icon_height = 0
item_icon_space = 0
}
#progress bar
+ progress_bar {
id = "__timeout__"
left = 15%
top = 100%-80
height = 16
width = 70%
font = "Unifont Regular 16"
text_color = "#000000"
fg_color = "#ffffff"
bg_color = "#a8a8a8"
border_color = "#ffffff"
text = "@TIMEOUT_NOTIFICATION_LONG@"
}

View File

@@ -4,7 +4,7 @@ parse_essential_db_info() {
     DB_DUMP="/tmp/startos_db.json"
     if command -v start-cli >/dev/null 2>&1; then
-        timeout 30 start-cli db dump > "$DB_DUMP" 2>/dev/null || return 1
+        start-cli db dump > "$DB_DUMP" 2>/dev/null || return 1
     else
         return 1
     fi
@@ -22,7 +22,7 @@ parse_essential_db_info() {
        RAM_GB="unknown"
    fi
-   RUNNING_SERVICES=$(jq -r '[.value.packageData[] | select(.statusInfo.started != null)] | length' "$DB_DUMP" 2>/dev/null)
+   RUNNING_SERVICES=$(jq -r '[.value.packageData[] | select(.status.main == "running")] | length' "$DB_DUMP" 2>/dev/null)
    TOTAL_SERVICES=$(jq -r '.value.packageData | length' "$DB_DUMP" 2>/dev/null)
    rm -f "$DB_DUMP"
@@ -118,6 +118,6 @@ else
 fi
 printf "\n \033[1;37m┌──────────────────────────────────────────────────── QUICK ACCESS ─┐\033[0m\n"
 printf " \033[1;37m│\033[0m Web Interface: \033[0;36m%-50s\033[0m \033[1;37m│\033[0m\n" "$web_url"
-printf " \033[1;37m│\033[0m Documentation: \033[0;36m%-50s\033[0m \033[1;37m│\033[0m\n" "https://docs.start9.com"
+printf " \033[1;37m│\033[0m Documentation: \033[0;36m%-50s\033[0m \033[1;37m│\033[0m\n" "https://staging.docs.start9.com"
 printf " \033[1;37m│\033[0m Support: \033[0;36m%-50s\033[0m \033[1;37m│\033[0m\n" "https://start9.com/contact"
 printf " \033[1;37m└───────────────────────────────────────────────────────────────────┘\033[0m\n\n"

View File

@@ -34,7 +34,7 @@ set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters
 if [ -z "$NO_SYNC" ]; then
     echo 'Syncing...'
-    umount -l /media/startos/next 2> /dev/null
+    umount -R /media/startos/next 2> /dev/null
     umount /media/startos/upper 2> /dev/null
     rm -rf /media/startos/upper /media/startos/next
     mkdir /media/startos/upper
@@ -55,16 +55,16 @@ mkdir -p /media/startos/next/sys
 mkdir -p /media/startos/next/proc
 mkdir -p /media/startos/next/boot
 mkdir -p /media/startos/next/media/startos/root
-mount -t tmpfs tmpfs /media/startos/next/run
-mount -t tmpfs tmpfs /media/startos/next/tmp
+mount --bind /run /media/startos/next/run
+mount --bind /tmp /media/startos/next/tmp
 mount --bind /dev /media/startos/next/dev
-mount -t sysfs sysfs /media/startos/next/sys
-mount -t proc proc /media/startos/next/proc
+mount --bind /sys /media/startos/next/sys
+mount --bind /proc /media/startos/next/proc
 mount --bind /boot /media/startos/next/boot
 mount --bind /media/startos/root /media/startos/next/media/startos/root
-if mountpoint /sys/firmware/efi/efivars 2>&1 > /dev/null; then
-    mount -t efivarfs efivarfs /media/startos/next/sys/firmware/efi/efivars
+if mountpoint /sys/firmware/efi/efivars 2> /dev/null; then
+    mount --bind /sys/firmware/efi/efivars /media/startos/next/sys/firmware/efi/efivars
 fi
 if [ -z "$*" ]; then
@@ -75,17 +75,17 @@ else
     CHROOT_RES=$?
 fi
-if mountpoint /media/startos/next/sys/firmware/efi/efivars 2>&1 > /dev/null; then
+if mountpoint /media/startos/next/sys/firmware/efi/efivars 2> /dev/null; then
     umount /media/startos/next/sys/firmware/efi/efivars
 fi
-umount -l /media/startos/next/run
-umount -l /media/startos/next/tmp
-umount -l /media/startos/next/dev
-umount -l /media/startos/next/sys
-umount -l /media/startos/next/proc
-umount -l /media/startos/next/boot
-umount -l /media/startos/next/media/startos/root
+umount /media/startos/next/run
+umount /media/startos/next/tmp
+umount /media/startos/next/dev
+umount /media/startos/next/sys
+umount /media/startos/next/proc
+umount /media/startos/next/boot
+umount /media/startos/next/media/startos/root
 if [ "$CHROOT_RES" -eq 0 ]; then
@@ -95,7 +95,6 @@ if [ "$CHROOT_RES" -eq 0 ]; then
     echo 'Upgrading...'
-    rm -f /media/startos/images/next.squashfs
     if ! time mksquashfs /media/startos/next /media/startos/images/next.squashfs -b 4096 -comp gzip; then
         umount -l /media/startos/next
         umount -l /media/startos/upper
@@ -111,6 +110,6 @@ if [ "$CHROOT_RES" -eq 0 ]; then
     reboot
 fi
-umount -l /media/startos/next
-umount -l /media/startos/upper
+umount -R /media/startos/next
+umount /media/startos/upper
 rm -rf /media/startos/upper /media/startos/next

View File

@@ -1,72 +1,29 @@
 #!/bin/bash
-if [ -z "$sip" ] || [ -z "$dip" ] || [ -z "$dprefix" ] || [ -z "$sport" ] || [ -z "$dport" ]; then
+if [ -z "$sip" ] || [ -z "$dip" ] || [ -z "$sport" ] || [ -z "$dport" ]; then
     >&2 echo 'missing required env var'
     exit 1
 fi
-NAME="F$(echo "$sip:$sport -> $dip/$dprefix:$dport ${src_subnet:-any}" | sha256sum | head -c 15)"
-for kind in INPUT FORWARD ACCEPT; do
-    if ! iptables -C $kind -j "${NAME}_${kind}" 2> /dev/null; then
-        iptables -N "${NAME}_${kind}" 2> /dev/null
-        iptables -A $kind -j "${NAME}_${kind}"
-    fi
-done
-for kind in PREROUTING OUTPUT POSTROUTING; do
-    if ! iptables -t nat -C $kind -j "${NAME}_${kind}" 2> /dev/null; then
-        iptables -t nat -N "${NAME}_${kind}" 2> /dev/null
-        iptables -t nat -A $kind -j "${NAME}_${kind}"
-    fi
-done
-err=0
-trap 'err=1' ERR
-for kind in INPUT FORWARD ACCEPT; do
-    iptables -F "${NAME}_${kind}" 2> /dev/null
-done
-for kind in PREROUTING OUTPUT POSTROUTING; do
-    iptables -t nat -F "${NAME}_${kind}" 2> /dev/null
-done
-if [ "$UNDO" = 1 ]; then
-    conntrack -D -p tcp -d $sip --dport $sport || true # conntrack returns exit 1 if no connections are active
-    conntrack -D -p udp -d $sip --dport $sport || true # conntrack returns exit 1 if no connections are active
-    exit $err
-fi
-# DNAT: rewrite destination for incoming packets (external traffic)
-# When src_subnet is set, only forward traffic from that subnet (private forwards)
-if [ -n "$src_subnet" ]; then
-    iptables -t nat -A ${NAME}_PREROUTING -s "$src_subnet" -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
-    iptables -t nat -A ${NAME}_PREROUTING -s "$src_subnet" -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
-    # Also allow containers on the bridge subnet to reach this forward
-    if [ -n "$bridge_subnet" ]; then
-        iptables -t nat -A ${NAME}_PREROUTING -s "$bridge_subnet" -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
-        iptables -t nat -A ${NAME}_PREROUTING -s "$bridge_subnet" -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
-    fi
-else
-    iptables -t nat -A ${NAME}_PREROUTING -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
-    iptables -t nat -A ${NAME}_PREROUTING -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
-fi
-# DNAT: rewrite destination for locally-originated packets (hairpin from host itself)
-iptables -t nat -A ${NAME}_OUTPUT -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
-iptables -t nat -A ${NAME}_OUTPUT -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
-# Allow new connections to be forwarded to the destination
-iptables -A ${NAME}_FORWARD -d $dip -p tcp --dport $dport -m state --state NEW -j ACCEPT
-iptables -A ${NAME}_FORWARD -d $dip -p udp --dport $dport -m state --state NEW -j ACCEPT
-# NAT hairpin: masquerade traffic from the bridge subnet or host to the DNAT
-# target, so replies route back through the host for proper NAT reversal.
-# Container-to-container hairpin (source is on the bridge subnet)
-if [ -n "$bridge_subnet" ]; then
-    iptables -t nat -A ${NAME}_POSTROUTING -s "$bridge_subnet" -d "$dip" -p tcp --dport "$dport" -j MASQUERADE
-    iptables -t nat -A ${NAME}_POSTROUTING -s "$bridge_subnet" -d "$dip" -p udp --dport "$dport" -j MASQUERADE
-fi
-# Host-to-container hairpin (host connects to its own gateway IP, source is sip)
-iptables -t nat -A ${NAME}_POSTROUTING -s "$sip" -d "$dip" -p tcp --dport "$dport" -j MASQUERADE
-iptables -t nat -A ${NAME}_POSTROUTING -s "$sip" -d "$dip" -p udp --dport "$dport" -j MASQUERADE
-exit $err
+rule_exists() {
+    iptables -t nat -C "$@" 2>/dev/null
+}
+apply_rule() {
+    if [ "$UNDO" = "1" ]; then
+        if rule_exists "$@"; then
+            iptables -t nat -D "$@"
+        fi
+    else
+        if ! rule_exists "$@"; then
+            iptables -t nat -A "$@"
+        fi
+    fi
+}
+apply_rule PREROUTING -p tcp -d $sip --dport $sport -j DNAT --to-destination $dip:$dport
+apply_rule OUTPUT -p tcp -d $sip --dport $sport -j DNAT --to-destination $dip:$dport
+if [ "$UNDO" = 1 ]; then
+    conntrack -D -p tcp -d $sip --dport $sport
+fi

View File

@@ -1,20 +0,0 @@
#!/bin/bash
export DEBIAN_FRONTEND=noninteractive
export DEBCONF_NONINTERACTIVE_SEEN=true
TMP_DIR=$(mktemp -d)
(
set -e
cd $TMP_DIR
cat > control.equivs
equivs-build control.equivs
apt-get install -y ./*.deb < /dev/null
)
rm -rf $TMP_DIR
echo Install complete. >&2
exit 0

View File

@@ -29,13 +29,10 @@ if [ -z "$needed" ]; then
    exit 1
 fi
-MARGIN=${MARGIN:-1073741824}
-target=$((needed + MARGIN))
 if [ -h /media/startos/config/current.rootfs ] && [ -e /media/startos/config/current.rootfs ]; then
    echo 'Pruning...'
    current="$(readlink -f /media/startos/config/current.rootfs)"
-   while [[ "$(df -B1 --output=avail --sync /media/startos/images | tail -n1)" -lt "$target" ]]; do
+   while [[ "$(df -B1 --output=avail --sync /media/startos/images | tail -n1)" -lt "$needed" ]]; do
        to_prune="$(ls -t1 /media/startos/images/*.rootfs /media/startos/images/*.squashfs 2> /dev/null | grep -v "$current" | tail -n1)"
        if [ -e "$to_prune" ]; then
            echo " Pruning $to_prune"

View File

@@ -1,76 +0,0 @@
#!/bin/bash
# sign-unsigned-modules [--source <dir> --dest <dir>] [--sign-file <path>]
# [--mok-key <path>] [--mok-pub <path>]
#
# Signs all unsigned kernel modules using the DKMS MOK key.
#
# Default (install) mode:
# Run inside a chroot. Finds and signs unsigned modules in /lib/modules in-place.
# sign-file and MOK key are auto-detected from standard paths.
#
# Overlay mode (--source/--dest):
# Finds unsigned modules in <source>, copies to <dest>, signs the copies.
# Clears old signed modules in <dest> first. Used during upgrades where the
# overlay upper is tmpfs and writes would be lost.
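#
# Illustrative invocations:
#   sign-unsigned-modules                                  # in-place, inside a chroot
#   sign-unsigned-modules --source /media/startos/lower \
#       --dest /media/startos/config/overlay               # overlay mode during upgrade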
set -e
SOURCE=""
DEST=""
SIGN_FILE=""
MOK_KEY="/var/lib/dkms/mok.key"
MOK_PUB="/var/lib/dkms/mok.pub"
while [[ $# -gt 0 ]]; do
case $1 in
--source) SOURCE="$2"; shift 2;;
--dest) DEST="$2"; shift 2;;
--sign-file) SIGN_FILE="$2"; shift 2;;
--mok-key) MOK_KEY="$2"; shift 2;;
--mok-pub) MOK_PUB="$2"; shift 2;;
*) echo "Unknown option: $1" >&2; exit 1;;
esac
done
# Auto-detect sign-file if not specified
if [ -z "$SIGN_FILE" ]; then
SIGN_FILE="$(ls -1 /usr/lib/linux-kbuild-*/scripts/sign-file 2>/dev/null | head -1)"
fi
if [ -z "$SIGN_FILE" ] || [ ! -x "$SIGN_FILE" ]; then
exit 0
fi
if [ ! -f "$MOK_KEY" ] || [ ! -f "$MOK_PUB" ]; then
exit 0
fi
COUNT=0
if [ -n "$SOURCE" ] && [ -n "$DEST" ]; then
# Overlay mode: find unsigned in source, copy to dest, sign in dest
rm -rf "${DEST}"/lib/modules
for ko in $(find "${SOURCE}"/lib/modules -name '*.ko' 2>/dev/null); do
if ! modinfo "$ko" 2>/dev/null | grep -q '^sig_id:'; then
rel_path="${ko#${SOURCE}}"
mkdir -p "${DEST}$(dirname "$rel_path")"
cp "$ko" "${DEST}${rel_path}"
"$SIGN_FILE" sha256 "$MOK_KEY" "$MOK_PUB" "${DEST}${rel_path}"
COUNT=$((COUNT + 1))
fi
done
else
# In-place mode: sign modules directly
for ko in $(find /lib/modules -name '*.ko' 2>/dev/null); do
if ! modinfo "$ko" 2>/dev/null | grep -q '^sig_id:'; then
"$SIGN_FILE" sha256 "$MOK_KEY" "$MOK_PUB" "$ko"
COUNT=$((COUNT + 1))
fi
done
fi
if [ $COUNT -gt 0 ]; then
echo "[sign-modules] Signed $COUNT unsigned kernel modules"
fi

View File

@@ -104,7 +104,6 @@ local_mount_root()
        -olowerdir=/startos/config/overlay:/lower,upperdir=/upper/data,workdir=/upper/work \
        overlay ${rootmnt}
-   mkdir -m 750 -p ${rootmnt}/media/startos
    mkdir -p ${rootmnt}/media/startos/config
    mount --bind /startos/config ${rootmnt}/media/startos/config
    mkdir -p ${rootmnt}/media/startos/images

View File

@@ -1,64 +1,36 @@
 #!/bin/bash
-# --- Config ---
-# Colors (using printf to ensure compatibility)
-GRAY=$(printf '\033[90m')
-GREEN=$(printf '\033[32m')
-RED=$(printf '\033[31m')
-NC=$(printf '\033[0m') # No Color
-# Proxies to test
-proxies=(
-    "Host Tor|127.0.1.1:9050"
-    "Startd Tor|10.0.3.1:9050"
-)
-# Default URLs
+fail=$(printf " [\033[31m fail \033[0m]")
+pass=$(printf " [\033[32m pass \033[0m]")
 onion_list=(
-    "The Tor Project|http://2gzyxa5ihm7nsggfxnu52rck2vv4rvmdlkiu3zzui5du4xyclen53wid.onion"
     "Start9|http://privacy34kn4ez3y3nijweec6w4g54i3g54sdv7r5mr6soma3w4begyd.onion"
     "Mempool|http://mempoolhqx4isw62xs7abwphsq7ldayuidyx2v2oethdhhj6mlo2r6ad.onion"
     "DuckDuckGo|https://duckduckgogg42xjoc72x3sjasowoarfbgcmvfimaftt6twagswzczad.onion"
     "Brave Search|https://search.brave4u7jddbv7cyviptqjc7jusxh72uik7zt6adtckl5f4nwy2v72qd.onion"
 )
-# Load custom list
-[ -f ~/.startos/tor-check.list ] && readarray -t custom_list < <(grep -v '^#' ~/.startos/tor-check.list) && onion_list+=("${custom_list[@]}")
-# --- Functions ---
-print_line() { printf "${GRAY}────────────────────────────────────────${NC}\n"; }
-# --- Main ---
-echo "Testing Onion Connections..."
-for proxy_info in "${proxies[@]}"; do
-    proxy_name="${proxy_info%%|*}"
-    proxy_addr="${proxy_info#*|}"
-    print_line
-    printf "${GRAY}Proxy: %s (%s)${NC}\n" "$proxy_name" "$proxy_addr"
-    for data in "${onion_list[@]}"; do
-        name="${data%%|*}"
-        url="${data#*|}"
-        # Capture verbose output + http code.
-        # --no-progress-meter: Suppresses the "0 0 0" stats but keeps -v output
-        output=$(curl -v --no-progress-meter --max-time 15 --socks5-hostname "$proxy_addr" "$url" 2>&1)
-        exit_code=$?
-        if [ $exit_code -eq 0 ]; then
-            printf " ${GREEN}[pass]${NC} %s (%s)\n" "$name" "$url"
-        else
-            printf " ${RED}[fail]${NC} %s (%s)\n" "$name" "$url"
-            printf " ${RED}↳ Curl Error %s${NC}\n" "$exit_code"
-            # Print the last 4 lines of verbose log to show the specific handshake error
-            # We look for lines starting with '*' or '>' or '<' to filter out junk if any remains
-            echo "$output" | tail -n 4 | sed "s/^/ ${GRAY}/"
-        fi
-    done
-done
-print_line
-# Reset color just in case
-printf "${NC}"
+# Check if ~/.startos/tor-check.list exists and read its contents if available
+if [ -f ~/.startos/tor-check.list ]; then
+    while IFS= read -r line; do
+        # Check if the line starts with a #
+        if [[ ! "$line" =~ ^# ]]; then
+            onion_list+=("$line")
+        fi
+    done < ~/.startos/tor-check.list
+fi
+echo "Testing connection to Onion Pages ..."
+for data in "${onion_list[@]}"; do
+    name="${data%%|*}"
+    url="${data#*|}"
+    if curl --socks5-hostname localhost:9050 "$url" > /dev/null 2>&1; then
+        echo " ${pass}: $name ($url) "
+    else
+        echo " ${fail}: $name ($url) "
+    fi
+done
+echo
+echo "Done."

View File

@@ -24,7 +24,7 @@ fi
 unsquashfs -f -d / $1 boot
-umount -l /media/startos/next 2> /dev/null || true
+umount -R /media/startos/next 2> /dev/null || true
 umount /media/startos/upper 2> /dev/null || true
 umount /media/startos/lower 2> /dev/null || true
@@ -45,36 +45,32 @@ mkdir -p /media/startos/next/media/startos/root
 mount --bind /run /media/startos/next/run
 mount --bind /tmp /media/startos/next/tmp
 mount --bind /dev /media/startos/next/dev
-mount -t sysfs sysfs /media/startos/next/sys
-mount -t proc proc /media/startos/next/proc
-mount --rbind /boot /media/startos/next/boot
+mount --bind /sys /media/startos/next/sys
+mount --bind /proc /media/startos/next/proc
+mount --bind /boot /media/startos/next/boot
 mount --bind /media/startos/root /media/startos/next/media/startos/root
-if mountpoint /sys/firmware/efi/efivars 2>&1 > /dev/null; then
-    mount -t efivarfs efivarfs /media/startos/next/sys/firmware/efi/efivars
+if mountpoint /boot/efi 2> /dev/null; then
+    mkdir -p /media/startos/next/boot/efi
+    mount --bind /boot/efi /media/startos/next/boot/efi
+fi
+if mountpoint /sys/firmware/efi/efivars 2> /dev/null; then
+    mount --bind /sys/firmware/efi/efivars /media/startos/next/sys/firmware/efi/efivars
 fi
 chroot /media/startos/next bash -e << "EOF"
-if [ -f /boot/grub/grub.cfg ]; then
+if dpkg -s grub-common 2>&1 > /dev/null; then
    grub-install /dev/$(eval $(lsblk -o MOUNTPOINT,PKNAME -P | grep 'MOUNTPOINT="/media/startos/root"') && echo $PKNAME)
    update-grub
 fi
 EOF
-# Sign unsigned kernel modules for Secure Boot
-SIGN_FILE="$(ls -1 /media/startos/next/usr/lib/linux-kbuild-*/scripts/sign-file 2>/dev/null | head -1)"
-/media/startos/next/usr/lib/startos/scripts/sign-unsigned-modules \
-    --source /media/startos/lower \
-    --dest /media/startos/config/overlay \
-    --sign-file "$SIGN_FILE" \
-    --mok-key /media/startos/config/overlay/var/lib/dkms/mok.key \
-    --mok-pub /media/startos/config/overlay/var/lib/dkms/mok.pub
 sync
-umount -l /media/startos/next
+umount -R /media/startos/next
 umount /media/startos/upper
 umount /media/startos/lower

View File

@@ -1,367 +0,0 @@
#!/bin/bash
set -e
REPO="Start9Labs/start-os"
REGISTRY="https://alpha-registry-x.start9.com"
S3_BUCKET="s3://startos-images"
S3_CDN="https://startos-images.nyc3.cdn.digitaloceanspaces.com"
START9_GPG_KEY="2D63C217"
ARCHES="aarch64 aarch64-nonfree aarch64-nvidia riscv64 riscv64-nonfree x86_64 x86_64-nonfree x86_64-nvidia"
CLI_ARCHES="aarch64 riscv64 x86_64"
parse_run_id() {
local val="$1"
if [[ "$val" =~ /actions/runs/([0-9]+) ]]; then
echo "${BASH_REMATCH[1]}"
else
echo "$val"
fi
}
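# e.g. parse_run_id "https://github.com/Start9Labs/start-os/actions/runs/123456789" -> "123456789"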
require_version() {
if [ -z "${VERSION:-}" ]; then
read -rp "VERSION: " VERSION
if [ -z "$VERSION" ]; then
>&2 echo '$VERSION required'
exit 2
fi
fi
}
release_dir() {
echo "$HOME/Downloads/v$VERSION"
}
ensure_release_dir() {
local dir
dir=$(release_dir)
if [ "$CLEAN" = "1" ]; then
rm -rf "$dir"
fi
mkdir -p "$dir"
cd "$dir"
}
enter_release_dir() {
local dir
dir=$(release_dir)
if [ ! -d "$dir" ]; then
>&2 echo "Release directory $dir does not exist. Run 'download' or 'pull' first."
exit 1
fi
cd "$dir"
}
cli_target_for() {
local arch=$1 os=$2
local pair="${arch}-${os}"
if [ "$pair" = "riscv64-linux" ]; then
echo "riscv64gc-unknown-linux-musl"
elif [ "$pair" = "riscv64-macos" ]; then
return 1
elif [ "$os" = "linux" ]; then
echo "${arch}-unknown-linux-musl"
elif [ "$os" = "macos" ]; then
echo "${arch}-apple-darwin"
fi
}
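# e.g. cli_target_for x86_64 linux -> x86_64-unknown-linux-musl; "riscv64 macos" returns nonzero and is skipped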
release_files() {
for file in *.iso *.squashfs *.deb; do
[ -f "$file" ] && echo "$file"
done
for file in start-cli_*; do
[[ "$file" == *.asc ]] && continue
[ -f "$file" ] && echo "$file"
done
}
resolve_gh_user() {
GH_USER=${GH_USER:-$(gh api user -q .login 2>/dev/null || true)}
GH_GPG_KEY=$(git config user.signingkey 2>/dev/null || true)
}
# --- Subcommands ---
cmd_download() {
require_version
if [ -z "${RUN_ID:-}" ]; then
read -rp "RUN_ID (OS images, leave blank to skip): " RUN_ID
fi
RUN_ID=$(parse_run_id "${RUN_ID:-}")
if [ -z "${ST_RUN_ID:-}" ]; then
read -rp "ST_RUN_ID (start-tunnel, leave blank to skip): " ST_RUN_ID
fi
ST_RUN_ID=$(parse_run_id "${ST_RUN_ID:-}")
if [ -z "${CLI_RUN_ID:-}" ]; then
read -rp "CLI_RUN_ID (start-cli, leave blank to skip): " CLI_RUN_ID
fi
CLI_RUN_ID=$(parse_run_id "${CLI_RUN_ID:-}")
ensure_release_dir
if [ -n "$RUN_ID" ]; then
for arch in $ARCHES; do
while ! gh run download -R $REPO "$RUN_ID" -n "$arch.squashfs" -D "$(pwd)"; do sleep 1; done
done
for arch in $ARCHES; do
while ! gh run download -R $REPO "$RUN_ID" -n "$arch.iso" -D "$(pwd)"; do sleep 1; done
done
fi
if [ -n "$ST_RUN_ID" ]; then
for arch in $CLI_ARCHES; do
while ! gh run download -R $REPO "$ST_RUN_ID" -n "start-tunnel_$arch.deb" -D "$(pwd)"; do sleep 1; done
done
fi
if [ -n "$CLI_RUN_ID" ]; then
for arch in $CLI_ARCHES; do
for os in linux macos; do
local target
target=$(cli_target_for "$arch" "$os") || continue
while ! gh run download -R $REPO "$CLI_RUN_ID" -n "start-cli_$target" -D "$(pwd)"; do sleep 1; done
mv start-cli "start-cli_${arch}-${os}"
done
done
fi
}
cmd_pull() {
require_version
ensure_release_dir
echo "Downloading release assets from tag v$VERSION..."
# Download debs and CLI binaries from the GH release
for file in $(gh release view -R $REPO "v$VERSION" --json assets -q '.assets[].name' | grep -E '\.(deb)$|^start-cli_'); do
gh release download -R $REPO "v$VERSION" -p "$file" -D "$(pwd)" --clobber
done
# Download ISOs and squashfs from S3 CDN
for arch in $ARCHES; do
for ext in squashfs iso; do
# Get the actual filename from the GH release asset list or body
local filename
filename=$(gh release view -R $REPO "v$VERSION" --json assets -q ".assets[].name" | grep "_${arch}\\.${ext}$" || true)
if [ -z "$filename" ]; then
filename=$(gh release view -R $REPO "v$VERSION" --json body -q .body | grep -oP "[^ ]*_${arch}\\.${ext}" | head -1 || true)
fi
if [ -n "$filename" ]; then
echo "Downloading $filename from S3..."
curl -fSL -o "$filename" "$S3_CDN/v$VERSION/$filename"
fi
done
done
}
cmd_register() {
require_version
enter_release_dir
start-cli --registry=$REGISTRY registry os version add "$VERSION" "v$VERSION" '' ">=0.3.5 <=$VERSION"
}
cmd_upload() {
require_version
enter_release_dir
for file in $(release_files); do
case "$file" in
*.iso|*.squashfs)
s3cmd put -P "$file" "$S3_BUCKET/v$VERSION/$file"
;;
*)
gh release upload -R $REPO "v$VERSION" "$file"
;;
esac
done
}
cmd_index() {
require_version
enter_release_dir
for arch in $ARCHES; do
for file in *_"$arch".squashfs *_"$arch".iso; do
start-cli --registry=$REGISTRY registry os asset add --platform="$arch" --version="$VERSION" "$file" "$S3_CDN/v$VERSION/$file"
done
done
}
cmd_sign() {
require_version
enter_release_dir
resolve_gh_user
mkdir -p signatures
for file in $(release_files); do
gpg -u $START9_GPG_KEY --detach-sign --armor -o "signatures/${file}.start9.asc" "$file"
if [ -n "$GH_USER" ] && [ -n "$GH_GPG_KEY" ]; then
gpg -u "$GH_GPG_KEY" --detach-sign --armor -o "signatures/${file}.${GH_USER}.asc" "$file"
fi
done
gpg --export -a $START9_GPG_KEY > signatures/start9.key.asc
if [ -n "$GH_USER" ] && [ -n "$GH_GPG_KEY" ]; then
gpg --export -a "$GH_GPG_KEY" > "signatures/${GH_USER}.key.asc"
else
>&2 echo 'Warning: could not determine GitHub user or GPG signing key, skipping personal signature'
fi
tar -czvf signatures.tar.gz -C signatures .
gh release upload -R $REPO "v$VERSION" signatures.tar.gz --clobber
}
cmd_cosign() {
require_version
enter_release_dir
resolve_gh_user
if [ -z "$GH_USER" ] || [ -z "$GH_GPG_KEY" ]; then
>&2 echo 'Error: could not determine GitHub user or GPG signing key'
>&2 echo "Set GH_USER and/or configure git user.signingkey"
exit 1
fi
echo "Downloading existing signatures..."
gh release download -R $REPO "v$VERSION" -p "signatures.tar.gz" -D "$(pwd)" --clobber
mkdir -p signatures
tar -xzf signatures.tar.gz -C signatures
echo "Adding personal signatures as $GH_USER..."
for file in $(release_files); do
gpg -u "$GH_GPG_KEY" --detach-sign --armor -o "signatures/${file}.${GH_USER}.asc" "$file"
done
gpg --export -a "$GH_GPG_KEY" > "signatures/${GH_USER}.key.asc"
echo "Re-packing signatures..."
tar -czvf signatures.tar.gz -C signatures .
gh release upload -R $REPO "v$VERSION" signatures.tar.gz --clobber
echo "Done. Personal signatures for $GH_USER added to v$VERSION."
}
cmd_notes() {
require_version
enter_release_dir
cat << EOF
# ISO Downloads
- [x86_64/AMD64]($S3_CDN/v$VERSION/$(ls *_x86_64-nonfree.iso))
- [x86_64/AMD64 + NVIDIA]($S3_CDN/v$VERSION/$(ls *_x86_64-nvidia.iso))
- [x86_64/AMD64-slim (FOSS-only)]($S3_CDN/v$VERSION/$(ls *_x86_64.iso) "Without proprietary software or drivers")
- [aarch64/ARM64]($S3_CDN/v$VERSION/$(ls *_aarch64-nonfree.iso))
- [aarch64/ARM64 + NVIDIA]($S3_CDN/v$VERSION/$(ls *_aarch64-nvidia.iso))
- [aarch64/ARM64-slim (FOSS-Only)]($S3_CDN/v$VERSION/$(ls *_aarch64.iso) "Without proprietary software or drivers")
- [RISCV64 (RVA23)]($S3_CDN/v$VERSION/$(ls *_riscv64-nonfree.iso))
- [RISCV64 (RVA23)-slim (FOSS-only)]($S3_CDN/v$VERSION/$(ls *_riscv64.iso) "Without proprietary software or drivers")
EOF
cat << 'EOF'
# StartOS Checksums
## SHA-256
```
EOF
sha256sum *.iso *.squashfs
cat << 'EOF'
```
## BLAKE-3
```
EOF
b3sum *.iso *.squashfs
cat << 'EOF'
```
# Start-Tunnel Checksums
## SHA-256
```
EOF
sha256sum start-tunnel*.deb
cat << 'EOF'
```
## BLAKE-3
```
EOF
b3sum start-tunnel*.deb
cat << 'EOF'
```
# start-cli Checksums
## SHA-256
```
EOF
release_files | grep '^start-cli_' | xargs sha256sum
cat << 'EOF'
```
## BLAKE-3
```
EOF
release_files | grep '^start-cli_' | xargs b3sum
cat << 'EOF'
```
EOF
}
cmd_full_release() {
cmd_download
cmd_register
cmd_upload
cmd_index
cmd_sign
cmd_notes
}
usage() {
cat << 'EOF'
Usage: manage-release.sh <subcommand>
Subcommands:
download Download artifacts from GitHub Actions runs
Requires: RUN_ID, ST_RUN_ID, CLI_RUN_ID (any combination)
pull Download an existing release from the GH tag and S3
register Register the version in the Start9 registry
upload Upload artifacts to GitHub Releases and S3
index Add assets to the registry index
sign Sign all artifacts with Start9 org key (+ personal key if available)
and upload signatures.tar.gz
cosign Add personal GPG signature to an existing release's signatures
(requires 'pull' first so you can verify assets before signing)
notes Print release notes with download links and checksums
full-release Run: download → register → upload → index → sign → notes
Environment variables:
VERSION (required) Release version
RUN_ID GitHub Actions run ID for OS images (download subcommand)
ST_RUN_ID GitHub Actions run ID for start-tunnel (download subcommand)
CLI_RUN_ID GitHub Actions run ID for start-cli (download subcommand)
GH_USER Override GitHub username (default: autodetected via gh cli)
CLEAN Set to 1 to wipe and recreate the release directory
EOF
}
case "${1:-}" in
download) cmd_download ;;
pull) cmd_pull ;;
register) cmd_register ;;
upload) cmd_upload ;;
index) cmd_index ;;
sign) cmd_sign ;;
cosign) cmd_cosign ;;
notes) cmd_notes ;;
full-release) cmd_full_release ;;
*) usage; exit 1 ;;
esac

View File

@@ -1,4 +1,4 @@
-FROM debian:trixie
+FROM debian:bookworm
 RUN apt-get update && \
     apt-get install -y \
@@ -12,14 +12,45 @@ RUN apt-get update && \
     jq \
     gzip \
     brotli \
+    qemu-user-static \
+    binfmt-support \
     squashfs-tools \
     git \
-    debspawn \
     rsync \
     b3sum \
-    fuse-overlayfs \
     sudo \
-    nodejs
+    systemd \
+    systemd-container \
+    systemd-sysv \
+    dbus \
+    dbus-user-session
+RUN systemctl mask \
+    systemd-firstboot.service \
+    systemd-udevd.service \
+    getty@tty1.service \
+    console-getty.service
 RUN git config --global --add safe.directory /root/start-os
-RUN mkdir -p /etc/debspawn && \
-    echo "AllowUnsafePermissions=true" > /etc/debspawn/global.toml
+ENV NVM_DIR=~/.nvm
+ENV NODE_VERSION=22
+RUN mkdir -p $NVM_DIR && \
+    curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/master/install.sh | bash && \
+    . $NVM_DIR/nvm.sh \
+    nvm install $NODE_VERSION && \
+    nvm use $NODE_VERSION && \
+    nvm alias default $NODE_VERSION && \
+    ln -s $(which node) /usr/bin/node && \
+    ln -s $(which npm) /usr/bin/npm
 RUN mkdir -p /root/start-os
 WORKDIR /root/start-os
+COPY docker-entrypoint.sh /docker-entrypoint.sh
+ENTRYPOINT [ "/docker-entrypoint.sh" ]

View File

@@ -0,0 +1,3 @@
#!/bin/bash
exec /lib/systemd/systemd --unit=multi-user.target --show-status=false --log-target=journal

View File

@@ -1,30 +1,27 @@
 #!/bin/bash
-pwd=$(pwd)
-cd "$(dirname "${BASH_SOURCE[0]}")/../.."
-set -e
-rel_pwd="${pwd#"$(pwd)"}"
-COMPAT_ARCH=$(uname -m)
-platform=linux/$COMPAT_ARCH
-case $COMPAT_ARCH in
-    x86_64)
-        platform=linux/amd64;;
-    aarch64)
-        platform=linux/arm64;;
-esac
 if [ "$FORCE_COMPAT" = 1 ] || ( [ "$REQUIRES" = "linux" ] && [ "$(uname -s)" != "Linux" ] ) || ( [ "$REQUIRES" = "debian" ] && ! which dpkg > /dev/null ); then
+    project_pwd="$(cd "$(dirname "${BASH_SOURCE[0]}")"/../.. && pwd)/"
+    pwd="$(pwd)/"
+    if ! [[ "$pwd" = "$project_pwd"* ]]; then
+        >&2 echo "Must be run from start-os project dir"
+        exit 1
+    fi
+    rel_pwd="${pwd#"$project_pwd"}"
+    SYSTEMD_TTY="-P"
+    USE_TTY=
     if tty -s; then
         USE_TTY="-it"
+        SYSTEMD_TTY="-t"
     fi
-    docker run $USE_TTY --platform=$platform -eARCH -eENVIRONMENT -ePLATFORM -eGIT_BRANCH_AS_HASH -ePROJECT -eDEPENDS -eCONFLICTS -w "/root/start-os${rel_pwd}" --rm -v "$(pwd):/root/start-os" start9/build-env $@
+    docker run -d --rm --name os-compat --privileged --security-opt apparmor=unconfined -v "${project_pwd}:/root/start-os" -v /lib/modules:/lib/modules:ro start9/build-env
+    while ! docker exec os-compat systemctl is-active --quiet multi-user.target 2> /dev/null; do sleep .5; done
+    docker exec -eARCH -eENVIRONMENT -ePLATFORM -eGIT_BRANCH_AS_HASH -ePROJECT -eDEPENDS -eCONFLICTS $USE_TTY -w "/root/start-os${rel_pwd}" os-compat $@
+    code=$?
+    docker stop os-compat
+    exit $code
 else
     exec $@
 fi

build/raspberrypi/make-image.sh Executable file
View File

@@ -0,0 +1,87 @@
#!/bin/bash
set -e
function partition_for () {
if [[ "$1" =~ [0-9]+$ ]]; then
echo "$1p$2"
else
echo "$1$2"
fi
}
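# e.g. partition_for /dev/loop0 2 -> /dev/loop0p2, partition_for /dev/sda 2 -> /dev/sda2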
VERSION=$(cat VERSION.txt)
ENVIRONMENT=$(cat ENVIRONMENT.txt)
GIT_HASH=$(cat GIT_HASH.txt | head -c 7)
DATE=$(date +%Y%m%d)
ROOT_PART_END=7217792
VERSION_FULL="$VERSION-$GIT_HASH"
if [ -n "$ENVIRONMENT" ]; then
VERSION_FULL="$VERSION_FULL~$ENVIRONMENT"
fi
TARGET_NAME=startos-${VERSION_FULL}-${DATE}_raspberrypi.img
TARGET_SIZE=$[($ROOT_PART_END+1)*512]
rm -f $TARGET_NAME
truncate -s $TARGET_SIZE $TARGET_NAME
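# fdisk answers below: o = new MBR label; x, i = expert mode, set disk ID to
# 0xcb15ae4d; r = return; n/p/1 and n/p/2 = create the boot and root primary
# partitions at the given sector bounds; t/c = type W95 FAT32 (LBA) for
# partition 1; a/1 = set the boot flag; w = write and quit.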
(
echo o
echo x
echo i
echo "0xcb15ae4d"
echo r
echo n
echo p
echo 1
echo 2048
echo 526335
echo t
echo c
echo n
echo p
echo 2
echo 526336
echo $ROOT_PART_END
echo a
echo 1
echo w
) | fdisk $TARGET_NAME
OUTPUT_DEVICE=$(sudo losetup --show -fP $TARGET_NAME)
sudo mkfs.ext4 `partition_for ${OUTPUT_DEVICE} 2`
sudo mkfs.vfat `partition_for ${OUTPUT_DEVICE} 1`
TMPDIR=$(mktemp -d)
sudo mount `partition_for ${OUTPUT_DEVICE} 2` $TMPDIR
sudo mkdir $TMPDIR/boot
sudo mount `partition_for ${OUTPUT_DEVICE} 1` $TMPDIR/boot
sudo unsquashfs -f -d $TMPDIR startos.raspberrypi.squashfs
REAL_GIT_HASH=$(cat $TMPDIR/usr/lib/startos/GIT_HASH.txt)
REAL_VERSION=$(cat $TMPDIR/usr/lib/startos/VERSION.txt)
REAL_ENVIRONMENT=$(cat $TMPDIR/usr/lib/startos/ENVIRONMENT.txt)
sudo sed -i 's| boot=startos| init=/usr/lib/startos/scripts/init_resize\.sh|' $TMPDIR/boot/cmdline.txt
sudo cp ./build/raspberrypi/fstab $TMPDIR/etc/
sudo cp ./build/raspberrypi/init_resize.sh $TMPDIR/usr/lib/startos/scripts/init_resize.sh
sudo umount $TMPDIR/boot
sudo umount $TMPDIR
sudo losetup -d $OUTPUT_DEVICE
if [ "$ALLOW_VERSION_MISMATCH" != 1 ]; then
if [ "$(cat GIT_HASH.txt)" != "$REAL_GIT_HASH" ]; then
>&2 echo "startos.raspberrypi.squashfs GIT_HASH.txt mismatch"
>&2 echo "expected $REAL_GIT_HASH (dpkg) found $(cat GIT_HASH.txt) (repo)"
exit 1
fi
if [ "$(cat VERSION.txt)" != "$REAL_VERSION" ]; then
>&2 echo "startos.raspberrypi.squashfs VERSION.txt mismatch"
exit 1
fi
if [ "$(cat ENVIRONMENT.txt)" != "$REAL_ENVIRONMENT" ]; then
>&2 echo "startos.raspberrypi.squashfs ENVIRONMENT.txt mismatch"
exit 1
fi
fi

View File

@@ -1,36 +0,0 @@
#!/bin/bash
# Save Docker images needed by the 0.3.6-alpha.0 migration as tarballs
# so they can be bundled into the OS and loaded without internet access.
set -e
ARCH="${ARCH:-x86_64}"
DESTDIR="${1:-build/lib/migration-images}"
if [ "$ARCH" = "x86_64" ]; then
DOCKER_PLATFORM="linux/amd64"
elif [ "$ARCH" = "aarch64" ]; then
DOCKER_PLATFORM="linux/arm64"
else
DOCKER_PLATFORM="linux/$ARCH"
fi
IMAGES=("tonistiigi/binfmt:latest")
if [ "$ARCH" != "riscv64" ]; then
IMAGES=("start9/compat:latest" "start9/utils:latest" "${IMAGES[@]}")
fi
mkdir -p "$DESTDIR"
for IMAGE in "${IMAGES[@]}"; do
FILENAME=$(echo "$IMAGE" | sed 's|/|_|g; s/:/_/g').tar
if [ -f "$DESTDIR/$FILENAME" ]; then
echo "Skipping $IMAGE (already saved)"
continue
fi
echo "Pulling $IMAGE for $DOCKER_PLATFORM..."
docker pull --platform "$DOCKER_PLATFORM" "$IMAGE"
echo "Saving $IMAGE to $DESTDIR/$FILENAME..."
docker save "$IMAGE" -o "$DESTDIR/$FILENAME"
done
echo "Migration images saved to $DESTDIR"

View File

@@ -1,10 +1,8 @@
 #!/bin/bash
-cd "$(dirname "${BASH_SOURCE[0]}")"
 if ! [ -f ./ENVIRONMENT.txt ] || [ "$(cat ./ENVIRONMENT.txt)" != "$ENVIRONMENT" ]; then
    >&2 echo "Updating ENVIRONMENT.txt to \"$ENVIRONMENT\""
    echo -n "$ENVIRONMENT" > ./ENVIRONMENT.txt
 fi
-echo -n ./build/env/ENVIRONMENT.txt
+echo -n ./ENVIRONMENT.txt

View File

@@ -1,7 +1,5 @@
 #!/bin/bash
-cd "$(dirname "${BASH_SOURCE[0]}")"
 if [ "$GIT_BRANCH_AS_HASH" != 1 ]; then
    GIT_HASH="$(git rev-parse HEAD)$(if ! git diff-index --quiet HEAD --; then echo '-modified'; fi)"
 else
@@ -13,4 +11,4 @@ if ! [ -f ./GIT_HASH.txt ] || [ "$(cat ./GIT_HASH.txt)" != "$GIT_HASH" ]; then
    echo -n "$GIT_HASH" > ./GIT_HASH.txt
 fi
-echo -n ./build/env/GIT_HASH.txt
+echo -n ./GIT_HASH.txt

View File

@@ -1,10 +1,8 @@
 #!/bin/bash
-cd "$(dirname "${BASH_SOURCE[0]}")"
 if ! [ -f ./PLATFORM.txt ] || [ "$(cat ./PLATFORM.txt)" != "$PLATFORM" ] && [ -n "$PLATFORM" ]; then
    >&2 echo "Updating PLATFORM.txt to \"$PLATFORM\""
    echo -n "$PLATFORM" > ./PLATFORM.txt
 fi
-echo -n ./build/env/PLATFORM.txt
+echo -n ./PLATFORM.txt

View File

@@ -1,8 +1,6 @@
 #!/bin/bash
-cd "$(dirname "${BASH_SOURCE[0]}")"
-FE_VERSION="$(cat ../../web/package.json | grep '"version"' | sed 's/[ \t]*"version":[ \t]*"\([^"]*\)",/\1/')"
+FE_VERSION="$(cat web/package.json | grep '"version"' | sed 's/[ \t]*"version":[ \t]*"\([^"]*\)",/\1/')"
 # TODO: Validate other version sources - backend/Cargo.toml, backend/src/version/mod.rs
@@ -12,4 +10,4 @@ if ! [ -f ./VERSION.txt ] || [ "$(cat ./VERSION.txt)" != "$VERSION" ]; then
    echo -n "$VERSION" > ./VERSION.txt
 fi
-echo -n ./build/env/VERSION.txt
+echo -n ./VERSION.txt

View File

@@ -4,8 +4,8 @@ cd "$(dirname "${BASH_SOURCE[0]}")"
 set -e
-STATIC_DIR=dist/static/$1
-RAW_DIR=dist/raw/$1
+STATIC_DIR=web/dist/static/$1
+RAW_DIR=web/dist/raw/$1
 mkdir -p $STATIC_DIR
 rm -rf $STATIC_DIR

View File

@@ -1,32 +0,0 @@
# Container Runtime — Node.js Service Manager
Node.js runtime that manages service containers via JSON-RPC. See `RPCSpec.md` in this directory for the full RPC protocol.
## Architecture
```
LXC Container (uniform base for all services)
└── systemd
└── container-runtime.service
└── Loads /usr/lib/startos/package/index.js (from s9pk javascript.squashfs)
└── Package JS launches subcontainers (from images in s9pk)
```
The container runtime communicates with the host via JSON-RPC over Unix socket. Package JavaScript must export functions conforming to the `ABI` type defined in `sdk/base/lib/types.ts`.
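For orientation, a minimal sketch of a client round-trip (framing and values illustrative; the authoritative protocol is `RPCSpec.md`):

```ts
import * as net from "net"

// Connect to the runtime's RPC socket and invoke `start` (illustrative only).
const sock = net.createConnection("/media/startos/rpc/service.sock", () => {
  sock.write(JSON.stringify({ jsonrpc: "2.0", id: 1, method: "start", params: null }))
})
sock.on("data", (buf) => console.log("runtime replied:", buf.toString()))
```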
## `/media/startos/` Directory (mounted by host into container)
| Path | Description |
| -------------------- | ----------------------------------------------------- |
| `volumes/<name>/` | Package data volumes (id-mapped, persistent) |
| `assets/` | Read-only assets from s9pk `assets.squashfs` |
| `images/<name>/` | Container images (squashfs, used for subcontainers) |
| `images/<name>.env` | Environment variables for image |
| `images/<name>.json` | Image metadata |
| `backup/` | Backup mount point (mounted during backup operations) |
| `rpc/service.sock` | RPC socket (container runtime listens here) |
| `rpc/host.sock` | Host RPC socket (for effects callbacks to host) |
## S9PK Structure
See `../core/s9pk-structure.md` for the S9PK package format.

View File

@@ -1,21 +1,16 @@
-# Container RPC Server Specification
+# Container RPC SERVER Specification
-The container runtime exposes a JSON-RPC server over a Unix socket at `/media/startos/rpc/service.sock`.
 ## Methods
 ### init
-Initialize the runtime and system.
+initialize runtime (mount `/proc`, `/sys`, `/dev`, and `/run` to each image in `/media/images`)
+called after os has mounted js and images to the container
-#### params
+#### args
-```ts
-{
-  id: string,
-  kind: "install" | "update" | "restore" | null,
-}
-```
+`[]`
 #### response
@@ -23,16 +18,11 @@ Initialize the runtime and system.
 ### exit
-Shutdown runtime and optionally run exit hooks for a target version.
+shutdown runtime
-#### params
+#### args
-```ts
-{
-  id: string,
-  target: string | null, // ExtendedVersion or VersionRange
-}
-```
+`[]`
 #### response
@@ -40,11 +30,11 @@ Shutdown runtime and optionally run exit hooks for a target version.
 ### start
-Run main method if not already running.
+run main method if not already running
-#### params
+#### args
-None
+`[]`
 #### response
@@ -52,11 +42,11 @@ None
 ### stop
-Stop main method by sending SIGTERM to child processes, and SIGKILL after timeout.
+stop main method by sending SIGTERM to child processes, and SIGKILL after timeout
-#### params
+#### args
-None
+`{ timeout: millis }`
 #### response
@@ -64,16 +54,15 @@ None
 ### execute
-Run a specific package procedure.
+run a specific package procedure
-#### params
+#### args
 ```ts
 {
-  id: string, // event ID
-  procedure: string, // JSON path (e.g., "/backup/create", "/actions/{name}/run")
+  procedure: JsonPath,
   input: any,
-  timeout: number | null,
+  timeout: millis,
 }
 ```
@@ -83,64 +72,18 @@ Run a specific package procedure.
 ### sandbox
-Run a specific package procedure in sandbox mode. Same interface as `execute`.
-UNIMPLEMENTED: this feature is planned but does not exist
+run a specific package procedure in sandbox mode
-#### params
+#### args
 ```ts
 {
-  id: string,
-  procedure: string,
+  procedure: JsonPath,
   input: any,
-  timeout: number | null,
+  timeout: millis,
 }
 ```
 #### response
 `any`
-### callback
-Handle a callback from an effect.
-#### params
-```ts
-{
-  id: number,
-  args: any[],
-}
-```
-#### response
-`null` (no response sent)
-### eval
-Evaluate a script in the runtime context. Used for debugging.
-#### params
-```ts
-{
-  script: string,
-}
-```
-#### response
-`any`
-## Procedures
-The `execute` and `sandbox` methods route to procedures based on the `procedure` path:
-| Procedure                  | Description                  |
-| -------------------------- | ---------------------------- |
-| `/backup/create`           | Create a backup              |
-| `/actions/{name}/getInput` | Get input spec for an action |
-| `/actions/{name}/run`      | Run an action with input     |

View File

@@ -1,30 +0,0 @@
// Mock for ESM-only mime package — Jest's module loader doesn't support require(esm)
const types = {
".png": "image/png",
".jpg": "image/jpeg",
".jpeg": "image/jpeg",
".gif": "image/gif",
".svg": "image/svg+xml",
".webp": "image/webp",
".ico": "image/x-icon",
".json": "application/json",
".js": "application/javascript",
".html": "text/html",
".css": "text/css",
".txt": "text/plain",
".md": "text/markdown",
}
module.exports = {
default: {
getType(path) {
const ext = "." + path.split(".").pop()
return types[ext] || null
},
getExtension(type) {
const entry = Object.entries(types).find(([, v]) => v === type)
return entry ? entry[0].slice(1) : null
},
},
__esModule: true,
}

View File

@@ -4,8 +4,7 @@ OnFailure=container-runtime-failure.service
[Service] [Service]
Type=simple Type=simple
Environment=RUST_LOG=startos=debug ExecStart=/usr/bin/node --experimental-detect-module --trace-warnings --unhandled-rejections=warn /usr/lib/startos/init/index.js
ExecStart=/usr/bin/start-container pipe-wrap /usr/bin/node --experimental-detect-module --trace-warnings /usr/lib/startos/init/index.js
Restart=no Restart=no
[Install] [Install]

View File

@@ -2,6 +2,9 @@
 set -e
+mkdir -p /run/systemd/resolve
+echo "nameserver 8.8.8.8" > /run/systemd/resolve/stub-resolv.conf
 apt-get update
 apt-get install -y curl rsync qemu-user-static nodejs
@@ -13,4 +16,7 @@ sed -i '/\(^\|#\)ForwardToSyslog=/c\ForwardToSyslog=no' /etc/systemd/journald.co
 systemctl enable container-runtime.service
+rm -rf /run/systemd
+rm -f /etc/resolv.conf
 echo "nameserver 10.0.3.1" > /etc/resolv.conf

View File

@@ -5,7 +5,4 @@ module.exports = {
   testEnvironment: "node",
   rootDir: "./src/",
   modulePathIgnorePatterns: ["./dist/"],
-  moduleNameMapper: {
-    "^mime$": "<rootDir>/../__mocks__/mime.js",
-  },
 }

View File

@@ -0,0 +1,28 @@
#!/bin/bash
set -e
IMAGE=$1
if [ -z "$IMAGE" ]; then
>&2 echo "usage: $0 <image id>"
exit 1
fi
if ! [ -d "/media/images/$IMAGE" ]; then
>&2 echo "image does not exist"
exit 1
fi
container=$(mktemp -d)
mkdir -p $container/rootfs $container/upper $container/work
mount -t overlay -olowerdir=/media/images/$IMAGE,upperdir=$container/upper,workdir=$container/work overlay $container/rootfs
rootfs=$container/rootfs
for special in dev sys proc run; do
mkdir -p $rootfs/$special
mount --bind /$special $rootfs/$special
done
echo $rootfs

View File

@@ -19,6 +19,7 @@
     "lodash.merge": "^4.6.2",
     "mime": "^4.0.7",
     "node-fetch": "^3.1.0",
+    "ts-matches": "^6.3.2",
     "tslib": "^2.5.3",
     "typescript": "^5.1.3",
     "yaml": "^2.3.1"
@@ -37,7 +38,7 @@
 },
 "../sdk/dist": {
     "name": "@start9labs/start-sdk",
-    "version": "0.4.0-beta.66",
+    "version": "0.4.0-beta.43",
     "license": "MIT",
     "dependencies": {
         "@iarna/toml": "^3.0.0",
@@ -45,13 +46,11 @@
         "@noble/hashes": "^1.7.2",
         "@types/ini": "^4.1.1",
         "deep-equality-data-structures": "^2.0.0",
-        "fast-xml-parser": "^5.5.6",
         "ini": "^5.0.0",
         "isomorphic-fetch": "^3.0.0",
         "mime": "^4.0.7",
-        "yaml": "^2.7.1",
-        "zod": "^4.3.6",
-        "zod-deep-partial": "^1.2.0"
+        "ts-matches": "^6.3.2",
+        "yaml": "^2.7.1"
     },
     "devDependencies": {
         "@types/jest": "^29.4.0",
@@ -6495,6 +6494,12 @@
             }
         }
     },
+    "node_modules/ts-matches": {
+        "version": "6.3.2",
+        "resolved": "https://registry.npmjs.org/ts-matches/-/ts-matches-6.3.2.tgz",
+        "integrity": "sha512-UhSgJymF8cLd4y0vV29qlKVCkQpUtekAaujXbQVc729FezS8HwqzepqvtjzQ3HboatIqN/Idor85O2RMwT7lIQ==",
+        "license": "MIT"
+    },
     "node_modules/tslib": {
         "version": "2.8.1",
         "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",

View File

@@ -28,6 +28,7 @@
     "lodash.merge": "^4.6.2",
     "mime": "^4.0.7",
     "node-fetch": "^3.1.0",
+    "ts-matches": "^6.3.2",
     "tslib": "^2.5.3",
     "typescript": "^5.1.3",
     "yaml": "^2.3.1"

View File

@@ -0,0 +1,12 @@
#!/bin/bash
set -e
rootfs=$1
if [ -z "$rootfs" ]; then
>&2 echo "usage: $0 <container rootfs path>"
exit 1
fi
umount --recursive $rootfs
rm -rf $rootfs/..

View File

@@ -3,39 +3,33 @@ import {
   types as T,
   utils,
   VersionRange,
-  z,
 } from "@start9labs/start-sdk"
 import * as net from "net"
+import { object, string, number, literals, some, unknown } from "ts-matches"
 import { Effects } from "../Models/Effects"
 import { CallbackHolder } from "../Models/CallbackHolder"
 import { asError } from "@start9labs/start-sdk/base/lib/util"
-const matchRpcError = z.object({
-  error: z.object({
-    code: z.number(),
-    message: z.string(),
-    data: z
-      .union([
-        z.string(),
-        z.object({
-          details: z.string(),
-          debug: z.string().nullable().optional(),
-        }),
-      ])
+const matchRpcError = object({
+  error: object({
+    code: number,
+    message: string,
+    data: some(
+      string,
+      object({
+        details: string,
+        debug: string.nullable().optional(),
+      }),
+    )
       .nullable()
       .optional(),
   }),
 })
-function testRpcError(v: unknown): v is RpcError {
-  return matchRpcError.safeParse(v).success
-}
-const matchRpcResult = z.object({
-  result: z.unknown(),
-})
-function testRpcResult(v: unknown): v is z.infer<typeof matchRpcResult> {
-  return matchRpcResult.safeParse(v).success
-}
-type RpcError = z.infer<typeof matchRpcError>
+const testRpcError = matchRpcError.test
+const testRpcResult = object({
+  result: unknown,
+}).test
+type RpcError = typeof matchRpcError._TYPE
 const SOCKET_PATH = "/media/startos/rpc/host.sock"
 let hostSystemId = 0
@@ -77,7 +71,7 @@ const rpcRoundFor =
           "Error in host RPC:",
           utils.asError({ method, params, error: res.error }),
         )
-        if (typeof res.error.data === "string") {
+        if (string.test(res.error.data)) {
           message += ": " + res.error.data
           console.error(`Details: ${res.error.data}`)
         } else {
@@ -184,14 +178,6 @@ export function makeEffects(context: EffectContext): Effects {
         T.Effects["getInstalledPackages"]
       >
     },
-    getServiceManifest(
-      ...[options]: Parameters<T.Effects["getServiceManifest"]>
-    ) {
-      return rpcRound("get-service-manifest", {
-        ...options,
-        callback: context.callbacks?.addCallback(options.callback) || null,
-      }) as ReturnType<T.Effects["getServiceManifest"]>
-    },
     subcontainer: {
       createFs(options: { imageId: string; name: string }) {
         return rpcRound("subcontainer.create-fs", options) as ReturnType<
@@ -212,10 +198,9 @@
       >
     }) as Effects["exportServiceInterface"],
     getContainerIp(...[options]: Parameters<T.Effects["getContainerIp"]>) {
-      return rpcRound("get-container-ip", {
-        ...options,
-        callback: context.callbacks?.addCallback(options.callback) || null,
-      }) as ReturnType<T.Effects["getContainerIp"]>
+      return rpcRound("get-container-ip", options) as ReturnType<
+        T.Effects["getContainerIp"]
+      >
     },
    getOsIp(...[]: Parameters<T.Effects["getOsIp"]>) {
      return rpcRound("get-os-ip", {}) as ReturnType<T.Effects["getOsIp"]>
@@ -246,10 +231,9 @@
       >
     },
     getSslCertificate(options: Parameters<T.Effects["getSslCertificate"]>[0]) {
-      return rpcRound("get-ssl-certificate", {
-        ...options,
-        callback: context.callbacks?.addCallback(options.callback) || null,
-      }) as ReturnType<T.Effects["getSslCertificate"]>
+      return rpcRound("get-ssl-certificate", options) as ReturnType<
+        T.Effects["getSslCertificate"]
+      >
     },
     getSslKey(options: Parameters<T.Effects["getSslKey"]>[0]) {
       return rpcRound("get-ssl-key", options) as ReturnType<
@@ -262,14 +246,6 @@ export function makeEffects(context: EffectContext): Effects {
callback: context.callbacks?.addCallback(options.callback) || null, callback: context.callbacks?.addCallback(options.callback) || null,
}) as ReturnType<T.Effects["getSystemSmtp"]> }) as ReturnType<T.Effects["getSystemSmtp"]>
}, },
getOutboundGateway(
...[options]: Parameters<T.Effects["getOutboundGateway"]>
) {
return rpcRound("get-outbound-gateway", {
...options,
callback: context.callbacks?.addCallback(options.callback) || null,
}) as ReturnType<T.Effects["getOutboundGateway"]>
},
listServiceInterfaces( listServiceInterfaces(
...[options]: Parameters<T.Effects["listServiceInterfaces"]> ...[options]: Parameters<T.Effects["listServiceInterfaces"]>
) { ) {
@@ -311,12 +287,8 @@ export function makeEffects(context: EffectContext): Effects {
}, },
getStatus(...[o]: Parameters<T.Effects["getStatus"]>) { getStatus(...[o]: Parameters<T.Effects["getStatus"]>) {
return rpcRound("get-status", { return rpcRound("get-status", o) as ReturnType<T.Effects["getStatus"]>
...o,
callback: context.callbacks?.addCallback(o.callback) || null,
}) as ReturnType<T.Effects["getStatus"]>
}, },
/// DEPRECATED
setMainStatus(o: { status: "running" | "stopped" }): Promise<null> { setMainStatus(o: { status: "running" | "stopped" }): Promise<null> {
return rpcRound("set-main-status", o) as ReturnType< return rpcRound("set-main-status", o) as ReturnType<
T.Effects["setHealth"] T.Effects["setHealth"]
@@ -336,35 +308,9 @@ export function makeEffects(context: EffectContext): Effects {
T.Effects["setDataVersion"] T.Effects["setDataVersion"]
> >
}, },
plugin: {
url: {
register(
...[options]: Parameters<T.Effects["plugin"]["url"]["register"]>
) {
return rpcRound("plugin.url.register", options) as ReturnType<
T.Effects["plugin"]["url"]["register"]
>
},
exportUrl(
...[options]: Parameters<T.Effects["plugin"]["url"]["exportUrl"]>
) {
return rpcRound("plugin.url.export-url", options) as ReturnType<
T.Effects["plugin"]["url"]["exportUrl"]
>
},
clearUrls(
...[options]: Parameters<T.Effects["plugin"]["url"]["clearUrls"]>
) {
return rpcRound("plugin.url.clear-urls", options) as ReturnType<
T.Effects["plugin"]["url"]["clearUrls"]
>
},
},
},
} }
if (context.callbacks?.onLeaveContext) if (context.callbacks?.onLeaveContext)
self.onLeaveContext(() => { self.onLeaveContext(() => {
self.constRetry = undefined
self.isInContext = false self.isInContext = false
self.onLeaveContext = () => { self.onLeaveContext = () => {
console.warn( console.warn(
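
For readers unfamiliar with ts-matches, a parser built with `object({ ... })` replaces three zod idioms at once in the hunks above. A minimal sketch of the correspondence, using an invented `matchPoint` parser (not part of this codebase) and only the ts-matches members this diff itself relies on:

```ts
import { object, number } from "ts-matches"

// a parser is a plain value; object({ ... }) plays the role of z.object({ ... })
const matchPoint = object({ x: number, y: number })

// typeof parser._TYPE plays the role of z.infer<typeof schema>
type Point = typeof matchPoint._TYPE

// parser.test plays the role of schema.safeParse(v).success, and is already a type guard
const input: unknown = JSON.parse('{"x": 1, "y": 2}')
if (matchPoint.test(input)) {
  // input is narrowed to Point here
  console.log(input.x + input.y)
}

// parser.unsafeCast(v) plays the role of schema.parse(v): return the typed value or throw
const point: Point = matchPoint.unsafeCast(input)
console.log(point.x, point.y)
```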


@@ -1,13 +1,25 @@
 // @ts-check
 import * as net from "net"
+import {
+  object,
+  some,
+  string,
+  literal,
+  array,
+  number,
+  matches,
+  any,
+  shape,
+  anyOf,
+  literals,
+} from "ts-matches"
 import {
   ExtendedVersion,
   types as T,
   utils,
   VersionRange,
-  z,
 } from "@start9labs/start-sdk"
 import * as fs from "fs"
@@ -17,92 +29,89 @@ import { jsonPath, unNestPath } from "../Models/JsonPath"
 import { System } from "../Interfaces/System"
 import { makeEffects } from "./EffectCreator"
 
 type MaybePromise<T> = T | Promise<T>
-export const matchRpcResult = z.union([
-  z.object({ result: z.any() }),
-  z.object({
-    error: z.object({
-      code: z.number(),
-      message: z.string(),
-      data: z
-        .object({
-          details: z.string().optional(),
-          debug: z.any().optional(),
-        })
+export const matchRpcResult = anyOf(
+  object({ result: any }),
+  object({
+    error: object({
+      code: number,
+      message: string,
+      data: object({
+        details: string.optional(),
+        debug: any.optional(),
+      })
         .nullable()
        .optional(),
    }),
  }),
-])
-export type RpcResult = z.infer<typeof matchRpcResult>
+)
+export type RpcResult = typeof matchRpcResult._TYPE
 type SocketResponse = ({ jsonrpc: "2.0"; id: IdType } & RpcResult) | null
 
 const SOCKET_PARENT = "/media/startos/rpc"
 const SOCKET_PATH = "/media/startos/rpc/service.sock"
 const jsonrpc = "2.0" as const
 
-const isResultSchema = z.object({ result: z.any() })
-const isResult = (v: unknown): v is z.infer<typeof isResultSchema> =>
-  isResultSchema.safeParse(v).success
+const isResult = object({ result: any }).test
-const idType = z.union([z.string(), z.number(), z.literal(null)])
+const idType = some(string, number, literal(null))
 type IdType = null | string | number | undefined
-const runType = z.object({
+const runType = object({
   id: idType.optional(),
-  method: z.literal("execute"),
-  params: z.object({
-    id: z.string(),
-    procedure: z.string(),
-    input: z.any(),
-    timeout: z.number().nullable().optional(),
+  method: literal("execute"),
+  params: object({
+    id: string,
+    procedure: string,
+    input: any,
+    timeout: number.nullable().optional(),
  }),
 })
-const sandboxRunType = z.object({
+const sandboxRunType = object({
   id: idType.optional(),
-  method: z.literal("sandbox"),
-  params: z.object({
-    id: z.string(),
-    procedure: z.string(),
-    input: z.any(),
-    timeout: z.number().nullable().optional(),
+  method: literal("sandbox"),
+  params: object({
+    id: string,
+    procedure: string,
+    input: any,
+    timeout: number.nullable().optional(),
  }),
 })
-const callbackType = z.object({
-  method: z.literal("callback"),
-  params: z.object({
-    id: z.number(),
-    args: z.array(z.unknown()),
+const callbackType = object({
+  method: literal("callback"),
+  params: object({
+    id: number,
+    args: array,
  }),
 })
-const initType = z.object({
+const initType = object({
   id: idType.optional(),
-  method: z.literal("init"),
-  params: z.object({
-    id: z.string(),
-    kind: z.enum(["install", "update", "restore"]).nullable(),
+  method: literal("init"),
+  params: object({
+    id: string,
+    kind: literals("install", "update", "restore").nullable(),
  }),
 })
-const startType = z.object({
+const startType = object({
   id: idType.optional(),
-  method: z.literal("start"),
+  method: literal("start"),
 })
-const stopType = z.object({
+const stopType = object({
   id: idType.optional(),
-  method: z.literal("stop"),
+  method: literal("stop"),
 })
-const exitType = z.object({
+const exitType = object({
   id: idType.optional(),
-  method: z.literal("exit"),
-  params: z.object({
-    id: z.string(),
-    target: z.string().nullable(),
+  method: literal("exit"),
+  params: object({
+    id: string,
+    target: string.nullable(),
  }),
 })
-const evalType = z.object({
+const evalType = object({
   id: idType.optional(),
-  method: z.literal("eval"),
-  params: z.object({
-    script: z.string(),
+  method: literal("eval"),
+  params: object({
+    script: string,
  }),
 })
@@ -135,11 +144,8 @@ const handleRpc = (id: IdType, result: Promise<RpcResult>) =>
    },
  }))
 
-const hasIdSchema = z.object({ id: idType })
-const hasId = (v: unknown): v is z.infer<typeof hasIdSchema> =>
-  hasIdSchema.safeParse(v).success
+const hasId = object({ id: idType }).test
 
 export class RpcListener {
-  shouldExit = false
  unixSocketServer = net.createServer(async (server) => {})
  private _system: System | undefined
  private callbacks: CallbackHolder | undefined
@@ -152,8 +158,6 @@
    this.unixSocketServer.listen(SOCKET_PATH)
 
-    console.log("Listening on %s", SOCKET_PATH)
-
    this.unixSocketServer.on("connection", (s) => {
      let id: IdType = null
      const captureId = <X>(x: X) => {
@@ -204,11 +208,6 @@
          .catch(mapError)
          .then(logData("response"))
          .then(writeDataToSocket)
-          .then((_) => {
-            if (this.shouldExit) {
-              process.exit(0)
-            }
-          })
          .catch((e) => {
            console.error(`Major error in socket handling: ${e}`)
            console.debug(`Data in: ${a.toString()}`)
@@ -239,52 +238,40 @@
  }
 
  private dealWithInput(input: unknown): MaybePromise<SocketResponse> {
-    const parsed = z.object({ method: z.string() }).safeParse(input)
-    if (!parsed.success) {
-      console.warn(
-        `Couldn't parse the following input ${JSON.stringify(input)}`,
-      )
-      return {
-        jsonrpc,
-        id: (input as any)?.id,
-        error: {
-          code: -32602,
-          message: "invalid params",
-          data: {
-            details: JSON.stringify(input),
-          },
-        },
-      }
-    }
-    switch (parsed.data.method) {
-      case "execute": {
-        const { id, params } = runType.parse(input)
+    return matches(input)
+      .when(runType, async ({ id, params }) => {
        const system = this.system
-        const procedure = jsonPath.parse(params.procedure)
-        const { input: inp, timeout, id: eventId } = params
-        const result = this.getResult(procedure, system, eventId, timeout, inp)
+        const procedure = jsonPath.unsafeCast(params.procedure)
+        const { input, timeout, id: eventId } = params
+        const result = this.getResult(
+          procedure,
+          system,
+          eventId,
+          timeout,
+          input,
+        )
        return handleRpc(id, result)
-      }
-      case "sandbox": {
-        const { id, params } = sandboxRunType.parse(input)
+      })
+      .when(sandboxRunType, async ({ id, params }) => {
        const system = this.system
-        const procedure = jsonPath.parse(params.procedure)
-        const { input: inp, timeout, id: eventId } = params
-        const result = this.getResult(procedure, system, eventId, timeout, inp)
+        const procedure = jsonPath.unsafeCast(params.procedure)
+        const { input, timeout, id: eventId } = params
+        const result = this.getResult(
+          procedure,
+          system,
+          eventId,
+          timeout,
+          input,
+        )
        return handleRpc(id, result)
-      }
-      case "callback": {
-        const {
-          params: { id, args },
-        } = callbackType.parse(input)
+      })
+      .when(callbackType, async ({ params: { id, args } }) => {
        this.callCallback(id, args)
        return null
-      }
-      case "start": {
-        const { id } = startType.parse(input)
+      })
+      .when(startType, async ({ id }) => {
        const callbacks =
          this.callbacks?.getChild("main") || this.callbacks?.child("main")
        const effects = makeEffects({
@@ -295,17 +282,15 @@
          id,
          this.system.start(effects).then((result) => ({ result })),
        )
-      }
-      case "stop": {
-        const { id } = stopType.parse(input)
+      })
+      .when(stopType, async ({ id }) => {
        this.callbacks?.removeChild("main")
        return handleRpc(
          id,
          this.system.stop().then((result) => ({ result })),
        )
-      }
-      case "exit": {
-        const { id, params } = exitType.parse(input)
+      })
+      .when(exitType, async ({ id, params }) => {
        return handleRpc(
          id,
          (async () => {
@@ -323,13 +308,11 @@
              }),
              target,
            )
-            this.shouldExit = true
          }
        })().then((result) => ({ result })),
        )
-      }
-      case "init": {
-        const { id, params } = initType.parse(input)
+      })
+      .when(initType, async ({ id, params }) => {
        return handleRpc(
          id,
          (async () => {
@@ -354,9 +337,8 @@
          }
        })().then((result) => ({ result })),
        )
-      }
-      case "eval": {
-        const { id, params } = evalType.parse(input)
+      })
+      .when(evalType, async ({ id, params }) => {
        return handleRpc(
          id,
          (async () => {
@@ -381,28 +363,41 @@
            }
          })(),
        )
-      }
-      default: {
-        const { id, method } = z
-          .object({ id: idType.optional(), method: z.string() })
-          .passthrough()
-          .parse(input)
-        return {
+      })
+      .when(
+        shape({ id: idType.optional(), method: string }),
+        ({ id, method }) => ({
          jsonrpc,
          id,
          error: {
            code: -32601,
-            message: "Method not found",
+            message: `Method not found`,
            data: {
              details: method,
            },
          },
-        }
-      }
-    }
+        }),
+      )
+      .defaultToLazy(() => {
+        console.warn(
+          `Couldn't parse the following input ${JSON.stringify(input)}`,
+        )
+        return {
+          jsonrpc,
+          id: (input as any)?.id,
+          error: {
+            code: -32602,
+            message: "invalid params",
+            data: {
+              details: JSON.stringify(input),
+            },
+          },
+        }
+      })
  }
 
  private getResult(
-    procedure: z.infer<typeof jsonPath>,
+    procedure: typeof jsonPath._TYPE,
    system: System,
    eventId: string,
    timeout: number | null | undefined,
@@ -430,7 +425,6 @@
          return system.getActionInput(
            effects,
            procedures[2],
-            input?.prefill ?? null,
            timeout || null,
          )
        case procedures[1] === "actions" && procedures[3] === "run":
@@ -442,18 +436,26 @@
          )
        }
      }
-    })().then(ensureResultTypeShape, (error) => {
-      const errorSchema = z.object({
-        error: z.string(),
-        code: z.number().default(0),
-      })
-      const parsed = errorSchema.safeParse(error)
-      if (parsed.success) {
-        return {
-          error: { code: parsed.data.code, message: parsed.data.error },
-        }
-      }
-      return { error: { code: 0, message: String(error) } }
-    })
+    })().then(ensureResultTypeShape, (error) =>
+      matches(error)
+        .when(
+          object({
+            error: string,
+            code: number.defaultTo(0),
+          }),
+          (error) => ({
+            error: {
+              code: error.code,
+              message: error.error,
+            },
+          }),
+        )
+        .defaultToLazy(() => ({
+          error: {
+            code: 0,
+            message: String(error),
+          },
+        })),
+    )
  }
 }
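
The `dealWithInput` rewrite above folds a parse-then-switch into ts-matches' fluent dispatcher. A condensed sketch of that control flow, with made-up `ping`/`echo` messages standing in for the real `runType`, `sandboxRunType`, etc.:

```ts
import { matches, object, literal, string } from "ts-matches"

const pingType = object({ method: literal("ping") })
const echoType = object({
  method: literal("echo"),
  params: object({ msg: string }),
})

function dispatch(input: unknown): string {
  return matches(input)
    .when(pingType, () => "pong") // first parser that accepts the value wins
    .when(echoType, ({ params }) => params.msg) // handler receives the parsed, typed value
    .defaultToLazy(() => "unrecognized request") // evaluated only if nothing matched
}

console.log(dispatch({ method: "ping" })) // "pong"
console.log(dispatch({ method: "echo", params: { msg: "hi" } })) // "hi"
console.log(dispatch(42)) // "unrecognized request"
```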


@@ -2,7 +2,7 @@ import * as fs from "fs/promises"
import * as cp from "child_process" import * as cp from "child_process"
import { SubContainer, types as T } from "@start9labs/start-sdk" import { SubContainer, types as T } from "@start9labs/start-sdk"
import { promisify } from "util" import { promisify } from "util"
import { DockerProcedure } from "../../../Models/DockerProcedure" import { DockerProcedure, VolumeId } from "../../../Models/DockerProcedure"
import { Volume } from "./matchVolume" import { Volume } from "./matchVolume"
import { import {
CommandOptions, CommandOptions,
@@ -28,7 +28,7 @@ export class DockerProcedureContainer extends Drop {
effects: T.Effects, effects: T.Effects,
packageId: string, packageId: string,
data: DockerProcedure, data: DockerProcedure,
volumes: { [id: string]: Volume }, volumes: { [id: VolumeId]: Volume },
name: string, name: string,
options: { subcontainer?: SubContainer<SDKManifest> } = {}, options: { subcontainer?: SubContainer<SDKManifest> } = {},
) { ) {
@@ -47,7 +47,7 @@ export class DockerProcedureContainer extends Drop {
effects: T.Effects, effects: T.Effects,
packageId: string, packageId: string,
data: DockerProcedure, data: DockerProcedure,
volumes: { [id: string]: Volume }, volumes: { [id: VolumeId]: Volume },
name: string, name: string,
) { ) {
const subcontainer = await SubContainerOwned.of( const subcontainer = await SubContainerOwned.of(
@@ -64,7 +64,7 @@ export class DockerProcedureContainer extends Drop {
? `${subcontainer.rootfs}${mounts[mount]}` ? `${subcontainer.rootfs}${mounts[mount]}`
: `${subcontainer.rootfs}/${mounts[mount]}` : `${subcontainer.rootfs}/${mounts[mount]}`
await fs.mkdir(path, { recursive: true }) await fs.mkdir(path, { recursive: true })
const volumeMount: Volume = volumes[mount] const volumeMount = volumes[mount]
if (volumeMount.type === "data") { if (volumeMount.type === "data") {
await subcontainer.mount( await subcontainer.mount(
Mounts.of().mountVolume({ Mounts.of().mountVolume({
@@ -82,15 +82,18 @@ export class DockerProcedureContainer extends Drop {
}), }),
) )
} else if (volumeMount.type === "certificate") { } else if (volumeMount.type === "certificate") {
const hostInfo = await effects.getHostInfo({
hostId: volumeMount["interface-id"],
})
const hostnames = [ const hostnames = [
`${packageId}.embassy`, `${packageId}.embassy`,
...new Set( ...new Set(
Object.values(hostInfo?.bindings || {}) Object.values(
.flatMap((b) => b.addresses.available) (
.map((h) => h.hostname), await effects.getHostInfo({
hostId: volumeMount["interface-id"],
})
)?.hostnameInfo || {},
)
.flatMap((h) => h)
.flatMap((h) => (h.kind === "onion" ? [h.hostname.value] : [])),
).values(), ).values(),
] ]
const certChain = await effects.getSslCertificate({ const certChain = await effects.getSslCertificate({
@@ -115,7 +118,7 @@ export class DockerProcedureContainer extends Drop {
subpath: volumeMount.path, subpath: volumeMount.path,
readonly: volumeMount.readonly, readonly: volumeMount.readonly,
volumeId: volumeMount["volume-id"], volumeId: volumeMount["volume-id"],
idmap: [], filetype: "directory",
}, },
}) })
} else if (volumeMount.type === "backup") { } else if (volumeMount.type === "backup") {
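
For context on the certificate-volume hunk above: the new side collects hostnames from `getHostInfo(...)?.hostnameInfo`, flattening the per-host arrays and keeping only onion entries. A standalone sketch of that double-`flatMap` idiom, with the record shape assumed from this diff rather than taken from the SDK types:

```ts
// hostnameInfo shape assumed from the hunk above, not from the SDK
type HostnameEntry =
  | { kind: "onion"; hostname: { value: string } }
  | { kind: "ip"; hostname: { value: string } }

const hostnameInfo: Record<string, HostnameEntry[]> = {
  "ui-host": [{ kind: "onion", hostname: { value: "abcdef.onion" } }],
  "rpc-host": [{ kind: "ip", hostname: { value: "192.168.1.2" } }],
}

const onionHostnames = Object.values(hostnameInfo)
  .flatMap((entries) => entries) // flatten Record<string, T[]> into T[]
  .flatMap((entry) => (entry.kind === "onion" ? [entry.hostname.value] : [])) // filter and map in one pass
console.log(onionHostnames) // ["abcdef.onion"]
```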


@@ -10,6 +10,7 @@ import { SDKManifest } from "@start9labs/start-sdk/base/lib/types"
 import { SubContainerRc } from "@start9labs/start-sdk/package/lib/util/SubContainer"
 
 const EMBASSY_HEALTH_INTERVAL = 15 * 1000
+const EMBASSY_PROPERTIES_LOOP = 30 * 1000
 
 /**
  * We wanted something to represent what the main loop is doing, and
  * in this case it used to run the properties, health, and the docker/ js main.
@@ -119,7 +120,6 @@ export class MainLoop {
             ? {
                 preferredExternalPort: lanConf.external,
                 alpn: { specified: ["http/1.1"] },
-                addXForwardedHeaders: false,
               }
             : null,
       })
@@ -133,7 +133,7 @@ export class MainLoop {
     delete this.mainEvent
     delete this.healthLoops
     await main?.daemon
-      .term()
+      .stop()
      .catch((e: unknown) => console.error(`Main loop error`, utils.asError(e)))
    this.effects.setMainStatus({ status: "stopped" })
    if (healthLoops) healthLoops.forEach((x) => clearInterval(x.interval))


@@ -15,11 +15,26 @@ import { System } from "../../../Interfaces/System"
 import { matchManifest, Manifest } from "./matchManifest"
 import * as childProcess from "node:child_process"
 import { DockerProcedureContainer } from "./DockerProcedureContainer"
-import { DockerProcedure } from "../../../Models/DockerProcedure"
 import { promisify } from "node:util"
 import * as U from "./oldEmbassyTypes"
 import { MainLoop } from "./MainLoop"
-import { z } from "@start9labs/start-sdk"
+import {
+  matches,
+  boolean,
+  dictionary,
+  literal,
+  literals,
+  object,
+  string,
+  unknown,
+  any,
+  tuple,
+  number,
+  anyOf,
+  deferred,
+  Parser,
+  array,
+} from "ts-matches"
 import { AddSslOptions } from "@start9labs/start-sdk/base/lib/osBindings"
 import {
   BindOptionsByProtocol,
@@ -35,116 +50,40 @@
   transformOldConfigToNew,
 } from "./transformConfigSpec"
 import { partialDiff } from "@start9labs/start-sdk/base/lib/util"
-import { Volume } from "@start9labs/start-sdk/package/lib/util/Volume"
 
 type Optional<A> = A | undefined | null
 function todo(): never {
   throw new Error("Not implemented")
 }
-function getStatus(
-  effects: Effects,
-  options: Omit<Parameters<Effects["getStatus"]>[0], "callback"> = {},
-) {
-  async function* watch(abort?: AbortSignal) {
-    const resolveCell = { resolve: () => {} }
-    effects.onLeaveContext(() => {
-      resolveCell.resolve()
-    })
-    abort?.addEventListener("abort", () => resolveCell.resolve())
-    while (effects.isInContext && !abort?.aborted) {
-      let callback: () => void = () => {}
-      const waitForNext = new Promise<void>((resolve) => {
-        callback = resolve
-        resolveCell.resolve = resolve
-      })
-      yield await effects.getStatus({ ...options, callback })
-      await waitForNext
-    }
-  }
-  return {
-    const: () =>
-      effects.getStatus({
-        ...options,
-        callback:
-          effects.constRetry &&
-          (() => effects.constRetry && effects.constRetry()),
-      }),
-    once: () => effects.getStatus(options),
-    watch: (abort?: AbortSignal) => {
-      const ctrl = new AbortController()
-      abort?.addEventListener("abort", () => ctrl.abort())
-      return watch(ctrl.signal)
-    },
-    onChange: (
-      callback: (
-        value: T.StatusInfo | null,
-        error?: Error,
-      ) => { cancel: boolean } | Promise<{ cancel: boolean }>,
-    ) => {
-      ;(async () => {
-        const ctrl = new AbortController()
-        for await (const value of watch(ctrl.signal)) {
-          try {
-            const res = await callback(value)
-            if (res.cancel) {
-              ctrl.abort()
-              break
-            }
-          } catch (e) {
-            console.error(
-              "callback function threw an error @ getStatus.onChange",
-              e,
-            )
-          }
-        }
-      })()
-        .catch((e) => callback(null, e as Error))
-        .catch((e) =>
-          console.error(
-            "callback function threw an error @ getStatus.onChange",
-            e,
-          ),
-        )
-    },
-  }
-}
-/**
- * Local type for procedure values from the manifest.
- * The manifest's zod schemas use ZodTypeAny casts that produce `unknown` in zod v4.
- * This type restores the expected shape for type-safe property access.
- */
-type Procedure =
-  | (DockerProcedure & { type: "docker" })
-  | { type: "script"; args: unknown[] | null }
 const MANIFEST_LOCATION = "/usr/lib/startos/package/embassyManifest.json"
 export const EMBASSY_JS_LOCATION = "/usr/lib/startos/package/embassy.js"
 const configFile = FileHelper.json(
   {
-    base: new Volume("embassy"),
+    volumeId: "embassy",
     subpath: "config.json",
   },
-  z.any(),
+  matches.any,
 )
 const dependsOnFile = FileHelper.json(
   {
-    base: new Volume("embassy"),
+    volumeId: "embassy",
     subpath: "dependsOn.json",
   },
-  z.record(z.string(), z.array(z.string())),
+  dictionary([string, array(string)]),
 )
-const matchResult = z.object({
-  result: z.any(),
+const matchResult = object({
+  result: any,
 })
-const matchError = z.object({
-  error: z.string(),
+const matchError = object({
+  error: string,
 })
-const matchErrorCode = z.object({
-  "error-code": z.tuple([z.number(), z.string()]),
+const matchErrorCode = object<{
+  "error-code": [number, string] | readonly [number, string]
+}>({
+  "error-code": tuple(number, string),
 })
 const assertNever = (
@@ -156,34 +95,29 @@
 /**
 Should be changing the type for specific properties, and this is mostly a transformation for the old return types to the newer one.
 */
-function isMatchResult(a: unknown): a is z.infer<typeof matchResult> {
-  return matchResult.safeParse(a).success
-}
-function isMatchError(a: unknown): a is z.infer<typeof matchError> {
-  return matchError.safeParse(a).success
-}
-function isMatchErrorCode(a: unknown): a is z.infer<typeof matchErrorCode> {
-  return matchErrorCode.safeParse(a).success
-}
 const fromReturnType = <A>(a: U.ResultType<A>): A => {
-  if (isMatchResult(a)) {
+  if (matchResult.test(a)) {
     return a.result
   }
-  if (isMatchError(a)) {
+  if (matchError.test(a)) {
     console.info({ passedErrorStack: new Error().stack, error: a.error })
     throw { error: a.error }
   }
-  if (isMatchErrorCode(a)) {
+  if (matchErrorCode.test(a)) {
     const [code, message] = a["error-code"]
     throw { error: message, code }
   }
-  return assertNever(a as never)
+  return assertNever(a)
 }
-const matchSetResult = z.object({
-  "depends-on": z.record(z.string(), z.array(z.string())).nullable().optional(),
-  dependsOn: z.record(z.string(), z.array(z.string())).nullable().optional(),
-  signal: z.enum([
+const matchSetResult = object({
+  "depends-on": dictionary([string, array(string)])
+    .nullable()
+    .optional(),
+  dependsOn: dictionary([string, array(string)])
+    .nullable()
+    .optional(),
+  signal: literals(
    "SIGTERM",
    "SIGHUP",
    "SIGINT",
@@ -216,7 +150,7 @@ const matchSetResult = z.object({
    "SIGPWR",
    "SIGSYS",
    "SIGINFO",
-  ]),
+  ),
 })
 
 type OldGetConfigRes = {
@@ -298,29 +232,33 @@ const asProperty = (x: PackagePropertiesV2): PropertiesReturn =>
   Object.fromEntries(
     Object.entries(x).map(([key, value]) => [key, asProperty_(value)]),
   )
-const matchPackagePropertyObject: z.ZodType<PackagePropertyObject> = z.object({
-  value: z.lazy(() => matchPackageProperties),
-  type: z.literal("object"),
-  description: z.string(),
-})
+const [matchPackageProperties, setMatchPackageProperties] =
+  deferred<PackagePropertiesV2>()
+const matchPackagePropertyObject: Parser<unknown, PackagePropertyObject> =
+  object({
+    value: matchPackageProperties,
+    type: literal("object"),
+    description: string,
+  })
 
-const matchPackagePropertyString: z.ZodType<PackagePropertyString> = z.object({
-  type: z.literal("string"),
-  description: z.string().nullable().optional(),
-  value: z.string(),
-  copyable: z.boolean().nullable().optional(),
-  qr: z.boolean().nullable().optional(),
-  masked: z.boolean().nullable().optional(),
-})
-const matchPackageProperties: z.ZodType<PackagePropertiesV2> = z.lazy(() =>
-  z.record(
-    z.string(),
-    z.union([matchPackagePropertyObject, matchPackagePropertyString]),
-  ),
+const matchPackagePropertyString: Parser<unknown, PackagePropertyString> =
+  object({
+    type: literal("string"),
+    description: string.nullable().optional(),
+    value: string,
+    copyable: boolean.nullable().optional(),
+    qr: boolean.nullable().optional(),
+    masked: boolean.nullable().optional(),
+  })
+setMatchPackageProperties(
+  dictionary([
+    string,
+    anyOf(matchPackagePropertyObject, matchPackagePropertyString),
+  ]),
 )
-const matchProperties = z.object({
-  version: z.literal(2),
+const matchProperties = object({
+  version: literal(2),
   data: matchPackageProperties,
 })
@@ -349,6 +287,7 @@ function convertProperties(
   }
 }
 
+const DEFAULT_REGISTRY = "https://registry.start9.com"
 export class SystemForEmbassy implements System {
   private version: ExtendedVersion
   currentRunning: MainLoop | undefined
@@ -364,7 +303,7 @@
     })
     const manifestData = await fs.readFile(manifestLocation, "utf-8")
     return new SystemForEmbassy(
-      matchManifest.parse(JSON.parse(manifestData)),
+      matchManifest.unsafeCast(JSON.parse(manifestData)),
       moduleCode,
     )
   }
@@ -392,10 +331,6 @@
     ) {
       this.version.upstream.prerelease = ["alpha"]
     }
-    if (this.manifest.id === "nostr") {
-      this.manifest.id = "nostr-rs-relay"
-    }
   }
 
   async init(
@@ -445,14 +380,13 @@
   }
   callCallback(_callback: number, _args: any[]): void {}
   async stop(): Promise<void> {
-    const clean = this.currentRunning?.clean({
-      timeout: fromDuration(
-        (this.manifest.main["sigterm-timeout"] as any) || "30s",
-      ),
-    })
+    const { currentRunning } = this
+    this.currentRunning?.clean()
     delete this.currentRunning
-    if (clean) {
-      await clean
+    if (currentRunning) {
+      await currentRunning.clean({
+        timeout: fromDuration(this.manifest.main["sigterm-timeout"] || "30s"),
+      })
     }
   }
@@ -522,7 +456,6 @@
       addSsl = {
         preferredExternalPort: lanPortNum,
         alpn: { specified: [] },
-        addXForwardedHeaders: false,
       }
     }
     return [
@@ -572,7 +505,6 @@
   async getActionInput(
     effects: Effects,
     actionId: string,
-    _prefill: Record<string, unknown> | null,
     timeoutMs: number | null,
   ): Promise<T.ActionInput | null> {
     if (actionId === "config") {
@@ -685,7 +617,7 @@
     effects: Effects,
     timeoutMs: number | null,
   ): Promise<void> {
-    const backup = this.manifest.backup.create as Procedure
+    const backup = this.manifest.backup.create
     if (backup.type === "docker") {
       const commands = [backup.entrypoint, ...backup.args]
       const container = await DockerProcedureContainer.of(
@@ -718,7 +650,7 @@
         encoding: "utf-8",
       })
       .catch((_) => null)
-    const restoreBackup = this.manifest.backup.restore as Procedure
+    const restoreBackup = this.manifest.backup.restore
     if (restoreBackup.type === "docker") {
       const commands = [restoreBackup.entrypoint, ...restoreBackup.args]
       const container = await DockerProcedureContainer.of(
@@ -751,7 +683,7 @@
     effects: Effects,
     timeoutMs: number | null,
   ): Promise<OldGetConfigRes> {
-    const config = this.manifest.config?.get as Procedure | undefined
+    const config = this.manifest.config?.get
     if (!config) return { spec: {} }
     if (config.type === "docker") {
       const commands = [config.entrypoint, ...config.args]
@@ -793,7 +725,7 @@
     )
     await updateConfig(effects, this.manifest, spec, newConfig)
     await configFile.write(effects, newConfig)
-    const setConfigValue = this.manifest.config?.set as Procedure | undefined
+    const setConfigValue = this.manifest.config?.set
     if (!setConfigValue) return
     if (setConfigValue.type === "docker") {
       const commands = [
@@ -808,7 +740,7 @@
         this.manifest.volumes,
         `Set Config - ${commands.join(" ")}`,
       )
-      const answer = matchSetResult.parse(
+      const answer = matchSetResult.unsafeCast(
         JSON.parse(
           (await container.execFail(commands, timeoutMs)).stdout.toString(),
         ),
@@ -821,7 +753,7 @@
       const method = moduleCode.setConfig
       if (!method) throw new Error("Expecting that the method setConfig exists")
 
-      const answer = matchSetResult.parse(
+      const answer = matchSetResult.unsafeCast(
         await method(
           polyfillEffects(effects, this.manifest),
           newConfig as U.Config,
@@ -850,11 +782,7 @@
     const requiredDeps = {
       ...Object.fromEntries(
         Object.entries(this.manifest.dependencies ?? {})
-          .filter(
-            ([k, v]) =>
-              (v?.requirement as { type: string } | undefined)?.type ===
-              "required",
-          )
+          .filter(([k, v]) => v?.requirement.type === "required")
           .map((x) => [x[0], []]) || [],
       ),
     }
@@ -922,7 +850,7 @@
     }
     if (migration) {
-      const [_, procedure] = migration as readonly [unknown, Procedure]
+      const [_, procedure] = migration
       if (procedure.type === "docker") {
         const commands = [procedure.entrypoint, ...procedure.args]
         const container = await DockerProcedureContainer.of(
@@ -960,10 +888,8 @@
     effects: Effects,
     timeoutMs: number | null,
   ): Promise<PropertiesReturn> {
-    const setConfigValue = this.manifest.properties as
-      | Procedure
-      | null
-      | undefined
+    // TODO BLU-J set the properties ever so often
+    const setConfigValue = this.manifest.properties
     if (!setConfigValue) throw new Error("There is no properties")
     if (setConfigValue.type === "docker") {
       const commands = [setConfigValue.entrypoint, ...setConfigValue.args]
@@ -974,7 +900,7 @@
         this.manifest.volumes,
         `Properties - ${commands.join(" ")}`,
       )
-      const properties = matchProperties.parse(
+      const properties = matchProperties.unsafeCast(
        JSON.parse(
          (await container.execFail(commands, timeoutMs)).stdout.toString(),
        ),
@@ -985,7 +911,7 @@
      const method = moduleCode.properties
      if (!method)
        throw new Error("Expecting that the method properties exists")
-      const properties = matchProperties.parse(
+      const properties = matchProperties.unsafeCast(
        await method(polyfillEffects(effects, this.manifest)).then(
          fromReturnType,
        ),
@@ -1000,8 +926,7 @@
    formData: unknown,
    timeoutMs: number | null,
  ): Promise<T.ActionResult> {
-    const actionProcedure = this.manifest.actions?.[actionId]
-      ?.implementation as Procedure | undefined
+    const actionProcedure = this.manifest.actions?.[actionId]?.implementation
    const toActionResult = ({
      message,
      value,
@@ -1068,9 +993,7 @@
    oldConfig: unknown,
    timeoutMs: number | null,
  ): Promise<object> {
-    const actionProcedure = this.manifest.dependencies?.[id]?.config?.check as
-      | Procedure
-      | undefined
+    const actionProcedure = this.manifest.dependencies?.[id]?.config?.check
    if (!actionProcedure) return { message: "Action not found", value: null }
    if (actionProcedure.type === "docker") {
      const commands = [
@@ -1113,26 +1036,16 @@
    timeoutMs: number | null,
  ): Promise<void> {
    // TODO: docker
-    const status = await getStatus(effects, { packageId: id }).const()
-    if (!status) return
-    try {
-      await effects.mount({
-        location: `/media/embassy/${id}`,
-        target: {
-          packageId: id,
-          volumeId: "embassy",
-          subpath: null,
-          readonly: true,
-          idmap: [],
-        },
-      })
-    } catch (e) {
-      console.error(
-        `Failed to mount dependency volume for ${id}, skipping autoconfig:`,
-        e,
-      )
-      return
-    }
+    await effects.mount({
+      location: `/media/embassy/${id}`,
+      target: {
+        packageId: id,
+        volumeId: "embassy",
+        subpath: null,
+        readonly: true,
+        filetype: "directory",
+      },
+    })
    configFile
      .withPath(`/media/embassy/${id}/config.json`)
      .read()
@@ -1172,50 +1085,40 @@
  }
 }
 
-const matchPointer = z.object({
-  type: z.literal("pointer"),
+const matchPointer = object({
+  type: literal("pointer"),
 })
 
-const matchPointerPackage = z.object({
-  subtype: z.literal("package"),
-  target: z.enum(["tor-key", "tor-address", "lan-address"]),
-  "package-id": z.string(),
-  interface: z.string(),
+const matchPointerPackage = object({
+  subtype: literal("package"),
+  target: literals("tor-key", "tor-address", "lan-address"),
+  "package-id": string,
+  interface: string,
 })
 
-const matchPointerConfig = z.object({
-  subtype: z.literal("package"),
-  target: z.enum(["config"]),
-  "package-id": z.string(),
-  selector: z.string(),
-  multi: z.boolean(),
+const matchPointerConfig = object({
+  subtype: literal("package"),
+  target: literals("config"),
+  "package-id": string,
+  selector: string,
+  multi: boolean,
 })
-const matchSpec = z.object({
-  spec: z.record(z.string(), z.unknown()),
+const matchSpec = object({
+  spec: object,
 })
-const matchVariants = z.object({ variants: z.record(z.string(), z.unknown()) })
-function isMatchPointer(v: unknown): v is z.infer<typeof matchPointer> {
-  return matchPointer.safeParse(v).success
-}
-function isMatchSpec(v: unknown): v is z.infer<typeof matchSpec> {
-  return matchSpec.safeParse(v).success
-}
-function isMatchVariants(v: unknown): v is z.infer<typeof matchVariants> {
-  return matchVariants.safeParse(v).success
-}
+const matchVariants = object({ variants: dictionary([string, unknown]) })
 
 function cleanSpecOfPointers<T>(mutSpec: T): T {
-  if (typeof mutSpec !== "object" || mutSpec === null) return mutSpec
+  if (!object.test(mutSpec)) return mutSpec
   for (const key in mutSpec) {
     const value = mutSpec[key]
-    if (isMatchSpec(value))
-      value.spec = cleanSpecOfPointers(value.spec) as Record<string, unknown>
-    if (isMatchVariants(value))
+    if (matchSpec.test(value)) value.spec = cleanSpecOfPointers(value.spec)
+    if (matchVariants.test(value))
       value.variants = Object.fromEntries(
         Object.entries(value.variants).map(([key, value]) => [
           key,
           cleanSpecOfPointers(value),
         ]),
       )
-    if (!isMatchPointer(value)) continue
+    if (!matchPointer.test(value)) continue
     delete mutSpec[key]
     // // if (value.target === )
   }
@@ -1281,11 +1184,6 @@
       if (specValue.target === "config") {
         const jp = require("jsonpath")
         const depId = specValue["package-id"]
-        const depStatus = await getStatus(effects, { packageId: depId }).const()
-        if (!depStatus) {
-          mutConfigValue[key] = null
-          continue
-        }
         await effects.mount({
           location: `/media/embassy/${depId}`,
           target: {
@@ -1293,7 +1191,7 @@
             volumeId: "embassy",
             subpath: null,
             readonly: true,
-            idmap: [],
+            filetype: "directory",
           },
         })
         const remoteConfig = configFile
@@ -1342,8 +1240,12 @@
           ? ""
           : catchFn(
               () =>
-                filled.addressInfo!.filter({ kind: "mdns" })!.hostnames[0]
-                  .hostname,
+                (specValue.target === "lan-address"
+                  ? filled.addressInfo!.localHostnames[0] ||
+                    filled.addressInfo!.onionHostnames[0]
+                  : filled.addressInfo!.onionHostnames[0] ||
+                    filled.addressInfo!.localHostnames[0]
+                ).hostname.value,
             ) || ""
       mutConfigValue[key] = url
     }
@@ -1366,7 +1268,7 @@ function extractServiceInterfaceId(manifest: Manifest, specInterface: string) {
 }
 
 async function convertToNewConfig(value: OldGetConfigRes) {
   try {
-    const valueSpec: OldConfigSpec = matchOldConfigSpec.parse(value.spec)
+    const valueSpec: OldConfigSpec = matchOldConfigSpec.unsafeCast(value.spec)
    const spec = transformConfigSpec(valueSpec)
    if (!value.config) return { spec, config: null }
    const config = transformOldConfigToNew(valueSpec, value.config) ?? null
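
Several of the parsers above are recursive: package properties contain nested property maps, and config specs nest further specs. Where the zod side uses `z.lazy`, the ts-matches side uses `deferred`, which hands back a placeholder parser plus a setter to tie the knot later. A self-contained sketch of that pattern, with an invented `Tree` shape standing in for `PackagePropertiesV2`:

```ts
import { deferred, object, string, array } from "ts-matches"

// a self-referential shape, analogous to the recursive property map above
type Tree = { name: string; children: Tree[] }

// deferred() returns [parser, setter]; the definition can then mention the
// parser itself, which is ts-matches' counterpart to zod's z.lazy
const [matchTree, setMatchTree] = deferred<Tree>()
setMatchTree(
  object({
    name: string,
    children: array(matchTree),
  }),
)

const parsed = matchTree.unsafeCast({
  name: "root",
  children: [{ name: "leaf", children: [] }],
})
console.log(parsed.children[0].name) // "leaf"
```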


@@ -4,9 +4,9 @@ import synapseManifest from "./__fixtures__/synapseManifest"
 
 describe("matchManifest", () => {
   test("gittea", () => {
-    matchManifest.parse(giteaManifest)
+    matchManifest.unsafeCast(giteaManifest)
   })
   test("synapse", () => {
-    matchManifest.parse(synapseManifest)
+    matchManifest.unsafeCast(synapseManifest)
  })
 })


@@ -1,123 +1,126 @@
-import { z } from "@start9labs/start-sdk"
+import {
+  object,
+  literal,
+  string,
+  array,
+  boolean,
+  dictionary,
+  literals,
+  number,
+  unknown,
+  some,
+  every,
+} from "ts-matches"
 import { matchVolume } from "./matchVolume"
 import { matchDockerProcedure } from "../../../Models/DockerProcedure"
 
-const matchJsProcedure = z.object({
-  type: z.literal("script"),
-  args: z.array(z.unknown()).nullable().optional().default([]),
+const matchJsProcedure = object({
+  type: literal("script"),
+  args: array(unknown).nullable().optional().defaultTo([]),
 })
 
-const matchProcedure = z.union([matchDockerProcedure, matchJsProcedure])
-export type Procedure = z.infer<typeof matchProcedure>
+const matchProcedure = some(matchDockerProcedure, matchJsProcedure)
+export type Procedure = typeof matchProcedure._TYPE
 
-const healthCheckFields = {
-  name: z.string(),
-  "success-message": z.string().nullable().optional(),
-}
-const matchAction = z.object({
-  name: z.string(),
-  description: z.string(),
-  warning: z.string().nullable().optional(),
+const matchAction = object({
+  name: string,
+  description: string,
+  warning: string.nullable().optional(),
   implementation: matchProcedure,
-  "allowed-statuses": z.array(z.enum(["running", "stopped"])),
-  "input-spec": z.unknown().nullable().optional(),
+  "allowed-statuses": array(literals("running", "stopped")),
+  "input-spec": unknown.nullable().optional(),
 })
 
-export const matchManifest = z.object({
-  id: z.string(),
-  title: z.string(),
-  version: z.string(),
+export const matchManifest = object({
+  id: string,
+  title: string,
+  version: string,
   main: matchDockerProcedure,
-  assets: z
-    .object({
-      assets: z.string().nullable().optional(),
-      scripts: z.string().nullable().optional(),
-    })
+  assets: object({
+    assets: string.nullable().optional(),
+    scripts: string.nullable().optional(),
+  })
     .nullable()
     .optional(),
-  "health-checks": z.record(
-    z.string(),
-    z.union([
-      matchDockerProcedure.extend(healthCheckFields),
-      matchJsProcedure.extend(healthCheckFields),
-    ]),
-  ),
-  config: z
-    .object({
-      get: matchProcedure,
-      set: matchProcedure,
-    })
+  "health-checks": dictionary([
+    string,
+    every(
+      matchProcedure,
+      object({
+        name: string,
+        ["success-message"]: string.nullable().optional(),
+      }),
+    ),
+  ]),
+  config: object({
+    get: matchProcedure,
+    set: matchProcedure,
+  })
    .nullable()
    .optional(),
   properties: matchProcedure.nullable().optional(),
-  volumes: z.record(z.string(), matchVolume),
-  interfaces: z.record(
-    z.string(),
-    z.object({
-      name: z.string(),
-      description: z.string(),
-      "tor-config": z
-        .object({
-          "port-mapping": z.record(z.string(), z.string()),
-        })
+  volumes: dictionary([string, matchVolume]),
+  interfaces: dictionary([
+    string,
+    object({
+      name: string,
+      description: string,
+      "tor-config": object({
+        "port-mapping": dictionary([string, string]),
+      })
        .nullable()
        .optional(),
-      "lan-config": z
-        .record(
-          z.string(),
-          z.object({
-            ssl: z.boolean(),
-            internal: z.number(),
-          }),
-        )
+      "lan-config": dictionary([
+        string,
+        object({
+          ssl: boolean,
+          internal: number,
+        }),
+      ])
        .nullable()
        .optional(),
-      ui: z.boolean(),
-      protocols: z.array(z.string()),
+      ui: boolean,
+      protocols: array(string),
    }),
-  ),
-  backup: z.object({
+  ]),
+  backup: object({
    create: matchProcedure,
    restore: matchProcedure,
  }),
-  migrations: z
-    .object({
-      to: z.record(z.string(), matchProcedure),
-      from: z.record(z.string(), matchProcedure),
-    })
+  migrations: object({
+    to: dictionary([string, matchProcedure]),
+    from: dictionary([string, matchProcedure]),
+  })
    .nullable()
    .optional(),
-  dependencies: z.record(
-    z.string(),
-    z
-      .object({
-        version: z.string(),
-        requirement: z.union([
-          z.object({
-            type: z.literal("opt-in"),
-            how: z.string(),
-          }),
-          z.object({
-            type: z.literal("opt-out"),
-            how: z.string(),
-          }),
-          z.object({
-            type: z.literal("required"),
-          }),
-        ]),
-        description: z.string().nullable().optional(),
-        config: z
-          .object({
-            check: matchProcedure,
-            "auto-configure": matchProcedure,
-          })
-          .nullable()
-          .optional(),
-      })
-      .nullable()
-      .optional(),
-  ),
-  actions: z.record(z.string(), matchAction),
+  dependencies: dictionary([
+    string,
+    object({
+      version: string,
+      requirement: some(
+        object({
+          type: literal("opt-in"),
+          how: string,
+        }),
+        object({
+          type: literal("opt-out"),
+          how: string,
+        }),
+        object({
+          type: literal("required"),
+        }),
+      ),
+      description: string.nullable().optional(),
+      config: object({
+        check: matchProcedure,
+        "auto-configure": matchProcedure,
+      })
+        .nullable()
+        .optional(),
+    })
+      .nullable()
+      .optional(),
+  ]),
+  actions: dictionary([string, matchAction]),
 })
 
-export type Manifest = z.infer<typeof matchManifest>
+export type Manifest = typeof matchManifest._TYPE


@@ -1,32 +1,32 @@
-import { z } from "@start9labs/start-sdk"
+import { object, literal, string, boolean, some } from "ts-matches"
 
-const matchDataVolume = z.object({
-  type: z.literal("data"),
-  readonly: z.boolean().optional(),
+const matchDataVolume = object({
+  type: literal("data"),
+  readonly: boolean.optional(),
 })
-const matchAssetVolume = z.object({
-  type: z.literal("assets"),
+const matchAssetVolume = object({
+  type: literal("assets"),
 })
-const matchPointerVolume = z.object({
-  type: z.literal("pointer"),
-  "package-id": z.string(),
-  "volume-id": z.string(),
-  path: z.string(),
-  readonly: z.boolean(),
+const matchPointerVolume = object({
+  type: literal("pointer"),
+  "package-id": string,
+  "volume-id": string,
+  path: string,
+  readonly: boolean,
 })
-const matchCertificateVolume = z.object({
-  type: z.literal("certificate"),
-  "interface-id": z.string(),
+const matchCertificateVolume = object({
+  type: literal("certificate"),
+  "interface-id": string,
 })
-const matchBackupVolume = z.object({
-  type: z.literal("backup"),
-  readonly: z.boolean(),
+const matchBackupVolume = object({
+  type: literal("backup"),
+  readonly: boolean,
 })
-export const matchVolume = z.union([
+export const matchVolume = some(
   matchDataVolume,
   matchAssetVolume,
   matchPointerVolume,
   matchCertificateVolume,
   matchBackupVolume,
-])
-export type Volume = z.infer<typeof matchVolume>
+)
+export type Volume = typeof matchVolume._TYPE
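
`matchVolume` above is a tagged union built with `some(...)`. A pattern the diff leans on elsewhere (for example in `cleanSpecOfPointers`) is that such a parser's `.test` narrows an `unknown` value, after which an ordinary check on the literal tag discriminates the variants. A small sketch under those assumptions, with an invented union in the same style:

```ts
import { object, literal, string, boolean, some } from "ts-matches"

// an invented tagged union, analogous to matchVolume above
const matchFile = object({ kind: literal("file"), path: string, readonly: boolean })
const matchDir = object({ kind: literal("dir"), path: string })
const matchEntry = some(matchFile, matchDir)
type Entry = typeof matchEntry._TYPE

function describe(value: unknown): string {
  if (!matchEntry.test(value)) return "not an entry"
  // value is narrowed to Entry; the literal "kind" field discriminates the variants
  return value.kind === "file" ? `file at ${value.path}` : `dir at ${value.path}`
}

console.log(describe({ kind: "dir", path: "/tmp" })) // "dir at /tmp"
console.log(describe(null)) // "not an entry"
```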


@@ -12,43 +12,43 @@ import nostrConfig2 from "./__fixtures__/nostrConfig2"
 
 describe("transformConfigSpec", () => {
   test("matchOldConfigSpec(embassyPages.homepage.variants[web-page])", () => {
-    matchOldConfigSpec.parse(
+    matchOldConfigSpec.unsafeCast(
       fixtureEmbassyPagesConfig.homepage.variants["web-page"],
     )
   })
   test("matchOldConfigSpec(embassyPages)", () => {
-    matchOldConfigSpec.parse(fixtureEmbassyPagesConfig)
+    matchOldConfigSpec.unsafeCast(fixtureEmbassyPagesConfig)
   })
   test("transformConfigSpec(embassyPages)", () => {
-    const spec = matchOldConfigSpec.parse(fixtureEmbassyPagesConfig)
+    const spec = matchOldConfigSpec.unsafeCast(fixtureEmbassyPagesConfig)
     expect(transformConfigSpec(spec)).toMatchSnapshot()
   })
   test("matchOldConfigSpec(RTL.nodes)", () => {
-    matchOldValueSpecList.parse(fixtureRTLConfig.nodes)
+    matchOldValueSpecList.unsafeCast(fixtureRTLConfig.nodes)
   })
   test("matchOldConfigSpec(RTL)", () => {
-    matchOldConfigSpec.parse(fixtureRTLConfig)
+    matchOldConfigSpec.unsafeCast(fixtureRTLConfig)
   })
   test("transformConfigSpec(RTL)", () => {
-    const spec = matchOldConfigSpec.parse(fixtureRTLConfig)
+    const spec = matchOldConfigSpec.unsafeCast(fixtureRTLConfig)
     expect(transformConfigSpec(spec)).toMatchSnapshot()
   })
   test("transformConfigSpec(searNXG)", () => {
-    const spec = matchOldConfigSpec.parse(searNXG)
+    const spec = matchOldConfigSpec.unsafeCast(searNXG)
     expect(transformConfigSpec(spec)).toMatchSnapshot()
   })
   test("transformConfigSpec(bitcoind)", () => {
-    const spec = matchOldConfigSpec.parse(bitcoind)
+    const spec = matchOldConfigSpec.unsafeCast(bitcoind)
     expect(transformConfigSpec(spec)).toMatchSnapshot()
   })
   test("transformConfigSpec(nostr)", () => {
-    const spec = matchOldConfigSpec.parse(nostr)
+    const spec = matchOldConfigSpec.unsafeCast(nostr)
     expect(transformConfigSpec(spec)).toMatchSnapshot()
   })
   test("transformConfigSpec(nostr2)", () => {
-    const spec = matchOldConfigSpec.parse(nostrConfig2)
+    const spec = matchOldConfigSpec.unsafeCast(nostrConfig2)
    expect(transformConfigSpec(spec)).toMatchSnapshot()
  })
 })


@@ -1,4 +1,19 @@
-import { IST, z } from "@start9labs/start-sdk"
+import { IST } from "@start9labs/start-sdk"
+import {
+  dictionary,
+  object,
+  anyOf,
+  string,
+  literals,
+  array,
+  number,
+  boolean,
+  Parser,
+  deferred,
+  every,
+  nill,
+  literal,
+} from "ts-matches"
 
 export function transformConfigSpec(oldSpec: OldConfigSpec): IST.InputSpec {
   return Object.entries(oldSpec).reduce((inputSpec, [key, oldVal]) => {
@@ -67,7 +82,7 @@ export function transformConfigSpec(oldSpec: OldConfigSpec): IST.InputSpec {
         name: oldVal.name,
         description: oldVal.description || null,
         warning: oldVal.warning || null,
-        spec: transformConfigSpec(matchOldConfigSpec.parse(oldVal.spec)),
+        spec: transformConfigSpec(matchOldConfigSpec.unsafeCast(oldVal.spec)),
       }
     } else if (oldVal.type === "string") {
       newVal = {
@@ -106,7 +121,7 @@ export function transformConfigSpec(oldSpec: OldConfigSpec): IST.InputSpec {
             ...obj,
             [id]: {
               name: oldVal.tag["variant-names"][id] || id,
-              spec: transformConfigSpec(matchOldConfigSpec.parse(spec)),
+              spec: transformConfigSpec(matchOldConfigSpec.unsafeCast(spec)),
             },
           }),
           {} as Record<string, { name: string; spec: IST.InputSpec }>,
@@ -138,7 +153,7 @@ export function transformOldConfigToNew(
     if (isObject(val)) {
       newVal = transformOldConfigToNew(
-        matchOldConfigSpec.parse(val.spec),
+        matchOldConfigSpec.unsafeCast(val.spec),
         config[key],
       )
     }
@@ -157,7 +172,7 @@ export function transformOldConfigToNew(
       newVal = {
         selection,
         value: transformOldConfigToNew(
-          matchOldConfigSpec.parse(val.variants[selection]),
+          matchOldConfigSpec.unsafeCast(val.variants[selection]),
           config[key],
         ),
       }
@@ -168,7 +183,10 @@ export function transformOldConfigToNew(
     if (isObjectList(val)) {
       newVal = (config[key] as object[]).map((obj) =>
-        transformOldConfigToNew(matchOldConfigSpec.parse(val.spec.spec), obj),
+        transformOldConfigToNew(
+          matchOldConfigSpec.unsafeCast(val.spec.spec),
+          obj,
+        ),
       )
     } else if (isUnionList(val)) return obj
   }
@@ -194,7 +212,7 @@ export function transformNewConfigToOld(
     if (isObject(val)) {
       newVal = transformNewConfigToOld(
-        matchOldConfigSpec.parse(val.spec),
+        matchOldConfigSpec.unsafeCast(val.spec),
         config[key],
       )
     }
@@ -203,7 +221,7 @@ export function transformNewConfigToOld(
       newVal = {
         [val.tag.id]: config[key].selection,
         ...transformNewConfigToOld(
-          matchOldConfigSpec.parse(val.variants[config[key].selection]),
+          matchOldConfigSpec.unsafeCast(val.variants[config[key].selection]),
          config[key].value,
        ),
      }
@@ -212,7 +230,10 @@
  if (isList(val)) {
    if (isObjectList(val)) {
      newVal = (config[key] as object[]).map((obj) =>
-        transformNewConfigToOld(matchOldConfigSpec.parse(val.spec.spec), obj),
+        transformNewConfigToOld(
+          matchOldConfigSpec.unsafeCast(val.spec.spec),
+          obj,
+        ),
      )
    } else if (isUnionList(val)) return obj
  }
@@ -316,7 +337,9 @@ function getListSpec(
    default: oldVal.default as Record<string, unknown>[],
    spec: {
      type: "object",
-      spec: transformConfigSpec(matchOldConfigSpec.parse(oldVal.spec.spec)),
+      spec: transformConfigSpec(
+        matchOldConfigSpec.unsafeCast(oldVal.spec.spec),
+      ),
      uniqueBy: oldVal.spec["unique-by"] || null,
      displayAs: oldVal.spec["display-as"] || null,
    },
@@ -370,281 +393,211 @@ function isUnionList(
 }
 
 export type OldConfigSpec = Record<string, OldValueSpec>
 
-export const matchOldConfigSpec: z.ZodType<OldConfigSpec> = z.lazy(() =>
-  z.record(z.string(), matchOldValueSpec),
-)
+const [_matchOldConfigSpec, setMatchOldConfigSpec] = deferred<unknown>()
+export const matchOldConfigSpec = _matchOldConfigSpec as Parser<
+  unknown,
+  OldConfigSpec
+>
 
-export const matchOldDefaultString = z.union([
-  z.string(),
-  z.object({ charset: z.string(), len: z.number() }),
-])
-type OldDefaultString = z.infer<typeof matchOldDefaultString>
+export const matchOldDefaultString = anyOf(
+  string,
+  object({ charset: string, len: number }),
+)
+type OldDefaultString = typeof matchOldDefaultString._TYPE
 
-export const matchOldValueSpecString = z.object({
-  type: z.enum(["string"]),
-  name: z.string(),
-  masked: z.boolean().nullable().optional(),
+export const matchOldValueSpecString = object({
+  type: literals("string"),
+  name: string,
+  masked: boolean.nullable().optional(),
copyable: z.boolean().nullable().optional(), copyable: boolean.nullable().optional(),
nullable: z.boolean().nullable().optional(), nullable: boolean.nullable().optional(),
placeholder: z.string().nullable().optional(), placeholder: string.nullable().optional(),
pattern: z.string().nullable().optional(), pattern: string.nullable().optional(),
"pattern-description": z.string().nullable().optional(), "pattern-description": string.nullable().optional(),
default: matchOldDefaultString.nullable().optional(), default: matchOldDefaultString.nullable().optional(),
textarea: z.boolean().nullable().optional(), textarea: boolean.nullable().optional(),
description: z.string().nullable().optional(), description: string.nullable().optional(),
warning: z.string().nullable().optional(), warning: string.nullable().optional(),
}) })
export const matchOldValueSpecNumber = z.object({ export const matchOldValueSpecNumber = object({
type: z.enum(["number"]), type: literals("number"),
nullable: z.boolean(), nullable: boolean,
name: z.string(), name: string,
range: z.string(), range: string,
integral: z.boolean(), integral: boolean,
default: z.number().nullable().optional(), default: number.nullable().optional(),
description: z.string().nullable().optional(), description: string.nullable().optional(),
warning: z.string().nullable().optional(), warning: string.nullable().optional(),
units: z.string().nullable().optional(), units: string.nullable().optional(),
placeholder: z.union([z.number(), z.string()]).nullable().optional(), placeholder: anyOf(number, string).nullable().optional(),
}) })
type OldValueSpecNumber = z.infer<typeof matchOldValueSpecNumber> type OldValueSpecNumber = typeof matchOldValueSpecNumber._TYPE
export const matchOldValueSpecBoolean = z.object({ export const matchOldValueSpecBoolean = object({
type: z.enum(["boolean"]), type: literals("boolean"),
default: z.boolean(), default: boolean,
name: z.string(), name: string,
description: z.string().nullable().optional(), description: string.nullable().optional(),
warning: z.string().nullable().optional(), warning: string.nullable().optional(),
}) })
type OldValueSpecBoolean = z.infer<typeof matchOldValueSpecBoolean> type OldValueSpecBoolean = typeof matchOldValueSpecBoolean._TYPE
type OldValueSpecObject = { const matchOldValueSpecObject = object({
type: "object" type: literals("object"),
spec: OldConfigSpec spec: _matchOldConfigSpec,
name: string name: string,
description?: string | null description: string.nullable().optional(),
warning?: string | null warning: string.nullable().optional(),
}
const matchOldValueSpecObject: z.ZodType<OldValueSpecObject> = z.object({
type: z.enum(["object"]),
spec: z.lazy(() => matchOldConfigSpec),
name: z.string(),
description: z.string().nullable().optional(),
warning: z.string().nullable().optional(),
}) })
type OldValueSpecObject = typeof matchOldValueSpecObject._TYPE
const matchOldValueSpecEnum = z.object({ const matchOldValueSpecEnum = object({
values: z.array(z.string()), values: array(string),
"value-names": z.record(z.string(), z.string()), "value-names": dictionary([string, string]),
type: z.enum(["enum"]), type: literals("enum"),
default: z.string(), default: string,
name: z.string(), name: string,
description: z.string().nullable().optional(), description: string.nullable().optional(),
warning: z.string().nullable().optional(), warning: string.nullable().optional(),
}) })
type OldValueSpecEnum = z.infer<typeof matchOldValueSpecEnum> type OldValueSpecEnum = typeof matchOldValueSpecEnum._TYPE
const matchOldUnionTagSpec = z.object({ const matchOldUnionTagSpec = object({
id: z.string(), // The name of the field containing one of the union variants id: string, // The name of the field containing one of the union variants
"variant-names": z.record(z.string(), z.string()), // The name of each variant "variant-names": dictionary([string, string]), // The name of each variant
name: z.string(), name: string,
description: z.string().nullable().optional(), description: string.nullable().optional(),
warning: z.string().nullable().optional(), warning: string.nullable().optional(),
}) })
type OldValueSpecUnion = { const matchOldValueSpecUnion = object({
type: "union" type: literals("union"),
tag: z.infer<typeof matchOldUnionTagSpec>
variants: Record<string, OldConfigSpec>
default: string
}
const matchOldValueSpecUnion: z.ZodType<OldValueSpecUnion> = z.object({
type: z.enum(["union"]),
tag: matchOldUnionTagSpec, tag: matchOldUnionTagSpec,
variants: z.record( variants: dictionary([string, _matchOldConfigSpec]),
z.string(), default: string,
z.lazy(() => matchOldConfigSpec),
),
default: z.string(),
}) })
type OldValueSpecUnion = typeof matchOldValueSpecUnion._TYPE
const [matchOldUniqueBy, setOldUniqueBy] = deferred<OldUniqueBy>()
type OldUniqueBy = type OldUniqueBy =
| null | null
| string | string
| { any: OldUniqueBy[] } | { any: OldUniqueBy[] }
| { all: OldUniqueBy[] } | { all: OldUniqueBy[] }
const matchOldUniqueBy: z.ZodType<OldUniqueBy> = z.lazy(() => setOldUniqueBy(
z.union([ anyOf(
z.null(), nill,
z.string(), string,
z.object({ any: z.array(matchOldUniqueBy) }), object({ any: array(matchOldUniqueBy) }),
z.object({ all: z.array(matchOldUniqueBy) }), object({ all: array(matchOldUniqueBy) }),
]),
)
type OldListValueSpecObject = {
spec: OldConfigSpec
"unique-by"?: OldUniqueBy | null
"display-as"?: string | null
}
const matchOldListValueSpecObject: z.ZodType<OldListValueSpecObject> = z.object(
{
spec: z.lazy(() => matchOldConfigSpec), // this is a mapped type of the config object at this level, replacing the object's values with specs on those values
"unique-by": matchOldUniqueBy.nullable().optional(), // indicates whether duplicates can be permitted in the list
"display-as": z.string().nullable().optional(), // this should be a handlebars template which can make use of the entire config which corresponds to 'spec'
},
)
type OldListValueSpecUnion = {
"unique-by"?: OldUniqueBy | null
"display-as"?: string | null
tag: z.infer<typeof matchOldUnionTagSpec>
variants: Record<string, OldConfigSpec>
}
const matchOldListValueSpecUnion: z.ZodType<OldListValueSpecUnion> = z.object({
"unique-by": matchOldUniqueBy.nullable().optional(),
"display-as": z.string().nullable().optional(),
tag: matchOldUnionTagSpec,
variants: z.record(
z.string(),
z.lazy(() => matchOldConfigSpec),
), ),
)
const matchOldListValueSpecObject = object({
spec: _matchOldConfigSpec, // this is a mapped type of the config object at this level, replacing the object's values with specs on those values
"unique-by": matchOldUniqueBy.nullable().optional(), // indicates whether duplicates can be permitted in the list
"display-as": string.nullable().optional(), // this should be a handlebars template which can make use of the entire config which corresponds to 'spec'
}) })
const matchOldListValueSpecString = z.object({ const matchOldListValueSpecUnion = object({
masked: z.boolean().nullable().optional(), "unique-by": matchOldUniqueBy.nullable().optional(),
copyable: z.boolean().nullable().optional(), "display-as": string.nullable().optional(),
pattern: z.string().nullable().optional(), tag: matchOldUnionTagSpec,
"pattern-description": z.string().nullable().optional(), variants: dictionary([string, _matchOldConfigSpec]),
placeholder: z.string().nullable().optional(), })
const matchOldListValueSpecString = object({
masked: boolean.nullable().optional(),
copyable: boolean.nullable().optional(),
pattern: string.nullable().optional(),
"pattern-description": string.nullable().optional(),
placeholder: string.nullable().optional(),
}) })
const matchOldListValueSpecEnum = z.object({ const matchOldListValueSpecEnum = object({
values: z.array(z.string()), values: array(string),
"value-names": z.record(z.string(), z.string()), "value-names": dictionary([string, string]),
}) })
const matchOldListValueSpecNumber = z.object({ const matchOldListValueSpecNumber = object({
range: z.string(), range: string,
integral: z.boolean(), integral: boolean,
units: z.string().nullable().optional(), units: string.nullable().optional(),
placeholder: z.union([z.number(), z.string()]).nullable().optional(), placeholder: anyOf(number, string).nullable().optional(),
}) })
type OldValueSpecListBase = {
type: "list"
range: string
default: string[] | number[] | OldDefaultString[] | Record<string, unknown>[]
name: string
description?: string | null
warning?: string | null
}
type OldValueSpecList = OldValueSpecListBase &
(
| { subtype: "string"; spec: z.infer<typeof matchOldListValueSpecString> }
| { subtype: "enum"; spec: z.infer<typeof matchOldListValueSpecEnum> }
| { subtype: "object"; spec: OldListValueSpecObject }
| { subtype: "number"; spec: z.infer<typeof matchOldListValueSpecNumber> }
| { subtype: "union"; spec: OldListValueSpecUnion }
)
// represents a spec for a list // represents a spec for a list
export const matchOldValueSpecList: z.ZodType<OldValueSpecList> = export const matchOldValueSpecList = every(
z.intersection( object({
z.object({ type: literals("list"),
type: z.enum(["list"]), range: string, // '[0,1]' (inclusive) OR '[0,*)' (right unbounded), normal math rules
range: z.string(), // '[0,1]' (inclusive) OR '[0,*)' (right unbounded), normal math rules default: anyOf(
default: z.union([ array(string),
z.array(z.string()), array(number),
z.array(z.number()), array(matchOldDefaultString),
z.array(matchOldDefaultString), array(object),
z.array(z.object({}).passthrough()), ),
]), name: string,
name: z.string(), description: string.nullable().optional(),
description: z.string().nullable().optional(), warning: string.nullable().optional(),
warning: z.string().nullable().optional(),
}),
z.union([
z.object({
subtype: z.enum(["string"]),
spec: matchOldListValueSpecString,
}),
z.object({
subtype: z.enum(["enum"]),
spec: matchOldListValueSpecEnum,
}),
z.object({
subtype: z.enum(["object"]),
spec: matchOldListValueSpecObject,
}),
z.object({
subtype: z.enum(["number"]),
spec: matchOldListValueSpecNumber,
}),
z.object({
subtype: z.enum(["union"]),
spec: matchOldListValueSpecUnion,
}),
]),
) as unknown as z.ZodType<OldValueSpecList>
type OldValueSpecPointer = {
type: "pointer"
} & (
| {
subtype: "package"
target: "tor-key" | "tor-address" | "lan-address"
"package-id": string
interface: string
}
| {
subtype: "package"
target: "config"
"package-id": string
selector: string
multi: boolean
}
)
const matchOldValueSpecPointer: z.ZodType<OldValueSpecPointer> = z.intersection(
z.object({
type: z.literal("pointer"),
}), }),
z.union([ anyOf(
z.object({ object({
subtype: z.literal("package"), subtype: literals("string"),
target: z.enum(["tor-key", "tor-address", "lan-address"]), spec: matchOldListValueSpecString,
"package-id": z.string(),
interface: z.string(),
}), }),
z.object({ object({
subtype: z.literal("package"), subtype: literals("enum"),
target: z.enum(["config"]), spec: matchOldListValueSpecEnum,
"package-id": z.string(),
selector: z.string(),
multi: z.boolean(),
}), }),
]), object({
) as unknown as z.ZodType<OldValueSpecPointer> subtype: literals("object"),
spec: matchOldListValueSpecObject,
}),
object({
subtype: literals("number"),
spec: matchOldListValueSpecNumber,
}),
object({
subtype: literals("union"),
spec: matchOldListValueSpecUnion,
}),
),
)
type OldValueSpecList = typeof matchOldValueSpecList._TYPE
type OldValueSpecString = z.infer<typeof matchOldValueSpecString> const matchOldValueSpecPointer = every(
object({
type: literal("pointer"),
}),
anyOf(
object({
subtype: literal("package"),
target: literals("tor-key", "tor-address", "lan-address"),
"package-id": string,
interface: string,
}),
object({
subtype: literal("package"),
target: literals("config"),
"package-id": string,
selector: string,
multi: boolean,
}),
),
)
type OldValueSpecPointer = typeof matchOldValueSpecPointer._TYPE
type OldValueSpec = export const matchOldValueSpec = anyOf(
| OldValueSpecString
| OldValueSpecNumber
| OldValueSpecBoolean
| OldValueSpecObject
| OldValueSpecEnum
| OldValueSpecList
| OldValueSpecUnion
| OldValueSpecPointer
export const matchOldValueSpec: z.ZodType<OldValueSpec> = z.union([
matchOldValueSpecString, matchOldValueSpecString,
matchOldValueSpecNumber, matchOldValueSpecNumber,
matchOldValueSpecBoolean, matchOldValueSpecBoolean,
matchOldValueSpecObject as z.ZodType<OldValueSpecObject>, matchOldValueSpecObject,
matchOldValueSpecEnum, matchOldValueSpecEnum,
matchOldValueSpecList as z.ZodType<OldValueSpecList>, matchOldValueSpecList,
matchOldValueSpecUnion as z.ZodType<OldValueSpecUnion>, matchOldValueSpecUnion,
matchOldValueSpecPointer as z.ZodType<OldValueSpecPointer>, matchOldValueSpecPointer,
]) )
type OldValueSpec = typeof matchOldValueSpec._TYPE
setMatchOldConfigSpec(dictionary([string, matchOldValueSpec]))
export class Range { export class Range {
min?: number min?: number


@@ -47,12 +47,11 @@ export class SystemForStartOs implements System {
getActionInput( getActionInput(
effects: Effects, effects: Effects,
id: string, id: string,
prefill: Record<string, unknown> | null,
timeoutMs: number | null, timeoutMs: number | null,
): Promise<T.ActionInput | null> { ): Promise<T.ActionInput | null> {
const action = this.abi.actions.get(id) const action = this.abi.actions.get(id)
if (!action) throw new Error(`Action ${id} not found`) if (!action) throw new Error(`Action ${id} not found`)
return action.getInput({ effects, prefill }) return action.getInput({ effects })
} }
runAction( runAction(
effects: Effects, effects: Effects,
@@ -69,18 +68,22 @@ export class SystemForStartOs implements System {
try { try {
if (this.runningMain || this.starting) return if (this.runningMain || this.starting) return
this.starting = true this.starting = true
effects.constRetry = utils.once(() => { effects.constRetry = utils.once(() => effects.restart())
console.debug(".const() triggered")
if (effects.isInContext) effects.restart()
})
let mainOnTerm: () => Promise<void> | undefined let mainOnTerm: () => Promise<void> | undefined
const started = async (onTerm: () => Promise<void>) => {
await effects.setMainStatus({ status: "running" })
mainOnTerm = onTerm
return null
}
const daemons = await ( const daemons = await (
await this.abi.main({ await this.abi.main({
effects, effects,
started,
}) })
).build() ).build()
this.runningMain = { this.runningMain = {
stop: async () => { stop: async () => {
if (mainOnTerm) await mainOnTerm()
await daemons.term() await daemons.term()
}, },
} }


@@ -33,7 +33,6 @@ export type System = {
   getActionInput(
     effects: Effects,
     actionId: string,
-    prefill: Record<string, unknown> | null,
     timeoutMs: number | null,
   ): Promise<T.ActionInput | null>


@@ -1,19 +1,41 @@
import { z } from "@start9labs/start-sdk" import {
object,
literal,
string,
boolean,
array,
dictionary,
literals,
number,
Parser,
some,
} from "ts-matches"
import { matchDuration } from "./Duration" import { matchDuration } from "./Duration"
export const matchDockerProcedure = z.object({ const VolumeId = string
type: z.literal("docker"), const Path = string
image: z.string(),
system: z.boolean().optional(), export type VolumeId = string
entrypoint: z.string(), export type Path = string
args: z.array(z.string()).default([]), export const matchDockerProcedure = object({
mounts: z.record(z.string(), z.string()).optional(), type: literal("docker"),
"io-format": z image: string,
.enum(["json", "json-pretty", "yaml", "cbor", "toml", "toml-pretty"]) system: boolean.optional(),
entrypoint: string,
args: array(string).defaultTo([]),
mounts: dictionary([VolumeId, Path]).optional(),
"io-format": literals(
"json",
"json-pretty",
"yaml",
"cbor",
"toml",
"toml-pretty",
)
.nullable() .nullable()
.optional(), .optional(),
"sigterm-timeout": z.union([z.number(), matchDuration]).catch(30), "sigterm-timeout": some(number, matchDuration).onMismatch(30),
inject: z.boolean().default(false), inject: boolean.defaultTo(false),
}) })
export type DockerProcedure = z.infer<typeof matchDockerProcedure> export type DockerProcedure = typeof matchDockerProcedure._TYPE


@@ -1,11 +1,11 @@
import { z } from "@start9labs/start-sdk" import { string } from "ts-matches"
export type TimeUnit = "d" | "h" | "s" | "ms" | "m" | "µs" | "ns" export type TimeUnit = "d" | "h" | "s" | "ms" | "m" | "µs" | "ns"
export type Duration = `${number}${TimeUnit}` export type Duration = `${number}${TimeUnit}`
const durationRegex = /^([0-9]*(\.[0-9]+)?)(ns|µs|ms|s|m|d)$/ const durationRegex = /^([0-9]*(\.[0-9]+)?)(ns|µs|ms|s|m|d)$/
export const matchDuration = z.string().refine(isDuration) export const matchDuration = string.refine(isDuration)
export function isDuration(value: string): value is Duration { export function isDuration(value: string): value is Duration {
return durationRegex.test(value) return durationRegex.test(value)
} }


@@ -1,10 +1,10 @@
import { z } from "@start9labs/start-sdk" import { literals, some, string } from "ts-matches"
type NestedPath<A extends string, B extends string> = `/${A}/${string}/${B}` type NestedPath<A extends string, B extends string> = `/${A}/${string}/${B}`
type NestedPaths = NestedPath<"actions", "run" | "getInput"> type NestedPaths = NestedPath<"actions", "run" | "getInput">
// prettier-ignore // prettier-ignore
type UnNestPaths<A> = type UnNestPaths<A> =
A extends `${infer A}/${infer B}` ? [...UnNestPaths<A>, ... UnNestPaths<B>] : A extends `${infer A}/${infer B}` ? [...UnNestPaths<A>, ... UnNestPaths<B>] :
[A] [A]
export function unNestPath<A extends string>(a: A): UnNestPaths<A> { export function unNestPath<A extends string>(a: A): UnNestPaths<A> {
@@ -17,14 +17,14 @@ function isNestedPath(path: string): path is NestedPaths {
return true return true
return false return false
} }
export const jsonPath = z.union([ export const jsonPath = some(
z.enum([ literals(
"/packageInit", "/packageInit",
"/packageUninit", "/packageUninit",
"/backup/create", "/backup/create",
"/backup/restore", "/backup/restore",
]), ),
z.string().refine(isNestedPath), string.refine(isNestedPath, "isNestedPath"),
]) )
export type JsonPath = z.infer<typeof jsonPath> export type JsonPath = typeof jsonPath._TYPE


@@ -1,4 +1,5 @@
 import { RpcListener } from "./Adapters/RpcListener"
+import { SystemForEmbassy } from "./Adapters/Systems/SystemForEmbassy"
 import { AllGetDependencies } from "./Interfaces/AllGetDependencies"
 import { getSystem } from "./Adapters/Systems"

@@ -6,24 +7,6 @@ const getDependencies: AllGetDependencies = {
   system: getSystem,
 }

-process.on("unhandledRejection", (reason) => {
-  if (
-    reason instanceof Error &&
-    "muteUnhandled" in reason &&
-    reason.muteUnhandled
-  ) {
-    // mute
-  } else {
-    console.error("Unhandled promise rejection", reason)
-  }
-})
-for (let s of ["SIGTERM", "SIGINT", "SIGHUP"]) {
-  process.on(s, (s) => {
-    console.log(`Caught ${s}`)
-  })
-}

 new RpcListener(getDependencies)

 /**


@@ -1,21 +0,0 @@
#!/bin/bash
cd "$(dirname "${BASH_SOURCE[0]}")/.."
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
DOCKER_PLATFORM=linux/${ARCH}
case $ARCH in
x86_64)
DOCKER_PLATFORM=linux/amd64;;
aarch64)
DOCKER_PLATFORM=linux/arm64;;
esac
docker run --rm $USE_TTY --platform=$DOCKER_PLATFORM -eARCH --privileged -v "$(pwd):/root/start-os" start9/build-env /root/start-os/container-runtime/update-image.sh
if [ "$(ls -nd "container-runtime/rootfs.${ARCH}.squashfs" | awk '{ print $3 }')" != "$UID" ]; then
docker run --rm $USE_TTY -v "$(pwd):/root/start-os" start9/build-env chown -R $UID:$UID /root/start-os/container-runtime
fi


@@ -9,34 +9,56 @@ if [ "$ARCH" = "riscv64" ]; then
RUST_ARCH="riscv64gc" RUST_ARCH="riscv64gc"
fi fi
mount -t tmpfs tmpfs /tmp if mountpoint -q tmp/combined; then sudo umount -l tmp/combined; fi
mkdir -p /tmp/lower /tmp/upper /tmp/work /tmp/combined if mountpoint -q tmp/lower; then sudo umount tmp/lower; fi
mount -o loop debian.${ARCH}.squashfs /tmp/lower sudo rm -rf tmp
mount -t overlay -olowerdir=/tmp/lower,upperdir=/tmp/upper,workdir=/tmp/work overlay /tmp/combined mkdir -p tmp/lower tmp/upper tmp/work tmp/combined
if which squashfuse > /dev/null; then
sudo squashfuse debian.${ARCH}.squashfs tmp/lower
else
sudo mount debian.${ARCH}.squashfs tmp/lower
fi
if which fuse-overlayfs > /dev/null; then
sudo fuse-overlayfs -olowerdir=tmp/lower,upperdir=tmp/upper,workdir=tmp/work overlay tmp/combined
else
sudo mount -t overlay -olowerdir=tmp/lower,upperdir=tmp/upper,workdir=tmp/work overlay tmp/combined
fi
mkdir -p /tmp/combined/usr/lib/startos/ QEMU=
rsync -a --copy-unsafe-links --info=progress2 dist/ /tmp/combined/usr/lib/startos/init/ if [ "$ARCH" != "$(uname -m)" ]; then
chown -R 0:0 /tmp/combined/usr/lib/startos/ QEMU=/usr/bin/qemu-${ARCH}-static
cp container-runtime.service /tmp/combined/lib/systemd/system/container-runtime.service if ! which qemu-$ARCH-static > /dev/null; then
chown 0:0 /tmp/combined/lib/systemd/system/container-runtime.service >&2 echo qemu-user-static is required for cross-platform builds
cp container-runtime-failure.service /tmp/combined/lib/systemd/system/container-runtime-failure.service sudo umount tmp/combined
chown 0:0 /tmp/combined/lib/systemd/system/container-runtime-failure.service sudo umount tmp/lower
cp ../core/target/${RUST_ARCH}-unknown-linux-musl/release/start-container /tmp/combined/usr/bin/start-container sudo rm -rf tmp
echo -e '#!/bin/bash\nexec start-container "$@"' > /tmp/combined/usr/bin/start-cli # TODO: remove exit 1
chmod +x /tmp/combined/usr/bin/start-cli fi
chown 0:0 /tmp/combined/usr/bin/start-container sudo cp $(which qemu-$ARCH-static) tmp/combined${QEMU}
echo container-runtime | sha256sum | head -c 32 | cat - <(echo) > /tmp/combined/etc/machine-id fi
rm -f /tmp/combined/etc/resolv.conf
cp /etc/resolv.conf /tmp/combined/etc/resolv.conf sudo mkdir -p tmp/combined/usr/lib/startos/
for fs in proc sys dev; do sudo rsync -a --copy-unsafe-links dist/ tmp/combined/usr/lib/startos/init/
mount --bind /$fs /tmp/combined/$fs sudo chown -R 0:0 tmp/combined/usr/lib/startos/
done sudo cp container-runtime.service tmp/combined/lib/systemd/system/container-runtime.service
cat deb-install.sh | chroot /tmp/combined /bin/bash sudo chown 0:0 tmp/combined/lib/systemd/system/container-runtime.service
for fs in proc sys dev; do sudo cp container-runtime-failure.service tmp/combined/lib/systemd/system/container-runtime-failure.service
umount /tmp/combined/$fs sudo chown 0:0 tmp/combined/lib/systemd/system/container-runtime-failure.service
done sudo cp ../core/target/${RUST_ARCH}-unknown-linux-musl/release/containerbox tmp/combined/usr/bin/start-container
truncate -s 0 /tmp/combined/etc/machine-id echo -e '#!/bin/bash\nexec start-container "$@"' | sudo tee tmp/combined/usr/bin/start-cli # TODO: remove
sudo chmod +x tmp/combined/usr/bin/start-cli
sudo chown 0:0 tmp/combined/usr/bin/start-container
echo container-runtime | sha256sum | head -c 32 | cat - <(echo) | sudo tee tmp/combined/etc/machine-id
cat deb-install.sh | sudo systemd-nspawn --console=pipe -D tmp/combined $QEMU /bin/bash
sudo truncate -s 0 tmp/combined/etc/machine-id
if [ -n "$QEMU" ]; then
sudo rm tmp/combined${QEMU}
fi
rm -f rootfs.${ARCH}.squashfs rm -f rootfs.${ARCH}.squashfs
mkdir -p ../build/lib/container-runtime mkdir -p ../build/lib/container-runtime
mksquashfs /tmp/combined rootfs.${ARCH}.squashfs sudo mksquashfs tmp/combined rootfs.${ARCH}.squashfs
sudo umount tmp/combined
sudo umount tmp/lower
sudo rm -rf tmp

core/.gitignore

@@ -8,4 +8,4 @@ secrets.db
 .env
 .editorconfig
 proptest-regressions/**/*
-/bindings/*
+/startos/bindings/*


@@ -1,72 +0,0 @@
# Core Architecture
The Rust backend daemon for StartOS.
## Binaries
The crate produces a single binary `startbox` that is symlinked under different names for different behavior:
- `startbox` / `startd` — Main daemon
- `start-cli` — CLI interface
- `start-container` — Runs inside LXC containers; communicates with host and manages subcontainers
- `registrybox` — Registry daemon
- `tunnelbox` — VPN/tunnel daemon
## Crate Structure
- `startos` — Core library that supports building `startbox`
- `helpers` — Utility functions used across both `startos` and `js-engine`
- `models` — Types shared across `startos`, `js-engine`, and `helpers`
## Key Modules
- `src/context/` — Context types (RpcContext, CliContext, InitContext, DiagnosticContext)
- `src/service/` — Service lifecycle management with actor pattern (`service_actor.rs`)
- `src/db/model/` — Patch-DB models (`public.rs` synced to frontend, `private.rs` backend-only)
- `src/net/` — Networking (DNS, ACME, WiFi, Tor via Arti, WireGuard)
- `src/s9pk/` — S9PK package format (merkle archive)
- `src/registry/` — Package registry management
## RPC Pattern
The API is JSON-RPC (not REST). All endpoints are RPC methods organized in a hierarchical command structure using [rpc-toolkit](https://github.com/Start9Labs/rpc-toolkit). Handlers are registered in a tree of `ParentHandler` nodes, with four handler types: `from_fn_async` (standard), `from_fn_async_local` (non-Send), `from_fn` (sync), and `from_fn_blocking` (blocking). Metadata like `.with_about()` drives middleware and documentation.
See [rpc-toolkit.md](rpc-toolkit.md) for full handler patterns and configuration.
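
As a rough sketch of this shape — the endpoint, params type, and exact builder signatures below are illustrative assumptions, not the real start-os handler tree:

```rust
use rpc_toolkit::{from_fn_async, ParentHandler};
use serde::{Deserialize, Serialize};

use crate::context::RpcContext; // real module per "Key Modules" above
use crate::prelude::Error; // import path assumed for illustration

// Hypothetical params type; real endpoints define their own.
#[derive(Deserialize, Serialize)]
pub struct EchoParams {
    pub message: String,
}

// A standard `from_fn_async` handler: context in, typed params in, Result out.
pub async fn echo(_ctx: RpcContext, params: EchoParams) -> Result<String, Error> {
    Ok(params.message)
}

// Handlers hang off a ParentHandler tree; metadata like .with_about()
// drives middleware and documentation. Generics and signatures simplified.
pub fn example_handlers() -> ParentHandler<RpcContext> {
    ParentHandler::new().subcommand(
        "echo",
        from_fn_async(echo).with_about(|| "Echo the given message"),
    )
}
```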
## Patch-DB Patterns
Patch-DB provides diff-based state synchronization. Changes to `db/model/public.rs` automatically sync to the frontend.
**Key patterns:**
- `db.peek().await` — Get a read-only snapshot of the database state
- `db.mutate(|db| { ... }).await` — Apply mutations atomically, returns `MutateResult`
- `#[derive(HasModel)]` — Derive macro for types stored in the database, generates typed accessors
**Generated accessor types** (from `HasModel` derive):
- `as_field()` — Immutable reference: `&Model<T>`
- `as_field_mut()` — Mutable reference: `&mut Model<T>`
- `into_field()` — Owned value: `Model<T>`
**`Model<T>` APIs** (from `db/prelude.rs`):
- `.de()` — Deserialize to `T`
- `.ser(&value)` — Serialize from `T`
- `.mutate(|v| ...)` — Deserialize, mutate, reserialize
- For maps: `.keys()`, `.as_idx(&key)`, `.as_idx_mut(&key)`, `.insert()`, `.remove()`, `.contains_key()`
See [patchdb.md](patchdb.md) for `TypedDbWatch<T>` construction, API, and usage patterns.
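
As a rough composite of the patterns above — the `PatchDb` handle name and the `as_public`/`as_server_info`/`as_hostname` accessor chain are hypothetical stand-ins for what `HasModel` generates against the real schema:

```rust
// Sketch only: assumes a database handle exposing peek()/mutate() as above.
async fn append_hostname_suffix(db: &PatchDb) -> Result<(), Error> {
    // Read-only snapshot; .de() deserializes the leaf model to its type
    let hostname: String = db
        .peek()
        .await
        .as_public()
        .as_server_info()
        .as_hostname()
        .de()?;

    // Atomic read-modify-write; the closure receives a mutable model
    db.mutate(|db| {
        db.as_public_mut()
            .as_server_info_mut()
            .as_hostname_mut()
            .ser(&format!("{hostname}-backup"))
    })
    .await?;

    Ok(())
}
```

Because the change happens inside `db.mutate`, it is applied atomically, and (living under `public.rs`) it would sync to the frontend as described above.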
## i18n
See [i18n-patterns.md](i18n-patterns.md) for internationalization key conventions and the `t!()` macro.
## Rust Utilities & Patterns
See [core-rust-patterns.md](core-rust-patterns.md) for common utilities (Invoke trait, Guard pattern, mount guards, Apply trait, etc.).
## Related Documentation
- [rpc-toolkit.md](rpc-toolkit.md) — JSON-RPC handler patterns
- [patchdb.md](patchdb.md) — Patch-DB watch patterns and TypedDbWatch
- [i18n-patterns.md](i18n-patterns.md) — Internationalization conventions
- [core-rust-patterns.md](core-rust-patterns.md) — Common Rust utilities
- [s9pk-structure.md](s9pk-structure.md) — S9PK package format


@@ -1,28 +0,0 @@
# Core — Rust Backend
The Rust backend daemon for StartOS.
## Architecture
See [ARCHITECTURE.md](ARCHITECTURE.md) for binaries, modules, Patch-DB patterns, and related documentation.
See [CONTRIBUTING.md](CONTRIBUTING.md) for how to add RPC endpoints, TS-exported types, and i18n keys.
## Quick Reference
```bash
cargo check -p start-os # Type check
make test-core # Run tests
make ts-bindings # Regenerate TS types after changing #[ts(export)] structs
cd sdk && make baseDist dist # Rebuild SDK after ts-bindings
```
## Operating Rules
- Always run `cargo check -p start-os` after modifying Rust code
- When adding RPC endpoints, follow the patterns in [rpc-toolkit.md](rpc-toolkit.md)
- When modifying `#[ts(export)]` types, regenerate bindings and rebuild the SDK (see [ARCHITECTURE.md](../ARCHITECTURE.md#build-pipeline))
- **i18n is mandatory** — any user-facing string must go in `core/locales/i18n.yaml` with all 5 locales (`en_US`, `de_DE`, `es_ES`, `fr_FR`, `pl_PL`). This includes CLI subcommand descriptions (`about.<name>`), CLI arg help (`help.arg.<name>`), error messages (`error.<name>`), notifications, setup messages, and any other text shown to users. Entries are alphabetically ordered within their section. See [i18n-patterns.md](i18n-patterns.md)
- When using DB watches, follow the `TypedDbWatch<T>` patterns in [patchdb.md](patchdb.md)
- **Always use `.invoke(ErrorKind::...)` instead of `.status()` when running CLI commands** via `tokio::process::Command`. The `Invoke` trait (from `crate::util::Invoke`) captures stdout/stderr and checks exit codes properly. Using `.status()` leaks stderr directly to system logs, creating noise. For check-then-act patterns (e.g. `iptables -C`), use `.invoke(...).await.is_ok()` / `.is_err()` instead of `.status().await.map_or(false, |s| s.success())`. See the sketch after this list.
- Always use file utils in util::io instead of tokio::fs when available
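
A minimal sketch of that check-then-act shape — the `iptables` rule, the `ErrorKind::Network` variant, and its import path are illustrative assumptions:

```rust
use tokio::process::Command;

use crate::prelude::ErrorKind; // import path assumed for illustration
use crate::util::Invoke;

// Returns true iff the rule already exists: `iptables -C` exits non-zero
// when the rule is absent, and invoke() captures stdout/stderr instead of
// leaking them to the system logs.
async fn forwarding_rule_exists() -> bool {
    Command::new("iptables")
        .arg("-C")
        .arg("FORWARD")
        .arg("-i")
        .arg("br-start9")
        .arg("-j")
        .arg("ACCEPT")
        .invoke(ErrorKind::Network)
        .await
        .is_ok()
}
```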


@@ -1,49 +0,0 @@
# Contributing to Core
For general environment setup, cloning, and build system, see the root [CONTRIBUTING.md](../CONTRIBUTING.md).
## Prerequisites
- [Rust](https://rustup.rs) (nightly for formatting)
- [rust-analyzer](https://rust-analyzer.github.io/) recommended
- [Docker](https://docs.docker.com/get-docker/) (for cross-compilation via `rust-zig-builder` container)
## Common Commands
```bash
cargo check -p start-os # Type check
cargo test --features=test # Run tests (or: make test-core)
make format # Format with nightly rustfmt
cd core && cargo test <test_name> --features=test # Run a specific test
```
## Adding a New RPC Endpoint
1. Define a params struct with `#[derive(Deserialize, Serialize)]`
2. Choose a handler type (`from_fn_async` for most cases)
3. Write the handler function: `async fn my_handler(ctx: RpcContext, params: MyParams) -> Result<MyResponse, Error>`
4. Register it in the appropriate `ParentHandler` tree
5. If params/response should be available in TypeScript, add `#[derive(TS)]` and `#[ts(export)]`
See [rpc-toolkit.md](rpc-toolkit.md) for full handler patterns and all four handler types.
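
Sketching those steps end to end — the endpoint name, params struct, and import paths here are hypothetical:

```rust
use serde::{Deserialize, Serialize};
use ts_rs::TS;

use crate::context::RpcContext;
use crate::prelude::Error; // import path assumed for illustration

// Steps 1 and 5: params struct, also exported to TypeScript
#[derive(Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct SetNicknameParams {
    pub nickname: String,
}

// Steps 2 and 3: the handler function (from_fn_async style)
pub async fn set_nickname(
    _ctx: RpcContext,
    params: SetNicknameParams,
) -> Result<(), Error> {
    tracing::info!("setting nickname to {}", params.nickname);
    Ok(())
}

// Step 4: register it in the appropriate ParentHandler tree, e.g.
//   .subcommand("set-nickname", from_fn_async(set_nickname))
```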
## Adding TS-Exported Types
When a Rust type needs to be available in TypeScript (for the web frontend or SDK):
1. Add `ts_rs::TS` to the derive list and `#[ts(export)]` to the struct/enum
2. Use `#[serde(rename_all = "camelCase")]` for JS-friendly field names
3. For types that don't implement TS (like `DateTime<Utc>`, `exver::Version`), use `#[ts(type = "string")]` overrides
4. For `u64` fields that should be JS `number` (not `bigint`), use `#[ts(type = "number")]`
5. Run `make ts-bindings` to regenerate — files appear in `core/bindings/` then sync to `sdk/base/lib/osBindings/`
6. Rebuild the SDK: `cd sdk && make baseDist dist`
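
For instance, a hypothetical exported type exercising those overrides:

```rust
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use ts_rs::TS;

// Hypothetical struct for illustration only
#[derive(Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")] // JS-friendly field names
#[ts(export)]
pub struct BackupRunInfo {
    pub package_id: String,
    // DateTime<Utc> does not implement TS; export it as a string
    #[ts(type = "string")]
    pub started_at: DateTime<Utc>,
    // u64 would otherwise export as bigint; force a JS number
    #[ts(type = "number")]
    pub bytes_written: u64,
}
```

After `make ts-bindings`, the matching interface would appear in `core/bindings/` and sync to `sdk/base/lib/osBindings/`.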
## Adding i18n Keys
1. Add the key to `core/locales/i18n.yaml` with all 5 language translations
2. Use the `t!("your.key.name")` macro in Rust code
3. Follow existing namespace conventions — match the module path where the key is used
4. Use kebab-case for multi-word segments
5. Translations are validated at compile time
See [i18n-patterns.md](i18n-patterns.md) for full conventions.
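
A sketch of the round trip, with an invented key — the YAML shape follows the description above, and all five locales are required:

```rust
// core/locales/i18n.yaml (hypothetical entry, alphabetized in its section):
//
//   error.backup-target-unreachable:
//     en_US: Backup target is unreachable
//     de_DE: Sicherungsziel ist nicht erreichbar
//     es_ES: No se puede acceder al destino de la copia de seguridad
//     fr_FR: La cible de sauvegarde est inaccessible
//     pl_PL: Cel kopii zapasowej jest nieosiągalny

// `t!` comes from the crate's rust-i18n setup; the lookup is
// validated against the YAML at compile time.
fn backup_error_message() -> String {
    t!("error.backup-target-unreachable").to_string()
}
```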

core/Cargo.lock

File diff suppressed because it is too large.


@@ -1,254 +1,3 @@
[package] [workspace]
authors = ["Aiden McClelland <me@drbonez.dev>"]
description = "The core of StartOS"
documentation = "https://docs.rs/start-os"
edition = "2024"
keywords = [
"bitcoin",
"full-node",
"lightning",
"privacy",
"raspberry-pi",
"self-hosted",
]
license = "MIT"
name = "start-os"
readme = "README.md"
repository = "https://github.com/Start9Labs/start-os"
version = "0.4.0-alpha.23" # VERSION_BUMP
[lib] members = ["helpers", "models", "startos"]
name = "startos"
path = "src/lib.rs"
[[bin]]
name = "startbox"
path = "src/main/startbox.rs"
[[bin]]
name = "start-cli"
path = "src/main/start-cli.rs"
[[bin]]
name = "start-container"
path = "src/main/start-container.rs"
[[bin]]
name = "registrybox"
path = "src/main/registrybox.rs"
[[bin]]
name = "tunnelbox"
path = "src/main/tunnelbox.rs"
[features]
beta = []
console = ["console-subscriber", "tokio/tracing"]
default = []
dev = []
test = []
unstable = ["backtrace-on-stack-overflow"]
[dependencies]
aes = { version = "0.7.5", features = ["ctr"] }
async-acme = { version = "0.6.0", git = "https://github.com/dr-bonez/async-acme.git", features = [
"use_rustls",
"use_tokio",
] }
async-compression = { version = "0.4.32", features = [
"brotli",
"gzip",
"tokio",
"zstd",
] }
async-stream = "0.3.5"
async-trait = "0.1.74"
axum = { version = "0.8.4", features = ["http2", "ws"] }
backtrace-on-stack-overflow = { version = "0.3.0", optional = true }
base32 = "0.5.0"
base64 = "0.22.1"
base64ct = "1.6.0"
basic-cookies = "0.1.4"
blake3 = { version = "1.5.0", features = ["mmap", "rayon"] }
bytes = "1"
chrono = { version = "0.4.31", features = ["serde"] }
clap = { version = "4.4.12", features = ["string"] }
color-eyre = "0.6.2"
console = "0.16.2"
console-subscriber = { version = "0.5.0", optional = true }
const_format = "0.2.34"
cookie = "0.18.0"
cookie_store = "0.22.0"
der = { version = "0.7.9", features = ["derive", "pem"] }
digest = "0.10.7"
divrem = "1.0.0"
dns-lookup = "3.0.1"
ed25519 = { version = "2.2.3", features = ["alloc", "pem", "pkcs8"] }
ed25519-dalek = { version = "2.2.0", features = [
"digest",
"hazmat",
"pkcs8",
"rand_core",
"serde",
"zeroize",
] }
ed25519-dalek-v1 = { package = "ed25519-dalek", version = "1" }
exver = { version = "0.2.0", git = "https://github.com/Start9Labs/exver-rs.git", features = [
"serde",
] }
fd-lock-rs = "0.1.4"
form_urlencoded = "1.2.1"
futures = "0.3.28"
gpt = "4.1.0"
hashing-serializer = "0.1.1"
hex = "0.4.3"
hickory-server = { version = "0.25.2", features = ["resolver"] }
hmac = "0.12.1"
http = "1.0.0"
http-body-util = "0.1"
hyper = { version = "1.5", features = ["http1", "http2", "server"] }
hyper-util = { version = "0.1.10", features = [
"http1",
"http2",
"server",
"server-auto",
"server-graceful",
"service",
"tokio",
] }
id-pool = { version = "0.2.2", default-features = false, features = [
"serde",
"u16",
] }
iddqd = "0.3.14"
imbl = { version = "6", features = ["serde", "small-chunks"] }
imbl-value = { version = "0.4.3", features = ["ts-rs"] }
include_dir = { version = "0.7.3", features = ["metadata"] }
indexmap = { version = "2.0.2", features = ["serde"] }
indicatif = { version = "0.18.3", features = ["tokio"] }
inotify = "0.11.0"
integer-encoding = { version = "4.0.0", features = ["tokio_async"] }
ipnet = { version = "2.8.0", features = ["serde"] }
isocountry = "0.3.2"
itertools = "0.14.0"
jaq-core = "0.10.1"
jaq-std = "0.10.0"
josekit = "0.10.3"
jsonpath_lib = { git = "https://github.com/Start9Labs/jsonpath.git" }
lazy_async_pool = "0.3.3"
lazy_format = "2.0"
lazy_static = "1.4.0"
lettre = { version = "0.11.18", default-features = false, features = [
"aws-lc-rs",
"builder",
"hostname",
"pool",
"rustls-platform-verifier",
"smtp-transport",
"tokio1-rustls",
] }
libc = "0.2.149"
log = "0.4.20"
mbrman = "0.6.0"
miette = { version = "7.6.0", features = ["fancy"] }
mio = "1"
new_mime_guess = "4"
nix = { version = "0.30.1", features = [
"fs",
"hostname",
"mount",
"net",
"process",
"sched",
"signal",
"user",
] }
nom = "8.0.0"
num = "0.4.1"
num_cpus = "1.16.0"
num_enum = "0.7.0"
once_cell = "1.19.0"
openssh-keys = "0.6.2"
openssl = { version = "0.10.57", features = ["vendored"] }
p256 = { version = "0.13.2", features = ["pem"] }
patch-db = { version = "*", path = "../patch-db/core", features = ["trace"] }
pbkdf2 = "0.12.2"
pin-project = "1.1.3"
pkcs8 = { version = "0.10.2", features = ["std"] }
prettytable-rs = "0.10.0"
proptest = "1.3.1"
proptest-derive = "0.7.0"
qrcode = "0.14.1"
r3bl_tui = "0.7.6"
rand = "0.9.2"
regex = "1.10.2"
reqwest = { version = "0.12.25", features = [
"http2",
"json",
"socks",
"stream",
] }
reqwest_cookie_store = "0.9.0"
rpassword = "7.2.0"
rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git" }
rust-argon2 = "3.0.0"
rust-i18n = "3.1.5"
semver = { version = "1.0.20", features = ["serde"] }
serde = { version = "1.0", features = ["derive", "rc"] }
serde_cbor = { package = "ciborium", version = "0.2.1" }
serde_json = "1.0"
serde_toml = { package = "toml", version = "0.9.9+spec-1.0.0" }
serde_yaml = { package = "serde_yml", version = "0.0.12" }
sha-crypt = "0.5.0"
sha2 = "0.10.2"
sha3 = "0.10"
signal-hook = "0.3.17"
socket2 = { version = "0.6.0", features = ["all"] }
socks5-impl = { version = "0.7.2", features = ["client", "server"] }
sqlx = { version = "0.8.6", features = [
"postgres",
"runtime-tokio-rustls",
], default-features = false }
sscanf = "0.4.1"
ssh-key = { version = "0.6.2", features = ["ed25519"] }
tar = "0.4.40"
termion = "4.0.5"
textwrap = "0.16.1"
thiserror = "2.0.12"
tokio = { version = "1.38.1", features = ["full"] }
tokio-rustls = "0.26.4"
tokio-stream = { version = "0.1.14", features = ["io-util", "net", "sync"] }
tokio-tar = { git = "https://github.com/dr-bonez/tokio-tar.git" }
tokio-tungstenite = { version = "0.26.2", features = ["native-tls", "url"] }
tokio-util = { version = "0.7.9", features = ["io"] }
tower-service = "0.3.3"
tracing = "0.1.39"
tracing-error = "0.2.0"
tracing-journald = "0.3.0"
tracing-subscriber = { version = "=0.3.19", features = ["env-filter"] }
ts-rs = "9.0.1"
typed-builder = "0.23.2"
url = { version = "2.4.1", features = ["serde"] }
uuid = { version = "1.4.1", features = ["v4"] }
visit-rs = "0.1.1"
x25519-dalek = { version = "2.0.1", features = ["static_secrets"] }
zbus = "5.1.1"
[dev-dependencies]
clap_mangen = "0.2.33"
[target.'cfg(target_os = "linux")'.dependencies]
procfs = "0.18.0"
pty-process = "0.5.1"
[profile.test]
opt-level = 3
[profile.dev]
opt-level = 3
[profile.dev.package.backtrace]
opt-level = 3
[profile.dev.package.sqlx-macros]
opt-level = 3


@@ -22,7 +22,9 @@ several different names for different behavior:
 - `start-sdk`: This is a CLI tool that aids in building and packaging services
   you wish to deploy to StartOS

-## Documentation
+## Questions

-- [ARCHITECTURE.md](ARCHITECTURE.md) — Backend architecture, modules, and patterns
-- [CONTRIBUTING.md](CONTRIBUTING.md) — How to contribute to core
+If you have questions about how various pieces of the backend system work, open
+an issue and tag the following people
+
+- dr-bonez


@@ -5,31 +5,11 @@ cd "$(dirname "${BASH_SOURCE[0]}")"
source ./builder-alias.sh source ./builder-alias.sh
set -ea set -ea
INSTALL=false
while [[ $# -gt 0 ]]; do
case $1 in
--install)
INSTALL=true
shift
;;
*)
>&2 echo "Unknown option: $1"
exit 1
;;
esac
done
shopt -s expand_aliases shopt -s expand_aliases
PROFILE=${PROFILE:-release} PROFILE=${PROFILE:-release}
if [ "${PROFILE}" = "release" ]; then if [ "${PROFILE}" = "release" ]; then
BUILD_FLAGS="--release" BUILD_FLAGS="--release"
else
if [ "$PROFILE" != "debug" ]; then
>&2 echo "Unknown profile $PROFILE: falling back to debug..."
PROFILE=debug
fi
fi fi
if [ -z "${ARCH:-}" ]; then if [ -z "${ARCH:-}" ]; then
@@ -60,24 +40,27 @@ if [ -z "${TARGET:-}" ]; then
fi fi
fi fi
cd ../.. cd ..
# Ensure GIT_HASH.txt exists if not created by higher-level build steps
if [ ! -f GIT_HASH.txt ] && command -v git >/dev/null 2>&1; then
git rev-parse HEAD > GIT_HASH.txt || true
fi
FEATURES="$(echo "${ENVIRONMENT:-}" | sed 's/-/,/g')" FEATURES="$(echo "${ENVIRONMENT:-}" | sed 's/-/,/g')"
FEATURE_ARGS="cli"
if [ -n "$FEATURES" ]; then
FEATURE_ARGS="$FEATURE_ARGS,$FEATURES"
fi
RUSTFLAGS="" RUSTFLAGS=""
if [[ "${ENVIRONMENT:-}" =~ (^|-)console($|-) ]]; then if [[ "${ENVIRONMENT:-}" =~ (^|-)console($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable" RUSTFLAGS="--cfg tokio_unstable"
fi fi
if [[ "${ENVIRONMENT:-}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="$RUSTFLAGS -C debuginfo=1"
fi
echo "FEATURES=\"$FEATURES\"" echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\"" echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin start-cli --target=$TARGET rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features $FEATURE_ARGS --locked --bin start-cli --target=$TARGET
if [ "$(ls -nd "core/target/$TARGET/$PROFILE/start-cli" | awk '{ print $3 }')" != "$UID" ]; then if [ "$(ls -nd "core/target/$TARGET/release/start-cli" | awk '{ print $3 }')" != "$UID" ]; then
rust-zig-builder sh -c "cd core && chown -R $UID:$UID target && chown -R $UID:$UID /usr/local/cargo" rust-zig-builder sh -c "cd core && chown -R $UID:$UID target && chown -R $UID:$UID /root/.cargo"
fi
if [ "$INSTALL" = "true" ]; then
cp "core/target/$TARGET/$PROFILE/start-cli" ~/.cargo/bin/start-cli
fi fi


@@ -10,11 +10,6 @@ shopt -s expand_aliases
PROFILE=${PROFILE:-release} PROFILE=${PROFILE:-release}
if [ "${PROFILE}" = "release" ]; then if [ "${PROFILE}" = "release" ]; then
BUILD_FLAGS="--release" BUILD_FLAGS="--release"
else
if [ "$PROFILE" != "debug"]; then
>&2 echo "Unknown profile $PROFILE: falling back to debug..."
PROFILE=debug
fi
fi fi
if [ -z "$ARCH" ]; then if [ -z "$ARCH" ]; then
@@ -30,7 +25,7 @@ if [ "$ARCH" = "riscv64" ]; then
RUST_ARCH="riscv64gc" RUST_ARCH="riscv64gc"
fi fi
cd ../.. cd ..
FEATURES="$(echo $ENVIRONMENT | sed 's/-/,/g')" FEATURES="$(echo $ENVIRONMENT | sed 's/-/,/g')"
RUSTFLAGS="" RUSTFLAGS=""
@@ -38,13 +33,9 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable" RUSTFLAGS="--cfg tokio_unstable"
fi fi
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="$RUSTFLAGS -C debuginfo=1"
fi
echo "FEATURES=\"$FEATURES\"" echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\"" echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin start-container --target=$RUST_ARCH-unknown-linux-musl rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features cli-container,$FEATURES --locked --bin containerbox --target=$RUST_ARCH-unknown-linux-musl
if [ "$(ls -nd "core/target/$RUST_ARCH-unknown-linux-musl/$PROFILE/start-container" | awk '{ print $3 }')" != "$UID" ]; then if [ "$(ls -nd "core/target/$RUST_ARCH-unknown-linux-musl/release/containerbox" | awk '{ print $3 }')" != "$UID" ]; then
rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID /usr/local/cargo" rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID /root/.cargo"
fi fi


@@ -10,11 +10,6 @@ shopt -s expand_aliases
PROFILE=${PROFILE:-release} PROFILE=${PROFILE:-release}
if [ "${PROFILE}" = "release" ]; then if [ "${PROFILE}" = "release" ]; then
BUILD_FLAGS="--release" BUILD_FLAGS="--release"
else
if [ "$PROFILE" != "debug"]; then
>&2 echo "Unknown profile $PROFILE: falling back to debug..."
PROFILE=debug
fi
fi fi
if [ -z "$ARCH" ]; then if [ -z "$ARCH" ]; then
@@ -30,7 +25,7 @@ if [ "$ARCH" = "riscv64" ]; then
RUST_ARCH="riscv64gc" RUST_ARCH="riscv64gc"
fi fi
cd ../.. cd ..
FEATURES="$(echo $ENVIRONMENT | sed 's/-/,/g')" FEATURES="$(echo $ENVIRONMENT | sed 's/-/,/g')"
RUSTFLAGS="" RUSTFLAGS=""
@@ -38,13 +33,9 @@ if [[ "${ENVIRONMENT}" =~ (^|-)console($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable" RUSTFLAGS="--cfg tokio_unstable"
fi fi
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="$RUSTFLAGS -C debuginfo=1"
fi
echo "FEATURES=\"$FEATURES\"" echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\"" echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --features=$FEATURES --locked --bin registrybox --target=$RUST_ARCH-unknown-linux-musl rust-zig-builder cargo zigbuild --manifest-path=./core/Cargo.toml $BUILD_FLAGS --no-default-features --features cli-registry,registry,$FEATURES --locked --bin registrybox --target=$RUST_ARCH-unknown-linux-musl
if [ "$(ls -nd "core/target/$RUST_ARCH-unknown-linux-musl/$PROFILE/registrybox" | awk '{ print $3 }')" != "$UID" ]; then if [ "$(ls -nd "core/target/$RUST_ARCH-unknown-linux-musl/release/registrybox" | awk '{ print $3 }')" != "$UID" ]; then
rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID /usr/local/cargo" rust-zig-builder sh -c "chown -R $UID:$UID core/target && chown -R $UID:$UID /root/.cargo"
fi fi

Some files were not shown because too many files have changed in this diff.