# Compare commits

145 commits: `v0.4.0-alp...bugfix/alp`
| SHA1 |
| --- |
| 24c1f47886 |
| 1b9fcaad2b |
| 900d86ab83 |
| c1a328e5ca |
| 2903b949ea |
| 8ac8dae6fd |
| 0e8dd82910 |
| 873922d9e3 |
| c9ce2c57b3 |
| 6c9cbebe9c |
| dd9837b9b2 |
| 7313693a9e |
| 66a606c14e |
| 7352602f58 |
| 4ab51c4570 |
| 1c1ae11241 |
| cc6a134a32 |
| 3ae24e63e2 |
| 8562e1e19d |
| 90d8d39adf |
| 9f7bc74a1e |
| 65e1c9e5d8 |
| 5a6b2a5588 |
| e86b06c2cd |
| 7b8bb92d60 |
| ebb7916ecd |
| b5ac0b5200 |
| a90b96cddd |
| d1b80cffb8 |
| ae5fe88a40 |
| fc4b887b71 |
| a81b1aa5a6 |
| d8663cd3ae |
| 9f36bc5b5d |
| e2804f9b88 |
| 3cf9dbc6d2 |
| 0fa069126b |
| 50004da782 |
| 517bf80fc8 |
| 6091314981 |
| c485edfa12 |
| fd54e9ca91 |
| d1444b1175 |
| 3024db2654 |
| dba1cb93c1 |
| d12b278a84 |
| 0070a8e692 |
| efc12691bd |
| effcec7e2e |
| 10a5bc0280 |
| 90b73dd320 |
| 324f9d17cd |
| a782cb270b |
| c59c619e12 |
| 00eecf3704 |
| b67e554e76 |
| 36b8fda6db |
| d2f12a7efc |
| 8dd50eb9c0 |
| 73c6696873 |
| 2586f841b8 |
| ccf6fa34b1 |
| 9546fc9ce0 |
| 3441d4d6d6 |
| 7b05a7c585 |
| 76de6be7de |
| c52fcf5087 |
| be921b7865 |
| 43e514f9ee |
| 8ef4ef4895 |
| 36bf55c133 |
| f56262b845 |
| 5316d6ea68 |
| ea8a7c0a57 |
| 68ae365897 |
| ba71f205dd |
| 95a519cbe8 |
| efd90d3bdf |
| a4bae73592 |
| 8b89f016ad |
| 3320391fcc |
| 26a68afdef |
| 0260c1532d |
| b6262c8e13 |
| ba740a9ee2 |
| f2142f0bb3 |
| 86ca23c093 |
| 463b6ca4ef |
| 58e0b166cb |
| 2a678bb017 |
| 5664456b77 |
| 3685b7e57e |
| 989d5f73b1 |
| 4f84073cb5 |
| c190295c34 |
| 60875644a1 |
| 113b09ad01 |
| 2605d0e671 |
| d232b91d31 |
| c65db31fd9 |
| 99871805bd |
| e8ef39adad |
| 466b9217b5 |
| c9a7f519b9 |
| 96ae532879 |
| eda08d5b0f |
| 7c12b58bb5 |
| 5446c89bc0 |
| 2d0251e585 |
| f41710c892 |
| df3f79f282 |
| f8df692865 |
| 0c6d3b188d |
| e7a38863ab |
| 720e0fcdab |
| bf8ff84522 |
| 5a9510238e |
| 7b3c74179b |
| cd70fa4c32 |
| 83133ced6a |
| 6c5179a179 |
| e33ab39b85 |
| 9567bcec1b |
| 550b16dc0b |
| 5d8331b7f7 |
| e35b643e51 |
| bc6a92677b |
| f52072e6ec |
| 9c43c43a46 |
| 0430e0f930 |
| b945243d1a |
| d8484a8b26 |
| 3c27499795 |
| 7c772e873d |
| db2fab245e |
| a9c9917f1a |
| 23e2e9e9cc |
| 2369e92460 |
| a53b15f2a3 |
| 72eb8b1eb6 |
| 4db54f3b83 |
| 24eb27f005 |
| 009d76ea35 |
| 6e8a425eb1 |
| 66188d791b |

**.claude/settings.json** (new file, 1 line)

```json
{}
```

**.github/actions/setup-build/action.yml** (new file, 81 lines)

```yaml
name: Setup Build Environment
description: Common build environment setup steps

inputs:
  nodejs-version:
    description: Node.js version
    required: true
  setup-python:
    description: Set up Python
    required: false
    default: "false"
  setup-docker:
    description: Set up Docker QEMU and Buildx
    required: false
    default: "true"
  setup-sccache:
    description: Configure sccache for GitHub Actions
    required: false
    default: "true"
  free-space:
    description: Remove unnecessary packages to free disk space
    required: false
    default: "true"

runs:
  using: composite
  steps:
    - name: Free disk space
      if: inputs.free-space == 'true'
      shell: bash
      run: |
        sudo apt-get remove --purge -y azure-cli || true
        sudo apt-get remove --purge -y firefox || true
        sudo apt-get remove --purge -y ghc-* || true
        sudo apt-get remove --purge -y google-cloud-sdk || true
        sudo apt-get remove --purge -y google-chrome-stable || true
        sudo apt-get remove --purge -y powershell || true
        sudo apt-get remove --purge -y php* || true
        sudo apt-get remove --purge -y ruby* || true
        sudo apt-get remove --purge -y mono-* || true
        sudo apt-get autoremove -y
        sudo apt-get clean
        sudo rm -rf /usr/lib/jvm
        sudo rm -rf /usr/local/.ghcup
        sudo rm -rf /usr/local/lib/android
        sudo rm -rf /usr/share/dotnet
        sudo rm -rf /usr/share/swift
        sudo rm -rf "$AGENT_TOOLSDIRECTORY"

    # BuildJet runners lack /opt/hostedtoolcache, which setup-python and setup-qemu expect
    - name: Ensure hostedtoolcache exists
      shell: bash
      run: sudo mkdir -p /opt/hostedtoolcache && sudo chown $USER:$USER /opt/hostedtoolcache

    - name: Set up Python
      if: inputs.setup-python == 'true'
      uses: actions/setup-python@v6
      with:
        python-version: "3.x"

    - uses: actions/setup-node@v6
      with:
        node-version: ${{ inputs.nodejs-version }}
        cache: npm
        cache-dependency-path: "**/package-lock.json"

    - name: Set up Docker QEMU
      if: inputs.setup-docker == 'true'
      uses: docker/setup-qemu-action@v4

    - name: Set up Docker Buildx
      if: inputs.setup-docker == 'true'
      uses: docker/setup-buildx-action@v4

    - name: Configure sccache
      if: inputs.setup-sccache == 'true'
      uses: actions/github-script@v8
      with:
        script: |
          core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
          core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
```

**.github/workflows/start-cli.yaml** (new file, 88 lines)

```yaml
name: start-cli

on:
  workflow_call:
  workflow_dispatch:
    inputs:
      environment:
        type: choice
        description: Environment
        options:
          - NONE
          - dev
          - unstable
          - dev-unstable
      runner:
        type: choice
        description: Runner
        options:
          - standard
          - fast
      arch:
        type: choice
        description: Architecture
        options:
          - ALL
          - x86_64
          - x86_64-apple
          - aarch64
          - aarch64-apple
          - riscv64
  push:
    branches:
      - master
      - next/*
  pull_request:
    branches:
      - master
      - next/*

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
  cancel-in-progress: true

env:
  NODEJS_VERSION: "24.11.0"
  ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'

jobs:
  compile:
    name: Build Debian Package
    if: github.event.pull_request.draft != true
    strategy:
      fail-fast: true
      matrix:
        triple: >-
          ${{
            fromJson('{
              "x86_64": ["x86_64-unknown-linux-musl"],
              "x86_64-apple": ["x86_64-apple-darwin"],
              "aarch64": ["aarch64-unknown-linux-musl"],
              "aarch64-apple": ["aarch64-apple-darwin"],
              "riscv64": ["riscv64gc-unknown-linux-musl"],
              "ALL": ["x86_64-unknown-linux-musl", "x86_64-apple-darwin", "aarch64-unknown-linux-musl", "aarch64-apple-darwin", "riscv64gc-unknown-linux-musl"]
            }')[github.event.inputs.platform || 'ALL']
          }}
    runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
    steps:
      - name: Mount tmpfs
        if: ${{ github.event.inputs.runner == 'fast' }}
        run: sudo mount -t tmpfs tmpfs .
      - uses: actions/checkout@v6
        with:
          submodules: recursive
      - uses: ./.github/actions/setup-build
        with:
          nodejs-version: ${{ env.NODEJS_VERSION }}

      - name: Make
        run: TARGET=${{ matrix.triple }} make cli
        env:
          PLATFORM: ${{ matrix.arch }}
          SCCACHE_GHA_ENABLED: on
          SCCACHE_GHA_VERSION: 0

      - uses: actions/upload-artifact@v7
        with:
          name: start-cli_${{ matrix.triple }}
          path: core/target/${{ matrix.triple }}/release/start-cli
```

**.github/workflows/start-registry.yaml** (new file, 173 lines)

```yaml
name: start-registry

on:
  workflow_call:
  workflow_dispatch:
    inputs:
      environment:
        type: choice
        description: Environment
        options:
          - NONE
          - dev
          - unstable
          - dev-unstable
      runner:
        type: choice
        description: Runner
        options:
          - standard
          - fast
      arch:
        type: choice
        description: Architecture
        options:
          - ALL
          - x86_64
          - aarch64
          - riscv64
  push:
    branches:
      - master
      - next/*
  pull_request:
    branches:
      - master
      - next/*

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
  cancel-in-progress: true

env:
  NODEJS_VERSION: "24.11.0"
  ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'

jobs:
  compile:
    name: Build Debian Package
    if: github.event.pull_request.draft != true
    strategy:
      fail-fast: true
      matrix:
        arch: >-
          ${{
            fromJson('{
              "x86_64": ["x86_64"],
              "aarch64": ["aarch64"],
              "riscv64": ["riscv64"],
              "ALL": ["x86_64", "aarch64", "riscv64"]
            }')[github.event.inputs.platform || 'ALL']
          }}
    runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
    steps:
      - name: Mount tmpfs
        if: ${{ github.event.inputs.runner == 'fast' }}
        run: sudo mount -t tmpfs tmpfs .
      - uses: actions/checkout@v6
        with:
          submodules: recursive
      - uses: ./.github/actions/setup-build
        with:
          nodejs-version: ${{ env.NODEJS_VERSION }}

      - name: Make
        run: make registry-deb
        env:
          PLATFORM: ${{ matrix.arch }}
          SCCACHE_GHA_ENABLED: on
          SCCACHE_GHA_VERSION: 0

      - uses: actions/upload-artifact@v7
        with:
          name: start-registry_${{ matrix.arch }}.deb
          path: results/start-registry-*_${{ matrix.arch }}.deb

  create-image:
    name: Create Docker Image
    needs: [compile]
    permissions:
      contents: read
      packages: write
    runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
    steps:
      - name: Cleaning up unnecessary files
        run: |
          sudo apt-get remove --purge -y google-chrome-stable firefox mono-devel
          sudo apt-get autoremove -y
          sudo apt-get clean

      - run: |
          sudo mount -t tmpfs tmpfs .
        if: ${{ github.event.inputs.runner == 'fast' }}

      - name: Set up docker QEMU
        uses: docker/setup-qemu-action@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v4

      - name: "Login to GitHub Container Registry"
        uses: docker/login-action@v4
        with:
          registry: ghcr.io
          username: ${{github.actor}}
          password: ${{secrets.GITHUB_TOKEN}}

      - name: Docker meta
        id: meta
        uses: docker/metadata-action@v6
        with:
          images: ghcr.io/Start9Labs/startos-registry
          tags: |
            type=raw,value=${{ github.ref_name }}

      - name: Download debian package
        uses: actions/download-artifact@v8
        with:
          pattern: start-registry_*.deb

      - name: Map matrix.arch to docker platform
        run: |
          platforms=""
          for deb in *.deb; do
            filename=$(basename "$deb" .deb)
            arch="${filename#*_}"
            case "$arch" in
              x86_64)
                platform="linux/amd64"
                ;;
              aarch64)
                platform="linux/arm64"
                ;;
              riscv64)
                platform="linux/riscv64"
                ;;
              *)
                echo "Unknown architecture: $arch" >&2
                exit 1
                ;;
            esac
            if [ -z "$platforms" ]; then
              platforms="$platform"
            else
              platforms="$platforms,$platform"
            fi
          done
          echo "DOCKER_PLATFORM=$platforms" >> "$GITHUB_ENV"

      - run: |
          cat | docker buildx build --platform "$DOCKER_PLATFORM" --push -t ${{ steps.meta.outputs.tags }} -f - . << 'EOF'
          FROM debian:trixie

          ADD *.deb .

          RUN apt-get update && apt-get install -y ./*_$(uname -m).deb && rm -rf *.deb /var/lib/apt/lists/*

          VOLUME /var/lib/startos

          ENV RUST_LOG=startos=debug

          ENTRYPOINT ["start-registryd"]

          EOF
```

**.github/workflows/start-tunnel.yaml** (44 changed lines)

```
@@ -1,4 +1,4 @@
name: Start-Tunnel
name: start-tunnel

on:
  workflow_call:
@@ -35,13 +35,18 @@ on:
      - master
      - next/*

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
  cancel-in-progress: true

env:
  NODEJS_VERSION: "24.11.0"
  ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'

jobs:
  compile:
    name: Compile Base Binaries
    name: Build Debian Package
    if: github.event.pull_request.draft != true
    strategy:
      fail-fast: true
      matrix:
@@ -56,36 +61,15 @@ jobs:
      }}
    runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
    steps:
      - name: Cleaning up unnecessary files
        run: |
          sudo apt-get remove --purge -y google-chrome-stable firefox mono-devel
          sudo apt-get autoremove -y
          sudo apt-get clean

      - run: |
          sudo mount -t tmpfs tmpfs .
      - name: Mount tmpfs
        if: ${{ github.event.inputs.runner == 'fast' }}

      - uses: actions/checkout@v4
        run: sudo mount -t tmpfs tmpfs .
      - uses: actions/checkout@v6
        with:
          submodules: recursive

      - uses: actions/setup-node@v4
      - uses: ./.github/actions/setup-build
        with:
          node-version: ${{ env.NODEJS_VERSION }}

      - name: Set up docker QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Configure sccache
        uses: actions/github-script@v7
        with:
          script: |
            core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
            core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
          nodejs-version: ${{ env.NODEJS_VERSION }}

      - name: Make
        run: make tunnel-deb
@@ -94,7 +78,7 @@ jobs:
          SCCACHE_GHA_ENABLED: on
          SCCACHE_GHA_VERSION: 0

      - uses: actions/upload-artifact@v4
      - uses: actions/upload-artifact@v7
        with:
          name: start-tunnel_${{ matrix.arch }}.deb
          path: start-tunnel-*_${{ matrix.arch }}.deb
          path: results/start-tunnel-*_${{ matrix.arch }}.deb
```

**.github/workflows/startos-iso.yaml** (196 changed lines)

```
@@ -25,10 +25,13 @@ on:
          - ALL
          - x86_64
          - x86_64-nonfree
          - x86_64-nvidia
          - aarch64
          - aarch64-nonfree
          - raspberrypi
          - aarch64-nvidia
          # - raspberrypi
          - riscv64
          - riscv64-nonfree
      deploy:
        type: choice
        description: Deploy
@@ -45,6 +48,10 @@ on:
      - master
      - next/*

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
  cancel-in-progress: true

env:
  NODEJS_VERSION: "24.11.0"
  ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
@@ -52,6 +59,7 @@ env:
jobs:
  compile:
    name: Compile Base Binaries
    if: github.event.pull_request.draft != true
    strategy:
      fail-fast: true
      matrix:
@@ -60,52 +68,45 @@ jobs:
          fromJson('{
            "x86_64": ["x86_64"],
            "x86_64-nonfree": ["x86_64"],
            "x86_64-nvidia": ["x86_64"],
            "aarch64": ["aarch64"],
            "aarch64-nonfree": ["aarch64"],
            "aarch64-nvidia": ["aarch64"],
            "raspberrypi": ["aarch64"],
            "riscv64": ["riscv64"],
            "ALL": ["x86_64", "aarch64"]
            "riscv64-nonfree": ["riscv64"],
            "ALL": ["x86_64", "aarch64", "riscv64"]
          }')[github.event.inputs.platform || 'ALL']
        }}
    runs-on: ${{ fromJson('["ubuntu-latest", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
    runs-on: >-
      ${{
        fromJson(
          format(
            '["{0}", "{1}"]',
            fromJson('{
              "x86_64": "ubuntu-latest",
              "aarch64": "ubuntu-24.04-arm",
              "riscv64": "ubuntu-latest"
            }')[matrix.arch],
            fromJson('{
              "x86_64": "buildjet-32vcpu-ubuntu-2204",
              "aarch64": "buildjet-32vcpu-ubuntu-2204-arm",
              "riscv64": "buildjet-32vcpu-ubuntu-2204"
            }')[matrix.arch]
          )
        )[github.event.inputs.runner == 'fast']
      }}
    steps:
      - name: Cleaning up unnecessary files
        run: |
          sudo apt-get remove --purge -y google-chrome-stable firefox mono-devel
          sudo apt-get autoremove -y
          sudo apt-get clean
      - run: |
          sudo mount -t tmpfs tmpfs .
      - name: Mount tmpfs
        if: ${{ github.event.inputs.runner == 'fast' }}

      - uses: actions/checkout@v4
        run: sudo mount -t tmpfs tmpfs .
      - uses: actions/checkout@v6
        with:
          submodules: recursive

      - name: Set up Python
        uses: actions/setup-python@v5
      - uses: ./.github/actions/setup-build
        with:
          python-version: "3.x"

      - uses: actions/setup-node@v4
        with:
          node-version: ${{ env.NODEJS_VERSION }}

      - name: Set up docker QEMU
        uses: docker/setup-qemu-action@v3

      - name: Set up system dependencies
        run: sudo apt-get update && sudo apt-get install -y qemu-user-static systemd-container squashfuse

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3

      - name: Configure sccache
        uses: actions/github-script@v7
        with:
          script: |
            core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
            core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
          nodejs-version: ${{ env.NODEJS_VERSION }}
          setup-python: "true"

      - name: Make
        run: make ARCH=${{ matrix.arch }} compiled-${{ matrix.arch }}.tar
@@ -113,7 +114,7 @@ jobs:
          SCCACHE_GHA_ENABLED: on
          SCCACHE_GHA_VERSION: 0

      - uses: actions/upload-artifact@v4
      - uses: actions/upload-artifact@v7
        with:
          name: compiled-${{ matrix.arch }}.tar
          path: compiled-${{ matrix.arch }}.tar
@@ -129,7 +130,7 @@ jobs:
        format(
          '[
            ["{0}"],
            ["x86_64", "x86_64-nonfree", "aarch64", "aarch64-nonfree", "raspberrypi"]
            ["x86_64", "x86_64-nonfree", "x86_64-nvidia", "aarch64", "aarch64-nonfree", "aarch64-nvidia", "raspberrypi", "riscv64", "riscv64-nonfree"]
          ]',
          github.event.inputs.platform || 'ALL'
        )
@@ -139,14 +140,28 @@ jobs:
      ${{
        fromJson(
          format(
            '["ubuntu-latest", "{0}"]',
            '["{0}", "{1}"]',
            fromJson('{
              "x86_64": "ubuntu-latest",
              "x86_64-nonfree": "ubuntu-latest",
              "x86_64-nvidia": "ubuntu-latest",
              "aarch64": "ubuntu-24.04-arm",
              "aarch64-nonfree": "ubuntu-24.04-arm",
              "aarch64-nvidia": "ubuntu-24.04-arm",
              "raspberrypi": "ubuntu-24.04-arm",
              "riscv64": "ubuntu-24.04-arm",
              "riscv64-nonfree": "ubuntu-24.04-arm",
            }')[matrix.platform],
            fromJson('{
              "x86_64": "buildjet-8vcpu-ubuntu-2204",
              "x86_64-nonfree": "buildjet-8vcpu-ubuntu-2204",
              "x86_64-nvidia": "buildjet-8vcpu-ubuntu-2204",
              "aarch64": "buildjet-8vcpu-ubuntu-2204-arm",
              "aarch64-nonfree": "buildjet-8vcpu-ubuntu-2204-arm",
              "aarch64-nvidia": "buildjet-8vcpu-ubuntu-2204-arm",
              "raspberrypi": "buildjet-8vcpu-ubuntu-2204-arm",
              "riscv64": "buildjet-8vcpu-ubuntu-2204",
              "riscv64-nonfree": "buildjet-8vcpu-ubuntu-2204",
            }')[matrix.platform]
          )
        )[github.event.inputs.runner == 'fast']
@@ -157,45 +172,50 @@ jobs:
        fromJson('{
          "x86_64": "x86_64",
          "x86_64-nonfree": "x86_64",
          "x86_64-nvidia": "x86_64",
          "aarch64": "aarch64",
          "aarch64-nonfree": "aarch64",
          "aarch64-nvidia": "aarch64",
          "raspberrypi": "aarch64",
          "riscv64": "riscv64",
          "riscv64-nonfree": "riscv64",
        }')[matrix.platform]
      }}
    steps:
      - name: Free space
        run: rm -rf /opt/hostedtoolcache*
        run: |
          sudo apt-get remove --purge -y azure-cli || true
          sudo apt-get remove --purge -y firefox || true
          sudo apt-get remove --purge -y ghc-* || true
          sudo apt-get remove --purge -y google-cloud-sdk || true
          sudo apt-get remove --purge -y google-chrome-stable || true
          sudo apt-get remove --purge -y powershell || true
          sudo apt-get remove --purge -y php* || true
          sudo apt-get remove --purge -y ruby* || true
          sudo apt-get remove --purge -y mono-* || true
          sudo apt-get autoremove -y
          sudo apt-get clean
          sudo rm -rf /usr/lib/jvm # All JDKs
          sudo rm -rf /usr/local/.ghcup # Haskell toolchain
          sudo rm -rf /usr/local/lib/android # Android SDK/NDK, emulator
          sudo rm -rf /usr/share/dotnet # .NET SDKs
          sudo rm -rf /usr/share/swift # Swift toolchain (if present)
          sudo rm -rf "$AGENT_TOOLSDIRECTORY" # Pre-cached tool cache (Go, Node, etc.)
        if: ${{ github.event.inputs.runner != 'fast' }}

      - uses: actions/checkout@v4
      # BuildJet runners lack /opt/hostedtoolcache, which setup-qemu expects
      - name: Ensure hostedtoolcache exists
        run: sudo mkdir -p /opt/hostedtoolcache && sudo chown $USER:$USER /opt/hostedtoolcache

      - name: Set up docker QEMU
        uses: docker/setup-qemu-action@v4

      - uses: actions/checkout@v6
        with:
          submodules: recursive

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.x"

      - name: Install dependencies
        run: |
          sudo apt-get update
          sudo apt-get install -y qemu-user-static
          wget https://deb.debian.org/debian/pool/main/d/debspawn/debspawn_0.6.2-1_all.deb
          sha256sum ./debspawn_0.6.2-1_all.deb | grep 37ef27458cb1e35e8bce4d4f639b06b4b3866fc0b9191ec6b9bd157afd06a817
          sudo apt-get install -y ./debspawn_0.6.2-1_all.deb

      - name: Configure debspawn
        run: |
          sudo mkdir -p /etc/debspawn/
          echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
          sudo mkdir -p /var/tmp/debspawn

      - run: sudo mount -t tmpfs tmpfs /var/tmp/debspawn
        if: ${{ github.event.inputs.runner == 'fast' && (matrix.platform == 'x86_64' || matrix.platform == 'x86_64-nonfree') }}

      - name: Download compiled artifacts
        uses: actions/download-artifact@v4
        uses: actions/download-artifact@v8
        with:
          name: compiled-${{ env.ARCH }}.tar

@@ -206,22 +226,19 @@ jobs:
        run: |
          mkdir -p web/node_modules
          mkdir -p web/dist/raw
          mkdir -p core/startos/bindings
          mkdir -p core/bindings
          mkdir -p sdk/base/lib/osBindings
          mkdir -p container-runtime/node_modules
          mkdir -p container-runtime/dist
          mkdir -p container-runtime/dist/node_modules
          mkdir -p core/startos/bindings
          mkdir -p sdk/dist
          mkdir -p sdk/baseDist
          mkdir -p patch-db/client/node_modules
          mkdir -p patch-db/client/dist
          mkdir -p web/.angular
          mkdir -p web/dist/raw/ui
          mkdir -p web/dist/raw/install-wizard
          mkdir -p web/dist/raw/setup-wizard
          mkdir -p web/dist/static/ui
          mkdir -p web/dist/static/install-wizard
          mkdir -p web/dist/static/setup-wizard
          PLATFORM=${{ matrix.platform }} make -t compiled-${{ env.ARCH }}.tar

@@ -235,56 +252,19 @@ jobs:
        run: PLATFORM=${{ matrix.platform }} make img
        if: ${{ matrix.platform == 'raspberrypi' }}

      - uses: actions/upload-artifact@v4
      - uses: actions/upload-artifact@v7
        with:
          name: ${{ matrix.platform }}.squashfs
          path: results/*.squashfs

      - uses: actions/upload-artifact@v4
      - uses: actions/upload-artifact@v7
        with:
          name: ${{ matrix.platform }}.iso
          path: results/*.iso
        if: ${{ matrix.platform != 'raspberrypi' }}

      - uses: actions/upload-artifact@v4
      - uses: actions/upload-artifact@v7
        with:
          name: ${{ matrix.platform }}.img
          path: results/*.img
        if: ${{ matrix.platform == 'raspberrypi' }}

      - name: Upload OTA to registry
        run: >-
          PLATFORM=${{ matrix.platform }} make upload-ota TARGET="${{
            fromJson('{
              "alpha": "alpha-registry-x.start9.com",
              "beta": "beta-registry.start9.com",
            }')[github.event.inputs.deploy]
          }}" KEY="${{
            fromJson(
              format('{{
                "alpha": "{0}",
                "beta": "{1}",
              }}', secrets.ALPHA_INDEX_KEY, secrets.BETA_INDEX_KEY)
            )[github.event.inputs.deploy]
          }}"
        if: ${{ github.event.inputs.deploy != '' && github.event.inputs.deploy != 'NONE' }}

  index:
    if: ${{ github.event.inputs.deploy != '' && github.event.inputs.deploy != 'NONE' }}
    needs: [image]
    runs-on: ubuntu-latest
    steps:
      - run: >-
          curl "https://${{
            fromJson('{
              "alpha": "alpha-registry-x.start9.com",
              "beta": "beta-registry.start9.com",
            }')[github.event.inputs.deploy]
          }}:8443/resync.cgi?key=${{
            fromJson(
              format('{{
                "alpha": "{0}",
                "beta": "{1}",
              }}', secrets.ALPHA_INDEX_KEY, secrets.BETA_INDEX_KEY)
            )[github.event.inputs.deploy]
          }}"
```

**.github/workflows/test.yaml** (15 changed lines)

```
@@ -10,6 +10,10 @@ on:
      - master
      - next/*

concurrency:
  group: ${{ github.workflow }}-${{ github.head_ref || github.ref }}
  cancel-in-progress: true

env:
  NODEJS_VERSION: "24.11.0"
  ENVIRONMENT: dev-unstable
@@ -17,15 +21,18 @@ env:
jobs:
  test:
    name: Run Automated Tests
    if: github.event.pull_request.draft != true
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: actions/checkout@v6
        with:
          submodules: recursive

      - uses: actions/setup-node@v4
      - uses: ./.github/actions/setup-build
        with:
          node-version: ${{ env.NODEJS_VERSION }}
          nodejs-version: ${{ env.NODEJS_VERSION }}
          free-space: "false"
          setup-docker: "false"
          setup-sccache: "false"

      - name: Build And Run Tests
        run: make test
```

**.gitignore** (27 changed lines)

```
@@ -1,28 +1,25 @@
.DS_Store
.idea
/*.img
/*.img.gz
/*.img.xz
/*-raspios-bullseye-arm64-lite.img
/*-raspios-bullseye-arm64-lite.zip
*.img
*.img.gz
*.img.xz
*.zip
/product_key.txt
/*_product_key.txt
.vscode/settings.json
deploy_web.sh
deploy_web.sh
secrets.db
.vscode/
/cargo-deps/**/*
/PLATFORM.txt
/ENVIRONMENT.txt
/GIT_HASH.txt
/VERSION.txt
/*.deb
/build/env/*.txt
*.deb
/target
/*.squashfs
*.squashfs
/results
/dpkg-workdir
/compiled.tar
/compiled-*.tar
/firmware
/tmp
/build/lib/firmware
tmp
web/.i18n-checked
docs/USER.md
*.s9pk
```

**ARCHITECTURE.md** (new file, 101 lines)

# Architecture

StartOS is an open-source Linux distribution for running personal servers. It manages discovery, installation, network configuration, backups, and health monitoring of self-hosted services.

## Tech Stack

- Backend: Rust (async/Tokio, Axum web framework)
- Frontend: Angular 21 + TypeScript + Taiga UI 5
- Container runtime: Node.js/TypeScript with LXC
- Database/State: Patch-DB (git submodule) - storage layer with reactive frontend sync
- API: JSON-RPC via rpc-toolkit (see `core/rpc-toolkit.md`)
- Auth: Password + session cookie, public/private key signatures, local authcookie (see `core/src/middleware/auth/`)

## Project Structure

```bash
/
├── assets/             # Screenshots for README
├── build/              # Auxiliary files and scripts for deployed images
├── container-runtime/  # Node.js program managing package containers
├── core/               # Rust backend: API, daemon (startd), CLI (start-cli)
├── debian/             # Debian package maintainer scripts
├── image-recipe/       # Scripts for building StartOS images
├── patch-db/           # (submodule) Diff-based data store for frontend sync
├── sdk/                # TypeScript SDK for building StartOS packages
└── web/                # Web UIs (Angular)
```

## Components

- **`core/`** — Rust backend daemon. Produces a single binary `startbox` that is symlinked as `startd` (main daemon), `start-cli` (CLI), `start-container` (runs inside LXC containers), `registrybox` (package registry), and `tunnelbox` (VPN/tunnel). Handles all backend logic: RPC API, service lifecycle, networking (DNS, ACME, WiFi, Tor, WireGuard), backups, and database state management. See [core/ARCHITECTURE.md](core/ARCHITECTURE.md).

- **`web/`** — Angular 21 + TypeScript workspace using Taiga UI 5. Contains three applications (admin UI, setup wizard, VPN management) and two shared libraries (common components/services, marketplace). Communicates with the backend exclusively via JSON-RPC. See [web/ARCHITECTURE.md](web/ARCHITECTURE.md).

- **`container-runtime/`** — Node.js runtime that runs inside each service's LXC container. Loads the service's JavaScript from its S9PK package and manages subcontainers. Communicates with the host daemon via JSON-RPC over Unix socket. See [container-runtime/CLAUDE.md](container-runtime/CLAUDE.md).

- **`sdk/`** — TypeScript SDK for packaging services for StartOS (`@start9labs/start-sdk`). Split into `base/` (core types, ABI definitions, effects interface, consumed by web as `@start9labs/start-sdk-base`) and `package/` (full SDK for service developers, consumed by container-runtime as `@start9labs/start-sdk`).

- **`patch-db/`** — Git submodule providing diff-based state synchronization. Uses CBOR encoding. Backend mutations produce diffs that are pushed to the frontend via WebSocket, enabling reactive UI updates without polling. See [patch-db repo](https://github.com/Start9Labs/patch-db).

## Build Pipeline

Components have a strict dependency chain. Changes flow in one direction:

```
Rust (core/)
  → cargo test exports ts-rs types to core/bindings/
  → rsync copies to sdk/base/lib/osBindings/
  → SDK build produces baseDist/ and dist/
  → web/ consumes baseDist/ (via @start9labs/start-sdk-base)
  → container-runtime/ consumes dist/ (via @start9labs/start-sdk)
```

Key make targets along this chain:

| Step | Command | What it does |
|---|---|---|
| 1 | `cargo check -p start-os` | Verify Rust compiles |
| 2 | `make ts-bindings` | Export ts-rs types → rsync to SDK |
| 3 | `cd sdk && make baseDist dist` | Build SDK packages |
| 4 | `cd web && npm run check` | Type-check Angular projects |
| 5 | `cd container-runtime && npm run check` | Type-check runtime |

**Important**: Editing `sdk/base/lib/osBindings/*.ts` alone is NOT sufficient — you must rebuild the SDK bundle (step 3) before web/container-runtime can see the changes.

## Cross-Layer Verification

When making changes across multiple layers (Rust, SDK, web, container-runtime), verify in this order:

1. **Rust**: `cargo check -p start-os` — verifies core compiles
2. **TS bindings**: `make ts-bindings` — regenerates TypeScript types from Rust `#[ts(export)]` structs
   - Runs `./core/build/build-ts.sh` to export ts-rs types to `core/bindings/`
   - Syncs `core/bindings/` → `sdk/base/lib/osBindings/` via rsync
   - If you manually edit files in `sdk/base/lib/osBindings/`, you must still rebuild the SDK (step 3)
3. **SDK bundle**: `cd sdk && make baseDist dist` — compiles SDK source into packages
   - `baseDist/` is consumed by `/web` (via `@start9labs/start-sdk-base`)
   - `dist/` is consumed by `/container-runtime` (via `@start9labs/start-sdk`)
   - Web and container-runtime reference the **built** SDK, not source files
4. **Web type check**: `cd web && npm run check` — type-checks all Angular projects
5. **Container runtime type check**: `cd container-runtime && npm run check` — type-checks the runtime
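
A minimal sketch of running that chain end to end from the repository root (assuming `cargo check` is run from `core/` and the `make` targets and npm scripts listed above exist in your checkout):

```sh
#!/usr/bin/env bash
set -euo pipefail

# 1. Verify the Rust core compiles
(cd core && cargo check -p start-os)

# 2. Regenerate TypeScript bindings from the Rust #[ts(export)] structs
make ts-bindings

# 3. Rebuild the SDK bundles consumed by web/ and container-runtime/
(cd sdk && make baseDist dist)

# 4. Type-check the Angular projects
(cd web && npm run check)

# 5. Type-check the container runtime
(cd container-runtime && npm run check)
```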

## Data Flow: Backend to Frontend

StartOS uses Patch-DB for reactive state synchronization:

1. The backend mutates state via `db.mutate()`, producing CBOR diffs
2. Diffs are pushed to the frontend over a persistent WebSocket connection
3. The frontend applies diffs to its local state copy and notifies observers
4. Components watch specific database paths via `PatchDB.watch$()`, receiving updates reactively

This means the UI is always eventually consistent with the backend — after any mutating API call, the frontend waits for the corresponding PatchDB diff before resolving, so the UI reflects the result immediately.

## Further Reading

- [core/ARCHITECTURE.md](core/ARCHITECTURE.md) — Rust backend architecture
- [web/ARCHITECTURE.md](web/ARCHITECTURE.md) — Angular frontend architecture
- [container-runtime/CLAUDE.md](container-runtime/CLAUDE.md) — Container runtime details
- [core/rpc-toolkit.md](core/rpc-toolkit.md) — JSON-RPC handler patterns
- [core/s9pk-structure.md](core/s9pk-structure.md) — S9PK package format
- [docs/exver.md](docs/exver.md) — Extended versioning format
- [docs/VERSION_BUMP.md](docs/VERSION_BUMP.md) — Version bumping guide

**CLAUDE.md** (new file, 59 lines)

# CLAUDE.md

This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.

## Architecture

See [ARCHITECTURE.md](ARCHITECTURE.md) for the full system architecture, component map, build pipeline, and cross-layer verification order.

Each major component has its own `CLAUDE.md` with detailed guidance: `core/`, `web/`, `container-runtime/`, `sdk/`.

## Build & Development

See [CONTRIBUTING.md](CONTRIBUTING.md) for:

- Environment setup and requirements
- Build commands and make targets
- Testing and formatting commands
- Environment variables

**Quick reference:**

```bash
. ./devmode.sh                           # Enable dev mode
make update-startbox REMOTE=start9@<ip>  # Fastest iteration (binary + UI)
make test-core                           # Run Rust tests
```

## Operating Rules

- Always verify cross-layer changes using the order described in [ARCHITECTURE.md](ARCHITECTURE.md#cross-layer-verification)
- Check component-level CLAUDE.md files for component-specific conventions. ALWAYS read it before operating on that component.
- Follow existing patterns before inventing new ones
- Always use `make` recipes when they exist for testing builds rather than manually invoking build commands
- **Commit signing:** Never push unsigned commits. Before pushing, check all unpushed commits for signatures with `git log --show-signature @{upstream}..HEAD`. If any are unsigned, prompt the user to sign them with `git rebase --exec 'git commit --amend -S --no-edit' @{upstream}`.
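
A sketch of that commit-signing check, assuming an `@{upstream}` tracking branch is configured:

```sh
# List unpushed commits and their signature status
git log --show-signature @{upstream}..HEAD

# If any commit is unsigned, re-sign the whole unpushed range
git rebase --exec 'git commit --amend -S --no-edit' @{upstream}
```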
## Supplementary Documentation
|
||||
|
||||
The `docs/` directory contains cross-cutting documentation for AI assistants:
|
||||
|
||||
- `TODO.md` - Pending tasks for AI agents (check this first, remove items when completed)
|
||||
- `USER.md` - Current user identifier (gitignored, see below)
|
||||
- `exver.md` - Extended versioning format (used across core, sdk, and web)
|
||||
- `VERSION_BUMP.md` - Guide for bumping the StartOS version across the codebase
|
||||
|
||||
Component-specific docs live alongside their code (e.g., `core/rpc-toolkit.md`, `core/i18n-patterns.md`).
|
||||
|
||||
### Session Startup
|
||||
|
||||
On startup:
|
||||
|
||||
1. **Check for `docs/USER.md`** - If it doesn't exist, prompt the user for their name/identifier and create it. This file is gitignored since it varies per developer.
|
||||
|
||||
2. **Check `docs/TODO.md` for relevant tasks** - Show TODOs that either:
|
||||
- Have no `@username` tag (relevant to everyone)
|
||||
- Are tagged with the current user's identifier
|
||||
|
||||
Skip TODOs tagged with a different user.
|
||||
|
||||
3. **Ask "What would you like to do today?"** - Offer options for each relevant TODO item, plus "Something else" for other requests.
|
||||
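
A minimal sketch of the `docs/USER.md` startup check (the `alice` identifier is just a placeholder):

```sh
# Create docs/USER.md on first run; it is gitignored and varies per developer
if [ ! -f docs/USER.md ]; then
  echo "alice" > docs/USER.md
fi
```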

**CONTRIBUTING.md** (317 changed lines)

`@@ -1,133 +1,240 @@`

# Contributing to StartOS

This guide is for contributing to StartOS. If you are interested in packaging a service for StartOS, visit the [service packaging guide](https://docs.start9.com/latest/packaging-guide/). If you are interested in promoting, providing technical support, creating tutorials, or helping in other ways, please visit the [Start9 website](https://start9.com/contribute).
This guide is for contributing to StartOS. If you are interested in packaging a service for StartOS, visit the [service packaging guide](https://github.com/Start9Labs/ai-service-packaging). If you are interested in promoting, providing technical support, creating tutorials, or helping in other ways, please visit the [Start9 website](https://start9.com/contribute).

## Collaboration

- [Matrix](https://matrix.to/#/#community-dev:matrix.start9labs.com)
- [Telegram](https://t.me/start9_labs/47471)
- [Matrix](https://matrix.to/#/#dev-startos:matrix.start9labs.com)

## Project Structure

```bash
/
├── assets/
├── container-runtime/
├── core/
├── build/
├── debian/
├── web/
├── image-recipe/
├── patch-db
└── sdk/
```

#### assets

screenshots for the StartOS README

#### container-runtime

A NodeJS program that dynamically loads maintainer scripts and communicates with the OS to manage packages

#### core

An API, daemon (startd), and CLI (start-cli) that together provide the core functionality of StartOS.

#### build

Auxiliary files and scripts to include in deployed StartOS images

#### debian

Maintainer scripts for the StartOS Debian package

#### web

Web UIs served under various conditions and used to interact with StartOS APIs.

#### image-recipe

Scripts for building StartOS images

#### patch-db (submodule)

A diff based data store used to synchronize data between the web interfaces and server.

#### sdk

A typescript sdk for building start-os packages
For project structure and system architecture, see [ARCHITECTURE.md](ARCHITECTURE.md).

## Environment Setup

#### Clone the StartOS repository
### Installing Dependencies (Debian/Ubuntu)

> Debian/Ubuntu is the only officially supported build environment.
> MacOS has limited build capabilities and Windows requires [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install).

```sh
git clone https://github.com/Start9Labs/start-os.git --recurse-submodules
sudo apt update
sudo apt install -y ca-certificates curl gpg build-essential
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg-architecture -q DEB_HOST_ARCH) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian bookworm stable" | sudo tee /etc/apt/sources.list.d/docker.list
sudo apt update
sudo apt install -y sed grep gawk jq gzip brotli containerd.io docker-ce docker-ce-cli docker-compose-plugin qemu-user-static binfmt-support squashfs-tools git debspawn rsync b3sum
sudo mkdir -p /etc/debspawn/
echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
sudo usermod -aG docker $USER
sudo su $USER
docker run --privileged --rm tonistiigi/binfmt --install all
docker buildx create --use
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh # proceed with default installation
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/master/install.sh | bash
source ~/.bashrc
nvm install 24
nvm use 24
nvm alias default 24 # this prevents your machine from reverting back to another version
```

### Cloning the Repository

```sh
git clone --recursive https://github.com/Start9Labs/start-os.git --branch next/major
cd start-os
```

#### Continue to your project of interest for additional instructions:
### Development Mode

- [`core`](core/README.md)
- [`web-interfaces`](web-interfaces/README.md)
- [`build`](build/README.md)
- [`patch-db`](https://github.com/Start9Labs/patch-db)
For faster iteration during development:

```sh
. ./devmode.sh
```

This sets `ENVIRONMENT=dev` and `GIT_BRANCH_AS_HASH=1` to prevent rebuilds on every commit.
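
For example, the same flags can also be set inline for a single invocation (a one-off build shown here for illustration; adjust the target to what you are building):

```sh
# Equivalent to sourcing devmode.sh for just this command
ENVIRONMENT=dev GIT_BRANCH_AS_HASH=1 make iso
```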

## Building

This project uses [GNU Make](https://www.gnu.org/software/make/) to build its components. To build any specific component, simply run `make <TARGET>` replacing `<TARGET>` with the name of the target you'd like to build
All builds can be performed on any operating system that can run Docker.

This project uses [GNU Make](https://www.gnu.org/software/make/) to build its components.

### Requirements

- [GNU Make](https://www.gnu.org/software/make/)
- [Docker](https://docs.docker.com/get-docker/)
- [Docker](https://docs.docker.com/get-docker/) or [Podman](https://podman.io/)
- [NodeJS v20.16.0](https://docs.npmjs.com/downloading-and-installing-node-js-and-npm)
- [sed](https://www.gnu.org/software/sed/)
- [grep](https://www.gnu.org/software/grep/)
- [awk](https://www.gnu.org/software/gawk/)
- [Rust](https://rustup.rs/) (nightly for formatting)
- [sed](https://www.gnu.org/software/sed/), [grep](https://www.gnu.org/software/grep/), [awk](https://www.gnu.org/software/gawk/)
- [jq](https://jqlang.github.io/jq/)
- [gzip](https://www.gnu.org/software/gzip/)
- [brotli](https://github.com/google/brotli)
- [gzip](https://www.gnu.org/software/gzip/), [brotli](https://github.com/google/brotli)

### Environment variables
### Environment Variables

- `PLATFORM`: which platform you would like to build for. Must be one of `x86_64`, `x86_64-nonfree`, `aarch64`, `aarch64-nonfree`, `raspberrypi`
  - NOTE: `nonfree` images are for including `nonfree` firmware packages in the built ISO
- `ENVIRONMENT`: a hyphen separated set of feature flags to enable
  - `dev`: enables password ssh (INSECURE!) and does not compress frontends
  - `unstable`: enables assertions that will cause errors on unexpected inconsistencies that are undesirable in production use either for performance or reliability reasons
  - `docker`: use `docker` instead of `podman`
- `GIT_BRANCH_AS_HASH`: set to `1` to use the current git branch name as the git hash so that the project does not need to be rebuilt on each commit

| Variable             | Description                                                                                         |
| -------------------- | --------------------------------------------------------------------------------------------------- |
| `PLATFORM`           | Target platform: `x86_64`, `x86_64-nonfree`, `aarch64`, `aarch64-nonfree`, `riscv64`, `raspberrypi` |
| `ENVIRONMENT`        | Hyphen-separated feature flags (see below)                                                           |
| `PROFILE`            | Build profile: `release` (default) or `dev`                                                          |
| `GIT_BRANCH_AS_HASH` | Set to `1` to use git branch name as version hash (avoids rebuilds)                                  |

### Useful Make Targets
**ENVIRONMENT flags:**

- `iso`: Create a full `.iso` image
  - Only possible from Debian
  - Not available for `PLATFORM=raspberrypi`
  - Additional Requirements:
    - [debspawn](https://github.com/lkhq/debspawn)
- `img`: Create a full `.img` image
  - Only possible from Debian
  - Only available for `PLATFORM=raspberrypi`
  - Additional Requirements:
    - [debspawn](https://github.com/lkhq/debspawn)
- `format`: Run automatic code formatting for the project
  - Additional Requirements:
    - [rust](https://rustup.rs/)
- `test`: Run automated tests for the project
  - Additional Requirements:
    - [rust](https://rustup.rs/)
- `update`: Deploy the current working project to a device over ssh as if through an over-the-air update
  - Requires an argument `REMOTE` which is the ssh address of the device, i.e. `start9@192.168.122.2`
- `reflash`: Deploy the current working project to a device over ssh as if using a live `iso` image to reflash it
  - Requires an argument `REMOTE` which is the ssh address of the device, i.e. `start9@192.168.122.2`
- `update-overlay`: Deploy the current working project to a device over ssh to the in-memory overlay without restarting it
  - WARNING: changes will be reverted after the device is rebooted
  - WARNING: changes to `init` will not take effect as the device is already initialized
  - Requires an argument `REMOTE` which is the ssh address of the device, i.e. `start9@192.168.122.2`
- `wormhole`: Deploy the `startbox` to a device using [magic-wormhole](https://github.com/magic-wormhole/magic-wormhole)
  - When the build is complete, it will emit a command to paste into the shell of the device to upgrade it
  - Additional Requirements:
    - [magic-wormhole](https://github.com/magic-wormhole/magic-wormhole)
- `clean`: Delete all compiled artifacts
- `dev` - Enables password SSH before setup, skips frontend compression
- `unstable` - Enables assertions and debugging with performance penalty
- `console` - Enables tokio-console for async debugging

**Platform notes:**

- `-nonfree` variants include proprietary firmware and drivers
- `raspberrypi` includes non-free components by necessity
- Platform is remembered between builds if not specified
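
For example, a typical ISO build selects a platform and a set of feature flags (any of the `PLATFORM` and `ENVIRONMENT` values listed above can be substituted):

```sh
# Build a nonfree x86_64 ISO with the dev and unstable feature flags enabled
PLATFORM=x86_64-nonfree ENVIRONMENT=dev-unstable make iso
```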

### Make Targets

#### Building

| Target        | Description                                    |
| ------------- | ---------------------------------------------- |
| `iso`         | Create full `.iso` image (not for raspberrypi) |
| `img`         | Create full `.img` image (raspberrypi only)    |
| `deb`         | Build Debian package                           |
| `all`         | Build all Rust binaries                        |
| `uis`         | Build all web UIs                              |
| `ui`          | Build main UI only                             |
| `ts-bindings` | Generate TypeScript bindings from Rust types   |

#### Deploying to Device

For devices on the same network:

| Target                               | Description                                     |
| ------------------------------------ | ----------------------------------------------- |
| `update-startbox REMOTE=start9@<ip>` | Deploy binary + UI only (fastest)               |
| `update-deb REMOTE=start9@<ip>`      | Deploy full Debian package                      |
| `update REMOTE=start9@<ip>`          | OTA-style update                                |
| `reflash REMOTE=start9@<ip>`         | Reflash as if using live ISO                    |
| `update-overlay REMOTE=start9@<ip>`  | Deploy to in-memory overlay (reverts on reboot) |
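
For example (the IP below is a placeholder for your device or VM):

```sh
# Fastest iteration: push the startbox binary and UI to a device over ssh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-startbox REMOTE=start9@192.168.122.2
```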

For devices on different networks (uses [magic-wormhole](https://github.com/magic-wormhole/magic-wormhole)):

| Target              | Description          |
| ------------------- | -------------------- |
| `wormhole`          | Send startbox binary |
| `wormhole-deb`      | Send Debian package  |
| `wormhole-squashfs` | Send squashfs image  |
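
Each wormhole target prints a command to paste into the shell of the device being upgraded, for example:

```sh
# Send the startbox binary via magic-wormhole and emit the receive command
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole
```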

### Creating a VM

Install virt-manager:

```sh
sudo apt update
sudo apt install -y virt-manager
sudo usermod -aG libvirt $USER
sudo su $USER
virt-manager
```

Follow the screenshot walkthrough in [`assets/create-vm/`](assets/create-vm/) to create a new virtual machine. Key steps:

1. Create a new virtual machine
2. Browse for the ISO — create a storage pool pointing to your `results/` directory
3. Select "Generic or unknown OS"
4. Set memory and CPUs
5. Create a disk and name the VM

Build an ISO first:

```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make iso
```

#### Other

| Target                   | Description                                 |
| ------------------------ | ------------------------------------------- |
| `format`                 | Run code formatting (Rust nightly required) |
| `test`                   | Run all automated tests                     |
| `test-core`              | Run Rust tests                              |
| `test-sdk`               | Run SDK tests                               |
| `test-container-runtime` | Run container runtime tests                 |
| `clean`                  | Delete all compiled artifacts               |

## Testing

```bash
make test                    # All tests
make test-core               # Rust tests (via ./core/run-tests.sh)
make test-sdk                # SDK tests
make test-container-runtime  # Container runtime tests

# Run specific Rust test
cd core && cargo test <test_name> --features=test
```

## Code Formatting

```bash
# Rust (requires nightly)
make format

# TypeScript/HTML/SCSS (web)
cd web && npm run format
```

## Code Style Guidelines

### Formatting

Run the formatters before committing. Configuration is handled by `rustfmt.toml` (Rust) and prettier configs (TypeScript).

### Documentation & Comments

**Rust:**

- Add doc comments (`///`) to public APIs, structs, and non-obvious functions
- Use `//` comments sparingly for complex logic that isn't self-evident
- Prefer self-documenting code (clear naming, small functions) over comments

**TypeScript:**

- Document exported functions and complex types with JSDoc
- Keep comments focused on "why" rather than "what"

**General:**

- Don't add comments that just restate the code
- Update or remove comments when code changes
- TODOs should include context: `// TODO(username): reason`

### Commit Messages

Use [Conventional Commits](https://www.conventionalcommits.org/):

```
<type>(<scope>): <description>

[optional body]

[optional footer]
```

**Types:**

- `feat` - New feature
- `fix` - Bug fix
- `docs` - Documentation only
- `style` - Formatting, no code change
- `refactor` - Code change that neither fixes a bug nor adds a feature
- `test` - Adding or updating tests
- `chore` - Build process, dependencies, etc.

**Examples:**

```
feat(web): add dark mode toggle
fix(core): resolve race condition in service startup
docs: update CONTRIBUTING.md with style guidelines
refactor(sdk): simplify package validation logic
```
134
DEVELOPMENT.md
@@ -1,134 +0,0 @@
|
||||
# Setting up your development environment on Debian/Ubuntu
|
||||
|
||||
A step-by-step guide
|
||||
|
||||
> This is the only officially supported build environment.
|
||||
> MacOS has limited build capabilities and Windows requires [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install)
|
||||
|
||||
## Installing dependencies
|
||||
|
||||
Run the following commands one at a time
|
||||
|
||||
```sh
|
||||
sudo apt update
|
||||
sudo apt install -y ca-certificates curl gpg build-essential
|
||||
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
|
||||
echo "deb [arch=$(dpkg-architecture -q DEB_HOST_ARCH) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian bookworm stable" | sudo tee /etc/apt/sources.list.d/docker.list
|
||||
sudo apt update
|
||||
sudo apt install -y sed grep gawk jq gzip brotli containerd.io docker-ce docker-ce-cli docker-compose-plugin qemu-user-static binfmt-support squashfs-tools git debspawn rsync b3sum
|
||||
sudo mkdir -p /etc/debspawn/
|
||||
echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
|
||||
sudo usermod -aG docker $USER
|
||||
sudo su $USER
|
||||
docker run --privileged --rm tonistiigi/binfmt --install all
|
||||
docker buildx create --use
|
||||
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh # proceed with default installation
|
||||
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/master/install.sh | bash
|
||||
source ~/.bashrc
|
||||
nvm install 24
|
||||
nvm use 24
|
||||
nvm alias default 24 # this prevents your machine from reverting back to another version
|
||||
```
|
||||
|
||||
## Cloning the repository
|
||||
|
||||
```sh
|
||||
git clone --recursive https://github.com/Start9Labs/start-os.git --branch next/major
|
||||
cd start-os
|
||||
```
|
||||
|
||||
## Building an ISO
|
||||
|
||||
```sh
|
||||
PLATFORM=$(uname -m) ENVIRONMENT=dev make iso
|
||||
```
|
||||
|
||||
This will build an ISO for your current architecture. If you are building to run on an architecture other than the one you are currently on, replace `$(uname -m)` with the correct platform for the device (one of `aarch64`, `aarch64-nonfree`, `x86_64`, `x86_64-nonfree`, `raspberrypi`)

## Creating a VM

### Install virt-manager

```sh
sudo apt update
sudo apt install -y virt-manager
sudo usermod -aG libvirt $USER
sudo su $USER
```

### Launch virt-manager

```sh
virt-manager
```

### Create new virtual machine






#### Make sure to set "Target Path" to the path to your results directory in start-os










## Updating a VM

The fastest way to update a VM to your latest code depends on what you changed:

### UI or startd:

```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-startbox REMOTE=start9@<VM IP>
```

### Container runtime or debian dependencies:

```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-deb REMOTE=start9@<VM IP>
```

### Image recipe:

```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-squashfs REMOTE=start9@<VM IP>
```

---

If the device you are building for is not available via ssh, it is also possible to use `magic-wormhole` to send the relevant files.

### Prerequisites:

```sh
sudo apt update
sudo apt install -y magic-wormhole
```

As before, the fastest way to update a VM to your latest code depends on what you changed. Each of the following commands will return a command to paste into the shell of the device you would like to upgrade (see the illustrative sketch after these commands).

### UI or startd:

```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole
```

### Container runtime or debian dependencies:

```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole-deb
```

### Image recipe:

```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole-squashfs
```
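
For orientation, a hedged sketch of what the wormhole hand-off looks like (the exact command and code are printed by the `make wormhole*` target and will differ):

```sh
# On the build machine: the make target sends the relevant files and prints a
# one-time code, conceptually like `wormhole send <file>` does.
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole

# On the device being upgraded: paste the printed command, which uses
# `wormhole receive`; the code below is illustrative only.
wormhole receive 7-crossover-clockwork
```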
207
Makefile
@@ -1,45 +1,45 @@
|
||||
ls-files = $(shell git ls-files --cached --others --exclude-standard $1)
|
||||
PROFILE = release
|
||||
|
||||
PLATFORM_FILE := $(shell ./check-platform.sh)
|
||||
ENVIRONMENT_FILE := $(shell ./check-environment.sh)
|
||||
GIT_HASH_FILE := $(shell ./check-git-hash.sh)
|
||||
VERSION_FILE := $(shell ./check-version.sh)
|
||||
BASENAME := $(shell PROJECT=startos ./basename.sh)
|
||||
PLATFORM := $(shell if [ -f ./PLATFORM.txt ]; then cat ./PLATFORM.txt; else echo unknown; fi)
|
||||
ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; else echo $(PLATFORM) | sed 's/-nonfree$$//g'; fi)
|
||||
PLATFORM_FILE := $(shell ./build/env/check-platform.sh)
|
||||
ENVIRONMENT_FILE := $(shell ./build/env/check-environment.sh)
|
||||
GIT_HASH_FILE := $(shell ./build/env/check-git-hash.sh)
|
||||
VERSION_FILE := $(shell ./build/env/check-version.sh)
|
||||
BASENAME := $(shell PROJECT=startos ./build/env/basename.sh)
|
||||
PLATFORM := $(shell if [ -f $(PLATFORM_FILE) ]; then cat $(PLATFORM_FILE); else echo unknown; fi)
|
||||
ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; elif [ "$(PLATFORM)" = "rockchip64" ]; then echo aarch64; else echo $(PLATFORM) | sed 's/-nonfree$$//g; s/-nvidia$$//g'; fi)
|
||||
RUST_ARCH := $(shell if [ "$(ARCH)" = "riscv64" ]; then echo riscv64gc; else echo $(ARCH); fi)
|
||||
REGISTRY_BASENAME := $(shell PROJECT=start-registry PLATFORM=$(ARCH) ./basename.sh)
|
||||
TUNNEL_BASENAME := $(shell PROJECT=start-tunnel PLATFORM=$(ARCH) ./basename.sh)
|
||||
REGISTRY_BASENAME := $(shell PROJECT=start-registry PLATFORM=$(ARCH) ./build/env/basename.sh)
|
||||
TUNNEL_BASENAME := $(shell PROJECT=start-tunnel PLATFORM=$(ARCH) ./build/env/basename.sh)
|
||||
IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo iso; fi)
|
||||
WEB_UIS := web/dist/raw/ui/index.html web/dist/raw/setup-wizard/index.html web/dist/raw/install-wizard/index.html
|
||||
COMPRESSED_WEB_UIS := web/dist/static/ui/index.html web/dist/static/setup-wizard/index.html web/dist/static/install-wizard/index.html
|
||||
FIRMWARE_ROMS := ./firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json)
|
||||
BUILD_SRC := $(call ls-files, build) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS)
|
||||
IMAGE_RECIPE_SRC := $(call ls-files, image-recipe/)
|
||||
STARTD_SRC := core/startos/startd.service $(BUILD_SRC)
|
||||
WEB_UIS := web/dist/raw/ui/index.html web/dist/raw/setup-wizard/index.html
|
||||
COMPRESSED_WEB_UIS := web/dist/static/ui/index.html web/dist/static/setup-wizard/index.html
|
||||
FIRMWARE_ROMS := build/lib/firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./build/lib/firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json)
|
||||
TOR_S9PK := build/lib/tor_$(ARCH).s9pk
|
||||
BUILD_SRC := $(call ls-files, build/lib) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS) $(TOR_S9PK)
|
||||
IMAGE_RECIPE_SRC := $(call ls-files, build/image-recipe/)
|
||||
STARTD_SRC := core/startd.service $(BUILD_SRC)
|
||||
CORE_SRC := $(call ls-files, core) $(shell git ls-files --recurse-submodules patch-db) $(GIT_HASH_FILE)
|
||||
WEB_SHARED_SRC := $(call ls-files, web/projects/shared) $(call ls-files, web/projects/marketplace) $(shell ls -p web/ | grep -v / | sed 's/^/web\//g') web/node_modules/.package-lock.json web/config.json patch-db/client/dist/index.js sdk/baseDist/package.json web/patchdb-ui-seed.json sdk/dist/package.json
|
||||
WEB_UI_SRC := $(call ls-files, web/projects/ui)
|
||||
WEB_SETUP_WIZARD_SRC := $(call ls-files, web/projects/setup-wizard)
|
||||
WEB_INSTALL_WIZARD_SRC := $(call ls-files, web/projects/install-wizard)
|
||||
WEB_START_TUNNEL_SRC := $(call ls-files, web/projects/start-tunnel)
|
||||
PATCH_DB_CLIENT_SRC := $(shell git ls-files --recurse-submodules patch-db/client)
|
||||
GZIP_BIN := $(shell which pigz || which gzip)
|
||||
TAR_BIN := $(shell which gtar || which tar)
|
||||
COMPILED_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox core/target/$(RUST_ARCH)-unknown-linux-musl/release/containerbox container-runtime/rootfs.$(ARCH).squashfs
|
||||
STARTOS_TARGETS := $(STARTD_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) cargo-deps/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs $(PLATFORM_FILE) \
|
||||
COMPILED_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox core/target/$(RUST_ARCH)-unknown-linux-musl/release/start-container container-runtime/rootfs.$(ARCH).squashfs
|
||||
STARTOS_TARGETS := $(STARTD_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) target/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs $(PLATFORM_FILE) \
|
||||
$(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then \
|
||||
echo cargo-deps/aarch64-unknown-linux-musl/release/pi-beep; \
|
||||
echo target/aarch64-unknown-linux-musl/release/pi-beep; \
|
||||
fi) \
|
||||
$(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]; then \
|
||||
echo cargo-deps/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph; \
|
||||
echo target/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph; \
|
||||
fi') \
|
||||
$(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)console($$|-) ]]; then \
|
||||
echo cargo-deps/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console; \
|
||||
echo target/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console; \
|
||||
fi')
|
||||
REGISTRY_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox core/startos/start-registryd.service
|
||||
TUNNEL_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox core/startos/start-tunneld.service
|
||||
REGISTRY_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox core/start-registryd.service
|
||||
TUNNEL_TARGETS := core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox core/start-tunneld.service
|
||||
|
||||
ifeq ($(REMOTE),)
|
||||
mkdir = mkdir -p $1
|
||||
@@ -62,7 +62,7 @@ endif
|
||||
|
||||
.DELETE_ON_ERROR:
|
||||
|
||||
.PHONY: all metadata install clean format cli uis ui reflash deb $(IMAGE_TYPE) squashfs wormhole wormhole-deb test test-core test-sdk test-container-runtime registry install-registry tunnel install-tunnel ts-bindings
|
||||
.PHONY: all metadata install clean format install-cli cli uis ui reflash deb $(IMAGE_TYPE) squashfs wormhole wormhole-deb test test-core test-sdk test-container-runtime registry install-registry tunnel install-tunnel ts-bindings
|
||||
|
||||
all: $(STARTOS_TARGETS)
|
||||
|
||||
@@ -73,7 +73,7 @@ metadata: $(VERSION_FILE) $(PLATFORM_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE)
|
||||
|
||||
clean:
|
||||
rm -rf core/target
|
||||
rm -rf core/startos/bindings
|
||||
rm -rf core/bindings
|
||||
rm -rf web/.angular
|
||||
rm -f web/config.json
|
||||
rm -rf web/node_modules
|
||||
@@ -81,7 +81,7 @@ clean:
|
||||
rm -rf patch-db/client/node_modules
|
||||
rm -rf patch-db/client/dist
|
||||
rm -rf patch-db/target
|
||||
rm -rf cargo-deps
|
||||
rm -rf target
|
||||
rm -rf dpkg-workdir
|
||||
rm -rf image-recipe/deb
|
||||
rm -rf results
|
||||
@@ -89,14 +89,8 @@ clean:
|
||||
rm -rf container-runtime/dist
|
||||
rm -rf container-runtime/node_modules
|
||||
rm -f container-runtime/*.squashfs
|
||||
if [ -d container-runtime/tmp/combined ] && mountpoint container-runtime/tmp/combined; then sudo umount container-runtime/tmp/combined; fi
|
||||
if [ -d container-runtime/tmp/lower ] && mountpoint container-runtime/tmp/lower; then sudo umount container-runtime/tmp/lower; fi
|
||||
rm -rf container-runtime/tmp
|
||||
(cd sdk && make clean)
|
||||
rm -f ENVIRONMENT.txt
|
||||
rm -f PLATFORM.txt
|
||||
rm -f GIT_HASH.txt
|
||||
rm -f VERSION.txt
|
||||
rm -f env/*.txt
|
||||
|
||||
format:
|
||||
cd core && cargo +nightly fmt
|
||||
@@ -112,8 +106,11 @@ test-sdk: $(call ls-files, sdk) sdk/base/lib/osBindings/index.ts
|
||||
test-container-runtime: container-runtime/node_modules/.package-lock.json $(call ls-files, container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json
|
||||
cd container-runtime && npm test
|
||||
|
||||
cli:
|
||||
./core/install-cli.sh
|
||||
install-cli: $(GIT_HASH_FILE)
|
||||
./core/build/build-cli.sh --install
|
||||
|
||||
cli: $(GIT_HASH_FILE)
|
||||
./core/build/build-cli.sh
|
||||
|
||||
registry: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox
|
||||
|
||||
@@ -124,49 +121,54 @@ install-registry: $(REGISTRY_TARGETS)
|
||||
$(call ln,/usr/bin/start-registrybox,$(DESTDIR)/usr/bin/start-registry)
|
||||
|
||||
$(call mkdir,$(DESTDIR)/lib/systemd/system)
|
||||
$(call cp,core/startos/start-registryd.service,$(DESTDIR)/lib/systemd/system/start-registryd.service)
|
||||
$(call cp,core/start-registryd.service,$(DESTDIR)/lib/systemd/system/start-registryd.service)
|
||||
|
||||
core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/registrybox: $(CORE_SRC) $(ENVIRONMENT_FILE)
|
||||
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build-registrybox.sh
|
||||
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build/build-registrybox.sh
|
||||
|
||||
tunnel: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox
|
||||
|
||||
install-tunnel: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox core/startos/start-tunneld.service
|
||||
install-tunnel: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox core/start-tunneld.service
|
||||
$(call mkdir,$(DESTDIR)/usr/bin)
|
||||
$(call cp,core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox,$(DESTDIR)/usr/bin/start-tunnelbox)
|
||||
$(call ln,/usr/bin/start-tunnelbox,$(DESTDIR)/usr/bin/start-tunneld)
|
||||
$(call ln,/usr/bin/start-tunnelbox,$(DESTDIR)/usr/bin/start-tunnel)
|
||||
|
||||
$(call mkdir,$(DESTDIR)/lib/systemd/system)
|
||||
$(call cp,core/startos/start-tunneld.service,$(DESTDIR)/lib/systemd/system/start-tunneld.service)
|
||||
$(call cp,core/start-tunneld.service,$(DESTDIR)/lib/systemd/system/start-tunneld.service)
|
||||
|
||||
$(call mkdir,$(DESTDIR)/usr/lib/startos/scripts)
|
||||
$(call cp,build/lib/scripts/forward-port,$(DESTDIR)/usr/lib/startos/scripts/forward-port)
|
||||
|
||||
$(call mkdir,$(DESTDIR)/etc/apt/sources.list.d)
|
||||
$(call cp,apt/start9.list,$(DESTDIR)/etc/apt/sources.list.d/start9.list)
|
||||
$(call mkdir,$(DESTDIR)/usr/share/keyrings)
|
||||
$(call cp,apt/start9.gpg,$(DESTDIR)/usr/share/keyrings/start9.gpg)
|
||||
|
||||
core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/tunnelbox: $(CORE_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) web/dist/static/start-tunnel/index.html
|
||||
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build-tunnelbox.sh
|
||||
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build/build-tunnelbox.sh
|
||||
|
||||
deb: results/$(BASENAME).deb
|
||||
|
||||
results/$(BASENAME).deb: dpkg-build.sh $(call ls-files,debian/startos) $(STARTOS_TARGETS)
|
||||
PLATFORM=$(PLATFORM) REQUIRES=debian ./build/os-compat/run-compat.sh ./dpkg-build.sh
|
||||
results/$(BASENAME).deb: debian/dpkg-build.sh $(call ls-files,debian/startos) $(STARTOS_TARGETS)
|
||||
PLATFORM=$(PLATFORM) REQUIRES=debian ./build/os-compat/run-compat.sh ./debian/dpkg-build.sh
|
||||
|
||||
registry-deb: results/$(REGISTRY_BASENAME).deb
|
||||
|
||||
results/$(REGISTRY_BASENAME).deb: dpkg-build.sh $(call ls-files,debian/start-registry) $(REGISTRY_TARGETS)
|
||||
PROJECT=start-registry PLATFORM=$(ARCH) REQUIRES=debian ./build/os-compat/run-compat.sh ./dpkg-build.sh
|
||||
results/$(REGISTRY_BASENAME).deb: debian/dpkg-build.sh $(call ls-files,debian/start-registry) $(REGISTRY_TARGETS)
|
||||
PROJECT=start-registry PLATFORM=$(ARCH) REQUIRES=debian DEPENDS=ca-certificates ./build/os-compat/run-compat.sh ./debian/dpkg-build.sh
|
||||
|
||||
tunnel-deb: results/$(TUNNEL_BASENAME).deb
|
||||
|
||||
results/$(TUNNEL_BASENAME).deb: dpkg-build.sh $(call ls-files,debian/start-tunnel) $(TUNNEL_TARGETS)
|
||||
PROJECT=start-tunnel PLATFORM=$(ARCH) REQUIRES=debian DEPENDS=wireguard-tools,iptables,conntrack ./build/os-compat/run-compat.sh ./dpkg-build.sh
|
||||
results/$(TUNNEL_BASENAME).deb: debian/dpkg-build.sh $(call ls-files,debian/start-tunnel) $(TUNNEL_TARGETS) build/lib/scripts/forward-port
|
||||
PROJECT=start-tunnel PLATFORM=$(ARCH) REQUIRES=debian DEPENDS=wireguard-tools,iptables,conntrack ./build/os-compat/run-compat.sh ./debian/dpkg-build.sh
|
||||
|
||||
$(IMAGE_TYPE): results/$(BASENAME).$(IMAGE_TYPE)
|
||||
|
||||
squashfs: results/$(BASENAME).squashfs
|
||||
|
||||
results/$(BASENAME).$(IMAGE_TYPE) results/$(BASENAME).squashfs: $(IMAGE_RECIPE_SRC) results/$(BASENAME).deb
|
||||
./image-recipe/run-local-build.sh "results/$(BASENAME).deb"
|
||||
ARCH=$(ARCH) ./build/image-recipe/run-local-build.sh "results/$(BASENAME).deb"
|
||||
|
||||
# For creating os images. DO NOT USE
|
||||
install: $(STARTOS_TARGETS)
|
||||
@@ -175,18 +177,21 @@ install: $(STARTOS_TARGETS)
|
||||
$(call cp,core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox,$(DESTDIR)/usr/bin/startbox)
|
||||
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/startd)
|
||||
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-cli)
|
||||
if [ "$(PLATFORM)" = "raspberrypi" ]; then $(call cp,cargo-deps/aarch64-unknown-linux-musl/release/pi-beep,$(DESTDIR)/usr/bin/pi-beep); fi
|
||||
if [ "$(PLATFORM)" = "raspberrypi" ]; then $(call cp,target/aarch64-unknown-linux-musl/release/pi-beep,$(DESTDIR)/usr/bin/pi-beep); fi
|
||||
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then \
|
||||
$(call cp,cargo-deps/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph,$(DESTDIR)/usr/bin/flamegraph); \
|
||||
$(call cp,target/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph,$(DESTDIR)/usr/bin/flamegraph); \
|
||||
fi
|
||||
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)console($$|-) ]]'; then \
|
||||
$(call cp,cargo-deps/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console,$(DESTDIR)/usr/bin/tokio-console); \
|
||||
$(call cp,target/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console,$(DESTDIR)/usr/bin/tokio-console); \
|
||||
fi
|
||||
$(call cp,cargo-deps/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs,$(DESTDIR)/usr/bin/startos-backup-fs)
|
||||
$(call cp,target/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs,$(DESTDIR)/usr/bin/startos-backup-fs)
|
||||
$(call ln,/usr/bin/startos-backup-fs,$(DESTDIR)/usr/sbin/mount.backup-fs)
|
||||
|
||||
$(call mkdir,$(DESTDIR)/lib/systemd/system)
|
||||
$(call cp,core/startos/startd.service,$(DESTDIR)/lib/systemd/system/startd.service)
|
||||
$(call cp,core/startd.service,$(DESTDIR)/lib/systemd/system/startd.service)
|
||||
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then \
|
||||
sed -i '/^Environment=/a Environment=RUST_BACKTRACE=full' $(DESTDIR)/lib/systemd/system/startd.service; \
|
||||
fi
|
||||
|
||||
$(call mkdir,$(DESTDIR)/usr/lib)
|
||||
$(call rm,$(DESTDIR)/usr/lib/startos)
|
||||
@@ -194,18 +199,16 @@ install: $(STARTOS_TARGETS)
|
||||
$(call mkdir,$(DESTDIR)/usr/lib/startos/container-runtime)
|
||||
$(call cp,container-runtime/rootfs.$(ARCH).squashfs,$(DESTDIR)/usr/lib/startos/container-runtime/rootfs.squashfs)
|
||||
|
||||
$(call cp,PLATFORM.txt,$(DESTDIR)/usr/lib/startos/PLATFORM.txt)
|
||||
$(call cp,ENVIRONMENT.txt,$(DESTDIR)/usr/lib/startos/ENVIRONMENT.txt)
|
||||
$(call cp,GIT_HASH.txt,$(DESTDIR)/usr/lib/startos/GIT_HASH.txt)
|
||||
$(call cp,VERSION.txt,$(DESTDIR)/usr/lib/startos/VERSION.txt)
|
||||
|
||||
$(call cp,firmware/$(PLATFORM),$(DESTDIR)/usr/lib/startos/firmware)
|
||||
$(call cp,build/env/PLATFORM.txt,$(DESTDIR)/usr/lib/startos/PLATFORM.txt)
|
||||
$(call cp,build/env/ENVIRONMENT.txt,$(DESTDIR)/usr/lib/startos/ENVIRONMENT.txt)
|
||||
$(call cp,build/env/GIT_HASH.txt,$(DESTDIR)/usr/lib/startos/GIT_HASH.txt)
|
||||
$(call cp,build/env/VERSION.txt,$(DESTDIR)/usr/lib/startos/VERSION.txt)
|
||||
|
||||
update-overlay: $(STARTOS_TARGETS)
|
||||
@echo "\033[33m!!! THIS WILL ONLY REFLASH YOUR DEVICE IN MEMORY !!!\033[0m"
|
||||
@echo "\033[33mALL CHANGES WILL BE REVERTED IF YOU RESTART THE DEVICE\033[0m"
|
||||
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
|
||||
@if [ "`ssh $(REMOTE) 'cat /usr/lib/startos/VERSION.txt'`" != "`cat ./VERSION.txt`" ]; then >&2 echo "StartOS requires migrations: update-overlay is unavailable." && false; fi
|
||||
@if [ "`ssh $(REMOTE) 'cat /usr/lib/startos/VERSION.txt'`" != "`cat $(VERSION_FILE)`" ]; then >&2 echo "StartOS requires migrations: update-overlay is unavailable." && false; fi
|
||||
$(call ssh,"sudo systemctl stop startd")
|
||||
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) PLATFORM=$(PLATFORM)
|
||||
$(call ssh,"sudo systemctl start startd")
|
||||
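
As a usage sketch (not taken from the Makefile itself, but following the pattern documented for the other `update-*` targets), the `update-overlay` target above would be invoked like:

```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-overlay REMOTE=start9@<VM IP>
```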
@@ -242,9 +245,9 @@ update-startbox: core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox
|
||||
update-deb: results/$(BASENAME).deb # better than update, but only available from debian
|
||||
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
|
||||
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
|
||||
$(call mkdir,/media/startos/next/tmp/startos-deb)
|
||||
$(call cp,results/$(BASENAME).deb,/media/startos/next/tmp/startos-deb/$(BASENAME).deb)
|
||||
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y --reinstall /tmp/startos-deb/$(BASENAME).deb"')
|
||||
$(call mkdir,/media/startos/next/var/tmp/startos-deb)
|
||||
$(call cp,results/$(BASENAME).deb,/media/startos/next/var/tmp/startos-deb/$(BASENAME).deb)
|
||||
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y --reinstall /var/tmp/startos-deb/$(BASENAME).deb"')
|
||||
|
||||
update-squashfs: results/$(BASENAME).squashfs
|
||||
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
|
||||
@@ -263,7 +266,7 @@ emulate-reflash: $(STARTOS_TARGETS)
|
||||
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y $(shell cat ./build/lib/depends)"')
|
||||
|
||||
upload-ota: results/$(BASENAME).squashfs
|
||||
TARGET=$(TARGET) KEY=$(KEY) ./upload-ota.sh
|
||||
TARGET=$(TARGET) KEY=$(KEY) ./build/upload-ota.sh
|
||||
|
||||
container-runtime/debian.$(ARCH).squashfs: ./container-runtime/download-base-image.sh
|
||||
ARCH=$(ARCH) ./container-runtime/download-base-image.sh
|
||||
@@ -276,16 +279,20 @@ container-runtime/node_modules/.package-lock.json: container-runtime/package-loc
|
||||
npm --prefix container-runtime ci
|
||||
touch container-runtime/node_modules/.package-lock.json
|
||||
|
||||
ts-bindings: core/startos/bindings/index.ts
|
||||
ts-bindings: core/bindings/index.ts
|
||||
mkdir -p sdk/base/lib/osBindings
|
||||
rsync -ac --delete core/startos/bindings/ sdk/base/lib/osBindings/
|
||||
rsync -ac --delete core/bindings/ sdk/base/lib/osBindings/
|
||||
|
||||
core/startos/bindings/index.ts: $(call ls-files, core) $(ENVIRONMENT_FILE)
|
||||
rm -rf core/startos/bindings
|
||||
./core/build-ts.sh
|
||||
ls core/startos/bindings/*.ts | sed 's/core\/startos\/bindings\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' | grep -v '"./index"' | tee core/startos/bindings/index.ts
|
||||
npm --prefix sdk exec -- prettier --config ./sdk/base/package.json -w ./core/startos/bindings/*.ts
|
||||
touch core/startos/bindings/index.ts
|
||||
core/bindings/index.ts: $(call ls-files, core) $(ENVIRONMENT_FILE)
|
||||
rm -rf core/bindings
|
||||
./core/build/build-ts.sh
|
||||
ls core/bindings/*.ts | sed 's/core\/bindings\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' | grep -v '"./index"' | tee core/bindings/index.ts
|
||||
if [ -d core/bindings/tunnel ]; then \
|
||||
ls core/bindings/tunnel/*.ts | sed 's/core\/bindings\/tunnel\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' | grep -v '"./index"' > core/bindings/tunnel/index.ts; \
|
||||
echo 'export * as Tunnel from "./tunnel";' >> core/bindings/index.ts; \
|
||||
fi
|
||||
npm --prefix sdk/base exec -- prettier --config=./sdk/base/package.json -w './core/bindings/**/*.ts'
|
||||
touch core/bindings/index.ts
|
||||
|
||||
sdk/dist/package.json sdk/baseDist/package.json: $(call ls-files, sdk) sdk/base/lib/osBindings/index.ts
|
||||
(cd sdk && make bundle)
|
||||
@@ -300,22 +307,25 @@ container-runtime/dist/node_modules/.package-lock.json container-runtime/dist/pa
|
||||
./container-runtime/install-dist-deps.sh
|
||||
touch container-runtime/dist/node_modules/.package-lock.json
|
||||
|
||||
container-runtime/rootfs.$(ARCH).squashfs: container-runtime/debian.$(ARCH).squashfs container-runtime/container-runtime.service container-runtime/update-image.sh container-runtime/deb-install.sh container-runtime/dist/index.js container-runtime/dist/node_modules/.package-lock.json core/target/$(RUST_ARCH)-unknown-linux-musl/release/containerbox
|
||||
ARCH=$(ARCH) REQUIRES=linux ./build/os-compat/run-compat.sh ./container-runtime/update-image.sh
|
||||
container-runtime/rootfs.$(ARCH).squashfs: container-runtime/debian.$(ARCH).squashfs container-runtime/container-runtime.service container-runtime/update-image.sh container-runtime/update-image-local.sh container-runtime/deb-install.sh container-runtime/dist/index.js container-runtime/dist/node_modules/.package-lock.json core/target/$(RUST_ARCH)-unknown-linux-musl/release/start-container
|
||||
ARCH=$(ARCH) ./container-runtime/update-image-local.sh
|
||||
|
||||
build/lib/depends build/lib/conflicts: $(ENVIRONMENT_FILE) $(PLATFORM_FILE) $(shell ls build/dpkg-deps/*)
|
||||
PLATFORM=$(PLATFORM) ARCH=$(ARCH) build/dpkg-deps/generate.sh
|
||||
|
||||
$(FIRMWARE_ROMS): build/lib/firmware.json download-firmware.sh $(PLATFORM_FILE)
|
||||
./download-firmware.sh $(PLATFORM)
|
||||
$(FIRMWARE_ROMS): build/lib/firmware.json ./build/download-firmware.sh $(PLATFORM_FILE)
|
||||
./build/download-firmware.sh $(PLATFORM)
|
||||
|
||||
$(TOR_S9PK): ./build/download-tor-s9pk.sh
|
||||
./build/download-tor-s9pk.sh $(ARCH)
|
||||
|
||||
core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox: $(CORE_SRC) $(COMPRESSED_WEB_UIS) web/patchdb-ui-seed.json $(ENVIRONMENT_FILE)
|
||||
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build-startbox.sh
|
||||
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build/build-startbox.sh
|
||||
touch core/target/$(RUST_ARCH)-unknown-linux-musl/$(PROFILE)/startbox
|
||||
|
||||
core/target/$(RUST_ARCH)-unknown-linux-musl/release/containerbox: $(CORE_SRC) $(ENVIRONMENT_FILE)
|
||||
ARCH=$(ARCH) ./core/build-containerbox.sh
|
||||
touch core/target/$(RUST_ARCH)-unknown-linux-musl/release/containerbox
|
||||
core/target/$(RUST_ARCH)-unknown-linux-musl/release/start-container: $(CORE_SRC) $(ENVIRONMENT_FILE)
|
||||
ARCH=$(ARCH) ./core/build/build-start-container.sh
|
||||
touch core/target/$(RUST_ARCH)-unknown-linux-musl/release/start-container
|
||||
|
||||
web/package-lock.json: web/package.json sdk/baseDist/package.json
|
||||
npm --prefix web i
|
||||
@@ -330,27 +340,27 @@ web/.angular/.updated: patch-db/client/dist/index.js sdk/baseDist/package.json w
|
||||
mkdir -p web/.angular
|
||||
touch web/.angular/.updated
|
||||
|
||||
web/dist/raw/ui/index.html: $(WEB_UI_SRC) $(WEB_SHARED_SRC) web/.angular/.updated
|
||||
web/.i18n-checked: $(WEB_SHARED_SRC) $(WEB_UI_SRC) $(WEB_SETUP_WIZARD_SRC) $(WEB_START_TUNNEL_SRC)
|
||||
npm --prefix web run check:i18n
|
||||
touch web/.i18n-checked
|
||||
|
||||
web/dist/raw/ui/index.html: $(WEB_UI_SRC) $(WEB_SHARED_SRC) web/.angular/.updated web/.i18n-checked
|
||||
npm --prefix web run build:ui
|
||||
touch web/dist/raw/ui/index.html
|
||||
|
||||
web/dist/raw/setup-wizard/index.html: $(WEB_SETUP_WIZARD_SRC) $(WEB_SHARED_SRC) web/.angular/.updated
|
||||
web/dist/raw/setup-wizard/index.html: $(WEB_SETUP_WIZARD_SRC) $(WEB_SHARED_SRC) web/.angular/.updated web/.i18n-checked
|
||||
npm --prefix web run build:setup
|
||||
touch web/dist/raw/setup-wizard/index.html
|
||||
|
||||
web/dist/raw/install-wizard/index.html: $(WEB_INSTALL_WIZARD_SRC) $(WEB_SHARED_SRC) web/.angular/.updated
|
||||
npm --prefix web run build:install
|
||||
touch web/dist/raw/install-wizard/index.html
|
||||
|
||||
web/dist/raw/start-tunnel/index.html: $(WEB_START_TUNNEL_SRC) $(WEB_SHARED_SRC) web/.angular/.updated
|
||||
web/dist/raw/start-tunnel/index.html: $(WEB_START_TUNNEL_SRC) $(WEB_SHARED_SRC) web/.angular/.updated web/.i18n-checked
|
||||
npm --prefix web run build:tunnel
|
||||
touch web/dist/raw/start-tunnel/index.html
|
||||
|
||||
web/dist/static/%/index.html: web/dist/raw/%/index.html
|
||||
./compress-uis.sh $*
|
||||
./web/compress-uis.sh $*
|
||||
|
||||
web/config.json: $(GIT_HASH_FILE) web/config-sample.json
|
||||
jq '.useMocks = false' web/config-sample.json | jq '.gitHash = "$(shell cat GIT_HASH.txt)"' > web/config.json
|
||||
web/config.json: $(GIT_HASH_FILE) $(ENVIRONMENT_FILE) web/config-sample.json web/update-config.sh
|
||||
./web/update-config.sh
|
||||
|
||||
patch-db/client/node_modules/.package-lock.json: patch-db/client/package.json
|
||||
npm --prefix patch-db/client ci
|
||||
@@ -371,14 +381,17 @@ uis: $(WEB_UIS)
|
||||
# this is a convenience step to build the UI
|
||||
ui: web/dist/raw/ui
|
||||
|
||||
cargo-deps/aarch64-unknown-linux-musl/release/pi-beep:
|
||||
ARCH=aarch64 ./build-cargo-dep.sh pi-beep
|
||||
target/aarch64-unknown-linux-musl/release/pi-beep: ./build/build-cargo-dep.sh
|
||||
ARCH=aarch64 ./build/build-cargo-dep.sh pi-beep
|
||||
|
||||
cargo-deps/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console:
|
||||
ARCH=$(ARCH) PREINSTALL="apk add musl-dev pkgconfig" ./build-cargo-dep.sh tokio-console
|
||||
target/$(RUST_ARCH)-unknown-linux-musl/release/tokio-console: ./build/build-cargo-dep.sh
|
||||
ARCH=$(ARCH) ./build/build-cargo-dep.sh tokio-console
|
||||
touch $@
|
||||
|
||||
cargo-deps/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs:
|
||||
ARCH=$(ARCH) PREINSTALL="apk add fuse3 fuse3-dev fuse3-static musl-dev pkgconfig" ./build-cargo-dep.sh --git https://github.com/Start9Labs/start-fs.git startos-backup-fs
|
||||
target/$(RUST_ARCH)-unknown-linux-musl/release/startos-backup-fs: ./build/build-cargo-dep.sh
|
||||
ARCH=$(ARCH) ./build/build-cargo-dep.sh --git https://github.com/Start9Labs/start-fs.git startos-backup-fs
|
||||
touch $@
|
||||
|
||||
cargo-deps/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph:
|
||||
ARCH=$(ARCH) PREINSTALL="apk add musl-dev pkgconfig" ./build-cargo-dep.sh flamegraph
|
||||
target/$(RUST_ARCH)-unknown-linux-musl/release/flamegraph: ./build/build-cargo-dep.sh
|
||||
ARCH=$(ARCH) ./build/build-cargo-dep.sh flamegraph
|
||||
touch $@
|
||||
|
||||
88
README.md
@@ -7,76 +7,64 @@
|
||||
<a href="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml">
|
||||
<img src="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml/badge.svg">
|
||||
</a>
|
||||
<a href="https://heyapollo.com/product/startos">
|
||||
<a href="https://heyapollo.com/product/startos">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/apollo-review%20%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%20-slateblue">
|
||||
</a>
|
||||
<a href="https://twitter.com/start9labs">
|
||||
<img alt="X (formerly Twitter) Follow" src="https://img.shields.io/twitter/follow/start9labs">
|
||||
</a>
|
||||
<a href="https://matrix.to/#/#community:matrix.start9labs.com">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/community-matrix-yellow?logo=matrix">
|
||||
</a>
|
||||
<a href="https://t.me/start9_labs">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/community-telegram-blue?logo=telegram">
|
||||
</a>
|
||||
<a href="https://docs.start9.com">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/docs-orange?label=%F0%9F%91%A4%20support">
|
||||
</a>
|
||||
<a href="https://matrix.to/#/#community-dev:matrix.start9labs.com">
|
||||
<a href="https://matrix.to/#/#dev-startos:matrix.start9labs.com">
|
||||
<img alt="Static Badge" src="https://img.shields.io/badge/developer-matrix-darkcyan?logo=matrix">
|
||||
</a>
|
||||
<a href="https://start9.com">
|
||||
<img alt="Website" src="https://img.shields.io/website?up_message=online&down_message=offline&url=https%3A%2F%2Fstart9.com&logo=website&label=%F0%9F%8C%90%20website">
|
||||
</a>
|
||||
</div>
|
||||
<br />
|
||||
<div align="center">
|
||||
<h3>
|
||||
Welcome to the era of Sovereign Computing
|
||||
</h3>
|
||||
<p>
|
||||
StartOS is an open source Linux distribution optimized for running a personal server. It facilitates the discovery, installation, network configuration, service configuration, data backup, dependency management, and health monitoring of self-hosted software services.
|
||||
</p>
|
||||
</div>
|
||||
<br />
|
||||
<p align="center">
|
||||
<img src="assets/StartOS.png" alt="StartOS" width="85%">
|
||||
</p>
|
||||
<br />
|
||||
|
||||
## Running StartOS

> [!WARNING]
> StartOS is in beta. It lacks features. It doesn't always work perfectly. Start9 servers are not plug and play. Using them properly requires some effort and patience. Please do not use StartOS or purchase a server if you are unable or unwilling to follow instructions and learn new concepts.

## What is StartOS?

### 💰 Buy a Start9 server

This is the most convenient option. Simply [buy a server](https://store.start9.com) from Start9 and plug it in.

StartOS is an open-source Linux distribution for running a personal server. It handles discovery, installation, network configuration, data backup, dependency management, and health monitoring of self-hosted services.

### 👷 Build your own server

This option is easier than you might imagine, and there are 4 reasons why you might prefer it:

1. You already have hardware
1. You want to save on shipping costs
1. You prefer not to divulge your physical address
1. You just like building things

**Tech stack:** Rust backend (Tokio/Axum), Angular frontend, Node.js container runtime with LXC, and a custom diff-based database ([Patch-DB](https://github.com/Start9Labs/patch-db)) for reactive state synchronization.

To pursue this option, follow one of our [DIY guides](https://start9.com/latest/diy).

Services run in isolated LXC containers, packaged as [S9PKs](https://github.com/Start9Labs/start-os/blob/master/core/s9pk-structure.md) — a signed, merkle-archived format that supports partial downloads and cryptographic verification.

## ❤️ Contributing

There are multiple ways to contribute: work directly on StartOS, package a service for the marketplace, or help with documentation and guides. To learn more about contributing, see [here](https://start9.com/contribute/).

## What can you do with it?

To report security issues, please email our security team - security@start9.com.

StartOS lets you self-host services that would otherwise depend on third-party cloud providers — giving you full ownership of your data and infrastructure.

## 🌎 Marketplace

There are dozens of services available for StartOS, and new ones are being added all the time. Check out the full list of available services [here](https://marketplace.start9.com/marketplace). To read more about the Marketplace ecosystem, check out this [blog post](https://blog.start9.com/start9-marketplace-strategy/).

Browse available services on the [Start9 Marketplace](https://marketplace.start9.com/), including:

## 🖥️ User Interface Screenshots

- **Bitcoin & Lightning** — Run a full Bitcoin node, Lightning node, BTCPay Server, and other payment infrastructure
- **Communication** — Self-host Matrix, SimpleX, or other messaging platforms
- **Cloud Storage** — Run Nextcloud, Vaultwarden, and other productivity tools

<p align="center">
  <img src="assets/registry.png" alt="StartOS Marketplace" width="49%">
  <img src="assets/community.png" alt="StartOS Community Registry" width="49%">
  <img src="assets/c-lightning.png" alt="StartOS NextCloud Service" width="49%">
  <img src="assets/btcpay.png" alt="StartOS BTCPay Service" width="49%">
  <img src="assets/nextcloud.png" alt="StartOS System Settings" width="49%">
  <img src="assets/system.png" alt="StartOS System Settings" width="49%">
  <img src="assets/welcome.png" alt="StartOS System Settings" width="49%">
  <img src="assets/logs.png" alt="StartOS System Settings" width="49%">
</p>

Services are added by the community. If a service you want isn't available, you can [package it yourself](https://github.com/Start9Labs/ai-service-packaging/).

## Getting StartOS

### Buy a Start9 server

The easiest path. [Buy a server](https://store.start9.com) from Start9 and plug it in.

### Build your own

Follow the [install guide](https://docs.start9.com/start-os/installing.html) to install StartOS on your own hardware. Reasons to go this route:

1. You already have compatible hardware
2. You want to save on shipping costs
3. You prefer not to share your physical address
4. You enjoy building things

### Build from source

See [CONTRIBUTING.md](CONTRIBUTING.md) for environment setup, build instructions, and development workflow.

## Contributing

There are multiple ways to contribute: work directly on StartOS, package a service for the marketplace, or help with documentation and guides. See [CONTRIBUTING.md](CONTRIBUTING.md) or visit [start9.com/contribute](https://start9.com/contribute/).

To report security issues, email [security@start9.com](mailto:security@start9.com).
@@ -1,77 +0,0 @@
# StartTunnel

A self-hosted WireGuard VPN optimized for creating VLANs and reverse tunneling to personal servers.

You can think of StartTunnel as a "virtual router in the cloud".

Use it for private remote access to self-hosted services running on a personal server, or to expose self-hosted services to the public Internet without revealing the host server's IP address.

## Features

- **Create Subnets**: Each subnet creates a private, virtual local area network (VLAN), similar to the LAN created by a home router.

- **Add Devices**: When you add a device (server, phone, laptop) to a subnet, it receives a LAN IP address on that subnet as well as a unique WireGuard config that must be copied, downloaded, or scanned into the device.

- **Forward Ports**: Forwarding a port creates a "reverse tunnel", exposing a specific port on a specific device to the public Internet.

## Features

- **Create Subnets**: Each subnet creates a private, virtual local area network (VLAN), similar to the LAN created by a home router.

- **Add Devices**: When you add a device (server, phone, laptop) to a subnet, it receives a LAN IP address on that subnet as well as a unique Wireguard config that must be copied, downloaded, or scanned into the device.

- **Forward Ports**: Forwarding a port creates a "reverse tunnel", exposing a specific port on a specific device to the public Internet.

## Installation

1. Rent a low-cost VPS. For most use cases, the cheapest option should be enough.

   - It must have a dedicated public IP address.
   - For compute (CPU), memory (RAM), and storage (disk), choose the minimum spec.
   - For transfer (bandwidth), it depends on (1) your use case and (2) your home Internet's _upload_ speed. Even if you intend to serve large files or stream content from your server, there is no reason to pay for speeds that exceed your home Internet's upload speed.

1. Provision the VPS with the latest version of Debian.

1. Access the VPS via SSH.

1. Install StartTunnel:

   ```sh
   TMP_DIR=$(mktemp -d) && (cd $TMP_DIR && wget https://github.com/Start9Labs/start-os/releases/download/v0.4.0-alpha.12/start-tunnel-0.4.0-alpha.12-unknown.dev_$(uname -m).deb && apt-get install -y ./start-tunnel-0.4.0-alpha.12-unknown.dev_$(uname -m).deb) && rm -rf $TMP_DIR && systemctl start start-tunneld && echo "Installation Succeeded"
   ```

5. [Initialize the web interface](#web-interface) (recommended)

## Updating

```sh
TMP_DIR=$(mktemp -d) && (cd $TMP_DIR && wget https://github.com/Start9Labs/start-os/releases/download/v0.4.0-alpha.12/start-tunnel-0.4.0-alpha.12-unknown.dev_$(uname -m).deb && apt-get install --reinstall -y ./start-tunnel-0.4.0-alpha.12-unknown.dev_$(uname -m).deb) && rm -rf $TMP_DIR && systemctl daemon-reload && systemctl restart start-tunneld && echo "Update Succeeded"
```

## CLI

By default, StartTunnel is managed via the `start-tunnel` command line interface, which is self-documented.

```
start-tunnel --help
```

## Web Interface

If you choose to enable the web interface (recommended in most cases), StartTunnel can be accessed as a website from the browser, or programmatically via API.

1. Initialize the web interface.

   ```sh
   start-tunnel web init
   ```

1. When prompted, select the IP address at which to host the web interface. In many cases, there will be only one IP address.

1. When prompted, enter the port at which to host the web interface. The default is 8443, and we recommend using it. If you change the default, choose an uncommon port to avoid conflicts.

1. Select whether to autogenerate a self-signed certificate or provide your own certificate and key. If you choose to autogenerate, you will be asked to list all IP addresses and domains for which to sign the certificate. For example, if you intend to access your StartTunnel web UI at a domain, include the domain in the list.

1. You will receive a success message with 3 pieces of information:

   - <https://IP:port>: the URL where you can reach your personal web interface.
   - Password: an autogenerated password for your interface. If you lose/forget it, you can reset it using the CLI.
   - Root Certificate Authority: the Root CA of your StartTunnel instance. If you have not already, trust it in your browser or system keychain.
BIN
apt/start9.gpg
Normal file
1
apt/start9.list
Normal file
@@ -0,0 +1 @@
|
||||
deb [arch=amd64,arm64,riscv64 signed-by=/usr/share/keyrings/start9.gpg] https://start9-debs.nyc3.cdn.digitaloceanspaces.com stable main
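
As context for how this repository definition is used, a minimal sketch of enabling it on a Debian system, assuming the `apt/start9.gpg` keyring and `apt/start9.list` file from this repo are present in the working directory (the destination paths mirror the Makefile's `install-tunnel` rule):

```sh
sudo cp apt/start9.gpg /usr/share/keyrings/start9.gpg
sudo cp apt/start9.list /etc/apt/sources.list.d/start9.list
sudo apt update
```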
|
||||
|
Before Width: | Height: | Size: 2.1 MiB |
|
Before Width: | Height: | Size: 396 KiB |
|
Before Width: | Height: | Size: 402 KiB |
|
Before Width: | Height: | Size: 591 KiB |
BIN
assets/logs.png
|
Before Width: | Height: | Size: 1.6 MiB |
|
Before Width: | Height: | Size: 319 KiB |
|
Before Width: | Height: | Size: 521 KiB |
|
Before Width: | Height: | Size: 331 KiB |
|
Before Width: | Height: | Size: 402 KiB |
@@ -1,34 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
shopt -s expand_aliases
|
||||
|
||||
if [ "$0" != "./build-cargo-dep.sh" ]; then
|
||||
>&2 echo "Must be run from start-os directory"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
USE_TTY=
|
||||
if tty -s; then
|
||||
USE_TTY="-it"
|
||||
fi
|
||||
|
||||
if [ -z "$ARCH" ]; then
|
||||
ARCH=$(uname -m)
|
||||
fi
|
||||
|
||||
DOCKER_PLATFORM="linux/${ARCH}"
|
||||
if [ "$ARCH" = aarch64 ] || [ "$ARCH" = arm64 ]; then
|
||||
DOCKER_PLATFORM="linux/arm64"
|
||||
elif [ "$ARCH" = x86_64 ]; then
|
||||
DOCKER_PLATFORM="linux/amd64"
|
||||
fi
|
||||
|
||||
mkdir -p cargo-deps
|
||||
alias 'rust-musl-builder'='docker run $USE_TTY --platform=${DOCKER_PLATFORM} --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)"/cargo-deps:/home/rust/src -w /home/rust/src -P rust:alpine'
|
||||
|
||||
PREINSTALL=${PREINSTALL:-true}
|
||||
|
||||
rust-musl-builder sh -c "$PREINSTALL && cargo install $* --target-dir /home/rust/src --target=$ARCH-unknown-linux-musl"
|
||||
sudo chown -R $USER cargo-deps
|
||||
sudo chown -R $USER ~/.cargo
|
||||
0
build/README.md
Normal file
138
build/apt/publish-deb.sh
Executable file
@@ -0,0 +1,138 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Publish .deb files to an S3-hosted apt repository.
|
||||
#
|
||||
# Usage: publish-deb.sh <deb-file-or-directory> [<deb-file-or-directory> ...]
|
||||
#
|
||||
# Environment variables:
|
||||
# GPG_PRIVATE_KEY - Armored GPG private key (imported if set)
|
||||
# GPG_KEY_ID - GPG key ID for signing
|
||||
# S3_ACCESS_KEY - S3 access key
|
||||
# S3_SECRET_KEY - S3 secret key
|
||||
# S3_ENDPOINT - S3 endpoint (default: https://nyc3.digitaloceanspaces.com)
|
||||
# S3_BUCKET - S3 bucket name (default: start9-debs)
|
||||
# SUITE - Apt suite name (default: stable)
|
||||
# COMPONENT - Apt component name (default: main)
|
||||
|
||||
set -e
|
||||
|
||||
if [ $# -eq 0 ]; then
|
||||
echo "Usage: $0 <deb-file-or-directory> [...]" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
BUCKET="${S3_BUCKET:-start9-debs}"
|
||||
ENDPOINT="${S3_ENDPOINT:-https://nyc3.digitaloceanspaces.com}"
|
||||
SUITE="${SUITE:-stable}"
|
||||
COMPONENT="${COMPONENT:-main}"
|
||||
REPO_DIR="$(mktemp -d)"
|
||||
|
||||
cleanup() {
|
||||
rm -rf "$REPO_DIR"
|
||||
}
|
||||
trap cleanup EXIT
|
||||
|
||||
# Import GPG key if provided
|
||||
if [ -n "$GPG_PRIVATE_KEY" ]; then
|
||||
echo "$GPG_PRIVATE_KEY" | gpg --batch --import 2>/dev/null
|
||||
fi
|
||||
|
||||
# Configure s3cmd
|
||||
if [ -n "$S3_ACCESS_KEY" ] && [ -n "$S3_SECRET_KEY" ]; then
|
||||
S3CMD_CONFIG="$(mktemp)"
|
||||
cat > "$S3CMD_CONFIG" <<EOF
|
||||
[default]
|
||||
access_key = ${S3_ACCESS_KEY}
|
||||
secret_key = ${S3_SECRET_KEY}
|
||||
host_base = $(echo "$ENDPOINT" | sed 's|https://||')
|
||||
host_bucket = %(bucket)s.$(echo "$ENDPOINT" | sed 's|https://||')
|
||||
use_https = True
|
||||
EOF
|
||||
s3() {
|
||||
s3cmd -c "$S3CMD_CONFIG" "$@"
|
||||
}
|
||||
else
|
||||
# Fall back to default ~/.s3cfg
|
||||
S3CMD_CONFIG=""
|
||||
s3() {
|
||||
s3cmd "$@"
|
||||
}
|
||||
fi
|
||||
|
||||
# Sync existing repo from S3
|
||||
echo "Syncing existing repo from s3://${BUCKET}/ ..."
|
||||
s3 sync --no-mime-magic "s3://${BUCKET}/" "$REPO_DIR/" 2>/dev/null || true
|
||||
|
||||
# Collect all .deb files from arguments
|
||||
DEB_FILES=()
|
||||
for arg in "$@"; do
|
||||
if [ -d "$arg" ]; then
|
||||
while IFS= read -r -d '' f; do
|
||||
DEB_FILES+=("$f")
|
||||
done < <(find "$arg" -name '*.deb' -print0)
|
||||
elif [ -f "$arg" ]; then
|
||||
DEB_FILES+=("$arg")
|
||||
else
|
||||
echo "Warning: $arg is not a file or directory, skipping" >&2
|
||||
fi
|
||||
done
|
||||
|
||||
if [ ${#DEB_FILES[@]} -eq 0 ]; then
|
||||
echo "No .deb files found" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Copy each deb to the pool, renaming to standard format
|
||||
for deb in "${DEB_FILES[@]}"; do
|
||||
PKG_NAME="$(dpkg-deb --field "$deb" Package)"
|
||||
POOL_DIR="$REPO_DIR/pool/${COMPONENT}/${PKG_NAME:0:1}/${PKG_NAME}"
|
||||
mkdir -p "$POOL_DIR"
|
||||
cp "$deb" "$POOL_DIR/"
|
||||
dpkg-name -o "$POOL_DIR/$(basename "$deb")" 2>/dev/null || true
|
||||
echo "Added: $(basename "$deb") -> pool/${COMPONENT}/${PKG_NAME:0:1}/${PKG_NAME}/"
|
||||
done
|
||||
|
||||
# Generate Packages indices for each architecture
|
||||
for arch in amd64 arm64 riscv64; do
|
||||
BINARY_DIR="$REPO_DIR/dists/${SUITE}/${COMPONENT}/binary-${arch}"
|
||||
mkdir -p "$BINARY_DIR"
|
||||
(
|
||||
cd "$REPO_DIR"
|
||||
dpkg-scanpackages --arch "$arch" pool/ > "$BINARY_DIR/Packages"
|
||||
gzip -k -f "$BINARY_DIR/Packages"
|
||||
)
|
||||
echo "Generated Packages index for ${arch}"
|
||||
done
|
||||
|
||||
# Generate Release file
|
||||
(
|
||||
cd "$REPO_DIR/dists/${SUITE}"
|
||||
apt-ftparchive release \
|
||||
-o "APT::FTPArchive::Release::Origin=Start9" \
|
||||
-o "APT::FTPArchive::Release::Label=Start9" \
|
||||
-o "APT::FTPArchive::Release::Suite=${SUITE}" \
|
||||
-o "APT::FTPArchive::Release::Codename=${SUITE}" \
|
||||
-o "APT::FTPArchive::Release::Architectures=amd64 arm64 riscv64" \
|
||||
-o "APT::FTPArchive::Release::Components=${COMPONENT}" \
|
||||
. > Release
|
||||
)
|
||||
echo "Generated Release file"
|
||||
|
||||
# Sign if GPG key is available
|
||||
if [ -n "$GPG_KEY_ID" ]; then
|
||||
(
|
||||
cd "$REPO_DIR/dists/${SUITE}"
|
||||
gpg --default-key "$GPG_KEY_ID" --batch --yes --detach-sign -o Release.gpg Release
|
||||
gpg --default-key "$GPG_KEY_ID" --batch --yes --clearsign -o InRelease Release
|
||||
)
|
||||
echo "Signed Release file with key ${GPG_KEY_ID}"
|
||||
else
|
||||
echo "Warning: GPG_KEY_ID not set, Release file is unsigned" >&2
|
||||
fi
|
||||
|
||||
# Upload to S3
|
||||
echo "Uploading to s3://${BUCKET}/ ..."
|
||||
s3 sync --acl-public --no-mime-magic "$REPO_DIR/" "s3://${BUCKET}/"
|
||||
|
||||
[ -n "$S3CMD_CONFIG" ] && rm -f "$S3CMD_CONFIG"
|
||||
echo "Done."
|
||||
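
A hypothetical invocation of the `publish-deb.sh` script above, using the environment variables it documents (all values are placeholders):

```sh
GPG_KEY_ID=<signing-key-id> \
S3_ACCESS_KEY=<access-key> S3_SECRET_KEY=<secret-key> \
SUITE=stable COMPONENT=main \
./build/apt/publish-deb.sh results/
```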
26
build/build-cargo-dep.sh
Executable file
@@ -0,0 +1,26 @@
|
||||
#!/bin/bash
|
||||
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")/.."
|
||||
|
||||
set -e
|
||||
shopt -s expand_aliases
|
||||
|
||||
if [ -z "$ARCH" ]; then
|
||||
ARCH=$(uname -m)
|
||||
fi
|
||||
|
||||
RUST_ARCH="$ARCH"
|
||||
if [ "$ARCH" = "riscv64" ]; then
|
||||
RUST_ARCH="riscv64gc"
|
||||
fi
|
||||
|
||||
mkdir -p target
|
||||
|
||||
source core/build/builder-alias.sh
|
||||
|
||||
RUSTFLAGS="-C target-feature=+crt-static"
|
||||
|
||||
rust-zig-builder cargo-zigbuild install $* --target-dir /workdir/target/ --target=$RUST_ARCH-unknown-linux-musl
|
||||
if [ "$(ls -nd "target/$RUST_ARCH-unknown-linux-musl/release/${!#}" | awk '{ print $3 }')" != "$UID" ]; then
|
||||
rust-zig-builder sh -c "chown -R $UID:$UID target && chown -R $UID:$UID /usr/local/cargo"
|
||||
fi
|
||||
@@ -11,13 +11,13 @@ if [ -z "$PLATFORM" ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rm -rf ./firmware/$PLATFORM
|
||||
mkdir -p ./firmware/$PLATFORM
|
||||
rm -rf ./lib/firmware/$PLATFORM
|
||||
mkdir -p ./lib/firmware/$PLATFORM
|
||||
|
||||
cd ./firmware/$PLATFORM
|
||||
cd ./lib/firmware/$PLATFORM
|
||||
|
||||
firmwares=()
|
||||
while IFS= read -r line; do firmwares+=("$line"); done < <(jq -c ".[] | select(.platform[] | contains(\"$PLATFORM\"))" ../../build/lib/firmware.json)
|
||||
while IFS= read -r line; do firmwares+=("$line"); done < <(jq -c ".[] | select(.platform[] | contains(\"$PLATFORM\"))" ../../firmware.json)
|
||||
for firmware in "${firmwares[@]}"; do
|
||||
if [ -n "$firmware" ]; then
|
||||
id=$(echo "$firmware" | jq --raw-output '.id')
|
||||
14
build/download-tor-s9pk.sh
Executable file
@@ -0,0 +1,14 @@
|
||||
#!/bin/bash
|
||||
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
|
||||
set -e
|
||||
|
||||
ARCH=$1
|
||||
|
||||
if [ -z "$ARCH" ]; then
|
||||
>&2 echo "usage: $0 <ARCH>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
curl --fail -L -o "./lib/tor_${ARCH}.s9pk" "https://s9pks.nyc3.cdn.digitaloceanspaces.com/tor_${ARCH}.s9pk"
|
||||
@@ -3,6 +3,7 @@ avahi-utils
|
||||
b3sum
|
||||
bash-completion
|
||||
beep
|
||||
binfmt-support
|
||||
bmon
|
||||
btrfs-progs
|
||||
ca-certificates
|
||||
@@ -10,11 +11,13 @@ cifs-utils
|
||||
conntrack
|
||||
cryptsetup
|
||||
curl
|
||||
dkms
|
||||
dmidecode
|
||||
dnsutils
|
||||
dosfstools
|
||||
e2fsprogs
|
||||
ecryptfs-utils
|
||||
equivs
|
||||
exfatprogs
|
||||
flashrom
|
||||
fuse3
|
||||
@@ -34,6 +37,7 @@ lvm2
|
||||
lxc
|
||||
magic-wormhole
|
||||
man-db
|
||||
mokutil
|
||||
ncdu
|
||||
net-tools
|
||||
network-manager
|
||||
@@ -44,6 +48,7 @@ openssh-server
|
||||
podman
|
||||
psmisc
|
||||
qemu-guest-agent
|
||||
qemu-user-static
|
||||
rfkill
|
||||
rsync
|
||||
samba-common-bin
|
||||
@@ -52,6 +57,7 @@ socat
|
||||
sqlite3
|
||||
squashfs-tools
|
||||
squashfs-tools-ng
|
||||
ssl-cert
|
||||
sudo
|
||||
systemd
|
||||
systemd-resolved
|
||||
|
||||
1
build/dpkg-deps/dev.depends
Normal file
@@ -0,0 +1 @@
|
||||
+ nmap
|
||||
@@ -9,6 +9,13 @@ FEATURES+=("${ARCH}")
|
||||
if [ "$ARCH" != "$PLATFORM" ]; then
|
||||
FEATURES+=("${PLATFORM}")
|
||||
fi
|
||||
if [[ "$PLATFORM" =~ -nonfree$ ]]; then
|
||||
FEATURES+=("nonfree")
|
||||
fi
|
||||
if [[ "$PLATFORM" =~ -nvidia$ ]]; then
|
||||
FEATURES+=("nonfree")
|
||||
FEATURES+=("nvidia")
|
||||
fi
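# Illustrative note (not part of the original script): with PLATFORM=x86_64-nvidia
# and ARCH=x86_64, the branches above add "x86_64-nvidia", "nonfree", and "nvidia",
# so FEATURES will contain at least: x86_64 x86_64-nvidia nonfree nvidia.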
|
||||
|
||||
feature_file_checker='
|
||||
/^#/ { next }
|
||||
|
||||
7
build/dpkg-deps/nonfree.depends
Normal file
@@ -0,0 +1,7 @@
|
||||
+ firmware-amd-graphics
|
||||
+ firmware-atheros
|
||||
+ firmware-brcm80211
|
||||
+ firmware-iwlwifi
|
||||
+ firmware-libertas
|
||||
+ firmware-misc-nonfree
|
||||
+ firmware-realtek
|
||||
1
build/dpkg-deps/nvidia.depends
Normal file
@@ -0,0 +1 @@
|
||||
+ nvidia-container-toolkit
|
||||
@@ -1,5 +1,6 @@
|
||||
- grub-efi
|
||||
+ gdisk
|
||||
+ parted
|
||||
+ u-boot-rpi
|
||||
+ raspberrypi-net-mods
|
||||
+ raspberrypi-sys-mods
|
||||
+ raspi-config
|
||||
|
||||
0
basename.sh → build/env/basename.sh
vendored
@@ -1,8 +1,10 @@
|
||||
#!/bin/bash
|
||||
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
|
||||
if ! [ -f ./ENVIRONMENT.txt ] || [ "$(cat ./ENVIRONMENT.txt)" != "$ENVIRONMENT" ]; then
|
||||
>&2 echo "Updating ENVIRONMENT.txt to \"$ENVIRONMENT\""
|
||||
echo -n "$ENVIRONMENT" > ./ENVIRONMENT.txt
|
||||
fi
|
||||
|
||||
echo -n ./ENVIRONMENT.txt
|
||||
echo -n ./build/env/ENVIRONMENT.txt
|
||||
@@ -1,5 +1,7 @@
|
||||
#!/bin/bash
|
||||
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
|
||||
if [ "$GIT_BRANCH_AS_HASH" != 1 ]; then
|
||||
GIT_HASH="$(git rev-parse HEAD)$(if ! git diff-index --quiet HEAD --; then echo '-modified'; fi)"
|
||||
else
|
||||
@@ -11,4 +13,4 @@ if ! [ -f ./GIT_HASH.txt ] || [ "$(cat ./GIT_HASH.txt)" != "$GIT_HASH" ]; then
|
||||
echo -n "$GIT_HASH" > ./GIT_HASH.txt
|
||||
fi
|
||||
|
||||
echo -n ./GIT_HASH.txt
|
||||
echo -n ./build/env/GIT_HASH.txt
|
||||
@@ -1,8 +1,10 @@
|
||||
#!/bin/bash
|
||||
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
|
||||
if ! [ -f ./PLATFORM.txt ] || [ "$(cat ./PLATFORM.txt)" != "$PLATFORM" ] && [ -n "$PLATFORM" ]; then
|
||||
>&2 echo "Updating PLATFORM.txt to \"$PLATFORM\""
|
||||
echo -n "$PLATFORM" > ./PLATFORM.txt
|
||||
fi
|
||||
|
||||
echo -n ./PLATFORM.txt
|
||||
echo -n ./build/env/PLATFORM.txt
|
||||
@@ -1,6 +1,8 @@
|
||||
#!/bin/bash
|
||||
|
||||
FE_VERSION="$(cat web/package.json | grep '"version"' | sed 's/[ \t]*"version":[ \t]*"\([^"]*\)",/\1/')"
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")"
|
||||
|
||||
FE_VERSION="$(cat ../../web/package.json | grep '"version"' | sed 's/[ \t]*"version":[ \t]*"\([^"]*\)",/\1/')"
|
||||
|
||||
# TODO: Validate other version sources - backend/Cargo.toml, backend/src/version/mod.rs
|
||||
|
||||
@@ -10,4 +12,4 @@ if ! [ -f ./VERSION.txt ] || [ "$(cat ./VERSION.txt)" != "$VERSION" ]; then
|
||||
echo -n "$VERSION" > ./VERSION.txt
|
||||
fi
|
||||
|
||||
echo -n ./VERSION.txt
|
||||
echo -n ./build/env/VERSION.txt
|
||||
@@ -23,6 +23,8 @@ RUN apt-get update && \
|
||||
squashfs-tools \
|
||||
rsync \
|
||||
b3sum \
|
||||
btrfs-progs \
|
||||
gdisk \
|
||||
dpkg-dev
|
||||
|
||||
|
||||
548
build/image-recipe/build.sh
Executable file
@@ -0,0 +1,548 @@
|
||||
#!/bin/bash
|
||||
set -e
|
||||
|
||||
|
||||
echo "==== StartOS Image Build ===="
|
||||
|
||||
echo "Building for architecture: $IB_TARGET_ARCH"
|
||||
|
||||
SOURCE_DIR="$(realpath $(dirname "${BASH_SOURCE[0]}"))"
|
||||
|
||||
base_dir="$(pwd -P)"
|
||||
prep_results_dir="$base_dir/images-prep"
|
||||
RESULTS_DIR="$base_dir/results"
|
||||
echo "Saving results in: $RESULTS_DIR"
|
||||
|
||||
DEB_PATH="$base_dir/$1"
|
||||
|
||||
VERSION="$(dpkg-deb --fsys-tarfile $DEB_PATH | tar --to-stdout -xvf - ./usr/lib/startos/VERSION.txt)"
|
||||
GIT_HASH="$(dpkg-deb --fsys-tarfile $DEB_PATH | tar --to-stdout -xvf - ./usr/lib/startos/GIT_HASH.txt)"
|
||||
if [[ "$GIT_HASH" =~ ^@ ]]; then
|
||||
GIT_HASH="unknown"
|
||||
else
|
||||
GIT_HASH="$(echo -n "$GIT_HASH" | head -c 7)"
|
||||
fi
|
||||
IB_OS_ENV="$(dpkg-deb --fsys-tarfile $DEB_PATH | tar --to-stdout -xvf - ./usr/lib/startos/ENVIRONMENT.txt)"
|
||||
IB_TARGET_PLATFORM="$(dpkg-deb --fsys-tarfile $DEB_PATH | tar --to-stdout -xvf - ./usr/lib/startos/PLATFORM.txt)"
|
||||
|
||||
VERSION_FULL="${VERSION}-${GIT_HASH}"
|
||||
if [ -n "$IB_OS_ENV" ]; then
|
||||
VERSION_FULL="$VERSION_FULL~${IB_OS_ENV}"
|
||||
fi
|
||||
|
||||
IMAGE_BASENAME=startos-${VERSION_FULL}_${IB_TARGET_PLATFORM}
|
||||
|
||||
BOOTLOADERS=grub-efi
|
||||
if [ "$IB_TARGET_PLATFORM" = "x86_64" ] || [ "$IB_TARGET_PLATFORM" = "x86_64-nonfree" ] || [ "$IB_TARGET_PLATFORM" = "x86_64-nvidia" ]; then
|
||||
IB_TARGET_ARCH=amd64
|
||||
QEMU_ARCH=x86_64
|
||||
BOOTLOADERS=grub-efi,syslinux
|
||||
elif [ "$IB_TARGET_PLATFORM" = "aarch64" ] || [ "$IB_TARGET_PLATFORM" = "aarch64-nonfree" ] || [ "$IB_TARGET_PLATFORM" = "aarch64-nvidia" ] || [ "$IB_TARGET_PLATFORM" = "raspberrypi" ] || [ "$IB_TARGET_PLATFORM" = "rockchip64" ]; then
|
||||
IB_TARGET_ARCH=arm64
|
||||
QEMU_ARCH=aarch64
|
||||
elif [ "$IB_TARGET_PLATFORM" = "riscv64" ] || [ "$IB_TARGET_PLATFORM" = "riscv64-nonfree" ]; then
|
||||
IB_TARGET_ARCH=riscv64
|
||||
QEMU_ARCH=riscv64
|
||||
else
|
||||
IB_TARGET_ARCH="$IB_TARGET_PLATFORM"
|
||||
QEMU_ARCH="$IB_TARGET_PLATFORM"
|
||||
fi
|
||||
|
||||
QEMU_ARGS=()
|
||||
if [ "$QEMU_ARCH" != $(uname -m) ]; then
|
||||
QEMU_ARGS+=(--bootstrap-qemu-arch ${IB_TARGET_ARCH})
|
||||
QEMU_ARGS+=(--bootstrap-qemu-static /usr/bin/qemu-${QEMU_ARCH}-static)
|
||||
fi
|
||||
|
||||
mkdir -p $prep_results_dir
|
||||
|
||||
cd $prep_results_dir
|
||||
|
||||
NON_FREE=
|
||||
if [[ "${IB_TARGET_PLATFORM}" =~ -nonfree$ ]] || [[ "${IB_TARGET_PLATFORM}" =~ -nvidia$ ]] || [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
|
||||
NON_FREE=1
|
||||
fi
|
||||
NVIDIA=
|
||||
if [[ "${IB_TARGET_PLATFORM}" =~ -nvidia$ ]]; then
|
||||
NVIDIA=1
|
||||
fi
|
||||
IMAGE_TYPE=iso
|
||||
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ] || [ "${IB_TARGET_PLATFORM}" = "rockchip64" ]; then
|
||||
IMAGE_TYPE=img
|
||||
fi
|
||||
|
||||
ARCHIVE_AREAS="main contrib"
|
||||
if [ "$NON_FREE" = 1 ]; then
|
||||
if [ "$IB_SUITE" = "bullseye" ]; then
|
||||
ARCHIVE_AREAS="$ARCHIVE_AREAS non-free"
|
||||
else
|
||||
ARCHIVE_AREAS="$ARCHIVE_AREAS non-free non-free-firmware"
|
||||
fi
|
||||
fi
|
||||
|
||||
PLATFORM_CONFIG_EXTRAS=()
|
||||
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
|
||||
PLATFORM_CONFIG_EXTRAS+=( --firmware-binary false )
|
||||
PLATFORM_CONFIG_EXTRAS+=( --firmware-chroot false )
|
||||
RPI_KERNEL_VERSION=6.12.47+rpt
|
||||
PLATFORM_CONFIG_EXTRAS+=( --linux-packages linux-image-$RPI_KERNEL_VERSION )
|
||||
PLATFORM_CONFIG_EXTRAS+=( --linux-flavours "rpi-v8 rpi-2712" )
|
||||
elif [ "${IB_TARGET_PLATFORM}" = "rockchip64" ]; then
|
||||
PLATFORM_CONFIG_EXTRAS+=( --linux-flavours rockchip64 )
|
||||
elif [ "${IB_TARGET_ARCH}" = "riscv64" ]; then
|
||||
PLATFORM_CONFIG_EXTRAS+=( --uefi-secure-boot=disable )
|
||||
fi
|
||||
|
||||
|
||||
cat > /etc/wgetrc << EOF
|
||||
retry_connrefused = on
|
||||
tries = 100
|
||||
EOF
|
||||
lb config \
|
||||
--iso-application "StartOS v${VERSION_FULL} ${IB_TARGET_ARCH}" \
|
||||
--iso-volume "StartOS v${VERSION} ${IB_TARGET_ARCH}" \
|
||||
--iso-preparer "START9 LABS; HTTPS://START9.COM" \
|
||||
--iso-publisher "START9 LABS; HTTPS://START9.COM" \
|
||||
--backports true \
|
||||
--bootappend-live "boot=live noautologin console=tty0" \
|
||||
--bootloaders $BOOTLOADERS \
|
||||
--cache false \
|
||||
--mirror-bootstrap "https://deb.debian.org/debian/" \
|
||||
--mirror-chroot "https://deb.debian.org/debian/" \
|
||||
--mirror-chroot-security "https://security.debian.org/debian-security" \
|
||||
-d ${IB_SUITE} \
|
||||
-a ${IB_TARGET_ARCH} \
|
||||
${QEMU_ARGS[@]} \
|
||||
--archive-areas "${ARCHIVE_AREAS}" \
|
||||
${PLATFORM_CONFIG_EXTRAS[@]}
|
||||
|
||||
# Overlays
|
||||
|
||||
mkdir -p config/packages.chroot/
|
||||
cp $RESULTS_DIR/$IMAGE_BASENAME.deb config/packages.chroot/
|
||||
dpkg-name config/packages.chroot/*.deb
|
||||
|
||||
mkdir -p config/includes.chroot/etc
|
||||
echo start > config/includes.chroot/etc/hostname
|
||||
cat > config/includes.chroot/etc/hosts << EOT
|
||||
127.0.0.1 localhost start
|
||||
::1 localhost start ip6-localhost ip6-loopback
|
||||
ff02::1 ip6-allnodes
|
||||
ff02::2 ip6-allrouters
|
||||
EOT
|
||||
|
||||
if [[ "${IB_OS_ENV}" =~ (^|-)dev($|-) ]]; then
|
||||
mkdir -p config/includes.chroot/etc/ssh/sshd_config.d
|
||||
echo "PasswordAuthentication yes" > config/includes.chroot/etc/ssh/sshd_config.d/dev-password-auth.conf
|
||||
fi
|
||||
|
||||
# Installer marker file (used by installed GRUB to detect the live USB)
|
||||
mkdir -p config/includes.binary
|
||||
touch config/includes.binary/.startos-installer
|
||||
|
||||
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
|
||||
mkdir -p config/includes.chroot
|
||||
git clone --depth=1 --branch=stable https://github.com/raspberrypi/rpi-firmware.git config/includes.chroot/boot
|
||||
rm -rf config/includes.chroot/boot/.git config/includes.chroot/boot/modules
|
||||
rsync -rLp $SOURCE_DIR/raspberrypi/squashfs/ config/includes.chroot/
|
||||
fi
|
||||
|
||||
# Bootloaders
|
||||
|
||||
rm -rf config/bootloaders
|
||||
cp -r /usr/share/live/build/bootloaders config/bootloaders
|
||||
|
||||
cat > config/bootloaders/syslinux/syslinux.cfg << EOF
|
||||
include menu.cfg
|
||||
default vesamenu.c32
|
||||
prompt 0
|
||||
timeout 50
|
||||
EOF
|
||||
|
||||
cat > config/bootloaders/isolinux/isolinux.cfg << EOF
|
||||
include menu.cfg
|
||||
default vesamenu.c32
|
||||
prompt 0
|
||||
timeout 50
|
||||
EOF
|
||||
|
||||
# Extract splash.png from the deb package
|
||||
dpkg-deb --fsys-tarfile $DEB_PATH | tar --to-stdout -xf - ./usr/lib/startos/splash.png > /tmp/splash.png
|
||||
cp /tmp/splash.png config/bootloaders/syslinux_common/splash.png
|
||||
cp /tmp/splash.png config/bootloaders/isolinux/splash.png
|
||||
cp /tmp/splash.png config/bootloaders/grub-pc/splash.png
|
||||
rm /tmp/splash.png
|
||||
|
||||
sed -i -e '2i set timeout=5' config/bootloaders/grub-pc/config.cfg
|
||||
|
||||
# Archives
|
||||
|
||||
mkdir -p config/archives
|
||||
|
||||
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
|
||||
# Fetch the keyring package (not the old raspberrypi.gpg.key, which has
|
||||
# SHA1-only binding signatures that sqv on Trixie rejects).
|
||||
KEYRING_DEB=$(mktemp)
|
||||
curl -fsSL -o "$KEYRING_DEB" https://archive.raspberrypi.com/debian/pool/main/r/raspberrypi-archive-keyring/raspberrypi-archive-keyring_2025.1+rpt1_all.deb
|
||||
dpkg-deb -x "$KEYRING_DEB" "$KEYRING_DEB.d"
|
||||
cp "$KEYRING_DEB.d/usr/share/keyrings/raspberrypi-archive-keyring.gpg" config/archives/raspi.key
|
||||
rm -rf "$KEYRING_DEB" "$KEYRING_DEB.d"
|
||||
echo "deb [arch=${IB_TARGET_ARCH} signed-by=/etc/apt/trusted.gpg.d/raspi.key.gpg] https://archive.raspberrypi.com/debian/ ${IB_SUITE} main" > config/archives/raspi.list
|
||||
fi
|
||||
|
||||
if [ "${IB_TARGET_PLATFORM}" = "rockchip64" ]; then
|
||||
curl -fsSL https://apt.armbian.com/armbian.key | gpg --dearmor -o config/archives/armbian.key
|
||||
echo "deb https://apt.armbian.com/ ${IB_SUITE} main" > config/archives/armbian.list
|
||||
fi
|
||||
|
||||
if [ "$NVIDIA" = 1 ]; then
|
||||
curl -fsSL https://nvidia.github.io/libnvidia-container/gpgkey | gpg --dearmor -o config/archives/nvidia-container-toolkit.key
|
||||
curl -s -L https://nvidia.github.io/libnvidia-container/stable/deb/nvidia-container-toolkit.list \
|
||||
| sed 's#deb https://#deb [signed-by=/etc/apt/trusted.gpg.d/nvidia-container-toolkit.key.gpg] https://#g' \
|
||||
> config/archives/nvidia-container-toolkit.list
|
||||
fi
|
||||
|
||||
cat > config/archives/backports.pref <<-EOF
|
||||
Package: linux-image-*
|
||||
Pin: release n=${IB_SUITE}-backports
|
||||
Pin-Priority: 500
|
||||
|
||||
Package: linux-headers-*
|
||||
Pin: release n=${IB_SUITE}-backports
|
||||
Pin-Priority: 500
|
||||
|
||||
Package: *nvidia*
|
||||
Pin: release n=${IB_SUITE}-backports
|
||||
Pin-Priority: 500
|
||||
EOF
|
||||
|
||||
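Once the bootstrap/chroot stages have run, the pin above can be sanity-checked from inside the build chroot. A hedged sketch (the package name is illustrative; substitute the kernel flavour actually installed):

```bash
# Sketch: confirm that kernel packages resolve from <suite>-backports at priority 500.
# Run from $prep_results_dir after `lb chroot`; "linux-image-amd64" is an example name.
chroot chroot apt-cache policy linux-image-amd64
```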
# Hooks
|
||||
|
||||
cat > config/hooks/normal/9000-install-startos.hook.chroot << EOF
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
if [ "${IB_TARGET_PLATFORM}" != "raspberrypi" ]; then
|
||||
/usr/lib/startos/scripts/enable-kiosk
|
||||
fi
|
||||
|
||||
if [ "${NVIDIA}" = "1" ]; then
|
||||
# install a specific NVIDIA driver version
|
||||
|
||||
# ---------------- configuration ----------------
|
||||
NVIDIA_DRIVER_VERSION="\${NVIDIA_DRIVER_VERSION:-580.126.09}"
|
||||
|
||||
BASE_URL="https://download.nvidia.com/XFree86/Linux-${QEMU_ARCH}"
|
||||
|
||||
echo "[nvidia-hook] Using NVIDIA driver: \${NVIDIA_DRIVER_VERSION}" >&2
|
||||
|
||||
# ---------------- kernel version ----------------
|
||||
|
||||
# Determine target kernel version from newest /boot/vmlinuz-* in the chroot.
|
||||
KVER="\$(
|
||||
ls -1t /boot/vmlinuz-* 2>/dev/null \
|
||||
| head -n1 \
|
||||
| sed 's|.*/vmlinuz-||'
|
||||
)"
|
||||
|
||||
if [ -z "\${KVER}" ]; then
|
||||
echo "[nvidia-hook] ERROR: no /boot/vmlinuz-* found; cannot determine kernel version" >&2
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "[nvidia-hook] Target kernel version: \${KVER}" >&2
|
||||
|
||||
# Ensure kernel headers are present
|
||||
TEMP_APT_DEPS=(build-essential pkg-config)
|
||||
if [ ! -e "/lib/modules/\${KVER}/build" ]; then
|
||||
TEMP_APT_DEPS+=(linux-headers-\${KVER})
|
||||
fi
|
||||
|
||||
echo "[nvidia-hook] Installing build dependencies" >&2
|
||||
|
||||
/usr/lib/startos/scripts/install-equivs <<-EOF
|
||||
Package: nvidia-depends
|
||||
Version: \${NVIDIA_DRIVER_VERSION}
|
||||
Section: unknown
|
||||
Priority: optional
|
||||
Depends: \${dep_list="\$(IFS=', '; echo "\${TEMP_APT_DEPS[*]}")"}
|
||||
EOF
|
||||
|
||||
# ---------------- download and run installer ----------------
|
||||
|
||||
RUN_NAME="NVIDIA-Linux-${QEMU_ARCH}-\${NVIDIA_DRIVER_VERSION}.run"
|
||||
RUN_PATH="/root/\${RUN_NAME}"
|
||||
RUN_URL="\${BASE_URL}/\${NVIDIA_DRIVER_VERSION}/\${RUN_NAME}"
|
||||
|
||||
echo "[nvidia-hook] Downloading \${RUN_URL}" >&2
|
||||
wget -O "\${RUN_PATH}" "\${RUN_URL}"
|
||||
chmod +x "\${RUN_PATH}"
|
||||
|
||||
echo "[nvidia-hook] Running NVIDIA installer for kernel \${KVER}" >&2
|
||||
|
||||
if ! sh "\${RUN_PATH}" \
|
||||
--silent \
|
||||
--kernel-name="\${KVER}" \
|
||||
--no-x-check \
|
||||
--no-nouveau-check \
|
||||
--no-runlevel-check; then
|
||||
cat /var/log/nvidia-installer.log
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Rebuild module metadata
|
||||
echo "[nvidia-hook] Running depmod for \${KVER}" >&2
|
||||
depmod -a "\${KVER}"
|
||||
|
||||
echo "[nvidia-hook] NVIDIA \${NVIDIA_DRIVER_VERSION} installation complete for kernel \${KVER}" >&2
|
||||
|
||||
echo "[nvidia-hook] Removing .run installer..." >&2
|
||||
rm -f "\${RUN_PATH}"
|
||||
|
||||
echo "[nvidia-hook] Blacklisting nouveau..." >&2
|
||||
echo "blacklist nouveau" > /etc/modprobe.d/blacklist-nouveau.conf
|
||||
echo "options nouveau modeset=0" >> /etc/modprobe.d/blacklist-nouveau.conf
|
||||
|
||||
echo "[nvidia-hook] Rebuilding initramfs..." >&2
|
||||
update-initramfs -u -k "\${KVER}"
|
||||
|
||||
echo "[nvidia-hook] Removing build dependencies..." >&2
|
||||
apt-get purge -y nvidia-depends
|
||||
apt-get autoremove -y
|
||||
echo "[nvidia-hook] Removed build dependencies." >&2
|
||||
fi
|
||||
|
||||
# Install linux-kbuild for sign-file (Secure Boot module signing)
|
||||
KVER_ALL="\$(ls -1t /boot/vmlinuz-* 2>/dev/null | head -n1 | sed 's|.*/vmlinuz-||')"
|
||||
if [ -n "\${KVER_ALL}" ]; then
|
||||
KBUILD_VER="\$(echo "\${KVER_ALL}" | grep -oP '^\d+\.\d+')"
|
||||
if [ -n "\${KBUILD_VER}" ]; then
|
||||
echo "[build] Installing linux-kbuild-\${KBUILD_VER} for Secure Boot support" >&2
|
||||
apt-get install -y "linux-kbuild-\${KBUILD_VER}" || echo "[build] WARNING: linux-kbuild-\${KBUILD_VER} not available" >&2
|
||||
fi
|
||||
fi
|
||||
|
||||
cp /etc/resolv.conf /etc/resolv.conf.bak
|
||||
|
||||
if [ "${IB_SUITE}" = trixie ] && [ "${IB_TARGET_ARCH}" != riscv64 ]; then
|
||||
echo 'deb https://deb.debian.org/debian/ bookworm main' > /etc/apt/sources.list.d/bookworm.list
|
||||
apt-get update
|
||||
apt-get install -y postgresql-15
|
||||
rm /etc/apt/sources.list.d/bookworm.list
|
||||
apt-get update
|
||||
systemctl mask postgresql
|
||||
fi
|
||||
|
||||
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
|
||||
ln -sf /usr/bin/pi-beep /usr/local/bin/beep
|
||||
sh /boot/firmware/config.sh > /boot/firmware/config.txt
|
||||
mkinitramfs -c gzip -o /boot/initrd.img-${RPI_KERNEL_VERSION}-rpi-v8 ${RPI_KERNEL_VERSION}-rpi-v8
|
||||
mkinitramfs -c gzip -o /boot/initrd.img-${RPI_KERNEL_VERSION}-rpi-2712 ${RPI_KERNEL_VERSION}-rpi-2712
|
||||
cp /usr/lib/u-boot/rpi_arm64/u-boot.bin /boot/firmware/u-boot.bin
|
||||
fi
|
||||
|
||||
useradd --shell /bin/bash -G startos -m start9
|
||||
echo start9:embassy | chpasswd
|
||||
usermod -aG sudo start9
|
||||
usermod -aG systemd-journal start9
|
||||
|
||||
echo "start9 ALL=(ALL:ALL) NOPASSWD: ALL" | sudo tee "/etc/sudoers.d/010_start9-nopasswd"
|
||||
|
||||
if ! [[ "${IB_OS_ENV}" =~ (^|-)dev($|-) ]]; then
|
||||
passwd -l start9
|
||||
fi
|
||||
|
||||
mkdir -p /media/startos
|
||||
chmod 750 /media/startos
|
||||
chown root:startos /media/startos
|
||||
|
||||
EOF
|
||||
|
||||
SOURCE_DATE_EPOCH="${SOURCE_DATE_EPOCH:-$(date '+%s')}"
|
||||
|
||||
if lb bootstrap; then
|
||||
true
|
||||
else
|
||||
EXIT=$?
|
||||
cat ./chroot/debootstrap/debootstrap.log
|
||||
exit $EXIT
|
||||
fi
|
||||
lb chroot
|
||||
lb installer
|
||||
lb binary_chroot
|
||||
lb chroot_prep install all mode-apt-install-binary mode-archives-chroot
|
||||
mv chroot/chroot/etc/resolv.conf.bak chroot/chroot/etc/resolv.conf
|
||||
lb binary_rootfs
|
||||
|
||||
cp $prep_results_dir/binary/live/filesystem.squashfs $RESULTS_DIR/$IMAGE_BASENAME.squashfs
|
||||
|
||||
if [ "${IMAGE_TYPE}" = iso ]; then
|
||||
|
||||
lb binary_manifest
|
||||
lb binary_package-lists
|
||||
lb binary_linux-image
|
||||
lb binary_memtest
|
||||
lb binary_grub-legacy
|
||||
lb binary_grub-pc
|
||||
lb binary_grub_cfg
|
||||
lb binary_syslinux
|
||||
lb binary_disk
|
||||
lb binary_loadlin
|
||||
lb binary_win32-loader
|
||||
lb binary_includes
|
||||
lb binary_grub-efi
|
||||
lb binary_hooks
|
||||
lb binary_checksums
|
||||
find binary -newermt "$(date -d@${SOURCE_DATE_EPOCH} '+%Y-%m-%d %H:%M:%S')" -printf "%y %p\n" -exec touch '{}' -d@${SOURCE_DATE_EPOCH} --no-dereference ';' > binary.modified_timestamps
|
||||
lb binary_iso
|
||||
lb binary_onie
|
||||
lb binary_netboot
|
||||
lb binary_tar
|
||||
lb binary_hdd
|
||||
lb binary_zsync
|
||||
lb chroot_prep remove all mode-archives-chroot
|
||||
lb source
|
||||
|
||||
mv $prep_results_dir/live-image-${IB_TARGET_ARCH}.hybrid.iso $RESULTS_DIR/$IMAGE_BASENAME.iso
|
||||
|
||||
elif [ "${IMAGE_TYPE}" = img ]; then
|
||||
|
||||
SECTOR_LEN=512
|
||||
FW_START=$((1024 * 1024)) # 1MiB (sector 2048) — Pi-specific
|
||||
FW_LEN=$((128 * 1024 * 1024)) # 128MiB (Pi firmware + U-Boot + DTBs)
|
||||
FW_END=$((FW_START + FW_LEN - 1))
|
||||
ESP_START=$((FW_END + 1)) # 100MB EFI System Partition (matches os_install)
|
||||
ESP_LEN=$((100 * 1024 * 1024))
|
||||
ESP_END=$((ESP_START + ESP_LEN - 1))
|
||||
BOOT_START=$((ESP_END + 1)) # 2GB /boot (matches os_install)
|
||||
BOOT_LEN=$((2 * 1024 * 1024 * 1024))
|
||||
BOOT_END=$((BOOT_START + BOOT_LEN - 1))
|
||||
ROOT_START=$((BOOT_END + 1))
|
||||
|
||||
# Size root partition to fit the squashfs + 256MB overhead for btrfs
|
||||
# metadata and config overlay, avoiding the need for btrfs resize
|
||||
SQUASHFS_SIZE=$(stat -c %s $prep_results_dir/binary/live/filesystem.squashfs)
|
||||
ROOT_LEN=$(( SQUASHFS_SIZE + 256 * 1024 * 1024 ))
|
||||
# Align to sector boundary
|
||||
ROOT_LEN=$(( (ROOT_LEN + SECTOR_LEN - 1) / SECTOR_LEN * SECTOR_LEN ))
|
||||
|
||||
# Total image: partitions + GPT backup header (34 sectors)
|
||||
IMG_LEN=$((ROOT_START + ROOT_LEN + 34 * SECTOR_LEN))
|
||||
|
||||
# Fixed GPT partition UUIDs (deterministic, based on old MBR disk ID cb15ae4d)
|
||||
FW_UUID=cb15ae4d-0001-4000-8000-000000000001
|
||||
ESP_UUID=cb15ae4d-0002-4000-8000-000000000002
|
||||
BOOT_UUID=cb15ae4d-0003-4000-8000-000000000003
|
||||
ROOT_UUID=cb15ae4d-0004-4000-8000-000000000004
|
||||
|
||||
TARGET_NAME=$prep_results_dir/${IMAGE_BASENAME}.img
|
||||
truncate -s $IMG_LEN $TARGET_NAME
|
||||
|
||||
sfdisk $TARGET_NAME <<-EOF
|
||||
label: gpt
|
||||
|
||||
${TARGET_NAME}1 : start=$((FW_START / SECTOR_LEN)), size=$((FW_LEN / SECTOR_LEN)), type=EBD0A0A2-B9E5-4433-87C0-68B6B72699C7, uuid=${FW_UUID}, name="firmware"
|
||||
${TARGET_NAME}2 : start=$((ESP_START / SECTOR_LEN)), size=$((ESP_LEN / SECTOR_LEN)), type=C12A7328-F81F-11D2-BA4B-00A0C93EC93B, uuid=${ESP_UUID}, name="efi"
|
||||
${TARGET_NAME}3 : start=$((BOOT_START / SECTOR_LEN)), size=$((BOOT_LEN / SECTOR_LEN)), type=0FC63DAF-8483-4772-8E79-3D69D8477DE4, uuid=${BOOT_UUID}, name="boot"
|
||||
${TARGET_NAME}4 : start=$((ROOT_START / SECTOR_LEN)), size=$((ROOT_LEN / SECTOR_LEN)), type=B921B045-1DF0-41C3-AF44-4C6F280D3FAE, uuid=${ROOT_UUID}, name="root"
|
||||
EOF
|
||||
|
||||
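For reference, the partition table just written can be dumped back out of the image file to confirm the four GPT partitions and their fixed UUIDs; a quick verification sketch:

```bash
# Sketch: inspect the GPT layout written by sfdisk (firmware, efi, boot, root).
sfdisk -d "$TARGET_NAME"
# Or, with human-readable sizes:
fdisk -l "$TARGET_NAME"
```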
# Create named loop device nodes (high minor numbers to avoid conflicts)
|
||||
# and detach any stale ones from previous failed builds
|
||||
FW_DEV=/dev/startos-loop-fw
|
||||
ESP_DEV=/dev/startos-loop-esp
|
||||
BOOT_DEV=/dev/startos-loop-boot
|
||||
ROOT_DEV=/dev/startos-loop-root
|
||||
for dev in $FW_DEV:200 $ESP_DEV:201 $BOOT_DEV:202 $ROOT_DEV:203; do
|
||||
name=${dev%:*}
|
||||
minor=${dev#*:}
|
||||
[ -e $name ] || mknod $name b 7 $minor
|
||||
losetup -d $name 2>/dev/null || true
|
||||
done
|
||||
|
||||
losetup $FW_DEV --offset $FW_START --sizelimit $FW_LEN $TARGET_NAME
|
||||
losetup $ESP_DEV --offset $ESP_START --sizelimit $ESP_LEN $TARGET_NAME
|
||||
losetup $BOOT_DEV --offset $BOOT_START --sizelimit $BOOT_LEN $TARGET_NAME
|
||||
losetup $ROOT_DEV --offset $ROOT_START --sizelimit $ROOT_LEN $TARGET_NAME
|
||||
|
||||
mkfs.vfat -F32 -n firmware $FW_DEV
|
||||
mkfs.vfat -F32 -n efi $ESP_DEV
|
||||
mkfs.vfat -F32 -n boot $BOOT_DEV
|
||||
mkfs.btrfs -f -L rootfs $ROOT_DEV
|
||||
|
||||
TMPDIR=$(mktemp -d)
|
||||
|
||||
# Extract boot files from squashfs to staging area
|
||||
BOOT_STAGING=$(mktemp -d)
|
||||
unsquashfs -n -f -d $BOOT_STAGING $prep_results_dir/binary/live/filesystem.squashfs boot
|
||||
|
||||
# Mount partitions (nested: firmware and efi inside boot)
|
||||
mkdir -p $TMPDIR/boot $TMPDIR/root
|
||||
mount $BOOT_DEV $TMPDIR/boot
|
||||
mkdir -p $TMPDIR/boot/firmware $TMPDIR/boot/efi
|
||||
mount $FW_DEV $TMPDIR/boot/firmware
|
||||
mount $ESP_DEV $TMPDIR/boot/efi
|
||||
mount $ROOT_DEV $TMPDIR/root
|
||||
|
||||
# Copy boot files — nested mounts route firmware/* to the firmware partition
|
||||
cp -a $BOOT_STAGING/boot/. $TMPDIR/boot/
|
||||
rm -rf $BOOT_STAGING
|
||||
|
||||
mkdir $TMPDIR/root/images $TMPDIR/root/config
|
||||
B3SUM=$(b3sum $prep_results_dir/binary/live/filesystem.squashfs | head -c 16)
|
||||
cp $prep_results_dir/binary/live/filesystem.squashfs $TMPDIR/root/images/$B3SUM.rootfs
|
||||
ln -rsf $TMPDIR/root/images/$B3SUM.rootfs $TMPDIR/root/config/current.rootfs
|
||||
|
||||
mkdir -p $TMPDIR/next $TMPDIR/lower $TMPDIR/root/config/work $TMPDIR/root/config/overlay
|
||||
mount $TMPDIR/root/config/current.rootfs $TMPDIR/lower
|
||||
|
||||
mount -t overlay -o lowerdir=$TMPDIR/lower,workdir=$TMPDIR/root/config/work,upperdir=$TMPDIR/root/config/overlay overlay $TMPDIR/next
|
||||
|
||||
if [ "${IB_TARGET_PLATFORM}" = "raspberrypi" ]; then
|
||||
rsync -a $SOURCE_DIR/raspberrypi/img/ $TMPDIR/next/
|
||||
|
||||
# Install GRUB: ESP at /boot/efi (Part 2), /boot (Part 3)
|
||||
mkdir -p $TMPDIR/next/boot \
|
||||
$TMPDIR/next/dev $TMPDIR/next/proc $TMPDIR/next/sys $TMPDIR/next/media/startos/root
|
||||
mount --rbind $TMPDIR/boot $TMPDIR/next/boot
|
||||
mount --bind /dev $TMPDIR/next/dev
|
||||
mount -t proc proc $TMPDIR/next/proc
|
||||
mount -t sysfs sysfs $TMPDIR/next/sys
|
||||
mount --bind $TMPDIR/root $TMPDIR/next/media/startos/root
|
||||
|
||||
chroot $TMPDIR/next grub-install --target=arm64-efi --removable --efi-directory=/boot/efi --boot-directory=/boot --no-nvram
|
||||
chroot $TMPDIR/next update-grub
|
||||
|
||||
umount $TMPDIR/next/media/startos/root
|
||||
umount $TMPDIR/next/sys
|
||||
umount $TMPDIR/next/proc
|
||||
umount $TMPDIR/next/dev
|
||||
umount -l $TMPDIR/next/boot
|
||||
|
||||
# Fix root= in grub.cfg: update-grub sees loop devices, but the
|
||||
# real device uses a fixed GPT PARTUUID for root (Part 4).
|
||||
sed -i "s|root=[^ ]*|root=PARTUUID=${ROOT_UUID}|g" $TMPDIR/boot/grub/grub.cfg
|
||||
|
||||
# Inject first-boot resize script into GRUB config
|
||||
sed -i 's| boot=startos| boot=startos init=/usr/lib/startos/scripts/init_resize\.sh|' $TMPDIR/boot/grub/grub.cfg
|
||||
fi
|
||||
|
||||
umount $TMPDIR/next
|
||||
umount $TMPDIR/lower
|
||||
|
||||
umount $TMPDIR/boot/firmware
|
||||
umount $TMPDIR/boot/efi
|
||||
umount $TMPDIR/boot
|
||||
umount $TMPDIR/root
|
||||
|
||||
losetup -d $ROOT_DEV
|
||||
losetup -d $BOOT_DEV
|
||||
losetup -d $ESP_DEV
|
||||
losetup -d $FW_DEV
|
||||
|
||||
mv $TARGET_NAME $RESULTS_DIR/$IMAGE_BASENAME.img
|
||||
|
||||
fi
|
||||
|
||||
chown $IB_UID:$IB_UID $RESULTS_DIR/$IMAGE_BASENAME.*
|
||||
4
build/image-recipe/raspberrypi/img/etc/fstab
Normal file
@@ -0,0 +1,4 @@
PARTUUID=cb15ae4d-0001-4000-8000-000000000001 /boot/firmware vfat umask=0077 0 2
PARTUUID=cb15ae4d-0002-4000-8000-000000000002 /boot/efi vfat umask=0077 0 1
PARTUUID=cb15ae4d-0003-4000-8000-000000000003 /boot vfat umask=0077 0 2
PARTUUID=cb15ae4d-0004-4000-8000-000000000004 / btrfs defaults 0 1
@@ -12,15 +12,16 @@ get_variables () {
|
||||
BOOT_DEV_NAME=$(echo /sys/block/*/"${BOOT_PART_NAME}" | cut -d "/" -f 4)
|
||||
BOOT_PART_NUM=$(cat "/sys/block/${BOOT_DEV_NAME}/${BOOT_PART_NAME}/partition")
|
||||
|
||||
OLD_DISKID=$(fdisk -l "$ROOT_DEV" | sed -n 's/Disk identifier: 0x\([^ ]*\)/\1/p')
|
||||
|
||||
ROOT_DEV_SIZE=$(cat "/sys/block/${ROOT_DEV_NAME}/size")
|
||||
if [ "$ROOT_DEV_SIZE" -le 67108864 ]; then
|
||||
TARGET_END=$((ROOT_DEV_SIZE - 1))
|
||||
# GPT backup header/entries occupy last 33 sectors
|
||||
USABLE_END=$((ROOT_DEV_SIZE - 34))
|
||||
|
||||
if [ "$USABLE_END" -le 67108864 ]; then
|
||||
TARGET_END=$USABLE_END
|
||||
else
|
||||
TARGET_END=$((33554432 - 1))
|
||||
DATA_PART_START=33554432
|
||||
DATA_PART_END=$((ROOT_DEV_SIZE - 1))
|
||||
DATA_PART_END=$USABLE_END
|
||||
fi
|
||||
|
||||
PARTITION_TABLE=$(parted -m "$ROOT_DEV" unit s print | tr -d 's')
|
||||
@@ -57,37 +58,30 @@ check_variables () {
|
||||
main () {
|
||||
get_variables
|
||||
|
||||
# Fix GPT backup header first — the image was built with a tight root
|
||||
# partition, so the backup GPT is not at the end of the SD card. parted
|
||||
# will prompt interactively if this isn't fixed before we use it.
|
||||
sgdisk -e "$ROOT_DEV" 2>/dev/null || true
|
||||
|
||||
if ! check_variables; then
|
||||
return 1
|
||||
fi
|
||||
|
||||
# if [ "$ROOT_PART_END" -eq "$TARGET_END" ]; then
|
||||
# reboot_pi
|
||||
# fi
|
||||
|
||||
if ! echo Yes | parted -m --align=optimal "$ROOT_DEV" ---pretend-input-tty u s resizepart "$ROOT_PART_NUM" "$TARGET_END" ; then
|
||||
FAIL_REASON="Root partition resize failed"
|
||||
return 1
|
||||
fi
|
||||
|
||||
if [ -n "$DATA_PART_START" ]; then
|
||||
if ! parted -ms --align=optimal "$ROOT_DEV" u s mkpart primary "$DATA_PART_START" "$DATA_PART_END"; then
|
||||
if ! parted -ms --align=optimal "$ROOT_DEV" u s mkpart data "$DATA_PART_START" "$DATA_PART_END"; then
|
||||
FAIL_REASON="Data partition creation failed"
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
(
|
||||
echo x
|
||||
echo i
|
||||
echo "0xcb15ae4d"
|
||||
echo r
|
||||
echo w
|
||||
) | fdisk $ROOT_DEV
|
||||
|
||||
mount / -o remount,rw
|
||||
|
||||
resize2fs $ROOT_PART_DEV
|
||||
btrfs filesystem resize max /media/startos/root
|
||||
|
||||
if ! systemd-machine-id-setup --root=/media/startos/config/overlay/; then
|
||||
FAIL_REASON="systemd-machine-id-setup failed"
|
||||
@@ -111,7 +105,7 @@ mount / -o remount,ro
|
||||
beep
|
||||
|
||||
if main; then
|
||||
sed -i 's| init=/usr/lib/startos/scripts/init_resize\.sh||' /boot/cmdline.txt
|
||||
sed -i 's| init=/usr/lib/startos/scripts/init_resize\.sh||' /boot/grub/grub.cfg
|
||||
echo "Resized root filesystem. Rebooting in 5 seconds..."
|
||||
sleep 5
|
||||
else
|
||||
@@ -27,20 +27,18 @@ disable_overscan=1
|
||||
# (e.g. for USB device mode) or if USB support is not required.
|
||||
otg_mode=1
|
||||
|
||||
[all]
|
||||
|
||||
[pi4]
|
||||
# Run as fast as firmware / board allows
|
||||
arm_boost=1
|
||||
kernel=vmlinuz-${KERNEL_VERSION}-rpi-v8
|
||||
initramfs initrd.img-${KERNEL_VERSION}-rpi-v8 followkernel
|
||||
|
||||
[pi5]
|
||||
kernel=vmlinuz-${KERNEL_VERSION}-rpi-2712
|
||||
initramfs initrd.img-${KERNEL_VERSION}-rpi-2712 followkernel
|
||||
|
||||
[all]
|
||||
gpu_mem=16
|
||||
dtoverlay=pwm-2chan,disable-bt
|
||||
|
||||
EOF
|
||||
# Enable UART for U-Boot and serial console
|
||||
enable_uart=1
|
||||
|
||||
# Load U-Boot as the bootloader (GRUB is chainloaded from U-Boot)
|
||||
kernel=u-boot.bin
|
||||
|
||||
EOF
|
||||
@@ -84,4 +84,8 @@ arm_boost=1
|
||||
gpu_mem=16
|
||||
dtoverlay=pwm-2chan,disable-bt
|
||||
|
||||
auto_initramfs=1
|
||||
# Enable UART for U-Boot and serial console
|
||||
enable_uart=1
|
||||
|
||||
# Load U-Boot as the bootloader (GRUB is chainloaded from U-Boot)
|
||||
kernel=u-boot.bin
|
||||
@@ -0,0 +1,4 @@
# Raspberry Pi-specific GRUB overrides
# Overrides GRUB_CMDLINE_LINUX from /etc/default/grub with Pi-specific
# console devices and hardware quirks.
GRUB_CMDLINE_LINUX="boot=startos console=serial0,115200 console=tty1 usb-storage.quirks=152d:0562:u,14cd:121c:u,0781:cfcb:u cgroup_enable=cpuset cgroup_memory=1 cgroup_enable=memory"
@@ -1,6 +1,3 @@
os-partitions:
  boot: /dev/mmcblk0p1
  root: /dev/mmcblk0p2
ethernet-interface: end0
wifi-interface: wlan0
disable-encryption: true
35
build/image-recipe/run-local-build.sh
Executable file
@@ -0,0 +1,35 @@
#!/bin/bash
set -e

cd "$(dirname "${BASH_SOURCE[0]}")/../.."

BASEDIR="$(pwd -P)"

SUITE=trixie

USE_TTY=
if tty -s; then
    USE_TTY="-it"
fi

dockerfile_hash=$(sha256sum ${BASEDIR}/build/image-recipe/Dockerfile | head -c 7)

docker_img_name="start9/build-iso:${SUITE}-${dockerfile_hash}"

platform=linux/${ARCH}
case $ARCH in
    x86_64)
        platform=linux/amd64;;
    aarch64)
        platform=linux/arm64;;
esac

if ! docker run --rm --platform=$platform "${docker_img_name}" true 2> /dev/null; then
    docker buildx build --load --platform=$platform --build-arg=SUITE=${SUITE} -t "${docker_img_name}" ./build/image-recipe
fi

docker run $USE_TTY --rm --platform=$platform --privileged -v "$(pwd)/build/image-recipe:/root/image-recipe" -v "$(pwd)/results:/root/results" \
    -e IB_SUITE="$SUITE" \
    -e IB_UID="$UID" \
    -e IB_INCLUDE \
    "${docker_img_name}" /root/image-recipe/build.sh $@
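A hedged example of invoking this wrapper locally; ARCH selects the Docker platform, IB_INCLUDE is optional, and any extra arguments are passed straight through to build.sh. The values shown are illustrative, not prescribed:

```bash
# Illustrative local invocation: builds the trixie image-recipe container if it
# is not already present, then runs build.sh with results written to ./results.
ARCH=x86_64 ./build/image-recipe/run-local-build.sh
```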
51
build/lib/grub-theme/theme.txt
Normal file
@@ -0,0 +1,51 @@
desktop-image: "../splash.png"
title-color: "#ffffff"
title-font: "Unifont Regular 16"
title-text: "StartOS Boot Menu with GRUB"
message-font: "Unifont Regular 16"
terminal-font: "Unifont Regular 16"

# help bar at the bottom
+ label {
    top = 100%-50
    left = 0
    width = 100%
    height = 20
    text = "@KEYMAP_SHORT@"
    align = "center"
    color = "#ffffff"
    font = "Unifont Regular 16"
}

# boot menu
+ boot_menu {
    left = 10%
    width = 80%
    top = 52%
    height = 48%-80
    item_color = "#a8a8a8"
    item_font = "Unifont Regular 16"
    selected_item_color = "#ffffff"
    selected_item_font = "Unifont Regular 16"
    item_height = 16
    item_padding = 0
    item_spacing = 4
    icon_width = 0
    icon_height = 0
    item_icon_space = 0
}

# progress bar
+ progress_bar {
    id = "__timeout__"
    left = 15%
    top = 100%-80
    height = 16
    width = 70%
    font = "Unifont Regular 16"
    text_color = "#000000"
    fg_color = "#ffffff"
    bg_color = "#a8a8a8"
    border_color = "#ffffff"
    text = "@TIMEOUT_NOTIFICATION_LONG@"
}
@@ -4,7 +4,7 @@ parse_essential_db_info() {
|
||||
DB_DUMP="/tmp/startos_db.json"
|
||||
|
||||
if command -v start-cli >/dev/null 2>&1; then
|
||||
start-cli db dump > "$DB_DUMP" 2>/dev/null || return 1
|
||||
timeout 30 start-cli db dump > "$DB_DUMP" 2>/dev/null || return 1
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
@@ -22,7 +22,7 @@ parse_essential_db_info() {
|
||||
RAM_GB="unknown"
|
||||
fi
|
||||
|
||||
RUNNING_SERVICES=$(jq -r '[.value.packageData[] | select(.status.main == "running")] | length' "$DB_DUMP" 2>/dev/null)
|
||||
RUNNING_SERVICES=$(jq -r '[.value.packageData[] | select(.statusInfo.started != null)] | length' "$DB_DUMP" 2>/dev/null)
|
||||
TOTAL_SERVICES=$(jq -r '.value.packageData | length' "$DB_DUMP" 2>/dev/null)
|
||||
|
||||
rm -f "$DB_DUMP"
|
||||
|
||||
@@ -34,7 +34,7 @@ set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters
|
||||
|
||||
if [ -z "$NO_SYNC" ]; then
|
||||
echo 'Syncing...'
|
||||
umount -R /media/startos/next 2> /dev/null
|
||||
umount -l /media/startos/next 2> /dev/null
|
||||
umount /media/startos/upper 2> /dev/null
|
||||
rm -rf /media/startos/upper /media/startos/next
|
||||
mkdir /media/startos/upper
|
||||
@@ -58,13 +58,13 @@ mkdir -p /media/startos/next/media/startos/root
|
||||
mount --bind /run /media/startos/next/run
|
||||
mount --bind /tmp /media/startos/next/tmp
|
||||
mount --bind /dev /media/startos/next/dev
|
||||
mount --bind /sys /media/startos/next/sys
|
||||
mount --bind /proc /media/startos/next/proc
|
||||
mount -t sysfs sysfs /media/startos/next/sys
|
||||
mount -t proc proc /media/startos/next/proc
|
||||
mount --bind /boot /media/startos/next/boot
|
||||
mount --bind /media/startos/root /media/startos/next/media/startos/root
|
||||
|
||||
if mountpoint /sys/firmware/efi/efivars 2> /dev/null; then
|
||||
mount --bind /sys/firmware/efi/efivars /media/startos/next/sys/firmware/efi/efivars
|
||||
if mountpoint /sys/firmware/efi/efivars 2>&1 > /dev/null; then
|
||||
mount -t efivarfs efivarfs /media/startos/next/sys/firmware/efi/efivars
|
||||
fi
|
||||
|
||||
if [ -z "$*" ]; then
|
||||
@@ -75,7 +75,7 @@ else
|
||||
CHROOT_RES=$?
|
||||
fi
|
||||
|
||||
if mountpoint /media/startos/next/sys/firmware/efi/efivars 2> /dev/null; then
|
||||
if mountpoint /media/startos/next/sys/firmware/efi/efivars 2>&1 > /dev/null; then
|
||||
umount /media/startos/next/sys/firmware/efi/efivars
|
||||
fi
|
||||
|
||||
@@ -111,6 +111,6 @@ if [ "$CHROOT_RES" -eq 0 ]; then
|
||||
reboot
|
||||
fi
|
||||
|
||||
umount -R /media/startos/next
|
||||
umount /media/startos/upper
|
||||
umount -l /media/startos/next
|
||||
umount -l /media/startos/upper
|
||||
rm -rf /media/startos/upper /media/startos/next
|
||||
@@ -1,29 +1,72 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ -z "$sip" ] || [ -z "$dip" ] || [ -z "$sport" ] || [ -z "$dport" ]; then
|
||||
if [ -z "$sip" ] || [ -z "$dip" ] || [ -z "$dprefix" ] || [ -z "$sport" ] || [ -z "$dport" ]; then
|
||||
>&2 echo 'missing required env var'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
rule_exists() {
|
||||
iptables -t nat -C "$@" 2>/dev/null
|
||||
}
|
||||
NAME="F$(echo "$sip:$sport -> $dip/$dprefix:$dport ${src_subnet:-any}" | sha256sum | head -c 15)"
|
||||
|
||||
apply_rule() {
|
||||
if [ "$UNDO" = "1" ]; then
|
||||
if rule_exists "$@"; then
|
||||
iptables -t nat -D "$@"
|
||||
fi
|
||||
else
|
||||
if ! rule_exists "$@"; then
|
||||
iptables -t nat -A "$@"
|
||||
fi
|
||||
for kind in INPUT FORWARD ACCEPT; do
|
||||
if ! iptables -C $kind -j "${NAME}_${kind}" 2> /dev/null; then
|
||||
iptables -N "${NAME}_${kind}" 2> /dev/null
|
||||
iptables -A $kind -j "${NAME}_${kind}"
|
||||
fi
|
||||
}
|
||||
done
|
||||
for kind in PREROUTING OUTPUT POSTROUTING; do
|
||||
if ! iptables -t nat -C $kind -j "${NAME}_${kind}" 2> /dev/null; then
|
||||
iptables -t nat -N "${NAME}_${kind}" 2> /dev/null
|
||||
iptables -t nat -A $kind -j "${NAME}_${kind}"
|
||||
fi
|
||||
done
|
||||
|
||||
apply_rule PREROUTING -p tcp -d $sip --dport $sport -j DNAT --to-destination $dip:$dport
|
||||
apply_rule OUTPUT -p tcp -d $sip --dport $sport -j DNAT --to-destination $dip:$dport
|
||||
err=0
|
||||
trap 'err=1' ERR
|
||||
|
||||
for kind in INPUT FORWARD ACCEPT; do
|
||||
iptables -F "${NAME}_${kind}" 2> /dev/null
|
||||
done
|
||||
for kind in PREROUTING OUTPUT POSTROUTING; do
|
||||
iptables -t nat -F "${NAME}_${kind}" 2> /dev/null
|
||||
done
|
||||
if [ "$UNDO" = 1 ]; then
|
||||
conntrack -D -p tcp -d $sip --dport $sport || true # conntrack returns exit 1 if no connections are active
|
||||
fi
|
||||
conntrack -D -p udp -d $sip --dport $sport || true # conntrack returns exit 1 if no connections are active
|
||||
exit $err
|
||||
fi
|
||||
|
||||
# DNAT: rewrite destination for incoming packets (external traffic)
|
||||
# When src_subnet is set, only forward traffic from that subnet (private forwards)
|
||||
if [ -n "$src_subnet" ]; then
|
||||
iptables -t nat -A ${NAME}_PREROUTING -s "$src_subnet" -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
|
||||
iptables -t nat -A ${NAME}_PREROUTING -s "$src_subnet" -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
|
||||
# Also allow containers on the bridge subnet to reach this forward
|
||||
if [ -n "$bridge_subnet" ]; then
|
||||
iptables -t nat -A ${NAME}_PREROUTING -s "$bridge_subnet" -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
|
||||
iptables -t nat -A ${NAME}_PREROUTING -s "$bridge_subnet" -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
|
||||
fi
|
||||
else
|
||||
iptables -t nat -A ${NAME}_PREROUTING -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
|
||||
iptables -t nat -A ${NAME}_PREROUTING -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
|
||||
fi
|
||||
|
||||
# DNAT: rewrite destination for locally-originated packets (hairpin from host itself)
|
||||
iptables -t nat -A ${NAME}_OUTPUT -d "$sip" -p tcp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
|
||||
iptables -t nat -A ${NAME}_OUTPUT -d "$sip" -p udp --dport "$sport" -j DNAT --to-destination "$dip:$dport"
|
||||
|
||||
# Allow new connections to be forwarded to the destination
|
||||
iptables -A ${NAME}_FORWARD -d $dip -p tcp --dport $dport -m state --state NEW -j ACCEPT
|
||||
iptables -A ${NAME}_FORWARD -d $dip -p udp --dport $dport -m state --state NEW -j ACCEPT
|
||||
|
||||
# NAT hairpin: masquerade traffic from the bridge subnet or host to the DNAT
|
||||
# target, so replies route back through the host for proper NAT reversal.
|
||||
# Container-to-container hairpin (source is on the bridge subnet)
|
||||
if [ -n "$bridge_subnet" ]; then
|
||||
iptables -t nat -A ${NAME}_POSTROUTING -s "$bridge_subnet" -d "$dip" -p tcp --dport "$dport" -j MASQUERADE
|
||||
iptables -t nat -A ${NAME}_POSTROUTING -s "$bridge_subnet" -d "$dip" -p udp --dport "$dport" -j MASQUERADE
|
||||
fi
|
||||
# Host-to-container hairpin (host connects to its own gateway IP, source is sip)
|
||||
iptables -t nat -A ${NAME}_POSTROUTING -s "$sip" -d "$dip" -p tcp --dport "$dport" -j MASQUERADE
|
||||
iptables -t nat -A ${NAME}_POSTROUTING -s "$sip" -d "$dip" -p udp --dport "$dport" -j MASQUERADE
|
||||
|
||||
exit $err
|
||||
|
||||
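The forwarding script above is driven entirely by environment variables (sip, dip, dprefix, sport, dport are required; src_subnet and bridge_subnet are optional), and the same variables with UNDO=1 flush the generated chains. A hedged example; the script path below is hypothetical since the diff does not show the filename, and the addresses are placeholders:

```bash
# Hypothetical path and example addresses: forward TCP/UDP 10.0.3.1:8080 to
# 172.18.0.2:80, restricted to one source subnet, with bridge hairpin enabled.
sip=10.0.3.1 sport=8080 \
dip=172.18.0.2 dprefix=32 dport=80 \
src_subnet=10.0.3.0/24 bridge_subnet=172.18.0.0/16 \
  /usr/lib/startos/scripts/forward-port

# Tear the rules back down with the same variables plus UNDO=1.
```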
20
build/lib/scripts/install-equivs
Executable file
@@ -0,0 +1,20 @@
#!/bin/bash

export DEBIAN_FRONTEND=noninteractive
export DEBCONF_NONINTERACTIVE_SEEN=true

TMP_DIR=$(mktemp -d)

(
    set -e
    cd $TMP_DIR

    cat > control.equivs
    equivs-build control.equivs
    apt-get install -y ./*.deb < /dev/null
)

rm -rf $TMP_DIR

echo Install complete. >&2
exit 0
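Usage mirrors the NVIDIA hook earlier in this changeset: an equivs control stanza is fed on stdin and installed as a throwaway metapackage. A sketch with an illustrative package name and dependency list:

```bash
# Example: install build dependencies behind a removable "example-build-depends"
# metapackage (name and Depends line are illustrative).
/usr/lib/startos/scripts/install-equivs <<EOF
Package: example-build-depends
Version: 1.0
Section: unknown
Priority: optional
Depends: build-essential, pkg-config
EOF
```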
@@ -29,10 +29,13 @@ if [ -z "$needed" ]; then
|
||||
exit 1
|
||||
fi
|
||||
|
||||
MARGIN=${MARGIN:-1073741824}
|
||||
target=$((needed + MARGIN))
|
||||
|
||||
if [ -h /media/startos/config/current.rootfs ] && [ -e /media/startos/config/current.rootfs ]; then
|
||||
echo 'Pruning...'
|
||||
current="$(readlink -f /media/startos/config/current.rootfs)"
|
||||
while [[ "$(df -B1 --output=avail --sync /media/startos/images | tail -n1)" -lt "$needed" ]]; do
|
||||
while [[ "$(df -B1 --output=avail --sync /media/startos/images | tail -n1)" -lt "$target" ]]; do
|
||||
to_prune="$(ls -t1 /media/startos/images/*.rootfs /media/startos/images/*.squashfs 2> /dev/null | grep -v "$current" | tail -n1)"
|
||||
if [ -e "$to_prune" ]; then
|
||||
echo " Pruning $to_prune"
|
||||
|
||||
76
build/lib/scripts/sign-unsigned-modules
Executable file
@@ -0,0 +1,76 @@
#!/bin/bash

# sign-unsigned-modules [--source <dir> --dest <dir>] [--sign-file <path>]
#                       [--mok-key <path>] [--mok-pub <path>]
#
# Signs all unsigned kernel modules using the DKMS MOK key.
#
# Default (install) mode:
#   Run inside a chroot. Finds and signs unsigned modules in /lib/modules in-place.
#   sign-file and MOK key are auto-detected from standard paths.
#
# Overlay mode (--source/--dest):
#   Finds unsigned modules in <source>, copies to <dest>, signs the copies.
#   Clears old signed modules in <dest> first. Used during upgrades where the
#   overlay upper is tmpfs and writes would be lost.

set -e

SOURCE=""
DEST=""
SIGN_FILE=""
MOK_KEY="/var/lib/dkms/mok.key"
MOK_PUB="/var/lib/dkms/mok.pub"

while [[ $# -gt 0 ]]; do
    case $1 in
        --source) SOURCE="$2"; shift 2;;
        --dest) DEST="$2"; shift 2;;
        --sign-file) SIGN_FILE="$2"; shift 2;;
        --mok-key) MOK_KEY="$2"; shift 2;;
        --mok-pub) MOK_PUB="$2"; shift 2;;
        *) echo "Unknown option: $1" >&2; exit 1;;
    esac
done

# Auto-detect sign-file if not specified
if [ -z "$SIGN_FILE" ]; then
    SIGN_FILE="$(ls -1 /usr/lib/linux-kbuild-*/scripts/sign-file 2>/dev/null | head -1)"
fi

if [ -z "$SIGN_FILE" ] || [ ! -x "$SIGN_FILE" ]; then
    exit 0
fi

if [ ! -f "$MOK_KEY" ] || [ ! -f "$MOK_PUB" ]; then
    exit 0
fi

COUNT=0

if [ -n "$SOURCE" ] && [ -n "$DEST" ]; then
    # Overlay mode: find unsigned in source, copy to dest, sign in dest
    rm -rf "${DEST}"/lib/modules

    for ko in $(find "${SOURCE}"/lib/modules -name '*.ko' 2>/dev/null); do
        if ! modinfo "$ko" 2>/dev/null | grep -q '^sig_id:'; then
            rel_path="${ko#${SOURCE}}"
            mkdir -p "${DEST}$(dirname "$rel_path")"
            cp "$ko" "${DEST}${rel_path}"
            "$SIGN_FILE" sha256 "$MOK_KEY" "$MOK_PUB" "${DEST}${rel_path}"
            COUNT=$((COUNT + 1))
        fi
    done
else
    # In-place mode: sign modules directly
    for ko in $(find /lib/modules -name '*.ko' 2>/dev/null); do
        if ! modinfo "$ko" 2>/dev/null | grep -q '^sig_id:'; then
            "$SIGN_FILE" sha256 "$MOK_KEY" "$MOK_PUB" "$ko"
            COUNT=$((COUNT + 1))
        fi
    done
fi

if [ $COUNT -gt 0 ]; then
    echo "[sign-modules] Signed $COUNT unsigned kernel modules"
fi
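The `sig_id:` check used above can also be run by hand to spot-check a single module. A hedged sketch; the module path is illustrative, and on some kernels modules ship compressed (.ko.xz):

```bash
# Sketch: a signed module reports a "sig_id:" field, an unsigned one does not.
modinfo /lib/modules/$(uname -r)/kernel/fs/btrfs/btrfs.ko 2>/dev/null | grep '^sig'
```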
@@ -104,6 +104,7 @@ local_mount_root()
|
||||
-olowerdir=/startos/config/overlay:/lower,upperdir=/upper/data,workdir=/upper/work \
|
||||
overlay ${rootmnt}
|
||||
|
||||
mkdir -m 750 -p ${rootmnt}/media/startos
|
||||
mkdir -p ${rootmnt}/media/startos/config
|
||||
mount --bind /startos/config ${rootmnt}/media/startos/config
|
||||
mkdir -p ${rootmnt}/media/startos/images
|
||||
|
||||
@@ -1,36 +1,64 @@
|
||||
#!/bin/bash
|
||||
|
||||
fail=$(printf " [\033[31m fail \033[0m]")
|
||||
pass=$(printf " [\033[32m pass \033[0m]")
|
||||
# --- Config ---
|
||||
# Colors (using printf to ensure compatibility)
|
||||
GRAY=$(printf '\033[90m')
|
||||
GREEN=$(printf '\033[32m')
|
||||
RED=$(printf '\033[31m')
|
||||
NC=$(printf '\033[0m') # No Color
|
||||
|
||||
# Proxies to test
|
||||
proxies=(
|
||||
"Host Tor|127.0.1.1:9050"
|
||||
"Startd Tor|10.0.3.1:9050"
|
||||
)
|
||||
|
||||
# Default URLs
|
||||
onion_list=(
|
||||
"The Tor Project|http://2gzyxa5ihm7nsggfxnu52rck2vv4rvmdlkiu3zzui5du4xyclen53wid.onion"
|
||||
"Start9|http://privacy34kn4ez3y3nijweec6w4g54i3g54sdv7r5mr6soma3w4begyd.onion"
|
||||
"Mempool|http://mempoolhqx4isw62xs7abwphsq7ldayuidyx2v2oethdhhj6mlo2r6ad.onion"
|
||||
"DuckDuckGo|https://duckduckgogg42xjoc72x3sjasowoarfbgcmvfimaftt6twagswzczad.onion"
|
||||
"Brave Search|https://search.brave4u7jddbv7cyviptqjc7jusxh72uik7zt6adtckl5f4nwy2v72qd.onion"
|
||||
)
|
||||
|
||||
# Check if ~/.startos/tor-check.list exists and read its contents if available
|
||||
if [ -f ~/.startos/tor-check.list ]; then
|
||||
while IFS= read -r line; do
|
||||
# Check if the line starts with a #
|
||||
if [[ ! "$line" =~ ^# ]]; then
|
||||
onion_list+=("$line")
|
||||
# Load custom list
|
||||
[ -f ~/.startos/tor-check.list ] && readarray -t custom_list < <(grep -v '^#' ~/.startos/tor-check.list) && onion_list+=("${custom_list[@]}")
|
||||
|
||||
# --- Functions ---
|
||||
print_line() { printf "${GRAY}────────────────────────────────────────${NC}\n"; }
|
||||
|
||||
# --- Main ---
|
||||
echo "Testing Onion Connections..."
|
||||
|
||||
for proxy_info in "${proxies[@]}"; do
|
||||
proxy_name="${proxy_info%%|*}"
|
||||
proxy_addr="${proxy_info#*|}"
|
||||
|
||||
print_line
|
||||
printf "${GRAY}Proxy: %s (%s)${NC}\n" "$proxy_name" "$proxy_addr"
|
||||
|
||||
for data in "${onion_list[@]}"; do
|
||||
name="${data%%|*}"
|
||||
url="${data#*|}"
|
||||
|
||||
# Capture verbose output + http code.
|
||||
# --no-progress-meter: Suppresses the "0 0 0" stats but keeps -v output
|
||||
output=$(curl -v --no-progress-meter --max-time 15 --socks5-hostname "$proxy_addr" "$url" 2>&1)
|
||||
exit_code=$?
|
||||
|
||||
if [ $exit_code -eq 0 ]; then
|
||||
printf " ${GREEN}[pass]${NC} %s (%s)\n" "$name" "$url"
|
||||
else
|
||||
printf " ${RED}[fail]${NC} %s (%s)\n" "$name" "$url"
|
||||
printf " ${RED}↳ Curl Error %s${NC}\n" "$exit_code"
|
||||
|
||||
# Print the last 4 lines of verbose log to show the specific handshake error
|
||||
# We look for lines starting with '*' or '>' or '<' to filter out junk if any remains
|
||||
echo "$output" | tail -n 4 | sed "s/^/ ${GRAY}/"
|
||||
fi
|
||||
done < ~/.startos/tor-check.list
|
||||
fi
|
||||
|
||||
echo "Testing connection to Onion Pages ..."
|
||||
|
||||
for data in "${onion_list[@]}"; do
|
||||
name="${data%%|*}"
|
||||
url="${data#*|}"
|
||||
if curl --socks5-hostname localhost:9050 "$url" > /dev/null 2>&1; then
|
||||
echo " ${pass}: $name ($url) "
|
||||
else
|
||||
echo " ${fail}: $name ($url) "
|
||||
fi
|
||||
done
|
||||
done
|
||||
|
||||
echo
|
||||
echo "Done."
|
||||
print_line
|
||||
# Reset color just in case
|
||||
printf "${NC}"
|
||||
|
||||
@@ -24,7 +24,7 @@ fi
|
||||
|
||||
unsquashfs -f -d / $1 boot
|
||||
|
||||
umount -R /media/startos/next 2> /dev/null || true
|
||||
umount -l /media/startos/next 2> /dev/null || true
|
||||
umount /media/startos/upper 2> /dev/null || true
|
||||
umount /media/startos/lower 2> /dev/null || true
|
||||
|
||||
@@ -45,32 +45,36 @@ mkdir -p /media/startos/next/media/startos/root
|
||||
mount --bind /run /media/startos/next/run
|
||||
mount --bind /tmp /media/startos/next/tmp
|
||||
mount --bind /dev /media/startos/next/dev
|
||||
mount --bind /sys /media/startos/next/sys
|
||||
mount --bind /proc /media/startos/next/proc
|
||||
mount --bind /boot /media/startos/next/boot
|
||||
mount -t sysfs sysfs /media/startos/next/sys
|
||||
mount -t proc proc /media/startos/next/proc
|
||||
mount --rbind /boot /media/startos/next/boot
|
||||
mount --bind /media/startos/root /media/startos/next/media/startos/root
|
||||
|
||||
if mountpoint /boot/efi 2> /dev/null; then
|
||||
mkdir -p /media/startos/next/boot/efi
|
||||
mount --bind /boot/efi /media/startos/next/boot/efi
|
||||
fi
|
||||
|
||||
if mountpoint /sys/firmware/efi/efivars 2> /dev/null; then
|
||||
mount --bind /sys/firmware/efi/efivars /media/startos/next/sys/firmware/efi/efivars
|
||||
if mountpoint /sys/firmware/efi/efivars 2>&1 > /dev/null; then
|
||||
mount -t efivarfs efivarfs /media/startos/next/sys/firmware/efi/efivars
|
||||
fi
|
||||
|
||||
chroot /media/startos/next bash -e << "EOF"
|
||||
|
||||
if dpkg -s grub-common 2>&1 > /dev/null; then
|
||||
if [ -f /boot/grub/grub.cfg ]; then
|
||||
grub-install /dev/$(eval $(lsblk -o MOUNTPOINT,PKNAME -P | grep 'MOUNTPOINT="/media/startos/root"') && echo $PKNAME)
|
||||
update-grub
|
||||
fi
|
||||
|
||||
EOF
|
||||
|
||||
# Sign unsigned kernel modules for Secure Boot
|
||||
SIGN_FILE="$(ls -1 /media/startos/next/usr/lib/linux-kbuild-*/scripts/sign-file 2>/dev/null | head -1)"
|
||||
/media/startos/next/usr/lib/startos/scripts/sign-unsigned-modules \
|
||||
--source /media/startos/lower \
|
||||
--dest /media/startos/config/overlay \
|
||||
--sign-file "$SIGN_FILE" \
|
||||
--mok-key /media/startos/config/overlay/var/lib/dkms/mok.key \
|
||||
--mok-pub /media/startos/config/overlay/var/lib/dkms/mok.pub
|
||||
|
||||
sync
|
||||
|
||||
umount -R /media/startos/next
|
||||
umount -l /media/startos/next
|
||||
umount /media/startos/upper
|
||||
umount /media/startos/lower
|
||||
|
(binary image asset modified; size 9.6 KiB before and after)
367
build/manage-release.sh
Executable file
@@ -0,0 +1,367 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
REPO="Start9Labs/start-os"
|
||||
REGISTRY="https://alpha-registry-x.start9.com"
|
||||
S3_BUCKET="s3://startos-images"
|
||||
S3_CDN="https://startos-images.nyc3.cdn.digitaloceanspaces.com"
|
||||
START9_GPG_KEY="2D63C217"
|
||||
|
||||
ARCHES="aarch64 aarch64-nonfree aarch64-nvidia riscv64 riscv64-nonfree x86_64 x86_64-nonfree x86_64-nvidia"
|
||||
CLI_ARCHES="aarch64 riscv64 x86_64"
|
||||
|
||||
parse_run_id() {
|
||||
local val="$1"
|
||||
if [[ "$val" =~ /actions/runs/([0-9]+) ]]; then
|
||||
echo "${BASH_REMATCH[1]}"
|
||||
else
|
||||
echo "$val"
|
||||
fi
|
||||
}
|
||||
|
||||
require_version() {
|
||||
if [ -z "${VERSION:-}" ]; then
|
||||
read -rp "VERSION: " VERSION
|
||||
if [ -z "$VERSION" ]; then
|
||||
>&2 echo '$VERSION required'
|
||||
exit 2
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
release_dir() {
|
||||
echo "$HOME/Downloads/v$VERSION"
|
||||
}
|
||||
|
||||
ensure_release_dir() {
|
||||
local dir
|
||||
dir=$(release_dir)
|
||||
if [ "$CLEAN" = "1" ]; then
|
||||
rm -rf "$dir"
|
||||
fi
|
||||
mkdir -p "$dir"
|
||||
cd "$dir"
|
||||
}
|
||||
|
||||
enter_release_dir() {
|
||||
local dir
|
||||
dir=$(release_dir)
|
||||
if [ ! -d "$dir" ]; then
|
||||
>&2 echo "Release directory $dir does not exist. Run 'download' or 'pull' first."
|
||||
exit 1
|
||||
fi
|
||||
cd "$dir"
|
||||
}
|
||||
|
||||
cli_target_for() {
|
||||
local arch=$1 os=$2
|
||||
local pair="${arch}-${os}"
|
||||
if [ "$pair" = "riscv64-linux" ]; then
|
||||
echo "riscv64gc-unknown-linux-musl"
|
||||
elif [ "$pair" = "riscv64-macos" ]; then
|
||||
return 1
|
||||
elif [ "$os" = "linux" ]; then
|
||||
echo "${arch}-unknown-linux-musl"
|
||||
elif [ "$os" = "macos" ]; then
|
||||
echo "${arch}-apple-darwin"
|
||||
fi
|
||||
}
|
||||
|
||||
release_files() {
|
||||
for file in *.iso *.squashfs *.deb; do
|
||||
[ -f "$file" ] && echo "$file"
|
||||
done
|
||||
for file in start-cli_*; do
|
||||
[[ "$file" == *.asc ]] && continue
|
||||
[ -f "$file" ] && echo "$file"
|
||||
done
|
||||
}
|
||||
|
||||
resolve_gh_user() {
|
||||
GH_USER=${GH_USER:-$(gh api user -q .login 2>/dev/null || true)}
|
||||
GH_GPG_KEY=$(git config user.signingkey 2>/dev/null || true)
|
||||
}
|
||||
|
||||
# --- Subcommands ---
|
||||
|
||||
cmd_download() {
|
||||
require_version
|
||||
|
||||
if [ -z "${RUN_ID:-}" ]; then
|
||||
read -rp "RUN_ID (OS images, leave blank to skip): " RUN_ID
|
||||
fi
|
||||
RUN_ID=$(parse_run_id "${RUN_ID:-}")
|
||||
|
||||
if [ -z "${ST_RUN_ID:-}" ]; then
|
||||
read -rp "ST_RUN_ID (start-tunnel, leave blank to skip): " ST_RUN_ID
|
||||
fi
|
||||
ST_RUN_ID=$(parse_run_id "${ST_RUN_ID:-}")
|
||||
|
||||
if [ -z "${CLI_RUN_ID:-}" ]; then
|
||||
read -rp "CLI_RUN_ID (start-cli, leave blank to skip): " CLI_RUN_ID
|
||||
fi
|
||||
CLI_RUN_ID=$(parse_run_id "${CLI_RUN_ID:-}")
|
||||
|
||||
ensure_release_dir
|
||||
|
||||
if [ -n "$RUN_ID" ]; then
|
||||
for arch in $ARCHES; do
|
||||
while ! gh run download -R $REPO "$RUN_ID" -n "$arch.squashfs" -D "$(pwd)"; do sleep 1; done
|
||||
done
|
||||
for arch in $ARCHES; do
|
||||
while ! gh run download -R $REPO "$RUN_ID" -n "$arch.iso" -D "$(pwd)"; do sleep 1; done
|
||||
done
|
||||
fi
|
||||
|
||||
if [ -n "$ST_RUN_ID" ]; then
|
||||
for arch in $CLI_ARCHES; do
|
||||
while ! gh run download -R $REPO "$ST_RUN_ID" -n "start-tunnel_$arch.deb" -D "$(pwd)"; do sleep 1; done
|
||||
done
|
||||
fi
|
||||
|
||||
if [ -n "$CLI_RUN_ID" ]; then
|
||||
for arch in $CLI_ARCHES; do
|
||||
for os in linux macos; do
|
||||
local target
|
||||
target=$(cli_target_for "$arch" "$os") || continue
|
||||
while ! gh run download -R $REPO "$CLI_RUN_ID" -n "start-cli_$target" -D "$(pwd)"; do sleep 1; done
|
||||
mv start-cli "start-cli_${arch}-${os}"
|
||||
done
|
||||
done
|
||||
fi
|
||||
}
|
||||
|
||||
cmd_pull() {
|
||||
require_version
|
||||
ensure_release_dir
|
||||
|
||||
echo "Downloading release assets from tag v$VERSION..."
|
||||
|
||||
# Download debs and CLI binaries from the GH release
|
||||
for file in $(gh release view -R $REPO "v$VERSION" --json assets -q '.assets[].name' | grep -E '\.(deb)$|^start-cli_'); do
|
||||
gh release download -R $REPO "v$VERSION" -p "$file" -D "$(pwd)" --clobber
|
||||
done
|
||||
|
||||
# Download ISOs and squashfs from S3 CDN
|
||||
for arch in $ARCHES; do
|
||||
for ext in squashfs iso; do
|
||||
# Get the actual filename from the GH release asset list or body
|
||||
local filename
|
||||
filename=$(gh release view -R $REPO "v$VERSION" --json assets -q ".assets[].name" | grep "_${arch}\\.${ext}$" || true)
|
||||
if [ -z "$filename" ]; then
|
||||
filename=$(gh release view -R $REPO "v$VERSION" --json body -q .body | grep -oP "[^ ]*_${arch}\\.${ext}" | head -1 || true)
|
||||
fi
|
||||
if [ -n "$filename" ]; then
|
||||
echo "Downloading $filename from S3..."
|
||||
curl -fSL -o "$filename" "$S3_CDN/v$VERSION/$filename"
|
||||
fi
|
||||
done
|
||||
done
|
||||
}
|
||||
|
||||
cmd_register() {
|
||||
require_version
|
||||
enter_release_dir
|
||||
start-cli --registry=$REGISTRY registry os version add "$VERSION" "v$VERSION" '' ">=0.3.5 <=$VERSION"
|
||||
}
|
||||
|
||||
cmd_upload() {
|
||||
require_version
|
||||
enter_release_dir
|
||||
|
||||
for file in $(release_files); do
|
||||
case "$file" in
|
||||
*.iso|*.squashfs)
|
||||
s3cmd put -P "$file" "$S3_BUCKET/v$VERSION/$file"
|
||||
;;
|
||||
*)
|
||||
gh release upload -R $REPO "v$VERSION" "$file"
|
||||
;;
|
||||
esac
|
||||
done
|
||||
}
|
||||
|
||||
cmd_index() {
|
||||
require_version
|
||||
enter_release_dir
|
||||
|
||||
for arch in $ARCHES; do
|
||||
for file in *_"$arch".squashfs *_"$arch".iso; do
|
||||
start-cli --registry=$REGISTRY registry os asset add --platform="$arch" --version="$VERSION" "$file" "$S3_CDN/v$VERSION/$file"
|
||||
done
|
||||
done
|
||||
}
|
||||
|
||||
cmd_sign() {
|
||||
require_version
|
||||
enter_release_dir
|
||||
resolve_gh_user
|
||||
|
||||
mkdir -p signatures
|
||||
|
||||
for file in $(release_files); do
|
||||
gpg -u $START9_GPG_KEY --detach-sign --armor -o "signatures/${file}.start9.asc" "$file"
|
||||
if [ -n "$GH_USER" ] && [ -n "$GH_GPG_KEY" ]; then
|
||||
gpg -u "$GH_GPG_KEY" --detach-sign --armor -o "signatures/${file}.${GH_USER}.asc" "$file"
|
||||
fi
|
||||
done
|
||||
|
||||
gpg --export -a $START9_GPG_KEY > signatures/start9.key.asc
|
||||
if [ -n "$GH_USER" ] && [ -n "$GH_GPG_KEY" ]; then
|
||||
gpg --export -a "$GH_GPG_KEY" > "signatures/${GH_USER}.key.asc"
|
||||
else
|
||||
>&2 echo 'Warning: could not determine GitHub user or GPG signing key, skipping personal signature'
|
||||
fi
|
||||
tar -czvf signatures.tar.gz -C signatures .
|
||||
|
||||
gh release upload -R $REPO "v$VERSION" signatures.tar.gz --clobber
|
||||
}
|
||||
|
||||
cmd_cosign() {
|
||||
require_version
|
||||
enter_release_dir
|
||||
resolve_gh_user
|
||||
|
||||
if [ -z "$GH_USER" ] || [ -z "$GH_GPG_KEY" ]; then
|
||||
>&2 echo 'Error: could not determine GitHub user or GPG signing key'
|
||||
>&2 echo "Set GH_USER and/or configure git user.signingkey"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo "Downloading existing signatures..."
|
||||
gh release download -R $REPO "v$VERSION" -p "signatures.tar.gz" -D "$(pwd)" --clobber
|
||||
mkdir -p signatures
|
||||
tar -xzf signatures.tar.gz -C signatures
|
||||
|
||||
echo "Adding personal signatures as $GH_USER..."
|
||||
for file in $(release_files); do
|
||||
gpg -u "$GH_GPG_KEY" --detach-sign --armor -o "signatures/${file}.${GH_USER}.asc" "$file"
|
||||
done
|
||||
|
||||
gpg --export -a "$GH_GPG_KEY" > "signatures/${GH_USER}.key.asc"
|
||||
|
||||
echo "Re-packing signatures..."
|
||||
tar -czvf signatures.tar.gz -C signatures .
|
||||
|
||||
gh release upload -R $REPO "v$VERSION" signatures.tar.gz --clobber
|
||||
echo "Done. Personal signatures for $GH_USER added to v$VERSION."
|
||||
}
|
||||
|
||||
cmd_notes() {
|
||||
require_version
|
||||
enter_release_dir
|
||||
|
||||
cat << EOF
|
||||
# ISO Downloads
|
||||
|
||||
- [x86_64/AMD64]($S3_CDN/v$VERSION/$(ls *_x86_64-nonfree.iso))
|
||||
- [x86_64/AMD64 + NVIDIA]($S3_CDN/v$VERSION/$(ls *_x86_64-nvidia.iso))
|
||||
- [x86_64/AMD64-slim (FOSS-only)]($S3_CDN/v$VERSION/$(ls *_x86_64.iso) "Without proprietary software or drivers")
|
||||
- [aarch64/ARM64]($S3_CDN/v$VERSION/$(ls *_aarch64-nonfree.iso))
|
||||
- [aarch64/ARM64 + NVIDIA]($S3_CDN/v$VERSION/$(ls *_aarch64-nvidia.iso))
|
||||
- [aarch64/ARM64-slim (FOSS-only)]($S3_CDN/v$VERSION/$(ls *_aarch64.iso) "Without proprietary software or drivers")
|
||||
- [RISCV64 (RVA23)]($S3_CDN/v$VERSION/$(ls *_riscv64-nonfree.iso))
|
||||
- [RISCV64 (RVA23)-slim (FOSS-only)]($S3_CDN/v$VERSION/$(ls *_riscv64.iso) "Without proprietary software or drivers")
|
||||
|
||||
EOF
|
||||
cat << 'EOF'
|
||||
# StartOS Checksums
|
||||
|
||||
## SHA-256
|
||||
```
|
||||
EOF
|
||||
sha256sum *.iso *.squashfs
|
||||
cat << 'EOF'
|
||||
```
|
||||
|
||||
## BLAKE-3
|
||||
```
|
||||
EOF
|
||||
b3sum *.iso *.squashfs
|
||||
cat << 'EOF'
|
||||
```
|
||||
|
||||
# Start-Tunnel Checksums
|
||||
|
||||
## SHA-256
|
||||
```
|
||||
EOF
|
||||
sha256sum start-tunnel*.deb
|
||||
cat << 'EOF'
|
||||
```
|
||||
|
||||
## BLAKE-3
|
||||
```
|
||||
EOF
|
||||
b3sum start-tunnel*.deb
|
||||
cat << 'EOF'
|
||||
```
|
||||
|
||||
# start-cli Checksums
|
||||
|
||||
## SHA-256
|
||||
```
|
||||
EOF
|
||||
release_files | grep '^start-cli_' | xargs sha256sum
|
||||
cat << 'EOF'
|
||||
```
|
||||
|
||||
## BLAKE-3
|
||||
```
|
||||
EOF
|
||||
release_files | grep '^start-cli_' | xargs b3sum
|
||||
cat << 'EOF'
|
||||
```
|
||||
EOF
|
||||
}
|
||||
|
||||
cmd_full_release() {
|
||||
cmd_download
|
||||
cmd_register
|
||||
cmd_upload
|
||||
cmd_index
|
||||
cmd_sign
|
||||
cmd_notes
|
||||
}
|
||||
|
||||
usage() {
|
||||
cat << 'EOF'
|
||||
Usage: manage-release.sh <subcommand>
|
||||
|
||||
Subcommands:
|
||||
download Download artifacts from GitHub Actions runs
|
||||
Requires: RUN_ID, ST_RUN_ID, CLI_RUN_ID (any combination)
|
||||
pull Download an existing release from the GH tag and S3
|
||||
register Register the version in the Start9 registry
|
||||
upload Upload artifacts to GitHub Releases and S3
|
||||
index Add assets to the registry index
|
||||
sign Sign all artifacts with Start9 org key (+ personal key if available)
|
||||
and upload signatures.tar.gz
|
||||
cosign Add personal GPG signature to an existing release's signatures
|
||||
(requires 'pull' first so you can verify assets before signing)
|
||||
notes Print release notes with download links and checksums
|
||||
full-release Run: download → register → upload → index → sign → notes
|
||||
|
||||
Environment variables:
|
||||
VERSION (required) Release version
|
||||
RUN_ID GitHub Actions run ID for OS images (download subcommand)
|
||||
ST_RUN_ID GitHub Actions run ID for start-tunnel (download subcommand)
|
||||
CLI_RUN_ID GitHub Actions run ID for start-cli (download subcommand)
|
||||
GH_USER Override GitHub username (default: autodetected via gh cli)
|
||||
CLEAN Set to 1 to wipe and recreate the release directory
|
||||
EOF
|
||||
}
|
||||
|
||||
case "${1:-}" in
|
||||
download) cmd_download ;;
|
||||
pull) cmd_pull ;;
|
||||
register) cmd_register ;;
|
||||
upload) cmd_upload ;;
|
||||
index) cmd_index ;;
|
||||
sign) cmd_sign ;;
|
||||
cosign) cmd_cosign ;;
|
||||
notes) cmd_notes ;;
|
||||
full-release) cmd_full_release ;;
|
||||
*) usage; exit 1 ;;
|
||||
esac
|
||||
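A hedged example of driving this release helper end to end; the version and run IDs are placeholders (see the usage text above for what each variable selects):

```bash
# Illustrative full release: download CI artifacts, register, upload, index,
# sign, and print the release notes.
VERSION=0.4.0 RUN_ID=1234567890 ST_RUN_ID=1234567891 CLI_RUN_ID=1234567892 \
  ./build/manage-release.sh full-release

# A co-signer verifying an existing release would instead run:
VERSION=0.4.0 ./build/manage-release.sh pull
VERSION=0.4.0 ./build/manage-release.sh cosign
```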
@@ -1,4 +1,4 @@
|
||||
FROM debian:bookworm
|
||||
FROM debian:trixie
|
||||
|
||||
RUN apt-get update && \
|
||||
apt-get install -y \
|
||||
@@ -12,45 +12,14 @@ RUN apt-get update && \
|
||||
jq \
|
||||
gzip \
|
||||
brotli \
|
||||
qemu-user-static \
|
||||
binfmt-support \
|
||||
squashfs-tools \
|
||||
git \
|
||||
debspawn \
|
||||
rsync \
|
||||
b3sum \
|
||||
fuse-overlayfs \
|
||||
sudo \
|
||||
systemd \
|
||||
systemd-container \
|
||||
systemd-sysv \
|
||||
dbus \
|
||||
dbus-user-session
|
||||
|
||||
RUN systemctl mask \
|
||||
systemd-firstboot.service \
|
||||
systemd-udevd.service \
|
||||
getty@tty1.service \
|
||||
console-getty.service
|
||||
nodejs
|
||||
|
||||
RUN git config --global --add safe.directory /root/start-os
|
||||
|
||||
RUN mkdir -p /etc/debspawn && \
|
||||
echo "AllowUnsafePermissions=true" > /etc/debspawn/global.toml
|
||||
|
||||
ENV NVM_DIR=~/.nvm
|
||||
ENV NODE_VERSION=22
|
||||
RUN mkdir -p $NVM_DIR && \
|
||||
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/master/install.sh | bash && \
|
||||
. $NVM_DIR/nvm.sh \
|
||||
nvm install $NODE_VERSION && \
|
||||
nvm use $NODE_VERSION && \
|
||||
nvm alias default $NODE_VERSION && \
|
||||
ln -s $(which node) /usr/bin/node && \
|
||||
ln -s $(which npm) /usr/bin/npm
|
||||
|
||||
RUN mkdir -p /root/start-os
|
||||
WORKDIR /root/start-os
|
||||
|
||||
COPY docker-entrypoint.sh /docker-entrypoint.sh
|
||||
ENTRYPOINT [ "/docker-entrypoint.sh" ]
|
||||
@@ -1,3 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
exec /lib/systemd/systemd --unit=multi-user.target --show-status=false --log-target=journal
|
||||
@@ -1,27 +1,30 @@
|
||||
#!/bin/bash
|
||||
|
||||
if [ "$FORCE_COMPAT" = 1 ] || ( [ "$REQUIRES" = "linux" ] && [ "$(uname -s)" != "Linux" ] ) || ( [ "$REQUIRES" = "debian" ] && ! which dpkg > /dev/null ); then
|
||||
project_pwd="$(cd "$(dirname "${BASH_SOURCE[0]}")"/../.. && pwd)/"
|
||||
pwd="$(pwd)/"
|
||||
if ! [[ "$pwd" = "$project_pwd"* ]]; then
|
||||
>&2 echo "Must be run from start-os project dir"
|
||||
exit 1
|
||||
fi
|
||||
rel_pwd="${pwd#"$project_pwd"}"
|
||||
pwd=$(pwd)
|
||||
|
||||
SYSTEMD_TTY="-P"
|
||||
USE_TTY=
|
||||
cd "$(dirname "${BASH_SOURCE[0]}")/../.."
|
||||
|
||||
set -e
|
||||
|
||||
rel_pwd="${pwd#"$(pwd)"}"
|
||||
|
||||
COMPAT_ARCH=$(uname -m)
|
||||
|
||||
platform=linux/$COMPAT_ARCH
|
||||
|
||||
case $COMPAT_ARCH in
|
||||
x86_64)
|
||||
platform=linux/amd64;;
|
||||
aarch64)
|
||||
platform=linux/arm64;;
|
||||
esac
|
||||
|
||||
if [ "$FORCE_COMPAT" = 1 ] || ( [ "$REQUIRES" = "linux" ] && [ "$(uname -s)" != "Linux" ] ) || ( [ "$REQUIRES" = "debian" ] && ! which dpkg > /dev/null ); then
|
||||
if tty -s; then
|
||||
USE_TTY="-it"
|
||||
SYSTEMD_TTY="-t"
|
||||
fi
|
||||
|
||||
docker run -d --rm --name os-compat --privileged --security-opt apparmor=unconfined -v "${project_pwd}:/root/start-os" -v /lib/modules:/lib/modules:ro start9/build-env
|
||||
while ! docker exec os-compat systemctl is-active --quiet multi-user.target 2> /dev/null; do sleep .5; done
|
||||
docker exec -eARCH -eENVIRONMENT -ePLATFORM -eGIT_BRANCH_AS_HASH -ePROJECT -eDEPENDS -eCONFLICTS $USE_TTY -w "/root/start-os${rel_pwd}" os-compat $@
|
||||
code=$?
|
||||
docker stop os-compat
|
||||
exit $code
|
||||
docker run $USE_TTY --platform=$platform -eARCH -eENVIRONMENT -ePLATFORM -eGIT_BRANCH_AS_HASH -ePROJECT -eDEPENDS -eCONFLICTS -w "/root/start-os${rel_pwd}" --rm -v "$(pwd):/root/start-os" start9/build-env $@
|
||||
else
|
||||
exec $@
|
||||
fi
|
||||
fi
|
||||
|
||||
@@ -1,87 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
function partition_for () {
|
||||
if [[ "$1" =~ [0-9]+$ ]]; then
|
||||
echo "$1p$2"
|
||||
else
|
||||
echo "$1$2"
|
||||
fi
|
||||
}
|
||||
|
||||
VERSION=$(cat VERSION.txt)
|
||||
ENVIRONMENT=$(cat ENVIRONMENT.txt)
|
||||
GIT_HASH=$(cat GIT_HASH.txt | head -c 7)
|
||||
DATE=$(date +%Y%m%d)
|
||||
|
||||
ROOT_PART_END=7217792
|
||||
|
||||
VERSION_FULL="$VERSION-$GIT_HASH"
|
||||
|
||||
if [ -n "$ENVIRONMENT" ]; then
|
||||
VERSION_FULL="$VERSION_FULL~$ENVIRONMENT"
|
||||
fi
|
||||
|
||||
TARGET_NAME=startos-${VERSION_FULL}-${DATE}_raspberrypi.img
|
||||
TARGET_SIZE=$[($ROOT_PART_END+1)*512]
|
||||
|
||||
rm -f $TARGET_NAME
|
||||
truncate -s $TARGET_SIZE $TARGET_NAME
|
||||
(
|
||||
echo o
|
||||
echo x
|
||||
echo i
|
||||
echo "0xcb15ae4d"
|
||||
echo r
|
||||
echo n
|
||||
echo p
|
||||
echo 1
|
||||
echo 2048
|
||||
echo 526335
|
||||
echo t
|
||||
echo c
|
||||
echo n
|
||||
echo p
|
||||
echo 2
|
||||
echo 526336
|
||||
echo $ROOT_PART_END
|
||||
echo a
|
||||
echo 1
|
||||
echo w
|
||||
) | fdisk $TARGET_NAME
|
||||
OUTPUT_DEVICE=$(sudo losetup --show -fP $TARGET_NAME)
|
||||
sudo mkfs.ext4 `partition_for ${OUTPUT_DEVICE} 2`
|
||||
sudo mkfs.vfat `partition_for ${OUTPUT_DEVICE} 1`
|
||||
|
||||
TMPDIR=$(mktemp -d)
|
||||
|
||||
sudo mount `partition_for ${OUTPUT_DEVICE} 2` $TMPDIR
|
||||
sudo mkdir $TMPDIR/boot
|
||||
sudo mount `partition_for ${OUTPUT_DEVICE} 1` $TMPDIR/boot
|
||||
sudo unsquashfs -f -d $TMPDIR startos.raspberrypi.squashfs
|
||||
REAL_GIT_HASH=$(cat $TMPDIR/usr/lib/startos/GIT_HASH.txt)
|
||||
REAL_VERSION=$(cat $TMPDIR/usr/lib/startos/VERSION.txt)
|
||||
REAL_ENVIRONMENT=$(cat $TMPDIR/usr/lib/startos/ENVIRONMENT.txt)
|
||||
sudo sed -i 's| boot=startos| init=/usr/lib/startos/scripts/init_resize\.sh|' $TMPDIR/boot/cmdline.txt
|
||||
sudo cp ./build/raspberrypi/fstab $TMPDIR/etc/
|
||||
sudo cp ./build/raspberrypi/init_resize.sh $TMPDIR/usr/lib/startos/scripts/init_resize.sh
|
||||
sudo umount $TMPDIR/boot
|
||||
sudo umount $TMPDIR
|
||||
sudo losetup -d $OUTPUT_DEVICE
|
||||
|
||||
if [ "$ALLOW_VERSION_MISMATCH" != 1 ]; then
|
||||
if [ "$(cat GIT_HASH.txt)" != "$REAL_GIT_HASH" ]; then
|
||||
>&2 echo "startos.raspberrypi.squashfs GIT_HASH.txt mismatch"
|
||||
>&2 echo "expected $REAL_GIT_HASH (dpkg) found $(cat GIT_HASH.txt) (repo)"
|
||||
exit 1
|
||||
fi
|
||||
if [ "$(cat VERSION.txt)" != "$REAL_VERSION" ]; then
|
||||
>&2 echo "startos.raspberrypi.squashfs VERSION.txt mismatch"
|
||||
exit 1
|
||||
fi
|
||||
if [ "$(cat ENVIRONMENT.txt)" != "$REAL_ENVIRONMENT" ]; then
|
||||
>&2 echo "startos.raspberrypi.squashfs ENVIRONMENT.txt mismatch"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
32
container-runtime/CLAUDE.md
Normal file
@@ -0,0 +1,32 @@
# Container Runtime — Node.js Service Manager

Node.js runtime that manages service containers via JSON-RPC. See `RPCSpec.md` in this directory for the full RPC protocol.

## Architecture

```
LXC Container (uniform base for all services)
└── systemd
    └── container-runtime.service
        └── Loads /usr/lib/startos/package/index.js (from s9pk javascript.squashfs)
            └── Package JS launches subcontainers (from images in s9pk)
```

The container runtime communicates with the host via JSON-RPC over Unix socket. Package JavaScript must export functions conforming to the `ABI` type defined in `sdk/base/lib/types.ts`.
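
For orientation, a minimal sketch of what one call over that socket can look like from the host side (framing and error handling are simplified; only the socket path and the `start` method come from `RPCSpec.md`, everything else is illustrative):

```ts
import * as net from "net"

// Connect to the runtime's JSON-RPC socket and invoke "start" (no params).
const sock = net.connect("/media/startos/rpc/service.sock", () => {
  sock.write(JSON.stringify({ jsonrpc: "2.0", id: 1, method: "start" }))
})
sock.on("data", (buf) => {
  console.log("response:", JSON.parse(buf.toString()))
  sock.end()
})
```
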
## `/media/startos/` Directory (mounted by host into container)

| Path                 | Description                                            |
| -------------------- | ------------------------------------------------------ |
| `volumes/<name>/`    | Package data volumes (id-mapped, persistent)           |
| `assets/`            | Read-only assets from s9pk `assets.squashfs`           |
| `images/<name>/`     | Container images (squashfs, used for subcontainers)    |
| `images/<name>.env`  | Environment variables for image                        |
| `images/<name>.json` | Image metadata                                         |
| `backup/`            | Backup mount point (mounted during backup operations)  |
| `rpc/service.sock`   | RPC socket (container runtime listens here)            |
| `rpc/host.sock`      | Host RPC socket (for effects callbacks to host)        |

## S9PK Structure

See `../core/s9pk-structure.md` for the S9PK package format.

@@ -1,16 +1,21 @@
# Container RPC SERVER Specification
# Container RPC Server Specification

The container runtime exposes a JSON-RPC server over a Unix socket at `/media/startos/rpc/service.sock`.

## Methods

### init

initialize runtime (mount `/proc`, `/sys`, `/dev`, and `/run` to each image in `/media/images`)
Initialize the runtime and system.

called after os has mounted js and images to the container
#### params

#### args

`[]`
```ts
{
  id: string,
  kind: "install" | "update" | "restore" | null,
}
```
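
As a concrete illustration, a full `init` request conforming to the params above might look like this (the JSON-RPC envelope and the `id` value are assumed):

```ts
const initRequest = {
  jsonrpc: "2.0",
  id: 1,
  method: "init",
  params: {
    id: "evt-0001", // hypothetical event id
    kind: "install", // "install" | "update" | "restore" | null
  },
}
```
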
#### response

@@ -18,11 +23,16 @@ called after os has mounted js and images to the container

### exit

shutdown runtime
Shutdown runtime and optionally run exit hooks for a target version.

#### args
#### params

`[]`
```ts
{
  id: string,
  target: string | null, // ExtendedVersion or VersionRange
}
```

#### response

@@ -30,11 +40,11 @@ shutdown runtime

### start

run main method if not already running
Run main method if not already running.

#### args
#### params

`[]`
None

#### response

@@ -42,11 +52,11 @@ run main method if not already running

### stop

stop main method by sending SIGTERM to child processes, and SIGKILL after timeout
Stop main method by sending SIGTERM to child processes, and SIGKILL after timeout.

#### args
#### params

`{ timeout: millis }`
None

#### response

@@ -54,15 +64,16 @@ stop main method by sending SIGTERM to child processes, and SIGKILL after timeou

### execute

run a specific package procedure
Run a specific package procedure.

#### args
#### params

```ts
{
  procedure: JsonPath,
  input: any,
  timeout: millis,
  id: string, // event ID
  procedure: string, // JSON path (e.g., "/backup/create", "/actions/{name}/run")
  input: any,
  timeout: number | null,
}
```
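
For example, running an action through `execute` with the new params shape could look like the following (all identifiers are illustrative):

```ts
const executeRequest = {
  jsonrpc: "2.0",
  id: 2,
  method: "execute",
  params: {
    id: "evt-0002", // event ID
    procedure: "/actions/config/run", // JSON path to the procedure
    input: { name: "value" }, // action-specific input
    timeout: 30_000, // or null for no timeout
  },
}
```
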
@@ -72,18 +83,64 @@ run a specific package procedure

### sandbox

run a specific package procedure in sandbox mode
Run a specific package procedure in sandbox mode. Same interface as `execute`.

#### args
UNIMPLEMENTED: this feature is planned but does not exist

#### params

```ts
{
  procedure: JsonPath,
  input: any,
  timeout: millis,
  id: string,
  procedure: string,
  input: any,
  timeout: number | null,
}
```

#### response

`any`

### callback

Handle a callback from an effect.

#### params

```ts
{
  id: number,
  args: any[],
}
```

#### response

`null` (no response sent)

### eval

Evaluate a script in the runtime context. Used for debugging.

#### params

```ts
{
  script: string,
}
```

#### response

`any`

## Procedures

The `execute` and `sandbox` methods route to procedures based on the `procedure` path:

| Procedure                  | Description                  |
| -------------------------- | ---------------------------- |
| `/backup/create`           | Create a backup              |
| `/actions/{name}/getInput` | Get input spec for an action |
| `/actions/{name}/run`      | Run an action with input     |
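
A simplified dispatcher over these paths might be sketched as follows; the real runtime parses the path with its `jsonPath` model and routes to the `System` implementation, so the handler names here are only illustrative:

```ts
type Handler = (input: unknown, timeout: number | null) => Promise<unknown>

// Illustrative router over the procedure paths listed above.
function routeProcedure(
  procedure: string,
  handlers: {
    createBackup: Handler
    getActionInput: (name: string) => Handler
    runAction: (name: string) => Handler
  },
): Handler {
  const parts = procedure.split("/") // "/actions/<name>/run" -> ["", "actions", "<name>", "run"]
  if (procedure === "/backup/create") return handlers.createBackup
  if (parts[1] === "actions" && parts[3] === "getInput") return handlers.getActionInput(parts[2])
  if (parts[1] === "actions" && parts[3] === "run") return handlers.runAction(parts[2])
  throw new Error(`unknown procedure: ${procedure}`)
}
```
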
30
container-runtime/__mocks__/mime.js
Normal file
@@ -0,0 +1,30 @@
// Mock for ESM-only mime package — Jest's module loader doesn't support require(esm)
const types = {
  ".png": "image/png",
  ".jpg": "image/jpeg",
  ".jpeg": "image/jpeg",
  ".gif": "image/gif",
  ".svg": "image/svg+xml",
  ".webp": "image/webp",
  ".ico": "image/x-icon",
  ".json": "application/json",
  ".js": "application/javascript",
  ".html": "text/html",
  ".css": "text/css",
  ".txt": "text/plain",
  ".md": "text/markdown",
}

module.exports = {
  default: {
    getType(path) {
      const ext = "." + path.split(".").pop()
      return types[ext] || null
    },
    getExtension(type) {
      const entry = Object.entries(types).find(([, v]) => v === type)
      return entry ? entry[0].slice(1) : null
    },
  },
  __esModule: true,
}
@@ -4,7 +4,8 @@ OnFailure=container-runtime-failure.service

[Service]
Type=simple
ExecStart=/usr/bin/node --experimental-detect-module --trace-warnings --unhandled-rejections=warn /usr/lib/startos/init/index.js
Environment=RUST_LOG=startos=debug
ExecStart=/usr/bin/node --experimental-detect-module --trace-warnings /usr/lib/startos/init/index.js
Restart=no

[Install]

@@ -2,9 +2,6 @@
|
||||
|
||||
set -e
|
||||
|
||||
mkdir -p /run/systemd/resolve
|
||||
echo "nameserver 8.8.8.8" > /run/systemd/resolve/stub-resolv.conf
|
||||
|
||||
apt-get update
|
||||
apt-get install -y curl rsync qemu-user-static nodejs
|
||||
|
||||
@@ -16,7 +13,4 @@ sed -i '/\(^\|#\)ForwardToSyslog=/c\ForwardToSyslog=no' /etc/systemd/journald.co
|
||||
|
||||
systemctl enable container-runtime.service
|
||||
|
||||
rm -rf /run/systemd
|
||||
|
||||
rm -f /etc/resolv.conf
|
||||
echo "nameserver 10.0.3.1" > /etc/resolv.conf
|
||||
@@ -5,4 +5,7 @@ module.exports = {
|
||||
testEnvironment: "node",
|
||||
rootDir: "./src/",
|
||||
modulePathIgnorePatterns: ["./dist/"],
|
||||
moduleNameMapper: {
|
||||
"^mime$": "<rootDir>/../__mocks__/mime.js",
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
IMAGE=$1
|
||||
|
||||
if [ -z "$IMAGE" ]; then
|
||||
>&2 echo "usage: $0 <image id>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if ! [ -d "/media/images/$IMAGE" ]; then
|
||||
>&2 echo "image does not exist"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
container=$(mktemp -d)
|
||||
mkdir -p $container/rootfs $container/upper $container/work
|
||||
mount -t overlay -olowerdir=/media/images/$IMAGE,upperdir=$container/upper,workdir=$container/work overlay $container/rootfs
|
||||
|
||||
rootfs=$container/rootfs
|
||||
|
||||
for special in dev sys proc run; do
|
||||
mkdir -p $rootfs/$special
|
||||
mount --bind /$special $rootfs/$special
|
||||
done
|
||||
|
||||
echo $rootfs
|
||||
14
container-runtime/package-lock.json
generated
@@ -19,7 +19,6 @@
|
||||
"lodash.merge": "^4.6.2",
|
||||
"mime": "^4.0.7",
|
||||
"node-fetch": "^3.1.0",
|
||||
"ts-matches": "^6.3.2",
|
||||
"tslib": "^2.5.3",
|
||||
"typescript": "^5.1.3",
|
||||
"yaml": "^2.3.1"
|
||||
@@ -38,7 +37,7 @@
|
||||
},
|
||||
"../sdk/dist": {
|
||||
"name": "@start9labs/start-sdk",
|
||||
"version": "0.4.0-beta.44",
|
||||
"version": "0.4.0-beta.61",
|
||||
"license": "MIT",
|
||||
"dependencies": {
|
||||
"@iarna/toml": "^3.0.0",
|
||||
@@ -49,8 +48,9 @@
|
||||
"ini": "^5.0.0",
|
||||
"isomorphic-fetch": "^3.0.0",
|
||||
"mime": "^4.0.7",
|
||||
"ts-matches": "^6.3.2",
|
||||
"yaml": "^2.7.1"
|
||||
"yaml": "^2.7.1",
|
||||
"zod": "^4.3.6",
|
||||
"zod-deep-partial": "^1.2.0"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@types/jest": "^29.4.0",
|
||||
@@ -6494,12 +6494,6 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"node_modules/ts-matches": {
|
||||
"version": "6.3.2",
|
||||
"resolved": "https://registry.npmjs.org/ts-matches/-/ts-matches-6.3.2.tgz",
|
||||
"integrity": "sha512-UhSgJymF8cLd4y0vV29qlKVCkQpUtekAaujXbQVc729FezS8HwqzepqvtjzQ3HboatIqN/Idor85O2RMwT7lIQ==",
|
||||
"license": "MIT"
|
||||
},
|
||||
"node_modules/tslib": {
|
||||
"version": "2.8.1",
|
||||
"resolved": "https://registry.npmjs.org/tslib/-/tslib-2.8.1.tgz",
|
||||
|
||||
@@ -28,7 +28,6 @@
|
||||
"lodash.merge": "^4.6.2",
|
||||
"mime": "^4.0.7",
|
||||
"node-fetch": "^3.1.0",
|
||||
"ts-matches": "^6.3.2",
|
||||
"tslib": "^2.5.3",
|
||||
"typescript": "^5.1.3",
|
||||
"yaml": "^2.3.1"
|
||||
|
||||
@@ -1,12 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
set -e
|
||||
|
||||
rootfs=$1
|
||||
if [ -z "$rootfs" ]; then
|
||||
>&2 echo "usage: $0 <container rootfs path>"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
umount --recursive $rootfs
|
||||
rm -rf $rootfs/..
|
||||
@@ -3,33 +3,39 @@ import {
|
||||
types as T,
|
||||
utils,
|
||||
VersionRange,
|
||||
z,
|
||||
} from "@start9labs/start-sdk"
|
||||
import * as net from "net"
|
||||
import { object, string, number, literals, some, unknown } from "ts-matches"
|
||||
import { Effects } from "../Models/Effects"
|
||||
|
||||
import { CallbackHolder } from "../Models/CallbackHolder"
|
||||
import { asError } from "@start9labs/start-sdk/base/lib/util"
|
||||
const matchRpcError = object({
|
||||
error: object({
|
||||
code: number,
|
||||
message: string,
|
||||
data: some(
|
||||
string,
|
||||
object({
|
||||
details: string,
|
||||
debug: string.nullable().optional(),
|
||||
}),
|
||||
)
|
||||
const matchRpcError = z.object({
|
||||
error: z.object({
|
||||
code: z.number(),
|
||||
message: z.string(),
|
||||
data: z
|
||||
.union([
|
||||
z.string(),
|
||||
z.object({
|
||||
details: z.string(),
|
||||
debug: z.string().nullable().optional(),
|
||||
}),
|
||||
])
|
||||
.nullable()
|
||||
.optional(),
|
||||
}),
|
||||
})
|
||||
const testRpcError = matchRpcError.test
|
||||
const testRpcResult = object({
|
||||
result: unknown,
|
||||
}).test
|
||||
type RpcError = typeof matchRpcError._TYPE
|
||||
function testRpcError(v: unknown): v is RpcError {
|
||||
return matchRpcError.safeParse(v).success
|
||||
}
|
||||
const matchRpcResult = z.object({
|
||||
result: z.unknown(),
|
||||
})
|
||||
function testRpcResult(v: unknown): v is z.infer<typeof matchRpcResult> {
|
||||
return matchRpcResult.safeParse(v).success
|
||||
}
|
||||
type RpcError = z.infer<typeof matchRpcError>
|
||||
|
||||
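// Usage sketch (illustrative): the safeParse-based guards above narrow an
// unknown host response before its fields are touched.
const exampleResponse: unknown = { error: { code: -32601, message: "Method not found" } }
if (testRpcError(exampleResponse)) {
  console.error(exampleResponse.error.code, exampleResponse.error.message)
} else if (testRpcResult(exampleResponse)) {
  console.log(exampleResponse.result)
}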
const SOCKET_PATH = "/media/startos/rpc/host.sock"
|
||||
let hostSystemId = 0
|
||||
@@ -71,7 +77,7 @@ const rpcRoundFor =
|
||||
"Error in host RPC:",
|
||||
utils.asError({ method, params, error: res.error }),
|
||||
)
|
||||
if (string.test(res.error.data)) {
|
||||
if (typeof res.error.data === "string") {
|
||||
message += ": " + res.error.data
|
||||
console.error(`Details: ${res.error.data}`)
|
||||
} else {
|
||||
@@ -178,6 +184,14 @@ export function makeEffects(context: EffectContext): Effects {
|
||||
T.Effects["getInstalledPackages"]
|
||||
>
|
||||
},
|
||||
getServiceManifest(
|
||||
...[options]: Parameters<T.Effects["getServiceManifest"]>
|
||||
) {
|
||||
return rpcRound("get-service-manifest", {
|
||||
...options,
|
||||
callback: context.callbacks?.addCallback(options.callback) || null,
|
||||
}) as ReturnType<T.Effects["getServiceManifest"]>
|
||||
},
|
||||
subcontainer: {
|
||||
createFs(options: { imageId: string; name: string }) {
|
||||
return rpcRound("subcontainer.create-fs", options) as ReturnType<
|
||||
@@ -198,9 +212,10 @@ export function makeEffects(context: EffectContext): Effects {
|
||||
>
|
||||
}) as Effects["exportServiceInterface"],
|
||||
getContainerIp(...[options]: Parameters<T.Effects["getContainerIp"]>) {
|
||||
return rpcRound("get-container-ip", options) as ReturnType<
|
||||
T.Effects["getContainerIp"]
|
||||
>
|
||||
return rpcRound("get-container-ip", {
|
||||
...options,
|
||||
callback: context.callbacks?.addCallback(options.callback) || null,
|
||||
}) as ReturnType<T.Effects["getContainerIp"]>
|
||||
},
|
||||
getOsIp(...[]: Parameters<T.Effects["getOsIp"]>) {
|
||||
return rpcRound("get-os-ip", {}) as ReturnType<T.Effects["getOsIp"]>
|
||||
@@ -231,9 +246,10 @@ export function makeEffects(context: EffectContext): Effects {
|
||||
>
|
||||
},
|
||||
getSslCertificate(options: Parameters<T.Effects["getSslCertificate"]>[0]) {
|
||||
return rpcRound("get-ssl-certificate", options) as ReturnType<
|
||||
T.Effects["getSslCertificate"]
|
||||
>
|
||||
return rpcRound("get-ssl-certificate", {
|
||||
...options,
|
||||
callback: context.callbacks?.addCallback(options.callback) || null,
|
||||
}) as ReturnType<T.Effects["getSslCertificate"]>
|
||||
},
|
||||
getSslKey(options: Parameters<T.Effects["getSslKey"]>[0]) {
|
||||
return rpcRound("get-ssl-key", options) as ReturnType<
|
||||
@@ -246,6 +262,14 @@ export function makeEffects(context: EffectContext): Effects {
|
||||
callback: context.callbacks?.addCallback(options.callback) || null,
|
||||
}) as ReturnType<T.Effects["getSystemSmtp"]>
|
||||
},
|
||||
getOutboundGateway(
|
||||
...[options]: Parameters<T.Effects["getOutboundGateway"]>
|
||||
) {
|
||||
return rpcRound("get-outbound-gateway", {
|
||||
...options,
|
||||
callback: context.callbacks?.addCallback(options.callback) || null,
|
||||
}) as ReturnType<T.Effects["getOutboundGateway"]>
|
||||
},
|
||||
listServiceInterfaces(
|
||||
...[options]: Parameters<T.Effects["listServiceInterfaces"]>
|
||||
) {
|
||||
@@ -287,8 +311,12 @@ export function makeEffects(context: EffectContext): Effects {
|
||||
},
|
||||
|
||||
getStatus(...[o]: Parameters<T.Effects["getStatus"]>) {
|
||||
return rpcRound("get-status", o) as ReturnType<T.Effects["getStatus"]>
|
||||
return rpcRound("get-status", {
|
||||
...o,
|
||||
callback: context.callbacks?.addCallback(o.callback) || null,
|
||||
}) as ReturnType<T.Effects["getStatus"]>
|
||||
},
|
||||
/// DEPRECATED
|
||||
setMainStatus(o: { status: "running" | "stopped" }): Promise<null> {
|
||||
return rpcRound("set-main-status", o) as ReturnType<
|
||||
T.Effects["setHealth"]
|
||||
@@ -308,9 +336,35 @@ export function makeEffects(context: EffectContext): Effects {
|
||||
T.Effects["setDataVersion"]
|
||||
>
|
||||
},
|
||||
plugin: {
|
||||
url: {
|
||||
register(
|
||||
...[options]: Parameters<T.Effects["plugin"]["url"]["register"]>
|
||||
) {
|
||||
return rpcRound("plugin.url.register", options) as ReturnType<
|
||||
T.Effects["plugin"]["url"]["register"]
|
||||
>
|
||||
},
|
||||
exportUrl(
|
||||
...[options]: Parameters<T.Effects["plugin"]["url"]["exportUrl"]>
|
||||
) {
|
||||
return rpcRound("plugin.url.export-url", options) as ReturnType<
|
||||
T.Effects["plugin"]["url"]["exportUrl"]
|
||||
>
|
||||
},
|
||||
clearUrls(
|
||||
...[options]: Parameters<T.Effects["plugin"]["url"]["clearUrls"]>
|
||||
) {
|
||||
return rpcRound("plugin.url.clear-urls", options) as ReturnType<
|
||||
T.Effects["plugin"]["url"]["clearUrls"]
|
||||
>
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
if (context.callbacks?.onLeaveContext)
|
||||
self.onLeaveContext(() => {
|
||||
self.constRetry = undefined
|
||||
self.isInContext = false
|
||||
self.onLeaveContext = () => {
|
||||
console.warn(
|
||||
|
||||
@@ -1,25 +1,13 @@
|
||||
// @ts-check
|
||||
|
||||
import * as net from "net"
|
||||
import {
|
||||
object,
|
||||
some,
|
||||
string,
|
||||
literal,
|
||||
array,
|
||||
number,
|
||||
matches,
|
||||
any,
|
||||
shape,
|
||||
anyOf,
|
||||
literals,
|
||||
} from "ts-matches"
|
||||
|
||||
import {
|
||||
ExtendedVersion,
|
||||
types as T,
|
||||
utils,
|
||||
VersionRange,
|
||||
z,
|
||||
} from "@start9labs/start-sdk"
|
||||
import * as fs from "fs"
|
||||
|
||||
@@ -29,89 +17,92 @@ import { jsonPath, unNestPath } from "../Models/JsonPath"
|
||||
import { System } from "../Interfaces/System"
|
||||
import { makeEffects } from "./EffectCreator"
|
||||
type MaybePromise<T> = T | Promise<T>
|
||||
export const matchRpcResult = anyOf(
|
||||
object({ result: any }),
|
||||
object({
|
||||
error: object({
|
||||
code: number,
|
||||
message: string,
|
||||
data: object({
|
||||
details: string.optional(),
|
||||
debug: any.optional(),
|
||||
})
|
||||
export const matchRpcResult = z.union([
|
||||
z.object({ result: z.any() }),
|
||||
z.object({
|
||||
error: z.object({
|
||||
code: z.number(),
|
||||
message: z.string(),
|
||||
data: z
|
||||
.object({
|
||||
details: z.string().optional(),
|
||||
debug: z.any().optional(),
|
||||
})
|
||||
.nullable()
|
||||
.optional(),
|
||||
}),
|
||||
}),
|
||||
)
|
||||
])
|
||||
|
||||
export type RpcResult = typeof matchRpcResult._TYPE
|
||||
export type RpcResult = z.infer<typeof matchRpcResult>
|
||||
type SocketResponse = ({ jsonrpc: "2.0"; id: IdType } & RpcResult) | null
|
||||
|
||||
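// Shape sketch (values illustrative): the two RpcResult variants as they
// appear once wrapped into a SocketResponse.
const exampleOk: SocketResponse = { jsonrpc: "2.0", id: 1, result: { ok: true } }
const exampleErr: SocketResponse = {
  jsonrpc: "2.0",
  id: 1,
  error: { code: -32601, message: "Method not found", data: { details: "unknown method" } },
}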
const SOCKET_PARENT = "/media/startos/rpc"
|
||||
const SOCKET_PATH = "/media/startos/rpc/service.sock"
|
||||
const jsonrpc = "2.0" as const
|
||||
|
||||
const isResult = object({ result: any }).test
|
||||
const isResultSchema = z.object({ result: z.any() })
|
||||
const isResult = (v: unknown): v is z.infer<typeof isResultSchema> =>
|
||||
isResultSchema.safeParse(v).success
|
||||
|
||||
const idType = some(string, number, literal(null))
|
||||
const idType = z.union([z.string(), z.number(), z.literal(null)])
|
||||
type IdType = null | string | number | undefined
|
||||
const runType = object({
|
||||
const runType = z.object({
|
||||
id: idType.optional(),
|
||||
method: literal("execute"),
|
||||
params: object({
|
||||
id: string,
|
||||
procedure: string,
|
||||
input: any,
|
||||
timeout: number.nullable().optional(),
|
||||
method: z.literal("execute"),
|
||||
params: z.object({
|
||||
id: z.string(),
|
||||
procedure: z.string(),
|
||||
input: z.any(),
|
||||
timeout: z.number().nullable().optional(),
|
||||
}),
|
||||
})
|
||||
const sandboxRunType = object({
|
||||
const sandboxRunType = z.object({
|
||||
id: idType.optional(),
|
||||
method: literal("sandbox"),
|
||||
params: object({
|
||||
id: string,
|
||||
procedure: string,
|
||||
input: any,
|
||||
timeout: number.nullable().optional(),
|
||||
method: z.literal("sandbox"),
|
||||
params: z.object({
|
||||
id: z.string(),
|
||||
procedure: z.string(),
|
||||
input: z.any(),
|
||||
timeout: z.number().nullable().optional(),
|
||||
}),
|
||||
})
|
||||
const callbackType = object({
|
||||
method: literal("callback"),
|
||||
params: object({
|
||||
id: number,
|
||||
args: array,
|
||||
const callbackType = z.object({
|
||||
method: z.literal("callback"),
|
||||
params: z.object({
|
||||
id: z.number(),
|
||||
args: z.array(z.unknown()),
|
||||
}),
|
||||
})
|
||||
const initType = object({
|
||||
const initType = z.object({
|
||||
id: idType.optional(),
|
||||
method: literal("init"),
|
||||
params: object({
|
||||
id: string,
|
||||
kind: literals("install", "update", "restore").nullable(),
|
||||
method: z.literal("init"),
|
||||
params: z.object({
|
||||
id: z.string(),
|
||||
kind: z.enum(["install", "update", "restore"]).nullable(),
|
||||
}),
|
||||
})
|
||||
const startType = object({
|
||||
const startType = z.object({
|
||||
id: idType.optional(),
|
||||
method: literal("start"),
|
||||
method: z.literal("start"),
|
||||
})
|
||||
const stopType = object({
|
||||
const stopType = z.object({
|
||||
id: idType.optional(),
|
||||
method: literal("stop"),
|
||||
method: z.literal("stop"),
|
||||
})
|
||||
const exitType = object({
|
||||
const exitType = z.object({
|
||||
id: idType.optional(),
|
||||
method: literal("exit"),
|
||||
params: object({
|
||||
id: string,
|
||||
target: string.nullable(),
|
||||
method: z.literal("exit"),
|
||||
params: z.object({
|
||||
id: z.string(),
|
||||
target: z.string().nullable(),
|
||||
}),
|
||||
})
|
||||
const evalType = object({
|
||||
const evalType = z.object({
|
||||
id: idType.optional(),
|
||||
method: literal("eval"),
|
||||
params: object({
|
||||
script: string,
|
||||
method: z.literal("eval"),
|
||||
params: z.object({
|
||||
script: z.string(),
|
||||
}),
|
||||
})
|
||||
|
||||
@@ -144,8 +135,11 @@ const handleRpc = (id: IdType, result: Promise<RpcResult>) =>
|
||||
},
|
||||
}))
|
||||
|
||||
const hasId = object({ id: idType }).test
|
||||
const hasIdSchema = z.object({ id: idType })
|
||||
const hasId = (v: unknown): v is z.infer<typeof hasIdSchema> =>
|
||||
hasIdSchema.safeParse(v).success
|
||||
export class RpcListener {
|
||||
shouldExit = false
|
||||
unixSocketServer = net.createServer(async (server) => {})
|
||||
private _system: System | undefined
|
||||
private callbacks: CallbackHolder | undefined
|
||||
@@ -210,6 +204,11 @@ export class RpcListener {
|
||||
.catch(mapError)
|
||||
.then(logData("response"))
|
||||
.then(writeDataToSocket)
|
||||
.then((_) => {
|
||||
if (this.shouldExit) {
|
||||
process.exit(0)
|
||||
}
|
||||
})
|
||||
.catch((e) => {
|
||||
console.error(`Major error in socket handling: ${e}`)
|
||||
console.debug(`Data in: ${a.toString()}`)
|
||||
@@ -240,40 +239,52 @@ export class RpcListener {
|
||||
}
|
||||
|
||||
private dealWithInput(input: unknown): MaybePromise<SocketResponse> {
|
||||
return matches(input)
|
||||
.when(runType, async ({ id, params }) => {
|
||||
const parsed = z.object({ method: z.string() }).safeParse(input)
|
||||
if (!parsed.success) {
|
||||
console.warn(
|
||||
`Couldn't parse the following input ${JSON.stringify(input)}`,
|
||||
)
|
||||
return {
|
||||
jsonrpc,
|
||||
id: (input as any)?.id,
|
||||
error: {
|
||||
code: -32602,
|
||||
message: "invalid params",
|
||||
data: {
|
||||
details: JSON.stringify(input),
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
switch (parsed.data.method) {
|
||||
case "execute": {
|
||||
const { id, params } = runType.parse(input)
|
||||
const system = this.system
|
||||
const procedure = jsonPath.unsafeCast(params.procedure)
|
||||
const { input, timeout, id: eventId } = params
|
||||
const result = this.getResult(
|
||||
procedure,
|
||||
system,
|
||||
eventId,
|
||||
timeout,
|
||||
input,
|
||||
)
|
||||
const procedure = jsonPath.parse(params.procedure)
|
||||
const { input: inp, timeout, id: eventId } = params
|
||||
const result = this.getResult(procedure, system, eventId, timeout, inp)
|
||||
|
||||
return handleRpc(id, result)
|
||||
})
|
||||
.when(sandboxRunType, async ({ id, params }) => {
|
||||
}
|
||||
case "sandbox": {
|
||||
const { id, params } = sandboxRunType.parse(input)
|
||||
const system = this.system
|
||||
const procedure = jsonPath.unsafeCast(params.procedure)
|
||||
const { input, timeout, id: eventId } = params
|
||||
const result = this.getResult(
|
||||
procedure,
|
||||
system,
|
||||
eventId,
|
||||
timeout,
|
||||
input,
|
||||
)
|
||||
const procedure = jsonPath.parse(params.procedure)
|
||||
const { input: inp, timeout, id: eventId } = params
|
||||
const result = this.getResult(procedure, system, eventId, timeout, inp)
|
||||
|
||||
return handleRpc(id, result)
|
||||
})
|
||||
.when(callbackType, async ({ params: { id, args } }) => {
|
||||
}
|
||||
case "callback": {
|
||||
const {
|
||||
params: { id, args },
|
||||
} = callbackType.parse(input)
|
||||
this.callCallback(id, args)
|
||||
return null
|
||||
})
|
||||
.when(startType, async ({ id }) => {
|
||||
}
|
||||
case "start": {
|
||||
const { id } = startType.parse(input)
|
||||
const callbacks =
|
||||
this.callbacks?.getChild("main") || this.callbacks?.child("main")
|
||||
const effects = makeEffects({
|
||||
@@ -284,15 +295,17 @@ export class RpcListener {
|
||||
id,
|
||||
this.system.start(effects).then((result) => ({ result })),
|
||||
)
|
||||
})
|
||||
.when(stopType, async ({ id }) => {
|
||||
}
|
||||
case "stop": {
|
||||
const { id } = stopType.parse(input)
|
||||
this.callbacks?.removeChild("main")
|
||||
return handleRpc(
|
||||
id,
|
||||
this.system.stop().then((result) => ({ result })),
|
||||
)
|
||||
})
|
||||
.when(exitType, async ({ id, params }) => {
|
||||
}
|
||||
case "exit": {
|
||||
const { id, params } = exitType.parse(input)
|
||||
return handleRpc(
|
||||
id,
|
||||
(async () => {
|
||||
@@ -310,11 +323,13 @@ export class RpcListener {
|
||||
}),
|
||||
target,
|
||||
)
|
||||
this.shouldExit = true
|
||||
}
|
||||
})().then((result) => ({ result })),
|
||||
)
|
||||
})
|
||||
.when(initType, async ({ id, params }) => {
|
||||
}
|
||||
case "init": {
|
||||
const { id, params } = initType.parse(input)
|
||||
return handleRpc(
|
||||
id,
|
||||
(async () => {
|
||||
@@ -339,8 +354,9 @@ export class RpcListener {
|
||||
}
|
||||
})().then((result) => ({ result })),
|
||||
)
|
||||
})
|
||||
.when(evalType, async ({ id, params }) => {
|
||||
}
|
||||
case "eval": {
|
||||
const { id, params } = evalType.parse(input)
|
||||
return handleRpc(
|
||||
id,
|
||||
(async () => {
|
||||
@@ -365,41 +381,28 @@ export class RpcListener {
|
||||
}
|
||||
})(),
|
||||
)
|
||||
})
|
||||
.when(
|
||||
shape({ id: idType.optional(), method: string }),
|
||||
({ id, method }) => ({
|
||||
}
|
||||
default: {
|
||||
const { id, method } = z
|
||||
.object({ id: idType.optional(), method: z.string() })
|
||||
.passthrough()
|
||||
.parse(input)
|
||||
return {
|
||||
jsonrpc,
|
||||
id,
|
||||
error: {
|
||||
code: -32601,
|
||||
message: `Method not found`,
|
||||
message: "Method not found",
|
||||
data: {
|
||||
details: method,
|
||||
},
|
||||
},
|
||||
}),
|
||||
)
|
||||
|
||||
.defaultToLazy(() => {
|
||||
console.warn(
|
||||
`Couldn't parse the following input ${JSON.stringify(input)}`,
|
||||
)
|
||||
return {
|
||||
jsonrpc,
|
||||
id: (input as any)?.id,
|
||||
error: {
|
||||
code: -32602,
|
||||
message: "invalid params",
|
||||
data: {
|
||||
details: JSON.stringify(input),
|
||||
},
|
||||
},
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
private getResult(
|
||||
procedure: typeof jsonPath._TYPE,
|
||||
procedure: z.infer<typeof jsonPath>,
|
||||
system: System,
|
||||
eventId: string,
|
||||
timeout: number | null | undefined,
|
||||
@@ -427,6 +430,7 @@ export class RpcListener {
|
||||
return system.getActionInput(
|
||||
effects,
|
||||
procedures[2],
|
||||
input?.prefill ?? null,
|
||||
timeout || null,
|
||||
)
|
||||
case procedures[1] === "actions" && procedures[3] === "run":
|
||||
@@ -438,26 +442,18 @@ export class RpcListener {
|
||||
)
|
||||
}
|
||||
}
|
||||
})().then(ensureResultTypeShape, (error) =>
|
||||
matches(error)
|
||||
.when(
|
||||
object({
|
||||
error: string,
|
||||
code: number.defaultTo(0),
|
||||
}),
|
||||
(error) => ({
|
||||
error: {
|
||||
code: error.code,
|
||||
message: error.error,
|
||||
},
|
||||
}),
|
||||
)
|
||||
.defaultToLazy(() => ({
|
||||
error: {
|
||||
code: 0,
|
||||
message: String(error),
|
||||
},
|
||||
})),
|
||||
)
|
||||
})().then(ensureResultTypeShape, (error) => {
|
||||
const errorSchema = z.object({
|
||||
error: z.string(),
|
||||
code: z.number().default(0),
|
||||
})
|
||||
const parsed = errorSchema.safeParse(error)
|
||||
if (parsed.success) {
|
||||
return {
|
||||
error: { code: parsed.data.code, message: parsed.data.error },
|
||||
}
|
||||
}
|
||||
return { error: { code: 0, message: String(error) } }
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2,7 +2,7 @@ import * as fs from "fs/promises"
|
||||
import * as cp from "child_process"
|
||||
import { SubContainer, types as T } from "@start9labs/start-sdk"
|
||||
import { promisify } from "util"
|
||||
import { DockerProcedure, VolumeId } from "../../../Models/DockerProcedure"
|
||||
import { DockerProcedure } from "../../../Models/DockerProcedure"
|
||||
import { Volume } from "./matchVolume"
|
||||
import {
|
||||
CommandOptions,
|
||||
@@ -28,7 +28,7 @@ export class DockerProcedureContainer extends Drop {
|
||||
effects: T.Effects,
|
||||
packageId: string,
|
||||
data: DockerProcedure,
|
||||
volumes: { [id: VolumeId]: Volume },
|
||||
volumes: { [id: string]: Volume },
|
||||
name: string,
|
||||
options: { subcontainer?: SubContainer<SDKManifest> } = {},
|
||||
) {
|
||||
@@ -47,7 +47,7 @@ export class DockerProcedureContainer extends Drop {
|
||||
effects: T.Effects,
|
||||
packageId: string,
|
||||
data: DockerProcedure,
|
||||
volumes: { [id: VolumeId]: Volume },
|
||||
volumes: { [id: string]: Volume },
|
||||
name: string,
|
||||
) {
|
||||
const subcontainer = await SubContainerOwned.of(
|
||||
@@ -64,7 +64,7 @@ export class DockerProcedureContainer extends Drop {
|
||||
? `${subcontainer.rootfs}${mounts[mount]}`
|
||||
: `${subcontainer.rootfs}/${mounts[mount]}`
|
||||
await fs.mkdir(path, { recursive: true })
|
||||
const volumeMount = volumes[mount]
|
||||
const volumeMount: Volume = volumes[mount]
|
||||
if (volumeMount.type === "data") {
|
||||
await subcontainer.mount(
|
||||
Mounts.of().mountVolume({
|
||||
@@ -82,18 +82,15 @@ export class DockerProcedureContainer extends Drop {
|
||||
}),
|
||||
)
|
||||
} else if (volumeMount.type === "certificate") {
|
||||
const hostInfo = await effects.getHostInfo({
|
||||
hostId: volumeMount["interface-id"],
|
||||
})
|
||||
const hostnames = [
|
||||
`${packageId}.embassy`,
|
||||
...new Set(
|
||||
Object.values(
|
||||
(
|
||||
await effects.getHostInfo({
|
||||
hostId: volumeMount["interface-id"],
|
||||
})
|
||||
)?.hostnameInfo || {},
|
||||
)
|
||||
.flatMap((h) => h)
|
||||
.flatMap((h) => (h.kind === "onion" ? [h.hostname.value] : [])),
|
||||
Object.values(hostInfo?.bindings || {})
|
||||
.flatMap((b) => b.addresses.available)
|
||||
.map((h) => h.hostname),
|
||||
).values(),
|
||||
]
|
||||
const certChain = await effects.getSslCertificate({
|
||||
@@ -118,7 +115,7 @@ export class DockerProcedureContainer extends Drop {
|
||||
subpath: volumeMount.path,
|
||||
readonly: volumeMount.readonly,
|
||||
volumeId: volumeMount["volume-id"],
|
||||
filetype: "directory",
|
||||
idmap: [],
|
||||
},
|
||||
})
|
||||
} else if (volumeMount.type === "backup") {
|
||||
|
||||
@@ -10,7 +10,6 @@ import { SDKManifest } from "@start9labs/start-sdk/base/lib/types"
|
||||
import { SubContainerRc } from "@start9labs/start-sdk/package/lib/util/SubContainer"
|
||||
|
||||
const EMBASSY_HEALTH_INTERVAL = 15 * 1000
|
||||
const EMBASSY_PROPERTIES_LOOP = 30 * 1000
|
||||
/**
|
||||
* We wanted something to represent what the main loop is doing, and
|
||||
* in this case it used to run the properties, health, and the docker/ js main.
|
||||
@@ -120,6 +119,7 @@ export class MainLoop {
|
||||
? {
|
||||
preferredExternalPort: lanConf.external,
|
||||
alpn: { specified: ["http/1.1"] },
|
||||
addXForwardedHeaders: false,
|
||||
}
|
||||
: null,
|
||||
})
|
||||
@@ -133,7 +133,7 @@ export class MainLoop {
|
||||
delete this.mainEvent
|
||||
delete this.healthLoops
|
||||
await main?.daemon
|
||||
.stop()
|
||||
.term()
|
||||
.catch((e: unknown) => console.error(`Main loop error`, utils.asError(e)))
|
||||
this.effects.setMainStatus({ status: "stopped" })
|
||||
if (healthLoops) healthLoops.forEach((x) => clearInterval(x.interval))
|
||||
|
||||
@@ -15,26 +15,11 @@ import { System } from "../../../Interfaces/System"
|
||||
import { matchManifest, Manifest } from "./matchManifest"
|
||||
import * as childProcess from "node:child_process"
|
||||
import { DockerProcedureContainer } from "./DockerProcedureContainer"
|
||||
import { DockerProcedure } from "../../../Models/DockerProcedure"
|
||||
import { promisify } from "node:util"
|
||||
import * as U from "./oldEmbassyTypes"
|
||||
import { MainLoop } from "./MainLoop"
|
||||
import {
|
||||
matches,
|
||||
boolean,
|
||||
dictionary,
|
||||
literal,
|
||||
literals,
|
||||
object,
|
||||
string,
|
||||
unknown,
|
||||
any,
|
||||
tuple,
|
||||
number,
|
||||
anyOf,
|
||||
deferred,
|
||||
Parser,
|
||||
array,
|
||||
} from "ts-matches"
|
||||
import { z } from "@start9labs/start-sdk"
|
||||
import { AddSslOptions } from "@start9labs/start-sdk/base/lib/osBindings"
|
||||
import {
|
||||
BindOptionsByProtocol,
|
||||
@@ -50,40 +35,116 @@ import {
|
||||
transformOldConfigToNew,
|
||||
} from "./transformConfigSpec"
|
||||
import { partialDiff } from "@start9labs/start-sdk/base/lib/util"
|
||||
import { Volume } from "@start9labs/start-sdk/package/lib/util/Volume"
|
||||
|
||||
type Optional<A> = A | undefined | null
|
||||
function todo(): never {
|
||||
throw new Error("Not implemented")
|
||||
}
|
||||
|
||||
function getStatus(
|
||||
effects: Effects,
|
||||
options: Omit<Parameters<Effects["getStatus"]>[0], "callback"> = {},
|
||||
) {
|
||||
async function* watch(abort?: AbortSignal) {
|
||||
const resolveCell = { resolve: () => {} }
|
||||
effects.onLeaveContext(() => {
|
||||
resolveCell.resolve()
|
||||
})
|
||||
abort?.addEventListener("abort", () => resolveCell.resolve())
|
||||
while (effects.isInContext && !abort?.aborted) {
|
||||
let callback: () => void = () => {}
|
||||
const waitForNext = new Promise<void>((resolve) => {
|
||||
callback = resolve
|
||||
resolveCell.resolve = resolve
|
||||
})
|
||||
yield await effects.getStatus({ ...options, callback })
|
||||
await waitForNext
|
||||
}
|
||||
}
|
||||
return {
|
||||
const: () =>
|
||||
effects.getStatus({
|
||||
...options,
|
||||
callback:
|
||||
effects.constRetry &&
|
||||
(() => effects.constRetry && effects.constRetry()),
|
||||
}),
|
||||
once: () => effects.getStatus(options),
|
||||
watch: (abort?: AbortSignal) => {
|
||||
const ctrl = new AbortController()
|
||||
abort?.addEventListener("abort", () => ctrl.abort())
|
||||
return watch(ctrl.signal)
|
||||
},
|
||||
onChange: (
|
||||
callback: (
|
||||
value: T.StatusInfo | null,
|
||||
error?: Error,
|
||||
) => { cancel: boolean } | Promise<{ cancel: boolean }>,
|
||||
) => {
|
||||
;(async () => {
|
||||
const ctrl = new AbortController()
|
||||
for await (const value of watch(ctrl.signal)) {
|
||||
try {
|
||||
const res = await callback(value)
|
||||
if (res.cancel) {
|
||||
ctrl.abort()
|
||||
break
|
||||
}
|
||||
} catch (e) {
|
||||
console.error(
|
||||
"callback function threw an error @ getStatus.onChange",
|
||||
e,
|
||||
)
|
||||
}
|
||||
}
|
||||
})()
|
||||
.catch((e) => callback(null, e as Error))
|
||||
.catch((e) =>
|
||||
console.error(
|
||||
"callback function threw an error @ getStatus.onChange",
|
||||
e,
|
||||
),
|
||||
)
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
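// Usage sketch (illustrative, package id is made up): the helper above wraps
// effects.getStatus in one-shot, const-retry, async-iterator, and onChange styles.
async function exampleStatusUsage(effects: Effects) {
  const current = await getStatus(effects, { packageId: "my-dep" }).once()
  console.log("current status:", current)

  for await (const status of getStatus(effects).watch()) {
    console.log("status changed:", status)
  }
}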
/**
|
||||
* Local type for procedure values from the manifest.
|
||||
* The manifest's zod schemas use ZodTypeAny casts that produce `unknown` in zod v4.
|
||||
* This type restores the expected shape for type-safe property access.
|
||||
*/
|
||||
type Procedure =
|
||||
| (DockerProcedure & { type: "docker" })
|
||||
| { type: "script"; args: unknown[] | null }
|
||||
|
||||
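// Narrowing sketch (illustrative): the discriminated union above is consumed
// by checking `type` before touching docker- or script-specific fields.
function describeProcedure(p: Procedure): string {
  return p.type === "docker"
    ? `docker: ${p.entrypoint} ${p.args.join(" ")}`
    : `script with ${p.args?.length ?? 0} args`
}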
const MANIFEST_LOCATION = "/usr/lib/startos/package/embassyManifest.json"
|
||||
export const EMBASSY_JS_LOCATION = "/usr/lib/startos/package/embassy.js"
|
||||
|
||||
const configFile = FileHelper.json(
|
||||
{
|
||||
volumeId: "embassy",
|
||||
base: new Volume("embassy"),
|
||||
subpath: "config.json",
|
||||
},
|
||||
matches.any,
|
||||
z.any(),
|
||||
)
|
||||
const dependsOnFile = FileHelper.json(
|
||||
{
|
||||
volumeId: "embassy",
|
||||
base: new Volume("embassy"),
|
||||
subpath: "dependsOn.json",
|
||||
},
|
||||
dictionary([string, array(string)]),
|
||||
z.record(z.string(), z.array(z.string())),
|
||||
)
|
||||
|
||||
const matchResult = object({
|
||||
result: any,
|
||||
const matchResult = z.object({
|
||||
result: z.any(),
|
||||
})
|
||||
const matchError = object({
|
||||
error: string,
|
||||
const matchError = z.object({
|
||||
error: z.string(),
|
||||
})
|
||||
const matchErrorCode = object<{
|
||||
"error-code": [number, string] | readonly [number, string]
|
||||
}>({
|
||||
"error-code": tuple(number, string),
|
||||
const matchErrorCode = z.object({
|
||||
"error-code": z.tuple([z.number(), z.string()]),
|
||||
})
|
||||
|
||||
const assertNever = (
|
||||
@@ -95,29 +156,34 @@ const assertNever = (
|
||||
/**
|
||||
Should be changing the type for specific properties, and this is mostly a transformation for the old return types to the newer one.
|
||||
*/
|
||||
function isMatchResult(a: unknown): a is z.infer<typeof matchResult> {
|
||||
return matchResult.safeParse(a).success
|
||||
}
|
||||
function isMatchError(a: unknown): a is z.infer<typeof matchError> {
|
||||
return matchError.safeParse(a).success
|
||||
}
|
||||
function isMatchErrorCode(a: unknown): a is z.infer<typeof matchErrorCode> {
|
||||
return matchErrorCode.safeParse(a).success
|
||||
}
|
||||
const fromReturnType = <A>(a: U.ResultType<A>): A => {
|
||||
if (matchResult.test(a)) {
|
||||
if (isMatchResult(a)) {
|
||||
return a.result
|
||||
}
|
||||
if (matchError.test(a)) {
|
||||
if (isMatchError(a)) {
|
||||
console.info({ passedErrorStack: new Error().stack, error: a.error })
|
||||
throw { error: a.error }
|
||||
}
|
||||
if (matchErrorCode.test(a)) {
|
||||
if (isMatchErrorCode(a)) {
|
||||
const [code, message] = a["error-code"]
|
||||
throw { error: message, code }
|
||||
}
|
||||
return assertNever(a)
|
||||
return assertNever(a as never)
|
||||
}
|
||||
|
||||
const matchSetResult = object({
|
||||
"depends-on": dictionary([string, array(string)])
|
||||
.nullable()
|
||||
.optional(),
|
||||
dependsOn: dictionary([string, array(string)])
|
||||
.nullable()
|
||||
.optional(),
|
||||
signal: literals(
|
||||
const matchSetResult = z.object({
|
||||
"depends-on": z.record(z.string(), z.array(z.string())).nullable().optional(),
|
||||
dependsOn: z.record(z.string(), z.array(z.string())).nullable().optional(),
|
||||
signal: z.enum([
|
||||
"SIGTERM",
|
||||
"SIGHUP",
|
||||
"SIGINT",
|
||||
@@ -150,7 +216,7 @@ const matchSetResult = object({
|
||||
"SIGPWR",
|
||||
"SIGSYS",
|
||||
"SIGINFO",
|
||||
),
|
||||
]),
|
||||
})
|
||||
|
||||
type OldGetConfigRes = {
|
||||
@@ -232,33 +298,29 @@ const asProperty = (x: PackagePropertiesV2): PropertiesReturn =>
|
||||
Object.fromEntries(
|
||||
Object.entries(x).map(([key, value]) => [key, asProperty_(value)]),
|
||||
)
|
||||
const [matchPackageProperties, setMatchPackageProperties] =
|
||||
deferred<PackagePropertiesV2>()
|
||||
const matchPackagePropertyObject: Parser<unknown, PackagePropertyObject> =
|
||||
object({
|
||||
value: matchPackageProperties,
|
||||
type: literal("object"),
|
||||
description: string,
|
||||
})
|
||||
const matchPackagePropertyObject: z.ZodType<PackagePropertyObject> = z.object({
|
||||
value: z.lazy(() => matchPackageProperties),
|
||||
type: z.literal("object"),
|
||||
description: z.string(),
|
||||
})
|
||||
|
||||
const matchPackagePropertyString: Parser<unknown, PackagePropertyString> =
|
||||
object({
|
||||
type: literal("string"),
|
||||
description: string.nullable().optional(),
|
||||
value: string,
|
||||
copyable: boolean.nullable().optional(),
|
||||
qr: boolean.nullable().optional(),
|
||||
masked: boolean.nullable().optional(),
|
||||
})
|
||||
setMatchPackageProperties(
|
||||
dictionary([
|
||||
string,
|
||||
anyOf(matchPackagePropertyObject, matchPackagePropertyString),
|
||||
]),
|
||||
const matchPackagePropertyString: z.ZodType<PackagePropertyString> = z.object({
|
||||
type: z.literal("string"),
|
||||
description: z.string().nullable().optional(),
|
||||
value: z.string(),
|
||||
copyable: z.boolean().nullable().optional(),
|
||||
qr: z.boolean().nullable().optional(),
|
||||
masked: z.boolean().nullable().optional(),
|
||||
})
|
||||
const matchPackageProperties: z.ZodType<PackagePropertiesV2> = z.lazy(() =>
|
||||
z.record(
|
||||
z.string(),
|
||||
z.union([matchPackagePropertyObject, matchPackagePropertyString]),
|
||||
),
|
||||
)
|
||||
|
||||
const matchProperties = object({
|
||||
version: literal(2),
|
||||
const matchProperties = z.object({
|
||||
version: z.literal(2),
|
||||
data: matchPackageProperties,
|
||||
})
|
||||
|
||||
@@ -287,7 +349,6 @@ function convertProperties(
|
||||
}
|
||||
}
|
||||
|
||||
const DEFAULT_REGISTRY = "https://registry.start9.com"
|
||||
export class SystemForEmbassy implements System {
|
||||
private version: ExtendedVersion
|
||||
currentRunning: MainLoop | undefined
|
||||
@@ -303,7 +364,7 @@ export class SystemForEmbassy implements System {
|
||||
})
|
||||
const manifestData = await fs.readFile(manifestLocation, "utf-8")
|
||||
return new SystemForEmbassy(
|
||||
matchManifest.unsafeCast(JSON.parse(manifestData)),
|
||||
matchManifest.parse(JSON.parse(manifestData)),
|
||||
moduleCode,
|
||||
)
|
||||
}
|
||||
@@ -331,6 +392,10 @@ export class SystemForEmbassy implements System {
|
||||
) {
|
||||
this.version.upstream.prerelease = ["alpha"]
|
||||
}
|
||||
|
||||
if (this.manifest.id === "nostr") {
|
||||
this.manifest.id = "nostr-rs-relay"
|
||||
}
|
||||
}
|
||||
|
||||
async init(
|
||||
@@ -385,7 +450,9 @@ export class SystemForEmbassy implements System {
|
||||
delete this.currentRunning
|
||||
if (currentRunning) {
|
||||
await currentRunning.clean({
|
||||
timeout: fromDuration(this.manifest.main["sigterm-timeout"] || "30s"),
|
||||
timeout: fromDuration(
|
||||
(this.manifest.main["sigterm-timeout"] as any) || "30s",
|
||||
),
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -456,6 +523,7 @@ export class SystemForEmbassy implements System {
|
||||
addSsl = {
|
||||
preferredExternalPort: lanPortNum,
|
||||
alpn: { specified: [] },
|
||||
addXForwardedHeaders: false,
|
||||
}
|
||||
}
|
||||
return [
|
||||
@@ -505,6 +573,7 @@ export class SystemForEmbassy implements System {
|
||||
async getActionInput(
|
||||
effects: Effects,
|
||||
actionId: string,
|
||||
_prefill: Record<string, unknown> | null,
|
||||
timeoutMs: number | null,
|
||||
): Promise<T.ActionInput | null> {
|
||||
if (actionId === "config") {
|
||||
@@ -617,7 +686,7 @@ export class SystemForEmbassy implements System {
|
||||
effects: Effects,
|
||||
timeoutMs: number | null,
|
||||
): Promise<void> {
|
||||
const backup = this.manifest.backup.create
|
||||
const backup = this.manifest.backup.create as Procedure
|
||||
if (backup.type === "docker") {
|
||||
const commands = [backup.entrypoint, ...backup.args]
|
||||
const container = await DockerProcedureContainer.of(
|
||||
@@ -650,7 +719,7 @@ export class SystemForEmbassy implements System {
|
||||
encoding: "utf-8",
|
||||
})
|
||||
.catch((_) => null)
|
||||
const restoreBackup = this.manifest.backup.restore
|
||||
const restoreBackup = this.manifest.backup.restore as Procedure
|
||||
if (restoreBackup.type === "docker") {
|
||||
const commands = [restoreBackup.entrypoint, ...restoreBackup.args]
|
||||
const container = await DockerProcedureContainer.of(
|
||||
@@ -683,7 +752,7 @@ export class SystemForEmbassy implements System {
|
||||
effects: Effects,
|
||||
timeoutMs: number | null,
|
||||
): Promise<OldGetConfigRes> {
|
||||
const config = this.manifest.config?.get
|
||||
const config = this.manifest.config?.get as Procedure | undefined
|
||||
if (!config) return { spec: {} }
|
||||
if (config.type === "docker") {
|
||||
const commands = [config.entrypoint, ...config.args]
|
||||
@@ -725,7 +794,7 @@ export class SystemForEmbassy implements System {
|
||||
)
|
||||
await updateConfig(effects, this.manifest, spec, newConfig)
|
||||
await configFile.write(effects, newConfig)
|
||||
const setConfigValue = this.manifest.config?.set
|
||||
const setConfigValue = this.manifest.config?.set as Procedure | undefined
|
||||
if (!setConfigValue) return
|
||||
if (setConfigValue.type === "docker") {
|
||||
const commands = [
|
||||
@@ -740,7 +809,7 @@ export class SystemForEmbassy implements System {
|
||||
this.manifest.volumes,
|
||||
`Set Config - ${commands.join(" ")}`,
|
||||
)
|
||||
const answer = matchSetResult.unsafeCast(
|
||||
const answer = matchSetResult.parse(
|
||||
JSON.parse(
|
||||
(await container.execFail(commands, timeoutMs)).stdout.toString(),
|
||||
),
|
||||
@@ -753,7 +822,7 @@ export class SystemForEmbassy implements System {
|
||||
const method = moduleCode.setConfig
|
||||
if (!method) throw new Error("Expecting that the method setConfig exists")
|
||||
|
||||
const answer = matchSetResult.unsafeCast(
|
||||
const answer = matchSetResult.parse(
|
||||
await method(
|
||||
polyfillEffects(effects, this.manifest),
|
||||
newConfig as U.Config,
|
||||
@@ -782,7 +851,11 @@ export class SystemForEmbassy implements System {
|
||||
const requiredDeps = {
|
||||
...Object.fromEntries(
|
||||
Object.entries(this.manifest.dependencies ?? {})
|
||||
.filter(([k, v]) => v?.requirement.type === "required")
|
||||
.filter(
|
||||
([k, v]) =>
|
||||
(v?.requirement as { type: string } | undefined)?.type ===
|
||||
"required",
|
||||
)
|
||||
.map((x) => [x[0], []]) || [],
|
||||
),
|
||||
}
|
||||
@@ -850,7 +923,7 @@ export class SystemForEmbassy implements System {
|
||||
}
|
||||
|
||||
if (migration) {
|
||||
const [_, procedure] = migration
|
||||
const [_, procedure] = migration as readonly [unknown, Procedure]
|
||||
if (procedure.type === "docker") {
|
||||
const commands = [procedure.entrypoint, ...procedure.args]
|
||||
const container = await DockerProcedureContainer.of(
|
||||
@@ -888,8 +961,10 @@ export class SystemForEmbassy implements System {
|
||||
effects: Effects,
|
||||
timeoutMs: number | null,
|
||||
): Promise<PropertiesReturn> {
|
||||
// TODO BLU-J set the properties ever so often
|
||||
const setConfigValue = this.manifest.properties
|
||||
const setConfigValue = this.manifest.properties as
|
||||
| Procedure
|
||||
| null
|
||||
| undefined
|
||||
if (!setConfigValue) throw new Error("There is no properties")
|
||||
if (setConfigValue.type === "docker") {
|
||||
const commands = [setConfigValue.entrypoint, ...setConfigValue.args]
|
||||
@@ -900,7 +975,7 @@ export class SystemForEmbassy implements System {
|
||||
this.manifest.volumes,
|
||||
`Properties - ${commands.join(" ")}`,
|
||||
)
|
||||
const properties = matchProperties.unsafeCast(
|
||||
const properties = matchProperties.parse(
|
||||
JSON.parse(
|
||||
(await container.execFail(commands, timeoutMs)).stdout.toString(),
|
||||
),
|
||||
@@ -911,7 +986,7 @@ export class SystemForEmbassy implements System {
|
||||
const method = moduleCode.properties
|
||||
if (!method)
|
||||
throw new Error("Expecting that the method properties exists")
|
||||
const properties = matchProperties.unsafeCast(
|
||||
const properties = matchProperties.parse(
|
||||
await method(polyfillEffects(effects, this.manifest)).then(
|
||||
fromReturnType,
|
||||
),
|
||||
@@ -926,7 +1001,8 @@ export class SystemForEmbassy implements System {
|
||||
formData: unknown,
|
||||
timeoutMs: number | null,
|
||||
): Promise<T.ActionResult> {
|
||||
const actionProcedure = this.manifest.actions?.[actionId]?.implementation
|
||||
const actionProcedure = this.manifest.actions?.[actionId]
|
||||
?.implementation as Procedure | undefined
|
||||
const toActionResult = ({
|
||||
message,
|
||||
value,
|
||||
@@ -993,7 +1069,9 @@ export class SystemForEmbassy implements System {
|
||||
oldConfig: unknown,
|
||||
timeoutMs: number | null,
|
||||
): Promise<object> {
|
||||
const actionProcedure = this.manifest.dependencies?.[id]?.config?.check
|
||||
const actionProcedure = this.manifest.dependencies?.[id]?.config?.check as
|
||||
| Procedure
|
||||
| undefined
|
||||
if (!actionProcedure) return { message: "Action not found", value: null }
|
||||
if (actionProcedure.type === "docker") {
|
||||
const commands = [
|
||||
@@ -1036,16 +1114,26 @@ export class SystemForEmbassy implements System {
|
||||
timeoutMs: number | null,
|
||||
): Promise<void> {
|
||||
// TODO: docker
|
||||
await effects.mount({
|
||||
location: `/media/embassy/${id}`,
|
||||
target: {
|
||||
packageId: id,
|
||||
volumeId: "embassy",
|
||||
subpath: null,
|
||||
readonly: true,
|
||||
filetype: "directory",
|
||||
},
|
||||
})
|
||||
const status = await getStatus(effects, { packageId: id }).const()
|
||||
if (!status) return
|
||||
try {
|
||||
await effects.mount({
|
||||
location: `/media/embassy/${id}`,
|
||||
target: {
|
||||
packageId: id,
|
||||
volumeId: "embassy",
|
||||
subpath: null,
|
||||
readonly: true,
|
||||
idmap: [],
|
||||
},
|
||||
})
|
||||
} catch (e) {
|
||||
console.error(
|
||||
`Failed to mount dependency volume for ${id}, skipping autoconfig:`,
|
||||
e,
|
||||
)
|
||||
return
|
||||
}
|
||||
configFile
|
||||
.withPath(`/media/embassy/${id}/config.json`)
|
||||
.read()
|
||||
@@ -1085,40 +1173,50 @@ export class SystemForEmbassy implements System {
|
||||
}
|
||||
}
|
||||
|
||||
const matchPointer = object({
|
||||
type: literal("pointer"),
|
||||
const matchPointer = z.object({
|
||||
type: z.literal("pointer"),
|
||||
})
|
||||
|
||||
const matchPointerPackage = object({
|
||||
subtype: literal("package"),
|
||||
target: literals("tor-key", "tor-address", "lan-address"),
|
||||
"package-id": string,
|
||||
interface: string,
|
||||
const matchPointerPackage = z.object({
|
||||
subtype: z.literal("package"),
|
||||
target: z.enum(["tor-key", "tor-address", "lan-address"]),
|
||||
"package-id": z.string(),
|
||||
interface: z.string(),
|
||||
})
|
||||
const matchPointerConfig = object({
|
||||
subtype: literal("package"),
|
||||
target: literals("config"),
|
||||
"package-id": string,
|
||||
selector: string,
|
||||
multi: boolean,
|
||||
const matchPointerConfig = z.object({
|
||||
subtype: z.literal("package"),
|
||||
target: z.enum(["config"]),
|
||||
"package-id": z.string(),
|
||||
selector: z.string(),
|
||||
multi: z.boolean(),
|
||||
})
|
||||
const matchSpec = object({
|
||||
spec: object,
|
||||
const matchSpec = z.object({
|
||||
spec: z.record(z.string(), z.unknown()),
|
||||
})
|
||||
const matchVariants = object({ variants: dictionary([string, unknown]) })
|
||||
const matchVariants = z.object({ variants: z.record(z.string(), z.unknown()) })
|
||||
function isMatchPointer(v: unknown): v is z.infer<typeof matchPointer> {
|
||||
return matchPointer.safeParse(v).success
|
||||
}
|
||||
function isMatchSpec(v: unknown): v is z.infer<typeof matchSpec> {
|
||||
return matchSpec.safeParse(v).success
|
||||
}
|
||||
function isMatchVariants(v: unknown): v is z.infer<typeof matchVariants> {
|
||||
return matchVariants.safeParse(v).success
|
||||
}
|
||||
function cleanSpecOfPointers<T>(mutSpec: T): T {
|
||||
if (!object.test(mutSpec)) return mutSpec
|
||||
if (typeof mutSpec !== "object" || mutSpec === null) return mutSpec
|
||||
for (const key in mutSpec) {
|
||||
const value = mutSpec[key]
|
||||
if (matchSpec.test(value)) value.spec = cleanSpecOfPointers(value.spec)
|
||||
if (matchVariants.test(value))
|
||||
if (isMatchSpec(value))
|
||||
value.spec = cleanSpecOfPointers(value.spec) as Record<string, unknown>
|
||||
if (isMatchVariants(value))
|
||||
value.variants = Object.fromEntries(
|
||||
Object.entries(value.variants).map(([key, value]) => [
|
||||
key,
|
||||
cleanSpecOfPointers(value),
|
||||
]),
|
||||
)
|
||||
if (!matchPointer.test(value)) continue
|
||||
if (!isMatchPointer(value)) continue
|
||||
delete mutSpec[key]
|
||||
// // if (value.target === )
|
||||
}
|
||||
@@ -1184,6 +1282,11 @@ async function updateConfig(
     if (specValue.target === "config") {
       const jp = require("jsonpath")
       const depId = specValue["package-id"]
+      const depStatus = await getStatus(effects, { packageId: depId }).const()
+      if (!depStatus) {
+        mutConfigValue[key] = null
+        continue
+      }
       await effects.mount({
         location: `/media/embassy/${depId}`,
         target: {
@@ -1191,7 +1294,7 @@ async function updateConfig(
           volumeId: "embassy",
           subpath: null,
           readonly: true,
           filetype: "directory",
           idmap: [],
         },
       })
       const remoteConfig = configFile
@@ -1240,12 +1343,8 @@ async function updateConfig(
           ? ""
           : catchFn(
               () =>
-                (specValue.target === "lan-address"
-                  ? filled.addressInfo!.localHostnames[0] ||
-                    filled.addressInfo!.onionHostnames[0]
-                  : filled.addressInfo!.onionHostnames[0] ||
-                    filled.addressInfo!.localHostnames[0]
-                ).hostname.value,
+                filled.addressInfo!.filter({ kind: "mdns" })!.hostnames[0]
+                  .hostname,
             ) || ""
       mutConfigValue[key] = url
     }
@@ -1268,7 +1367,7 @@ function extractServiceInterfaceId(manifest: Manifest, specInterface: string) {
 }
 async function convertToNewConfig(value: OldGetConfigRes) {
   try {
-    const valueSpec: OldConfigSpec = matchOldConfigSpec.unsafeCast(value.spec)
+    const valueSpec: OldConfigSpec = matchOldConfigSpec.parse(value.spec)
     const spec = transformConfigSpec(valueSpec)
     if (!value.config) return { spec, config: null }
     const config = transformOldConfigToNew(valueSpec, value.config) ?? null
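In the `target === "config"` branch above, the dependency's filesystem is mounted read-only and its config file is queried with the jsonpath package using the pointer spec's selector. A hedged sketch of what that lookup looks like; the config object and selector are invented for illustration, only jp.query is the actual jsonpath API:

const jp = require("jsonpath")

// Hypothetical dependency config and selector.
const remoteConfig = { rpc: { enable: true, username: "lnd" } }
const selector = "$.rpc.username"

// jp.query always returns an array of matches; the pointer spec's `multi`
// flag presumably decides whether the whole array or only the first match
// is written into the consumer's config value.
const matches = jp.query(remoteConfig, selector)
const value = matches.length > 1 ? matches : matches[0] // => "lnd"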
@@ -4,9 +4,9 @@ import synapseManifest from "./__fixtures__/synapseManifest"

 describe("matchManifest", () => {
   test("gittea", () => {
-    matchManifest.unsafeCast(giteaManifest)
+    matchManifest.parse(giteaManifest)
   })
   test("synapse", () => {
-    matchManifest.unsafeCast(synapseManifest)
+    matchManifest.parse(synapseManifest)
   })
 })
@@ -1,126 +1,123 @@
-import {
-  object,
-  literal,
-  string,
-  array,
-  boolean,
-  dictionary,
-  literals,
-  number,
-  unknown,
-  some,
-  every,
-} from "ts-matches"
+import { z } from "@start9labs/start-sdk"
 import { matchVolume } from "./matchVolume"
 import { matchDockerProcedure } from "../../../Models/DockerProcedure"

-const matchJsProcedure = object({
-  type: literal("script"),
-  args: array(unknown).nullable().optional().defaultTo([]),
+const matchJsProcedure = z.object({
+  type: z.literal("script"),
+  args: z.array(z.unknown()).nullable().optional().default([]),
 })

-const matchProcedure = some(matchDockerProcedure, matchJsProcedure)
-export type Procedure = typeof matchProcedure._TYPE
+const matchProcedure = z.union([matchDockerProcedure, matchJsProcedure])
+export type Procedure = z.infer<typeof matchProcedure>

-const matchAction = object({
-  name: string,
-  description: string,
-  warning: string.nullable().optional(),
+const healthCheckFields = {
+  name: z.string(),
+  "success-message": z.string().nullable().optional(),
+}
+
+const matchAction = z.object({
+  name: z.string(),
+  description: z.string(),
+  warning: z.string().nullable().optional(),
   implementation: matchProcedure,
-  "allowed-statuses": array(literals("running", "stopped")),
-  "input-spec": unknown.nullable().optional(),
+  "allowed-statuses": z.array(z.enum(["running", "stopped"])),
+  "input-spec": z.unknown().nullable().optional(),
 })
export const matchManifest = object({
|
||||
id: string,
|
||||
title: string,
|
||||
version: string,
|
||||
export const matchManifest = z.object({
|
||||
id: z.string(),
|
||||
title: z.string(),
|
||||
version: z.string(),
|
||||
main: matchDockerProcedure,
|
||||
assets: object({
|
||||
assets: string.nullable().optional(),
|
||||
scripts: string.nullable().optional(),
|
||||
})
|
||||
assets: z
|
||||
.object({
|
||||
assets: z.string().nullable().optional(),
|
||||
scripts: z.string().nullable().optional(),
|
||||
})
|
||||
.nullable()
|
||||
.optional(),
|
||||
"health-checks": dictionary([
|
||||
string,
|
||||
every(
|
||||
matchProcedure,
|
||||
object({
|
||||
name: string,
|
||||
["success-message"]: string.nullable().optional(),
|
||||
}),
|
||||
),
|
||||
]),
|
||||
config: object({
|
||||
get: matchProcedure,
|
||||
set: matchProcedure,
|
||||
})
|
||||
"health-checks": z.record(
|
||||
z.string(),
|
||||
z.union([
|
||||
matchDockerProcedure.extend(healthCheckFields),
|
||||
matchJsProcedure.extend(healthCheckFields),
|
||||
]),
|
||||
),
|
||||
config: z
|
||||
.object({
|
||||
get: matchProcedure,
|
||||
set: matchProcedure,
|
||||
})
|
||||
.nullable()
|
||||
.optional(),
|
||||
properties: matchProcedure.nullable().optional(),
|
||||
volumes: dictionary([string, matchVolume]),
|
||||
interfaces: dictionary([
|
||||
string,
|
||||
object({
|
||||
name: string,
|
||||
description: string,
|
||||
"tor-config": object({
|
||||
"port-mapping": dictionary([string, string]),
|
||||
})
|
||||
volumes: z.record(z.string(), matchVolume),
|
||||
interfaces: z.record(
|
||||
z.string(),
|
||||
z.object({
|
||||
name: z.string(),
|
||||
description: z.string(),
|
||||
"tor-config": z
|
||||
.object({
|
||||
"port-mapping": z.record(z.string(), z.string()),
|
||||
})
|
||||
.nullable()
|
||||
.optional(),
|
||||
"lan-config": dictionary([
|
||||
string,
|
||||
object({
|
||||
ssl: boolean,
|
||||
internal: number,
|
||||
}),
|
||||
])
|
||||
"lan-config": z
|
||||
.record(
|
||||
z.string(),
|
||||
z.object({
|
||||
ssl: z.boolean(),
|
||||
internal: z.number(),
|
||||
}),
|
||||
)
|
||||
.nullable()
|
||||
.optional(),
|
||||
ui: boolean,
|
||||
protocols: array(string),
|
||||
ui: z.boolean(),
|
||||
protocols: z.array(z.string()),
|
||||
}),
|
||||
]),
|
||||
backup: object({
|
||||
),
|
||||
backup: z.object({
|
||||
create: matchProcedure,
|
||||
restore: matchProcedure,
|
||||
}),
|
||||
migrations: object({
|
||||
to: dictionary([string, matchProcedure]),
|
||||
from: dictionary([string, matchProcedure]),
|
||||
})
|
||||
migrations: z
|
||||
.object({
|
||||
to: z.record(z.string(), matchProcedure),
|
||||
from: z.record(z.string(), matchProcedure),
|
||||
})
|
||||
.nullable()
|
||||
.optional(),
|
||||
dependencies: dictionary([
|
||||
string,
|
||||
object({
|
||||
version: string,
|
||||
requirement: some(
|
||||
object({
|
||||
type: literal("opt-in"),
|
||||
how: string,
|
||||
}),
|
||||
object({
|
||||
type: literal("opt-out"),
|
||||
how: string,
|
||||
}),
|
||||
object({
|
||||
type: literal("required"),
|
||||
}),
|
||||
),
|
||||
description: string.nullable().optional(),
|
||||
config: object({
|
||||
check: matchProcedure,
|
||||
"auto-configure": matchProcedure,
|
||||
dependencies: z.record(
|
||||
z.string(),
|
||||
z
|
||||
.object({
|
||||
version: z.string(),
|
||||
requirement: z.union([
|
||||
z.object({
|
||||
type: z.literal("opt-in"),
|
||||
how: z.string(),
|
||||
}),
|
||||
z.object({
|
||||
type: z.literal("opt-out"),
|
||||
how: z.string(),
|
||||
}),
|
||||
z.object({
|
||||
type: z.literal("required"),
|
||||
}),
|
||||
]),
|
||||
description: z.string().nullable().optional(),
|
||||
config: z
|
||||
.object({
|
||||
check: matchProcedure,
|
||||
"auto-configure": matchProcedure,
|
||||
})
|
||||
.nullable()
|
||||
.optional(),
|
||||
})
|
||||
.nullable()
|
||||
.optional(),
|
||||
})
|
||||
.nullable()
|
||||
.optional(),
|
||||
]),
|
||||
),
|
||||
|
||||
actions: dictionary([string, matchAction]),
|
||||
actions: z.record(z.string(), matchAction),
|
||||
})
|
||||
export type Manifest = typeof matchManifest._TYPE
|
||||
export type Manifest = z.infer<typeof matchManifest>
|
||||
|
||||
@@ -1,32 +1,32 @@
-import { object, literal, string, boolean, some } from "ts-matches"
+import { z } from "@start9labs/start-sdk"

-const matchDataVolume = object({
-  type: literal("data"),
-  readonly: boolean.optional(),
+const matchDataVolume = z.object({
+  type: z.literal("data"),
+  readonly: z.boolean().optional(),
 })
-const matchAssetVolume = object({
-  type: literal("assets"),
+const matchAssetVolume = z.object({
+  type: z.literal("assets"),
 })
-const matchPointerVolume = object({
-  type: literal("pointer"),
-  "package-id": string,
-  "volume-id": string,
-  path: string,
-  readonly: boolean,
+const matchPointerVolume = z.object({
+  type: z.literal("pointer"),
+  "package-id": z.string(),
+  "volume-id": z.string(),
+  path: z.string(),
+  readonly: z.boolean(),
 })
-const matchCertificateVolume = object({
-  type: literal("certificate"),
-  "interface-id": string,
+const matchCertificateVolume = z.object({
+  type: z.literal("certificate"),
+  "interface-id": z.string(),
 })
-const matchBackupVolume = object({
-  type: literal("backup"),
-  readonly: boolean,
+const matchBackupVolume = z.object({
+  type: z.literal("backup"),
+  readonly: z.boolean(),
 })
-export const matchVolume = some(
+export const matchVolume = z.union([
   matchDataVolume,
   matchAssetVolume,
   matchPointerVolume,
   matchCertificateVolume,
   matchBackupVolume,
-)
-export type Volume = typeof matchVolume._TYPE
+])
+export type Volume = z.infer<typeof matchVolume>
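matchVolume above keeps a plain z.union, a direct translation of the original some(...) combinator. Because every variant carries a literal type tag, z.discriminatedUnion would also fit and reports errors against the single variant selected by the tag; a sketch of that alternative (not what the diff does), trimmed to three variants:

import { z } from "zod"

// Alternative shape only: discriminatedUnion dispatches on the shared
// literal "type" field instead of trying every member like z.union does.
const matchVolumeAlt = z.discriminatedUnion("type", [
  z.object({ type: z.literal("data"), readonly: z.boolean().optional() }),
  z.object({ type: z.literal("assets") }),
  z.object({ type: z.literal("backup"), readonly: z.boolean() }),
])

type VolumeAlt = z.infer<typeof matchVolumeAlt>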
@@ -12,43 +12,43 @@ import nostrConfig2 from "./__fixtures__/nostrConfig2"
|
||||
|
||||
describe("transformConfigSpec", () => {
|
||||
test("matchOldConfigSpec(embassyPages.homepage.variants[web-page])", () => {
|
||||
matchOldConfigSpec.unsafeCast(
|
||||
matchOldConfigSpec.parse(
|
||||
fixtureEmbassyPagesConfig.homepage.variants["web-page"],
|
||||
)
|
||||
})
|
||||
test("matchOldConfigSpec(embassyPages)", () => {
|
||||
matchOldConfigSpec.unsafeCast(fixtureEmbassyPagesConfig)
|
||||
matchOldConfigSpec.parse(fixtureEmbassyPagesConfig)
|
||||
})
|
||||
test("transformConfigSpec(embassyPages)", () => {
|
||||
const spec = matchOldConfigSpec.unsafeCast(fixtureEmbassyPagesConfig)
|
||||
const spec = matchOldConfigSpec.parse(fixtureEmbassyPagesConfig)
|
||||
expect(transformConfigSpec(spec)).toMatchSnapshot()
|
||||
})
|
||||
|
||||
test("matchOldConfigSpec(RTL.nodes)", () => {
|
||||
matchOldValueSpecList.unsafeCast(fixtureRTLConfig.nodes)
|
||||
matchOldValueSpecList.parse(fixtureRTLConfig.nodes)
|
||||
})
|
||||
test("matchOldConfigSpec(RTL)", () => {
|
||||
matchOldConfigSpec.unsafeCast(fixtureRTLConfig)
|
||||
matchOldConfigSpec.parse(fixtureRTLConfig)
|
||||
})
|
||||
test("transformConfigSpec(RTL)", () => {
|
||||
const spec = matchOldConfigSpec.unsafeCast(fixtureRTLConfig)
|
||||
const spec = matchOldConfigSpec.parse(fixtureRTLConfig)
|
||||
expect(transformConfigSpec(spec)).toMatchSnapshot()
|
||||
})
|
||||
|
||||
test("transformConfigSpec(searNXG)", () => {
|
||||
const spec = matchOldConfigSpec.unsafeCast(searNXG)
|
||||
const spec = matchOldConfigSpec.parse(searNXG)
|
||||
expect(transformConfigSpec(spec)).toMatchSnapshot()
|
||||
})
|
||||
test("transformConfigSpec(bitcoind)", () => {
|
||||
const spec = matchOldConfigSpec.unsafeCast(bitcoind)
|
||||
const spec = matchOldConfigSpec.parse(bitcoind)
|
||||
expect(transformConfigSpec(spec)).toMatchSnapshot()
|
||||
})
|
||||
test("transformConfigSpec(nostr)", () => {
|
||||
const spec = matchOldConfigSpec.unsafeCast(nostr)
|
||||
const spec = matchOldConfigSpec.parse(nostr)
|
||||
expect(transformConfigSpec(spec)).toMatchSnapshot()
|
||||
})
|
||||
test("transformConfigSpec(nostr2)", () => {
|
||||
const spec = matchOldConfigSpec.unsafeCast(nostrConfig2)
|
||||
const spec = matchOldConfigSpec.parse(nostrConfig2)
|
||||
expect(transformConfigSpec(spec)).toMatchSnapshot()
|
||||
})
|
||||
})
|
||||
|
||||
@@ -1,19 +1,4 @@
|
||||
import { IST } from "@start9labs/start-sdk"
|
||||
import {
|
||||
dictionary,
|
||||
object,
|
||||
anyOf,
|
||||
string,
|
||||
literals,
|
||||
array,
|
||||
number,
|
||||
boolean,
|
||||
Parser,
|
||||
deferred,
|
||||
every,
|
||||
nill,
|
||||
literal,
|
||||
} from "ts-matches"
|
||||
import { IST, z } from "@start9labs/start-sdk"
|
||||
|
||||
export function transformConfigSpec(oldSpec: OldConfigSpec): IST.InputSpec {
|
||||
return Object.entries(oldSpec).reduce((inputSpec, [key, oldVal]) => {
|
||||
@@ -82,7 +67,7 @@ export function transformConfigSpec(oldSpec: OldConfigSpec): IST.InputSpec {
|
||||
name: oldVal.name,
|
||||
description: oldVal.description || null,
|
||||
warning: oldVal.warning || null,
|
||||
spec: transformConfigSpec(matchOldConfigSpec.unsafeCast(oldVal.spec)),
|
||||
spec: transformConfigSpec(matchOldConfigSpec.parse(oldVal.spec)),
|
||||
}
|
||||
} else if (oldVal.type === "string") {
|
||||
newVal = {
|
||||
@@ -121,7 +106,7 @@ export function transformConfigSpec(oldSpec: OldConfigSpec): IST.InputSpec {
|
||||
...obj,
|
||||
[id]: {
|
||||
name: oldVal.tag["variant-names"][id] || id,
|
||||
spec: transformConfigSpec(matchOldConfigSpec.unsafeCast(spec)),
|
||||
spec: transformConfigSpec(matchOldConfigSpec.parse(spec)),
|
||||
},
|
||||
}),
|
||||
{} as Record<string, { name: string; spec: IST.InputSpec }>,
|
||||
@@ -153,7 +138,7 @@ export function transformOldConfigToNew(
|
||||
|
||||
if (isObject(val)) {
|
||||
newVal = transformOldConfigToNew(
|
||||
matchOldConfigSpec.unsafeCast(val.spec),
|
||||
matchOldConfigSpec.parse(val.spec),
|
||||
config[key],
|
||||
)
|
||||
}
|
||||
@@ -172,7 +157,7 @@ export function transformOldConfigToNew(
|
||||
newVal = {
|
||||
selection,
|
||||
value: transformOldConfigToNew(
|
||||
matchOldConfigSpec.unsafeCast(val.variants[selection]),
|
||||
matchOldConfigSpec.parse(val.variants[selection]),
|
||||
config[key],
|
||||
),
|
||||
}
|
||||
@@ -183,10 +168,7 @@ export function transformOldConfigToNew(
|
||||
|
||||
if (isObjectList(val)) {
|
||||
newVal = (config[key] as object[]).map((obj) =>
|
||||
transformOldConfigToNew(
|
||||
matchOldConfigSpec.unsafeCast(val.spec.spec),
|
||||
obj,
|
||||
),
|
||||
transformOldConfigToNew(matchOldConfigSpec.parse(val.spec.spec), obj),
|
||||
)
|
||||
} else if (isUnionList(val)) return obj
|
||||
}
|
||||
@@ -212,7 +194,7 @@ export function transformNewConfigToOld(
|
||||
|
||||
if (isObject(val)) {
|
||||
newVal = transformNewConfigToOld(
|
||||
matchOldConfigSpec.unsafeCast(val.spec),
|
||||
matchOldConfigSpec.parse(val.spec),
|
||||
config[key],
|
||||
)
|
||||
}
|
||||
@@ -221,7 +203,7 @@ export function transformNewConfigToOld(
|
||||
newVal = {
|
||||
[val.tag.id]: config[key].selection,
|
||||
...transformNewConfigToOld(
|
||||
matchOldConfigSpec.unsafeCast(val.variants[config[key].selection]),
|
||||
matchOldConfigSpec.parse(val.variants[config[key].selection]),
|
||||
config[key].value,
|
||||
),
|
||||
}
|
||||
@@ -230,10 +212,7 @@ export function transformNewConfigToOld(
|
||||
if (isList(val)) {
|
||||
if (isObjectList(val)) {
|
||||
newVal = (config[key] as object[]).map((obj) =>
|
||||
transformNewConfigToOld(
|
||||
matchOldConfigSpec.unsafeCast(val.spec.spec),
|
||||
obj,
|
||||
),
|
||||
transformNewConfigToOld(matchOldConfigSpec.parse(val.spec.spec), obj),
|
||||
)
|
||||
} else if (isUnionList(val)) return obj
|
||||
}
|
||||
@@ -337,9 +316,7 @@ function getListSpec(
|
||||
default: oldVal.default as Record<string, unknown>[],
|
||||
spec: {
|
||||
type: "object",
|
||||
spec: transformConfigSpec(
|
||||
matchOldConfigSpec.unsafeCast(oldVal.spec.spec),
|
||||
),
|
||||
spec: transformConfigSpec(matchOldConfigSpec.parse(oldVal.spec.spec)),
|
||||
uniqueBy: oldVal.spec["unique-by"] || null,
|
||||
displayAs: oldVal.spec["display-as"] || null,
|
||||
},
|
||||
@@ -393,211 +370,281 @@ function isUnionList(
|
||||
}
|
||||
|
||||
export type OldConfigSpec = Record<string, OldValueSpec>
|
||||
const [_matchOldConfigSpec, setMatchOldConfigSpec] = deferred<unknown>()
|
||||
export const matchOldConfigSpec = _matchOldConfigSpec as Parser<
|
||||
unknown,
|
||||
OldConfigSpec
|
||||
>
|
||||
export const matchOldDefaultString = anyOf(
|
||||
string,
|
||||
object({ charset: string, len: number }),
|
||||
export const matchOldConfigSpec: z.ZodType<OldConfigSpec> = z.lazy(() =>
|
||||
z.record(z.string(), matchOldValueSpec),
|
||||
)
|
||||
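Here the deferred()/setMatchOldConfigSpec pair from ts-matches becomes z.lazy with an explicit z.ZodType annotation, which is the usual zod idiom for a schema that refers to itself (OldConfigSpec maps keys to value specs, and object- and union-valued specs nest further OldConfigSpecs). A self-contained sketch of the idiom with hypothetical names:

import { z } from "zod"

// Hypothetical recursive shape, echoing OldConfigSpec: a record whose
// values may nest another record of the same shape under `spec`.
type NestedSpec = Record<string, { name: string; spec?: NestedSpec }>

// The explicit z.ZodType<NestedSpec> annotation breaks the type-inference
// cycle; z.lazy defers building the schema until it is first used.
const matchNestedSpec: z.ZodType<NestedSpec> = z.lazy(() =>
  z.record(
    z.string(),
    z.object({
      name: z.string(),
      spec: matchNestedSpec.optional(),
    }),
  ),
)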
type OldDefaultString = typeof matchOldDefaultString._TYPE
|
||||
export const matchOldDefaultString = z.union([
|
||||
z.string(),
|
||||
z.object({ charset: z.string(), len: z.number() }),
|
||||
])
|
||||
type OldDefaultString = z.infer<typeof matchOldDefaultString>
|
||||
|
||||
export const matchOldValueSpecString = object({
|
||||
type: literals("string"),
|
||||
name: string,
|
||||
masked: boolean.nullable().optional(),
|
||||
copyable: boolean.nullable().optional(),
|
||||
nullable: boolean.nullable().optional(),
|
||||
placeholder: string.nullable().optional(),
|
||||
pattern: string.nullable().optional(),
|
||||
"pattern-description": string.nullable().optional(),
|
||||
export const matchOldValueSpecString = z.object({
|
||||
type: z.enum(["string"]),
|
||||
name: z.string(),
|
||||
masked: z.boolean().nullable().optional(),
|
||||
copyable: z.boolean().nullable().optional(),
|
||||
nullable: z.boolean().nullable().optional(),
|
||||
placeholder: z.string().nullable().optional(),
|
||||
pattern: z.string().nullable().optional(),
|
||||
"pattern-description": z.string().nullable().optional(),
|
||||
default: matchOldDefaultString.nullable().optional(),
|
||||
textarea: boolean.nullable().optional(),
|
||||
description: string.nullable().optional(),
|
||||
warning: string.nullable().optional(),
|
||||
textarea: z.boolean().nullable().optional(),
|
||||
description: z.string().nullable().optional(),
|
||||
warning: z.string().nullable().optional(),
|
||||
})
|
||||
|
||||
export const matchOldValueSpecNumber = object({
|
||||
type: literals("number"),
|
||||
nullable: boolean,
|
||||
name: string,
|
||||
range: string,
|
||||
integral: boolean,
|
||||
default: number.nullable().optional(),
|
||||
description: string.nullable().optional(),
|
||||
warning: string.nullable().optional(),
|
||||
units: string.nullable().optional(),
|
||||
placeholder: anyOf(number, string).nullable().optional(),
|
||||
export const matchOldValueSpecNumber = z.object({
|
||||
type: z.enum(["number"]),
|
||||
nullable: z.boolean(),
|
||||
name: z.string(),
|
||||
range: z.string(),
|
||||
integral: z.boolean(),
|
||||
default: z.number().nullable().optional(),
|
||||
description: z.string().nullable().optional(),
|
||||
warning: z.string().nullable().optional(),
|
||||
units: z.string().nullable().optional(),
|
||||
placeholder: z.union([z.number(), z.string()]).nullable().optional(),
|
||||
})
|
||||
type OldValueSpecNumber = typeof matchOldValueSpecNumber._TYPE
|
||||
type OldValueSpecNumber = z.infer<typeof matchOldValueSpecNumber>
|
||||
|
||||
export const matchOldValueSpecBoolean = object({
|
||||
type: literals("boolean"),
|
||||
default: boolean,
|
||||
name: string,
|
||||
description: string.nullable().optional(),
|
||||
warning: string.nullable().optional(),
|
||||
export const matchOldValueSpecBoolean = z.object({
|
||||
type: z.enum(["boolean"]),
|
||||
default: z.boolean(),
|
||||
name: z.string(),
|
||||
description: z.string().nullable().optional(),
|
||||
warning: z.string().nullable().optional(),
|
||||
})
|
||||
type OldValueSpecBoolean = typeof matchOldValueSpecBoolean._TYPE
|
||||
type OldValueSpecBoolean = z.infer<typeof matchOldValueSpecBoolean>
|
||||
|
||||
const matchOldValueSpecObject = object({
|
||||
type: literals("object"),
|
||||
spec: _matchOldConfigSpec,
|
||||
name: string,
|
||||
description: string.nullable().optional(),
|
||||
warning: string.nullable().optional(),
|
||||
type OldValueSpecObject = {
|
||||
type: "object"
|
||||
spec: OldConfigSpec
|
||||
name: string
|
||||
description?: string | null
|
||||
warning?: string | null
|
||||
}
|
||||
const matchOldValueSpecObject: z.ZodType<OldValueSpecObject> = z.object({
|
||||
type: z.enum(["object"]),
|
||||
spec: z.lazy(() => matchOldConfigSpec),
|
||||
name: z.string(),
|
||||
description: z.string().nullable().optional(),
|
||||
warning: z.string().nullable().optional(),
|
||||
})
|
||||
type OldValueSpecObject = typeof matchOldValueSpecObject._TYPE
|
||||
|
||||
const matchOldValueSpecEnum = object({
|
||||
values: array(string),
|
||||
"value-names": dictionary([string, string]),
|
||||
type: literals("enum"),
|
||||
default: string,
|
||||
name: string,
|
||||
description: string.nullable().optional(),
|
||||
warning: string.nullable().optional(),
|
||||
const matchOldValueSpecEnum = z.object({
|
||||
values: z.array(z.string()),
|
||||
"value-names": z.record(z.string(), z.string()),
|
||||
type: z.enum(["enum"]),
|
||||
default: z.string(),
|
||||
name: z.string(),
|
||||
description: z.string().nullable().optional(),
|
||||
warning: z.string().nullable().optional(),
|
||||
})
|
||||
type OldValueSpecEnum = typeof matchOldValueSpecEnum._TYPE
|
||||
type OldValueSpecEnum = z.infer<typeof matchOldValueSpecEnum>
|
||||
|
||||
const matchOldUnionTagSpec = object({
|
||||
id: string, // The name of the field containing one of the union variants
|
||||
"variant-names": dictionary([string, string]), // The name of each variant
|
||||
name: string,
|
||||
description: string.nullable().optional(),
|
||||
warning: string.nullable().optional(),
|
||||
const matchOldUnionTagSpec = z.object({
|
||||
id: z.string(), // The name of the field containing one of the union variants
|
||||
"variant-names": z.record(z.string(), z.string()), // The name of each variant
|
||||
name: z.string(),
|
||||
description: z.string().nullable().optional(),
|
||||
warning: z.string().nullable().optional(),
|
||||
})
|
||||
const matchOldValueSpecUnion = object({
|
||||
type: literals("union"),
|
||||
type OldValueSpecUnion = {
|
||||
type: "union"
|
||||
tag: z.infer<typeof matchOldUnionTagSpec>
|
||||
variants: Record<string, OldConfigSpec>
|
||||
default: string
|
||||
}
|
||||
const matchOldValueSpecUnion: z.ZodType<OldValueSpecUnion> = z.object({
|
||||
type: z.enum(["union"]),
|
||||
tag: matchOldUnionTagSpec,
|
||||
variants: dictionary([string, _matchOldConfigSpec]),
|
||||
default: string,
|
||||
variants: z.record(
|
||||
z.string(),
|
||||
z.lazy(() => matchOldConfigSpec),
|
||||
),
|
||||
default: z.string(),
|
||||
})
|
||||
type OldValueSpecUnion = typeof matchOldValueSpecUnion._TYPE
|
||||
|
||||
const [matchOldUniqueBy, setOldUniqueBy] = deferred<OldUniqueBy>()
|
||||
type OldUniqueBy =
|
||||
| null
|
||||
| string
|
||||
| { any: OldUniqueBy[] }
|
||||
| { all: OldUniqueBy[] }
|
||||
|
||||
setOldUniqueBy(
|
||||
anyOf(
|
||||
nill,
|
||||
string,
|
||||
object({ any: array(matchOldUniqueBy) }),
|
||||
object({ all: array(matchOldUniqueBy) }),
|
||||
),
|
||||
const matchOldUniqueBy: z.ZodType<OldUniqueBy> = z.lazy(() =>
|
||||
z.union([
|
||||
z.null(),
|
||||
z.string(),
|
||||
z.object({ any: z.array(matchOldUniqueBy) }),
|
||||
z.object({ all: z.array(matchOldUniqueBy) }),
|
||||
]),
|
||||
)
|
||||
|
||||
const matchOldListValueSpecObject = object({
|
||||
spec: _matchOldConfigSpec, // this is a mapped type of the config object at this level, replacing the object's values with specs on those values
|
||||
"unique-by": matchOldUniqueBy.nullable().optional(), // indicates whether duplicates can be permitted in the list
|
||||
"display-as": string.nullable().optional(), // this should be a handlebars template which can make use of the entire config which corresponds to 'spec'
|
||||
})
|
||||
const matchOldListValueSpecUnion = object({
|
||||
type OldListValueSpecObject = {
|
||||
spec: OldConfigSpec
|
||||
"unique-by"?: OldUniqueBy | null
|
||||
"display-as"?: string | null
|
||||
}
|
||||
const matchOldListValueSpecObject: z.ZodType<OldListValueSpecObject> = z.object(
|
||||
{
|
||||
spec: z.lazy(() => matchOldConfigSpec), // this is a mapped type of the config object at this level, replacing the object's values with specs on those values
|
||||
"unique-by": matchOldUniqueBy.nullable().optional(), // indicates whether duplicates can be permitted in the list
|
||||
"display-as": z.string().nullable().optional(), // this should be a handlebars template which can make use of the entire config which corresponds to 'spec'
|
||||
},
|
||||
)
|
||||
type OldListValueSpecUnion = {
|
||||
"unique-by"?: OldUniqueBy | null
|
||||
"display-as"?: string | null
|
||||
tag: z.infer<typeof matchOldUnionTagSpec>
|
||||
variants: Record<string, OldConfigSpec>
|
||||
}
|
||||
const matchOldListValueSpecUnion: z.ZodType<OldListValueSpecUnion> = z.object({
|
||||
"unique-by": matchOldUniqueBy.nullable().optional(),
|
||||
"display-as": string.nullable().optional(),
|
||||
"display-as": z.string().nullable().optional(),
|
||||
tag: matchOldUnionTagSpec,
|
||||
variants: dictionary([string, _matchOldConfigSpec]),
|
||||
variants: z.record(
|
||||
z.string(),
|
||||
z.lazy(() => matchOldConfigSpec),
|
||||
),
|
||||
})
|
||||
const matchOldListValueSpecString = object({
|
||||
masked: boolean.nullable().optional(),
|
||||
copyable: boolean.nullable().optional(),
|
||||
pattern: string.nullable().optional(),
|
||||
"pattern-description": string.nullable().optional(),
|
||||
placeholder: string.nullable().optional(),
|
||||
const matchOldListValueSpecString = z.object({
|
||||
masked: z.boolean().nullable().optional(),
|
||||
copyable: z.boolean().nullable().optional(),
|
||||
pattern: z.string().nullable().optional(),
|
||||
"pattern-description": z.string().nullable().optional(),
|
||||
placeholder: z.string().nullable().optional(),
|
||||
})
|
||||
|
||||
const matchOldListValueSpecEnum = object({
|
||||
values: array(string),
|
||||
"value-names": dictionary([string, string]),
|
||||
const matchOldListValueSpecEnum = z.object({
|
||||
values: z.array(z.string()),
|
||||
"value-names": z.record(z.string(), z.string()),
|
||||
})
|
||||
const matchOldListValueSpecNumber = object({
|
||||
range: string,
|
||||
integral: boolean,
|
||||
units: string.nullable().optional(),
|
||||
placeholder: anyOf(number, string).nullable().optional(),
|
||||
const matchOldListValueSpecNumber = z.object({
|
||||
range: z.string(),
|
||||
integral: z.boolean(),
|
||||
units: z.string().nullable().optional(),
|
||||
placeholder: z.union([z.number(), z.string()]).nullable().optional(),
|
||||
})
|
||||
|
||||
type OldValueSpecListBase = {
|
||||
type: "list"
|
||||
range: string
|
||||
default: string[] | number[] | OldDefaultString[] | Record<string, unknown>[]
|
||||
name: string
|
||||
description?: string | null
|
||||
warning?: string | null
|
||||
}
|
||||
|
||||
type OldValueSpecList = OldValueSpecListBase &
|
||||
(
|
||||
| { subtype: "string"; spec: z.infer<typeof matchOldListValueSpecString> }
|
||||
| { subtype: "enum"; spec: z.infer<typeof matchOldListValueSpecEnum> }
|
||||
| { subtype: "object"; spec: OldListValueSpecObject }
|
||||
| { subtype: "number"; spec: z.infer<typeof matchOldListValueSpecNumber> }
|
||||
| { subtype: "union"; spec: OldListValueSpecUnion }
|
||||
)
|
||||
|
||||
// represents a spec for a list
|
||||
export const matchOldValueSpecList = every(
|
||||
object({
|
||||
type: literals("list"),
|
||||
range: string, // '[0,1]' (inclusive) OR '[0,*)' (right unbounded), normal math rules
|
||||
default: anyOf(
|
||||
array(string),
|
||||
array(number),
|
||||
array(matchOldDefaultString),
|
||||
array(object),
|
||||
),
|
||||
name: string,
|
||||
description: string.nullable().optional(),
|
||||
warning: string.nullable().optional(),
|
||||
}),
|
||||
anyOf(
|
||||
object({
|
||||
subtype: literals("string"),
|
||||
spec: matchOldListValueSpecString,
|
||||
export const matchOldValueSpecList: z.ZodType<OldValueSpecList> =
|
||||
z.intersection(
|
||||
z.object({
|
||||
type: z.enum(["list"]),
|
||||
range: z.string(), // '[0,1]' (inclusive) OR '[0,*)' (right unbounded), normal math rules
|
||||
default: z.union([
|
||||
z.array(z.string()),
|
||||
z.array(z.number()),
|
||||
z.array(matchOldDefaultString),
|
||||
z.array(z.object({}).passthrough()),
|
||||
]),
|
||||
name: z.string(),
|
||||
description: z.string().nullable().optional(),
|
||||
warning: z.string().nullable().optional(),
|
||||
}),
|
||||
object({
|
||||
subtype: literals("enum"),
|
||||
spec: matchOldListValueSpecEnum,
|
||||
}),
|
||||
object({
|
||||
subtype: literals("object"),
|
||||
spec: matchOldListValueSpecObject,
|
||||
}),
|
||||
object({
|
||||
subtype: literals("number"),
|
||||
spec: matchOldListValueSpecNumber,
|
||||
}),
|
||||
object({
|
||||
subtype: literals("union"),
|
||||
spec: matchOldListValueSpecUnion,
|
||||
}),
|
||||
),
|
||||
)
|
||||
type OldValueSpecList = typeof matchOldValueSpecList._TYPE
|
||||
z.union([
|
||||
z.object({
|
||||
subtype: z.enum(["string"]),
|
||||
spec: matchOldListValueSpecString,
|
||||
}),
|
||||
z.object({
|
||||
subtype: z.enum(["enum"]),
|
||||
spec: matchOldListValueSpecEnum,
|
||||
}),
|
||||
z.object({
|
||||
subtype: z.enum(["object"]),
|
||||
spec: matchOldListValueSpecObject,
|
||||
}),
|
||||
z.object({
|
||||
subtype: z.enum(["number"]),
|
||||
spec: matchOldListValueSpecNumber,
|
||||
}),
|
||||
z.object({
|
||||
subtype: z.enum(["union"]),
|
||||
spec: matchOldListValueSpecUnion,
|
||||
}),
|
||||
]),
|
||||
) as unknown as z.ZodType<OldValueSpecList>
|
||||
|
||||
const matchOldValueSpecPointer = every(
|
||||
object({
|
||||
type: literal("pointer"),
|
||||
}),
|
||||
anyOf(
|
||||
object({
|
||||
subtype: literal("package"),
|
||||
target: literals("tor-key", "tor-address", "lan-address"),
|
||||
"package-id": string,
|
||||
interface: string,
|
||||
}),
|
||||
object({
|
||||
subtype: literal("package"),
|
||||
target: literals("config"),
|
||||
"package-id": string,
|
||||
selector: string,
|
||||
multi: boolean,
|
||||
}),
|
||||
),
|
||||
type OldValueSpecPointer = {
|
||||
type: "pointer"
|
||||
} & (
|
||||
| {
|
||||
subtype: "package"
|
||||
target: "tor-key" | "tor-address" | "lan-address"
|
||||
"package-id": string
|
||||
interface: string
|
||||
}
|
||||
| {
|
||||
subtype: "package"
|
||||
target: "config"
|
||||
"package-id": string
|
||||
selector: string
|
||||
multi: boolean
|
||||
}
|
||||
)
|
||||
type OldValueSpecPointer = typeof matchOldValueSpecPointer._TYPE
|
||||
const matchOldValueSpecPointer: z.ZodType<OldValueSpecPointer> = z.intersection(
|
||||
z.object({
|
||||
type: z.literal("pointer"),
|
||||
}),
|
||||
z.union([
|
||||
z.object({
|
||||
subtype: z.literal("package"),
|
||||
target: z.enum(["tor-key", "tor-address", "lan-address"]),
|
||||
"package-id": z.string(),
|
||||
interface: z.string(),
|
||||
}),
|
||||
z.object({
|
||||
subtype: z.literal("package"),
|
||||
target: z.enum(["config"]),
|
||||
"package-id": z.string(),
|
||||
selector: z.string(),
|
||||
multi: z.boolean(),
|
||||
}),
|
||||
]),
|
||||
) as unknown as z.ZodType<OldValueSpecPointer>
|
||||
|
||||
export const matchOldValueSpec = anyOf(
|
||||
type OldValueSpecString = z.infer<typeof matchOldValueSpecString>
|
||||
|
||||
type OldValueSpec =
|
||||
| OldValueSpecString
|
||||
| OldValueSpecNumber
|
||||
| OldValueSpecBoolean
|
||||
| OldValueSpecObject
|
||||
| OldValueSpecEnum
|
||||
| OldValueSpecList
|
||||
| OldValueSpecUnion
|
||||
| OldValueSpecPointer
|
||||
|
||||
export const matchOldValueSpec: z.ZodType<OldValueSpec> = z.union([
|
||||
matchOldValueSpecString,
|
||||
matchOldValueSpecNumber,
|
||||
matchOldValueSpecBoolean,
|
||||
matchOldValueSpecObject,
|
||||
matchOldValueSpecObject as z.ZodType<OldValueSpecObject>,
|
||||
matchOldValueSpecEnum,
|
||||
matchOldValueSpecList,
|
||||
matchOldValueSpecUnion,
|
||||
matchOldValueSpecPointer,
|
||||
)
|
||||
type OldValueSpec = typeof matchOldValueSpec._TYPE
|
||||
|
||||
setMatchOldConfigSpec(dictionary([string, matchOldValueSpec]))
|
||||
matchOldValueSpecList as z.ZodType<OldValueSpecList>,
|
||||
matchOldValueSpecUnion as z.ZodType<OldValueSpecUnion>,
|
||||
matchOldValueSpecPointer as z.ZodType<OldValueSpecPointer>,
|
||||
])
|
||||
|
||||
export class Range {
|
||||
min?: number
|
||||
|
||||
@@ -47,11 +47,12 @@ export class SystemForStartOs implements System {
|
||||
getActionInput(
|
||||
effects: Effects,
|
||||
id: string,
|
||||
prefill: Record<string, unknown> | null,
|
||||
timeoutMs: number | null,
|
||||
): Promise<T.ActionInput | null> {
|
||||
const action = this.abi.actions.get(id)
|
||||
if (!action) throw new Error(`Action ${id} not found`)
|
||||
return action.getInput({ effects })
|
||||
return action.getInput({ effects, prefill })
|
||||
}
|
||||
runAction(
|
||||
effects: Effects,
|
||||
@@ -68,22 +69,18 @@ export class SystemForStartOs implements System {
|
||||
try {
|
||||
if (this.runningMain || this.starting) return
|
||||
this.starting = true
|
||||
effects.constRetry = utils.once(() => effects.restart())
|
||||
effects.constRetry = utils.once(() => {
|
||||
console.debug(".const() triggered")
|
||||
if (effects.isInContext) effects.restart()
|
||||
})
|
||||
let mainOnTerm: () => Promise<void> | undefined
|
||||
const started = async (onTerm: () => Promise<void>) => {
|
||||
await effects.setMainStatus({ status: "running" })
|
||||
mainOnTerm = onTerm
|
||||
return null
|
||||
}
|
||||
const daemons = await (
|
||||
await this.abi.main({
|
||||
effects,
|
||||
started,
|
||||
})
|
||||
).build()
|
||||
this.runningMain = {
|
||||
stop: async () => {
|
||||
if (mainOnTerm) await mainOnTerm()
|
||||
await daemons.term()
|
||||
},
|
||||
}
|
||||
|
||||
@@ -33,6 +33,7 @@ export type System = {
|
||||
getActionInput(
|
||||
effects: Effects,
|
||||
actionId: string,
|
||||
prefill: Record<string, unknown> | null,
|
||||
timeoutMs: number | null,
|
||||
): Promise<T.ActionInput | null>
|
||||
|
||||
|
||||
@@ -1,41 +1,19 @@
-import {
-  object,
-  literal,
-  string,
-  boolean,
-  array,
-  dictionary,
-  literals,
-  number,
-  Parser,
-  some,
-} from "ts-matches"
+import { z } from "@start9labs/start-sdk"
 import { matchDuration } from "./Duration"

-const VolumeId = string
-const Path = string
+export type VolumeId = string
+export type Path = string
-export const matchDockerProcedure = object({
-  type: literal("docker"),
-  image: string,
-  system: boolean.optional(),
-  entrypoint: string,
-  args: array(string).defaultTo([]),
-  mounts: dictionary([VolumeId, Path]).optional(),
-  "io-format": literals(
-    "json",
-    "json-pretty",
-    "yaml",
-    "cbor",
-    "toml",
-    "toml-pretty",
-  )
+export const matchDockerProcedure = z.object({
+  type: z.literal("docker"),
+  image: z.string(),
+  system: z.boolean().optional(),
+  entrypoint: z.string(),
+  args: z.array(z.string()).default([]),
+  mounts: z.record(z.string(), z.string()).optional(),
+  "io-format": z
+    .enum(["json", "json-pretty", "yaml", "cbor", "toml", "toml-pretty"])
+    .nullable()
+    .optional(),
-  "sigterm-timeout": some(number, matchDuration).onMismatch(30),
-  inject: boolean.defaultTo(false),
+  "sigterm-timeout": z.union([z.number(), matchDuration]).catch(30),
+  inject: z.boolean().default(false),
 })

-export type DockerProcedure = typeof matchDockerProcedure._TYPE
+export type DockerProcedure = z.infer<typeof matchDockerProcedure>
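Two of the conversions in the Docker procedure matcher above are easy to conflate: ts-matches' defaultTo becomes zod's .default(), which only substitutes when the value is missing, while onMismatch becomes .catch(), which replaces any value that fails validation. A small sketch of the difference with throwaway values:

import { z } from "zod"

const args = z.array(z.string()).default([])
args.parse(undefined) // => [] — a missing value gets the default
// args.parse(42) still throws: .default() does not repair invalid input

const sigtermTimeout = z.union([z.number(), z.string()]).catch(30)
sigtermTimeout.parse("15s") // => "15s" — valid member of the union
sigtermTimeout.parse({ nope: true }) // => 30 — invalid input falls back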
@@ -1,11 +1,11 @@
-import { string } from "ts-matches"
+import { z } from "@start9labs/start-sdk"

 export type TimeUnit = "d" | "h" | "s" | "ms" | "m" | "µs" | "ns"
 export type Duration = `${number}${TimeUnit}`

 const durationRegex = /^([0-9]*(\.[0-9]+)?)(ns|µs|ms|s|m|d)$/

-export const matchDuration = string.refine(isDuration)
+export const matchDuration = z.string().refine(isDuration)
 export function isDuration(value: string): value is Duration {
   return durationRegex.test(value)
 }
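matchDuration above is just a string schema with the regex-backed isDuration guard layered on via .refine(); the Duration template-literal type is only asserted by the type guard, not by zod. Note that the regex as written accepts ns, µs, ms, s, m and d but not h, even though TimeUnit lists it. A short usage sketch (values illustrative):

matchDuration.parse("30s") // passes the regex refine
matchDuration.safeParse("soon").success // => false, not a duration
isDuration("1.5m") // => true, and narrows the string to Duration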
@@ -1,10 +1,10 @@
-import { literals, some, string } from "ts-matches"
+import { z } from "@start9labs/start-sdk"

 type NestedPath<A extends string, B extends string> = `/${A}/${string}/${B}`
 type NestedPaths = NestedPath<"actions", "run" | "getInput">
 // prettier-ignore
-type UnNestPaths<A> =
-  A extends `${infer A}/${infer B}` ? [...UnNestPaths<A>, ... UnNestPaths<B>] :
+type UnNestPaths<A> =
+  A extends `${infer A}/${infer B}` ? [...UnNestPaths<A>, ... UnNestPaths<B>] :
   [A]

 export function unNestPath<A extends string>(a: A): UnNestPaths<A> {
@@ -17,14 +17,14 @@ function isNestedPath(path: string): path is NestedPaths {
     return true
   return false
 }
-export const jsonPath = some(
-  literals(
+export const jsonPath = z.union([
+  z.enum([
     "/packageInit",
     "/packageUninit",
     "/backup/create",
     "/backup/restore",
-  ),
-  string.refine(isNestedPath, "isNestedPath"),
-)
+  ]),
+  z.string().refine(isNestedPath),
+])

-export type JsonPath = typeof jsonPath._TYPE
+export type JsonPath = z.infer<typeof jsonPath>
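jsonPath above accepts either one of the four fixed top-level RPC paths or an /actions/<id>/run | getInput path vetted by the isNestedPath refine. A usage sketch, assuming isNestedPath mirrors the NestedPaths type literally:

jsonPath.parse("/packageInit") // fixed enum member
jsonPath.parse("/actions/config/run") // accepted by the isNestedPath refine
jsonPath.safeParse("/totally/unknown").success // => false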
@@ -1,5 +1,4 @@
 import { RpcListener } from "./Adapters/RpcListener"
-import { SystemForEmbassy } from "./Adapters/Systems/SystemForEmbassy"
 import { AllGetDependencies } from "./Interfaces/AllGetDependencies"
 import { getSystem } from "./Adapters/Systems"

@@ -7,6 +6,24 @@ const getDependencies: AllGetDependencies = {
   system: getSystem,
 }

+process.on("unhandledRejection", (reason) => {
+  if (
+    reason instanceof Error &&
+    "muteUnhandled" in reason &&
+    reason.muteUnhandled
+  ) {
+    // mute
+  } else {
+    console.error("Unhandled promise rejection", reason)
+  }
+})
+
+for (let s of ["SIGTERM", "SIGINT", "SIGHUP"]) {
+  process.on(s, (s) => {
+    console.log(`Caught ${s}`)
+  })
+}
+
 new RpcListener(getDependencies)

 /**
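The new top-level handlers above mute promise rejections that carry a muteUnhandled flag, log every other rejection, and log (without exiting on) SIGTERM/SIGINT/SIGHUP so the runtime can wind down on its own schedule. A sketch of how a rejection opts into being muted; only the property name comes from the diff, the rest is illustrative:

// This rejection matches the muteUnhandled branch and is silently ignored.
const expected = Object.assign(new Error("expected shutdown race"), {
  muteUnhandled: true,
})
Promise.reject(expected)

// This one reaches console.error("Unhandled promise rejection", ...).
Promise.reject(new Error("actual bug"))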
21
container-runtime/update-image-local.sh
Executable file
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+cd "$(dirname "${BASH_SOURCE[0]}")/.."
+
+USE_TTY=
+if tty -s; then
+  USE_TTY="-it"
+fi
+
+DOCKER_PLATFORM=linux/${ARCH}
+case $ARCH in
+  x86_64)
+    DOCKER_PLATFORM=linux/amd64;;
+  aarch64)
+    DOCKER_PLATFORM=linux/arm64;;
+esac
+
+docker run --rm $USE_TTY --platform=$DOCKER_PLATFORM -eARCH --privileged -v "$(pwd):/root/start-os" start9/build-env /root/start-os/container-runtime/update-image.sh
+if [ "$(ls -nd "container-runtime/rootfs.${ARCH}.squashfs" | awk '{ print $3 }')" != "$UID" ]; then
+  docker run --rm $USE_TTY -v "$(pwd):/root/start-os" start9/build-env chown -R $UID:$UID /root/start-os/container-runtime
+fi
@@ -9,56 +9,34 @@ if [ "$ARCH" = "riscv64" ]; then
  RUST_ARCH="riscv64gc"
fi

if mountpoint -q tmp/combined; then sudo umount -l tmp/combined; fi
if mountpoint -q tmp/lower; then sudo umount tmp/lower; fi
sudo rm -rf tmp
mkdir -p tmp/lower tmp/upper tmp/work tmp/combined
if which squashfuse > /dev/null; then
  sudo squashfuse debian.${ARCH}.squashfs tmp/lower
else
  sudo mount debian.${ARCH}.squashfs tmp/lower
fi
if which fuse-overlayfs > /dev/null; then
  sudo fuse-overlayfs -olowerdir=tmp/lower,upperdir=tmp/upper,workdir=tmp/work overlay tmp/combined
else
  sudo mount -t overlay -olowerdir=tmp/lower,upperdir=tmp/upper,workdir=tmp/work overlay tmp/combined
fi
mount -t tmpfs tmpfs /tmp
mkdir -p /tmp/lower /tmp/upper /tmp/work /tmp/combined
mount -o loop debian.${ARCH}.squashfs /tmp/lower
mount -t overlay -olowerdir=/tmp/lower,upperdir=/tmp/upper,workdir=/tmp/work overlay /tmp/combined

QEMU=
if [ "$ARCH" != "$(uname -m)" ]; then
  QEMU=/usr/bin/qemu-${ARCH}-static
  if ! which qemu-$ARCH-static > /dev/null; then
    >&2 echo qemu-user-static is required for cross-platform builds
    sudo umount tmp/combined
    sudo umount tmp/lower
    sudo rm -rf tmp
    exit 1
  fi
  sudo cp $(which qemu-$ARCH-static) tmp/combined${QEMU}
fi

sudo mkdir -p tmp/combined/usr/lib/startos/
sudo rsync -a --copy-unsafe-links dist/ tmp/combined/usr/lib/startos/init/
sudo chown -R 0:0 tmp/combined/usr/lib/startos/
sudo cp container-runtime.service tmp/combined/lib/systemd/system/container-runtime.service
sudo chown 0:0 tmp/combined/lib/systemd/system/container-runtime.service
sudo cp container-runtime-failure.service tmp/combined/lib/systemd/system/container-runtime-failure.service
sudo chown 0:0 tmp/combined/lib/systemd/system/container-runtime-failure.service
sudo cp ../core/target/${RUST_ARCH}-unknown-linux-musl/release/containerbox tmp/combined/usr/bin/start-container
echo -e '#!/bin/bash\nexec start-container "$@"' | sudo tee tmp/combined/usr/bin/start-cli # TODO: remove
sudo chmod +x tmp/combined/usr/bin/start-cli
sudo chown 0:0 tmp/combined/usr/bin/start-container
echo container-runtime | sha256sum | head -c 32 | cat - <(echo) | sudo tee tmp/combined/etc/machine-id
cat deb-install.sh | sudo systemd-nspawn --console=pipe -D tmp/combined $QEMU /bin/bash
sudo truncate -s 0 tmp/combined/etc/machine-id

if [ -n "$QEMU" ]; then
  sudo rm tmp/combined${QEMU}
fi
mkdir -p /tmp/combined/usr/lib/startos/
rsync -a --copy-unsafe-links --info=progress2 dist/ /tmp/combined/usr/lib/startos/init/
chown -R 0:0 /tmp/combined/usr/lib/startos/
cp container-runtime.service /tmp/combined/lib/systemd/system/container-runtime.service
chown 0:0 /tmp/combined/lib/systemd/system/container-runtime.service
cp container-runtime-failure.service /tmp/combined/lib/systemd/system/container-runtime-failure.service
chown 0:0 /tmp/combined/lib/systemd/system/container-runtime-failure.service
cp ../core/target/${RUST_ARCH}-unknown-linux-musl/release/start-container /tmp/combined/usr/bin/start-container
echo -e '#!/bin/bash\nexec start-container "$@"' > /tmp/combined/usr/bin/start-cli # TODO: remove
chmod +x /tmp/combined/usr/bin/start-cli
chown 0:0 /tmp/combined/usr/bin/start-container
echo container-runtime | sha256sum | head -c 32 | cat - <(echo) > /tmp/combined/etc/machine-id
rm -f /tmp/combined/etc/resolv.conf
cp /etc/resolv.conf /tmp/combined/etc/resolv.conf
for fs in proc sys dev; do
  mount --bind /$fs /tmp/combined/$fs
done
cat deb-install.sh | chroot /tmp/combined /bin/bash
for fs in proc sys dev; do
  umount /tmp/combined/$fs
done
truncate -s 0 /tmp/combined/etc/machine-id

rm -f rootfs.${ARCH}.squashfs
mkdir -p ../build/lib/container-runtime
sudo mksquashfs tmp/combined rootfs.${ARCH}.squashfs
sudo umount tmp/combined
sudo umount tmp/lower
sudo rm -rf tmp
mksquashfs /tmp/combined rootfs.${ARCH}.squashfs