Merge branch 'next/minor' of github.com:Start9Labs/start-os into next/major

This commit is contained in:
Matt Hill
2024-08-08 10:52:49 -06:00
765 changed files with 43858 additions and 19423 deletions


@@ -71,27 +71,27 @@ jobs:
sudo mount -t tmpfs tmpfs .
if: ${{ github.event.inputs.runner == 'fast' }}
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-node@v3
- uses: actions/setup-node@v4
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Set up docker QEMU
uses: docker/setup-qemu-action@v2
uses: docker/setup-qemu-action@v3
- name: Set up system QEMU
run: sudo apt-get update && sudo apt-get install -y qemu-user-static
- name: Set up system dependencies
run: sudo apt-get update && sudo apt-get install -y qemu-user-static systemd-container squashfuse
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v3
- name: Make
run: make ARCH=${{ matrix.arch }} compiled-${{ matrix.arch }}.tar
- uses: actions/upload-artifact@v3
- uses: actions/upload-artifact@v4
with:
name: compiled-${{ matrix.arch }}.tar
path: compiled-${{ matrix.arch }}.tar
@@ -140,7 +140,11 @@ jobs:
}')[matrix.platform]
}}
steps:
- uses: actions/checkout@v3
- name: Free space
run: rm -rf /opt/hostedtoolcache*
if: ${{ github.event.inputs.runner != 'fast' }}
- uses: actions/checkout@v4
with:
submodules: recursive
@@ -162,7 +166,7 @@ jobs:
if: ${{ github.event.inputs.runner == 'fast' && (matrix.platform == 'x86_64' || matrix.platform == 'x86_64-nonfree') }}
- name: Download compiled artifacts
uses: actions/download-artifact@v3
uses: actions/download-artifact@v4
with:
name: compiled-${{ env.ARCH }}.tar
@@ -171,8 +175,10 @@ jobs:
- name: Prevent rebuild of compiled artifacts
run: |
mkdir -p web/node_modules
mkdir -p web/dist/raw
touch core/startos/bindings
touch sdk/lib/osBindings
mkdir -p container-runtime/dist
PLATFORM=${{ matrix.platform }} make -t compiled-${{ env.ARCH }}.tar
@@ -184,18 +190,18 @@ jobs:
run: PLATFORM=${{ matrix.platform }} make img
if: ${{ matrix.platform == 'raspberrypi' }}
- uses: actions/upload-artifact@v3
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.platform }}.squashfs
path: results/*.squashfs
- uses: actions/upload-artifact@v3
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.platform }}.iso
path: results/*.iso
if: ${{ matrix.platform != 'raspberrypi' }}
- uses: actions/upload-artifact@v3
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.platform }}.img
path: results/*.img


@@ -19,11 +19,11 @@ jobs:
name: Run Automated Tests
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-node@v3
- uses: actions/setup-node@v4
with:
node-version: ${{ env.NODEJS_VERSION }}

.gitignore

@@ -20,7 +20,6 @@ secrets.db
/ENVIRONMENT.txt
/GIT_HASH.txt
/VERSION.txt
/eos-*.tar.gz
/*.deb
/target
/*.squashfs

DEVELOPMENT.md (new file)

@@ -0,0 +1,133 @@
# Setting up your development environment on Debian/Ubuntu
A step-by-step guide
> This is the only officially supported build environment.
> macOS has limited build capabilities, and Windows requires [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install).
## Installing dependencies
Run the following commands one at a time:
```sh
sudo apt update
sudo apt install -y ca-certificates curl gpg build-essential
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg-architecture -q DEB_HOST_ARCH) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian bookworm stable" | sudo tee /etc/apt/sources.list.d/docker.list
sudo apt update
sudo apt install -y sed grep gawk jq gzip brotli containerd.io docker-ce docker-ce-cli docker-compose-plugin qemu-user-static binfmt-support squashfs-tools git debspawn rsync b3sum
sudo mkdir -p /etc/debspawn/
echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
sudo usermod -aG docker $USER
sudo su $USER
docker run --privileged --rm tonistiigi/binfmt --install all
docker buildx create --use
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh # proceed with default installation
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/master/install.sh | bash
source ~/.bashrc
nvm install 20
nvm use 20
```
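To sanity-check the installation before building (optional; exact version output will vary):
```sh
docker run --rm hello-world   # confirms the docker group membership took effect
docker buildx ls              # shows the buildx builder created above
node --version                # should print a v20.x version
cargo --version               # confirms the rustup install
```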
## Cloning the repository
```sh
git clone --recursive https://github.com/Start9Labs/start-os.git --branch next/minor
cd start-os
```
## Building an ISO
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make iso
```
This will build an ISO for your current architecture. If you are building for an architecture other than the one you are currently on, replace `$(uname -m)` with the correct platform for the target device (one of `aarch64`, `aarch64-nonfree`, `x86_64`, `x86_64-nonfree`, `raspberrypi`).
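For example, to build a dev ISO for an x86_64 device:
```sh
PLATFORM=x86_64 ENVIRONMENT=dev make iso
```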
## Creating a VM
### Install virt-manager
```sh
sudo apt update
sudo apt install -y virt-manager
sudo usermod -aG libvirt $USER
sudo su $USER
```
### Launch virt-manager
```sh
virt-manager
```
### Create new virtual machine
![Select "Create a new virtual machine"](assets/create-vm/step-1.png)
![Click "Forward"](assets/create-vm/step-2.png)
![Click "Browse"](assets/create-vm/step-3.png)
![Click "+"](assets/create-vm/step-4.png)
#### Make sure to set "Target Path" to the path of the `results` directory in your start-os checkout
![Create storage pool](assets/create-vm/step-5.png)
![Select storage pool](assets/create-vm/step-6.png)
![Select ISO](assets/create-vm/step-7.png)
![Select "Generic or unknown OS" and click "Forward"](assets/create-vm/step-8.png)
![Set Memory and CPUs](assets/create-vm/step-9.png)
![Create disk](assets/create-vm/step-10.png)
![Name VM](assets/create-vm/step-11.png)
![Create network](assets/create-vm/step-12.png)
## Updating a VM
The fastest way to update a VM to your latest code depends on what you changed:
### UI or startd:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-startbox REMOTE=start9@<VM IP>
```
### Container runtime or Debian dependencies:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-deb REMOTE=start9@<VM IP>
```
### Image recipe:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-squashfs REMOTE=start9@<VM IP>
```
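In each command, `<VM IP>` is the guest's address. If you created the VM with virt-manager as above, one way to look it up (a sketch, assuming libvirt's default NAT network) is:
```sh
virsh domifaddr <vm-name>
```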
---
If the device you are building for is not available via SSH, it is also possible to use `magic-wormhole` to send the relevant files.
### Prerequisites:
```sh
sudo apt update
sudo apt install -y magic-wormhole
```
As before, the fastest way to update a VM to your latest code depends on what you changed. Each of the following commands prints a command to paste into the shell of the device you want to upgrade.
### UI or startd:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole
```
### Container runtime or Debian dependencies:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole-deb
```
### Image recipe:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole-squashfs
```
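For reference, `make wormhole` prints a paste-ready command of roughly this shape (the wormhole code below is a made-up placeholder):
```sh
sudo /usr/lib/startos/scripts/chroot-and-upgrade "cd /usr/bin && rm startbox && wormhole receive --accept-file 7-example-code && chmod +x startbox"
```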

Makefile

@@ -6,26 +6,25 @@ BASENAME := $(shell ./basename.sh)
PLATFORM := $(shell if [ -f ./PLATFORM.txt ]; then cat ./PLATFORM.txt; else echo unknown; fi)
ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; else echo $(PLATFORM) | sed 's/-nonfree$$//g'; fi)
IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo iso; fi)
BINS := core/target/$(ARCH)-unknown-linux-musl/release/startbox core/target/$(ARCH)-unknown-linux-musl/release/containerbox
WEB_UIS := web/dist/raw/ui web/dist/raw/setup-wizard web/dist/raw/install-wizard
FIRMWARE_ROMS := ./firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json)
BUILD_SRC := $(shell git ls-files build) build/lib/depends build/lib/conflicts container-runtime/rootfs.$(ARCH).squashfs $(FIRMWARE_ROMS)
BUILD_SRC := $(shell git ls-files build) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS)
DEBIAN_SRC := $(shell git ls-files debian/)
IMAGE_RECIPE_SRC := $(shell git ls-files image-recipe/)
STARTD_SRC := core/startos/startd.service $(BUILD_SRC)
COMPAT_SRC := $(shell git ls-files system-images/compat/)
UTILS_SRC := $(shell git ls-files system-images/utils/)
BINFMT_SRC := $(shell git ls-files system-images/binfmt/)
CORE_SRC := $(shell git ls-files -- core ':!:core/startos/bindings/*') $(shell git ls-files --recurse-submodules patch-db) web/dist/static web/patchdb-ui-seed.json $(GIT_HASH_FILE)
WEB_SHARED_SRC := $(shell git ls-files web/projects/shared) $(shell ls -p web/ | grep -v / | sed 's/^/web\//g') web/node_modules web/config.json patch-db/client/dist web/patchdb-ui-seed.json
CORE_SRC := $(shell git ls-files core) $(shell git ls-files --recurse-submodules patch-db) $(GIT_HASH_FILE)
WEB_SHARED_SRC := $(shell git ls-files web/projects/shared) $(shell ls -p web/ | grep -v / | sed 's/^/web\//g') web/node_modules/.package-lock.json web/config.json patch-db/client/dist web/patchdb-ui-seed.json sdk/dist
WEB_UI_SRC := $(shell git ls-files web/projects/ui)
WEB_SETUP_WIZARD_SRC := $(shell git ls-files web/projects/setup-wizard)
WEB_INSTALL_WIZARD_SRC := $(shell git ls-files web/projects/install-wizard)
PATCH_DB_CLIENT_SRC := $(shell git ls-files --recurse-submodules patch-db/client)
GZIP_BIN := $(shell which pigz || which gzip)
TAR_BIN := $(shell which gtar || which tar)
COMPILED_TARGETS := $(BINS) system-images/compat/docker-images/$(ARCH).tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar container-runtime/rootfs.$(ARCH).squashfs
ALL_TARGETS := $(STARTD_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo cargo-deps/aarch64-unknown-linux-musl/release/pi-beep; fi) $(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]; then echo cargo-deps/$(ARCH)-unknown-linux-musl/release/tokio-console; fi') $(PLATFORM_FILE) sdk/lib/test
COMPILED_TARGETS := core/target/$(ARCH)-unknown-linux-musl/release/startbox core/target/$(ARCH)-unknown-linux-musl/release/containerbox system-images/compat/docker-images/$(ARCH).tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar container-runtime/rootfs.$(ARCH).squashfs
ALL_TARGETS := $(STARTD_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) cargo-deps/$(ARCH)-unknown-linux-musl/release/startos-backup-fs $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo cargo-deps/aarch64-unknown-linux-musl/release/pi-beep; fi) $(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]; then echo cargo-deps/$(ARCH)-unknown-linux-musl/release/tokio-console; fi') $(PLATFORM_FILE)
ifeq ($(REMOTE),)
mkdir = mkdir -p $1
@@ -48,7 +47,7 @@ endif
.DELETE_ON_ERROR:
.PHONY: all metadata install clean format cli uis ui reflash deb $(IMAGE_TYPE) squashfs sudo wormhole test
.PHONY: all metadata install clean format cli uis ui reflash deb $(IMAGE_TYPE) squashfs sudo wormhole wormhole-deb test test-core test-sdk test-container-runtime
all: $(ALL_TARGETS)
@@ -64,6 +63,7 @@ clean:
rm -f system-images/**/*.tar
rm -rf system-images/compat/target
rm -rf core/target
rm -rf core/startos/bindings
rm -rf web/.angular
rm -f web/config.json
rm -rf web/node_modules
@@ -79,8 +79,8 @@ clean:
rm -rf container-runtime/dist
rm -rf container-runtime/node_modules
rm -f container-runtime/*.squashfs
rm -rf sdk/dist
rm -rf sdk/node_modules
rm -rf container-runtime/tmp
(cd sdk && make clean)
rm -f ENVIRONMENT.txt
rm -f PLATFORM.txt
rm -f GIT_HASH.txt
@@ -89,10 +89,16 @@ clean:
format:
cd core && cargo +nightly fmt
test: $(CORE_SRC) $(ENVIRONMENT_FILE)
(cd core && cargo build && cargo test)
npm --prefix sdk exec -- prettier -w ./core/startos/bindings/*.ts
(cd sdk && make test)
test: | test-core test-sdk test-container-runtime
test-core: $(CORE_SRC) $(ENVIRONMENT_FILE)
cd core && cargo build --features=test && cargo test --features=test
test-sdk: $(shell git ls-files sdk) sdk/lib/osBindings
cd sdk && make test
test-container-runtime: container-runtime/node_modules $(shell git ls-files container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json
cd container-runtime && npm test
cli:
cd core && ./install-cli.sh
@@ -102,7 +108,7 @@ deb: results/$(BASENAME).deb
debian/control: build/lib/depends build/lib/conflicts
./debuild/control.sh
results/$(BASENAME).deb: dpkg-build.sh $(DEBIAN_SRC) $(VERSION_FILE) $(PLATFORM_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE)
results/$(BASENAME).deb: dpkg-build.sh $(DEBIAN_SRC) $(ALL_TARGETS)
PLATFORM=$(PLATFORM) ./dpkg-build.sh
$(IMAGE_TYPE): results/$(BASENAME).$(IMAGE_TYPE)
@@ -115,13 +121,15 @@ results/$(BASENAME).$(IMAGE_TYPE) results/$(BASENAME).squashfs: $(IMAGE_RECIPE_S
# For creating os images. DO NOT USE
install: $(ALL_TARGETS)
$(call mkdir,$(DESTDIR)/usr/bin)
$(call mkdir,$(DESTDIR)/usr/sbin)
$(call cp,core/target/$(ARCH)-unknown-linux-musl/release/startbox,$(DESTDIR)/usr/bin/startbox)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/startd)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-cli)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-sdk)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/embassy-cli)
if [ "$(PLATFORM)" = "raspberrypi" ]; then $(call cp,cargo-deps/aarch64-unknown-linux-musl/release/pi-beep,$(DESTDIR)/usr/bin/pi-beep); fi
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then $(call cp,cargo-deps/$(ARCH)-unknown-linux-musl/release/tokio-console,$(DESTDIR)/usr/bin/tokio-console); fi
$(call cp,cargo-deps/$(ARCH)-unknown-linux-musl/release/startos-backup-fs,$(DESTDIR)/usr/bin/startos-backup-fs)
$(call ln,/usr/bin/startos-backup-fs,$(DESTDIR)/usr/sbin/mount.backup-fs)
$(call mkdir,$(DESTDIR)/lib/systemd/system)
$(call cp,core/startos/startd.service,$(DESTDIR)/lib/systemd/system/startd.service)
@@ -140,7 +148,6 @@ install: $(ALL_TARGETS)
$(call mkdir,$(DESTDIR)/usr/lib/startos/system-images)
$(call cp,system-images/compat/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/compat.tar)
$(call cp,system-images/utils/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/utils.tar)
$(call cp,system-images/binfmt/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/binfmt.tar)
$(call cp,firmware/$(PLATFORM),$(DESTDIR)/usr/lib/startos/firmware)
@@ -154,38 +161,80 @@ update-overlay: $(ALL_TARGETS)
$(call ssh,"sudo systemctl start startd")
wormhole: core/target/$(ARCH)-unknown-linux-musl/release/startbox
@echo "Paste the following command into the shell of your start-os server:"
@echo "Paste the following command into the shell of your StartOS server:"
@echo
@wormhole send core/target/$(ARCH)-unknown-linux-musl/release/startbox 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo /usr/lib/startos/scripts/chroot-and-upgrade \"cd /usr/bin && rm startbox && wormhole receive --accept-file %s && chmod +x startbox\"\n", $$3 }'
wormhole-deb: results/$(BASENAME).deb
@echo "Paste the following command into the shell of your StartOS server:"
@echo
@wormhole send results/$(BASENAME).deb 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo /usr/lib/startos/scripts/chroot-and-upgrade '"'"'cd $$(mktemp -d) && wormhole receive --accept-file %s && apt-get install -y --reinstall ./$(BASENAME).deb'"'"'\n", $$3 }'
wormhole-squashfs: results/$(BASENAME).squashfs
$(eval SQFS_SUM := $(shell b3sum results/$(BASENAME).squashfs | head -c 32))
$(eval SQFS_SIZE := $(shell du -s --bytes results/$(BASENAME).squashfs | awk '{print $$1}'))
@echo "Paste the following command into the shell of your StartOS server:"
@echo
@wormhole send results/$(BASENAME).squashfs 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo sh -c '"'"'/usr/lib/startos/scripts/prune-images $(SQFS_SIZE) && cd /media/startos/images && wormhole receive --accept-file %s && mv $(BASENAME).squashfs $(SQFS_SUM).rootfs && ln -rsf ./$(SQFS_SUM).rootfs ../config/current.rootfs && sync && reboot'"'"'\n", $$3 }'
update: $(ALL_TARGETS)
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(call ssh,"sudo rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next/")
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/embassy/next PLATFORM=$(PLATFORM)
$(call ssh,'sudo NO_SYNC=1 /media/embassy/next/usr/lib/startos/scripts/chroot-and-upgrade "apt-get install -y $(shell cat ./build/lib/depends)"')
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/startos/next PLATFORM=$(PLATFORM)
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y $(shell cat ./build/lib/depends)"')
update-startbox: core/target/$(ARCH)-unknown-linux-musl/release/startbox # only update binary (faster than full update)
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
$(call cp,core/target/$(ARCH)-unknown-linux-musl/release/startbox,/media/startos/next/usr/bin/startbox)
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync true')
update-deb: results/$(BASENAME).deb # better than update, but only available from debian
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
$(call mkdir,/media/startos/next/tmp/startos-deb)
$(call cp,results/$(BASENAME).deb,/media/startos/next/tmp/startos-deb/$(BASENAME).deb)
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y --reinstall /tmp/startos-deb/$(BASENAME).deb"')
update-squashfs: results/$(BASENAME).squashfs
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(eval SQFS_SUM := $(shell b3sum results/$(BASENAME).squashfs))
$(eval SQFS_SIZE := $(shell du -s --bytes results/$(BASENAME).squashfs | awk '{print $$1}'))
$(call ssh,'/usr/lib/startos/scripts/prune-images $(SQFS_SIZE)')
$(call cp,results/$(BASENAME).squashfs,/media/startos/images/$(SQFS_SUM).rootfs)
$(call ssh,'sudo ln -rsf /media/startos/images/$(SQFS_SUM).rootfs /media/startos/config/current.rootfs')
$(call ssh,'sudo reboot')
emulate-reflash: $(ALL_TARGETS)
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(call ssh,"sudo rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next/")
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/embassy/next PLATFORM=$(PLATFORM)
$(call ssh,"sudo touch /media/embassy/config/upgrade && sudo rm -f /media/embassy/config/disk.guid && sudo sync && sudo reboot")
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/startos/next PLATFORM=$(PLATFORM)
$(call ssh,'sudo rm -f /media/startos/config/disk.guid')
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y $(shell cat ./build/lib/depends)"')
upload-ota: results/$(BASENAME).squashfs
TARGET=$(TARGET) KEY=$(KEY) ./upload-ota.sh
container-runtime/alpine.$(ARCH).squashfs:
container-runtime/debian.$(ARCH).squashfs:
ARCH=$(ARCH) ./container-runtime/download-base-image.sh
container-runtime/node_modules: container-runtime/package.json container-runtime/package-lock.json sdk/dist
npm --prefix container-runtime ci
touch container-runtime/node_modules
core/startos/bindings: $(shell git ls-files -- core ':!:core/startos/bindings/*') $(ENVIRONMENT_FILE)
rm -rf core/startos/bindings
(cd core/ && cargo test --features=test)
sdk/lib/osBindings: core/startos/bindings
mkdir -p sdk/lib/osBindings
ls core/startos/bindings/*.ts | sed 's/core\/startos\/bindings\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' > core/startos/bindings/index.ts
npm --prefix sdk exec -- prettier -w ./core/startos/bindings/*.ts
npm --prefix sdk exec -- prettier --config ./sdk/package.json -w ./core/startos/bindings/*.ts
rsync -ac --delete core/startos/bindings/ sdk/lib/osBindings/
touch sdk/lib/osBindings
sdk/dist: $(shell git ls-files sdk) core/startos/bindings
core/startos/bindings: $(shell git ls-files core) $(ENVIRONMENT_FILE)
rm -rf core/startos/bindings
(cd core/ && cargo test --features=test 'export_bindings_')
touch core/startos/bindings
sdk/dist: $(shell git ls-files sdk) sdk/lib/osBindings
(cd sdk && make bundle)
# TODO: make container-runtime its own makefile?
@@ -196,7 +245,7 @@ container-runtime/dist/node_modules container-runtime/dist/package.json containe
./container-runtime/install-dist-deps.sh
touch container-runtime/dist/node_modules
container-runtime/rootfs.$(ARCH).squashfs: container-runtime/alpine.$(ARCH).squashfs container-runtime/containerRuntime.rc container-runtime/update-image.sh container-runtime/dist/index.js container-runtime/dist/node_modules core/target/$(ARCH)-unknown-linux-musl/release/containerbox | sudo
container-runtime/rootfs.$(ARCH).squashfs: container-runtime/debian.$(ARCH).squashfs container-runtime/container-runtime.service container-runtime/update-image.sh container-runtime/deb-install.sh container-runtime/dist/index.js container-runtime/dist/node_modules core/target/$(ARCH)-unknown-linux-musl/release/containerbox | sudo
ARCH=$(ARCH) ./container-runtime/update-image.sh
build/lib/depends build/lib/conflicts: build/dpkg-deps/*
@@ -214,22 +263,33 @@ system-images/utils/docker-images/$(ARCH).tar: $(UTILS_SRC)
system-images/binfmt/docker-images/$(ARCH).tar: $(BINFMT_SRC)
cd system-images/binfmt && make docker-images/$(ARCH).tar && touch docker-images/$(ARCH).tar
$(BINS): $(CORE_SRC) $(ENVIRONMENT_FILE)
cd core && ARCH=$(ARCH) ./build-prod.sh
touch $(BINS)
core/target/$(ARCH)-unknown-linux-musl/release/startbox: $(CORE_SRC) web/dist/static web/patchdb-ui-seed.json $(ENVIRONMENT_FILE)
ARCH=$(ARCH) ./core/build-startbox.sh
touch core/target/$(ARCH)-unknown-linux-musl/release/startbox
web/node_modules: web/package.json sdk/dist
(cd sdk && make bundle)
core/target/$(ARCH)-unknown-linux-musl/release/containerbox: $(CORE_SRC) $(ENVIRONMENT_FILE)
ARCH=$(ARCH) ./core/build-containerbox.sh
touch core/target/$(ARCH)-unknown-linux-musl/release/containerbox
web/node_modules/.package-lock.json: web/package.json sdk/dist
npm --prefix web ci
touch web/node_modules/.package-lock.json
web/dist/raw/ui: $(WEB_UI_SRC) $(WEB_SHARED_SRC)
web/.angular: patch-db/client/dist sdk/dist web/node_modules/.package-lock.json
rm -rf web/.angular
mkdir -p web/.angular
web/dist/raw/ui: $(WEB_UI_SRC) $(WEB_SHARED_SRC) web/.angular
npm --prefix web run build:ui
touch web/dist/raw/ui
web/dist/raw/setup-wizard: $(WEB_SETUP_WIZARD_SRC) $(WEB_SHARED_SRC)
web/dist/raw/setup-wizard: $(WEB_SETUP_WIZARD_SRC) $(WEB_SHARED_SRC) web/.angular
npm --prefix web run build:setup
touch web/dist/raw/setup-wizard
web/dist/raw/install-wizard: $(WEB_INSTALL_WIZARD_SRC) $(WEB_SHARED_SRC)
web/dist/raw/install-wizard: $(WEB_INSTALL_WIZARD_SRC) $(WEB_SHARED_SRC) web/.angular
npm --prefix web run build:install
touch web/dist/raw/install-wizard
web/dist/static: $(WEB_UIS) $(ENVIRONMENT_FILE)
./compress-uis.sh
@@ -243,10 +303,11 @@ web/patchdb-ui-seed.json: web/package.json
patch-db/client/node_modules: patch-db/client/package.json
npm --prefix patch-db/client ci
touch patch-db/client/node_modules
patch-db/client/dist: $(PATCH_DB_CLIENT_SRC) patch-db/client/node_modules
! test -d patch-db/client/dist || rm -rf patch-db/client/dist
npm --prefix web run build:deps
rm -rf patch-db/client/dist
npm --prefix patch-db/client run build
# used by github actions
compiled-$(ARCH).tar: $(COMPILED_TARGETS) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE)
@@ -261,5 +322,8 @@ ui: web/dist/raw/ui
cargo-deps/aarch64-unknown-linux-musl/release/pi-beep:
ARCH=aarch64 ./build-cargo-dep.sh pi-beep
cargo-deps/$(ARCH)-unknown-linux-gnu/release/tokio-console: | sudo
ARCH=$(ARCH) ./build-cargo-dep.sh tokio-console
cargo-deps/$(ARCH)-unknown-linux-musl/release/tokio-console:
ARCH=$(ARCH) PREINSTALL="apk add musl-dev pkgconfig" ./build-cargo-dep.sh tokio-console
cargo-deps/$(ARCH)-unknown-linux-musl/release/startos-backup-fs:
ARCH=$(ARCH) PREINSTALL="apk add fuse3 fuse3-dev fuse3-static musl-dev pkgconfig" ./build-cargo-dep.sh --git https://github.com/Start9Labs/start-fs.git startos-backup-fs
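With these rules in place, narrower rebuilds stay cheap; for instance, regenerating only the UI artifact after a frontend change (a sketch using the `ui` alias shown above):
```sh
make ui   # builds web/dist/raw/ui
```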

BIN: 12 new screenshots added under assets/create-vm/ (step-1.png through step-12.png, 27–64 KiB each; binary files not shown).


@@ -17,9 +17,18 @@ if [ -z "$ARCH" ]; then
ARCH=$(uname -m)
fi
mkdir -p cargo-deps
alias 'rust-musl-builder'='docker run $USE_TTY --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)"/cargo-deps:/home/rust/src -w /home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
DOCKER_PLATFORM="linux/${ARCH}"
if [ "$ARCH" = aarch64 ]; then
DOCKER_PLATFORM="linux/arm64"
elif [ "$ARCH" = x86_64 ]; then
DOCKER_PLATFORM="linux/amd64"
fi
rust-musl-builder cargo install "$1" --target-dir /home/rust/src --target=$ARCH-unknown-linux-musl
mkdir -p cargo-deps
alias 'rust-musl-builder'='docker run $USE_TTY --platform=${DOCKER_PLATFORM} --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)"/cargo-deps:/home/rust/src -w /home/rust/src -P rust:alpine'
PREINSTALL=${PREINSTALL:-true}
rust-musl-builder sh -c "$PREINSTALL && cargo install $* --target-dir /home/rust/src --target=$ARCH-unknown-linux-musl"
sudo chown -R $USER cargo-deps
sudo chown -R $USER ~/.cargo
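Per the Makefile recipes above, this script takes the crate to build plus an optional `PREINSTALL` step for the Alpine build container, e.g.:
```sh
ARCH=x86_64 PREINSTALL="apk add musl-dev pkgconfig" ./build-cargo-dep.sh tokio-console
```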


@@ -1,107 +0,0 @@
# Building StartOS
⚠️ The commands given assume a Debian or Ubuntu-based environment. _Building in
a VM is NOT yet supported_ ⚠️
## Prerequisites
1. Install dependencies
- Avahi
- `sudo apt install -y avahi-daemon`
- Installed by default on most Debian systems - https://avahi.org
- Build Essentials (needed to run `make`)
- `sudo apt install -y build-essential`
- Docker
- `curl -fsSL https://get.docker.com | sh`
- https://docs.docker.com/get-docker
- Add your user to the docker group: `sudo usermod -a -G docker $USER`
- Reload user environment `exec sudo su -l $USER`
- Prepare Docker environment
- Setup buildx (https://docs.docker.com/buildx/working-with-buildx/)
- Create a builder: `docker buildx create --use`
- Add multi-arch build ability:
`docker run --rm --privileged linuxkit/binfmt:v0.8`
- Node Version 12+
- snap: `sudo snap install node`
- [nvm](https://github.com/nvm-sh/nvm#installing-and-updating):
`nvm install --lts`
- https://nodejs.org/en/docs
- NPM Version 7+
- apt: `sudo apt install -y npm`
- [nvm](https://github.com/nvm-sh/nvm#installing-and-updating):
`nvm install --lts`
- https://docs.npmjs.com/downloading-and-installing-node-js-and-npm
- jq
- `sudo apt install -y jq`
- https://stedolan.github.io/jq
- yq
- snap: `sudo snap install yq`
- binaries: https://github.com/mikefarah/yq/releases/
- https://mikefarah.gitbook.io/yq
2. Clone the latest repo with required submodules
> :information_source: You can check the latest available version
> [here](https://github.com/Start9Labs/start-os/releases)
```
git clone --recursive https://github.com/Start9Labs/start-os.git --branch latest
```
## Build Raspberry Pi Image
```
cd start-os
make embassyos-raspi.img ARCH=aarch64
```
## Flash
Flash the resulting `embassyos-raspi.img` to your SD Card
We recommend [Balena Etcher](https://www.balena.io/etcher/)
## Setup
Visit http://start.local from any web browser - We recommend
[Firefox](https://www.mozilla.org/firefox/browsers)
Enter your product key. This is generated during the build process and can be
found in `product_key.txt`, located in the root directory.
## Troubleshooting
1. I just flashed my SD card, fired up StartOS, bootup sounds and all, but my
browser is saying "Unable to connect" with start.local.
- Try doing a hard refresh on your browser, or opening the url in a
private/incognito window. If you've run an instance of StartOS before,
sometimes you can have a stale cache that will block you from navigating to
the page.
2. Flashing the image isn't working with balenaEtcher. I'm getting
`Cannot read property 'message' of null` when I try.
- The latest versions of Balena may not flash properly. This version here:
https://github.com/balena-io/etcher/releases/tag/v1.5.122 should work
properly.
3. Startup isn't working properly and I'm curious as to why. How can I view logs
regarding startup for debugging?
- Find the IP of your device
- Run `nc <ip> 8080` and it will print the logs
4. I need to ssh into my server to fix something, but I cannot get to the
console to add ssh keys normally.
- During the Build step, instead of running just
`make embassyos-raspi.img ARCH=aarch64` run
`ENVIRONMENT=dev make embassyos-raspi.img ARCH=aarch64`. Flash like normal,
and insert into your server. Boot up StartOS, then on another computer on
the same network, SSH into the server with the username `start9` and password
`embassy`.
4. I need to reset my password, how can I do that?
- You will need to reflash your device. Select "Use Existing Drive" once you are
in setup, and it will prompt you to set a new password.


@@ -1,76 +0,0 @@
# Release Process
## `embassyos_0.3.x-1_amd64.deb`
- Description: debian package for x86_64 - intended to be installed on pureos
- Destination: GitHub Release Tag
- Requires: N/A
- Build steps:
- Clone `https://github.com/Start9Labs/embassy-os-deb` at `master`
- Run `make TAG=master` from that folder
- Artifact: `./embassyos_0.3.x-1_amd64.deb`
## `eos-<version>-<git hash>-<date>_amd64.iso`
- Description: live usb image for x86_64
- Destination: GitHub Release Tag
- Requires: `embassyos_0.3.x-1_amd64.deb`
- Build steps:
- Clone `https://github.com/Start9Labs/eos-image-recipes` at `master`
- Copy `embassyos_0.3.x-1_amd64.deb` to
`overlays/vendor/root/embassyos_0.3.x-1_amd64.deb`
- Run `./run-local-build.sh byzantium` from that folder
- Artifact: `./results/eos-<version>-<git hash>-<date>_amd64.iso`
## `eos.x86_64.squashfs`
- Description: compressed embassyOS x86_64 filesystem image
- Destination: GitHub Release Tag, Registry @
`resources/eos/<version>/eos.x86_64.squashfs`
- Requires: `eos-<version>-<git hash>-<date>_amd64.iso`
- Build steps:
- From `https://github.com/Start9Labs/eos-image-recipes` at `master`
- `./extract-squashfs.sh results/eos-<version>-<git hash>-<date>_amd64.iso` (run on Linux)
- Artifact: `./results/eos.x86_64.squashfs`
## `eos.raspberrypi.squashfs`
- Description: compressed embassyOS raspberrypi filesystem image
- Destination: GitHub Release Tag, Registry @
`resources/eos/<version>/eos.raspberrypi.squashfs`
- Requires: N/A
- Build steps:
- Clone `https://github.com/Start9Labs/embassy-os` at `master`
- `make embassyos-raspi.img`
- flash `embassyos-raspi.img` to raspberry pi
- boot raspberry pi with ethernet
- wait for chime
- you can watch logs using `nc <ip> 8080`
- unplug raspberry pi, put sd card back in build machine
- `./build/raspberry-pi/rip-image.sh`
- Artifact: `./eos.raspberrypi.squashfs`
## `lite-upgrade.img`
- Description: update image for users coming from 0.3.2.1 and before
- Destination: Registry @ `resources/eos/<version>/eos.img`
- Requires: `eos.raspberrypi.squashfs`
- Build steps:
- From `https://github.com/Start9Labs/embassy-os` at `master`
- `make lite-upgrade.img`
- Artifact `./lite-upgrade.img`
## `eos-<version>-<git hash>-<date>_raspberrypi.tar.gz`
- Description: pre-initialized raspberrypi image
- Destination: GitHub Release Tag (as tar.gz)
- Requires: `eos.raspberrypi.squashfs`
- Build steps:
- From `https://github.com/Start9Labs/embassy-os` at `master`
- `make eos_raspberrypi.img`
- `tar --format=posix -cS -f- eos-<version>-<git hash>-<date>_raspberrypi.img | gzip > eos-<version>-<git hash>-<date>_raspberrypi.tar.gz`
- Artifact `./eos-<version>-<git hash>-<date>_raspberrypi.tar.gz`
## `embassy-sdk`
- Build and deploy to all registries


@@ -1,5 +1,6 @@
avahi-daemon
avahi-utils
b3sum
bash-completion
beep
bmon
@@ -14,10 +15,12 @@ e2fsprogs
ecryptfs-utils
exfatprogs
flashrom
fuse3
grub-common
htop
httpdirfs
iotop
iptables
iw
jq
libyajl2


@@ -4,6 +4,3 @@ set -e
curl -fsSL https://deb.torproject.org/torproject.org/A3C4F0F979CAA22CDBA8F512EE8CBC9E886DDD89.asc | gpg --dearmor -o- > /usr/share/keyrings/tor-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/tor-archive-keyring.gpg] https://deb.torproject.org/torproject.org bullseye main" > /etc/apt/sources.list.d/tor.list
curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o- > /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian bullseye stable" > /etc/apt/sources.list.d/docker.list


@@ -1,46 +1,99 @@
#!/bin/bash
SOURCE_DIR="$(dirname "${BASH_SOURCE[0]}")"
if [ "$UID" -ne 0 ]; then
>&2 echo 'Must be run as root'
exit 1
fi
POSITIONAL_ARGS=()
while [[ $# -gt 0 ]]; do
case $1 in
--no-sync)
NO_SYNC=1
shift
;;
--create)
ONLY_CREATE=1
shift
;;
-*|--*)
echo "Unknown option $1"
exit 1
;;
*)
POSITIONAL_ARGS+=("$1") # save positional arg
shift # past argument
;;
esac
done
set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters
if [ -z "$NO_SYNC" ]; then
echo 'Syncing...'
rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next
umount -R /media/startos/next 2> /dev/null
umount -R /media/startos/upper 2> /dev/null
rm -rf /media/startos/upper /media/startos/next
mkdir /media/startos/upper
mount -t tmpfs tmpfs /media/startos/upper
mkdir -p /media/startos/upper/data /media/startos/upper/work /media/startos/next
mount -t overlay \
-olowerdir=/media/startos/current,upperdir=/media/startos/upper/data,workdir=/media/startos/upper/work \
overlay /media/startos/next
fi
mkdir -p /media/embassy/next/run
mkdir -p /media/embassy/next/dev
mkdir -p /media/embassy/next/sys
mkdir -p /media/embassy/next/proc
mkdir -p /media/embassy/next/boot
mount --bind /run /media/embassy/next/run
mount --bind /dev /media/embassy/next/dev
mount --bind /sys /media/embassy/next/sys
mount --bind /proc /media/embassy/next/proc
mount --bind /boot /media/embassy/next/boot
if [ -n "$ONLY_CREATE" ]; then
exit 0
fi
mkdir -p /media/startos/next/run
mkdir -p /media/startos/next/dev
mkdir -p /media/startos/next/sys
mkdir -p /media/startos/next/proc
mkdir -p /media/startos/next/boot
mount --bind /run /media/startos/next/run
mount --bind /tmp /media/startos/next/tmp
mount --bind /dev /media/startos/next/dev
mount --bind /sys /media/startos/next/sys
mount --bind /proc /media/startos/next/proc
mount --bind /boot /media/startos/next/boot
if [ -z "$*" ]; then
chroot /media/embassy/next
chroot /media/startos/next
CHROOT_RES=$?
else
chroot /media/embassy/next "$SHELL" -c "$*"
chroot /media/startos/next "$SHELL" -c "$*"
CHROOT_RES=$?
fi
umount /media/embassy/next/run
umount /media/embassy/next/dev
umount /media/embassy/next/sys
umount /media/embassy/next/proc
umount /media/embassy/next/boot
umount /media/startos/next/run
umount /media/startos/next/tmp
umount /media/startos/next/dev
umount /media/startos/next/sys
umount /media/startos/next/proc
umount /media/startos/next/boot
if [ "$CHROOT_RES" -eq 0 ]; then
if [ -h /media/startos/config/current.rootfs ] && [ -e /media/startos/config/current.rootfs ]; then
${SOURCE_DIR}/prune-images $(du -s --bytes /media/startos/next | awk '{print $1}')
fi
echo 'Upgrading...'
touch /media/embassy/config/upgrade
time mksquashfs /media/startos/next /media/startos/images/next.squashfs -b 4096 -comp gzip
hash=$(b3sum /media/startos/images/next.squashfs | head -c 32)
mv /media/startos/images/next.squashfs /media/startos/images/${hash}.rootfs
ln -rsf /media/startos/images/${hash}.rootfs /media/startos/config/current.rootfs
sync
reboot
fi
fi
umount -R /media/startos/next
umount -R /media/startos/upper
rm -rf /media/startos/upper /media/startos/next
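As wired up in the Makefile's `update-*` targets above, a typical two-step use of this script is:
```sh
# stage a writable overlay of the current rootfs at /media/startos/next
sudo /usr/lib/startos/scripts/chroot-and-upgrade --create
# ...copy new files into /media/startos/next, then finalize (skipping the initial sync):
sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync true
```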


@@ -1,98 +0,0 @@
# Local filesystem mounting -*- shell-script -*-
#
# This script overrides local_mount_root() in /scripts/local
# and mounts root as a read-only filesystem with a temporary (rw)
# overlay filesystem.
#
. /scripts/local
local_mount_root()
{
echo 'using embassy initramfs module'
local_top
local_device_setup "${ROOT}" "root file system"
ROOT="${DEV}"
# Get the root filesystem type if not set
if [ -z "${ROOTFSTYPE}" ]; then
FSTYPE=$(get_fstype "${ROOT}")
else
FSTYPE=${ROOTFSTYPE}
fi
local_premount
# CHANGES TO THE ORIGINAL FUNCTION BEGIN HERE
# N.B. this code still lacks error checking
modprobe ${FSTYPE}
checkfs ${ROOT} root "${FSTYPE}"
ROOTFLAGS="$(echo "${ROOTFLAGS}" | sed 's/subvol=\(next\|current\)//' | sed 's/^-o *$//')"
if [ "${FSTYPE}" != "unknown" ]; then
mount -t ${FSTYPE} ${ROOTFLAGS} ${ROOT} ${rootmnt}
else
mount ${ROOTFLAGS} ${ROOT} ${rootmnt}
fi
echo 'mounting embassyfs'
mkdir /embassyfs
mount --move ${rootmnt} /embassyfs
if ! [ -d /embassyfs/current ] && [ -d /embassyfs/prev ]; then
mv /embassyfs/prev /embassyfs/current
fi
if ! [ -d /embassyfs/current ]; then
mkdir /embassyfs/current
for FILE in $(ls /embassyfs); do
if [ "$FILE" != current ]; then
mv /embassyfs/$FILE /embassyfs/current/
fi
done
fi
mkdir -p /embassyfs/config
if [ -f /embassyfs/config/upgrade ] && [ -d /embassyfs/next ]; then
mv /embassyfs/current /embassyfs/prev
mv /embassyfs/next /embassyfs/current
rm /embassyfs/config/upgrade
fi
if ! [ -d /embassyfs/next ]; then
if [ -d /embassyfs/prev ]; then
mv /embassyfs/prev /embassyfs/next
else
mkdir /embassyfs/next
fi
fi
mkdir /lower /upper
mount -r --bind /embassyfs/current /lower
modprobe overlay || insmod "/lower/lib/modules/$(uname -r)/kernel/fs/overlayfs/overlay.ko"
# Mount a tmpfs for the overlay in /upper
mount -t tmpfs tmpfs /upper
mkdir /upper/data /upper/work
# Mount the final overlay-root in $rootmnt
mount -t overlay \
-olowerdir=/lower,upperdir=/upper/data,workdir=/upper/work \
overlay ${rootmnt}
mkdir -p ${rootmnt}/media/embassy/config
mount --bind /embassyfs/config ${rootmnt}/media/embassy/config
mkdir -p ${rootmnt}/media/embassy/next
mount --bind /embassyfs/next ${rootmnt}/media/embassy/next
mkdir -p ${rootmnt}/media/embassy/embassyfs
mount -r --bind /embassyfs ${rootmnt}/media/embassy/embassyfs
}


@@ -14,14 +14,8 @@ if ! id kiosk; then
useradd -s /bin/bash --create-home kiosk
fi
# create kiosk script
cat > /home/kiosk/kiosk.sh << 'EOF'
#!/bin/sh
PROFILE=$(mktemp -d)
if [ -f /usr/local/share/ca-certificates/startos-root-ca.crt ]; then
certutil -A -n "StartOS Local Root CA" -t "TCu,Cuw,Tuw" -i /usr/local/share/ca-certificates/startos-root-ca.crt -d $PROFILE
fi
cat >> $PROFILE/prefs.js << EOT
mkdir /home/kiosk/fx-profile
cat >> /home/kiosk/fx-profile/prefs.js << EOF
user_pref("app.normandy.api_url", "");
user_pref("app.normandy.enabled", false);
user_pref("app.shield.optoutstudies.enabled", false);
@@ -87,7 +81,11 @@ user_pref("toolkit.telemetry.shutdownPingSender.enabled", false);
user_pref("toolkit.telemetry.unified", false);
user_pref("toolkit.telemetry.updatePing.enabled", false);
user_pref("toolkit.telemetry.cachedClientID", "");
EOT
EOF
# create kiosk script
cat > /home/kiosk/kiosk.sh << 'EOF'
#!/bin/sh
while ! curl "http://localhost" > /dev/null; do
sleep 1
done
@@ -101,8 +99,7 @@ done
killall firefox-esr
) &
matchbox-window-manager -use_titlebar no &
firefox-esr http://localhost --profile $PROFILE
rm -rf $PROFILE
firefox-esr http://localhost --profile /home/kiosk/fx-profile
EOF
chmod +x /home/kiosk/kiosk.sh
@@ -116,6 +113,8 @@ fi
EOF
fi
chown -R kiosk:kiosk /home/kiosk
# enable autologin
mkdir -p /etc/systemd/system/getty@tty1.service.d
cat > /etc/systemd/system/getty@tty1.service.d/autologin.conf << 'EOF'


@@ -3,8 +3,8 @@
ARGS=
for ARG in $@; do
if [ -d "/media/embassy/embassyfs" ] && [ "$ARG" = "/" ]; then
ARG=/media/embassy/embassyfs
if [ -d "/media/startos/root" ] && [ "$ARG" = "/" ]; then
ARG=/media/startos/root
fi
ARGS="$ARGS $ARG"
done

build/lib/scripts/prune-images (new executable file)

@@ -0,0 +1,49 @@
#!/bin/bash
if [ "$UID" -ne 0 ]; then
>&2 echo 'Must be run as root'
exit 1
fi
POSITIONAL_ARGS=()
while [[ $# -gt 0 ]]; do
case $1 in
-*|--*)
echo "Unknown option $1"
exit 1
;;
*)
POSITIONAL_ARGS+=("$1") # save positional arg
shift # past argument
;;
esac
done
set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters
needed=$1
if [ -z "$needed" ]; then
>&2 echo "usage: $0 <SPACE NEEDED>"
exit 1
fi
if [ -h /media/startos/config/current.rootfs ] && [ -e /media/startos/config/current.rootfs ]; then
echo 'Pruning...'
current="$(readlink -f /media/startos/config/current.rootfs)"
while [[ "$(df -B1 --output=avail --sync /media/startos/images | tail -n1)" -lt "$needed" ]]; do
to_prune="$(ls -t1 /media/startos/images/*.rootfs /media/startos/images/*.squashfs | grep -v "$current" | tail -n1)"
if [ -e "$to_prune" ]; then
echo " Pruning $to_prune"
rm -rf "$to_prune"
else
>&2 echo "Not enough space and nothing to prune!"
exit 1
fi
done
echo 'done.'
else
>&2 echo 'No current.rootfs, not safe to prune'
exit 1
fi
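The argument is a byte count (the script compares it against `df -B1` output); the Makefile's `update-squashfs` target, for example, passes the size of the incoming image (path illustrative):
```sh
sudo /usr/lib/startos/scripts/prune-images "$(du -s --bytes results/startos.squashfs | awk '{print $1}')"
```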


@@ -0,0 +1,114 @@
# Local filesystem mounting -*- shell-script -*-
#
# This script overrides local_mount_root() in /scripts/local
# and mounts root as a read-only filesystem with a temporary (rw)
# overlay filesystem.
#
. /scripts/local
local_mount_root()
{
echo 'using startos initramfs module'
local_top
local_device_setup "${ROOT}" "root file system"
ROOT="${DEV}"
# Get the root filesystem type if not set
if [ -z "${ROOTFSTYPE}" ]; then
FSTYPE=$(get_fstype "${ROOT}")
else
FSTYPE=${ROOTFSTYPE}
fi
local_premount
# CHANGES TO THE ORIGINAL FUNCTION BEGIN HERE
# N.B. this code still lacks error checking
modprobe ${FSTYPE}
checkfs ${ROOT} root "${FSTYPE}"
echo 'mounting startos'
mkdir /startos
ROOTFLAGS="$(echo "${ROOTFLAGS}" | sed 's/subvol=\(next\|current\)//' | sed 's/^-o *$//')"
if [ "${FSTYPE}" != "unknown" ]; then
mount -t ${FSTYPE} ${ROOTFLAGS} ${ROOT} /startos
else
mount ${ROOTFLAGS} ${ROOT} /startos
fi
if [ -d /startos/images ]; then
if [ -h /startos/config/current.rootfs ] && [ -e /startos/config/current.rootfs ]; then
image=$(readlink -f /startos/config/current.rootfs)
else
image="$(ls -t1 /startos/images/*.rootfs | head -n1)"
fi
if ! [ -f "$image" ]; then
>&2 echo "image $image not available to boot"
exit 1
fi
else
if [ -f /startos/config/upgrade ] && [ -d /startos/next ]; then
oldroot=/startos/next
elif [ -d /startos/current ]; then
oldroot=/startos/current
elif [ -d /startos/prev ]; then
oldroot=/startos/prev
else
>&2 echo no StartOS filesystem found
exit 1
fi
mkdir -p /startos/config/overlay/etc
mv $oldroot/etc/fstab /startos/config/overlay/etc/fstab
mv $oldroot/etc/machine-id /startos/config/overlay/etc/machine-id
mv $oldroot/etc/ssh /startos/config/overlay/etc/ssh
mkdir -p /startos/images
mv $oldroot /startos/images/legacy.rootfs
rm -rf /startos/next /startos/current /startos/prev
ln -rsf /startos/images/old.squashfs /startos/config/current.rootfs
image=$(readlink -f /startos/config/current.rootfs)
fi
mkdir /lower /upper
if [ -d "$image" ]; then
mount -r --bind $image /lower
elif [ -f "$image" ]; then
modprobe squashfs
mount -r $image /lower
else
>&2 echo "not a regular file or directory: $image"
exit 1
fi
modprobe overlay || insmod "/lower/lib/modules/$(uname -r)/kernel/fs/overlayfs/overlay.ko"
# Mount a tmpfs for the overlay in /upper
mount -t tmpfs tmpfs /upper
mkdir /upper/data /upper/work
mkdir -p /startos/config/overlay
# Mount the final overlay-root in $rootmnt
mount -t overlay \
-olowerdir=/startos/config/overlay:/lower,upperdir=/upper/data,workdir=/upper/work \
overlay ${rootmnt}
mkdir -p ${rootmnt}/media/startos/config
mount --bind /startos/config ${rootmnt}/media/startos/config
mkdir -p ${rootmnt}/media/startos/images
mount --bind /startos/images ${rootmnt}/media/startos/images
mkdir -p ${rootmnt}/media/startos/root
mount -r --bind /startos ${rootmnt}/media/startos/root
mkdir -p ${rootmnt}/media/startos/current
mount -r --bind /lower ${rootmnt}/media/startos/current
}


@@ -63,7 +63,7 @@ sudo unsquashfs -f -d $TMPDIR startos.raspberrypi.squashfs
REAL_GIT_HASH=$(cat $TMPDIR/usr/lib/startos/GIT_HASH.txt)
REAL_VERSION=$(cat $TMPDIR/usr/lib/startos/VERSION.txt)
REAL_ENVIRONMENT=$(cat $TMPDIR/usr/lib/startos/ENVIRONMENT.txt)
sudo sed -i 's| boot=embassy| init=/usr/lib/startos/scripts/init_resize\.sh|' $TMPDIR/boot/cmdline.txt
sudo sed -i 's| boot=startos| init=/usr/lib/startos/scripts/init_resize\.sh|' $TMPDIR/boot/cmdline.txt
sudo cp ./build/raspberrypi/fstab $TMPDIR/etc/
sudo cp ./build/raspberrypi/init_resize.sh $TMPDIR/usr/lib/startos/scripts/init_resize.sh
sudo umount $TMPDIR/boot


@@ -1,7 +1,7 @@
#!/bin/bash
if [ "$GIT_BRANCH_AS_HASH" != 1 ]; then
GIT_HASH="$(git describe --always --abbrev=40 --dirty=-modified)"
GIT_HASH="$(git rev-parse HEAD)$(if ! git diff-index --quiet HEAD --; then echo '-modified'; fi)"
else
GIT_HASH="@$(git rev-parse --abbrev-ref HEAD)"
fi


@@ -1,4 +0,0 @@
FROM node:18-alpine
ADD ./startInit.js /usr/local/lib/startInit.js
ADD ./entrypoint.sh /usr/local/bin/entrypoint.sh


@@ -3,38 +3,61 @@
## Methods
### init
initialize runtime (mount `/proc`, `/sys`, `/dev`, and `/run` to each image in `/media/images`)
called after the OS has mounted the JS bundle and images into the container
#### args
`[]`
#### response
`null`
### exit
shutdown runtime
#### args
`[]`
#### response
`null`
### start
run main method if not already running
#### args
`[]`
#### response
`null`
### stop
stop main method by sending SIGTERM to child processes, and SIGKILL after timeout
#### args
`{ timeout: millis }`
#### response
`null`
### execute
run a specific package procedure
#### args
#### args
```ts
{
procedure: JsonPath,
@@ -42,12 +65,17 @@ run a specific package procedure
timeout: millis,
}
```
#### response
`any`
### sandbox
run a specific package procedure in sandbox mode
#### args
#### args
```ts
{
procedure: JsonPath,
@@ -55,5 +83,7 @@ run a specific package procedure in sandbox mode
timeout: millis,
}
```
#### response
`any`
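Putting the shapes above together, a hypothetical request/response pair for `stop` (only the args and response types come from this document; the surrounding framing is an assumption):
```ts
// Hypothetical wire objects for the `stop` method documented above.
const request = { id: 1, method: "stop", params: { timeout: 30000 } } // timeout in millis
const response = { id: 1, result: null } // stop responds with null
```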


@@ -0,0 +1,9 @@
[Unit]
Description=StartOS Container Runtime
[Service]
Type=simple
ExecStart=/usr/bin/node --experimental-detect-module --unhandled-rejections=warn /usr/lib/startos/init/index.js
[Install]
WantedBy=multi-user.target


@@ -1,10 +0,0 @@
#!/sbin/openrc-run
name=containerRuntime
#cfgfile="/etc/containerRuntime/containerRuntime.conf"
command="/usr/bin/node"
command_args="--experimental-detect-module --unhandled-rejections=warn /usr/lib/startos/init/index.js"
pidfile="/run/containerRuntime.pid"
command_background="yes"
output_log="/var/log/containerRuntime.log"
error_log="/var/log/containerRuntime.err"


@@ -0,0 +1,23 @@
#!/bin/bash
set -e
mkdir -p /run/systemd/resolve
echo "nameserver 8.8.8.8" > /run/systemd/resolve/stub-resolv.conf
apt-get update
apt-get install -y curl rsync qemu-user-static
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash
source ~/.bashrc
nvm install 20
ln -s $(which node) /usr/bin/node
sed -i '/\(^\|#\)Storage=/c\Storage=persistent' /etc/systemd/journald.conf
sed -i '/\(^\|#\)Compress=/c\Compress=yes' /etc/systemd/journald.conf
sed -i '/\(^\|#\)SystemMaxUse=/c\SystemMaxUse=1G' /etc/systemd/journald.conf
sed -i '/\(^\|#\)ForwardToSyslog=/c\ForwardToSyslog=no' /etc/systemd/journald.conf
systemctl enable container-runtime.service
rm -rf /run/systemd


@@ -4,8 +4,8 @@ cd "$(dirname "${BASH_SOURCE[0]}")"
set -e
DISTRO=alpine
VERSION=3.19
DISTRO=debian
VERSION=bookworm
ARCH=${ARCH:-$(uname -m)}
FLAVOR=default
@@ -16,4 +16,8 @@ elif [ "$_ARCH" = "aarch64" ]; then
_ARCH=arm64
fi
curl https://images.linuxcontainers.org/$(curl --silent https://images.linuxcontainers.org/meta/1.0/index-system | grep "^$DISTRO;$VERSION;$_ARCH;$FLAVOR;" | head -n1 | sed 's/^.*;//g')/rootfs.squashfs --output alpine.${ARCH}.squashfs
URL="https://images.linuxcontainers.org/$(curl -fsSL https://images.linuxcontainers.org/meta/1.0/index-system | grep "^$DISTRO;$VERSION;$_ARCH;$FLAVOR;" | head -n1 | sed 's/^.*;//g')/rootfs.squashfs"
echo "Downloading $URL to debian.${ARCH}.squashfs"
curl -fsSL "$URL" > debian.${ARCH}.squashfs


@@ -0,0 +1,8 @@
/** @type {import('ts-jest').JestConfigWithTsJest} */
module.exports = {
preset: "ts-jest",
automock: false,
testEnvironment: "node",
rootDir: "./src/",
modulePathIgnorePatterns: ["./dist/"],
}
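With the `test` script added to container-runtime/package.json below, this config is what the new `test-container-runtime` Make target exercises:
```sh
npm --prefix container-runtime test   # runs: jest -c ./jest.config.js
```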

(File diff suppressed because it is too large.)


@@ -1,12 +1,13 @@
{
"name": "start-init",
"name": "container-runtime",
"version": "0.0.0",
"description": "We want to be the sdk intermitent for the system",
"module": "./index.js",
"scripts": {
"check": "tsc --noEmit",
"build": "prettier --write '**/*.ts' && rm -rf dist && tsc",
"tsc": "rm -rf dist; tsc"
"build": "prettier . '!tmp/**' --write && rm -rf dist && tsc",
"tsc": "rm -rf dist; tsc",
"test": "jest -c ./jest.config.js"
},
"author": "",
"prettier": {
@@ -17,12 +18,16 @@
},
"dependencies": {
"@iarna/toml": "^2.2.5",
"@noble/curves": "^1.4.0",
"@noble/hashes": "^1.4.0",
"@start9labs/start-sdk": "file:../sdk/dist",
"esbuild-plugin-resolve": "^2.0.0",
"filebrowser": "^1.0.0",
"isomorphic-fetch": "^3.0.0",
"jsonpath": "^1.1.1",
"lodash.merge": "^4.6.2",
"node-fetch": "^3.1.0",
"ts-matches": "^5.4.1",
"ts-matches": "^5.5.1",
"tslib": "^2.5.3",
"typescript": "^5.1.3",
"yaml": "^2.3.1"
@@ -30,8 +35,12 @@
"devDependencies": {
"@swc/cli": "^0.1.62",
"@swc/core": "^1.3.65",
"@types/jest": "^29.5.12",
"@types/jsonpath": "^0.2.4",
"@types/node": "^20.11.13",
"jest": "^29.7.0",
"prettier": "^3.2.5",
"ts-jest": "^29.2.3",
"typescript": ">5.2"
}
}


@@ -0,0 +1,301 @@
import { types as T } from "@start9labs/start-sdk"
import * as net from "net"
import { object, string, number, literals, some, unknown } from "ts-matches"
import { Effects } from "../Models/Effects"
import { CallbackHolder } from "../Models/CallbackHolder"
import { MainEffects } from "@start9labs/start-sdk/cjs/lib/StartSdk"
const matchRpcError = object({
error: object(
{
code: number,
message: string,
data: some(
string,
object(
{
details: string,
debug: string,
},
["debug"],
),
),
},
["data"],
),
})
const testRpcError = matchRpcError.test
const testRpcResult = object({
result: unknown,
}).test
type RpcError = typeof matchRpcError._TYPE
const SOCKET_PATH = "/media/startos/rpc/host.sock"
let hostSystemId = 0
export type EffectContext = {
procedureId: string | null
callbacks: CallbackHolder | null
}
const rpcRoundFor =
(procedureId: string | null) =>
<K extends keyof Effects | "getStore" | "setStore" | "clearCallbacks">(
method: K,
params: Record<string, unknown>,
) => {
const id = hostSystemId++
const client = net.createConnection({ path: SOCKET_PATH }, () => {
client.write(
JSON.stringify({
id,
method,
params: { ...params, procedureId },
}) + "\n",
)
})
let bufs: Buffer[] = []
return new Promise((resolve, reject) => {
client.on("data", (data) => {
try {
bufs.push(data)
if (data.reduce((acc, x) => acc || x == 10, false)) {
const res: unknown = JSON.parse(
Buffer.concat(bufs).toString().split("\n")[0],
)
if (testRpcError(res)) {
let message = res.error.message
console.error("Error in host RPC:", { method, params })
if (string.test(res.error.data)) {
message += ": " + res.error.data
console.error(`Details: ${res.error.data}`)
} else {
if (res.error.data?.details) {
message += ": " + res.error.data.details
console.error(`Details: ${res.error.data.details}`)
}
if (res.error.data?.debug) {
message += "\n" + res.error.data.debug
console.error(`Debug: ${res.error.data.debug}`)
}
}
reject(new Error(`${message}@${method}`))
} else if (testRpcResult(res)) {
resolve(res.result)
} else {
reject(new Error(`malformed response ${JSON.stringify(res)}`))
}
}
} catch (error) {
reject(error)
}
client.end()
})
client.on("error", (error) => {
reject(error)
})
})
}
function makeEffects(context: EffectContext): Effects {
const rpcRound = rpcRoundFor(context.procedureId)
const self: Effects = {
bind(...[options]: Parameters<T.Effects["bind"]>) {
return rpcRound("bind", {
...options,
stack: new Error().stack,
}) as ReturnType<T.Effects["bind"]>
},
clearBindings(...[]: Parameters<T.Effects["clearBindings"]>) {
return rpcRound("clearBindings", {}) as ReturnType<
T.Effects["clearBindings"]
>
},
clearServiceInterfaces(
...[]: Parameters<T.Effects["clearServiceInterfaces"]>
) {
return rpcRound("clearServiceInterfaces", {}) as ReturnType<
T.Effects["clearServiceInterfaces"]
>
},
getInstalledPackages(...[]: Parameters<T.Effects["getInstalledPackages"]>) {
return rpcRound("getInstalledPackages", {}) as ReturnType<
T.Effects["getInstalledPackages"]
>
},
createOverlayedImage(options: {
imageId: string
}): Promise<[string, string]> {
return rpcRound("createOverlayedImage", options) as ReturnType<
T.Effects["createOverlayedImage"]
>
},
destroyOverlayedImage(options: { guid: string }): Promise<void> {
return rpcRound("destroyOverlayedImage", options) as ReturnType<
T.Effects["destroyOverlayedImage"]
>
},
executeAction(...[options]: Parameters<T.Effects["executeAction"]>) {
return rpcRound("executeAction", options) as ReturnType<
T.Effects["executeAction"]
>
},
exportAction(...[options]: Parameters<T.Effects["exportAction"]>) {
return rpcRound("exportAction", options) as ReturnType<
T.Effects["exportAction"]
>
},
exportServiceInterface: ((
...[options]: Parameters<Effects["exportServiceInterface"]>
) => {
return rpcRound("exportServiceInterface", options) as ReturnType<
T.Effects["exportServiceInterface"]
>
}) as Effects["exportServiceInterface"],
exposeForDependents(
...[options]: Parameters<T.Effects["exposeForDependents"]>
) {
return rpcRound("exposeForDependents", options) as ReturnType<
T.Effects["exposeForDependents"]
>
},
getConfigured(...[]: Parameters<T.Effects["getConfigured"]>) {
return rpcRound("getConfigured", {}) as ReturnType<
T.Effects["getConfigured"]
>
},
getContainerIp(...[]: Parameters<T.Effects["getContainerIp"]>) {
return rpcRound("getContainerIp", {}) as ReturnType<
T.Effects["getContainerIp"]
>
},
getHostInfo: ((...[allOptions]: Parameters<T.Effects["getHostInfo"]>) => {
const options = {
...allOptions,
callback: context.callbacks?.addCallback(allOptions.callback) || null,
}
return rpcRound("getHostInfo", options) as ReturnType<
T.Effects["getHostInfo"]
> as any
}) as Effects["getHostInfo"],
getServiceInterface(
...[options]: Parameters<T.Effects["getServiceInterface"]>
) {
return rpcRound("getServiceInterface", {
...options,
callback: context.callbacks?.addCallback(options.callback) || null,
}) as ReturnType<T.Effects["getServiceInterface"]>
},
getPrimaryUrl(...[options]: Parameters<T.Effects["getPrimaryUrl"]>) {
return rpcRound("getPrimaryUrl", {
...options,
callback: context.callbacks?.addCallback(options.callback) || null,
}) as ReturnType<T.Effects["getPrimaryUrl"]>
},
getServicePortForward(
...[options]: Parameters<T.Effects["getServicePortForward"]>
) {
return rpcRound("getServicePortForward", options) as ReturnType<
T.Effects["getServicePortForward"]
>
},
getSslCertificate(options: Parameters<T.Effects["getSslCertificate"]>[0]) {
return rpcRound("getSslCertificate", options) as ReturnType<
T.Effects["getSslCertificate"]
>
},
getSslKey(options: Parameters<T.Effects["getSslKey"]>[0]) {
return rpcRound("getSslKey", options) as ReturnType<
T.Effects["getSslKey"]
>
},
getSystemSmtp(...[options]: Parameters<T.Effects["getSystemSmtp"]>) {
return rpcRound("getSystemSmtp", {
...options,
callback: context.callbacks?.addCallback(options.callback) || null,
}) as ReturnType<T.Effects["getSystemSmtp"]>
},
listServiceInterfaces(
...[options]: Parameters<T.Effects["listServiceInterfaces"]>
) {
return rpcRound("listServiceInterfaces", {
...options,
callback: context.callbacks?.addCallback(options.callback) || null,
}) as ReturnType<T.Effects["listServiceInterfaces"]>
},
mount(...[options]: Parameters<T.Effects["mount"]>) {
return rpcRound("mount", options) as ReturnType<T.Effects["mount"]>
},
clearActions(...[]: Parameters<T.Effects["clearActions"]>) {
return rpcRound("clearActions", {}) as ReturnType<
T.Effects["clearActions"]
>
},
restart(...[]: Parameters<T.Effects["restart"]>) {
return rpcRound("restart", {}) as ReturnType<T.Effects["restart"]>
},
setConfigured(...[configured]: Parameters<T.Effects["setConfigured"]>) {
return rpcRound("setConfigured", { configured }) as ReturnType<
T.Effects["setConfigured"]
>
},
setDependencies(
dependencies: Parameters<T.Effects["setDependencies"]>[0],
): ReturnType<T.Effects["setDependencies"]> {
return rpcRound("setDependencies", dependencies) as ReturnType<
T.Effects["setDependencies"]
>
},
checkDependencies(
options: Parameters<T.Effects["checkDependencies"]>[0],
): ReturnType<T.Effects["checkDependencies"]> {
return rpcRound("checkDependencies", options) as ReturnType<
T.Effects["checkDependencies"]
>
},
getDependencies(): ReturnType<T.Effects["getDependencies"]> {
return rpcRound("getDependencies", {}) as ReturnType<
T.Effects["getDependencies"]
>
},
setHealth(...[options]: Parameters<T.Effects["setHealth"]>) {
return rpcRound("setHealth", options) as ReturnType<
T.Effects["setHealth"]
>
},
setMainStatus(o: { status: "running" | "stopped" }): Promise<void> {
return rpcRound("setMainStatus", o) as Promise<void>
},
shutdown(...[]: Parameters<T.Effects["shutdown"]>) {
return rpcRound("shutdown", {}) as ReturnType<T.Effects["shutdown"]>
},
store: {
get: async (options: any) =>
rpcRound("getStore", {
...options,
callback: context.callbacks?.addCallback(options.callback) || null,
}) as any,
set: async (options: any) =>
rpcRound("setStore", options) as ReturnType<T.Effects["store"]["set"]>,
} as T.Effects["store"],
}
return self
}
export function makeProcedureEffects(procedureId: string): Effects {
return makeEffects({ procedureId, callbacks: null })
}
export function makeMainEffects(): MainEffects {
const rpcRound = rpcRoundFor(null)
return {
_type: "main",
clearCallbacks: () => {
return rpcRound("clearCallbacks", {}) as Promise<void>
},
...makeEffects({ procedureId: null, callbacks: new CallbackHolder() }),
}
}
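
The factory above maps every `T.Effects` method onto a `rpcRound` JSON-RPC call keyed by `procedureId`, registering callback-bearing options with the context's `CallbackHolder` first; it apparently replaces the deleted `HostSystemStartOs` class further down. A minimal usage sketch — the import path, procedure id, and health payload are illustrative, not part of the real API surface:

```ts
// Hypothetical consumer of the factories above (assumed import path).
import { makeProcedureEffects, makeMainEffects } from "./effects"

async function demo() {
  // Effects scoped to a single procedure invocation: every call below is
  // forwarded over JSON-RPC tagged with this procedureId.
  const effects = makeProcedureEffects("d2b355f4")
  await effects.setHealth({
    id: "web-ui",
    name: "Web UI",
    result: "success",
    message: "listening on 8080",
  })

  // Main effects additionally expose clearCallbacks and carry a live
  // CallbackHolder, so subscription-style calls can register callbacks.
  const main = makeMainEffects()
  await main.clearCallbacks()
}
```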

View File

@@ -1,290 +0,0 @@
import { types as T } from "@start9labs/start-sdk"
import * as net from "net"
import { object, string, number, literals, some, unknown } from "ts-matches"
import { Effects } from "../Models/Effects"
import { CallbackHolder } from "../Models/CallbackHolder"
const matchRpcError = object({
error: object(
{
code: number,
message: string,
data: some(
string,
object(
{
details: string,
debug: string,
},
["debug"],
),
),
},
["data"],
),
})
const testRpcError = matchRpcError.test
const testRpcResult = object({
result: unknown,
}).test
type RpcError = typeof matchRpcError._TYPE
const SOCKET_PATH = "/media/startos/rpc/host.sock"
const MAIN = "/main" as const
export class HostSystemStartOs implements Effects {
static of(callbackHolder: CallbackHolder) {
return new HostSystemStartOs(callbackHolder)
}
constructor(readonly callbackHolder: CallbackHolder) {}
id = 0
rpcRound<K extends keyof Effects | "getStore" | "setStore">(
method: K,
params: unknown,
) {
const id = this.id++
const client = net.createConnection({ path: SOCKET_PATH }, () => {
client.write(
JSON.stringify({
id,
method,
params,
}) + "\n",
)
})
let bufs: Buffer[] = []
return new Promise((resolve, reject) => {
client.on("data", (data) => {
try {
bufs.push(data)
if (data.reduce((acc, x) => acc || x == 10, false)) {
const res: unknown = JSON.parse(
Buffer.concat(bufs).toString().split("\n")[0],
)
if (testRpcError(res)) {
let message = res.error.message
console.error({ method, params, hostSystemStartOs: true })
if (string.test(res.error.data)) {
message += ": " + res.error.data
console.error(res.error.data)
} else {
if (res.error.data?.details) {
message += ": " + res.error.data.details
console.error(res.error.data.details)
}
if (res.error.data?.debug) {
message += "\n" + res.error.data.debug
console.error("Debug: " + res.error.data.debug)
}
}
reject(new Error(`${message}@${method}`))
} else if (testRpcResult(res)) {
resolve(res.result)
} else {
reject(new Error(`malformed response ${JSON.stringify(res)}`))
}
}
} catch (error) {
reject(error)
}
client.end()
})
client.on("error", (error) => {
reject(error)
})
})
}
bind(...[options]: Parameters<T.Effects["bind"]>) {
return this.rpcRound("bind", options) as ReturnType<T.Effects["bind"]>
}
clearBindings(...[]: Parameters<T.Effects["clearBindings"]>) {
return this.rpcRound("clearBindings", null) as ReturnType<
T.Effects["clearBindings"]
>
}
clearServiceInterfaces(
...[]: Parameters<T.Effects["clearServiceInterfaces"]>
) {
return this.rpcRound("clearServiceInterfaces", null) as ReturnType<
T.Effects["clearServiceInterfaces"]
>
}
createOverlayedImage(options: {
imageId: string
}): Promise<[string, string]> {
return this.rpcRound("createOverlayedImage", options) as ReturnType<
T.Effects["createOverlayedImage"]
>
}
destroyOverlayedImage(options: { guid: string }): Promise<void> {
return this.rpcRound("destroyOverlayedImage", options) as ReturnType<
T.Effects["destroyOverlayedImage"]
>
}
executeAction(...[options]: Parameters<T.Effects["executeAction"]>) {
return this.rpcRound("executeAction", options) as ReturnType<
T.Effects["executeAction"]
>
}
exists(...[packageId]: Parameters<T.Effects["exists"]>) {
return this.rpcRound("exists", packageId) as ReturnType<T.Effects["exists"]>
}
exportAction(...[options]: Parameters<T.Effects["exportAction"]>) {
return this.rpcRound("exportAction", options) as ReturnType<
T.Effects["exportAction"]
>
}
exportServiceInterface: Effects["exportServiceInterface"] = (
...[options]: Parameters<Effects["exportServiceInterface"]>
) => {
return this.rpcRound("exportServiceInterface", options) as ReturnType<
T.Effects["exportServiceInterface"]
>
}
exposeForDependents(...[options]: any) {
return this.rpcRound("exposeForDependents", null) as ReturnType<
T.Effects["exposeForDependents"]
>
}
getConfigured(...[]: Parameters<T.Effects["getConfigured"]>) {
return this.rpcRound("getConfigured", null) as ReturnType<
T.Effects["getConfigured"]
>
}
getContainerIp(...[]: Parameters<T.Effects["getContainerIp"]>) {
return this.rpcRound("getContainerIp", null) as ReturnType<
T.Effects["getContainerIp"]
>
}
getHostInfo: Effects["getHostInfo"] = (...[allOptions]: any[]) => {
const options = {
...allOptions,
callback: this.callbackHolder.addCallback(allOptions.callback),
}
return this.rpcRound("getHostInfo", options) as ReturnType<
T.Effects["getHostInfo"]
> as any
}
getServiceInterface(
...[options]: Parameters<T.Effects["getServiceInterface"]>
) {
return this.rpcRound("getServiceInterface", {
...options,
callback: this.callbackHolder.addCallback(options.callback),
}) as ReturnType<T.Effects["getServiceInterface"]>
}
getPrimaryUrl(...[options]: Parameters<T.Effects["getPrimaryUrl"]>) {
return this.rpcRound("getPrimaryUrl", {
...options,
callback: this.callbackHolder.addCallback(options.callback),
}) as ReturnType<T.Effects["getPrimaryUrl"]>
}
getServicePortForward(
...[options]: Parameters<T.Effects["getServicePortForward"]>
) {
return this.rpcRound("getServicePortForward", options) as ReturnType<
T.Effects["getServicePortForward"]
>
}
getSslCertificate(options: Parameters<T.Effects["getSslCertificate"]>[0]) {
return this.rpcRound("getSslCertificate", options) as ReturnType<
T.Effects["getSslCertificate"]
>
}
getSslKey(options: Parameters<T.Effects["getSslKey"]>[0]) {
return this.rpcRound("getSslKey", options) as ReturnType<
T.Effects["getSslKey"]
>
}
getSystemSmtp(...[options]: Parameters<T.Effects["getSystemSmtp"]>) {
return this.rpcRound("getSystemSmtp", {
...options,
callback: this.callbackHolder.addCallback(options.callback),
}) as ReturnType<T.Effects["getSystemSmtp"]>
}
listServiceInterfaces(
...[options]: Parameters<T.Effects["listServiceInterfaces"]>
) {
return this.rpcRound("listServiceInterfaces", {
...options,
callback: this.callbackHolder.addCallback(options.callback),
}) as ReturnType<T.Effects["listServiceInterfaces"]>
}
mount(...[options]: Parameters<T.Effects["mount"]>) {
return this.rpcRound("mount", options) as ReturnType<T.Effects["mount"]>
}
removeAction(...[options]: Parameters<T.Effects["removeAction"]>) {
return this.rpcRound("removeAction", options) as ReturnType<
T.Effects["removeAction"]
>
}
removeAddress(...[options]: Parameters<T.Effects["removeAddress"]>) {
return this.rpcRound("removeAddress", options) as ReturnType<
T.Effects["removeAddress"]
>
}
restart(...[]: Parameters<T.Effects["restart"]>) {
return this.rpcRound("restart", null)
}
reverseProxy(...[options]: Parameters<T.Effects["reverseProxy"]>) {
return this.rpcRound("reverseProxy", options) as ReturnType<
T.Effects["reverseProxy"]
>
}
running(...[packageId]: Parameters<T.Effects["running"]>) {
return this.rpcRound("running", { packageId }) as ReturnType<
T.Effects["running"]
>
}
// runRsync(...[options]: Parameters<T.Effects[""]>) {
//
// return this.rpcRound('executeAction', options) as ReturnType<T.Effects["executeAction"]>
//
// return this.rpcRound('executeAction', options) as ReturnType<T.Effects["executeAction"]>
// }
setConfigured(...[configured]: Parameters<T.Effects["setConfigured"]>) {
return this.rpcRound("setConfigured", { configured }) as ReturnType<
T.Effects["setConfigured"]
>
}
setDependencies(
...[dependencies]: Parameters<T.Effects["setDependencies"]>
): ReturnType<T.Effects["setDependencies"]> {
return this.rpcRound("setDependencies", { dependencies }) as ReturnType<
T.Effects["setDependencies"]
>
}
setHealth(...[options]: Parameters<T.Effects["setHealth"]>) {
return this.rpcRound("setHealth", options) as ReturnType<
T.Effects["setHealth"]
>
}
setMainStatus(o: { status: "running" | "stopped" }): Promise<void> {
return this.rpcRound("setMainStatus", o) as ReturnType<
T.Effects["setHealth"]
>
}
shutdown(...[]: Parameters<T.Effects["shutdown"]>) {
return this.rpcRound("shutdown", null)
}
stopped(...[packageId]: Parameters<T.Effects["stopped"]>) {
return this.rpcRound("stopped", { packageId }) as ReturnType<
T.Effects["stopped"]
>
}
store: T.Effects["store"] = {
get: async (options: any) =>
this.rpcRound("getStore", {
...options,
callback: this.callbackHolder.addCallback(options.callback),
}) as any,
set: async (options: any) =>
this.rpcRound("setStore", options) as ReturnType<
T.Effects["store"]["set"]
>,
}
}
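
For reference, the deleted class above speaks newline-delimited JSON-RPC over a unix socket: one `{ id, method, params }` object per line out, and one `{ result }` or `{ error }` object per line back. A condensed sketch of that framing (the helper name is ours):

```ts
import * as net from "net"

const SOCKET_PATH = "/media/startos/rpc/host.sock"

// One request per line out; one `{ result }` or `{ error }` object per line back.
function rpcCall(id: number, method: string, params: unknown): Promise<unknown> {
  return new Promise((resolve, reject) => {
    const client = net.createConnection({ path: SOCKET_PATH }, () => {
      client.write(JSON.stringify({ id, method, params }) + "\n")
    })
    const bufs: Buffer[] = []
    client.on("data", (data) => {
      bufs.push(data)
      if (data.includes(10)) {
        // A newline byte (10) terminates the response.
        const res = JSON.parse(Buffer.concat(bufs).toString().split("\n")[0])
        if ("error" in res) reject(new Error(`${res.error.message}@${method}`))
        else resolve(res.result)
        client.end()
      }
    })
    client.on("error", reject)
  })
}
```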

View File

@@ -15,15 +15,16 @@ import {
} from "ts-matches"
import { types as T } from "@start9labs/start-sdk"
import * as CP from "child_process"
import * as Mod from "module"
import * as fs from "fs"
import { CallbackHolder } from "../Models/CallbackHolder"
import { AllGetDependencies } from "../Interfaces/AllGetDependencies"
import { HostSystem } from "../Interfaces/HostSystem"
import { jsonPath } from "../Models/JsonPath"
import { System } from "../Interfaces/System"
import { RunningMain, System } from "../Interfaces/System"
import {
MakeMainEffects,
MakeProcedureEffects,
} from "../Interfaces/MakeEffects"
type MaybePromise<T> = T | Promise<T>
export const matchRpcResult = anyOf(
object({ result: any }),
@@ -45,7 +46,7 @@ export const matchRpcResult = anyOf(
}),
)
export type RpcResult = typeof matchRpcResult._TYPE
type SocketResponse = { jsonrpc: "2.0"; id: IdType } & RpcResult
type SocketResponse = ({ jsonrpc: "2.0"; id: IdType } & RpcResult) | null
const SOCKET_PARENT = "/media/startos/rpc"
const SOCKET_PATH = "/media/startos/rpc/service.sock"
@@ -58,11 +59,12 @@ const runType = object({
method: literal("execute"),
params: object(
{
id: string,
procedure: string,
input: any,
timeout: number,
},
["timeout"],
["timeout", "input"],
),
})
const sandboxRunType = object({
@@ -70,18 +72,18 @@ const sandboxRunType = object({
method: literal("sandbox"),
params: object(
{
id: string,
procedure: string,
input: any,
timeout: number,
},
["timeout"],
["timeout", "input"],
),
})
const callbackType = object({
id: idType,
method: literal("callback"),
params: object({
callback: string,
callback: number,
args: array,
}),
})
@@ -89,6 +91,14 @@ const initType = object({
id: idType,
method: literal("init"),
})
const startType = object({
id: idType,
method: literal("start"),
})
const stopType = object({
id: idType,
method: literal("stop"),
})
const exitType = object({
id: idType,
method: literal("exit"),
@@ -101,34 +111,41 @@ const evalType = object({
}),
})
const jsonParse = (x: Buffer) => JSON.parse(x.toString())
function reduceMethod(
methodArgs: object,
effects: HostSystem,
): (previousValue: any, currentValue: string) => any {
return (x: any, method: string) =>
Promise.resolve(x)
.then((x) => x[method])
.then((x) =>
typeof x !== "function"
? x
: x({
...methodArgs,
effects,
}),
const jsonParse = (x: string) => JSON.parse(x)
const handleRpc = (id: IdType, result: Promise<RpcResult>) =>
result
.then((result) => ({
jsonrpc,
id,
...result,
}))
.then((x) => {
if (
("result" in x && x.result === undefined) ||
!("error" in x || "result" in x)
)
}
(x as any).result = null
return x
})
.catch((error) => ({
jsonrpc,
id,
error: {
code: 0,
message: typeof error,
data: { details: "" + error, debug: error?.stack },
},
}))
const hasId = object({ id: idType }).test
export class RpcListener {
unixSocketServer = net.createServer(async (server) => {})
private _system: System | undefined
private _effects: HostSystem | undefined
private _makeProcedureEffects: MakeProcedureEffects | undefined
private _makeMainEffects: MakeMainEffects | undefined
constructor(
readonly getDependencies: AllGetDependencies,
private callbacks = new CallbackHolder(),
) {
constructor(readonly getDependencies: AllGetDependencies) {
if (!fs.existsSync(SOCKET_PARENT)) {
fs.mkdirSync(SOCKET_PARENT, { recursive: true })
}
@@ -160,125 +177,140 @@ export class RpcListener {
details: error?.message ?? String(error),
debug: error?.stack,
},
code: 0,
code: 1,
},
})
const writeDataToSocket = (x: SocketResponse) =>
new Promise((resolve) => s.write(JSON.stringify(x), resolve))
const writeDataToSocket = (x: SocketResponse) => {
if (x != null) {
return new Promise((resolve) =>
s.write(JSON.stringify(x) + "\n", resolve),
)
}
}
s.on("data", (a) =>
Promise.resolve(a)
.then((b) => b.toString())
.then(logData("dataIn"))
.then(jsonParse)
.then(captureId)
.then((x) => this.dealWithInput(x))
.catch(mapError)
.then(logData("response"))
.then(writeDataToSocket)
.finally(() => void s.end()),
.then(writeDataToSocket),
)
})
}
private get effects() {
return this.getDependencies.hostSystem()(this.callbacks)
}
private get system() {
if (!this._system) throw new Error("System not initialized")
return this._system
}
private get makeProcedureEffects() {
if (!this._makeProcedureEffects) {
this._makeProcedureEffects = this.getDependencies.makeProcedureEffects()
}
return this._makeProcedureEffects
}
private get makeMainEffects() {
if (!this._makeMainEffects) {
this._makeMainEffects = this.getDependencies.makeMainEffects()
}
return this._makeMainEffects
}
private dealWithInput(input: unknown): MaybePromise<SocketResponse> {
return matches(input)
.when(some(runType, sandboxRunType), async ({ id, params }) => {
.when(runType, async ({ id, params }) => {
const system = this.system
const procedure = jsonPath.unsafeCast(params.procedure)
return system
.execute(this.effects, {
const effects = this.getDependencies.makeProcedureEffects()(params.id)
return handleRpc(
id,
system.execute(effects, {
procedure,
input: params.input,
timeout: params.timeout,
})
.then((result) => ({
jsonrpc,
id,
...result,
}))
.then((x) => {
if (
("result" in x && x.result === undefined) ||
!("error" in x || "result" in x)
)
(x as any).result = null
return x
})
.catch((error) => ({
jsonrpc,
id,
error: {
code: 0,
message: typeof error,
data: { details: "" + error, debug: error?.stack },
},
}))
}),
)
})
.when(callbackType, async ({ id, params: { callback, args } }) =>
Promise.resolve(this.callbacks.callCallback(callback, args))
.then((result) => ({
jsonrpc,
id,
result,
}))
.catch((error) => ({
jsonrpc,
id,
error: {
code: 0,
message: typeof error,
data: {
details: error?.message ?? String(error),
debug: error?.stack,
},
},
})),
)
.when(exitType, async ({ id }) => {
if (this._system) this._system.exit(this.effects)
delete this._system
delete this._effects
return {
jsonrpc,
.when(sandboxRunType, async ({ id, params }) => {
const system = this.system
const procedure = jsonPath.unsafeCast(params.procedure)
const effects = this.makeProcedureEffects(params.id)
return handleRpc(
id,
result: null,
}
system.sandbox(effects, {
procedure,
input: params.input,
timeout: params.timeout,
}),
)
})
.when(callbackType, async ({ params: { callback, args } }) => {
this.system.callCallback(callback, args)
return null
})
.when(startType, async ({ id }) => {
return handleRpc(
id,
this.system
.start(this.makeMainEffects())
.then((result) => ({ result })),
)
})
.when(stopType, async ({ id }) => {
return handleRpc(
id,
this.system.stop().then((result) => ({ result })),
)
})
.when(exitType, async ({ id }) => {
return handleRpc(
id,
(async () => {
if (this._system) await this._system.exit()
})().then((result) => ({ result })),
)
})
.when(initType, async ({ id }) => {
this._system = await this.getDependencies.system()
return {
jsonrpc,
return handleRpc(
id,
result: null,
}
(async () => {
if (!this._system) {
const system = await this.getDependencies.system()
await system.init()
this._system = system
}
})().then((result) => ({ result })),
)
})
.when(evalType, async ({ id, params }) => {
const result = await new Function(
`return (async () => { return (${params.script}) }).call(this)`,
).call({
listener: this,
require: require,
})
return {
jsonrpc,
return handleRpc(
id,
result: !["string", "number", "boolean", "null", "object"].includes(
typeof result,
)
? null
: result,
}
(async () => {
const result = await new Function(
`return (async () => { return (${params.script}) }).call(this)`,
).call({
listener: this,
require: require,
})
return {
jsonrpc,
id,
result: ![
"string",
"number",
"boolean",
"null",
"object",
].includes(typeof result)
? null
: result,
}
})(),
)
})
.when(shape({ id: idType, method: string }), ({ id, method }) => ({
jsonrpc,

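The `handleRpc` helper introduced above normalizes every reply so it is valid JSON-RPC: a reply must carry either `result` or `error`, and since JSON has no `undefined`, a missing or `undefined` result is coerced to `null`. A self-contained sketch of that rule (types and helper name are ours):

```ts
type Id = number | string | null

type RpcError = { code: number; message: string; data?: unknown }

type RpcReply =
  | { jsonrpc: "2.0"; id: Id; result: unknown }
  | { jsonrpc: "2.0"; id: Id; error: RpcError }

// Coerce a raw handler outcome into a well-formed reply, the way handleRpc does.
function normalize(id: Id, x: { result?: unknown; error?: RpcError }): RpcReply {
  if (x.error) return { jsonrpc: "2.0", id, error: x.error }
  // JSON has no `undefined`: a missing or undefined result becomes null.
  return { jsonrpc: "2.0", id, result: x.result === undefined ? null : x.result }
}

// JSON.stringify(normalize(1, {}))            // '{"jsonrpc":"2.0","id":1,"result":null}'
// JSON.stringify(normalize(2, { result: 5 })) // '{"jsonrpc":"2.0","id":2,"result":5}'
```
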
View File

@@ -14,10 +14,11 @@ export class DockerProcedureContainer {
// }
static async of(
effects: T.Effects,
packageId: string,
data: DockerProcedure,
volumes: { [id: VolumeId]: Volume },
) {
const overlay = await Overlay.of(effects, data.image)
const overlay = await Overlay.of(effects, { id: data.image })
if (data.mounts) {
const mounts = data.mounts
@@ -38,16 +39,25 @@ export class DockerProcedureContainer {
mounts[mount],
)
} else if (volumeMount.type === "certificate") {
volumeMount
const hostnames = [
`${packageId}.embassy`,
...new Set(
Object.values(
(
await effects.getHostInfo({
hostId: volumeMount["interface-id"],
})
)?.hostnameInfo || {},
)
.flatMap((h) => h)
.flatMap((h) => (h.kind === "onion" ? [h.hostname.value] : [])),
).values(),
]
const certChain = await effects.getSslCertificate({
packageId: null,
hostId: volumeMount["interface-id"],
algorithm: null,
hostnames,
})
const key = await effects.getSslKey({
packageId: null,
hostId: volumeMount["interface-id"],
algorithm: null,
hostnames,
})
await fs.writeFile(
`${path}/${volumeMount["interface-id"]}.cert.pem`,
@@ -58,17 +68,19 @@ export class DockerProcedureContainer {
key,
)
} else if (volumeMount.type === "pointer") {
await effects.mount({
location: path,
target: {
packageId: volumeMount["package-id"],
subpath: volumeMount.path,
readonly: volumeMount.readonly,
volumeId: volumeMount["volume-id"],
},
})
await effects
.mount({
location: path,
target: {
packageId: volumeMount["package-id"],
subpath: volumeMount.path,
readonly: volumeMount.readonly,
volumeId: volumeMount["volume-id"],
},
})
.catch(console.warn)
} else if (volumeMount.type === "backup") {
throw new Error("TODO")
await overlay.mount({ type: "backup", subpath: null }, mounts[mount])
}
}
}
@@ -84,10 +96,19 @@ export class DockerProcedureContainer {
}
}
async execSpawn(commands: string[]) {
async execFail(commands: string[], timeoutMs: number | null) {
try {
const spawned = await this.overlay.spawn(commands)
return spawned
const res = await this.overlay.exec(commands, {}, timeoutMs)
if (res.exitCode !== 0) {
const codeOrSignal =
res.exitCode !== null
? `code ${res.exitCode}`
: `signal ${res.exitSignal}`
throw new Error(
`Process exited with ${codeOrSignal}: ${res.stderr.toString()}`,
)
}
return res
} finally {
await this.overlay.destroy()
}
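
`execFail` above replaces `execSpawn` with run-to-completion semantics: it executes the command with an optional timeout, throws if the process exits non-zero (distinguishing exit codes from signals and attaching stderr), and always destroys the overlay. A hypothetical call site — the package id, command, and the `stdout` field on the result are assumptions:

```ts
import { DockerProcedureContainer } from "./DockerProcedureContainer" // assumed path

// Hypothetical call site for execFail (package id and command illustrative).
async function runMigrationCheck(effects: any, manifest: any) {
  const container = await DockerProcedureContainer.of(
    effects,
    "my-package",
    manifest.main, // a DockerProcedure
    manifest.volumes,
  )
  try {
    // Run to completion with a 30s timeout; the overlay is destroyed either way.
    return await container.execFail(["migrate", "--check"], 30_000)
  } catch (e) {
    // Non-zero exit or death-by-signal lands here, with stderr in the message.
    console.error(e)
    throw e
  }
}
```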

View File

@@ -1,8 +1,10 @@
import { PolyfillEffects } from "./polyfillEffects"
import { polyfillEffects } from "./polyfillEffects"
import { DockerProcedureContainer } from "./DockerProcedureContainer"
import { SystemForEmbassy } from "."
import { HostSystemStartOs } from "../../HostSystemStartOs"
import { Daemons, T, daemons } from "@start9labs/start-sdk"
import { T, utils } from "@start9labs/start-sdk"
import { Daemon } from "@start9labs/start-sdk/cjs/lib/mainFn/Daemon"
import { Effects } from "../../../Models/Effects"
import { off } from "node:process"
const EMBASSY_HEALTH_INTERVAL = 15 * 1000
const EMBASSY_PROPERTIES_LOOP = 30 * 1000
@@ -12,25 +14,28 @@ const EMBASSY_PROPERTIES_LOOP = 30 * 1000
* It can also clean itself up if need be.
*/
export class MainLoop {
private healthLoops:
| {
name: string
interval: NodeJS.Timeout
}[]
| undefined
private healthLoops?: {
name: string
interval: NodeJS.Timeout
}[]
private mainEvent:
| Promise<{
daemon: T.DaemonReturned
wait: Promise<unknown>
}>
| undefined
constructor(
private mainEvent?: {
daemon: Daemon
}
private constructor(
readonly system: SystemForEmbassy,
readonly effects: HostSystemStartOs,
) {
this.healthLoops = this.constructHealthLoops()
this.mainEvent = this.constructMainEvent()
readonly effects: Effects,
) {}
static async of(
system: SystemForEmbassy,
effects: Effects,
): Promise<MainLoop> {
const res = new MainLoop(system, effects)
res.healthLoops = res.constructHealthLoops()
res.mainEvent = await res.constructMainEvent()
return res
}
private async constructMainEvent() {
@@ -40,44 +45,76 @@ export class MainLoop {
...system.manifest.main.args,
]
await this.setupInterfaces(effects)
await effects.setMainStatus({ status: "running" })
const jsMain = (this.system.moduleCode as any)?.jsMain
const dockerProcedureContainer = await DockerProcedureContainer.of(
effects,
this.system.manifest.id,
this.system.manifest.main,
this.system.manifest.volumes,
)
if (jsMain) {
const daemons = Daemons.of({
effects,
started: async (_) => {},
healthReceipts: [],
})
throw new Error("todo")
// return {
// daemon,
// wait: daemon.wait().finally(() => {
// this.clean()
// effects.setMainStatus({ status: "stopped" })
// }),
// }
throw new Error("Unreachable")
}
const daemon = await daemons.runDaemon()(
const daemon = await Daemon.of()(
this.effects,
this.system.manifest.main.image,
{ id: this.system.manifest.main.image },
currentCommand,
{
overlay: dockerProcedureContainer.overlay,
sigtermTimeout: utils.inMs(
this.system.manifest.main["sigterm-timeout"],
),
},
)
daemon.start()
return {
daemon,
wait: daemon.wait().finally(() => {
this.clean()
effects
.setMainStatus({ status: "stopped" })
.catch((e) => console.error("Could not set the status to stopped"))
}),
}
}
private async setupInterfaces(effects: T.Effects) {
for (const interfaceId in this.system.manifest.interfaces) {
const iface = this.system.manifest.interfaces[interfaceId]
const internalPorts = new Set<number>()
for (const port of Object.values(
iface["tor-config"]?.["port-mapping"] || {},
)) {
internalPorts.add(parseInt(port))
}
for (const port of Object.values(iface["lan-config"] || {})) {
internalPorts.add(port.internal)
}
for (const internalPort of internalPorts) {
const torConf = Object.entries(
iface["tor-config"]?.["port-mapping"] || {},
)
.map(([external, internal]) => ({
internal: parseInt(internal),
external: parseInt(external),
}))
.find((conf) => conf.internal == internalPort)
const lanConf = Object.entries(iface["lan-config"] || {})
.map(([external, conf]) => ({
external: parseInt(external),
...conf,
}))
.find((conf) => conf.internal == internalPort)
await effects.bind({
kind: "multi",
id: interfaceId,
internalPort,
preferredExternalPort: torConf?.external || internalPort,
secure: null,
addSsl: lanConf?.ssl
? {
preferredExternalPort: lanConf.external,
alpn: { specified: ["http/1.1"] },
}
: null,
})
}
}
}
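
`setupInterfaces` above translates each 0.3.x manifest interface into `bind` calls: internal ports are collected from the Tor port-mapping and LAN config, the Tor external port (if any) becomes `preferredExternalPort`, and an SSL-enabled LAN entry becomes `addSsl`. A worked example with illustrative manifest values:

```ts
// Illustrative 0.3.x interface fragment (interface id "main"):
const iface = {
  "tor-config": { "port-mapping": { "80": "8080" } },
  "lan-config": { "443": { internal: 8080, ssl: true } },
}

// The loop collects internalPorts = {8080}, derives
//   torConf = { internal: 8080, external: 80 }
//   lanConf = { external: 443, internal: 8080, ssl: true }
// and therefore issues:
//
// await effects.bind({
//   kind: "multi",
//   id: "main",                 // the interface id
//   internalPort: 8080,
//   preferredExternalPort: 80,  // Tor mapping wins; falls back to internalPort
//   secure: null,
//   addSsl: {
//     preferredExternalPort: 443,
//     alpn: { specified: ["http/1.1"] },
//   },
// })
```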
@@ -86,7 +123,8 @@ export class MainLoop {
const main = await mainEvent
delete this.mainEvent
delete this.healthLoops
if (mainEvent) await main?.daemon.term()
await main?.daemon.stop().catch((e) => console.error(e))
this.effects.setMainStatus({ status: "stopped" })
if (healthLoops) healthLoops.forEach((x) => clearInterval(x.interval))
}
@@ -102,14 +140,24 @@ export class MainLoop {
if (actionProcedure.type === "docker") {
const container = await DockerProcedureContainer.of(
effects,
manifest.id,
actionProcedure,
manifest.volumes,
)
const executed = await container.execSpawn([
const executed = await container.exec([
actionProcedure.entrypoint,
...actionProcedure.args,
JSON.stringify(timeChanged),
])
if (executed.exitCode === 0) {
await effects.setHealth({
id: healthId,
name: value.name,
result: "success",
message: actionProcedure["success-message"],
})
return
}
if (executed.exitCode === 59) {
await effects.setHealth({
id: healthId,
@@ -173,7 +221,7 @@ export class MainLoop {
}
const result = await method(
new PolyfillEffects(effects, this.system.manifest),
polyfillEffects(effects, this.system.manifest),
timeChanged,
)
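
The health-check hunk above encodes results by exit status: 0 reports "success" with the procedure's success-message, and 59 is singled out for special handling. The 59 branch is truncated in this diff; mapping it to "disabled" follows the 0.3.x packaging convention and is an assumption here. A condensed sketch:

```ts
// Exit-code convention for 0.3.x health checks (the 59 mapping is assumed).
type HealthResult = "success" | "disabled" | "failure"

function healthResultFor(exitCode: number | null): HealthResult {
  if (exitCode === 0) return "success" // reported with the success-message
  if (exitCode === 59) return "disabled" // service opted out of this check
  return "failure" // anything else is reported as failing
}
```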

View File

@@ -0,0 +1,387 @@
export default {
"peer-tor-address": {
name: "Peer Tor Address",
description: "The Tor address of the peer interface",
type: "pointer",
subtype: "package",
"package-id": "bitcoind",
target: "tor-address",
interface: "peer",
},
"rpc-tor-address": {
name: "RPC Tor Address",
description: "The Tor address of the RPC interface",
type: "pointer",
subtype: "package",
"package-id": "bitcoind",
target: "tor-address",
interface: "rpc",
},
rpc: {
type: "object",
name: "RPC Settings",
description: "RPC configuration options.",
spec: {
enable: {
type: "boolean",
name: "Enable",
description: "Allow remote RPC requests.",
default: true,
},
username: {
type: "string",
nullable: false,
name: "Username",
description: "The username for connecting to Bitcoin over RPC.",
warning:
"You will need to restart all services that depend on Bitcoin.",
default: "bitcoin",
masked: true,
pattern: "^[a-zA-Z0-9_]+$",
"pattern-description": "Must be alphanumeric (can contain underscore).",
},
password: {
type: "string",
nullable: false,
name: "RPC Password",
description: "The password for connecting to Bitcoin over RPC.",
warning:
"You will need to restart all services that depend on Bitcoin.",
default: {
charset: "a-z,2-7",
len: 20,
},
pattern: "^[a-zA-Z0-9_]+$",
"pattern-description": "Must be alphanumeric (can contain underscore).",
copyable: true,
masked: true,
},
advanced: {
type: "object",
name: "Advanced",
description: "Advanced RPC Settings",
spec: {
auth: {
name: "Authorization",
description:
"Username and hashed password for JSON-RPC connections. RPC clients connect using the usual http basic authentication.",
type: "list",
subtype: "string",
default: [],
spec: {
pattern: "^[a-zA-Z0-9_-]+:([0-9a-fA-F]{2})+\\$([0-9a-fA-F]{2})+$",
"pattern-description":
'Each item must be of the form "<USERNAME>:<SALT>$<HASH>".',
},
range: "[0,*)",
},
servertimeout: {
name: "Rpc Server Timeout",
description:
"Number of seconds after which an uncompleted RPC call will time out.",
type: "number",
nullable: false,
range: "[5,300]",
integral: true,
units: "seconds",
default: 30,
},
threads: {
name: "Threads",
description:
"Set the number of threads for handling RPC calls. You may wish to increase this if you are making lots of calls via an integration.",
type: "number",
nullable: false,
default: 16,
range: "[1,64]",
integral: true,
units: undefined,
},
workqueue: {
name: "Work Queue",
description:
"Set the depth of the work queue to service RPC calls. Determines how long the backlog of RPC requests can get before it just rejects new ones.",
type: "number",
nullable: false,
default: 128,
range: "[8,256]",
integral: true,
units: "requests",
},
},
},
},
},
"zmq-enabled": {
type: "boolean",
name: "ZeroMQ Enabled",
description:
"The ZeroMQ interface is useful for some applications which might require data related to block and transaction events from Bitcoin Core. For example, LND requires ZeroMQ be enabled for LND to get the latest block data",
default: true,
},
txindex: {
type: "boolean",
name: "Transaction Index",
description:
"By enabling Transaction Index (txindex) Bitcoin Core will build a complete transaction index. This allows Bitcoin Core to access any transaction with commands like `gettransaction`.",
default: true,
},
coinstatsindex: {
type: "boolean",
name: "Coinstats Index",
description:
"Enabling Coinstats Index reduces the time for the gettxoutsetinfo RPC to complete at the cost of using additional disk space",
default: false,
},
wallet: {
type: "object",
name: "Wallet",
description: "Wallet Settings",
spec: {
enable: {
name: "Enable Wallet",
description: "Load the wallet and enable wallet RPC calls.",
type: "boolean",
default: true,
},
avoidpartialspends: {
name: "Avoid Partial Spends",
description:
"Group outputs by address, selecting all or none, instead of selecting on a per-output basis. This improves privacy at the expense of higher transaction fees.",
type: "boolean",
default: true,
},
discardfee: {
name: "Discard Change Tolerance",
description:
"The fee rate (in BTC/kB) that indicates your tolerance for discarding change by adding it to the fee.",
type: "number",
nullable: false,
default: 0.0001,
range: "[0,.01]",
integral: false,
units: "BTC/kB",
},
},
},
advanced: {
type: "object",
name: "Advanced",
description: "Advanced Settings",
spec: {
mempool: {
type: "object",
name: "Mempool",
description: "Mempool Settings",
spec: {
persistmempool: {
type: "boolean",
name: "Persist Mempool",
description: "Save the mempool on shutdown and load on restart.",
default: true,
},
maxmempool: {
type: "number",
nullable: false,
name: "Max Mempool Size",
description:
"Keep the transaction memory pool below <n> megabytes.",
range: "[1,*)",
integral: true,
units: "MiB",
default: 300,
},
mempoolexpiry: {
type: "number",
nullable: false,
name: "Mempool Expiration",
description:
"Do not keep transactions in the mempool longer than <n> hours.",
range: "[1,*)",
integral: true,
units: "Hr",
default: 336,
},
mempoolfullrbf: {
name: "Enable Full RBF",
description:
"Policy for your node to use for relaying and mining unconfirmed transactions. For details, see https://github.com/bitcoin/bitcoin/blob/master/doc/release-notes/release-notes-24.0.1.md#notice-of-new-option-for-transaction-replacement-policies",
type: "boolean",
default: true,
},
permitbaremultisig: {
type: "boolean",
name: "Permit Bare Multisig",
description: "Relay non-P2SH multisig transactions",
default: true,
},
datacarrier: {
type: "boolean",
name: "Relay OP_RETURN Transactions",
description: "Relay transactions with OP_RETURN outputs",
default: true,
},
datacarriersize: {
type: "number",
nullable: false,
name: "Max OP_RETURN Size",
description: "Maximum size of data in OP_RETURN outputs to relay",
range: "[0,10000]",
integral: true,
units: "bytes",
default: 83,
},
},
},
peers: {
type: "object",
name: "Peers",
description: "Peer Connection Settings",
spec: {
listen: {
type: "boolean",
name: "Make Public",
description:
"Allow other nodes to find your server on the network.",
default: true,
},
onlyconnect: {
type: "boolean",
name: "Disable Peer Discovery",
description: "Only connect to specified peers.",
default: false,
},
onlyonion: {
type: "boolean",
name: "Disable Clearnet",
description: "Only connect to peers over Tor.",
default: false,
},
v2transport: {
type: "boolean",
name: "Use V2 P2P Transport Protocol",
description:
"Enable or disable the use of BIP324 V2 P2P transport protocol.",
default: false,
},
addnode: {
name: "Add Nodes",
description: "Add addresses of nodes to connect to.",
type: "list",
subtype: "object",
range: "[0,*)",
default: [],
spec: {
spec: {
hostname: {
type: "string",
nullable: false,
name: "Hostname",
description: "Domain or IP address of bitcoin peer",
pattern:
"(^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$)|((^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))$)|(^[a-z2-7]{16}\\.onion$)|(^([a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?\\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$))",
"pattern-description":
"Must be either a domain name, or an IPv4 or IPv6 address. Do not include protocol scheme (eg 'http://') or port.",
},
port: {
type: "number",
nullable: true,
name: "Port",
description:
"Port that peer is listening on for inbound p2p connections",
range: "[0,65535]",
integral: true,
},
},
},
},
},
},
pruning: {
type: "union",
name: "Pruning Settings",
description:
"Blockchain Pruning Options\nReduce the blockchain size on disk\n",
warning:
"Disabling pruning will convert your node into a full archival node. This requires a resync of the entire blockchain, a process that may take several days.\n",
tag: {
id: "mode",
name: "Pruning Mode",
description:
"- Disabled: Disable pruning\n- Automatic: Limit blockchain size on disk to a certain number of megabytes\n",
"variant-names": {
disabled: "Disabled",
automatic: "Automatic",
},
},
variants: {
disabled: {},
automatic: {
size: {
type: "number",
nullable: false,
name: "Max Chain Size",
description: "Limit of blockchain size on disk.",
warning:
"Increasing this value will require re-syncing your node.",
default: 550,
range: "[550,1000000)",
integral: true,
units: "MiB",
},
},
},
default: "disabled",
},
dbcache: {
type: "number",
nullable: true,
name: "Database Cache",
description:
"How much RAM to allocate for caching the TXO set. Higher values improve syncing performance, but increase your chance of using up all your system's memory or corrupting your database in the event of an ungraceful shutdown. Set this high but comfortably below your system's total RAM during IBD, then turn down to 450 (or leave blank) once the sync completes.",
warning:
"WARNING: Increasing this value results in a higher chance of ungraceful shutdowns, which can leave your node unusable if it happens during the initial block download. Use this setting with caution. Be sure to set this back to the default (450 or leave blank) once your node is synced. DO NOT press the STOP button if your dbcache is large. Instead, set this number back to the default, hit save, and wait for bitcoind to restart on its own.",
range: "(0,*)",
integral: true,
units: "MiB",
},
blockfilters: {
type: "object",
name: "Block Filters",
description: "Settings for storing and serving compact block filters",
spec: {
blockfilterindex: {
type: "boolean",
name: "Compute Compact Block Filters (BIP158)",
description:
"Generate Compact Block Filters during initial sync (IBD) to enable 'getblockfilter' RPC. This is useful if dependent services need block filters to efficiently scan for addresses/transactions etc.",
default: true,
},
peerblockfilters: {
type: "boolean",
name: "Serve Compact Block Filters to Peers (BIP157)",
description:
"Serve Compact Block Filters as a peer service to other nodes on the network. This is useful if you wish to connect an SPV client to your node to make it efficient to scan transactions without having to download all block data. 'Compute Compact Block Filters (BIP158)' is required.",
default: false,
},
},
},
bloomfilters: {
type: "object",
name: "Bloom Filters (BIP37)",
description: "Setting for serving Bloom Filters",
spec: {
peerbloomfilters: {
type: "boolean",
name: "Serve Bloom Filters to Peers",
description:
"Peers have the option of setting filters on each connection they make after the version handshake has completed. Bloom filters are for clients implementing SPV (Simplified Payment Verification) that want to check that block headers connect together correctly, without needing to verify the full blockchain. The client must trust that the transactions in the chain are in fact valid. It is highly recommended AGAINST using for anything except Bisq integration.",
warning:
"This is ONLY for use with Bisq integration, please use Block Filters for all other applications.",
default: false,
},
},
},
},
},
}
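
The numeric fields above express `range` in interval notation (`"[5,300]"`, `"[1,*)"`, `"(0,*)"`, `"[550,1000000)"`). Judging by the `transformConfigSpec` snapshot at the end of this diff, the transform maps these onto flat `min`/`max` fields, decrementing an exclusive integer upper bound and dropping a zero lower bound to `null`. A sketch of a parser that reproduces the snapshot values — our reconstruction, not the shipped code:

```ts
// Reconstructed interval parser (matches the snapshot values below).
function parseRange(range: string): { min: number | null; max: number | null } {
  const m = range.match(/^[\[\(]\s*([^,]+)\s*,\s*([^\]\)]+)\s*([\]\)])$/)
  if (!m) throw new Error(`invalid range: ${range}`)
  const [, minRaw, maxRaw, hi] = m
  const minNum = minRaw === "*" ? null : Number(minRaw)
  let max = maxRaw === "*" ? null : Number(maxRaw)
  if (max !== null && hi === ")") max = max - 1 // exclusive integer upper bound
  const min = minNum ? minNum : null // 0 (and "*") fall through to null, per the snapshot
  return { min, max }
}

// parseRange("[5,300]")       -> { min: 5,    max: 300 }
// parseRange("[1,*)")         -> { min: 1,    max: null }
// parseRange("(0,*)")         -> { min: null, max: null }
// parseRange("[550,1000000)") -> { min: 550,  max: 999999 }
// parseRange("[0,65535]")     -> { min: null, max: 65535 }
```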

View File

@@ -0,0 +1,127 @@
export default {
homepage: {
name: "Homepage",
description:
"The page that will be displayed when your Start9 Pages .onion address is visited. Since this page is technically publicly accessible, you can choose to which type of page to display.",
type: "union",
default: "welcome",
tag: {
id: "type",
name: "Type",
"variant-names": {
welcome: "Welcome",
index: "Table of Contents",
"web-page": "Web Page",
redirect: "Redirect",
},
},
variants: {
welcome: {},
index: {},
"web-page": {
source: {
name: "Folder Location",
description: "The service that contains your website files.",
type: "enum",
values: ["filebrowser", "nextcloud"],
"value-names": {},
default: "nextcloud",
},
folder: {
type: "string",
name: "Folder Path",
placeholder: "e.g. websites/resume",
description:
'The path to the folder that contains the static files of your website. For example, a value of "projects/resume" would tell Start9 Pages to look for that folder path in the selected service.',
pattern:
"^(\\.|[a-zA-Z0-9_ -][a-zA-Z0-9_ .-]*|([a-zA-Z0-9_ .-][a-zA-Z0-9_ -]+\\.*)+)(/[a-zA-Z0-9_ -][a-zA-Z0-9_ .-]*|/([a-zA-Z0-9_ .-][a-zA-Z0-9_ -]+\\.*)+)*/?$",
"pattern-description": "Must be a valid relative file path",
nullable: false,
},
},
redirect: {
target: {
type: "string",
name: "Target Subdomain",
description:
"The name of the subdomain to redirect users to. This must be a valid subdomain site within your Start9 Pages.",
pattern: "^[a-z-]+$",
"pattern-description":
"May contain only lowercase characters and hyphens.",
nullable: false,
},
},
},
},
subdomains: {
type: "list",
name: "Subdomains",
description: "The websites you want to serve.",
default: [],
range: "[0, *)",
subtype: "object",
spec: {
"unique-by": "name",
"display-as": "{{name}}",
spec: {
name: {
type: "string",
nullable: false,
name: "Subdomain name",
description:
'The subdomain of your Start9 Pages .onion address to host the website on. For example, a value of "me" would produce a website hosted at http://me.xxxxxx.onion.',
pattern: "^[a-z-]+$",
"pattern-description":
"May contain only lowercase characters and hyphens",
},
settings: {
type: "union",
name: "Settings",
description:
"The desired behavior you want to occur when the subdomain is visited. You can either redirect to another subdomain, or load a stored web page.",
default: "web-page",
tag: {
id: "type",
name: "Type",
"variant-names": { "web-page": "Web Page", redirect: "Redirect" },
},
variants: {
"web-page": {
source: {
name: "Folder Location",
description: "The service that contains your website files.",
type: "enum",
values: ["filebrowser", "nextcloud"],
"value-names": {},
default: "nextcloud",
},
folder: {
type: "string",
name: "Folder Path",
placeholder: "e.g. websites/resume",
description:
'The path to the folder that contains the website files. For example, a value of "projects/resume" would tell Start9 Pages to look for that folder path in the selected service.',
pattern:
"^(\\.|[a-zA-Z0-9_ -][a-zA-Z0-9_ .-]*|([a-zA-Z0-9_ .-][a-zA-Z0-9_ -]+\\.*)+)(/[a-zA-Z0-9_ -][a-zA-Z0-9_ .-]*|/([a-zA-Z0-9_ .-][a-zA-Z0-9_ -]+\\.*)+)*/?$",
"pattern-description": "Must be a valid relative file path",
nullable: false,
},
},
redirect: {
target: {
type: "string",
name: "Target Subdomain",
description:
"The subdomain of your Start9 Pages .onion address to redirect to. This should be the name of another subdomain on Start9 Pages. Leave empty to redirect to the homepage.",
pattern: "^[a-z-]+$",
"pattern-description":
"May contain only lowercase characters and hyphens.",
nullable: false,
},
},
},
},
},
},
},
}
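
The `homepage` union above is the input that produces the `transformConfigSpec(embassyPages)` snapshot below: the old union's own `name`/`description` are dropped (`description` becomes `null`), `tag.name` becomes the transformed field's `name`, `tag["variant-names"]` supplies each variant's display name, and every variant body is wrapped in `{ name, spec }`. A condensed before/after, hand-written from the visible spec and snapshot:

```ts
// Before (0.3.x spec, abridged):
const before = {
  type: "union",
  default: "welcome",
  tag: {
    id: "type",
    name: "Type",
    "variant-names": { welcome: "Welcome", index: "Table of Contents" },
  },
  variants: { welcome: {}, index: {} },
}

// After (as serialized in the transformConfigSpec snapshot):
const after = {
  type: "union",
  name: "Type", // from tag.name
  description: null, // the old union description is not carried over
  default: "welcome",
  required: true,
  variants: {
    welcome: { name: "Welcome", spec: {} },
    index: { name: "Table of Contents", spec: {} },
  },
}
```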

View File

@@ -0,0 +1,28 @@
export default {
"tor-address": {
name: "Tor Address",
description: "The Tor address of the network interface",
type: "pointer",
subtype: "package",
"package-id": "nostr-wallet-connect",
target: "tor-address",
interface: "main",
},
"lan-address": {
name: "LAN Address",
description: "The LAN address of the network interface",
type: "pointer",
subtype: "package",
"package-id": "nostr-wallet-connect",
target: "lan-address",
interface: "main",
},
"nostr-relay": {
type: "string",
name: "Nostr Relay",
default: "wss://relay.getalby.com/v1",
description: "The Nostr Relay to use for Nostr Wallet Connect connections",
copyable: true,
nullable: false,
},
}

View File

@@ -0,0 +1,39 @@
export default {
"instance-name": {
type: "string",
name: "SearXNG Instance Name",
description:
"Enter a name for your SearXNG instance. This is the name that will be listed if you want to share your SearXNG engine publicly.",
nullable: false,
default: "My SearXNG Engine",
placeholder: "Uncle Jim SearXNG Engine",
},
"tor-url": {
name: "Enable Tor address as the base URL",
description:
"Activates the utilization of a .onion address as the primary URL, particularly beneficial for publicly hosted instances over the Tor network.",
type: "boolean",
default: false,
},
"enable-metrics": {
name: "Enable Stats",
description:
"Your SearXNG instance will collect anonymous stats about its own usage and performance. You can view these metrics by appending `/stats` or `/stats/errors` to your SearXNG URL.",
type: "boolean",
default: true,
}, //,
// "email-address": {
// "type": "string",
// "name": "Email Address",
// "description": "Your Email address - required to create an SSL certificate.",
// "nullable": false,
// "default": "youremail@domain.com",
// },
// "public-host": {
// "type": "string",
// "name": "Public Domain Name",
// "description": "Enter a domain name here if you want to share your SearXNG engine publicly. You will also need to modify your domain name's DNS settings to point to your Start9 server.",
// "nullable": true,
// "placeholder": "https://search.mydomain.com"
// }
}

View File

@@ -0,0 +1,791 @@
// Jest Snapshot v1, https://goo.gl/fbAQLP
exports[`transformConfigSpec transformConfigSpec(bitcoind) 1`] = `
{
"advanced": {
"description": "Advanced Settings",
"name": "Advanced",
"spec": {
"blockfilters": {
"description": "Settings for storing and serving compact block filters",
"name": "Block Filters",
"spec": {
"blockfilterindex": {
"default": true,
"description": "Generate Compact Block Filters during initial sync (IBD) to enable 'getblockfilter' RPC. This is useful if dependent services need block filters to efficiently scan for addresses/transactions etc.",
"disabled": false,
"immutable": false,
"name": "Compute Compact Block Filters (BIP158)",
"type": "toggle",
"warning": null,
},
"peerblockfilters": {
"default": false,
"description": "Serve Compact Block Filters as a peer service to other nodes on the network. This is useful if you wish to connect an SPV client to your node to make it efficient to scan transactions without having to download all block data. 'Compute Compact Block Filters (BIP158)' is required.",
"disabled": false,
"immutable": false,
"name": "Serve Compact Block Filters to Peers (BIP157)",
"type": "toggle",
"warning": null,
},
},
"type": "object",
"warning": null,
},
"bloomfilters": {
"description": "Setting for serving Bloom Filters",
"name": "Bloom Filters (BIP37)",
"spec": {
"peerbloomfilters": {
"default": false,
"description": "Peers have the option of setting filters on each connection they make after the version handshake has completed. Bloom filters are for clients implementing SPV (Simplified Payment Verification) that want to check that block headers connect together correctly, without needing to verify the full blockchain. The client must trust that the transactions in the chain are in fact valid. It is highly recommended AGAINST using for anything except Bisq integration.",
"disabled": false,
"immutable": false,
"name": "Serve Bloom Filters to Peers",
"type": "toggle",
"warning": "This is ONLY for use with Bisq integration, please use Block Filters for all other applications.",
},
},
"type": "object",
"warning": null,
},
"dbcache": {
"default": null,
"description": "How much RAM to allocate for caching the TXO set. Higher values improve syncing performance, but increase your chance of using up all your system's memory or corrupting your database in the event of an ungraceful shutdown. Set this high but comfortably below your system's total RAM during IBD, then turn down to 450 (or leave blank) once the sync completes.",
"disabled": false,
"immutable": false,
"integer": true,
"max": null,
"min": null,
"name": "Database Cache",
"placeholder": null,
"required": false,
"step": null,
"type": "number",
"units": "MiB",
"warning": "WARNING: Increasing this value results in a higher chance of ungraceful shutdowns, which can leave your node unusable if it happens during the initial block download. Use this setting with caution. Be sure to set this back to the default (450 or leave blank) once your node is synced. DO NOT press the STOP button if your dbcache is large. Instead, set this number back to the default, hit save, and wait for bitcoind to restart on its own.",
},
"mempool": {
"description": "Mempool Settings",
"name": "Mempool",
"spec": {
"datacarrier": {
"default": true,
"description": "Relay transactions with OP_RETURN outputs",
"disabled": false,
"immutable": false,
"name": "Relay OP_RETURN Transactions",
"type": "toggle",
"warning": null,
},
"datacarriersize": {
"default": 83,
"description": "Maximum size of data in OP_RETURN outputs to relay",
"disabled": false,
"immutable": false,
"integer": true,
"max": 10000,
"min": null,
"name": "Max OP_RETURN Size",
"placeholder": null,
"required": true,
"step": null,
"type": "number",
"units": "bytes",
"warning": null,
},
"maxmempool": {
"default": 300,
"description": "Keep the transaction memory pool below <n> megabytes.",
"disabled": false,
"immutable": false,
"integer": true,
"max": null,
"min": 1,
"name": "Max Mempool Size",
"placeholder": null,
"required": true,
"step": null,
"type": "number",
"units": "MiB",
"warning": null,
},
"mempoolexpiry": {
"default": 336,
"description": "Do not keep transactions in the mempool longer than <n> hours.",
"disabled": false,
"immutable": false,
"integer": true,
"max": null,
"min": 1,
"name": "Mempool Expiration",
"placeholder": null,
"required": true,
"step": null,
"type": "number",
"units": "Hr",
"warning": null,
},
"mempoolfullrbf": {
"default": true,
"description": "Policy for your node to use for relaying and mining unconfirmed transactions. For details, see https://github.com/bitcoin/bitcoin/blob/master/doc/release-notes/release-notes-24.0.1.md#notice-of-new-option-for-transaction-replacement-policies",
"disabled": false,
"immutable": false,
"name": "Enable Full RBF",
"type": "toggle",
"warning": null,
},
"permitbaremultisig": {
"default": true,
"description": "Relay non-P2SH multisig transactions",
"disabled": false,
"immutable": false,
"name": "Permit Bare Multisig",
"type": "toggle",
"warning": null,
},
"persistmempool": {
"default": true,
"description": "Save the mempool on shutdown and load on restart.",
"disabled": false,
"immutable": false,
"name": "Persist Mempool",
"type": "toggle",
"warning": null,
},
},
"type": "object",
"warning": null,
},
"peers": {
"description": "Peer Connection Settings",
"name": "Peers",
"spec": {
"addnode": {
"default": [],
"description": "Add addresses of nodes to connect to.",
"disabled": false,
"maxLength": null,
"minLength": null,
"name": "Add Nodes",
"spec": {
"displayAs": null,
"spec": {
"hostname": {
"default": null,
"description": "Domain or IP address of bitcoin peer",
"disabled": false,
"generate": null,
"immutable": false,
"inputmode": "text",
"masked": false,
"maxLength": null,
"minLength": null,
"name": "Hostname",
"patterns": [
{
"description": "Must be either a domain name, or an IPv4 or IPv6 address. Do not include protocol scheme (eg 'http://') or port.",
"regex": "(^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$)|((^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))$)|(^[a-z2-7]{16}\\.onion$)|(^([a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?\\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$))",
},
],
"placeholder": null,
"required": true,
"type": "text",
"warning": null,
},
"port": {
"default": null,
"description": "Port that peer is listening on for inbound p2p connections",
"disabled": false,
"immutable": false,
"integer": true,
"max": 65535,
"min": null,
"name": "Port",
"placeholder": null,
"required": false,
"step": null,
"type": "number",
"units": null,
"warning": null,
},
},
"type": "object",
"uniqueBy": null,
},
"type": "list",
"warning": null,
},
"listen": {
"default": true,
"description": "Allow other nodes to find your server on the network.",
"disabled": false,
"immutable": false,
"name": "Make Public",
"type": "toggle",
"warning": null,
},
"onlyconnect": {
"default": false,
"description": "Only connect to specified peers.",
"disabled": false,
"immutable": false,
"name": "Disable Peer Discovery",
"type": "toggle",
"warning": null,
},
"onlyonion": {
"default": false,
"description": "Only connect to peers over Tor.",
"disabled": false,
"immutable": false,
"name": "Disable Clearnet",
"type": "toggle",
"warning": null,
},
"v2transport": {
"default": false,
"description": "Enable or disable the use of BIP324 V2 P2P transport protocol.",
"disabled": false,
"immutable": false,
"name": "Use V2 P2P Transport Protocol",
"type": "toggle",
"warning": null,
},
},
"type": "object",
"warning": null,
},
"pruning": {
"default": "disabled",
"description": "- Disabled: Disable pruning
- Automatic: Limit blockchain size on disk to a certain number of megabytes
",
"disabled": false,
"immutable": false,
"name": "Pruning Mode",
"required": true,
"type": "union",
"variants": {
"automatic": {
"name": "Automatic",
"spec": {
"size": {
"default": 550,
"description": "Limit of blockchain size on disk.",
"disabled": false,
"immutable": false,
"integer": true,
"max": 999999,
"min": 550,
"name": "Max Chain Size",
"placeholder": null,
"required": true,
"step": null,
"type": "number",
"units": "MiB",
"warning": "Increasing this value will require re-syncing your node.",
},
},
},
"disabled": {
"name": "Disabled",
"spec": {},
},
},
"warning": null,
},
},
"type": "object",
"warning": null,
},
"coinstatsindex": {
"default": false,
"description": "Enabling Coinstats Index reduces the time for the gettxoutsetinfo RPC to complete at the cost of using additional disk space",
"disabled": false,
"immutable": false,
"name": "Coinstats Index",
"type": "toggle",
"warning": null,
},
"rpc": {
"description": "RPC configuration options.",
"name": "RPC Settings",
"spec": {
"advanced": {
"description": "Advanced RPC Settings",
"name": "Advanced",
"spec": {
"auth": {
"default": [],
"description": "Username and hashed password for JSON-RPC connections. RPC clients connect using the usual http basic authentication.",
"disabled": false,
"maxLength": null,
"minLength": null,
"name": "Authorization",
"spec": {
"generate": null,
"inputmode": "text",
"masked": false,
"maxLength": null,
"minLength": null,
"patterns": [
{
"description": "Each item must be of the form "<USERNAME>:<SALT>$<HASH>".",
"regex": "^[a-zA-Z0-9_-]+:([0-9a-fA-F]{2})+\\$([0-9a-fA-F]{2})+$",
},
],
"placeholder": null,
"type": "text",
},
"type": "list",
"warning": null,
},
"servertimeout": {
"default": 30,
"description": "Number of seconds after which an uncompleted RPC call will time out.",
"disabled": false,
"immutable": false,
"integer": true,
"max": 300,
"min": 5,
"name": "Rpc Server Timeout",
"placeholder": null,
"required": true,
"step": null,
"type": "number",
"units": "seconds",
"warning": null,
},
"threads": {
"default": 16,
"description": "Set the number of threads for handling RPC calls. You may wish to increase this if you are making lots of calls via an integration.",
"disabled": false,
"immutable": false,
"integer": true,
"max": 64,
"min": 1,
"name": "Threads",
"placeholder": null,
"required": true,
"step": null,
"type": "number",
"units": null,
"warning": null,
},
"workqueue": {
"default": 128,
"description": "Set the depth of the work queue to service RPC calls. Determines how long the backlog of RPC requests can get before it just rejects new ones.",
"disabled": false,
"immutable": false,
"integer": true,
"max": 256,
"min": 8,
"name": "Work Queue",
"placeholder": null,
"required": true,
"step": null,
"type": "number",
"units": "requests",
"warning": null,
},
},
"type": "object",
"warning": null,
},
"enable": {
"default": true,
"description": "Allow remote RPC requests.",
"disabled": false,
"immutable": false,
"name": "Enable",
"type": "toggle",
"warning": null,
},
"password": {
"default": {
"charset": "a-z,2-7",
"len": 20,
},
"description": "The password for connecting to Bitcoin over RPC.",
"disabled": false,
"generate": null,
"immutable": false,
"inputmode": "text",
"masked": true,
"maxLength": null,
"minLength": null,
"name": "RPC Password",
"patterns": [
{
"description": "Must be alphanumeric (can contain underscore).",
"regex": "^[a-zA-Z0-9_]+$",
},
],
"placeholder": null,
"required": true,
"type": "text",
"warning": "You will need to restart all services that depend on Bitcoin.",
},
"username": {
"default": "bitcoin",
"description": "The username for connecting to Bitcoin over RPC.",
"disabled": false,
"generate": null,
"immutable": false,
"inputmode": "text",
"masked": true,
"maxLength": null,
"minLength": null,
"name": "Username",
"patterns": [
{
"description": "Must be alphanumeric (can contain underscore).",
"regex": "^[a-zA-Z0-9_]+$",
},
],
"placeholder": null,
"required": true,
"type": "text",
"warning": "You will need to restart all services that depend on Bitcoin.",
},
},
"type": "object",
"warning": null,
},
"txindex": {
"default": true,
"description": "By enabling Transaction Index (txindex) Bitcoin Core will build a complete transaction index. This allows Bitcoin Core to access any transaction with commands like \`gettransaction\`.",
"disabled": false,
"immutable": false,
"name": "Transaction Index",
"type": "toggle",
"warning": null,
},
"wallet": {
"description": "Wallet Settings",
"name": "Wallet",
"spec": {
"avoidpartialspends": {
"default": true,
"description": "Group outputs by address, selecting all or none, instead of selecting on a per-output basis. This improves privacy at the expense of higher transaction fees.",
"disabled": false,
"immutable": false,
"name": "Avoid Partial Spends",
"type": "toggle",
"warning": null,
},
"discardfee": {
"default": 0.0001,
"description": "The fee rate (in BTC/kB) that indicates your tolerance for discarding change by adding it to the fee.",
"disabled": false,
"immutable": false,
"integer": false,
"max": 0.01,
"min": null,
"name": "Discard Change Tolerance",
"placeholder": null,
"required": true,
"step": null,
"type": "number",
"units": "BTC/kB",
"warning": null,
},
"enable": {
"default": true,
"description": "Load the wallet and enable wallet RPC calls.",
"disabled": false,
"immutable": false,
"name": "Enable Wallet",
"type": "toggle",
"warning": null,
},
},
"type": "object",
"warning": null,
},
"zmq-enabled": {
"default": true,
"description": "The ZeroMQ interface is useful for some applications which might require data related to block and transaction events from Bitcoin Core. For example, LND requires ZeroMQ be enabled for LND to get the latest block data",
"disabled": false,
"immutable": false,
"name": "ZeroMQ Enabled",
"type": "toggle",
"warning": null,
},
}
`;
exports[`transformConfigSpec transformConfigSpec(embassyPages) 1`] = `
{
"homepage": {
"default": "welcome",
"description": null,
"disabled": false,
"immutable": false,
"name": "Type",
"required": true,
"type": "union",
"variants": {
"index": {
"name": "Table of Contents",
"spec": {},
},
"redirect": {
"name": "Redirect",
"spec": {
"target": {
"default": null,
"description": "The name of the subdomain to redirect users to. This must be a valid subdomain site within your Start9 Pages.",
"disabled": false,
"generate": null,
"immutable": false,
"inputmode": "text",
"masked": false,
"maxLength": null,
"minLength": null,
"name": "Target Subdomain",
"patterns": [
{
"description": "May contain only lowercase characters and hyphens.",
"regex": "^[a-z-]+$",
},
],
"placeholder": null,
"required": true,
"type": "text",
"warning": null,
},
},
},
"web-page": {
"name": "Web Page",
"spec": {
"folder": {
"default": null,
"description": "The path to the folder that contains the static files of your website. For example, a value of "projects/resume" would tell Start9 Pages to look for that folder path in the selected service.",
"disabled": false,
"generate": null,
"immutable": false,
"inputmode": "text",
"masked": false,
"maxLength": null,
"minLength": null,
"name": "Folder Path",
"patterns": [
{
"description": "Must be a valid relative file path",
"regex": "^(\\.|[a-zA-Z0-9_ -][a-zA-Z0-9_ .-]*|([a-zA-Z0-9_ .-][a-zA-Z0-9_ -]+\\.*)+)(/[a-zA-Z0-9_ -][a-zA-Z0-9_ .-]*|/([a-zA-Z0-9_ .-][a-zA-Z0-9_ -]+\\.*)+)*/?$",
},
],
"placeholder": "e.g. websites/resume",
"required": true,
"type": "text",
"warning": null,
},
"source": {
"default": "nextcloud",
"description": "The service that contains your website files.",
"disabled": false,
"immutable": false,
"name": "Folder Location",
"required": false,
"type": "select",
"values": {
"filebrowser": "filebrowser",
"nextcloud": "nextcloud",
},
"warning": null,
},
},
},
"welcome": {
"name": "Welcome",
"spec": {},
},
},
"warning": null,
},
"subdomains": {
"default": [],
"description": "The websites you want to serve.",
"disabled": false,
"maxLength": null,
"minLength": null,
"name": "Subdomains",
"spec": {
"displayAs": "{{name}}",
"spec": {
"name": {
"default": null,
"description": "The subdomain of your Start9 Pages .onion address to host the website on. For example, a value of "me" would produce a website hosted at http://me.xxxxxx.onion.",
"disabled": false,
"generate": null,
"immutable": false,
"inputmode": "text",
"masked": false,
"maxLength": null,
"minLength": null,
"name": "Subdomain name",
"patterns": [
{
"description": "May contain only lowercase characters and hyphens",
"regex": "^[a-z-]+$",
},
],
"placeholder": null,
"required": true,
"type": "text",
"warning": null,
},
"settings": {
"default": "web-page",
"description": null,
"disabled": false,
"immutable": false,
"name": "Type",
"required": true,
"type": "union",
"variants": {
"redirect": {
"name": "Redirect",
"spec": {
"target": {
"default": null,
"description": "The subdomain of your Start9 Pages .onion address to redirect to. This should be the name of another subdomain on Start9 Pages. Leave empty to redirect to the homepage.",
"disabled": false,
"generate": null,
"immutable": false,
"inputmode": "text",
"masked": false,
"maxLength": null,
"minLength": null,
"name": "Target Subdomain",
"patterns": [
{
"description": "May contain only lowercase characters and hyphens.",
"regex": "^[a-z-]+$",
},
],
"placeholder": null,
"required": true,
"type": "text",
"warning": null,
},
},
},
"web-page": {
"name": "Web Page",
"spec": {
"folder": {
"default": null,
"description": "The path to the folder that contains the website files. For example, a value of "projects/resume" would tell Start9 Pages to look for that folder path in the selected service.",
"disabled": false,
"generate": null,
"immutable": false,
"inputmode": "text",
"masked": false,
"maxLength": null,
"minLength": null,
"name": "Folder Path",
"patterns": [
{
"description": "Must be a valid relative file path",
"regex": "^(\\.|[a-zA-Z0-9_ -][a-zA-Z0-9_ .-]*|([a-zA-Z0-9_ .-][a-zA-Z0-9_ -]+\\.*)+)(/[a-zA-Z0-9_ -][a-zA-Z0-9_ .-]*|/([a-zA-Z0-9_ .-][a-zA-Z0-9_ -]+\\.*)+)*/?$",
},
],
"placeholder": "e.g. websites/resume",
"required": true,
"type": "text",
"warning": null,
},
"source": {
"default": "nextcloud",
"description": "The service that contains your website files.",
"disabled": false,
"immutable": false,
"name": "Folder Location",
"required": false,
"type": "select",
"values": {
"filebrowser": "filebrowser",
"nextcloud": "nextcloud",
},
"warning": null,
},
},
},
},
"warning": null,
},
},
"type": "object",
"uniqueBy": "name",
},
"type": "list",
"warning": null,
},
}
`;
exports[`transformConfigSpec transformConfigSpec(nostr) 1`] = `
{
"nostr-relay": {
"default": "wss://relay.getalby.com/v1",
"description": "The Nostr Relay to use for Nostr Wallet Connect connections",
"disabled": false,
"generate": null,
"immutable": false,
"inputmode": "text",
"masked": false,
"maxLength": null,
"minLength": null,
"name": "Nostr Relay",
"patterns": [],
"placeholder": null,
"required": true,
"type": "text",
"warning": null,
},
}
`;
exports[`transformConfigSpec transformConfigSpec(searNXG) 1`] = `
{
"enable-metrics": {
"default": true,
"description": "Your SearXNG instance will collect anonymous stats about its own usage and performance. You can view these metrics by appending \`/stats\` or \`/stats/errors\` to your SearXNG URL.",
"disabled": false,
"immutable": false,
"name": "Enable Stats",
"type": "toggle",
"warning": null,
},
"instance-name": {
"default": "My SearXNG Engine",
"description": "Enter a name for your SearXNG instance. This is the name that will be listed if you want to share your SearXNG engine publicly.",
"disabled": false,
"generate": null,
"immutable": false,
"inputmode": "text",
"masked": false,
"maxLength": null,
"minLength": null,
"name": "SearXNG Instance Name",
"patterns": [],
"placeholder": "Uncle Jim SearXNG Engine",
"required": true,
"type": "text",
"warning": null,
},
"tor-url": {
"default": false,
"description": "Activates the utilization of a .onion address as the primary URL, particularly beneficial for publicly hosted instances over the Tor network.",
"disabled": false,
"immutable": false,
"name": "Enable Tor address as the base URL",
"type": "toggle",
"warning": null,
},
}
`;

View File

@@ -57,6 +57,7 @@ export const matchManifest = object(
matchProcedure,
object({
name: string,
["success-message"]: string,
}),
),
]),
@@ -68,13 +69,25 @@ export const matchManifest = object(
volumes: dictionary([string, matchVolume]),
interfaces: dictionary([
string,
object({
name: string,
"tor-config": object({}),
"lan-config": object({}),
ui: boolean,
protocols: array(string),
}),
object(
{
name: string,
description: string,
"tor-config": object({
"port-mapping": dictionary([string, string]),
}),
"lan-config": dictionary([
string,
object({
ssl: boolean,
internal: number,
}),
]),
ui: boolean,
protocols: array(string),
},
["lan-config", "tor-config"],
),
]),
backup: object({
create: matchProcedure,

View File

@@ -99,17 +99,8 @@ export type Effects = {
/** Sandbox mode lets us read but not write */
is_sandboxed(): boolean
// Does a volume and path exist?
exists(input: { volumeId: string; path: string }): Promise<boolean>
bindLocal(options: {
internalPort: number
name: string
externalPort: number
}): Promise<string>
bindTor(options: {
internalPort: number
name: string
externalPort: number
}): Promise<string>
fetch(
url: string,
@@ -129,6 +120,10 @@ export type Effects = {
/// Returns the body as a json
json(): Promise<unknown>
}>
diskUsage(options?: {
volumeId: string
path: string
}): Promise<{ used: number; total: number }>
runRsync(options: {
srcVolume: string

View File

@@ -3,213 +3,434 @@ import * as oet from "./oldEmbassyTypes"
import { Volume } from "../../../Models/Volume"
import * as child_process from "child_process"
import { promisify } from "util"
import { startSdk } from "@start9labs/start-sdk"
import { HostSystemStartOs } from "../../HostSystemStartOs"
import { daemons, startSdk, T } from "@start9labs/start-sdk"
import "isomorphic-fetch"
import { Manifest } from "./matchManifest"
const execFile = promisify(child_process.execFile)
export class PolyfillEffects implements oet.Effects {
constructor(
readonly effects: HostSystemStartOs,
private manifest: Manifest,
) {}
async writeFile(input: {
path: string
volumeId: string
toWrite: string
}): Promise<void> {
await fs.writeFile(
new Volume(input.volumeId, input.path).path,
input.toWrite,
)
}
async readFile(input: { volumeId: string; path: string }): Promise<string> {
return (
await fs.readFile(new Volume(input.volumeId, input.path).path)
).toString()
}
async metadata(input: {
volumeId: string
path: string
}): Promise<oet.Metadata> {
const stats = await fs.stat(new Volume(input.volumeId, input.path).path)
return {
fileType: stats.isFile() ? "file" : "directory",
gid: stats.gid,
uid: stats.uid,
mode: stats.mode,
isDir: stats.isDirectory(),
isFile: stats.isFile(),
isSymlink: stats.isSymbolicLink(),
len: stats.size,
readonly: (stats.mode & 0o200) > 0,
}
}
async createDir(input: { volumeId: string; path: string }): Promise<string> {
const path = new Volume(input.volumeId, input.path).path
await fs.mkdir(path, { recursive: true })
return path
}
async readDir(input: { volumeId: string; path: string }): Promise<string[]> {
return fs.readdir(new Volume(input.volumeId, input.path).path)
}
async removeDir(input: { volumeId: string; path: string }): Promise<string> {
const path = new Volume(input.volumeId, input.path).path
await fs.rmdir(new Volume(input.volumeId, input.path).path, {
recursive: true,
})
return path
}
removeFile(input: { volumeId: string; path: string }): Promise<void> {
return fs.rm(new Volume(input.volumeId, input.path).path)
}
async writeJsonFile(input: {
volumeId: string
path: string
toWrite: Record<string, unknown>
}): Promise<void> {
await fs.writeFile(
new Volume(input.volumeId, input.path).path,
JSON.stringify(input.toWrite),
)
}
async readJsonFile(input: {
volumeId: string
path: string
}): Promise<Record<string, unknown>> {
return JSON.parse(
(
await fs.readFile(new Volume(input.volumeId, input.path).path)
).toString(),
)
}
runCommand({
command,
args,
timeoutMillis,
}: {
command: string
args?: string[] | undefined
timeoutMillis?: number | undefined
}): Promise<oet.ResultType<string>> {
return startSdk
.runCommand(
this.effects,
this.manifest.main.image,
[command, ...(args || [])],
{},
import { DockerProcedureContainer } from "./DockerProcedureContainer"
import * as cp from "child_process"
import { Effects } from "../../../Models/Effects"
export const execFile = promisify(cp.execFile)
export const polyfillEffects = (
effects: Effects,
manifest: Manifest,
): oet.Effects => {
const self = {
effects,
manifest,
async writeFile(input: {
path: string
volumeId: string
toWrite: string
}): Promise<void> {
await fs.writeFile(
new Volume(input.volumeId, input.path).path,
input.toWrite,
)
.then((x: any) => ({
stderr: x.stderr.toString(),
stdout: x.stdout.toString(),
}))
.then((x) => (!!x.stderr ? { error: x.stderr } : { result: x.stdout }))
}
runDaemon(input: { command: string; args?: string[] | undefined }): {
wait(): Promise<oet.ResultType<string>>
term(): Promise<void>
} {
throw new Error("Method not implemented.")
}
chown(input: { volumeId: string; path: string; uid: string }): Promise<null> {
throw new Error("Method not implemented.")
}
chmod(input: {
volumeId: string
path: string
mode: string
}): Promise<null> {
throw new Error("Method not implemented.")
}
sleep(timeMs: number): Promise<null> {
return new Promise((resolve) => setTimeout(resolve, timeMs))
}
trace(whatToPrint: string): void {
console.trace(whatToPrint)
}
warn(whatToPrint: string): void {
console.warn(whatToPrint)
}
error(whatToPrint: string): void {
console.error(whatToPrint)
}
debug(whatToPrint: string): void {
console.debug(whatToPrint)
}
info(whatToPrint: string): void {
console.log(whatToPrint)
}
is_sandboxed(): boolean {
return false
}
exists(input: { volumeId: string; path: string }): Promise<boolean> {
return this.metadata(input)
.then(() => true)
.catch(() => false)
}
bindLocal(options: {
internalPort: number
name: string
externalPort: number
}): Promise<string> {
throw new Error("Method not implemented.")
}
bindTor(options: {
internalPort: number
name: string
externalPort: number
}): Promise<string> {
throw new Error("Method not implemented.")
}
async fetch(
url: string,
options?:
| {
method?:
| "GET"
| "POST"
| "PUT"
| "DELETE"
| "HEAD"
| "PATCH"
| undefined
headers?: Record<string, string> | undefined
body?: string | undefined
},
async readFile(input: { volumeId: string; path: string }): Promise<string> {
return (
await fs.readFile(new Volume(input.volumeId, input.path).path)
).toString()
},
async metadata(input: {
volumeId: string
path: string
}): Promise<oet.Metadata> {
const stats = await fs.stat(new Volume(input.volumeId, input.path).path)
return {
fileType: stats.isFile() ? "file" : "directory",
gid: stats.gid,
uid: stats.uid,
mode: stats.mode,
isDir: stats.isDirectory(),
isFile: stats.isFile(),
isSymlink: stats.isSymbolicLink(),
len: stats.size,
readonly: (stats.mode & 0o200) > 0,
}
},
async createDir(input: {
volumeId: string
path: string
}): Promise<string> {
const path = new Volume(input.volumeId, input.path).path
await fs.mkdir(path, { recursive: true })
return path
},
async readDir(input: {
volumeId: string
path: string
}): Promise<string[]> {
return fs.readdir(new Volume(input.volumeId, input.path).path)
},
async removeDir(input: {
volumeId: string
path: string
}): Promise<string> {
const path = new Volume(input.volumeId, input.path).path
await fs.rmdir(new Volume(input.volumeId, input.path).path, {
recursive: true,
})
return path
},
removeFile(input: { volumeId: string; path: string }): Promise<void> {
return fs.rm(new Volume(input.volumeId, input.path).path)
},
async writeJsonFile(input: {
volumeId: string
path: string
toWrite: Record<string, unknown>
}): Promise<void> {
await fs.writeFile(
new Volume(input.volumeId, input.path).path,
JSON.stringify(input.toWrite),
)
},
async readJsonFile(input: {
volumeId: string
path: string
}): Promise<Record<string, unknown>> {
return JSON.parse(
(
await fs.readFile(new Volume(input.volumeId, input.path).path)
).toString(),
)
},
runCommand({
command,
args,
timeoutMillis,
}: {
command: string
args?: string[] | undefined
timeoutMillis?: number | undefined
}): Promise<oet.ResultType<string>> {
return startSdk
.runCommand(
effects,
{ id: manifest.main.image },
[command, ...(args || [])],
{},
)
.then((x: any) => ({
stderr: x.stderr.toString(),
stdout: x.stdout.toString(),
}))
.then((x: any) =>
!!x.stderr ? { error: x.stderr } : { result: x.stdout },
)
},
runDaemon(input: { command: string; args?: string[] | undefined }): {
wait(): Promise<oet.ResultType<string>>
term(): Promise<void>
} {
const dockerProcedureContainer = DockerProcedureContainer.of(
effects,
manifest.id,
manifest.main,
manifest.volumes,
)
const daemon = dockerProcedureContainer.then((dockerProcedureContainer) =>
daemons.runCommand()(
effects,
{ id: manifest.main.image },
[input.command, ...(input.args || [])],
{
overlay: dockerProcedureContainer.overlay,
},
),
)
return {
wait: () =>
daemon.then((daemon) =>
daemon.wait().then(() => {
return { result: "" }
}),
),
term: () => daemon.then((daemon) => daemon.term()),
}
},
async chown(input: {
volumeId: string
path: string
uid: string
}): Promise<null> {
await startSdk
.runCommand(
effects,
{ id: manifest.main.image },
["chown", "--recursive", input.uid, `/drive/${input.path}`],
{
mounts: [
{
path: "/drive",
options: {
type: "volume",
id: input.volumeId,
subpath: null,
readonly: false,
},
},
],
},
)
.then((x: any) => ({
stderr: x.stderr.toString(),
stdout: x.stdout.toString(),
}))
.then((x: any) => {
if (!!x.stderr) {
throw new Error(x.stderr)
}
})
return null
},
async chmod(input: {
volumeId: string
path: string
mode: string
}): Promise<null> {
await startSdk
.runCommand(
effects,
{ id: manifest.main.image },
["chmod", "--recursive", input.mode, `/drive/${input.path}`],
{
mounts: [
{
path: "/drive",
options: {
type: "volume",
id: input.volumeId,
subpath: null,
readonly: false,
},
},
],
},
)
.then((x: any) => ({
stderr: x.stderr.toString(),
stdout: x.stdout.toString(),
}))
.then((x: any) => {
if (!!x.stderr) {
throw new Error(x.stderr)
}
})
return null
},
sleep(timeMs: number): Promise<null> {
return new Promise((resolve) => setTimeout(resolve, timeMs))
},
trace(whatToPrint: string): void {
console.trace(whatToPrint)
},
warn(whatToPrint: string): void {
console.warn(whatToPrint)
},
error(whatToPrint: string): void {
console.error(whatToPrint)
},
debug(whatToPrint: string): void {
console.debug(whatToPrint)
},
info(whatToPrint: string): void {
console.log(whatToPrint)
},
is_sandboxed(): boolean {
return false
},
exists(input: { volumeId: string; path: string }): Promise<boolean> {
return self
.metadata(input)
.then(() => true)
.catch(() => false)
},
async fetch(
url: string,
options?:
| {
method?:
| "GET"
| "POST"
| "PUT"
| "DELETE"
| "HEAD"
| "PATCH"
| undefined
headers?: Record<string, string> | undefined
body?: string | undefined
}
| undefined,
): Promise<{
method: string
ok: boolean
status: number
headers: Record<string, string>
body?: string | null | undefined
text(): Promise<string>
json(): Promise<unknown>
}> {
const fetched = await fetch(url, options)
return {
method: fetched.type,
ok: fetched.ok,
status: fetched.status,
headers: Object.fromEntries(fetched.headers.entries()),
body: await fetched.text(),
text: () => fetched.text(),
json: () => fetched.json(),
}
},
runRsync(rsyncOptions: {
srcVolume: string
dstVolume: string
srcPath: string
dstPath: string
options: oet.BackupOptions
}): {
id: () => Promise<string>
wait: () => Promise<null>
progress: () => Promise<number>
} {
let secondRun: ReturnType<typeof self._runRsync> | undefined
let firstRun = self._runRsync(rsyncOptions)
let waitValue = firstRun.wait().then((x) => {
secondRun = self._runRsync(rsyncOptions)
return secondRun.wait()
})
const id = async () => {
return secondRun?.id?.() ?? firstRun.id()
}
const wait = () => waitValue
const progress = async () => {
const secondProgress = secondRun?.progress?.()
if (secondProgress) {
return (await secondProgress) / 2.0 + 0.5
}
| undefined,
): Promise<{
method: string
ok: boolean
status: number
headers: Record<string, string>
body?: string | null | undefined
text(): Promise<string>
json(): Promise<unknown>
}> {
const fetched = await fetch(url, options)
return {
method: fetched.type,
ok: fetched.ok,
status: fetched.status,
headers: Object.fromEntries(fetched.headers.entries()),
body: await fetched.text(),
text: () => fetched.text(),
json: () => fetched.json(),
}
}
runRsync(options: {
srcVolume: string
dstVolume: string
srcPath: string
dstPath: string
options: oet.BackupOptions
}): {
id: () => Promise<string>
wait: () => Promise<null>
progress: () => Promise<number>
} {
throw new Error("Method not implemented.")
return (await firstRun.progress()) / 2.0
}
return { id, wait, progress }
},
_runRsync(rsyncOptions: {
srcVolume: string
dstVolume: string
srcPath: string
dstPath: string
options: oet.BackupOptions
}): {
id: () => Promise<string>
wait: () => Promise<null>
progress: () => Promise<number>
} {
const { srcVolume, dstVolume, srcPath, dstPath, options } = rsyncOptions
const command = "rsync"
const args: string[] = []
if (options.delete) {
args.push("--delete")
}
if (options.force) {
args.push("--force")
}
if (options.ignoreExisting) {
args.push("--ignore-existing")
}
for (const exclude of options.exclude) {
args.push(`--exclude=${exclude}`)
}
args.push("-actAXH")
args.push("--info=progress2")
args.push("--no-inc-recursive")
args.push(new Volume(srcVolume, srcPath).path)
args.push(new Volume(dstVolume, dstPath).path)
const spawned = child_process.spawn(command, args, { detached: true })
let percentage = 0.0
spawned.stdout.on("data", (data: unknown) => {
const lines = String(data).replace(/\r/g, "\n").split("\n")
for (const line of lines) {
const parsed = /([0-9.]+)%/.exec(line)?.[1]
if (!parsed) continue
percentage = Number.parseFloat(parsed)
}
})
spawned.stderr.on("data", (data: unknown) => {
console.error(String(data))
})
const id = async () => {
const pid = spawned.pid
if (pid === undefined) {
throw new Error("rsync process has no pid")
}
return String(pid)
}
const waitPromise = new Promise<null>((resolve, reject) => {
spawned.on("exit", (code: any) => {
if (code === 0) {
resolve(null)
} else {
reject(new Error(`rsync exited with code ${code}`))
}
})
})
const wait = () => waitPromise
const progress = () => Promise.resolve(percentage)
return { id, wait, progress }
},
async diskUsage(
options?: { volumeId: string; path: string } | undefined,
): Promise<{ used: number; total: number }> {
const output = await execFile("df", ["--block-size=1", "-P", "/"])
.then((x: any) => ({
stderr: x.stderr.toString(),
stdout: x.stdout.toString(),
}))
.then((x: any) => {
if (!!x.stderr) {
throw new Error(x.stderr)
}
return parseDfOutput(x.stdout)
})
if (!!options) {
const used = await execFile("du", [
"-s",
"--block-size=1",
"-P",
new Volume(options.volumeId, options.path).path,
])
.then((x: any) => ({
stderr: x.stderr.toString(),
stdout: x.stdout.toString(),
}))
.then((x: any) => {
if (!!x.stderr) {
throw new Error(x.stderr)
}
return Number.parseInt(x.stdout.split(/\s+/)[0])
})
return {
...output,
used,
}
}
return output
},
}
return self
}
function parseDfOutput(output: string): { used: number; total: number } {
const lines = output
.split("\n")
.filter((x) => x.length)
.map((x) => x.split(/\s+/))
const index = lines.splice(0, 1)[0].map((x) => x.toLowerCase())
const usedIndex = index.indexOf("used")
const availableIndex = index.indexOf("available")
const used = lines.map((x) => Number.parseInt(x[usedIndex]))[0] || 0
const total = lines.map((x) => Number.parseInt(x[availableIndex]))[0] || 0
return { used, total }
}
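For orientation, here is a minimal sketch of what `parseDfOutput` extracts (the sample row is illustrative; note that `total` is read from the `Available` column, i.e. remaining space rather than disk size):

```ts
// Illustrative `df -P --block-size=1` output; real headers and values vary by system.
const sample = [
  "Filesystem     1B-blocks      Used Available Capacity Mounted on",
  "/dev/sda1    512110190592 123456789 375523755      25% /",
].join("\n")

const { used, total } = parseDfOutput(sample)
// used === 123456789  (from the "Used" column)
// total === 375523755 (from the "Available" column)
```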

View File

@@ -0,0 +1,33 @@
import { matchOldConfigSpec, transformConfigSpec } from "./transformConfigSpec"
import fixtureEmbasyPagesConfig from "./__fixtures__/embasyPagesConfig"
import searNXG from "./__fixtures__/searNXG"
import bitcoind from "./__fixtures__/bitcoind"
import nostr from "./__fixtures__/nostr"
describe("transformConfigSpec", () => {
test("matchOldConfigSpec(embassyPages.homepage.variants[web-page])", () => {
matchOldConfigSpec.unsafeCast(
fixtureEmbasyPagesConfig.homepage.variants["web-page"],
)
})
test("matchOldConfigSpec(embassyPages)", () => {
matchOldConfigSpec.unsafeCast(fixtureEmbasyPagesConfig)
})
test("transformConfigSpec(embassyPages)", () => {
const spec = matchOldConfigSpec.unsafeCast(fixtureEmbasyPagesConfig)
expect(transformConfigSpec(spec)).toMatchSnapshot()
})
test("transformConfigSpec(searNXG)", () => {
const spec = matchOldConfigSpec.unsafeCast(searNXG)
expect(transformConfigSpec(spec)).toMatchSnapshot()
})
test("transformConfigSpec(bitcoind)", () => {
const spec = matchOldConfigSpec.unsafeCast(bitcoind)
expect(transformConfigSpec(spec)).toMatchSnapshot()
})
test("transformConfigSpec(nostr)", () => {
const spec = matchOldConfigSpec.unsafeCast(nostr)
expect(transformConfigSpec(spec)).toMatchSnapshot()
})
})

View File

@@ -0,0 +1,589 @@
import { CT } from "@start9labs/start-sdk"
import {
dictionary,
object,
anyOf,
string,
literals,
array,
number,
boolean,
Parser,
deferred,
every,
nill,
literal,
} from "ts-matches"
export function transformConfigSpec(oldSpec: OldConfigSpec): CT.InputSpec {
return Object.entries(oldSpec).reduce((inputSpec, [key, oldVal]) => {
let newVal: CT.ValueSpec
if (oldVal.type === "boolean") {
newVal = {
type: "toggle",
name: oldVal.name,
default: oldVal.default,
description: oldVal.description || null,
warning: oldVal.warning || null,
disabled: false,
immutable: false,
}
} else if (oldVal.type === "enum") {
newVal = {
type: "select",
name: oldVal.name,
description: oldVal.description || null,
warning: oldVal.warning || null,
default: oldVal.default,
values: oldVal.values.reduce(
(obj, curr) => ({
...obj,
[curr]: oldVal["value-names"][curr] || curr,
}),
{},
),
required: false,
disabled: false,
immutable: false,
}
} else if (oldVal.type === "list") {
newVal = getListSpec(oldVal)
} else if (oldVal.type === "number") {
const range = Range.from(oldVal.range)
newVal = {
type: "number",
name: oldVal.name,
default: oldVal.default || null,
description: oldVal.description || null,
warning: oldVal.warning || null,
disabled: false,
immutable: false,
required: !oldVal.nullable,
min: range.min
? range.minInclusive
? range.min
: range.min + 1
: null,
max: range.max
? range.maxInclusive
? range.max
: range.max - 1
: null,
integer: oldVal.integral,
step: null,
units: oldVal.units || null,
placeholder: oldVal.placeholder || null,
}
} else if (oldVal.type === "object") {
newVal = {
type: "object",
name: oldVal.name,
description: oldVal.description || null,
warning: oldVal.warning || null,
spec: transformConfigSpec(matchOldConfigSpec.unsafeCast(oldVal.spec)),
}
} else if (oldVal.type === "string") {
newVal = {
type: "text",
name: oldVal.name,
default: oldVal.default || null,
description: oldVal.description || null,
warning: oldVal.warning || null,
disabled: false,
immutable: false,
required: !oldVal.nullable,
patterns:
oldVal.pattern && oldVal["pattern-description"]
? [
{
regex: oldVal.pattern,
description: oldVal["pattern-description"],
},
]
: [],
minLength: null,
maxLength: null,
masked: oldVal.masked || false,
generate: null,
inputmode: "text",
placeholder: oldVal.placeholder || null,
}
} else if (oldVal.type === "union") {
newVal = {
type: "union",
name: oldVal.tag.name,
description: oldVal.tag.description || null,
warning: oldVal.tag.warning || null,
variants: Object.entries(oldVal.variants).reduce(
(obj, [id, spec]) => ({
...obj,
[id]: {
name: oldVal.tag["variant-names"][id] || id,
spec: transformConfigSpec(matchOldConfigSpec.unsafeCast(spec)),
},
}),
{} as Record<string, { name: string; spec: CT.InputSpec }>,
),
disabled: false,
required: true,
default: oldVal.default,
immutable: false,
}
} else if (oldVal.type === "pointer") {
return inputSpec
} else {
throw new Error(`unknown spec ${JSON.stringify(oldVal)}`)
}
return {
...inputSpec,
[key]: newVal,
}
}, {} as CT.InputSpec)
}
export function transformOldConfigToNew(
spec: OldConfigSpec,
config: Record<string, any>,
): Record<string, any> {
return Object.entries(spec).reduce((obj, [key, val]) => {
let newVal = config[key]
if (isObject(val)) {
newVal = transformOldConfigToNew(
matchOldConfigSpec.unsafeCast(val.spec),
config[key],
)
}
if (isUnion(val)) {
const selection = config[key][val.tag.id]
delete config[key][val.tag.id]
newVal = {
selection,
value: transformOldConfigToNew(
matchOldConfigSpec.unsafeCast(val.variants[selection]),
config[key],
),
}
}
if (isList(val) && isObjectList(val)) {
newVal = (config[key] as object[]).map((obj) =>
transformOldConfigToNew(
matchOldConfigSpec.unsafeCast(val.spec.spec),
obj,
),
)
}
if (isPointer(val)) {
return obj
}
return {
...obj,
[key]: newVal,
}
}, {})
}
export function transformNewConfigToOld(
spec: OldConfigSpec,
config: Record<string, any>,
): Record<string, any> {
return Object.entries(spec).reduce((obj, [key, val]) => {
let newVal = config[key]
if (isObject(val)) {
newVal = transformNewConfigToOld(
matchOldConfigSpec.unsafeCast(val.spec),
config[key],
)
}
if (isUnion(val)) {
newVal = {
[val.tag.id]: config[key].selection,
...transformNewConfigToOld(
matchOldConfigSpec.unsafeCast(val.variants[config[key].selection]),
config[key].value,
),
}
}
if (isList(val) && isObjectList(val)) {
newVal = (config[key] as object[]).map((obj) =>
transformNewConfigToOld(
matchOldConfigSpec.unsafeCast(val.spec.spec),
obj,
),
)
}
return {
...obj,
[key]: newVal,
}
}, {})
}
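// Sketch of the union round-trip handled above (shapes are illustrative):
//   old shape, tag field ("type") inline:  { mode: { type: "advanced", threads: 4 } }
//   transformOldConfigToNew lifts the tag: { mode: { selection: "advanced", value: { threads: 4 } } }
//   transformNewConfigToOld inverts this back to the old inline-tag shape.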
function getListSpec(
oldVal: OldValueSpecList,
): CT.ValueSpecMultiselect | CT.ValueSpecList {
const range = Range.from(oldVal.range)
let partial: Omit<CT.ValueSpecList, "type" | "spec" | "default"> = {
name: oldVal.name,
description: oldVal.description || null,
warning: oldVal.warning || null,
minLength: range.min
? range.minInclusive
? range.min
: range.min + 1
: null,
maxLength: range.max
? range.maxInclusive
? range.max
: range.max - 1
: null,
disabled: false,
}
if (isEnumList(oldVal)) {
return {
...partial,
type: "multiselect",
default: oldVal.default as string[],
immutable: false,
values: oldVal.spec.values.reduce(
(obj, curr) => ({
...obj,
[curr]: oldVal.spec["value-names"][curr],
}),
{},
),
}
} else if (isStringList(oldVal)) {
return {
...partial,
type: "list",
default: oldVal.default as string[],
spec: {
type: "text",
patterns:
oldVal.spec.pattern && oldVal.spec["pattern-description"]
? [
{
regex: oldVal.spec.pattern,
description: oldVal.spec["pattern-description"],
},
]
: [],
minLength: null,
maxLength: null,
masked: oldVal.spec.masked || false,
generate: null,
inputmode: "text",
placeholder: oldVal.spec.placeholder || null,
},
}
} else if (isObjectList(oldVal)) {
return {
...partial,
type: "list",
default: oldVal.default as Record<string, unknown>[],
spec: {
type: "object",
spec: transformConfigSpec(
matchOldConfigSpec.unsafeCast(oldVal.spec.spec),
),
uniqueBy: oldVal.spec["unique-by"] || null,
displayAs: oldVal.spec["display-as"] || null,
},
}
} else {
throw new Error("Invalid list subtype. enum, string, and object permitted.")
}
}
function isObject(val: OldValueSpec): val is OldValueSpecObject {
return val.type === "object"
}
function isUnion(val: OldValueSpec): val is OldValueSpecUnion {
return val.type === "union"
}
function isList(val: OldValueSpec): val is OldValueSpecList {
return val.type === "list"
}
function isPointer(val: OldValueSpec): val is OldValueSpecPointer {
return val.type === "pointer"
}
function isEnumList(
val: OldValueSpecList,
): val is OldValueSpecList & { subtype: "enum" } {
return val.subtype === "enum"
}
function isStringList(
val: OldValueSpecList,
): val is OldValueSpecList & { subtype: "string" } {
return val.subtype === "string"
}
function isObjectList(
val: OldValueSpecList,
): val is OldValueSpecList & { subtype: "object" } {
if (["number", "union"].includes(val.subtype)) {
throw new Error("Invalid list subtype. enum, string, and object permitted.")
}
return val.subtype === "object"
}
export type OldConfigSpec = Record<string, OldValueSpec>
const [_matchOldConfigSpec, setMatchOldConfigSpec] = deferred<unknown>()
export const matchOldConfigSpec = _matchOldConfigSpec as Parser<
unknown,
OldConfigSpec
>
export const matchOldDefaultString = anyOf(
string,
object({ charset: string, len: number }),
)
type OldDefaultString = typeof matchOldDefaultString._TYPE
export const matchOldValueSpecString = object(
{
type: literals("string"),
name: string,
masked: boolean,
copyable: boolean,
nullable: boolean,
placeholder: string,
pattern: string,
"pattern-description": string,
default: matchOldDefaultString,
textarea: boolean,
description: string,
warning: string,
},
[
"masked",
"copyable",
"nullable",
"placeholder",
"pattern",
"pattern-description",
"default",
"textarea",
"description",
"warning",
],
)
export const matchOldValueSpecNumber = object(
{
type: literals("number"),
nullable: boolean,
name: string,
range: string,
integral: boolean,
default: number,
description: string,
warning: string,
units: string,
placeholder: string,
},
["default", "description", "warning", "units", "placeholder"],
)
type OldValueSpecNumber = typeof matchOldValueSpecNumber._TYPE
export const matchOldValueSpecBoolean = object(
{
type: literals("boolean"),
default: boolean,
name: string,
description: string,
warning: string,
},
["description", "warning"],
)
type OldValueSpecBoolean = typeof matchOldValueSpecBoolean._TYPE
const matchOldValueSpecObject = object(
{
type: literals("object"),
spec: _matchOldConfigSpec,
name: string,
description: string,
warning: string,
},
["description", "warning"],
)
type OldValueSpecObject = typeof matchOldValueSpecObject._TYPE
const matchOldValueSpecEnum = object(
{
values: array(string),
"value-names": dictionary([string, string]),
type: literals("enum"),
default: string,
name: string,
description: string,
warning: string,
},
["description", "warning"],
)
type OldValueSpecEnum = typeof matchOldValueSpecEnum._TYPE
const matchOldUnionTagSpec = object(
{
id: string, // The name of the field containing one of the union variants
"variant-names": dictionary([string, string]), // The name of each variant
name: string,
description: string,
warning: string,
},
["description", "warning"],
)
const matchOldValueSpecUnion = object({
type: literals("union"),
tag: matchOldUnionTagSpec,
variants: dictionary([string, _matchOldConfigSpec]),
default: string,
})
type OldValueSpecUnion = typeof matchOldValueSpecUnion._TYPE
const [matchOldUniqueBy, setOldUniqueBy] = deferred<OldUniqueBy>()
type OldUniqueBy =
| null
| string
| { any: OldUniqueBy[] }
| { all: OldUniqueBy[] }
setOldUniqueBy(
anyOf(
nill,
string,
object({ any: array(matchOldUniqueBy) }),
object({ all: array(matchOldUniqueBy) }),
),
)
const matchOldListValueSpecObject = object(
{
spec: _matchOldConfigSpec, // this is a mapped type of the config object at this level, replacing the object's values with specs on those values
"unique-by": matchOldUniqueBy, // indicates whether duplicates can be permitted in the list
"display-as": string, // this should be a handlebars template which can make use of the entire config which corresponds to 'spec'
},
["display-as", "unique-by"],
)
const matchOldListValueSpecString = object(
{
masked: boolean,
copyable: boolean,
pattern: string,
"pattern-description": string,
placeholder: string,
},
["pattern", "pattern-description", "placeholder", "copyable", "masked"],
)
const matchOldListValueSpecEnum = object({
values: array(string),
"value-names": dictionary([string, string]),
})
// represents a spec for a list
const matchOldValueSpecList = every(
object(
{
type: literals("list"),
range: string, // '[0,1]' (inclusive) OR '[0,*)' (right unbounded), normal math rules
default: anyOf(
array(string),
array(number),
array(matchOldDefaultString),
array(object),
),
name: string,
description: string,
warning: string,
},
["description", "warning"],
),
anyOf(
object({
subtype: literals("string"),
spec: matchOldListValueSpecString,
}),
object({
subtype: literals("enum"),
spec: matchOldListValueSpecEnum,
}),
object({
subtype: literals("object"),
spec: matchOldListValueSpecObject,
}),
),
)
type OldValueSpecList = typeof matchOldValueSpecList._TYPE
const matchOldValueSpecPointer = every(
object({
type: literal("pointer"),
}),
anyOf(
object({
subtype: literal("package"),
target: literals("tor-key", "tor-address", "lan-address"),
"package-id": string,
interface: string,
}),
object({
subtype: literal("package"),
target: literals("config"),
"package-id": string,
selector: string,
multi: boolean,
}),
),
)
type OldValueSpecPointer = typeof matchOldValueSpecPointer._TYPE
export const matchOldValueSpec = anyOf(
matchOldValueSpecString,
matchOldValueSpecNumber,
matchOldValueSpecBoolean,
matchOldValueSpecObject,
matchOldValueSpecEnum,
matchOldValueSpecList,
matchOldValueSpecUnion,
matchOldValueSpecPointer,
)
type OldValueSpec = typeof matchOldValueSpec._TYPE
setMatchOldConfigSpec(dictionary([string, matchOldValueSpec]))
export class Range {
min?: number
max?: number
minInclusive!: boolean
maxInclusive!: boolean
static from(s: string = "(*,*)"): Range {
const r = new Range()
r.minInclusive = s.startsWith("[")
r.maxInclusive = s.endsWith("]")
const [minStr, maxStr] = s.split(",").map((a) => a.trim())
r.min = minStr === "(*" ? undefined : Number(minStr.slice(1))
r.max = maxStr === "*)" ? undefined : Number(maxStr.slice(0, -1))
return r
}
}
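A quick sketch of how `Range.from` reads the old range strings (values below follow the parsing logic above):

```ts
const r1 = Range.from("[1,8]")
// { min: 1, max: 8, minInclusive: true, maxInclusive: true }
// transformConfigSpec emits min: 1, max: 8 for this range

const r2 = Range.from("(2,*)")
// { min: 2, max: undefined, minInclusive: false, maxInclusive: false }
// exclusive lower bound, so the transform emits min: 3 and max: null

const r3 = Range.from() // defaults to "(*,*)": unbounded on both sides
```

Note that a parsed `min` of 0 is falsy, so `range.min ? ... : null` maps it to `null`; whether that is intended is worth double-checking.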

View File

@@ -1,152 +1,226 @@
import { ExecuteResult, System } from "../../Interfaces/System"
import { ExecuteResult, Procedure, System } from "../../Interfaces/System"
import { unNestPath } from "../../Models/JsonPath"
import { string } from "ts-matches"
import { HostSystemStartOs } from "../HostSystemStartOs"
import matches, { any, number, object, string, tuple } from "ts-matches"
import { Effects } from "../../Models/Effects"
import { RpcResult } from "../RpcListener"
import { RpcResult, matchRpcResult } from "../RpcListener"
import { duration } from "../../Models/Duration"
const LOCATION = "/usr/lib/startos/package/startos"
import { T } from "@start9labs/start-sdk"
import { Volume } from "../../Models/Volume"
import { MainEffects } from "@start9labs/start-sdk/cjs/lib/StartSdk"
import { CallbackHolder } from "../../Models/CallbackHolder"
export const STARTOS_JS_LOCATION = "/usr/lib/startos/package/index.js"
type RunningMain = {
effects: MainEffects
stop: () => Promise<void>
callbacks: CallbackHolder
}
export class SystemForStartOs implements System {
private onTerm: (() => Promise<void>) | undefined
private runningMain: RunningMain | undefined
static of() {
return new SystemForStartOs()
return new SystemForStartOs(require(STARTOS_JS_LOCATION))
}
constructor() {}
constructor(readonly abi: T.ABI) {}
async init(): Promise<void> {}
async exit(): Promise<void> {}
async start(effects: MainEffects): Promise<void> {
if (this.runningMain) await this.stop()
let mainOnTerm: (() => Promise<void>) | undefined
const started = async (onTerm: () => Promise<void>) => {
await effects.setMainStatus({ status: "running" })
mainOnTerm = onTerm
}
const daemons = await (
await this.abi.main({
effects: effects as MainEffects,
started,
})
).build()
this.runningMain = {
effects,
stop: async () => {
if (mainOnTerm) await mainOnTerm()
await daemons.term()
},
callbacks: new CallbackHolder(),
}
}
callCallback(callback: number, args: any[]): void {
if (this.runningMain) {
this.runningMain.callbacks
.callCallback(callback, args)
.catch((error) => console.error(`callback ${callback} failed`, error))
} else {
console.warn(`callback ${callback} ignored because system is not running`)
}
}
async stop(): Promise<void> {
if (this.runningMain) {
await this.runningMain.stop()
await this.runningMain.effects.clearCallbacks()
this.runningMain = undefined
}
}
async execute(
effects: HostSystemStartOs,
effects: Effects,
options: {
procedure:
| "/init"
| "/uninit"
| "/main/start"
| "/main/stop"
| "/config/set"
| "/config/get"
| "/backup/create"
| "/backup/restore"
| "/actions/metadata"
| `/actions/${string}/get`
| `/actions/${string}/run`
| `/dependencies/${string}/query`
| `/dependencies/${string}/update`
input: unknown
procedure: Procedure
input?: unknown
timeout?: number | undefined
},
): Promise<RpcResult> {
return { result: await this._execute(effects, options) }
return this._execute(effects, options)
.then((x) =>
matches(x)
.when(
object({
result: any,
}),
(x) => x,
)
.when(
object({
error: string,
}),
(x) => ({
error: {
code: 0,
message: x.error,
},
}),
)
.when(
object({
"error-code": tuple(number, string),
}),
({ "error-code": [code, message] }) => ({
error: {
code,
message,
},
}),
)
.defaultTo({ result: x }),
)
.catch((error: unknown) => {
if (error instanceof Error)
return {
error: {
code: 0,
message: error.name,
data: {
details: error.message,
debug: `${error?.cause ?? "[noCause]"}:${error?.stack ?? "[noStack]"}`,
},
},
}
if (matchRpcResult.test(error)) return error
return {
error: {
code: 0,
message: String(error),
},
}
})
}
async _execute(
effects: Effects,
effects: Effects | MainEffects,
options: {
procedure:
| "/init"
| "/uninit"
| "/main/start"
| "/main/stop"
| "/config/set"
| "/config/get"
| "/backup/create"
| "/backup/restore"
| "/actions/metadata"
| `/actions/${string}/get`
| `/actions/${string}/run`
| `/dependencies/${string}/query`
| `/dependencies/${string}/update`
input: unknown
procedure: Procedure
input?: unknown
timeout?: number | undefined
},
): Promise<unknown> {
switch (options.procedure) {
case "/init": {
const path = `${LOCATION}/procedures/init`
const procedure: any = await import(path).catch(() => require(path))
const previousVersion = string.optional().unsafeCast(options)
return procedure.init({ effects, previousVersion })
const previousVersion =
string.optional().unsafeCast(options.input) || null
return this.abi.init({ effects, previousVersion })
}
case "/uninit": {
const path = `${LOCATION}/procedures/init`
const procedure: any = await import(path).catch(() => require(path))
const nextVersion = string.optional().unsafeCast(options)
return procedure.uninit({ effects, nextVersion })
}
case "/main/start": {
const path = `${LOCATION}/procedures/main`
const procedure: any = await import(path).catch(() => require(path))
const started = async (onTerm: () => Promise<void>) => {
await effects.setMainStatus({ status: "running" })
if (this.onTerm) await this.onTerm()
this.onTerm = onTerm
}
return procedure.main({ effects, started })
}
case "/main/stop": {
await effects.setMainStatus({ status: "stopped" })
if (this.onTerm) await this.onTerm()
delete this.onTerm
return duration(30, "s")
const nextVersion = string.optional().unsafeCast(options.input) || null
return this.abi.uninit({ effects, nextVersion })
}
// case "/main/start": {
//
// }
// case "/main/stop": {
// if (this.onTerm) await this.onTerm()
// await effects.setMainStatus({ status: "stopped" })
// delete this.onTerm
// return duration(30, "s")
// }
case "/config/set": {
const path = `${LOCATION}/procedures/config`
const procedure: any = await import(path).catch(() => require(path))
const input = options.input
return procedure.setConfig({ effects, input })
const input = options.input as any // TODO
return this.abi.setConfig({ effects, input })
}
case "/config/get": {
const path = `${LOCATION}/procedures/config`
const procedure: any = await import(path).catch(() => require(path))
return procedure.getConfig({ effects })
return this.abi.getConfig({ effects })
}
case "/backup/create":
return this.abi.createBackup({
effects,
pathMaker: ((options) =>
new Volume(options.volume, options.path).path) as T.PathMaker,
})
case "/backup/restore":
throw new Error("this should be called with the init/unit")
return this.abi.restoreBackup({
effects,
pathMaker: ((options) =>
new Volume(options.volume, options.path).path) as T.PathMaker,
})
case "/actions/metadata": {
const path = `${LOCATION}/procedures/actions`
const procedure: any = await import(path).catch(() => require(path))
return procedure.actionsMetadata({ effects })
return this.abi.actionsMetadata({ effects })
}
case "/properties": {
throw new Error("TODO")
}
default:
const procedures = unNestPath(options.procedure)
const id = procedures[2]
switch (true) {
case procedures[1] === "actions" && procedures[3] === "get": {
const path = `${LOCATION}/procedures/actions`
const action: any = (await import(path).catch(() => require(path)))
.actions[id]
const action = (await this.abi.actions({ effects }))[id]
if (!action) throw new Error(`Action ${id} not found`)
return action.get({ effects })
return action.getConfig({ effects })
}
case procedures[1] === "actions" && procedures[3] === "run": {
const path = `${LOCATION}/procedures/actions`
const action: any = (await import(path).catch(() => require(path)))
.actions[id]
const action = (await this.abi.actions({ effects }))[id]
if (!action) throw new Error(`Action ${id} not found`)
const input = options.input
return action.run({ effects, input })
return action.run({ effects, input: options.input as any }) // TODO
}
case procedures[1] === "dependencies" && procedures[3] === "query": {
const path = `${LOCATION}/procedures/dependencies`
const dependencyConfig: any = (
await import(path).catch(() => require(path))
).dependencyConfig[id]
const dependencyConfig = this.abi.dependencyConfig[id]
if (!dependencyConfig)
throw new Error(`dependencyConfig ${id} not found`)
const localConfig = options.input
return dependencyConfig.query({ effects, localConfig })
return dependencyConfig.query({ effects })
}
case procedures[1] === "dependencies" && procedures[3] === "update": {
const path = `${LOCATION}/procedures/dependencies`
const dependencyConfig: any = (
await import(path).catch(() => require(path))
).dependencyConfig[id]
const dependencyConfig = this.abi.dependencyConfig[id]
if (!dependencyConfig)
throw new Error(`dependencyConfig ${id} not found`)
return dependencyConfig.update(options.input)
return dependencyConfig.update(options.input as any) // TODO
}
}
return
}
throw new Error("Method not implemented.")
}
exit(effects: Effects): Promise<void> {
throw new Error("Method not implemented.")
async sandbox(
effects: Effects,
options: { procedure: Procedure; input?: unknown; timeout?: number },
): Promise<RpcResult> {
return this.execute(effects, options)
}
}
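The error normalization in `execute` can be summarized with a standalone sketch that mirrors the matcher chain above (an illustration, not the real class method):

```ts
import matches, { any, number, object, string, tuple } from "ts-matches"

// Mirrors the result/error mapping in SystemForStartOs.execute.
function normalizeRpcResult(x: unknown) {
  return matches(x)
    .when(object({ result: any }), (ok) => ok) // already well-formed
    .when(object({ error: string }), (e) => ({
      error: { code: 0, message: e.error }, // legacy string errors get code 0
    }))
    .when(
      object({ "error-code": tuple(number, string) }),
      ({ "error-code": [code, message] }) => ({
        error: { code, message }, // legacy tuples keep their code
      }),
    )
    .defaultTo({ result: x }) // bare values are wrapped as results
}

normalizeRpcResult({ error: "bad config" })
// -> { error: { code: 0, message: "bad config" } }
normalizeRpcResult(42)
// -> { result: 42 }
```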

View File

@@ -1,6 +1,22 @@
import * as fs from "node:fs/promises"
import { System } from "../../Interfaces/System"
import { SystemForEmbassy } from "./SystemForEmbassy"
import { SystemForStartOs } from "./SystemForStartOs"
import { EMBASSY_JS_LOCATION, SystemForEmbassy } from "./SystemForEmbassy"
import { STARTOS_JS_LOCATION, SystemForStartOs } from "./SystemForStartOs"
export async function getSystem(): Promise<System> {
return SystemForEmbassy.of()
if (
await fs.access(STARTOS_JS_LOCATION).then(
() => true,
() => false,
)
) {
return SystemForStartOs.of()
} else if (
await fs.access(EMBASSY_JS_LOCATION).then(
() => true,
() => false,
)
) {
return SystemForEmbassy.of()
}
throw new Error(`${STARTOS_JS_LOCATION} not found`)
}

View File

@@ -1,6 +1,7 @@
import { GetDependency } from "./GetDependency"
import { System } from "./System"
import { GetHostSystem, HostSystem } from "./HostSystem"
import { MakeMainEffects, MakeProcedureEffects } from "./MakeEffects"
export type AllGetDependencies = GetDependency<"system", Promise<System>> &
GetDependency<"hostSystem", GetHostSystem>
GetDependency<"makeProcedureEffects", MakeProcedureEffects> &
GetDependency<"makeMainEffects", MakeMainEffects>

View File

@@ -1,7 +0,0 @@
import { types as T } from "@start9labs/start-sdk"
import { CallbackHolder } from "../Models/CallbackHolder"
import { Effects } from "../Models/Effects"
export type HostSystem = Effects
export type GetHostSystem = (callbackHolder: CallbackHolder) => HostSystem

View File

@@ -0,0 +1,4 @@
import { Effects } from "../Models/Effects"
import { MainEffects } from "@start9labs/start-sdk/cjs/lib/StartSdk"
export type MakeProcedureEffects = (procedureId: string) => Effects
export type MakeMainEffects = () => MainEffects

View File

@@ -1,32 +1,54 @@
import { types as T } from "@start9labs/start-sdk"
import { JsonPath } from "../Models/JsonPath"
import { HostSystemStartOs } from "../Adapters/HostSystemStartOs"
import { RpcResult } from "../Adapters/RpcListener"
import { Effects } from "../Models/Effects"
import { CallbackHolder } from "../Models/CallbackHolder"
import { MainEffects } from "@start9labs/start-sdk/cjs/lib/StartSdk"
export type Procedure =
| "/init"
| "/uninit"
| "/config/set"
| "/config/get"
| "/backup/create"
| "/backup/restore"
| "/actions/metadata"
| "/properties"
| `/actions/${string}/get`
| `/actions/${string}/run`
| `/dependencies/${string}/query`
| `/dependencies/${string}/update`
export type ExecuteResult =
| { ok: unknown }
| { err: { code: number; message: string } }
export interface System {
// init(effects: Effects): Promise<void>
// exit(effects: Effects): Promise<void>
// start(effects: Effects): Promise<void>
// stop(effects: Effects, options: { timeout: number, signal?: number }): Promise<void>
export type System = {
init(): Promise<void>
start(effects: MainEffects): Promise<void>
callCallback(callback: number, args: any[]): void
stop(): Promise<void>
execute(
effects: T.Effects,
effects: Effects,
options: {
procedure: JsonPath
procedure: Procedure
input: unknown
timeout?: number
},
): Promise<RpcResult>
sandbox(
effects: Effects,
options: {
procedure: Procedure
input: unknown
timeout?: number
},
): Promise<RpcResult>
// sandbox(
// effects: Effects,
// options: {
// procedure: JsonPath
// input: unknown
// timeout?: number
// },
// ): Promise<unknown>
exit(effects: T.Effects): Promise<void>
exit(): Promise<void>
}
export type RunningMain = {
callbacks: CallbackHolder
stop(): Promise<void>
}

View File

@@ -1,17 +1,19 @@
export class CallbackHolder {
constructor() {}
private root = (Math.random() + 1).toString(36).substring(7)
private inc = 0
private callbacks = new Map<string, Function>()
private callbacks = new Map<number, Function>()
private newId() {
return this.root + (this.inc++).toString(36)
return this.inc++
}
addCallback(callback: Function) {
addCallback(callback?: Function) {
if (!callback) {
return
}
const id = this.newId()
this.callbacks.set(id, callback)
return id
}
callCallback(index: string, args: any[]): Promise<unknown> {
callCallback(index: number, args: any[]): Promise<unknown> {
const callback = this.callbacks.get(index)
if (!callback) throw new Error(`Callback ${index} does not exist`)
this.callbacks.delete(index)

View File

@@ -8,7 +8,9 @@ import {
literals,
number,
Parser,
some,
} from "ts-matches"
import { matchDuration } from "./Duration"
const VolumeId = string
const Path = string
@@ -31,7 +33,7 @@ export const matchDockerProcedure = object(
"toml",
"toml-pretty",
),
"sigterm-timeout": number,
"sigterm-timeout": some(number, matchDuration),
inject: boolean,
},
["io-format", "sigterm-timeout", "system", "args", "inject", "mounts"],

View File

@@ -1,6 +1,30 @@
export type TimeUnit = "d" | "h" | "s" | "ms"
import { string } from "ts-matches"
export type TimeUnit = "d" | "h" | "s" | "ms" | "m" | "µs" | "ns"
export type Duration = `${number}${TimeUnit}`
export function duration(timeValue: number, timeUnit: TimeUnit = "s") {
return `${timeValue}${timeUnit}` as Duration
const durationRegex = /^([0-9]*(\.[0-9]+)?)(ns|µs|ms|s|m|h|d)$/
export const matchDuration = string.refine(isDuration)
export function isDuration(value: string): value is Duration {
return durationRegex.test(value)
}
export function duration(timeValue: number, timeUnit: TimeUnit = "s") {
return `${timeValue > 0 ? timeValue : 0}${timeUnit}` as Duration
}
const unitsToSeconds: Record<string, number> = {
ns: 1e-9,
µs: 1e-6,
ms: 0.001,
s: 1,
m: 60,
h: 3600,
d: 86400,
}
export function fromDuration(duration: Duration | number): number {
if (typeof duration === "number") return duration
const [, num, , unit] = duration.match(durationRegex) || []
return Number(num) * unitsToSeconds[unit]
}
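A few illustrative conversions with the helpers above:

```ts
duration(30)           // "30s"
duration(1.5, "m")     // "1.5m"
duration(-5, "ms")     // "0ms" (negative values clamp to 0)

isDuration("300ms")    // true
isDuration("10y")      // false ("y" is not a recognized unit)

fromDuration("1.5m")   // 90 (seconds)
fromDuration("300ms")  // 0.3
fromDuration(42)       // 42 (numbers pass through unchanged)
```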

View File

@@ -1,5 +1,3 @@
import { types as T } from "@start9labs/start-sdk"
export type Effects = T.Effects & {
setMainStatus(o: { status: "running" | "stopped" }): Promise<void>
}
export type Effects = T.Effects

View File

@@ -28,8 +28,6 @@ export const jsonPath = some(
literals(
"/init",
"/uninit",
"/main/start",
"/main/stop",
"/config/set",
"/config/get",
"/backup/create",

View File

@@ -1,14 +1,17 @@
import * as fs from "node:fs/promises"
export const BACKUP = "backup"
export class Volume {
readonly path: string
constructor(
readonly volumeId: string,
_path = "",
) {
const path = (this.path = `/media/startos/volumes/${volumeId}${
!_path ? "" : `/${_path}`
}`)
if (volumeId.toLowerCase() === BACKUP) {
this.path = `/media/startos/backup${!_path ? "" : `/${_path}`}`
} else {
this.path = `/media/startos/volumes/${volumeId}${!_path ? "" : `/${_path}`}`
}
}
async exists() {
return fs.stat(this.path).then(

View File

@@ -1,12 +1,13 @@
import { RpcListener } from "./Adapters/RpcListener"
import { SystemForEmbassy } from "./Adapters/Systems/SystemForEmbassy"
import { HostSystemStartOs } from "./Adapters/HostSystemStartOs"
import { makeMainEffects, makeProcedureEffects } from "./Adapters/EffectCreator"
import { AllGetDependencies } from "./Interfaces/AllGetDependencies"
import { getSystem } from "./Adapters/Systems"
const getDependencies: AllGetDependencies = {
system: getSystem,
hostSystem: () => HostSystemStartOs.of,
makeProcedureEffects: () => makeProcedureEffects,
makeMainEffects: () => makeMainEffects,
}
new RpcListener(getDependencies)

View File

@@ -1,11 +1,5 @@
{
"include": [
"./**/*.mjs",
"./**/*.js",
"src/Adapters/RpcListener.ts",
"src/index.ts",
"effects.ts"
],
"include": ["./**/*.ts"],
"exclude": ["dist"],
"inputs": ["./src/index.ts"],
"compilerOptions": {
@@ -19,9 +13,10 @@
"declaration": true,
"noImplicitAny": true,
"esModuleInterop": true,
"types": ["node"],
"types": ["node", "jest"],
"moduleResolution": "Node16",
"skipLibCheck": true
"skipLibCheck": true,
"resolveJsonModule": true
},
"ts-node": {
"compilerOptions": {

View File

@@ -4,12 +4,15 @@ cd "$(dirname "${BASH_SOURCE[0]}")"
set -e
if mountpoint tmp/combined; then sudo umount tmp/combined; fi
if mountpoint tmp/combined; then sudo umount -R tmp/combined; fi
if mountpoint tmp/lower; then sudo umount tmp/lower; fi
sudo rm -rf tmp
mkdir -p tmp/lower tmp/upper tmp/work tmp/combined
sudo mount alpine.${ARCH}.squashfs tmp/lower
if which squashfuse > /dev/null; then
sudo squashfuse debian.${ARCH}.squashfs tmp/lower
else
sudo mount debian.${ARCH}.squashfs tmp/lower
fi
sudo mount -t overlay -olowerdir=tmp/lower,upperdir=tmp/upper,workdir=tmp/work overlay tmp/combined
QEMU=
@@ -18,21 +21,21 @@ if [ "$ARCH" != "$(uname -m)" ]; then
sudo cp $(which qemu-$ARCH-static) tmp/combined${QEMU}
fi
echo "nameserver 8.8.8.8" | sudo tee tmp/combined/etc/resolv.conf # TODO - delegate to host resolver?
sudo chroot tmp/combined $QEMU /sbin/apk add nodejs
sudo mkdir -p tmp/combined/usr/lib/startos/
sudo rsync -a --copy-unsafe-links dist/ tmp/combined/usr/lib/startos/init/
sudo cp containerRuntime.rc tmp/combined/etc/init.d/containerRuntime
sudo chown -R 0:0 tmp/combined/usr/lib/startos/
sudo cp container-runtime.service tmp/combined/lib/systemd/system/container-runtime.service
sudo chown 0:0 tmp/combined/lib/systemd/system/container-runtime.service
sudo cp ../core/target/$ARCH-unknown-linux-musl/release/containerbox tmp/combined/usr/bin/start-cli
sudo chmod +x tmp/combined/etc/init.d/containerRuntime
sudo chroot tmp/combined $QEMU /sbin/rc-update add containerRuntime default
sudo chown 0:0 tmp/combined/usr/bin/start-cli
echo container-runtime | sha256sum | head -c 32 | cat - <(echo) | sudo tee tmp/combined/etc/machine-id
cat deb-install.sh | sudo systemd-nspawn --console=pipe -D tmp/combined $QEMU /bin/bash
sudo truncate -s 0 tmp/combined/etc/machine-id
if [ -n "$QEMU" ]; then
sudo rm tmp/combined${QEMU}
fi
sudo truncate -s 0 tmp/combined/etc/resolv.conf
sudo chown -R 0:0 tmp/combined
rm -f rootfs.${ARCH}.squashfs
mkdir -p ../build/lib/container-runtime
sudo mksquashfs tmp/combined rootfs.${ARCH}.squashfs

1
core/.gitignore vendored
View File

@@ -8,3 +8,4 @@ secrets.db
.env
.editorconfig
proptest-regressions/**/*
/startos/bindings/*

2569
core/Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -28,18 +28,12 @@ set +e
fail=
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
if ! rust-musl-builder sh -c "(cd core && cargo build --release $(if [ -n "$FEATURES" ]; then echo "--features $FEATURES"; fi) --locked --bin startbox --target=$ARCH-unknown-linux-musl)"; then
fail=true
fi
if ! rust-musl-builder sh -c "(cd core && cargo build --release --no-default-features --features container-runtime,$FEATURES --locked --bin containerbox --target=$ARCH-unknown-linux-musl)"; then
if ! rust-musl-builder sh -c "cd core && cargo build --release --no-default-features --features container-runtime,$FEATURES --locked --bin containerbox --target=$ARCH-unknown-linux-musl && chown -R $UID:$UID target && chown -R $UID:$UID /root/.cargo"; then
fail=true
fi
set -e
cd core
sudo chown -R $USER target
sudo chown -R $USER ~/.cargo
if [ -n "$fail" ]; then
exit 1
fi

39
core/build-registrybox.sh Executable file
View File

@@ -0,0 +1,39 @@
#!/bin/bash
cd "$(dirname "${BASH_SOURCE[0]}")"
set -e
shopt -s expand_aliases
if [ -z "$ARCH" ]; then
ARCH=$(uname -m)
fi
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
cd ..
FEATURES="$(echo $ENVIRONMENT | sed 's/-/,/g')"
RUSTFLAGS=""
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
alias 'rust-musl-builder'='docker run $USE_TTY --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$HOME/.cargo/git":/root/.cargo/git -v "$(pwd)":/home/rust/src -w /home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
set +e
fail=
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
if ! rust-musl-builder sh -c "cd core && cargo build --release --no-default-features --features cli,registry,$FEATURES --locked --bin registrybox --target=$ARCH-unknown-linux-musl && chown -R $UID:$UID target && chown -R $UID:$UID /root/.cargo"; then
fail=true
fi
set -e
cd core
if [ -n "$fail" ]; then
exit 1
fi

39
core/build-startbox.sh Executable file
View File

@@ -0,0 +1,39 @@
#!/bin/bash
cd "$(dirname "${BASH_SOURCE[0]}")"
set -e
shopt -s expand_aliases
if [ -z "$ARCH" ]; then
ARCH=$(uname -m)
fi
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
cd ..
FEATURES="$(echo $ENVIRONMENT | sed 's/-/,/g')"
RUSTFLAGS=""
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
alias 'rust-musl-builder'='docker run $USE_TTY --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$HOME/.cargo/git":/root/.cargo/git -v "$(pwd)":/home/rust/src -w /home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
set +e
fail=
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
if ! rust-musl-builder sh -c "cd core && cargo build --release --no-default-features --features cli,daemon,$FEATURES --locked --bin startbox --target=$ARCH-unknown-linux-musl && chown -R $UID:$UID target && chown -R $UID:$UID /root/.cargo"; then
fail=true
fi
set -e
cd core
if [ -n "$fail" ]; then
exit 1
fi
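
build-registrybox.sh and build-startbox.sh are near-identical: only the cargo feature set (`cli,registry,…` vs `cli,daemon,…`) and the output binary differ. Note that `chown -R $UID:$UID` now runs inside the container, replacing the host-side `sudo chown` dropped from the prod script above; the docker build runs as root, so without it the mounted `target/` and cargo caches would come back root-owned on the host. The `$ENVIRONMENT`-to-flags mapping the scripts share, re-expressed in Rust purely for illustration (identifiers here are mine, not the repo's):

```rust
// Sketch only: mirror the scripts' FEATURES/RUSTFLAGS derivation.
// ENVIRONMENT segments are '-'-separated; sed turns them into a cargo
// feature list, and any "unstable" segment enables tokio_unstable.
fn build_flags(environment: &str) -> (String, &'static str) {
    let features = environment.replace('-', ",");
    // Equivalent to the bash regex (^|-)unstable($|-).
    let rustflags = if environment.split('-').any(|seg| seg == "unstable") {
        "--cfg tokio_unstable"
    } else {
        ""
    };
    (features, rustflags)
}

fn main() {
    assert_eq!(
        build_flags("dev-unstable"),
        ("dev,unstable".to_string(), "--cfg tokio_unstable")
    );
}
```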

View File

@@ -11,7 +11,7 @@ futures = "0.3.28"
lazy_async_pool = "0.3.3"
models = { path = "../models" }
pin-project = "1.1.3"
rpc-toolkit = "0.2.3"
rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git", branch = "refactor/no-dyn-ctx" }
serde = { version = "1.0", features = ["derive", "rc"] }
serde_json = "1.0"
tokio = { version = "1", features = ["full"] }

View File

@@ -50,7 +50,8 @@ pub async fn canonicalize(
}
let path = path.as_ref();
if tokio::fs::metadata(path).await.is_err() {
if let (Some(parent), Some(file_name)) = (path.parent(), path.file_name()) {
let parent = path.parent().unwrap_or(Path::new("."));
if let Some(file_name) = path.file_name() {
if create_parent && tokio::fs::metadata(parent).await.is_err() {
return Ok(create_canonical_folder(parent).await?.join(file_name));
} else {
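
The rewrite above restructures the guard: `Path::parent()` only returns `None` for root, prefix, or empty paths, and the new code substitutes `"."` rather than skipping the branch entirely. Worth knowing when reviewing: a bare relative filename has an *empty* parent, not a missing one. A std-only illustration of the two cases:

```rust
use std::path::Path;

fn main() {
    // A bare relative filename yields Some("") as its parent...
    assert_eq!(Path::new("file.txt").parent(), Some(Path::new("")));
    // ...whereas the root has no parent at all; this is the case the
    // old `if let (Some(parent), Some(file_name))` pattern never entered.
    assert_eq!(Path::new("/").parent(), None);
}
```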

View File

@@ -1,10 +1,14 @@
use std::path::{Path, PathBuf};
use models::{PackageId, Version};
use models::{PackageId, VersionString};
pub const PKG_SCRIPT_DIR: &str = "package-data/scripts";
pub fn script_dir<P: AsRef<Path>>(datadir: P, pkg_id: &PackageId, version: &Version) -> PathBuf {
pub fn script_dir<P: AsRef<Path>>(
datadir: P,
pkg_id: &PackageId,
version: &VersionString,
) -> PathBuf {
datadir
.as_ref()
.join(&*PKG_SCRIPT_DIR)
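
Call sites only see the parameter type change. A hedged sketch of the updated call, assuming `script_dir` is in scope, that plain dotted versions parse under exver, and that both `FromStr` error types implement `std::error::Error` (none of which this hunk shows):

```rust
use std::path::PathBuf;

use models::{PackageId, VersionString};

// Hypothetical helper; "bitcoind" and "26.0.0" are illustrative values.
fn bitcoind_script_dir(datadir: &str) -> Result<PathBuf, Box<dyn std::error::Error>> {
    let pkg: PackageId = "bitcoind".parse()?;
    let version: VersionString = "26.0.0".parse()?;
    Ok(script_dir(datadir, &pkg, &version))
}
```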

View File

@@ -12,4 +12,4 @@ if [ -z "$PLATFORM" ]; then
export PLATFORM=$(uname -m)
fi
cargo install --path=./startos --no-default-features --features=cli,docker --bin start-cli --locked
cargo install --path=./startos --no-default-features --features=cli,docker,registry --bin start-cli --locked

View File

@@ -6,12 +6,13 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
axum = "0.7.5"
base64 = "0.21.4"
color-eyre = "0.6.2"
ed25519-dalek = { version = "2.0.0", features = ["serde"] }
lazy_static = "1.4"
mbrman = "0.5.2"
emver = { version = "0.1", git = "https://github.com/Start9Labs/emver-rs.git", features = [
exver = { version = "0.2.0", git = "https://github.com/Start9Labs/exver-rs.git", features = [
"serde",
] }
ipnet = "2.8.0"
@@ -22,8 +23,8 @@ patch-db = { version = "*", path = "../../patch-db/patch-db", features = [
] }
rand = "0.8.5"
regex = "1.10.2"
reqwest = "0.11.22"
rpc-toolkit = "0.2.2"
reqwest = "0.12"
rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git", branch = "refactor/no-dyn-ctx" }
serde = { version = "1.0", features = ["derive", "rc"] }
serde_json = "1.0"
sqlx = { version = "0.7.2", features = [

View File

@@ -1,9 +1,10 @@
use std::fmt::{Debug, Display};
use axum::http::uri::InvalidUri;
use axum::http::StatusCode;
use color_eyre::eyre::eyre;
use num_enum::TryFromPrimitive;
use patch_db::Revision;
use rpc_toolkit::hyper::http::uri::InvalidUri;
use rpc_toolkit::reqwest;
use rpc_toolkit::yajrc::{
RpcError, INVALID_PARAMS_ERROR, INVALID_REQUEST_ERROR, METHOD_NOT_FOUND_ERROR, PARSE_ERROR,
@@ -88,6 +89,7 @@ pub enum ErrorKind {
Timeout = 71,
Lxc = 72,
Cancelled = 73,
Git = 74,
}
impl ErrorKind {
pub fn as_str(&self) -> &'static str {
@@ -166,6 +168,7 @@ impl ErrorKind {
Timeout => "Timeout Error",
Lxc => "LXC Error",
Cancelled => "Cancelled",
Git => "Git Error",
}
}
}
@@ -207,6 +210,13 @@ impl Error {
}
}
}
impl axum::response::IntoResponse for Error {
fn into_response(self) -> axum::response::Response {
let mut res = axum::Json(RpcError::from(self)).into_response();
*res.status_mut() = StatusCode::INTERNAL_SERVER_ERROR;
res
}
}
impl From<std::convert::Infallible> for Error {
fn from(value: std::convert::Infallible) -> Self {
match value {}
@@ -232,8 +242,8 @@ impl From<std::string::FromUtf8Error> for Error {
Error::new(e, ErrorKind::Utf8)
}
}
impl From<emver::ParseError> for Error {
fn from(e: emver::ParseError) -> Self {
impl From<exver::ParseError> for Error {
fn from(e: exver::ParseError) -> Self {
Error::new(e, ErrorKind::ParseVersion)
}
}
@@ -480,6 +490,7 @@ where
{
fn with_kind(self, kind: ErrorKind) -> Result<T, Error>;
fn with_ctx<F: FnOnce(&E) -> (ErrorKind, D), D: Display>(self, f: F) -> Result<T, Error>;
fn log_err(self) -> Option<T>;
}
impl<T, E> ResultExt<T, E> for Result<T, E>
where
@@ -506,6 +517,18 @@ where
}
})
}
fn log_err(self) -> Option<T> {
match self {
Ok(a) => Some(a),
Err(e) => {
let e: color_eyre::eyre::Error = e.into();
tracing::error!("{e}");
tracing::debug!("{e:?}");
None
}
}
}
}
impl<T> ResultExt<T, Error> for Result<T, Error> {
fn with_kind(self, kind: ErrorKind) -> Result<T, Error> {
@@ -529,6 +552,17 @@ impl<T> ResultExt<T, Error> for Result<T, Error> {
}
})
}
fn log_err(self) -> Option<T> {
match self {
Ok(a) => Some(a),
Err(e) => {
tracing::error!("{e}");
tracing::debug!("{e:?}");
None
}
}
}
}
pub trait OptionExt<T>
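
Two quality-of-life additions land in this file: `IntoResponse`, so axum handlers can return `Error` directly (serialized as a JSON-RPC error body with a 500 status), and a `log_err` combinator on both `ResultExt` impls for call sites that want to record a failure and continue with `None` instead of propagating it. A self-contained sketch of the `log_err` pattern, assuming the `tracing` and `tracing_subscriber` crates:

```rust
use std::fmt::Display;

// Free-function stand-in for the trait method added above: log the
// error, then swallow it into None.
fn log_err<T, E: Display>(res: Result<T, E>) -> Option<T> {
    match res {
        Ok(a) => Some(a),
        Err(e) => {
            tracing::error!("{e}");
            None
        }
    }
}

fn main() {
    tracing_subscriber::fmt::init();
    assert_eq!(log_err("7".parse::<u32>()), Some(7));
    assert_eq!(log_err("x".parse::<u32>()), None); // logged, not returned
}
```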

View File

@@ -5,7 +5,7 @@ use std::str::FromStr;
use serde::{Deserialize, Deserializer, Serialize};
use ts_rs::TS;
use crate::{Id, InvalidId, PackageId, Version};
use crate::{Id, InvalidId, PackageId, VersionString};
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, TS)]
#[ts(type = "string")]
@@ -21,7 +21,7 @@ impl std::fmt::Display for ImageId {
}
}
impl ImageId {
pub fn for_package(&self, pkg_id: &PackageId, pkg_version: Option<&Version>) -> String {
pub fn for_package(&self, pkg_id: &PackageId, pkg_version: Option<&VersionString>) -> String {
format!(
"start9/{}/{}:{}",
pkg_id,

View File

@@ -24,12 +24,17 @@ pub use service_interface::ServiceInterfaceId;
pub use volume::VolumeId;
lazy_static::lazy_static! {
static ref ID_REGEX: Regex = Regex::new("^[a-z]+(-[a-z]+)*$").unwrap();
static ref ID_REGEX: Regex = Regex::new("^[a-z]+(-[a-z0-9]+)*$").unwrap();
pub static ref SYSTEM_ID: Id = Id(InternedString::intern("x_system"));
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Default)]
pub struct Id(InternedString);
impl std::fmt::Debug for Id {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0.fmt(f)
}
}
impl TryFrom<InternedString> for Id {
type Error = InvalidId;
fn try_from(value: InternedString) -> Result<Self, Self::Error> {
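
Two independent tweaks here: `Debug` for `Id` is now hand-written to delegate to the inner interned string, and the id grammar gains digits in every segment after the first. A quick check against the new pattern, using the `regex` crate as the file does:

```rust
use regex::Regex;

fn main() {
    let id = Regex::new("^[a-z]+(-[a-z0-9]+)*$").unwrap();
    assert!(id.is_match("hello-world"));
    assert!(id.is_match("sphinx-key-v2")); // newly valid: digits after the first segment
    assert!(!id.is_match("2fauth")); // still rejected: the leading segment is letters-only
    assert!(!id.is_match("hello-")); // a trailing hyphen never matched either pattern
}
```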

View File

@@ -61,6 +61,11 @@ impl Borrow<str> for PackageId {
self.0.as_ref()
}
}
impl<'a> Borrow<str> for &'a PackageId {
fn borrow(&self) -> &str {
self.0.as_ref()
}
}
impl AsRef<Path> for PackageId {
fn as_ref(&self) -> &Path {
self.0.as_ref().as_ref()
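
The extra `Borrow<str>` impl for `&PackageId` exists so collections keyed by *borrowed* ids can still be queried with a plain `&str`. A self-contained sketch with a stand-in type (the real `PackageId` wraps an `InternedString`):

```rust
use std::borrow::Borrow;
use std::collections::HashMap;

#[derive(PartialEq, Eq, Hash)]
struct PackageId(String);

impl<'a> Borrow<str> for &'a PackageId {
    fn borrow(&self) -> &str {
        self.0.as_ref()
    }
}

fn main() {
    let bitcoind = PackageId("bitcoind".into());
    let mut running: HashMap<&PackageId, u32> = HashMap::new();
    running.insert(&bitcoind, 1);
    // Lookup by &str works because &PackageId: Borrow<str>.
    assert_eq!(running.get("bitcoind"), Some(&1));
}
```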

View File

@@ -1,11 +1,9 @@
use serde::{Deserialize, Serialize};
use crate::ActionId;
use crate::{ActionId, PackageId};
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum ProcedureName {
StartMain,
StopMain,
GetConfig,
SetConfig,
CreateBackup,
@@ -14,8 +12,8 @@ pub enum ProcedureName {
ActionMetadata,
RunAction(ActionId),
GetAction(ActionId),
QueryDependency(ActionId),
UpdateDependency(ActionId),
QueryDependency(PackageId),
UpdateDependency(PackageId),
Init,
Uninit,
}
@@ -25,8 +23,6 @@ impl ProcedureName {
match self {
ProcedureName::Init => "/init".to_string(),
ProcedureName::Uninit => "/uninit".to_string(),
ProcedureName::StartMain => "/main/start".to_string(),
ProcedureName::StopMain => "/main/stop".to_string(),
ProcedureName::SetConfig => "/config/set".to_string(),
ProcedureName::GetConfig => "/config/get".to_string(),
ProcedureName::CreateBackup => "/backup/create".to_string(),

View File

@@ -3,100 +3,109 @@ use std::ops::Deref;
use std::str::FromStr;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use ts_rs::TS;
#[derive(Debug, Clone)]
pub struct Version {
version: emver::Version,
#[derive(Debug, Clone, TS)]
#[ts(type = "string", rename = "Version")]
pub struct VersionString {
version: exver::ExtendedVersion,
string: String,
}
impl Version {
impl VersionString {
pub fn as_str(&self) -> &str {
self.string.as_str()
}
pub fn into_version(self) -> emver::Version {
pub fn into_version(self) -> exver::ExtendedVersion {
self.version
}
}
impl std::fmt::Display for Version {
impl std::fmt::Display for VersionString {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.string)
}
}
impl std::str::FromStr for Version {
type Err = <emver::Version as FromStr>::Err;
impl std::str::FromStr for VersionString {
type Err = <exver::ExtendedVersion as FromStr>::Err;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(Version {
Ok(VersionString {
string: s.to_owned(),
version: s.parse()?,
})
}
}
impl From<emver::Version> for Version {
fn from(v: emver::Version) -> Self {
Version {
impl From<exver::ExtendedVersion> for VersionString {
fn from(v: exver::ExtendedVersion) -> Self {
VersionString {
string: v.to_string(),
version: v,
}
}
}
impl From<Version> for emver::Version {
fn from(v: Version) -> Self {
impl From<VersionString> for exver::ExtendedVersion {
fn from(v: VersionString) -> Self {
v.version
}
}
impl Default for Version {
impl Default for VersionString {
fn default() -> Self {
Self::from(emver::Version::default())
Self::from(exver::ExtendedVersion::default())
}
}
impl Deref for Version {
type Target = emver::Version;
impl Deref for VersionString {
type Target = exver::ExtendedVersion;
fn deref(&self) -> &Self::Target {
&self.version
}
}
impl AsRef<emver::Version> for Version {
fn as_ref(&self) -> &emver::Version {
impl AsRef<exver::ExtendedVersion> for VersionString {
fn as_ref(&self) -> &exver::ExtendedVersion {
&self.version
}
}
impl AsRef<str> for Version {
impl AsRef<str> for VersionString {
fn as_ref(&self) -> &str {
self.as_str()
}
}
impl PartialEq for Version {
fn eq(&self, other: &Version) -> bool {
impl PartialEq for VersionString {
fn eq(&self, other: &VersionString) -> bool {
self.version.eq(&other.version)
}
}
impl Eq for Version {}
impl PartialOrd for Version {
impl Eq for VersionString {}
impl PartialOrd for VersionString {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
self.version.partial_cmp(&other.version)
}
}
impl Ord for Version {
impl Ord for VersionString {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
self.version.cmp(&other.version)
self.version.partial_cmp(&other.version).unwrap_or_else(|| {
match (self.version.flavor(), other.version.flavor()) {
(None, Some(_)) => std::cmp::Ordering::Greater,
(Some(_), None) => std::cmp::Ordering::Less,
(a, b) => a.cmp(&b),
}
})
}
}
impl Hash for Version {
impl Hash for VersionString {
fn hash<H: Hasher>(&self, state: &mut H) {
self.version.hash(state)
}
}
impl<'de> Deserialize<'de> for Version {
impl<'de> Deserialize<'de> for VersionString {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let string = String::deserialize(deserializer)?;
let version = emver::Version::from_str(&string).map_err(::serde::de::Error::custom)?;
let version =
exver::ExtendedVersion::from_str(&string).map_err(::serde::de::Error::custom)?;
Ok(Self { string, version })
}
}
impl Serialize for Version {
impl Serialize for VersionString {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
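
`Ord` can no longer defer to exver directly: versions with different flavors are only partially ordered, so `partial_cmp` may return `None`. The diff totalizes the order by ranking any unflavored version above any flavored one, and otherwise comparing the flavors themselves. A stand-in sketch of exactly that tie-break (types here are illustrative, not exver's API):

```rust
use std::cmp::Ordering;

#[derive(PartialEq, Eq)]
struct V {
    flavor: Option<&'static str>,
    version: u32,
}

fn total_cmp(a: &V, b: &V) -> Ordering {
    // Mimic exver: comparison across flavors is undefined (None).
    let partial = (a.flavor == b.flavor).then(|| a.version.cmp(&b.version));
    partial.unwrap_or_else(|| match (a.flavor, b.flavor) {
        (None, Some(_)) => Ordering::Greater, // unflavored outranks flavored
        (Some(_), None) => Ordering::Less,
        (x, y) => x.cmp(&y), // otherwise order by flavor name
    })
}

fn main() {
    let stock = V { flavor: None, version: 1 };
    let ordinal = V { flavor: Some("ordinals"), version: 9 };
    assert_eq!(total_cmp(&stock, &ordinal), Ordering::Greater);
}
```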

View File

@@ -1,16 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
},
"nullable": []
},
"hash": "1ce5254f27de971fd87f5ab66d300f2b22433c86617a0dbf796bf2170186dd2e"
}

View File

@@ -1,14 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM ssh_keys WHERE fingerprint = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text"
]
},
"nullable": []
},
"hash": "21471490cdc3adb206274cc68e1ea745ffa5da4479478c1fd2158a45324b1930"
}

View File

@@ -1,40 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "SELECT hostname, path, username, password FROM cifs_shares WHERE id = $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "hostname",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "path",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "username",
"type_info": "Text"
},
{
"ordinal": 3,
"name": "password",
"type_info": "Text"
}
],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": [
false,
false,
false,
true
]
},
"hash": "28ea34bbde836e0618c5fc9bb7c36e463c20c841a7d6a0eb15be0f24f4a928ec"
}

View File

@@ -1,15 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM tor WHERE package = $1 AND interface = $2",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text"
]
},
"nullable": []
},
"hash": "350ab82048fb4a049042e4fdbe1b8c606ca400e43e31b9a05d2937217e0f6962"
}

View File

@@ -1,34 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "SELECT * FROM ssh_keys WHERE fingerprint = $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "fingerprint",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "openssh_pubkey",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "created_at",
"type_info": "Text"
}
],
"parameters": {
"Left": [
"Text"
]
},
"nullable": [
false,
false,
false
]
},
"hash": "4099028a5c0de578255bf54a67cef6cb0f1e9a4e158260700f1639dd4b438997"
}

View File

@@ -1,50 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "SELECT * FROM session WHERE logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "logged_in",
"type_info": "Timestamp"
},
{
"ordinal": 2,
"name": "logged_out",
"type_info": "Timestamp"
},
{
"ordinal": 3,
"name": "last_active",
"type_info": "Timestamp"
},
{
"ordinal": 4,
"name": "user_agent",
"type_info": "Text"
},
{
"ordinal": 5,
"name": "metadata",
"type_info": "Text"
}
],
"parameters": {
"Left": []
},
"nullable": [
false,
false,
true,
false,
true,
false
]
},
"hash": "4691e3a2ce80b59009ac17124f54f925f61dc5ea371903e62cdffa5d7b67ca96"
}

View File

@@ -1,14 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text"
]
},
"nullable": []
},
"hash": "4bcfbefb1eb3181343871a1cd7fc3afb81c2be5c681cfa8b4be0ce70610e9c3a"
}

View File

@@ -1,20 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "SELECT password FROM account",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "password",
"type_info": "Text"
}
],
"parameters": {
"Left": []
},
"nullable": [
false
]
},
"hash": "629be61c3c341c131ddbbff0293a83dbc6afd07cae69d246987f62cf0cc35c2a"
}

View File

@@ -1,23 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "SELECT key FROM tor WHERE package = $1 AND interface = $2",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "key",
"type_info": "Bytea"
}
],
"parameters": {
"Left": [
"Text",
"Text"
]
},
"nullable": [
false
]
},
"hash": "687688055e63d27123cdc89a5bbbd8361776290a9411d527eaf1fdb40bef399d"
}

View File

@@ -1,14 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "UPDATE session SET last_active = CURRENT_TIMESTAMP WHERE id = $1 AND logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text"
]
},
"nullable": []
},
"hash": "6d35ccf780fb2bb62586dd1d3df9c1550a41ee580dad3f49d35cb843ebef10ca"
}

View File

@@ -1,24 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO UPDATE SET package = EXCLUDED.package RETURNING key",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "key",
"type_info": "Bytea"
}
],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
},
"nullable": [
false
]
},
"hash": "770c1017734720453dc87b58c385b987c5af5807151ff71a59000014586752e0"
}

View File

@@ -1,65 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications WHERE id < $1 ORDER BY id DESC LIMIT $2",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "package_id",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "created_at",
"type_info": "Timestamp"
},
{
"ordinal": 3,
"name": "code",
"type_info": "Int4"
},
{
"ordinal": 4,
"name": "level",
"type_info": "Text"
},
{
"ordinal": 5,
"name": "title",
"type_info": "Text"
},
{
"ordinal": 6,
"name": "message",
"type_info": "Text"
},
{
"ordinal": 7,
"name": "data",
"type_info": "Text"
}
],
"parameters": {
"Left": [
"Int4",
"Int8"
]
},
"nullable": [
false,
true,
false,
false,
false,
false,
false,
true
]
},
"hash": "7b64f032d507e8ffe37c41f4c7ad514a66c421a11ab04c26d89a7aa8f6b67210"
}

View File

@@ -1,19 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "\n INSERT INTO account (\n id,\n server_id,\n hostname,\n password,\n network_key,\n root_ca_key_pem,\n root_ca_cert_pem\n ) VALUES (\n 0, $1, $2, $3, $4, $5, $6\n ) ON CONFLICT (id) DO UPDATE SET\n server_id = EXCLUDED.server_id,\n hostname = EXCLUDED.hostname,\n password = EXCLUDED.password,\n network_key = EXCLUDED.network_key,\n root_ca_key_pem = EXCLUDED.root_ca_key_pem,\n root_ca_cert_pem = EXCLUDED.root_ca_cert_pem\n ",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Bytea",
"Text",
"Text"
]
},
"nullable": []
},
"hash": "7c7a3549c997eb75bf964ea65fbb98a73045adf618696cd838d79203ef5383fb"
}

View File

@@ -1,14 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM tor WHERE package = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text"
]
},
"nullable": []
},
"hash": "7e0649d839927e57fa03ee51a2c9f96a8bdb0fc97ee8a3c6df1069e1e2b98576"
}

Some files were not shown because too many files have changed in this diff