Compare commits

...

428 Commits

Author SHA1 Message Date
Aiden McClelland
79dbbdf6b4 fix mounts for pre-existing subcontainers (#2870)
* fix mounts for pre-existing subcontainers

* don't error on missing assets
2025-04-11 19:03:25 +00:00
Aiden McClelland
20d3b5288c sdk tweaks (#2858)
* sdk tweaks

* beta.20

* alpha.18
2025-04-07 19:55:38 +00:00
Mariusz Kogen
6ecaeb4fde fix initial setup as user and clear messaging (#2848)
* fix initial setup as user and clear messaging

* fix this and that :)

* add IPv6 support to validate_ip function

* Use vpn-clearnet as name for the interface

* Rebrand and finish with docs link

* set static clearnet name

* Magic clearnet to the end :D

* change the command name

* the name is magic-clearnet

* wireguard-vps-proxy-setup

* one more fix
2025-03-30 16:04:34 +02:00
Lucy
0016b4bd72 allow ids to include numbers (#2857) 2025-03-28 16:59:48 +00:00
Aiden McClelland
b8ff331ccc add callback for getHost 2025-03-21 11:17:05 -06:00
Aiden McClelland
9e63f3f7c6 add callback for getContainerIp (#2851)
* add callback for getContainerIp

* register callback before retrieving info

* version bump; only use backports for linux
2025-03-20 21:54:05 +00:00
Aiden McClelland
05162ca350 Bugfix/sdk misc (#2847)
* misc sdk fixes

* version bump

* formatting

* add missing dependency to root

* alpha.16 and beta.17

* beta.18
2025-03-16 09:04:10 -06:00
Sam Sartor
e662b2f393 Version range compression utils (#2840)
* DNF normalization wip

* a bunch of wip stuff

* it is alive!

* tests

* deduplicate strings in tests

* fix != flavor behavior & parse flavor constraints & equals shorthand for normalize

* use normalization

* more comments & fix tests not running because of bad rebase

* fix comments+tests

* slightly better comment

* fix dependency & typos

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2025-03-04 22:55:20 +00:00
Aiden McClelland
63bc71da13 fix issues with legacy packages (#2841)
* fix issues with legacy packages

* include non-prerelease versions within compat range

* lock sdk to corresponding os prerelease

* bump sdk version

* fixes from PR review
2025-03-03 17:30:36 +00:00
Aiden McClelland
737beb11f6 improve error handling (#2839) 2025-02-24 20:49:16 +00:00
Aiden McClelland
f55af7da4c hotfix for alpha.15 (#2838)
* hotfix for alpha.15

* sdk version bump
2025-02-24 20:08:02 +00:00
Aiden McClelland
80461a78b0 misc improvements (#2836)
* misc improvements

* kill proc before destroying subcontainer fs

* version bump

* beta.11

* use bind mount explicitly

* Update sdk/base/lib/Effects.ts

Co-authored-by: Dominion5254 <musashidisciple@proton.me>

---------

Co-authored-by: Dominion5254 <musashidisciple@proton.me>
2025-02-21 22:08:22 +00:00
Aiden McClelland
40d194672b change 'delete' to 'remove' everywhere to be consistent (#2834) 2025-02-21 00:14:04 +00:00
Aiden McClelland
d63341ea06 alpha.14 (#2833) 2025-02-19 01:34:01 +00:00
Aiden McClelland
df8c8dc93b fix #2813 (#2832) 2025-02-18 22:54:33 +00:00
Aiden McClelland
dd3a140cb1 fix inputspec passthrough (#2830)
* fix inputspec passthrough

* beta.9
2025-02-18 19:41:20 +00:00
Aiden McClelland
44aa3cc9b5 sdk hotfix 2025-02-12 17:11:54 -07:00
Aiden McClelland
b88b24e231 sdk version bump 2025-02-12 16:05:11 -07:00
Aiden McClelland
890c31ba74 minor sdk tweaks (#2828) 2025-02-12 22:08:13 +00:00
Aiden McClelland
6dc9a11a89 misc improvements to cli (#2827)
* misc improvements to cli

* switch host shorthand to H

* simplify macro
2025-02-12 19:20:18 +00:00
Lucy
3047dae703 Action Request updates + misc fixes (#2818)
* fix web manifest format error

* fix setting optional dependencies

* rework dependency actions to be nested

* fix styling

* fix styles

* combine action requests into same component

* only display actions header if they exist

* fix storing polyfill dependencies

* fix styling and button propagation

* fixes for setting polyfill dependencies

* revert to test

* revert required deps setting logic

* add logs and adjust logic

* test

* fix deps logic when changing config

* remove logs; deps working as expected
2025-02-08 18:11:26 -07:00
Alex Inkin
4e22f13007 Fix/unions (#2825)
* mocks for union value

* fix: properly handle values in unions

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2025-02-06 10:06:04 -07:00
Mariusz Kogen
04611b0ae2 feat: add ssh key auth check and config on VPS (#2824) 2025-02-05 22:14:35 +01:00
Aiden McClelland
a00f1ab549 fix version bump 2025-01-29 12:29:49 -07:00
Aiden McClelland
446b37793b miscellaneous bugfixes for alpha12 (#2823)
* miscellaneous bugfixes for alpha12

* fix deserialization of path in cifs share

* catch error in setup.status

* actually reserialize db after migration

* better progress reporting for migrations

* fix infinite drop

* fix raspi build

* fix race condition

* version bump

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2025-01-28 23:02:52 +00:00
Alex Inkin
b83eeeb131 feat: better form array validation (#2821) 2025-01-27 17:46:21 -07:00
Matt Hill
e8d727c07a better acme ux (#2820)
* better acme ux

* fix patching arrays... again

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2025-01-27 18:40:26 +00:00
Mariusz Kogen
e28fa26c43 Set proper group permissions and enable ssh-copy-id password prompt (#2817) 2025-01-23 14:41:44 -07:00
Matt Hill
639fc3793a dont show success message if smtp test fails 2025-01-23 13:44:29 -07:00
Mariusz Kogen
2aaae5265a feat: add WireGuard VPS setup automation script (#2810)
* feat: add WireGuard VPS setup automation script

Adds a comprehensive bash script that automates:
- SSH key setup and authentication
- WireGuard installation on remote VPS
- Configuration download and import to NetworkManager
- User-friendly CLI interface with validation
- Detailed status messages and error handling
- Instructions for exposing services via ACME/Let's Encrypt

* use cat heredoc for issue files to fix formatting

Replaces echo with cat heredoc when writing to /etc/issue and /etc/issue.net to properly preserve escape sequences and prevent unwanted newlines in login prompts.

* add convenient `wg-vps-setup` symlink to PATH

* sync ssh privkey on init

* Update default ssh key location

* simplify to use existing StartOS SSH keys and fix .ssh permission

* finetune

* Switch to start9labs repo

* rename some files

* set correct ownership

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2025-01-22 23:53:31 +00:00
Alex Inkin
baa4c1fd25 fix: fix resetting form to default values (#2816) 2025-01-21 20:52:47 -07:00
Matt Hill
479797361e add clearnet functionality to frontend (#2814)
* add clearnet functionality to frontend

* add pattern and add sync db on rpcs

* add domain pattern

* show acme name instead of url if known

* dont blow up if domain not present after delete

* use common name for letsencrypt

* normalize urls

* refactor start-os ui net service

* backend migration and rpcs for serverInfo.host

* fix cors

* implement clearnet for main startos ui

* ability to add and remove tor addresses, including vanity

* add guard to prevent duplicate addresses

* misc bugfixes

* better heuristics for launching UIs

* fix ipv6 mocks

* fix ipv6 display bug

* rewrite url selection for launch ui

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2025-01-22 03:46:36 +00:00
Aiden McClelland
0a9f1d2a27 fix migration for alpha.10 (#2811)
* fix migration for alpha.10

* fix binds

* don't commit if db model does not match

* stronger guard

* better guard
2025-01-15 15:40:10 -07:00
Aiden McClelland
5e103770fd rename some things in the sdk (#2809)
* rename some things in the sdk

* fix docs

* rename some types exported from rust
2025-01-15 16:58:50 +00:00
Matt Hill
e012a29b5e add smtp to frontend (#2802)
* add smtp to frontend

* left align headers

* just email

* change all to email

* fix test-smtp api

* types

* fix email from and login address handling

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2025-01-15 00:32:19 +00:00
Aiden McClelland
5d759f810c Bugfix/websockets (#2808)
* retry logic for init status

* fix login flashing and sideload hanging

* add logging

* misc backend bugfixes

* use closingObserver instead

* always show reinstall button

* go back to endWith

* show error if sideload fails

* refactor more watch channels

* navigate to services page on sideload complete

* handle error closure events properly

* handle error scenario better in sideload websocket

* remove a clone

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2025-01-14 03:39:52 +00:00
Remco Ros
eb1f3a0ced sdk: checkPortListening: check tcp6/udp6 ports (#2763)
* sdk: checkPortListening: check tcp6/udp6 ports

* allow ipv6 if unspecified address

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2025-01-09 16:40:41 -07:00
Aiden McClelland
29e8210782 enabling support for wireguard and firewall (#2713)
* wip: enabling support for wireguard and firewall

* wip

* wip

* wip

* wip

* wip

* implement some things

* fix warning

* wip

* alpha.23

* misc fixes

* remove ufw since no longer required

* remove debug info

* add cli bindings

* debugging

* fixes

* individualized acme and privacy settings for domains and bindings

* sdk version bump

* migration

* misc fixes

* refactor Host::update

* debug info

* refactor webserver

* misc fixes

* misc fixes

* refactor port forwarding

* recheck interfaces every 5 min if no dbus event

* misc fixes and cleanup

* misc fixes
2025-01-09 16:34:34 -07:00
Dominion5254
45ca9405d3 Feat/test smtp (#2806)
* add test-smtp server subcommand

* return error if password is None

* fix return type

* borrow variables

* convert args to &str

* Tuple needs to have the same types apparently

* Clone instead

* fix formatting

* improve test email body

* Update core/startos/src/system.rs

Co-authored-by: kn0wmad <39687477+kn0wmad@users.noreply.github.com>

* add tls connection

* remove commented code

* use aidens mail-send fork

---------

Co-authored-by: kn0wmad <39687477+kn0wmad@users.noreply.github.com>
2025-01-09 20:43:53 +00:00
Alex Inkin
e9d851e4d3 fix: reset sideload service after websocket completes (#2798)
* fix: reset sideload service after websocket completes

* chore: fix comment
2024-12-11 16:14:01 -07:00
Mariusz Kogen
c675d0feee Escape backslashes in /etc/issue to prevent unwanted newlines (#2797) 2024-12-10 09:55:20 -07:00
Matt Hill
1859c0505e remove deprecated useHash param 2024-12-06 08:53:59 -07:00
Aiden McClelland
f15251096c sdk beta.0 2024-12-03 16:47:45 -07:00
Matt Hill
ef28b01286 delete patch dump and ack-welcome references 2024-12-02 16:58:39 -07:00
Aiden McClelland
f48750c22c v0.3.6-alpha.9 (#2795)
* v0.3.6-alpha.9

* fix raspi build

* backup kernel still .51
2024-12-02 22:03:40 +00:00
Matt Hill
7a96e94491 More SDK comments (#2796)
* sdk tweaks

* switch back to deeppartial

* WIP, update comments

* reinstall chesterton's fence

* more comments

* delete extra package.lock

* handle TODOs

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2024-12-02 20:58:28 +00:00
Matt Hill
22a32af750 use notification system for OS updates (#2670)
* use notification system for OS updates

* feat: Include the version update notification in the update in rs

* chore: Change the location of the comment

* progress on release notes

* fill out missing sections

* fix build

* fix build

---------

Co-authored-by: J H <dragondef@gmail.com>
Co-authored-by: Aiden McClelland <me@drbonez.dev>
2024-12-02 20:58:09 +00:00
Mariusz Kogen
dd423f2e7b Add System Debug Information Gathering Script (#2738)
* Add gather_debug_info.sh for comprehensive StartOS diagnostics
* chore: Update the services to use the lxc instead of podman
* chore: Add symlink /usr/bin/gather-debug

---------

Co-authored-by: Jade <2364004+Blu-J@users.noreply.github.com>
2024-12-02 17:27:32 +01:00
Matt Hill
12dec676db Update sdk comments (#2793)
* sdk tweaks

* switch back to deeppartial

* WIP, update comments

* reinstall chesterton's fence

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2024-11-26 23:54:05 -07:00
Aiden McClelland
504f1a8e97 sdk tweaks (#2791)
* sdk tweaks

* switch back to deeppartial
2024-11-25 18:49:11 +00:00
Mariusz Kogen
e4a2af6ae7 Add serial console support for headless operation (#2790)
* implement serial console support
* customize local and remote login prompt
2024-11-23 12:32:52 +01:00
Aiden McClelland
fefa88fc2a Feature/cli clearnet (#2789)
* add support for ACME cert acquisition

* add support for modifying hosts for a package

* misc fixes

* more fixes

* use different port for lan clearnet than wan clearnet

* fix chroot-and-upgrade always growing

* bail on failure

* wip

* fix alpn auth

* bump async-acme

* fix cli

* add barebones documentation

* add domain to hostname info
2024-11-21 17:55:59 +00:00
Alex Inkin
ed8a7ee8a5 feat: make favicon react to theme (#2786)
Co-authored-by: Matt Hill <mattnine@protonmail.com>
2024-11-19 14:19:41 -07:00
Aiden McClelland
1771797453 sdk input spec improvements (#2785)
* sdk input spec improvements

* more sdk changes

* fe changes

* alpha.14

* fix tests

* separate validator in filehelper

* use deeppartial for getinput

* fix union type and update ts-matches

* alpha.15

* alpha.16

* alpha.17

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2024-11-19 11:25:43 -07:00
Aiden McClelland
46179f5c83 attempt to fix webserver lockup (#2788) 2024-11-14 18:31:47 +00:00
Jade
db6fc661a6 fix: Dependency (#2784)
* fix: Dependency

* fix: set deps during container init
2024-11-13 10:53:19 -07:00
Matt Hill
c088ab7a79 remove file from input spec (#2782) 2024-11-11 12:17:44 -07:00
Aiden McClelland
aab2b8fdbc do not request config action if no config exists (#2779) 2024-11-11 18:54:51 +00:00
Aiden McClelland
b1e7a717af allow updating grub from chroot-and-upgrade (#2778) 2024-11-08 12:39:16 -07:00
Aiden McClelland
25e38bfc98 do not mute logs of subcontainer launch dummy (#2781) 2024-11-08 12:39:02 -07:00
Aiden McClelland
279c7324c4 download to directory not filename (#2777) 2024-11-08 12:38:46 -07:00
Matt Hill
1c90303914 closes #2340 and #2431, fixes bug with select all for backup (#2780)
* closes #2340 and #2431, fixes bug with select all for backup

* revert mock
2024-11-08 11:57:42 -07:00
Aiden McClelland
6ab6502742 alpha.8 (#2776) 2024-11-06 03:52:38 +00:00
Aiden McClelland
b79c029f21 Feature/registry improvements (#2772)
* add build cli script for cross-building cli

* sdk alpha.13

* registry improvements
2024-11-06 03:38:52 +00:00
Aiden McClelland
020268fe67 don't attempt autoconfig if config is null (#2775)
* don't attempt autoconfig if config is null

* quiet

* fixes
2024-11-06 03:38:30 +00:00
Aiden McClelland
176b1c9d20 allow lxc-net for tor (#2774)
* allow lxc-net for tor

* /24
2024-11-05 17:50:24 +00:00
Jade
5ab2efa0c0 wip(fix): Working on fixing the migration. (#2771)
* wip(fix): Working on fixing the migration.

* get s9pk compat key

* wip: Change to just using the archive to not use the manifest parsing.

* fix: Fix the rebuild

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2024-11-05 17:49:38 +00:00
Jade
88320488a7 fix: Actions like the action don't give the results (#2770) 2024-10-30 20:41:06 +00:00
Aiden McClelland
2091abeea2 persist hostname in config overlay (#2769)
* persist hostname

* add migration

* fix version mismatch

* remove dmesg logging from build
2024-10-30 18:55:36 +00:00
Aiden McClelland
480f5c1a9a pi 5 support (#2640)
* prioritize raspi repo

* change kernel

* use newer kernel

* Update build.sh

* fix ssh keygen

* switch to .com

* use raspi-update to download firmware

* Update build.sh

* Update build.sh

* Update build.sh

* Update build.sh

* switch to boot/firmware

* fix fstab

* update-initramfs

* skip check partition

* switch back to boot

* fix initramfs

* use rpi-update kernels

* simplify kernel selection
2024-10-30 09:15:24 -06:00
Jade
8e0db2705f Fix/mac start cli packing (#2767)
* wip

* wip: Adding more of the docker for the mac build

* fix: Running a build

* chore: Make the code a little cleaner

* optimize: reduce docker image size for mac-tar2sqfs

* feat: Update sdk-utils container usage and Dockerfile

* feat: Publish SDK Utils Container image

* clean up ...

* feat: Add manual input to control tagging Docker image as 'latest'

* fix: Update workflow input handling

* switch to different repo and clean

---------

Co-authored-by: Mariusz Kogen <k0gen@pm.me>
Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
2024-10-29 20:38:24 +00:00
Matt Hill
1be9cdae67 use hardware requirements to display conflicts and prevent install (#2700)
* use hardware requirements to display conflicts and prevent install

* better messaging and also consider OS compatibility

* wip: backend hw requirements

* update backend components

* migration

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2024-10-29 19:48:03 +00:00
Jade
e1a91a7e53 Feat: With path (#2768) 2024-10-29 19:09:56 +00:00
Remco Ros
b952e3183f sdk: allow passing docker build arguments in service manifest (#2764)
* start-cli s9pk pack: silence mksquashfs output

* sdk: allow passing docker build arguments in service manifest

* merge EnvVar into BuildArg
2024-10-28 22:33:26 +00:00
Aiden McClelland
26ae0bf207 sdk tweaks (#2760)
* sdk tweaks

* update action result types

* accommodate new action response types

* fix: show action value labels

* Feature/get status effect (#2765)

* wip: get status

* feat: Add the get_status for effects

* feat: Do a callback

---------

Co-authored-by: J H <dragondef@gmail.com>

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
Co-authored-by: waterplea <alexander@inkin.ru>
Co-authored-by: J H <dragondef@gmail.com>
2024-10-28 18:12:36 +00:00
Remco Ros
42cfd69463 sdk: fix piping stdio of Daemons, support onStdOut/onStderr (#2762) 2024-10-24 16:29:12 -06:00
Jade
7694b68e06 Feat/stats (#2761)
* Feat: Add the memory for the stats.

* Chore: Add %
2024-10-22 13:49:01 -06:00
Jade
28e39c57bd Fix: Error Messages in HealthCheck (#2759)
* Fix: Error Messages in HealthCheck

* Update sdk/package/lib/util/SubContainer.ts

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>

* fix ts error

---------

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
Co-authored-by: Aiden McClelland <me@drbonez.dev>
2024-10-21 20:47:09 +00:00
Aiden McClelland
2fa0a57d2b fixing raspi image (#2712)
* wip: fixing raspi image

* fix pi build
2024-10-18 20:17:56 +00:00
Matt Hill
c9f3e1bdab fix bug allowing click on disabled actions 2024-10-17 21:20:46 -06:00
Matt Hill
2ba56b8c59 Convert properties to an action (#2751)
* update actions response types and partially implement in UI

* further remove diagnostic ui

* convert action response nested to array

* prepare action res modal for Alex

* ad dproperties action for Bitcoin

* feat: add action success dialog (#2753)

* feat: add action success dialog

* mocks for string action res and hide properties from actions page

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>

* return null

* remove properties from backend

* misc fixes

* make severity separate argument

* rename ActionRequest to ActionRequestOptions

* add clearRequests

* fix s9pk build

* remove config and properties, introduce action requests

* better ux, better mocks, include icons

* fix dependency types

* add variant for versionCompat

* fix dep icon display and patch operation display

* misc fixes

* misc fixes

* alpha 12

* honor provided input to set values in action

* fix: show full descriptions of action success items (#2758)

* fix type

* fix: fix build:deps command on Windows (#2752)

* fix: fix build:deps command on Windows

* fix: add escaped quotes

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>

* misc db compatibility fixes

---------

Co-authored-by: Alex Inkin <alexander@inkin.ru>
Co-authored-by: Aiden McClelland <me@drbonez.dev>
Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
2024-10-17 13:31:56 -06:00
Jade
fb074c8c32 036 migration (#2750)
* chore: convert to use a value, cause why not

* wip: Add the up for this going up

* wip: trait changes

* wip: Add in some more of the private transformations

* chore(wip): Adding the ssh_keys todo

* wip: Add cifs

* fix migration structure

* chore: Fix the trait for the version

* wip(feat): Notifications are in the system

* fix marker trait hell

* handle key todos

* wip: Testing the migration in a system.

* fix pubkey parser

* fix: migration works

* wip: Trying to get the migration stuff?

* fix: Can now install the packages that we wanted, yay!"

* Merge branch 'next/minor' of github.com:Start9Labs/start-os into feat/migration

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2024-10-16 10:09:30 -06:00
Dominion5254
9fc082d1e6 add with_about for CLI commands (#2741)
* add with_about for echo, server, and auth

* update for feedback

* finish (most) remaining command documentation

* update comments after additional clarification

* add expanded_api descriptions

* add comments for action_api

* add comments for remaining apis

* add comment for package-rebuild

* fix build errors

* missed one with_about

* add context to git-info subcommands

* remove context from git-info subcommands

* Make git-info from_fns generic over context

* make version::git_info generic over the context

* try removing generics from subcommand and version::git_info

* try adding a closure with context

* Updates for reviewer feedback
2024-10-16 09:11:32 -06:00
Aiden McClelland
0c04802560 fix cors (#2749) 2024-10-01 11:44:24 -06:00
Aiden McClelland
5146689158 v0.3.6-alpha.6 (#2748) 2024-09-27 16:38:28 -06:00
Aiden McClelland
e7fa94c3d3 add error status (#2746)
* add error status

* update types

* ṗ̶̰̙̓͒̈́ͅü̵̢̙̫̣ŗ̷̪̺̺͛g̴̲͉͎̬̒̇e̵̪̎̅͌ ̶̡̜̘͐͛t̶͎͍̣̿̍̐h̴͕̩͗̈́̎̑e̵͚͒̂͝ ̸̛͙̦͈͝v̶̱͙̬̽̔ọ̶̧̡̒̓i̸̬̲͍̋̈́d̴͉̀

* fix some extra voids

* add `package.rebuild`

* introduce error status and pkg rebuild and fix mocks

* minor fixes

* fix build

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2024-09-26 20:19:06 -06:00
Aiden McClelland
db0695126f Refactor/actions (#2733)
* store, properties, manifest

* interfaces

* init and backups

* fix init and backups

* file models

* more versions

* dependencies

* config except dynamic types

* clean up config

* remove disabled from non-dynamic values

* actions

* standardize example code block formats

* wip: actions refactor

Co-authored-by: Jade <Blu-J@users.noreply.github.com>

* commit types

* fix types

* update types

* update action request type

* update apis

* add description to actionrequest

* clean up imports

* revert package json

* chore: Remove the recursive to the index

* chore: Remove the other thing I was testing

* flatten action requests

* update container runtime with new config paradigm

* new actions strategy

* seems to be working

* misc backend fixes

* fix fe bugs

* only show breakages if breakages

* only show success modal if result

* don't panic on failed removal

* hide config from actions page

* polyfill autoconfig

* use metadata strategy for actions instead of prev

* misc fixes

* chore: split the sdk into 2 libs (#2736)

* follow sideload progress (#2718)

* follow sideload progress

* small bugfix

* shareReplay with no refcount false

* don't wrap sideload progress in RPCResult

* dont present toast

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>

* chore: Add the initial of the creation of the two sdk

* chore: Add in the baseDist

* chore: Add in the baseDist

* chore: Get the web and the runtime-container running

* chore: Remove the empty file

* chore: Fix it so the container-runtime works

---------

Co-authored-by: Matt Hill <MattDHill@users.noreply.github.com>
Co-authored-by: Aiden McClelland <me@drbonez.dev>

* misc fixes

* update todos

* minor clean up

* fix link script

* update node version in CI test

* fix node version syntax in ci build

* wip: fixing callbacks

* fix sdk makefile dependencies

* add support for const outside of main

* update apis

* don't panic!

* Chore: Capture weird case on rpc, and log that

* fix procedure id issue

* pass input value for dep auto config

* handle disabled and warning for actions

* chore: Fix for link not having node_modules

* sdk fixes

* fix build

* fix build

* fix build

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
Co-authored-by: Jade <Blu-J@users.noreply.github.com>
Co-authored-by: J H <dragondef@gmail.com>
Co-authored-by: Jade <2364004+Blu-J@users.noreply.github.com>
Co-authored-by: Matt Hill <MattDHill@users.noreply.github.com>
2024-09-25 16:12:52 -06:00
Aiden McClelland
eec5cf6b65 add support for remote attaching to container (#2732)
* add support for remote attaching to container

* feature: Add in the subcontainer searching

* feat: Add in the name/ imageId filtering

* Feat: Fix the env and the workdir

* chore: Make the sigkill first?

* add some extra guard on term

* fix: Health during error doesnt return what we need

* chore: Cleanup for pr

* fix build

* fix build

* Update startos-iso.yaml

* Update startos-iso.yaml

* Update startos-iso.yaml

* Update startos-iso.yaml

* Update startos-iso.yaml

* Update startos-iso.yaml

* Update startos-iso.yaml

* check status during build

---------

Co-authored-by: J H <dragondef@gmail.com>
2024-09-20 15:38:16 -06:00
Matt Hill
24c6cd235b Merge pull request #2737 from Start9Labs/fix/flavors
Fix/flavors
2024-09-17 16:32:42 -06:00
Lucy Cifferello
47855dc78b remove explicit type 2024-09-17 14:46:09 -04:00
Lucy Cifferello
dbbc42c5fd update packages 2024-09-12 17:03:01 -04:00
Lucy Cifferello
27416efb6d only display alt implementations if no flavors 2024-09-12 11:55:54 -04:00
Lucy Cifferello
21dd08544b update version to clear refresh alert 2024-09-12 11:55:10 -04:00
Lucy Cifferello
ae88f7d181 add types 2024-09-12 11:51:19 -04:00
Matt Hill
9981ee7601 follow sideload progress (#2718)
* follow sideload progress

* small bugfix

* shareReplay with no refcount false

* don't wrap sideload progress in RPCResult

* dont present toast

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2024-09-03 09:23:47 -06:00
Jade
66b018a355 Fix/health check error (#2731)
* fix: No error's with an error code

* fix dns query

* await resolv.conf copy

* use tty in subcontainer exec if parent is tty

* Fix: Home=root for inject services

* fix: Add the action inject too

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2024-08-30 06:29:27 +00:00
Aiden McClelland
ed1bc6c215 fix: session display (#2730)
* fixes #2651

* fix display

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2024-08-28 18:36:57 +00:00
Aiden McClelland
c552fdfc0f fixes #2651 (#2729) 2024-08-27 17:11:37 -06:00
Aiden McClelland
4006dba9f1 fixes #2702 (#2728) 2024-08-27 16:48:11 -06:00
Aiden McClelland
571db5c0ee Bugfix/mac build (#2726)
* fix mac build

* additional fixes

* handle arm64 from uname -m

* handle arm64 from uname -m in all builds

* gracefully handle rootless docker

* use cross-platform method of determining file uid
2024-08-26 22:52:23 +00:00
Aiden McClelland
9059855f2b run tests in docker (#2725) 2024-08-23 22:54:31 +00:00
Jade
e423678995 chore: Bump the version to 5 (#2724) 2024-08-23 12:52:41 -06:00
Jade
ece5577f26 feat: Adding in the effects to the startSdk (#2722)
Currently the start sdk that we expose calls some of the effects. And
there are others that need to be called via the effects object. The
idea is that all the effects that could and should be called are from
the startsdk side
2024-08-23 11:20:18 -06:00
Jade
f373abdd14 fix: Container runtime actions (#2723)
Actions were running in a race condition: they sometimes didn't wait for the container to be started, so the exec that was run afterward would fail.
2024-08-23 11:19:49 -06:00
Aiden McClelland
4defec194f Feature/subcontainers (#2720)
* wip: subcontainers

* wip: subcontainer infra

* rename NonDestroyableOverlay to SubContainerHandle

* chore: Changes to the container and other things

* wip:

* wip: fixes

* fix launch & exec

Co-authored-by: Jade <Blu-J@users.noreply.github.com>

* tweak apis

* misc fixes

* don't treat sigterm as error

* handle health check set during starting

---------

Co-authored-by: J H <dragondef@gmail.com>
Co-authored-by: Jade <Blu-J@users.noreply.github.com>
2024-08-22 21:45:54 -06:00
Aiden McClelland
72898d897c Merge branch 'next/minor' of github.com:Start9Labs/start-os into next/minor 2024-08-19 21:39:45 -06:00
Aiden McClelland
c6ee65b654 bump sdk version 2024-08-19 21:39:38 -06:00
Jade
4d7694de24 chore: reimplement refactor for the changes (#2716)
* chore: reimplement refactor for the changes

* chore: Make it so even more cases are caught on the transformation

* Update container-runtime/src/Adapters/Systems/SystemForEmbassy/index.ts

* chore: Update the types of the action result because it wasnt matching what was in the action.rs
2024-08-19 21:38:05 -06:00
Matt Hill
a083f25b6c better ergonomics for versions (#2717) 2024-08-19 19:44:57 +00:00
Aiden McClelland
6a8d8babce fix uid mapping in squashfs's made from tarballs (#2710) 2024-08-16 13:40:10 -06:00
Aiden McClelland
f692ebbbb9 fix runtime lockup (#2711) 2024-08-15 23:41:14 +00:00
Aiden McClelland
c174b65465 create version graph to handle migrations (#2708)
* create version graph to handle migrations

* Fix some version alpha test

* connect dataVersion api

* rename init fns

* improve types and add tests

* set data version after backup restore

* chore: Add some types tests for version info

* wip: More changes to versionInfo tests

* wip: fix my stupid

* update mocks

* update runtime

* chore: Fix the loop

---------

Co-authored-by: Jade <2364004+Blu-J@users.noreply.github.com>
Co-authored-by: J H <dragondef@gmail.com>
2024-08-15 20:58:53 +00:00
Jade
c704626a39 Fix/overlay destroy (#2707)
* feature: Make all errors in console.error include an Error for its stack trace

* feature: Make all errors in console.error include an Error for its stack trace

* fix: Add the tinisubreaper for the subreapers to know they are not the reaper

* fix: overlay always destroyed

* chore: Move the style of destroy to just private
2024-08-14 11:16:23 -06:00
Aiden McClelland
7ef25a3816 Merge pull request #2703 from Start9Labs/bugfix/misc
Bugfix/misc
2024-08-13 21:42:50 +00:00
Matt Hill
46a893a8b6 fix bug with setup wiz recovery 2024-08-10 05:46:54 -06:00
Jade
30885cee01 fix: Gitea/ Synapse/ Nostr types for manifest + config (#2704) 2024-08-10 11:07:06 +02:00
Matt Hill
9237984782 remove disabled from createInterface 2024-08-08 22:26:42 -06:00
Aiden McClelland
c289629a28 bump sdk version 2024-08-08 20:10:37 -06:00
Jade
806196f572 fix: Gitea/ Synapse/ Nostr types for manifest + config (#2704) 2024-08-08 17:00:13 -06:00
Aiden McClelland
0e598660b4 redesign checkDependencies api 2024-08-08 15:54:46 -06:00
Aiden McClelland
058bfe0737 sdk updates 2024-08-08 11:10:02 -06:00
J H
bd7adafee0 Fix: sdk setupManifest pass through docs 2024-08-07 16:38:35 -06:00
J H
faf0c2b816 Merge branch 'bugfix/misc' of github.com:Start9Labs/start-os into bugfix/misc 2024-08-07 06:19:16 -06:00
J H
419d4986f6 fix: Inject for actions and health 2024-08-07 06:19:04 -06:00
Mariusz Kogen
9f1a9a7d9c fix CI build
* update nodejs
* set up python
2024-08-07 10:40:25 +02:00
Mariusz Kogen
a3e7e7c6c9 version bump 2024-08-07 08:51:15 +02:00
Aiden McClelland
94a5075b6d Merge pull request #2684 from Start9Labs/bugfix/misc
miscellaneous fixes from alpha testing
2024-08-06 16:53:37 -06:00
Aiden McClelland
7c32404b69 fix test 2024-08-06 14:40:49 -06:00
Aiden McClelland
d0c2dc53fe fix dns in the overlay 2024-08-06 14:27:05 -06:00
Aiden McClelland
0e8530172c fix config set dry 2024-08-06 13:59:14 -06:00
Aiden McClelland
4427aeac54 fix asset mounts 2024-08-06 13:21:12 -06:00
Aiden McClelland
93640bb08e don't bail on error in dep config on startup 2024-08-06 13:20:59 -06:00
J H
512ed71fc3 fixes: The case on the readonly that the path before doesn't exist, just let it 2024-08-05 13:11:55 -06:00
Matt Hill
0cfc43c444 don't deprecate wifi 2024-08-01 21:39:16 -06:00
Matt Hill
ecd0edc29e Merge pull request #2701 from Start9Labs/bugfix/misc-alex
fix: address TODOs and close dialogs upon state change
2024-07-31 08:03:45 -06:00
waterplea
6168a006f4 fix: address TODOs and close dialogs upon state change 2024-07-31 11:57:56 +04:00
Mariusz Kogen
82ba5dad1b version bump 2024-07-31 09:11:37 +02:00
Aiden McClelland
972ee8e42e premake 5 versions 2024-07-31 00:06:44 -06:00
Aiden McClelland
7cd3f285ad fix dependency autoconfig 2024-07-30 12:08:20 -06:00
Matt Hill
89e327383e remove file uploads from config 2024-07-30 09:09:35 -06:00
Matt Hill
290a15bbd9 remove sourceVersion and minor cleanup 2024-07-29 22:42:17 -06:00
Aiden McClelland
1dd21f1f76 fix config pointers 2024-07-29 18:46:02 -06:00
Aiden McClelland
46b3f83ce2 don't trim logs 2024-07-29 18:45:18 -06:00
Aiden McClelland
5c153c9e21 improve install performance 2024-07-29 18:44:56 -06:00
Aiden McClelland
bca75a3ea4 stop container before unmounting logs 2024-07-29 13:18:04 -06:00
Aiden McClelland
0bc6f972b2 reserialize getConfig response for backwards compatibility 2024-07-29 13:00:48 -06:00
Aiden McClelland
36cc9cc1ec fix firmware checker 2024-07-29 12:20:13 -06:00
Aiden McClelland
ccbb68aa0c fix instructions on installed packages 2024-07-29 11:34:59 -06:00
Aiden McClelland
08003c59b6 don't lazily unmount unless on error 2024-07-29 11:34:38 -06:00
Mariusz Kogen
dafa638558 fix SSH Key message (#2686)
* fix SSH Key message

* Update web/projects/ui/src/app/pages/server-routes/ssh-keys/ssh-keys.page.ts

---------

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
2024-07-29 17:25:01 +00:00
Jade
75e5250509 fixed: Transforming for bitcoind and nostr (#2688) 2024-07-29 17:24:10 +00:00
Matt Hill
0ed6eb7029 Fix sessions (#2689)
* add loggedIn key to sessions

* show loggedIn timestamp in list

* don't double hash active session

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2024-07-29 17:13:35 +00:00
Aiden McClelland
63e26b6050 fix race condition 2024-07-29 10:15:46 -06:00
Aiden McClelland
3e7578d670 bump version 2024-07-26 20:31:09 -06:00
Aiden McClelland
6f07ec2597 fix bindings 2024-07-26 19:29:24 -06:00
Aiden McClelland
e65c0a0d1d fix tests 2024-07-26 19:19:19 -06:00
Aiden McClelland
be217b5354 update-grub on update 2024-07-26 19:06:11 -06:00
Aiden McClelland
bfe3029d31 fix dependency autoconfig 2024-07-26 17:49:44 -06:00
Aiden McClelland
6abdc39fe5 ignore error on dependent mounts in polyfill 2024-07-26 17:44:16 -06:00
J H
bf55367f4d chore: remove the need for the method in the autoconfig 2024-07-26 15:12:22 -06:00
Aiden McClelland
9480758310 decrease lxc-net init weight 2024-07-26 15:05:25 -06:00
Aiden McClelland
25b33fb031 use ci for test 2024-07-26 14:47:55 -06:00
Aiden McClelland
10ede0d21c delegate pointer removal to config transformer 2024-07-26 14:47:43 -06:00
Aiden McClelland
698bdd619f fix version mapping 2024-07-26 13:26:23 -06:00
Aiden McClelland
5cef6874f6 install before test 2024-07-26 12:33:22 -06:00
Aiden McClelland
6d42ae2629 brackets for ipv6 2024-07-26 12:30:26 -06:00
Aiden McClelland
a3b94816f9 ephemeral logins 2024-07-26 12:29:59 -06:00
Jade
e0b47feb8b Fixing: Some getConfigs were breaking in new system (#2685) 2024-07-26 17:19:16 +00:00
Aiden McClelland
8aecec0b9a fix canonicalization 2024-07-26 11:09:46 -06:00
Aiden McClelland
078bf41029 bump version 2024-07-26 02:07:39 -06:00
Aiden McClelland
2754302fb7 standardize result type for sideload progress 2024-07-26 02:02:58 -06:00
Aiden McClelland
dfb7658c3e implement mount for dependencies 2024-07-26 01:43:46 -06:00
Aiden McClelland
a743785faf cleanup on uninstall 2024-07-26 01:42:10 -06:00
Aiden McClelland
e4782dee68 fix ca cert issue 2024-07-26 01:41:11 -06:00
Aiden McClelland
64315df85f log url for download 2024-07-25 17:34:48 -06:00
Aiden McClelland
2a1fd16849 curl fail and show error 2024-07-25 17:08:19 -06:00
Aiden McClelland
21e31d540e Merge branch 'bugfix/misc' of github.com:Start9Labs/start-os into bugfix/misc 2024-07-25 16:14:06 -06:00
Aiden McClelland
370c38ec76 fix launchUI button 2024-07-25 16:14:04 -06:00
Aiden McClelland
854044229c reduce reliance on sudo 2024-07-25 15:44:51 -06:00
Aiden McClelland
69baa44a3a use squashfuse if available 2024-07-25 15:44:40 -06:00
Aiden McClelland
419e3f7f2b fix https redirect 2024-07-25 14:34:30 -06:00
Aiden McClelland
a9373d9779 don't show "Bytes" for overall progress 2024-07-25 14:34:03 -06:00
J H
1a0536d212 fix: Optional input 2024-07-25 13:25:18 -06:00
Aiden McClelland
099b77cf9b fix .local service resolution 2024-07-25 12:30:05 -06:00
Aiden McClelland
c3d17bf847 fix sync_db middleware 2024-07-25 12:26:49 -06:00
Aiden McClelland
e04b93a51a fix builds on platforms without kernel support for squashfs 2024-07-25 12:17:13 -06:00
Aiden McClelland
b36b62c68e Feature/callbacks (#2678)
* wip

* initialize callbacks

* wip

* smtp

* list_service_interfaces

* wip

* wip

* fix domains

* fix hostname handling in NetService

* misc fixes

* getInstalledPackages

* misc fixes

* publish v6 lib

* refactor service effects

* fix import

* fix container runtime

* fix tests

* apply suggestions from review
2024-07-25 17:44:51 +00:00
Matt Hill
ab465a755e default to all category and fix rounding for progress (#2682)
* default to all category and fix rounding for progress

* Update install-progress.pipe.ts
2024-07-24 22:40:13 -06:00
Aiden McClelland
c6f19db1ec Bugfix/wsl build (#2681)
* explicitly declare squashfs as loop device

* Update update-image.sh
2024-07-23 18:35:38 +00:00
Aiden McClelland
019142efc9 v0.3.6-alpha.0 (#2680)
* v0.3.6-alpha.0

* show welcome on fresh install
2024-07-23 18:18:17 +00:00
Lucy
a535fc17c3 Feature/fe new registry (#2647)
* bugfixes

* update fe types

* implement new registry types in marketplace and ui

* fix marketplace types to have default params

* add alt implementation toggle

* merge cleanup

* more cleanup and notes

* fix build

* cleanup sync with next/minor

* add exver JS parser

* parse ValidExVer to string

* update types to interface

* add VersionRange and comparative functions

* Parse ExtendedVersion from string

* add conjunction, disjunction, and inversion logic

* consider flavor in satisfiedBy fn

* consider prerelease for ordering

* add compare fn for sorting

* rename fns for consistency

* refactoring

* update compare fn to return null if flavors don't match

* begin simplifying dependencies

* under construction

* wip

* add dependency metadata to CurrentDependencyInfo

* ditch inheritance for recursive VersionRange constructor. Recursive 'satisfiedBy' fn wip

* preprocess manifest

* misc fixes

* use sdk version as osVersion in manifest

* chore: Change the type to just validate and not generate all solutions.

* add publishedAt

* fix pegjs exports

* integrate exver into sdk

* misc fixes

* complete satisfiedBy fn

* refactor - use greaterThanOrEqual and lessThanOrEqual fns

* fix tests

* update dependency details

* update types

* remove interim types

* rename alt implementation to flavor

* cleanup os update

* format exver.ts

* add s9pk parsing endpoints

* fix build

* update to exver

* exver and bug fixes

* update static endpoints + cleanup

* cleanup

* update static proxy verification

* make mocks more robust; fix dep icon fallback; cleanup

* refactor alert versions and update fixtures

* registry bugfixes

* misc fixes

* cleanup unused

* convert patchdb ui seed to camelCase

* update otherVersions type

* change otherVersions: null to 'none'

* refactor and complete feature

* improve static endpoints

* fix install params

* mask systemd-networkd-wait-online

* fix static file fetching

* include non-matching versions in otherVersions

* convert release notes to modal and clean up displayExver

* alert for no other versions

* Fix ack-instructions casing

* fix indeterminate loader on service install

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
Co-authored-by: Shadowy Super Coder <musashidisciple@proton.me>
Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
Co-authored-by: J H <dragondef@gmail.com>
Co-authored-by: Matt Hill <mattnine@protonmail.com>
2024-07-23 00:48:12 +00:00
Aiden McClelland
0fbb18b315 Merge branch 'master' into next/minor 2024-07-22 11:43:00 -06:00
Jade
3eb0093d2a feature: Adding in the stopping state (#2677)
* feature: Adding in the stopping state

* chore: Deal with timeout in the sigterm for main

* chore: Update the timeout

* Update web/projects/ui/src/app/pages/apps-routes/app-list/app-list-pkg/app-list-pkg.component.ts

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>

* Update web/projects/ui/src/app/pages/apps-routes/app-show/components/app-show-status/app-show-status.component.ts

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>

---------

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
2024-07-22 17:40:12 +00:00
Matt Hill
196561fed2 init UI increase logs buffer and don't throw on websocket unsubscribe (#2669)
* init UI increase logs buffer and don't throw on websocket unsubscribe

* fix: remove smooth scroll for logs

---------

Co-authored-by: waterplea <alexander@inkin.ru>
2024-07-19 03:49:31 +00:00
Jade
8f0bdcd172 Fix/backups (#2659)
* fix master build (#2639)

* feat: Change ts to use rsync
Chore: Update the ts to use types over interface

* feat: Get the rust and the js to do a backup

* Wip: Got the backup working?

* fix permissions

* remove trixie list

* update tokio to fix timer bug

* fix error handling on backup

* wip

* remove idmap

* run restore before init, and init with own version on restore

---------

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
Co-authored-by: Aiden McClelland <me@drbonez.dev>
2024-07-17 21:46:27 +00:00
Matt Hill
95611e9c4b Merge pull request #2668 from Start9Labs/fix/backup-create
solve infinite recursion and promise returning true
2024-07-12 11:16:00 -06:00
waterplea
62fc6afd8a fix: fix select on mobile 2024-07-12 12:46:07 +05:00
waterplea
0f5cec0a60 fix: fix wrong password messaging 2024-07-12 11:14:45 +05:00
Matt Hill
d235ebaac9 solve infinite recursion and promise returning true 2024-07-11 17:58:07 -06:00
Aiden McClelland
6def083b4f fix deadlock on install (#2667)
* fix deadlock on install

* improve pruning script

* bump tokio
2024-07-11 20:55:13 +00:00
Aiden McClelland
87322744d4 Feature/backup fs (#2665)
* port 040 config, WIP

* update fixtures

* use taiga modal for backups too

* fix: update Taiga UI and refactor everything to work

* chore: package-lock

* fix interfaces and mocks for interfaces

* better mocks

* function to transform old spec to new

* delete unused fns

* delete unused FE config utils

* fix exports from sdk

* reorganize exports

* functions to translate config

* rename unionSelectKey and unionValueKey

* new backup fs

* update sdk types

* change types, include fuse module

* fix casing

* rework setup wiz

* rework UI

* only fuse3

* fix arm build

* misc fixes

* fix duplicate server select

* fix: fix throwing inside dialog

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
Co-authored-by: waterplea <alexander@inkin.ru>
Co-authored-by: Matt Hill <MattDHill@users.noreply.github.com>
2024-07-11 17:32:46 +00:00
Matt Hill
f2a02b392e Merge pull request #2648 from Start9Labs/feat/boot-param
Boot param for logs subscription
2024-07-10 14:54:39 -06:00
Aiden McClelland
e6cedc257e add boot param to logs request 2024-07-10 13:00:02 -06:00
Aiden McClelland
1b5cf2d272 Merge branch 'next/minor' of github.com:Start9Labs/start-os into feat/boot-param 2024-07-10 12:18:48 -06:00
Matt Hill
f76e822381 port 040 config (#2657)
* port 040 config, WIP

* update fixtures

* use taiga modal for backups too

* fix: update Taiga UI and refactor everything to work

* chore: package-lock

* fix interfaces and mocks for interfaces

* better mocks

* function to transform old spec to new

* delete unused fns

* delete unused FE config utils

* fix exports from sdk

* reorganize exports

* functions to translate config

* rename unionSelectKey and unionValueKey

* Adding in the transformation of the getConfig to the new types.

* chore: add Taiga UI to preloader

---------

Co-authored-by: waterplea <alexander@inkin.ru>
Co-authored-by: Aiden McClelland <me@drbonez.dev>
Co-authored-by: J H <dragondef@gmail.com>
2024-07-10 17:58:02 +00:00
Aiden McClelland
822dd5e100 Feature/UI sideload (#2658)
* ui sideloading

* remove subtlecrypto import

* fix parser

* misc fixes

* allow docker pull during compat conversion
2024-06-28 21:03:01 +00:00
Matt Hill
c16d8a1da1 fix setup wizard styles and remove diagnostic from angular.json (#2656) 2024-06-25 20:58:24 -06:00
Aiden McClelland
ab1fdf69c8 add docs for development environment (#2655) 2024-06-26 00:11:11 +00:00
Aiden McClelland
0e506f5716 fix container cli (#2654) 2024-06-25 18:34:47 +00:00
Matt Hill
0a98ccff0c Merge pull request #2653 from Start9Labs/fix/ca-and-snek
fix ca trust test and snek high score
2024-06-25 10:58:38 -06:00
Matt Hill
0c188f6d10 fix ca trust test and snek high score 2024-06-25 10:54:09 -06:00
Matt Hill
8009dd691b Merge pull request #2635 from Start9Labs/feature/registry-metrics
Feature/registry analytics
2024-06-25 10:10:29 -06:00
Aiden McClelland
13d0e9914b Merge branch 'next/minor' of github.com:Start9Labs/start-os into feature/registry-metrics 2024-06-24 16:24:31 -06:00
Aiden McClelland
9da49be44d Bugfix/patch db subscriber (#2652)
* fix socket sending empty patches

* do not timeout tcp connections, just poll them more

* switch from poll to tcp keepalive
2024-06-24 22:15:56 +00:00
Aiden McClelland
00f7fa507b remove analyticsd, clean up script 2024-06-24 16:15:32 -06:00
Jade
2c255b6dfe chore: Do some type cleanups (#2650)
chore: fix the WithProcedureId
2024-06-24 16:00:31 -06:00
Matt Hill
68ed1c80ce update todos 2024-06-22 21:47:18 -06:00
Matt Hill
e0d23f4436 bump patchDB dep 2024-06-22 11:33:30 -06:00
Matt Hill
509f8a5353 Merge pull request #2649 from Start9Labs/cyclic-dep
feat: get rid of cyclic dep between patch-db and api service
2024-06-21 21:26:38 -06:00
Shadowy Super Coder
b0c0cd7fda add script to cache registry db 2024-06-21 18:40:32 -06:00
Shadowy Super Coder
133dfd5063 match query to registry table 2024-06-21 18:39:05 -06:00
waterplea
e6abf4e33b feat: get rid of cyclic dep between patch-db and api service
Signed-off-by: waterplea <alexander@inkin.ru>
2024-06-21 15:51:04 +05:00
Mariusz Kogen
07104b18f5 Update workflows actions (#2628)
* Update workflows actions to the latest versions
2024-06-20 20:59:16 +02:00
Matt Hill
f39b85abf2 bump to 036 2024-06-20 10:08:00 -06:00
Matt Hill
c6c97491ac add boot param to logs subscription 2024-06-20 10:07:39 -06:00
Jade
355452cdb3 Feat/next packages (#2646)
* fix mac build

* wip

* chore: Update the effects to get rid of bad pattern

* chore: Some small changes

* wip

* fix: Health checks don't show during race

* fix: Restart working

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2024-06-19 17:30:05 -06:00
Matt Hill
da3720c7a9 Feat/combine uis (#2633)
* wip

* restructure backend for new ui structure

* new patchdb bootstrap, single websocket api, local storage migration, more

* update db websocket

* init apis

* update patch-db

* setup progress

* feat: implement state service, alert and routing

Signed-off-by: waterplea <alexander@inkin.ru>

* update setup wizard for new types

* feat: add init page

Signed-off-by: waterplea <alexander@inkin.ru>

* chore: refactor message, patch-db source stream and connection service

Signed-off-by: waterplea <alexander@inkin.ru>

* fix method not found on state

* fix backend bugs

* fix compat assets

* address comments

* remove unneeded styling

* cleaner progress

* bugfixes

* fix init logs

* fix progress reporting

* fix navigation by getting state after init

* remove patch dependency from live api

* fix caching

* re-add patchDB to live api

* fix metrics values

* send close frame

* add bootId and fix polling

---------

Signed-off-by: waterplea <alexander@inkin.ru>
Co-authored-by: Aiden McClelland <me@drbonez.dev>
Co-authored-by: waterplea <alexander@inkin.ru>
2024-06-19 19:51:44 +00:00
Aiden McClelland
e92d4ff147 fix compat assets (#2645)
* fix compat assets

* return error on s9pk parse fail in sideload

* return parse error over websocket
2024-06-17 16:37:57 +00:00
Jade
bb514d6216 Chore/refactoring effects (#2644)
* fix mac build

* wip

* chore: Update the effects to get rid of bad pattern

* chore: Some small changes

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2024-06-14 20:16:12 +00:00
Aiden McClelland
3f380fa0da feature: pack s9pk (#2642)
* TODO: images

* wip

* pack s9pk images

* include path in packsource error

* debug info

* add cmd as context to invoke

* filehelper bugfix

* fix file helper

* fix exposeForDependents

* misc fixes

* force image removal

* fix filtering

* fix deadlock

* fix api

* chore: Up the version of the package.json

* always allow concurrency within same call stack

* Update core/startos/src/s9pk/merkle_archive/expected.rs

Co-authored-by: Jade <2364004+Blu-J@users.noreply.github.com>

---------

Co-authored-by: J H <dragondef@gmail.com>
Co-authored-by: Jade <2364004+Blu-J@users.noreply.github.com>
2024-06-12 17:46:59 +00:00
Jade
5aefb707fa feat: Add the merge to the file. (#2643)
* feat: Add the merge to the file.

* chore: Fix the early escape
2024-06-11 04:38:12 +00:00
Shadowy Super Coder
4afd3c2322 move MAU tracking back to registry 2024-06-10 18:56:39 -06:00
Jade
4d6cb091cc Feature/disk usage (#2637)
* feat: Add disk usage

* Fixed: let the set config work with nesting.

* chore: Changes

* chore: Add default route

* fix: Tor only config

* chore
2024-06-07 18:17:45 +00:00
Jade
2c12af5af8 Feature/network (#2622)
* Feature: Add in the clear bindings

* wip: Working on network

* fix: Make it so the config gives the url

* chore: Remove the repeated types

* chore: Add in the todo's here

* chore: Update and remove some poorly named var

* chore: Remove the clear-bindings impl

* chore: Remove the wrapper

* handle HostnameInfo for Host bindings

Co-authored-by: Jade <Blu-J@users.noreply.github.com>

* ??

* chore: Make the install work

* Fix: Url's not being created

* chore: Fix the local onion in url

* include port in hostname

* Chore of adding a comment just to modify.

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
Co-authored-by: Jade <Blu-J@users.noreply.github.com>
2024-06-06 21:39:54 +00:00
Shadowy Super Coder
9487529992 remove os version from activity 2024-06-04 11:54:49 -06:00
Shadowy Super Coder
fa347fd49d remove record_metrics fn 2024-06-04 11:53:30 -06:00
Shadowy Super Coder
8f7072d7e9 metrics wip 2024-06-04 09:21:55 -06:00
Aiden McClelland
412c5d68cc Merge branch 'next/patch' of github.com:Start9Labs/start-os into next/minor 2024-06-03 11:35:28 -06:00
Aiden McClelland
e06b068033 Merge branch 'master' of github.com:Start9Labs/start-os into next/patch 2024-06-03 10:06:46 -06:00
Aiden McClelland
2568bfde5e create skeleton 2024-05-31 13:46:58 -06:00
Aiden McClelland
fd7c2fbe93 Feature/registry package index (#2623)
* include system images in compat s9pk

* wip

* wip

* update types

* wip

* fix signature serialization

* Add SignatureHeader conversions

* finish display impl for get

---------

Co-authored-by: Shadowy Super Coder <musashidisciple@proton.me>
2024-05-31 18:13:23 +00:00
Aiden McClelland
0ccbb52c1f wait for whole session to exit when sigterm (#2620)
* wait for whole session to exit when sigterm

* fix lint

* rename poorly named variable
2024-05-17 01:54:36 +00:00
Jade
0b8a142de0 fix: Making the daemons keep up the status. (#2617)
* complete get_primary_url fn

* complete clear_network_interfaces fn

* formatting

* complete remove_address fn

* get_system_smtp wip

* complete get_system_smtp and set_system_smtp

* add SetSystemSmtpParams struct

* add set_system_smtp subcommand

* Remove 'Copy' implementation from `HostAddress`

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>

* Refactor `get_host_primary` fn and clone  resulting `HostAddress`

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>

* misc fixes and debug info

* seed hosts with a tor address

* fix: Making the daemons keep up the status.

* wipFix: Making a service start

* fix: Both the start + stop of the service.

* fix: Weird edge case of failure and kids

---------

Co-authored-by: Shadowy Super Coder <musashidisciple@proton.me>
Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
Co-authored-by: Aiden McClelland <me@drbonez.dev>
2024-05-13 16:50:25 +00:00
Dominion5254
800b0763e4 More svc effect handlers (#2610)
* complete get_primary_url fn

* complete clear_network_interfaces fn

* formatting

* complete remove_address fn

* get_system_smtp wip

* complete get_system_smtp and set_system_smtp

* add SetSystemSmtpParams struct

* add set_system_smtp subcommand

* Remove 'Copy' implementation from `HostAddress`

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>

* Refactor `get_host_primary` fn and clone  resulting `HostAddress`

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>

* misc fixes and debug info

* seed hosts with a tor address

---------

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
Co-authored-by: Aiden McClelland <me@drbonez.dev>
2024-05-10 19:20:24 +00:00
Jade
30aabe255b Feature/backup+restore (#2613)
* feat: Implementation on the backup for the service.

* wip: Getting the flow of backup/restore

* feat: Recover

* Feature: Commit the full pass on the backup restore.

* use special type for backup instead of special id (#2614)

* fix: Allow compat docker style to run again

* fix: Backup for the js side

* chore: Update some of the callbacks

---------

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
2024-05-06 21:46:36 +00:00
Aiden McClelland
9b14d714ca Feature/new registry (#2612)
* wip

* overhaul boot process

* wip: new registry

* wip

* wip

* wip

* wip

* wip

* wip

* os registry complete

* ui fixes

* fixes

* fixes

* more fixes

* fix merkle archive
2024-05-06 16:20:44 +00:00
Jade
8a38666105 Feature/sdk040dependencies (#2609)
* update registry upload to take id for new admin permissions (#2605)

* wip

* wip: Get the get dependencies

* wip check_dependencies

* wip: Get the build working to the vm

* wip: Add in the last of the things that were needed for the new sdk

* Add fix

* wip: implement the changes

* wip: Fix the naming

---------

Co-authored-by: Lucy <12953208+elvece@users.noreply.github.com>
2024-04-26 17:51:33 -06:00
Dominion5254
e08d93b2aa complete export_service_interface and list_service_interfaces fns (#2595)
* complete export_service_interface fn

* refactor export_service_interface fn

* complete list_service_interfaces fn

* call insert on model and remove unnecessary code

* Refactor export_service_interface

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>

* Refactor list_service_interfaces

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>

* get ServiceInterfaceId and HostId from params

* formatting

---------

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
2024-04-25 17:16:20 -06:00
Aiden McClelland
3a5ee4a296 kill process by session, and add timeout (#2608) 2024-04-23 20:01:40 +00:00
Aiden McClelland
7b8a0114f5 fix log response (#2607) 2024-04-23 20:01:29 +00:00
Aiden McClelland
003d110948 build multi-arch s9pks (#2601)
* build multi-arch s9pks

* remove images incrementally

* wip

* prevent rebuild

* fix sdk makefile

* fix hanging on uninstall

* fix build

* fix build

* fix build

* fix build (for real this time)

* fix git hash computation
2024-04-22 17:40:10 +00:00
Jade
9eff920989 Feat/logging (#2602)
* wip: Working on something to help

* chore: Add in some of the logging now

* chore: fix the type to interned instead of id

* wip

* wip

* chore: fix the logging by moving levels

* Apply suggestions from code review

* mount at machine id for journal

* Persistent

* limit log size

* feat: Actually logging and mounting now

* fix: Get the logs from the previous versions of the boot

* Chore: Add the boot id

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2024-04-17 21:46:10 +00:00
Aiden McClelland
711c82472c Feature/debian runtime (#2600)
* wip

* fix build

* run debian update in systemd-nspawn

* bugfix

* fix build

* free up space before image build
2024-04-15 16:00:56 +00:00
Matt Hill
156bf02d21 Merge pull request #2599 from Start9Labs/bugfix/wifi
fix wifi types
2024-04-10 13:49:02 -06:00
Matt Hill
932b53d92d deprecate wifi 2024-04-09 21:06:06 -06:00
Aiden McClelland
e9166c4a7d fix wifi types 2024-04-09 15:24:05 -06:00
Aiden McClelland
2bc64920dd Merge branch 'next/minor' of github.com:Start9Labs/start-os into bugfix/wifi 2024-04-09 15:11:17 -06:00
Aiden McClelland
aee5500833 miscellaneous bugfixes (#2597)
* miscellaneous bugfixes

* misc fixes
2024-04-09 21:10:26 +00:00
Aiden McClelland
f07992c091 misc fixes 2024-04-09 14:04:31 -06:00
Aiden McClelland
313e415ee9 miscellaneous bugfixes 2024-04-08 14:01:16 -06:00
Aiden McClelland
c13d8f3699 finish dependency autoconfig (#2596) 2024-04-08 18:07:56 +00:00
Aiden McClelland
e41f8f1d0f allow concurrency in service actor (#2592) 2024-04-08 17:53:35 +00:00
Dominion5254
75ff541aec complete get_service_port_forward fn (#2579)
* complete get_service_port_forward fn

* refactor get_service_port_forward and get_container_ip

* remove unused function

* move host_id to GetServicePortForwardParams

* replace match with deref

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>

* refactor get_container_ip to use deref

---------

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
2024-04-05 19:20:49 +00:00
Jade
056cab23e0 Fix: Configure was broken (#2589) 2024-04-04 19:17:11 +00:00
Jade
6bc8027644 Feat/implement rest of poly effects (#2587)
* feat: Add the implementation of the rest of the polyfillEffects

* chore: Add in the rsync

* chore: Add in the changes needed to indicate that the service does not need config

* fix: Vaultwarden sets, starts, stops, uninstalls

* chore: Update the polyFilleffect and add two more

* Update MainLoop.ts

* chore: Add in the set config of the deps on the config set
2024-04-04 09:09:59 -06:00
Matt Hill
3b9298ed2b Feature/dependency autoconfig (#2588)
* dependency autoconfig

* FE portion

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2024-04-03 11:48:26 -06:00
Aiden McClelland
cc1f14e5e9 Merge branch 'next/minor' of github.com:Start9Labs/start-os into next/minor 2024-04-01 14:57:20 -06:00
Aiden McClelland
1c419d5c65 Merge branch 'next/patch' of github.com:Start9Labs/start-os into next/minor 2024-04-01 14:57:10 -06:00
Matt Hill
71b83245b4 Chore/unexport api ts (#2585)
* don't export api params

* import from SDK instead of BE

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2024-04-01 14:47:03 -06:00
Aiden McClelland
2b88555028 Merge branch 'master' of github.com:Start9Labs/start-os into next/patch 2024-04-01 14:46:17 -06:00
Aiden McClelland
f021ad9b0a export api types to ts (#2583) 2024-04-01 13:14:22 -06:00
Aiden McClelland
8884f64b4e Merge branch 'master' of github.com:Start9Labs/start-os into next/minor 2024-04-01 11:15:22 -06:00
Aiden McClelland
dd790dceb5 Chore/reorg backend ts (#2582)
* create index

* chore: Add the star exports for the bindings

---------

Co-authored-by: J H <dragondef@gmail.com>
2024-04-01 17:12:35 +00:00
Matt Hill
8dfc5052e9 ditch more FE enums for clarity and cleanliness 2024-03-30 10:37:31 -06:00
Aiden McClelland
2c308ccd35 Merge pull request #2512 from Start9Labs/integration/new-container-runtime
[feature]: new container runtime
2024-03-29 18:12:07 -06:00
Aiden McClelland
4d6dd44e10 allow downgrades 2024-03-29 15:35:33 -06:00
Aiden McClelland
b6992e32a5 yes 2024-03-29 14:42:33 -06:00
Aiden McClelland
231859303d fix cargo dep 2024-03-29 13:49:05 -06:00
Aiden McClelland
1acdd67fd9 chown volume mountpoints 2024-03-29 13:41:41 -06:00
Aiden McClelland
bec63a9471 remove extra sdk test line 2024-03-29 13:06:51 -06:00
Aiden McClelland
44e856e8dc fix make -t 2024-03-29 12:18:40 -06:00
Aiden McClelland
3bab7678b7 install qemu as root 2024-03-29 10:05:24 -06:00
Aiden McClelland
61f68d9e1b install qemu 2024-03-29 09:49:05 -06:00
Aiden McClelland
94f1562ec5 choose base image by arch instead of platform 2024-03-29 09:16:15 -06:00
Aiden McClelland
46412acd13 add container runtime to compiled tar 2024-03-28 23:31:27 -06:00
Aiden McClelland
e7426ea365 touch bindings before make touch 2024-03-28 17:11:43 -06:00
Aiden McClelland
665eef68b9 fix unterminated quote 2024-03-28 16:36:58 -06:00
Aiden McClelland
7c63d4012f fix build 2024-03-28 15:31:54 -06:00
Aiden McClelland
92be4e774e build fixes 2024-03-28 14:48:45 -06:00
J H
2395502e60 Merge branch 'integration/new-container-runtime' of github.com:Start9Labs/start-os into integration/new-container-runtime 2024-03-28 13:37:21 -06:00
J H
9f3902b48d chore: Fix the last of the actions 2024-03-28 13:37:15 -06:00
Matt Hill
6e76bcb77e fix updates rendering bug 2024-03-28 11:59:42 -06:00
Aiden McClelland
e05a95dc2d Merge branch 'integration/new-container-runtime' of github.com:Start9Labs/start-os into integration/new-container-runtime 2024-03-28 11:21:13 -06:00
Aiden McClelland
86d61d698a update types and format bindings 2024-03-28 11:20:20 -06:00
J H
8ce6535a7e chore: Update the types for container runtime 2024-03-28 10:51:51 -06:00
J H
65ca038eee chore: Update and fix the things 2024-03-28 10:40:47 -06:00
Aiden McClelland
f41f5ebebd export patchdb ts types from rust 2024-03-27 17:47:12 -06:00
J H
9cf62f03fa Add some extra export action 2024-03-27 15:28:54 -06:00
Aiden McClelland
f770d5072e export patchdb types 2024-03-27 13:58:42 -06:00
J H
5698b830ed Fix: Fix the issue where the restart after a service install and update,
would make
the system crash
2024-03-27 12:40:23 -06:00
J H
bcc76dd60a fix: The db dump on the private 2024-03-27 11:11:55 -06:00
Matt Hill
22d8d08355 update patch types for current dependencies 2024-03-27 10:25:07 -06:00
Aiden McClelland
f9edff8bf4 handle todos 2024-03-26 16:21:57 -06:00
Aiden McClelland
33e6be1ca6 Merge branch 'integration/new-container-runtime' of github.com:Start9Labs/start-os into integration/new-container-runtime 2024-03-26 15:17:37 -06:00
Aiden McClelland
e25c50a467 fix types to match 2024-03-26 10:46:46 -06:00
J H
f8441ab42e chore: Add in the fix for the test integration to the actual sdk 2024-03-26 10:37:22 -06:00
Aiden McClelland
4589d4b3f5 update ts-rs to 8.1 2024-03-26 10:26:44 -06:00
Aiden McClelland
9cf720e040 rename embassy to startos 2024-03-25 18:21:58 -06:00
Aiden McClelland
cf793f7f49 Merge branch 'integration/new-container-runtime' of github.com:Start9Labs/start-os into integration/new-container-runtime 2024-03-25 15:47:24 -06:00
Aiden McClelland
2b3fddfe89 use correct serverconfig type 2024-03-25 15:47:22 -06:00
J H
e148f143ea wip: Properties 2024-03-25 14:18:09 -06:00
J H
299d9998ad chore: Making sure that the values that we are returning are valid now with the new types 2024-03-25 12:01:13 -06:00
J H
fba1484e2e fix: Bringing in a building for the browser 2024-03-25 11:13:17 -06:00
Matt Hill
c782bab296 switch all FE to camelCase (#2576)
* switch all fe to camelCase

* switch to camelCase on backend

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2024-03-24 12:05:59 -06:00
Matt Hill
b14646ebd9 export tyeps from sdk 2024-03-24 11:57:39 -06:00
J H
7441de5fd9 Merge branch 'integration/new-container-runtime' of github.com:Start9Labs/start-os into integration/new-container-runtime 2024-03-23 09:13:00 -06:00
J H
f5360cb8d4 wip: Adding in properties and nested path 2024-03-23 09:03:56 -06:00
Aiden McClelland
a35baca580 update rust types to match sdk changes 2024-03-21 17:53:34 -06:00
Matt Hill
66b0108c51 revamp manifest types 2024-03-21 17:21:37 -06:00
Shadowy Super Coder
ab836c6922 remove unneeded imports 2024-03-20 21:50:21 -06:00
Shadowy Super Coder
405b3be496 complete get_container_ip effect handler 2024-03-20 21:26:18 -06:00
J H
4a27128a1c chore: Update the types for changes that Matt wanted with sdk + examples 2024-03-20 20:28:31 -06:00
J H
c74bdc97ca fix: util -> utils in the container-runtime 2024-03-20 13:46:32 -06:00
Shadowy Super Coder
ddd5e4c76d blu-j paired changes 2024-03-20 13:43:34 -06:00
J H
41bc519855 Merge branch 'integration/new-container-runtime' of github.com:Start9Labs/start-os into integration/new-container-runtime 2024-03-20 10:48:13 -06:00
J H
53d82618d9 chore: Update the types and get the container-runtime working 2024-03-20 10:48:03 -06:00
Aiden McClelland
57f548c6c0 fix certificate chain 2024-03-19 16:54:27 -06:00
J H
8d83f64aba chore: Update the types for the mocks in the services 2024-03-19 13:27:51 -06:00
J H
9162697117 Merge branch 'integration/new-container-runtime' of github.com:Start9Labs/start-os into integration/new-container-runtime 2024-03-19 12:44:56 -06:00
Aiden McClelland
47b19e3211 fix duplicate 2024-03-19 11:59:02 -06:00
J H
590f6d4c19 chore: Update the types 2024-03-19 11:58:38 -06:00
Aiden McClelland
53108e816f Merge branch 'integration/new-container-runtime' of github.com:Start9Labs/start-os into integration/new-container-runtime 2024-03-19 11:55:40 -06:00
Aiden McClelland
3ac71e2f7f include sdk.Mounts 2024-03-19 11:55:38 -06:00
Matt Hill
cc38dab76f Rework PackageDataEntry for new strategy (#2573)
* rework PackageDataEntry for new strategy

* fix type error

* fix issues with manifest fetching

* mock installs working
2024-03-19 08:38:04 -06:00
J H
c8be701f0e chore: Add in the mounts 2024-03-18 16:23:09 -06:00
J H
417befb2be Merge branch 'integration/new-container-runtime' of github.com:Start9Labs/start-os into integration/new-container-runtime 2024-03-18 15:29:08 -06:00
J H
a0ce7f38e7 chore: Add in the thing to do the volumes correctly 2024-03-18 15:28:33 -06:00
Aiden McClelland
962e3d8e56 more specific rust type 2024-03-18 15:24:20 -06:00
J H
3a3df96996 fix: Test 2024-03-18 15:20:17 -06:00
Aiden McClelland
2ffa632796 Merge branch 'integration/new-container-runtime' of github.com:Start9Labs/start-os into integration/new-container-runtime 2024-03-18 15:15:55 -06:00
Aiden McClelland
3c6c0b253d move mounts to daemons constructor 2024-03-18 15:14:36 -06:00
J H
5f40fd6038 chore: Remove the utils 2024-03-18 14:31:01 -06:00
J H
8e2dc8b3ee chore: Fix the effects 2024-03-18 12:47:31 -06:00
Matt Hill
a02b531e47 update sdk 2024-03-18 12:37:50 -06:00
J H
a4cb2708cc Merge branch 'integration/new-container-runtime' of github.com:Start9Labs/start-os into integration/new-container-runtime 2024-03-18 10:54:36 -06:00
J H
973284607d chore: Update the sdk to match the rust and vica verse 2024-03-18 10:53:38 -06:00
Matt Hill
28fd2f0314 More 036 Frontend changes (#2572)
* update patchDB for futuristic revisions

* interfaces display updates

* remove zram and move tor to settings
2024-03-16 13:09:17 -06:00
J H
9715873007 chore: Add in the rest of the effects for now 2024-03-15 14:29:08 -06:00
J H
18a20407f6 chore: Fix the build 2024-03-15 13:31:44 -06:00
Aiden McClelland
1a396cfc7b reorganize package data and write dependencies rpc (#2571)
* wip

* finish dependencies

* minor fixes
2024-03-15 19:02:47 +00:00
J H
e604c914d1 chore: UPdate to the make, getting rid of a circular dep 2024-03-15 11:25:52 -06:00
J H
a310c160a5 chore: Update the types hopefully 2024-03-14 15:29:01 -06:00
J H
45d50b12fd chore: Update the types for the sdk 2024-03-14 15:18:33 -06:00
J H
e87182264a chore: Fix the build for the start9labs/sdk was a new directory structure 2024-03-14 15:14:12 -06:00
J H
a089d544a5 chore: Add some of the fixes to make the build work !!! 2024-03-13 17:44:13 -06:00
J H
b6fe0be1b2 chore: Add in some more files for the testing of the sdk and the rust interface 2024-03-13 16:23:24 -06:00
J H
ba325b1581 feat: Move the store to private. 2024-03-12 11:58:13 -06:00
Aiden McClelland
1f47abf195 Merge branch 'integration/new-container-runtime' of github.com:Start9Labs/start-os into integration/new-container-runtime 2024-03-11 17:16:07 -06:00
Aiden McClelland
750f35bc36 add --include-private to db dump rpc 2024-03-11 17:15:18 -06:00
J H
c99d9d95c5 chore: Add in the properties to somewhere useful. 2024-03-11 17:06:00 -06:00
J H
4d402b2600 chore: Fix the issue that I made that made it impossible to start a service because that would do a set during a time when there was not even a installed state 2024-03-11 16:34:37 -06:00
Aiden McClelland
64fb002168 Merge branch 'integration/new-container-runtime' of github.com:Start9Labs/start-os into integration/new-container-runtime 2024-03-11 16:11:01 -06:00
Aiden McClelland
1308b5bcf3 wait up to 30s for ip address 2024-03-11 16:10:59 -06:00
J H
dc3dc4a1f0 chore: Update some of the system for the embassy properties to be filled with the correct properties.
git push
2024-03-11 15:47:22 -06:00
J H
99bb55af73 Update some of the types 2024-03-11 15:37:04 -06:00
J H
4a285225db chore: Update the version of the cargo 2024-03-08 14:46:44 -07:00
Aiden McClelland
d986bd2a6c sync ssh keys on add 2024-03-07 17:19:01 -07:00
Aiden McClelland
8665342edf fix deadlock 2024-03-07 16:43:13 -07:00
Aiden McClelland
2e7c3bf789 use dockerhub compat 2024-03-07 15:41:27 -07:00
Aiden McClelland
31ea0fe3fe fix async cycle 2024-03-07 14:57:41 -07:00
Aiden McClelland
e0c9f8a5aa Feature/remove postgres (#2570)
* wip: move postgres data to patchdb

* wip

* wip

* wip

* complete notifications and clean up warnings

* fill in user agent

* move os tor bindings to single call
2024-03-07 14:40:22 -07:00
J H
a17ec4221b chore: Remove some of the bad logging 2024-03-07 13:45:38 -07:00
J H
328beaba35 chore: Add in the possibility to get the status code from the executed health check 2024-03-07 13:36:38 -07:00
J H
efbbaa5741 feat: Get the health checks for the js 2024-03-07 11:38:59 -07:00
J H
14be2fa344 chore: Add in the ability to kill the tree of processes 2024-03-06 16:22:29 -07:00
J H
f3ccad192c chore: Add the process tree destroyer 2024-03-06 15:43:07 -07:00
J H
8410929e86 feat: Add the stop/start loop for the service 2024-03-06 10:55:21 -07:00
J H
093a5d4ddf chore: Simplify the state into one 2024-03-06 09:38:55 -07:00
J H
88028412bd chore: Add some documentation for the service actor seed 2024-03-04 14:18:20 -07:00
J H
11c93231aa fix: Let the service be able to be started 2024-03-04 13:37:48 -07:00
J H
5366b4c873 chore: Add another export 2024-02-27 13:25:58 -07:00
J H
171e0ed312 chore: Something 2024-02-27 13:20:55 -07:00
J H
f50ddb436f chore: somethinng 2024-02-27 13:12:51 -07:00
J H
0b4b091580 Merge branch 'integration/new-container-runtime' of github.com:Start9Labs/start-os into integration/new-container-runtime 2024-02-27 13:11:09 -07:00
J H
2f6d7ac128 chore: Update to have the startsdk 2024-02-27 13:11:04 -07:00
J H
6b990e1cee chore: Up the version 2024-02-27 12:33:31 -07:00
Aiden McClelland
ddeed65994 remove workspace package json 2024-02-26 17:29:20 -07:00
Aiden McClelland
d87748fda1 add npm workspace file 2024-02-26 16:59:37 -07:00
J H
50f0ead113 Merge branch 'integration/new-container-runtime' of github.com:Start9Labs/start-os into integration/new-container-runtime 2024-02-23 15:32:06 -07:00
J H
4e3075aaba chore: Add in the ability to remove the bad sections? 2024-02-23 15:32:01 -07:00
Matt Hill
87d6684ca7 Frontend changes for 036 (#2554)
* new interfaces and remove tor http warnings

* move sigtermTimeout to stopping main status

* lightning, masked, schemeOverride, invert host-iface relationship

* update for new sdk

* update for latest SDK changes

* Update app-interfaces.page.ts

* Update config.service.ts
2024-02-23 10:38:50 -07:00
J H
3bd7596873 chore: Fix some issues in the installation of a package and other rpc things 2024-02-22 16:42:31 -07:00
Aiden McClelland
39964bf077 Feature/lxc container runtime (#2563)
* wip: context

* wip(fix) Sorta auth

* wip: warnings

* wip(fix): registry/admin

* wip(fix) marketplace

* wip(fix) Some more converted and fixed with the linter and config

* wip: Working on the static server

* wip(fix)static server

* wip: Remove some asynnc

* wip: Something about the request and regular rpc

* wip: gut install

Co-authored-by: J H <Blu-J@users.noreply.github.com>

* wip: Convert the static server into the new system

* wip delete file

* test

* wip(fix) vhost does not need the with safe defaults

* wip: Adding in the wifi

* wip: Fix the developer and the verify

* wip: new install flow

Co-authored-by: J H <Blu-J@users.noreply.github.com>

* fix middleware

* wip

* wip: Fix the auth

* wip

* continue service refactor

* feature: Service get_config

* feat: Action

* wip: Fighting the great fight against the borrow checker

* wip: Remove an error in a file that I just need to deel with later

* chore: Add in some more lifetime stuff to the services

* wip: Install fix on lifetime

* cleanup

* wip: Deal with the borrow later

* more cleanup

* resolve borrowchecker errors

* wip(feat): add in the handler for the socket, for now

* wip(feat): Update the service_effect_handler::action

* chore: Add in the changes to make sure the from_service goes to context

* chore: Change the

* refactor service map

* fix references to service map

* fill out restore

* wip: Before I work on the store stuff

* fix backup module

* handle some warnings

* feat: add in the ui components on the rust side

* feature: Update the procedures

* chore: Update the js side of the main and a few of the others

* chore: Update the rpc listener to match the persistant container

* wip: Working on updating some things to have a better name

* wip(feat): Try and get the rpc to return the correct shape?

* lxc wip

* wip(feat): Try and get the rpc to return the correct shape?

* build for container runtime wip

* remove container-init

* fix build

* fix error

* chore: Update to work I suppose

* lxc wip

* remove docker module and feature

* download alpine squashfs automatically

* overlays effect

Co-authored-by: Jade <Blu-J@users.noreply.github.com>

* chore: Add the overlay effect

* feat: Add the mounter in the main

* chore: Convert to use the mounts, still need to work with the sandbox

* install fixes

* fix ssl

* fixes from testing

* implement tmpfile for upload

* wip

* misc fixes

* cleanup

* cleanup

* better progress reporting

* progress for sideload

* return real guid

* add devmode script

* fix lxc rootfs path

* fix percentage bar

* fix progress bar styling

* fix build for unstable

* tweaks

* label progress

* tweaks

* update progress more often

* make symlink in rpc_client

* make socket dir

* fix parent path

* add start-cli to container

* add echo and gitInfo commands

* wip: Add the init + errors

* chore: Add in the exit effect for the system

* chore: Change the type to null for failure to parse

* move sigterm timeout to stopping status

* update order

* chore: Update the return type

* remove dbg

* change the map error

* chore: Update the thing to capture id

* chore add some life changes

* chore: Update the loging

* chore: Update the package to run module

* us From for RpcError

* chore: Update to use import instead

* chore: update

* chore: Use require for the backup

* fix a default

* update the type that is wrong

* chore: Update the type of the manifest

* chore: Update to make null

* only symlink if not exists

* get rid of double result

* better debug info for ErrorCollection

* chore: Update effects

* chore: fix

* mount assets and volumes

* add exec instead of spawn

* fix mounting in image

* fix overlay mounts

Co-authored-by: Jade <Blu-J@users.noreply.github.com>

* misc fixes

* feat: Fix two

* fix: systemForEmbassy main

* chore: Fix small part of main loop

* chore: Modify the bundle

* merge

* fixMain loop"

* move tsc to makefile

* chore: Update the return types of the health check

* fix client

* chore: Convert the todo to use tsmatches

* add in the fixes for the seen and create the hack to allow demo

* chore: Update to include the systemForStartOs

* chore UPdate to the latest types from the expected outout

* fixes

* fix typo

* Don't emit if failure on tsc

* wip

Co-authored-by: Jade <Blu-J@users.noreply.github.com>

* add s9pk api

* add inspection

* add inspect manifest

* newline after display serializable

* fix squashfs in image name

* edit manifest

Co-authored-by: Jade <Blu-J@users.noreply.github.com>

* wait for response on repl

* ignore sig for now

* ignore sig for now

* re-enable sig verification

* fix

* wip

* env and chroot

* add profiling logs

* set uid & gid in squashfs to 100000

* set uid of sqfs to 100000

* fix mksquashfs args

* add env to compat

* fix

* re-add docker feature flag

* fix docker output format being stupid

* here be dragons

* chore: Add in the cross compiling for something

* fix npm link

* extract logs from container on exit

* chore: Update for testing

* add log capture to drop trait

* chore: add in the modifications that I make

* chore: Update small things for no updates

* chore: Update the types of something

* chore: Make main not complain

* idmapped mounts

* idmapped volumes

* re-enable kiosk

* chore: Add in some logging for the new system

* bring in start-sdk

* remove avahi

* chore: Update the deps

* switch to musl

* chore: Update the version of prettier

* chore: Organize'

* chore: Update some of the headers back to the standard of fetch

* fix musl build

* fix idmapped mounts

* fix cross build

* use cross compiler for correct arch

* feat: Add in the faked ssl stuff for the effects

* @dr_bonez Did a solution here

* chore: Something that DrBonez

* chore: up

* wip: We have a working server!!!

* wip

* uninstall

* wip

* tes

* misc fixes

* fix cli

* replace interface with host

* chore: Fix the types in some ts files

* chore: quick update for the system for embassy to update the types

* replace br-start9 with lxcbr0

* split patchdb into public/private

* chore: Add changes for config set

* Feat: Adding some debugging for the errors

* wip: Working on getting the set config to work

* chore: Update and fix the small issue with the deserialization

* lightning, masked, schemeOverride, invert host-iface relationship

* feat: Add in the changes for just the sdk

* feat: Add in the changes for the new effects I suppose for now

* Some small changes ????

---------

Co-authored-by: J H <2364004+Blu-J@users.noreply.github.com>
Co-authored-by: J H <Blu-J@users.noreply.github.com>
Co-authored-by: J H <dragondef@gmail.com>
Co-authored-by: Matt Hill <mattnine@protonmail.com>
2024-02-22 22:37:11 +00:00
Aiden McClelland
089199e7c2 Feature/lxc container runtime (#2562)
* wip(fix): Dependencies

* wip: context

* wip(fix) Sorta auth

* wip: warnings

* wip(fix): registry/admin

* wip(fix) marketplace

* wip(fix) Some more converted and fixed with the linter and config

* wip: Working on the static server

* wip(fix)static server

* wip: Remove some asynnc

* wip: Something about the request and regular rpc

* wip: gut install

Co-authored-by: J H <Blu-J@users.noreply.github.com>

* wip: Convert the static server into the new system

* wip delete file

* test

* wip(fix) vhost does not need the with safe defaults

* wip: Adding in the wifi

* wip: Fix the developer and the verify

* wip: new install flow

Co-authored-by: J H <Blu-J@users.noreply.github.com>

* fix middleware

* wip

* wip: Fix the auth

* wip

* continue service refactor

* feature: Service get_config

* feat: Action

* wip: Fighting the great fight against the borrow checker

* wip: Remove an error in a file that I just need to deel with later

* chore: Add in some more lifetime stuff to the services

* wip: Install fix on lifetime

* cleanup

* wip: Deal with the borrow later

* more cleanup

* resolve borrowchecker errors

* wip(feat): add in the handler for the socket, for now

* wip(feat): Update the service_effect_handler::action

* chore: Add in the changes to make sure the from_service goes to context

* chore: Change the

* refactor service map

* fix references to service map

* fill out restore

* wip: Before I work on the store stuff

* fix backup module

* handle some warnings

* feat: add in the ui components on the rust side

* feature: Update the procedures

* chore: Update the js side of the main and a few of the others

* chore: Update the rpc listener to match the persistant container

* wip: Working on updating some things to have a better name

* wip(feat): Try and get the rpc to return the correct shape?

* lxc wip

* wip(feat): Try and get the rpc to return the correct shape?

* build for container runtime wip

* remove container-init

* fix build

* fix error

* chore: Update to work I suppose

* lxc wip

* remove docker module and feature

* download alpine squashfs automatically

* overlays effect

Co-authored-by: Jade <Blu-J@users.noreply.github.com>

* chore: Add the overlay effect

* feat: Add the mounter in the main

* chore: Convert to use the mounts, still need to work with the sandbox

* install fixes

* fix ssl

* fixes from testing

* implement tmpfile for upload

* wip

* misc fixes

* cleanup

* cleanup

* better progress reporting

* progress for sideload

* return real guid

* add devmode script

* fix lxc rootfs path

* fix percentage bar

* fix progress bar styling

* fix build for unstable

* tweaks

* label progress

* tweaks

* update progress more often

* make symlink in rpc_client

* make socket dir

* fix parent path

* add start-cli to container

* add echo and gitInfo commands

* wip: Add the init + errors

* chore: Add in the exit effect for the system

* chore: Change the type to null for failure to parse

* move sigterm timeout to stopping status

* update order

* chore: Update the return type

* remove dbg

* change the map error

* chore: Update the thing to capture id

* chore add some life changes

* chore: Update the loging

* chore: Update the package to run module

* us From for RpcError

* chore: Update to use import instead

* chore: update

* chore: Use require for the backup

* fix a default

* update the type that is wrong

* chore: Update the type of the manifest

* chore: Update to make null

* only symlink if not exists

* get rid of double result

* better debug info for ErrorCollection

* chore: Update effects

* chore: fix

* mount assets and volumes

* add exec instead of spawn

* fix mounting in image

* fix overlay mounts

Co-authored-by: Jade <Blu-J@users.noreply.github.com>

* misc fixes

* feat: Fix two

* fix: systemForEmbassy main

* chore: Fix small part of main loop

* chore: Modify the bundle

* merge

* fixMain loop"

* move tsc to makefile

* chore: Update the return types of the health check

* fix client

* chore: Convert the todo to use tsmatches

* add in the fixes for the seen and create the hack to allow demo

* chore: Update to include the systemForStartOs

* chore UPdate to the latest types from the expected outout

* fixes

* fix typo

* Don't emit if failure on tsc

* wip

Co-authored-by: Jade <Blu-J@users.noreply.github.com>

* add s9pk api

* add inspection

* add inspect manifest

* newline after display serializable

* fix squashfs in image name

* edit manifest

Co-authored-by: Jade <Blu-J@users.noreply.github.com>

* wait for response on repl

* ignore sig for now

* ignore sig for now

* re-enable sig verification

* fix

* wip

* env and chroot

* add profiling logs

* set uid & gid in squashfs to 100000

* set uid of sqfs to 100000

* fix mksquashfs args

* add env to compat

* fix

* re-add docker feature flag

* fix docker output format being stupid

* here be dragons

* chore: Add in the cross compiling for something

* fix npm link

* extract logs from container on exit

* chore: Update for testing

* add log capture to drop trait

* chore: add in the modifications that I make

* chore: Update small things for no updates

* chore: Update the types of something

* chore: Make main not complain

* idmapped mounts

* idmapped volumes

* re-enable kiosk

* chore: Add in some logging for the new system

* bring in start-sdk

* remove avahi

* chore: Update the deps

* switch to musl

* chore: Update the version of prettier

* chore: Organize'

* chore: Update some of the headers back to the standard of fetch

* fix musl build

* fix idmapped mounts

* fix cross build

* use cross compiler for correct arch

* feat: Add in the faked ssl stuff for the effects

* @dr_bonez Did a solution here

* chore: Something that DrBonez

* chore: up

* wip: We have a working server!!!

* wip

* uninstall

* wip

* tes

* misc fixes

* fix cli

* replace interface with host

* chore: Fix the types in some ts files

* chore: quick update for the system for embassy to update the types

* replace br-start9 with lxcbr0

* split patchdb into public/private

* chore: Add changes for config set

* Feat: Adding some debugging for the errors

* wip: Working on getting the set config to work

* chore: Update and fix the small issue with the deserialization

* lightning, masked, schemeOverride, invert host-iface relationship

* feat: Add in the changes for just the sdk

* feat: Add in the changes for the new effects I suppose for now

---------

Co-authored-by: J H <2364004+Blu-J@users.noreply.github.com>
Co-authored-by: J H <Blu-J@users.noreply.github.com>
Co-authored-by: J H <dragondef@gmail.com>
Co-authored-by: Matt Hill <mattnine@protonmail.com>
2024-02-22 21:00:49 +00:00
Matt Hill
d7bc7a2d38 make service interfaces and hosts one to one 2024-02-19 12:40:52 -07:00
Matt Hill
eae75c13bb update network interfaces types 2024-02-17 13:07:41 -07:00
Aiden McClelland
fab13db4b4 Feature/lxc container runtime (#2514)
* wip: static-server errors

* wip: fix wifi

* wip: Fix the service_effects

* wip: Fix cors in the middleware

* wip(chore): Auth clean up the lint.

* wip(fix): Vhost

* wip: continue manager refactor

Co-authored-by: J H <Blu-J@users.noreply.github.com>

* wip: service manager refactor

* wip: Some fixes

* wip(fix): Fix the lib.rs

* wip

* wip(fix): Logs

* wip: bins

* wip(innspect): Add in the inspect

* wip: config

* wip(fix): Diagnostic

* wip(fix): Dependencies

* wip: context

* wip(fix) Sorta auth

* wip: warnings

* wip(fix): registry/admin

* wip(fix) marketplace

* wip(fix) Some more converted and fixed with the linter and config

* wip: Working on the static server

* wip(fix)static server

* wip: Remove some asynnc

* wip: Something about the request and regular rpc

* wip: gut install

Co-authored-by: J H <Blu-J@users.noreply.github.com>

* wip: Convert the static server into the new system

* wip delete file

* test

* wip(fix) vhost does not need the with safe defaults

* wip: Adding in the wifi

* wip: Fix the developer and the verify

* wip: new install flow

Co-authored-by: J H <Blu-J@users.noreply.github.com>

* fix middleware

* wip

* wip: Fix the auth

* wip

* continue service refactor

* feature: Service get_config

* feat: Action

* wip: Fighting the great fight against the borrow checker

* wip: Remove an error in a file that I just need to deel with later

* chore: Add in some more lifetime stuff to the services

* wip: Install fix on lifetime

* cleanup

* wip: Deal with the borrow later

* more cleanup

* resolve borrowchecker errors

* wip(feat): add in the handler for the socket, for now

* wip(feat): Update the service_effect_handler::action

* chore: Add in the changes to make sure the from_service goes to context

* chore: Change the

* refactor service map

* fix references to service map

* fill out restore

* wip: Before I work on the store stuff

* fix backup module

* handle some warnings

* feat: add in the ui components on the rust side

* feature: Update the procedures

* chore: Update the js side of the main and a few of the others

* chore: Update the rpc listener to match the persistant container

* wip: Working on updating some things to have a better name

* wip(feat): Try and get the rpc to return the correct shape?

* lxc wip

* wip(feat): Try and get the rpc to return the correct shape?

* build for container runtime wip

* remove container-init

* fix build

* fix error

* chore: Update to work I suppose

* lxc wip

* remove docker module and feature

* download alpine squashfs automatically

* overlays effect

Co-authored-by: Jade <Blu-J@users.noreply.github.com>

* chore: Add the overlay effect

* feat: Add the mounter in the main

* chore: Convert to use the mounts, still need to work with the sandbox

* install fixes

* fix ssl

* fixes from testing

* implement tmpfile for upload

* wip

* misc fixes

* cleanup

* cleanup

* better progress reporting

* progress for sideload

* return real guid

* add devmode script

* fix lxc rootfs path

* fix percentage bar

* fix progress bar styling

* fix build for unstable

* tweaks

* label progress

* tweaks

* update progress more often

* make symlink in rpc_client

* make socket dir

* fix parent path

* add start-cli to container

* add echo and gitInfo commands

* wip: Add the init + errors

* chore: Add in the exit effect for the system

* chore: Change the type to null for failure to parse

* move sigterm timeout to stopping status

* update order

* chore: Update the return type

* remove dbg

* change the map error

* chore: Update the thing to capture id

* chore add some life changes

* chore: Update the loging

* chore: Update the package to run module

* us From for RpcError

* chore: Update to use import instead

* chore: update

* chore: Use require for the backup

* fix a default

* update the type that is wrong

* chore: Update the type of the manifest

* chore: Update to make null

* only symlink if not exists

* get rid of double result

* better debug info for ErrorCollection

* chore: Update effects

* chore: fix

* mount assets and volumes

* add exec instead of spawn

* fix mounting in image

* fix overlay mounts

Co-authored-by: Jade <Blu-J@users.noreply.github.com>

* misc fixes

* feat: Fix two

* fix: systemForEmbassy main

* chore: Fix small part of main loop

* chore: Modify the bundle

* merge

* fixMain loop"

* move tsc to makefile

* chore: Update the return types of the health check

* fix client

* chore: Convert the todo to use tsmatches

* add in the fixes for the seen and create the hack to allow demo

* chore: Update to include the systemForStartOs

* chore UPdate to the latest types from the expected outout

* fixes

* fix typo

* Don't emit if failure on tsc

* wip

Co-authored-by: Jade <Blu-J@users.noreply.github.com>

* add s9pk api

* add inspection

* add inspect manifest

* newline after display serializable

* fix squashfs in image name

* edit manifest

Co-authored-by: Jade <Blu-J@users.noreply.github.com>

* wait for response on repl

* ignore sig for now

* ignore sig for now

* re-enable sig verification

* fix

* wip

* env and chroot

* add profiling logs

* set uid & gid in squashfs to 100000

* set uid of sqfs to 100000

* fix mksquashfs args

* add env to compat

* fix

* re-add docker feature flag

* fix docker output format being stupid

* here be dragons

* chore: Add in the cross compiling for something

* fix npm link

* extract logs from container on exit

* chore: Update for testing

* add log capture to drop trait

* chore: add in the modifications that I make

* chore: Update small things for no updates

* chore: Update the types of something

* chore: Make main not complain

* idmapped mounts

* idmapped volumes

* re-enable kiosk

* chore: Add in some logging for the new system

* bring in start-sdk

* remove avahi

* chore: Update the deps

* switch to musl

* chore: Update the version of prettier

* chore: Organize'

* chore: Update some of the headers back to the standard of fetch

* fix musl build

* fix idmapped mounts

* fix cross build

* use cross compiler for correct arch

* feat: Add in the faked ssl stuff for the effects

* @dr_bonez Did a solution here

* chore: Something that DrBonez

* chore: up

* wip: We have a working server!!!

* wip

* uninstall

* wip

* tes

---------

Co-authored-by: J H <dragondef@gmail.com>
Co-authored-by: J H <Blu-J@users.noreply.github.com>
Co-authored-by: J H <2364004+Blu-J@users.noreply.github.com>
2024-02-17 18:14:14 +00:00
Aiden McClelland
d44de670cd Add socat to base dependencies (#2544) 2023-12-20 22:20:01 +00:00
J H
cb63025078 chore: Initial commit for the bump to 0.3.5.2 (#2541)
* chore: Initial commit for the bump

* wip(fix): build

* chore: Update the os welcome page to include the previous release of the 0.3.5.1
2023-12-20 14:58:24 -07:00
J H
685e865b42 fix: Docker stopping will include a timeout (#2540)
* fix sdk build script

* fix: Docker stopping will include a timeoute

So the timeout that was included in the original is not working therefore we move to a doublinig with a timeout

* fix: Adding in the missing suggestions that Aiden has poinited out

* Update install-sdk.sh

* Update install-sdk.sh

---------

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-12-19 10:16:18 -07:00
Aiden McClelland
65009e2f69 Merge branch 'next/minor' of github.com:Start9Labs/start-os into integration/new-container-runtime 2023-11-20 14:13:57 -07:00
Aiden McClelland
4c8a92bb0c Merge branch 'next/patch' of github.com:Start9Labs/start-os into next/minor 2023-11-20 13:20:42 -07:00
Aiden McClelland
11a2e96d06 Merge branch 'next/minor' of github.com:Start9Labs/start-os into integration/new-container-runtime 2023-11-17 16:39:13 -07:00
Aiden McClelland
095c5e4f95 Merge branch 'next/patch' of github.com:Start9Labs/start-os into next/minor 2023-11-17 16:38:48 -07:00
Aiden McClelland
069db28fb6 Merge branch 'next/minor' of github.com:Start9Labs/start-os into integration/new-container-runtime 2023-11-17 10:54:58 -07:00
Aiden McClelland
2e747d3ece Merge branch 'next/patch' of github.com:Start9Labs/start-os into next/minor 2023-11-17 10:53:55 -07:00
Aiden McClelland
6580153f29 Merge branch 'next/patch' of github.com:Start9Labs/start-os into next/minor 2023-11-16 15:11:10 -07:00
Aiden McClelland
c988bca958 Merge branch 'next/minor' of github.com:Start9Labs/start-os into integration/new-container-runtime 2023-11-13 16:42:06 -07:00
Aiden McClelland
e84e8edb29 Merge branch 'next/patch' of github.com:Start9Labs/start-os into next/minor 2023-11-13 16:35:08 -07:00
Aiden McClelland
a4ef7205ca Merge branch 'next/patch' of github.com:Start9Labs/start-os into next/minor 2023-11-13 16:26:00 -07:00
Aiden McClelland
ba8df96e41 Merge branch 'next/patch' of github.com:Start9Labs/start-os into next/minor 2023-11-13 16:24:12 -07:00
Aiden McClelland
0e2fc07881 remove js-engine 2023-11-13 16:22:35 -07:00
Aiden McClelland
5c578c0328 Merge branch 'next/minor' of github.com:Start9Labs/start-os into integration/new-container-runtime 2023-11-13 15:42:51 -07:00
Aiden McClelland
5f7ff460fb fix merge 2023-11-13 15:41:35 -07:00
Aiden McClelland
3b3e1e37b9 readd core/startos/src/s9pk/mod.rs 2023-11-13 15:38:44 -07:00
Aiden McClelland
5f40d9400c move container init system to project root 2023-11-13 15:29:27 -07:00
Aiden McClelland
fcdc642acb Merge branch 'next/minor' of github.com:Start9Labs/start-os into chore/removing-non-long-running 2023-11-13 15:26:04 -07:00
J H
46f594ab71 chore: Add in the changes that where discussed with @Dr_Bonez in the room 2023-11-13 15:18:51 -07:00
J H
e8684cbb9d Merge branch 'feature/start_init' into chore/removing-non-long-running 2023-11-13 15:16:25 -07:00
J H
a36ab71600 chore: Add some more comments for DrBones 2023-11-13 15:16:21 -07:00
Aiden McClelland
e4ce05f94d Merge branch 'next/patch' of github.com:Start9Labs/start-os into next/minor 2023-11-13 14:28:26 -07:00
J H
38a624fecf chore: Remove the todoes that we have done.
Leaving in the thing about  the rpc client because that will be part of the rewrite, and some of the previous logic should be usefull for the next version of the api. We do need a bidirection but that should world
2023-11-13 10:55:39 -07:00
Aiden McClelland
fd96859883 [feature]: s9pk v2 (#2507)
* feature: s9pk v2

wip

wip

wip

wip

refactor

* use WriteQueue

* fix proptest

* LoopDev
eager directory hash verification
2023-11-10 21:57:21 +00:00
J H
94d22ed1aa chore: Remove the other procedures since all are now via the js 2023-11-10 09:26:00 -07:00
J H
b5da076e2c chore: Add in some modifications to make the sandboxed and execute in the container 2023-11-08 17:19:30 -07:00
J H
18cd6c81a3 chore: Make sure the test is testing something is correct shape 2023-11-08 15:53:29 -07:00
J H
40b19c5e67 chore: Remove the long running from the docker 2023-11-08 15:35:08 -07:00
J H
7a31d09356 feature: Include the start init files.
This includes the docker commands to get things compressed.
And this is the start of the rpc, but needs lots of work, or very little, not sure yet anymore.
I beleive that the things that are missing are the rpc, and the effects. So, lots of work, but is still good to have I suppose.
2023-08-17 12:49:06 -06:00
1247 changed files with 110470 additions and 43748 deletions

View File

@@ -12,9 +12,6 @@ on:
- dev
- unstable
- dev-unstable
- docker
- dev-docker
- dev-unstable-docker
runner:
type: choice
description: Runner
@@ -48,7 +45,7 @@ on:
- next/*
env:
NODEJS_VERSION: "18.15.0"
NODEJS_VERSION: "20.16.0"
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
jobs:
@@ -74,24 +71,32 @@ jobs:
sudo mount -t tmpfs tmpfs .
if: ${{ github.event.inputs.runner == 'fast' }}
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-node@v3
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.x"
- uses: actions/setup-node@v4
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up docker QEMU
uses: docker/setup-qemu-action@v3
- name: Set up system dependencies
run: sudo apt-get update && sudo apt-get install -y qemu-user-static systemd-container squashfuse
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
uses: docker/setup-buildx-action@v3
- name: Make
run: make ARCH=${{ matrix.arch }} compiled-${{ matrix.arch }}.tar
- uses: actions/upload-artifact@v3
- uses: actions/upload-artifact@v4
with:
name: compiled-${{ matrix.arch }}.tar
path: compiled-${{ matrix.arch }}.tar
@@ -140,10 +145,19 @@ jobs:
}')[matrix.platform]
}}
steps:
- uses: actions/checkout@v3
- name: Free space
run: rm -rf /opt/hostedtoolcache*
if: ${{ github.event.inputs.runner != 'fast' }}
- uses: actions/checkout@v4
with:
submodules: recursive
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: "3.x"
- name: Install dependencies
run: |
sudo apt-get update
@@ -162,7 +176,7 @@ jobs:
if: ${{ github.event.inputs.runner == 'fast' && (matrix.platform == 'x86_64' || matrix.platform == 'x86_64-nonfree') }}
- name: Download compiled artifacts
uses: actions/download-artifact@v3
uses: actions/download-artifact@v4
with:
name: compiled-${{ env.ARCH }}.tar
@@ -171,9 +185,29 @@ jobs:
- name: Prevent rebuild of compiled artifacts
run: |
mkdir -p web/node_modules
mkdir -p web/dist/raw
mkdir -p core/startos/bindings
mkdir -p sdk/base/lib/osBindings
mkdir -p container-runtime/node_modules
mkdir -p container-runtime/dist
mkdir -p container-runtime/dist/node_modules
mkdir -p core/startos/bindings
mkdir -p sdk/dist
mkdir -p sdk/baseDist
mkdir -p patch-db/client/node_modules
mkdir -p patch-db/client/dist
mkdir -p web/.angular
mkdir -p web/dist/raw/ui
mkdir -p web/dist/raw/install-wizard
mkdir -p web/dist/raw/setup-wizard
mkdir -p web/dist/static/ui
mkdir -p web/dist/static/install-wizard
mkdir -p web/dist/static/setup-wizard
PLATFORM=${{ matrix.platform }} make -t compiled-${{ env.ARCH }}.tar
- run: git status
- name: Run iso build
run: PLATFORM=${{ matrix.platform }} make iso
if: ${{ matrix.platform != 'raspberrypi' }}
@@ -182,18 +216,18 @@ jobs:
run: PLATFORM=${{ matrix.platform }} make img
if: ${{ matrix.platform == 'raspberrypi' }}
- uses: actions/upload-artifact@v3
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.platform }}.squashfs
path: results/*.squashfs
- uses: actions/upload-artifact@v3
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.platform }}.iso
path: results/*.iso
if: ${{ matrix.platform != 'raspberrypi' }}
- uses: actions/upload-artifact@v3
- uses: actions/upload-artifact@v4
with:
name: ${{ matrix.platform }}.img
path: results/*.img

View File

@@ -11,7 +11,7 @@ on:
- next/*
env:
NODEJS_VERSION: "18.15.0"
NODEJS_VERSION: "20.16.0"
ENVIRONMENT: dev-unstable
jobs:
@@ -19,11 +19,11 @@ jobs:
name: Run Automated Tests
runs-on: ubuntu-22.04
steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4
with:
submodules: recursive
- uses: actions/setup-node@v3
- uses: actions/setup-node@v4
with:
node-version: ${{ env.NODEJS_VERSION }}

4
.gitignore vendored
View File

@@ -20,7 +20,6 @@ secrets.db
/ENVIRONMENT.txt
/GIT_HASH.txt
/VERSION.txt
/eos-*.tar.gz
/*.deb
/target
/*.squashfs
@@ -28,4 +27,5 @@ secrets.db
/dpkg-workdir
/compiled.tar
/compiled-*.tar
/firmware
/firmware
/tmp

134
DEVELOPMENT.md Normal file
View File

@@ -0,0 +1,134 @@
# Setting up your development environment on Debian/Ubuntu
A step-by-step guide
> This is the only officially supported build environment.
> MacOS has limited build capabilities and Windows requires [WSL2](https://learn.microsoft.com/en-us/windows/wsl/install)
## Installing dependencies
Run the following commands one at a time
```sh
sudo apt update
sudo apt install -y ca-certificates curl gpg build-essential
curl -fsSL https://download.docker.com/linux/debian/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg-architecture -q DEB_HOST_ARCH) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian bookworm stable" | sudo tee /etc/apt/sources.list.d/docker.list
sudo apt update
sudo apt install -y sed grep gawk jq gzip brotli containerd.io docker-ce docker-ce-cli docker-compose-plugin qemu-user-static binfmt-support squashfs-tools git debspawn rsync b3sum
sudo mkdir -p /etc/debspawn/
echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
sudo usermod -aG docker $USER
sudo su $USER
docker run --privileged --rm tonistiigi/binfmt --install all
docker buildx create --use
curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh # proceed with default installation
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/master/install.sh | bash
source ~/.bashrc
nvm install 20
nvm use 20
nvm alias default 20 # this prevents your machine from reverting back to another version
```
## Cloning the repository
```sh
git clone --recursive https://github.com/Start9Labs/start-os.git --branch next/minor
cd start-os
```
## Building an ISO
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make iso
```
This will build an ISO for your current architecture. If you are building to run on an architecture other than the one you are currently on, replace `$(uname -m)` with the correct platform for the device (one of `aarch64`, `aarch64-nonfree`, `x86_64`, `x86_64-nonfree`, `raspberrypi`)
## Creating a VM
### Install virt-manager
```sh
sudo apt update
sudo apt install -y virt-manager
sudo usermod -aG libvirt $USER
sudo su $USER
```
### Launch virt-manager
```sh
virt-manager
```
### Create new virtual machine
![Select "Create a new virtual machine"](assets/create-vm/step-1.png)
![Click "Forward"](assets/create-vm/step-2.png)
![Click "Browse"](assets/create-vm/step-3.png)
![Click "+"](assets/create-vm/step-4.png)
#### make sure to set "Target Path" to the path to your results directory in start-os
![Create storage pool](assets/create-vm/step-5.png)
![Select storage pool](assets/create-vm/step-6.png)
![Select ISO](assets/create-vm/step-7.png)
![Select "Generic or unknown OS" and click "Forward"](assets/create-vm/step-8.png)
![Set Memory and CPUs](assets/create-vm/step-9.png)
![Create disk](assets/create-vm/step-10.png)
![Name VM](assets/create-vm/step-11.png)
![Create network](assets/create-vm/step-12.png)
## Updating a VM
The fastest way to update a VM to your latest code depends on what you changed:
### UI or startd:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-startbox REMOTE=start9@<VM IP>
```
### Container runtime or debian dependencies:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-deb REMOTE=start9@<VM IP>
```
### Image recipe:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make update-squashfs REMOTE=start9@<VM IP>
```
---
If the device you are building for is not available via ssh, it is also possible to use `magic-wormhole` to send the relevant files.
### Prerequisites:
```sh
sudo apt update
sudo apt install -y magic-wormhole
```
As before, the fastest way to update a VM to your latest code depends on what you changed. Each of the following commands will return a command to paste into the shell of the device you would like to upgrade.
### UI or startd:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole
```
### Container runtime or debian dependencies:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole-deb
```
### Image recipe:
```sh
PLATFORM=$(uname -m) ENVIRONMENT=dev make wormhole-squashfs
```

216
Makefile
View File

@@ -6,8 +6,8 @@ BASENAME := $(shell ./basename.sh)
PLATFORM := $(shell if [ -f ./PLATFORM.txt ]; then cat ./PLATFORM.txt; else echo unknown; fi)
ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; else echo $(PLATFORM) | sed 's/-nonfree$$//g'; fi)
IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo iso; fi)
BINS := core/target/$(ARCH)-unknown-linux-gnu/release/startbox core/target/aarch64-unknown-linux-musl/release/container-init core/target/x86_64-unknown-linux-musl/release/container-init
WEB_UIS := web/dist/raw/ui web/dist/raw/setup-wizard web/dist/raw/diagnostic-ui web/dist/raw/install-wizard
WEB_UIS := web/dist/raw/ui/index.html web/dist/raw/setup-wizard/index.html web/dist/raw/install-wizard/index.html
COMPRESSED_WEB_UIS := web/dist/static/ui/index.html web/dist/static/setup-wizard/index.html web/dist/static/install-wizard/index.html
FIRMWARE_ROMS := ./firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json)
BUILD_SRC := $(shell git ls-files build) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS)
DEBIAN_SRC := $(shell git ls-files debian/)
@@ -16,17 +16,17 @@ STARTD_SRC := core/startos/startd.service $(BUILD_SRC)
COMPAT_SRC := $(shell git ls-files system-images/compat/)
UTILS_SRC := $(shell git ls-files system-images/utils/)
BINFMT_SRC := $(shell git ls-files system-images/binfmt/)
CORE_SRC := $(shell git ls-files core) $(shell git ls-files --recurse-submodules patch-db) web/dist/static web/patchdb-ui-seed.json $(GIT_HASH_FILE)
WEB_SHARED_SRC := $(shell git ls-files web/projects/shared) $(shell ls -p web/ | grep -v / | sed 's/^/web\//g') web/node_modules web/config.json patch-db/client/dist web/patchdb-ui-seed.json
CORE_SRC := $(shell git ls-files core) $(shell git ls-files --recurse-submodules patch-db) $(GIT_HASH_FILE)
WEB_SHARED_SRC := $(shell git ls-files web/projects/shared) $(shell ls -p web/ | grep -v / | sed 's/^/web\//g') web/node_modules/.package-lock.json web/config.json patch-db/client/dist/index.js sdk/baseDist/package.json web/patchdb-ui-seed.json sdk/dist/package.json
WEB_UI_SRC := $(shell git ls-files web/projects/ui)
WEB_SETUP_WIZARD_SRC := $(shell git ls-files web/projects/setup-wizard)
WEB_DIAGNOSTIC_UI_SRC := $(shell git ls-files web/projects/diagnostic-ui)
WEB_INSTALL_WIZARD_SRC := $(shell git ls-files web/projects/install-wizard)
PATCH_DB_CLIENT_SRC := $(shell git ls-files --recurse-submodules patch-db/client)
GZIP_BIN := $(shell which pigz || which gzip)
TAR_BIN := $(shell which gtar || which tar)
COMPILED_TARGETS := $(BINS) system-images/compat/docker-images/$(ARCH).tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar
ALL_TARGETS := $(STARTD_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep; fi) $(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]; then echo cargo-deps/$(ARCH)-unknown-linux-gnu/release/tokio-console; fi') $(PLATFORM_FILE)
COMPILED_TARGETS := core/target/$(ARCH)-unknown-linux-musl/release/startbox core/target/$(ARCH)-unknown-linux-musl/release/containerbox system-images/compat/docker-images/$(ARCH).tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar container-runtime/rootfs.$(ARCH).squashfs
ALL_TARGETS := $(STARTD_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) cargo-deps/$(ARCH)-unknown-linux-musl/release/startos-backup-fs $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo cargo-deps/aarch64-unknown-linux-musl/release/pi-beep; fi) $(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]; then echo cargo-deps/$(ARCH)-unknown-linux-musl/release/tokio-console; fi') $(PLATFORM_FILE)
REBUILD_TYPES = 1
ifeq ($(REMOTE),)
mkdir = mkdir -p $1
@@ -49,10 +49,13 @@ endif
.DELETE_ON_ERROR:
.PHONY: all metadata install clean format sdk snapshots uis ui reflash deb $(IMAGE_TYPE) squashfs sudo wormhole test
.PHONY: all metadata install clean format cli uis ui reflash deb $(IMAGE_TYPE) squashfs sudo wormhole wormhole-deb test test-core test-sdk test-container-runtime registry
all: $(ALL_TARGETS)
touch:
touch $(ALL_TARGETS)
metadata: $(VERSION_FILE) $(PLATFORM_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE)
sudo:
@@ -62,6 +65,7 @@ clean:
rm -f system-images/**/*.tar
rm -rf system-images/compat/target
rm -rf core/target
rm -rf core/startos/bindings
rm -rf web/.angular
rm -f web/config.json
rm -rf web/node_modules
@@ -74,6 +78,13 @@ clean:
rm -rf image-recipe/deb
rm -rf results
rm -rf build/lib/firmware
rm -rf container-runtime/dist
rm -rf container-runtime/node_modules
rm -f container-runtime/*.squashfs
if [ -d container-runtime/tmp/combined ] && mountpoint container-runtime/tmp/combined; then sudo umount container-runtime/tmp/combined; fi
if [ -d container-runtime/tmp/lower ] && mountpoint container-runtime/tmp/lower; then sudo umount container-runtime/tmp/lower; fi
rm -rf container-runtime/tmp
(cd sdk && make clean)
rm -f ENVIRONMENT.txt
rm -f PLATFORM.txt
rm -f GIT_HASH.txt
@@ -82,18 +93,29 @@ clean:
format:
cd core && cargo +nightly fmt
test: $(CORE_SRC) $(ENVIRONMENT_FILE)
cd core && cargo build && cargo test
test: | test-core test-sdk test-container-runtime
sdk:
cd core && ./install-sdk.sh
test-core: $(CORE_SRC) $(ENVIRONMENT_FILE)
./core/run-tests.sh
test-sdk: $(shell git ls-files sdk) sdk/base/lib/osBindings/index.ts
cd sdk && make test
test-container-runtime: container-runtime/node_modules/.package-lock.json $(shell git ls-files container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json
cd container-runtime && npm test
cli:
cd core && ./install-cli.sh
registry:
cd core && ./build-registrybox.sh
deb: results/$(BASENAME).deb
debian/control: build/lib/depends build/lib/conflicts
./debuild/control.sh
results/$(BASENAME).deb: dpkg-build.sh $(DEBIAN_SRC) $(VERSION_FILE) $(PLATFORM_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE)
results/$(BASENAME).deb: dpkg-build.sh $(DEBIAN_SRC) $(ALL_TARGETS)
PLATFORM=$(PLATFORM) ./dpkg-build.sh
$(IMAGE_TYPE): results/$(BASENAME).$(IMAGE_TYPE)
@@ -104,17 +126,17 @@ results/$(BASENAME).$(IMAGE_TYPE) results/$(BASENAME).squashfs: $(IMAGE_RECIPE_S
./image-recipe/run-local-build.sh "results/$(BASENAME).deb"
# For creating os images. DO NOT USE
install: $(ALL_TARGETS)
install: $(ALL_TARGETS)
$(call mkdir,$(DESTDIR)/usr/bin)
$(call cp,core/target/$(ARCH)-unknown-linux-gnu/release/startbox,$(DESTDIR)/usr/bin/startbox)
$(call mkdir,$(DESTDIR)/usr/sbin)
$(call cp,core/target/$(ARCH)-unknown-linux-musl/release/startbox,$(DESTDIR)/usr/bin/startbox)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/startd)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-cli)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-sdk)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-deno)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/avahi-alias)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/embassy-cli)
if [ "$(PLATFORM)" = "raspberrypi" ]; then $(call cp,cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep,$(DESTDIR)/usr/bin/pi-beep); fi
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then $(call cp,cargo-deps/$(ARCH)-unknown-linux-gnu/release/tokio-console,$(DESTDIR)/usr/bin/tokio-console); fi
if [ "$(PLATFORM)" = "raspberrypi" ]; then $(call cp,cargo-deps/aarch64-unknown-linux-musl/release/pi-beep,$(DESTDIR)/usr/bin/pi-beep); fi
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then $(call cp,cargo-deps/$(ARCH)-unknown-linux-musl/release/tokio-console,$(DESTDIR)/usr/bin/tokio-console); fi
$(call cp,cargo-deps/$(ARCH)-unknown-linux-musl/release/startos-backup-fs,$(DESTDIR)/usr/bin/startos-backup-fs)
$(call ln,/usr/bin/startos-backup-fs,$(DESTDIR)/usr/sbin/mount.backup-fs)
$(call mkdir,$(DESTDIR)/lib/systemd/system)
$(call cp,core/startos/startd.service,$(DESTDIR)/lib/systemd/system/startd.service)
@@ -122,20 +144,17 @@ install: $(ALL_TARGETS)
$(call mkdir,$(DESTDIR)/usr/lib)
$(call rm,$(DESTDIR)/usr/lib/startos)
$(call cp,build/lib,$(DESTDIR)/usr/lib/startos)
$(call mkdir,$(DESTDIR)/usr/lib/startos/container-runtime)
$(call cp,container-runtime/rootfs.$(ARCH).squashfs,$(DESTDIR)/usr/lib/startos/container-runtime/rootfs.squashfs)
$(call cp,PLATFORM.txt,$(DESTDIR)/usr/lib/startos/PLATFORM.txt)
$(call cp,ENVIRONMENT.txt,$(DESTDIR)/usr/lib/startos/ENVIRONMENT.txt)
$(call cp,GIT_HASH.txt,$(DESTDIR)/usr/lib/startos/GIT_HASH.txt)
$(call cp,VERSION.txt,$(DESTDIR)/usr/lib/startos/VERSION.txt)
$(call mkdir,$(DESTDIR)/usr/lib/startos/container)
$(call cp,core/target/aarch64-unknown-linux-musl/release/container-init,$(DESTDIR)/usr/lib/startos/container/container-init.arm64)
$(call cp,core/target/x86_64-unknown-linux-musl/release/container-init,$(DESTDIR)/usr/lib/startos/container/container-init.amd64)
$(call mkdir,$(DESTDIR)/usr/lib/startos/system-images)
$(call cp,system-images/compat/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/compat.tar)
$(call cp,system-images/utils/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/utils.tar)
$(call cp,system-images/binfmt/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/binfmt.tar)
$(call cp,firmware/$(PLATFORM),$(DESTDIR)/usr/lib/startos/firmware)
@@ -148,31 +167,103 @@ update-overlay: $(ALL_TARGETS)
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) PLATFORM=$(PLATFORM)
$(call ssh,"sudo systemctl start startd")
wormhole: core/target/$(ARCH)-unknown-linux-gnu/release/startbox
@wormhole send core/target/$(ARCH)-unknown-linux-gnu/release/startbox 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo /usr/lib/startos/scripts/chroot-and-upgrade \"cd /usr/bin && rm startbox && wormhole receive --accept-file %s && chmod +x startbox\"\n", $$3 }'
wormhole: core/target/$(ARCH)-unknown-linux-musl/release/startbox
@echo "Paste the following command into the shell of your StartOS server:"
@echo
@wormhole send core/target/$(ARCH)-unknown-linux-musl/release/startbox 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo /usr/lib/startos/scripts/chroot-and-upgrade \"cd /usr/bin && rm startbox && wormhole receive --accept-file %s && chmod +x startbox\"\n", $$3 }'
wormhole-deb: results/$(BASENAME).deb
@echo "Paste the following command into the shell of your StartOS server:"
@echo
@wormhole send results/$(BASENAME).deb 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo /usr/lib/startos/scripts/chroot-and-upgrade '"'"'cd $$(mktemp -d) && wormhole receive --accept-file %s && apt-get install -y --reinstall ./$(BASENAME).deb'"'"'\n", $$3 }'
wormhole-squashfs: results/$(BASENAME).squashfs
$(eval SQFS_SUM := $(shell b3sum results/$(BASENAME).squashfs | head -c 32))
$(eval SQFS_SIZE := $(shell du -s --bytes results/$(BASENAME).squashfs | awk '{print $$1}'))
@echo "Paste the following command into the shell of your StartOS server:"
@echo
@wormhole send results/$(BASENAME).squashfs 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo sh -c '"'"'/usr/lib/startos/scripts/prune-images $(SQFS_SIZE) && cd /media/startos/images && wormhole receive --accept-file %s && mv $(BASENAME).squashfs $(SQFS_SUM).rootfs && ln -rsf ./$(SQFS_SUM).rootfs ../config/current.rootfs && sync && reboot'"'"'\n", $$3 }'
update: $(ALL_TARGETS)
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(call ssh,"sudo rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next/")
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/embassy/next PLATFORM=$(PLATFORM)
$(call ssh,'sudo NO_SYNC=1 /media/embassy/next/usr/lib/startos/scripts/chroot-and-upgrade "apt-get install -y $(shell cat ./build/lib/depends)"')
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/startos/next PLATFORM=$(PLATFORM)
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y $(shell cat ./build/lib/depends)"')
update-startbox: core/target/$(ARCH)-unknown-linux-musl/release/startbox # only update binary (faster than full update)
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
$(call cp,core/target/$(ARCH)-unknown-linux-musl/release/startbox,/media/startos/next/usr/bin/startbox)
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync true')
update-deb: results/$(BASENAME).deb # better than update, but only available from debian
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
$(call mkdir,/media/startos/next/tmp/startos-deb)
$(call cp,results/$(BASENAME).deb,/media/startos/next/tmp/startos-deb/$(BASENAME).deb)
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y --reinstall /tmp/startos-deb/$(BASENAME).deb"')
update-squashfs: results/$(BASENAME).squashfs
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(eval SQFS_SUM := $(shell b3sum results/$(BASENAME).squashfs))
$(eval SQFS_SIZE := $(shell du -s --bytes results/$(BASENAME).squashfs | awk '{print $$1}'))
$(call ssh,'/usr/lib/startos/scripts/prune-images $(SQFS_SIZE)')
$(call cp,results/$(BASENAME).squashfs,/media/startos/images/$(SQFS_SUM).rootfs)
$(call ssh,'sudo ln -rsf /media/startos/images/$(SQFS_SUM).rootfs /media/startos/config/current.rootfs')
$(call ssh,'sudo reboot')
emulate-reflash: $(ALL_TARGETS)
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(call ssh,"sudo rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next/")
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/embassy/next PLATFORM=$(PLATFORM)
$(call ssh,"sudo touch /media/embassy/config/upgrade && sudo rm -f /media/embassy/config/disk.guid && sudo sync && sudo reboot")
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/startos/next PLATFORM=$(PLATFORM)
$(call ssh,'sudo rm -f /media/startos/config/disk.guid /media/startos/config/overlay/etc/hostname')
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y $(shell cat ./build/lib/depends)"')
upload-ota: results/$(BASENAME).squashfs
TARGET=$(TARGET) KEY=$(KEY) ./upload-ota.sh
container-runtime/debian.$(ARCH).squashfs:
ARCH=$(ARCH) ./container-runtime/download-base-image.sh
container-runtime/node_modules/.package-lock.json: container-runtime/package.json container-runtime/package-lock.json sdk/dist/package.json
npm --prefix container-runtime ci
touch container-runtime/node_modules/.package-lock.json
sdk/base/lib/osBindings/index.ts: $(shell if [ "$(REBUILD_TYPES)" -ne 0 ]; then echo core/startos/bindings/index.ts; fi)
mkdir -p sdk/base/lib/osBindings
rsync -ac --delete core/startos/bindings/ sdk/base/lib/osBindings/
touch sdk/base/lib/osBindings/index.ts
core/startos/bindings/index.ts: $(shell git ls-files core) $(ENVIRONMENT_FILE)
rm -rf core/startos/bindings
./core/build-ts.sh
ls core/startos/bindings/*.ts | sed 's/core\/startos\/bindings\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' | grep -v '"./index"' | tee core/startos/bindings/index.ts
npm --prefix sdk exec -- prettier --config ./sdk/base/package.json -w ./core/startos/bindings/*.ts
touch core/startos/bindings/index.ts
sdk/dist/package.json sdk/baseDist/package.json: $(shell git ls-files sdk) sdk/base/lib/osBindings/index.ts
(cd sdk && make bundle)
touch sdk/dist/package.json
touch sdk/baseDist/package.json
# TODO: make container-runtime its own makefile?
container-runtime/dist/index.js: container-runtime/node_modules/.package-lock.json $(shell git ls-files container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json
npm --prefix container-runtime run build
container-runtime/dist/node_modules/.package-lock.json container-runtime/dist/package.json container-runtime/dist/package-lock.json: container-runtime/package.json container-runtime/package-lock.json sdk/dist/package.json container-runtime/install-dist-deps.sh
./container-runtime/install-dist-deps.sh
touch container-runtime/dist/node_modules/.package-lock.json
container-runtime/rootfs.$(ARCH).squashfs: container-runtime/debian.$(ARCH).squashfs container-runtime/container-runtime.service container-runtime/update-image.sh container-runtime/deb-install.sh container-runtime/dist/index.js container-runtime/dist/node_modules/.package-lock.json core/target/$(ARCH)-unknown-linux-musl/release/containerbox | sudo
ARCH=$(ARCH) ./container-runtime/update-image.sh
build/lib/depends build/lib/conflicts: build/dpkg-deps/*
build/dpkg-deps/generate.sh
$(FIRMWARE_ROMS): build/lib/firmware.json download-firmware.sh $(PLATFORM_FILE)
./download-firmware.sh $(PLATFORM)
system-images/compat/docker-images/$(ARCH).tar: $(COMPAT_SRC) core/Cargo.lock
system-images/compat/docker-images/$(ARCH).tar: $(COMPAT_SRC)
cd system-images/compat && make docker-images/$(ARCH).tar && touch docker-images/$(ARCH).tar
system-images/utils/docker-images/$(ARCH).tar: $(UTILS_SRC)
@@ -181,45 +272,49 @@ system-images/utils/docker-images/$(ARCH).tar: $(UTILS_SRC)
system-images/binfmt/docker-images/$(ARCH).tar: $(BINFMT_SRC)
cd system-images/binfmt && make docker-images/$(ARCH).tar && touch docker-images/$(ARCH).tar
snapshots: core/snapshot-creator/Cargo.toml
cd core/ && ARCH=aarch64 ./build-v8-snapshot.sh
cd core/ && ARCH=x86_64 ./build-v8-snapshot.sh
core/target/$(ARCH)-unknown-linux-musl/release/startbox: $(CORE_SRC) $(COMPRESSED_WEB_UIS) web/patchdb-ui-seed.json $(ENVIRONMENT_FILE)
ARCH=$(ARCH) ./core/build-startbox.sh
touch core/target/$(ARCH)-unknown-linux-musl/release/startbox
$(BINS): $(CORE_SRC) $(ENVIRONMENT_FILE)
cd core && ARCH=$(ARCH) ./build-prod.sh
touch $(BINS)
core/target/$(ARCH)-unknown-linux-musl/release/containerbox: $(CORE_SRC) $(ENVIRONMENT_FILE)
ARCH=$(ARCH) ./core/build-containerbox.sh
touch core/target/$(ARCH)-unknown-linux-musl/release/containerbox
web/node_modules: web/package.json
web/node_modules/.package-lock.json: web/package.json sdk/baseDist/package.json
npm --prefix web ci
touch web/node_modules/.package-lock.json
web/dist/raw/ui: $(WEB_UI_SRC) $(WEB_SHARED_SRC)
web/.angular/.updated: patch-db/client/dist/index.js sdk/baseDist/package.json web/node_modules/.package-lock.json
rm -rf web/.angular
mkdir -p web/.angular
touch web/.angular/.updated
web/dist/raw/ui/index.html: $(WEB_UI_SRC) $(WEB_SHARED_SRC) web/.angular/.updated
npm --prefix web run build:ui
touch web/dist/raw/ui/index.html
web/dist/raw/setup-wizard: $(WEB_SETUP_WIZARD_SRC) $(WEB_SHARED_SRC)
web/dist/raw/setup-wizard/index.html: $(WEB_SETUP_WIZARD_SRC) $(WEB_SHARED_SRC) web/.angular/.updated
npm --prefix web run build:setup
touch web/dist/raw/setup-wizard/index.html
web/dist/raw/diagnostic-ui: $(WEB_DIAGNOSTIC_UI_SRC) $(WEB_SHARED_SRC)
npm --prefix web run build:dui
web/dist/raw/install-wizard: $(WEB_INSTALL_WIZARD_SRC) $(WEB_SHARED_SRC)
web/dist/raw/install-wizard/index.html: $(WEB_INSTALL_WIZARD_SRC) $(WEB_SHARED_SRC) web/.angular/.updated
npm --prefix web run build:install-wiz
touch web/dist/raw/install-wizard/index.html
web/dist/static: $(WEB_UIS) $(ENVIRONMENT_FILE)
$(COMPRESSED_WEB_UIS): $(WEB_UIS) $(ENVIRONMENT_FILE)
./compress-uis.sh
web/config.json: $(GIT_HASH_FILE) web/config-sample.json
jq '.useMocks = false' web/config-sample.json | jq '.gitHash = "$(shell cat GIT_HASH.txt)"' > web/config.json
web/patchdb-ui-seed.json: web/package.json
jq '."ack-welcome" = $(shell jq '.version' web/package.json)' web/patchdb-ui-seed.json > ui-seed.tmp
mv ui-seed.tmp web/patchdb-ui-seed.json
patch-db/client/node_modules: patch-db/client/package.json
patch-db/client/node_modules/.package-lock.json: patch-db/client/package.json
npm --prefix patch-db/client ci
touch patch-db/client/node_modules/.package-lock.json
patch-db/client/dist: $(PATCH_DB_CLIENT_SRC) patch-db/client/node_modules
! test -d patch-db/client/dist || rm -rf patch-db/client/dist
npm --prefix web run build:deps
patch-db/client/dist/index.js: $(PATCH_DB_CLIENT_SRC) patch-db/client/node_modules/.package-lock.json
rm -rf patch-db/client/dist
npm --prefix patch-db/client run build
touch patch-db/client/dist/index.js
# used by github actions
compiled-$(ARCH).tar: $(COMPILED_TARGETS) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE)
@@ -231,8 +326,11 @@ uis: $(WEB_UIS)
# this is a convenience step to build the UI
ui: web/dist/raw/ui
cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep:
cargo-deps/aarch64-unknown-linux-musl/release/pi-beep:
ARCH=aarch64 ./build-cargo-dep.sh pi-beep
cargo-deps/$(ARCH)-unknown-linux-gnu/release/tokio-console:
ARCH=$(ARCH) ./build-cargo-dep.sh tokio-console
cargo-deps/$(ARCH)-unknown-linux-musl/release/tokio-console:
ARCH=$(ARCH) PREINSTALL="apk add musl-dev pkgconfig" ./build-cargo-dep.sh tokio-console
cargo-deps/$(ARCH)-unknown-linux-musl/release/startos-backup-fs:
ARCH=$(ARCH) PREINSTALL="apk add fuse3 fuse3-dev fuse3-static musl-dev pkgconfig" ./build-cargo-dep.sh --git https://github.com/Start9Labs/start-fs.git startos-backup-fs

BIN
assets/create-vm/step-1.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 27 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 44 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 53 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 48 KiB

BIN
assets/create-vm/step-2.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 46 KiB

BIN
assets/create-vm/step-3.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 46 KiB

BIN
assets/create-vm/step-4.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 50 KiB

BIN
assets/create-vm/step-5.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 64 KiB

BIN
assets/create-vm/step-6.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 52 KiB

BIN
assets/create-vm/step-7.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 64 KiB

BIN
assets/create-vm/step-8.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 53 KiB

BIN
assets/create-vm/step-9.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 44 KiB

View File

@@ -17,9 +17,18 @@ if [ -z "$ARCH" ]; then
ARCH=$(uname -m)
fi
mkdir -p cargo-deps
alias 'rust-arm64-builder'='docker run $USE_TTY --rm -v "$HOME/.cargo/registry":/usr/local/cargo/registry -v "$(pwd)"/cargo-deps:/home/rust/src -P start9/rust-arm-cross:aarch64'
DOCKER_PLATFORM="linux/${ARCH}"
if [ "$ARCH" = aarch64 ] || [ "$ARCH" = arm64 ]; then
DOCKER_PLATFORM="linux/arm64"
elif [ "$ARCH" = x86_64 ]; then
DOCKER_PLATFORM="linux/amd64"
fi
rust-arm64-builder cargo install "$1" --target-dir /home/rust/src --target=$ARCH-unknown-linux-gnu
mkdir -p cargo-deps
alias 'rust-musl-builder'='docker run $USE_TTY --platform=${DOCKER_PLATFORM} --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)"/cargo-deps:/home/rust/src -w /home/rust/src -P rust:alpine'
PREINSTALL=${PREINSTALL:-true}
rust-musl-builder sh -c "$PREINSTALL && cargo install $* --target-dir /home/rust/src --target=$ARCH-unknown-linux-musl"
sudo chown -R $USER cargo-deps
sudo chown -R $USER ~/.cargo

4
build/.gitignore vendored
View File

@@ -1,2 +1,2 @@
lib/depends
lib/conflicts
/lib/depends
/lib/conflicts

View File

@@ -1,107 +0,0 @@
# Building StartOS
⚠️ The commands given assume a Debian or Ubuntu-based environment. _Building in
a VM is NOT yet supported_ ⚠️
## Prerequisites
1. Install dependencies
- Avahi
- `sudo apt install -y avahi-daemon`
- Installed by default on most Debian systems - https://avahi.org
- Build Essentials (needed to run `make`)
- `sudo apt install -y build-essential`
- Docker
- `curl -fsSL https://get.docker.com | sh`
- https://docs.docker.com/get-docker
- Add your user to the docker group: `sudo usermod -a -G docker $USER`
- Reload user environment `exec sudo su -l $USER`
- Prepare Docker environment
- Setup buildx (https://docs.docker.com/buildx/working-with-buildx/)
- Create a builder: `docker buildx create --use`
- Add multi-arch build ability:
`docker run --rm --privileged linuxkit/binfmt:v0.8`
- Node Version 12+
- snap: `sudo snap install node`
- [nvm](https://github.com/nvm-sh/nvm#installing-and-updating):
`nvm install --lts`
- https://nodejs.org/en/docs
- NPM Version 7+
- apt: `sudo apt install -y npm`
- [nvm](https://github.com/nvm-sh/nvm#installing-and-updating):
`nvm install --lts`
- https://docs.npmjs.com/downloading-and-installing-node-js-and-npm
- jq
- `sudo apt install -y jq`
- https://stedolan.github.io/jq
- yq
- snap: `sudo snap install yq`
- binaries: https://github.com/mikefarah/yq/releases/
- https://mikefarah.gitbook.io/yq
2. Clone the latest repo with required submodules
> :information_source: You chan check latest available version
> [here](https://github.com/Start9Labs/start-os/releases)
```
git clone --recursive https://github.com/Start9Labs/start-os.git --branch latest
```
## Build Raspberry Pi Image
```
cd start-os
make embassyos-raspi.img ARCH=aarch64
```
## Flash
Flash the resulting `embassyos-raspi.img` to your SD Card
We recommend [Balena Etcher](https://www.balena.io/etcher/)
## Setup
Visit http://start.local from any web browser - We recommend
[Firefox](https://www.mozilla.org/firefox/browsers)
Enter your product key. This is generated during the build process and can be
found in `product_key.txt`, located in the root directory.
## Troubleshooting
1. I just flashed my SD card, fired up StartOS, bootup sounds and all, but my
browser is saying "Unable to connect" with start.local.
- Try doing a hard refresh on your browser, or opening the url in a
private/incognito window. If you've ran an instance of StartOS before,
sometimes you can have a stale cache that will block you from navigating to
the page.
2. Flashing the image isn't working with balenaEtcher. I'm getting
`Cannot read property 'message' of null` when I try.
- The latest versions of Balena may not flash properly. This version here:
https://github.com/balena-io/etcher/releases/tag/v1.5.122 should work
properly.
3. Startup isn't working properly and I'm curious as to why. How can I view logs
regarding startup for debugging?
- Find the IP of your device
- Run `nc <ip> 8080` and it will print the logs
4. I need to ssh into my server to fix something, but I cannot get to the
console to add ssh keys normally.
- During the Build step, instead of running just
`make embassyos-raspi.img ARCH=aarch64` run
`ENVIRONMENT=dev make embassyos-raspi.img ARCH=aarch64`. Flash like normal,
and insert into your server. Boot up StartOS, then on another computer on
the same network, ssh into the the server with the username `start9` password
`embassy`.
4. I need to reset my password, how can I do that?
- You will need to reflash your device. Select "Use Existing Drive" once you are
in setup, and it will prompt you to set a new password.

View File

@@ -1,76 +0,0 @@
# Release Process
## `embassyos_0.3.x-1_amd64.deb`
- Description: debian package for x86_64 - intended to be installed on pureos
- Destination: GitHub Release Tag
- Requires: N/A
- Build steps:
- Clone `https://github.com/Start9Labs/embassy-os-deb` at `master`
- Run `make TAG=master` from that folder
- Artifact: `./embassyos_0.3.x-1_amd64.deb`
## `eos-<version>-<git hash>-<date>_amd64.iso`
- Description: live usb image for x86_64
- Destination: GitHub Release Tag
- Requires: `embassyos_0.3.x-1_amd64.deb`
- Build steps:
- Clone `https://github.com/Start9Labs/eos-image-recipes` at `master`
- Copy `embassyos_0.3.x-1_amd64.deb` to
`overlays/vendor/root/embassyos_0.3.x-1_amd64.deb`
- Run `./run-local-build.sh byzantium` from that folder
- Artifact: `./results/eos-<version>-<git hash>-<date>_amd64.iso`
## `eos.x86_64.squashfs`
- Description: compressed embassyOS x86_64 filesystem image
- Destination: GitHub Release Tag, Registry @
`resources/eos/<version>/eos.x86_64.squashfs`
- Requires: `eos-<version>-<git hash>-<date>_amd64.iso`
- Build steps:
- From `https://github.com/Start9Labs/eos-image-recipes` at `master`
- `./extract-squashfs.sh results/eos-<version>-<git hash>-<date>_amd64.iso`
- Artifact: `./results/eos.x86_64.squashfs`
## `eos.raspberrypi.squashfs`
- Description: compressed embassyOS raspberrypi filesystem image
- Destination: GitHub Release Tag, Registry @
`resources/eos/<version>/eos.raspberrypi.squashfs`
- Requires: N/A
- Build steps:
- Clone `https://github.com/Start9Labs/embassy-os` at `master`
- `make embassyos-raspi.img`
- flash `embassyos-raspi.img` to raspberry pi
- boot raspberry pi with ethernet
- wait for chime
- you can watch logs using `nc <ip> 8080`
- unplug raspberry pi, put sd card back in build machine
- `./build/raspberry-pi/rip-image.sh`
- Artifact: `./eos.raspberrypi.squashfs`
## `lite-upgrade.img`
- Description: update image for users coming from 0.3.2.1 and before
- Destination: Registry @ `resources/eos/<version>/eos.img`
- Requires: `eos.raspberrypi.squashfs`
- Build steps:
- From `https://github.com/Start9Labs/embassy-os` at `master`
- `make lite-upgrade.img`
- Artifact `./lite-upgrade.img`
## `eos-<version>-<git hash>-<date>_raspberrypi.tar.gz`
- Description: pre-initialized raspberrypi image
- Destination: GitHub Release Tag (as tar.gz)
- Requires: `eos.raspberrypi.squashfs`
- Build steps:
- From `https://github.com/Start9Labs/embassy-os` at `master`
- `make eos_raspberrypi.img`
- `tar --format=posix -cS -f- eos-<version>-<git hash>-<date>_raspberrypi.img | gzip > eos-<version>-<git hash>-<date>_raspberrypi.tar.gz`
- Artifact `./eos-<version>-<git hash>-<date>_raspberrypi.tar.gz`
## `embassy-sdk`
- Build and deploy to all registries

View File

@@ -1,5 +1,6 @@
avahi-daemon
avahi-utils
b3sum
bash-completion
beep
bmon
@@ -8,24 +9,28 @@ ca-certificates
cifs-utils
cryptsetup
curl
dnsutils
dmidecode
dnsutils
dosfstools
e2fsprogs
ecryptfs-utils
exfatprogs
flashrom
fuse3
grub-common
htop
httpdirfs
iotop
iptables
iw
jq
libavahi-client3
libyajl2
linux-cpupower
lm-sensors
lshw
lvm2
lxc
magic-wormhole
man-db
ncdu
@@ -41,8 +46,10 @@ qemu-guest-agent
rsync
samba-common-bin
smartmontools
socat
sqlite3
squashfs-tools
squashfs-tools-ng
sudo
systemd
systemd-resolved
@@ -51,4 +58,5 @@ systemd-timesyncd
tor
util-linux
vim
wireguard-tools
wireless-tools

View File

@@ -1,5 +0,0 @@
+ containerd.io
+ docker-ce
+ docker-ce-cli
+ docker-compose-plugin
- podman

View File

@@ -1,13 +1,13 @@
[
{
"id": "pureboot-librem_mini_v2-basic_usb_autoboot_blob_jail-Release-28.3",
"id": "pureboot-librem_mini_v2-basic_usb_autoboot_blob_jail-Release-29",
"platform": ["x86_64"],
"system-product-name": "librem_mini_v2",
"bios-version": {
"semver-prefix": "PureBoot-Release-",
"semver-range": "<28.3"
"semver-range": "<29"
},
"url": "https://source.puri.sm/firmware/releases/-/raw/98418b5b8e9edc2bd1243ad7052a062f79e2b88e/librem_mini_v2/custom/pureboot-librem_mini_v2-basic_usb_autoboot_blob_jail-Release-28.3.rom.gz",
"shasum": "5019bcf53f7493c7aa74f8ef680d18b5fc26ec156c705a841433aaa2fdef8f35"
"url": "https://source.puri.sm/firmware/releases/-/raw/75631ad6dcf7e6ee73e06a517ac7dc4e017518b7/librem_mini_v2/custom/pureboot-librem_mini_v2-basic_usb_autoboot_blob_jail-Release-29.rom.gz",
"shasum": "96ec04f21b1cfe8e28d9a2418f1ff533efe21f9bbbbf16e162f7c814761b068b"
}
]

View File

@@ -4,6 +4,3 @@ set -e
curl -fsSL https://deb.torproject.org/torproject.org/A3C4F0F979CAA22CDBA8F512EE8CBC9E886DDD89.asc | gpg --dearmor -o- > /usr/share/keyrings/tor-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/tor-archive-keyring.gpg] https://deb.torproject.org/torproject.org bullseye main" > /etc/apt/sources.list.d/tor.list
curl -fsSL https://download.docker.com/linux/debian/gpg | gpg --dearmor -o- > /usr/share/keyrings/docker-archive-keyring.gpg
echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/debian bullseye stable" > /etc/apt/sources.list.d/docker.list

View File

@@ -1,46 +1,107 @@
#!/bin/bash
SOURCE_DIR="$(dirname "${BASH_SOURCE[0]}")"
if [ "$UID" -ne 0 ]; then
>&2 echo 'Must be run as root'
exit 1
fi
POSITIONAL_ARGS=()
while [[ $# -gt 0 ]]; do
case $1 in
--no-sync)
NO_SYNC=1
shift
;;
--create)
ONLY_CREATE=1
shift
;;
-*|--*)
echo "Unknown option $1"
exit 1
;;
*)
POSITIONAL_ARGS+=("$1") # save positional arg
shift # past argument
;;
esac
done
set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters
if [ -z "$NO_SYNC" ]; then
echo 'Syncing...'
rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next
umount -R /media/startos/next 2> /dev/null
umount -R /media/startos/upper 2> /dev/null
rm -rf /media/startos/upper /media/startos/next
mkdir /media/startos/upper
mount -t tmpfs tmpfs /media/startos/upper
mkdir -p /media/startos/upper/data /media/startos/upper/work /media/startos/next
mount -t overlay \
-olowerdir=/media/startos/current,upperdir=/media/startos/upper/data,workdir=/media/startos/upper/work \
overlay /media/startos/next
mkdir -p /media/startos/next/media/startos/root
mount --bind /media/startos/root /media/startos/next/media/startos/root
fi
mkdir -p /media/embassy/next/run
mkdir -p /media/embassy/next/dev
mkdir -p /media/embassy/next/sys
mkdir -p /media/embassy/next/proc
mkdir -p /media/embassy/next/boot
mount --bind /run /media/embassy/next/run
mount --bind /dev /media/embassy/next/dev
mount --bind /sys /media/embassy/next/sys
mount --bind /proc /media/embassy/next/proc
mount --bind /boot /media/embassy/next/boot
if [ -n "$ONLY_CREATE" ]; then
exit 0
fi
mkdir -p /media/startos/next/run
mkdir -p /media/startos/next/dev
mkdir -p /media/startos/next/sys
mkdir -p /media/startos/next/proc
mkdir -p /media/startos/next/boot
mount --bind /run /media/startos/next/run
mount --bind /tmp /media/startos/next/tmp
mount --bind /dev /media/startos/next/dev
mount --bind /sys /media/startos/next/sys
mount --bind /proc /media/startos/next/proc
mount --bind /boot /media/startos/next/boot
if [ -z "$*" ]; then
chroot /media/embassy/next
chroot /media/startos/next
CHROOT_RES=$?
else
chroot /media/embassy/next "$SHELL" -c "$*"
chroot /media/startos/next "$SHELL" -c "$*"
CHROOT_RES=$?
fi
umount /media/embassy/next/run
umount /media/embassy/next/dev
umount /media/embassy/next/sys
umount /media/embassy/next/proc
umount /media/embassy/next/boot
umount /media/startos/next/run
umount /media/startos/next/tmp
umount /media/startos/next/dev
umount /media/startos/next/sys
umount /media/startos/next/proc
umount /media/startos/next/boot
umount /media/startos/next/media/startos/root
if [ "$CHROOT_RES" -eq 0 ]; then
if [ -h /media/startos/config/current.rootfs ] && [ -e /media/startos/config/current.rootfs ]; then
${SOURCE_DIR}/prune-images $(du -s --bytes /media/startos/next | awk '{print $1}')
fi
echo 'Upgrading...'
touch /media/embassy/config/upgrade
if ! time mksquashfs /media/startos/next /media/startos/images/next.squashfs -b 4096 -comp gzip; then
umount -R /media/startos/next
umount -R /media/startos/upper
rm -rf /media/startos/upper /media/startos/next
exit 1
fi
hash=$(b3sum /media/startos/images/next.squashfs | head -c 32)
mv /media/startos/images/next.squashfs /media/startos/images/${hash}.rootfs
ln -rsf /media/startos/images/${hash}.rootfs /media/startos/config/current.rootfs
sync
reboot
fi
fi
umount -R /media/startos/next
umount -R /media/startos/upper
rm -rf /media/startos/upper /media/startos/next

View File

@@ -1 +0,0 @@
start-cli net dhcp update $interface

View File

@@ -1,98 +0,0 @@
# Local filesystem mounting -*- shell-script -*-
#
# This script overrides local_mount_root() in /scripts/local
# and mounts root as a read-only filesystem with a temporary (rw)
# overlay filesystem.
#
. /scripts/local
local_mount_root()
{
echo 'using embassy initramfs module'
local_top
local_device_setup "${ROOT}" "root file system"
ROOT="${DEV}"
# Get the root filesystem type if not set
if [ -z "${ROOTFSTYPE}" ]; then
FSTYPE=$(get_fstype "${ROOT}")
else
FSTYPE=${ROOTFSTYPE}
fi
local_premount
# CHANGES TO THE ORIGINAL FUNCTION BEGIN HERE
# N.B. this code still lacks error checking
modprobe ${FSTYPE}
checkfs ${ROOT} root "${FSTYPE}"
ROOTFLAGS="$(echo "${ROOTFLAGS}" | sed 's/subvol=\(next\|current\)//' | sed 's/^-o *$//')"
if [ "${FSTYPE}" != "unknown" ]; then
mount -t ${FSTYPE} ${ROOTFLAGS} ${ROOT} ${rootmnt}
else
mount ${ROOTFLAGS} ${ROOT} ${rootmnt}
fi
echo 'mounting embassyfs'
mkdir /embassyfs
mount --move ${rootmnt} /embassyfs
if ! [ -d /embassyfs/current ] && [ -d /embassyfs/prev ]; then
mv /embassyfs/prev /embassyfs/current
fi
if ! [ -d /embassyfs/current ]; then
mkdir /embassyfs/current
for FILE in $(ls /embassyfs); do
if [ "$FILE" != current ]; then
mv /embassyfs/$FILE /embassyfs/current/
fi
done
fi
mkdir -p /embassyfs/config
if [ -f /embassyfs/config/upgrade ] && [ -d /embassyfs/next ]; then
mv /embassyfs/current /embassyfs/prev
mv /embassyfs/next /embassyfs/current
rm /embassyfs/config/upgrade
fi
if ! [ -d /embassyfs/next ]; then
if [ -d /embassyfs/prev ]; then
mv /embassyfs/prev /embassyfs/next
else
mkdir /embassyfs/next
fi
fi
mkdir /lower /upper
mount -r --bind /embassyfs/current /lower
modprobe overlay || insmod "/lower/lib/modules/$(uname -r)/kernel/fs/overlayfs/overlay.ko"
# Mount a tmpfs for the overlay in /upper
mount -t tmpfs tmpfs /upper
mkdir /upper/data /upper/work
# Mount the final overlay-root in $rootmnt
mount -t overlay \
-olowerdir=/lower,upperdir=/upper/data,workdir=/upper/work \
overlay ${rootmnt}
mkdir -p ${rootmnt}/media/embassy/config
mount --bind /embassyfs/config ${rootmnt}/media/embassy/config
mkdir -p ${rootmnt}/media/embassy/next
mount --bind /embassyfs/next ${rootmnt}/media/embassy/next
mkdir -p ${rootmnt}/media/embassy/embassyfs
mount -r --bind /embassyfs ${rootmnt}/media/embassy/embassyfs
}

View File

@@ -4,7 +4,7 @@ set -e
# install dependencies
/usr/bin/apt update
/usr/bin/apt install --no-install-recommends -y xserver-xorg x11-xserver-utils xinit firefox-esr matchbox-window-manager libnss3-tools
/usr/bin/apt install --no-install-recommends -y xserver-xorg x11-xserver-utils xinit firefox-esr matchbox-window-manager libnss3-tools p11-kit-modules
#Change a default preference set by stock debian firefox-esr
sed -i 's|^pref("extensions.update.enabled", true);$|pref("extensions.update.enabled", false);|' /etc/firefox-esr/firefox-esr.js
@@ -14,14 +14,8 @@ if ! id kiosk; then
useradd -s /bin/bash --create-home kiosk
fi
# create kiosk script
cat > /home/kiosk/kiosk.sh << 'EOF'
#!/bin/sh
PROFILE=$(mktemp -d)
if [ -f /usr/local/share/ca-certificates/startos-root-ca.crt ]; then
certutil -A -n "StartOS Local Root CA" -t "TCu,Cuw,Tuw" -i /usr/local/share/ca-certificates/startos-root-ca.crt -d $PROFILE
fi
cat >> $PROFILE/prefs.js << EOT
mkdir /home/kiosk/fx-profile
cat >> /home/kiosk/fx-profile/prefs.js << EOF
user_pref("app.normandy.api_url", "");
user_pref("app.normandy.enabled", false);
user_pref("app.shield.optoutstudies.enabled", false);
@@ -87,7 +81,13 @@ user_pref("toolkit.telemetry.shutdownPingSender.enabled", false);
user_pref("toolkit.telemetry.unified", false);
user_pref("toolkit.telemetry.updatePing.enabled", false);
user_pref("toolkit.telemetry.cachedClientID", "");
EOT
EOF
ln -sf /usr/lib/$(uname -m)-linux-gnu/pkcs11/p11-kit-trust.so /usr/lib/firefox-esr/libnssckbi.so
# create kiosk script
cat > /home/kiosk/kiosk.sh << 'EOF'
#!/bin/sh
while ! curl "http://localhost" > /dev/null; do
sleep 1
done
@@ -101,8 +101,9 @@ done
killall firefox-esr
) &
matchbox-window-manager -use_titlebar no &
firefox-esr http://localhost --profile $PROFILE
rm -rf $PROFILE
cp -r /home/kiosk/fx-profile /home/kiosk/fx-profile-tmp
firefox-esr http://localhost --profile /home/kiosk/fx-profile-tmp
rm -rf /home/kiosk/fx-profile-tmp
EOF
chmod +x /home/kiosk/kiosk.sh
@@ -116,6 +117,8 @@ fi
EOF
fi
chown -R kiosk:kiosk /home/kiosk
# enable autologin
mkdir -p /etc/systemd/system/getty@tty1.service.d
cat > /etc/systemd/system/getty@tty1.service.d/autologin.conf << 'EOF'

View File

@@ -0,0 +1,105 @@
#!/bin/bash
# Define the output file
OUTPUT_FILE="system_debug_info.txt"
# Check if the script is run as root, if not, restart with sudo
if [ "$(id -u)" -ne 0 ]; then
exec sudo bash "$0" "$@"
fi
# Create or clear the output file and add a header
echo "===================================================================" > "$OUTPUT_FILE"
echo " StartOS System Debug Information " >> "$OUTPUT_FILE"
echo "===================================================================" >> "$OUTPUT_FILE"
echo "Generated on: $(date)" >> "$OUTPUT_FILE"
echo "" >> "$OUTPUT_FILE"
# Function to check if a command exists
command_exists() {
command -v "$1" >/dev/null 2>&1
}
# Function to run a command if it exists and append its output to the file with headers
run_command() {
local CMD="$1"
local DESC="$2"
local CMD_NAME="${CMD%% *}" # Extract the command name (first word)
if command_exists "$CMD_NAME"; then
echo "===================================================================" >> "$OUTPUT_FILE"
echo "COMMAND: $CMD" >> "$OUTPUT_FILE"
echo "DESCRIPTION: $DESC" >> "$OUTPUT_FILE"
echo "===================================================================" >> "$OUTPUT_FILE"
echo "" >> "$OUTPUT_FILE"
eval "$CMD" >> "$OUTPUT_FILE" 2>&1
echo "" >> "$OUTPUT_FILE"
else
echo "===================================================================" >> "$OUTPUT_FILE"
echo "COMMAND: $CMD" >> "$OUTPUT_FILE"
echo "DESCRIPTION: $DESC" >> "$OUTPUT_FILE"
echo "===================================================================" >> "$OUTPUT_FILE"
echo "SKIPPED: Command not found" >> "$OUTPUT_FILE"
echo "" >> "$OUTPUT_FILE"
fi
}
# Collecting basic system information
run_command "start-cli --version; start-cli git-info" "StartOS CLI version and Git information"
run_command "hostname" "Hostname of the system"
run_command "uname -a" "Kernel version and system architecture"
# Services Info
run_command "start-cli lxc stats" "All Running Services"
# Collecting CPU information
run_command "lscpu" "CPU architecture information"
run_command "cat /proc/cpuinfo" "Detailed CPU information"
# Collecting memory information
run_command "free -h" "Available and used memory"
run_command "cat /proc/meminfo" "Detailed memory information"
# Collecting storage information
run_command "lsblk" "List of block devices"
run_command "df -h" "Disk space usage"
run_command "fdisk -l" "Detailed disk partition information"
# Collecting network information
run_command "ip a" "Network interfaces and IP addresses"
run_command "ip route" "Routing table"
run_command "netstat -i" "Network interface statistics"
# Collecting RAID information (if applicable)
run_command "cat /proc/mdstat" "List of RAID devices (if applicable)"
# Collecting virtualization information
run_command "egrep -c '(vmx|svm)' /proc/cpuinfo" "Check if CPU supports virtualization"
run_command "systemd-detect-virt" "Check if the system is running inside a virtual machine"
# Final message
echo "===================================================================" >> "$OUTPUT_FILE"
echo " End of StartOS System Debug Information " >> "$OUTPUT_FILE"
echo "===================================================================" >> "$OUTPUT_FILE"
# Prompt user to send the log file to a Start9 Technician
echo "System debug information has been collected in $OUTPUT_FILE."
echo ""
echo "Would you like to send this log file to a Start9 Technician? (yes/no)"
read SEND_LOG
if [[ "$SEND_LOG" == "yes" || "$SEND_LOG" == "y" ]]; then
if command -v wormhole >/dev/null 2>&1; then
echo ""
echo "==================================================================="
echo " Running wormhole to send the file. Please follow the "
echo " instructions and provide the code to the Start9 support team. "
echo "==================================================================="
wormhole send "$OUTPUT_FILE"
echo "==================================================================="
else
echo "Error: wormhole command not found."
fi
else
echo "Log file not sent. You can manually share $OUTPUT_FILE with the Start9 support team if needed."
fi

View File

@@ -3,8 +3,8 @@
ARGS=
for ARG in $@; do
if [ -d "/media/embassy/embassyfs" ] && [ "$ARG" = "/" ]; then
ARG=/media/embassy/embassyfs
if [ -d "/media/startos/root" ] && [ "$ARG" = "/" ]; then
ARG=/media/startos/root
fi
ARGS="$ARGS $ARG"
done

50
build/lib/scripts/prune-images Executable file
View File

@@ -0,0 +1,50 @@
#!/bin/bash
if [ "$UID" -ne 0 ]; then
>&2 echo 'Must be run as root'
exit 1
fi
POSITIONAL_ARGS=()
while [[ $# -gt 0 ]]; do
case $1 in
-*|--*)
echo "Unknown option $1"
exit 1
;;
*)
POSITIONAL_ARGS+=("$1") # save positional arg
shift # past argument
;;
esac
done
set -- "${POSITIONAL_ARGS[@]}" # restore positional parameters
needed=$1
if [ -z "$needed" ]; then
>&2 echo "usage: $0 <SPACE NEEDED>"
exit 1
fi
if [ -h /media/startos/config/current.rootfs ] && [ -e /media/startos/config/current.rootfs ]; then
echo 'Pruning...'
current="$(readlink -f /media/startos/config/current.rootfs)"
while [[ "$(df -B1 --output=avail --sync /media/startos/images | tail -n1)" -lt "$needed" ]]; do
to_prune="$(ls -t1 /media/startos/images/*.rootfs /media/startos/images/*.squashfs 2> /dev/null | grep -v "$current" | tail -n1)"
if [ -e "$to_prune" ]; then
echo " Pruning $to_prune"
rm -rf "$to_prune"
sync
else
>&2 echo "Not enough space and nothing to prune!"
exit 1
fi
done
echo 'done.'
else
>&2 echo 'No current.rootfs, not safe to prune'
exit 1
fi

View File

@@ -0,0 +1,114 @@
# Local filesystem mounting -*- shell-script -*-
#
# This script overrides local_mount_root() in /scripts/local
# and mounts root as a read-only filesystem with a temporary (rw)
# overlay filesystem.
#
. /scripts/local
local_mount_root()
{
echo 'using startos initramfs module'
local_top
local_device_setup "${ROOT}" "root file system"
ROOT="${DEV}"
# Get the root filesystem type if not set
if [ -z "${ROOTFSTYPE}" ]; then
FSTYPE=$(get_fstype "${ROOT}")
else
FSTYPE=${ROOTFSTYPE}
fi
local_premount
# CHANGES TO THE ORIGINAL FUNCTION BEGIN HERE
# N.B. this code still lacks error checking
modprobe ${FSTYPE}
checkfs ${ROOT} root "${FSTYPE}"
echo 'mounting startos'
mkdir /startos
ROOTFLAGS="$(echo "${ROOTFLAGS}" | sed 's/subvol=\(next\|current\)//' | sed 's/^-o *$//')"
if [ "${FSTYPE}" != "unknown" ]; then
mount -t ${FSTYPE} ${ROOTFLAGS} ${ROOT} /startos
else
mount ${ROOTFLAGS} ${ROOT} /startos
fi
if [ -d /startos/images ]; then
if [ -h /startos/config/current.rootfs ] && [ -e /startos/config/current.rootfs ]; then
image=$(readlink -f /startos/config/current.rootfs)
else
image="$(ls -t1 /startos/images/*.rootfs | head -n1)"
fi
if ! [ -f "$image" ]; then
>&2 echo "image $image not available to boot"
exit 1
fi
else
if [ -f /startos/config/upgrade ] && [ -d /startos/next ]; then
oldroot=/startos/next
elif [ -d /startos/current ]; then
oldroot=/startos/current
elif [ -d /startos/prev ]; then
oldroot=/startos/prev
else
>&2 echo no StartOS filesystem found
exit 1
fi
mkdir -p /startos/config/overlay/etc
mv $oldroot/etc/fstab /startos/config/overlay/etc/fstab
mv $oldroot/etc/machine-id /startos/config/overlay/etc/machine-id
mv $oldroot/etc/ssh /startos/config/overlay/etc/ssh
mkdir -p /startos/images
mv $oldroot /startos/images/legacy.rootfs
rm -rf /startos/next /startos/current /startos/prev
ln -rsf /startos/images/old.squashfs /startos/config/current.rootfs
image=$(readlink -f /startos/config/current.rootfs)
fi
mkdir /lower /upper
if [ -d "$image" ]; then
mount -r --bind $image /lower
elif [ -f "$image" ]; then
modprobe squashfs
mount -r $image /lower
else
>&2 echo "not a regular file or directory: $image"
exit 1
fi
modprobe overlay || insmod "/lower/lib/modules/$(uname -r)/kernel/fs/overlayfs/overlay.ko"
# Mount a tmpfs for the overlay in /upper
mount -t tmpfs tmpfs /upper
mkdir /upper/data /upper/work
mkdir -p /startos/config/overlay
# Mount the final overlay-root in $rootmnt
mount -t overlay \
-olowerdir=/startos/config/overlay:/lower,upperdir=/upper/data,workdir=/upper/work \
overlay ${rootmnt}
mkdir -p ${rootmnt}/media/startos/config
mount --bind /startos/config ${rootmnt}/media/startos/config
mkdir -p ${rootmnt}/media/startos/images
mount --bind /startos/images ${rootmnt}/media/startos/images
mkdir -p ${rootmnt}/media/startos/root
mount -r --bind /startos ${rootmnt}/media/startos/root
mkdir -p ${rootmnt}/media/startos/current
mount -r --bind /lower ${rootmnt}/media/startos/current
}

View File

@@ -0,0 +1,555 @@
#!/bin/bash
# =============================================================================
# Wireguard VPS Proxy Setup
# =============================================================================
#
# This script automates the setup of a WireGuard VPN server on a remote VPS
# for StartOS Clearnet functionality. It handles:
#
# 1. SSH key-based authentication setup
# 2. Root access configuration (if needed)
# 3. WireGuard server installation
# 4. Configuration file generation and import
#
# Usage:
# wireguard-vps-proxy-setup [-h] [-i IP] [-u USERNAME] [-p PORT] [-k SSH_KEY]
#
# Options:
# -h Show help message
# -i VPS IP address
# -u SSH username (default: root)
# -p SSH port (default: 22)
# -k Path to custom SSH private key
#
# Example:
# wireguard-vps-proxy-setup -i 110.18.1.1 -u debian
#
# Note: This script requires root privileges and will auto-elevate if needed.
# =============================================================================
# Colors for better output
RED='\033[0;31m'
GREEN='\033[0;32m'
BLUE='\033[1;34m'
YELLOW='\033[1;33m'
NC='\033[0;37m' # No Color
# --- Constants ---
readonly WIREGUARD_INSTALL_URL="https://raw.githubusercontent.com/start9labs/wireguard-vps-proxy-setup/master/wireguard-install.sh"
readonly SSH_KEY_DIR="/home/start9/.ssh"
readonly SSH_KEY_NAME="id_ed25519"
# Default key paths. Deliberately NOT readonly: they are reassigned later when
# the user passes a custom key with -k. A readonly variable cannot be
# reassigned in bash (the assignment fails with "readonly variable"), which
# made the -k option silently fall back to the default key.
SSH_PRIVATE_KEY="$SSH_KEY_DIR/$SSH_KEY_NAME"
SSH_PUBLIC_KEY="$SSH_PRIVATE_KEY.pub"
# Store original arguments
SCRIPT_ARGS=("$@")
# --- Functions ---
# Ensure the script is running as root, re-launching itself through sudo
# (with the original arguments) when it is not.
check_root() {
    if (( EUID != 0 )); then
        exec sudo "$0" "${SCRIPT_ARGS[@]}"
    fi
    # After elevation, hand ownership of the SSH key dir back to the StartOS user.
    sudo chown -R start9:startos "$SSH_KEY_DIR"
}
# Print the decorative startup banner.
print_banner() {
    local rule="================================================"
    echo -e "${BLUE}"
    echo "$rule"
    echo -e " ${NC}Wireguard VPS Proxy Setup${BLUE} "
    echo "$rule"
    echo -e "${NC}"
}
# Show command-line usage and option descriptions.
print_usage() {
    cat <<EOF
Usage: $0 [-h] [-i IP] [-u USERNAME] [-p PORT] [-k SSH_KEY]
Options:
 -h Show this help message
 -i VPS IP address
 -u SSH username (default: root)
 -p SSH port (default: 22)
 -k Path to the custom SSH private key (optional)
 If no key is provided, the default key '$SSH_PRIVATE_KEY' will be used.
EOF
}
# Print the final success summary shown after the whole setup completes.
display_end_message() {
    local rule="${BLUE}------------------------------------------------------------------${NC}"
    echo -e "\n$rule"
    echo -e "${GREEN}Wireguard VPS Proxy server setup complete!${NC}"
    echo -e "$rule"
    echo -e "\n${GREEN}Clearnet functionality has been enabled via VPS (${VPS_IP})${NC}"
    echo -e "\n${YELLOW}Next steps:${NC}"
    echo -e "Visit https://docs.start9.com to complete the Clearnet setup"
    echo -e "\n$rule"
}
# Validate an IPv4 or IPv6 address. Returns 0 when valid, 1 otherwise.
# The IPv6 alternatives below are the de-duplicated union of the original
# pattern list: exact duplicates and patterns that were strict subsets of
# another pattern have been removed, so the accepted language is unchanged.
validate_ip() {
    local ip=$1
    # IPv4: four dot-separated 1-3 digit groups, each octet <= 255
    if [[ $ip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
        local IFS='.'
        read -ra ADDR <<< "$ip"
        for i in "${ADDR[@]}"; do
            if [ "$i" -gt 255 ]; then
                return 1
            fi
        done
        return 0
    # IPv6: full 8-group form, "::"-compressed forms (leading, trailing, and
    # interior, with 1-4 hex digits per group), and the all-zeros "::".
    elif [[ $ip =~ ^([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}$ ]] || \
         [[ $ip =~ ^::([0-9a-fA-F]{1,4}:){0,7}[0-9a-fA-F]{1,4}$ ]] || \
         [[ $ip =~ ^[0-9a-fA-F]{1,4}::([0-9a-fA-F]{1,4}:){0,6}[0-9a-fA-F]{1,4}$ ]] || \
         [[ $ip =~ ^([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}$ ]] || \
         [[ $ip =~ ^([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}$ ]] || \
         [[ $ip =~ ^([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}$ ]] || \
         [[ $ip =~ ^([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}$ ]] || \
         [[ $ip =~ ^([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}$ ]] || \
         [[ $ip =~ ^([0-9a-fA-F]{1,4}:){1,7}:$ ]] || \
         [[ $ip =~ ^::$ ]]; then
        return 0
    else
        return 1
    fi
}
# Function for configuring SSH key authentication on remote server
# Runs a remote snippet over SSH (single-quoted, so it expands on the VPS,
# not locally) that enables PubkeyAuthentication, PermitRootLogin, and
# AuthorizedKeysFile in /etc/ssh/sshd_config, then reloads sshd.
# Assumes the SSH user has permission to edit sshd_config directly (root).
# NOTE(review): this path writes "PermitRootLogin yes" while enable_root_login
# writes the stricter "prohibit-password" — confirm the difference is intended.
configure_ssh_key_auth() {
echo -e "${BLUE}Configuring SSH key authentication on remote server...${NC}"
ssh -i "$SSH_PRIVATE_KEY" -o StrictHostKeyChecking=no -p "$SSH_PORT" "$SSH_USER@$VPS_IP" '
# Check if PubkeyAuthentication is commented out
if grep -q "^#PubkeyAuthentication" /etc/ssh/sshd_config; then
sed -i "s/^#PubkeyAuthentication.*/PubkeyAuthentication yes/" /etc/ssh/sshd_config
# Check if PubkeyAuthentication exists but is not enabled
elif grep -q "^PubkeyAuthentication" /etc/ssh/sshd_config; then
sed -i "s/^PubkeyAuthentication.*/PubkeyAuthentication yes/" /etc/ssh/sshd_config
# Add PubkeyAuthentication if it doesnt exist
else
echo "PubkeyAuthentication yes" >> /etc/ssh/sshd_config
fi
# Enable root login
if grep -q "^#PermitRootLogin" /etc/ssh/sshd_config; then
sed -i "s/^#PermitRootLogin.*/PermitRootLogin yes/" /etc/ssh/sshd_config
elif grep -q "^PermitRootLogin" /etc/ssh/sshd_config; then
sed -i "s/^PermitRootLogin.*/PermitRootLogin yes/" /etc/ssh/sshd_config
else
echo "PermitRootLogin yes" >> /etc/ssh/sshd_config
fi
# Configure AuthorizedKeysFile if needed
if grep -q "^#AuthorizedKeysFile" /etc/ssh/sshd_config; then
sed -i "s/^#AuthorizedKeysFile.*/AuthorizedKeysFile .ssh\/authorized_keys .ssh\/authorized_keys2/" /etc/ssh/sshd_config
elif ! grep -q "^AuthorizedKeysFile" /etc/ssh/sshd_config; then
echo "AuthorizedKeysFile .ssh/authorized_keys .ssh/authorized_keys2" >> /etc/ssh/sshd_config
fi
# Reload SSH service
systemctl reload sshd
'
}
# Locate the newest WireGuard client config (*.conf) in the remote user's
# home directory and download it into the current directory.
# On success, sets the global CONFIG_NAME (consumed later by the main flow).
handle_startos_connection() {
    echo -e "${BLUE}Fetching the WireGuard configuration file...${NC}"
    local remote_conf
    remote_conf=$(ssh -i "$SSH_PRIVATE_KEY" -o StrictHostKeyChecking=no -p "$SSH_PORT" "$SSH_USER@$VPS_IP" 'ls -t ~/*.conf 2>/dev/null | head -n 1')
    if [ -z "$remote_conf" ]; then
        echo -e "${RED}Error: No WireGuard configuration file found on the remote server.${NC}"
        return 1
    fi
    CONFIG_NAME=$(basename "$remote_conf")
    if ! scp -i "$SSH_PRIVATE_KEY" -o StrictHostKeyChecking=no -P "$SSH_PORT" "$SSH_USER@$VPS_IP":~/"$CONFIG_NAME" ./; then
        echo -e "${RED}Error: Failed to download the WireGuard configuration file.${NC}"
        return 1
    fi
    echo -e "${GREEN}WireGuard configuration file '$CONFIG_NAME' downloaded successfully.${NC}"
    return 0
}
# Import a downloaded WireGuard configuration into NetworkManager.
# $1 - config file name (e.g. "clearnet.conf"); the connection name is the
#      file name without the .conf extension.
# Prompts before replacing an existing connection of the same name, removes
# the config file after a successful (or declined) import, and prints the
# final summary via display_end_message on success.
import_wireguard_config() {
    local config_name="$1"
    if [ -z "$config_name" ]; then
        echo -e "${RED}Error: Configuration file name is missing.${NC}"
        return 1
    fi
    # Without NetworkManager we cannot import; keep the file for manual setup.
    if ! command -v nmcli &>/dev/null; then
        echo -e "${YELLOW}Warning: NetworkManager 'nmcli' not found. Configuration file '$config_name' saved in current directory.${NC}"
        echo -e "${YELLOW}Import the configuration to your StartOS manually by going to NetworkManager or using wg-quick up <config> command${NC}"
        return 0
    fi
    local connection_name
    connection_name=$(basename "$config_name" .conf) #Extract base name without extension
    # Check ALL connections, not only active ones: an inactive connection with
    # the same name would previously slip past the duplicate check.
    if nmcli connection show | grep -q "^${connection_name}\s"; then
        read -r -p "A connection with the name '$connection_name' already exists. Do you want to override it? (y/N): " answer
        if [[ ! "$answer" =~ ^[Yy]$ ]]; then
            echo -e "${BLUE}Skipping import of the WireGuard configuration.${NC}"
            rm -f "$config_name"
            return 0
        fi
        if ! nmcli connection delete "$connection_name"; then
            echo -e "${RED}Error: Failed to delete existing connection '$connection_name'.${NC}"
            return 1
        fi
    fi
    # Import (either fresh, or after the user chose to override).
    if ! nmcli connection import type wireguard file "$config_name"; then
        echo -e "${RED}Error: Failed to import the WireGuard configuration using NetworkManager.${NC}"
        rm -f "$config_name"
        return 1
    fi
    echo -e "${GREEN}WireGuard configuration '$config_name' has been imported to NetworkManager.${NC}"
    rm -f "$config_name"
    display_end_message
    return 0
}
# Download the latest WireGuard install script into the current directory
# and mark it executable. Returns non-zero on any failure.
download_install_script() {
    echo -e "${BLUE}Downloading latest WireGuard install script...${NC}"
    # -sSf: quiet, show errors, and fail on HTTP errors so we never save an
    # HTML error page as the script.
    if ! curl -sSf "$WIREGUARD_INSTALL_URL" -o wireguard-install.sh; then
        echo -e "${RED}Failed to download WireGuard installation script.${NC}"
        return 1
    fi
    if ! chmod +x wireguard-install.sh; then
        echo -e "${RED}Failed to chmod +x wireguard install script.${NC}"
        return 1
    fi
    echo -e "${GREEN}WireGuard install script downloaded successfully!${NC}"
    return 0
}
# Run the previously-uploaded WireGuard installer on the VPS over an
# interactive SSH session, then verify the install by checking that
# /etc/wireguard/wg0.conf exists on the remote host.
install_wireguard() {
    echo -e "\n${BLUE}Installing WireGuard...${NC}"
    # The local copy must exist — it is what was uploaded to the VPS earlier.
    if [ ! -f "wireguard-install.sh" ]; then
        echo -e "${RED}WireGuard install script is missing. Did it fail to download?${NC}"
        return 1
    fi
    # -t allocates a TTY so the remote installer can prompt the user;
    # STARTOS_HOSTNAME=clearnet names the resulting interface/config.
    if ! ssh -o ConnectTimeout=60 -i "$SSH_PRIVATE_KEY" -o StrictHostKeyChecking=no -p "$SSH_PORT" -t "$SSH_USER@$VPS_IP" "bash -c 'export TERM=xterm-256color; export STARTOS_HOSTNAME=clearnet; bash ~/wireguard-install.sh'"; then
        echo -e "${RED}WireGuard installation failed on remote server.${NC}"
        return 1
    fi
    # Treat a missing wg0.conf as failure even if the installer exited 0.
    if ! ssh -q -o BatchMode=yes -o ConnectTimeout=5 -i "$SSH_PRIVATE_KEY" -o StrictHostKeyChecking=no -p "$SSH_PORT" "$SSH_USER@$VPS_IP" "test -f /etc/wireguard/wg0.conf"; then
        echo -e "\n${RED}WireGuard installation failed because /etc/wireguard/wg0.conf is missing, which means the script removed it.${NC}"
        return 1
    fi
    echo -e "\n${GREEN}WireGuard installation completed successfully!${NC}"
    return 0
}
# Function to enable root login via SSH
# Connects as the current (non-root) SSH user and, via sudo on the VPS:
# backs up sshd_config, enables key-only root login (PermitRootLogin
# prohibit-password), disables password authentication, copies the user's
# authorized_keys to /root/.ssh, reloads sshd, and verifies the change.
# The remote snippet is single-quoted so everything expands on the VPS.
# Requires the remote user to be in the sudo group; may prompt for the
# remote sudo password.
enable_root_login() {
echo -e "${BLUE}Checking and configuring root SSH access...${NC}"
# Try to modify sshd config using sudo
if ! ssh -i "$SSH_PRIVATE_KEY" -o StrictHostKeyChecking=no -p "$SSH_PORT" "$SSH_USER@$VPS_IP" '
# Check if we can use sudo without password
if ! sudo -n true 2>/dev/null; then
echo -e "\033[1;33mNOTE: You may be prompted for your sudo password.\033[0m"
fi
# Check if user is in sudo group
if ! groups | grep -q sudo; then
echo -e "\033[1;31mError: Your user is not in the sudo group. Root access cannot be configured.\033[0m"
exit 1
fi
# Backup sshd config
sudo cp /etc/ssh/sshd_config /etc/ssh/sshd_config.bak
# Enable root login with SSH keys only
if sudo grep -q "^PermitRootLogin" /etc/ssh/sshd_config; then
sudo sed -i "s/^PermitRootLogin.*/PermitRootLogin prohibit-password/" /etc/ssh/sshd_config
else
echo "PermitRootLogin prohibit-password" | sudo tee -a /etc/ssh/sshd_config
fi
# Ensure password authentication is disabled
if sudo grep -q "^PasswordAuthentication" /etc/ssh/sshd_config; then
sudo sed -i "s/^PasswordAuthentication.*/PasswordAuthentication no/" /etc/ssh/sshd_config
else
echo "PasswordAuthentication no" | sudo tee -a /etc/ssh/sshd_config
fi
# Set up root SSH directory and keys
echo -e "\033[1;33mSetting up root SSH access...\033[0m"
sudo mkdir -p /root/.ssh
sudo cp ~/.ssh/authorized_keys /root/.ssh/
sudo chown -R root:root /root/.ssh
sudo chmod 700 /root/.ssh
sudo chmod 600 /root/.ssh/authorized_keys
# Reload SSH service
sudo systemctl reload sshd
# Verify the changes
if ! sudo grep -q "^PermitRootLogin prohibit-password" /etc/ssh/sshd_config; then
echo -e "\033[1;31mError: Failed to verify root login configuration.\033[0m"
exit 1
fi
# Test root SSH access
if ! sudo -n true 2>/dev/null; then
echo -e "\033[1;33mNOTE: Please try to log in as root now using your SSH key.\033[0m"
echo -e "\033[1;33mIf successful, run this script again without the -u parameter.\033[0m"
else
echo -e "\033[1;32mRoot SSH access has been configured successfully!\033[0m"
fi
'; then
echo -e "${RED}Failed to configure root SSH access.${NC}"
return 1
fi
echo -e "${GREEN}Root SSH access has been configured successfully!${NC}"
echo -e "${YELLOW}Please try to log in as root now using your SSH key. If successful, run this script again without the -u parameter.${NC}"
return 0
}
# --- Main Script ---
# Initialize variables
VPS_IP=""
SSH_USER="root"
SSH_PORT="22"
CUSTOM_SSH_KEY=""
CONFIG_NAME=""
# Check if the script is run as root before anything else
# (check_root re-execs this script through sudo when needed)
check_root
# Print banner
print_banner
# Parse command line arguments
while getopts "hi:u:p:k:" opt; do
case $opt in
h)
print_usage
exit 0
;;
i)
VPS_IP=$OPTARG
;;
u)
SSH_USER=$OPTARG
;;
p)
SSH_PORT=$OPTARG
;;
k)
CUSTOM_SSH_KEY=$OPTARG
;;
\?)
echo "Invalid option: -$OPTARG" >&2
print_usage
exit 1
;;
esac
done
# Check if custom SSH key is passed and update the private key variable
if [ -n "$CUSTOM_SSH_KEY" ]; then
if [ ! -f "$CUSTOM_SSH_KEY" ]; then
echo -e "${RED}Custom SSH key '$CUSTOM_SSH_KEY' not found.${NC}"
exit 1
fi
# NOTE(review): if SSH_PRIVATE_KEY/SSH_PUBLIC_KEY are still declared readonly
# near the top of this script, these assignments fail at runtime and -k is
# silently ignored — verify the declarations are plain assignments.
SSH_PRIVATE_KEY="$CUSTOM_SSH_KEY"
SSH_PUBLIC_KEY="$CUSTOM_SSH_KEY.pub"
else
# Use default StartOS SSH key
if [ ! -f "$SSH_PRIVATE_KEY" ]; then
echo -e "${RED}No SSH key found at default location '$SSH_PRIVATE_KEY'. Please ensure StartOS SSH keys are properly configured.${NC}"
exit 1
fi
fi
# The matching public key is required for ssh-copy-id below.
if [ ! -f "$SSH_PUBLIC_KEY" ]; then
echo -e "${RED}Public key '$SSH_PUBLIC_KEY' not found. Please ensure both private and public keys exist.${NC}"
exit 1
fi
# If VPS_IP is not provided via command line, ask for it
# (loop until validate_ip accepts the input)
if [ -z "$VPS_IP" ]; then
while true; do
echo -n "Please enter your VPS IP address: "
read VPS_IP
if validate_ip "$VPS_IP"; then
break
else
echo -e "${RED}Invalid IP address format. Please try again.${NC}"
fi
done
fi
# Confirm SSH connection details
echo -e "\n${GREEN}Connection details:${NC}"
echo "VPS IP: $VPS_IP"
echo "SSH User: $SSH_USER"
echo "SSH Port: $SSH_PORT"
echo -e "\n${GREEN}Proceeding with SSH key-based authentication...${NC}\n"
# Copy SSH public key to the remote server
# (may prompt once for the remote user's password)
if ! ssh-copy-id -i "$SSH_PUBLIC_KEY" -o StrictHostKeyChecking=no -p "$SSH_PORT" "$SSH_USER@$VPS_IP"; then
echo -e "${RED}Failed to copy SSH key to the remote server. Please ensure you have correct credentials.${NC}"
exit 1
fi
echo -e "${GREEN}SSH key-based authentication configured successfully!${NC}"
# Test SSH connection using key-based authentication
# BatchMode=yes makes the test fail instead of falling back to a password prompt.
echo -e "\nTesting SSH connection with key-based authentication..."
if ! ssh -q -o BatchMode=yes -o ConnectTimeout=5 -i "$SSH_PRIVATE_KEY" -o StrictHostKeyChecking=no -p "$SSH_PORT" "$SSH_USER@$VPS_IP" 'exit'; then
echo -e "${RED}SSH connection test failed. Please check your credentials and try again.${NC}"
exit 1
fi
# If we're connecting as a non-root user, set up root access first
# (the installer must run as root; after enabling root login the user is
# asked to re-run this script without -u)
if [ "$SSH_USER" != "root" ]; then
echo -e "\n${YELLOW}You are connecting as a non-root user. This script needs to enable root SSH access.${NC}"
echo -e "${YELLOW}This is a one-time setup that will allow direct root login for WireGuard installation.${NC}"
echo -n -e "${YELLOW}Would you like to proceed? (y/N): ${NC}"
read -r answer
if [[ "$answer" =~ ^[Yy]$ ]]; then
if enable_root_login; then
echo -e "\n${BLUE}------------------------------------------------------------------${NC}"
echo -e "${GREEN}Root SSH access has been configured successfully!${NC}"
echo -e "${YELLOW}Please run this script again without the -u parameter to continue setup.${NC}"
echo -e "${BLUE}------------------------------------------------------------------${NC}"
exit 0
else
echo -e "${RED}Failed to configure root SSH access. Please check your sudo privileges and try again.${NC}"
exit 1
fi
else
# User declined automatic setup: print manual instructions and bail out.
echo -e "\n${BLUE}------------------------------------------------------------------${NC}"
echo -e "${YELLOW}To manually configure SSH for root access:${NC}"
echo -e "\n ${YELLOW}1. Connect to your VPS and edit sshd_config:${NC}"
echo " sudo nano /etc/ssh/sshd_config"
echo -e "\n ${YELLOW}2. Find and uncomment or add these lines:${NC}"
echo " PubkeyAuthentication yes"
echo " PermitRootLogin yes"
echo " AuthorizedKeysFile .ssh/authorized_keys .ssh/authorized_keys2"
echo -e "\n ${YELLOW}3. Restart the SSH service:${NC}"
echo " sudo systemctl restart sshd"
echo -e "\n ${YELLOW}4. Copy your SSH key to root user:${NC}"
echo " sudo mkdir -p /root/.ssh"
echo " sudo cp ~/.ssh/authorized_keys /root/.ssh/"
echo " sudo chown -R root:root /root/.ssh"
echo " sudo chmod 700 /root/.ssh"
echo " sudo chmod 600 /root/.ssh/authorized_keys"
echo -e "${BLUE}------------------------------------------------------------------${NC}"
echo -e "\n${YELLOW}After completing these steps, run this script again without the -u parameter.${NC}"
exit 1
fi
fi
# Check if root login is permitted when connecting as root
if [ "$SSH_USER" = "root" ]; then
# Check for both "yes" and "prohibit-password" as valid root login settings
if ! ssh -q -o BatchMode=yes -o ConnectTimeout=5 -i "$SSH_PRIVATE_KEY" -o StrictHostKeyChecking=no -p "$SSH_PORT" "$SSH_USER@$VPS_IP" 'grep -q "^PermitRootLogin.*\(yes\|prohibit-password\)" /etc/ssh/sshd_config'; then
echo -e "\n${RED}Root SSH login is not enabled on your VPS.${NC}"
echo -e "\n${YELLOW}Would you like this script to automatically enable root SSH access? (y/N):${NC} "
read -r answer
if [[ "$answer" =~ ^[Yy]$ ]]; then
configure_ssh_key_auth
else
# User declined: print manual instructions and bail out.
echo -e "\n${BLUE}------------------------------------------------------------------${NC}"
echo -e "${YELLOW}To manually configure SSH for root access:${NC}"
echo -e "\n ${YELLOW}1. Connect to your VPS and edit sshd_config:${NC}"
echo " sudo nano /etc/ssh/sshd_config"
echo -e "\n ${YELLOW}2. Find and uncomment or add these lines:${NC}"
echo " PubkeyAuthentication yes"
echo " PermitRootLogin prohibit-password"
echo " AuthorizedKeysFile .ssh/authorized_keys .ssh/authorized_keys2"
echo -e "\n ${YELLOW}3. Restart the SSH service:${NC}"
echo " sudo systemctl restart sshd"
echo -e "${BLUE}------------------------------------------------------------------${NC}"
echo -e "\n${YELLOW}Please enable root SSH access and run this script again.${NC}"
exit 1
fi
fi
fi
echo -e "${GREEN}SSH connection successful with key-based authentication!${NC}"
# Download the WireGuard install script locally
if ! download_install_script; then
echo -e "${RED}Failed to download the latest install script. Exiting...${NC}"
exit 1
fi
# Upload the install script to the remote server
if ! scp -i "$SSH_PRIVATE_KEY" -o StrictHostKeyChecking=no -P "$SSH_PORT" wireguard-install.sh "$SSH_USER@$VPS_IP":~/; then
echo -e "${RED}Failed to upload WireGuard install script to the remote server.${NC}"
exit 1
fi
# Install WireGuard on remote server using the downloaded script
if ! install_wireguard; then
echo -e "${RED}WireGuard installation failed.${NC}"
exit 1
fi
# Remove the local install script
rm wireguard-install.sh >/dev/null 2>&1
# Handle the StartOS config (download)
# (sets CONFIG_NAME on success)
if ! handle_startos_connection; then
echo -e "${RED}StartOS configuration download failed!${NC}"
exit 1
fi
# Import the configuration
if ! import_wireguard_config "$CONFIG_NAME"; then
echo -e "${RED}StartOS configuration import failed or skipped!${NC}"
fi

View File

@@ -63,7 +63,7 @@ sudo unsquashfs -f -d $TMPDIR startos.raspberrypi.squashfs
REAL_GIT_HASH=$(cat $TMPDIR/usr/lib/startos/GIT_HASH.txt)
REAL_VERSION=$(cat $TMPDIR/usr/lib/startos/VERSION.txt)
REAL_ENVIRONMENT=$(cat $TMPDIR/usr/lib/startos/ENVIRONMENT.txt)
sudo sed -i 's| boot=embassy| init=/usr/lib/startos/scripts/init_resize\.sh|' $TMPDIR/boot/cmdline.txt
sudo sed -i 's| boot=startos| init=/usr/lib/startos/scripts/init_resize\.sh|' $TMPDIR/boot/cmdline.txt
sudo cp ./build/raspberrypi/fstab $TMPDIR/etc/
sudo cp ./build/raspberrypi/init_resize.sh $TMPDIR/usr/lib/startos/scripts/init_resize.sh
sudo umount $TMPDIR/boot

View File

@@ -1,7 +1,7 @@
#!/bin/bash
if [ "$GIT_BRANCH_AS_HASH" != 1 ]; then
GIT_HASH="$(git describe --always --abbrev=40 --dirty=-modified)"
GIT_HASH="$(git rev-parse HEAD)$(if ! git diff-index --quiet HEAD --; then echo '-modified'; fi)"
else
GIT_HASH="@$(git rev-parse --abbrev-ref HEAD)"
fi

8
container-runtime/.gitignore vendored Normal file
View File

@@ -0,0 +1,8 @@
node_modules/
dist/
bundle.js
startInit.js
service/
service.js
*.squashfs
/tmp

View File

@@ -0,0 +1,89 @@
# Container RPC Server Specification
## Methods
### init
initialize the runtime (bind-mount `/proc`, `/sys`, `/dev`, and `/run` into each image in `/media/images`)
called after the OS has mounted the JS bundle and images into the container
#### args
`[]`
#### response
`null`
### exit
shutdown runtime
#### args
`[]`
#### response
`null`
### start
run main method if not already running
#### args
`[]`
#### response
`null`
### stop
stop main method by sending SIGTERM to child processes, and SIGKILL after timeout
#### args
`{ timeout: millis }`
#### response
`null`
### execute
run a specific package procedure
#### args
```ts
{
procedure: JsonPath,
input: any,
timeout: millis,
}
```
#### response
`any`
### sandbox
run a specific package procedure in sandbox mode
#### args
```ts
{
procedure: JsonPath,
input: any,
timeout: millis,
}
```
#### response
`any`

View File

@@ -0,0 +1,11 @@
[Unit]
Description=StartOS Container Runtime
[Service]
Type=simple
ExecStart=/usr/bin/node --experimental-detect-module --unhandled-rejections=warn /usr/lib/startos/init/index.js
Restart=always
RestartSec=3
[Install]
WantedBy=multi-user.target

View File

@@ -0,0 +1,23 @@
#!/bin/bash
# Provision the container-runtime base image: temporary DNS, system packages,
# Node.js 20 via nvm, persistent journald settings, and the runtime service.
set -e
# Temporary resolver so apt/curl work before systemd-resolved exists.
mkdir -p /run/systemd/resolve
echo "nameserver 8.8.8.8" > /run/systemd/resolve/stub-resolv.conf
apt-get update
apt-get install -y curl rsync qemu-user-static
# Install nvm, then Node 20, and expose node at a fixed path.
curl -o- https://raw.githubusercontent.com/nvm-sh/nvm/v0.39.7/install.sh | bash
source ~/.bashrc
nvm install 20
ln -s $(which node) /usr/bin/node
# Journald: persistent, compressed logs capped at 1G, no syslog forwarding.
# The sed address matches the key whether it is currently set or commented out.
sed -i '/\(^\|#\)Storage=/c\Storage=persistent' /etc/systemd/journald.conf
sed -i '/\(^\|#\)Compress=/c\Compress=yes' /etc/systemd/journald.conf
sed -i '/\(^\|#\)SystemMaxUse=/c\SystemMaxUse=1G' /etc/systemd/journald.conf
sed -i '/\(^\|#\)ForwardToSyslog=/c\ForwardToSyslog=no' /etc/systemd/journald.conf
systemctl enable container-runtime.service
# Remove the temporary /run/systemd so the real systemd starts clean.
rm -rf /run/systemd

View File

@@ -0,0 +1,23 @@
#!/bin/bash
# Download the Debian base rootfs (squashfs) for the current — or $ARCH —
# CPU architecture from images.linuxcontainers.org.
cd "$(dirname "${BASH_SOURCE[0]}")"
set -e
DISTRO=debian
VERSION=bookworm
ARCH=${ARCH:-$(uname -m)}
FLAVOR=default
# Map uname-style architecture names onto the LXC image naming scheme.
case "$ARCH" in
x86_64) _ARCH=amd64 ;;
aarch64) _ARCH=arm64 ;;
*) _ARCH=$ARCH ;;
esac
URL="https://images.linuxcontainers.org/$(curl -fsSL https://images.linuxcontainers.org/meta/1.0/index-system | grep "^$DISTRO;$VERSION;$_ARCH;$FLAVOR;" | head -n1 | sed 's/^.*;//g')/rootfs.squashfs"
echo "Downloading $URL to debian.${ARCH}.squashfs"
curl -fsSL "$URL" > debian.${ARCH}.squashfs

View File

@@ -0,0 +1,10 @@
#!/bin/bash
# Rewrite file: dependency paths one directory up for the dist/ layout, then
# install production dependencies there.
cd "$(dirname "${BASH_SOURCE[0]}")"
set -e
sed 's/file:\.\([.\/]\)/file:..\/.\1/g' ./package.json > ./dist/package.json
sed 's/"\.\([.\/]\)/"..\/.\1/g' ./package-lock.json > ./dist/package-lock.json
npm --prefix dist ci --omit=dev

View File

@@ -0,0 +1,8 @@
/** @type {import('ts-jest').JestConfigWithTsJest} */
// Jest configuration: run TypeScript tests under ts-jest in a Node environment.
module.exports = {
  preset: "ts-jest",
  automock: false, // mocks must be opted into explicitly; nothing is auto-mocked
  testEnvironment: "node",
  rootDir: "./src/", // discover tests only under src/
  modulePathIgnorePatterns: ["./dist/"], // never resolve modules from build output
}

View File

@@ -0,0 +1,28 @@
#!/bin/bash
# Create a writable container rootfs from a read-only image in /media/images
# using an overlay mount, bind host pseudo-filesystems into it, and print the
# resulting rootfs path. Quoting added so image ids / temp paths containing
# whitespace or glob characters cannot break word splitting.
set -e
IMAGE=$1
if [ -z "$IMAGE" ]; then
    >&2 echo "usage: $0 <image id>"
    exit 1
fi
if ! [ -d "/media/images/$IMAGE" ]; then
    >&2 echo "image does not exist"
    exit 1
fi
# Scratch directory holding the overlay upper/work dirs and the merged rootfs.
container=$(mktemp -d)
mkdir -p "$container/rootfs" "$container/upper" "$container/work"
# Merge the read-only image (lower) with a fresh writable layer (upper).
mount -t overlay -olowerdir="/media/images/$IMAGE",upperdir="$container/upper",workdir="$container/work" overlay "$container/rootfs"
rootfs=$container/rootfs
# Bind host pseudo-filesystems so processes inside the rootfs behave normally.
for special in dev sys proc run; do
    mkdir -p "$rootfs/$special"
    mount --bind "/$special" "$rootfs/$special"
done
echo "$rootfs"

9953
container-runtime/package-lock.json generated Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,46 @@
{
"name": "container-runtime",
"version": "0.0.0",
"description": "We want to be the sdk intermitent for the system",
"module": "./index.js",
"scripts": {
"check": "tsc --noEmit",
"build": "prettier . '!tmp/**' --write && rm -rf dist && tsc",
"tsc": "rm -rf dist; tsc",
"test": "jest -c ./jest.config.js"
},
"author": "",
"prettier": {
"trailingComma": "all",
"tabWidth": 2,
"semi": false,
"singleQuote": false
},
"dependencies": {
"@iarna/toml": "^2.2.5",
"@noble/curves": "^1.4.0",
"@noble/hashes": "^1.4.0",
"@start9labs/start-sdk": "file:../sdk/dist",
"esbuild-plugin-resolve": "^2.0.0",
"filebrowser": "^1.0.0",
"isomorphic-fetch": "^3.0.0",
"jsonpath": "^1.1.1",
"lodash.merge": "^4.6.2",
"node-fetch": "^3.1.0",
"ts-matches": "^5.5.1",
"tslib": "^2.5.3",
"typescript": "^5.1.3",
"yaml": "^2.3.1"
},
"devDependencies": {
"@swc/cli": "^0.1.62",
"@swc/core": "^1.3.65",
"@types/jest": "^29.5.12",
"@types/jsonpath": "^0.2.4",
"@types/node": "^20.11.13",
"jest": "^29.7.0",
"prettier": "^3.2.5",
"ts-jest": "^29.2.3",
"typescript": ">5.2"
}
}

View File

@@ -0,0 +1,12 @@
#!/bin/bash
# Tear down a container rootfs created by the overlay-mount script: recursively
# unmount it, then delete its parent scratch directory.
set -e
rootfs=$1
if [ -z "$rootfs" ]; then
    >&2 echo "usage: $0 <container rootfs path>"
    exit 1
fi
umount --recursive "$rootfs"
# The original `rm -rf $rootfs/..` never worked: rm refuses any operand whose
# final component is "..", so the scratch directory was leaked. Resolve the
# parent explicitly instead.
rm -rf "$(dirname "$rootfs")"

View File

@@ -0,0 +1,317 @@
import { types as T, utils } from "@start9labs/start-sdk"
import * as net from "net"
import { object, string, number, literals, some, unknown } from "ts-matches"
import { Effects } from "../Models/Effects"
import { CallbackHolder } from "../Models/CallbackHolder"
import { asError } from "@start9labs/start-sdk/base/lib/util"
// Runtime matcher for a JSON-RPC error envelope from the host.
// `error.data` may be either a bare string or a { details, debug? } object
// (the trailing array argument to `object` marks those keys as optional,
// per ts-matches usage).
const matchRpcError = object({
  error: object(
    {
      code: number,
      message: string,
      data: some(
        string,
        object(
          {
            details: string,
            debug: string,
          },
          ["debug"],
        ),
      ),
    },
    ["data"],
  ),
})
// Type-guard predicates derived from the matchers above.
const testRpcError = matchRpcError.test
const testRpcResult = object({
  result: unknown,
}).test
type RpcError = typeof matchRpcError._TYPE
// Unix domain socket the host RPC server listens on (mounted into the container).
const SOCKET_PATH = "/media/startos/rpc/host.sock"
// Monotonically increasing JSON-RPC request id, shared across all calls in
// this process.
let hostSystemId = 0
// Context threaded through effect construction:
// - procedureId: id of the running procedure, or null outside a procedure
// - callbacks: registry used to pass callback handles to the host
// - constRetry: optional retry hook forwarded onto the Effects object
//   (semantics defined by the consumer of makeEffects)
export type EffectContext = {
  procedureId: string | null
  callbacks?: CallbackHolder
  constRetry?: () => void
}
// Perform one JSON-RPC round trip with the host over the unix socket.
// Opens a fresh connection per call, writes a single newline-terminated
// request, buffers the reply until its newline terminator arrives, then
// resolves with `result` or rejects with a descriptive Error.
const rpcRoundFor =
  (procedureId: string | null) =>
  <K extends T.EffectMethod | "clearCallbacks">(
    method: K,
    params: Record<string, unknown>,
  ) => {
    const id = hostSystemId++
    const client = net.createConnection({ path: SOCKET_PATH }, () => {
      client.write(
        JSON.stringify({
          id,
          method,
          params: { ...params, procedureId: procedureId || undefined },
        }) + "\n",
      )
    })
    const bufs: Buffer[] = []
    return new Promise((resolve, reject) => {
      client.on("data", (data) => {
        bufs.push(data)
        // Responses are newline-terminated. Keep buffering until 0x0a arrives:
        // the previous implementation called client.end() on every data event,
        // half-closing the socket after the first partial chunk.
        if (!data.includes(10)) return
        try {
          const res: unknown = JSON.parse(
            Buffer.concat(bufs).toString().split("\n")[0],
          )
          if (testRpcError(res)) {
            // Build a human-readable message from message + optional data.
            let message = res.error.message
            console.error(
              "Error in host RPC:",
              utils.asError({ method, params, error: res.error }),
            )
            if (string.test(res.error.data)) {
              message += ": " + res.error.data
              console.error(`Details: ${res.error.data}`)
            } else {
              if (res.error.data?.details) {
                message += ": " + res.error.data.details
                console.error(`Details: ${res.error.data.details}`)
              }
              if (res.error.data?.debug) {
                message += "\n" + res.error.data.debug
                console.error(`Debug: ${res.error.data.debug}`)
              }
            }
            reject(new Error(`${message}@${method}`))
          } else if (testRpcResult(res)) {
            resolve(res.result)
          } else {
            reject(new Error(`malformed response ${JSON.stringify(res)}`))
          }
        } catch (error) {
          reject(error)
        }
        // The round trip is complete (resolved or rejected); close the socket.
        client.end()
      })
      client.on("error", (error) => {
        reject(error)
      })
    })
  }
export function makeEffects(context: EffectContext): Effects {
const rpcRound = rpcRoundFor(context.procedureId)
const self: Effects = {
constRetry: context.constRetry,
clearCallbacks(...[options]: Parameters<T.Effects["clearCallbacks"]>) {
return rpcRound("clear-callbacks", {
...options,
}) as ReturnType<T.Effects["clearCallbacks"]>
},
action: {
clear(...[options]: Parameters<T.Effects["action"]["clear"]>) {
return rpcRound("action.clear", {
...options,
}) as ReturnType<T.Effects["action"]["clear"]>
},
export(...[options]: Parameters<T.Effects["action"]["export"]>) {
return rpcRound("action.export", {
...options,
}) as ReturnType<T.Effects["action"]["export"]>
},
getInput(...[options]: Parameters<T.Effects["action"]["getInput"]>) {
return rpcRound("action.get-input", {
...options,
}) as ReturnType<T.Effects["action"]["getInput"]>
},
request(...[options]: Parameters<T.Effects["action"]["request"]>) {
return rpcRound("action.request", {
...options,
}) as ReturnType<T.Effects["action"]["request"]>
},
run(...[options]: Parameters<T.Effects["action"]["run"]>) {
return rpcRound("action.run", {
...options,
}) as ReturnType<T.Effects["action"]["run"]>
},
clearRequests(
...[options]: Parameters<T.Effects["action"]["clearRequests"]>
) {
return rpcRound("action.clear-requests", {
...options,
}) as ReturnType<T.Effects["action"]["clearRequests"]>
},
},
bind(...[options]: Parameters<T.Effects["bind"]>) {
return rpcRound("bind", {
...options,
stack: new Error().stack,
}) as ReturnType<T.Effects["bind"]>
},
clearBindings(...[options]: Parameters<T.Effects["clearBindings"]>) {
return rpcRound("clear-bindings", { ...options }) as ReturnType<
T.Effects["clearBindings"]
>
},
clearServiceInterfaces(
...[options]: Parameters<T.Effects["clearServiceInterfaces"]>
) {
return rpcRound("clear-service-interfaces", { ...options }) as ReturnType<
T.Effects["clearServiceInterfaces"]
>
},
getInstalledPackages(...[]: Parameters<T.Effects["getInstalledPackages"]>) {
return rpcRound("get-installed-packages", {}) as ReturnType<
T.Effects["getInstalledPackages"]
>
},
subcontainer: {
createFs(options: { imageId: string; name: string }) {
return rpcRound("subcontainer.create-fs", options) as ReturnType<
T.Effects["subcontainer"]["createFs"]
>
},
destroyFs(options: { guid: string }): Promise<null> {
return rpcRound("subcontainer.destroy-fs", options) as ReturnType<
T.Effects["subcontainer"]["destroyFs"]
>
},
},
exportServiceInterface: ((
...[options]: Parameters<Effects["exportServiceInterface"]>
) => {
return rpcRound("export-service-interface", options) as ReturnType<
T.Effects["exportServiceInterface"]
>
}) as Effects["exportServiceInterface"],
exposeForDependents(
...[options]: Parameters<T.Effects["exposeForDependents"]>
) {
return rpcRound("expose-for-dependents", options) as ReturnType<
T.Effects["exposeForDependents"]
>
},
getContainerIp(...[options]: Parameters<T.Effects["getContainerIp"]>) {
return rpcRound("get-container-ip", options) as ReturnType<
T.Effects["getContainerIp"]
>
},
getOsIp(...[]: Parameters<T.Effects["getOsIp"]>) {
return rpcRound("get-os-ip", {}) as ReturnType<T.Effects["getOsIp"]>
},
// Fetches host info. A caller-supplied callback (for change notification) is
// registered with the context's CallbackHolder and replaced on the wire by its
// numeric id; `null` is sent when no callback holder is available.
getHostInfo: ((...[allOptions]: Parameters<T.Effects["getHostInfo"]>) => {
  const options = {
    ...allOptions,
    callback: context.callbacks?.addCallback(allOptions.callback) || null,
  }
  return rpcRound("get-host-info", options) as ReturnType<
    T.Effects["getHostInfo"]
  > as any
}) as Effects["getHostInfo"],
// Fetches a single service interface; registers the optional watch callback
// with the context's CallbackHolder, sending its numeric id over the wire.
getServiceInterface(
  ...[options]: Parameters<T.Effects["getServiceInterface"]>
) {
  return rpcRound("get-service-interface", {
    ...options,
    callback: context.callbacks?.addCallback(options.callback) || null,
  }) as ReturnType<T.Effects["getServiceInterface"]>
},
// Requests a port-forward to another service from the host.
getServicePortForward(
  ...[options]: Parameters<T.Effects["getServicePortForward"]>
) {
  return rpcRound("get-service-port-forward", options) as ReturnType<
    T.Effects["getServicePortForward"]
  >
},
// Fetches the SSL certificate chain for the given hostnames.
getSslCertificate(options: Parameters<T.Effects["getSslCertificate"]>[0]) {
  return rpcRound("get-ssl-certificate", options) as ReturnType<
    T.Effects["getSslCertificate"]
  >
},
// Fetches the SSL private key for the given hostnames.
getSslKey(options: Parameters<T.Effects["getSslKey"]>[0]) {
  return rpcRound("get-ssl-key", options) as ReturnType<
    T.Effects["getSslKey"]
  >
},
// Fetches system SMTP credentials; registers the optional watch callback and
// sends its numeric id over the wire.
getSystemSmtp(...[options]: Parameters<T.Effects["getSystemSmtp"]>) {
  return rpcRound("get-system-smtp", {
    ...options,
    callback: context.callbacks?.addCallback(options.callback) || null,
  }) as ReturnType<T.Effects["getSystemSmtp"]>
},
// Lists service interfaces; registers the optional watch callback and sends
// its numeric id over the wire.
listServiceInterfaces(
  ...[options]: Parameters<T.Effects["listServiceInterfaces"]>
) {
  return rpcRound("list-service-interfaces", {
    ...options,
    callback: context.callbacks?.addCallback(options.callback) || null,
  }) as ReturnType<T.Effects["listServiceInterfaces"]>
},
// Mounts a volume from another package at the requested location.
mount(...[options]: Parameters<T.Effects["mount"]>) {
  return rpcRound("mount", options) as ReturnType<T.Effects["mount"]>
},
// Asks the host to restart this service. Declared params are ignored.
restart(...[]: Parameters<T.Effects["restart"]>) {
  return rpcRound("restart", {}) as ReturnType<T.Effects["restart"]>
},
// Replaces this service's declared dependency set on the host.
setDependencies(
  dependencies: Parameters<T.Effects["setDependencies"]>[0],
): ReturnType<T.Effects["setDependencies"]> {
  return rpcRound("set-dependencies", dependencies) as ReturnType<
    T.Effects["setDependencies"]
  >
},
// Checks the satisfaction state of this service's dependencies.
checkDependencies(
  options: Parameters<T.Effects["checkDependencies"]>[0],
): ReturnType<T.Effects["checkDependencies"]> {
  return rpcRound("check-dependencies", options) as ReturnType<
    T.Effects["checkDependencies"]
  >
},
// Fetches this service's currently declared dependencies.
getDependencies(): ReturnType<T.Effects["getDependencies"]> {
  return rpcRound("get-dependencies", {}) as ReturnType<
    T.Effects["getDependencies"]
  >
},
// Reports a health-check result for this service to the host.
setHealth(...[options]: Parameters<T.Effects["setHealth"]>) {
  return rpcRound("set-health", options) as ReturnType<
    T.Effects["setHealth"]
  >
},
// Fetches the service's current status from the host.
getStatus(...[o]: Parameters<T.Effects["getStatus"]>) {
  return rpcRound("get-status", o) as ReturnType<T.Effects["getStatus"]>
},
// Sets the service's main status ("running" | "stopped") on the host.
// Fix: the result was previously cast to ReturnType<T.Effects["setHealth"]>,
// a copy/paste slip from the neighboring `setHealth` wrapper; the declared
// return type of this method is Promise<null>, so cast to that instead.
setMainStatus(o: { status: "running" | "stopped" }): Promise<null> {
  return rpcRound("set-main-status", o) as Promise<null>
},
// Asks the host to shut this service down. Declared params are ignored.
shutdown(...[]: Parameters<T.Effects["shutdown"]>) {
  return rpcRound("shutdown", {}) as ReturnType<T.Effects["shutdown"]>
},
// Key/value store access. `get` may register a watch callback (replaced on
// the wire by its numeric id); `set` writes a value.
store: {
  get: async (options: any) =>
    rpcRound("store.get", {
      ...options,
      callback: context.callbacks?.addCallback(options.callback) || null,
    }) as any,
  set: async (options: any) =>
    rpcRound("store.set", options) as ReturnType<T.Effects["store"]["set"]>,
} as T.Effects["store"],
// Fetches the service's recorded data version (used for migrations).
getDataVersion() {
  return rpcRound("get-data-version", {}) as ReturnType<
    T.Effects["getDataVersion"]
  >
},
// Records the service's data version after a successful migration.
setDataVersion(...[options]: Parameters<T.Effects["setDataVersion"]>) {
  return rpcRound("set-data-version", options) as ReturnType<
    T.Effects["setDataVersion"]
  >
},
}
return self
}

View File

@@ -0,0 +1,487 @@
// @ts-check
import * as net from "net"
import {
object,
some,
string,
literal,
array,
number,
matches,
any,
shape,
anyOf,
} from "ts-matches"
import { types as T, utils } from "@start9labs/start-sdk"
import * as fs from "fs"
import { CallbackHolder } from "../Models/CallbackHolder"
import { AllGetDependencies } from "../Interfaces/AllGetDependencies"
import { jsonPath, unNestPath } from "../Models/JsonPath"
import { System } from "../Interfaces/System"
import { makeEffects } from "./EffectCreator"
// A value that may or may not be wrapped in a Promise.
type MaybePromise<T> = T | Promise<T>
// Runtime validator for a JSON-RPC response payload: either a successful
// `{ result }` or an `{ error }` carrying a numeric code, a message, and
// optional structured data (human-readable details plus debug info).
export const matchRpcResult = anyOf(
  object({ result: any }),
  object({
    error: object(
      {
        code: number,
        message: string,
        data: object(
          {
            details: string,
            debug: any,
          },
          ["details", "debug"],
        ),
      },
      ["data"],
    ),
  }),
)
export type RpcResult = typeof matchRpcResult._TYPE
// A full response frame, or null when no response should be written
// (e.g. for callback notifications).
type SocketResponse = ({ jsonrpc: "2.0"; id: IdType } & RpcResult) | null
// Location of the unix domain socket the host connects to.
const SOCKET_PARENT = "/media/startos/rpc"
const SOCKET_PATH = "/media/startos/rpc/service.sock"
const jsonrpc = "2.0" as const
const isResult = object({ result: any }).test
// JSON-RPC ids may be strings, numbers, or null.
const idType = some(string, number, literal(null))
type IdType = null | string | number | undefined
// Shape of an "execute" request: run a procedure with input and optional timeout.
const runType = object(
  {
    id: idType,
    method: literal("execute"),
    params: object(
      {
        id: string,
        procedure: string,
        input: any,
        timeout: number,
      },
      ["timeout"],
    ),
  },
  ["id"],
)
// Shape of a "sandbox" request; params mirror "execute".
const sandboxRunType = object(
  {
    id: idType,
    method: literal("sandbox"),
    params: object(
      {
        id: string,
        procedure: string,
        input: any,
        timeout: number,
      },
      ["timeout"],
    ),
  },
  ["id"],
)
// Shape of a callback invocation; note it carries no id (a notification,
// so no response is written).
const callbackType = object({
  method: literal("callback"),
  params: object({
    id: number,
    args: array,
  }),
})
// Lifecycle requests: init, start, stop, exit — no params.
const initType = object(
  {
    id: idType,
    method: literal("init"),
  },
  ["id"],
)
const startType = object(
  {
    id: idType,
    method: literal("start"),
  },
  ["id"],
)
const stopType = object(
  {
    id: idType,
    method: literal("stop"),
  },
  ["id"],
)
const exitType = object(
  {
    id: idType,
    method: literal("exit"),
  },
  ["id"],
)
// Shape of an "eval" request: run an arbitrary script (diagnostics hook).
const evalType = object(
  {
    id: idType,
    method: literal("eval"),
    params: object({
      script: string,
    }),
  },
  ["id"],
)
const jsonParse = (x: string) => JSON.parse(x)
// Wraps a procedure's eventual RpcResult into a full JSON-RPC response frame:
// - normalizes a missing/undefined result to `result: null` so the frame is
//   always a valid success or error envelope;
// - converts a rejected promise into an error envelope (code 0). The
//   `message: typeof error` is intentionally the error's runtime type; the
//   human-readable text goes in `data.details`.
const handleRpc = (id: IdType, result: Promise<RpcResult>) =>
  result
    .then((result) => {
      return {
        jsonrpc,
        id,
        ...result,
      }
    })
    .then((x) => {
      if (
        ("result" in x && x.result === undefined) ||
        !("error" in x || "result" in x)
      )
        (x as any).result = null
      return x
    })
    .catch((error) => ({
      jsonrpc,
      id,
      error: {
        code: 0,
        message: typeof error,
        data: { details: "" + error, debug: error?.stack },
      },
    }))
// True when a parsed message carries a JSON-RPC id.
const hasId = object({ id: idType }).test
/**
 * Listens on a unix domain socket for newline-delimited JSON-RPC 2.0 requests
 * from the host and dispatches them to the active `System` implementation.
 *
 * Lifecycle: the host sends `init` (loads the System), then `start`/`stop`,
 * `execute`/`sandbox` for procedures, `callback` notifications, and `exit`.
 */
export class RpcListener {
  unixSocketServer = net.createServer(async (server) => {})
  private _system: System | undefined
  private callbacks: CallbackHolder | undefined
  constructor(readonly getDependencies: AllGetDependencies) {
    if (!fs.existsSync(SOCKET_PARENT)) {
      fs.mkdirSync(SOCKET_PARENT, { recursive: true })
    }
    // Fix: remove a stale socket file left behind by a previous run of this
    // process; otherwise listen() throws EADDRINUSE and the listener never
    // comes up.
    if (fs.existsSync(SOCKET_PATH)) {
      fs.unlinkSync(SOCKET_PATH)
    }
    this.unixSocketServer.listen(SOCKET_PATH)
    this.unixSocketServer.on("connection", (s) => {
      // Most recently seen request id on this connection, echoed back in
      // error responses produced outside the normal dispatch path.
      let id: IdType = null
      const captureId = <X>(x: X) => {
        if (hasId(x)) id = x.id
        return x
      }
      // Logs every inbound/outbound payload with its location tag.
      const logData =
        (location: string) =>
        <X>(x: X) => {
          console.log({
            location,
            stringified: JSON.stringify(x),
            type: typeof x,
            id,
          })
          return x
        }
      // Converts a thrown error into a JSON-RPC error frame (code 1).
      const mapError = (error: any): SocketResponse => ({
        jsonrpc,
        id,
        error: {
          message: typeof error,
          data: {
            details: error?.message ?? String(error),
            debug: error?.stack,
          },
          code: 1,
        },
      })
      // Writes a frame followed by a newline; null responses (notifications)
      // are silently dropped.
      const writeDataToSocket = (x: SocketResponse) => {
        if (x != null) {
          return new Promise((resolve) =>
            s.write(JSON.stringify(x) + "\n", resolve),
          )
        }
      }
      // NOTE(review): this assumes each "data" event contains only whole
      // newline-terminated JSON messages; a frame split across TCP chunks
      // would fail to parse — confirm the host always writes complete frames.
      s.on("data", (a) =>
        Promise.resolve(a)
          .then((b) => b.toString())
          .then((buf) => {
            for (let s of buf.split("\n")) {
              if (s)
                Promise.resolve(s)
                  .then(logData("dataIn"))
                  .then(jsonParse)
                  .then(captureId)
                  .then((x) => this.dealWithInput(x))
                  .catch(mapError)
                  .then(logData("response"))
                  .then(writeDataToSocket)
                  .catch((e) => {
                    console.error(`Major error in socket handling: ${e}`)
                    console.debug(`Data in: ${a.toString()}`)
                  })
            }
          }),
      )
    })
  }
  // The active System; throws until `init` has completed.
  private get system() {
    if (!this._system) throw new Error("System not initialized")
    return this._system
  }
  // One CallbackHolder child per procedure, so a procedure re-run replaces
  // (and unregisters) the callbacks of its previous run.
  private callbackHolders: Map<string, CallbackHolder> = new Map()
  private removeCallbackHolderFor(procedure: string) {
    const prev = this.callbackHolders.get(procedure)
    if (prev) {
      this.callbackHolders.delete(prev ? procedure : procedure)
      this.callbacks?.removeChild(prev)
    }
  }
  private callbackHolderFor(procedure: string): CallbackHolder {
    this.removeCallbackHolderFor(procedure)
    const callbackHolder = this.callbacks!.child()
    this.callbackHolders.set(procedure, callbackHolder)
    return callbackHolder
  }
  // Invokes a registered callback by numeric id; failures are logged, never
  // propagated back to the host.
  callCallback(callback: number, args: any[]): void {
    if (this.callbacks) {
      this.callbacks
        .callCallback(callback, args)
        .catch((error) =>
          console.error(`callback ${callback} failed`, utils.asError(error)),
        )
    } else {
      console.warn(
        `callback ${callback} ignored because system is not initialized`,
      )
    }
  }
  // Routes one parsed message to its handler based on its shape.
  private dealWithInput(input: unknown): MaybePromise<SocketResponse> {
    return matches(input)
      .when(runType, async ({ id, params }) => {
        const system = this.system
        const procedure = jsonPath.unsafeCast(params.procedure)
        const { input, timeout, id: procedureId } = params
        const result = this.getResult(
          procedure,
          system,
          procedureId,
          timeout,
          input,
        )
        return handleRpc(id, result)
      })
      .when(sandboxRunType, async ({ id, params }) => {
        const system = this.system
        const procedure = jsonPath.unsafeCast(params.procedure)
        const { input, timeout, id: procedureId } = params
        const result = this.getResult(
          procedure,
          system,
          procedureId,
          timeout,
          input,
        )
        return handleRpc(id, result)
      })
      .when(callbackType, async ({ params: { id, args } }) => {
        // Notification: fire the callback and write no response.
        this.callCallback(id, args)
        return null
      })
      .when(startType, async ({ id }) => {
        const callbacks = this.callbackHolderFor("main")
        const effects = makeEffects({
          procedureId: null,
          callbacks,
        })
        return handleRpc(
          id,
          this.system.start(effects).then((result) => ({ result })),
        )
      })
      .when(stopType, async ({ id }) => {
        this.removeCallbackHolderFor("main")
        return handleRpc(
          id,
          this.system.stop().then((result) => ({ result })),
        )
      })
      .when(exitType, async ({ id }) => {
        return handleRpc(
          id,
          (async () => {
            if (this._system) await this._system.exit()
          })().then((result) => ({ result })),
        )
      })
      .when(initType, async ({ id }) => {
        return handleRpc(
          id,
          (async () => {
            // Idempotent: a second `init` is a no-op.
            if (!this._system) {
              const system = await this.getDependencies.system()
              this.callbacks = new CallbackHolder(
                makeEffects({
                  procedureId: null,
                }),
              )
              const callbacks = this.callbackHolderFor("containerInit")
              await system.containerInit(
                makeEffects({
                  procedureId: null,
                  callbacks,
                }),
              )
              this._system = system
            }
          })().then((result) => ({ result })),
        )
      })
      .when(evalType, async ({ id, params }) => {
        // Diagnostics hook: evaluates arbitrary host-supplied script with
        // access to this listener. Trusted input only — the socket is local.
        return handleRpc(
          id,
          (async () => {
            const result = await new Function(
              `return (async () => { return (${params.script}) }).call(this)`,
            ).call({
              listener: this,
              require: require,
            })
            return {
              jsonrpc,
              id,
              // Only JSON-representable primitives/objects are returned.
              result: ![
                "string",
                "number",
                "boolean",
                "null",
                "object",
              ].includes(typeof result)
                ? null
                : result,
            }
          })(),
        )
      })
      .when(
        shape({ id: idType, method: string }, ["id"]),
        ({ id, method }) => ({
          jsonrpc,
          id,
          error: {
            code: -32601,
            message: `Method not found`,
            data: {
              details: method,
            },
          },
        }),
      )
      .defaultToLazy(() => {
        console.warn(
          `Couldn't parse the following input ${JSON.stringify(input)}`,
        )
        return {
          jsonrpc,
          id: (input as any)?.id,
          error: {
            code: -32602,
            message: "invalid params",
            data: {
              details: JSON.stringify(input),
            },
          },
        }
      })
  }
  // Executes one procedure path against the System, normalizing its outcome
  // into an RpcResult. An unrecognized path resolves to `result: undefined`,
  // which handleRpc later normalizes to `result: null`.
  private getResult(
    procedure: typeof jsonPath._TYPE,
    system: System,
    procedureId: string,
    timeout: number | undefined,
    input: any,
  ) {
    const ensureResultTypeShape = (
      result: void | T.ActionInput | T.ActionResult | null,
    ): { result: any } => {
      return { result }
    }
    const callbacks = this.callbackHolderFor(procedure)
    const effects = makeEffects({
      procedureId,
      callbacks,
    })
    return (async () => {
      switch (procedure) {
        case "/backup/create":
          return system.createBackup(effects, timeout || null)
        case "/backup/restore":
          return system.restoreBackup(effects, timeout || null)
        case "/packageInit":
          return system.packageInit(effects, timeout || null)
        case "/packageUninit":
          return system.packageUninit(
            effects,
            string.optional().unsafeCast(input),
            timeout || null,
          )
        default:
          const procedures = unNestPath(procedure)
          switch (true) {
            case procedures[1] === "actions" && procedures[3] === "getInput":
              return system.getActionInput(
                effects,
                procedures[2],
                timeout || null,
              )
            case procedures[1] === "actions" && procedures[3] === "run":
              return system.runAction(
                effects,
                procedures[2],
                input.input,
                timeout || null,
              )
          }
      }
    })().then(ensureResultTypeShape, (error) =>
      matches(error)
        .when(
          object(
            {
              error: string,
              code: number,
            },
            ["code"],
            { code: 0 },
          ),
          (error) => ({
            error: {
              code: error.code,
              message: error.error,
            },
          }),
        )
        .defaultToLazy(() => ({
          error: {
            code: 0,
            message: String(error),
          },
        })),
    )
  }
}

View File

@@ -0,0 +1,157 @@
import * as fs from "fs/promises"
import * as cp from "child_process"
import { SubContainer, types as T } from "@start9labs/start-sdk"
import { promisify } from "util"
import { DockerProcedure, VolumeId } from "../../../Models/DockerProcedure"
import { Volume } from "./matchVolume"
import {
CommandOptions,
ExecOptions,
ExecSpawnable,
} from "@start9labs/start-sdk/package/lib/util/SubContainer"
// Promisified child_process helpers for shell and direct-binary execution.
export const exec = promisify(cp.exec)
export const execFile = promisify(cp.execFile)
/**
 * A single-use execution environment for a legacy docker procedure. Builds a
 * SubContainer for the procedure's image, materializes the procedure's volume
 * mounts into it, and runs exactly one command — `exec`/`execFail` destroy the
 * underlying subcontainer when the command finishes (unless a pre-existing
 * subcontainer handle was supplied, in which case `destroy` may be absent).
 */
export class DockerProcedureContainer {
  private constructor(private readonly subcontainer: ExecSpawnable) {}
  // Builds a container for the procedure, reusing `options.subcontainer`
  // (e.g. the running main container for "inject" health checks) when given.
  static async of(
    effects: T.Effects,
    packageId: string,
    data: DockerProcedure,
    volumes: { [id: VolumeId]: Volume },
    name: string,
    options: { subcontainer?: ExecSpawnable } = {},
  ) {
    const subcontainer =
      options?.subcontainer ??
      (await DockerProcedureContainer.createSubContainer(
        effects,
        packageId,
        data,
        volumes,
        name,
      ))
    return new DockerProcedureContainer(subcontainer)
  }
  // Creates a fresh subcontainer for `data.image` and mounts every volume the
  // procedure declares, by volume type.
  static async createSubContainer(
    effects: T.Effects,
    packageId: string,
    data: DockerProcedure,
    volumes: { [id: VolumeId]: Volume },
    name: string,
  ) {
    const subcontainer = await SubContainer.of(
      effects,
      { imageId: data.image },
      name,
    )
    if (data.mounts) {
      const mounts = data.mounts
      for (const mount in mounts) {
        // Absolute path of the mountpoint inside the subcontainer's rootfs;
        // also used below as the destination for generated cert/key files.
        const path = mounts[mount].startsWith("/")
          ? `${subcontainer.rootfs}${mounts[mount]}`
          : `${subcontainer.rootfs}/${mounts[mount]}`
        await fs.mkdir(path, { recursive: true })
        const volumeMount = volumes[mount]
        // NOTE(review): `volumes[mount]` is assumed to exist for every
        // declared mount; a mount id missing from `volumes` would throw here
        // — confirm manifests guarantee this.
        if (volumeMount.type === "data") {
          await subcontainer.mount(
            { type: "volume", id: mount, subpath: null, readonly: false },
            mounts[mount],
          )
        } else if (volumeMount.type === "assets") {
          await subcontainer.mount(
            { type: "assets", subpath: mount },
            mounts[mount],
          )
        } else if (volumeMount.type === "certificate") {
          // Collect the package's .onion hostnames for the interface and
          // write a cert chain + key into the mountpoint.
          const hostnames = [
            `${packageId}.embassy`,
            ...new Set(
              Object.values(
                (
                  await effects.getHostInfo({
                    hostId: volumeMount["interface-id"],
                  })
                )?.hostnameInfo || {},
              )
                .flatMap((h) => h)
                .flatMap((h) => (h.kind === "onion" ? [h.hostname.value] : [])),
            ).values(),
          ]
          const certChain = await effects.getSslCertificate({
            hostnames,
          })
          const key = await effects.getSslKey({
            hostnames,
          })
          await fs.writeFile(
            `${path}/${volumeMount["interface-id"]}.cert.pem`,
            certChain.join("\n"),
          )
          await fs.writeFile(
            `${path}/${volumeMount["interface-id"]}.key.pem`,
            key,
          )
        } else if (volumeMount.type === "pointer") {
          // Pointer mounts reference another package's volume; mounting is
          // best-effort — failures are logged, not fatal.
          await effects
            .mount({
              location: path,
              target: {
                packageId: volumeMount["package-id"],
                subpath: volumeMount.path,
                readonly: volumeMount.readonly,
                volumeId: volumeMount["volume-id"],
              },
            })
            .catch(console.warn)
        } else if (volumeMount.type === "backup") {
          await subcontainer.mount(
            { type: "backup", subpath: null },
            mounts[mount],
          )
        }
      }
    }
    return subcontainer
  }
  // Runs one command, then destroys the subcontainer (if destroyable).
  async exec(
    commands: string[],
    options?: CommandOptions & ExecOptions,
    timeoutMs?: number | null,
  ) {
    try {
      return await this.subcontainer.exec(commands, options, timeoutMs)
    } finally {
      await this.subcontainer.destroy?.()
    }
  }
  // Like `exec`, but throws on a nonzero exit code / signal. Note the
  // parameter order differs from `exec`: timeout comes second here.
  async execFail(
    commands: string[],
    timeoutMs: number | null,
    options?: CommandOptions & ExecOptions,
  ) {
    try {
      const res = await this.subcontainer.exec(commands, options, timeoutMs)
      if (res.exitCode !== 0) {
        const codeOrSignal =
          res.exitCode !== null
            ? `code ${res.exitCode}`
            : `signal ${res.exitSignal}`
        throw new Error(
          `Process exited with ${codeOrSignal}: ${res.stderr.toString()}`,
        )
      }
      return res
    } finally {
      await this.subcontainer.destroy?.()
    }
  }
  // Spawns a long-lived child process; the subcontainer is NOT destroyed here.
  async spawn(commands: string[]): Promise<cp.ChildProcess> {
    return await this.subcontainer.spawn(commands)
  }
}

View File

@@ -0,0 +1,346 @@
import { polyfillEffects } from "./polyfillEffects"
import { DockerProcedureContainer } from "./DockerProcedureContainer"
import { SystemForEmbassy } from "."
import { T, utils } from "@start9labs/start-sdk"
import { Daemon } from "@start9labs/start-sdk/package/lib/mainFn/Daemon"
import { Effects } from "../../../Models/Effects"
import { off } from "node:process"
import { CommandController } from "@start9labs/start-sdk/package/lib/mainFn/CommandController"
// Interval (ms) between health-check runs.
const EMBASSY_HEALTH_INTERVAL = 15 * 1000
// Interval (ms) for the properties loop — not referenced in this file.
const EMBASSY_PROPERTIES_LOOP = 30 * 1000
/**
* We wanted something to represent what the main loop is doing, and
* in this case it used to run the properties, health, and the docker/ js main.
* Also, this has an ability to clean itself up too if need be.
*/
export class MainLoop {
  // Handle to the running main daemon's subcontainer, used by "inject"-style
  // health checks to exec inside the live container.
  get mainSubContainerHandle() {
    return this.mainEvent?.daemon?.subContainerHandle
  }
  // One interval per manifest health check, torn down in clean().
  private healthLoops?: {
    name: string
    interval: NodeJS.Timeout
  }[]
  // The running main daemon, if started.
  private mainEvent?: {
    daemon: Daemon
  }
  private constructor(
    readonly system: SystemForEmbassy,
    readonly effects: Effects,
  ) {}
  // Async factory: wires up health loops, then starts the main daemon.
  static async of(
    system: SystemForEmbassy,
    effects: Effects,
  ): Promise<MainLoop> {
    const res = new MainLoop(system, effects)
    res.healthLoops = res.constructHealthLoops()
    res.mainEvent = await res.constructMainEvent()
    return res
  }
  // Binds the manifest's interfaces, marks the service running, and launches
  // the main entrypoint as a daemon in a fresh subcontainer.
  private async constructMainEvent() {
    const { system, effects } = this
    const currentCommand: [string, ...string[]] = [
      system.manifest.main.entrypoint,
      ...system.manifest.main.args,
    ]
    await this.setupInterfaces(effects)
    await effects.setMainStatus({ status: "running" })
    const jsMain = (this.system.moduleCode as any)?.jsMain
    // js-main services are not handled by this loop.
    if (jsMain) {
      throw new Error("Unreachable")
    }
    const daemon = new Daemon(async () => {
      const subcontainer = await DockerProcedureContainer.createSubContainer(
        effects,
        this.system.manifest.id,
        this.system.manifest.main,
        this.system.manifest.volumes,
        `Main - ${currentCommand.join(" ")}`,
      )
      return CommandController.of()(
        this.effects,
        subcontainer,
        currentCommand,
        {
          runAsInit: true,
          env: {
            TINI_SUBREAPER: "true",
          },
          sigtermTimeout: utils.inMs(
            this.system.manifest.main["sigterm-timeout"],
          ),
        },
      )
    })
    daemon.start()
    return {
      daemon,
    }
  }
  // Translates each legacy manifest interface's tor/lan port config into
  // `bind` calls, deduplicating internal ports across both configs.
  private async setupInterfaces(effects: T.Effects) {
    for (const interfaceId in this.system.manifest.interfaces) {
      const iface = this.system.manifest.interfaces[interfaceId]
      const internalPorts = new Set<number>()
      for (const port of Object.values(
        iface["tor-config"]?.["port-mapping"] || {},
      )) {
        internalPorts.add(parseInt(port))
      }
      for (const port of Object.values(iface["lan-config"] || {})) {
        internalPorts.add(port.internal)
      }
      for (const internalPort of internalPorts) {
        const torConf = Object.entries(
          iface["tor-config"]?.["port-mapping"] || {},
        )
          .map(([external, internal]) => ({
            internal: parseInt(internal),
            external: parseInt(external),
          }))
          .find((conf) => conf.internal == internalPort)
        const lanConf = Object.entries(iface["lan-config"] || {})
          .map(([external, conf]) => ({
            external: parseInt(external),
            ...conf,
          }))
          .find((conf) => conf.internal == internalPort)
        await effects.bind({
          id: interfaceId,
          internalPort,
          preferredExternalPort: torConf?.external || internalPort,
          secure: null,
          addSsl: lanConf?.ssl
            ? {
                preferredExternalPort: lanConf.external,
                alpn: { specified: ["http/1.1"] },
              }
            : null,
        })
      }
    }
  }
  // Stops the daemon, marks the service stopped, and cancels health loops.
  public async clean(options?: { timeout?: number }) {
    const { mainEvent, healthLoops } = this
    const main = await mainEvent
    delete this.mainEvent
    delete this.healthLoops
    await main?.daemon
      .stop()
      .catch((e: unknown) => console.error(`Main loop error`, utils.asError(e)))
    // NOTE(review): this setMainStatus promise is not awaited, so a failure
    // is silently dropped — confirm intentional.
    this.effects.setMainStatus({ status: "stopped" })
    if (healthLoops) healthLoops.forEach((x) => clearInterval(x.interval))
  }
  // Starts one setInterval per manifest health check. Each tick runs either a
  // docker command or a js health method and maps the outcome onto setHealth.
  // Exit-code convention (both flavors): 59 => disabled, 60 => starting,
  // 61 => loading; any other nonzero => failure.
  private constructHealthLoops() {
    const { manifest } = this.system
    const effects = this.effects
    const start = Date.now()
    return Object.entries(manifest["health-checks"]).map(
      ([healthId, value]) => {
        // Initial state is "starting" until the first tick reports.
        effects
          .setHealth({
            id: healthId,
            name: value.name,
            result: "starting",
            message: null,
          })
          .catch((e) => console.error(utils.asError(e)))
        const interval = setInterval(async () => {
          const actionProcedure = value
          // Milliseconds since the loop was constructed; passed to the check
          // as its input.
          const timeChanged = Date.now() - start
          if (actionProcedure.type === "docker") {
            // "inject" checks run inside the live main container; otherwise a
            // throwaway container is created per tick.
            const subcontainer = actionProcedure.inject
              ? this.mainSubContainerHandle
              : undefined
            const commands = [
              actionProcedure.entrypoint,
              ...actionProcedure.args,
            ]
            const container = await DockerProcedureContainer.of(
              effects,
              manifest.id,
              actionProcedure,
              manifest.volumes,
              `Health Check - ${commands.join(" ")}`,
              {
                subcontainer,
              },
            )
            const env: Record<string, string> = actionProcedure.inject
              ? {
                  HOME: "/root",
                }
              : {}
            const executed = await container.exec(commands, {
              input: JSON.stringify(timeChanged),
              env,
            })
            if (executed.exitCode === 0) {
              await effects.setHealth({
                id: healthId,
                name: value.name,
                result: "success",
                message: actionProcedure["success-message"] ?? null,
              })
              return
            }
            if (executed.exitCode === 59) {
              await effects.setHealth({
                id: healthId,
                name: value.name,
                result: "disabled",
                message:
                  executed.stderr.toString() || executed.stdout.toString(),
              })
              return
            }
            if (executed.exitCode === 60) {
              await effects.setHealth({
                id: healthId,
                name: value.name,
                result: "starting",
                message:
                  executed.stderr.toString() || executed.stdout.toString(),
              })
              return
            }
            if (executed.exitCode === 61) {
              await effects.setHealth({
                id: healthId,
                name: value.name,
                result: "loading",
                message:
                  executed.stderr.toString() || executed.stdout.toString(),
              })
              return
            }
            const errorMessage = executed.stderr.toString()
            const message = executed.stdout.toString()
            // Any stderr output counts as a failure even with exit code 0
            // unhandled above.
            if (!!errorMessage) {
              await effects.setHealth({
                id: healthId,
                name: value.name,
                result: "failure",
                message: errorMessage,
              })
              return
            }
            if (executed.exitCode && executed.exitCode > 0) {
              await effects.setHealth({
                id: healthId,
                name: value.name,
                result: "failure",
                message:
                  executed.stderr.toString() ||
                  executed.stdout.toString() ||
                  `Program exited with code ${executed.exitCode}:`,
              })
              return
            }
            await effects.setHealth({
              id: healthId,
              name: value.name,
              result: "success",
              message,
            })
            return
          } else {
            actionProcedure
            // JS-flavored check: call the module's health method by id.
            const moduleCode = await this.system.moduleCode
            const method = moduleCode.health?.[healthId]
            if (!method) {
              await effects.setHealth({
                id: healthId,
                name: value.name,
                result: "failure",
                message: `Expecting that the js health check ${healthId} exists`,
              })
              return
            }
            const result = await method(
              polyfillEffects(effects, this.system.manifest),
              timeChanged,
            )
            if ("result" in result) {
              await effects.setHealth({
                id: healthId,
                name: value.name,
                result: "success",
                message: null,
              })
              return
            }
            if ("error" in result) {
              await effects.setHealth({
                id: healthId,
                name: value.name,
                result: "failure",
                message: result.error,
              })
              return
            }
            if (!("error-code" in result)) {
              await effects.setHealth({
                id: healthId,
                name: value.name,
                result: "failure",
                message: `Unknown error type ${JSON.stringify(result)}`,
              })
              return
            }
            const [code, message] = result["error-code"]
            if (code === 59) {
              await effects.setHealth({
                id: healthId,
                name: value.name,
                result: "disabled",
                message,
              })
              return
            }
            if (code === 60) {
              await effects.setHealth({
                id: healthId,
                name: value.name,
                result: "starting",
                message,
              })
              return
            }
            if (code === 61) {
              await effects.setHealth({
                id: healthId,
                name: value.name,
                result: "loading",
                message,
              })
              return
            }
            await effects.setHealth({
              id: healthId,
              name: value.name,
              result: "failure",
              message: `${result["error-code"][0]}: ${result["error-code"][1]}`,
            })
            return
          }
        }, EMBASSY_HEALTH_INTERVAL)
        return { name: healthId, interval }
      },
    )
  }
}

View File

@@ -0,0 +1,387 @@
export default {
"peer-tor-address": {
name: "Peer Tor Address",
description: "The Tor address of the peer interface",
type: "pointer",
subtype: "package",
"package-id": "bitcoind",
target: "tor-address",
interface: "peer",
},
"rpc-tor-address": {
name: "RPC Tor Address",
description: "The Tor address of the RPC interface",
type: "pointer",
subtype: "package",
"package-id": "bitcoind",
target: "tor-address",
interface: "rpc",
},
rpc: {
type: "object",
name: "RPC Settings",
description: "RPC configuration options.",
spec: {
enable: {
type: "boolean",
name: "Enable",
description: "Allow remote RPC requests.",
default: true,
},
username: {
type: "string",
nullable: false,
name: "Username",
description: "The username for connecting to Bitcoin over RPC.",
warning:
"You will need to restart all services that depend on Bitcoin.",
default: "bitcoin",
masked: true,
pattern: "^[a-zA-Z0-9_]+$",
"pattern-description": "Must be alphanumeric (can contain underscore).",
},
password: {
type: "string",
nullable: false,
name: "RPC Password",
description: "The password for connecting to Bitcoin over RPC.",
warning:
"You will need to restart all services that depend on Bitcoin.",
default: {
charset: "a-z,2-7",
len: 20,
},
pattern: "^[a-zA-Z0-9_]+$",
"pattern-description": "Must be alphanumeric (can contain underscore).",
copyable: true,
masked: true,
},
advanced: {
type: "object",
name: "Advanced",
description: "Advanced RPC Settings",
spec: {
auth: {
name: "Authorization",
description:
"Username and hashed password for JSON-RPC connections. RPC clients connect using the usual http basic authentication.",
type: "list",
subtype: "string",
default: [],
spec: {
pattern: "^[a-zA-Z0-9_-]+:([0-9a-fA-F]{2})+\\$([0-9a-fA-F]{2})+$",
"pattern-description":
'Each item must be of the form "<USERNAME>:<SALT>$<HASH>".',
},
range: "[0,*)",
},
servertimeout: {
name: "Rpc Server Timeout",
description:
"Number of seconds after which an uncompleted RPC call will time out.",
type: "number",
nullable: false,
range: "[5,300]",
integral: true,
units: "seconds",
default: 30,
},
threads: {
name: "Threads",
description:
"Set the number of threads for handling RPC calls. You may wish to increase this if you are making lots of calls via an integration.",
type: "number",
nullable: false,
default: 16,
range: "[1,64]",
integral: true,
units: undefined,
},
workqueue: {
name: "Work Queue",
description:
"Set the depth of the work queue to service RPC calls. Determines how long the backlog of RPC requests can get before it just rejects new ones.",
type: "number",
nullable: false,
default: 128,
range: "[8,256]",
integral: true,
units: "requests",
},
},
},
},
},
"zmq-enabled": {
type: "boolean",
name: "ZeroMQ Enabled",
description:
"The ZeroMQ interface is useful for some applications which might require data related to block and transaction events from Bitcoin Core. For example, LND requires ZeroMQ be enabled for LND to get the latest block data",
default: true,
},
txindex: {
type: "boolean",
name: "Transaction Index",
description:
"By enabling Transaction Index (txindex) Bitcoin Core will build a complete transaction index. This allows Bitcoin Core to access any transaction with commands like `gettransaction`.",
default: true,
},
coinstatsindex: {
type: "boolean",
name: "Coinstats Index",
description:
"Enabling Coinstats Index reduces the time for the gettxoutsetinfo RPC to complete at the cost of using additional disk space",
default: false,
},
wallet: {
type: "object",
name: "Wallet",
description: "Wallet Settings",
spec: {
enable: {
name: "Enable Wallet",
description: "Load the wallet and enable wallet RPC calls.",
type: "boolean",
default: true,
},
avoidpartialspends: {
name: "Avoid Partial Spends",
description:
"Group outputs by address, selecting all or none, instead of selecting on a per-output basis. This improves privacy at the expense of higher transaction fees.",
type: "boolean",
default: true,
},
discardfee: {
name: "Discard Change Tolerance",
description:
"The fee rate (in BTC/kB) that indicates your tolerance for discarding change by adding it to the fee.",
type: "number",
nullable: false,
default: 0.0001,
range: "[0,.01]",
integral: false,
units: "BTC/kB",
},
},
},
advanced: {
type: "object",
name: "Advanced",
description: "Advanced Settings",
spec: {
mempool: {
type: "object",
name: "Mempool",
description: "Mempool Settings",
spec: {
persistmempool: {
type: "boolean",
name: "Persist Mempool",
description: "Save the mempool on shutdown and load on restart.",
default: true,
},
maxmempool: {
type: "number",
nullable: false,
name: "Max Mempool Size",
description:
"Keep the transaction memory pool below <n> megabytes.",
range: "[1,*)",
integral: true,
units: "MiB",
default: 300,
},
mempoolexpiry: {
type: "number",
nullable: false,
name: "Mempool Expiration",
description:
"Do not keep transactions in the mempool longer than <n> hours.",
range: "[1,*)",
integral: true,
units: "Hr",
default: 336,
},
mempoolfullrbf: {
name: "Enable Full RBF",
description:
"Policy for your node to use for relaying and mining unconfirmed transactions. For details, see https://github.com/bitcoin/bitcoin/blob/master/doc/release-notes/release-notes-24.0.1.md#notice-of-new-option-for-transaction-replacement-policies",
type: "boolean",
default: true,
},
permitbaremultisig: {
type: "boolean",
name: "Permit Bare Multisig",
description: "Relay non-P2SH multisig transactions",
default: true,
},
datacarrier: {
type: "boolean",
name: "Relay OP_RETURN Transactions",
description: "Relay transactions with OP_RETURN outputs",
default: true,
},
datacarriersize: {
type: "number",
nullable: false,
name: "Max OP_RETURN Size",
description: "Maximum size of data in OP_RETURN outputs to relay",
range: "[0,10000]",
integral: true,
units: "bytes",
default: 83,
},
},
},
peers: {
type: "object",
name: "Peers",
description: "Peer Connection Settings",
spec: {
listen: {
type: "boolean",
name: "Make Public",
description:
"Allow other nodes to find your server on the network.",
default: true,
},
onlyconnect: {
type: "boolean",
name: "Disable Peer Discovery",
description: "Only connect to specified peers.",
default: false,
},
onlyonion: {
type: "boolean",
name: "Disable Clearnet",
description: "Only connect to peers over Tor.",
default: false,
},
v2transport: {
type: "boolean",
name: "Use V2 P2P Transport Protocol",
description:
"Enable or disable the use of BIP324 V2 P2P transport protocol.",
default: false,
},
addnode: {
name: "Add Nodes",
description: "Add addresses of nodes to connect to.",
type: "list",
subtype: "object",
range: "[0,*)",
default: [],
spec: {
spec: {
hostname: {
type: "string",
nullable: false,
name: "Hostname",
description: "Domain or IP address of bitcoin peer",
pattern:
"(^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$)|((^(([0-9a-fA-F]{1,4}:){7,7}[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,7}:|([0-9a-fA-F]{1,4}:){1,6}:[0-9a-fA-F]{1,4}|([0-9a-fA-F]{1,4}:){1,5}(:[0-9a-fA-F]{1,4}){1,2}|([0-9a-fA-F]{1,4}:){1,4}(:[0-9a-fA-F]{1,4}){1,3}|([0-9a-fA-F]{1,4}:){1,3}(:[0-9a-fA-F]{1,4}){1,4}|([0-9a-fA-F]{1,4}:){1,2}(:[0-9a-fA-F]{1,4}){1,5}|[0-9a-fA-F]{1,4}:((:[0-9a-fA-F]{1,4}){1,6})|:((:[0-9a-fA-F]{1,4}){1,7}|:)|fe80:(:[0-9a-fA-F]{0,4}){0,4}%[0-9a-zA-Z]{1,}|::(ffff(:0{1,4}){0,1}:){0,1}((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])|([0-9a-fA-F]{1,4}:){1,4}:((25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9])\\.){3,3}(25[0-5]|(2[0-4]|1{0,1}[0-9]){0,1}[0-9]))$)|(^[a-z2-7]{16}\\.onion$)|(^([a-z0-9]([a-z0-9-]{0,61}[a-z0-9])?\\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9]$))",
"pattern-description":
"Must be either a domain name, or an IPv4 or IPv6 address. Do not include protocol scheme (eg 'http://') or port.",
},
port: {
type: "number",
nullable: true,
name: "Port",
description:
"Port that peer is listening on for inbound p2p connections",
range: "[0,65535]",
integral: true,
},
},
},
},
},
},
pruning: {
type: "union",
name: "Pruning Settings",
description:
"Blockchain Pruning Options\nReduce the blockchain size on disk\n",
warning:
"Disabling pruning will convert your node into a full archival node. This requires a resync of the entire blockchain, a process that may take several days.\n",
tag: {
id: "mode",
name: "Pruning Mode",
description:
"- Disabled: Disable pruning\n- Automatic: Limit blockchain size on disk to a certain number of megabytes\n",
"variant-names": {
disabled: "Disabled",
automatic: "Automatic",
},
},
variants: {
disabled: {},
automatic: {
size: {
type: "number",
nullable: false,
name: "Max Chain Size",
description: "Limit of blockchain size on disk.",
warning:
"Increasing this value will require re-syncing your node.",
default: 550,
range: "[550,1000000)",
integral: true,
units: "MiB",
},
},
},
default: "disabled",
},
dbcache: {
type: "number",
nullable: true,
name: "Database Cache",
description:
"How much RAM to allocate for caching the TXO set. Higher values improve syncing performance, but increase your chance of using up all your system's memory or corrupting your database in the event of an ungraceful shutdown. Set this high but comfortably below your system's total RAM during IBD, then turn down to 450 (or leave blank) once the sync completes.",
warning:
"WARNING: Increasing this value results in a higher chance of ungraceful shutdowns, which can leave your node unusable if it happens during the initial block download. Use this setting with caution. Be sure to set this back to the default (450 or leave blank) once your node is synced. DO NOT press the STOP button if your dbcache is large. Instead, set this number back to the default, hit save, and wait for bitcoind to restart on its own.",
range: "(0,*)",
integral: true,
units: "MiB",
},
blockfilters: {
type: "object",
name: "Block Filters",
description: "Settings for storing and serving compact block filters",
spec: {
blockfilterindex: {
type: "boolean",
name: "Compute Compact Block Filters (BIP158)",
description:
"Generate Compact Block Filters during initial sync (IBD) to enable 'getblockfilter' RPC. This is useful if dependent services need block filters to efficiently scan for addresses/transactions etc.",
default: true,
},
peerblockfilters: {
type: "boolean",
name: "Serve Compact Block Filters to Peers (BIP157)",
description:
"Serve Compact Block Filters as a peer service to other nodes on the network. This is useful if you wish to connect an SPV client to your node to make it efficient to scan transactions without having to download all block data. 'Compute Compact Block Filters (BIP158)' is required.",
default: false,
},
},
},
bloomfilters: {
type: "object",
name: "Bloom Filters (BIP37)",
description: "Setting for serving Bloom Filters",
spec: {
peerbloomfilters: {
type: "boolean",
name: "Serve Bloom Filters to Peers",
description:
"Peers have the option of setting filters on each connection they make after the version handshake has completed. Bloom filters are for clients implementing SPV (Simplified Payment Verification) that want to check that block headers connect together correctly, without needing to verify the full blockchain. The client must trust that the transactions in the chain are in fact valid. It is highly recommended AGAINST using for anything except Bisq integration.",
warning:
"This is ONLY for use with Bisq integration, please use Block Filters for all other applications.",
default: false,
},
},
},
},
},
}

View File

@@ -0,0 +1,127 @@
// Config spec (StartOS 0.3.x format) for the Start9 Pages package.
// Two top-level values: the homepage behavior (a tagged union) and the list
// of subdomain sites to serve. Appears to be a test fixture mirroring the
// real package's spec — TODO confirm against the wrapper repo.
export default {
  homepage: {
    name: "Homepage",
    description:
      "The page that will be displayed when your Start9 Pages .onion address is visited. Since this page is technically publicly accessible, you can choose to which type of page to display.",
    type: "union",
    default: "welcome",
    // `tag.id` names the discriminant key; "variant-names" are the UI labels.
    tag: {
      id: "type",
      name: "Type",
      "variant-names": {
        welcome: "Welcome",
        index: "Table of Contents",
        "web-page": "Web Page",
        redirect: "Redirect",
      },
    },
    variants: {
      // "welcome" and "index" need no extra configuration.
      welcome: {},
      index: {},
      "web-page": {
        source: {
          name: "Folder Location",
          description: "The service that contains your website files.",
          type: "enum",
          values: ["filebrowser", "nextcloud"],
          "value-names": {},
          default: "nextcloud",
        },
        folder: {
          type: "string",
          name: "Folder Path",
          placeholder: "e.g. websites/resume",
          description:
            'The path to the folder that contains the static files of your website. For example, a value of "projects/resume" would tell Start9 Pages to look for that folder path in the selected service.',
          // Relative-path validation; disallows leading "/" and bare "..".
          pattern:
            "^(\\.|[a-zA-Z0-9_ -][a-zA-Z0-9_ .-]*|([a-zA-Z0-9_ .-][a-zA-Z0-9_ -]+\\.*)+)(/[a-zA-Z0-9_ -][a-zA-Z0-9_ .-]*|/([a-zA-Z0-9_ .-][a-zA-Z0-9_ -]+\\.*)+)*/?$",
          "pattern-description": "Must be a valid relative file path",
          nullable: false,
        },
      },
      redirect: {
        target: {
          type: "string",
          name: "Target Subdomain",
          description:
            "The name of the subdomain to redirect users to. This must be a valid subdomain site within your Start9 Pages.",
          pattern: "^[a-z-]+$",
          "pattern-description":
            "May contain only lowercase characters and hyphens.",
          nullable: false,
        },
      },
    },
  },
  subdomains: {
    type: "list",
    name: "Subdomains",
    description: "The websites you want to serve.",
    default: [],
    range: "[0, *)",
    subtype: "object",
    spec: {
      // List entries are deduplicated by subdomain name and rendered by name.
      "unique-by": "name",
      "display-as": "{{name}}",
      spec: {
        name: {
          type: "string",
          nullable: false,
          name: "Subdomain name",
          description:
            'The subdomain of your Start9 Pages .onion address to host the website on. For example, a value of "me" would produce a website hosted at http://me.xxxxxx.onion.',
          pattern: "^[a-z-]+$",
          "pattern-description":
            "May contain only lowercase characters and hyphens",
        },
        // Per-subdomain behavior: same web-page/redirect union as the homepage.
        settings: {
          type: "union",
          name: "Settings",
          description:
            "The desired behavior you want to occur when the subdomain is visited. You can either redirect to another subdomain, or load a stored web page.",
          default: "web-page",
          tag: {
            id: "type",
            name: "Type",
            "variant-names": { "web-page": "Web Page", redirect: "Redirect" },
          },
          variants: {
            "web-page": {
              source: {
                name: "Folder Location",
                description: "The service that contains your website files.",
                type: "enum",
                values: ["filebrowser", "nextcloud"],
                "value-names": {},
                default: "nextcloud",
              },
              folder: {
                type: "string",
                name: "Folder Path",
                placeholder: "e.g. websites/resume",
                description:
                  'The path to the folder that contains the website files. For example, a value of "projects/resume" would tell Start9 Pages to look for that folder path in the selected service.',
                pattern:
                  "^(\\.|[a-zA-Z0-9_ -][a-zA-Z0-9_ .-]*|([a-zA-Z0-9_ .-][a-zA-Z0-9_ -]+\\.*)+)(/[a-zA-Z0-9_ -][a-zA-Z0-9_ .-]*|/([a-zA-Z0-9_ .-][a-zA-Z0-9_ -]+\\.*)+)*/?$",
                "pattern-description": "Must be a valid relative file path",
                nullable: false,
              },
            },
            redirect: {
              target: {
                type: "string",
                name: "Target Subdomain",
                description:
                  "The subdomain of your Start9 Pages .onion address to redirect to. This should be the name of another subdomain on Start9 Pages. Leave empty to redirect to the homepage.",
                pattern: "^[a-z-]+$",
                "pattern-description":
                  "May contain only lowercase characters and hyphens.",
                nullable: false,
              },
            },
          },
        },
      },
    },
  },
}

View File

@@ -0,0 +1,123 @@
// Gitea 1.22.0 package manifest (StartOS 0.3.x / "eos-version" 0.3.5.1 format).
// Presumably the `__fixtures__/giteaManifest` consumed by matchManifest.test —
// TODO confirm the file path. All procedures are script-based except backup,
// which uses the system "compat" docker image with duplicity.
export default {
  "eos-version": "0.3.5.1",
  id: "gitea",
  "git-hash": "91fada3edf30357a2e75c281d32f8888c87fcc2d\n",
  title: "Gitea",
  version: "1.22.0",
  description: {
    short: "A painless self-hosted Git service.",
    long: "Gitea is a community managed lightweight code hosting solution written in Go. It is published under the MIT license.\n",
  },
  assets: {
    license: "LICENSE",
    instructions: "instructions.md",
    icon: "icon.png",
    "docker-images": null,
    assets: null,
    scripts: null,
  },
  build: ["make"],
  "release-notes":
    "* Upstream code update\n* Fix deprecated config options\n* Full list of upstream changes available [here](https://github.com/go-gitea/gitea/compare/v1.21.8...v1.22.0)\n",
  license: "MIT",
  "wrapper-repo": "https://github.com/Start9Labs/gitea-startos",
  "upstream-repo": "https://github.com/go-gitea/gitea",
  "support-site": "https://docs.gitea.io/en-us/",
  "marketing-site": "https://gitea.io/en-us/",
  "donation-url": null,
  alerts: {
    install: null,
    uninstall: null,
    restore: null,
    start: null,
    stop: null,
  },
  // Main container: runs the service entrypoint with the data volume at /data.
  main: {
    type: "docker",
    image: "main",
    system: false,
    entrypoint: "/usr/local/bin/docker_entrypoint.sh",
    args: [],
    inject: false,
    mounts: { main: "/data" },
    "io-format": null,
    "sigterm-timeout": null,
    "shm-size-mb": null,
    "gpu-acceleration": false,
  },
  "health-checks": {
    "user-signups-off": {
      name: "User Signups Off",
      "success-message": null,
      type: "script",
      args: [],
      timeout: null,
    },
    web: {
      name: "Web & Git HTTP Tor Interfaces",
      "success-message":
        "Gitea is ready to be visited in a web browser and git can be used with SSH over TOR.",
      type: "script",
      args: [],
      timeout: null,
    },
  },
  config: {
    get: { type: "script", args: [] },
    set: { type: "script", args: [] },
  },
  properties: { type: "script", args: [] },
  volumes: { main: { type: "data" } },
  interfaces: {
    main: {
      name: "Web UI / Git HTTPS/SSH",
      description:
        "Port 80: Browser Interface and HTTP Git Interface / Port 22: Git SSH Interface",
      // Tor maps onion 22 -> container 22 and onion 80 -> container 3000.
      "tor-config": { "port-mapping": { "22": "22", "80": "3000" } },
      "lan-config": { "443": { ssl: true, internal: 3000 } },
      ui: true,
      protocols: ["tcp", "http", "ssh", "git"],
    },
  },
  backup: {
    create: {
      type: "docker",
      image: "compat",
      system: true,
      entrypoint: "compat",
      args: ["duplicity", "create", "/mnt/backup", "/root/data"],
      inject: false,
      mounts: { BACKUP: "/mnt/backup", main: "/root/data" },
      "io-format": "yaml",
      "sigterm-timeout": null,
      "shm-size-mb": null,
      "gpu-acceleration": false,
    },
    restore: {
      type: "docker",
      image: "compat",
      system: true,
      entrypoint: "compat",
      args: ["duplicity", "restore", "/mnt/backup", "/root/data"],
      inject: false,
      mounts: { BACKUP: "/mnt/backup", main: "/root/data" },
      "io-format": "yaml",
      "sigterm-timeout": null,
      "shm-size-mb": null,
      "gpu-acceleration": false,
    },
  },
  // "*" migrations: a single script handles migrating from/to any version.
  migrations: {
    from: { "*": { type: "script", args: ["from"] } },
    to: { "*": { type: "script", args: ["to"] } },
  },
  actions: {},
  dependencies: {},
  containers: null,
  replaces: [],
  "hardware-requirements": {
    device: {},
    ram: null,
    arch: ["x86_64", "aarch64"],
  },
}

View File

@@ -0,0 +1,28 @@
// Config spec for the Nostr Wallet Connect package: two read-only pointers
// into the package's own interface addresses, plus the relay URL to use.
export default {
  // Pointer: resolved by the host to this package's main-interface Tor address.
  "tor-address": {
    name: "Tor Address",
    description: "The Tor address of the network interface",
    type: "pointer",
    subtype: "package",
    "package-id": "nostr-wallet-connect",
    target: "tor-address",
    interface: "main",
  },
  // Pointer: resolved by the host to this package's main-interface LAN address.
  "lan-address": {
    name: "LAN Address",
    description: "The LAN address of the network interface",
    type: "pointer",
    subtype: "package",
    "package-id": "nostr-wallet-connect",
    target: "lan-address",
    interface: "main",
  },
  "nostr-relay": {
    type: "string",
    name: "Nostr Relay",
    default: "wss://relay.getalby.com/v1",
    description: "The Nostr Relay to use for Nostr Wallet Connect connections",
    copyable: true,
    nullable: false,
  },
}

View File

@@ -0,0 +1,187 @@
// Config spec for the nostr-rs-relay package: interface-address pointers plus
// a private/public relay-type union with whitelist (private) or info + limits
// (public) sub-specs.
export default {
  "tor-address": {
    name: "Tor Address",
    description: "The Tor address for the websocket server.",
    type: "pointer",
    subtype: "package",
    "package-id": "nostr",
    target: "tor-address",
    interface: "websocket",
  },
  "lan-address": {
    // NOTE(review): label says "Tor Address" but this entry targets the LAN
    // address — looks like a copy-paste slip; confirm before changing fixture.
    name: "Tor Address",
    description: "The LAN address for the websocket server.",
    type: "pointer",
    subtype: "package",
    "package-id": "nostr",
    target: "lan-address",
    interface: "websocket",
  },
  "relay-type": {
    type: "union",
    name: "Relay Type",
    warning:
      "Running a public relay carries risk. Your relay can be spammed, resulting in large amounts of disk usage.",
    tag: {
      id: "type",
      name: "Relay Type",
      description:
        "Private or public. A private relay (highly recommended) restricts write access to specific pubkeys. Anyone can write to a public relay.",
      "variant-names": { private: "Private", public: "Public" },
    },
    default: "private",
    variants: {
      private: {
        // At least one pubkey is required (range "[1,*)").
        pubkey_whitelist: {
          name: "Pubkey Whitelist (hex)",
          description:
            "A list of pubkeys that are permitted to publish through your relay. A minimum, you need to enter your own Nostr hex (not npub) pubkey. Go to https://damus.io/key/ to convert from npub to hex.",
          type: "list",
          range: "[1,*)",
          subtype: "string",
          spec: {
            placeholder: "hex (not npub) pubkey",
            // NOTE(review): pattern is unanchored (no ^...$); whether it
            // rejects longer strings depends on the validator's match mode.
            pattern: "[0-9a-fA-F]{64}",
            "pattern-description":
              "Must be a valid 64-digit hexadecimal value (ie a Nostr hex pubkey, not an npub). Go to https://damus.io/key/ to convert npub to hex.",
          },
          default: [],
        },
      },
      public: {
        info: {
          name: "Relay Info",
          description: "General public info about your relay",
          type: "object",
          spec: {
            name: {
              name: "Relay Name",
              description: "Your relay's human-readable identifier",
              type: "string",
              nullable: true,
              placeholder: "Bob's Public Relay",
              pattern: ".{3,32}",
              "pattern-description":
                "Must be at least 3 character and no more than 32 characters",
              masked: false,
            },
            description: {
              name: "Relay Description",
              description: "A more detailed description for your relay",
              type: "string",
              nullable: true,
              placeholder: "The best relay in town",
              pattern: ".{6,256}",
              "pattern-description":
                "Must be at least 6 character and no more than 256 characters",
              masked: false,
            },
            pubkey: {
              name: "Admin contact pubkey (hex)",
              description:
                "The Nostr hex (not npub) pubkey of the relay administrator",
              type: "string",
              nullable: true,
              placeholder: "hex (not npub) pubkey",
              pattern: "[0-9a-fA-F]{64}",
              "pattern-description":
                "Must be a valid 64-digit hexadecimal value (ie a Nostr hex pubkey, not an npub). Go to https://damus.io/key/ to convert npub to hex.",
              masked: false,
            },
            contact: {
              name: "Admin contact email",
              description: "The email address of the relay administrator",
              type: "string",
              nullable: true,
              // NOTE(review): the "." before the TLD is unescaped, so it
              // matches any character (e.g. "a@b_c"); likely meant "\\.".
              // Left as-is since this mirrors the shipped spec — confirm.
              pattern: "[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+.[a-zA-Z0-9-.]+",
              "pattern-description": "Must be a valid email address.",
              masked: false,
            },
          },
        },
        limits: {
          name: "Limits",
          description:
            "Data limits to protect your relay from using too many resources",
          type: "object",
          spec: {
            messages_per_sec: {
              name: "Messages Per Second Limit",
              description:
                "Limit events created per second, averaged over one minute. Note: this is for the server as a whole, not per connection.",
              type: "number",
              nullable: false,
              range: "[1,*)",
              integral: true,
              default: 2,
              units: "messages/sec",
            },
            subscriptions_per_min: {
              name: "Subscriptions Per Minute Limit",
              description:
                "Limit client subscriptions created per second, averaged over one minute. Strongly recommended to set this to a low value such as 10 to ensure fair service.",
              type: "number",
              nullable: false,
              range: "[1,*)",
              integral: true,
              default: 10,
              units: "subscriptions",
            },
            max_blocking_threads: {
              name: "Max Blocking Threads",
              description:
                "Maximum number of blocking threads used for database connections.",
              type: "number",
              nullable: false,
              range: "[0,*)",
              integral: true,
              units: "threads",
              default: 16,
            },
            max_event_bytes: {
              name: "Max Event Size",
              description:
                "Limit the maximum size of an EVENT message. Set to 0 for unlimited",
              type: "number",
              nullable: false,
              range: "[0,*)",
              integral: true,
              units: "bytes",
              default: 131072,
            },
            max_ws_message_bytes: {
              name: "Max Websocket Message Size",
              description: "Maximum WebSocket message in bytes.",
              type: "number",
              nullable: false,
              range: "[0,*)",
              integral: true,
              units: "bytes",
              default: 131072,
            },
            max_ws_frame_bytes: {
              name: "Max Websocket Frame Size",
              description: "Maximum WebSocket frame size in bytes.",
              type: "number",
              nullable: false,
              range: "[0,*)",
              integral: true,
              units: "bytes",
              default: 131072,
            },
            event_kind_blacklist: {
              name: "Event Kind Blacklist",
              description:
                "Events with these kinds will be discarded. For a list of event kinds, see here: https://github.com/nostr-protocol/nips#event-kinds",
              type: "list",
              range: "[0,*)",
              subtype: "number",
              spec: { integral: true, placeholder: 30023, range: "(0,100000]" },
              default: [],
            },
          },
        },
      },
    },
  },
}

View File

@@ -0,0 +1,39 @@
// Config spec for the SearXNG package: instance name, Tor-as-base-URL toggle,
// and a stats toggle. The trailing commented-out entries (email / public host)
// are retained from the original for future reverse-proxy support.
export default {
  "instance-name": {
    type: "string",
    name: "SearXNG Instance Name",
    description:
      "Enter a name for your SearXNG instance. This is the name that will be listed if you want to share your SearXNG engine publicly.",
    nullable: false,
    default: "My SearXNG Engine",
    placeholder: "Uncle Jim SearXNG Engine",
  },
  "tor-url": {
    name: "Enable Tor address as the base URL",
    description:
      "Activates the utilization of a .onion address as the primary URL, particularly beneficial for publicly hosted instances over the Tor network.",
    type: "boolean",
    default: false,
  },
  "enable-metrics": {
    name: "Enable Stats",
    description:
      "Your SearXNG instance will collect anonymous stats about its own usage and performance. You can view these metrics by appending `/stats` or `/stats/errors` to your SearXNG URL.",
    type: "boolean",
    default: true,
  }, //,
  // "email-address": {
  //   "type": "string",
  //   "name": "Email Address",
  //   "description": "Your Email address - required to create an SSL certificate.",
  //   "nullable": false,
  //   "default": "youremail@domain.com",
  // },
  // "public-host": {
  //   "type": "string",
  //   "name": "Public Domain Name",
  //   "description": "Enter a domain name here if you want to share your SearXNG engine publicly. You will also need to modify your domain name's DNS settings to point to your Start9 server.",
  //   "nullable": true,
  //   "placeholder": "https://search.mydomain.com"
  // }
}

View File

@@ -0,0 +1,191 @@
// Synapse 1.98.0 package manifest (StartOS 0.3.x format). Presumably the
// `__fixtures__/synapseManifest` consumed by matchManifest.test — TODO confirm
// the file path. Unlike the gitea fixture, health checks here are docker
// procedures (inject: true) rather than scripts, and there are two interfaces
// (homeserver + admin portal).
export default {
  id: "synapse",
  title: "Synapse",
  version: "1.98.0",
  "release-notes":
    "* Upstream code update\n* Synapse Admin updated to the latest version - ([full changelog](https://github.com/Awesome-Technologies/synapse-admin/compare/0.8.7...0.9.1))\n* Instructions update\n* Updated package and upstream repositories links\n* Full list of upstream changes available [here](https://github.com/element-hq/synapse/compare/v1.95.1...v1.98.0)\n",
  license: "Apache-2.0",
  "wrapper-repo": "https://github.com/Start9Labs/synapse-startos",
  "upstream-repo": "https://github.com/element-hq/synapse",
  "support-site": "https://github.com/element-hq/synapse/issues",
  "marketing-site": "https://matrix.org/",
  build: ["make"],
  description: {
    short:
      "Synapse is a battle-tested implementation of the Matrix protocol, the killer of all messaging apps.",
    long: "Synapse is the battle-tested, reference implementation of the Matrix protocol. Matrix is a next-generation, federated, full-featured, encrypted, independent messaging system. There are no trusted third parties involved. (see matrix.org for details).",
  },
  assets: {
    license: "LICENSE",
    icon: "icon.png",
    instructions: "instructions.md",
  },
  // Main container mounts the data volume plus TLS certs for both interfaces.
  main: {
    type: "docker",
    image: "main",
    entrypoint: "docker_entrypoint.sh",
    args: [],
    mounts: {
      main: "/data",
      cert: "/mnt/cert",
      "admin-cert": "/mnt/admin-cert",
    },
  },
  // Docker-based health checks, all injected into the running main container.
  "health-checks": {
    federation: {
      name: "Federation",
      type: "docker",
      image: "main",
      system: false,
      entrypoint: "check-federation.sh",
      args: [],
      mounts: {},
      "io-format": "json",
      inject: true,
    },
    "synapse-admin": {
      name: "Admin interface",
      "success-message":
        "Synapse Admin is ready to be visited in a web browser.",
      type: "docker",
      image: "main",
      system: false,
      entrypoint: "check-ui.sh",
      args: [],
      mounts: {},
      "io-format": "yaml",
      inject: true,
    },
    "user-signups-off": {
      name: "User Signups Off",
      type: "docker",
      image: "main",
      system: false,
      entrypoint: "user-signups-off.sh",
      args: [],
      mounts: {},
      "io-format": "yaml",
      inject: true,
    },
  },
  config: {
    get: {
      type: "script",
    },
    set: {
      type: "script",
    },
  },
  properties: {
    type: "script",
  },
  volumes: {
    main: {
      type: "data",
    },
    cert: {
      type: "certificate",
      "interface-id": "main",
    },
    "admin-cert": {
      type: "certificate",
      "interface-id": "admin",
    },
  },
  alerts: {
    start:
      "After your first run, Synapse needs a little time to establish a stable TOR connection over federation. We kindly ask for your patience during this process. Remember, great things take time! 🕒",
  },
  interfaces: {
    // Homeserver interface: Tor only (no lan-config), no web UI.
    main: {
      name: "Homeserver Address",
      description:
        "Used by clients and other servers to connect with your homeserver",
      "tor-config": {
        "port-mapping": {
          "80": "80",
          "443": "443",
          "8448": "8448",
        },
      },
      ui: false,
      protocols: ["tcp", "http", "matrix"],
    },
    // Admin portal: Tor + LAN, served as a web UI.
    admin: {
      name: "Admin Portal",
      description: "A web application for administering your Synapse server",
      "tor-config": {
        "port-mapping": {
          "80": "8080",
          "443": "4433",
        },
      },
      "lan-config": {
        "443": {
          ssl: true,
          internal: 8080,
        },
      },
      ui: true,
      protocols: ["tcp", "http"],
    },
  },
  dependencies: {},
  backup: {
    create: {
      type: "docker",
      image: "compat",
      system: true,
      entrypoint: "compat",
      args: ["duplicity", "create", "/mnt/backup", "/data"],
      mounts: {
        BACKUP: "/mnt/backup",
        main: "/data",
      },
    },
    restore: {
      type: "docker",
      image: "compat",
      system: true,
      entrypoint: "compat",
      args: ["duplicity", "restore", "/mnt/backup", "/data"],
      mounts: {
        BACKUP: "/mnt/backup",
        main: "/data",
      },
    },
  },
  actions: {
    "reset-first-user": {
      name: "Reset First User",
      description:
        "This action will reset the password of the first user in your database to a random value.",
      // Only runnable while the service is stopped.
      "allowed-statuses": ["stopped"],
      implementation: {
        type: "docker",
        image: "main",
        system: false,
        entrypoint: "docker_entrypoint.sh",
        args: ["reset-first-user"],
        mounts: {
          main: "/data",
        },
        "io-format": "json",
      },
    },
  },
  migrations: {
    from: {
      "*": {
        type: "script",
        args: ["from"],
      },
    },
    to: {
      "*": {
        type: "script",
        args: ["to"],
      },
    },
  },
}

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,12 @@
import { matchManifest } from "./matchManifest"
import giteaManifest from "./__fixtures__/giteaManifest"
import synapseManifest from "./__fixtures__/synapseManifest"
// Smoke tests: each fixture manifest must pass matchManifest validation.
// unsafeCast throws if the value does not satisfy the matcher.
describe("matchManifest", () => {
  // Fix: test name typo "gittea" -> "gitea" (matches the fixture's package id).
  test("gitea", () => {
    matchManifest.unsafeCast(giteaManifest)
  })
  test("synapse", () => {
    matchManifest.unsafeCast(synapseManifest)
  })
})

View File

@@ -0,0 +1,136 @@
import {
object,
literal,
string,
array,
boolean,
dictionary,
literals,
number,
unknown,
some,
every,
} from "ts-matches"
import { matchVolume } from "./matchVolume"
import { matchDockerProcedure } from "../../../Models/DockerProcedure"
// Matcher for a JS/script procedure: { type: "script", args?: unknown[] }.
// The second argument lists optional keys; the third supplies defaults, so a
// missing `args` is filled in as [].
const matchJsProcedure = object(
  {
    type: literal("script"),
    args: array(unknown),
  },
  ["args"],
  {
    args: [],
  },
)
// A procedure is either a docker procedure or a script procedure.
const matchProcedure = some(matchDockerProcedure, matchJsProcedure)
export type Procedure = typeof matchProcedure._TYPE
// Matcher for a manifest action entry: name/description/implementation plus
// the service statuses in which the action may run. `warning` and
// `input-spec` are optional keys.
const matchAction = object(
  {
    name: string,
    description: string,
    warning: string,
    implementation: matchProcedure,
    "allowed-statuses": array(literals("running", "stopped")),
    "input-spec": unknown,
  },
  // Fix: the optional-keys list previously contained "input-spec" twice;
  // the duplicate was redundant, so removing it preserves behavior.
  ["warning", "input-spec"],
)
// Matcher validating a legacy (0.3.x) package manifest. The trailing list
// marks `config`, `actions`, `properties`, `migrations`, and `dependencies`
// as optional top-level keys; everything else is required.
export const matchManifest = object(
  {
    id: string,
    title: string,
    version: string,
    // The main entrypoint must be a docker procedure (scripts not allowed).
    main: matchDockerProcedure,
    assets: object(
      {
        assets: string,
        scripts: string,
      },
      ["assets", "scripts"],
    ),
    // Each health check is a procedure augmented with a name and an optional
    // success message (`every` intersects the two matchers).
    "health-checks": dictionary([
      string,
      every(
        matchProcedure,
        object(
          {
            name: string,
            ["success-message"]: string,
          },
          ["success-message"],
        ),
      ),
    ]),
    config: object({
      get: matchProcedure,
      set: matchProcedure,
    }),
    properties: matchProcedure,
    volumes: dictionary([string, matchVolume]),
    interfaces: dictionary([
      string,
      object(
        {
          name: string,
          description: string,
          "tor-config": object({
            "port-mapping": dictionary([string, string]),
          }),
          "lan-config": dictionary([
            string,
            object({
              ssl: boolean,
              internal: number,
            }),
          ]),
          ui: boolean,
          protocols: array(string),
        },
        // An interface may be Tor-only or LAN-only.
        ["lan-config", "tor-config"],
      ),
    ]),
    backup: object({
      create: matchProcedure,
      restore: matchProcedure,
    }),
    migrations: object({
      to: dictionary([string, matchProcedure]),
      from: dictionary([string, matchProcedure]),
    }),
    dependencies: dictionary([
      string,
      object(
        {
          version: string,
          // How the dependency is required: opt-in, opt-out, or mandatory.
          requirement: some(
            object({
              type: literal("opt-in"),
              how: string,
            }),
            object({
              type: literal("opt-out"),
              how: string,
            }),
            object({
              type: literal("required"),
            }),
          ),
          description: string,
          config: object({
            check: matchProcedure,
            "auto-configure": matchProcedure,
          }),
        },
        ["description", "config"],
      ),
    ]),
    actions: dictionary([string, matchAction]),
  },
  ["config", "actions", "properties", "migrations", "dependencies"],
)
export type Manifest = typeof matchManifest._TYPE

View File

@@ -0,0 +1,35 @@
import { object, literal, string, boolean, some } from "ts-matches"
// Persistent data volume; `readonly` is optional.
const matchDataVolume = object(
  {
    type: literal("data"),
    readonly: boolean,
  },
  ["readonly"],
)
// Read-only assets shipped with the package.
const matchAssetVolume = object({
  type: literal("assets"),
})
// Pointer into another package's volume at a given path.
const matchPointerVolume = object({
  type: literal("pointer"),
  "package-id": string,
  "volume-id": string,
  path: string,
  readonly: boolean,
})
// TLS certificate material for a named interface.
const matchCertificateVolume = object({
  type: literal("certificate"),
  "interface-id": string,
})
// Backup mount target.
const matchBackupVolume = object({
  type: literal("backup"),
  readonly: boolean,
})
// A volume is any one of the five variants above.
export const matchVolume = some(
  matchDataVolume,
  matchAssetVolume,
  matchPointerVolume,
  matchCertificateVolume,
  matchBackupVolume,
)
export type Volume = typeof matchVolume._TYPE

View File

@@ -0,0 +1,477 @@
// deno-lint-ignore no-namespace
/** The full set of exports a v2 embassy package script is expected to provide. */
export type ExpectedExports = {
  version: 2
  /** Set configuration is called after we have modified and saved the configuration in the embassy ui. Use this to make a file for the docker to read from for configuration. */
  setConfig: (effects: Effects, input: Config) => Promise<ResultType<SetResult>>
  /** Get configuration returns a shape that describes the format that the embassy ui will generate, and later send to the set config */
  getConfig: (effects: Effects) => Promise<ResultType<ConfigRes>>
  /** These are how we make sure the our dependency configurations are valid and if not how to fix them. */
  dependencies: Dependencies
  /** For backing up service data though the embassyOS UI */
  createBackup: (effects: Effects) => Promise<ResultType<unknown>>
  /** For restoring service data that was previously backed up using the embassyOS UI create backup flow. Backup restores are also triggered via the embassyOS UI, or doing a system restore flow during setup. */
  restoreBackup: (effects: Effects) => Promise<ResultType<unknown>>
  /** Properties are used to get values from the docker, like a username + password, what ports we are hosting from */
  properties: (effects: Effects) => Promise<ResultType<Properties>>
  /** One handler per health check, keyed by health-check id; receives the current time in ms. */
  health: {
    /** Should be the health check id */
    [id: string]: (
      effects: Effects,
      dateMs: number,
    ) => Promise<ResultType<unknown>>
  }
  /** Runs a migration to/from the given version; extra args are migration-specific. */
  migration: (
    effects: Effects,
    version: string,
    ...args: unknown[]
  ) => Promise<ResultType<MigrationRes>>
  /** One handler per action id; optionally receives the current config. */
  action: {
    [id: string]: (
      effects: Effects,
      config?: Config,
    ) => Promise<ResultType<ActionResult>>
  }
  /**
   * This is the entrypoint for the main container. Used to start up something like the service that the
   * package represents, like running a bitcoind in a bitcoind-wrapper.
   */
  main: (effects: Effects) => Promise<ResultType<unknown>>
}
/** Used to reach out from the pure js runtime */
export type Effects = {
  /** Usable when not sandboxed */
  writeFile(input: {
    path: string
    volumeId: string
    toWrite: string
  }): Promise<void>
  readFile(input: { volumeId: string; path: string }): Promise<string>
  metadata(input: { volumeId: string; path: string }): Promise<Metadata>
  /** Create a directory. Usable when not sandboxed */
  createDir(input: { volumeId: string; path: string }): Promise<string>
  readDir(input: { volumeId: string; path: string }): Promise<string[]>
  /** Remove a directory. Usable when not sandboxed */
  removeDir(input: { volumeId: string; path: string }): Promise<string>
  removeFile(input: { volumeId: string; path: string }): Promise<void>
  /** Write a json file into an object. Usable when not sandboxed */
  writeJsonFile(input: {
    volumeId: string
    path: string
    toWrite: Record<string, unknown>
  }): Promise<void>
  /** Read a json file into an object */
  readJsonFile(input: {
    volumeId: string
    path: string
  }): Promise<Record<string, unknown>>
  /** Run a one-shot command; result wraps stdout or an error. */
  runCommand(input: {
    command: string
    args?: string[]
    timeoutMillis?: number
  }): Promise<ResultType<string>>
  /** Start a long-running process; `wait` resolves on exit, `term` kills it. */
  runDaemon(input: { command: string; args?: string[] }): {
    wait(): Promise<ResultType<string>>
    term(): Promise<void>
  }
  chown(input: { volumeId: string; path: string; uid: string }): Promise<null>
  chmod(input: { volumeId: string; path: string; mode: string }): Promise<null>
  sleep(timeMs: number): Promise<null>
  /** Log at the trace level */
  trace(whatToPrint: string): void
  /** Log at the warn level */
  warn(whatToPrint: string): void
  /** Log at the error level */
  error(whatToPrint: string): void
  /** Log at the debug level */
  debug(whatToPrint: string): void
  /** Log at the info level */
  info(whatToPrint: string): void
  /** Sandbox mode lets us read but not write */
  is_sandboxed(): boolean
  // Does a volume and path exist?
  exists(input: { volumeId: string; path: string }): Promise<boolean>
  /** Minimal fetch: response body is only readable via text()/json() or the optional `body` field. */
  fetch(
    url: string,
    options?: {
      method?: "GET" | "POST" | "PUT" | "DELETE" | "HEAD" | "PATCH"
      headers?: Record<string, string>
      body?: string
    },
  ): Promise<{
    method: string
    ok: boolean
    status: number
    headers: Record<string, string>
    body?: string | null
    /// Returns the body as a string
    text(): Promise<string>
    /// Returns the body as a json
    json(): Promise<unknown>
  }>
  diskUsage(options?: {
    volumeId: string
    path: string
  }): Promise<{ used: number; total: number }>
  /** Kick off an rsync between volumes; returns handles to query id/progress and await completion. */
  runRsync(options: {
    srcVolume: string
    dstVolume: string
    srcPath: string
    dstPath: string
    // rsync options: https://linux.die.net/man/1/rsync
    options: BackupOptions
  }): {
    id: () => Promise<string>
    wait: () => Promise<null>
    progress: () => Promise<number>
  }
}
// rsync options: https://linux.die.net/man/1/rsync
export type BackupOptions = {
  delete: boolean
  force: boolean
  ignoreExisting: boolean
  exclude: string[]
}
/** File metadata as returned by Effects.metadata (mirrors fs.Stats fields). */
export type Metadata = {
  fileType: string
  isDir: boolean
  isFile: boolean
  isSymlink: boolean
  len: number
  modified?: Date
  accessed?: Date
  created?: Date
  readonly: boolean
  uid: number
  gid: number
  mode: number
}
/** Result of a migration: whether the service ends up configured. */
export type MigrationRes = {
  configured: boolean
}
/** Result of running an action, displayed in the UI (optionally copyable / as QR). */
export type ActionResult = {
  version: "0"
  message: string
  value?: string
  copyable: boolean
  qr: boolean
}
export type ConfigRes = {
  /** This should be the previous config, that way during set config we start with the previous */
  config?: Config
  /** Shape that is describing the form in the ui */
  spec: ConfigSpec
}
/** An arbitrary saved configuration: free-form key/value pairs. */
export type Config = {
  [propertyName: string]: unknown
}
export type ConfigSpec = {
  /** Given a config value, define what it should render with the following spec */
  [configValue: string]: ValueSpecAny
}
/** Intersection helper: adds a required `default` to a spec type. */
export type WithDefault<T, Default> = T & {
  default: Default
}
/** Intersection helper: adds an optional `default` to a spec type. */
export type WithNullableDefault<T, Default> = T & {
  default?: Default
}
/** Intersection helper: adds the UI name/description/warning fields. */
export type WithDescription<T> = T & {
  description?: string
  name: string
  warning?: string
}
export type WithOptionalDescription<T> = T & {
  /** @deprecated - optional only for backwards compatibility */
  description?: string
  /** @deprecated - optional only for backwards compatibility */
  name?: string
  warning?: string
}
/** A list value spec: element spec plus a length range string (e.g. "[0,*)"). */
export type ListSpec<T> = {
  spec: T
  range: string
}
/** Brands a spec with its discriminating `type` tag. */
export type Tag<T extends string, V> = V & {
  type: T
}
/** Brands a spec with its `subtype` discriminant. */
export type Subtype<T extends string, V> = V & {
  subtype: T
}
/** Brands a pointer spec with its `target` discriminant. */
export type Target<T extends string, V> = V & {
  target: T
}
/** Uniqueness constraint for list entries: a key, any-of keys, or none. */
export type UniqueBy =
  | {
      any: UniqueBy[]
    }
  | string
  | null
export type WithNullable<T> = T & {
  nullable: boolean
}
export type DefaultString =
  | string
  | {
      /** The chars available for the random generation */
      charset?: string
      /** Length that we generate to */
      len: number
    }
/** String value spec: optional regex validation plus UI hints. */
export type ValueSpecString = // deno-lint-ignore ban-types
  (
    | {}
    | {
        pattern: string
        "pattern-description": string
      }
  ) & {
    copyable?: boolean
    masked?: boolean
    placeholder?: string
  }
export type ValueSpecNumber = {
  /** Something like [3,6] or [0, *) */
  range?: string
  integral?: boolean
  /** Used a description of the units */
  units?: string
  placeholder?: number
}
/** Boolean value spec carries no extra fields. */
export type ValueSpecBoolean = Record<string, unknown>
/**
 * Any renderable config value spec, discriminated by `type`. Pointers are
 * further discriminated by `subtype` ("package" | "system") and, for package
 * pointers, by `target`.
 */
export type ValueSpecAny =
  | Tag<"boolean", WithDescription<WithDefault<ValueSpecBoolean, boolean>>>
  | Tag<
      "string",
      WithDescription<
        WithNullableDefault<WithNullable<ValueSpecString>, DefaultString>
      >
    >
  | Tag<
      "number",
      WithDescription<
        WithNullableDefault<WithNullable<ValueSpecNumber>, number>
      >
    >
  | Tag<
      "enum",
      WithDescription<
        WithDefault<
          {
            values: readonly string[] | string[]
            "value-names": {
              [key: string]: string
            }
          },
          string
        >
      >
    >
  | Tag<"list", ValueSpecList>
  | Tag<"object", WithDescription<WithNullableDefault<ValueSpecObject, Config>>>
  | Tag<"union", WithOptionalDescription<WithDefault<ValueSpecUnion, string>>>
  | Tag<
      "pointer",
      WithDescription<
        | Subtype<
            "package",
            | Target<
                "tor-key",
                {
                  "package-id": string
                  interface: string
                }
              >
            | Target<
                "tor-address",
                {
                  "package-id": string
                  interface: string
                }
              >
            | Target<
                "lan-address",
                {
                  "package-id": string
                  interface: string
                }
              >
            | Target<
                "config",
                {
                  "package-id": string
                  selector: string
                  multi: boolean
                }
              >
          >
        | Subtype<"system", Record<string, unknown>>
      >
    >
export type ValueSpecUnion = {
  /** What tag for the specification, for tag unions */
  tag: {
    id: string
    name: string
    description?: string
    "variant-names": {
      [key: string]: string
    }
  }
  /** The possible enum values */
  variants: {
    [key: string]: ConfigSpec
  }
  "display-as"?: string
  "unique-by"?: UniqueBy
}
/** Object value spec: nested spec plus optional list-rendering hints. */
export type ValueSpecObject = {
  spec: ConfigSpec
  "display-as"?: string
  "unique-by"?: UniqueBy
}
/** List value spec, discriminated by element `subtype`. */
export type ValueSpecList =
  | Subtype<
      "boolean",
      WithDescription<WithDefault<ListSpec<ValueSpecBoolean>, boolean[]>>
    >
  | Subtype<
      "string",
      WithDescription<WithDefault<ListSpec<ValueSpecString>, string[]>>
    >
  | Subtype<
      "number",
      WithDescription<WithDefault<ListSpec<ValueSpecNumber>, number[]>>
    >
  | Subtype<
      "enum",
      WithDescription<WithDefault<ListSpec<ValueSpecEnum>, string[]>>
    >
  | Subtype<
      "object",
      WithDescription<
        WithNullableDefault<
          ListSpec<ValueSpecObject>,
          Record<string, unknown>[]
        >
      >
    >
  | Subtype<
      "union",
      WithDescription<WithDefault<ListSpec<ValueSpecUnion>, string[]>>
    >
export type ValueSpecEnum = {
  values: string[]
  "value-names": { [key: string]: string }
}
/** Result of setConfig: the signal to send the service and its dependencies. */
export type SetResult = {
  /** These are the unix process signals */
  signal:
    | "SIGTERM"
    | "SIGHUP"
    | "SIGINT"
    | "SIGQUIT"
    | "SIGILL"
    | "SIGTRAP"
    | "SIGABRT"
    | "SIGBUS"
    | "SIGFPE"
    | "SIGKILL"
    | "SIGUSR1"
    | "SIGSEGV"
    | "SIGUSR2"
    | "SIGPIPE"
    | "SIGALRM"
    | "SIGSTKFLT"
    | "SIGCHLD"
    | "SIGCONT"
    | "SIGSTOP"
    | "SIGTSTP"
    | "SIGTTIN"
    | "SIGTTOU"
    | "SIGURG"
    | "SIGXCPU"
    | "SIGXFSZ"
    | "SIGVTALRM"
    | "SIGPROF"
    | "SIGWINCH"
    | "SIGIO"
    | "SIGPWR"
    | "SIGSYS"
    | "SIGEMT"
    | "SIGINFO"
  "depends-on": DependsOn
}
/** Map of dependency package id -> the health checks relied upon. */
export type DependsOn = {
  [packageId: string]: string[]
}
/** Error shape: either a plain message or a numeric code with a message. */
export type KnownError =
  | { error: string }
  | {
      "error-code": [number, string] | readonly [number, string]
    }
/** Poor-man's Result: success wraps `result`, failure is a KnownError. */
export type ResultType<T> = KnownError | { result: T }
export type PackagePropertiesV2 = {
  [name: string]: PackagePropertyObject | PackagePropertyString
}
export type PackagePropertyString = {
  type: "string"
  description?: string
  value: string
  /** Let's the ui make this copyable button */
  copyable?: boolean
  /** Let the ui create a qr for this field */
  qr?: boolean
  /** Hiding the value unless toggled off for field */
  masked?: boolean
}
/** Nested group of properties rendered as an expandable object. */
export type PackagePropertyObject = {
  value: PackagePropertiesV2
  type: "object"
  description: string
}
/** Payload returned by the `properties` export. */
export type Properties = {
  version: 2
  data: PackagePropertiesV2
}
export type Dependencies = {
  /** Id is the id of the package, should be the same as the manifest */
  [id: string]: {
    /** Checks are called to make sure that our dependency is in the correct shape. If a known error is returned we know that the dependency needs modification */
    check(effects: Effects, input: Config): Promise<ResultType<void | null>>
    /** This is called after we know that the dependency package needs a new configuration, this would be a transform for defaults */
    autoConfigure(effects: Effects, input: Config): Promise<ResultType<Config>>
  }
}

View File

@@ -0,0 +1,442 @@
import * as fs from "fs/promises"
import * as oet from "./oldEmbassyTypes"
import { Volume } from "../../../Models/Volume"
import * as child_process from "child_process"
import { promisify } from "util"
import { daemons, startSdk, T, utils } from "@start9labs/start-sdk"
import "isomorphic-fetch"
import { Manifest } from "./matchManifest"
import { DockerProcedureContainer } from "./DockerProcedureContainer"
import * as cp from "child_process"
import { Effects } from "../../../Models/Effects"
import { Mounts } from "@start9labs/start-sdk/package/lib/mainFn/Mounts"
/** Promisified child_process.execFile — resolves with `{ stdout, stderr }`. */
export const execFile = promisify(cp.execFile)
/**
 * Builds a legacy (0.3.x) `Effects` object on top of the new SDK effects, so
 * old embassy packages keep working inside the new container runtime.
 * File operations resolve paths through `Volume`; command/daemon operations
 * are proxied through the package's main docker image.
 */
export const polyfillEffects = (
  effects: Effects,
  manifest: Manifest,
): oet.Effects => {
  const self = {
    effects,
    manifest,
    /** Writes a UTF-8 string to `path` inside the named volume. */
    async writeFile(input: {
      path: string
      volumeId: string
      toWrite: string
    }): Promise<void> {
      await fs.writeFile(
        new Volume(input.volumeId, input.path).path,
        input.toWrite,
      )
    },
    /** Reads a file from the named volume and returns its contents as a string. */
    async readFile(input: { volumeId: string; path: string }): Promise<string> {
      return (
        await fs.readFile(new Volume(input.volumeId, input.path).path)
      ).toString()
    },
    /** Stats a path and maps the node stats onto the legacy Metadata shape. */
    async metadata(input: {
      volumeId: string
      path: string
    }): Promise<oet.Metadata> {
      const stats = await fs.stat(new Volume(input.volumeId, input.path).path)
      return {
        fileType: stats.isFile() ? "file" : "directory",
        gid: stats.gid,
        uid: stats.uid,
        mode: stats.mode,
        isDir: stats.isDirectory(),
        isFile: stats.isFile(),
        isSymlink: stats.isSymbolicLink(),
        len: stats.size,
        // BUGFIX: previously reported readonly=true exactly when the
        // owner-write bit WAS set. Read-only means the write bit is absent.
        readonly: (stats.mode & 0o200) === 0,
      }
    },
    /** Creates a directory (and parents) inside the volume; returns the host path. */
    async createDir(input: {
      volumeId: string
      path: string
    }): Promise<string> {
      const path = new Volume(input.volumeId, input.path).path
      await fs.mkdir(path, { recursive: true })
      return path
    },
    /** Lists directory entries inside the volume. */
    async readDir(input: {
      volumeId: string
      path: string
    }): Promise<string[]> {
      return fs.readdir(new Volume(input.volumeId, input.path).path)
    },
    /** Recursively removes a directory inside the volume; returns the host path. */
    async removeDir(input: {
      volumeId: string
      path: string
    }): Promise<string> {
      const path = new Volume(input.volumeId, input.path).path
      // fs.rmdir({recursive}) is deprecated in node; fs.rm is the supported
      // equivalent for recursive directory removal.
      await fs.rm(path, { recursive: true })
      return path
    },
    /** Removes a single file inside the volume. */
    removeFile(input: { volumeId: string; path: string }): Promise<void> {
      return fs.rm(new Volume(input.volumeId, input.path).path)
    },
    /** Serializes `toWrite` as JSON and writes it inside the volume. */
    async writeJsonFile(input: {
      volumeId: string
      path: string
      toWrite: Record<string, unknown>
    }): Promise<void> {
      await fs.writeFile(
        new Volume(input.volumeId, input.path).path,
        JSON.stringify(input.toWrite),
      )
    },
    /** Reads and parses a JSON file from inside the volume. */
    async readJsonFile(input: {
      volumeId: string
      path: string
    }): Promise<Record<string, unknown>> {
      return JSON.parse(
        (
          await fs.readFile(new Volume(input.volumeId, input.path).path)
        ).toString(),
      )
    },
    /**
     * Runs a one-shot command in the package's main image.
     * NOTE(review): any stderr output is treated as failure even though many
     * tools write diagnostics to stderr on success — behavior preserved from
     * the original; confirm before changing.
     */
    runCommand({
      command,
      args,
      timeoutMillis,
    }: {
      command: string
      args?: string[] | undefined
      timeoutMillis?: number | undefined
    }): Promise<oet.ResultType<string>> {
      const commands: [string, ...string[]] = [command, ...(args || [])]
      return startSdk
        .runCommand(
          effects,
          { imageId: manifest.main.image },
          commands,
          { mounts: Mounts.of() },
          commands.join(" "),
        )
        .then((x: any) => ({
          stderr: x.stderr.toString(),
          stdout: x.stdout.toString(),
        }))
        .then((x: any) =>
          !!x.stderr ? { error: x.stderr } : { result: x.stdout },
        )
    },
    /**
     * Starts a long-running process in a fresh subcontainer. `wait` resolves
     * once the process exits; `term` asks it to stop.
     */
    runDaemon(input: { command: string; args?: string[] | undefined }): {
      wait(): Promise<oet.ResultType<string>>
      term(): Promise<void>
    } {
      const promiseSubcontainer = DockerProcedureContainer.createSubContainer(
        effects,
        manifest.id,
        manifest.main,
        manifest.volumes,
        [input.command, ...(input.args || [])].join(" "),
      )
      const daemon = promiseSubcontainer.then((subcontainer) =>
        daemons.runCommand()(
          effects,
          subcontainer,
          [input.command, ...(input.args || [])],
          {},
        ),
      )
      return {
        wait: () =>
          daemon.then((daemon) =>
            daemon.wait().then(() => {
              return { result: "" }
            }),
          ),
        term: () => daemon.then((daemon) => daemon.term()),
      }
    },
    /** Recursively chowns a path in the volume (runs chown inside the image). */
    async chown(input: {
      volumeId: string
      path: string
      uid: string
    }): Promise<null> {
      const commands: [string, ...string[]] = [
        "chown",
        "--recursive",
        input.uid,
        `/drive/${input.path}`,
      ]
      await startSdk
        .runCommand(
          effects,
          { imageId: manifest.main.image },
          commands,
          {
            // Mount the target volume read-write at /drive for the command.
            mounts: Mounts.of().addVolume(
              input.volumeId,
              null,
              "/drive",
              false,
            ),
          },
          commands.join(" "),
        )
        .then((x: any) => ({
          stderr: x.stderr.toString(),
          stdout: x.stdout.toString(),
        }))
        .then((x: any) => {
          if (!!x.stderr) {
            throw new Error(x.stderr)
          }
        })
      return null
    },
    /** Recursively chmods a path in the volume (runs chmod inside the image). */
    async chmod(input: {
      volumeId: string
      path: string
      mode: string
    }): Promise<null> {
      const commands: [string, ...string[]] = [
        "chmod",
        "--recursive",
        input.mode,
        `/drive/${input.path}`,
      ]
      await startSdk
        .runCommand(
          effects,
          { imageId: manifest.main.image },
          commands,
          {
            mounts: Mounts.of().addVolume(
              input.volumeId,
              null,
              "/drive",
              false,
            ),
          },
          commands.join(" "),
        )
        .then((x: any) => ({
          stderr: x.stderr.toString(),
          stdout: x.stdout.toString(),
        }))
        .then((x: any) => {
          if (!!x.stderr) {
            throw new Error(x.stderr)
          }
        })
      return null
    },
    /** Resolves after `timeMs` milliseconds. */
    sleep(timeMs: number): Promise<null> {
      return new Promise((resolve) => setTimeout(resolve, timeMs))
    },
    trace(whatToPrint: string): void {
      console.trace(utils.asError(whatToPrint))
    },
    warn(whatToPrint: string): void {
      console.warn(utils.asError(whatToPrint))
    },
    error(whatToPrint: string): void {
      console.error(utils.asError(whatToPrint))
    },
    debug(whatToPrint: string): void {
      console.debug(utils.asError(whatToPrint))
    },
    info(whatToPrint: string): void {
      // BUGFIX: previously logged the literal `false` and dropped the message.
      console.log(utils.asError(whatToPrint))
    },
    is_sandboxed(): boolean {
      return false
    },
    /** True when the path stats successfully inside the volume. */
    exists(input: { volumeId: string; path: string }): Promise<boolean> {
      return self
        .metadata(input)
        .then(() => true)
        .catch(() => false)
    },
    /** Legacy fetch wrapper. The body is read eagerly and then cached. */
    async fetch(
      url: string,
      options?:
        | {
            method?:
              | "GET"
              | "POST"
              | "PUT"
              | "DELETE"
              | "HEAD"
              | "PATCH"
              | undefined
            headers?: Record<string, string> | undefined
            body?: string | undefined
          }
        | undefined,
    ): Promise<{
      method: string
      ok: boolean
      status: number
      headers: Record<string, string>
      body?: string | null | undefined
      text(): Promise<string>
      json(): Promise<unknown>
    }> {
      const fetched = await fetch(url, options)
      // BUGFIX: the body can only be consumed once; read it here and serve
      // text()/json() from the cached string instead of re-reading the
      // (already-used) Response, which would reject.
      const body = await fetched.text()
      return {
        // BUGFIX: `fetched.type` is the Response type ("cors", "basic", …),
        // not the HTTP method; report the method that was actually requested.
        method: options?.method || "GET",
        ok: fetched.ok,
        status: fetched.status,
        headers: Object.fromEntries(fetched.headers.entries()),
        body,
        text: () => Promise.resolve(body),
        json: () => Promise.resolve(JSON.parse(body)),
      }
    },
    /**
     * Copies a volume with rsync, running TWO sequential passes: the second
     * pass picks up files changed while the first was copying. Progress is
     * reported as firstPass/2, then 0.5 + secondPass/2.
     */
    runRsync(rsyncOptions: {
      srcVolume: string
      dstVolume: string
      srcPath: string
      dstPath: string
      options: oet.BackupOptions
    }): {
      id: () => Promise<string>
      wait: () => Promise<null>
      progress: () => Promise<number>
    } {
      let secondRun: ReturnType<typeof self._runRsync> | undefined
      let firstRun = self._runRsync(rsyncOptions)
      let waitValue = firstRun.wait().then((x) => {
        secondRun = self._runRsync(rsyncOptions)
        return secondRun.wait()
      })
      const id = async () => {
        // Prefer the currently-active (second) process's pid once it exists.
        return secondRun?.id?.() ?? firstRun.id()
      }
      const wait = () => waitValue
      const progress = async () => {
        const secondProgress = secondRun?.progress?.()
        if (secondProgress) {
          return (await secondProgress) / 2.0 + 0.5
        }
        return (await firstRun.progress()) / 2.0
      }
      return { id, wait, progress }
    },
    /** Single rsync pass. Parses `--info=progress2` output for a percentage. */
    _runRsync(rsyncOptions: {
      srcVolume: string
      dstVolume: string
      srcPath: string
      dstPath: string
      options: oet.BackupOptions
    }): {
      id: () => Promise<string>
      wait: () => Promise<null>
      progress: () => Promise<number>
    } {
      const { srcVolume, dstVolume, srcPath, dstPath, options } = rsyncOptions
      const command = "rsync"
      const args: string[] = []
      if (options.delete) {
        args.push("--delete")
      }
      if (options.force) {
        args.push("--force")
      }
      if (options.ignoreExisting) {
        args.push("--ignore-existing")
      }
      for (const exclude of options.exclude) {
        args.push(`--exclude=${exclude}`)
      }
      args.push("-actAXH")
      args.push("--info=progress2")
      args.push("--no-inc-recursive")
      args.push(new Volume(srcVolume, srcPath).path)
      args.push(new Volume(dstVolume, dstPath).path)
      const spawned = child_process.spawn(command, args, { detached: true })
      let percentage = 0.0
      spawned.stdout.on("data", (data: unknown) => {
        // BUGFIX: rsync rewrites its progress line with \r; a string
        // `.replace("\r", …)` only replaced the FIRST occurrence — replace
        // them all so every progress update is seen.
        const lines = String(data).replace(/\r/g, "\n").split("\n")
        for (const line of lines) {
          // BUGFIX: the old pattern /$([0-9.]+)%/ had `$` (end-of-input
          // anchor) BEFORE the capture group, so it could never match and
          // progress stayed at 0. Match "NN.N%" anywhere in the line.
          const parsed = /([0-9.]+)%/.exec(line)?.[1]
          if (!parsed) continue
          percentage = Number.parseFloat(parsed)
        }
      })
      spawned.stderr.on("data", (data: unknown) => {
        console.error(`polyfill.runAsync`, utils.asError(data))
      })
      const id = async () => {
        const pid = spawned.pid
        if (pid === undefined) {
          throw new Error("rsync process has no pid")
        }
        return String(pid)
      }
      const waitPromise = new Promise<null>((resolve, reject) => {
        spawned.on("exit", (code: any) => {
          if (code === 0) {
            resolve(null)
          } else {
            reject(new Error(`rsync exited with code ${code}`))
          }
        })
      })
      const wait = () => waitPromise
      const progress = () => Promise.resolve(percentage)
      return { id, wait, progress }
    },
    /**
     * Reports disk usage. Totals come from `df` on the root filesystem; when
     * a volume path is given, `used` is replaced by `du` of that path.
     */
    async diskUsage(
      options?: { volumeId: string; path: string } | undefined,
    ): Promise<{ used: number; total: number }> {
      const output = await execFile("df", ["--block-size=1", "-P", "/"])
        .then((x: any) => ({
          stderr: x.stderr.toString(),
          stdout: x.stdout.toString(),
        }))
        .then((x: any) => {
          if (!!x.stderr) {
            throw new Error(x.stderr)
          }
          return parseDfOutput(x.stdout)
        })
      if (!!options) {
        const used = await execFile("du", [
          "-s",
          "--block-size=1",
          "-P",
          new Volume(options.volumeId, options.path).path,
        ])
          .then((x: any) => ({
            stderr: x.stderr.toString(),
            stdout: x.stdout.toString(),
          }))
          .then((x: any) => {
            if (!!x.stderr) {
              throw new Error(x.stderr)
            }
            // du -s prints "<bytes>\t<path>"; take the leading number.
            return Number.parseInt(x.stdout.split(/\s+/)[0])
          })
        return {
          ...output,
          used,
        }
      }
      return output
    },
  }
  return self
}
/**
 * Parses `df` output: the first line is treated as a header whose
 * (lower-cased) "used" and "size" columns index the first data row.
 * Missing columns or unparsable numbers yield 0.
 */
function parseDfOutput(output: string): { used: number; total: number } {
  const rows = output
    .split("\n")
    .filter((line) => line.length > 0)
    .map((line) => line.split(/\s+/))
  const header = rows.shift()!.map((col) => col.toLowerCase())
  const usedCol = header.indexOf("used")
  const sizeCol = header.indexOf("size")
  const firstRow = rows[0]
  const used = (firstRow && Number.parseInt(firstRow[usedCol])) || 0
  const total = (firstRow && Number.parseInt(firstRow[sizeCol])) || 0
  return { used, total }
}

View File

@@ -0,0 +1,38 @@
import { matchOldConfigSpec, transformConfigSpec } from "./transformConfigSpec"
import fixtureEmbasyPagesConfig from "./__fixtures__/embasyPagesConfig"
import searNXG from "./__fixtures__/searNXG"
import bitcoind from "./__fixtures__/bitcoind"
import nostr from "./__fixtures__/nostr"
import nostrConfig2 from "./__fixtures__/nostrConfig2"
// Snapshot coverage for the old→new config-spec transformer, driven by real
// package fixtures. The unsafeCast calls double as validation of
// matchOldConfigSpec itself: they throw if a fixture fails to parse.
describe("transformConfigSpec", () => {
  test("matchOldConfigSpec(embassyPages.homepage.variants[web-page])", () => {
    matchOldConfigSpec.unsafeCast(
      fixtureEmbasyPagesConfig.homepage.variants["web-page"],
    )
  })
  test("matchOldConfigSpec(embassyPages)", () => {
    matchOldConfigSpec.unsafeCast(fixtureEmbasyPagesConfig)
  })
  test("transformConfigSpec(embassyPages)", () => {
    const spec = matchOldConfigSpec.unsafeCast(fixtureEmbasyPagesConfig)
    expect(transformConfigSpec(spec)).toMatchSnapshot()
  })
  test("transformConfigSpec(searNXG)", () => {
    const spec = matchOldConfigSpec.unsafeCast(searNXG)
    expect(transformConfigSpec(spec)).toMatchSnapshot()
  })
  test("transformConfigSpec(bitcoind)", () => {
    const spec = matchOldConfigSpec.unsafeCast(bitcoind)
    expect(transformConfigSpec(spec)).toMatchSnapshot()
  })
  test("transformConfigSpec(nostr)", () => {
    const spec = matchOldConfigSpec.unsafeCast(nostr)
    expect(transformConfigSpec(spec)).toMatchSnapshot()
  })
  test("transformConfigSpec(nostr2)", () => {
    const spec = matchOldConfigSpec.unsafeCast(nostrConfig2)
    expect(transformConfigSpec(spec)).toMatchSnapshot()
  })
})

View File

@@ -0,0 +1,640 @@
import { IST } from "@start9labs/start-sdk"
import {
dictionary,
object,
anyOf,
string,
literals,
array,
number,
boolean,
Parser,
deferred,
every,
nill,
literal,
} from "ts-matches"
/**
 * Converts a legacy (0.3.x) config spec into the new IST.InputSpec shape, key
 * by key. `pointer` entries have no UI representation and are dropped.
 */
export function transformConfigSpec(oldSpec: OldConfigSpec): IST.InputSpec {
  return Object.entries(oldSpec).reduce((inputSpec, [key, oldVal]) => {
    let newVal: IST.ValueSpec
    if (oldVal.type === "boolean") {
      newVal = {
        type: "toggle",
        name: oldVal.name,
        default: oldVal.default,
        description: oldVal.description || null,
        warning: oldVal.warning || null,
        disabled: false,
        immutable: false,
      }
    } else if (oldVal.type === "enum") {
      newVal = {
        type: "select",
        name: oldVal.name,
        description: oldVal.description || null,
        warning: oldVal.warning || null,
        default: oldVal.default,
        // Fall back to the raw value when no display name was provided.
        values: oldVal.values.reduce(
          (obj, curr) => ({
            ...obj,
            [curr]: oldVal["value-names"][curr] || curr,
          }),
          {},
        ),
        disabled: false,
        immutable: false,
      }
    } else if (oldVal.type === "list") {
      newVal = getListSpec(oldVal)
    } else if (oldVal.type === "number") {
      const range = Range.from(oldVal.range)
      newVal = {
        type: "number",
        name: oldVal.name,
        // BUGFIX: `||`/truthiness previously turned a legitimate default or
        // bound of 0 into "no default"/"no bound" — use explicit null checks.
        default: oldVal.default ?? null,
        description: oldVal.description || null,
        warning: oldVal.warning || null,
        disabled: false,
        immutable: false,
        required: !oldVal.nullable,
        // Exclusive bounds are tightened by 1, which assumes integral ranges.
        min:
          range.min != null
            ? range.minInclusive
              ? range.min
              : range.min + 1
            : null,
        max:
          range.max != null
            ? range.maxInclusive
              ? range.max
              : range.max - 1
            : null,
        integer: oldVal.integral,
        step: null,
        units: oldVal.units || null,
        // BUGFIX: a numeric placeholder of 0 was previously dropped.
        placeholder:
          oldVal.placeholder != null ? String(oldVal.placeholder) : null,
      }
    } else if (oldVal.type === "object") {
      newVal = {
        type: "object",
        name: oldVal.name,
        description: oldVal.description || null,
        warning: oldVal.warning || null,
        // Nested specs are re-validated before being transformed recursively.
        spec: transformConfigSpec(matchOldConfigSpec.unsafeCast(oldVal.spec)),
      }
    } else if (oldVal.type === "string") {
      newVal = {
        type: "text",
        name: oldVal.name,
        default: oldVal.default ?? null,
        description: oldVal.description || null,
        warning: oldVal.warning || null,
        disabled: false,
        immutable: false,
        required: !oldVal.nullable,
        // A pattern is only carried over when it has a human description.
        patterns:
          oldVal.pattern && oldVal["pattern-description"]
            ? [
                {
                  regex: oldVal.pattern,
                  description: oldVal["pattern-description"],
                },
              ]
            : [],
        minLength: null,
        maxLength: null,
        masked: oldVal.masked || false,
        generate: null,
        inputmode: "text",
        placeholder: oldVal.placeholder || null,
      }
    } else if (oldVal.type === "union") {
      newVal = {
        type: "union",
        name: oldVal.tag.name,
        description: oldVal.tag.description || null,
        warning: oldVal.tag.warning || null,
        variants: Object.entries(oldVal.variants).reduce(
          (obj, [id, spec]) => ({
            ...obj,
            [id]: {
              name: oldVal.tag["variant-names"][id] || id,
              spec: transformConfigSpec(matchOldConfigSpec.unsafeCast(spec)),
            },
          }),
          {} as Record<string, { name: string; spec: IST.InputSpec }>,
        ),
        disabled: false,
        default: oldVal.default,
        immutable: false,
      }
    } else if (oldVal.type === "pointer") {
      // Pointers are resolved by the host, not rendered — drop them.
      return inputSpec
    } else {
      throw new Error(`unknown spec ${JSON.stringify(oldVal)}`)
    }
    return {
      ...inputSpec,
      [key]: newVal,
    }
  }, {} as IST.InputSpec)
}
/**
 * Rewrites a config VALUE from the old shape to the new shape, guided by the
 * old spec. Recurses into objects, object-lists, and union variants; pointer
 * entries are dropped.
 * NOTE(review): for unions this mutates `config` in place (deletes the tag
 * key) before copying — presumably callers pass throwaway copies; confirm.
 */
export function transformOldConfigToNew(
  spec: OldConfigSpec,
  config: Record<string, any>,
): Record<string, any> {
  if (!config) return config
  return Object.entries(spec).reduce((obj, [key, val]) => {
    let newVal = config[key]
    if (isObject(val)) {
      newVal = transformOldConfigToNew(
        matchOldConfigSpec.unsafeCast(val.spec),
        config[key],
      )
    }
    if (isUnion(val)) {
      // Old unions store the selected variant id under the tag id, alongside
      // the variant's own fields; the new shape is { selection, value }.
      if (!config[key]) return obj
      const selection = config[key]?.[val.tag.id]
      if (!selection) return obj
      delete config[key][val.tag.id]
      if (!val.variants[selection]) return obj
      newVal = {
        selection,
        value: transformOldConfigToNew(
          matchOldConfigSpec.unsafeCast(val.variants[selection]),
          config[key],
        ),
      }
    }
    if (isList(val) && isObjectList(val)) {
      if (!config[key]) return obj
      newVal = (config[key] as object[]).map((obj) =>
        transformOldConfigToNew(
          matchOldConfigSpec.unsafeCast(val.spec.spec),
          obj,
        ),
      )
    }
    if (isPointer(val)) {
      // Pointer values are host-resolved; they do not survive the transform.
      return obj
    }
    return {
      ...obj,
      [key]: newVal,
    }
  }, {})
}
/**
 * Inverse of transformOldConfigToNew: rewrites a new-shape config value back
 * into the old shape expected by legacy packages.
 * NOTE(review): unlike the forward transform, this assumes `config[key]` is
 * present for unions and object-lists — confirm callers guarantee that.
 */
export function transformNewConfigToOld(
  spec: OldConfigSpec,
  config: Record<string, any>,
): Record<string, any> {
  return Object.entries(spec).reduce((obj, [key, val]) => {
    let newVal = config[key]
    if (isObject(val)) {
      newVal = transformNewConfigToOld(
        matchOldConfigSpec.unsafeCast(val.spec),
        config[key],
      )
    }
    if (isUnion(val)) {
      // { selection, value } → flatten variant fields with the tag id key.
      newVal = {
        [val.tag.id]: config[key].selection,
        ...transformNewConfigToOld(
          matchOldConfigSpec.unsafeCast(val.variants[config[key].selection]),
          config[key].value,
        ),
      }
    }
    if (isList(val) && isObjectList(val)) {
      newVal = (config[key] as object[]).map((obj) =>
        transformNewConfigToOld(
          matchOldConfigSpec.unsafeCast(val.spec.spec),
          obj,
        ),
      )
    }
    return {
      ...obj,
      [key]: newVal,
    }
  }, {})
}
/**
 * Converts an old "list" value spec into either a multiselect (enum lists) or
 * a new-style list spec (number / string / object lists). Throws for any
 * other subtype.
 */
function getListSpec(
  oldVal: OldValueSpecList,
): IST.ValueSpecMultiselect | IST.ValueSpecList {
  const range = Range.from(oldVal.range)
  // Shared fields for every list flavor. BUGFIX: truthiness checks previously
  // turned a length bound of 0 into "no bound" — use explicit null checks.
  // Exclusive bounds are tightened by 1 (assumes integral ranges).
  let partial: Omit<IST.ValueSpecList, "type" | "spec" | "default"> = {
    name: oldVal.name,
    description: oldVal.description || null,
    warning: oldVal.warning || null,
    minLength:
      range.min != null
        ? range.minInclusive
          ? range.min
          : range.min + 1
        : null,
    maxLength:
      range.max != null
        ? range.maxInclusive
          ? range.max
          : range.max - 1
        : null,
    disabled: false,
  }
  if (isEnumList(oldVal)) {
    return {
      ...partial,
      type: "multiselect",
      default: oldVal.default as string[],
      immutable: false,
      // BUGFIX: fall back to the raw value when no display name exists,
      // matching the enum handling in transformConfigSpec (previously this
      // produced `undefined` names).
      values: oldVal.spec.values.reduce(
        (obj, curr) => ({
          ...obj,
          [curr]: oldVal.spec["value-names"][curr] || curr,
        }),
        {},
      ),
    }
  } else if (isNumberList(oldVal)) {
    // Numbers become text entries constrained by a numeric-format pattern.
    return {
      ...partial,
      type: "list",
      default: oldVal.default.map(String) as string[],
      spec: {
        type: "text",
        patterns: oldVal.spec.integral
          ? [{ regex: "[0-9]+", description: "Integral number type" }]
          : [
              {
                regex: "[-+]?[0-9]*\\.?[0-9]+",
                description: "Number type",
              },
            ],
        minLength: null,
        maxLength: null,
        masked: false,
        generate: null,
        inputmode: "text",
        placeholder: oldVal.spec.placeholder
          ? String(oldVal.spec.placeholder)
          : null,
      },
    }
  } else if (isStringList(oldVal)) {
    return {
      ...partial,
      type: "list",
      default: oldVal.default as string[],
      spec: {
        type: "text",
        // A pattern is only carried over when it has a human description.
        patterns:
          oldVal.spec.pattern && oldVal.spec["pattern-description"]
            ? [
                {
                  regex: oldVal.spec.pattern,
                  description: oldVal.spec["pattern-description"],
                },
              ]
            : [],
        minLength: null,
        maxLength: null,
        masked: oldVal.spec.masked || false,
        generate: null,
        inputmode: "text",
        placeholder: oldVal.spec.placeholder || null,
      },
    }
  } else if (isObjectList(oldVal)) {
    return {
      ...partial,
      type: "list",
      default: oldVal.default as Record<string, unknown>[],
      spec: {
        type: "object",
        spec: transformConfigSpec(
          matchOldConfigSpec.unsafeCast(oldVal.spec.spec),
        ),
        uniqueBy: oldVal.spec["unique-by"] || null,
        displayAs: oldVal.spec["display-as"] || null,
      },
    }
  } else {
    throw new Error("Invalid list subtype. enum, string, and object permitted.")
  }
}
/** Reads the `type` discriminant off an old value spec. */
const specType = (val: OldValueSpec) => val.type

function isObject(val: OldValueSpec): val is OldValueSpecObject {
  return specType(val) === "object"
}
function isUnion(val: OldValueSpec): val is OldValueSpecUnion {
  return specType(val) === "union"
}
function isList(val: OldValueSpec): val is OldValueSpecList {
  return specType(val) === "list"
}
function isPointer(val: OldValueSpec): val is OldValueSpecPointer {
  return specType(val) === "pointer"
}
/** Reads the `subtype` discriminant off an old list spec. */
const listSubtype = (val: OldValueSpecList) => val.subtype

function isEnumList(
  val: OldValueSpecList,
): val is OldValueSpecList & { subtype: "enum" } {
  return listSubtype(val) === "enum"
}
function isStringList(
  val: OldValueSpecList,
): val is OldValueSpecList & { subtype: "string" } {
  return listSubtype(val) === "string"
}
function isNumberList(
  val: OldValueSpecList,
): val is OldValueSpecList & { subtype: "number" } {
  return listSubtype(val) === "number"
}
function isObjectList(
  val: OldValueSpecList,
): val is OldValueSpecList & { subtype: "object" } {
  // Very old specs could declare a "union" sublist; it was never supported.
  if ((listSubtype(val) as string) === "union") {
    throw new Error("Invalid list subtype. enum, string, and object permitted.")
  }
  return listSubtype(val) === "object"
}
/** The legacy config spec: a map of field id → old value spec. */
export type OldConfigSpec = Record<string, OldValueSpec>
// Deferred so the spec parser can reference itself recursively (objects and
// unions nest full specs); it is resolved at the bottom of this file.
const [_matchOldConfigSpec, setMatchOldConfigSpec] = deferred<unknown>()
export const matchOldConfigSpec = _matchOldConfigSpec as Parser<
  unknown,
  OldConfigSpec
>
/** Old string defaults: a literal, or a {charset, len} random-generation rule. */
export const matchOldDefaultString = anyOf(
  string,
  object({ charset: string, len: number }),
)
type OldDefaultString = typeof matchOldDefaultString._TYPE
// For all of these `object(...)` parsers the second argument lists the
// OPTIONAL keys of the shape given in the first argument.
export const matchOldValueSpecString = object(
  {
    type: literals("string"),
    name: string,
    masked: boolean,
    copyable: boolean,
    nullable: boolean,
    placeholder: string,
    pattern: string,
    "pattern-description": string,
    default: matchOldDefaultString,
    textarea: boolean,
    description: string,
    warning: string,
  },
  [
    "masked",
    "copyable",
    "nullable",
    "placeholder",
    "pattern",
    "pattern-description",
    "default",
    "textarea",
    "description",
    "warning",
  ],
)
export const matchOldValueSpecNumber = object(
  {
    type: literals("number"),
    nullable: boolean,
    name: string,
    range: string,
    integral: boolean,
    default: number,
    description: string,
    warning: string,
    units: string,
    placeholder: anyOf(number, string),
  },
  ["default", "description", "warning", "units", "placeholder"],
)
type OldValueSpecNumber = typeof matchOldValueSpecNumber._TYPE
export const matchOldValueSpecBoolean = object(
  {
    type: literals("boolean"),
    default: boolean,
    name: string,
    description: string,
    warning: string,
  },
  ["description", "warning"],
)
type OldValueSpecBoolean = typeof matchOldValueSpecBoolean._TYPE
// "object" specs nest a full config spec (via the deferred parser above).
const matchOldValueSpecObject = object(
  {
    type: literals("object"),
    spec: _matchOldConfigSpec,
    name: string,
    description: string,
    warning: string,
  },
  ["description", "warning"],
)
type OldValueSpecObject = typeof matchOldValueSpecObject._TYPE
const matchOldValueSpecEnum = object(
  {
    values: array(string),
    "value-names": dictionary([string, string]),
    type: literals("enum"),
    default: string,
    name: string,
    description: string,
    warning: string,
  },
  ["description", "warning"],
)
type OldValueSpecEnum = typeof matchOldValueSpecEnum._TYPE
const matchOldUnionTagSpec = object(
  {
    id: string, // The name of the field containing one of the union variants
    "variant-names": dictionary([string, string]), // The name of each variant
    name: string,
    description: string,
    warning: string,
  },
  ["description", "warning"],
)
const matchOldValueSpecUnion = object({
  type: literals("union"),
  tag: matchOldUnionTagSpec,
  variants: dictionary([string, _matchOldConfigSpec]),
  default: string,
})
type OldValueSpecUnion = typeof matchOldValueSpecUnion._TYPE
// unique-by is a recursive any/all tree, so it also needs a deferred parser.
const [matchOldUniqueBy, setOldUniqueBy] = deferred<OldUniqueBy>()
type OldUniqueBy =
  | null
  | string
  | { any: OldUniqueBy[] }
  | { all: OldUniqueBy[] }
setOldUniqueBy(
  anyOf(
    nill,
    string,
    object({ any: array(matchOldUniqueBy) }),
    object({ all: array(matchOldUniqueBy) }),
  ),
)
const matchOldListValueSpecObject = object(
  {
    spec: _matchOldConfigSpec, // this is a mapped type of the config object at this level, replacing the object's values with specs on those values
    "unique-by": matchOldUniqueBy, // indicates whether duplicates can be permitted in the list
    "display-as": string, // this should be a handlebars template which can make use of the entire config which corresponds to 'spec'
  },
  ["display-as", "unique-by"],
)
const matchOldListValueSpecString = object(
  {
    masked: boolean,
    copyable: boolean,
    pattern: string,
    "pattern-description": string,
    placeholder: string,
  },
  ["pattern", "pattern-description", "placeholder", "copyable", "masked"],
)
const matchOldListValueSpecEnum = object({
  values: array(string),
  "value-names": dictionary([string, string]),
})
const matchOldListValueSpecNumber = object(
  {
    range: string,
    integral: boolean,
    units: string,
    placeholder: anyOf(number, string),
  },
  ["units", "placeholder"],
)
// represents a spec for a list: common list fields intersected with one of
// the four subtype shapes.
const matchOldValueSpecList = every(
  object(
    {
      type: literals("list"),
      range: string, // '[0,1]' (inclusive) OR '[0,*)' (right unbounded), normal math rules
      default: anyOf(
        array(string),
        array(number),
        array(matchOldDefaultString),
        array(object),
      ),
      name: string,
      description: string,
      warning: string,
    },
    ["description", "warning"],
  ),
  anyOf(
    object({
      subtype: literals("string"),
      spec: matchOldListValueSpecString,
    }),
    object({
      subtype: literals("enum"),
      spec: matchOldListValueSpecEnum,
    }),
    object({
      subtype: literals("object"),
      spec: matchOldListValueSpecObject,
    }),
    object({
      subtype: literals("number"),
      spec: matchOldListValueSpecNumber,
    }),
  ),
)
type OldValueSpecList = typeof matchOldValueSpecList._TYPE
// Pointers reference data owned by another package (or its config) and are
// resolved by the host rather than rendered in a form.
const matchOldValueSpecPointer = every(
  object({
    type: literal("pointer"),
  }),
  anyOf(
    object({
      subtype: literal("package"),
      target: literals("tor-key", "tor-address", "lan-address"),
      "package-id": string,
      interface: string,
    }),
    object({
      subtype: literal("package"),
      target: literals("config"),
      "package-id": string,
      selector: string,
      multi: boolean,
    }),
  ),
)
type OldValueSpecPointer = typeof matchOldValueSpecPointer._TYPE
/** Any legacy value spec: the union of all seven shapes above. */
export const matchOldValueSpec = anyOf(
  matchOldValueSpecString,
  matchOldValueSpecNumber,
  matchOldValueSpecBoolean,
  matchOldValueSpecObject,
  matchOldValueSpecEnum,
  matchOldValueSpecList,
  matchOldValueSpecUnion,
  matchOldValueSpecPointer,
)
type OldValueSpec = typeof matchOldValueSpec._TYPE
// Resolve the deferred parser now that matchOldValueSpec exists.
setMatchOldConfigSpec(dictionary([string, matchOldValueSpec]))
/**
 * Parsed interval such as "[0,1]" (inclusive bounds) or "(0,*)" (exclusive /
 * right-unbounded). An unbounded side is represented as `undefined`.
 */
export class Range {
  min?: number
  max?: number
  minInclusive!: boolean
  maxInclusive!: boolean
  /** Parses an interval string; defaults to the fully unbounded "(*,*)". */
  static from(s: string = "(*,*)"): Range {
    const r = new Range()
    r.minInclusive = s.startsWith("[")
    r.maxInclusive = s.endsWith("]")
    const [minStr, maxStr] = s.split(",").map((a) => a.trim())
    // BUGFIX: treat "*" as unbounded regardless of the enclosing bracket —
    // previously only "(*" / "*)" were recognized, so "[*,5]" parsed as NaN.
    const minBody = minStr.slice(1)
    const maxBody = maxStr.slice(0, -1)
    r.min = minBody === "*" ? undefined : Number(minBody)
    r.max = maxBody === "*" ? undefined : Number(maxBody)
    return r
  }
}

View File

@@ -0,0 +1,108 @@
import { System } from "../../Interfaces/System"
import { Effects } from "../../Models/Effects"
import { T, utils } from "@start9labs/start-sdk"
import { Optional } from "ts-matches/lib/parsers/interfaces"
/** Absolute path where a packed StartOS package installs its compiled JS entrypoint. */
export const STARTOS_JS_LOCATION = "/usr/lib/startos/package/index.js"
/** Handle to a started main routine; `stop` tears it down. */
type RunningMain = {
  stop: () => Promise<void>
}
/**
 * System adapter for packages built against the new StartOS SDK ABI: each
 * lifecycle call is forwarded to the package's exported `abi`.
 */
export class SystemForStartOs implements System {
  /** Set once `start` succeeds; cleared again by `stop`. */
  private runningMain: RunningMain | undefined

  /** Loads the package ABI from its well-known install location. */
  static of() {
    return new SystemForStartOs(require(STARTOS_JS_LOCATION))
  }

  constructor(readonly abi: T.ABI) {}

  async containerInit(effects: Effects): Promise<void> {
    return void (await this.abi.containerInit({ effects }))
  }

  // timeoutMs parameters below are accepted for interface parity with
  // SystemForEmbassy but are not yet enforced here.
  async packageInit(
    effects: Effects,
    timeoutMs: number | null = null,
  ): Promise<void> {
    return void (await this.abi.packageInit({ effects }))
  }

  async packageUninit(
    effects: Effects,
    nextVersion: Optional<string> = null,
    timeoutMs: number | null = null,
  ): Promise<void> {
    return void (await this.abi.packageUninit({ effects, nextVersion }))
  }

  async createBackup(
    effects: T.Effects,
    timeoutMs: number | null,
  ): Promise<void> {
    return void (await this.abi.createBackup({
      effects,
    }))
  }

  async restoreBackup(
    effects: T.Effects,
    timeoutMs: number | null,
  ): Promise<void> {
    return void (await this.abi.restoreBackup({
      effects,
    }))
  }

  /** Fetches the input spec for an action; throws if the id is unknown. */
  getActionInput(
    effects: Effects,
    id: string,
    timeoutMs: number | null,
  ): Promise<T.ActionInput | null> {
    const action = this.abi.actions.get(id)
    if (!action) throw new Error(`Action ${id} not found`)
    return action.getInput({ effects })
  }

  /** Runs an action with the given input; throws if the id is unknown. */
  runAction(
    effects: Effects,
    id: string,
    input: unknown,
    timeoutMs: number | null,
  ): Promise<T.ActionResult | null> {
    const action = this.abi.actions.get(id)
    if (!action) throw new Error(`Action ${id} not found`)
    return action.run({ effects, input })
  }

  async exit(): Promise<void> {}

  /** Starts the package main routine. Idempotent while already running. */
  async start(effects: Effects): Promise<void> {
    if (this.runningMain) return
    effects.constRetry = utils.once(() => effects.restart())
    // BUGFIX: the annotation previously parsed as
    // `() => (Promise<void> | undefined)` — a non-optional function type —
    // so the variable was declared unassigned with a type that cannot be
    // undefined. Parenthesize to make the VARIABLE optional instead.
    let mainOnTerm: (() => Promise<void>) | undefined
    const started = async (onTerm: () => Promise<void>) => {
      await effects.setMainStatus({ status: "running" })
      mainOnTerm = onTerm
      return null
    }
    const daemons = await (
      await this.abi.main({
        effects,
        started,
      })
    ).build()
    this.runningMain = {
      stop: async () => {
        // Run the package's own teardown hook (if it ever started) before
        // terminating the daemons.
        if (mainOnTerm) await mainOnTerm()
        await daemons.term()
      },
    }
  }

  /** Stops the running main routine; always clears the handle, even on error. */
  async stop(): Promise<void> {
    if (this.runningMain) {
      try {
        await this.runningMain.stop()
      } finally {
        this.runningMain = undefined
      }
    }
  }
}

View File

@@ -0,0 +1,22 @@
import * as fs from "node:fs/promises"
import { System } from "../../Interfaces/System"
import { EMBASSY_JS_LOCATION, SystemForEmbassy } from "./SystemForEmbassy"
import { STARTOS_JS_LOCATION, SystemForStartOs } from "./SystemForStartOs"
/**
 * Picks the System adapter based on which package entrypoint is installed:
 * the new StartOS location wins over the legacy embassy location.
 * @throws when neither entrypoint exists on disk.
 */
export async function getSystem(): Promise<System> {
  // True when `path` is accessible on disk.
  const exists = (path: string) =>
    fs.access(path).then(
      () => true,
      () => false,
    )
  if (await exists(STARTOS_JS_LOCATION)) {
    return SystemForStartOs.of()
  }
  if (await exists(EMBASSY_JS_LOCATION)) {
    return SystemForEmbassy.of()
  }
  // BUGFIX: mention both probed locations — the old message named only the
  // StartOS path, which was misleading when a legacy package was expected.
  throw new Error(
    `neither ${STARTOS_JS_LOCATION} nor ${EMBASSY_JS_LOCATION} found`,
  )
}

View File

@@ -0,0 +1,4 @@
import { GetDependency } from "./GetDependency"
import { System } from "./System"
/** The full dependency-injection surface: currently just the System provider. */
export type AllGetDependencies = GetDependency<"system", Promise<System>>

View File

@@ -0,0 +1,3 @@
/** A provider map: for each key in K, a zero-argument factory returning T. */
export type GetDependency<K extends string, T> = {
  [OtherK in K]: () => T
}

View File

@@ -0,0 +1,50 @@
import { types as T } from "@start9labs/start-sdk"
import { Effects } from "../Models/Effects"
import { CallbackHolder } from "../Models/CallbackHolder"
import { Optional } from "ts-matches/lib/parsers/interfaces"
export type Procedure =
| "/packageInit"
| "/packageUninit"
| "/backup/create"
| "/backup/restore"
| `/actions/${string}/getInput`
| `/actions/${string}/run`
export type ExecuteResult =
| { ok: unknown }
| { err: { code: number; message: string } }
export type System = {
containerInit(effects: T.Effects): Promise<void>
start(effects: T.Effects): Promise<void>
stop(): Promise<void>
packageInit(effects: Effects, timeoutMs: number | null): Promise<void>
packageUninit(
effects: Effects,
nextVersion: Optional<string>,
timeoutMs: number | null,
): Promise<void>
createBackup(effects: T.Effects, timeoutMs: number | null): Promise<void>
restoreBackup(effects: T.Effects, timeoutMs: number | null): Promise<void>
runAction(
effects: Effects,
actionId: string,
input: unknown,
timeoutMs: number | null,
): Promise<T.ActionResult | null>
getActionInput(
effects: Effects,
actionId: string,
timeoutMs: number | null,
): Promise<T.ActionInput | null>
exit(): Promise<void>
}
export type RunningMain = {
callbacks: CallbackHolder
stop(): Promise<void>
}

View File

@@ -0,0 +1,62 @@
import { T } from "@start9labs/start-sdk"
// Monotonic source of callback ids, shared by every CallbackHolder.
const CallbackIdCell = { inc: 1 }
// When a CallbackHolder is garbage-collected, ask the host to drop the
// callback ids it still held. NOTE(review): the cleanup function is async but
// FinalizationRegistry does not await it — a failure here surfaces as an
// unhandled rejection; confirm that is acceptable.
const callbackRegistry = new FinalizationRegistry(
  async (options: { cbs: Map<number, Function>; effects: T.Effects }) => {
    await options.effects.clearCallbacks({
      only: Array.from(options.cbs.keys()),
    })
  },
)
/**
 * Registry of single-shot callbacks addressable by numeric id, with weakly
 * held child holders (per-procedure scopes) searched on lookup.
 */
export class CallbackHolder {
  constructor(private effects?: T.Effects) {}
  // id → callback; an entry is removed the first time it is retrieved.
  private callbacks = new Map<number, Function>()
  // Children are held weakly so holders for finished procedures can be GC'd.
  private children: WeakRef<CallbackHolder>[] = []
  // Ids are unique across ALL holders (shared module-level counter).
  private newId() {
    return CallbackIdCell.inc++
  }
  // Stores `callback` and returns its id; no-op (returns undefined) when the
  // callback is absent.
  addCallback(callback?: Function) {
    if (!callback) {
      return
    }
    const id = this.newId()
    console.error("adding callback", id)
    this.callbacks.set(id, callback)
    // NOTE(review): this registers `this` with the FinalizationRegistry on
    // EVERY add, so the cleanup fires once per registration after GC —
    // registering once per holder looks intended; confirm.
    if (this.effects)
      callbackRegistry.register(this, {
        cbs: this.callbacks,
        effects: this.effects,
      })
    return id
  }
  /** Creates a child holder, tracked weakly for lookup delegation. */
  child(): CallbackHolder {
    const child = new CallbackHolder()
    this.children.push(new WeakRef(child))
    return child
  }
  // Drops a specific child; dead (collected) refs are pruned as a side effect.
  removeChild(child: CallbackHolder) {
    this.children = this.children.filter((c) => {
      const ref = c.deref()
      return ref && ref !== child
    })
  }
  // Looks up (and removes) the callback locally first, then searches children.
  private getCallback(index: number): Function | undefined {
    let callback = this.callbacks.get(index)
    if (callback) this.callbacks.delete(index)
    else {
      for (let i = 0; i < this.children.length; i++) {
        callback = this.children[i].deref()?.getCallback(index)
        if (callback) return callback
      }
    }
    return callback
  }
  /** Invokes callback `index` with `args`; resolves undefined when unknown. */
  callCallback(index: number, args: any[]): Promise<unknown> {
    const callback = this.getCallback(index)
    if (!callback) return Promise.resolve()
    return Promise.resolve().then(() => callback(...args))
  }
}

View File

@@ -0,0 +1,47 @@
import {
object,
literal,
string,
boolean,
array,
dictionary,
literals,
number,
Parser,
some,
} from "ts-matches"
import { matchDuration } from "./Duration"
// Value/type aliases so the parser below reads like the schema it encodes.
const VolumeId = string
const Path = string
export type VolumeId = string
export type Path = string
/**
 * Parser for the legacy "docker" procedure manifest entry. The second
 * argument lists the optional keys; the third supplies their defaults.
 */
export const matchDockerProcedure = object(
  {
    type: literal("docker"),
    image: string,
    system: boolean,
    entrypoint: string,
    args: array(string),
    mounts: dictionary([VolumeId, Path]),
    "io-format": literals(
      "json",
      "json-pretty",
      "yaml",
      "cbor",
      "toml",
      "toml-pretty",
    ),
    "sigterm-timeout": some(number, matchDuration),
    inject: boolean,
  },
  ["io-format", "sigterm-timeout", "system", "args", "inject", "mounts"],
  {
    "sigterm-timeout": 30,
    inject: false,
    args: [],
  },
)
export type DockerProcedure = typeof matchDockerProcedure._TYPE

View File

@@ -0,0 +1,30 @@
import { string } from "ts-matches"
/** Time units accepted in a Duration string. */
export type TimeUnit = "d" | "h" | "s" | "ms" | "m" | "µs" | "ns"
/** A duration literal such as "500ms" or "1.5h". */
export type Duration = `${number}${TimeUnit}`
// BUGFIX: "h" is a declared TimeUnit (and unitsToSeconds has an entry for
// it), but the pattern omitted it — so "1h" failed validation.
const durationRegex = /^([0-9]*(\.[0-9]+)?)(ns|µs|ms|s|m|h|d)$/
/** ts-matches parser accepting only well-formed Duration strings. */
export const matchDuration = string.refine(isDuration)
/** Type guard: true when `value` parses as a Duration. */
export function isDuration(value: string): value is Duration {
  return durationRegex.test(value)
}
/**
 * Formats a Duration string from a magnitude and unit (default seconds).
 * Non-positive (including NaN) magnitudes are clamped to zero.
 */
export function duration(timeValue: number, timeUnit: TimeUnit = "s") {
  let magnitude = 0
  if (timeValue > 0) magnitude = timeValue
  return `${magnitude}${timeUnit}` as Duration
}
/** Seconds per unit, used to normalize Duration strings to seconds. */
const unitsToSeconds: Record<string, number> = {
  ns: 1e-9,
  µs: 1e-6,
  ms: 0.001,
  s: 1,
  m: 60,
  h: 3600,
  d: 86400,
}
/**
 * Converts a Duration (or a raw number of seconds) to seconds.
 * Returns NaN for strings that do not parse, matching the old behavior.
 */
export function fromDuration(duration: Duration | number): number {
  if (typeof duration === "number") return duration
  // BUGFIX: parse with a unit set that includes "h" — the shared
  // durationRegex historically omitted hours, making fromDuration("1h") NaN
  // even though unitsToSeconds supports it.
  const match = duration.match(/^([0-9]*(\.[0-9]+)?)(ns|µs|ms|s|m|h|d)$/)
  if (!match) return NaN
  const [, num, , unit] = match
  return Number(num) * unitsToSeconds[unit]
}

View File

@@ -0,0 +1,3 @@
// Local alias for the start-sdk Effects type so the rest of this package
// can import it without depending on @start9labs/start-sdk directly.
import { types as T } from "@start9labs/start-sdk"
export type Effects = T.Effects

View File

@@ -0,0 +1,30 @@
import { literals, some, string } from "ts-matches"
type NestedPath<A extends string, B extends string> = `/${A}/${string}/${B}`
type NestedPaths = NestedPath<"actions", "run" | "getInput">
// prettier-ignore
type UnNestPaths<A> =
A extends `${infer A}/${infer B}` ? [...UnNestPaths<A>, ... UnNestPaths<B>] :
[A]
export function unNestPath<A extends string>(a: A): UnNestPaths<A> {
return a.split("/") as UnNestPaths<A>
}
function isNestedPath(path: string): path is NestedPaths {
const paths = path.split("/")
if (paths.length !== 4) return false
if (paths[1] === "actions" && (paths[3] === "run" || paths[3] === "getInput"))
return true
return false
}
export const jsonPath = some(
literals(
"/packageInit",
"/packageUninit",
"/backup/create",
"/backup/restore",
),
string.refine(isNestedPath, "isNestedPath"),
)
export type JsonPath = typeof jsonPath._TYPE

View File

@@ -0,0 +1,22 @@
import * as fs from "node:fs/promises"

/** Volume id reserved for the backup mount point. */
export const BACKUP = "backup"

/**
 * Resolves a volume id (plus an optional relative path) to its mount
 * point under /media/startos, and can probe whether that path exists.
 */
export class Volume {
  readonly path: string

  constructor(
    readonly volumeId: string,
    _path = "",
  ) {
    // Case-insensitive match routes the reserved id to the backup mount.
    const base =
      volumeId.toLowerCase() === BACKUP
        ? "/media/startos/backup"
        : `/media/startos/volumes/${volumeId}`
    const suffix = _path ? `/${_path}` : ""
    this.path = `${base}${suffix}`
  }

  /** True when `this.path` is stat-able on the local filesystem. */
  async exists() {
    try {
      await fs.stat(this.path)
      return true
    } catch {
      return false
    }
  }
}

View File

@@ -0,0 +1,42 @@
// Entrypoint of the container runtime's JS init process: wires up the
// dependency map and starts the RPC listener.
import { RpcListener } from "./Adapters/RpcListener"
import { SystemForEmbassy } from "./Adapters/Systems/SystemForEmbassy"
import { AllGetDependencies } from "./Interfaces/AllGetDependencies"
import { getSystem } from "./Adapters/Systems"
// The only injected dependency so far is the system resolver.
// (SystemForEmbassy is imported above but not referenced here.)
const getDependencies: AllGetDependencies = {
system: getSystem,
}
// Constructing the listener starts serving RPC requests as a side effect.
new RpcListener(getDependencies)
/**
This is sent into a running container along with any other node modules
that are needed. Once the container is started, we go into a loading/await
state. This is the init system: it is always running and waits for
commands. Each command is a stoppable promise — for example an action,
main, or a query into the types. A command is sent an object of effects;
effects are things like the file system, the network, the process, and
the os.
*/
// So OS Adapter
// ==============
/**
 * Why: when we call from the os, we enter or leave here.
 */
/**
NOTE(review): the fragment below was left unfinished by the original
author; kept as an open question.
Command: This is a command that the …
*/
/**
TODO:
Should I separate those adapter in/out?
*/

View File

@@ -0,0 +1,26 @@
{
"include": ["./**/*.ts"],
"exclude": ["dist"],
"inputs": ["./src/index.ts"],
"compilerOptions": {
"module": "Node16",
"strict": true,
"outDir": "dist",
"preserveConstEnums": true,
"sourceMap": true,
"target": "ES2022",
"pretty": true,
"declaration": true,
"noImplicitAny": true,
"esModuleInterop": true,
"types": ["node", "jest"],
"moduleResolution": "Node16",
"skipLibCheck": true,
"resolveJsonModule": true
},
"ts-node": {
"compilerOptions": {
"module": "commonjs"
}
}
}

View File

@@ -0,0 +1,51 @@
#!/bin/bash
# Builds the container-runtime rootfs squashfs: unpacks the Debian base
# image, overlays the runtime files on top, runs deb-install.sh inside
# the assembled tree via systemd-nspawn, then repacks it.
cd "$(dirname "${BASH_SOURCE[0]}")"
set -e
# Clean up mounts/workdirs left over from a previous (failed) run.
if mountpoint tmp/combined; then sudo umount -R tmp/combined; fi
if mountpoint tmp/lower; then sudo umount tmp/lower; fi
sudo rm -rf tmp
mkdir -p tmp/lower tmp/upper tmp/work tmp/combined
# Mount the Debian base image; prefer squashfuse when available.
if which squashfuse > /dev/null; then
sudo squashfuse debian.${ARCH}.squashfs tmp/lower
else
sudo mount debian.${ARCH}.squashfs tmp/lower
fi
# Overlay mount: writes land in tmp/upper, combined view is tmp/combined.
sudo mount -t overlay -olowerdir=tmp/lower,upperdir=tmp/upper,workdir=tmp/work overlay tmp/combined
# For cross-arch builds, copy the matching qemu-user-static binary into
# the tree so systemd-nspawn can execute foreign-arch binaries.
QEMU=
if [ "$ARCH" != "$(uname -m)" ]; then
QEMU=/usr/bin/qemu-${ARCH}-static
if ! which qemu-$ARCH-static > /dev/null; then
>&2 echo qemu-user-static is required for cross-platform builds
sudo umount tmp/combined
sudo umount tmp/lower
sudo rm -rf tmp
exit 1
fi
sudo cp $(which qemu-$ARCH-static) tmp/combined${QEMU}
fi
# Install the JS runtime dist, the systemd unit, and the start-cli binary.
sudo mkdir -p tmp/combined/usr/lib/startos/
sudo rsync -a --copy-unsafe-links dist/ tmp/combined/usr/lib/startos/init/
sudo chown -R 0:0 tmp/combined/usr/lib/startos/
sudo cp container-runtime.service tmp/combined/lib/systemd/system/container-runtime.service
sudo chown 0:0 tmp/combined/lib/systemd/system/container-runtime.service
sudo cp ../core/target/$ARCH-unknown-linux-musl/release/containerbox tmp/combined/usr/bin/start-cli
sudo chown 0:0 tmp/combined/usr/bin/start-cli
# Deterministic machine-id (hash of a fixed string) for the nspawn step.
echo container-runtime | sha256sum | head -c 32 | cat - <(echo) | sudo tee tmp/combined/etc/machine-id
# Run the package-installation script inside the assembled rootfs.
cat deb-install.sh | sudo systemd-nspawn --console=pipe -D tmp/combined $QEMU /bin/bash
# Blank the machine-id again — presumably so each boot regenerates its
# own id; TODO confirm.
sudo truncate -s 0 tmp/combined/etc/machine-id
if [ -n "$QEMU" ]; then
sudo rm tmp/combined${QEMU}
fi
# Repack the overlay into the final squashfs and tear everything down.
rm -f rootfs.${ARCH}.squashfs
mkdir -p ../build/lib/container-runtime
sudo mksquashfs tmp/combined rootfs.${ARCH}.squashfs
sudo umount tmp/combined
sudo umount tmp/lower
sudo rm -rf tmp

1
core/.gitignore vendored
View File

@@ -8,3 +8,4 @@ secrets.db
.env
.editorconfig
proptest-regressions/**/*
/startos/bindings/*

5712
core/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,10 +1,3 @@
[workspace]
members = [
"container-init",
"helpers",
"js-engine",
"models",
"snapshot-creator",
"startos",
]
members = ["helpers", "models", "startos"]

View File

@@ -8,9 +8,6 @@
## Structure
- `startos`: This contains the core library for StartOS that supports building `startbox`.
- `container-init` (ignore: deprecated)
- `js-engine`: This contains the library required to build `deno` to support running `.js` maintainer scripts for v0.3
- `snapshot-creator`: This contains a binary used to build `v8` runtime snapshots, required for initializing `start-deno`
- `helpers`: This contains utility functions used across both `startos` and `js-engine`
- `models`: This contains types that are shared across `startos`, `js-engine`, and `helpers`
@@ -24,8 +21,6 @@ several different names for different behaviour:
`startd` and control it similarly to the UI
- `start-sdk`: This is a CLI tool that aids in building and packaging services
you wish to deploy to StartOS
- `start-deno`: This is a CLI tool invoked by startd to run `.js` maintainer scripts for v0.3
- `avahi-alias`: This is a CLI tool invoked by startd to create aliases in `avahi` for mDNS
## Questions

54
core/build-cli.sh Executable file
View File

@@ -0,0 +1,54 @@
#!/bin/bash
# Builds the `start-cli` binary for the host (or $ARCH/$TARGET) platform.
# Uses cargo-zigbuild directly when zig is installed; otherwise falls
# back to the messense/cargo-zigbuild docker image.
cd "$(dirname "${BASH_SOURCE[0]}")"
set -ea
shopt -s expand_aliases
# Default to the host architecture; normalize Darwin-style "arm64".
if [ -z "$ARCH" ]; then
ARCH=$(uname -m)
fi
if [ "$ARCH" = "arm64" ]; then
ARCH="aarch64"
fi
if [ -z "$KERNEL_NAME" ]; then
KERNEL_NAME=$(uname -s)
fi
# Derive the rust target triple from kernel + arch unless overridden.
if [ -z "$TARGET" ]; then
if [ "$KERNEL_NAME" = "Linux" ]; then
TARGET="$ARCH-unknown-linux-musl"
elif [ "$KERNEL_NAME" = "Darwin" ]; then
TARGET="$ARCH-apple-darwin"
else
>&2 echo "unknown kernel $KERNEL_NAME"
exit 1
fi
fi
# Only allocate a docker TTY when we actually have one.
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
cd ..
# $ENVIRONMENT is a '-'-separated list mapped onto cargo features.
FEATURES="$(echo $ENVIRONMENT | sed 's/-/,/g')"
RUSTFLAGS=""
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
# BUG FIX: this line previously read `...; do` — `do` pairs with
# for/while; an `if` requires `then`, so the script was a syntax error
# and could never run.
if which zig > /dev/null && [ "$ENFORCE_USE_DOCKER" != 1 ]; then
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
RUSTFLAGS=$RUSTFLAGS sh -c "cd core && cargo zigbuild --release --no-default-features --features cli,$FEATURES --locked --bin start-cli --target=$TARGET"
else
# Docker wrapper that caches the host's cargo registry/git checkouts.
alias 'rust-zig-builder'='docker run $USE_TTY --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$HOME/.cargo/git":/root/.cargo/git -v "$(pwd)":/home/rust/src -w /home/rust/src -P messense/cargo-zigbuild'
RUSTFLAGS=$RUSTFLAGS rust-zig-builder sh -c "cd core && cargo zigbuild --release --no-default-features --features cli,$FEATURES --locked --bin start-cli --target=$TARGET"
# The container runs as root; hand artifact ownership back to the caller.
if [ "$(ls -nd core/target/$TARGET/release/start-cli | awk '{ print $3 }')" != "$UID" ]; then
rust-zig-builder sh -c "cd core && chown -R $UID:$UID target && chown -R $UID:$UID /root/.cargo"
fi
fi

36
core/build-containerbox.sh Executable file
View File

@@ -0,0 +1,36 @@
#!/bin/bash
# Builds the statically-linked `containerbox` binary for $ARCH using the
# messense/rust-musl-cross docker image.
cd "$(dirname "${BASH_SOURCE[0]}")"
set -ea
shopt -s expand_aliases
# Default to the host architecture; normalize Darwin-style "arm64".
if [ -z "$ARCH" ]; then
ARCH=$(uname -m)
fi
if [ "$ARCH" = "arm64" ]; then
ARCH="aarch64"
fi
# Only allocate a docker TTY when we actually have one.
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
cd ..
# $ENVIRONMENT is a '-'-separated list mapped onto cargo features.
FEATURES="$(echo $ENVIRONMENT | sed 's/-/,/g')"
RUSTFLAGS=""
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
# Docker wrapper that caches the host's cargo registry/git checkouts.
alias 'rust-musl-builder'='docker run $USE_TTY --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$HOME/.cargo/git":/root/.cargo/git -v "$(pwd)":/home/rust/src -w /home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-musl-builder sh -c "cd core && cargo build --release --no-default-features --features container-runtime,$FEATURES --locked --bin containerbox --target=$ARCH-unknown-linux-musl"
# The container runs as root; hand artifact ownership back to the caller.
if [ "$(ls -nd core/target/$ARCH-unknown-linux-musl/release/containerbox | awk '{ print $3 }')" != "$UID" ]; then
rust-musl-builder sh -c "cd core && chown -R $UID:$UID target && chown -R $UID:$UID /root/.cargo"
fi

View File

@@ -1,45 +0,0 @@
#!/bin/bash
cd "$(dirname "${BASH_SOURCE[0]}")"
set -e
shopt -s expand_aliases
if [ -z "$ARCH" ]; then
ARCH=$(uname -m)
fi
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
cd ..
FEATURES="$(echo $ENVIRONMENT | sed 's/-/,/g')"
RUSTFLAGS=""
alias 'rust-gnu-builder'='docker run $USE_TTY --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/usr/local/cargo/registry -v "$(pwd)":/home/rust/src -w /home/rust/src -P start9/rust-arm-cross:aarch64'
alias 'rust-musl-builder'='docker run $USE_TTY --rm -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
set +e
fail=
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
if ! rust-gnu-builder sh -c "(cd core && cargo build --release --features avahi-alias,$FEATURES --locked --bin startbox --target=$ARCH-unknown-linux-gnu)"; then
fail=true
fi
for ARCH in x86_64 aarch64
do
if ! rust-musl-builder sh -c "(cd core && cargo build --release --locked --bin container-init)"; then
fail=true
fi
done
set -e
cd core
sudo chown -R $USER target
sudo chown -R $USER ~/.cargo
if [ -n "$fail" ]; then
exit 1
fi

36
core/build-registrybox.sh Executable file
View File

@@ -0,0 +1,36 @@
#!/bin/bash
# Builds the statically-linked `registrybox` binary (cli + registry
# features) for $ARCH using the messense/rust-musl-cross docker image.
cd "$(dirname "${BASH_SOURCE[0]}")"
set -ea
shopt -s expand_aliases
# Default to the host architecture; normalize Darwin-style "arm64".
if [ -z "$ARCH" ]; then
ARCH=$(uname -m)
fi
if [ "$ARCH" = "arm64" ]; then
ARCH="aarch64"
fi
# Only allocate a docker TTY when we actually have one.
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
cd ..
# $ENVIRONMENT is a '-'-separated list mapped onto cargo features.
FEATURES="$(echo $ENVIRONMENT | sed 's/-/,/g')"
RUSTFLAGS=""
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
# Docker wrapper that caches the host's cargo registry/git checkouts.
alias 'rust-musl-builder'='docker run $USE_TTY --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$HOME/.cargo/git":/root/.cargo/git -v "$(pwd)":/home/rust/src -w /home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-musl-builder sh -c "cd core && cargo build --release --no-default-features --features cli,registry,$FEATURES --locked --bin registrybox --target=$ARCH-unknown-linux-musl"
# The container runs as root; hand artifact ownership back to the caller.
if [ "$(ls -nd core/target/$ARCH-unknown-linux-musl/release/registrybox | awk '{ print $3 }')" != "$UID" ]; then
rust-musl-builder sh -c "cd core && chown -R $UID:$UID target && chown -R $UID:$UID /root/.cargo"
fi

36
core/build-startbox.sh Executable file
View File

@@ -0,0 +1,36 @@
#!/bin/bash
# Builds the statically-linked `startbox` binary (cli + daemon features)
# for $ARCH using the messense/rust-musl-cross docker image.
cd "$(dirname "${BASH_SOURCE[0]}")"
set -ea
shopt -s expand_aliases
# Default to the host architecture; normalize Darwin-style "arm64".
if [ -z "$ARCH" ]; then
ARCH=$(uname -m)
fi
if [ "$ARCH" = "arm64" ]; then
ARCH="aarch64"
fi
# Only allocate a docker TTY when we actually have one.
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
cd ..
# $ENVIRONMENT is a '-'-separated list mapped onto cargo features.
FEATURES="$(echo $ENVIRONMENT | sed 's/-/,/g')"
RUSTFLAGS=""
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
# Docker wrapper that caches the host's cargo registry/git checkouts.
alias 'rust-musl-builder'='docker run $USE_TTY --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$HOME/.cargo/git":/root/.cargo/git -v "$(pwd)":/home/rust/src -w /home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-musl-builder sh -c "cd core && cargo build --release --no-default-features --features cli,daemon,$FEATURES --locked --bin startbox --target=$ARCH-unknown-linux-musl"
# The container runs as root; hand artifact ownership back to the caller.
if [ "$(ls -nd core/target/$ARCH-unknown-linux-musl/release/startbox | awk '{ print $3 }')" != "$UID" ]; then
rust-musl-builder sh -c "cd core && chown -R $UID:$UID target && chown -R $UID:$UID /root/.cargo"
fi

36
core/build-ts.sh Executable file
View File

@@ -0,0 +1,36 @@
#!/bin/bash
# Generates the TypeScript bindings by running the `export_bindings_`
# test suite inside the messense/rust-musl-cross docker image; output
# lands in core/startos/bindings.
cd "$(dirname "${BASH_SOURCE[0]}")"
set -ea
shopt -s expand_aliases
# Default to the host architecture; normalize Darwin-style "arm64".
if [ -z "$ARCH" ]; then
ARCH=$(uname -m)
fi
if [ "$ARCH" = "arm64" ]; then
ARCH="aarch64"
fi
# Only allocate a docker TTY when we actually have one.
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
cd ..
# $ENVIRONMENT is a '-'-separated list mapped onto cargo features.
FEATURES="$(echo $ENVIRONMENT | sed 's/-/,/g')"
RUSTFLAGS=""
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
# Docker wrapper that caches the host's cargo registry/git checkouts.
alias 'rust-musl-builder'='docker run $USE_TTY --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$HOME/.cargo/git":/root/.cargo/git -v "$(pwd)":/home/rust/src -w /home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
# \$UID is escaped so it is expanded inside the container's shell.
rust-musl-builder sh -c "cd core && cargo test --release --features=test,$FEATURES 'export_bindings_' && chown \$UID:\$UID startos/bindings"
# The container runs as root; hand artifact ownership back to the caller.
if [ "$(ls -nd core/startos/bindings | awk '{ print $3 }')" != "$UID" ]; then
rust-musl-builder sh -c "cd core && chown -R $UID:$UID startos/bindings && chown -R $UID:$UID target && chown -R $UID:$UID /root/.cargo"
fi

View File

@@ -1,39 +0,0 @@
#!/bin/bash
# Reason for this script: we need to create a snapshot for the deno runtime. It wants to pull 3 files from build, and during creation they get embedded, but for some
# reason during the actual runtime it still looks for them. So this runs a docker container on arm that creates the snapshot needed for the arm build.
cd "$(dirname "${BASH_SOURCE[0]}")"
set -e
shopt -s expand_aliases
if [ -z "$ARCH" ]; then
ARCH=$(uname -m)
fi
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
alias 'rust-gnu-builder'='docker run $USE_TTY --rm -v "$HOME/.cargo/registry":/usr/local/cargo/registry -v "$(pwd)":/home/rust/src -w /home/rust/src -P start9/rust-arm-cross:aarch64'
echo "Building "
cd ..
rust-gnu-builder sh -c "(cd core/ && cargo build -p snapshot_creator --release --target=${ARCH}-unknown-linux-gnu)"
cd -
if [ "$ARCH" = "aarch64" ]; then
DOCKER_ARCH='arm64/v8'
elif [ "$ARCH" = "x86_64" ]; then
DOCKER_ARCH='amd64'
fi
echo "Creating Arm v8 Snapshot"
docker run $USE_TTY --platform "linux/${DOCKER_ARCH}" --mount type=bind,src=$(pwd),dst=/mnt ubuntu:22.04 /bin/sh -c "cd /mnt && /mnt/target/${ARCH}-unknown-linux-gnu/release/snapshot_creator"
sudo chown -R $USER target
sudo chown -R $USER ~/.cargo
sudo chown $USER JS_SNAPSHOT.bin
sudo chmod 0644 JS_SNAPSHOT.bin
sudo mv -f JS_SNAPSHOT.bin ./js-engine/src/artifacts/JS_SNAPSHOT.${ARCH}.bin

View File

@@ -1,39 +0,0 @@
[package]
name = "container-init"
version = "0.1.0"
edition = "2021"
rust = "1.66"
[features]
dev = []
metal = []
sound = []
unstable = []
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
async-stream = "0.3"
# cgroups-rs = "0.2"
color-eyre = "0.6"
futures = "0.3"
serde = { version = "1", features = ["derive", "rc"] }
serde_json = "1"
helpers = { path = "../helpers" }
imbl = "2"
nix = { version = "0.27", features = ["process", "signal"] }
tokio = { version = "1", features = ["full"] }
tokio-stream = { version = "0.1", features = ["io-util", "sync", "net"] }
tracing = "0.1"
tracing-error = "0.2"
tracing-futures = "0.2"
tracing-subscriber = { version = "0.3", features = ["env-filter"] }
yajrc = { version = "*", git = "https://github.com/dr-bonez/yajrc.git", branch = "develop" }
[target.'cfg(target_os = "linux")'.dependencies]
procfs = "0.15"
[profile.test]
opt-level = 3
[profile.dev.package.backtrace]
opt-level = 3

View File

@@ -1,214 +0,0 @@
use nix::unistd::Pid;
use serde::{Deserialize, Serialize, Serializer};
use yajrc::RpcMethod;
/// Know what the process is called
#[derive(Debug, Serialize, Deserialize, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct ProcessId(pub u32);
impl From<ProcessId> for Pid {
fn from(pid: ProcessId) -> Self {
Pid::from_raw(pid.0 as i32)
}
}
impl From<Pid> for ProcessId {
fn from(pid: Pid) -> Self {
ProcessId(pid.as_raw() as u32)
}
}
impl From<i32> for ProcessId {
fn from(pid: i32) -> Self {
ProcessId(pid as u32)
}
}
#[derive(Debug, Serialize, Deserialize, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct ProcessGroupId(pub u32);
#[derive(Debug, Serialize, Deserialize, Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
#[serde(rename_all = "kebab-case")]
pub enum OutputStrategy {
Inherit,
Collect,
}
#[derive(Debug, Clone, Copy)]
pub struct RunCommand;
impl Serialize for RunCommand {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
Serialize::serialize(Self.as_str(), serializer)
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct RunCommandParams {
pub gid: Option<ProcessGroupId>,
pub command: String,
pub args: Vec<String>,
pub output: OutputStrategy,
}
impl RpcMethod for RunCommand {
type Params = RunCommandParams;
type Response = ProcessId;
fn as_str<'a>(&'a self) -> &'a str {
"command"
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum LogLevel {
Trace(String),
Warn(String),
Error(String),
Info(String),
Debug(String),
}
impl LogLevel {
pub fn trace(&self) {
match self {
LogLevel::Trace(x) => tracing::trace!("{}", x),
LogLevel::Warn(x) => tracing::warn!("{}", x),
LogLevel::Error(x) => tracing::error!("{}", x),
LogLevel::Info(x) => tracing::info!("{}", x),
LogLevel::Debug(x) => tracing::debug!("{}", x),
}
}
}
#[derive(Debug, Clone, Copy)]
pub struct Log;
impl Serialize for Log {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
Serialize::serialize(Self.as_str(), serializer)
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct LogParams {
pub gid: Option<ProcessGroupId>,
pub level: LogLevel,
}
impl RpcMethod for Log {
type Params = LogParams;
type Response = ();
fn as_str<'a>(&'a self) -> &'a str {
"log"
}
}
#[derive(Debug, Clone, Copy)]
pub struct ReadLineStdout;
impl Serialize for ReadLineStdout {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
Serialize::serialize(Self.as_str(), serializer)
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReadLineStdoutParams {
pub pid: ProcessId,
}
impl RpcMethod for ReadLineStdout {
type Params = ReadLineStdoutParams;
type Response = String;
fn as_str<'a>(&'a self) -> &'a str {
"read-line-stdout"
}
}
#[derive(Debug, Clone, Copy)]
pub struct ReadLineStderr;
impl Serialize for ReadLineStderr {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
Serialize::serialize(Self.as_str(), serializer)
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ReadLineStderrParams {
pub pid: ProcessId,
}
impl RpcMethod for ReadLineStderr {
type Params = ReadLineStderrParams;
type Response = String;
fn as_str<'a>(&'a self) -> &'a str {
"read-line-stderr"
}
}
#[derive(Debug, Clone, Copy)]
pub struct Output;
impl Serialize for Output {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
Serialize::serialize(Self.as_str(), serializer)
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct OutputParams {
pub pid: ProcessId,
}
impl RpcMethod for Output {
type Params = OutputParams;
type Response = String;
fn as_str<'a>(&'a self) -> &'a str {
"output"
}
}
#[derive(Debug, Clone, Copy)]
pub struct SendSignal;
impl Serialize for SendSignal {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
Serialize::serialize(Self.as_str(), serializer)
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SendSignalParams {
pub pid: ProcessId,
pub signal: u32,
}
impl RpcMethod for SendSignal {
type Params = SendSignalParams;
type Response = ();
fn as_str<'a>(&'a self) -> &'a str {
"signal"
}
}
#[derive(Debug, Clone, Copy)]
pub struct SignalGroup;
impl Serialize for SignalGroup {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
Serialize::serialize(Self.as_str(), serializer)
}
}
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct SignalGroupParams {
pub gid: ProcessGroupId,
pub signal: u32,
}
impl RpcMethod for SignalGroup {
type Params = SignalGroupParams;
type Response = ();
fn as_str<'a>(&'a self) -> &'a str {
"signal-group"
}
}

View File

@@ -1,428 +0,0 @@
use std::collections::BTreeMap;
use std::ops::DerefMut;
use std::os::unix::process::ExitStatusExt;
use std::process::Stdio;
use std::sync::Arc;
use container_init::{
LogParams, OutputParams, OutputStrategy, ProcessGroupId, ProcessId, RunCommandParams,
SendSignalParams, SignalGroupParams,
};
use futures::StreamExt;
use helpers::NonDetachingJoinHandle;
use nix::errno::Errno;
use nix::sys::signal::Signal;
use serde::{Deserialize, Serialize};
use serde_json::json;
use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader};
use tokio::process::{Child, Command};
use tokio::select;
use tokio::sync::{watch, Mutex};
use yajrc::{Id, RpcError};
/// Outputs embedded in the JSONRpc output of the executable.
#[derive(Debug, Clone, Serialize)]
#[serde(untagged)]
enum Output {
Command(ProcessId),
ReadLineStdout(String),
ReadLineStderr(String),
Output(String),
Log,
Signal,
SignalGroup,
}
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "method", content = "params", rename_all = "kebab-case")]
enum Input {
/// Run a new command, with the args
Command(RunCommandParams),
/// Log locally on the service rather than on the eos host
Log(LogParams),
// /// Get a line of stdout from the command
// ReadLineStdout(ReadLineStdoutParams),
// /// Get a line of stderr from the command
// ReadLineStderr(ReadLineStderrParams),
/// Get output of command
Output(OutputParams),
/// Send the sigterm to the process
Signal(SendSignalParams),
/// Signal a group of processes
SignalGroup(SignalGroupParams),
}
#[derive(Deserialize)]
struct IncomingRpc {
id: Id,
#[serde(flatten)]
input: Input,
}
struct ChildInfo {
gid: Option<ProcessGroupId>,
child: Arc<Mutex<Option<Child>>>,
output: Option<InheritOutput>,
}
struct InheritOutput {
_thread: NonDetachingJoinHandle<()>,
stdout: watch::Receiver<String>,
stderr: watch::Receiver<String>,
}
struct HandlerMut {
processes: BTreeMap<ProcessId, ChildInfo>,
// groups: BTreeMap<ProcessGroupId, Cgroup>,
}
#[derive(Clone)]
struct Handler {
children: Arc<Mutex<HandlerMut>>,
}
impl Handler {
fn new() -> Self {
Handler {
children: Arc::new(Mutex::new(HandlerMut {
processes: BTreeMap::new(),
// groups: BTreeMap::new(),
})),
}
}
async fn handle(&self, req: Input) -> Result<Output, RpcError> {
Ok(match req {
Input::Command(RunCommandParams {
gid,
command,
args,
output,
}) => Output::Command(self.command(gid, command, args, output).await?),
// Input::ReadLineStdout(ReadLineStdoutParams { pid }) => {
// Output::ReadLineStdout(self.read_line_stdout(pid).await?)
// }
// Input::ReadLineStderr(ReadLineStderrParams { pid }) => {
// Output::ReadLineStderr(self.read_line_stderr(pid).await?)
// }
Input::Log(LogParams { gid: _, level }) => {
level.trace();
Output::Log
}
Input::Output(OutputParams { pid }) => Output::Output(self.output(pid).await?),
Input::Signal(SendSignalParams { pid, signal }) => {
self.signal(pid, signal).await?;
Output::Signal
}
Input::SignalGroup(SignalGroupParams { gid, signal }) => {
self.signal_group(gid, signal).await?;
Output::SignalGroup
}
})
}
async fn command(
&self,
gid: Option<ProcessGroupId>,
command: String,
args: Vec<String>,
output: OutputStrategy,
) -> Result<ProcessId, RpcError> {
let mut cmd = Command::new(command);
cmd.args(args);
cmd.kill_on_drop(true);
cmd.stdout(Stdio::piped());
cmd.stderr(Stdio::piped());
let mut child = cmd.spawn().map_err(|e| {
let mut err = yajrc::INTERNAL_ERROR.clone();
err.data = Some(json!(e.to_string()));
err
})?;
let pid = ProcessId(child.id().ok_or_else(|| {
let mut err = yajrc::INTERNAL_ERROR.clone();
err.data = Some(json!("Child has no pid"));
err
})?);
let output = match output {
OutputStrategy::Inherit => {
let (stdout_send, stdout) = watch::channel(String::new());
let (stderr_send, stderr) = watch::channel(String::new());
if let (Some(child_stdout), Some(child_stderr)) =
(child.stdout.take(), child.stderr.take())
{
Some(InheritOutput {
_thread: tokio::spawn(async move {
tokio::join!(
async {
if let Err(e) = async {
let mut lines = BufReader::new(child_stdout).lines();
while let Some(line) = lines.next_line().await? {
tracing::info!("({}): {}", pid.0, line);
let _ = stdout_send.send(line);
}
Ok::<_, std::io::Error>(())
}
.await
{
tracing::error!(
"Error reading stdout of pid {}: {}",
pid.0,
e
);
}
},
async {
if let Err(e) = async {
let mut lines = BufReader::new(child_stderr).lines();
while let Some(line) = lines.next_line().await? {
tracing::warn!("({}): {}", pid.0, line);
let _ = stderr_send.send(line);
}
Ok::<_, std::io::Error>(())
}
.await
{
tracing::error!(
"Error reading stdout of pid {}: {}",
pid.0,
e
);
}
}
);
})
.into(),
stdout,
stderr,
})
} else {
None
}
}
OutputStrategy::Collect => None,
};
self.children.lock().await.processes.insert(
pid,
ChildInfo {
gid,
child: Arc::new(Mutex::new(Some(child))),
output,
},
);
Ok(pid)
}
async fn output(&self, pid: ProcessId) -> Result<String, RpcError> {
let not_found = || {
let mut err = yajrc::INTERNAL_ERROR.clone();
err.data = Some(json!(format!("Child with pid {} not found", pid.0)));
err
};
let mut child = {
self.children
.lock()
.await
.processes
.get(&pid)
.ok_or_else(not_found)?
.child
.clone()
}
.lock_owned()
.await;
if let Some(child) = child.take() {
let output = child.wait_with_output().await?;
if output.status.success() {
Ok(String::from_utf8(output.stdout).map_err(|_| yajrc::PARSE_ERROR)?)
} else {
Err(RpcError {
code: output
.status
.code()
.or_else(|| output.status.signal().map(|s| 128 + s))
.unwrap_or(0),
message: "Command failed".into(),
data: Some(json!(String::from_utf8(if output.stderr.is_empty() {
output.stdout
} else {
output.stderr
})
.map_err(|_| yajrc::PARSE_ERROR)?)),
})
}
} else {
Err(not_found())
}
}
async fn signal(&self, pid: ProcessId, signal: u32) -> Result<(), RpcError> {
let not_found = || {
let mut err = yajrc::INTERNAL_ERROR.clone();
err.data = Some(json!(format!("Child with pid {} not found", pid.0)));
err
};
Self::killall(pid, Signal::try_from(signal as i32)?)?;
if signal == 9 {
self.children
.lock()
.await
.processes
.remove(&pid)
.ok_or_else(not_found)?;
}
Ok(())
}
async fn signal_group(&self, gid: ProcessGroupId, signal: u32) -> Result<(), RpcError> {
let mut to_kill = Vec::new();
{
let mut children_ref = self.children.lock().await;
let children = std::mem::take(&mut children_ref.deref_mut().processes);
for (pid, child_info) in children {
if child_info.gid == Some(gid) {
to_kill.push(pid);
} else {
children_ref.processes.insert(pid, child_info);
}
}
}
for pid in to_kill {
tracing::info!("Killing pid {}", pid.0);
Self::killall(pid, Signal::try_from(signal as i32)?)?;
}
Ok(())
}
fn killall(pid: ProcessId, signal: Signal) -> Result<(), RpcError> {
for proc in procfs::process::all_processes()? {
let stat = proc?.stat()?;
if ProcessId::from(stat.ppid) == pid {
Self::killall(stat.pid.into(), signal)?;
}
}
if let Err(e) = nix::sys::signal::kill(pid.into(), Some(signal)) {
if e != Errno::ESRCH {
tracing::error!("Failed to kill pid {}: {}", pid.0, e);
}
}
Ok(())
}
async fn graceful_exit(self) {
let kill_all = futures::stream::iter(
std::mem::take(&mut self.children.lock().await.deref_mut().processes).into_iter(),
)
.for_each_concurrent(None, |(pid, child)| async move {
let _ = Self::killall(pid, Signal::SIGTERM);
if let Some(child) = child.child.lock().await.take() {
let _ = child.wait_with_output().await;
}
});
kill_all.await
}
}
#[tokio::main]
async fn main() {
use tokio::signal::unix::{signal, SignalKind};
let mut sigint = signal(SignalKind::interrupt()).unwrap();
let mut sigterm = signal(SignalKind::terminate()).unwrap();
let mut sigquit = signal(SignalKind::quit()).unwrap();
let mut sighangup = signal(SignalKind::hangup()).unwrap();
use tracing_error::ErrorLayer;
use tracing_subscriber::prelude::*;
use tracing_subscriber::{fmt, EnvFilter};
let filter_layer = EnvFilter::new("container_init=debug");
let fmt_layer = fmt::layer().with_target(true);
tracing_subscriber::registry()
.with(filter_layer)
.with(fmt_layer)
.with(ErrorLayer::default())
.init();
color_eyre::install().unwrap();
let handler = Handler::new();
let handler_thread = async {
let listener = tokio::net::UnixListener::bind("/start9/sockets/rpc.sock")?;
loop {
let (stream, _) = listener.accept().await?;
let (r, w) = stream.into_split();
let mut lines = BufReader::new(r).lines();
let handler = handler.clone();
tokio::spawn(async move {
let w = Arc::new(Mutex::new(w));
while let Some(line) = lines.next_line().await.transpose() {
let handler = handler.clone();
let w = w.clone();
tokio::spawn(async move {
if let Err(e) = async {
let req = serde_json::from_str::<IncomingRpc>(&line?)?;
match handler.handle(req.input).await {
Ok(output) => {
if w.lock().await.write_all(
format!("{}\n", json!({ "id": req.id, "jsonrpc": "2.0", "result": output }))
.as_bytes(),
)
.await.is_err() {
tracing::error!("Error sending to {id:?}", id = req.id);
}
}
Err(e) =>
if w
.lock()
.await
.write_all(
format!("{}\n", json!({ "id": req.id, "jsonrpc": "2.0", "error": e }))
.as_bytes(),
)
.await.is_err() {
tracing::error!("Handle + Error sending to {id:?}", id = req.id);
},
}
Ok::<_, color_eyre::Report>(())
}
.await
{
tracing::error!("Error parsing RPC request: {}", e);
tracing::debug!("{:?}", e);
}
});
}
Ok::<_, std::io::Error>(())
});
}
#[allow(unreachable_code)]
Ok::<_, std::io::Error>(())
};
select! {
res = handler_thread => {
match res {
Ok(()) => tracing::debug!("Done with inputs/outputs"),
Err(e) => {
tracing::error!("Error reading RPC input: {}", e);
tracing::debug!("{:?}", e);
}
}
},
_ = sigint.recv() => {
tracing::debug!("SIGINT");
},
_ = sigterm.recv() => {
tracing::debug!("SIGTERM");
},
_ = sigquit.recv() => {
tracing::debug!("SIGQUIT");
},
_ = sighangup.recv() => {
tracing::debug!("SIGHUP");
}
}
handler.graceful_exit().await;
::std::process::exit(0)
}

View File

@@ -11,9 +11,9 @@ futures = "0.3.28"
lazy_async_pool = "0.3.3"
models = { path = "../models" }
pin-project = "1.1.3"
rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git", branch = "master" }
serde = { version = "1.0", features = ["derive", "rc"] }
serde_json = "1.0"
tokio = { version = "1", features = ["full"] }
tokio-stream = { version = "0.1.14", features = ["io-util", "sync"] }
tracing = "0.1.39"
yajrc = { version = "*", git = "https://github.com/dr-bonez/yajrc.git", branch = "develop" }

View File

@@ -6,16 +6,15 @@ use std::time::Duration;
use color_eyre::eyre::{eyre, Context, Error};
use futures::future::BoxFuture;
use futures::FutureExt;
use models::ResultExt;
use tokio::fs::File;
use tokio::sync::oneshot;
use tokio::task::{JoinError, JoinHandle, LocalSet};
mod byte_replacement_reader;
mod rpc_client;
mod rsync;
mod script_dir;
pub use byte_replacement_reader::*;
pub use rpc_client::{RpcClient, UnixRpcClient};
pub use rsync::*;
pub use script_dir::*;
@@ -52,7 +51,8 @@ pub async fn canonicalize(
}
let path = path.as_ref();
if tokio::fs::metadata(path).await.is_err() {
if let (Some(parent), Some(file_name)) = (path.parent(), path.file_name()) {
let parent = path.parent().unwrap_or(Path::new("."));
if let Some(file_name) = path.file_name() {
if create_parent && tokio::fs::metadata(parent).await.is_err() {
return Ok(create_canonical_folder(parent).await?.join(file_name));
} else {
@@ -177,7 +177,7 @@ impl Drop for AtomicFile {
if let Some(file) = self.file.take() {
drop(file);
let path = std::mem::take(&mut self.tmp_path);
tokio::spawn(async move { tokio::fs::remove_file(path).await.unwrap() });
tokio::spawn(async move { tokio::fs::remove_file(path).await.log_err() });
}
}
}

View File

@@ -1,10 +1,14 @@
use std::path::{Path, PathBuf};
use models::{PackageId, Version};
use models::{PackageId, VersionString};
pub const PKG_SCRIPT_DIR: &str = "package-data/scripts";
pub fn script_dir<P: AsRef<Path>>(datadir: P, pkg_id: &PackageId, version: &Version) -> PathBuf {
pub fn script_dir<P: AsRef<Path>>(
datadir: P,
pkg_id: &PackageId,
version: &VersionString,
) -> PathBuf {
datadir
.as_ref()
.join(&*PKG_SCRIPT_DIR)

View File

@@ -2,17 +2,18 @@
cd "$(dirname "${BASH_SOURCE[0]}")"
set -e
set -ea
shopt -s expand_aliases
web="../web/dist/static"
[ -d "$web" ] || mkdir -p "$web"
if [ -z "$PLATFORM" ]; then
export PLATFORM=$(uname -m)
PLATFORM=$(uname -m)
fi
cargo install --path=./startos --no-default-features --features=js-engine,sdk,cli --locked
startbox_loc=$(which startbox)
ln -sf $startbox_loc $(dirname $startbox_loc)/start-cli
ln -sf $startbox_loc $(dirname $startbox_loc)/start-sdk
if [ "$PLATFORM" = "arm64" ]; then
PLATFORM="aarch64"
fi
cargo install --path=./startos --no-default-features --features=cli,docker,registry --bin start-cli --locked

Some files were not shown because too many files have changed in this diff Show More