Compare commits

178 Commits

Author SHA1 Message Date
Matt Hill
871f78b570 Feature/server status restarting (#2503)
* extend `server-info`

* add restarting, shutting down to FE status bar

* fix build

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-11-08 02:31:18 -07:00
Aiden McClelland
753fbc0c5c disable CoW for journal (#2501) 2023-11-07 18:16:02 +00:00
Aiden McClelland
748277aa0e do not wait for input on btrfs repair (#2500) 2023-11-07 09:21:32 -07:00
Aiden McClelland
bf40a9ef6d improve Invoke api (#2499)
* improve Invoke api

* fix formatting
2023-11-06 17:26:45 -07:00
Lucy
733000eaa2 fix docs links (#2498)
* fix docs links

* forgot to save file

* fix docs links and small updates to ca wizard

* add downloaded filename

* fix skip detail
2023-11-06 17:24:15 -07:00
Lucy
6a399a7250 Fix/setup (#2495)
* move enter-click directive to shared

* allow enter click to continue to login in kiosk mode; adjust styling

* cleanup

* add styling to ca wizard

* rebase new changes

* mobile fixes

* cleanup

* cleanup

* update styling

* cleanup import

* minor css changes

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2023-11-02 17:35:55 -06:00
Aiden McClelland
7ba22f1a09 mute errors due to failed incoming network connections (#2497)
* mute errors due to failed incoming network connections

* fix log entry formatting

* Update cleanDanglingImages

* Update cleanDanglingImages
2023-11-02 17:33:41 -06:00
Aiden McClelland
f54f950f81 fix js backups (#2496) 2023-11-02 23:13:48 +00:00
Matt Hill
4625711606 better UX for http/https switching (#2494)
* open https in same tab

* open http in same tab, use windowRef instead of window
2023-11-02 09:34:00 -06:00
Aiden McClelland
5735ea2b3c change grub os selection to say "StartOS" (#2493)
* change grub os selection to say "StartOS"

* readd "v" to motd
2023-11-01 18:35:18 -06:00
Matt Hill
b597d0366a fix time display bug and type metrics (#2490)
* fix time display bug and type metrics

* change metrics response

* nullable temp

* rename percentage used

* match frontend types

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-11-01 22:30:54 +00:00
Matt Hill
9c6dcc4a43 use keys to complete setup and redesign final buttons (#2492) 2023-11-01 22:30:42 +00:00
Aiden McClelland
27c5464cb6 use low mem for all argon2 configs (#2491)
* reduce argon2 memory usage

* update FE argon2

* fix missing typedefs

* use low mem for all argon2 configs
2023-11-01 22:28:59 +00:00
Matt Hill
1dad7965d2 rework ca-wiz and add icons to menu for warnings (#2486)
* rework ca-wiz and add icons to menu for warnings

* remove root CA button from home page

* load fonts before calling complete in setup wiz
2023-11-01 19:36:56 +00:00
Aiden McClelland
c14ca1d7fd use existing dependency icon if available (#2489) 2023-11-01 19:23:14 +00:00
Mariusz Kogen
2b9e7432b8 Updated motd with new logo (#2488) 2023-11-01 19:22:49 +00:00
Aiden McClelland
547747ff74 continuous deployment (#2485)
* continuous deployment

* fix

* escape braces in format string

* Update upload-ota.sh

* curl fail on http error
2023-11-01 19:22:34 +00:00
J H
e5b137b331 fix: Logging in deno to filter out the info (#2487)
Co-authored-by: jh <jh@Desktop.hsd1.co.comcast.net>
2023-10-31 15:38:00 -06:00
Aiden McClelland
9e554bdecd cleanup network keys on uninstall (#2484) 2023-10-30 16:43:00 +00:00
Aiden McClelland
765b542264 actually enable zram during migration (#2483)
actually enable zram during migration
2023-10-27 23:34:02 +00:00
Aiden McClelland
182a095420 use old secret key derivation function (#2482)
* use old secret key derivation function

* compat

* cargo
2023-10-27 23:32:21 +00:00
Aiden McClelland
0865cffddf add 1 day margin on start time (#2481) 2023-10-27 18:56:06 +00:00
Aiden McClelland
5a312b9900 use correct sigterm_timeout (#2480) 2023-10-27 18:55:55 +00:00
Matt Hill
af2b2f33c2 Fix/ntp (#2479)
* rework ntp failure handling and display to user

* uptime in seconds

* change how we handle ntp

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-10-26 23:33:57 +00:00
Aaron Dewes
9aa08dfb9b Delete Cargo.lock (#2477) 2023-10-26 22:16:52 +00:00
Matt Hill
b28c673133 closes #2454 (#2478) 2023-10-26 16:02:53 -06:00
Matt Hill
9a545f176d display restoring when restoring (#2476) 2023-10-25 12:08:39 -06:00
Lucy
65728eb6ab allow tab completion on final setup stage in kiosk mode (#2473) 2023-10-25 00:24:11 -06:00
Aiden McClelland
531e037974 Bugfix/argon2 mem usage (#2474)
* reduce argon2 memory usage

* update FE argon2

* fix missing typedefs
2023-10-25 00:17:29 -06:00
Aiden McClelland
a96467cb3e fix raspi build (#2472)
* fix raspi build

* Update build.sh
2023-10-24 21:27:33 +00:00
Aiden McClelland
6e92a7d93d Bugfix/output timeout (#2471)
* Fix: Test with the buf reader never finishing

* fix NoOutput deserialization

---------

Co-authored-by: J H <2364004+Blu-J@users.noreply.github.com>
2023-10-23 21:46:46 -06:00
Aiden McClelland
740e63da2b enable zram by default (#2470) 2023-10-23 21:01:52 +00:00
J H
a69cae22dd chore: Cleaning up the stream to a regular stream (#2468) 2023-10-23 20:41:54 +00:00
Aiden McClelland
8ea3c3c29e consolidate and streamline build (#2469)
* consolidate and streamline build

* fix workflow syntax

* fix workflow syntax

* fix workflow syntax

* fix workflow syntax

* fix build scripts

* only build platform-specific system images

* fix build script

* more build fixes

* fix

* fix compat build for x86

* wat

* checkout

* Prevent rebuild of compiled artifacts

* Update startos-iso.yaml

* Update startos-iso.yaml

* fix raspi build

* handle missing platform better

* reduce arm vcpus

* remove arch and platform from fe config, add to patch db

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2023-10-23 20:40:00 +00:00
Aiden McClelland
63ab739b3d update Cargo.lock 2023-10-19 09:08:15 -06:00
Aiden McClelland
58bb788034 chore: update dependencies (#2465)
* chore: update dependencies

* fix crypto

* update deno

* update release notes
2023-10-18 22:53:54 +00:00
J H
9e633b37e7 Fix/quarantine deno (#2466)
* fix: Move the embedded deno into a separate binary.

This should be the quick hacky way of making sure that the memory leaks won't happen

* fix:
2023-10-18 22:02:45 +00:00
Lucy
bb6a4842bd Update/misc fe (#2463)
* update to use transparent icon; add icon to login

* update setup mocks to imitate reality

* update webmanifest version

* fix version in webmanifest

* reset icons with background; update login page style

* adjust login header

* cleanup + adjust icon size

* revert icon

* cleanup and reposition error message
2023-10-18 12:24:48 -06:00
Aiden McClelland
246727995d add tokio-console if unstable (#2461)
* add tokio-console if `unstable`

* instrument vhost controller
2023-10-17 19:25:44 +00:00
Aiden McClelland
202695096a Feature/simple syncdb (#2464)
* simplify db sync on rpc endpoints

* switch to patch-db master

* update fe for websocket only stream

* fix api

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2023-10-17 15:49:58 +00:00
J H
afbab293a8 Chore: remove an arc mutex that wasn't necessary (#2462) 2023-10-16 22:26:59 +00:00
Aiden McClelland
78faf888af fix some causes of start wonkiness on update (#2458)
* fix some causes of start wonkiness on update

* fix race condition with manager

Co-authored-by: J H <Blu-J@users.noreply.github.com>

* only restart if running

* fix start function

* clean up code

* fix restart logic

---------

Co-authored-by: J H <Blu-J@users.noreply.github.com>
2023-10-16 18:34:12 +00:00
Matt Hill
5164c21923 stop while starting or restarting (#2460) 2023-10-16 18:23:12 +00:00
Aiden McClelland
edcd1a3c5b only use first sensor of each group for temp reporting (#2457) 2023-10-16 18:19:24 +00:00
Lucy
532ab9128f add apollo review badge and update badges with icons (#2456)
* add apollo review badge and update badges with icons

* fix mastodon
2023-10-16 12:16:44 -06:00
Aiden McClelland
a3072aacc2 add firmware updater (#2455) 2023-10-16 17:42:42 +00:00
Lucy
27296d8880 update docs links (#2452)
* update docs links

* update backups links

* add anchor tag back to trust root ca link

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2023-10-13 09:02:59 -06:00
J H
8549b9bc37 fix: Add in logging for the podman in the system logs (#2451)
* fix: Add in logging for the podman in the system logs

* https as default for main tor address

---------

Co-authored-by: agent <kn0wmad@protonmail.com>
2023-10-10 14:13:56 -06:00
Aiden McClelland
7632373097 fix cors middleware (#2450)
* fix cors response

* fix cors properly
2023-10-09 17:34:27 -06:00
Matt Hill
23b0674ac0 fix cert name and show ca wiz on http ip (#2448) 2023-10-06 16:40:11 -06:00
Matt Hill
01f0484a0e fix health check error (#2447) 2023-10-06 15:25:58 -06:00
Mariusz Kogen
3ca9035fdb Use the correct OS name (#2445)
Use correct OS name
2023-10-06 09:29:54 -06:00
Matt Hill
caaf9d26db Fix/patch fe (#2444)
* clear caches on logout

* fix uninstall pkg missing error
2023-10-05 19:04:10 -06:00
Aiden McClelland
eb521b2332 enable trimming in luks (#2443) 2023-10-05 23:40:44 +00:00
Aiden McClelland
68c29ab99e allow UNSET country code for wifi (#2442) 2023-10-05 22:14:51 +00:00
Matt Hill
f12b7f4319 fix cert endpoint 2023-10-05 14:42:41 -06:00
Aiden McClelland
7db331320a Update LICENSE (#2441)
* Update LICENSE

* update README.md

* update release notes
2023-10-05 19:37:31 +00:00
Aiden McClelland
97ad8a85c3 update cargo lock 2023-10-05 08:59:24 -06:00
Aiden McClelland
6f588196cb set governor to "performance" if available (#2438)
* set governor to "performance" if available

* add linux-cpupower

* fix: Boolean blindness, thanks @dr-bones

---------

Co-authored-by: J H <2364004+Blu-J@users.noreply.github.com>
2023-10-04 20:52:56 +00:00
Aiden McClelland
20241c27ee prevent stack overflow on shutdown (#2440)
* prevent stack overflow on shutdown

* fix

---------

Co-authored-by: J H <2364004+Blu-J@users.noreply.github.com>
2023-10-04 19:51:58 +00:00
Matt Hill
05d6aea37f remove hard coded timeout 2023-10-04 13:06:49 -06:00
Matt Hill
7e0e7860cd cancel old request and base interval on tor (#2439) 2023-10-04 13:00:49 -06:00
J H
a0afd7b8ed fixing: Reimplement https://github.com/Start9Labs/start-os/pull/2391 (#2437)
* fixing: Reimplement https://github.com/Start9Labs/start-os/pull/2391

* remove the none thing
2023-10-04 18:06:43 +00:00
Matt Hill
500369ab2b Update/logos (#2435)
* update logos to startos icon

* readme too

* fix spelling
2023-10-03 10:53:29 -06:00
Aiden McClelland
dc26d5c0c8 Bugfix/var tmp (#2434)
* mount /var/tmp to data drive

* clear var tmp on restart
2023-10-02 21:50:05 +00:00
Aiden McClelland
0def02f604 mount /var/tmp to data drive (#2433) 2023-10-02 21:18:39 +00:00
J H
0ffa9167da feat: Add in the ssl_size (#2432)
* feat: Add in the ssl_size

* chore: Changes for the naming and other things.
2023-10-02 21:15:24 +00:00
Aiden McClelland
a110e8f241 make migration more resilient 2023-10-02 10:02:40 -06:00
gStart9
491f363392 Shore up Firefox kiosk mode (#2422)
* Shore up Firefox kiosk mode

* Bash shell for kiosk user

* enable-kiosk script final-ish touches

* make script idempotent

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-09-30 00:32:15 +00:00
Matt Hill
33a67bf7b4 bump FE version and release notes (#2429)
* bump FE version and release notes

* change cargo.toml version

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-09-29 22:32:10 +00:00
Matt Hill
1e6f583431 only emit when things change (#2428)
* only emit when things change

* remove log

* remove test call

* more efficient, thanks BluJ

* point free
2023-09-29 14:36:40 -06:00
J H
5e3412d735 feat: Change all the dependency errors at once (#2427)
* feat: Change all the dependency errors at once

* remove deprecated dependency-errors field

* set pointers to [] by default

* chore: Something about fixing the build

* fix migration

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-09-29 12:08:53 -06:00
Matt Hill
e6e4cd63f3 fix welcome page 2023-09-28 14:54:20 -06:00
Aiden McClelland
f5da5f4ef0 registry admin script (#2426)
* registryadmin scripts

Add `start-sdk publish` command which can potentially replace
the Haskell implementation of `registry-publish upload`

* restructure modules

---------

Co-authored-by: Sam Sartor <me@samsartor.com>
2023-09-28 17:19:31 +00:00
J H
9a202cc124 Refactor/patch db (#2415)
* the only way to begin is by beginning

* chore: Convert over 3444 migration

* fix imports

* wip

* feat: convert volume

* convert: system.rs

* wip(convert): Setup

* wip properties

* wip notifications

* wip

* wip migration

* wip init

* wip auth/control

* wip action

* wip control

* wip 034

* wip 344

* wip some more versions converted

* feat: Reserialize the version of the db

* wip rest of the versions

* wip s9pk/manifest

* wip wifi

* chore: net/keys

* chore: net/dns

* wip net/dhcp

* wip manager manager-map

* gut dependency errors

* wip update/mod

* detect breakages locally for updates

* wip: manager/mod

* wip: manager/health

* wip: backup/target/mod

* fix: Typo addresses

* clean control.rs

* fix system package id

* switch to btreemap for now

* config wip

* wip manager/mod

* install wip

Co-authored-by: J H <Blu-J@users.noreply.github.com>

* chore: Update the last of the errors

* feat: Change the prelude de to borrow

* feat: Adding in some more things

* chore: add to the prelude

* chore: Small fixes

* chore: Fixing the small errors

* wip: Cleaning up check errors

* wip: Fix some of the issues

* chore: Fix setup

* chore: fix version

* chore: prelude, mod, http_reader

* wip backup_bulk

* chore: Last of the errors

* update package.json

* chore: changes needed for a build

* chore: Removing some of the linting errors in the manager

* chore: Some linting 101

* fix: Wrong order of who owns what

* chore: Remove the unstable

* chore: Remove the test in the todo

* @dr-bonez did a refactoring on the backup

* chore: Make sure that there can only be one override guard at a time

* resolve most todos

* wip: Add some more tracing to debug an error

* wip: Use a mv instead of rename

* wip: Revert some of the missing code segments found earlier

* chore: Make the build

* chore: Something about the lib looks like it is broken

* wip: More instrument and dev working

* kill netdummy before creating it

* better db analysis tools

* fixes from testing

* fix: Make add start the service

* fix status after install

* make wormhole

* fix missing icon file

* fix data url for icons

* fix: Bad deser

* bugfixes

* fix: Backup

* fix: Some of the restore

* fix: Restoring works

* update frontend patch-db types

* hack it in (#2424)

* hack it in

* optimize

* slightly cleaner

* handle config pointers

* dependency config errs

* fix compat

* cache docker

* fix dependency expectation

* fix dependency auto-config

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
Co-authored-by: Matt Hill <mattnine@protonmail.com>
Co-authored-by: J H <Blu-J@users.noreply.github.com>
Co-authored-by: Matt Hill <MattDHill@users.noreply.github.com>
2023-09-27 21:46:48 +00:00
Aiden McClelland
c305deab52 do not require auth for cert (#2425)
* do not require auth for cert

* use unauthenticated cert path

---------

Co-authored-by: Lucy Cifferello <12953208+elvece@users.noreply.github.com>
2023-09-27 20:12:07 +00:00
Lucy
0daaf3b1ec enable switching to https on login page (#2406)
* enable switching to https on login page

* add trust Root CA to http login page

* add node-jose back for setup wiz

* add tooltips, branding, logic for launch box spinner display, and enable config to toggle https mode on mocks

* cleanup

* copy changes

* style fixes

* abstract component, fix https mocks

* always show login from localhost

* launch .local when on IP

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2023-09-26 12:47:47 -06:00
J H
8e21504bdb fix: initialize images before netdummy (#2418) 2023-09-14 20:06:44 +00:00
Mariusz Kogen
fcf1be52ac Add tor-check to CLI (#2412)
* Link tor-check

* Add tor-check.sh

* Improve code readability

* Link tor-check from postinst
2023-09-13 12:25:00 -06:00
Mariusz Kogen
394bc9ceb8 SDK install script fix (#2411) 2023-09-07 23:40:04 -06:00
Aiden McClelland
e3786592b2 Feature/remove bollard (#2396)
* wip

* remove bollard, add podman feature

* fix error message parsing

* fix subcommand

* fix typo

* use com.docker.network.bridge.name for podman

* fix parse error

* handle podman network interface nuance

* add libyajl2

* use podman repos

* manually add criu

* do not manually require criu

* remove docker images during cleanup stage

* force removal

* increase img size

* Update startos-iso.yaml

* don't remove docker
2023-08-24 19:20:48 -06:00
Aiden McClelland
d6eaf8d3d9 disable docker memory accounting (#2399) 2023-08-23 16:47:43 +00:00
J H
b1c23336e3 Refactor/service manager (#2401)
* wip: Pulling in the features of the refactor since march

* chore: Fixes to make the system able to build

* chore: Adding in the documentation for the manager stuff

* feat: Restarting and wait for stop

* feat: Add a soft shutdown not commit to db.

* chore: Remove the comments of bluj

* chore: Clean up some of the linting errors

* chore: Clean up the signal

* chore: Some more cleanup

* fix: The configure

* fix: A missing config

* fix: typo

* chore: Remove a comment of BLUJ that needed to be removed
2023-08-23 00:08:55 -06:00
Jadi
44c5073dea backend: sdk init: output file location. fixes #1854 (#2393)
* `start-sdk init` used to run completely silently. Now we show the
  current/generated developer.key.pem, based on
  ticket https://github.com/Start9Labs/start-os/issues/1854
2023-08-17 22:24:03 +00:00
J H
b7593fac44 Fixes: Builds for the macs (#2397)
* Fixes: Builds for the macs

* misc: Allow the feature flags run during the build for the avahi tools
2023-08-17 22:23:33 +00:00
J H
af116794c4 fix: Add in the code to create the life check for the ui to keep the … (#2391)
* fix: Add in the code to create the life check for the ui to keep the ws alive

* Update Cargo.toml

* Update rpc.rs
2023-08-16 18:43:17 +00:00
Jadi
88c85e1d8a frontend: ui: bugfix: consistent password length. (#2394)
frontend: ui: bugfix: consistent password length.

* The password set dialogue forces maxlength=64, but the login dialogue
  does not enforce this. This causes an issue when the user copy/pastes a
  password longer than 64 characters into these boxes. closes #2375
2023-08-16 07:59:05 -06:00
Aiden McClelland
9322b3d07e be resilient to bad lshw output (#2390) 2023-08-08 17:36:14 -06:00
Lucy
55f5329817 update readme layout and assets (#2382)
* update readme layout and assets

* Update README.md

---------

Co-authored-by: Matt Hill <MattDHill@users.noreply.github.com>
2023-08-02 21:45:14 -04:00
Matt Hill
79d92c30f8 Update README.md (#2381) 2023-08-02 12:37:07 -06:00
Aiden McClelland
73229501c2 Feature/hw filtering (#2368)
* update deno

* add proxy

* remove query params, now auto added by BE

* add hardware requirements and BE reg query params

* update query params for BE requests

* allow multiple arches in hw reqs

* explain git hash mismatch

* require lshw

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2023-08-02 09:52:38 -06:00
Reckless_Satoshi
32ca91a7c9 add qr code to insights->about->tor (#2379)
* add qr code to insights->about->tor

* fix address PR feedback from @elvece; inject modelCtrl in ctor
2023-08-01 17:06:47 -04:00
Aiden McClelland
9e03ac084e add cli & rpc to edit db with jq syntax (#2372)
* add cli & rpc to edit db with jq syntax

* build fixes

* fix build

* fix build

* update cargo.lock
2023-07-25 16:22:58 -06:00
Aiden McClelland
082c51109d fix missing parent dir (#2373) 2023-07-25 10:07:10 -06:00
Aiden McClelland
8f44c75dc3 switch back to github caching (#2371)
* switch back to github caching

* remove npm and cargo cache

* misc fixes
2023-07-25 10:06:57 -06:00
Aiden McClelland
234f0d75e8 mute unexpected eof & protect against fd leaks (#2369) 2023-07-20 17:40:30 +00:00
Lucy
564186a1f9 Fix/mistake reorganize (#2366)
* revert patch for string parsing fix due to out of date yq version

* reorganize conditionals

* use ng-container

* alertButton needs to be outside of template or container
2023-07-19 10:54:18 -06:00
Lucy
ccdb477dbb Fix/pwa refresh (#2359)
* fix ROFS error on os install

* attempt to prompt browser to update manifest data with id and modified start_url

* update icon with better shape for ios

* add additional options for refreshing on pwas

* add loader to pwa reload

* fix pwa icon and add icon for ios

* add logic for refresh display depending on if pwa

* fix build for ui; fix numeric parsing error on osx

* typo

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-07-19 09:11:23 -06:00
Aiden McClelland
5f92f9e965 fix ROFS error on os install (#2364) 2023-07-19 08:50:02 -06:00
Aiden McClelland
c2db4390bb single platform builds (#2365) 2023-07-18 19:50:27 -06:00
Matt Hill
11c21b5259 Fix bugs (#2360)
* fix reset tor, delete http redirect, show message for tor http, update release notes

* potentially fix double req to registries

* change language around LAN and root ca

* link locally instead of docs
2023-07-18 12:38:52 -06:00
Aiden McClelland
3cd9e17e3f migrate tor address to https (#2358) 2023-07-18 12:08:34 -06:00
Aiden McClelland
1982ce796f update deno (#2361) 2023-07-18 11:59:00 -06:00
Aiden McClelland
825e18a551 version bump (#2357)
* version bump

* update welcome page

---------

Co-authored-by: Lucy Cifferello <12953208+elvece@users.noreply.github.com>
2023-07-14 14:58:19 -06:00
Aiden McClelland
9ff0128fb1 support http2 alpn handshake (#2354)
* support http2 alpn handshake

* fix protocol name

* switch to https for tor

* update setup wizard and main ui to accommodate https (#2356)

* update setup wizard and main ui to accommodate https

* update wording in download doc

* fix accidental conversion of tor https for services and still allow ws

* redirect to https if available

* fix replaces to only search at beginning and ignore localhost when checking for https

---------

Co-authored-by: Lucy <12953208+elvece@users.noreply.github.com>
2023-07-14 14:58:02 -06:00
Matt Hill
36c3617204 permit IP for cifs backups (#2342)
* permit IP for cifs backups

* allow ip instead of hostname (#2347)

---------

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-07-14 18:52:33 +00:00
Aiden McClelland
90a9db3a91 disable encryption for new raspi setups (#2348)
* disable encryption for new raspi setups

* use config instead of OS_ARCH

* fixes from testing
2023-07-14 18:30:52 +00:00
Aiden McClelland
59d6795d9e fix all references embassyd -> startd (#2355) 2023-07-14 18:29:20 +00:00
Aiden McClelland
2c07cf50fa better transfer progress (#2350)
* better transfer progress

* frontend for calculating transfer size

* fixes from testing

* improve internal api

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2023-07-13 19:40:53 -06:00
Aiden McClelland
cc0e525dc5 fix incoherent when removing (#2332)
* fix incoherent when removing

* include all packages for current dependents
2023-07-13 20:36:48 +00:00
Aiden McClelland
73bd973109 delete disk guid on reflash (#2334)
* delete disk guid on reflash

* delete unnecessary files before copy
2023-07-13 20:36:35 +00:00
Aiden McClelland
a7e501d874 pack compressed assets into single binary (#2344)
* pack compressed assets into single binary

* update naming

* tweaks

* fix build

* fix cargo lock

* rename CLI

* remove explicit ref name
2023-07-12 22:51:05 +00:00
Matt Hill
4676f0595c add reset password to UI (#2341) 2023-07-11 17:23:40 -06:00
kn0wmad
1d3d70e8d6 Update README.md (#2337)
* Update README.md

* Update README.md
2023-07-07 10:23:31 -06:00
Mariusz Kogen
bada88157e Auto-define the OS_ARCH variable. (#2329) 2023-06-30 20:34:10 +00:00
J H
13f3137701 fix: Make check-version posix compliant (#2331)
We found that we couldn't compile this on macOS on ARM
2023-06-29 22:28:13 +00:00
Aiden McClelland
d3316ff6ff make it faster (#2328)
* make it faster

* better pipelining

* remove unnecessary test

* use tmpfs for debspawn

* don't download intermediate artifacts

* fix upload dir path

* switch to buildjet

* use buildjet cache on buildjet runner

* native builds when fast

* remove quotes

* always use buildjet cache

* remove newlines

* delete data after done with it

* skip aarch64 for fast dev builds

* don't tmpfs for arm

* don't try to remove debspawn tmpdir
2023-06-28 13:37:26 -06:00
kn0wmad
1b384e61b4 maint/minor UI typo fixes (#2330)
* Minor copy fixes

* Contact link fixes
2023-06-28 13:03:33 -06:00
Matt Hill
addea20cab Update README 2023-06-27 10:10:01 -06:00
Matt Hill
fac23f2f57 update README 2023-06-27 10:06:42 -06:00
Aiden McClelland
bffe1ccb3d use a more resourced runner for production builds (#2322) 2023-06-26 16:27:11 +00:00
Matt Hill
e577434fe6 Update bug-report.yml 2023-06-25 13:38:26 -06:00
Matt Hill
5d1d9827e4 Update bug-report.yml 2023-06-25 13:35:45 -06:00
Aiden McClelland
dd28ad20ef use port instead of pidof to detect tor going down (#2320)
* use port instead of pidof to detect tor going down

* fix errors

* healthcheck timeout
2023-06-23 13:06:00 -06:00
Aiden McClelland
ef416ef60b prevent tor from spinning if a service is in a crash loop (#2316) 2023-06-22 18:09:59 +00:00
Aiden McClelland
95b3b55971 fix rootflags for btrfs update (#2315) 2023-06-21 15:26:27 +00:00
Aiden McClelland
b3f32ae03e don't use cp when over cifs 2023-06-21 00:36:36 +00:00
Aiden McClelland
c7472174e5 fix btrfs rootflags 2023-06-21 00:36:36 +00:00
gStart9
2ad749354d Add qemu-guest-agent for advanced VM shutdown options (#2309) 2023-06-21 00:36:36 +00:00
Aiden McClelland
4ed9d2ea22 add grub-common to build 2023-06-21 00:36:36 +00:00
Lucy Cifferello
280eb47de7 update marketplace project to include mime type pipe for icons 2023-06-21 00:36:36 +00:00
Aiden McClelland
324a12b0ff reset config after pg_upgrade 2023-06-21 00:36:36 +00:00
Aiden McClelland
a2543ccddc trim fs name 2023-06-21 00:36:36 +00:00
Aiden McClelland
22666412c3 use fsck instead of e2fsck 2023-06-21 00:36:36 +00:00
Aiden McClelland
dd58044cdf fix build 2023-06-21 00:36:36 +00:00
Aiden McClelland
10312d89d7 fix ipv6 2023-06-21 00:36:36 +00:00
Aiden McClelland
b4c0d877cb fix postgres migration 2023-06-21 00:36:36 +00:00
Aiden McClelland
e95d56a5d0 fix update-grub2 2023-06-21 00:36:36 +00:00
Aiden McClelland
90424e8329 install fixes 2023-06-21 00:36:36 +00:00
Aiden McClelland
1bfeb42a06 force btrfs creation 2023-06-21 00:36:36 +00:00
Aiden McClelland
a936f92954 use postgres user 2023-06-21 00:36:36 +00:00
Aiden McClelland
0bc514ec17 include old pg 2023-06-21 00:36:36 +00:00
Aiden McClelland
a2cf4001af improve invoke error reporting 2023-06-21 00:36:36 +00:00
Aiden McClelland
cb4e12a68c fix build 2023-06-21 00:36:36 +00:00
Aiden McClelland
a7f5124dfe postgresql migration 2023-06-21 00:36:36 +00:00
Aiden McClelland
ccbf71c5e7 fix ipv6 2023-06-21 00:36:36 +00:00
Aiden McClelland
04bf5f58d9 fix tor listener bug 2023-06-21 00:36:36 +00:00
Aiden McClelland
ab3f5956d4 ipv6 2023-06-21 00:36:36 +00:00
Aiden McClelland
c1fe8e583f backup target mount/umount 2023-06-21 00:36:36 +00:00
Lucy Cifferello
fd166c4433 do not load array buffer into memory 2023-06-21 00:36:36 +00:00
Aiden McClelland
f29c7ba4f2 don't wait for install to complete on sideload 2023-06-21 00:36:36 +00:00
Aiden McClelland
88869e9710 gpu acceleration 2023-06-21 00:36:36 +00:00
Aiden McClelland
f8404ab043 btrfs 2023-06-21 00:36:36 +00:00
Aiden McClelland
9fa5d1ff9e suite independent 2023-06-21 00:36:36 +00:00
Aiden McClelland
483f353fd0 backup luks headers 2023-06-21 00:36:36 +00:00
Aiden McClelland
a11bf5b5c7 bookworm 2023-06-21 00:36:36 +00:00
Aiden McClelland
d4113ff753 re-add server version and version range 2023-06-21 00:36:36 +00:00
Aiden McClelland
1969f036fa deser full server info 2023-06-21 00:36:36 +00:00
Matt Hill
8c90e01016 hide range ip addresses, update release notes 2023-06-15 13:20:37 -06:00
Matt Hill
756c5c9b99 small spelling mistake 2023-06-11 15:04:59 -06:00
Lucy Cifferello
ee54b355af fix compilation error on widgets page 2023-06-11 15:04:59 -06:00
Lucy Cifferello
26cbbc0c56 adjust start9 registry icon 2023-06-11 15:04:59 -06:00
Aiden McClelland
f4f719d52a misc fixes 2023-06-11 15:04:59 -06:00
Aiden McClelland
f2071d8b7e update zram bool 2023-06-11 15:04:59 -06:00
Aiden McClelland
df88a55784 v0.3.4.3 2023-06-11 15:04:59 -06:00
Matt Hill
3ccbc626ff experimental features for zram and reset tor (#2299)
* experimental features for zram and reset tor

* zram backend

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-06-11 15:04:59 -06:00
Aiden McClelland
71a15cf222 add diskUsage effect (#2297) 2023-06-11 15:04:59 -06:00
Aiden McClelland
26ddf769b1 remove overload restart rule 2023-06-11 15:04:59 -06:00
Aiden McClelland
3137387c0c only set static hostname 2023-06-11 15:04:59 -06:00
Aiden McClelland
fc142cfde8 reset tor (#2296)
* reset tor

* Update tor.rs

* timeout connect

* handle stuck bootstrapping
2023-06-11 15:04:59 -06:00
Aiden McClelland
b0503fa507 Bugfix/incoherent (#2293)
* debug incoherent error

* fix incoherent error

* use new debspawn
2023-06-11 15:04:59 -06:00
Matt Hill
b86a97c9c0 add resetTor to rpc client 2023-06-11 15:04:59 -06:00
Lucy Cifferello
eb6cd23772 update registry icon 2023-06-11 15:04:59 -06:00
Matt Hill
efae1e7e6c add Tor logs to UI 2023-06-11 15:04:59 -06:00
Lucy Cifferello
19d55b840e add registry icon to preloader 2023-06-11 15:04:59 -06:00
Lucy Cifferello
cc0c1d05ab update frontend to 0.3.4.3 2023-06-11 15:04:59 -06:00
Lucy Cifferello
f088f65d5a update branding 2023-06-11 15:04:59 -06:00
Lucy Cifferello
5441b5a06b add missing items to preloader 2023-06-11 15:04:59 -06:00
411 changed files with 23318 additions and 35317 deletions


@@ -12,25 +12,23 @@ body:
options:
- label: I have searched for [existing issues](https://github.com/start9labs/start-os/issues) that already report this problem.
required: true
- type: input
attributes:
label: Server Hardware
description: On what hardware are you running StartOS? Please be as detailed as possible!
placeholder: Pi (8GB) w/ 32GB microSD & Samsung T7 SSD
validations:
required: true
- type: input
attributes:
label: StartOS Version
description: What version of StartOS are you running?
placeholder: e.g. 0.3.4.2
placeholder: e.g. 0.3.4.3
validations:
required: true
- type: dropdown
attributes:
label: Device
description: What device are you using to connect to your server?
options:
- Phone/tablet
- Laptop/Desktop
validations:
required: true
- type: dropdown
attributes:
label: Device OS
label: Client OS
description: What operating system is your device running?
options:
- MacOS
@@ -45,7 +43,7 @@ body:
required: true
- type: input
attributes:
label: Device OS Version
label: Client OS Version
description: What version is your device OS?
validations:
required: true


@@ -1,29 +0,0 @@
# This folder contains GitHub Actions workflows for building the project
## backend
Runs: manually (on: workflow_dispatch) or called by product-pipeline (on: workflow_call)
This workflow uses the action docker/setup-buildx-action@v1 to prepare the environment for aarch64 cross compilation using docker buildx.
When execution of aarch64 containers is required, the action docker/setup-qemu-action@v1 is added.
A matrix strategy is used to build for both x86_64 and aarch64 platforms in parallel.
### Running unit tests
Unit tests are run using [cargo-nextest](https://nexte.st/). First the sources are (cross-)compiled and archived. The archive is then run on the correct platform.
## frontend
Runs: manually (on: workflow_dispatch) or called by product-pipeline (on: workflow_call)
This workflow builds the frontends.
## product
Runs: when a pull request targets the master or next branch, and when a change is pushed to the master or next branch
This workflow builds everything, re-using the backend and frontend workflows.
The download and extraction order of artifacts is relevant to `make`, as it checks the file timestamps to decide which targets need to be executed.
Result: eos.img
## a note on uploading artifacts
Artifacts are used to share data between jobs. File permissions are not maintained during artifact upload. Where file permissions are relevant, the workaround using tar has been used (sketched below). See [here](https://github.com/actions/upload-artifact#maintaining-file-permissions-and-case-sensitive-files).
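
The matrix strategy and the tar workaround described above combine roughly as in this minimal sketch, condensed from the backend workflow shown below (the job and step names and the `make` target are illustrative, not the project's exact configuration):

```yaml
name: Matrix build (sketch)
on: workflow_dispatch
jobs:
  build:
    strategy:
      fail-fast: false
      matrix:
        target: [x86_64, aarch64] # both platforms build in parallel
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
      # QEMU and buildx are only needed when aarch64 containers must execute
      - uses: docker/setup-qemu-action@v2
        if: ${{ matrix.target == 'aarch64' }}
      - uses: docker/setup-buildx-action@v2
        if: ${{ matrix.target == 'aarch64' }}
      - name: Build
        run: make ARCH=${{ matrix.target }} backend
      # Artifact upload drops file permissions, so tar the output first
      - name: Tar files to preserve file permissions
        run: tar -cvf backend-${{ matrix.target }}.tar backend/target
      - uses: actions/upload-artifact@v3
        with:
          name: backend-${{ matrix.target }}
          path: backend-${{ matrix.target }}.tar
```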


@@ -1,233 +0,0 @@
name: Backend
on:
workflow_call:
workflow_dispatch:
env:
RUST_VERSION: "1.67.1"
ENVIRONMENT: "dev"
jobs:
build_libs:
name: Build libs
strategy:
fail-fast: false
matrix:
target: [x86_64, aarch64]
include:
- target: x86_64
snapshot_command: ./build-v8-snapshot.sh
artifact_name: js_snapshot
artifact_path: libs/js_engine/src/artifacts/JS_SNAPSHOT.bin
- target: aarch64
snapshot_command: ./build-arm-v8-snapshot.sh
artifact_name: arm_js_snapshot
artifact_path: libs/js_engine/src/artifacts/ARM_JS_SNAPSHOT.bin
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
if: ${{ matrix.target == 'aarch64' }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
if: ${{ matrix.target == 'aarch64' }}
- name: "Install Rust"
run: |
rustup toolchain install ${{ env.RUST_VERSION }} --profile minimal --no-self-update
rustup default ${{ inputs.rust }}
shell: bash
if: ${{ matrix.target == 'x86_64' }}
- uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
libs/target/
key: ${{ runner.os }}-cargo-libs-${{ matrix.target }}-${{ hashFiles('libs/Cargo.lock') }}
- name: Build v8 snapshot
run: ${{ matrix.snapshot_command }}
working-directory: libs
- uses: actions/upload-artifact@v3
with:
name: ${{ matrix.artifact_name }}
path: ${{ matrix.artifact_path }}
build_backend:
name: Build backend
strategy:
fail-fast: false
matrix:
target: [x86_64, aarch64]
include:
- target: x86_64
snapshot_download: js_snapshot
- target: aarch64
snapshot_download: arm_js_snapshot
runs-on: ubuntu-latest
timeout-minutes: 120
needs: build_libs
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Download ${{ matrix.snapshot_download }} artifact
uses: actions/download-artifact@v3
with:
name: ${{ matrix.snapshot_download }}
path: libs/js_engine/src/artifacts/
- name: "Install Rust"
run: |
rustup toolchain install ${{ env.RUST_VERSION }} --profile minimal --no-self-update
rustup default ${{ inputs.rust }}
shell: bash
if: ${{ matrix.target == 'x86_64' }}
- uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
backend/target/
key: ${{ runner.os }}-cargo-backend-${{ matrix.target }}-${{ hashFiles('backend/Cargo.lock') }}
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get install libavahi-client-dev
if: ${{ matrix.target == 'x86_64' }}
- name: Check Git Hash
run: ./check-git-hash.sh
- name: Check Environment
run: ./check-environment.sh
- name: Build backend
run: make ARCH=${{ matrix.target }} backend
- name: 'Tar files to preserve file permissions'
run: make ARCH=${{ matrix.target }} backend-${{ matrix.target }}.tar
- uses: actions/upload-artifact@v3
with:
name: backend-${{ matrix.target }}
path: backend-${{ matrix.target }}.tar
- name: Install nextest
uses: taiki-e/install-action@nextest
- name: Build and archive tests
run: cargo nextest archive --archive-file nextest-archive-${{ matrix.target }}.tar.zst --target ${{ matrix.target }}-unknown-linux-gnu
working-directory: backend
if: ${{ matrix.target == 'x86_64' }}
- name: Build and archive tests
run: |
docker run --rm \
-v "$HOME/.cargo/registry":/root/.cargo/registry \
-v "$(pwd)":/home/rust/src \
-P start9/rust-arm-cross:aarch64 \
sh -c 'cd /home/rust/src/backend &&
rustup install ${{ env.RUST_VERSION }} &&
rustup override set ${{ env.RUST_VERSION }} &&
rustup target add aarch64-unknown-linux-gnu &&
curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin &&
cargo nextest archive --archive-file nextest-archive-${{ matrix.target }}.tar.zst --target ${{ matrix.target }}-unknown-linux-gnu'
if: ${{ matrix.target == 'aarch64' }}
- name: Reset permissions
run: sudo chown -R $USER target
working-directory: backend
if: ${{ matrix.target == 'aarch64' }}
- name: Upload archive to workflow
uses: actions/upload-artifact@v3
with:
name: nextest-archive-${{ matrix.target }}
path: backend/nextest-archive-${{ matrix.target }}.tar.zst
run_tests_backend:
name: Test backend
strategy:
fail-fast: false
matrix:
target: [x86_64, aarch64]
include:
- target: x86_64
- target: aarch64
runs-on: ubuntu-latest
timeout-minutes: 60
needs: build_backend
env:
CARGO_TERM_COLOR: always
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
if: ${{ matrix.target == 'aarch64' }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
if: ${{ matrix.target == 'aarch64' }}
- run: mkdir -p ~/.cargo/bin
if: ${{ matrix.target == 'x86_64' }}
- name: Install nextest
uses: taiki-e/install-action@v2
with:
tool: nextest@0.9.47
if: ${{ matrix.target == 'x86_64' }}
- name: Download archive
uses: actions/download-artifact@v3
with:
name: nextest-archive-${{ matrix.target }}
- name: Download nextest (aarch64)
run: wget -O nextest-aarch64.tar.gz https://get.nexte.st/0.9.47/linux-arm
if: ${{ matrix.target == 'aarch64' }}
- name: Run tests
run: |
${CARGO_HOME:-~/.cargo}/bin/cargo-nextest nextest run --no-fail-fast --archive-file nextest-archive-${{ matrix.target }}.tar.zst \
--filter-expr 'not (test(system::test_get_temp) | test(net::tor::test) | test(system::test_get_disk_usage) | test(net::ssl::certificate_details_persist) | test(net::ssl::ca_details_persist))'
if: ${{ matrix.target == 'x86_64' }}
- name: Run tests
run: |
docker run --rm --platform linux/arm64/v8 \
-v "/home/runner/.cargo/registry":/usr/local/cargo/registry \
-v "$(pwd)":/home/rust/src \
-e CARGO_TERM_COLOR=${{ env.CARGO_TERM_COLOR }} \
-P ubuntu:20.04 \
sh -c '
apt-get update &&
apt-get install -y ca-certificates &&
apt-get install -y rsync &&
cd /home/rust/src &&
mkdir -p ~/.cargo/bin &&
tar -zxvf nextest-aarch64.tar.gz -C ${CARGO_HOME:-~/.cargo}/bin &&
${CARGO_HOME:-~/.cargo}/bin/cargo-nextest nextest run --archive-file nextest-archive-${{ matrix.target }}.tar.zst \
--filter-expr "not (test(system::test_get_temp) | test(net::tor::test) | test(system::test_get_disk_usage) | test(net::ssl::certificate_details_persist) | test(net::ssl::ca_details_persist))"'
if: ${{ matrix.target == 'aarch64' }}


@@ -1,46 +0,0 @@
name: Frontend
on:
workflow_call:
workflow_dispatch:
env:
NODEJS_VERSION: '18.15.0'
ENVIRONMENT: "dev"
jobs:
frontend:
name: Build frontend
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- uses: actions/setup-node@v3
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Get npm cache directory
id: npm-cache-dir
run: |
echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT
- uses: actions/cache@v3
id: npm-cache
with:
path: ${{ steps.npm-cache-dir.outputs.dir }}
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-
- name: Build frontends
run: make frontends
- name: 'Tar files to preserve file permissions'
run: tar -cvf frontend.tar ENVIRONMENT.txt GIT_HASH.txt VERSION.txt frontend/dist frontend/config.json
- uses: actions/upload-artifact@v3
with:
name: frontend
path: frontend.tar


@@ -1,37 +0,0 @@
name: Reusable Workflow
on:
workflow_call:
inputs:
build_command:
required: true
type: string
artifact_name:
required: true
type: string
artifact_path:
required: true
type: string
env:
ENVIRONMENT: "dev"
jobs:
generic_build_job:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Build image
run: ${{ inputs.build_command }}
- uses: actions/upload-artifact@v3
with:
name: ${{ inputs.artifact_name }}
path: ${{ inputs.artifact_path }}


@@ -8,10 +8,36 @@ on:
type: choice
description: Environment
options:
- "<NONE>"
- NONE
- dev
- unstable
- dev-unstable
- docker
- dev-docker
- dev-unstable-docker
runner:
type: choice
description: Runner
options:
- standard
- fast
platform:
type: choice
description: Platform
options:
- ALL
- x86_64
- x86_64-nonfree
- aarch64
- aarch64-nonfree
- raspberrypi
deploy:
type: choice
description: Deploy
options:
- NONE
- alpha
- beta
push:
branches:
- master
@@ -23,118 +49,138 @@ on:
env:
NODEJS_VERSION: "18.15.0"
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''<NONE>''] }}'
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
jobs:
dpkg:
name: Build dpkg
compile:
name: Compile Base Binaries
strategy:
fail-fast: false
fail-fast: true
matrix:
platform:
[x86_64, x86_64-nonfree, aarch64, aarch64-nonfree, raspberrypi]
runs-on: ubuntu-22.04
arch: >-
${{
fromJson('{
"x86_64": ["x86_64"],
"x86_64-nonfree": ["x86_64"],
"aarch64": ["aarch64"],
"aarch64-nonfree": ["aarch64"],
"raspberrypi": ["aarch64"],
"ALL": ["x86_64", "aarch64"]
}')[github.event.inputs.platform || 'ALL']
}}
runs-on: ${{ fromJson('["ubuntu-22.04", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
steps:
- uses: actions/checkout@v3
with:
repository: Start9Labs/embassy-os-deb
- run: |
sudo mount -t tmpfs tmpfs .
if: ${{ github.event.inputs.runner == 'fast' }}
- uses: actions/checkout@v3
with:
submodules: recursive
path: embassyos-0.3.x
- run: |
cp -r debian embassyos-0.3.x/
VERSION=0.3.x ./control.sh
cp embassyos-0.3.x/backend/embassyd.service embassyos-0.3.x/debian/embassyos.embassyd.service
cp embassyos-0.3.x/backend/embassy-init.service embassyos-0.3.x/debian/embassyos.embassy-init.service
- uses: actions/setup-node@v3
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Get npm cache directory
id: npm-cache-dir
run: |
echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT
- uses: actions/cache@v3
id: npm-cache
with:
path: ${{ steps.npm-cache-dir.outputs.dir }}
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get install \
debmake \
debhelper-compat \
crossbuild-essential-arm64
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Run build
run: "make VERSION=0.3.x TAG=${{ github.ref_name }}"
env:
OS_ARCH: ${{ matrix.platform }}
- name: Make
run: make ARCH=${{ matrix.arch }} compiled-${{ matrix.arch }}.tar
- uses: actions/upload-artifact@v3
with:
name: ${{ matrix.platform }}.deb
path: embassyos_0.3.x-1_*.deb
iso:
name: Build iso
name: compiled-${{ matrix.arch }}.tar
path: compiled-${{ matrix.arch }}.tar
image:
name: Build Image
needs: [compile]
strategy:
fail-fast: false
matrix:
platform:
[x86_64, x86_64-nonfree, aarch64, aarch64-nonfree, raspberrypi]
runs-on: ubuntu-22.04
needs: [dpkg]
platform: >-
${{
fromJson(
format(
'[
["{0}"],
["x86_64", "x86_64-nonfree", "aarch64", "aarch64-nonfree", "raspberrypi"]
]',
github.event.inputs.platform || 'ALL'
)
)[(github.event.inputs.platform || 'ALL') == 'ALL']
}}
runs-on: >-
${{
fromJson(
format(
'["ubuntu-22.04", "{0}"]',
fromJson('{
"x86_64": "buildjet-8vcpu-ubuntu-2204",
"x86_64-nonfree": "buildjet-8vcpu-ubuntu-2204",
"aarch64": "buildjet-8vcpu-ubuntu-2204-arm",
"aarch64-nonfree": "buildjet-8vcpu-ubuntu-2204-arm",
"raspberrypi": "buildjet-8vcpu-ubuntu-2204-arm",
}')[matrix.platform]
)
)[github.event.inputs.runner == 'fast']
}}
env:
ARCH: >-
${{
fromJson('{
"x86_64": "x86_64",
"x86_64-nonfree": "x86_64",
"aarch64": "aarch64",
"aarch64-nonfree": "aarch64",
"raspberrypi": "aarch64",
}')[matrix.platform]
}}
steps:
- uses: actions/checkout@v3
with:
repository: Start9Labs/startos-image-recipes
submodules: recursive
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get install -y qemu-user-static
wget http://ftp.us.debian.org/debian/pool/main/d/debspawn/debspawn_0.6.1-1_all.deb
sha256sum ./debspawn_0.6.1-1_all.deb | grep fb8a3f588438ff9ef51e713ec1d83306db893f0aa97447565e28bbba9c6e90c6
sudo apt-get install -y ./debspawn_0.6.1-1_all.deb
wget https://deb.debian.org/debian/pool/main/d/debspawn/debspawn_0.6.2-1_all.deb
sha256sum ./debspawn_0.6.2-1_all.deb | grep 37ef27458cb1e35e8bce4d4f639b06b4b3866fc0b9191ec6b9bd157afd06a817
sudo apt-get install -y ./debspawn_0.6.2-1_all.deb
- name: Configure debspawn
run: |
sudo mkdir -p /etc/debspawn/
echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
sudo mkdir -p /var/tmp/debspawn
- uses: actions/cache@v3
with:
path: /var/lib/debspawn
key: ${{ runner.os }}-debspawn-init-bullseye
- run: sudo mount -t tmpfs tmpfs /var/tmp/debspawn
if: ${{ github.event.inputs.runner == 'fast' && (matrix.platform == 'x86_64' || matrix.platform == 'x86_64-nonfree') }}
- name: Make build container
run: "debspawn list | grep bullseye || debspawn create bullseye"
- run: "mkdir -p overlays/deb"
- name: Download dpkg
- name: Download compiled artifacts
uses: actions/download-artifact@v3
with:
name: ${{ matrix.platform }}.deb
path: overlays/deb
name: compiled-${{ env.ARCH }}.tar
- name: Run build
- name: Extract compiled artifacts
run: tar -xvf compiled-${{ env.ARCH }}.tar
- name: Prevent rebuild of compiled artifacts
run: |
./run-local-build.sh ${{ matrix.platform }}
mkdir -p frontend/dist/raw
PLATFORM=${{ matrix.platform }} make -t compiled-${{ env.ARCH }}.tar
- name: Run iso build
run: PLATFORM=${{ matrix.platform }} make iso
if: ${{ matrix.platform != 'raspberrypi' }}
- name: Run img build
run: PLATFORM=${{ matrix.platform }} make img
if: ${{ matrix.platform == 'raspberrypi' }}
- uses: actions/upload-artifact@v3
with:
@@ -147,27 +193,45 @@ jobs:
path: results/*.iso
if: ${{ matrix.platform != 'raspberrypi' }}
image:
name: Build image
runs-on: ubuntu-22.04
timeout-minutes: 60
needs: [iso]
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Download raspberrypi.squashfs artifact
uses: actions/download-artifact@v3
with:
name: raspberrypi.squashfs
- run: mv startos-*_raspberrypi.squashfs startos.raspberrypi.squashfs
- name: Build image
run: make startos_raspberrypi.img
- uses: actions/upload-artifact@v3
with:
name: raspberrypi.img
path: startos-*_raspberrypi.img
name: ${{ matrix.platform }}.img
path: results/*.img
if: ${{ matrix.platform == 'raspberrypi' }}
- name: Upload OTA to registry
run: >-
PLATFORM=${{ matrix.platform }} make upload-ota TARGET="${{
fromJson('{
"alpha": "alpha-registry-x.start9.com",
"beta": "beta-registry.start9.com",
}')[github.event.inputs.deploy]
}}" KEY="${{
fromJson(
format('{{
"alpha": "{0}",
"beta": "{1}",
}}', secrets.ALPHA_INDEX_KEY, secrets.BETA_INDEX_KEY)
)[github.event.inputs.deploy]
}}"
if: ${{ github.event.inputs.deploy != '' && github.event.inputs.deploy != 'NONE' }}
index:
if: ${{ github.event.inputs.deploy != '' && github.event.inputs.deploy != 'NONE' }}
needs: [image]
runs-on: ubuntu-22.04
steps:
- run: >-
curl "https://${{
fromJson('{
"alpha": "alpha-registry-x.start9.com",
"beta": "beta-registry.start9.com",
}')[github.event.inputs.deploy]
}}:8443/resync.cgi?key=${{
fromJson(
format('{{
"alpha": "{0}",
"beta": "{1}",
}}', secrets.ALPHA_INDEX_KEY, secrets.BETA_INDEX_KEY)
)[github.event.inputs.deploy]
}}"

.gitignore

@@ -16,13 +16,15 @@ deploy_web.sh
secrets.db
.vscode/
/cargo-deps/**/*
/PLATFORM.txt
/ENVIRONMENT.txt
/GIT_HASH.txt
/VERSION.txt
/embassyos-*.tar.gz
/eos-*.tar.gz
/*.deb
/target
/*.squashfs
/debian
/DEBIAN
/results
/dpkg-workdir
/compiled.tar
/compiled-*.tar

Cargo.lock (generated)

File diff suppressed because it is too large

LICENSE (new file)

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2023 Start9 Labs, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.


@@ -1,42 +0,0 @@
# START9 NON-COMMERCIAL LICENSE v1
Version 1, 22 September 2022
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
### 1.Definitions
"License" means version 1 of the Start9 Non-Commercial License.
"Licensor" means the Start9 Labs, Inc, or its successor(s) in interest, or a future assignee of the copyright.
"You" (or "Your") means an individual or organization exercising permissions granted by this License.
"Source Code" for a work means the preferred form of the work for making modifications to it.
"Object Code" means any non-source form of a work, including the machine-language output by a compiler or assembler.
"Work" means any work of authorship, whether in Source or Object form, made available under this License.
"Derivative Work" means any work, whether in Source or Object form, that is based on (or derived from) the Work.
"Distribute" means to convey or to publish and generally has the same meaning here as under U.S. Copyright law.
"Sell" means practicing any or all of the rights granted to you under the License to provide to third parties, for a fee or other consideration (including, without limitation, fees for hosting, consulting, or support services), a product or service whose value derives, entirely or substantially, from the functionality of the Work or Derivative Work.
### 2. Grant of Rights
Subject to the terms of this license, the Licensor grants you, the licensee, a non-exclusive, worldwide, royalty-free copyright license to access, audit, copy, modify, compile, run, test, distribute, or otherwise use the Software.
### 3. Limitations
1. The grant of rights under the License does NOT include, and the License does NOT grant You the right to Sell the Work or Derivative Work.
2. If you Distribute the Work or Derivative Work, you expressly undertake not to remove or modify, in any manner, the copyright notices attached to the Work or displayed in any output of the Work when run, and to reproduce these notices, in an identical manner, in any distributed copies of the Work or Derivative Work together with a copy of this License.
3. If you Distribute a Derivative Work, it must carry prominent notices stating that it has been modified from the Work, providing a relevant date.
### 4. Contributions
You hereby grant to Licensor a perpetual, irrevocable, worldwide, non-exclusive, royalty-free license to use and exploit any Derivative Work of which you are the author.
### 5. Disclaimer
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. LICENSOR HAS NO OBLIGATION TO SUPPORT RECIPIENTS OF THE SOFTWARE.

Makefile

@@ -1,43 +1,59 @@
OS_ARCH := $(shell echo "${OS_ARCH}")
ARCH := $(shell if [ "$(OS_ARCH)" = "raspberrypi" ]; then echo aarch64; else echo $(OS_ARCH) | sed 's/-nonfree$$//g'; fi)
ENVIRONMENT_FILE = $(shell ./check-environment.sh)
GIT_HASH_FILE = $(shell ./check-git-hash.sh)
VERSION_FILE = $(shell ./check-version.sh)
EMBASSY_BINS := backend/target/$(ARCH)-unknown-linux-gnu/release/embassyd backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-init backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-cli backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-sdk backend/target/$(ARCH)-unknown-linux-gnu/release/avahi-alias libs/target/aarch64-unknown-linux-musl/release/embassy_container_init libs/target/x86_64-unknown-linux-musl/release/embassy_container_init
EMBASSY_UIS := frontend/dist/ui frontend/dist/setup-wizard frontend/dist/diagnostic-ui frontend/dist/install-wizard
BUILD_SRC := $(shell find build)
EMBASSY_SRC := backend/embassyd.service backend/embassy-init.service $(EMBASSY_UIS) $(BUILD_SRC)
COMPAT_SRC := $(shell find system-images/compat/ -not -path 'system-images/compat/target/*' -and -not -name *.tar -and -not -name target)
UTILS_SRC := $(shell find system-images/utils/ -not -name *.tar)
BINFMT_SRC := $(shell find system-images/binfmt/ -not -name *.tar)
BACKEND_SRC := $(shell find backend/src) $(shell find backend/migrations) $(shell find patch-db/*/src) $(shell find libs/*/src) libs/*/Cargo.toml backend/Cargo.toml backend/Cargo.lock
FRONTEND_SHARED_SRC := $(shell find frontend/projects/shared) $(shell ls -p frontend/ | grep -v / | sed 's/^/frontend\//g') frontend/package.json frontend/node_modules frontend/config.json patch-db/client/dist frontend/patchdb-ui-seed.json
FRONTEND_UI_SRC := $(shell find frontend/projects/ui)
FRONTEND_SETUP_WIZARD_SRC := $(shell find frontend/projects/setup-wizard)
FRONTEND_DIAGNOSTIC_UI_SRC := $(shell find frontend/projects/diagnostic-ui)
FRONTEND_INSTALL_WIZARD_SRC := $(shell find frontend/projects/install-wizard)
PATCH_DB_CLIENT_SRC := $(shell find patch-db/client -not -path patch-db/client/dist)
PLATFORM_FILE := $(shell ./check-platform.sh)
ENVIRONMENT_FILE := $(shell ./check-environment.sh)
GIT_HASH_FILE := $(shell ./check-git-hash.sh)
VERSION_FILE := $(shell ./check-version.sh)
BASENAME := $(shell ./basename.sh)
PLATFORM := $(shell if [ -f ./PLATFORM.txt ]; then cat ./PLATFORM.txt; else echo unknown; fi)
ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; else echo $(PLATFORM) | sed 's/-nonfree$$//g'; fi)
IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo iso; fi)
EMBASSY_BINS := backend/target/$(ARCH)-unknown-linux-gnu/release/startbox libs/target/aarch64-unknown-linux-musl/release/embassy_container_init libs/target/x86_64-unknown-linux-musl/release/embassy_container_init
EMBASSY_UIS := frontend/dist/raw/ui frontend/dist/raw/setup-wizard frontend/dist/raw/diagnostic-ui frontend/dist/raw/install-wizard
BUILD_SRC := $(shell git ls-files build) build/lib/depends build/lib/conflicts
DEBIAN_SRC := $(shell git ls-files debian/)
IMAGE_RECIPE_SRC := $(shell git ls-files image-recipe/)
EMBASSY_SRC := backend/startd.service $(BUILD_SRC)
COMPAT_SRC := $(shell git ls-files system-images/compat/)
UTILS_SRC := $(shell git ls-files system-images/utils/)
BINFMT_SRC := $(shell git ls-files system-images/binfmt/)
BACKEND_SRC := $(shell git ls-files backend) $(shell git ls-files --recurse-submodules patch-db) $(shell git ls-files libs) frontend/dist/static
FRONTEND_SHARED_SRC := $(shell git ls-files frontend/projects/shared) $(shell ls -p frontend/ | grep -v / | sed 's/^/frontend\//g') frontend/node_modules frontend/config.json patch-db/client/dist frontend/patchdb-ui-seed.json
FRONTEND_UI_SRC := $(shell git ls-files frontend/projects/ui)
FRONTEND_SETUP_WIZARD_SRC := $(shell git ls-files frontend/projects/setup-wizard)
FRONTEND_DIAGNOSTIC_UI_SRC := $(shell git ls-files frontend/projects/diagnostic-ui)
FRONTEND_INSTALL_WIZARD_SRC := $(shell git ls-files frontend/projects/install-wizard)
PATCH_DB_CLIENT_SRC := $(shell git ls-files --recurse-submodules patch-db/client)
GZIP_BIN := $(shell which pigz || which gzip)
ALL_TARGETS := $(EMBASSY_BINS) system-images/compat/docker-images/$(ARCH).tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar $(EMBASSY_SRC) $(shell if [ "$(OS_ARCH)" = "raspberrypi" ]; then echo cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep; fi) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE)
TAR_BIN := $(shell which gtar || which tar)
COMPILED_TARGETS := $(EMBASSY_BINS) system-images/compat/docker-images/$(ARCH).tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar
ALL_TARGETS := $(EMBASSY_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep; fi) $(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]; then echo cargo-deps/$(ARCH)-unknown-linux-gnu/release/tokio-console; fi') $(PLATFORM_FILE)
ifeq ($(REMOTE),)
mkdir = mkdir -p $1
rm = rm -rf $1
cp = cp -r $1 $2
ln = ln -sf $1 $2
else
mkdir = ssh $(REMOTE) 'mkdir -p $1'
rm = ssh $(REMOTE) 'sudo rm -rf $1'
ifeq ($(SSHPASS),)
ssh = ssh $(REMOTE) $1
else
ssh = sshpass -p $(SSHPASS) ssh $(REMOTE) $1
endif
mkdir = $(call ssh,'sudo mkdir -p $1')
rm = $(call ssh,'sudo rm -rf $1')
ln = $(call ssh,'sudo ln -sf $1 $2')
define cp
tar --transform "s|^$1|x|" -czv -f- $1 | ssh $(REMOTE) "sudo tar --transform 's|^x|$2|' -xzv -f- -C /"
$(TAR_BIN) --transform "s|^$1|x|" -czv -f- $1 | $(call ssh,"sudo tar --transform 's|^x|$2|' -xzv -f- -C /")
endef
endif
.DELETE_ON_ERROR:
.PHONY: all gzip install clean format sdk snapshots frontends ui backend reflash startos_raspberrypi.img sudo
.PHONY: all metadata install clean format sdk snapshots frontends ui backend reflash deb $(IMAGE_TYPE) squashfs sudo wormhole docker-buildx
all: $(ALL_TARGETS)
metadata: $(VERSION_FILE) $(PLATFORM_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE)
sudo:
sudo true
@@ -54,9 +70,13 @@ clean:
rm -rf patch-db/client/dist
rm -rf patch-db/target
rm -rf cargo-deps
rm ENVIRONMENT.txt
rm GIT_HASH.txt
rm VERSION.txt
rm -rf dpkg-workdir
rm -rf image-recipe/deb
rm -rf results
rm -f ENVIRONMENT.txt
rm -f PLATFORM.txt
rm -f GIT_HASH.txt
rm -f VERSION.txt
format:
cd backend && cargo +nightly fmt
@@ -65,77 +85,93 @@ format:
sdk:
cd backend/ && ./install-sdk.sh
startos_raspberrypi.img: $(BUILD_SRC) startos.raspberrypi.squashfs $(VERSION_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep | sudo
./build/raspberrypi/make-image.sh
deb: results/$(BASENAME).deb
debian/control: build/lib/depends build/lib/conflicts
./debuild/control.sh
results/$(BASENAME).deb: dpkg-build.sh $(DEBIAN_SRC) $(VERSION_FILE) $(PLATFORM_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE)
PLATFORM=$(PLATFORM) ./dpkg-build.sh
$(IMAGE_TYPE): results/$(BASENAME).$(IMAGE_TYPE)
squashfs: results/$(BASENAME).squashfs
results/$(BASENAME).$(IMAGE_TYPE) results/$(BASENAME).squashfs: $(IMAGE_RECIPE_SRC) results/$(BASENAME).deb
./image-recipe/run-local-build.sh "results/$(BASENAME).deb"
# For creating os images. DO NOT USE
install: $(ALL_TARGETS)
$(call mkdir,$(DESTDIR)/usr/bin)
$(call cp,backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-init,$(DESTDIR)/usr/bin/embassy-init)
$(call cp,backend/target/$(ARCH)-unknown-linux-gnu/release/embassyd,$(DESTDIR)/usr/bin/embassyd)
$(call cp,backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-cli,$(DESTDIR)/usr/bin/embassy-cli)
$(call cp,backend/target/$(ARCH)-unknown-linux-gnu/release/avahi-alias,$(DESTDIR)/usr/bin/avahi-alias)
if [ "$(OS_ARCH)" = "raspberrypi" ]; then $(call cp,cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep,$(DESTDIR)/usr/bin/pi-beep); fi
$(call cp,backend/target/$(ARCH)-unknown-linux-gnu/release/startbox,$(DESTDIR)/usr/bin/startbox)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/startd)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-cli)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-sdk)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-deno)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/avahi-alias)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/embassy-cli)
if [ "$(PLATFORM)" = "raspberrypi" ]; then $(call cp,cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep,$(DESTDIR)/usr/bin/pi-beep); fi
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then $(call cp,cargo-deps/$(ARCH)-unknown-linux-gnu/release/tokio-console,$(DESTDIR)/usr/bin/tokio-console); fi
$(call mkdir,$(DESTDIR)/lib/systemd/system)
$(call cp,backend/startd.service,$(DESTDIR)/lib/systemd/system/startd.service)
$(call mkdir,$(DESTDIR)/usr/lib)
$(call rm,$(DESTDIR)/usr/lib/embassy)
$(call cp,build/lib,$(DESTDIR)/usr/lib/embassy)
$(call rm,$(DESTDIR)/usr/lib/startos)
$(call cp,build/lib,$(DESTDIR)/usr/lib/startos)
$(call cp,ENVIRONMENT.txt,$(DESTDIR)/usr/lib/embassy/ENVIRONMENT.txt)
$(call cp,GIT_HASH.txt,$(DESTDIR)/usr/lib/embassy/GIT_HASH.txt)
$(call cp,VERSION.txt,$(DESTDIR)/usr/lib/embassy/VERSION.txt)
$(call cp,PLATFORM.txt,$(DESTDIR)/usr/lib/startos/PLATFORM.txt)
$(call cp,ENVIRONMENT.txt,$(DESTDIR)/usr/lib/startos/ENVIRONMENT.txt)
$(call cp,GIT_HASH.txt,$(DESTDIR)/usr/lib/startos/GIT_HASH.txt)
$(call cp,VERSION.txt,$(DESTDIR)/usr/lib/startos/VERSION.txt)
$(call mkdir,$(DESTDIR)/usr/lib/embassy/container)
$(call cp,libs/target/aarch64-unknown-linux-musl/release/embassy_container_init,$(DESTDIR)/usr/lib/embassy/container/embassy_container_init.arm64)
$(call cp,libs/target/x86_64-unknown-linux-musl/release/embassy_container_init,$(DESTDIR)/usr/lib/embassy/container/embassy_container_init.amd64)
$(call mkdir,$(DESTDIR)/usr/lib/startos/container)
$(call cp,libs/target/aarch64-unknown-linux-musl/release/embassy_container_init,$(DESTDIR)/usr/lib/startos/container/embassy_container_init.arm64)
$(call cp,libs/target/x86_64-unknown-linux-musl/release/embassy_container_init,$(DESTDIR)/usr/lib/startos/container/embassy_container_init.amd64)
$(call mkdir,$(DESTDIR)/usr/lib/embassy/system-images)
$(call cp,system-images/compat/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/embassy/system-images/compat.tar)
$(call cp,system-images/utils/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/embassy/system-images/utils.tar)
$(call cp,system-images/binfmt/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/embassy/system-images/binfmt.tar)
$(call mkdir,$(DESTDIR)/usr/lib/startos/system-images)
$(call cp,system-images/compat/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/compat.tar)
$(call cp,system-images/utils/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/utils.tar)
$(call cp,system-images/binfmt/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/binfmt.tar)
$(call mkdir,$(DESTDIR)/var/www/html)
$(call cp,frontend/dist/diagnostic-ui,$(DESTDIR)/var/www/html/diagnostic)
$(call cp,frontend/dist/setup-wizard,$(DESTDIR)/var/www/html/setup)
$(call cp,frontend/dist/install-wizard,$(DESTDIR)/var/www/html/install)
$(call cp,frontend/dist/ui,$(DESTDIR)/var/www/html/main)
$(call cp,index.html,$(DESTDIR)/var/www/html/index.html)
update-overlay:
update-overlay: $(ALL_TARGETS)
@echo "\033[33m!!! THIS WILL ONLY REFLASH YOUR DEVICE IN MEMORY !!!\033[0m"
@echo "\033[33mALL CHANGES WILL BE REVERTED IF YOU RESTART THE DEVICE\033[0m"
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
@if [ "`ssh $(REMOTE) 'cat /usr/lib/embassy/VERSION.txt'`" != "`cat ./VERSION.txt`" ]; then >&2 echo "Embassy requires migrations: update-overlay is unavailable." && false; fi
@if ssh $(REMOTE) "pidof embassy-init"; then >&2 echo "Embassy in INIT: update-overlay is unavailable." && false; fi
ssh $(REMOTE) "sudo systemctl stop embassyd"
$(MAKE) install REMOTE=$(REMOTE) OS_ARCH=$(OS_ARCH)
ssh $(REMOTE) "sudo systemctl start embassyd"
@if [ "`ssh $(REMOTE) 'cat /usr/lib/startos/VERSION.txt'`" != "`cat ./VERSION.txt`" ]; then >&2 echo "StartOS requires migrations: update-overlay is unavailable." && false; fi
$(call ssh,"sudo systemctl stop startd")
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) PLATFORM=$(PLATFORM)
$(call ssh,"sudo systemctl start startd")
update:
wormhole: backend/target/$(ARCH)-unknown-linux-gnu/release/startbox
@wormhole send backend/target/$(ARCH)-unknown-linux-gnu/release/startbox 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo /usr/lib/startos/scripts/chroot-and-upgrade \"cd /usr/bin && rm startbox && wormhole receive --accept-file %s && chmod +x startbox\"\n", $$3 }'
update: $(ALL_TARGETS)
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
ssh $(REMOTE) "sudo rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next/"
$(MAKE) install REMOTE=$(REMOTE) DESTDIR=/media/embassy/next OS_ARCH=$(OS_ARCH)
ssh $(REMOTE) "sudo touch /media/embassy/config/upgrade && sudo sync && sudo reboot"
$(call ssh,"sudo rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next/")
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/embassy/next PLATFORM=$(PLATFORM)
$(call ssh,'sudo NO_SYNC=1 /media/embassy/next/usr/lib/startos/scripts/chroot-and-upgrade "apt-get install -y $(shell cat ./build/lib/depends)"')
emulate-reflash:
emulate-reflash: $(ALL_TARGETS)
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
ssh $(REMOTE) "sudo rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next/"
$(MAKE) install REMOTE=$(REMOTE) DESTDIR=/media/embassy/next OS_ARCH=$(OS_ARCH)
ssh $(REMOTE) "sudo touch /media/embassy/config/upgrade && sudo rm -f /media/embassy/config/disk.guid && sudo sync && sudo reboot"
$(call ssh,"sudo rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next/")
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/embassy/next PLATFORM=$(PLATFORM)
$(call ssh,"sudo touch /media/embassy/config/upgrade && sudo rm -f /media/embassy/config/disk.guid && sudo sync && sudo reboot")
system-images/compat/docker-images/aarch64.tar system-images/compat/docker-images/x86_64.tar: $(COMPAT_SRC)
cd system-images/compat && make
upload-ota: results/$(BASENAME).squashfs
TARGET=$(TARGET) KEY=$(KEY) ./upload-ota.sh
system-images/utils/docker-images/aarch64.tar system-images/utils/docker-images/x86_64.tar: $(UTILS_SRC)
cd system-images/utils && make
build/lib/depends build/lib/conflicts: build/dpkg-deps/*
build/dpkg-deps/generate.sh
system-images/binfmt/docker-images/aarch64.tar system-images/binfmt/docker-images/x86_64.tar: $(BINFMT_SRC)
cd system-images/binfmt && make
system-images/compat/docker-images/$(ARCH).tar: $(COMPAT_SRC) backend/Cargo.lock | docker-buildx
cd system-images/compat && make docker-images/$(ARCH).tar && touch docker-images/$(ARCH).tar
raspios.img:
wget --continue https://downloads.raspberrypi.org/raspios_lite_arm64/images/raspios_lite_arm64-2022-01-28/2022-01-28-raspios-bullseye-arm64-lite.zip
unzip 2022-01-28-raspios-bullseye-arm64-lite.zip
mv 2022-01-28-raspios-bullseye-arm64-lite.img raspios.img
system-images/utils/docker-images/$(ARCH).tar: $(UTILS_SRC) | docker-buildx
cd system-images/utils && make docker-images/$(ARCH).tar && touch docker-images/$(ARCH).tar
system-images/binfmt/docker-images/$(ARCH).tar: $(BINFMT_SRC) | docker-buildx
cd system-images/binfmt && make docker-images/$(ARCH).tar && touch docker-images/$(ARCH).tar
snapshots: libs/snapshot_creator/Cargo.toml
cd libs/ && ./build-v8-snapshot.sh
@@ -148,27 +184,26 @@ $(EMBASSY_BINS): $(BACKEND_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) frontend/pa
frontend/node_modules: frontend/package.json
npm --prefix frontend ci
frontend/dist/ui: $(FRONTEND_UI_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE)
frontend/dist/raw/ui: $(FRONTEND_UI_SRC) $(FRONTEND_SHARED_SRC)
npm --prefix frontend run build:ui
frontend/dist/setup-wizard: $(FRONTEND_SETUP_WIZARD_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE)
frontend/dist/raw/setup-wizard: $(FRONTEND_SETUP_WIZARD_SRC) $(FRONTEND_SHARED_SRC)
npm --prefix frontend run build:setup
frontend/dist/diagnostic-ui: $(FRONTEND_DIAGNOSTIC_UI_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE)
frontend/dist/raw/diagnostic-ui: $(FRONTEND_DIAGNOSTIC_UI_SRC) $(FRONTEND_SHARED_SRC)
npm --prefix frontend run build:dui
frontend/dist/install-wizard: $(FRONTEND_INSTALL_WIZARD_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE)
frontend/dist/raw/install-wizard: $(FRONTEND_INSTALL_WIZARD_SRC) $(FRONTEND_SHARED_SRC)
npm --prefix frontend run build:install-wiz
frontend/dist/static: $(EMBASSY_UIS) $(ENVIRONMENT_FILE)
./compress-uis.sh
frontend/config.json: $(GIT_HASH_FILE) frontend/config-sample.json
jq '.useMocks = false' frontend/config-sample.json > frontend/config.json
jq '.packageArch = "$(ARCH)"' frontend/config.json > frontend/config.json.tmp
jq '.osArch = "$(OS_ARCH)"' frontend/config.json.tmp > frontend/config.json
rm frontend/config.json.tmp
npm --prefix frontend run-script build-config
jq '.useMocks = false' frontend/config-sample.json | jq '.gitHash = "$(shell cat GIT_HASH.txt)"' > frontend/config.json
frontend/patchdb-ui-seed.json: frontend/package.json
jq '."ack-welcome" = $(shell yq '.version' frontend/package.json)' frontend/patchdb-ui-seed.json > ui-seed.tmp
jq '."ack-welcome" = $(shell jq '.version' frontend/package.json)' frontend/patchdb-ui-seed.json > ui-seed.tmp
mv ui-seed.tmp frontend/patchdb-ui-seed.json
patch-db/client/node_modules: patch-db/client/package.json
@@ -179,20 +214,17 @@ patch-db/client/dist: $(PATCH_DB_CLIENT_SRC) patch-db/client/node_modules
npm --prefix frontend run build:deps
# used by github actions
backend-$(ARCH).tar: $(EMBASSY_BINS)
compiled-$(ARCH).tar: $(COMPILED_TARGETS) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE)
tar -cvf $@ $^
# this is a convenience step to build all frontends - it is not referenced elsewhere in this file
frontends: $(EMBASSY_UIS)
# this is a convenience step to build the UI
ui: frontend/dist/ui
# used by github actions
backend: $(EMBASSY_BINS)
cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast:
./build-cargo-dep.sh nc-broadcast
ui: frontend/dist/raw/ui
cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep:
./build-cargo-dep.sh pi-beep
ARCH=aarch64 ./build-cargo-dep.sh pi-beep
cargo-deps/$(ARCH)-unknown-linux-gnu/release/tokio-console:
ARCH=$(ARCH) ./build-cargo-dep.sh tokio-console

README.md (125 changed lines)

@@ -1,51 +1,84 @@
# StartOS
[![version](https://img.shields.io/github/v/tag/Start9Labs/start-os?color=success)](https://github.com/Start9Labs/start-os/releases)
[![build](https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml/badge.svg)](https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml)
[![community](https://img.shields.io/badge/community-matrix-yellow)](https://matrix.to/#/#community:matrix.start9labs.com)
[![community](https://img.shields.io/badge/community-telegram-informational)](https://t.me/start9_labs)
[![support](https://img.shields.io/badge/support-docs-important)](https://docs.start9.com)
[![developer](https://img.shields.io/badge/developer-matrix-blueviolet)](https://matrix.to/#/#community-dev:matrix.start9labs.com)
[![website](https://img.shields.io/website?down_color=lightgrey&down_message=offline&up_color=green&up_message=online&url=https%3A%2F%2Fstart9.com)](https://start9.com)
[![mastodon](https://img.shields.io/mastodon/follow/000000001?domain=https%3A%2F%2Fmastodon.start9labs.com&label=Follow&style=social)](http://mastodon.start9labs.com)
[![twitter](https://img.shields.io/twitter/follow/start9labs?label=Follow)](https://twitter.com/start9labs)
### _Welcome to the era of Sovereign Computing_ ###
StartOS is a browser-based, graphical operating system for a personal server. StartOS facilitates the discovery, installation, network configuration, service configuration, data backup, dependency management, and health monitoring of self-hosted software services. It is the most advanced, secure, reliable, and user friendly personal server OS in the world.
## Running StartOS
There are multiple ways to get your hands on StartOS.
### :moneybag: Buy a Start9 server
This is the most convenient option. Simply [buy a server](https://start9.com) from Start9 and plug it in. Depending on where you live, shipping costs and import duties will vary.
### :construction_worker: Build your own server
This option is easier than you might imagine, and there are 4 reasons why you might prefer it:
1. You already have your own hardware.
1. You want to save on shipping costs.
1. You prefer not to divulge your physical address.
1. You just like building things.
To pursue this option, follow one of our [DIY guides](https://start9.com/latest/diy).
### :hammer_and_wrench: Build StartOS from Source
StartOS can be built from source, for personal use, for free.
A detailed guide for doing so can be found [here](https://github.com/Start9Labs/start-os/blob/master/build/README.md).
## :heart: Contributing
There are multiple ways to contribute: work directly on StartOS, package a service for the marketplace, or help with documentation and guides. To learn more about contributing, see [here](https://docs.start9.com/latest/contribute/) or [here](https://github.com/Start9Labs/start-os/blob/master/CONTRIBUTING.md).
## UI Screenshots
<div align="center">
<img src="frontend/projects/shared/assets/img/icon.png" alt="StartOS Logo" width="16%" />
<h1 style="margin-top: 0;">StartOS</h1>
<a href="https://github.com/Start9Labs/start-os/releases">
<img alt="GitHub release (with filter)" src="https://img.shields.io/github/v/release/start9labs/start-os?logo=github">
</a>
<a href="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml">
<img src="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml/badge.svg">
</a>
<a href="https://heyapollo.com/product/startos">
<img alt="Static Badge" src="https://img.shields.io/badge/apollo-review%20%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%20-slateblue">
</a>
<a href="https://twitter.com/start9labs">
<img alt="X (formerly Twitter) Follow" src="https://img.shields.io/twitter/follow/start9labs">
</a>
<a href="https://mastodon.start9labs.com">
<img src="https://img.shields.io/mastodon/follow/000000001?domain=https%3A%2F%2Fmastodon.start9labs.com&label=Follow&style=social">
</a>
<a href="https://matrix.to/#/#community:matrix.start9labs.com">
<img alt="Static Badge" src="https://img.shields.io/badge/community-matrix-yellow?logo=matrix">
</a>
<a href="https://t.me/start9_labs">
<img alt="Static Badge" src="https://img.shields.io/badge/community-telegram-blue?logo=telegram">
</a>
<a href="https://docs.start9.com">
<img alt="Static Badge" src="https://img.shields.io/badge/docs-orange?label=%F0%9F%91%A4%20support">
</a>
<a href="https://matrix.to/#/#community-dev:matrix.start9labs.com">
<img alt="Static Badge" src="https://img.shields.io/badge/developer-matrix-darkcyan?logo=matrix">
</a>
<a href="https://start9.com">
<img alt="Website" src="https://img.shields.io/website?up_message=online&down_message=offline&url=https%3A%2F%2Fstart9.com&logo=website&label=%F0%9F%8C%90%20website">
</a>
</div>
<br />
<div align="center">
<h3>
Welcome to the era of Sovereign Computing
</h3>
<p>
StartOS is an open source Linux distribution optimized for running a personal server. It facilitates the discovery, installation, network configuration, service configuration, data backup, dependency management, and health monitoring of self-hosted software services.
</p>
</div>
<br />
<p align="center">
<img src="assets/StartOS.png" alt="StartOS" width="85%">
</p>
<br />
## Running StartOS
There are multiple ways to get started with StartOS:
### 💰 Buy a Start9 server
This is the most convenient option. Simply [buy a server](https://store.start9.com) from Start9 and plug it in.
### 👷 Build your own server
This option is easier than you might imagine, and there are 4 reasons why you might prefer it:
1. You already have hardware
1. You want to save on shipping costs
1. You prefer not to divulge your physical address
1. You just like building things
To pursue this option, follow one of our [DIY guides](https://start9.com/latest/diy).
## ❤️ Contributing
There are multiple ways to contribute: work directly on StartOS, package a service for the marketplace, or help with documentation and guides. To learn more about contributing, see [here](https://start9.com/contribute/).
To report security issues, please email our security team at security@start9.com.
## 🌎 Marketplace
There are dozens of services available for StartOS, and new ones are being added all the time. Check out the full list of available services [here](https://marketplace.start9.com/marketplace). To read more about the Marketplace ecosystem, check out this [blog post](https://blog.start9.com/start9-marketplace-strategy/).
## 🖥️ User Interface Screenshots
<p align="center">
<img src="assets/preferences.png" alt="StartOS Preferences" width="49%">
<img src="assets/ghost.png" alt="StartOS Ghost Service" width="49%">
</p>
<p align="center">
<img src="assets/synapse-health-check.png" alt="StartOS Synapse Health Checks" width="49%">
<img src="assets/sideload.png" alt="StartOS Sideload Service" width="49%">
<img src="assets/registry.png" alt="StartOS Marketplace" width="49%">
<img src="assets/community.png" alt="StartOS Community Registry" width="49%">
<img src="assets/c-lightning.png" alt="StartOS NextCloud Service" width="49%">
<img src="assets/btcpay.png" alt="StartOS BTCPay Service" width="49%">
<img src="assets/nextcloud.png" alt="StartOS System Settings" width="49%">
<img src="assets/system.png" alt="StartOS System Settings" width="49%">
<img src="assets/welcome.png" alt="StartOS System Settings" width="49%">
<img src="assets/logs.png" alt="StartOS System Settings" width="49%">
</p>

[Binary image diffs not shown. Added: assets/btcpay.png (396 KiB), assets/c-lightning.png (402 KiB), assets/community.png (591 KiB), assets/logs.png (1.6 MiB), assets/nextcloud.png (319 KiB), assets/registry.png (521 KiB), assets/system.png (331 KiB), assets/welcome.png (402 KiB). One existing image grew from 191 KiB to 2.1 MiB, and four others (281 KiB, 266 KiB, 154 KiB, 213 KiB) were removed.]


@@ -0,0 +1,16 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
},
"nullable": []
},
"hash": "1ce5254f27de971fd87f5ab66d300f2b22433c86617a0dbf796bf2170186dd2e"
}
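
These `.sqlx/query-*.json` files are sqlx's offline query cache: with the bump from sqlx 0.6 to 0.7 elsewhere in this changeset, the single `sqlx-data.json` (deleted further down) is replaced by one JSON file per compile-time-checked statement, keyed by the query hash, so the backend can build with `SQLX_OFFLINE=true` and no live PostgreSQL instance. As a minimal sketch of what such an entry verifies (the wrapper function below is assumed for illustration, not taken from the backend source), the `network_keys` insert above pairs with a `sqlx::query!` call like:

```rust
use sqlx::PgPool;

// Hypothetical wrapper, for illustration only: the cached metadata above lets
// sqlx type-check this statement at compile time without a database connection.
pub async fn add_network_key(
    pool: &PgPool,
    package: &str,
    interface: &str,
    key: &[u8], // Bytea parameter
) -> Result<(), sqlx::Error> {
    sqlx::query!(
        "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING",
        package,
        interface,
        key
    )
    .execute(pool)
    .await?;
    Ok(())
}
```

After changing a query, the cache is regenerated with `cargo sqlx prepare` against a running database.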


@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM ssh_keys WHERE fingerprint = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text"
]
},
"nullable": []
},
"hash": "21471490cdc3adb206274cc68e1ea745ffa5da4479478c1fd2158a45324b1930"
}


@@ -0,0 +1,40 @@
{
"db_name": "PostgreSQL",
"query": "SELECT hostname, path, username, password FROM cifs_shares WHERE id = $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "hostname",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "path",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "username",
"type_info": "Text"
},
{
"ordinal": 3,
"name": "password",
"type_info": "Text"
}
],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": [
false,
false,
false,
true
]
},
"hash": "28ea34bbde836e0618c5fc9bb7c36e463c20c841a7d6a0eb15be0f24f4a928ec"
}


@@ -0,0 +1,34 @@
{
"db_name": "PostgreSQL",
"query": "SELECT * FROM ssh_keys WHERE fingerprint = $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "fingerprint",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "openssh_pubkey",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "created_at",
"type_info": "Text"
}
],
"parameters": {
"Left": [
"Text"
]
},
"nullable": [
false,
false,
false
]
},
"hash": "4099028a5c0de578255bf54a67cef6cb0f1e9a4e158260700f1639dd4b438997"
}


@@ -0,0 +1,50 @@
{
"db_name": "PostgreSQL",
"query": "SELECT * FROM session WHERE logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "logged_in",
"type_info": "Timestamp"
},
{
"ordinal": 2,
"name": "logged_out",
"type_info": "Timestamp"
},
{
"ordinal": 3,
"name": "last_active",
"type_info": "Timestamp"
},
{
"ordinal": 4,
"name": "user_agent",
"type_info": "Text"
},
{
"ordinal": 5,
"name": "metadata",
"type_info": "Text"
}
],
"parameters": {
"Left": []
},
"nullable": [
false,
false,
true,
false,
true,
false
]
},
"hash": "4691e3a2ce80b59009ac17124f54f925f61dc5ea371903e62cdffa5d7b67ca96"
}


@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text"
]
},
"nullable": []
},
"hash": "4bcfbefb1eb3181343871a1cd7fc3afb81c2be5c681cfa8b4be0ce70610e9c3a"
}


@@ -0,0 +1,20 @@
{
"db_name": "PostgreSQL",
"query": "SELECT password FROM account",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "password",
"type_info": "Text"
}
],
"parameters": {
"Left": []
},
"nullable": [
false
]
},
"hash": "629be61c3c341c131ddbbff0293a83dbc6afd07cae69d246987f62cf0cc35c2a"
}


@@ -0,0 +1,23 @@
{
"db_name": "PostgreSQL",
"query": "SELECT key FROM tor WHERE package = $1 AND interface = $2",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "key",
"type_info": "Bytea"
}
],
"parameters": {
"Left": [
"Text",
"Text"
]
},
"nullable": [
false
]
},
"hash": "687688055e63d27123cdc89a5bbbd8361776290a9411d527eaf1fdb40bef399d"
}


@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "UPDATE session SET last_active = CURRENT_TIMESTAMP WHERE id = $1 AND logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text"
]
},
"nullable": []
},
"hash": "6d35ccf780fb2bb62586dd1d3df9c1550a41ee580dad3f49d35cb843ebef10ca"
}


@@ -0,0 +1,24 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO UPDATE SET package = EXCLUDED.package RETURNING key",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "key",
"type_info": "Bytea"
}
],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
},
"nullable": [
false
]
},
"hash": "770c1017734720453dc87b58c385b987c5af5807151ff71a59000014586752e0"
}


@@ -0,0 +1,65 @@
{
"db_name": "PostgreSQL",
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications WHERE id < $1 ORDER BY id DESC LIMIT $2",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "package_id",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "created_at",
"type_info": "Timestamp"
},
{
"ordinal": 3,
"name": "code",
"type_info": "Int4"
},
{
"ordinal": 4,
"name": "level",
"type_info": "Text"
},
{
"ordinal": 5,
"name": "title",
"type_info": "Text"
},
{
"ordinal": 6,
"name": "message",
"type_info": "Text"
},
{
"ordinal": 7,
"name": "data",
"type_info": "Text"
}
],
"parameters": {
"Left": [
"Int4",
"Int8"
]
},
"nullable": [
false,
true,
false,
false,
false,
false,
false,
true
]
},
"hash": "7b64f032d507e8ffe37c41f4c7ad514a66c421a11ab04c26d89a7aa8f6b67210"
}


@@ -0,0 +1,19 @@
{
"db_name": "PostgreSQL",
"query": "\n INSERT INTO account (\n id,\n server_id,\n hostname,\n password,\n network_key,\n root_ca_key_pem,\n root_ca_cert_pem\n ) VALUES (\n 0, $1, $2, $3, $4, $5, $6\n ) ON CONFLICT (id) DO UPDATE SET\n server_id = EXCLUDED.server_id,\n hostname = EXCLUDED.hostname,\n password = EXCLUDED.password,\n network_key = EXCLUDED.network_key,\n root_ca_key_pem = EXCLUDED.root_ca_key_pem,\n root_ca_cert_pem = EXCLUDED.root_ca_cert_pem\n ",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Bytea",
"Text",
"Text"
]
},
"nullable": []
},
"hash": "7c7a3549c997eb75bf964ea65fbb98a73045adf618696cd838d79203ef5383fb"
}


@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM tor WHERE package = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text"
]
},
"nullable": []
},
"hash": "7e0649d839927e57fa03ee51a2c9f96a8bdb0fc97ee8a3c6df1069e1e2b98576"
}


@@ -0,0 +1,16 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO tor (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
},
"nullable": []
},
"hash": "8951b9126fbf60dbb5997241e11e3526b70bccf3e407327917294a993bc17ed5"
}


@@ -0,0 +1,64 @@
{
"db_name": "PostgreSQL",
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications ORDER BY id DESC LIMIT $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "package_id",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "created_at",
"type_info": "Timestamp"
},
{
"ordinal": 3,
"name": "code",
"type_info": "Int4"
},
{
"ordinal": 4,
"name": "level",
"type_info": "Text"
},
{
"ordinal": 5,
"name": "title",
"type_info": "Text"
},
{
"ordinal": 6,
"name": "message",
"type_info": "Text"
},
{
"ordinal": 7,
"name": "data",
"type_info": "Text"
}
],
"parameters": {
"Left": [
"Int8"
]
},
"nullable": [
false,
true,
false,
false,
false,
false,
false,
true
]
},
"hash": "94d471bb374b4965c6cbedf8c17bbf6bea226d38efaf6559923c79a36d5ca08c"
}


@@ -0,0 +1,44 @@
{
"db_name": "PostgreSQL",
"query": "SELECT id, hostname, path, username, password FROM cifs_shares",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "hostname",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "path",
"type_info": "Text"
},
{
"ordinal": 3,
"name": "username",
"type_info": "Text"
},
{
"ordinal": 4,
"name": "password",
"type_info": "Text"
}
],
"parameters": {
"Left": []
},
"nullable": [
false,
false,
false,
false,
true
]
},
"hash": "95c4ab4c645f3302568c6ff13d85ab58252362694cf0f56999bf60194d20583a"
}


@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM cifs_shares WHERE id = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": []
},
"hash": "a60d6e66719325b08dc4ecfacaf337527233c84eee758ac9be967906e5841d27"
}


@@ -0,0 +1,32 @@
{
"db_name": "PostgreSQL",
"query": "SELECT fingerprint, openssh_pubkey, created_at FROM ssh_keys",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "fingerprint",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "openssh_pubkey",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "created_at",
"type_info": "Text"
}
],
"parameters": {
"Left": []
},
"nullable": [
false,
false,
false
]
},
"hash": "a6b0c8909a3a5d6d9156aebfb359424e6b5a1d1402e028219e21726f1ebd282e"
}


@@ -0,0 +1,18 @@
{
"db_name": "PostgreSQL",
"query": "UPDATE cifs_shares SET hostname = $1, path = $2, username = $3, password = $4 WHERE id = $5",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Text",
"Int4"
]
},
"nullable": []
},
"hash": "b1147beaaabbed89f2ab8c1e13ec4393a9a8fde2833cf096af766a979d94dee6"
}


@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM network_keys WHERE package = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text"
]
},
"nullable": []
},
"hash": "b203820ee1c553a4b246eac74b79bd10d5717b2a0ddecf22330b7d531aac7c5d"
}


@@ -0,0 +1,20 @@
{
"db_name": "PostgreSQL",
"query": "SELECT openssh_pubkey FROM ssh_keys",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "openssh_pubkey",
"type_info": "Text"
}
],
"parameters": {
"Left": []
},
"nullable": [
false
]
},
"hash": "d5117054072476377f3c4f040ea429d4c9b2cf534e76f35c80a2bf60e8599cca"
}


@@ -0,0 +1,19 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO notifications (package_id, code, level, title, message, data) VALUES ($1, $2, $3, $4, $5, $6)",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Int4",
"Text",
"Text",
"Text",
"Text"
]
},
"nullable": []
},
"hash": "da71f94b29798d1738d2b10b9a721ea72db8cfb362e7181c8226d9297507c62b"
}


@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM notifications WHERE id = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": []
},
"hash": "e185203cf84e43b801dfb23b4159e34aeaef1154dcd3d6811ab504915497ccf7"
}


@@ -0,0 +1,20 @@
{
"db_name": "PostgreSQL",
"query": "SELECT tor_key FROM account WHERE id = 0",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "tor_key",
"type_info": "Bytea"
}
],
"parameters": {
"Left": []
},
"nullable": [
true
]
},
"hash": "e545696735f202f9d13cf22a561f3ff3f9aed7f90027a9ba97634bcb47d772f0"
}


@@ -0,0 +1,16 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO session (id, user_agent, metadata) VALUES ($1, $2, $3)",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text"
]
},
"nullable": []
},
"hash": "e5843c5b0e7819b29aa1abf2266799bd4f82e761837b526a0972c3d4439a264d"
}


@@ -0,0 +1,40 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT\n network_keys.package,\n network_keys.interface,\n network_keys.key,\n tor.key AS \"tor_key?\"\n FROM\n network_keys\n LEFT JOIN\n tor\n ON\n network_keys.package = tor.package\n AND\n network_keys.interface = tor.interface\n WHERE\n network_keys.package = $1\n ",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "package",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "interface",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "key",
"type_info": "Bytea"
},
{
"ordinal": 3,
"name": "tor_key?",
"type_info": "Bytea"
}
],
"parameters": {
"Left": [
"Text"
]
},
"nullable": [
false,
false,
false,
false
]
},
"hash": "e95322a8e2ae3b93f1e974b24c0b81803f1e9ec9e8ebbf15cafddfc1c5a028ed"
}


@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM notifications WHERE id < $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": []
},
"hash": "eb750adaa305bdbf3c5b70aaf59139c7b7569602adb58f2d6b3a94da4f167b0a"
}


@@ -0,0 +1,25 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO cifs_shares (hostname, path, username, password) VALUES ($1, $2, $3, $4) RETURNING id",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Int4"
}
],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Text"
]
},
"nullable": [
false
]
},
"hash": "ecc765d8205c0876956f95f76944ac6a5f34dd820c4073b7728c7067aab9fded"
}


@@ -0,0 +1,16 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO ssh_keys (fingerprint, openssh_pubkey, created_at) VALUES ($1, $2, $3)",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text"
]
},
"nullable": []
},
"hash": "f6d1c5ef0f9d9577bea8382318967b9deb46da75788c7fe6082b43821c22d556"
}


@@ -0,0 +1,20 @@
{
"db_name": "PostgreSQL",
"query": "SELECT network_key FROM account WHERE id = 0",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "network_key",
"type_info": "Bytea"
}
],
"parameters": {
"Left": []
},
"nullable": [
false
]
},
"hash": "f7d2dae84613bcef330f7403352cc96547f3f6dbec11bf2eadfaf53ad8ab51b5"
}


@@ -0,0 +1,62 @@
{
"db_name": "PostgreSQL",
"query": "SELECT * FROM account WHERE id = 0",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "password",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "tor_key",
"type_info": "Bytea"
},
{
"ordinal": 3,
"name": "server_id",
"type_info": "Text"
},
{
"ordinal": 4,
"name": "hostname",
"type_info": "Text"
},
{
"ordinal": 5,
"name": "network_key",
"type_info": "Bytea"
},
{
"ordinal": 6,
"name": "root_ca_key_pem",
"type_info": "Text"
},
{
"ordinal": 7,
"name": "root_ca_cert_pem",
"type_info": "Text"
}
],
"parameters": {
"Left": []
},
"nullable": [
false,
false,
true,
true,
true,
false,
false,
false
]
},
"hash": "fe6e4f09f3028e5b6b6259e86cbad285680ce157aae9d7837ac020c8b2945e7f"
}

backend/Cargo.lock (generated, 4190 changed lines): diff suppressed because it is too large.


@@ -1,7 +1,7 @@
[package]
authors = ["Aiden McClelland <me@drbonez.dev>"]
description = "The core of StartOS"
documentation = "https://docs.rs/embassy-os"
documentation = "https://docs.rs/start-os"
edition = "2021"
keywords = [
"self-hosted",
@@ -11,158 +11,160 @@ keywords = [
"full-node",
"lightning",
]
name = "embassy-os"
name = "start-os"
readme = "README.md"
repository = "https://github.com/Start9Labs/start-os"
version = "0.3.4-rev.2"
version = "0.3.5"
license = "MIT"
[lib]
name = "embassy"
name = "startos"
path = "src/lib.rs"
[[bin]]
name = "embassyd"
path = "src/bin/embassyd.rs"
[[bin]]
name = "embassy-init"
path = "src/bin/embassy-init.rs"
[[bin]]
name = "embassy-sdk"
path = "src/bin/embassy-sdk.rs"
[[bin]]
name = "embassy-cli"
path = "src/bin/embassy-cli.rs"
[[bin]]
name = "avahi-alias"
path = "src/bin/avahi-alias.rs"
name = "startbox"
path = "src/main.rs"
[features]
avahi = ["avahi-sys"]
default = ["avahi", "js_engine"]
avahi-alias = ["avahi"]
cli = []
daemon = []
default = ["cli", "sdk", "daemon", "js_engine"]
dev = []
unstable = ["patch-db/unstable"]
docker = []
sdk = []
unstable = ["console-subscriber", "tokio/tracing"]
[dependencies]
aes = { version = "0.7.5", features = ["ctr"] }
async-compression = { version = "0.3.15", features = [
async-compression = { version = "0.4.4", features = [
"gzip",
"brotli",
"tokio",
] }
async-stream = "0.3.3"
async-trait = "0.1.56"
async-stream = "0.3.5"
async-trait = "0.1.74"
avahi-sys = { git = "https://github.com/Start9Labs/avahi-sys", version = "0.10.0", branch = "feature/dynamic-linking", features = [
"dynamic",
], optional = true }
base32 = "0.4.0"
base64 = "0.13.0"
base64ct = "1.5.1"
base64 = "0.21.4"
base64ct = "1.6.0"
basic-cookies = "0.1.4"
bollard = "0.13.0"
bytes = "1"
chrono = { version = "0.4.19", features = ["serde"] }
clap = "3.2.8"
color-eyre = "0.6.1"
cookie = "0.16.2"
cookie_store = "0.19.0"
chrono = { version = "0.4.31", features = ["serde"] }
clap = "3.2.25"
color-eyre = "0.6.2"
console = "0.15.7"
console-subscriber = { version = "0.2", optional = true }
cookie = "0.18.0"
cookie_store = "0.20.0"
current_platform = "0.2.0"
digest = "0.10.3"
digest-old = { package = "digest", version = "0.9.0" }
digest = "0.10.7"
divrem = "1.0.0"
ed25519 = { version = "1.5.2", features = ["pkcs8", "pem", "alloc"] }
ed25519-dalek = { version = "1.0.1", features = ["serde"] }
ed25519 = { version = "2.2.3", features = ["pkcs8", "pem", "alloc"] }
ed25519-dalek = { version = "2.0.0", features = [
"serde",
"zeroize",
"rand_core",
"digest",
] }
ed25519-dalek-v1 = { package = "ed25519-dalek", version = "1" }
embassy_container_init = { path = "../libs/embassy_container_init" }
emver = { version = "0.1.7", git = "https://github.com/Start9Labs/emver-rs.git", features = [
"serde",
] }
fd-lock-rs = "0.1.4"
futures = "0.3.21"
git-version = "0.3.5"
gpt = "3.0.0"
futures = "0.3.28"
gpt = "3.1.0"
helpers = { path = "../libs/helpers" }
embassy_container_init = { path = "../libs/embassy_container_init" }
hex = "0.4.3"
hmac = "0.12.1"
http = "0.2.8"
hyper = { version = "0.14.20", features = ["full"] }
hyper-ws-listener = "0.2.0"
imbl = "2.0.0"
indexmap = { version = "1.9.1", features = ["serde"] }
ipnet = { version = "2.7.1", features = ["serde"] }
http = "0.2.9"
hyper = { version = "0.14.27", features = ["full"] }
hyper-ws-listener = "0.3.0"
imbl = "2.0.2"
imbl-value = { git = "https://github.com/Start9Labs/imbl-value.git" }
include_dir = "0.7.3"
indexmap = { version = "2.0.2", features = ["serde"] }
indicatif = { version = "0.17.7", features = ["tokio"] }
ipnet = { version = "2.8.0", features = ["serde"] }
iprange = { version = "0.6.7", features = ["serde"] }
isocountry = "0.3.2"
itertools = "0.10.3"
josekit = "0.8.1"
itertools = "0.11.0"
jaq-core = "0.10.1"
jaq-std = "0.10.0"
josekit = "0.8.4"
js_engine = { path = '../libs/js_engine', optional = true }
jsonpath_lib = "0.3.0"
jsonpath_lib = { git = "https://github.com/Start9Labs/jsonpath.git" }
lazy_static = "1.4.0"
libc = "0.2.126"
log = "0.4.17"
mbrman = "0.5.0"
libc = "0.2.149"
log = "0.4.20"
mbrman = "0.5.2"
models = { version = "*", path = "../libs/models" }
nix = "0.25.0"
nom = "7.1.1"
num = "0.4.0"
num_enum = "0.5.7"
openssh-keys = "0.5.0"
openssl = { version = "0.10.41", features = ["vendored"] }
new_mime_guess = "4"
nix = { version = "0.27.1", features = ["user", "process", "signal", "fs"] }
nom = "7.1.3"
num = "0.4.1"
num_enum = "0.7.0"
openssh-keys = "0.6.2"
openssl = { version = "0.10.57", features = ["vendored"] }
p256 = { version = "0.13.2", features = ["pem"] }
patch-db = { version = "*", path = "../patch-db/patch-db", features = [
"trace",
] }
p256 = { version = "0.12.0", features = ["pem"] }
pbkdf2 = "0.11.0"
pin-project = "1.0.11"
pkcs8 = { version = "0.9.0", features = ["std"] }
pbkdf2 = "0.12.2"
pin-project = "1.1.3"
pkcs8 = { version = "0.10.2", features = ["std"] }
prettytable-rs = "0.10.0"
proptest = "1.0.0"
proptest-derive = "0.3.0"
proptest = "1.3.1"
proptest-derive = "0.4.0"
rand = { version = "0.8.5", features = ["std"] }
rand-old = { package = "rand", version = "0.7.3" }
regex = "1.6.0"
reqwest = { version = "0.11.11", features = ["stream", "json", "socks"] }
reqwest_cookie_store = "0.5.0"
rpassword = "7.0.0"
regex = "1.10.2"
reqwest = { version = "0.11.22", features = ["stream", "json", "socks"] }
reqwest_cookie_store = "0.6.0"
rpassword = "7.2.0"
rpc-toolkit = "0.2.2"
rust-argon2 = "1.0.0"
rust-argon2 = "2.0.0"
scopeguard = "1.1" # because avahi-sys fucks your shit up
serde = { version = "1.0.139", features = ["derive", "rc"] }
serde_cbor = { package = "ciborium", version = "0.2.0" }
serde_json = "1.0.82"
serde_toml = { package = "toml", version = "0.5.9" }
serde_with = { version = "2.0.1", features = ["macros", "json"] }
serde_yaml = "0.9.11"
serde = { version = "1.0", features = ["derive", "rc"] }
serde_cbor = { package = "ciborium", version = "0.2.1" }
serde_json = "1.0"
serde_toml = { package = "toml", version = "0.8.2" }
serde_with = { version = "3.4.0", features = ["macros", "json"] }
serde_yaml = "0.9.25"
sha2 = "0.10.2"
sha2-old = { package = "sha2", version = "0.9.9" }
simple-logging = "2.0.2"
sqlx = { version = "0.6.0", features = [
sqlx = { version = "0.7.2", features = [
"chrono",
"offline",
"runtime-tokio-rustls",
"postgres",
] }
ssh-key = { version = "0.5.1", features = ["ed25519"] }
stderrlog = "0.5.3"
tar = "0.4.38"
thiserror = "1.0.31"
tokio = { version = "1.23", features = ["full"] }
tokio-stream = { version = "0.1.11", features = ["io-util", "sync", "net"] }
sscanf = "0.4.1"
ssh-key = { version = "0.6.2", features = ["ed25519"] }
stderrlog = "0.5.4"
tar = "0.4.40"
thiserror = "1.0.49"
tokio = { version = "1", features = ["full"] }
tokio-rustls = "0.24.1"
tokio-socks = "0.5.1"
tokio-stream = { version = "0.1.14", features = ["io-util", "sync", "net"] }
tokio-tar = { git = "https://github.com/dr-bonez/tokio-tar.git" }
tokio-tungstenite = { version = "0.17.1", features = ["native-tls"] }
tokio-rustls = "0.23.4"
tokio-util = { version = "0.7.3", features = ["io"] }
tokio-tungstenite = { version = "0.20.1", features = ["native-tls"] }
tokio-util = { version = "0.7.9", features = ["io"] }
torut = "0.2.1"
tracing = "0.1.35"
tracing = "0.1.39"
tracing-error = "0.2.0"
tracing-futures = "0.2.5"
tracing-subscriber = { version = "0.3.14", features = ["env-filter"] }
trust-dns-server = "0.22.0"
typed-builder = "0.10.0"
url = { version = "2.2.2", features = ["serde"] }
uuid = { version = "1.1.2", features = ["v4"] }
zeroize = "1.5.7"
tracing-journald = "0.3.0"
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
trust-dns-server = "0.23.1"
typed-builder = "0.17.0"
url = { version = "2.4.1", features = ["serde"] }
urlencoding = "2.1.3"
uuid = { version = "1.4.1", features = ["v4"] }
zeroize = "1.6.0"
[profile.test]
opt-level = 3


@@ -5,6 +5,7 @@
- Recommended: [rust-analyzer](https://rust-analyzer.github.io/)
- [Docker](https://docs.docker.com/get-docker/)
- [Rust ARM64 Build Container](https://github.com/Start9Labs/rust-arm-builder)
- Mac: `brew install gnu-tar`
- Scripts (run within the `./backend` directory)
- `build-prod.sh` - compiles a release build of the artifacts for running on
ARM64
@@ -12,18 +13,17 @@
## Structure
The StartOS backend is broken up into 4 different binaries:
The StartOS backend is packed into a single binary, `startbox`, which is symlinked under
several different names; the name it is invoked under selects its behaviour:
- embassyd: This is the main workhorse of StartOS - any new functionality you
- startd: This is the main workhorse of StartOS - any new functionality you
want will likely go here
- embassy-init: This is the component responsible for allowing you to set up
your device, and handles system initialization on startup
- embassy-cli: This is a CLI tool that will allow you to issue commands to
embassyd and control it similarly to the UI
- embassy-sdk: This is a CLI tool that aids in building and packaging services
- start-cli: This is a CLI tool that will allow you to issue commands to
startd and control it similarly to the UI
- start-sdk: This is a CLI tool that aids in building and packaging services
you wish to deploy to StartOS
Finally there is a library `embassy` that supports all four of these tools.
Finally there is a library `startos` that supports all of these tools.
See [here](/backend/Cargo.toml) for details.
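
A minimal sketch of the multi-call dispatch this layout relies on (busybox-style; not the actual `startbox` source, and the applet entry points below are stand-ins):

```rust
use std::env;
use std::path::Path;

fn main() {
    // The symlink name the binary was invoked under (argv[0]) picks the applet.
    let argv0 = env::args().next().unwrap_or_default();
    let applet = Path::new(&argv0)
        .file_name()
        .and_then(|n| n.to_str())
        .unwrap_or("startbox")
        .to_owned();

    match applet.as_str() {
        "startd" => run_daemon(),
        "start-cli" => run_cli(),
        "start-sdk" => run_sdk(),
        other => eprintln!("unknown applet: {other}"),
    }
}

// Stand-in entry points, for illustration only.
fn run_daemon() {}
fn run_cli() {}
fn run_sdk() {}
```

This is why the `install-sdk.sh` change further down only needs `ln -sf` to expose `start-cli` and `start-sdk` after installing the one binary.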


@@ -1,24 +0,0 @@
#!/bin/bash
set -e
shopt -s expand_aliases
if [ "$0" != "./build-dev.sh" ]; then
>&2 echo "Must be run from backend directory"
exit 1
fi
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
alias 'rust-arm64-builder'='docker run $USE_TTY --rm -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-arm-cross:aarch64'
cd ..
rust-arm64-builder sh -c "(cd backend && cargo build --locked)"
cd backend
sudo chown -R $USER target
sudo chown -R $USER ~/.cargo
#rust-arm64-builder aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/embassyd


@@ -1,23 +0,0 @@
#!/bin/bash
set -e
shopt -s expand_aliases
if [ "$0" != "./build-portable-dev.sh" ]; then
>&2 echo "Must be run from backend directory"
exit 1
fi
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
alias 'rust-musl-builder'='docker run $USE_TTY --rm -v "$HOME"/.cargo/registry:/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-musl-cross:x86_64-musl'
cd ..
rust-musl-builder sh -c "(cd backend && cargo +beta build --target=x86_64-unknown-linux-musl --no-default-features --locked)"
cd backend
sudo chown -R $USER target
sudo chown -R $USER ~/.cargo


@@ -3,11 +3,6 @@
set -e
shopt -s expand_aliases
if [ -z "$OS_ARCH" ]; then
>&2 echo '$OS_ARCH is required'
exit 1
fi
if [ -z "$ARCH" ]; then
ARCH=$(uname -m)
fi
@@ -22,46 +17,28 @@ if tty -s; then
USE_TTY="-it"
fi
alias 'rust-gnu-builder'='docker run $USE_TTY --rm -e "OS_ARCH=$OS_ARCH" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src -P start9/rust-arm-cross:aarch64'
alias 'rust-musl-builder'='docker run $USE_TTY --rm -e "OS_ARCH=$OS_ARCH" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
cd ..
FLAGS=""
if [[ "$ENVIRONMENT" =~ (^|-)unstable($|-) ]]; then
FLAGS="unstable,$FLAGS"
fi
if [[ "$ENVIRONMENT" =~ (^|-)dev($|-) ]]; then
FLAGS="dev,$FLAGS"
fi
FEATURES="$(echo $ENVIRONMENT | sed 's/-/,/g')"
RUSTFLAGS=""
alias 'rust-gnu-builder'='docker run $USE_TTY --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/usr/local/cargo/registry -v "$(pwd)":/home/rust/src -w /home/rust/src -P start9/rust-arm-cross:aarch64'
alias 'rust-musl-builder'='docker run $USE_TTY --rm -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
set +e
fail=
if [[ "$FLAGS" = "" ]]; then
rust-gnu-builder sh -c "(git config --global --add safe.directory '*'; cd backend && cargo build --release --locked --target=$ARCH-unknown-linux-gnu)"
if test $? -ne 0; then
fail=true
fi
for ARCH in x86_64 aarch64
do
rust-musl-builder sh -c "(git config --global --add safe.directory '*'; cd libs && cargo build --release --locked --bin embassy_container_init )"
if test $? -ne 0; then
fail=true
fi
done
else
echo "FLAGS=$FLAGS"
rust-gnu-builder sh -c "(git config --global --add safe.directory '*'; cd backend && cargo build --release --features $FLAGS --locked --target=$ARCH-unknown-linux-gnu)"
if test $? -ne 0; then
fail=true
fi
for ARCH in x86_64 aarch64
do
rust-musl-builder sh -c "(git config --global --add safe.directory '*'; cd libs && cargo build --release --features $FLAGS --locked --bin embassy_container_init)"
if test $? -ne 0; then
fail=true
fi
done
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-gnu-builder sh -c "(cd backend && cargo build --release --features avahi-alias,$FEATURES --locked --target=$ARCH-unknown-linux-gnu)"
if test $? -ne 0; then
fail=true
fi
for ARCH in x86_64 aarch64
do
rust-musl-builder sh -c "(cd libs && cargo build --release --locked --bin embassy_container_init)"
if test $? -ne 0; then
fail=true
fi
done
set -e
cd backend
@@ -72,5 +49,3 @@ sudo chown -R $USER ../libs/target
if [ -n "$fail" ]; then
exit 1
fi
#rust-arm64-builder aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/embassyd


@@ -1,15 +0,0 @@
[Unit]
Description=Embassy Init
After=network-online.target
Requires=network-online.target
Wants=avahi-daemon.service
[Service]
Type=oneshot
Environment=RUST_LOG=embassy_init=debug,embassy=debug,js_engine=debug,patch_db=warn
ExecStart=/usr/bin/embassy-init
RemainAfterExit=true
StandardOutput=append:/var/log/embassy-init.log
[Install]
WantedBy=embassyd.service


@@ -1,17 +0,0 @@
[Unit]
Description=Embassy Daemon
After=embassy-init.service
Requires=embassy-init.service
[Service]
Type=simple
Environment=RUST_LOG=embassyd=debug,embassy=debug,js_engine=debug,patch_db=warn
ExecStart=/usr/bin/embassyd
Restart=always
RestartSec=3
ManagedOOMPreference=avoid
CPUAccounting=true
CPUWeight=1000
[Install]
WantedBy=multi-user.target


@@ -8,4 +8,14 @@ if [ "$0" != "./install-sdk.sh" ]; then
exit 1
fi
cargo install --bin=embassy-sdk --bin=embassy-cli --path=. --no-default-features --features=js_engine --locked
frontend="../frontend/dist/static"
[ -d "$frontend" ] || mkdir -p "$frontend"
if [ -z "$PLATFORM" ]; then
export PLATFORM=$(uname -m)
fi
cargo install --path=. --no-default-features --features=js_engine,sdk,cli --locked
startbox_loc=$(which startbox)
ln -sf $startbox_loc $(dirname $startbox_loc)/start-cli
ln -sf $startbox_loc $(dirname $startbox_loc)/start-sdk


@@ -1,744 +0,0 @@
{
"db": "PostgreSQL",
"1ce5254f27de971fd87f5ab66d300f2b22433c86617a0dbf796bf2170186dd2e": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
}
},
"query": "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING"
},
"21471490cdc3adb206274cc68e1ea745ffa5da4479478c1fd2158a45324b1930": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text"
]
}
},
"query": "DELETE FROM ssh_keys WHERE fingerprint = $1"
},
"28ea34bbde836e0618c5fc9bb7c36e463c20c841a7d6a0eb15be0f24f4a928ec": {
"describe": {
"columns": [
{
"name": "hostname",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "path",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "username",
"ordinal": 2,
"type_info": "Text"
},
{
"name": "password",
"ordinal": 3,
"type_info": "Text"
}
],
"nullable": [
false,
false,
false,
true
],
"parameters": {
"Left": [
"Int4"
]
}
},
"query": "SELECT hostname, path, username, password FROM cifs_shares WHERE id = $1"
},
"4099028a5c0de578255bf54a67cef6cb0f1e9a4e158260700f1639dd4b438997": {
"describe": {
"columns": [
{
"name": "fingerprint",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "openssh_pubkey",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "created_at",
"ordinal": 2,
"type_info": "Text"
}
],
"nullable": [
false,
false,
false
],
"parameters": {
"Left": [
"Text"
]
}
},
"query": "SELECT * FROM ssh_keys WHERE fingerprint = $1"
},
"4691e3a2ce80b59009ac17124f54f925f61dc5ea371903e62cdffa5d7b67ca96": {
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "logged_in",
"ordinal": 1,
"type_info": "Timestamp"
},
{
"name": "logged_out",
"ordinal": 2,
"type_info": "Timestamp"
},
{
"name": "last_active",
"ordinal": 3,
"type_info": "Timestamp"
},
{
"name": "user_agent",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "metadata",
"ordinal": 5,
"type_info": "Text"
}
],
"nullable": [
false,
false,
true,
false,
true,
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT * FROM session WHERE logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP"
},
"4bcfbefb1eb3181343871a1cd7fc3afb81c2be5c681cfa8b4be0ce70610e9c3a": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text"
]
}
},
"query": "UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = $1"
},
"629be61c3c341c131ddbbff0293a83dbc6afd07cae69d246987f62cf0cc35c2a": {
"describe": {
"columns": [
{
"name": "password",
"ordinal": 0,
"type_info": "Text"
}
],
"nullable": [
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT password FROM account"
},
"687688055e63d27123cdc89a5bbbd8361776290a9411d527eaf1fdb40bef399d": {
"describe": {
"columns": [
{
"name": "key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
false
],
"parameters": {
"Left": [
"Text",
"Text"
]
}
},
"query": "SELECT key FROM tor WHERE package = $1 AND interface = $2"
},
"6d35ccf780fb2bb62586dd1d3df9c1550a41ee580dad3f49d35cb843ebef10ca": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text"
]
}
},
"query": "UPDATE session SET last_active = CURRENT_TIMESTAMP WHERE id = $1 AND logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP"
},
"770c1017734720453dc87b58c385b987c5af5807151ff71a59000014586752e0": {
"describe": {
"columns": [
{
"name": "key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
false
],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
}
},
"query": "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO UPDATE SET package = EXCLUDED.package RETURNING key"
},
"7b64f032d507e8ffe37c41f4c7ad514a66c421a11ab04c26d89a7aa8f6b67210": {
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Int4"
},
{
"name": "package_id",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "created_at",
"ordinal": 2,
"type_info": "Timestamp"
},
{
"name": "code",
"ordinal": 3,
"type_info": "Int4"
},
{
"name": "level",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "title",
"ordinal": 5,
"type_info": "Text"
},
{
"name": "message",
"ordinal": 6,
"type_info": "Text"
},
{
"name": "data",
"ordinal": 7,
"type_info": "Text"
}
],
"nullable": [
false,
true,
false,
false,
false,
false,
false,
true
],
"parameters": {
"Left": [
"Int4",
"Int8"
]
}
},
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications WHERE id < $1 ORDER BY id DESC LIMIT $2"
},
"7c7a3549c997eb75bf964ea65fbb98a73045adf618696cd838d79203ef5383fb": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Bytea",
"Text",
"Text"
]
}
},
"query": "\n INSERT INTO account (\n id,\n server_id,\n hostname,\n password,\n network_key,\n root_ca_key_pem,\n root_ca_cert_pem\n ) VALUES (\n 0, $1, $2, $3, $4, $5, $6\n ) ON CONFLICT (id) DO UPDATE SET\n server_id = EXCLUDED.server_id,\n hostname = EXCLUDED.hostname,\n password = EXCLUDED.password,\n network_key = EXCLUDED.network_key,\n root_ca_key_pem = EXCLUDED.root_ca_key_pem,\n root_ca_cert_pem = EXCLUDED.root_ca_cert_pem\n "
},
"7e0649d839927e57fa03ee51a2c9f96a8bdb0fc97ee8a3c6df1069e1e2b98576": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text"
]
}
},
"query": "DELETE FROM tor WHERE package = $1"
},
"8951b9126fbf60dbb5997241e11e3526b70bccf3e407327917294a993bc17ed5": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
}
},
"query": "INSERT INTO tor (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING"
},
"94d471bb374b4965c6cbedf8c17bbf6bea226d38efaf6559923c79a36d5ca08c": {
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Int4"
},
{
"name": "package_id",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "created_at",
"ordinal": 2,
"type_info": "Timestamp"
},
{
"name": "code",
"ordinal": 3,
"type_info": "Int4"
},
{
"name": "level",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "title",
"ordinal": 5,
"type_info": "Text"
},
{
"name": "message",
"ordinal": 6,
"type_info": "Text"
},
{
"name": "data",
"ordinal": 7,
"type_info": "Text"
}
],
"nullable": [
false,
true,
false,
false,
false,
false,
false,
true
],
"parameters": {
"Left": [
"Int8"
]
}
},
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications ORDER BY id DESC LIMIT $1"
},
"95c4ab4c645f3302568c6ff13d85ab58252362694cf0f56999bf60194d20583a": {
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Int4"
},
{
"name": "hostname",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "path",
"ordinal": 2,
"type_info": "Text"
},
{
"name": "username",
"ordinal": 3,
"type_info": "Text"
},
{
"name": "password",
"ordinal": 4,
"type_info": "Text"
}
],
"nullable": [
false,
false,
false,
false,
true
],
"parameters": {
"Left": []
}
},
"query": "SELECT id, hostname, path, username, password FROM cifs_shares"
},
"a60d6e66719325b08dc4ecfacaf337527233c84eee758ac9be967906e5841d27": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Int4"
]
}
},
"query": "DELETE FROM cifs_shares WHERE id = $1"
},
"a6b0c8909a3a5d6d9156aebfb359424e6b5a1d1402e028219e21726f1ebd282e": {
"describe": {
"columns": [
{
"name": "fingerprint",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "openssh_pubkey",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "created_at",
"ordinal": 2,
"type_info": "Text"
}
],
"nullable": [
false,
false,
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT fingerprint, openssh_pubkey, created_at FROM ssh_keys"
},
"b1147beaaabbed89f2ab8c1e13ec4393a9a8fde2833cf096af766a979d94dee6": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Text",
"Int4"
]
}
},
"query": "UPDATE cifs_shares SET hostname = $1, path = $2, username = $3, password = $4 WHERE id = $5"
},
"d5117054072476377f3c4f040ea429d4c9b2cf534e76f35c80a2bf60e8599cca": {
"describe": {
"columns": [
{
"name": "openssh_pubkey",
"ordinal": 0,
"type_info": "Text"
}
],
"nullable": [
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT openssh_pubkey FROM ssh_keys"
},
"da71f94b29798d1738d2b10b9a721ea72db8cfb362e7181c8226d9297507c62b": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Int4",
"Text",
"Text",
"Text",
"Text"
]
}
},
"query": "INSERT INTO notifications (package_id, code, level, title, message, data) VALUES ($1, $2, $3, $4, $5, $6)"
},
"e185203cf84e43b801dfb23b4159e34aeaef1154dcd3d6811ab504915497ccf7": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Int4"
]
}
},
"query": "DELETE FROM notifications WHERE id = $1"
},
"e545696735f202f9d13cf22a561f3ff3f9aed7f90027a9ba97634bcb47d772f0": {
"describe": {
"columns": [
{
"name": "tor_key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
true
],
"parameters": {
"Left": []
}
},
"query": "SELECT tor_key FROM account WHERE id = 0"
},
"e5843c5b0e7819b29aa1abf2266799bd4f82e761837b526a0972c3d4439a264d": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text"
]
}
},
"query": "INSERT INTO session (id, user_agent, metadata) VALUES ($1, $2, $3)"
},
"e95322a8e2ae3b93f1e974b24c0b81803f1e9ec9e8ebbf15cafddfc1c5a028ed": {
"describe": {
"columns": [
{
"name": "package",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "interface",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "key",
"ordinal": 2,
"type_info": "Bytea"
},
{
"name": "tor_key?",
"ordinal": 3,
"type_info": "Bytea"
}
],
"nullable": [
false,
false,
false,
false
],
"parameters": {
"Left": [
"Text"
]
}
},
"query": "\n SELECT\n network_keys.package,\n network_keys.interface,\n network_keys.key,\n tor.key AS \"tor_key?\"\n FROM\n network_keys\n LEFT JOIN\n tor\n ON\n network_keys.package = tor.package\n AND\n network_keys.interface = tor.interface\n WHERE\n network_keys.package = $1\n "
},
"eb750adaa305bdbf3c5b70aaf59139c7b7569602adb58f2d6b3a94da4f167b0a": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Int4"
]
}
},
"query": "DELETE FROM notifications WHERE id < $1"
},
"ecc765d8205c0876956f95f76944ac6a5f34dd820c4073b7728c7067aab9fded": {
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Int4"
}
],
"nullable": [
false
],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Text"
]
}
},
"query": "INSERT INTO cifs_shares (hostname, path, username, password) VALUES ($1, $2, $3, $4) RETURNING id"
},
"f6d1c5ef0f9d9577bea8382318967b9deb46da75788c7fe6082b43821c22d556": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text"
]
}
},
"query": "INSERT INTO ssh_keys (fingerprint, openssh_pubkey, created_at) VALUES ($1, $2, $3)"
},
"f7d2dae84613bcef330f7403352cc96547f3f6dbec11bf2eadfaf53ad8ab51b5": {
"describe": {
"columns": [
{
"name": "network_key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT network_key FROM account WHERE id = 0"
},
"fe6e4f09f3028e5b6b6259e86cbad285680ce157aae9d7837ac020c8b2945e7f": {
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Int4"
},
{
"name": "password",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "tor_key",
"ordinal": 2,
"type_info": "Bytea"
},
{
"name": "server_id",
"ordinal": 3,
"type_info": "Text"
},
{
"name": "hostname",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "network_key",
"ordinal": 5,
"type_info": "Bytea"
},
{
"name": "root_ca_key_pem",
"ordinal": 6,
"type_info": "Text"
},
{
"name": "root_ca_cert_pem",
"ordinal": 7,
"type_info": "Text"
}
],
"nullable": [
false,
false,
true,
true,
true,
false,
false,
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT * FROM account WHERE id = 0"
}
}
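
The block above is sqlx's offline query cache (sqlx-data.json, generated by `cargo sqlx prepare`): each entry is keyed by a hash of the SQL text and records the parameter and column types the query! macros were checked against, so the backend compiles without a live Postgres. A sketch of how one entry is consumed, assuming offline mode (SQLX_OFFLINE=1) and a Postgres connection handle `conn`:

    // type-checked at compile time against the cache entry whose "query"
    // field matches this string ("SELECT password FROM account")
    let row = sqlx::query!("SELECT password FROM account")
        .fetch_one(conn.as_mut())
        .await?;
    let hash: &str = &row.password; // column 0: "password", Text, non-nullable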

View File

@@ -1,5 +1,6 @@
use ed25519_dalek::{ExpandedSecretKey, SecretKey};
use models::ResultExt;
use std::time::SystemTime;
use ed25519_dalek::SecretKey;
use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
use sqlx::PgExecutor;
@@ -7,13 +8,14 @@ use sqlx::PgExecutor;
use crate::hostname::{generate_hostname, generate_id, Hostname};
use crate::net::keys::Key;
use crate::net::ssl::{generate_key, make_root_cert};
use crate::Error;
use crate::prelude::*;
use crate::util::crypto::ed25519_expand_key;
fn hash_password(password: &str) -> Result<String, Error> {
argon2::hash_encoded(
password.as_bytes(),
&rand::random::<[u8; 16]>()[..],
&argon2::Config::default(),
&argon2::Config::rfc9106_low_mem(),
)
.with_kind(crate::ErrorKind::PasswordHashGeneration)
}
@@ -28,11 +30,11 @@ pub struct AccountInfo {
pub root_ca_cert: X509,
}
impl AccountInfo {
pub fn new(password: &str) -> Result<Self, Error> {
pub fn new(password: &str, start_time: SystemTime) -> Result<Self, Error> {
let server_id = generate_id();
let hostname = generate_hostname();
let root_ca_key = generate_key()?;
let root_ca_cert = make_root_cert(&root_ca_key, &hostname)?;
let root_ca_cert = make_root_cert(&root_ca_key, &hostname, start_time)?;
Ok(Self {
server_id,
hostname,
@@ -51,13 +53,23 @@ impl AccountInfo {
let server_id = r.server_id.unwrap_or_else(generate_id);
let hostname = r.hostname.map(Hostname).unwrap_or_else(generate_hostname);
let password = r.password;
let network_key = SecretKey::from_bytes(&r.network_key)?;
let network_key = SecretKey::try_from(r.network_key).map_err(|e| {
Error::new(
eyre!("expected vec of len 32, got len {}", e.len()),
ErrorKind::ParseDbField,
)
})?;
let tor_key = if let Some(k) = &r.tor_key {
ExpandedSecretKey::from_bytes(k)?
<[u8; 64]>::try_from(&k[..]).map_err(|_| {
Error::new(
eyre!("expected vec of len 64, got len {}", k.len()),
ErrorKind::ParseDbField,
)
})?
} else {
ExpandedSecretKey::from(&network_key)
ed25519_expand_key(&network_key)
};
let key = Key::from_pair(None, network_key.to_bytes(), tor_key.to_bytes());
let key = Key::from_pair(None, network_key, tor_key);
let root_ca_key = PKey::private_key_from_pem(r.root_ca_key_pem.as_bytes())?;
let root_ca_cert = X509::from_pem(r.root_ca_cert_pem.as_bytes())?;
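
The hashing call sites in this changeset move from argon2::Config::default() to the RFC 9106 low-memory Argon2id profile. A minimal round-trip sketch, assuming the rust-argon2 crate; because the cost parameters travel inside the encoded string, hashes minted under the old default config still verify:

    fn hash_password(password: &str) -> Result<String, argon2::Error> {
        argon2::hash_encoded(
            password.as_bytes(),
            &rand::random::<[u8; 16]>()[..],    // fresh 16-byte salt
            &argon2::Config::rfc9106_low_mem(), // Argon2id, RFC 9106 low-memory profile
        )
    }

    fn verify(encoded: &str, password: &str) -> Result<bool, argon2::Error> {
        argon2::verify_encoded(encoded, password.as_bytes()) // params read from `encoded`
    }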

View File

@@ -11,6 +11,7 @@ use tracing::instrument;
use crate::config::{Config, ConfigSpec};
use crate::context::RpcContext;
use crate::prelude::*;
use crate::procedure::docker::DockerContainers;
use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
@@ -59,13 +60,13 @@ impl Action {
#[instrument(skip_all)]
pub fn validate(
&self,
container: &Option<DockerContainers>,
_container: &Option<DockerContainers>,
eos_version: &Version,
volumes: &Volumes,
image_ids: &BTreeSet<ImageId>,
) -> Result<(), Error> {
self.implementation
.validate(container, eos_version, volumes, image_ids, true)
.validate(eos_version, volumes, image_ids, true)
.with_ctx(|_| {
(
crate::ErrorKind::ValidateS9pk,
@@ -130,18 +131,17 @@ pub async fn action(
#[arg(long = "format")]
format: Option<IoFormat>,
) -> Result<ActionResult, Error> {
let mut db = ctx.db.handle();
let manifest = crate::db::DatabaseModel::new()
.package_data()
.idx_model(&pkg_id)
.and_then(|p| p.installed())
.expect(&mut db)
let manifest = ctx
.db
.peek()
.await
.with_kind(crate::ErrorKind::NotFound)?
.manifest()
.get(&mut db)
.await?
.to_owned();
.as_package_data()
.as_idx(&pkg_id)
.or_not_found(&pkg_id)?
.as_installed()
.or_not_found(&pkg_id)?
.as_manifest()
.de()?;
if let Some(action) = manifest.actions.0.get(&action_id) {
action
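
The rewrite above is the pattern that recurs through the rest of this diff: the lock-based handle API (ctx.db.handle() plus .get()/.put() on model paths) is replaced by a point-in-time peek() for reads and a mutate() closure for writes. Condensed into a sketch using only accessors that appear in these hunks:

    // read: walk typed accessors over an immutable snapshot, then deserialize
    let manifest = ctx.db.peek().await
        .as_package_data()
        .as_idx(&pkg_id)
        .or_not_found(&pkg_id)?
        .as_installed()
        .or_not_found(&pkg_id)?
        .as_manifest()
        .de()?;

    // write: all mutations for one logical change happen inside one closure
    ctx.db
        .mutate(|db| db.as_server_info_mut().as_last_backup_mut().ser(&timestamp))
        .await?;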

View File

@@ -5,7 +5,6 @@ use chrono::{DateTime, Utc};
use clap::ArgMatches;
use color_eyre::eyre::eyre;
use josekit::jwk::Jwk;
use patch_db::{DbHandle, LockReceipt};
use rpc_toolkit::command;
use rpc_toolkit::command_helpers::prelude::{RequestParts, ResponseParts};
use rpc_toolkit::yajrc::RpcError;
@@ -17,6 +16,7 @@ use tracing::instrument;
use crate::context::{CliContext, RpcContext};
use crate::middleware::auth::{AsLogoutSessionId, HasLoggedOutSessions, HashSessionToken};
use crate::middleware::encrypt::EncryptedWire;
use crate::prelude::*;
use crate::util::display_none;
use crate::util::serde::{display_serializable, IoFormat};
use crate::{ensure_code, Error, ResultExt};
@@ -84,7 +84,7 @@ fn gen_pwd() {
argon2::hash_encoded(
b"testing1234",
&rand::random::<[u8; 16]>()[..],
&argon2::Config::default()
&argon2::Config::rfc9106_low_mem()
)
.unwrap()
)
@@ -160,7 +160,7 @@ pub async fn login(
) -> Result<(), Error> {
let password = password.unwrap_or_default().decrypt(&ctx)?;
let mut handle = ctx.secret_store.acquire().await?;
check_password_against_db(&mut handle, &password).await?;
check_password_against_db(handle.as_mut(), &password).await?;
let hash_token = HashSessionToken::new();
let user_agent = req.headers.get("user-agent").and_then(|h| h.to_str().ok());
@@ -172,7 +172,7 @@ pub async fn login(
user_agent,
metadata,
)
.execute(&mut handle)
.execute(handle.as_mut())
.await?;
res.headers.insert(
"set-cookie",
@@ -263,7 +263,7 @@ pub async fn list(
sessions: sqlx::query!(
"SELECT * FROM session WHERE logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP"
)
.fetch_all(&mut ctx.secret_store.acquire().await?)
.fetch_all(ctx.secret_store.acquire().await?.as_mut())
.await?
.into_iter()
.map(|row| {
@@ -343,27 +343,6 @@ async fn cli_reset_password(
Ok(())
}
pub struct SetPasswordReceipt(LockReceipt<String, ()>);
impl SetPasswordReceipt {
pub async fn new<Db: DbHandle>(db: &mut Db) -> Result<Self, Error> {
let mut locks = Vec::new();
let setup = Self::setup(&mut locks);
Ok(setup(&db.lock_all(locks).await?)?)
}
pub fn setup(
locks: &mut Vec<patch_db::LockTargetId>,
) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
let password_hash = crate::db::DatabaseModel::new()
.server_info()
.password_hash()
.make_locker(patch_db::LockType::Write)
.add_to_keys(locks);
move |skeleton_key| Ok(Self(password_hash.verify(skeleton_key)?))
}
}
#[command(
rename = "reset-password",
custom_cli(cli_reset_password(async, context(CliContext))),
@@ -389,13 +368,14 @@ pub async fn reset_password(
}
account.set_password(&new_password)?;
account.save(&ctx.secret_store).await?;
crate::db::DatabaseModel::new()
.server_info()
.password_hash()
.put(&mut ctx.db.handle(), &account.password)
.await?;
Ok(())
let account_password = &account.password;
ctx.db
.mutate(|d| {
d.as_server_info_mut()
.as_password_hash_mut()
.ser(account_password)
})
.await
}
#[command(
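
The other mechanical change running through this file (and the backup code below) is that sqlx executors are now passed as handle.as_mut() rather than &mut handle. A sketch of the new shape, assuming a sqlx 0.7-era API where &mut PoolConnection no longer implements Executor directly:

    let mut conn = ctx.secret_store.acquire().await?; // PoolConnection<Postgres>
    // as_mut() borrows the inner PgConnection, which is the Executor
    check_password_against_db(conn.as_mut(), &password).await?;
    sqlx::query!(
        "UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = $1",
        session_id, // hypothetical binding for the session being revoked
    )
    .execute(conn.as_mut())
    .await?;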

View File

@@ -1,13 +1,17 @@
use std::collections::{BTreeMap, BTreeSet};
use std::path::PathBuf;
use std::collections::BTreeMap;
use std::panic::UnwindSafe;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use chrono::Utc;
use clap::ArgMatches;
use color_eyre::eyre::eyre;
use helpers::AtomicFile;
use patch_db::{DbHandle, LockType, PatchDbHandle};
use imbl::OrdSet;
use models::Version;
use rpc_toolkit::command;
use tokio::io::AsyncWriteExt;
use tokio::sync::Mutex;
use tracing::instrument;
use super::target::BackupTargetId;
@@ -17,25 +21,27 @@ use crate::backup::os::OsBackup;
use crate::backup::{BackupReport, ServerBackupReport};
use crate::context::RpcContext;
use crate::db::model::BackupProgress;
use crate::db::package::get_packages;
use crate::disk::mount::backup::BackupMountGuard;
use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::TmpMountGuard;
use crate::manager::BackupReturn;
use crate::notifications::NotificationLevel;
use crate::prelude::*;
use crate::s9pk::manifest::PackageId;
use crate::status::MainStatus;
use crate::util::display_none;
use crate::util::io::dir_copy;
use crate::util::serde::IoFormat;
use crate::version::VersionT;
use crate::{Error, ErrorKind, ResultExt};
fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<BTreeSet<PackageId>, Error> {
fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<OrdSet<PackageId>, Error> {
arg.split(',')
.map(|s| s.trim().parse().map_err(Error::from))
.map(|s| s.trim().parse::<PackageId>().map_err(Error::from))
.collect()
}
#[command(rename = "create", display(display_none))]
#[instrument(skip_all)]
#[instrument(skip(ctx, old_password, password))]
pub async fn backup_all(
#[context] ctx: RpcContext,
#[arg(rename = "target-id")] target_id: BackupTargetId,
@@ -47,55 +53,52 @@ pub async fn backup_all(
long = "package-ids",
parse(parse_comma_separated)
)]
package_ids: Option<BTreeSet<PackageId>>,
package_ids: Option<OrdSet<PackageId>>,
#[arg] password: crate::auth::PasswordType,
) -> Result<(), Error> {
let mut db = ctx.db.handle();
let db = ctx.db.peek().await;
let old_password_decrypted = old_password
.as_ref()
.unwrap_or(&password)
.clone()
.decrypt(&ctx)?;
let password = password.decrypt(&ctx)?;
check_password_against_db(&mut ctx.secret_store.acquire().await?, &password).await?;
check_password_against_db(ctx.secret_store.acquire().await?.as_mut(), &password).await?;
let fs = target_id
.load(&mut ctx.secret_store.acquire().await?)
.load(ctx.secret_store.acquire().await?.as_mut())
.await?;
let mut backup_guard = BackupMountGuard::mount(
TmpMountGuard::mount(&fs, ReadWrite).await?,
&old_password_decrypted,
)
.await?;
let all_packages = crate::db::DatabaseModel::new()
.package_data()
.get(&mut db)
.await?
.0
.keys()
.into_iter()
.cloned()
.collect();
let package_ids = package_ids.unwrap_or(all_packages);
let package_ids = if let Some(ids) = package_ids {
ids.into_iter()
.flat_map(|package_id| {
let version = db
.as_package_data()
.as_idx(&package_id)?
.as_manifest()
.as_version()
.de()
.ok()?;
Some((package_id, version))
})
.collect()
} else {
get_packages(db.clone())?.into_iter().collect()
};
if old_password.is_some() {
backup_guard.change_password(&password)?;
}
assure_backing_up(&mut db, &package_ids).await?;
assure_backing_up(&ctx.db, &package_ids).await?;
tokio::task::spawn(async move {
let backup_res = perform_backup(&ctx, &mut db, backup_guard, &package_ids).await;
let backup_progress = crate::db::DatabaseModel::new()
.server_info()
.status_info()
.backup_progress();
backup_progress
.clone()
.lock(&mut db, LockType::Write)
.await
.expect("failed to lock server status");
let backup_res = perform_backup(&ctx, backup_guard, &package_ids).await;
match backup_res {
Ok(report) if report.iter().all(|(_, rep)| rep.error.is_none()) => ctx
.notification_manager
.notify(
&mut db,
ctx.db.clone(),
None,
NotificationLevel::Success,
"Backup Complete".to_owned(),
@@ -105,7 +108,10 @@ pub async fn backup_all(
attempted: true,
error: None,
},
packages: report,
packages: report
.into_iter()
.map(|((package_id, _), value)| (package_id, value))
.collect(),
},
None,
)
@@ -114,7 +120,7 @@ pub async fn backup_all(
Ok(report) => ctx
.notification_manager
.notify(
&mut db,
ctx.db.clone(),
None,
NotificationLevel::Warning,
"Backup Complete".to_owned(),
@@ -124,7 +130,10 @@ pub async fn backup_all(
attempted: true,
error: None,
},
packages: report,
packages: report
.into_iter()
.map(|((package_id, _), value)| (package_id, value))
.collect(),
},
None,
)
@@ -135,7 +144,7 @@ pub async fn backup_all(
tracing::debug!("{:?}", e);
ctx.notification_manager
.notify(
&mut db,
ctx.db.clone(),
None,
NotificationLevel::Error,
"Backup Failed".to_owned(),
@@ -153,196 +162,113 @@ pub async fn backup_all(
.expect("failed to send notification");
}
}
backup_progress
.delete(&mut db)
.await
.expect("failed to change server status");
ctx.db
.mutate(|v| {
v.as_server_info_mut()
.as_status_info_mut()
.as_backup_progress_mut()
.ser(&None)
})
.await?;
Ok::<(), Error>(())
});
Ok(())
}
#[instrument(skip_all)]
#[instrument(skip(db, packages))]
async fn assure_backing_up(
db: &mut PatchDbHandle,
packages: impl IntoIterator<Item = &PackageId>,
db: &PatchDb,
packages: impl IntoIterator<Item = &(PackageId, Version)> + UnwindSafe + Send,
) -> Result<(), Error> {
let mut tx = db.begin().await?;
let mut backing_up = crate::db::DatabaseModel::new()
.server_info()
.status_info()
.backup_progress()
.get_mut(&mut tx)
.await?;
if backing_up
.iter()
.flat_map(|x| x.values())
.fold(false, |acc, x| {
if !x.complete {
return true;
}
acc
})
{
return Err(Error::new(
eyre!("Server is already backing up!"),
crate::ErrorKind::InvalidRequest,
));
}
*backing_up = Some(
packages
.into_iter()
.map(|x| (x.clone(), BackupProgress { complete: false }))
.collect(),
);
backing_up.save(&mut tx).await?;
tx.commit().await?;
Ok(())
db.mutate(|v| {
let backing_up = v
.as_server_info_mut()
.as_status_info_mut()
.as_backup_progress_mut();
if backing_up
.clone()
.de()?
.iter()
.flat_map(|x| x.values())
.fold(false, |acc, x| {
if !x.complete {
return true;
}
acc
})
{
return Err(Error::new(
eyre!("Server is already backing up!"),
ErrorKind::InvalidRequest,
));
}
backing_up.ser(&Some(
packages
.into_iter()
.map(|(x, _)| (x.clone(), BackupProgress { complete: false }))
.collect(),
))?;
Ok(())
})
.await
}
#[instrument(skip_all)]
async fn perform_backup<Db: DbHandle>(
#[instrument(skip(ctx, backup_guard))]
async fn perform_backup(
ctx: &RpcContext,
mut db: Db,
mut backup_guard: BackupMountGuard<TmpMountGuard>,
package_ids: &BTreeSet<PackageId>,
) -> Result<BTreeMap<PackageId, PackageBackupReport>, Error> {
backup_guard: BackupMountGuard<TmpMountGuard>,
package_ids: &OrdSet<(PackageId, Version)>,
) -> Result<BTreeMap<(PackageId, Version), PackageBackupReport>, Error> {
let mut backup_report = BTreeMap::new();
for package_id in crate::db::DatabaseModel::new()
.package_data()
.keys(&mut db)
.await?
.into_iter()
.filter(|id| package_ids.contains(id))
{
let mut tx = db.begin().await?; // for lock scope
let installed_model = if let Some(installed_model) = crate::db::DatabaseModel::new()
.package_data()
.idx_model(&package_id)
.and_then(|m| m.installed())
.check(&mut tx)
.await?
{
installed_model
} else {
continue;
};
let main_status_model = installed_model.clone().status().main();
let backup_guard = Arc::new(Mutex::new(backup_guard));
main_status_model.lock(&mut tx, LockType::Write).await?;
let (started, health) = match main_status_model.get(&mut tx).await?.into_owned() {
MainStatus::Starting { .. } => (Some(Utc::now()), Default::default()),
MainStatus::Running { started, health } => (Some(started), health.clone()),
MainStatus::Stopped | MainStatus::Stopping | MainStatus::Restarting => {
(None, Default::default())
for package_id in package_ids {
let (response, _report) = match ctx
.managers
.get(package_id)
.await
.ok_or_else(|| Error::new(eyre!("Manager not found"), ErrorKind::InvalidRequest))?
.backup(backup_guard.clone())
.await
{
BackupReturn::Ran { report, res } => (res, report),
BackupReturn::AlreadyRunning(report) => {
backup_report.insert(package_id.clone(), report);
continue;
}
MainStatus::BackingUp { .. } => {
BackupReturn::Error(error) => {
tracing::warn!("Backup thread error");
tracing::debug!("{error:?}");
backup_report.insert(
package_id,
package_id.clone(),
PackageBackupReport {
error: Some(
"Can't do backup because service is in a backing up state".to_owned(),
),
error: Some("Backup thread error".to_owned()),
},
);
continue;
}
};
main_status_model
.put(
&mut tx,
&MainStatus::BackingUp {
started,
health: health.clone(),
},
)
.await?;
tx.save().await?; // drop locks
let manifest = installed_model.clone().manifest().get(&mut db).await?;
ctx.managers
.get(&(manifest.id.clone(), manifest.version.clone()))
.await
.ok_or_else(|| {
Error::new(eyre!("Manager not found"), crate::ErrorKind::InvalidRequest)
})?
.synchronize()
.await;
let mut tx = db.begin().await?;
installed_model.lock(&mut tx, LockType::Write).await?;
let guard = backup_guard.mount_package_backup(&package_id).await?;
let res = manifest
.backup
.create(
ctx,
&mut tx,
&package_id,
&manifest.title,
&manifest.version,
&manifest.interfaces,
&manifest.volumes,
)
.await;
guard.unmount().await?;
backup_report.insert(
package_id.clone(),
PackageBackupReport {
error: res.as_ref().err().map(|e| e.to_string()),
error: response.as_ref().err().map(|e| e.to_string()),
},
);
if let Ok(pkg_meta) = res {
installed_model
.last_backup()
.put(&mut tx, &Some(pkg_meta.timestamp))
.await?;
if let Ok(pkg_meta) = response {
backup_guard
.lock()
.await
.metadata
.package_backups
.insert(package_id.clone(), pkg_meta);
.insert(package_id.0.clone(), pkg_meta);
}
main_status_model
.put(
&mut tx,
&match started {
Some(started) => MainStatus::Running { started, health },
None => MainStatus::Stopped,
},
)
.await?;
let mut backup_progress = crate::db::DatabaseModel::new()
.server_info()
.status_info()
.backup_progress()
.get_mut(&mut tx)
.await?;
if backup_progress.is_none() {
*backup_progress = Some(Default::default());
}
if let Some(mut backup_progress) = backup_progress
.as_mut()
.and_then(|bp| bp.get_mut(&package_id))
{
(*backup_progress).complete = true;
}
backup_progress.save(&mut tx).await?;
tx.save().await?;
}
let ui = crate::db::DatabaseModel::new()
.ui()
.get(&mut db)
.await?
.into_owned();
let ui = ctx.db.peek().await.into_ui().de()?;
let mut os_backup_file = AtomicFile::new(
backup_guard.as_ref().join("os-backup.cbor"),
backup_guard.lock().await.as_ref().join("os-backup.cbor"),
None::<PathBuf>,
)
.await
@@ -358,7 +284,28 @@ async fn perform_backup<Db: DbHandle>(
.await
.with_kind(ErrorKind::Filesystem)?;
let luks_folder_old = backup_guard.lock().await.as_ref().join("luks.old");
if tokio::fs::metadata(&luks_folder_old).await.is_ok() {
tokio::fs::remove_dir_all(&luks_folder_old).await?;
}
let luks_folder_bak = backup_guard.lock().await.as_ref().join("luks");
if tokio::fs::metadata(&luks_folder_bak).await.is_ok() {
tokio::fs::rename(&luks_folder_bak, &luks_folder_old).await?;
}
let luks_folder = Path::new("/media/embassy/config/luks");
if tokio::fs::metadata(&luks_folder).await.is_ok() {
dir_copy(&luks_folder, &luks_folder_bak, None).await?;
}
let timestamp = Some(Utc::now());
let mut backup_guard = Arc::try_unwrap(backup_guard)
.map_err(|_err| {
Error::new(
eyre!("Backup guard could not ensure that the others where dropped"),
ErrorKind::Unknown,
)
})?
.into_inner();
backup_guard.unencrypted_metadata.version = crate::version::Current::new().semver().into();
backup_guard.unencrypted_metadata.full = true;
@@ -367,10 +314,9 @@ async fn perform_backup<Db: DbHandle>(
backup_guard.save_and_unmount().await?;
crate::db::DatabaseModel::new()
.server_info()
.last_backup()
.put(&mut db, &timestamp)
ctx.db
.mutate(|v| v.as_server_info_mut().as_last_backup_mut().ser(&timestamp))
.await?;
Ok(backup_report)
}
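
Since each package's backup now runs through its manager, the mount guard is shared rather than threaded mutably through a loop, and the teardown has to reclaim sole ownership before unmounting. A sketch of that lifecycle, assuming tokio's Mutex and mirroring the Arc::try_unwrap above:

    let backup_guard = Arc::new(Mutex::new(backup_guard));
    // ... a clone is handed to each package manager's backup() ...
    // once every per-package clone has been dropped, unwrap back to the
    // owned guard so it can be saved and unmounted:
    let mut backup_guard = Arc::try_unwrap(backup_guard)
        .map_err(|_| {
            // a surviving clone here means some backup task leaked its guard
            Error::new(eyre!("backup guard still shared"), ErrorKind::Unknown)
        })?
        .into_inner();
    backup_guard.save_and_unmount().await?;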

View File

@@ -1,11 +1,11 @@
use std::collections::{BTreeMap, BTreeSet};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use chrono::{DateTime, Utc};
use color_eyre::eyre::eyre;
use helpers::AtomicFile;
use models::ImageId;
use patch_db::{DbHandle, HasModel};
use models::{ImageId, OptionExt};
use reqwest::Url;
use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
@@ -15,10 +15,11 @@ use tracing::instrument;
use self::target::PackageBackupInfo;
use crate::context::RpcContext;
use crate::dependencies::reconfigure_dependents_with_live_pointers;
use crate::install::PKG_ARCHIVE_DIR;
use crate::net::interface::{InterfaceId, Interfaces};
use crate::manager::manager_seed::ManagerSeed;
use crate::net::interface::InterfaceId;
use crate::net::keys::Key;
use crate::prelude::*;
use crate::procedure::docker::DockerContainers;
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
@@ -47,7 +48,7 @@ pub struct ServerBackupReport {
#[derive(Debug, Deserialize, Serialize)]
pub struct PackageBackupReport {
error: Option<String>,
pub error: Option<String>,
}
#[command(subcommands(backup_bulk::backup_all, target::target))]
@@ -71,6 +72,7 @@ struct BackupMetadata {
}
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
#[model = "Model<Self>"]
pub struct BackupActions {
pub create: PackageProcedure,
pub restore: PackageProcedure,
@@ -78,34 +80,29 @@ pub struct BackupActions {
impl BackupActions {
pub fn validate(
&self,
container: &Option<DockerContainers>,
_container: &Option<DockerContainers>,
eos_version: &Version,
volumes: &Volumes,
image_ids: &BTreeSet<ImageId>,
) -> Result<(), Error> {
self.create
.validate(container, eos_version, volumes, image_ids, false)
.validate(eos_version, volumes, image_ids, false)
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Backup Create"))?;
self.restore
.validate(container, eos_version, volumes, image_ids, false)
.validate(eos_version, volumes, image_ids, false)
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Backup Restore"))?;
Ok(())
}
#[instrument(skip_all)]
pub async fn create<Db: DbHandle>(
&self,
ctx: &RpcContext,
db: &mut Db,
pkg_id: &PackageId,
pkg_title: &str,
pkg_version: &Version,
interfaces: &Interfaces,
volumes: &Volumes,
) -> Result<PackageBackupInfo, Error> {
let mut volumes = volumes.to_readonly();
pub async fn create(&self, seed: Arc<ManagerSeed>) -> Result<PackageBackupInfo, Error> {
let manifest = &seed.manifest;
let mut volumes = seed.manifest.volumes.to_readonly();
let ctx = &seed.ctx;
let pkg_id = &manifest.id;
let pkg_version = &manifest.version;
volumes.insert(VolumeId::Backup, Volume::Backup { readonly: false });
let backup_dir = backup_dir(pkg_id);
let backup_dir = backup_dir(&manifest.id);
if tokio::fs::metadata(&backup_dir).await.is_err() {
tokio::fs::create_dir_all(&backup_dir).await?
}
@@ -122,29 +119,29 @@ impl BackupActions {
.await?
.map_err(|e| eyre!("{}", e.1))
.with_kind(crate::ErrorKind::Backup)?;
let (network_keys, tor_keys) = Key::for_package(&ctx.secret_store, pkg_id)
.await?
.into_iter()
.filter_map(|k| {
let interface = k.interface().map(|(_, i)| i)?;
Some((
(interface.clone(), Base64(k.as_bytes())),
(interface, Base32(k.tor_key().as_bytes())),
))
})
.unzip();
let marketplace_url = crate::db::DatabaseModel::new()
.package_data()
.idx_model(pkg_id)
.expect(db)
.await?
.installed()
.expect(db)
.await?
.marketplace_url()
.get(db)
.await?
.into_owned();
let (network_keys, tor_keys): (Vec<_>, Vec<_>) =
Key::for_package(&ctx.secret_store, pkg_id)
.await?
.into_iter()
.filter_map(|k| {
let interface = k.interface().map(|(_, i)| i)?;
Some((
(interface.clone(), Base64(k.as_bytes())),
(interface, Base32(k.tor_key().as_bytes())),
))
})
.unzip();
let marketplace_url = ctx
.db
.peek()
.await
.as_package_data()
.as_idx(&pkg_id)
.or_not_found(pkg_id)?
.expect_as_installed()?
.as_installed()
.as_marketplace_url()
.de()?;
let tmp_path = Path::new(BACKUP_DIR)
.join(pkg_id)
.join(format!("{}.s9pk", pkg_id));
@@ -172,6 +169,8 @@ impl BackupActions {
let mut outfile = AtomicFile::new(&metadata_path, None::<PathBuf>)
.await
.with_kind(ErrorKind::Filesystem)?;
let network_keys = network_keys.into_iter().collect();
let tor_keys = tor_keys.into_iter().collect();
outfile
.write_all(&IoFormat::Cbor.to_vec(&BackupMetadata {
timestamp,
@@ -183,22 +182,20 @@ impl BackupActions {
outfile.save().await.with_kind(ErrorKind::Filesystem)?;
Ok(PackageBackupInfo {
os_version: Current::new().semver().into(),
title: pkg_title.to_owned(),
title: manifest.title.clone(),
version: pkg_version.clone(),
timestamp,
})
}
#[instrument(skip_all)]
pub async fn restore<Db: DbHandle>(
pub async fn restore(
&self,
ctx: &RpcContext,
db: &mut Db,
pkg_id: &PackageId,
pkg_version: &Version,
interfaces: &Interfaces,
volumes: &Volumes,
) -> Result<(), Error> {
) -> Result<Option<Url>, Error> {
let mut volumes = volumes.clone();
volumes.insert(VolumeId::Backup, Volume::Backup { readonly: true });
self.restore
@@ -223,32 +220,7 @@ impl BackupActions {
)
})?,
)?;
let pde = crate::db::DatabaseModel::new()
.package_data()
.idx_model(pkg_id)
.expect(db)
.await?
.installed()
.expect(db)
.await?;
pde.marketplace_url()
.put(db, &metadata.marketplace_url)
.await?;
let entry = crate::db::DatabaseModel::new()
.package_data()
.idx_model(pkg_id)
.expect(db)
.await?
.installed()
.expect(db)
.await?
.get(db)
.await?;
let receipts = crate::config::ConfigReceipts::new(db).await?;
reconfigure_dependents_with_live_pointers(ctx, db, &receipts, &entry).await?;
Ok(())
Ok(metadata.marketplace_url)
}
}
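
Note the signature change: restore() now returns Result<Option<Url>, Error> instead of writing the recovered marketplace URL to the database itself, so the caller can fold that write into its own mutate() transaction. A hedged caller-side sketch, with persist_marketplace_url standing in for whatever accessor path the caller actually uses:

    if let Some(marketplace_url) = backup_actions.restore(/* ... */).await? {
        ctx.db
            .mutate(|db| persist_marketplace_url(db, &pkg_id, &marketplace_url))
            .await?;
    }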

View File

@@ -1,12 +1,13 @@
use openssl::pkey::PKey;
use openssl::x509::X509;
use patch_db::Value;
use serde::{Deserialize, Serialize};
use crate::account::AccountInfo;
use crate::hostname::{generate_hostname, generate_id, Hostname};
use crate::net::keys::Key;
use crate::prelude::*;
use crate::util::serde::Base64;
use crate::Error;
use openssl::pkey::PKey;
use openssl::x509::X509;
use serde::{Deserialize, Serialize};
use serde_json::Value;
pub struct OsBackup {
pub account: AccountInfo,
@@ -19,11 +20,11 @@ impl<'de> Deserialize<'de> for OsBackup {
{
let tagged = OsBackupSerDe::deserialize(deserializer)?;
match tagged.version {
0 => serde_json::from_value::<OsBackupV0>(tagged.rest)
0 => patch_db::value::from_value::<OsBackupV0>(tagged.rest)
.map_err(serde::de::Error::custom)?
.project()
.map_err(serde::de::Error::custom),
1 => serde_json::from_value::<OsBackupV1>(tagged.rest)
1 => patch_db::value::from_value::<OsBackupV1>(tagged.rest)
.map_err(serde::de::Error::custom)?
.project()
.map_err(serde::de::Error::custom),
@@ -40,7 +41,7 @@ impl Serialize for OsBackup {
{
OsBackupSerDe {
version: 1,
rest: serde_json::to_value(
rest: patch_db::value::to_value(
&OsBackupV1::unproject(self).map_err(serde::ser::Error::custom)?,
)
.map_err(serde::ser::Error::custom)?,
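
OsBackup keeps its version-tagged envelope; only the intermediate value type changes from serde_json::Value to patch_db's. The shape of the pattern, sketched with plain serde_json and illustrative field names:

    use serde::Deserialize;
    use serde_json::Value;

    #[derive(Deserialize)]
    struct Envelope {
        version: u32,
        #[serde(flatten)]
        rest: Value, // everything except the tag; decoded per-version below
    }

    #[derive(Deserialize)]
    struct BackupV0 { hostname: String }          // historical shape (illustrative)
    #[derive(Deserialize)]
    struct BackupV1 { hostname: String, id: u64 } // current shape (illustrative)

    fn decode(raw: &str) -> Result<BackupV1, Box<dyn std::error::Error>> {
        let env: Envelope = serde_json::from_str(raw)?;
        match env.version {
            0 => {
                let v0: BackupV0 = serde_json::from_value(env.rest)?;
                Ok(BackupV1 { hostname: v0.hostname, id: 0 }) // project forward
            }
            1 => Ok(serde_json::from_value(env.rest)?),
            v => Err(format!("unsupported os-backup version {v}").into()),
        }
    }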

View File

@@ -5,11 +5,9 @@ use std::sync::Arc;
use std::time::Duration;
use clap::ArgMatches;
use color_eyre::eyre::eyre;
use futures::{future::BoxFuture, stream};
use futures::{FutureExt, StreamExt};
use futures::future::BoxFuture;
use futures::{stream, FutureExt, StreamExt};
use openssl::x509::X509;
use patch_db::{DbHandle, PatchDbHandle};
use rpc_toolkit::command;
use sqlx::Connection;
use tokio::fs::File;
@@ -21,7 +19,7 @@ use crate::backup::os::OsBackup;
use crate::backup::BackupMetadata;
use crate::context::rpc::RpcContextConfig;
use crate::context::{RpcContext, SetupContext};
use crate::db::model::{PackageDataEntry, StaticFiles};
use crate::db::model::{PackageDataEntry, PackageDataEntryRestoring, StaticFiles};
use crate::disk::mount::backup::{BackupMountGuard, PackageBackupMountGuard};
use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::TmpMountGuard;
@@ -30,6 +28,7 @@ use crate::init::init;
use crate::install::progress::InstallProgress;
use crate::install::{download_install_s9pk, PKG_PUBLIC_DIR};
use crate::notifications::NotificationLevel;
use crate::prelude::*;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::s9pk::reader::S9pkReader;
use crate::setup::SetupStatus;
@@ -37,7 +36,6 @@ use crate::util::display_none;
use crate::util::io::dir_size;
use crate::util::serde::IoFormat;
use crate::volume::{backup_dir, BACKUP_DIR, PKG_VOLUME_DIR};
use crate::{Error, ResultExt};
fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<Vec<PackageId>, Error> {
arg.split(',')
@@ -46,33 +44,31 @@ fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<Vec<PackageId>, Er
}
#[command(rename = "restore", display(display_none))]
#[instrument(skip_all)]
#[instrument(skip(ctx, password))]
pub async fn restore_packages_rpc(
#[context] ctx: RpcContext,
#[arg(parse(parse_comma_separated))] ids: Vec<PackageId>,
#[arg(rename = "target-id")] target_id: BackupTargetId,
#[arg] password: String,
) -> Result<(), Error> {
let mut db = ctx.db.handle();
let fs = target_id
.load(&mut ctx.secret_store.acquire().await?)
.load(ctx.secret_store.acquire().await?.as_mut())
.await?;
let backup_guard =
BackupMountGuard::mount(TmpMountGuard::mount(&fs, ReadWrite).await?, &password).await?;
let (backup_guard, tasks, _) = restore_packages(&ctx, &mut db, backup_guard, ids).await?;
let (backup_guard, tasks, _) = restore_packages(&ctx, backup_guard, ids).await?;
tokio::spawn(async move {
stream::iter(tasks.into_iter().map(|x| (x, ctx.clone())))
.for_each_concurrent(5, |(res, ctx)| async move {
let mut db = ctx.db.handle();
match res.await {
(Ok(_), _) => (),
(Err(err), package_id) => {
if let Err(err) = ctx
.notification_manager
.notify(
&mut db,
ctx.db.clone(),
Some(package_id.clone()),
NotificationLevel::Error,
"Restoration Failure".to_string(),
@@ -109,7 +105,7 @@ async fn approximate_progress(
if tokio::fs::metadata(&dir).await.is_err() {
*size = 0;
} else {
*size = dir_size(&dir).await?;
*size = dir_size(&dir, None).await?;
}
}
Ok(())
@@ -169,7 +165,7 @@ impl ProgressInfo {
}
}
#[instrument(skip_all)]
#[instrument(skip(ctx))]
pub async fn recover_full_embassy(
ctx: SetupContext,
disk_guid: Arc<String>,
@@ -184,20 +180,18 @@ pub async fn recover_full_embassy(
.await?;
let os_backup_path = backup_guard.as_ref().join("os-backup.cbor");
let mut os_backup: OsBackup =
IoFormat::Cbor.from_slice(&tokio::fs::read(&os_backup_path).await.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
os_backup_path.display().to_string(),
)
})?)?;
let mut os_backup: OsBackup = IoFormat::Cbor.from_slice(
&tokio::fs::read(&os_backup_path)
.await
.with_ctx(|_| (ErrorKind::Filesystem, os_backup_path.display().to_string()))?,
)?;
os_backup.account.password = argon2::hash_encoded(
embassy_password.as_bytes(),
&rand::random::<[u8; 16]>()[..],
&argon2::Config::default(),
&argon2::Config::rfc9106_low_mem(),
)
.with_kind(crate::ErrorKind::PasswordHashGeneration)?;
.with_kind(ErrorKind::PasswordHashGeneration)?;
let secret_store = ctx.secret_store().await?;
@@ -211,27 +205,24 @@ pub async fn recover_full_embassy(
let rpc_ctx = RpcContext::init(ctx.config_path.clone(), disk_guid.clone()).await?;
let mut db = rpc_ctx.db.handle();
let ids = backup_guard
let ids: Vec<_> = backup_guard
.metadata
.package_backups
.keys()
.cloned()
.collect();
let (backup_guard, tasks, progress_info) =
restore_packages(&rpc_ctx, &mut db, backup_guard, ids).await?;
restore_packages(&rpc_ctx, backup_guard, ids).await?;
let task_consumer_rpc_ctx = rpc_ctx.clone();
tokio::select! {
_ = async move {
stream::iter(tasks.into_iter().map(|x| (x, task_consumer_rpc_ctx.clone())))
.for_each_concurrent(5, |(res, ctx)| async move {
let mut db = ctx.db.handle();
match res.await {
(Ok(_), _) => (),
(Err(err), package_id) => {
if let Err(err) = ctx.notification_manager.notify(
&mut db,
ctx.db.clone(),
Some(package_id.clone()),
NotificationLevel::Error,
"Restoration Failure".to_string(), format!("Error restoring package {}: {}", package_id,err), (), None).await{
@@ -261,9 +252,9 @@ pub async fn recover_full_embassy(
))
}
#[instrument(skip(ctx, backup_guard))]
async fn restore_packages(
ctx: &RpcContext,
db: &mut PatchDbHandle,
backup_guard: BackupMountGuard<TmpMountGuard>,
ids: Vec<PackageId>,
) -> Result<
@@ -274,7 +265,7 @@ async fn restore_packages(
),
Error,
> {
let guards = assure_restoring(ctx, db, ids, &backup_guard).await?;
let guards = assure_restoring(ctx, ids, &backup_guard).await?;
let mut progress_info = ProgressInfo::default();
@@ -282,10 +273,12 @@ async fn restore_packages(
for (manifest, guard) in guards {
let id = manifest.id.clone();
let (progress, task) = restore_package(ctx.clone(), manifest, guard).await?;
progress_info.package_installs.insert(id.clone(), progress);
progress_info
.package_installs
.insert(id.clone(), progress.clone());
progress_info
.src_volume_size
.insert(id.clone(), dir_size(backup_dir(&id)).await?);
.insert(id.clone(), dir_size(backup_dir(&id), None).await?);
progress_info.target_volume_size.insert(id.clone(), 0);
let package_id = id.clone();
tasks.push(
@@ -306,23 +299,20 @@ async fn restore_packages(
Ok((backup_guard, tasks, progress_info))
}
#[instrument(skip_all)]
#[instrument(skip(ctx, backup_guard))]
async fn assure_restoring(
ctx: &RpcContext,
db: &mut PatchDbHandle,
ids: Vec<PackageId>,
backup_guard: &BackupMountGuard<TmpMountGuard>,
) -> Result<Vec<(Manifest, PackageBackupMountGuard)>, Error> {
let mut tx = db.begin().await?;
let mut guards = Vec::with_capacity(ids.len());
let mut insert_packages = BTreeMap::new();
for id in ids {
let mut model = crate::db::DatabaseModel::new()
.package_data()
.idx_model(&id)
.get_mut(&mut tx)
.await?;
let peek = ctx.db.peek().await;
let model = peek.as_package_data().as_idx(&id);
if !model.is_none() {
return Err(Error::new(
@@ -330,14 +320,15 @@ async fn assure_restoring(
crate::ErrorKind::InvalidRequest,
));
}
let guard = backup_guard.mount_package_backup(&id).await?;
let s9pk_path = Path::new(BACKUP_DIR).join(&id).join(format!("{}.s9pk", id));
let mut rdr = S9pkReader::open(&s9pk_path, false).await?;
let manifest = rdr.manifest().await?;
let version = manifest.version.clone();
let progress = InstallProgress::new(Some(tokio::fs::metadata(&s9pk_path).await?.len()));
let progress = Arc::new(InstallProgress::new(Some(
tokio::fs::metadata(&s9pk_path).await?.len(),
)));
let public_dir_path = ctx
.datadir
@@ -361,22 +352,29 @@ async fn assure_restoring(
let mut dst = File::create(&icon_path).await?;
tokio::io::copy(&mut rdr.icon().await?, &mut dst).await?;
dst.sync_all().await?;
*model = Some(PackageDataEntry::Restoring {
install_progress: progress.clone(),
static_files: StaticFiles::local(&id, &version, manifest.assets.icon_type()),
manifest: manifest.clone(),
});
model.save(&mut tx).await?;
insert_packages.insert(
id.clone(),
PackageDataEntry::Restoring(PackageDataEntryRestoring {
install_progress: progress.clone(),
static_files: StaticFiles::local(&id, &version, manifest.assets.icon_type()),
manifest: manifest.clone(),
}),
);
guards.push((manifest, guard));
}
tx.commit().await?;
ctx.db
.mutate(|db| {
for (id, package) in insert_packages {
db.as_package_data_mut().insert(&id, &package)?;
}
Ok(())
})
.await?;
Ok(guards)
}
#[instrument(skip_all)]
#[instrument(skip(ctx, guard))]
async fn restore_package<'a>(
ctx: RpcContext,
manifest: Manifest,
@@ -388,13 +386,11 @@ async fn restore_package<'a>(
.join(format!("{}.s9pk", id));
let metadata_path = Path::new(BACKUP_DIR).join(&id).join("metadata.cbor");
let metadata: BackupMetadata =
IoFormat::Cbor.from_slice(&tokio::fs::read(&metadata_path).await.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
metadata_path.display().to_string(),
)
})?)?;
let metadata: BackupMetadata = IoFormat::Cbor.from_slice(
&tokio::fs::read(&metadata_path)
.await
.with_ctx(|_| (ErrorKind::Filesystem, metadata_path.display().to_string()))?,
)?;
let mut secrets = ctx.secret_store.acquire().await?;
let mut secrets_tx = secrets.begin().await?;
@@ -402,48 +398,59 @@ async fn restore_package<'a>(
let k = key.0.as_slice();
sqlx::query!(
"INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING",
*id,
*iface,
id.to_string(),
iface.to_string(),
k,
)
.execute(&mut secrets_tx).await?;
.execute(secrets_tx.as_mut()).await?;
}
// DEPRECATED
for (iface, key) in metadata.tor_keys {
let k = key.0.as_slice();
sqlx::query!(
"INSERT INTO tor (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING",
*id,
*iface,
id.to_string(),
iface.to_string(),
k,
)
.execute(&mut secrets_tx).await?;
.execute(secrets_tx.as_mut()).await?;
}
secrets_tx.commit().await?;
drop(secrets);
let len = tokio::fs::metadata(&s9pk_path)
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
s9pk_path.display().to_string(),
)
})?
.with_ctx(|_| (ErrorKind::Filesystem, s9pk_path.display().to_string()))?
.len();
let file = File::open(&s9pk_path).await.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
s9pk_path.display().to_string(),
)
})?;
let file = File::open(&s9pk_path)
.await
.with_ctx(|_| (ErrorKind::Filesystem, s9pk_path.display().to_string()))?;
let progress = InstallProgress::new(Some(len));
let marketplace_url = metadata.marketplace_url;
let progress = Arc::new(progress);
ctx.db
.mutate(|db| {
db.as_package_data_mut().insert(
&id,
&PackageDataEntry::Restoring(PackageDataEntryRestoring {
install_progress: progress.clone(),
static_files: StaticFiles::local(
&id,
&manifest.version,
manifest.assets.icon_type(),
),
manifest: manifest.clone(),
}),
)
})
.await?;
Ok((
progress.clone(),
async move {
download_install_s9pk(&ctx, &manifest, None, progress, file).await?;
download_install_s9pk(ctx, manifest, marketplace_url, progress, file, None).await?;
guard.unmount().await?;

View File

@@ -12,9 +12,9 @@ use crate::disk::mount::filesystem::cifs::Cifs;
use crate::disk::mount::filesystem::ReadOnly;
use crate::disk::mount::guard::TmpMountGuard;
use crate::disk::util::{recovery_info, EmbassyOsRecoveryInfo};
use crate::prelude::*;
use crate::util::display_none;
use crate::util::serde::KeyVal;
use crate::Error;
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
@@ -84,7 +84,7 @@ pub async fn update(
} else {
return Err(Error::new(
eyre!("Backup Target ID {} Not Found", id),
crate::ErrorKind::NotFound,
ErrorKind::NotFound,
));
};
let cifs = Cifs {
@@ -112,7 +112,7 @@ pub async fn update(
{
return Err(Error::new(
eyre!("Backup Target ID {} Not Found", BackupTargetId::Cifs { id }),
crate::ErrorKind::NotFound,
ErrorKind::NotFound,
));
};
Ok(KeyVal {
@@ -134,7 +134,7 @@ pub async fn remove(#[context] ctx: RpcContext, #[arg] id: BackupTargetId) -> Re
} else {
return Err(Error::new(
eyre!("Backup Target ID {} Not Found", id),
crate::ErrorKind::NotFound,
ErrorKind::NotFound,
));
};
if sqlx::query!("DELETE FROM cifs_shares WHERE id = $1", id)
@@ -145,7 +145,7 @@ pub async fn remove(#[context] ctx: RpcContext, #[arg] id: BackupTargetId) -> Re
{
return Err(Error::new(
eyre!("Backup Target ID {} Not Found", BackupTargetId::Cifs { id }),
crate::ErrorKind::NotFound,
ErrorKind::NotFound,
));
};
Ok(())

View File

@@ -11,6 +11,7 @@ use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use sqlx::{Executor, Postgres};
use tokio::sync::Mutex;
use tracing::instrument;
use self::cifs::CifsBackupTarget;
@@ -21,10 +22,10 @@ use crate::disk::mount::filesystem::cifs::Cifs;
use crate::disk::mount::filesystem::{FileSystem, MountType, ReadWrite};
use crate::disk::mount::guard::TmpMountGuard;
use crate::disk::util::PartitionInfo;
use crate::prelude::*;
use crate::s9pk::manifest::PackageId;
use crate::util::serde::{deserialize_from_str, display_serializable, serialize_display};
use crate::util::Version;
use crate::Error;
use crate::util::{display_none, Version};
pub mod cifs;
@@ -42,7 +43,7 @@ pub enum BackupTarget {
Cifs(CifsBackupTarget),
}
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
pub enum BackupTargetId {
Disk { logicalname: PathBuf },
Cifs { id: i32 },
@@ -71,14 +72,14 @@ impl std::fmt::Display for BackupTargetId {
impl std::str::FromStr for BackupTargetId {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.split_once("-") {
match s.split_once('-') {
Some(("disk", logicalname)) => Ok(BackupTargetId::Disk {
logicalname: Path::new(logicalname).to_owned(),
}),
Some(("cifs", id)) => Ok(BackupTargetId::Cifs { id: id.parse()? }),
_ => Err(Error::new(
eyre!("Invalid Backup Target ID"),
crate::ErrorKind::InvalidBackupTargetId,
ErrorKind::InvalidBackupTargetId,
)),
}
}
@@ -129,7 +130,7 @@ impl FileSystem for BackupTargetFS {
}
}
#[command(subcommands(cifs::cifs, list, info))]
#[command(subcommands(cifs::cifs, list, info, mount, umount))]
pub fn target() -> Result<(), Error> {
Ok(())
}
@@ -141,7 +142,7 @@ pub async fn list(
let mut sql_handle = ctx.secret_store.acquire().await?;
let (disks_res, cifs) = tokio::try_join!(
crate::disk::util::list(&ctx.os_partitions),
cifs::list(&mut sql_handle),
cifs::list(sql_handle.as_mut()),
)?;
Ok(disks_res
.into_iter()
@@ -212,7 +213,7 @@ fn display_backup_info(info: BackupInfo, matches: &ArgMatches) {
]);
for (id, info) in info.package_backups {
let row = row![
id.as_str(),
&*id,
info.version.as_str(),
info.os_version.as_str(),
&info.timestamp.to_string(),
@@ -223,7 +224,7 @@ fn display_backup_info(info: BackupInfo, matches: &ArgMatches) {
}
#[command(display(display_backup_info))]
#[instrument(skip_all)]
#[instrument(skip(ctx, password))]
pub async fn info(
#[context] ctx: RpcContext,
#[arg(rename = "target-id")] target_id: BackupTargetId,
@@ -232,7 +233,7 @@ pub async fn info(
let guard = BackupMountGuard::mount(
TmpMountGuard::mount(
&target_id
.load(&mut ctx.secret_store.acquire().await?)
.load(ctx.secret_store.acquire().await?.as_mut())
.await?,
ReadWrite,
)
@@ -247,3 +248,60 @@ pub async fn info(
Ok(res)
}
lazy_static::lazy_static! {
static ref USER_MOUNTS: Mutex<BTreeMap<BackupTargetId, BackupMountGuard<TmpMountGuard>>> =
Mutex::new(BTreeMap::new());
}
#[command]
#[instrument(skip_all)]
pub async fn mount(
#[context] ctx: RpcContext,
#[arg(rename = "target-id")] target_id: BackupTargetId,
#[arg] password: String,
) -> Result<String, Error> {
let mut mounts = USER_MOUNTS.lock().await;
if let Some(existing) = mounts.get(&target_id) {
return Ok(existing.as_ref().display().to_string());
}
let guard = BackupMountGuard::mount(
TmpMountGuard::mount(
&target_id
.clone()
.load(ctx.secret_store.acquire().await?.as_mut())
.await?,
ReadWrite,
)
.await?,
&password,
)
.await?;
let res = guard.as_ref().display().to_string();
mounts.insert(target_id, guard);
Ok(res)
}
#[command(display(display_none))]
#[instrument(skip_all)]
pub async fn umount(
#[context] _ctx: RpcContext,
#[arg(rename = "target-id")] target_id: Option<BackupTargetId>,
) -> Result<(), Error> {
let mut mounts = USER_MOUNTS.lock().await;
if let Some(target_id) = target_id {
if let Some(existing) = mounts.remove(&target_id) {
existing.unmount().await?;
}
} else {
for (_, existing) in std::mem::take(&mut *mounts) {
existing.unmount().await?;
}
}
Ok(())
}
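
The new mount/umount subcommands park the guard in the USER_MOUNTS map, so mounting the same target twice just returns the existing path, and umount with no target tears everything down. A minimal model of those semantics, with String standing in for the real BackupMountGuard and assuming tokio's Mutex:

    use std::collections::BTreeMap;
    use tokio::sync::Mutex;

    async fn mount(mounts: &Mutex<BTreeMap<String, String>>, target: &str) -> String {
        let mut mounts = mounts.lock().await;
        if let Some(existing) = mounts.get(target) {
            return existing.clone(); // repeat mounts are idempotent
        }
        let mountpoint = format!("/media/backup/{target}"); // hypothetical path
        mounts.insert(target.to_owned(), mountpoint.clone());
        mountpoint
    }

    async fn umount_all(mounts: &Mutex<BTreeMap<String, String>>) {
        // draining under the lock mirrors the std::mem::take in the code above
        for (_, mountpoint) in std::mem::take(&mut *mounts.lock().await) {
            println!("unmounting {mountpoint}");
        }
    }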

View File

@@ -14,7 +14,7 @@ fn log_str_error(action: &str, e: i32) {
}
}
fn main() {
pub fn main() {
let aliases: Vec<_> = std::env::args().skip(1).collect();
unsafe {
let simple_poll = avahi_sys::avahi_simple_poll_new();

View File

@@ -0,0 +1,9 @@
pub fn renamed(old: &str, new: &str) -> ! {
eprintln!("{old} has been renamed to {new}");
std::process::exit(1)
}
pub fn removed(name: &str) -> ! {
eprintln!("{name} has been removed");
std::process::exit(1)
}

backend/src/bins/mod.rs Normal file
View File

@@ -0,0 +1,59 @@
use std::path::Path;
#[cfg(feature = "avahi-alias")]
pub mod avahi_alias;
pub mod deprecated;
#[cfg(feature = "cli")]
pub mod start_cli;
#[cfg(feature = "js_engine")]
pub mod start_deno;
#[cfg(feature = "daemon")]
pub mod start_init;
#[cfg(feature = "sdk")]
pub mod start_sdk;
#[cfg(feature = "daemon")]
pub mod startd;
fn select_executable(name: &str) -> Option<fn()> {
match name {
#[cfg(feature = "avahi-alias")]
"avahi-alias" => Some(avahi_alias::main),
#[cfg(feature = "js_engine")]
"start-deno" => Some(start_deno::main),
#[cfg(feature = "cli")]
"start-cli" => Some(start_cli::main),
#[cfg(feature = "sdk")]
"start-sdk" => Some(start_sdk::main),
#[cfg(feature = "daemon")]
"startd" => Some(startd::main),
"embassy-cli" => Some(|| deprecated::renamed("embassy-cli", "start-cli")),
"embassy-sdk" => Some(|| deprecated::renamed("embassy-sdk", "start-sdk")),
"embassyd" => Some(|| deprecated::renamed("embassyd", "startd")),
"embassy-init" => Some(|| deprecated::removed("embassy-init")),
_ => None,
}
}
pub fn startbox() {
let args = std::env::args().take(2).collect::<Vec<_>>();
if let Some(x) = args
.get(0)
.and_then(|s| Path::new(&*s).file_name())
.and_then(|s| s.to_str())
.and_then(|s| select_executable(&s))
{
x()
} else if let Some(x) = args.get(1).and_then(|s| select_executable(&s)) {
x()
} else {
eprintln!(
"unknown executable: {}",
args.get(0)
.filter(|x| &**x != "startbox")
.or_else(|| args.get(1))
.map(|s| s.as_str())
.unwrap_or("N/A")
);
std::process::exit(1);
}
}
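
startbox is a busybox-style multi-call binary: when invoked through a symlink (e.g. ln -s startbox start-cli, an assumed install step), argv[0] names the tool; otherwise argv[1] does. A trimmed, standalone sketch of that dispatch:

    use std::path::Path;

    fn select_executable(name: &str) -> Option<fn()> {
        match name {
            "start-cli" => Some(|| println!("start-cli main")),
            "startd" => Some(|| println!("startd main")),
            _ => None,
        }
    }

    fn main() {
        let args: Vec<String> = std::env::args().take(2).collect();
        // argv[0] is the symlink name when installed busybox-style
        args.get(0)
            .and_then(|s| Path::new(s).file_name())
            .and_then(|s| s.to_str())
            .and_then(select_executable)
            // fall back to the first argument: `startbox start-cli ...`
            .or_else(|| args.get(1).and_then(|s| select_executable(s)))
            .map(|f| f())
            .unwrap_or_else(|| eprintln!("unknown executable"));
    }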

View File

@@ -1,21 +1,22 @@
use clap::Arg;
use embassy::context::CliContext;
use embassy::util::logger::EmbassyLogger;
use embassy::version::{Current, VersionT};
use embassy::Error;
use rpc_toolkit::run_cli;
use rpc_toolkit::yajrc::RpcError;
use serde_json::Value;
use crate::context::CliContext;
use crate::util::logger::EmbassyLogger;
use crate::version::{Current, VersionT};
use crate::Error;
lazy_static::lazy_static! {
static ref VERSION_STRING: String = Current::new().semver().to_string();
}
fn inner_main() -> Result<(), Error> {
run_cli!({
command: embassy::main_api,
command: crate::main_api,
app: app => app
.name("Embassy CLI")
.name("StartOS CLI")
.version(&**VERSION_STRING)
.arg(
clap::Arg::with_name("config")
@@ -48,7 +49,7 @@ fn inner_main() -> Result<(), Error> {
Ok(())
}
fn main() {
pub fn main() {
match inner_main() {
Ok(_) => (),
Err(e) => {

View File

@@ -0,0 +1,134 @@
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{command, run_cli, Context};
use serde_json::Value;
use crate::procedure::js_scripts::ExecuteArgs;
use crate::s9pk::manifest::PackageId;
use crate::util::serde::{display_serializable, parse_stdin_deserializable};
use crate::version::{Current, VersionT};
use crate::Error;
lazy_static::lazy_static! {
static ref VERSION_STRING: String = Current::new().semver().to_string();
}
struct DenoContext;
impl Context for DenoContext {}
#[command(subcommands(execute, sandbox))]
fn deno_api() -> Result<(), Error> {
Ok(())
}
#[command(cli_only, display(display_serializable))]
async fn execute(
#[arg(stdin, parse(parse_stdin_deserializable))] arg: ExecuteArgs,
) -> Result<Result<Value, (i32, String)>, Error> {
let ExecuteArgs {
procedure,
directory,
pkg_id,
pkg_version,
name,
volumes,
input,
} = arg;
PackageLogger::init(&pkg_id);
procedure
.execute_impl(&directory, &pkg_id, &pkg_version, name, &volumes, input)
.await
}
#[command(cli_only, display(display_serializable))]
async fn sandbox(
#[arg(stdin, parse(parse_stdin_deserializable))] arg: ExecuteArgs,
) -> Result<Result<Value, (i32, String)>, Error> {
let ExecuteArgs {
procedure,
directory,
pkg_id,
pkg_version,
name,
volumes,
input,
} = arg;
PackageLogger::init(&pkg_id);
procedure
.sandboxed_impl(&directory, &pkg_id, &pkg_version, &volumes, input, name)
.await
}
use tracing::Subscriber;
use tracing_subscriber::util::SubscriberInitExt;
#[derive(Clone)]
struct PackageLogger {}
impl PackageLogger {
fn base_subscriber(id: &PackageId) -> impl Subscriber {
use tracing_error::ErrorLayer;
use tracing_subscriber::prelude::*;
use tracing_subscriber::{fmt, EnvFilter};
let filter_layer = EnvFilter::default().add_directive(
format!("{}=warn", std::module_path!().split("::").next().unwrap())
.parse()
.unwrap(),
);
let fmt_layer = fmt::layer().with_writer(std::io::stderr).with_target(true);
let journald_layer = tracing_journald::layer()
.unwrap()
.with_syslog_identifier(format!("{id}.embassy"));
let sub = tracing_subscriber::registry()
.with(filter_layer)
.with(fmt_layer)
.with(journald_layer)
.with(ErrorLayer::default());
sub
}
pub fn init(id: &PackageId) -> Self {
Self::base_subscriber(id).init();
color_eyre::install().unwrap_or_else(|_| tracing::warn!("tracing too many times"));
Self {}
}
}
fn inner_main() -> Result<(), Error> {
run_cli!({
command: deno_api,
app: app => app
.name("StartOS Deno Executor")
.version(&**VERSION_STRING),
context: _m => DenoContext,
exit: |e: RpcError| {
match e.data {
Some(Value::String(s)) => eprintln!("{}: {}", e.message, s),
Some(Value::Object(o)) => if let Some(Value::String(s)) = o.get("details") {
eprintln!("{}: {}", e.message, s);
if let Some(Value::String(s)) = o.get("debug") {
tracing::debug!("{}", s)
}
}
Some(a) => eprintln!("{}: {}", e.message, a),
None => eprintln!("{}", e.message),
}
std::process::exit(e.code);
}
});
Ok(())
}
pub fn main() {
match inner_main() {
Ok(_) => (),
Err(e) => {
eprintln!("{}", e.source);
tracing::debug!("{:?}", e.source);
drop(e.source);
std::process::exit(e.kind as i32)
}
}
}

View File

@@ -1,39 +1,48 @@
use std::net::{Ipv6Addr, SocketAddr};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use embassy::context::rpc::RpcContextConfig;
use embassy::context::{DiagnosticContext, InstallContext, SetupContext};
use embassy::disk::fsck::RepairStrategy;
use embassy::disk::main::DEFAULT_PASSWORD;
use embassy::disk::REPAIR_DISK_PATH;
use embassy::init::STANDBY_MODE_PATH;
use embassy::net::web_server::WebServer;
use embassy::shutdown::Shutdown;
use embassy::sound::CHIME;
use embassy::util::logger::EmbassyLogger;
use embassy::util::Invoke;
use embassy::{Error, ErrorKind, ResultExt, OS_ARCH};
use tokio::process::Command;
use tracing::instrument;
use crate::context::rpc::RpcContextConfig;
use crate::context::{DiagnosticContext, InstallContext, SetupContext};
use crate::disk::fsck::RepairStrategy;
use crate::disk::main::DEFAULT_PASSWORD;
use crate::disk::REPAIR_DISK_PATH;
use crate::firmware::update_firmware;
use crate::init::STANDBY_MODE_PATH;
use crate::net::web_server::WebServer;
use crate::shutdown::Shutdown;
use crate::sound::CHIME;
use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt, PLATFORM};
#[instrument(skip_all)]
async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> {
if update_firmware().await?.0 {
return Ok(Some(Shutdown {
export_args: None,
restart: true,
}));
}
Command::new("ln")
.arg("-sf")
.arg("/usr/lib/embassy/scripts/fake-apt")
.arg("/usr/lib/startos/scripts/fake-apt")
.arg("/usr/local/bin/apt")
.invoke(crate::ErrorKind::OpenSsh)
.await?;
Command::new("ln")
.arg("-sf")
.arg("/usr/lib/embassy/scripts/fake-apt")
.arg("/usr/lib/startos/scripts/fake-apt")
.arg("/usr/local/bin/apt-get")
.invoke(crate::ErrorKind::OpenSsh)
.await?;
Command::new("ln")
.arg("-sf")
.arg("/usr/lib/embassy/scripts/fake-apt")
.arg("/usr/lib/startos/scripts/fake-apt")
.arg("/usr/local/bin/aptitude")
.invoke(crate::ErrorKind::OpenSsh)
.await?;
@@ -59,7 +68,11 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
let ctx = InstallContext::init(cfg_path).await?;
let server = WebServer::install(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
let server = WebServer::install(
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
ctx.clone(),
)
.await?;
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
CHIME.play().await?;
@@ -73,7 +86,7 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
server.shutdown().await;
Command::new("reboot")
.invoke(embassy::ErrorKind::Unknown)
.invoke(crate::ErrorKind::Unknown)
.await?;
} else if tokio::fs::metadata("/media/embassy/config/disk.guid")
.await
@@ -81,7 +94,11 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
{
let ctx = SetupContext::init(cfg_path).await?;
let server = WebServer::setup(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
let server = WebServer::setup(
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
ctx.clone(),
)
.await?;
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
CHIME.play().await?;
@@ -107,7 +124,7 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
let guid_string = tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?;
let guid = guid_string.trim();
let requires_reboot = embassy::disk::main::import(
let requires_reboot = crate::disk::main::import(
guid,
cfg.datadir(),
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
@@ -115,25 +132,29 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
} else {
RepairStrategy::Preen
},
DEFAULT_PASSWORD,
if guid.ends_with("_UNENC") {
None
} else {
Some(DEFAULT_PASSWORD)
},
)
.await?;
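The password handed to `import` hinges on the `_UNENC` guid suffix; a hypothetical helper equivalent to the inline expression above, assuming `DEFAULT_PASSWORD` is a `&'static str`:

// Hypothetical: volume groups created unencrypted carry an `_UNENC` suffix in the guid.
fn disk_password(guid: &str) -> Option<&'static str> {
    if guid.ends_with("_UNENC") {
        None // no password for an unencrypted volume group
    } else {
        Some(DEFAULT_PASSWORD)
    }
}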
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
tokio::fs::remove_file(REPAIR_DISK_PATH)
.await
.with_ctx(|_| (embassy::ErrorKind::Filesystem, REPAIR_DISK_PATH))?;
.with_ctx(|_| (crate::ErrorKind::Filesystem, REPAIR_DISK_PATH))?;
}
if requires_reboot.0 {
embassy::disk::main::export(guid, cfg.datadir()).await?;
crate::disk::main::export(guid, cfg.datadir()).await?;
Command::new("reboot")
.invoke(embassy::ErrorKind::Unknown)
.invoke(crate::ErrorKind::Unknown)
.await?;
}
tracing::info!("Loaded Disk");
embassy::init::init(&cfg).await?;
crate::init::init(&cfg).await?;
}
Ok(())
Ok(None)
}
async fn run_script_if_exists<P: AsRef<Path>>(path: P) {
@@ -156,53 +177,58 @@ async fn run_script_if_exists<P: AsRef<Path>>(path: P) {
#[instrument(skip_all)]
async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> {
if OS_ARCH == "raspberrypi" && tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() {
if &*PLATFORM == "raspberrypi" && tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() {
tokio::fs::remove_file(STANDBY_MODE_PATH).await?;
Command::new("sync").invoke(ErrorKind::Filesystem).await?;
embassy::sound::SHUTDOWN.play().await?;
crate::sound::SHUTDOWN.play().await?;
futures::future::pending::<()>().await;
}
embassy::sound::BEP.play().await?;
crate::sound::BEP.play().await?;
run_script_if_exists("/media/embassy/config/preinit.sh").await;
let res = if let Err(e) = setup_or_init(cfg_path.clone()).await {
async move {
tracing::error!("{}", e.source);
tracing::debug!("{}", e.source);
embassy::sound::BEETHOVEN.play().await?;
let res = match setup_or_init(cfg_path.clone()).await {
Err(e) => {
async move {
tracing::error!("{}", e.source);
tracing::debug!("{}", e.source);
crate::sound::BEETHOVEN.play().await?;
let ctx = DiagnosticContext::init(
cfg_path,
if tokio::fs::metadata("/media/embassy/config/disk.guid")
.await
.is_ok()
{
Some(Arc::new(
tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?
.trim()
.to_owned(),
))
} else {
None
},
e,
)
.await?;
let ctx = DiagnosticContext::init(
cfg_path,
if tokio::fs::metadata("/media/embassy/config/disk.guid")
.await
.is_ok()
{
Some(Arc::new(
tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?
.trim()
.to_owned(),
))
} else {
None
},
e,
)
.await?;
let server = WebServer::diagnostic(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
let server = WebServer::diagnostic(
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
ctx.clone(),
)
.await?;
let shutdown = ctx.shutdown.subscribe().recv().await.unwrap();
let shutdown = ctx.shutdown.subscribe().recv().await.unwrap();
server.shutdown().await;
server.shutdown().await;
Ok(shutdown)
Ok(shutdown)
}
.await
}
.await
} else {
Ok(None)
Ok(s) => Ok(s),
};
run_script_if_exists("/media/embassy/config/postinit.sh").await;
@@ -210,8 +236,8 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
res
}
fn main() {
let matches = clap::App::new("embassy-init")
pub fn main() {
let matches = clap::App::new("start-init")
.arg(
clap::Arg::with_name("config")
.short('c')
@@ -220,8 +246,6 @@ fn main() {
)
.get_matches();
EmbassyLogger::init();
let cfg_path = matches.value_of("config").map(|p| Path::new(p).to_owned());
let res = {
let rt = tokio::runtime::Builder::new_multi_thread()

View File

@@ -1,20 +1,21 @@
use embassy::context::SdkContext;
use embassy::util::logger::EmbassyLogger;
use embassy::version::{Current, VersionT};
use embassy::Error;
use rpc_toolkit::run_cli;
use rpc_toolkit::yajrc::RpcError;
use serde_json::Value;
use crate::context::SdkContext;
use crate::util::logger::EmbassyLogger;
use crate::version::{Current, VersionT};
use crate::Error;
lazy_static::lazy_static! {
static ref VERSION_STRING: String = Current::new().semver().to_string();
}
fn inner_main() -> Result<(), Error> {
run_cli!({
command: embassy::portable_api,
command: crate::portable_api,
app: app => app
.name("Embassy SDK")
.name("StartOS SDK")
.version(&**VERSION_STRING)
.arg(
clap::Arg::with_name("config")
@@ -47,7 +48,7 @@ fn inner_main() -> Result<(), Error> {
Ok(())
}
fn main() {
pub fn main() {
match inner_main() {
Ok(_) => (),
Err(e) => {

View File

@@ -1,20 +1,22 @@
use std::net::{Ipv6Addr, SocketAddr};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use color_eyre::eyre::eyre;
use embassy::context::{DiagnosticContext, RpcContext};
use embassy::net::web_server::WebServer;
use embassy::shutdown::Shutdown;
use embassy::system::launch_metrics_task;
use embassy::util::logger::EmbassyLogger;
use embassy::{Error, ErrorKind, ResultExt};
use futures::{FutureExt, TryFutureExt};
use tokio::signal::unix::signal;
use tracing::instrument;
use crate::context::{DiagnosticContext, RpcContext};
use crate::net::web_server::WebServer;
use crate::shutdown::Shutdown;
use crate::system::launch_metrics_task;
use crate::util::logger::EmbassyLogger;
use crate::{Error, ErrorKind, ResultExt};
#[instrument(skip_all)]
async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> {
let (rpc_ctx, server, shutdown) = {
let (rpc_ctx, server, shutdown) = async {
let rpc_ctx = RpcContext::init(
cfg_path,
Arc::new(
@@ -25,8 +27,12 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
),
)
.await?;
embassy::hostname::sync_hostname(&rpc_ctx.account.read().await.hostname).await?;
let server = WebServer::main(([0, 0, 0, 0], 80).into(), rpc_ctx.clone()).await?;
crate::hostname::sync_hostname(&rpc_ctx.account.read().await.hostname).await?;
let server = WebServer::main(
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
rpc_ctx.clone(),
)
.await?;
let mut shutdown_recv = rpc_ctx.shutdown.subscribe();
@@ -66,7 +72,7 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
.await
});
embassy::sound::CHIME.play().await?;
crate::sound::CHIME.play().await?;
metrics_task
.map_err(|e| {
@@ -85,8 +91,9 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
sig_handler.abort();
(rpc_ctx, server, shutdown)
};
Ok::<_, Error>((rpc_ctx, server, shutdown))
}
.await?;
server.shutdown().await;
rpc_ctx.shutdown().await?;
@@ -95,8 +102,15 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
Ok(shutdown)
}
fn main() {
let matches = clap::App::new("embassyd")
pub fn main() {
EmbassyLogger::init();
if !Path::new("/run/embassy/initialized").exists() {
super::start_init::main();
std::fs::write("/run/embassy/initialized", "").unwrap();
}
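The marker under `/run` makes `start_init::main()` a run-once-per-boot step (`/run` is a tmpfs, so the guard resets on reboot). The same pattern in isolation, with hypothetical names:

use std::path::Path;

// Hypothetical stand-alone version of the run-once guard above.
fn ensure_initialized(marker: &Path, init: impl FnOnce()) {
    if !marker.exists() {
        init();
        // An empty marker file records that initialization already ran.
        std::fs::write(marker, "").expect("failed to write init marker");
    }
}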
let matches = clap::App::new("startd")
.arg(
clap::Arg::with_name("config")
.short('c')
@@ -105,8 +119,6 @@ fn main() {
)
.get_matches();
EmbassyLogger::init();
let cfg_path = matches.value_of("config").map(|p| Path::new(p).to_owned());
let res = {
@@ -121,7 +133,7 @@ fn main() {
async {
tracing::error!("{}", e.source);
tracing::debug!("{:?}", e.source);
embassy::sound::BEETHOVEN.play().await?;
crate::sound::BEETHOVEN.play().await?;
let ctx = DiagnosticContext::init(
cfg_path,
if tokio::fs::metadata("/media/embassy/config/disk.guid")
@@ -141,8 +153,11 @@ fn main() {
)
.await?;
let server =
WebServer::diagnostic(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
let server = WebServer::diagnostic(
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
ctx.clone(),
)
.await?;
let mut shutdown = ctx.shutdown.subscribe();

View File

@@ -2,7 +2,6 @@ use std::collections::{BTreeMap, BTreeSet};
use color_eyre::eyre::eyre;
use models::ImageId;
use nix::sys::signal::Signal;
use patch_db::HasModel;
use serde::{Deserialize, Serialize};
use tracing::instrument;
@@ -10,6 +9,7 @@ use tracing::instrument;
use super::{Config, ConfigSpec};
use crate::context::RpcContext;
use crate::dependencies::Dependencies;
use crate::prelude::*;
use crate::procedure::docker::DockerContainers;
use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
@@ -18,7 +18,7 @@ use crate::util::Version;
use crate::volume::Volumes;
use crate::{Error, ResultExt};
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct ConfigRes {
pub config: Option<Config>,
@@ -26,6 +26,7 @@ pub struct ConfigRes {
}
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
#[model = "Model<Self>"]
pub struct ConfigActions {
pub get: PackageProcedure,
pub set: PackageProcedure,
@@ -34,16 +35,16 @@ impl ConfigActions {
#[instrument(skip_all)]
pub fn validate(
&self,
container: &Option<DockerContainers>,
_container: &Option<DockerContainers>,
eos_version: &Version,
volumes: &Volumes,
image_ids: &BTreeSet<ImageId>,
) -> Result<(), Error> {
self.get
.validate(container, eos_version, volumes, image_ids, true)
.validate(eos_version, volumes, image_ids, true)
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Config Get"))?;
self.set
.validate(container, eos_version, volumes, image_ids, true)
.validate(eos_version, volumes, image_ids, true)
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Config Set"))?;
Ok(())
}
@@ -99,7 +100,6 @@ impl ConfigActions {
})
})?;
Ok(SetResult {
signal: res.signal,
depends_on: res
.depends_on
.into_iter()
@@ -112,9 +112,5 @@ impl ConfigActions {
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct SetResult {
#[serde(default)]
#[serde(deserialize_with = "crate::util::serde::deserialize_from_str_opt")]
#[serde(serialize_with = "crate::util::serde::serialize_display_opt")]
pub signal: Option<Signal>,
pub depends_on: BTreeMap<PackageId, BTreeSet<HealthCheckId>>,
}
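With `signal` removed, `SetResult` serializes to a single kebab-cased key; an illustrative shape (the field name comes from the struct; the package and health-check ids are hypothetical):

// Hedged: expected wire format under #[serde(rename_all = "kebab-case")].
let expected = serde_json::json!({
    "depends-on": {
        "bitcoind": ["rpc", "synced"]
    }
});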

View File

@@ -1,28 +1,21 @@
use std::collections::{BTreeMap, BTreeSet};
use std::collections::BTreeMap;
use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration;
use color_eyre::eyre::eyre;
use futures::future::{BoxFuture, FutureExt};
use indexmap::IndexSet;
use itertools::Itertools;
use patch_db::{DbHandle, LockReceipt, LockTarget, LockTargetId, LockType, Verifier};
use rand::SeedableRng;
use models::{ErrorKind, OptionExt};
use patch_db::value::InternedString;
use patch_db::Value;
use regex::Regex;
use rpc_toolkit::command;
use serde_json::Value;
use tracing::instrument;
use crate::context::RpcContext;
use crate::db::model::{CurrentDependencies, CurrentDependencyInfo, CurrentDependents};
use crate::dependencies::{
add_dependent_to_current_dependents_lists, break_transitive, heal_all_dependents_transitive,
BreakTransitiveReceipts, BreakageRes, Dependencies, DependencyConfig, DependencyError,
DependencyErrors, DependencyReceipt, TaggedDependencyError, TryHealReceipts,
};
use crate::install::cleanup::{remove_from_current_dependents_lists, UpdateDependencyReceipts};
use crate::procedure::docker::DockerContainers;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::prelude::*;
use crate::s9pk::manifest::PackageId;
use crate::util::display_none;
use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat};
use crate::Error;
@@ -34,10 +27,10 @@ pub mod util;
pub use spec::{ConfigSpec, Defaultable};
use util::NumRange;
use self::action::{ConfigActions, ConfigRes};
use self::spec::{ConfigPointerReceipts, PackagePointerSpec, ValueSpecPointer};
use self::action::ConfigRes;
use self::spec::ValueSpecPointer;
pub type Config = serde_json::Map<String, Value>;
pub type Config = patch_db::value::InOMap<InternedString, Value>;
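`Config` moves from `serde_json::Map` to patch-db's insertion-ordered map keyed by interned strings; a construction sketch, assuming `InOMap` mirrors the usual `new`/`insert` map API:

// Assumptions: InOMap::new/insert behave like the standard map API;
// Value::String wraps Arc<String>, as elsewhere in this diff.
let mut config: Config = Config::new();
config.insert(
    InternedString::from_display(&"bitcoin-rpc-user"),
    Value::String(Arc::new("satoshi".to_owned())),
);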
pub trait TypeOf {
fn type_of(&self) -> &'static str;
}
@@ -81,7 +74,7 @@ pub struct TimeoutError;
#[derive(Clone, Debug, thiserror::Error)]
pub struct NoMatchWithPath {
pub path: Vec<String>,
pub path: Vec<InternedString>,
pub error: MatchError,
}
impl NoMatchWithPath {
@@ -91,7 +84,7 @@ impl NoMatchWithPath {
error,
}
}
pub fn prepend(mut self, seg: String) -> Self {
pub fn prepend(mut self, seg: InternedString) -> Self {
self.path.push(seg);
self
}
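`prepend` pushes a path segment as a validation error bubbles outward through the enclosing specs; a sketch of the accumulation (field names hypothetical):

// Hedged: the leaf error gains one segment per enclosing object or list.
let err = NoMatchWithPath::new(MatchError::NotNullable)
    .prepend(InternedString::from_display(&"port")) // innermost field
    .prepend(InternedString::from_display(&"rpc")); // enclosing object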
@@ -110,9 +103,9 @@ impl From<NoMatchWithPath> for Error {
#[derive(Clone, Debug, thiserror::Error)]
pub enum MatchError {
#[error("String {0:?} Does Not Match Pattern {1}")]
Pattern(String, Regex),
Pattern(Arc<String>, Regex),
#[error("String {0:?} Is Not In Enum {1:?}")]
Enum(String, IndexSet<String>),
Enum(Arc<String>, IndexSet<String>),
#[error("Field Is Not Nullable")]
NotNullable,
#[error("Length Mismatch: expected {0}, actual: {1}")]
@@ -124,11 +117,11 @@ pub enum MatchError {
#[error("Number Is Not Integral: {0}")]
NonIntegral(f64),
#[error("Variant {0:?} Is Not In Union {1:?}")]
Union(String, IndexSet<String>),
Union(Arc<String>, IndexSet<String>),
#[error("Variant Is Missing Tag {0:?}")]
MissingTag(String),
MissingTag(InternedString),
#[error("Property {0:?} Of Variant {1:?} Conflicts With Union Tag")]
PropertyMatchesUnionTag(String, String),
PropertyMatchesUnionTag(InternedString, String),
#[error("Name of Property {0:?} Conflicts With Map Tag Name")]
PropertyNameMatchesMapTag(String),
#[error("Pointer Is Invalid: {0}")]
@@ -164,55 +157,6 @@ pub fn config(#[arg] id: PackageId) -> Result<PackageId, Error> {
Ok(id)
}
pub struct ConfigGetReceipts {
manifest_volumes: LockReceipt<crate::volume::Volumes, ()>,
manifest_version: LockReceipt<crate::util::Version, ()>,
manifest_config: LockReceipt<Option<ConfigActions>, ()>,
}
impl ConfigGetReceipts {
pub async fn new<'a>(db: &'a mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
let mut locks = Vec::new();
let setup = Self::setup(&mut locks, id);
Ok(setup(&db.lock_all(locks).await?)?)
}
pub fn setup(
locks: &mut Vec<LockTargetId>,
id: &PackageId,
) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
let manifest_version = crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.and_then(|x| x.installed())
.map(|x| x.manifest().version())
.make_locker(LockType::Write)
.add_to_keys(locks);
let manifest_volumes = crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.and_then(|x| x.installed())
.map(|x| x.manifest().volumes())
.make_locker(LockType::Write)
.add_to_keys(locks);
let manifest_config = crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.and_then(|x| x.installed())
.map(|x| x.manifest().config())
.make_locker(LockType::Write)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
manifest_volumes: manifest_volumes.verify(skeleton_key)?,
manifest_version: manifest_version.verify(skeleton_key)?,
manifest_config: manifest_config.verify(skeleton_key)?,
})
}
}
}
#[command(display(display_serializable))]
#[instrument(skip_all)]
pub async fn get(
@@ -222,16 +166,21 @@ pub async fn get(
#[arg(long = "format")]
format: Option<IoFormat>,
) -> Result<ConfigRes, Error> {
let mut db = ctx.db.handle();
let receipts = ConfigGetReceipts::new(&mut db, &id).await?;
let action = receipts
.manifest_config
.get(&mut db)
.await?
let db = ctx.db.peek().await;
let manifest = db
.as_package_data()
.as_idx(&id)
.or_not_found(&id)?
.as_installed()
.or_not_found(&id)?
.as_manifest();
let action = manifest
.as_config()
.de()?
.ok_or_else(|| Error::new(eyre!("{} has no config", id), crate::ErrorKind::NotFound))?;
let volumes = receipts.manifest_volumes.get(&mut db).await?;
let version = receipts.manifest_version.get(&mut db).await?;
let volumes = manifest.as_volumes().de()?;
let version = manifest.as_version().de()?;
action.get(&ctx, &id, &version, &volumes).await
}
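The receipt-and-lock plumbing is replaced throughout by a point-in-time `peek`; reading any manifest field now follows one shape (every call below appears verbatim in this diff):

// Sketch: snapshot the db, walk the typed accessors, deserialize the leaf.
let db = ctx.db.peek().await;
let version = db
    .as_package_data()
    .as_idx(&id)
    .or_not_found(&id)?
    .as_installed()
    .or_not_found(&id)?
    .as_manifest()
    .as_version()
    .de()?;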
@@ -252,192 +201,33 @@ pub fn set(
Ok((id, config, timeout.map(|d| *d)))
}
/// The new locking strategy finds all possible locks up front and lifts them into a single bundle.
/// That bundle is then passed down into the functions that need to touch the db, so instead of
/// taking locks deep inside the system, the locks are already held and the db operation can
/// proceed directly.
/// An UnlockedLock has two type parameters: the type being get/set in the db, and the keys that
/// must be supplied on get/set because the paths contain wildcards.
pub struct ConfigReceipts {
pub dependency_receipt: DependencyReceipt,
pub config_receipts: ConfigPointerReceipts,
pub update_dependency_receipts: UpdateDependencyReceipts,
pub try_heal_receipts: TryHealReceipts,
pub break_transitive_receipts: BreakTransitiveReceipts,
configured: LockReceipt<bool, String>,
config_actions: LockReceipt<ConfigActions, String>,
dependencies: LockReceipt<Dependencies, String>,
volumes: LockReceipt<crate::volume::Volumes, String>,
version: LockReceipt<crate::util::Version, String>,
manifest: LockReceipt<Manifest, String>,
system_pointers: LockReceipt<Vec<spec::SystemPointerSpec>, String>,
pub current_dependents: LockReceipt<CurrentDependents, String>,
pub current_dependencies: LockReceipt<CurrentDependencies, String>,
dependency_errors: LockReceipt<DependencyErrors, String>,
manifest_dependencies_config: LockReceipt<DependencyConfig, (String, String)>,
docker_containers: LockReceipt<DockerContainers, String>,
}
impl ConfigReceipts {
pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
let mut locks = Vec::new();
let setup = Self::setup(&mut locks);
Ok(setup(&db.lock_all(locks).await?)?)
}
pub fn setup(locks: &mut Vec<LockTargetId>) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
let dependency_receipt = DependencyReceipt::setup(locks);
let config_receipts = ConfigPointerReceipts::setup(locks);
let update_dependency_receipts = UpdateDependencyReceipts::setup(locks);
let break_transitive_receipts = BreakTransitiveReceipts::setup(locks);
let try_heal_receipts = TryHealReceipts::setup(locks);
let configured: LockTarget<bool, String> = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.status().configured())
.make_locker(LockType::Write)
.add_to_keys(locks);
let config_actions = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.and_then(|x| x.manifest().config())
.make_locker(LockType::Read)
.add_to_keys(locks);
let dependencies = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.manifest().dependencies())
.make_locker(LockType::Read)
.add_to_keys(locks);
let volumes = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.manifest().volumes())
.make_locker(LockType::Read)
.add_to_keys(locks);
let version = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.manifest().version())
.make_locker(LockType::Read)
.add_to_keys(locks);
let manifest = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.manifest())
.make_locker(LockType::Read)
.add_to_keys(locks);
let system_pointers = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.system_pointers())
.make_locker(LockType::Write)
.add_to_keys(locks);
let current_dependents = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.current_dependents())
.make_locker(LockType::Write)
.add_to_keys(locks);
let current_dependencies = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.current_dependencies())
.make_locker(LockType::Write)
.add_to_keys(locks);
let dependency_errors = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.status().dependency_errors())
.make_locker(LockType::Write)
.add_to_keys(locks);
let manifest_dependencies_config = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.and_then(|x| x.manifest().dependencies().star().config())
.make_locker(LockType::Write)
.add_to_keys(locks);
let docker_containers = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.and_then(|x| x.manifest().containers())
.make_locker(LockType::Write)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
dependency_receipt: dependency_receipt(skeleton_key)?,
config_receipts: config_receipts(skeleton_key)?,
try_heal_receipts: try_heal_receipts(skeleton_key)?,
break_transitive_receipts: break_transitive_receipts(skeleton_key)?,
update_dependency_receipts: update_dependency_receipts(skeleton_key)?,
configured: configured.verify(skeleton_key)?,
config_actions: config_actions.verify(skeleton_key)?,
dependencies: dependencies.verify(skeleton_key)?,
volumes: volumes.verify(skeleton_key)?,
version: version.verify(skeleton_key)?,
manifest: manifest.verify(skeleton_key)?,
system_pointers: system_pointers.verify(skeleton_key)?,
current_dependents: current_dependents.verify(skeleton_key)?,
current_dependencies: current_dependencies.verify(skeleton_key)?,
dependency_errors: dependency_errors.verify(skeleton_key)?,
manifest_dependencies_config: manifest_dependencies_config.verify(skeleton_key)?,
docker_containers: docker_containers.verify(skeleton_key)?,
})
}
}
}
#[command(rename = "dry", display(display_serializable))]
#[instrument(skip_all)]
pub async fn set_dry(
#[context] ctx: RpcContext,
#[parent_data] (id, config, timeout): (PackageId, Option<Config>, Option<Duration>),
) -> Result<BreakageRes, Error> {
let mut db = ctx.db.handle();
let mut tx = db.begin().await?;
let mut breakages = BTreeMap::new();
let locks = ConfigReceipts::new(&mut tx).await?;
configure(
&ctx,
&mut tx,
&id,
config,
&timeout,
true,
&mut BTreeMap::new(),
&mut breakages,
&locks,
)
.await?;
) -> Result<BTreeMap<PackageId, String>, Error> {
let breakages = BTreeMap::new();
let overrides = Default::default();
locks.configured.set(&mut tx, true, &id).await?;
tx.abort().await?;
Ok(BreakageRes(breakages))
let configure_context = ConfigureContext {
breakages,
timeout,
config,
dry_run: true,
overrides,
};
let breakages = configure(&ctx, &id, configure_context).await?;
Ok(breakages)
}
pub struct ConfigureContext {
pub breakages: BTreeMap<PackageId, String>,
pub timeout: Option<Duration>,
pub config: Option<Config>,
pub overrides: BTreeMap<PackageId, Config>,
pub dry_run: bool,
}
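Both `set_dry` and `set_impl` now just build a `ConfigureContext` and delegate to `configure`; a dry-run construction mirroring `set_dry` above, with timeout omitted for brevity:

// Grounded in set_dry: a dry-run context with no explicit config or timeout.
let configure_context = ConfigureContext {
    breakages: BTreeMap::new(),
    timeout: None,
    config: None,
    overrides: Default::default(),
    dry_run: true,
};
let breakages = configure(&ctx, &id, configure_context).await?;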
#[instrument(skip_all)]
@@ -445,393 +235,53 @@ pub async fn set_impl(
ctx: RpcContext,
(id, config, timeout): (PackageId, Option<Config>, Option<Duration>),
) -> Result<(), Error> {
let mut db = ctx.db.handle();
let mut tx = db.begin().await?;
let mut breakages = BTreeMap::new();
let locks = ConfigReceipts::new(&mut tx).await?;
configure(
&ctx,
&mut tx,
&id,
let breakages = BTreeMap::new();
let overrides = Default::default();
let configure_context = ConfigureContext {
breakages,
timeout,
config,
&timeout,
false,
&mut BTreeMap::new(),
&mut breakages,
&locks,
)
.await?;
tx.commit().await?;
dry_run: false,
overrides,
};
configure(&ctx, &id, configure_context).await?;
Ok(())
}
#[instrument(skip_all)]
pub async fn configure<'a, Db: DbHandle>(
pub async fn configure(
ctx: &RpcContext,
db: &'a mut Db,
id: &PackageId,
config: Option<Config>,
timeout: &Option<Duration>,
dry_run: bool,
overrides: &mut BTreeMap<PackageId, Config>,
breakages: &mut BTreeMap<PackageId, TaggedDependencyError>,
receipts: &ConfigReceipts,
) -> Result<(), Error> {
configure_rec(
ctx, db, id, config, timeout, dry_run, overrides, breakages, receipts,
)
.await?;
receipts.configured.set(db, true, &id).await?;
Ok(())
configure_context: ConfigureContext,
) -> Result<BTreeMap<PackageId, String>, Error> {
let db = ctx.db.peek().await;
let package = db
.as_package_data()
.as_idx(id)
.or_not_found(&id)?
.as_installed()
.or_not_found(&id)?;
let version = package.as_manifest().as_version().de()?;
ctx.managers
.get(&(id.clone(), version.clone()))
.await
.ok_or_else(|| {
Error::new(
eyre!("There is no manager running for {id:?} and {version:?}"),
ErrorKind::Unknown,
)
})?
.configure(configure_context)
.await
}
#[instrument(skip_all)]
pub fn configure_rec<'a, Db: DbHandle>(
ctx: &'a RpcContext,
db: &'a mut Db,
id: &'a PackageId,
config: Option<Config>,
timeout: &'a Option<Duration>,
dry_run: bool,
overrides: &'a mut BTreeMap<PackageId, Config>,
breakages: &'a mut BTreeMap<PackageId, TaggedDependencyError>,
receipts: &'a ConfigReceipts,
) -> BoxFuture<'a, Result<(), Error>> {
async move {
// fetch data from db
let action = receipts
.config_actions
.get(db, id)
.await?
.ok_or_else(not_found)?;
let dependencies = receipts
.dependencies
.get(db, id)
.await?
.ok_or_else(not_found)?;
let volumes = receipts.volumes.get(db, id).await?.ok_or_else(not_found)?;
let is_needs_config = !receipts
.configured
.get(db, id)
.await?
.ok_or_else(not_found)?;
let version = receipts.version.get(db, id).await?.ok_or_else(not_found)?;
// get current config and current spec
let ConfigRes {
config: old_config,
spec,
} = action.get(ctx, id, &version, &volumes).await?;
// determine new config to use
let mut config = if let Some(config) = config.or_else(|| old_config.clone()) {
config
} else {
spec.gen(&mut rand::rngs::StdRng::from_entropy(), timeout)?
};
let manifest = receipts.manifest.get(db, id).await?.ok_or_else(not_found)?;
spec.validate(&manifest)?;
spec.matches(&config)?; // check that new config matches spec
spec.update(
ctx,
db,
&manifest,
&*overrides,
&mut config,
&receipts.config_receipts,
macro_rules! not_found {
($x:expr) => {
crate::Error::new(
color_eyre::eyre::eyre!("Could not find {} at {}:{}", $x, module_path!(), line!()),
crate::ErrorKind::Incoherent,
)
.await?; // dereference pointers in the new config
// create backreferences to pointers
let mut sys = receipts
.system_pointers
.get(db, &id)
.await?
.ok_or_else(not_found)?;
sys.truncate(0);
let mut current_dependencies: CurrentDependencies = CurrentDependencies(
dependencies
.0
.iter()
.filter_map(|(id, info)| {
if info.requirement.required() {
Some((id.clone(), CurrentDependencyInfo::default()))
} else {
None
}
})
.collect(),
);
for ptr in spec.pointers(&config)? {
match ptr {
ValueSpecPointer::Package(pkg_ptr) => {
if let Some(current_dependency) =
current_dependencies.0.get_mut(pkg_ptr.package_id())
{
current_dependency.pointers.push(pkg_ptr);
} else {
current_dependencies.0.insert(
pkg_ptr.package_id().to_owned(),
CurrentDependencyInfo {
pointers: vec![pkg_ptr],
health_checks: BTreeSet::new(),
},
);
}
}
ValueSpecPointer::System(s) => sys.push(s),
}
}
receipts.system_pointers.set(db, sys, &id).await?;
let signal = if !dry_run {
// run config action
let res = action
.set(ctx, id, &version, &dependencies, &volumes, &config)
.await?;
// track dependencies with no pointers
for (package_id, health_checks) in res.depends_on.into_iter() {
if let Some(current_dependency) = current_dependencies.0.get_mut(&package_id) {
current_dependency.health_checks.extend(health_checks);
} else {
current_dependencies.0.insert(
package_id,
CurrentDependencyInfo {
pointers: Vec::new(),
health_checks,
},
);
}
}
// track dependency health checks
current_dependencies = current_dependencies.map(|x| {
x.into_iter()
.filter(|(dep_id, _)| {
if dep_id != id && !manifest.dependencies.0.contains_key(dep_id) {
tracing::warn!("Illegal dependency specified: {}", dep_id);
false
} else {
true
}
})
.collect()
});
res.signal
} else {
None
};
// update dependencies
let prev_current_dependencies = receipts
.current_dependencies
.get(db, &id)
.await?
.unwrap_or_default();
remove_from_current_dependents_lists(
db,
id,
&prev_current_dependencies,
&receipts.current_dependents,
)
.await?; // remove previous
add_dependent_to_current_dependents_lists(
db,
id,
&current_dependencies,
&receipts.current_dependents,
)
.await?; // add new
current_dependencies.0.remove(id);
receipts
.current_dependencies
.set(db, current_dependencies.clone(), &id)
.await?;
let errs = receipts
.dependency_errors
.get(db, &id)
.await?
.ok_or_else(not_found)?;
tracing::warn!("Dependency Errors: {:?}", errs);
let errs = DependencyErrors::init(
ctx,
db,
&manifest,
&current_dependencies,
&receipts.dependency_receipt.try_heal,
)
.await?;
receipts.dependency_errors.set(db, errs, &id).await?;
// cache current config for dependents
overrides.insert(id.clone(), config.clone());
// handle dependents
let dependents = receipts
.current_dependents
.get(db, id)
.await?
.ok_or_else(not_found)?;
let prev = if is_needs_config { None } else { old_config }
.map(Value::Object)
.unwrap_or_default();
let next = Value::Object(config.clone());
for (dependent, dep_info) in dependents.0.iter().filter(|(dep_id, _)| dep_id != &id) {
let dependent_container = receipts.docker_containers.get(db, &dependent).await?;
let dependent_container = &dependent_container;
// check if config passes dependent check
if let Some(cfg) = receipts
.manifest_dependencies_config
.get(db, (&dependent, &id))
.await?
{
let manifest = receipts
.manifest
.get(db, &dependent)
.await?
.ok_or_else(not_found)?;
if let Err(error) = cfg
.check(
ctx,
dependent_container,
dependent,
&manifest.version,
&manifest.volumes,
id,
&config,
)
.await?
{
let dep_err = DependencyError::ConfigUnsatisfied { error };
break_transitive(
db,
dependent,
id,
dep_err,
breakages,
&receipts.break_transitive_receipts,
)
.await?;
}
// handle backreferences
for ptr in &dep_info.pointers {
if let PackagePointerSpec::Config(cfg_ptr) = ptr {
if cfg_ptr.select(&next) != cfg_ptr.select(&prev) {
if let Err(e) = configure_rec(
ctx, db, dependent, None, timeout, dry_run, overrides, breakages,
receipts,
)
.await
{
if e.kind == crate::ErrorKind::ConfigRulesViolation {
break_transitive(
db,
dependent,
id,
DependencyError::ConfigUnsatisfied {
error: format!("{}", e),
},
breakages,
&receipts.break_transitive_receipts,
)
.await?;
} else {
return Err(e);
}
}
}
}
}
heal_all_dependents_transitive(ctx, db, id, &receipts.dependency_receipt).await?;
}
}
if let Some(signal) = signal {
match ctx.managers.get(&(id.clone(), version.clone())).await {
None => {
// in theory this should never happen, which indicates this function should be moved behind the
// Manager interface
return Err(Error::new(
eyre!("Manager Not Found for package being configured"),
crate::ErrorKind::Incoherent,
));
}
Some(m) => {
m.signal(&signal).await?;
}
}
}
Ok(())
}
.boxed()
}
#[instrument(skip_all)]
pub fn not_found() -> Error {
Error::new(eyre!("Could not find"), crate::ErrorKind::Incoherent)
}
/// Double-check that the generated lock paths are what we expect them to be.
/// Earlier, these paths were not what we expected.
#[tokio::test]
async fn ensure_creation_of_config_paths_makes_sense() {
let mut fake = patch_db::test_utils::NoOpDb();
let config_locks = ConfigReceipts::new(&mut fake).await.unwrap();
assert_eq!(
&format!("{}", config_locks.configured.lock.glob),
"/package-data/*/installed/status/configured"
);
assert_eq!(
&format!("{}", config_locks.config_actions.lock.glob),
"/package-data/*/installed/manifest/config"
);
assert_eq!(
&format!("{}", config_locks.dependencies.lock.glob),
"/package-data/*/installed/manifest/dependencies"
);
assert_eq!(
&format!("{}", config_locks.volumes.lock.glob),
"/package-data/*/installed/manifest/volumes"
);
assert_eq!(
&format!("{}", config_locks.version.lock.glob),
"/package-data/*/installed/manifest/version"
);
assert_eq!(
&format!("{}", config_locks.volumes.lock.glob),
"/package-data/*/installed/manifest/volumes"
);
assert_eq!(
&format!("{}", config_locks.manifest.lock.glob),
"/package-data/*/installed/manifest"
);
assert_eq!(
&format!("{}", config_locks.manifest.lock.glob),
"/package-data/*/installed/manifest"
);
assert_eq!(
&format!("{}", config_locks.system_pointers.lock.glob),
"/package-data/*/installed/system-pointers"
);
assert_eq!(
&format!("{}", config_locks.current_dependents.lock.glob),
"/package-data/*/installed/current-dependents"
);
assert_eq!(
&format!("{}", config_locks.dependency_errors.lock.glob),
"/package-data/*/installed/status/dependency-errors"
);
assert_eq!(
&format!("{}", config_locks.manifest_dependencies_config.lock.glob),
"/package-data/*/installed/manifest/dependencies/*/config"
);
assert_eq!(
&format!("{}", config_locks.system_pointers.lock.glob),
"/package-data/*/installed/system-pointers"
);
};
}
pub(crate) use not_found;
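A hypothetical call site for `not_found!`; the expansion is a `crate::Error` of kind `Incoherent`, tagged with the module path and line number of the caller:

// Hypothetical usage: convert a missing map entry into an Incoherent error.
let installed = db
    .as_package_data()
    .as_idx(&id)
    .ok_or_else(|| not_found!(&id))?;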

View File

@@ -1,4 +1,4 @@
use std::borrow::{Borrow, Cow};
use std::borrow::Cow;
use std::collections::{BTreeMap, BTreeSet};
use std::fmt;
use std::fmt::Debug;
@@ -9,15 +9,16 @@ use std::sync::Arc;
use std::time::Duration;
use async_trait::async_trait;
use imbl::Vector;
use imbl_value::InternedString;
use indexmap::{IndexMap, IndexSet};
use itertools::Itertools;
use jsonpath_lib::Compiled as CompiledJsonPath;
use patch_db::{DbHandle, LockReceipt, LockType};
use patch_db::value::{Number, Value};
use rand::{CryptoRng, Rng};
use regex::Regex;
use serde::de::{MapAccess, Visitor};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_json::{Number, Value};
use sqlx::PgPool;
use super::util::{self, CharSet, NumRange, UniqueBy, STATIC_NULL};
@@ -26,8 +27,8 @@ use crate::config::ConfigurationError;
use crate::context::RpcContext;
use crate::net::interface::InterfaceId;
use crate::net::keys::Key;
use crate::prelude::*;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::Error;
// Config Value Specifications
#[async_trait]
@@ -39,14 +40,12 @@ pub trait ValueSpec {
// since not all invariants can be checked by the type
fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath>;
// update is to fill in values for environment pointers recursively
async fn update<Db: DbHandle>(
async fn update(
&self,
ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError>;
// returns all pointers that are live in the provided config
fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath>;
@@ -106,7 +105,7 @@ where
rng: &mut R,
timeout: &Option<Duration>,
) -> Result<Value, Self::Error> {
self.gen_with(self.default_spec().borrow(), rng, timeout)
self.gen_with(self.default_spec(), rng, timeout)
}
}
@@ -156,17 +155,15 @@ where
fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath> {
self.inner.validate(manifest)
}
async fn update<Db: DbHandle>(
async fn update(
&self,
ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> {
self.inner
.update(ctx, db, manifest, config_overrides, value, receipts)
.update(ctx, manifest, config_overrides, value)
.await
}
fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
@@ -201,17 +198,15 @@ where
fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath> {
self.inner.validate(manifest)
}
async fn update<Db: DbHandle>(
async fn update(
&self,
ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> {
self.inner
.update(ctx, db, manifest, config_overrides, value, receipts)
.update(ctx, manifest, config_overrides, value)
.await
}
fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
@@ -279,17 +274,15 @@ where
fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath> {
self.inner.validate(manifest)
}
async fn update<Db: DbHandle>(
async fn update(
&self,
ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> {
self.inner
.update(ctx, db, manifest, config_overrides, value, receipts)
.update(ctx, manifest, config_overrides, value)
.await
}
fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
@@ -394,48 +387,22 @@ impl ValueSpec for ValueSpecAny {
ValueSpecAny::Pointer(a) => a.validate(manifest),
}
}
async fn update<Db: DbHandle>(
async fn update(
&self,
ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> {
match self {
ValueSpecAny::Boolean(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecAny::Enum(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecAny::List(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecAny::Number(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecAny::Object(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecAny::String(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecAny::Union(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecAny::Pointer(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecAny::Boolean(a) => a.update(ctx, manifest, config_overrides, value).await,
ValueSpecAny::Enum(a) => a.update(ctx, manifest, config_overrides, value).await,
ValueSpecAny::List(a) => a.update(ctx, manifest, config_overrides, value).await,
ValueSpecAny::Number(a) => a.update(ctx, manifest, config_overrides, value).await,
ValueSpecAny::Object(a) => a.update(ctx, manifest, config_overrides, value).await,
ValueSpecAny::String(a) => a.update(ctx, manifest, config_overrides, value).await,
ValueSpecAny::Union(a) => a.update(ctx, manifest, config_overrides, value).await,
ValueSpecAny::Pointer(a) => a.update(ctx, manifest, config_overrides, value).await,
}
}
fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
@@ -513,14 +480,12 @@ impl ValueSpec for ValueSpecBoolean {
fn validate(&self, _manifest: &Manifest) -> Result<(), NoMatchWithPath> {
Ok(())
}
async fn update<Db: DbHandle>(
async fn update(
&self,
_ctx: &RpcContext,
_db: &mut Db,
_manifest: &Manifest,
_config_overrides: &BTreeMap<PackageId, Config>,
_value: &mut Value,
_receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> {
Ok(())
}
@@ -584,7 +549,7 @@ impl ValueSpec for ValueSpecEnum {
fn matches(&self, val: &Value) -> Result<(), NoMatchWithPath> {
match val {
Value::String(b) => {
if self.values.contains(b) {
if self.values.contains(&**b) {
Ok(())
} else {
Err(NoMatchWithPath::new(MatchError::Enum(
@@ -603,14 +568,12 @@ impl ValueSpec for ValueSpecEnum {
fn validate(&self, _manifest: &Manifest) -> Result<(), NoMatchWithPath> {
Ok(())
}
async fn update<Db: DbHandle>(
async fn update(
&self,
_ctx: &RpcContext,
_db: &mut Db,
_manifest: &Manifest,
_config_overrides: &BTreeMap<PackageId, Config>,
_value: &mut Value,
_receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> {
Ok(())
}
@@ -628,7 +591,7 @@ impl ValueSpec for ValueSpecEnum {
}
}
impl DefaultableWith for ValueSpecEnum {
type DefaultSpec = String;
type DefaultSpec = Arc<String>;
type Error = crate::util::Never;
fn gen_with<R: Rng + CryptoRng + Sync + Send>(
@@ -666,13 +629,13 @@ where
.map(|(i, v)| {
self.spec
.matches(v)
.map_err(|e| e.prepend(format!("{}", i)))?;
.map_err(|e| e.prepend(InternedString::from_display(&i)))?;
if l.iter()
.enumerate()
.any(|(i2, v2)| i != i2 && self.spec.eq(v, v2))
{
Err(NoMatchWithPath::new(MatchError::ListUniquenessViolation)
.prepend(format!("{}", i)))
.prepend(InternedString::from_display(&i)))
} else {
Ok(())
}
@@ -690,25 +653,19 @@ where
fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath> {
self.spec.validate(manifest)
}
async fn update<Db: DbHandle>(
async fn update(
&self,
ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> {
if let Value::Array(ref mut ls) = value {
for (i, val) in ls.into_iter().enumerate() {
match self
.spec
.update(ctx, db, manifest, config_overrides, val, receipts)
.await
{
Err(ConfigurationError::NoMatch(e)) => {
Err(ConfigurationError::NoMatch(e.prepend(format!("{}", i))))
}
for (i, val) in ls.iter_mut().enumerate() {
match self.spec.update(ctx, manifest, config_overrides, val).await {
Err(ConfigurationError::NoMatch(e)) => Err(ConfigurationError::NoMatch(
e.prepend(InternedString::from_display(&i)),
)),
a => a,
}?;
}
@@ -755,9 +712,9 @@ where
rng: &mut R,
timeout: &Option<Duration>,
) -> Result<Value, Self::Error> {
let mut res = Vec::new();
let mut res = Vector::new();
for spec_member in spec.iter() {
res.push(self.spec.gen_with(spec_member, rng, timeout)?);
res.push_back(self.spec.gen_with(spec_member, rng, timeout)?);
}
Ok(Value::Array(res))
}
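`Value::Array` is now backed by `imbl::Vector`, hence `push_back` in place of `push`; the same change in isolation:

use imbl::Vector;

// imbl's persistent vector appends with push_back rather than push.
let mut res: Vector<Value> = Vector::new();
res.push_back(Value::Null);
let arr = Value::Array(res);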
@@ -798,36 +755,19 @@ impl ValueSpec for ValueSpecList {
ValueSpecList::Union(a) => a.validate(manifest),
}
}
async fn update<Db: DbHandle>(
async fn update(
&self,
ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> {
match self {
ValueSpecList::Enum(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecList::Number(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecList::Object(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecList::String(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecList::Union(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecList::Enum(a) => a.update(ctx, manifest, config_overrides, value).await,
ValueSpecList::Number(a) => a.update(ctx, manifest, config_overrides, value).await,
ValueSpecList::Object(a) => a.update(ctx, manifest, config_overrides, value).await,
ValueSpecList::String(a) => a.update(ctx, manifest, config_overrides, value).await,
ValueSpecList::Union(a) => a.update(ctx, manifest, config_overrides, value).await,
}
}
fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
@@ -885,7 +825,7 @@ impl Defaultable for ValueSpecList {
)
.contains(&ret.len())
{
ret.push(
ret.push_back(
a.inner
.inner
.spec
@@ -941,14 +881,12 @@ impl ValueSpec for ValueSpecNumber {
fn validate(&self, _manifest: &Manifest) -> Result<(), NoMatchWithPath> {
Ok(())
}
async fn update<Db: DbHandle>(
async fn update(
&self,
_ctx: &RpcContext,
_db: &mut Db,
_manifest: &Manifest,
_config_overrides: &BTreeMap<PackageId, Config>,
_value: &mut Value,
_receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> {
Ok(())
}
@@ -1005,19 +943,15 @@ impl ValueSpec for ValueSpecObject {
fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath> {
self.spec.validate(manifest)
}
async fn update<Db: DbHandle>(
async fn update(
&self,
ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> {
if let Value::Object(o) = value {
self.spec
.update(ctx, db, manifest, config_overrides, o, receipts)
.await
self.spec.update(ctx, manifest, config_overrides, o).await
} else {
Err(ConfigurationError::NoMatch(NoMatchWithPath::new(
MatchError::InvalidType("object", value.type_of()),
@@ -1074,11 +1008,11 @@ impl Defaultable for ValueSpecObject {
}
#[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct ConfigSpec(pub IndexMap<String, ValueSpecAny>);
pub struct ConfigSpec(pub IndexMap<InternedString, ValueSpecAny>);
impl ConfigSpec {
pub fn matches(&self, value: &Config) -> Result<(), NoMatchWithPath> {
for (key, val) in self.0.iter() {
if let Some(v) = value.get(key) {
if let Some(v) = value.get(&**key) {
val.matches(v).map_err(|e| e.prepend(key.clone()))?;
} else {
val.matches(&Value::Null)
@@ -1108,27 +1042,21 @@ impl ConfigSpec {
Ok(())
}
pub async fn update<Db: DbHandle>(
pub async fn update(
&self,
ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>,
cfg: &mut Config,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> {
for (k, vs) in self.0.iter() {
match cfg.get_mut(k) {
None => {
let mut v = Value::Null;
vs.update(ctx, db, manifest, config_overrides, &mut v, receipts)
.await?;
vs.update(ctx, manifest, config_overrides, &mut v).await?;
cfg.insert(k.clone(), v);
}
Some(v) => match vs
.update(ctx, db, manifest, config_overrides, v, receipts)
.await
{
Some(v) => match vs.update(ctx, manifest, config_overrides, v).await {
Err(ConfigurationError::NoMatch(e)) => {
Err(ConfigurationError::NoMatch(e.prepend(k.clone())))
}
@@ -1247,7 +1175,7 @@ impl<'de> Deserialize<'de> for ValueSpecString {
})
}
}
const FIELDS: &'static [&'static str] = &[
const FIELDS: &[&str] = &[
"pattern",
"pattern-description",
"textarea",
@@ -1268,7 +1196,7 @@ impl ValueSpec for ValueSpecString {
Ok(())
} else {
Err(NoMatchWithPath::new(MatchError::Pattern(
s.to_owned(),
s.clone(),
pattern.pattern.clone(),
)))
}
@@ -1286,14 +1214,12 @@ impl ValueSpec for ValueSpecString {
fn validate(&self, _manifest: &Manifest) -> Result<(), NoMatchWithPath> {
Ok(())
}
async fn update<Db: DbHandle>(
async fn update(
&self,
_ctx: &RpcContext,
_db: &mut Db,
_manifest: &Manifest,
_config_overrides: &BTreeMap<PackageId, Config>,
_value: &mut Value,
_receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> {
Ok(())
}
@@ -1352,11 +1278,11 @@ pub enum DefaultString {
Entropy(Entropy),
}
impl DefaultString {
pub fn gen<R: Rng + CryptoRng + Sync + Send>(&self, rng: &mut R) -> String {
match self {
pub fn gen<R: Rng + CryptoRng + Sync + Send>(&self, rng: &mut R) -> Arc<String> {
Arc::new(match self {
DefaultString::Literal(s) => s.clone(),
DefaultString::Entropy(e) => e.gen(rng),
}
})
}
}
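`DefaultString::gen` now allocates once and returns a shared `Arc<String>`; a call sketch using `StdRng`, which satisfies `Rng + CryptoRng + Send + Sync` and is seeded via `from_entropy` elsewhere in this diff:

use rand::rngs::StdRng;
use rand::SeedableRng;

// Hedged: a Literal default clones the stored string; Entropy would generate one.
let mut rng = StdRng::from_entropy();
let s: std::sync::Arc<String> = DefaultString::Literal("hunter2".into()).gen(&mut rng);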
@@ -1380,7 +1306,7 @@ impl Entropy {
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct UnionTag {
pub id: String,
pub id: InternedString,
pub name: String,
pub description: Option<String>,
pub variant_names: BTreeMap<String, String>,
@@ -1401,7 +1327,7 @@ impl<'de> serde::de::Deserialize<'de> for ValueSpecUnion {
#[serde(rename_all = "kebab-case")]
#[serde(untagged)]
pub enum _UnionTag {
Old(String),
Old(InternedString),
New(UnionTag),
}
#[derive(Deserialize)]
@@ -1419,7 +1345,7 @@ impl<'de> serde::de::Deserialize<'de> for ValueSpecUnion {
tag: match u.tag {
_UnionTag::Old(id) => UnionTag {
id: id.clone(),
name: id,
name: id.to_string(),
description: None,
variant_names: u
.variants
@@ -1461,10 +1387,10 @@ impl ValueSpec for ValueSpecUnion {
fn matches(&self, value: &Value) -> Result<(), NoMatchWithPath> {
match value {
Value::Object(o) => {
if let Some(Value::String(ref tag)) = o.get(&self.tag.id) {
if let Some(obj_spec) = self.variants.get(tag) {
if let Some(Value::String(ref tag)) = o.get(&*self.tag.id) {
if let Some(obj_spec) = self.variants.get(&**tag) {
let mut without_tag = o.clone();
without_tag.remove(&self.tag.id);
without_tag.remove(&*self.tag.id);
obj_spec.matches(&without_tag)
} else {
Err(NoMatchWithPath::new(MatchError::Union(
@@ -1487,7 +1413,7 @@ impl ValueSpec for ValueSpecUnion {
}
fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath> {
for (name, variant) in &self.variants {
if variant.0.get(&self.tag.id).is_some() {
if variant.0.get(&*self.tag.id).is_some() {
return Err(NoMatchWithPath::new(MatchError::PropertyMatchesUnionTag(
self.tag.id.clone(),
name.clone(),
@@ -1497,28 +1423,23 @@ impl ValueSpec for ValueSpecUnion {
}
Ok(())
}
async fn update<Db: DbHandle>(
async fn update(
&self,
ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> {
if let Value::Object(o) = value {
match o.get(&self.tag.id) {
match o.get(&*self.tag.id) {
None => Err(ConfigurationError::NoMatch(NoMatchWithPath::new(
MatchError::MissingTag(self.tag.id.clone()),
))),
Some(Value::String(tag)) => match self.variants.get(tag) {
Some(Value::String(tag)) => match self.variants.get(&**tag) {
None => Err(ConfigurationError::NoMatch(NoMatchWithPath::new(
MatchError::Union(tag.clone(), self.variants.keys().cloned().collect()),
))),
Some(spec) => {
spec.update(ctx, db, manifest, config_overrides, o, receipts)
.await
}
Some(spec) => spec.update(ctx, manifest, config_overrides, o).await,
},
Some(other) => Err(ConfigurationError::NoMatch(
NoMatchWithPath::new(MatchError::InvalidType("string", other.type_of()))
@@ -1533,11 +1454,11 @@ impl ValueSpec for ValueSpecUnion {
}
fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
if let Value::Object(o) = value {
match o.get(&self.tag.id) {
match o.get(&*self.tag.id) {
None => Err(NoMatchWithPath::new(MatchError::MissingTag(
self.tag.id.clone(),
))),
Some(Value::String(tag)) => match self.variants.get(tag) {
Some(Value::String(tag)) => match self.variants.get(&**tag) {
None => Err(NoMatchWithPath::new(MatchError::Union(
tag.clone(),
self.variants.keys().cloned().collect(),
@@ -1559,8 +1480,8 @@ impl ValueSpec for ValueSpecUnion {
}
fn requires(&self, id: &PackageId, value: &Value) -> bool {
if let Value::Object(o) = value {
match o.get(&self.tag.id) {
Some(Value::String(tag)) => match self.variants.get(tag) {
match o.get(&*self.tag.id) {
Some(Value::String(tag)) => match self.variants.get(&**tag) {
None => false,
Some(spec) => spec.requires(id, o),
},
@@ -1578,7 +1499,7 @@ impl ValueSpec for ValueSpecUnion {
}
}
impl DefaultableWith for ValueSpecUnion {
type DefaultSpec = String;
type DefaultSpec = Arc<String>;
type Error = ConfigurationError;
fn gen_with<R: Rng + CryptoRng + Sync + Send>(
@@ -1587,7 +1508,7 @@ impl DefaultableWith for ValueSpecUnion {
rng: &mut R,
timeout: &Option<Duration>,
) -> Result<Value, Self::Error> {
let variant = if let Some(v) = self.variants.get(spec) {
let variant = if let Some(v) = self.variants.get(&**spec) {
v
} else {
return Err(ConfigurationError::NoMatch(NoMatchWithPath::new(
@@ -1643,24 +1564,16 @@ impl ValueSpec for ValueSpecPointer {
ValueSpecPointer::System(a) => a.validate(manifest),
}
}
async fn update<Db: DbHandle>(
async fn update(
&self,
ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> {
match self {
ValueSpecPointer::Package(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecPointer::System(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecPointer::Package(a) => a.update(ctx, manifest, config_overrides, value).await,
ValueSpecPointer::System(a) => a.update(ctx, manifest, config_overrides, value).await,
}
}
fn pointers(&self, _value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
@@ -1697,23 +1610,17 @@ impl PackagePointerSpec {
PackagePointerSpec::Config(ConfigPointer { package_id, .. }) => package_id,
}
}
async fn deref<Db: DbHandle>(
async fn deref(
&self,
ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>,
receipts: &ConfigPointerReceipts,
) -> Result<Value, ConfigurationError> {
match &self {
PackagePointerSpec::TorKey(key) => key.deref(&manifest.id, &ctx.secret_store).await,
PackagePointerSpec::TorAddress(tor) => {
tor.deref(db, &receipts.interface_addresses_receipt).await
}
PackagePointerSpec::LanAddress(lan) => {
lan.deref(db, &receipts.interface_addresses_receipt).await
}
PackagePointerSpec::Config(cfg) => cfg.deref(ctx, db, config_overrides, receipts).await,
PackagePointerSpec::TorAddress(tor) => tor.deref(ctx).await,
PackagePointerSpec::LanAddress(lan) => lan.deref(ctx).await,
PackagePointerSpec::Config(cfg) => cfg.deref(ctx, config_overrides).await,
}
}
}
@@ -1754,18 +1661,14 @@ impl ValueSpec for PackagePointerSpec {
_ => Ok(()),
}
}
async fn update<Db: DbHandle>(
async fn update(
&self,
ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> {
*value = self
.deref(ctx, db, manifest, config_overrides, receipts)
.await?;
*value = self.deref(ctx, manifest, config_overrides).await?;
Ok(())
}
fn pointers(&self, _value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
@@ -1788,18 +1691,19 @@ pub struct TorAddressPointer {
interface: InterfaceId,
}
impl TorAddressPointer {
async fn deref<Db: DbHandle>(
&self,
db: &mut Db,
receipt: &InterfaceAddressesReceipt,
) -> Result<Value, ConfigurationError> {
let addr = receipt
.interface_addresses
.get(db, (&self.package_id, &self.interface))
async fn deref(&self, ctx: &RpcContext) -> Result<Value, ConfigurationError> {
let addr = ctx
.db
.peek()
.await
.map_err(|e| ConfigurationError::SystemError(Error::from(e)))?
.and_then(|addresses| addresses.tor_address);
Ok(addr.to_owned().map(Value::String).unwrap_or(Value::Null))
.as_package_data()
.as_idx(&self.package_id)
.and_then(|pde| pde.as_installed())
.and_then(|i| i.as_interface_addresses().as_idx(&self.interface))
.and_then(|a| a.as_tor_address().de().transpose())
.transpose()
.map_err(|e| ConfigurationError::SystemError(e))?;
Ok(addr.map(Arc::new).map(Value::String).unwrap_or(Value::Null))
}
}
impl fmt::Display for TorAddressPointer {
@@ -1813,39 +1717,6 @@ impl fmt::Display for TorAddressPointer {
}
}
pub struct InterfaceAddressesReceipt {
interface_addresses: LockReceipt<crate::db::model::InterfaceAddresses, (String, String)>,
}
impl InterfaceAddressesReceipt {
pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
let mut locks = Vec::new();
let setup = Self::setup(&mut locks);
Ok(setup(&db.lock_all(locks).await?)?)
}
pub fn setup(
locks: &mut Vec<patch_db::LockTargetId>,
) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
// let cleanup_receipts = CleanupFailedReceipts::setup(locks);
let interface_addresses = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.interface_addresses().star())
.make_locker(LockType::Read)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
// cleanup_receipts: cleanup_receipts(skeleton_key)?,
interface_addresses: interface_addresses.verify(skeleton_key)?,
})
}
}
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct LanAddressPointer {
@@ -1862,73 +1733,26 @@ impl fmt::Display for LanAddressPointer {
}
}
impl LanAddressPointer {
async fn deref<Db: DbHandle>(
&self,
db: &mut Db,
receipts: &InterfaceAddressesReceipt,
) -> Result<Value, ConfigurationError> {
let addr = receipts
.interface_addresses
.get(db, (&self.package_id, &self.interface))
async fn deref(&self, ctx: &RpcContext) -> Result<Value, ConfigurationError> {
let addr = ctx
.db
.peek()
.await
.ok()
.flatten()
.and_then(|x| x.lan_address);
Ok(addr.to_owned().map(Value::String).unwrap_or(Value::Null))
.as_package_data()
.as_idx(&self.package_id)
.and_then(|pde| pde.as_installed())
.and_then(|i| i.as_interface_addresses().as_idx(&self.interface))
.and_then(|a| a.as_lan_address().de().transpose())
.transpose()
.map_err(|e| ConfigurationError::SystemError(e))?;
Ok(addr
.to_owned()
.map(Arc::new)
.map(Value::String)
.unwrap_or(Value::Null))
}
}
pub struct ConfigPointerReceipts {
interface_addresses_receipt: InterfaceAddressesReceipt,
manifest_volumes: LockReceipt<crate::volume::Volumes, String>,
manifest_version: LockReceipt<crate::util::Version, String>,
config_actions: LockReceipt<super::action::ConfigActions, String>,
}
impl ConfigPointerReceipts {
pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
let mut locks = Vec::new();
let setup = Self::setup(&mut locks);
Ok(setup(&db.lock_all(locks).await?)?)
}
pub fn setup(
locks: &mut Vec<patch_db::LockTargetId>,
) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
let interface_addresses_receipt = InterfaceAddressesReceipt::setup(locks);
let manifest_volumes = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.manifest().volumes())
.make_locker(LockType::Read)
.add_to_keys(locks);
let manifest_version = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.manifest().version())
.make_locker(LockType::Read)
.add_to_keys(locks);
let config_actions = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.and_then(|x| x.manifest().config())
.make_locker(LockType::Read)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
interface_addresses_receipt: interface_addresses_receipt(skeleton_key)?,
manifest_volumes: manifest_volumes.verify(skeleton_key)?,
config_actions: config_actions.verify(skeleton_key)?,
manifest_version: manifest_version.verify(skeleton_key)?,
})
}
}
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct ConfigPointer {
@@ -1940,25 +1764,34 @@ impl ConfigPointer {
pub fn select(&self, val: &Value) -> Value {
self.selector.select(self.multi, val)
}
async fn deref<Db: DbHandle>(
async fn deref(
&self,
ctx: &RpcContext,
db: &mut Db,
config_overrides: &BTreeMap<PackageId, Config>,
receipts: &ConfigPointerReceipts,
) -> Result<Value, ConfigurationError> {
if let Some(cfg) = config_overrides.get(&self.package_id) {
Ok(self.select(&Value::Object(cfg.clone())))
} else {
let id = &self.package_id;
let version = receipts.manifest_version.get(db, id).await.ok().flatten();
let cfg_actions = receipts.config_actions.get(db, id).await.ok().flatten();
let volumes = receipts.manifest_volumes.get(db, id).await.ok().flatten();
if let (Some(version), Some(cfg_actions), Some(volumes)) =
(&version, &cfg_actions, &volumes)
{
let db = ctx.db.peek().await;
let manifest = db.as_package_data().as_idx(id).map(|pde| pde.as_manifest());
let cfg_actions = manifest.and_then(|m| m.as_config().transpose_ref());
if let (Some(manifest), Some(cfg_actions)) = (manifest, cfg_actions) {
let cfg_res = cfg_actions
.get(ctx, &self.package_id, version, volumes)
.de()
.map_err(|e| ConfigurationError::SystemError(e))?
.get(
ctx,
&self.package_id,
&manifest
.as_version()
.de()
.map_err(|e| ConfigurationError::SystemError(e))?,
&manifest
.as_volumes()
.de()
.map_err(|e| ConfigurationError::SystemError(e))?,
)
.await
.map_err(|e| ConfigurationError::SystemError(e))?;
if let Some(cfg) = cfg_res.config {
@@ -1990,7 +1823,7 @@ pub struct ConfigSelector {
}
impl ConfigSelector {
fn select(&self, multi: bool, val: &Value) -> Value {
let selected = self.compiled.select(&val).ok().unwrap_or_else(Vec::new);
let selected = self.compiled.select(&val).ok().unwrap_or_else(Vector::new);
if multi {
Value::Array(selected.into_iter().cloned().collect())
} else {
@@ -2061,18 +1894,19 @@ impl TorKeyPointer {
));
}
let key = Key::for_interface(
&mut secrets
secrets
.acquire()
.await
.map_err(|e| ConfigurationError::SystemError(e.into()))?,
.map_err(|e| ConfigurationError::SystemError(e.into()))?
.as_mut(),
Some((self.package_id.clone(), self.interface.clone())),
)
.await
.map_err(ConfigurationError::SystemError)?;
Ok(Value::String(base32::encode(
Ok(Value::String(Arc::new(base32::encode(
base32::Alphabet::RFC4648 { padding: false },
&key.tor_key().as_bytes(),
)))
))))
}
}
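The alphabet choice above is deliberate: RFC 4648 base32 without padding, so the encoded key contains no `=` characters. A standalone sketch of the call (the byte slice stands in for `key.tor_key().as_bytes()`):

fn encode_tor_key(key_bytes: &[u8]) -> String {
    base32::encode(base32::Alphabet::RFC4648 { padding: false }, key_bytes)
}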
impl fmt::Display for TorKeyPointer {
@@ -2092,7 +1926,7 @@ impl fmt::Display for SystemPointerSpec {
}
}
impl SystemPointerSpec {
async fn deref<Db: DbHandle>(&self, _db: &mut Db) -> Result<Value, ConfigurationError> {
async fn deref(&self, _ctx: &RpcContext) -> Result<Value, ConfigurationError> {
#[allow(unreachable_code)]
Ok(match *self {})
}
@@ -2115,17 +1949,14 @@ impl ValueSpec for SystemPointerSpec {
fn validate(&self, _manifest: &Manifest) -> Result<(), NoMatchWithPath> {
Ok(())
}
async fn update<Db: DbHandle>(
async fn update(
&self,
_ctx: &RpcContext,
db: &mut Db,
ctx: &RpcContext,
_manifest: &Manifest,
_config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value,
_receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> {
*value = self.deref(db).await?;
*value = self.deref(ctx).await?;
Ok(())
}
fn pointers(&self, _value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {


@@ -1,9 +1,9 @@
use std::borrow::Cow;
use std::ops::{Bound, RangeBounds, RangeInclusive};
use patch_db::Value;
use rand::distributions::Distribution;
use rand::Rng;
use serde_json::Value;
use super::Config;
@@ -321,7 +321,7 @@ impl UniqueBy {
match self {
UniqueBy::Any(any) => any.iter().any(|u| u.eq(lhs, rhs)),
UniqueBy::All(all) => all.iter().all(|u| u.eq(lhs, rhs)),
UniqueBy::Exactly(key) => lhs.get(key) == rhs.get(key),
UniqueBy::Exactly(key) => lhs.get(&**key) == rhs.get(&**key),
UniqueBy::NotUnique => false,
}
}


@@ -6,8 +6,7 @@ use std::sync::Arc;
use clap::ArgMatches;
use color_eyre::eyre::eyre;
use cookie::Cookie;
use cookie_store::CookieStore;
use cookie_store::{CookieStore, RawCookie};
use josekit::jwk::Jwk;
use reqwest::Proxy;
use reqwest_cookie_store::CookieStoreMutex;
@@ -17,12 +16,11 @@ use rpc_toolkit::Context;
use serde::Deserialize;
use tracing::instrument;
use super::setup::CURRENT_SECRET;
use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH;
use crate::util::config::{load_config_from_paths, local_config_path};
use crate::ResultExt;
use super::setup::CURRENT_SECRET;
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct CliContextConfig {
@@ -112,7 +110,10 @@ impl CliContext {
};
if let Ok(local) = std::fs::read_to_string(LOCAL_AUTH_COOKIE_PATH) {
store
.insert_raw(&Cookie::new("local", local), &"http://localhost".parse()?)
.insert_raw(
&RawCookie::new("local", local),
&"http://localhost".parse()?,
)
.with_kind(crate::ErrorKind::Network)?;
}
store


@@ -4,40 +4,42 @@ use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;
use bollard::Docker;
use helpers::to_tmp_path;
use josekit::jwk::Jwk;
use patch_db::json_ptr::JsonPointer;
use patch_db::{DbHandle, LockReceipt, LockType, PatchDb};
use reqwest::Url;
use patch_db::PatchDb;
use reqwest::{Client, Proxy, Url};
use rpc_toolkit::Context;
use serde::Deserialize;
use sqlx::postgres::PgConnectOptions;
use sqlx::PgPool;
use tokio::sync::{broadcast, oneshot, Mutex, RwLock};
use tokio::time::Instant;
use tracing::instrument;
use super::setup::CURRENT_SECRET;
use crate::account::AccountInfo;
use crate::core::rpc_continuations::{RequestGuid, RestHandler, RpcContinuation};
use crate::db::model::{Database, InstalledPackageDataEntry, PackageDataEntry};
use crate::db::model::{CurrentDependents, Database, PackageDataEntryMatchModelRef};
use crate::db::prelude::PatchDbExt;
use crate::dependencies::compute_dependency_config_errs;
use crate::disk::OsPartitionInfo;
use crate::init::{init_postgres, pgloader};
use crate::install::cleanup::{cleanup_failed, uninstall, CleanupFailedReceipts};
use crate::init::init_postgres;
use crate::install::cleanup::{cleanup_failed, uninstall};
use crate::manager::ManagerMap;
use crate::middleware::auth::HashSessionToken;
use crate::net::net_controller::NetController;
use crate::net::ssl::SslManager;
use crate::net::ssl::{root_ca_start_time, SslManager};
use crate::net::wifi::WpaCli;
use crate::notifications::NotificationManager;
use crate::shutdown::Shutdown;
use crate::status::{MainStatus, Status};
use crate::status::MainStatus;
use crate::system::get_mem_info;
use crate::util::config::load_config_from_paths;
use crate::util::lshw::{lshw, LshwDevice};
use crate::{Error, ErrorKind, ResultExt};
use super::setup::CURRENT_SECRET;
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct RpcContextConfig {
@@ -96,15 +98,6 @@ impl RpcContextConfig {
.run(&secret_store)
.await
.with_kind(crate::ErrorKind::Database)?;
let old_db_path = self.datadir().join("main/secrets.db");
if tokio::fs::metadata(&old_db_path).await.is_ok() {
pgloader(
&old_db_path,
self.migration_batch_rows.unwrap_or(25000),
self.migration_prefetch_rows.unwrap_or(100_000),
)
.await?;
}
Ok(secret_store)
}
}
@@ -119,7 +112,6 @@ pub struct RpcContextSeed {
pub db: PatchDb,
pub secret_store: PgPool,
pub account: RwLock<AccountInfo>,
pub docker: Docker,
pub net_controller: Arc<NetController>,
pub managers: ManagerMap,
pub metrics_cache: RwLock<Option<crate::system::Metrics>>,
@@ -130,51 +122,21 @@ pub struct RpcContextSeed {
pub rpc_stream_continuations: Mutex<BTreeMap<RequestGuid, RpcContinuation>>,
pub wifi_manager: Option<Arc<RwLock<WpaCli>>>,
pub current_secret: Arc<Jwk>,
pub client: Client,
pub hardware: Hardware,
pub start_time: Instant,
}
pub struct RpcCleanReceipts {
cleanup_receipts: CleanupFailedReceipts,
packages: LockReceipt<crate::db::model::AllPackageData, ()>,
package: LockReceipt<crate::db::model::PackageDataEntry, String>,
}
impl RpcCleanReceipts {
pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
let mut locks = Vec::new();
let setup = Self::setup(&mut locks);
Ok(setup(&db.lock_all(locks).await?)?)
}
pub fn setup(
locks: &mut Vec<patch_db::LockTargetId>,
) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
let cleanup_receipts = CleanupFailedReceipts::setup(locks);
let packages = crate::db::DatabaseModel::new()
.package_data()
.make_locker(LockType::Write)
.add_to_keys(locks);
let package = crate::db::DatabaseModel::new()
.package_data()
.star()
.make_locker(LockType::Write)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
cleanup_receipts: cleanup_receipts(skeleton_key)?,
packages: packages.verify(skeleton_key)?,
package: package.verify(skeleton_key)?,
})
}
}
pub struct Hardware {
pub devices: Vec<LshwDevice>,
pub ram: u64,
}
#[derive(Clone)]
pub struct RpcContext(Arc<RpcContextSeed>);
impl RpcContext {
#[instrument(skip_all)]
pub async fn init<P: AsRef<Path> + Send + 'static>(
pub async fn init<P: AsRef<Path> + Send + Sync + 'static>(
cfg_path: Option<P>,
disk_guid: Arc<String>,
) -> Result<Self, Error> {
@@ -190,18 +152,15 @@ impl RpcContext {
let account = AccountInfo::load(&secret_store).await?;
let db = base.db(&account).await?;
tracing::info!("Opened PatchDB");
let mut docker = Docker::connect_with_unix_defaults()?;
docker.set_timeout(Duration::from_secs(600));
tracing::info!("Connected to Docker");
let net_controller = Arc::new(
NetController::init(
base.tor_control
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
tor_proxy,
base.dns_bind
.as_ref()
.map(|v| v.as_slice())
.as_deref()
.unwrap_or(&[SocketAddr::from(([127, 0, 0, 1], 53))]),
SslManager::new(&account)?,
SslManager::new(&account, root_ca_start_time().await?)?,
&account.hostname,
&account.key,
)
@@ -209,9 +168,12 @@ impl RpcContext {
);
tracing::info!("Initialized Net Controller");
let managers = ManagerMap::default();
let metrics_cache = RwLock::new(None);
let metrics_cache = RwLock::<Option<crate::system::Metrics>>::new(None);
let notification_manager = NotificationManager::new(secret_store.clone());
tracing::info!("Initialized Notification Manager");
let tor_proxy_url = format!("socks5h://{tor_proxy}");
let devices = lshw().await?;
let ram = get_mem_info().await?.total.0 as u64 * 1024 * 1024;
let seed = Arc::new(RpcContextSeed {
is_closed: AtomicBool::new(false),
datadir: base.datadir().to_path_buf(),
@@ -222,7 +184,6 @@ impl RpcContext {
db,
secret_store,
account: RwLock::new(account),
docker,
net_controller,
managers,
metrics_cache,
@@ -244,19 +205,23 @@ impl RpcContext {
)
})?,
),
client: Client::builder()
.proxy(Proxy::custom(move |url| {
if url.host_str().map_or(false, |h| h.ends_with(".onion")) {
Some(tor_proxy_url.clone())
} else {
None
}
}))
.build()
.with_kind(crate::ErrorKind::ParseUrl)?,
hardware: Hardware { devices, ram },
start_time: Instant::now(),
});
let res = Self(seed);
res.cleanup().await?;
let res = Self(seed.clone());
res.cleanup_and_initialize().await?;
tracing::info!("Cleaned up transient states");
res.managers
.init(
&res,
&mut res.db.handle(),
&mut res.secret_store.acquire().await?,
)
.await?;
tracing::info!("Initialized Package Managers");
Ok(res)
}
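A sketch of the selective Tor proxying configured above: only `.onion` hosts are routed through the SOCKS proxy, everything else connects directly. The proxy address here is an assumption for illustration; the real one is derived from `tor_proxy`.

fn tor_aware_client() -> reqwest::Result<reqwest::Client> {
    let tor_proxy_url = "socks5h://127.0.0.1:9050".to_string();
    reqwest::Client::builder()
        .proxy(reqwest::Proxy::custom(move |url| {
            if url.host_str().map_or(false, |h| h.ends_with(".onion")) {
                Some(tor_proxy_url.clone()) // route through Tor
            } else {
                None // direct connection
            }
        }))
        .build()
}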
@@ -270,81 +235,150 @@ impl RpcContext {
Ok(())
}
#[instrument(skip_all)]
pub async fn cleanup(&self) -> Result<(), Error> {
let mut db = self.db.handle();
let receipts = RpcCleanReceipts::new(&mut db).await?;
for (package_id, package) in receipts.packages.get(&mut db).await?.0 {
if let Err(e) = async {
match package {
PackageDataEntry::Installing { .. }
| PackageDataEntry::Restoring { .. }
| PackageDataEntry::Updating { .. } => {
cleanup_failed(self, &mut db, &package_id, &receipts.cleanup_receipts)
.await?;
}
PackageDataEntry::Removing { .. } => {
uninstall(
self,
&mut db,
&mut self.secret_store.acquire().await?,
&package_id,
)
.await?;
}
PackageDataEntry::Installed {
installed,
static_files,
manifest,
} => {
for (volume_id, volume_info) in &*manifest.volumes {
let tmp_path = to_tmp_path(volume_info.path_for(
&self.datadir,
&package_id,
&manifest.version,
&volume_id,
))
.with_kind(ErrorKind::Filesystem)?;
if tokio::fs::metadata(&tmp_path).await.is_ok() {
tokio::fs::remove_dir_all(&tmp_path).await?;
}
}
let status = installed.status;
let main = match status.main {
MainStatus::BackingUp { started, .. } => {
if let Some(_) = started {
MainStatus::Starting { restarting: false }
} else {
MainStatus::Stopped
}
}
MainStatus::Running { .. } => {
MainStatus::Starting { restarting: false }
}
a => a.clone(),
};
let new_package = PackageDataEntry::Installed {
installed: InstalledPackageDataEntry {
status: Status { main, ..status },
..installed
},
static_files,
manifest,
};
receipts
.package
.set(&mut db, new_package, &package_id)
.await?;
#[instrument(skip(self))]
pub async fn cleanup_and_initialize(&self) -> Result<(), Error> {
self.db
.mutate(|f| {
let mut current_dependents = f
.as_package_data()
.keys()?
.into_iter()
.map(|k| (k.clone(), BTreeMap::new()))
.collect::<BTreeMap<_, _>>();
for (package_id, package) in f.as_package_data_mut().as_entries_mut()? {
for (k, v) in package
.as_installed_mut()
.into_iter()
.flat_map(|i| i.clone().into_current_dependencies().into_entries())
.flatten()
{
let mut entry: BTreeMap<_, _> =
current_dependents.remove(&k).unwrap_or_default();
entry.insert(package_id.clone(), v.de()?);
current_dependents.insert(k, entry);
}
}
Ok::<_, Error>(())
}
.await
{
for (package_id, current_dependents) in current_dependents {
if let Some(deps) = f
.as_package_data_mut()
.as_idx_mut(&package_id)
.and_then(|pde| pde.expect_as_installed_mut().ok())
.map(|i| i.as_installed_mut().as_current_dependents_mut())
{
deps.ser(&CurrentDependents(current_dependents))?;
} else if let Some(deps) = f
.as_package_data_mut()
.as_idx_mut(&package_id)
.and_then(|pde| pde.expect_as_removing_mut().ok())
.map(|i| i.as_removing_mut().as_current_dependents_mut())
{
deps.ser(&CurrentDependents(current_dependents))?;
}
}
Ok(())
})
.await?;
let peek = self.db.peek().await;
for (package_id, package) in peek.as_package_data().as_entries()?.into_iter() {
let action = match package.as_match() {
PackageDataEntryMatchModelRef::Installing(_)
| PackageDataEntryMatchModelRef::Restoring(_)
| PackageDataEntryMatchModelRef::Updating(_) => {
cleanup_failed(self, &package_id).await
}
PackageDataEntryMatchModelRef::Removing(_) => {
uninstall(
self,
self.secret_store.acquire().await?.as_mut(),
&package_id,
)
.await
}
PackageDataEntryMatchModelRef::Installed(m) => {
let version = m.as_manifest().as_version().clone().de()?;
let volumes = m.as_manifest().as_volumes().de()?;
for (volume_id, volume_info) in &*volumes {
let tmp_path = to_tmp_path(volume_info.path_for(
&self.datadir,
&package_id,
&version,
volume_id,
))
.with_kind(ErrorKind::Filesystem)?;
if tokio::fs::metadata(&tmp_path).await.is_ok() {
tokio::fs::remove_dir_all(&tmp_path).await?;
}
}
Ok(())
}
_ => continue,
};
if let Err(e) = action {
tracing::error!("Failed to clean up package {}: {}", package_id, e);
tracing::debug!("{:?}", e);
}
}
let peek = self
.db
.mutate(|v| {
for (_, pde) in v.as_package_data_mut().as_entries_mut()? {
let status = pde
.expect_as_installed_mut()?
.as_installed_mut()
.as_status_mut()
.as_main_mut();
let running = status.clone().de()?.running();
status.ser(&if running {
MainStatus::Starting
} else {
MainStatus::Stopped
})?;
}
Ok(v.clone())
})
.await?;
self.managers.init(self.clone(), peek.clone()).await?;
tracing::info!("Initialized Package Managers");
let mut all_dependency_config_errs = BTreeMap::new();
for (package_id, package) in peek.as_package_data().as_entries()?.into_iter() {
let package = package.clone();
if let Some(current_dependencies) = package
.as_installed()
.and_then(|x| x.as_current_dependencies().de().ok())
{
let manifest = package.as_manifest().de()?;
all_dependency_config_errs.insert(
package_id.clone(),
compute_dependency_config_errs(
self,
&peek,
&manifest,
&current_dependencies,
&Default::default(),
)
.await?,
);
}
}
self.db
.mutate(|v| {
for (package_id, errs) in all_dependency_config_errs {
if let Some(config_errors) = v
.as_package_data_mut()
.as_idx_mut(&package_id)
.and_then(|pde| pde.as_installed_mut())
.map(|i| i.as_status_mut().as_dependency_config_errors_mut())
{
config_errors.ser(&errs)?;
}
}
Ok(())
})
.await?;
Ok(())
}
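A reduced model of the dependents rebuild at the top of `cleanup_and_initialize`: each installed package records what it depends on, and startup inverts that into a map of who depends on each package.

use std::collections::{BTreeMap, BTreeSet};

fn invert_dependencies(
    depends_on: &BTreeMap<String, BTreeSet<String>>,
) -> BTreeMap<String, BTreeSet<String>> {
    // seed every known package so packages with no dependents still get an entry
    let mut dependents: BTreeMap<String, BTreeSet<String>> = depends_on
        .keys()
        .map(|k| (k.clone(), BTreeSet::new()))
        .collect();
    for (pkg, deps) in depends_on {
        for dep in deps {
            dependents.entry(dep.clone()).or_default().insert(pkg.clone());
        }
    }
    dependents
}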
@@ -402,7 +436,7 @@ impl RpcContext {
}
impl AsRef<Jwk> for RpcContext {
fn as_ref(&self) -> &Jwk {
&*CURRENT_SECRET
&CURRENT_SECRET
}
}
impl Context for RpcContext {}
@@ -416,7 +450,7 @@ impl Deref for RpcContext {
tracing_error::SpanTrace::capture()
);
}
&*self.0
&self.0
}
}
impl Drop for RpcContext {


@@ -7,8 +7,8 @@ use rpc_toolkit::Context;
use serde::Deserialize;
use tracing::instrument;
use crate::prelude::*;
use crate::util::config::{load_config_from_paths, local_config_path};
use crate::{Error, ResultExt};
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
@@ -50,21 +50,21 @@ impl SdkContext {
}
/// BLOCKING
#[instrument(skip_all)]
pub fn developer_key(&self) -> Result<ed25519_dalek::Keypair, Error> {
pub fn developer_key(&self) -> Result<ed25519_dalek::SigningKey, Error> {
if !self.developer_key_path.exists() {
return Err(Error::new(eyre!("Developer Key does not exist! Please run `embassy-sdk init` before running this command."), crate::ErrorKind::Uninitialized));
return Err(Error::new(eyre!("Developer Key does not exist! Please run `start-sdk init` before running this command."), crate::ErrorKind::Uninitialized));
}
let pair = <ed25519::KeypairBytes as ed25519::pkcs8::DecodePrivateKey>::from_pkcs8_pem(
&std::fs::read_to_string(&self.developer_key_path)?,
)
.with_kind(crate::ErrorKind::Pem)?;
let secret = ed25519_dalek::SecretKey::from_bytes(&pair.secret_key[..])?;
let public = if let Some(public) = pair.public_key {
ed25519_dalek::PublicKey::from_bytes(&public[..])?
} else {
(&secret).into()
};
Ok(ed25519_dalek::Keypair { secret, public })
let secret = ed25519_dalek::SecretKey::try_from(&pair.secret_key[..]).map_err(|_| {
Error::new(
eyre!("pkcs8 key is of incorrect length"),
ErrorKind::OpenSsl,
)
})?;
Ok(secret.into())
}
}
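A sketch of the ed25519-dalek 2.x shape this migrates to: the `Keypair` type is gone, a `SigningKey` is built from 32 secret bytes, and the public half is derived on demand rather than stored.

fn load_signing_key(secret: ed25519_dalek::SecretKey) -> ed25519_dalek::SigningKey {
    let signing = ed25519_dalek::SigningKey::from_bytes(&secret);
    let _verifying = signing.verifying_key(); // derived, not stored
    signing
}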
impl std::ops::Deref for SdkContext {


@@ -17,7 +17,7 @@ use tracing::instrument;
use crate::account::AccountInfo;
use crate::db::model::Database;
use crate::disk::OsPartitionInfo;
use crate::init::{init_postgres, pgloader};
use crate::init::init_postgres;
use crate::setup::SetupStatus;
use crate::util::config::load_config_from_paths;
use crate::{Error, ResultExt};
@@ -45,6 +45,8 @@ pub struct SetupContextConfig {
pub migration_batch_rows: Option<usize>,
pub migration_prefetch_rows: Option<usize>,
pub datadir: Option<PathBuf>,
#[serde(default)]
pub disable_encryption: bool,
}
impl SetupContextConfig {
#[instrument(skip_all)]
@@ -75,6 +77,7 @@ pub struct SetupContextSeed {
pub config_path: Option<PathBuf>,
pub migration_batch_rows: usize,
pub migration_prefetch_rows: usize,
pub disable_encryption: bool,
pub shutdown: Sender<()>,
pub datadir: PathBuf,
pub selected_v2_drive: RwLock<Option<PathBuf>>,
@@ -102,6 +105,7 @@ impl SetupContext {
config_path: path.as_ref().map(|p| p.as_ref().to_owned()),
migration_batch_rows: cfg.migration_batch_rows.unwrap_or(25000),
migration_prefetch_rows: cfg.migration_prefetch_rows.unwrap_or(100_000),
disable_encryption: cfg.disable_encryption,
shutdown,
datadir,
selected_v2_drive: RwLock::new(None),
@@ -132,15 +136,6 @@ impl SetupContext {
.run(&secret_store)
.await
.with_kind(crate::ErrorKind::Database)?;
let old_db_path = self.datadir.join("main/secrets.db");
if tokio::fs::metadata(&old_db_path).await.is_ok() {
pgloader(
&old_db_path,
self.migration_batch_rows,
self.migration_prefetch_rows,
)
.await?;
}
Ok(secret_store)
}
}


@@ -1,208 +1,92 @@
use std::collections::BTreeMap;
use color_eyre::eyre::eyre;
use patch_db::{DbHandle, LockReceipt, LockType};
use rpc_toolkit::command;
use tracing::instrument;
use crate::context::RpcContext;
use crate::dependencies::{
break_all_dependents_transitive, heal_all_dependents_transitive, BreakageRes, DependencyError,
DependencyReceipt, TaggedDependencyError,
};
use crate::prelude::*;
use crate::s9pk::manifest::PackageId;
use crate::status::MainStatus;
use crate::util::display_none;
use crate::util::serde::display_serializable;
use crate::Error;
#[derive(Clone)]
pub struct StartReceipts {
dependency_receipt: DependencyReceipt,
status: LockReceipt<MainStatus, ()>,
version: LockReceipt<crate::util::Version, ()>,
}
impl StartReceipts {
pub async fn new(db: &mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
let mut locks = Vec::new();
let setup = Self::setup(&mut locks, id);
Ok(setup(&db.lock_all(locks).await?)?)
}
pub fn setup(
locks: &mut Vec<patch_db::LockTargetId>,
id: &PackageId,
) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
let dependency_receipt = DependencyReceipt::setup(locks);
let status = crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.and_then(|x| x.installed())
.map(|x| x.status().main())
.make_locker(LockType::Write)
.add_to_keys(locks);
let version = crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.and_then(|x| x.installed())
.map(|x| x.manifest().version())
.make_locker(LockType::Read)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
dependency_receipt: dependency_receipt(skeleton_key)?,
status: status.verify(skeleton_key)?,
version: version.verify(skeleton_key)?,
})
}
}
}
#[command(display(display_none), metadata(sync_db = true))]
#[instrument(skip_all)]
pub async fn start(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result<(), Error> {
let mut db = ctx.db.handle();
let mut tx = db.begin().await?;
let receipts = StartReceipts::new(&mut tx, &id).await?;
let version = receipts.version.get(&mut tx).await?;
receipts
.status
.set(&mut tx, MainStatus::Starting { restarting: false })
.await?;
heal_all_dependents_transitive(&ctx, &mut tx, &id, &receipts.dependency_receipt).await?;
tx.commit().await?;
drop(receipts);
let peek = ctx.db.peek().await;
let version = peek
.as_package_data()
.as_idx(&id)
.or_not_found(&id)?
.as_installed()
.or_not_found(&id)?
.as_manifest()
.as_version()
.de()?;
ctx.managers
.get(&(id, version))
.await
.ok_or_else(|| Error::new(eyre!("Manager not found"), crate::ErrorKind::InvalidRequest))?
.synchronize()
.start()
.await;
Ok(())
}
#[derive(Clone)]
pub struct StopReceipts {
breaks: crate::dependencies::BreakTransitiveReceipts,
status: LockReceipt<MainStatus, ()>,
}
impl StopReceipts {
pub async fn new<'a>(db: &'a mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
let mut locks = Vec::new();
#[command(display(display_none), metadata(sync_db = true))]
pub async fn stop(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result<MainStatus, Error> {
let peek = ctx.db.peek().await;
let version = peek
.as_package_data()
.as_idx(&id)
.or_not_found(&id)?
.as_installed()
.or_not_found(&id)?
.as_manifest()
.as_version()
.de()?;
let setup = Self::setup(&mut locks, id);
Ok(setup(&db.lock_all(locks).await?)?)
}
let last_status = ctx
.db
.mutate(|v| {
v.as_package_data_mut()
.as_idx_mut(&id)
.and_then(|x| x.as_installed_mut())
.ok_or_else(|| Error::new(eyre!("{} is not installed", id), ErrorKind::NotFound))?
.as_status_mut()
.as_main_mut()
.replace(&MainStatus::Stopping)
})
.await?;
pub fn setup(
locks: &mut Vec<patch_db::LockTargetId>,
id: &PackageId,
) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
let breaks = crate::dependencies::BreakTransitiveReceipts::setup(locks);
let status = crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.and_then(|x| x.installed())
.map(|x| x.status().main())
.make_locker(LockType::Write)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
breaks: breaks(skeleton_key)?,
status: status.verify(skeleton_key)?,
})
}
}
}
#[instrument(skip_all)]
pub async fn stop_common<Db: DbHandle>(
db: &mut Db,
id: &PackageId,
breakages: &mut BTreeMap<PackageId, TaggedDependencyError>,
) -> Result<MainStatus, Error> {
let mut tx = db.begin().await?;
let receipts = StopReceipts::new(&mut tx, id).await?;
let last_status = receipts.status.get(&mut tx).await?;
receipts.status.set(&mut tx, MainStatus::Stopping).await?;
tx.save().await?;
break_all_dependents_transitive(
db,
id,
DependencyError::NotRunning,
breakages,
&receipts.breaks,
)
.await?;
Ok(last_status)
}
#[command(
subcommands(self(stop_impl(async)), stop_dry),
display(display_none),
metadata(sync_db = true)
)]
pub fn stop(#[arg] id: PackageId) -> Result<PackageId, Error> {
Ok(id)
}
#[command(rename = "dry", display(display_serializable))]
#[instrument(skip_all)]
pub async fn stop_dry(
#[context] ctx: RpcContext,
#[parent_data] id: PackageId,
) -> Result<BreakageRes, Error> {
let mut db = ctx.db.handle();
let mut tx = db.begin().await?;
let mut breakages = BTreeMap::new();
stop_common(&mut tx, &id, &mut breakages).await?;
tx.abort().await?;
Ok(BreakageRes(breakages))
}
#[instrument(skip_all)]
pub async fn stop_impl(ctx: RpcContext, id: PackageId) -> Result<MainStatus, Error> {
let mut db = ctx.db.handle();
let mut tx = db.begin().await?;
let last_status = stop_common(&mut tx, &id, &mut BTreeMap::new()).await?;
tx.commit().await?;
ctx.managers
.get(&(id, version))
.await
.ok_or_else(|| Error::new(eyre!("Manager not found"), crate::ErrorKind::InvalidRequest))?
.stop()
.await;
Ok(last_status)
}
#[command(display(display_none), metadata(sync_db = true))]
pub async fn restart(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result<(), Error> {
let mut db = ctx.db.handle();
let mut tx = db.begin().await?;
let peek = ctx.db.peek().await;
let version = peek
.as_package_data()
.as_idx(&id)
.or_not_found(&id)?
.expect_as_installed()?
.as_manifest()
.as_version()
.de()?;
let mut status = crate::db::DatabaseModel::new()
.package_data()
.idx_model(&id)
.and_then(|pde| pde.installed())
.map(|i| i.status().main())
.get_mut(&mut tx)
.await?;
if !matches!(&*status, Some(MainStatus::Running { .. })) {
return Err(Error::new(
eyre!("{} is not running", id),
crate::ErrorKind::InvalidRequest,
));
}
*status = Some(MainStatus::Restarting);
status.save(&mut tx).await?;
tx.commit().await?;
ctx.managers
.get(&(id, version))
.await
.ok_or_else(|| Error::new(eyre!("Manager not found"), crate::ErrorKind::InvalidRequest))?
.restart()
.await;
Ok(())
}
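All three lifecycle commands now follow the same shape: peek for the `(id, version)` pair, mutate to record the transitional `MainStatus`, then hand off to the per-package `Manager`. A sketch of the middle step as a hypothetical helper (not part of this changeset; accessors as generated above):

async fn set_main_status(
    ctx: &RpcContext,
    id: &PackageId,
    status: MainStatus,
) -> Result<(), Error> {
    ctx.db
        .mutate(|db| {
            db.as_package_data_mut()
                .as_idx_mut(id)
                .and_then(|pde| pde.as_installed_mut())
                .ok_or_else(|| {
                    Error::new(eyre!("{} is not installed", id), ErrorKind::NotFound)
                })?
                .as_status_mut()
                .as_main_mut()
                .ser(&status)
        })
        .await
}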


@@ -1,7 +1,9 @@
pub mod model;
pub mod package;
pub mod prelude;
use std::future::Future;
use std::path::PathBuf;
use std::sync::Arc;
use futures::{FutureExt, SinkExt, StreamExt};
@@ -21,11 +23,11 @@ use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::WebSocketStream;
use tracing::instrument;
pub use self::model::DatabaseModel;
use crate::context::RpcContext;
use crate::context::{CliContext, RpcContext};
use crate::middleware::auth::{HasValidSession, HashSessionToken};
use crate::prelude::*;
use crate::util::display_none;
use crate::util::serde::{display_serializable, IoFormat};
use crate::{Error, ResultExt};
#[instrument(skip_all)]
async fn ws_handler<
@@ -35,11 +37,11 @@ async fn ws_handler<
session: Option<(HasValidSession, HashSessionToken)>,
ws_fut: WSFut,
) -> Result<(), Error> {
let (dump, sub) = ctx.db.dump_and_sub().await?;
let (dump, sub) = ctx.db.dump_and_sub().await;
let mut stream = ws_fut
.await
.with_kind(crate::ErrorKind::Network)?
.with_kind(crate::ErrorKind::Unknown)?;
.with_kind(ErrorKind::Network)?
.with_kind(ErrorKind::Unknown)?;
if let Some((session, token)) = session {
let kill = subscribe_to_session_kill(&ctx, token).await;
@@ -53,7 +55,7 @@ async fn ws_handler<
reason: "UNAUTHORIZED".into(),
}))
.await
.with_kind(crate::ErrorKind::Network)?;
.with_kind(ErrorKind::Network)?;
}
Ok(())
@@ -80,6 +82,8 @@ async fn deal_with_messages(
mut sub: patch_db::Subscriber,
mut stream: WebSocketStream<Upgraded>,
) -> Result<(), Error> {
let mut timer = tokio::time::interval(tokio::time::Duration::from_secs(5));
loop {
futures::select! {
_ = (&mut kill).fuse() => {
@@ -90,18 +94,18 @@ async fn deal_with_messages(
reason: "UNAUTHORIZED".into(),
}))
.await
.with_kind(crate::ErrorKind::Network)?;
.with_kind(ErrorKind::Network)?;
return Ok(())
}
new_rev = sub.recv().fuse() => {
let rev = new_rev.expect("UNREACHABLE: patch-db is dropped");
stream
.send(Message::Text(serde_json::to_string(&rev).with_kind(crate::ErrorKind::Serialization)?))
.send(Message::Text(serde_json::to_string(&rev).with_kind(ErrorKind::Serialization)?))
.await
.with_kind(crate::ErrorKind::Network)?;
.with_kind(ErrorKind::Network)?;
}
message = stream.next().fuse() => {
let message = message.transpose().with_kind(crate::ErrorKind::Network)?;
let message = message.transpose().with_kind(ErrorKind::Network)?;
match message {
None => {
tracing::info!("Closing WebSocket: Stream Finished");
@@ -110,6 +114,13 @@ async fn deal_with_messages(
_ => (),
}
}
// Send a periodic ping so the connection (and thus the UI) stays alive.
_ = timer.tick().fuse() => {
stream
.send(Message::Ping(vec![]))
.await
.with_kind(crate::ErrorKind::Network)?;
}
}
}
}
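The ping arm added above, reduced to a standalone sketch. The 5-second cadence matches the interval in the handler; the sink bound is illustrative.

use futures::SinkExt;
use tokio_tungstenite::tungstenite::{Error as WsError, Message};

async fn keepalive<S>(stream: &mut S) -> Result<(), WsError>
where
    S: futures::Sink<Message, Error = WsError> + Unpin,
{
    let mut timer = tokio::time::interval(std::time::Duration::from_secs(5));
    loop {
        timer.tick().await;
        // An empty Ping is enough to keep intermediaries from timing out
        // an otherwise-idle WebSocket.
        stream.send(Message::Ping(vec![])).await?;
    }
}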
@@ -121,10 +132,10 @@ async fn send_dump(
) -> Result<(), Error> {
stream
.send(Message::Text(
serde_json::to_string(&dump).with_kind(crate::ErrorKind::Serialization)?,
serde_json::to_string(&dump).with_kind(ErrorKind::Serialization)?,
))
.await
.with_kind(crate::ErrorKind::Network)?;
.with_kind(ErrorKind::Network)?;
Ok(())
}
@@ -139,7 +150,7 @@ pub async fn subscribe(ctx: RpcContext, req: Request<Body>) -> Result<Response<B
{
Ok(a) => Some(a),
Err(e) => {
if e.kind != crate::ErrorKind::Authorization {
if e.kind != ErrorKind::Authorization {
tracing::error!("Error Authenticating Websocket: {}", e);
tracing::debug!("{:?}", e);
}
@@ -147,7 +158,7 @@ pub async fn subscribe(ctx: RpcContext, req: Request<Body>) -> Result<Response<B
}
};
let req = Request::from_parts(parts, body);
let (res, ws_fut) = hyper_ws_listener::create_ws(req).with_kind(crate::ErrorKind::Network)?;
let (res, ws_fut) = hyper_ws_listener::create_ws(req).with_kind(ErrorKind::Network)?;
if let Some(ws_fut) = ws_fut {
tokio::task::spawn(async move {
match ws_handler(ctx, session, ws_fut).await {
@@ -163,7 +174,7 @@ pub async fn subscribe(ctx: RpcContext, req: Request<Body>) -> Result<Response<B
Ok(res)
}
#[command(subcommands(revisions, dump, put))]
#[command(subcommands(dump, put, apply))]
pub fn db() -> Result<(), RpcError> {
Ok(())
}
@@ -175,28 +186,164 @@ pub enum RevisionsRes {
Dump(Dump),
}
#[command(display(display_serializable))]
pub async fn revisions(
#[context] ctx: RpcContext,
#[arg] since: u64,
#[allow(unused_variables)]
#[arg(long = "format")]
format: Option<IoFormat>,
) -> Result<RevisionsRes, Error> {
Ok(match ctx.db.sync(since).await? {
Ok(revs) => RevisionsRes::Revisions(revs),
Err(dump) => RevisionsRes::Dump(dump),
})
#[instrument(skip_all)]
async fn cli_dump(
ctx: CliContext,
_format: Option<IoFormat>,
path: Option<PathBuf>,
) -> Result<Dump, RpcError> {
let dump = if let Some(path) = path {
PatchDb::open(path).await?.dump().await
} else {
rpc_toolkit::command_helpers::call_remote(
ctx,
"db.dump",
serde_json::json!({}),
std::marker::PhantomData::<Dump>,
)
.await?
.result?
};
Ok(dump)
}
#[command(display(display_serializable))]
#[command(
custom_cli(cli_dump(async, context(CliContext))),
display(display_serializable)
)]
pub async fn dump(
#[context] ctx: RpcContext,
#[allow(unused_variables)]
#[arg(long = "format")]
format: Option<IoFormat>,
#[allow(unused_variables)]
#[arg]
path: Option<PathBuf>,
) -> Result<Dump, Error> {
Ok(ctx.db.dump().await?)
Ok(ctx.db.dump().await)
}
fn apply_expr(input: jaq_core::Val, expr: &str) -> Result<jaq_core::Val, Error> {
let (expr, errs) = jaq_core::parse::parse(expr, jaq_core::parse::main());
let Some(expr) = expr else {
return Err(Error::new(
eyre!("Failed to parse expression: {:?}", errs),
crate::ErrorKind::InvalidRequest,
));
};
let mut errs = Vec::new();
let mut defs = jaq_core::Definitions::core();
for def in jaq_std::std() {
defs.insert(def, &mut errs);
}
let filter = defs.finish(expr, Vec::new(), &mut errs);
if !errs.is_empty() {
return Err(Error::new(
eyre!("Failed to compile expression: {:?}", errs),
crate::ErrorKind::InvalidRequest,
));
};
let inputs = jaq_core::RcIter::new(std::iter::empty());
let mut res_iter = filter.run(jaq_core::Ctx::new([], &inputs), input);
let Some(res) = res_iter
.next()
.transpose()
.map_err(|e| eyre!("{e}"))
.with_kind(crate::ErrorKind::Deserialization)?
else {
return Err(Error::new(
eyre!("expr returned no results"),
crate::ErrorKind::InvalidRequest,
));
};
if res_iter.next().is_some() {
return Err(Error::new(
eyre!("expr returned too many results"),
crate::ErrorKind::InvalidRequest,
));
}
Ok(res)
}
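A hypothetical call, assuming `apply_expr` as defined above (the expression and input are illustrative):

fn bump_counter() -> Result<(), Error> {
    let input: jaq_core::Val = serde_json::json!({ "counter": 1 }).into();
    let out = apply_expr(input, ".counter + 1")?;
    // `out` is the single value the filter produced; zero or multiple
    // results are rejected by apply_expr above.
    let _ = out;
    Ok(())
}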
#[instrument(skip_all)]
async fn cli_apply(ctx: CliContext, expr: String, path: Option<PathBuf>) -> Result<(), RpcError> {
if let Some(path) = path {
PatchDb::open(path)
.await?
.mutate(|db| {
let res = apply_expr(
serde_json::to_value(patch_db::Value::from(db.clone()))
.with_kind(ErrorKind::Deserialization)?
.into(),
&expr,
)?;
db.ser(
&serde_json::from_value::<model::Database>(res.clone().into()).with_ctx(
|_| {
(
crate::ErrorKind::Deserialization,
"result does not match database model",
)
},
)?,
)
})
.await?;
} else {
rpc_toolkit::command_helpers::call_remote(
ctx,
"db.apply",
serde_json::json!({ "expr": expr }),
std::marker::PhantomData::<()>,
)
.await?
.result?;
}
Ok(())
}
#[command(
custom_cli(cli_apply(async, context(CliContext))),
display(display_none)
)]
pub async fn apply(
#[context] ctx: RpcContext,
#[arg] expr: String,
#[allow(unused_variables)]
#[arg]
path: Option<PathBuf>,
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
let res = apply_expr(
serde_json::to_value(patch_db::Value::from(db.clone()))
.with_kind(ErrorKind::Deserialization)?
.into(),
&expr,
)?;
db.ser(
&serde_json::from_value::<model::Database>(res.clone().into()).with_ctx(|_| {
(
crate::ErrorKind::Deserialization,
"result does not match database model",
)
})?,
)
})
.await
}
#[command(subcommands(ui))]
@@ -216,7 +363,7 @@ pub async fn ui(
) -> Result<(), Error> {
let ptr = "/ui"
.parse::<JsonPointer>()
.with_kind(crate::ErrorKind::Database)?
.with_kind(ErrorKind::Database)?
+ &pointer;
ctx.db.put(&ptr, &value).await?;
Ok(())

View File

@@ -4,52 +4,52 @@ use std::sync::Arc;
use chrono::{DateTime, Utc};
use emver::VersionRange;
use imbl_value::InternedString;
use ipnet::{Ipv4Net, Ipv6Net};
use isocountry::CountryCode;
use itertools::Itertools;
use models::{DataUrl, HealthCheckId, InterfaceId};
use openssl::hash::MessageDigest;
use patch_db::json_ptr::JsonPointer;
use patch_db::{HasModel, Map, MapModel, OptionModel};
use patch_db::{HasModel, Value};
use reqwest::Url;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use ssh_key::public::Ed25519PublicKey;
use crate::account::AccountInfo;
use crate::config::spec::{PackagePointerSpec, SystemPointerSpec};
use crate::config::spec::PackagePointerSpec;
use crate::install::progress::InstallProgress;
use crate::net::interface::InterfaceId;
use crate::net::utils::{get_iface_ipv4_addr, get_iface_ipv6_addr};
use crate::s9pk::manifest::{Manifest, ManifestModel, PackageId};
use crate::status::health_check::HealthCheckId;
use crate::prelude::*;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::status::Status;
use crate::util::Version;
use crate::version::{Current, VersionT};
use crate::Error;
use crate::{ARCH, PLATFORM};
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
// #[macro_debug]
pub struct Database {
#[model]
pub server_info: ServerInfo,
#[model]
pub package_data: AllPackageData,
pub ui: Value,
}
impl Database {
pub fn init(account: &AccountInfo) -> Self {
let lan_address = account.hostname.lan_address().parse().unwrap();
// TODO
Database {
server_info: ServerInfo {
arch: get_arch(),
platform: get_platform(),
id: account.server_id.clone(),
version: Current::new().semver().into(),
hostname: Some(account.hostname.no_dot_host_name()),
hostname: account.hostname.no_dot_host_name(),
last_backup: None,
last_wifi_region: None,
eos_version_compat: Current::new().compat().clone(),
lan_address,
tor_address: format!("http://{}", account.key.tor_address())
tor_address: format!("https://{}", account.key.tor_address())
.parse()
.unwrap(),
ip_info: BTreeMap::new(),
@@ -57,6 +57,8 @@ impl Database {
backup_progress: None,
updated: false,
update_progress: None,
shutting_down: false,
restarting: false,
},
wifi: WifiInfo {
ssids: Vec::new(),
@@ -79,7 +81,8 @@ impl Database {
.iter()
.map(|x| format!("{x:X}"))
.join(":"),
system_start_time: Utc::now().to_rfc3339(),
ntp_synced: false,
zram: true,
},
package_data: AllPackageData::default(),
ui: serde_json::from_str(include_str!("../../../frontend/patchdb-ui-seed.json"))
@@ -87,17 +90,27 @@ impl Database {
}
}
}
impl DatabaseModel {
pub fn new() -> Self {
Self::from(JsonPointer::default())
}
pub type DatabaseModel = Model<Database>;
fn get_arch() -> InternedString {
(*ARCH).into()
}
fn get_platform() -> InternedString {
(&*PLATFORM).into()
}
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
pub struct ServerInfo {
#[serde(default = "get_arch")]
pub arch: InternedString,
#[serde(default = "get_platform")]
pub platform: InternedString,
pub id: String,
pub hostname: Option<String>,
pub hostname: String,
pub version: Version,
pub last_backup: Option<DateTime<Utc>>,
/// Used in the wifi to determine the region to set the system to
@@ -105,9 +118,7 @@ pub struct ServerInfo {
pub eos_version_compat: VersionRange,
pub lan_address: Url,
pub tor_address: Url,
#[model]
pub ip_info: BTreeMap<String, IpInfo>,
#[model]
#[serde(default)]
pub status_info: ServerStatus,
pub wifi: WifiInfo,
@@ -116,11 +127,15 @@ pub struct ServerInfo {
pub password_hash: String,
pub pubkey: String,
pub ca_fingerprint: String,
pub system_start_time: String,
#[serde(default)]
pub ntp_synced: bool,
#[serde(default)]
pub zram: bool,
}
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
pub struct IpInfo {
pub ipv4_range: Option<Ipv4Net>,
pub ipv4: Option<Ipv4Addr>,
@@ -141,29 +156,35 @@ impl IpInfo {
}
#[derive(Debug, Default, Deserialize, Serialize, HasModel)]
#[model = "Model<Self>"]
pub struct BackupProgress {
pub complete: bool,
}
#[derive(Debug, Default, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
pub struct ServerStatus {
#[model]
pub backup_progress: Option<BTreeMap<PackageId, BackupProgress>>,
pub updated: bool,
#[model]
pub update_progress: Option<UpdateProgress>,
#[serde(default)]
pub shutting_down: bool,
#[serde(default)]
pub restarting: bool,
}
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
pub struct UpdateProgress {
pub size: Option<u64>,
pub downloaded: u64,
}
#[derive(Debug, Deserialize, Serialize)]
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
pub struct WifiInfo {
pub ssids: Vec<String>,
pub selected: Option<String>,
@@ -190,16 +211,11 @@ pub struct AllPackageData(pub BTreeMap<PackageId, PackageDataEntry>);
impl Map for AllPackageData {
type Key = PackageId;
type Value = PackageDataEntry;
fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
self.0.get(key)
}
}
impl HasModel for AllPackageData {
type Model = MapModel<Self>;
}
#[derive(Debug, Deserialize, Serialize)]
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
pub struct StaticFiles {
license: String,
instructions: String,
@@ -215,120 +231,231 @@ impl StaticFiles {
}
}
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
pub struct PackageDataEntryInstalling {
pub static_files: StaticFiles,
pub manifest: Manifest,
pub install_progress: Arc<InstallProgress>,
}
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
pub struct PackageDataEntryUpdating {
pub static_files: StaticFiles,
pub manifest: Manifest,
pub installed: InstalledPackageInfo,
pub install_progress: Arc<InstallProgress>,
}
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
pub struct PackageDataEntryRestoring {
pub static_files: StaticFiles,
pub manifest: Manifest,
pub install_progress: Arc<InstallProgress>,
}
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
pub struct PackageDataEntryRemoving {
pub static_files: StaticFiles,
pub manifest: Manifest,
pub removing: InstalledPackageInfo,
}
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
pub struct PackageDataEntryInstalled {
pub static_files: StaticFiles,
pub manifest: Manifest,
pub installed: InstalledPackageInfo,
}
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(tag = "state")]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
// #[macro_debug]
pub enum PackageDataEntry {
#[serde(rename_all = "kebab-case")]
Installing {
static_files: StaticFiles,
manifest: Manifest,
install_progress: Arc<InstallProgress>,
},
#[serde(rename_all = "kebab-case")]
Updating {
static_files: StaticFiles,
manifest: Manifest,
installed: InstalledPackageDataEntry,
install_progress: Arc<InstallProgress>,
},
#[serde(rename_all = "kebab-case")]
Restoring {
static_files: StaticFiles,
manifest: Manifest,
install_progress: Arc<InstallProgress>,
},
#[serde(rename_all = "kebab-case")]
Removing {
static_files: StaticFiles,
manifest: Manifest,
removing: InstalledPackageDataEntry,
},
#[serde(rename_all = "kebab-case")]
Installed {
static_files: StaticFiles,
manifest: Manifest,
installed: InstalledPackageDataEntry,
},
Installing(PackageDataEntryInstalling),
Updating(PackageDataEntryUpdating),
Restoring(PackageDataEntryRestoring),
Removing(PackageDataEntryRemoving),
Installed(PackageDataEntryInstalled),
}
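Moving each variant's fields into a dedicated struct gives every state its own `HasModel` derive (hence the typed `PackageDataEntryInstalling` etc. models above) without changing the wire format: with `#[serde(tag = "state")]`, a newtype variant around a struct serializes the struct's fields inline, exactly as the old struct variants did. A reduced sketch:

#[derive(serde::Serialize, serde::Deserialize)]
#[serde(rename_all = "kebab-case")]
struct EntryInstalling {
    static_files: String,
}

#[derive(serde::Serialize, serde::Deserialize)]
#[serde(tag = "state", rename_all = "kebab-case")]
enum Entry {
    // serializes as {"state":"installing","static-files":...}, the same
    // shape a struct variant with these fields would produce
    Installing(EntryInstalling),
}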
impl PackageDataEntry {
pub fn installed(&self) -> Option<&InstalledPackageDataEntry> {
match self {
Self::Installing { .. } | Self::Restoring { .. } | Self::Removing { .. } => None,
Self::Updating { installed, .. } | Self::Installed { installed, .. } => Some(installed),
impl Model<PackageDataEntry> {
pub fn expect_into_installed(self) -> Result<Model<PackageDataEntryInstalled>, Error> {
if let PackageDataEntryMatchModel::Installed(a) = self.into_match() {
Ok(a)
} else {
Err(Error::new(
eyre!("package is not in installed state"),
ErrorKind::InvalidRequest,
))
}
}
pub fn installed_mut(&mut self) -> Option<&mut InstalledPackageDataEntry> {
match self {
Self::Installing { .. } | Self::Restoring { .. } | Self::Removing { .. } => None,
Self::Updating { installed, .. } | Self::Installed { installed, .. } => Some(installed),
pub fn expect_as_installed(&self) -> Result<&Model<PackageDataEntryInstalled>, Error> {
if let PackageDataEntryMatchModelRef::Installed(a) = self.as_match() {
Ok(a)
} else {
Err(Error::new(
eyre!("package is not in installed state"),
ErrorKind::InvalidRequest,
))
}
}
pub fn into_installed(self) -> Option<InstalledPackageDataEntry> {
match self {
Self::Installing { .. } | Self::Restoring { .. } | Self::Removing { .. } => None,
Self::Updating { installed, .. } | Self::Installed { installed, .. } => Some(installed),
pub fn expect_as_installed_mut(
&mut self,
) -> Result<&mut Model<PackageDataEntryInstalled>, Error> {
if let PackageDataEntryMatchModelMut::Installed(a) = self.as_match_mut() {
Ok(a)
} else {
Err(Error::new(
eyre!("package is not in installed state"),
ErrorKind::InvalidRequest,
))
}
}
pub fn manifest(self) -> Manifest {
match self {
PackageDataEntry::Installing { manifest, .. } => manifest,
PackageDataEntry::Updating { manifest, .. } => manifest,
PackageDataEntry::Restoring { manifest, .. } => manifest,
PackageDataEntry::Removing { manifest, .. } => manifest,
PackageDataEntry::Installed { manifest, .. } => manifest,
pub fn expect_into_removing(self) -> Result<Model<PackageDataEntryRemoving>, Error> {
if let PackageDataEntryMatchModel::Removing(a) = self.into_match() {
Ok(a)
} else {
Err(Error::new(
eyre!("package is not in removing state"),
ErrorKind::InvalidRequest,
))
}
}
pub fn manifest_borrow(&self) -> &Manifest {
match self {
PackageDataEntry::Installing { manifest, .. } => manifest,
PackageDataEntry::Updating { manifest, .. } => manifest,
PackageDataEntry::Restoring { manifest, .. } => manifest,
PackageDataEntry::Removing { manifest, .. } => manifest,
PackageDataEntry::Installed { manifest, .. } => manifest,
pub fn expect_as_removing(&self) -> Result<&Model<PackageDataEntryRemoving>, Error> {
if let PackageDataEntryMatchModelRef::Removing(a) = self.as_match() {
Ok(a)
} else {
Err(Error::new(
eyre!("package is not in removing state"),
ErrorKind::InvalidRequest,
))
}
}
}
impl PackageDataEntryModel {
pub fn installed(self) -> OptionModel<InstalledPackageDataEntry> {
self.0.child("installed").into()
pub fn expect_as_removing_mut(
&mut self,
) -> Result<&mut Model<PackageDataEntryRemoving>, Error> {
if let PackageDataEntryMatchModelMut::Removing(a) = self.as_match_mut() {
Ok(a)
} else {
Err(Error::new(
eyre!("package is not in removing state"),
ErrorKind::InvalidRequest,
))
}
}
pub fn removing(self) -> OptionModel<InstalledPackageDataEntry> {
self.0.child("removing").into()
pub fn expect_as_installing_mut(
&mut self,
) -> Result<&mut Model<PackageDataEntryInstalling>, Error> {
if let PackageDataEntryMatchModelMut::Installing(a) = self.as_match_mut() {
Ok(a)
} else {
Err(Error::new(
eyre!("package is not in installing state"),
ErrorKind::InvalidRequest,
))
}
}
pub fn install_progress(self) -> OptionModel<InstallProgress> {
self.0.child("install-progress").into()
pub fn into_manifest(self) -> Model<Manifest> {
match self.into_match() {
PackageDataEntryMatchModel::Installing(a) => a.into_manifest(),
PackageDataEntryMatchModel::Updating(a) => a.into_installed().into_manifest(),
PackageDataEntryMatchModel::Restoring(a) => a.into_manifest(),
PackageDataEntryMatchModel::Removing(a) => a.into_manifest(),
PackageDataEntryMatchModel::Installed(a) => a.into_manifest(),
PackageDataEntryMatchModel::Error(_) => Model::from(Value::Null),
}
}
pub fn manifest(self) -> ManifestModel {
self.0.child("manifest").into()
pub fn as_manifest(&self) -> &Model<Manifest> {
match self.as_match() {
PackageDataEntryMatchModelRef::Installing(a) => a.as_manifest(),
PackageDataEntryMatchModelRef::Updating(a) => a.as_installed().as_manifest(),
PackageDataEntryMatchModelRef::Restoring(a) => a.as_manifest(),
PackageDataEntryMatchModelRef::Removing(a) => a.as_manifest(),
PackageDataEntryMatchModelRef::Installed(a) => a.as_manifest(),
PackageDataEntryMatchModelRef::Error(_) => (&Value::Null).into(),
}
}
pub fn into_installed(self) -> Option<Model<InstalledPackageInfo>> {
match self.into_match() {
PackageDataEntryMatchModel::Installing(_) => None,
PackageDataEntryMatchModel::Updating(a) => Some(a.into_installed()),
PackageDataEntryMatchModel::Restoring(_) => None,
PackageDataEntryMatchModel::Removing(_) => None,
PackageDataEntryMatchModel::Installed(a) => Some(a.into_installed()),
PackageDataEntryMatchModel::Error(_) => None,
}
}
pub fn as_installed(&self) -> Option<&Model<InstalledPackageInfo>> {
match self.as_match() {
PackageDataEntryMatchModelRef::Installing(_) => None,
PackageDataEntryMatchModelRef::Updating(a) => Some(a.as_installed()),
PackageDataEntryMatchModelRef::Restoring(_) => None,
PackageDataEntryMatchModelRef::Removing(_) => None,
PackageDataEntryMatchModelRef::Installed(a) => Some(a.as_installed()),
PackageDataEntryMatchModelRef::Error(_) => None,
}
}
pub fn as_installed_mut(&mut self) -> Option<&mut Model<InstalledPackageInfo>> {
match self.as_match_mut() {
PackageDataEntryMatchModelMut::Installing(_) => None,
PackageDataEntryMatchModelMut::Updating(a) => Some(a.as_installed_mut()),
PackageDataEntryMatchModelMut::Restoring(_) => None,
PackageDataEntryMatchModelMut::Removing(_) => None,
PackageDataEntryMatchModelMut::Installed(a) => Some(a.as_installed_mut()),
PackageDataEntryMatchModelMut::Error(_) => None,
}
}
pub fn as_install_progress(&self) -> Option<&Model<Arc<InstallProgress>>> {
match self.as_match() {
PackageDataEntryMatchModelRef::Installing(a) => Some(a.as_install_progress()),
PackageDataEntryMatchModelRef::Updating(a) => Some(a.as_install_progress()),
PackageDataEntryMatchModelRef::Restoring(a) => Some(a.as_install_progress()),
PackageDataEntryMatchModelRef::Removing(_) => None,
PackageDataEntryMatchModelRef::Installed(_) => None,
PackageDataEntryMatchModelRef::Error(_) => None,
}
}
pub fn as_install_progress_mut(&mut self) -> Option<&mut Model<Arc<InstallProgress>>> {
match self.as_match_mut() {
PackageDataEntryMatchModelMut::Installing(a) => Some(a.as_install_progress_mut()),
PackageDataEntryMatchModelMut::Updating(a) => Some(a.as_install_progress_mut()),
PackageDataEntryMatchModelMut::Restoring(a) => Some(a.as_install_progress_mut()),
PackageDataEntryMatchModelMut::Removing(_) => None,
PackageDataEntryMatchModelMut::Installed(_) => None,
PackageDataEntryMatchModelMut::Error(_) => None,
}
}
}
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct InstalledPackageDataEntry {
#[model]
#[model = "Model<Self>"]
pub struct InstalledPackageInfo {
pub status: Status,
pub marketplace_url: Option<Url>,
#[serde(default)]
#[serde(with = "crate::util::serde::ed25519_pubkey")]
pub developer_key: ed25519_dalek::PublicKey,
#[model]
pub developer_key: ed25519_dalek::VerifyingKey,
pub manifest: Manifest,
pub last_backup: Option<DateTime<Utc>>,
#[model]
pub system_pointers: Vec<SystemPointerSpec>,
#[model]
pub dependency_info: BTreeMap<PackageId, StaticDependencyInfo>,
#[model]
pub current_dependents: CurrentDependents,
#[model]
pub current_dependencies: CurrentDependencies,
#[model]
pub interface_addresses: InterfaceAddressMap,
}
#[derive(Debug, Clone, Deserialize, Serialize)]
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct CurrentDependents(pub BTreeMap<PackageId, CurrentDependencyInfo>);
impl CurrentDependents {
pub fn map(
@@ -344,12 +471,6 @@ impl CurrentDependents {
impl Map for CurrentDependents {
type Key = PackageId;
type Value = CurrentDependencyInfo;
fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
self.0.get(key)
}
}
impl HasModel for CurrentDependents {
type Model = MapModel<Self>;
}
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
@@ -368,25 +489,22 @@ impl CurrentDependencies {
impl Map for CurrentDependencies {
type Key = PackageId;
type Value = CurrentDependencyInfo;
fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
self.0.get(key)
}
}
impl HasModel for CurrentDependencies {
type Model = MapModel<Self>;
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel)]
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
pub struct StaticDependencyInfo {
pub manifest: Option<Manifest>,
pub icon: String,
pub title: String,
pub icon: DataUrl<'static>,
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
pub struct CurrentDependencyInfo {
pub pointers: Vec<PackagePointerSpec>,
#[serde(default)]
pub pointers: BTreeSet<PackagePointerSpec>,
pub health_checks: BTreeSet<HealthCheckId>,
}
@@ -395,27 +513,12 @@ pub struct InterfaceAddressMap(pub BTreeMap<InterfaceId, InterfaceAddresses>);
impl Map for InterfaceAddressMap {
type Key = InterfaceId;
type Value = InterfaceAddresses;
fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
self.0.get(key)
}
}
impl HasModel for InterfaceAddressMap {
type Model = MapModel<Self>;
}
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
pub struct InterfaceAddresses {
#[model]
pub tor_address: Option<String>,
#[model]
pub lan_address: Option<String>,
}
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct RecoveredPackageInfo {
pub title: String,
pub icon: String,
pub version: Version,
}


@@ -1,75 +1,22 @@
use patch_db::{DbHandle, LockReceipt, LockTargetId, LockType, Verifier};
use models::Version;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::Error;
use crate::prelude::*;
use crate::s9pk::manifest::PackageId;
pub struct PackageReceipts {
package_data: LockReceipt<super::model::AllPackageData, ()>,
}
impl PackageReceipts {
pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
let mut locks = Vec::new();
let setup = Self::setup(&mut locks);
Ok(setup(&db.lock_all(locks).await?)?)
}
pub fn setup(locks: &mut Vec<LockTargetId>) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
let package_data = crate::db::DatabaseModel::new()
.package_data()
.make_locker(LockType::Read)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
package_data: package_data.verify(&skeleton_key)?,
})
}
}
}
pub async fn get_packages<Db: DbHandle>(
db: &mut Db,
receipts: &PackageReceipts,
) -> Result<Vec<PackageId>, Error> {
let packages = receipts.package_data.get(db).await?;
Ok(packages.0.keys().cloned().collect())
}
pub struct ManifestReceipts {
manifest: LockReceipt<Manifest, String>,
}
impl ManifestReceipts {
pub async fn new<'a>(db: &'a mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
let mut locks = Vec::new();
let setup = Self::setup(&mut locks, id);
Ok(setup(&db.lock_all(locks).await?)?)
}
pub fn setup(
locks: &mut Vec<LockTargetId>,
_id: &PackageId,
) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
let manifest = crate::db::DatabaseModel::new()
.package_data()
.star()
.manifest()
.make_locker(LockType::Read)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
manifest: manifest.verify(&skeleton_key)?,
})
}
}
}
pub async fn get_manifest<Db: DbHandle>(
db: &mut Db,
pkg: &PackageId,
receipts: &ManifestReceipts,
) -> Result<Option<Manifest>, Error> {
Ok(receipts.manifest.get(db, pkg).await?)
pub fn get_packages(db: Peeked) -> Result<Vec<(PackageId, Version)>, Error> {
Ok(db
.as_package_data()
.keys()?
.into_iter()
.flat_map(|package_id| {
let version = db
.as_package_data()
.as_idx(&package_id)?
.as_manifest()
.as_version()
.de()
.ok()?;
Some((package_id, version))
})
.collect())
}

backend/src/db/prelude.rs (new file)

@@ -0,0 +1,382 @@
use std::collections::BTreeMap;
use std::marker::PhantomData;
use std::panic::UnwindSafe;
use patch_db::value::InternedString;
pub use patch_db::{HasModel, PatchDb, Value};
use serde::de::DeserializeOwned;
use serde::Serialize;
use crate::db::model::DatabaseModel;
use crate::prelude::*;
pub type Peeked = Model<super::model::Database>;
pub fn to_value<T>(value: &T) -> Result<Value, Error>
where
T: Serialize,
{
patch_db::value::to_value(value).with_kind(ErrorKind::Serialization)
}
pub fn from_value<T>(value: Value) -> Result<T, Error>
where
T: DeserializeOwned,
{
patch_db::value::from_value(value).with_kind(ErrorKind::Deserialization)
}
#[async_trait::async_trait]
pub trait PatchDbExt {
async fn peek(&self) -> DatabaseModel;
async fn mutate<U: UnwindSafe + Send>(
&self,
f: impl FnOnce(&mut DatabaseModel) -> Result<U, Error> + UnwindSafe + Send,
) -> Result<U, Error>;
async fn map_mutate(
&self,
f: impl FnOnce(DatabaseModel) -> Result<DatabaseModel, Error> + UnwindSafe + Send,
) -> Result<DatabaseModel, Error>;
}
#[async_trait::async_trait]
impl PatchDbExt for PatchDb {
async fn peek(&self) -> DatabaseModel {
DatabaseModel::from(self.dump().await.value)
}
async fn mutate<U: UnwindSafe + Send>(
&self,
f: impl FnOnce(&mut DatabaseModel) -> Result<U, Error> + UnwindSafe + Send,
) -> Result<U, Error> {
Ok(self
.apply_function(|mut v| {
let model = <&mut DatabaseModel>::from(&mut v);
let res = f(model)?;
Ok::<_, Error>((v, res))
})
.await?
.1)
}
async fn map_mutate(
&self,
f: impl FnOnce(DatabaseModel) -> Result<DatabaseModel, Error> + UnwindSafe + Send,
) -> Result<DatabaseModel, Error> {
Ok(DatabaseModel::from(
self.apply_function(|v| f(DatabaseModel::from(v)).map(|a| (a.into(), ())))
.await?
.0,
))
}
}
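A usage sketch under this new API (the function is illustrative; accessors are the generated ones used throughout this changeset): reads go through a lock-free `peek` snapshot, and writes are batched into one atomic `mutate` closure.

async fn touch_all_packages(db: &PatchDb) -> Result<(), Error> {
    // consistent point-in-time snapshot, no locks held
    let ids = db.peek().await.as_package_data().keys()?;
    // one closure = one atomic revision
    db.mutate(|v| {
        for id in &ids {
            let _entry = v.as_package_data_mut().as_idx_mut(id);
        }
        Ok(())
    })
    .await
}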
/// &mut Model<T> <=> &mut Value
#[repr(transparent)]
#[derive(Debug)]
pub struct Model<T> {
value: Value,
phantom: PhantomData<T>,
}
impl<T: DeserializeOwned> Model<T> {
pub fn de(&self) -> Result<T, Error> {
from_value(self.value.clone())
}
}
impl<T: Serialize> Model<T> {
pub fn new(value: &T) -> Result<Self, Error> {
Ok(Self::from(to_value(value)?))
}
pub fn ser(&mut self, value: &T) -> Result<(), Error> {
self.value = to_value(value)?;
Ok(())
}
}
impl<T: Serialize + DeserializeOwned> Model<T> {
pub fn replace(&mut self, value: &T) -> Result<T, Error> {
let orig = self.de()?;
self.ser(value)?;
Ok(orig)
}
}
impl<T> Clone for Model<T> {
fn clone(&self) -> Self {
Self {
value: self.value.clone(),
phantom: PhantomData,
}
}
}
impl<T> From<Value> for Model<T> {
fn from(value: Value) -> Self {
Self {
value,
phantom: PhantomData,
}
}
}
impl<T> From<Model<T>> for Value {
fn from(value: Model<T>) -> Self {
value.value
}
}
impl<'a, T> From<&'a Value> for &'a Model<T> {
fn from(value: &'a Value) -> Self {
unsafe { std::mem::transmute(value) }
}
}
impl<'a, T> From<&'a Model<T>> for &'a Value {
fn from(value: &'a Model<T>) -> Self {
unsafe { std::mem::transmute(value) }
}
}
impl<'a, T> From<&'a mut Value> for &'a mut Model<T> {
fn from(value: &'a mut Value) -> Self {
unsafe { std::mem::transmute(value) }
}
}
impl<'a, T> From<&'a mut Model<T>> for &'a mut Value {
fn from(value: &'a mut Model<T>) -> Self {
unsafe { std::mem::transmute(value) }
}
}
impl<T> patch_db::Model<T> for Model<T> {
type Model<U> = Model<U>;
}
impl<T> Model<Option<T>> {
pub fn transpose(self) -> Option<Model<T>> {
use patch_db::ModelExt;
if self.value.is_null() {
None
} else {
Some(self.transmute(|a| a))
}
}
pub fn transpose_ref(&self) -> Option<&Model<T>> {
use patch_db::ModelExt;
if self.value.is_null() {
None
} else {
Some(self.transmute_ref(|a| a))
}
}
pub fn transpose_mut(&mut self) -> Option<&mut Model<T>> {
use patch_db::ModelExt;
if self.value.is_null() {
None
} else {
Some(self.transmute_mut(|a| a))
}
}
pub fn from_option(opt: Option<Model<T>>) -> Self {
use patch_db::ModelExt;
match opt {
Some(a) => a.transmute(|a| a),
None => Self::from_value(Value::Null),
}
}
}
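// Illustrative sketch: the `transpose*` family converts `Model<Option<T>>`
// into `Option<Model<T>>` (JSON null becomes `None`), so optional fields can
// be pattern-matched before deserializing. Assuming a binding
// `opt_manifest: &Model<Option<Manifest>>`:
//
//     if let Some(model) = opt_manifest.transpose_ref() {
//         let manifest: Manifest = model.de()?;
//     }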
pub trait Map: DeserializeOwned + Serialize {
type Key;
type Value;
}
impl<A, B> Map for BTreeMap<A, B>
where
A: serde::Serialize + serde::de::DeserializeOwned + Ord,
B: serde::Serialize + serde::de::DeserializeOwned,
{
type Key = A;
type Value = B;
}
impl<T: Map> Model<T>
where
T::Key: AsRef<str>,
T::Value: Serialize,
{
pub fn insert(&mut self, key: &T::Key, value: &T::Value) -> Result<(), Error> {
use serde::ser::Error;
let v = patch_db::value::to_value(value)?;
match &mut self.value {
Value::Object(o) => {
o.insert(InternedString::intern(key.as_ref()), v);
Ok(())
}
v => Err(patch_db::value::Error {
source: patch_db::value::ErrorSource::custom(format!("expected object found {v}")),
kind: patch_db::value::ErrorKind::Serialization,
}
.into()),
}
}
pub fn insert_model(&mut self, key: &T::Key, value: Model<T::Value>) -> Result<(), Error> {
use patch_db::ModelExt;
use serde::ser::Error;
let v = value.into_value();
match &mut self.value {
Value::Object(o) => {
o.insert(InternedString::intern(key.as_ref()), v);
Ok(())
}
v => Err(patch_db::value::Error {
source: patch_db::value::ErrorSource::custom(format!("expected object found {v}")),
kind: patch_db::value::ErrorKind::Serialization,
}
.into()),
}
}
}
impl<T: Map> Model<T>
where
T::Key: DeserializeOwned + Ord + Clone,
{
pub fn keys(&self) -> Result<Vec<T::Key>, Error> {
use serde::de::Error;
use serde::Deserialize;
match &self.value {
Value::Object(o) => o
.keys()
.cloned()
.map(|k| {
T::Key::deserialize(patch_db::value::de::InternedStringDeserializer::from(k))
.map_err(|e| {
patch_db::value::Error {
kind: patch_db::value::ErrorKind::Deserialization,
source: e,
}
.into()
})
})
.collect(),
v => Err(patch_db::value::Error {
source: patch_db::value::ErrorSource::custom(format!("expected object found {v}")),
kind: patch_db::value::ErrorKind::Deserialization,
}
.into()),
}
}
pub fn into_entries(self) -> Result<Vec<(T::Key, Model<T::Value>)>, Error> {
use patch_db::ModelExt;
use serde::de::Error;
use serde::Deserialize;
match self.value {
Value::Object(o) => o
.into_iter()
.map(|(k, v)| {
Ok((
T::Key::deserialize(patch_db::value::de::InternedStringDeserializer::from(
k,
))
.with_kind(ErrorKind::Deserialization)?,
Model::from_value(v),
))
})
.collect(),
v => Err(patch_db::value::Error {
source: patch_db::value::ErrorSource::custom(format!("expected object found {v}")),
kind: patch_db::value::ErrorKind::Deserialization,
}
.into()),
}
}
pub fn as_entries(&self) -> Result<Vec<(T::Key, &Model<T::Value>)>, Error> {
use patch_db::ModelExt;
use serde::de::Error;
use serde::Deserialize;
match &self.value {
Value::Object(o) => o
.iter()
.map(|(k, v)| {
Ok((
T::Key::deserialize(patch_db::value::de::InternedStringDeserializer::from(
k.clone(),
))
.with_kind(ErrorKind::Deserialization)?,
Model::value_as(v),
))
})
.collect(),
v => Err(patch_db::value::Error {
source: patch_db::value::ErrorSource::custom(format!("expected object found {v}")),
kind: patch_db::value::ErrorKind::Deserialization,
}
.into()),
}
}
pub fn as_entries_mut(&mut self) -> Result<Vec<(T::Key, &mut Model<T::Value>)>, Error> {
use patch_db::ModelExt;
use serde::de::Error;
use serde::Deserialize;
match &mut self.value {
Value::Object(o) => o
.iter_mut()
.map(|(k, v)| {
Ok((
T::Key::deserialize(patch_db::value::de::InternedStringDeserializer::from(
k.clone(),
))
.with_kind(ErrorKind::Deserialization)?,
Model::value_as_mut(v),
))
})
.collect(),
v => Err(patch_db::value::Error {
source: patch_db::value::ErrorSource::custom(format!("expected object found {v}")),
kind: patch_db::value::ErrorKind::Deserialization,
}
.into()),
}
}
}
impl<T: Map> Model<T>
where
T::Key: AsRef<str>,
{
pub fn into_idx(self, key: &T::Key) -> Option<Model<T::Value>> {
use patch_db::ModelExt;
match &self.value {
Value::Object(o) if o.contains_key(key.as_ref()) => Some(self.transmute(|v| {
use patch_db::value::index::Index;
key.as_ref().index_into_owned(v).unwrap()
})),
_ => None,
}
}
pub fn as_idx<'a>(&'a self, key: &T::Key) -> Option<&'a Model<T::Value>> {
use patch_db::ModelExt;
match &self.value {
Value::Object(o) if o.contains_key(key.as_ref()) => Some(self.transmute_ref(|v| {
use patch_db::value::index::Index;
key.as_ref().index_into(v).unwrap()
})),
_ => None,
}
}
pub fn as_idx_mut<'a>(&'a mut self, key: &T::Key) -> Option<&'a mut Model<T::Value>> {
use patch_db::ModelExt;
match &mut self.value {
Value::Object(o) if o.contains_key(key.as_ref()) => Some(self.transmute_mut(|v| {
use patch_db::value::index::Index;
key.as_ref().index_or_insert(v)
})),
_ => None,
}
}
pub fn remove(&mut self, key: &T::Key) -> Result<Option<Model<T::Value>>, Error> {
use serde::ser::Error;
match &mut self.value {
Value::Object(o) => {
let v = o.remove(key.as_ref());
Ok(v.map(patch_db::ModelExt::from_value))
}
v => Err(patch_db::value::Error {
source: patch_db::value::ErrorSource::custom(format!("expected object found {v}")),
kind: patch_db::value::ErrorKind::Serialization,
}
.into()),
}
}
}
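Taken together, the `Map` impls give typed, checked access to the raw JSON objects. A minimal usage sketch, assuming a mutable `Model<BTreeMap<PackageId, Manifest>>` named `map` (bindings illustrative, error handling elided to `?`):

// List keys, borrow one entry for deserialization, then remove it.
let ids: Vec<PackageId> = map.keys()?;
if let Some(first) = ids.first() {
    if let Some(entry) = map.as_idx(first) {
        let manifest: Manifest = entry.de()?;
    }
    map.remove(first)?;
}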

File diff suppressed because it is too large


@@ -3,7 +3,8 @@ use std::io::Write;
 use std::path::Path;
 use ed25519::pkcs8::EncodePrivateKey;
-use ed25519_dalek::Keypair;
+use ed25519::PublicKeyBytes;
+use ed25519_dalek::{SigningKey, VerifyingKey};
 use rpc_toolkit::command;
 use tracing::instrument;
@@ -21,11 +22,11 @@ pub fn init(#[context] ctx: SdkContext) -> Result<(), Error> {
             .with_ctx(|_| (crate::ErrorKind::Filesystem, parent.display().to_string()))?;
         }
         tracing::info!("Generating new developer key...");
-        let keypair = Keypair::generate(&mut rand_old::thread_rng());
+        let secret = SigningKey::generate(&mut rand::thread_rng());
         tracing::info!("Writing key to {}", ctx.developer_key_path.display());
         let keypair_bytes = ed25519::KeypairBytes {
-            secret_key: keypair.secret.to_bytes(),
-            public_key: Some(keypair.public.to_bytes()),
+            secret_key: secret.to_bytes(),
+            public_key: Some(PublicKeyBytes(VerifyingKey::from(&secret).to_bytes())),
         };
         let mut dev_key_file = File::create(&ctx.developer_key_path)?;
         dev_key_file.write_all(
@@ -35,6 +36,15 @@ pub fn init(#[context] ctx: SdkContext) -> Result<(), Error> {
             .as_bytes(),
         )?;
         dev_key_file.sync_all()?;
+        println!(
+            "New developer key generated at {}",
+            ctx.developer_key_path.display()
+        );
+    } else {
+        println!(
+            "Developer key already exists at {}",
+            ctx.developer_key_path.display()
+        );
     }
     Ok(())
 }
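This hunk tracks the ed25519-dalek 1.x → 2.x API: the old `Keypair` is split into `SigningKey`/`VerifyingKey`, and the public half is wrapped in `ed25519::PublicKeyBytes` for PKCS#8 encoding. A minimal self-contained sketch of the same pattern (crate wiring assumed: `ed25519` with its `pkcs8` feature, `ed25519-dalek` 2.x, `rand` 0.8):

use ed25519::pkcs8::{EncodePrivateKey, LineEnding};
use ed25519::{KeypairBytes, PublicKeyBytes};
use ed25519_dalek::{SigningKey, VerifyingKey};

fn generate_pem() -> Result<String, ed25519::pkcs8::Error> {
    // 2.x has no Keypair type; generate the signing half and derive the rest.
    let secret = SigningKey::generate(&mut rand::thread_rng());
    let keypair = KeypairBytes {
        secret_key: secret.to_bytes(),
        public_key: Some(PublicKeyBytes(VerifyingKey::from(&secret).to_bytes())),
    };
    // to_pkcs8_pem returns Zeroizing<String>, which derefs to String here.
    Ok(keypair.to_pkcs8_pem(LineEnding::LF)?.to_string())
}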


@@ -12,8 +12,6 @@ use crate::shutdown::Shutdown;
 use crate::util::display_none;
 use crate::Error;
-pub const SYSTEMD_UNIT: &'static str = "embassy-init";
 #[command(subcommands(error, logs, exit, restart, forget_disk, disk, rebuild))]
 pub fn diagnostic() -> Result<(), Error> {
     Ok(())
@@ -30,7 +28,7 @@ pub async fn logs(
     #[arg] cursor: Option<String>,
     #[arg] before: bool,
 ) -> Result<LogResponse, Error> {
-    Ok(fetch_logs(LogSource::Service(SYSTEMD_UNIT), limit, cursor, before).await?)
+    Ok(fetch_logs(LogSource::System, limit, cursor, before).await?)
 }
 #[command(display(display_none))]
@@ -43,8 +41,10 @@ pub fn exit(#[context] ctx: DiagnosticContext) -> Result<(), Error> {
 pub fn restart(#[context] ctx: DiagnosticContext) -> Result<(), Error> {
     ctx.shutdown
         .send(Some(Shutdown {
-            datadir: ctx.datadir.clone(),
-            disk_guid: ctx.disk_guid.clone(),
+            export_args: ctx
+                .disk_guid
+                .clone()
+                .map(|guid| (guid, ctx.datadir.clone())),
             restart: true,
         }))
         .expect("receiver dropped");
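Reading this hunk, `Shutdown` evidently replaces its separate `datadir`/`disk_guid` fields with a single optional export target, so no export is attempted when there is no disk GUID. Roughly (a sketch inferred from the call site; the real field types may differ):

pub struct Shutdown {
    /// (disk guid, data directory) to export on the way down, if any
    pub export_args: Option<(Arc<String>, PathBuf)>,
    pub restart: bool,
}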


@@ -0,0 +1,32 @@
use std::path::Path;
use tokio::process::Command;
use tracing::instrument;
use crate::disk::fsck::RequiresReboot;
use crate::util::Invoke;
use crate::Error;
#[instrument(skip_all)]
pub async fn btrfs_check_readonly(logicalname: impl AsRef<Path>) -> Result<RequiresReboot, Error> {
Command::new("btrfs")
.arg("check")
.arg("--readonly")
.arg(logicalname.as_ref())
.invoke(crate::ErrorKind::DiskManagement)
.await?;
Ok(RequiresReboot(false))
}
pub async fn btrfs_check_repair(logicalname: impl AsRef<Path>) -> Result<RequiresReboot, Error> {
Command::new("btrfs")
.arg("check")
.arg("--repair")
.arg("--force")
.arg(logicalname.as_ref())
.invoke(crate::ErrorKind::DiskManagement)
.await?;
Ok(RequiresReboot(false))
}
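An illustrative call site for these helpers (device path and escalation policy are assumptions; `--force` on the repair path runs `btrfs check --repair` without its interactive confirmation prompt):

// Try a read-only check first; escalate to --repair only if it fails.
if btrfs_check_readonly("/dev/sda2").await.is_err() {
    let RequiresReboot(reboot) = btrfs_check_repair("/dev/sda2").await?;
    if reboot {
        // the caller decides how to reboot
    }
}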

Some files were not shown because too many files have changed in this diff.