Compare commits

...

210 Commits

Author SHA1 Message Date
Matt Hill
871f78b570 Feature/server status restarting (#2503)
* extend `server-info`

* add restarting, shutting down to FE status bar

* fix build

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-11-08 02:31:18 -07:00
Aiden McClelland
753fbc0c5c disable CoW for journal (#2501) 2023-11-07 18:16:02 +00:00
Aiden McClelland
748277aa0e do not wait for input on btrfs repair (#2500) 2023-11-07 09:21:32 -07:00
Aiden McClelland
bf40a9ef6d improve Invoke api (#2499)
* improve Invoke api

* fix formatting
2023-11-06 17:26:45 -07:00
Lucy
733000eaa2 fix docs links (#2498)
* fix docs links

* forgot to save file

* fix docs links and small updates to ca wizard

* add downloaded filename

* fix skip detail
2023-11-06 17:24:15 -07:00
Lucy
6a399a7250 Fix/setup (#2495)
* move enter-click directive to shared

* allow enter click to continue to login in kiosk mode; adjust styling

* cleanup

* add styling to ca wizard

* rebase new changes

* mobile fixes

* cleanup

* cleanup

* update styling

* cleanup import

* minor css changes

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2023-11-02 17:35:55 -06:00
Aiden McClelland
7ba22f1a09 mute errors due to failed incoming network connections (#2497)
* mute errors due to failed incoming network connections

* fix log entry formatting

* Update cleanDanglingImages

* Update cleanDanglingImages
2023-11-02 17:33:41 -06:00
Aiden McClelland
f54f950f81 fix js backups (#2496) 2023-11-02 23:13:48 +00:00
Matt Hill
4625711606 better UX for http/https switching (#2494)
* open https in same tab

* open http in same tab, use windowRef instead of window
2023-11-02 09:34:00 -06:00
Aiden McClelland
5735ea2b3c change grub os selection to say "StartOS" (#2493)
* change grub os selection to say "StartOS"

* readd "v" to motd
2023-11-01 18:35:18 -06:00
Matt Hill
b597d0366a fix time display bug and type metrics (#2490)
* fix time display bug and type metrics

* change metrics response

* nullable temp

* rename percentage used

* match frontend types

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-11-01 22:30:54 +00:00
Matt Hill
9c6dcc4a43 use keys to complete setup and redesign final buttons (#2492) 2023-11-01 22:30:42 +00:00
Aiden McClelland
27c5464cb6 use low mem for all argon2 configs (#2491)
* reduce argon2 memory usage

* update FE argon2

* fix missing typedefs

* use low mem for all argon2 configs
2023-11-01 22:28:59 +00:00
Matt Hill
1dad7965d2 rework ca-wiz and add icons to menu for warnings (#2486)
* rework ca-wiz and add icons to menu for warnings

* remove root CA button from home page

* load fonts before calling complete in setup wiz
2023-11-01 19:36:56 +00:00
Aiden McClelland
c14ca1d7fd use existing dependency icon if available (#2489) 2023-11-01 19:23:14 +00:00
Mariusz Kogen
2b9e7432b8 Updated motd with new logo (#2488) 2023-11-01 19:22:49 +00:00
Aiden McClelland
547747ff74 continuous deployment (#2485)
* continuous deployment

* fix

* escape braces in format string

* Update upload-ota.sh

* curl fail on http error
2023-11-01 19:22:34 +00:00
J H
e5b137b331 fix: Logging in deno to filter out the info (#2487)
Co-authored-by: jh <jh@Desktop.hsd1.co.comcast.net>
2023-10-31 15:38:00 -06:00
Aiden McClelland
9e554bdecd cleanup network keys on uninstall (#2484) 2023-10-30 16:43:00 +00:00
Aiden McClelland
765b542264 actually enable zram during migration (#2483)
actually enable zram during migration
2023-10-27 23:34:02 +00:00
Aiden McClelland
182a095420 use old secret key derivation function (#2482)
* use old secret key derivation function

* compat

* cargo
2023-10-27 23:32:21 +00:00
Aiden McClelland
0865cffddf add 1 day margin on start time (#2481) 2023-10-27 18:56:06 +00:00
Aiden McClelland
5a312b9900 use correct sigterm_timeout (#2480) 2023-10-27 18:55:55 +00:00
Matt Hill
af2b2f33c2 Fix/ntp (#2479)
* rework ntp failure handling and display to user

* uptime in seconds

* change how we handle ntp

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-10-26 23:33:57 +00:00
Aaron Dewes
9aa08dfb9b Delete Cargo.lock (#2477) 2023-10-26 22:16:52 +00:00
Matt Hill
b28c673133 closes #2454 (#2478) 2023-10-26 16:02:53 -06:00
Matt Hill
9a545f176d display restoring when restoring (#2476) 2023-10-25 12:08:39 -06:00
Lucy
65728eb6ab allow tab completion on final setup stage in kiosk mode (#2473) 2023-10-25 00:24:11 -06:00
Aiden McClelland
531e037974 Bugfix/argon2 mem usage (#2474)
* reduce argon2 memory usage

* update FE argon2

* fix missing typedefs
2023-10-25 00:17:29 -06:00
Aiden McClelland
a96467cb3e fix raspi build (#2472)
* fix raspi build

* Update build.sh
2023-10-24 21:27:33 +00:00
Aiden McClelland
6e92a7d93d Bugfix/output timeout (#2471)
* Fix: Test with the buf reader never finishing

* fix NoOutput deserialization

---------

Co-authored-by: J H <2364004+Blu-J@users.noreply.github.com>
2023-10-23 21:46:46 -06:00
Aiden McClelland
740e63da2b enable zram by default (#2470) 2023-10-23 21:01:52 +00:00
J H
a69cae22dd chore: Cleaning up the stream to a regular stream (#2468) 2023-10-23 20:41:54 +00:00
Aiden McClelland
8ea3c3c29e consolidate and streamline build (#2469)
* consolidate and streamline build

* fix workflow syntax

* fix workflow syntax

* fix workflow syntax

* fix workflow syntax

* fix build scripts

* only build platform-specific system images

* fix build script

* more build fixes

* fix

* fix compat build for x86

* wat

* checkout

* Prevent rebuild of compiled artifacts

* Update startos-iso.yaml

* Update startos-iso.yaml

* fix raspi build

* handle missing platform better

* reduce arm vcpus

* remove arch and platform from fe config, add to patch db

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2023-10-23 20:40:00 +00:00
Aiden McClelland
63ab739b3d update Cargo.lock 2023-10-19 09:08:15 -06:00
Aiden McClelland
58bb788034 chore: update dependencies (#2465)
* chore: update dependencies

* fix crypto

* update deno

* update release notes
2023-10-18 22:53:54 +00:00
J H
9e633b37e7 Fix/quarantine deno (#2466)
* fix: Move the embedded deno into a separate binary.

This should be the quick hacky way of making sure that the memory leaks won't happen

* fix:
2023-10-18 22:02:45 +00:00
Lucy
bb6a4842bd Update/misc fe (#2463)
* update to use transparent icon; add icon to login

* update setup mocks to imitate reality

* update webmanifest version

* fix version in webmanifest

* reset icons with background; update login page style

* adjust login header

* cleanup + adjust icon size

* revert icon

* cleanup and reposition error message
2023-10-18 12:24:48 -06:00
Aiden McClelland
246727995d add tokio-console if unstable (#2461)
* add tokio-console if `unstable`

* instrument vhost controller
2023-10-17 19:25:44 +00:00
Aiden McClelland
202695096a Feature/simple syncdb (#2464)
* simplify db sync on rpc endpoints

* switch to patch-db master

* update fe for websocket only stream

* fix api

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2023-10-17 15:49:58 +00:00
J H
afbab293a8 Chore: remove an arc mutex that wasn't necessary (#2462) 2023-10-16 22:26:59 +00:00
Aiden McClelland
78faf888af fix some causes of start wonkiness on update (#2458)
* fix some causes of start wonkiness on update

* fix race condition with manager

Co-authored-by: J H <Blu-J@users.noreply.github.com>

* only restart if running

* fix start function

* clean up code

* fix restart logic

---------

Co-authored-by: J H <Blu-J@users.noreply.github.com>
2023-10-16 18:34:12 +00:00
Matt Hill
5164c21923 stop while starting or restarting (#2460) 2023-10-16 18:23:12 +00:00
Aiden McClelland
edcd1a3c5b only use first sensor of each group for temp reporting (#2457) 2023-10-16 18:19:24 +00:00
Lucy
532ab9128f add apollo review badge and update badges with icons (#2456)
* add apollo review badge and update badges with icons

* fix mastodon
2023-10-16 12:16:44 -06:00
Aiden McClelland
a3072aacc2 add firmware updater (#2455) 2023-10-16 17:42:42 +00:00
Lucy
27296d8880 update docs links (#2452)
* update docs links

* update backups links

* add anchor tag back to trust root ca link

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2023-10-13 09:02:59 -06:00
J H
8549b9bc37 fix: Add in logging for the podman in the system logs (#2451)
* fix: Add in logging for the podman in the system logs

* https as default for main tor address

---------

Co-authored-by: agent <kn0wmad@protonmail.com>
2023-10-10 14:13:56 -06:00
Aiden McClelland
7632373097 fix cors middleware (#2450)
* fix cors response

* fix cors properly
2023-10-09 17:34:27 -06:00
Matt Hill
23b0674ac0 fix cert name and show ca wiz on http ip (#2448) 2023-10-06 16:40:11 -06:00
Matt Hill
01f0484a0e fix health check error (#2447) 2023-10-06 15:25:58 -06:00
Mariusz Kogen
3ca9035fdb Use the correct OS name (#2445)
Use correct OS name
2023-10-06 09:29:54 -06:00
Matt Hill
caaf9d26db Fix/patch fe (#2444)
* clear caches on logout

* fix uninstall pkg missing error
2023-10-05 19:04:10 -06:00
Aiden McClelland
eb521b2332 enable trimming in luks (#2443) 2023-10-05 23:40:44 +00:00
Aiden McClelland
68c29ab99e allow UNSET country code for wifi (#2442) 2023-10-05 22:14:51 +00:00
Matt Hill
f12b7f4319 fix cert endpoint 2023-10-05 14:42:41 -06:00
Aiden McClelland
7db331320a Update LICENSE (#2441)
* Update LICENSE

* update README.md

* update release notes
2023-10-05 19:37:31 +00:00
Aiden McClelland
97ad8a85c3 update cargo lock 2023-10-05 08:59:24 -06:00
Aiden McClelland
6f588196cb set governor to "performance" if available (#2438)
* set governor to "performance" if available

* add linux-cpupower

* fix: Boolean blindness, thanks @dr-bones

---------

Co-authored-by: J H <2364004+Blu-J@users.noreply.github.com>
2023-10-04 20:52:56 +00:00
Aiden McClelland
20241c27ee prevent stack overflow on shutdown (#2440)
* prevent stack overflow on shutdown

* fix

---------

Co-authored-by: J H <2364004+Blu-J@users.noreply.github.com>
2023-10-04 19:51:58 +00:00
Matt Hill
05d6aea37f remove hard coded timeout 2023-10-04 13:06:49 -06:00
Matt Hill
7e0e7860cd cancel old request and base interval on tor (#2439) 2023-10-04 13:00:49 -06:00
J H
a0afd7b8ed fixing: Reimplement https://github.com/Start9Labs/start-os/pull/2391 (#2437)
* fixing: Reimplement https://github.com/Start9Labs/start-os/pull/2391

* remove the none thing
2023-10-04 18:06:43 +00:00
Matt Hill
500369ab2b Update/logos (#2435)
* update logos to startos icon

* readme too

* fix spelling
2023-10-03 10:53:29 -06:00
Aiden McClelland
dc26d5c0c8 Bugfix/var tmp (#2434)
* mount /var/tmp to data drive

* clear var tmp on restart
2023-10-02 21:50:05 +00:00
Aiden McClelland
0def02f604 mount /var/tmp to data drive (#2433) 2023-10-02 21:18:39 +00:00
J H
0ffa9167da feat: Add in the ssl_size (#2432)
* feat: Add in the ssl_size

* chore: Changes for the naming and other things.
2023-10-02 21:15:24 +00:00
Aiden McClelland
a110e8f241 make migration more resilient 2023-10-02 10:02:40 -06:00
gStart9
491f363392 Shore up Firefox kiosk mode (#2422)
* Shore up Firefox kiosk mode

* Bash shell for kiosk user

* enable-kiosk script final-ish touches

* make script idempotent

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-09-30 00:32:15 +00:00
Matt Hill
33a67bf7b4 bump FE version and release notes (#2429)
* bump FE version and release notes

* change cargo.toml version

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-09-29 22:32:10 +00:00
Matt Hill
1e6f583431 only emit when things change (#2428)
* only emit when things change

* remove log

* remove test call

* more efficient, thanks BluJ

* point free
2023-09-29 14:36:40 -06:00
J H
5e3412d735 feat: Change all the dependency errors at once (#2427)
* feat: Change all the dependency errors at once

* remove deprecated dependency-errors field

* set pointers to [] by default

* chore: Something about fixing the build

* fix migration

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-09-29 12:08:53 -06:00
Matt Hill
e6e4cd63f3 fix welcome page 2023-09-28 14:54:20 -06:00
Aiden McClelland
f5da5f4ef0 registry admin script (#2426)
* registryadmin scripts

Add `start-sdk publish` command which can potentially replace
the Haskell implementation of `registry-publish upload`

* restructure modules

---------

Co-authored-by: Sam Sartor <me@samsartor.com>
2023-09-28 17:19:31 +00:00
J H
9a202cc124 Refactor/patch db (#2415)
* the only way to begin is by beginning

* chore: Convert over 3444 migration

* fix imports

* wip

* feat: convert volume

* convert: system.rs

* wip(convert): Setup

* wip properties

* wip notifications

* wip

* wip migration

* wip init

* wip auth/control

* wip action

* wip control

* wip 034

* wip 344

* wip some more versions converted

* feat: Reserialize the version of the db

* wip rest of the versions

* wip s9pk/manifest

* wip wifi

* chore: net/keys

* chore: net/dns

* wip net/dhcp

* wip manager manager-map

* gut dependency errors

* wip update/mod

* detect breakages locally for updates

* wip: manager/mod

* wip: manager/health

* wip: backup/target/mod

* fix: Typo addresses

* clean control.rs

* fix system package id

* switch to btreemap for now

* config wip

* wip manager/mod

* install wip

Co-authored-by: J H <Blu-J@users.noreply.github.com>

* chore: Update the last of the errors

* feat: Change the prelude de to borrow

* feat: Adding in some more things

* chore: add to the prelude

* chore: Small fixes

* chore: Fixing the small errors

* wip: Cleaning up check errors

* wip: Fix some of the issues

* chore: Fix setup

* chore:fix version

* chore: prelude, mod, http_reader

* wip backup_bulk

* chore: Last of the errors

* update package.json

* chore: changes needed for a build

* chore: Removing some of the linting errors in the manager

* chore: Some linting 101

* fix: Wrong order of who owns what

* chore: Remove the unstable

* chore: Remove the test in the todo

* @dr-bonez did a refactoring on the backup

* chore: Make sure that there can only be one override guard at a time

* resolve most todos

* wip: Add some more tracing to debug an error

* wip: Use a mv instead of rename

* wip: Revert some of the missing code segments found earlier

* chore: Make the build

* chore: Something about the lib looks like it is broken

* wip: More instrument and dev working

* kill netdummy before creating it

* better db analysis tools

* fixes from testing

* fix: Make add start the service

* fix status after install

* make wormhole

* fix missing icon file

* fix data url for icons

* fix: Bad deser

* bugfixes

* fix: Backup

* fix: Some of the restore

* fix: Restoring works

* update frontend patch-db types

* hack it in (#2424)

* hack it in

* optimize

* slightly cleaner

* handle config pointers

* dependency config errs

* fix compat

* cache docker

* fix dependency expectation

* fix dependency auto-config

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
Co-authored-by: Matt Hill <mattnine@protonmail.com>
Co-authored-by: J H <Blu-J@users.noreply.github.com>
Co-authored-by: Matt Hill <MattDHill@users.noreply.github.com>
2023-09-27 21:46:48 +00:00
Aiden McClelland
c305deab52 do not require auth for cert (#2425)
* do not require auth for cert

* use unauthenticated cert path

---------

Co-authored-by: Lucy Cifferello <12953208+elvece@users.noreply.github.com>
2023-09-27 20:12:07 +00:00
Lucy
0daaf3b1ec enable switching to https on login page (#2406)
* enable switching to https on login page

* add trust Root CA to http login page

* add node-jose back for setup wiz

* add tooltips, branding, logic for launch box spinner display, and enable config to toggle https mode on mocks

* cleanup

* copy changes

* style fixes

* abstract component, fix https mocks

* always show login from localhost

* launch .local when on IP

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2023-09-26 12:47:47 -06:00
J H
8e21504bdb fix: initialize images before netdummy (#2418) 2023-09-14 20:06:44 +00:00
Mariusz Kogen
fcf1be52ac Add tor-check to CLI (#2412)
* Link tor-check

* Add tor-check.sh

* Improve code readability

* Link tor-check from postinst
2023-09-13 12:25:00 -06:00
Mariusz Kogen
394bc9ceb8 SDK install script fix (#2411) 2023-09-07 23:40:04 -06:00
Aiden McClelland
e3786592b2 Feature/remove bollard (#2396)
* wip

* remove bollard, add podman feature

* fix error message parsing

* fix subcommand

* fix typo

* use com.docker.network.bridge.name for podman

* fix parse error

* handle podman network interface nuance

* add libyajl2

* use podman repos

* manually add criu

* do not manually require criu

* remove docker images during cleanup stage

* force removal

* increase img size

* Update startos-iso.yaml

* don't remove docker
2023-08-24 19:20:48 -06:00
Aiden McClelland
d6eaf8d3d9 disable docker memory accounting (#2399) 2023-08-23 16:47:43 +00:00
J H
b1c23336e3 Refactor/service manager (#2401)
* wip: Pulling in the features of the refactor since march

* chore: Fixes to make the system able to build

* chore: Adding in the documentation for the manager stuff

* feat: Restarting and wait for stop

* feat: Add a soft shutdown not commit to db.

* chore: Remove the comments of bluj

* chore: Clean up some of the linting errors

* chore: Clean up the signal

* chore: Some more cleanup

* fix: The configure

* fix: A missing config

* fix: typo

* chore: Remove a comment of BLUJ that needed to be removed
2023-08-23 00:08:55 -06:00
Jadi
44c5073dea backend: sdk init: output file location. fixes #1854 (#2393)
* `start-sdk init` used to run completely silent. Now we are
  showing the current/generated developer.key.pem based on
  ticket https://github.com/Start9Labs/start-os/issues/1854
2023-08-17 22:24:03 +00:00
J H
b7593fac44 Fixes: Builds for the macs (#2397)
* Fixes: Builds for the macs

* misc: Allow the feature flags run during the build for the avahi tools
2023-08-17 22:23:33 +00:00
J H
af116794c4 fix: Add in the code to create the life check for the ui to keep the … (#2391)
* fix: Add in the code to create the life check for the ui to keep the ws alive

* Update Cargo.toml

* Update rpc.rs
2023-08-16 18:43:17 +00:00
Jadi
88c85e1d8a frontend: ui: bugfix: consistent password length. (#2394)
frontend: ui: bugfix: consistent password length.

* The password set dialogue forces maxlength=64 but when logging in,
  the dialogue does not force this. This causes an issue when the user
  copy/pastes a longer than 64 character password in boxes. closes #2375
2023-08-16 07:59:05 -06:00
Aiden McClelland
9322b3d07e be resilient to bad lshw output (#2390) 2023-08-08 17:36:14 -06:00
Lucy
55f5329817 update readme layout and assets (#2382)
* update readme layout and assets

* Update README.md

---------

Co-authored-by: Matt Hill <MattDHill@users.noreply.github.com>
2023-08-02 21:45:14 -04:00
Matt Hill
79d92c30f8 Update README.md (#2381) 2023-08-02 12:37:07 -06:00
Aiden McClelland
73229501c2 Feature/hw filtering (#2368)
* update deno

* add proxy

* remove query params, now auto added by BE

* add hardware requirements and BE reg query params

* update query params for BE requests

* allow multiple arches in hw reqs

* explain git hash mismatch

* require lshw

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2023-08-02 09:52:38 -06:00
Reckless_Satoshi
32ca91a7c9 add qr code to insights->about->tor (#2379)
* add qr code to insights->about->tor

* fix address PR feedback from @elvece; inject modelCtrl in ctor
2023-08-01 17:06:47 -04:00
Aiden McClelland
9e03ac084e add cli & rpc to edit db with jq syntax (#2372)
* add cli & rpc to edit db with jq syntax

* build fixes

* fix build

* fix build

* update cargo.lock
2023-07-25 16:22:58 -06:00
Aiden McClelland
082c51109d fix missing parent dir (#2373) 2023-07-25 10:07:10 -06:00
Aiden McClelland
8f44c75dc3 switch back to github caching (#2371)
* switch back to github caching

* remove npm and cargo cache

* misc fixes
2023-07-25 10:06:57 -06:00
Aiden McClelland
234f0d75e8 mute unexpected eof & protect against fd leaks (#2369) 2023-07-20 17:40:30 +00:00
Lucy
564186a1f9 Fix/mistake reorganize (#2366)
* revert patch for string parsing fix due to out of date yq version

* reorganize conditionals

* use ng-container

* alertButton needs to be outside of template or container
2023-07-19 10:54:18 -06:00
Lucy
ccdb477dbb Fix/pwa refresh (#2359)
* fix ROFS error on os install

* attempt to prompt browser to update manifest data with id and modified start_url

* update icon with better shape for ios

* add additional options for refreshing on pwas

* add loader to pwa reload

* fix pwa icon and add icon for ios

* add logic for refresh display depending on if pwa

* fix build for ui; fix numeric parsing error on osx

* typo

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-07-19 09:11:23 -06:00
Aiden McClelland
5f92f9e965 fix ROFS error on os install (#2364) 2023-07-19 08:50:02 -06:00
Aiden McClelland
c2db4390bb single platform builds (#2365) 2023-07-18 19:50:27 -06:00
Matt Hill
11c21b5259 Fix bugs (#2360)
* fix reset tor, delete http redirect, show message for tor http, update release notes

* potentially fix double req to registries

* change language around LAN and root ca

* link locally instead of docs
2023-07-18 12:38:52 -06:00
Aiden McClelland
3cd9e17e3f migrate tor address to https (#2358) 2023-07-18 12:08:34 -06:00
Aiden McClelland
1982ce796f update deno (#2361) 2023-07-18 11:59:00 -06:00
Aiden McClelland
825e18a551 version bump (#2357)
* version bump

* update welcome page

---------

Co-authored-by: Lucy Cifferello <12953208+elvece@users.noreply.github.com>
2023-07-14 14:58:19 -06:00
Aiden McClelland
9ff0128fb1 support http2 alpn handshake (#2354)
* support http2 alpn handshake

* fix protocol name

* switch to https for tor

* update setup wizard and main ui to accommodate https (#2356)

* update setup wizard and main ui to accommodate https

* update wording in download doc

* fix accidental conversion of tor https for services and allow ws still

* redirect to https if available

* fix replaces to only search at beginning and ignore localhost when checking for https

---------

Co-authored-by: Lucy <12953208+elvece@users.noreply.github.com>
2023-07-14 14:58:02 -06:00
Matt Hill
36c3617204 permit IP for cifs backups (#2342)
* permit IP for cifs backups

* allow ip instead of hostname (#2347)

---------

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-07-14 18:52:33 +00:00
Aiden McClelland
90a9db3a91 disable encryption for new raspi setups (#2348)
* disable encryption for new raspi setups

* use config instead of OS_ARCH

* fixes from testing
2023-07-14 18:30:52 +00:00
Aiden McClelland
59d6795d9e fix all references embassyd -> startd (#2355) 2023-07-14 18:29:20 +00:00
Aiden McClelland
2c07cf50fa better transfer progress (#2350)
* better transfer progress

* frontend for calculating transfer size

* fixes from testing

* improve internal api

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2023-07-13 19:40:53 -06:00
Aiden McClelland
cc0e525dc5 fix incoherent when removing (#2332)
* fix incoherent when removing

* include all packages for current dependents
2023-07-13 20:36:48 +00:00
Aiden McClelland
73bd973109 delete disk guid on reflash (#2334)
* delete disk guid on reflash

* delete unnecessary files before copy
2023-07-13 20:36:35 +00:00
Aiden McClelland
a7e501d874 pack compressed assets into single binary (#2344)
* pack compressed assets into single binary

* update naming

* tweaks

* fix build

* fix cargo lock

* rename CLI

* remove explicit ref name
2023-07-12 22:51:05 +00:00
Matt Hill
4676f0595c add reset password to UI (#2341) 2023-07-11 17:23:40 -06:00
kn0wmad
1d3d70e8d6 Update README.md (#2337)
* Update README.md

* Update README.md
2023-07-07 10:23:31 -06:00
Mariusz Kogen
bada88157e Auto-define the OS_ARCH variable. (#2329) 2023-06-30 20:34:10 +00:00
J H
13f3137701 fix: Make check-version posix compliant (#2331)
We found that we couldn't compile this on the mac arm os
2023-06-29 22:28:13 +00:00
Aiden McClelland
d3316ff6ff make it faster (#2328)
* make it faster

* better pipelining

* remove unnecessary test

* use tmpfs for debspawn

* don't download intermediate artifacts

* fix upload dir path

* switch to buildjet

* use buildjet cache on buildjet runner

* native builds when fast

* remove quotes

* always use buildjet cache

* remove newlines

* delete data after done with it

* skip aarch64 for fast dev builds

* don't tmpfs for arm

* don't try to remove debspawn tmpdir
2023-06-28 13:37:26 -06:00
kn0wmad
1b384e61b4 maint/minor UI typo fixes (#2330)
* Minor copy fixes

* Contact link fixes
2023-06-28 13:03:33 -06:00
Matt Hill
addea20cab Update README 2023-06-27 10:10:01 -06:00
Matt Hill
fac23f2f57 update README 2023-06-27 10:06:42 -06:00
Aiden McClelland
bffe1ccb3d use a more resourced runner for production builds (#2322) 2023-06-26 16:27:11 +00:00
Matt Hill
e577434fe6 Update bug-report.yml 2023-06-25 13:38:26 -06:00
Matt Hill
5d1d9827e4 Update bug-report.yml 2023-06-25 13:35:45 -06:00
Aiden McClelland
dd28ad20ef use port instead of pidof to detect tor going down (#2320)
* use port instead of pidof to detect tor going down

* fix errors

* healthcheck timeout
2023-06-23 13:06:00 -06:00
Aiden McClelland
ef416ef60b prevent tor from spinning if a service is in a crash loop (#2316) 2023-06-22 18:09:59 +00:00
Aiden McClelland
95b3b55971 fix rootflags for btrfs update (#2315) 2023-06-21 15:26:27 +00:00
Aiden McClelland
b3f32ae03e don't use cp when over cifs 2023-06-21 00:36:36 +00:00
Aiden McClelland
c7472174e5 fix btrfs rootflags 2023-06-21 00:36:36 +00:00
gStart9
2ad749354d Add qemu-guest-agent for advanced VM shutdown options (#2309) 2023-06-21 00:36:36 +00:00
Aiden McClelland
4ed9d2ea22 add grub-common to build 2023-06-21 00:36:36 +00:00
Lucy Cifferello
280eb47de7 update marketplace project to include mime type pipe for icons 2023-06-21 00:36:36 +00:00
Aiden McClelland
324a12b0ff reset config after pg_upgrade 2023-06-21 00:36:36 +00:00
Aiden McClelland
a2543ccddc trim fs name 2023-06-21 00:36:36 +00:00
Aiden McClelland
22666412c3 use fsck instead of e2fsck 2023-06-21 00:36:36 +00:00
Aiden McClelland
dd58044cdf fix build 2023-06-21 00:36:36 +00:00
Aiden McClelland
10312d89d7 fix ipv6 2023-06-21 00:36:36 +00:00
Aiden McClelland
b4c0d877cb fix postgres migration 2023-06-21 00:36:36 +00:00
Aiden McClelland
e95d56a5d0 fix update-grub2 2023-06-21 00:36:36 +00:00
Aiden McClelland
90424e8329 install fixes 2023-06-21 00:36:36 +00:00
Aiden McClelland
1bfeb42a06 force btrfs creation 2023-06-21 00:36:36 +00:00
Aiden McClelland
a936f92954 use postgres user 2023-06-21 00:36:36 +00:00
Aiden McClelland
0bc514ec17 include old pg 2023-06-21 00:36:36 +00:00
Aiden McClelland
a2cf4001af improve invoke error reporting 2023-06-21 00:36:36 +00:00
Aiden McClelland
cb4e12a68c fix build 2023-06-21 00:36:36 +00:00
Aiden McClelland
a7f5124dfe postgresql migration 2023-06-21 00:36:36 +00:00
Aiden McClelland
ccbf71c5e7 fix ipv6 2023-06-21 00:36:36 +00:00
Aiden McClelland
04bf5f58d9 fix tor listener bug 2023-06-21 00:36:36 +00:00
Aiden McClelland
ab3f5956d4 ipv6 2023-06-21 00:36:36 +00:00
Aiden McClelland
c1fe8e583f backup target mount/umount 2023-06-21 00:36:36 +00:00
Lucy Cifferello
fd166c4433 do not load array buffer into memory 2023-06-21 00:36:36 +00:00
Aiden McClelland
f29c7ba4f2 don't wait for install to complete on sideload 2023-06-21 00:36:36 +00:00
Aiden McClelland
88869e9710 gpu acceleration 2023-06-21 00:36:36 +00:00
Aiden McClelland
f8404ab043 btrfs 2023-06-21 00:36:36 +00:00
Aiden McClelland
9fa5d1ff9e suite independent 2023-06-21 00:36:36 +00:00
Aiden McClelland
483f353fd0 backup luks headers 2023-06-21 00:36:36 +00:00
Aiden McClelland
a11bf5b5c7 bookworm 2023-06-21 00:36:36 +00:00
Aiden McClelland
d4113ff753 re-add server version and version range 2023-06-21 00:36:36 +00:00
Aiden McClelland
1969f036fa deser full server info 2023-06-21 00:36:36 +00:00
Matt Hill
8c90e01016 hide range ip addresses, update release notes 2023-06-15 13:20:37 -06:00
Matt Hill
756c5c9b99 small spelling mistake 2023-06-11 15:04:59 -06:00
Lucy Cifferello
ee54b355af fix compilation error on widgets page 2023-06-11 15:04:59 -06:00
Lucy Cifferello
26cbbc0c56 adjust start9 registry icon 2023-06-11 15:04:59 -06:00
Aiden McClelland
f4f719d52a misc fixes 2023-06-11 15:04:59 -06:00
Aiden McClelland
f2071d8b7e update zram bool 2023-06-11 15:04:59 -06:00
Aiden McClelland
df88a55784 v0.3.4.3 2023-06-11 15:04:59 -06:00
Matt Hill
3ccbc626ff experimental features for zram and reset tor (#2299)
* experimental features for zram and reset tor

* zram backend

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-06-11 15:04:59 -06:00
Aiden McClelland
71a15cf222 add diskUsage effect (#2297) 2023-06-11 15:04:59 -06:00
Aiden McClelland
26ddf769b1 remove overload restart rule 2023-06-11 15:04:59 -06:00
Aiden McClelland
3137387c0c only set static hostname 2023-06-11 15:04:59 -06:00
Aiden McClelland
fc142cfde8 reset tor (#2296)
* reset tor

* Update tor.rs

* timeout connect

* handle stuck bootstrapping
2023-06-11 15:04:59 -06:00
Aiden McClelland
b0503fa507 Bugfix/incoherent (#2293)
* debug incoherent error

* fix incoherent error

* use new debspawn
2023-06-11 15:04:59 -06:00
Matt Hill
b86a97c9c0 add resetTor to rpc client 2023-06-11 15:04:59 -06:00
Lucy Cifferello
eb6cd23772 update registry icon 2023-06-11 15:04:59 -06:00
Matt Hill
efae1e7e6c add Tor logs to UI 2023-06-11 15:04:59 -06:00
Lucy Cifferello
19d55b840e add registry icon to preloader 2023-06-11 15:04:59 -06:00
Lucy Cifferello
cc0c1d05ab update frontend to 0.3.4.3 2023-06-11 15:04:59 -06:00
Lucy Cifferello
f088f65d5a update branding 2023-06-11 15:04:59 -06:00
Lucy Cifferello
5441b5a06b add missing items to preloader 2023-06-11 15:04:59 -06:00
gStart9
efc56c0a88 Add crda to build/lib/depends (#2283) 2023-05-24 15:54:33 -07:00
kn0wmad
321fca2c0a Replace some user-facing Embassy language (#2281) 2023-05-22 13:23:20 -06:00
Matt Hill
bbd66e9cb0 fix nav link (#2279) 2023-05-18 18:11:27 -06:00
Aiden McClelland
eb0277146c wait for tor (#2278) 2023-05-17 22:17:27 -06:00
Aiden McClelland
10ee32ec48 always generate snake-oil (#2277) 2023-05-17 15:09:27 -06:00
Aiden McClelland
bdb4be89ff Bugfix/pi config (#2276)
* move some install scripts to init

* fix pi config.txt

* move some image stuff to the squashfs build

* no need to clean up fake-apt

* use max temp
2023-05-16 16:06:25 -06:00
Aiden McClelland
61445e0b56 build fixes (#2275)
* move some install scripts to init

* handle fake-apt in init

* rename
2023-05-15 16:34:30 -06:00
Aiden McClelland
f15a010e0e Update build badge (#2274)
Update README.md
2023-05-14 00:01:58 -06:00
Lucy C
58747004fe Fix/misc frontend (#2273)
* update pwa icon to official latest

* fix bug if icon is null in assets

* dismiss modal when connecting to a new registry
2023-05-12 14:48:16 -06:00
Lucy C
e7ff1eb66b display icons based on mime type (#2271)
* display icons based on mime type

* Update frontend/projects/marketplace/src/pipes/mime-type.pipe.ts

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>

* fixes

---------

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
2023-05-12 12:20:05 -06:00
Matt Hill
4a00bd4797 ensure lan address present before getting cert name (#2272) 2023-05-12 12:18:39 -06:00
Aiden McClelland
2e6fc7e4a0 v0.3.4.2 (#2269) 2023-05-12 00:35:50 -06:00
Aiden McClelland
4a8f323be7 external rename (#2265)
* backend rename

* rename embassy and closes #2179

* update root ca name on disk

* update MOTD

* update readmes

* your server typo

* another tiny typo

* fix png name

* Update backend/src/net/wifi.rs

Co-authored-by: Lucy C <12953208+elvece@users.noreply.github.com>

* changes needed due to rebase

---------

Co-authored-by: Matt Hill <matthewonthemoon@gmail.com>
Co-authored-by: Matt Hill <MattDHill@users.noreply.github.com>
Co-authored-by: Lucy C <12953208+elvece@users.noreply.github.com>
2023-05-11 16:48:52 -06:00
Aiden McClelland
c7d82102ed Bugfix/gpt reflash (#2266)
* debug entry

* update magic numbers

* remove dbg

* fix hostname

* fix reinstall logic
2023-05-11 14:16:19 -06:00
Aiden McClelland
068b861edc overhaul OS build (#2244)
* create init resize for pi

* wip

* defer to OS_ARCH env var

* enable password auth in live image

* use correct live image path

* reorder dependencies

* add grub-common as dependency

* add more depends

* reorder grub

* include systemd-resolved

* misc fixes

* remove grub from dependencies

* imports

* ssh and raspi builds

* fix resolvectl

* generate snake-oil on install

* update raspi build process

* script fixes

* fix resize and config

* add psmisc

* new workflows

* include img

* pass through OS_ARCH env var

* require OS_ARCH

* allow dispatching production builds

* configurable environment

* pass through OS_ARCH on compat build

* fix syntax error

* crossbuild dependencies

* include libavahi-client for cross builds

* reorder add-arch

* add ports

* switch existing repos to amd64

* explicitly install libc6

* add more bullshit

* fix some errors

* use ignored shlibs

* remove ubuntu ports

* platform deb

* Update depends

* Update startos-iso.yaml

* Update startos-iso.yaml

* require pi-beep

* add bios boot, fix environment

* Update startos-iso.yaml

* inline deb

* Update startos-iso.yaml

* allow ssh password auth in live build

* sync hostname on livecd

* require curl
2023-05-05 00:54:09 -06:00
kn0wmad
3c908c6a09 Update README.md (#2261)
Minor typo fix
2023-05-02 06:26:54 -06:00
Lucy C
ba3805786c Feature/pwa (#2246)
* setup ui project with pwa configurations

* enable service worker config to work with ionic livereload

* fix service worker key placement

* update webmanifest names

* cleanup

* shrink logo size

* fix package build

* build fix

* fix icon size in webmanifest
2023-04-11 10:36:25 -06:00
Aiden McClelland
70afb197f1 don't attempt docker load if s9pk corrupted (#2236) 2023-03-21 11:23:44 -06:00
Aiden McClelland
d966e35054 fix migration 2023-03-17 18:58:49 -06:00
Aiden McClelland
1675570291 fix test 2023-03-17 14:42:32 -06:00
Aiden McClelland
9b88de656e version bump (#2232)
* version bump

* welcome notes

* 0341 release notes

---------

Co-authored-by: Matt Hill <matthewonthemoon@gmail.com>
2023-03-17 12:55:21 -06:00
Aiden McClelland
3d39b5653d don't blow up if s9pk fails to load (#2231) 2023-03-17 12:09:24 -06:00
J H
eb5f7f64ad feat: Default to no owner for rsync (#2230) 2023-03-17 12:09:13 -06:00
Aiden McClelland
9fc0164c4d better logging of health (#2228) 2023-03-17 12:09:01 -06:00
Aiden McClelland
65eb520cca disable apt and add script for persisting apt pkgs (#2225)
* disable apt and add script for persisting apt pkgs

* fix typo

* exit 1 on fake-apt

* readd fake-apt after upgrade

* fix typo

* remove finicky protection

* fix build
2023-03-17 12:08:49 -06:00
Aiden McClelland
f7f07932b4 update registry rsync script (#2227) 2023-03-17 10:05:58 -06:00
Aiden McClelland
de52494039 fix loading authcookie into cookie store on ssh (#2226) 2023-03-17 10:05:12 -06:00
Matt Hill
4d87ee2bb6 update display obj on union change (#2224)
* update display obj on union change

* deelete unnecessary changes

* more efficient

* fix: properly change height of form object

* more config examples

---------

Co-authored-by: waterplea <alexander@inkin.ru>
2023-03-17 11:57:26 -04:00
Matt Hill
d0ba0936ca remove taiga icons (#2222) 2023-03-15 12:29:24 -06:00
Matt Hill
b08556861f Fix/stupid updates (#2221)
one more thing
2023-03-15 12:23:25 -06:00
Aiden McClelland
c96628ad49 do not log parameters 2023-03-15 12:19:11 -06:00
Matt Hill
a615882b3f fix more bugs with updates tab... (#2219) 2023-03-15 11:33:54 -06:00
496 changed files with 25272 additions and 37103 deletions

View File

@@ -1,6 +1,6 @@
name: 🐛 Bug Report name: 🐛 Bug Report
description: Create a report to help us improve embassyOS description: Create a report to help us improve StartOS
title: '[bug]: ' title: "[bug]: "
labels: [Bug, Needs Triage] labels: [Bug, Needs Triage]
assignees: assignees:
- MattDHill - MattDHill
@@ -10,27 +10,25 @@ body:
label: Prerequisites label: Prerequisites
description: Please confirm you have completed the following. description: Please confirm you have completed the following.
options: options:
- label: I have searched for [existing issues](https://github.com/start9labs/embassy-os/issues) that already report this problem. - label: I have searched for [existing issues](https://github.com/start9labs/start-os/issues) that already report this problem.
required: true required: true
- type: input - type: input
attributes: attributes:
label: embassyOS Version label: Server Hardware
description: What version of embassyOS are you running? description: On what hardware are you running StartOS? Please be as detailed as possible!
placeholder: e.g. 0.3.0 placeholder: Pi (8GB) w/ 32GB microSD & Samsung T7 SSD
validations:
required: true
- type: input
attributes:
label: StartOS Version
description: What version of StartOS are you running?
placeholder: e.g. 0.3.4.3
validations: validations:
required: true required: true
- type: dropdown - type: dropdown
attributes: attributes:
label: Device label: Client OS
description: What device are you using to connect to Embassy?
options:
- Phone/tablet
- Laptop/Desktop
validations:
required: true
- type: dropdown
attributes:
label: Device OS
description: What operating system is your device running? description: What operating system is your device running?
options: options:
- MacOS - MacOS
@@ -45,14 +43,14 @@ body:
required: true required: true
- type: input - type: input
attributes: attributes:
label: Device OS Version label: Client OS Version
description: What version is your device OS? description: What version is your device OS?
validations: validations:
required: true required: true
- type: dropdown - type: dropdown
attributes: attributes:
label: Browser label: Browser
description: What browser are you using to connect to Embassy? description: What browser are you using to connect to your server?
options: options:
- Firefox - Firefox
- Brave - Brave

View File

@@ -1,6 +1,6 @@
name: 💡 Feature Request name: 💡 Feature Request
description: Suggest an idea for embassyOS description: Suggest an idea for StartOS
title: '[feat]: ' title: "[feat]: "
labels: [Enhancement] labels: [Enhancement]
assignees: assignees:
- MattDHill - MattDHill
@@ -10,7 +10,7 @@ body:
label: Prerequisites label: Prerequisites
description: Please confirm you have completed the following. description: Please confirm you have completed the following.
options: options:
- label: I have searched for [existing issues](https://github.com/start9labs/embassy-os/issues) that already suggest this feature. - label: I have searched for [existing issues](https://github.com/start9labs/start-os/issues) that already suggest this feature.
required: true required: true
- type: textarea - type: textarea
attributes: attributes:
@@ -27,7 +27,7 @@ body:
- type: textarea - type: textarea
attributes: attributes:
label: Describe Preferred Solution label: Describe Preferred Solution
description: How you want this feature added to embassyOS? description: How you want this feature added to StartOS?
- type: textarea - type: textarea
attributes: attributes:
label: Describe Alternatives label: Describe Alternatives

View File

@@ -1,29 +0,0 @@
# This folder contains GitHub Actions workflows for building the project
## backend
Runs: manually (on: workflow_dispatch) or called by product-pipeline (on: workflow_call)
This workflow uses the actions and docker/setup-buildx-action@v1 to prepare the environment for aarch64 cross complilation using docker buildx.
When execution of aarch64 containers is required the action docker/setup-qemu-action@v1 is added.
A matrix-strategy has been used to build for both x86_64 and aarch64 platforms in parallel.
### Running unittests
Unittests are run using [cargo-nextest]( https://nexte.st/). First the sources are (cross-)compiled and archived. The archive is then run on the correct platform.
## frontend
Runs: manually (on: workflow_dispatch) or called by product-pipeline (on: workflow_call)
This workflow builds the frontends.
## product
Runs: when a pull request targets the master or next branch and when a change to the master or next branch is made
This workflow builds everything, re-using the backend and frontend workflows.
The download and extraction order of artifacts is relevant to `make`, as it checks the file timestamps to decide which targets need to be executed.
Result: eos.img
## a note on uploading artifacts
Artifacts are used to share data between jobs. File permissions are not maintained during artifact upload. Where file permissions are relevant, the workaround using tar has been used. See (here)[https://github.com/actions/upload-artifact#maintaining-file-permissions-and-case-sensitive-files].

View File

@@ -1,233 +0,0 @@
name: Backend
on:
workflow_call:
workflow_dispatch:
env:
RUST_VERSION: "1.67.1"
ENVIRONMENT: "dev"
jobs:
build_libs:
name: Build libs
strategy:
fail-fast: false
matrix:
target: [x86_64, aarch64]
include:
- target: x86_64
snapshot_command: ./build-v8-snapshot.sh
artifact_name: js_snapshot
artifact_path: libs/js_engine/src/artifacts/JS_SNAPSHOT.bin
- target: aarch64
snapshot_command: ./build-arm-v8-snapshot.sh
artifact_name: arm_js_snapshot
artifact_path: libs/js_engine/src/artifacts/ARM_JS_SNAPSHOT.bin
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
if: ${{ matrix.target == 'aarch64' }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
if: ${{ matrix.target == 'aarch64' }}
- name: "Install Rust"
run: |
rustup toolchain install ${{ env.RUST_VERSION }} --profile minimal --no-self-update
rustup default ${{ inputs.rust }}
shell: bash
if: ${{ matrix.target == 'x86_64' }}
- uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
libs/target/
key: ${{ runner.os }}-cargo-libs-${{ matrix.target }}-${{ hashFiles('libs/Cargo.lock') }}
- name: Build v8 snapshot
run: ${{ matrix.snapshot_command }}
working-directory: libs
- uses: actions/upload-artifact@v3
with:
name: ${{ matrix.artifact_name }}
path: ${{ matrix.artifact_path }}
build_backend:
name: Build backend
strategy:
fail-fast: false
matrix:
target: [x86_64, aarch64]
include:
- target: x86_64
snapshot_download: js_snapshot
- target: aarch64
snapshot_download: arm_js_snapshot
runs-on: ubuntu-latest
timeout-minutes: 120
needs: build_libs
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Download ${{ matrix.snapshot_download }} artifact
uses: actions/download-artifact@v3
with:
name: ${{ matrix.snapshot_download }}
path: libs/js_engine/src/artifacts/
- name: "Install Rust"
run: |
rustup toolchain install ${{ env.RUST_VERSION }} --profile minimal --no-self-update
rustup default ${{ inputs.rust }}
shell: bash
if: ${{ matrix.target == 'x86_64' }}
- uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
backend/target/
key: ${{ runner.os }}-cargo-backend-${{ matrix.target }}-${{ hashFiles('backend/Cargo.lock') }}
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get install libavahi-client-dev
if: ${{ matrix.target == 'x86_64' }}
- name: Check Git Hash
run: ./check-git-hash.sh
- name: Check Environment
run: ./check-environment.sh
- name: Build backend
run: make ARCH=${{ matrix.target }} backend
- name: 'Tar files to preserve file permissions'
run: make ARCH=${{ matrix.target }} backend-${{ matrix.target }}.tar
- uses: actions/upload-artifact@v3
with:
name: backend-${{ matrix.target }}
path: backend-${{ matrix.target }}.tar
- name: Install nextest
uses: taiki-e/install-action@nextest
- name: Build and archive tests
run: cargo nextest archive --archive-file nextest-archive-${{ matrix.target }}.tar.zst --target ${{ matrix.target }}-unknown-linux-gnu
working-directory: backend
if: ${{ matrix.target == 'x86_64' }}
- name: Build and archive tests
run: |
docker run --rm \
-v "$HOME/.cargo/registry":/root/.cargo/registry \
-v "$(pwd)":/home/rust/src \
-P start9/rust-arm-cross:aarch64 \
sh -c 'cd /home/rust/src/backend &&
rustup install ${{ env.RUST_VERSION }} &&
rustup override set ${{ env.RUST_VERSION }} &&
rustup target add aarch64-unknown-linux-gnu &&
curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin &&
cargo nextest archive --archive-file nextest-archive-${{ matrix.target }}.tar.zst --target ${{ matrix.target }}-unknown-linux-gnu'
if: ${{ matrix.target == 'aarch64' }}
- name: Reset permissions
run: sudo chown -R $USER target
working-directory: backend
if: ${{ matrix.target == 'aarch64' }}
- name: Upload archive to workflow
uses: actions/upload-artifact@v3
with:
name: nextest-archive-${{ matrix.target }}
path: backend/nextest-archive-${{ matrix.target }}.tar.zst
run_tests_backend:
name: Test backend
strategy:
fail-fast: false
matrix:
target: [x86_64, aarch64]
include:
- target: x86_64
- target: aarch64
runs-on: ubuntu-latest
timeout-minutes: 60
needs: build_backend
env:
CARGO_TERM_COLOR: always
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
if: ${{ matrix.target == 'aarch64' }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
if: ${{ matrix.target == 'aarch64' }}
- run: mkdir -p ~/.cargo/bin
if: ${{ matrix.target == 'x86_64' }}
- name: Install nextest
uses: taiki-e/install-action@v2
with:
tool: nextest@0.9.47
if: ${{ matrix.target == 'x86_64' }}
- name: Download archive
uses: actions/download-artifact@v3
with:
name: nextest-archive-${{ matrix.target }}
- name: Download nextest (aarch64)
run: wget -O nextest-aarch64.tar.gz https://get.nexte.st/0.9.47/linux-arm
if: ${{ matrix.target == 'aarch64' }}
- name: Run tests
run: |
${CARGO_HOME:-~/.cargo}/bin/cargo-nextest nextest run --no-fail-fast --archive-file nextest-archive-${{ matrix.target }}.tar.zst \
--filter-expr 'not (test(system::test_get_temp) | test(net::tor::test) | test(system::test_get_disk_usage) | test(net::ssl::certificate_details_persist) | test(net::ssl::ca_details_persist))'
if: ${{ matrix.target == 'x86_64' }}
- name: Run tests
run: |
docker run --rm --platform linux/arm64/v8 \
-v "/home/runner/.cargo/registry":/usr/local/cargo/registry \
-v "$(pwd)":/home/rust/src \
-e CARGO_TERM_COLOR=${{ env.CARGO_TERM_COLOR }} \
-P ubuntu:20.04 \
sh -c '
apt-get update &&
apt-get install -y ca-certificates &&
apt-get install -y rsync &&
cd /home/rust/src &&
mkdir -p ~/.cargo/bin &&
tar -zxvf nextest-aarch64.tar.gz -C ${CARGO_HOME:-~/.cargo}/bin &&
${CARGO_HOME:-~/.cargo}/bin/cargo-nextest nextest run --archive-file nextest-archive-${{ matrix.target }}.tar.zst \
--filter-expr "not (test(system::test_get_temp) | test(net::tor::test) | test(system::test_get_disk_usage) | test(net::ssl::certificate_details_persist) | test(net::ssl::ca_details_persist))"'
if: ${{ matrix.target == 'aarch64' }}

View File

@@ -1,63 +0,0 @@
name: Debian Package
on:
workflow_call:
workflow_dispatch:
env:
NODEJS_VERSION: '16.11.0'
ENVIRONMENT: "dev"
jobs:
dpkg:
name: Build dpkg
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
repository: Start9Labs/embassy-os-deb
- uses: actions/checkout@v3
with:
submodules: recursive
path: embassyos-0.3.x
- run: |
cp -r debian embassyos-0.3.x/
VERSION=0.3.x ./control.sh
cp embassyos-0.3.x/backend/embassyd.service embassyos-0.3.x/debian/embassyos.embassyd.service
cp embassyos-0.3.x/backend/embassy-init.service embassyos-0.3.x/debian/embassyos.embassy-init.service
- uses: actions/setup-node@v3
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Get npm cache directory
id: npm-cache-dir
run: |
echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT
- uses: actions/cache@v3
id: npm-cache
with:
path: ${{ steps.npm-cache-dir.outputs.dir }}
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get install debmake debhelper-compat
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Run build
run: "make VERSION=0.3.x TAG=${{ github.ref_name }}"
- uses: actions/upload-artifact@v3
with:
name: deb
path: embassyos_0.3.x-1_amd64.deb

View File

@@ -1,46 +0,0 @@
name: Frontend
on:
workflow_call:
workflow_dispatch:
env:
NODEJS_VERSION: '16.11.0'
ENVIRONMENT: "dev"
jobs:
frontend:
name: Build frontend
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- uses: actions/setup-node@v3
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Get npm cache directory
id: npm-cache-dir
run: |
echo "dir=$(npm config get cache)" >> $GITHUB_OUTPUT
- uses: actions/cache@v3
id: npm-cache
with:
path: ${{ steps.npm-cache-dir.outputs.dir }}
key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.os }}-node-
- name: Build frontends
run: make frontends
- name: 'Tar files to preserve file permissions'
run: tar -cvf frontend.tar ENVIRONMENT.txt GIT_HASH.txt VERSION.txt frontend/dist frontend/config.json
- uses: actions/upload-artifact@v3
with:
name: frontend
path: frontend.tar

View File

@@ -1,129 +0,0 @@
name: Build Pipeline
on:
workflow_dispatch:
push:
branches:
- master
- next
pull_request:
branches:
- master
- next
env:
ENVIRONMENT: "dev"
jobs:
compat:
uses: ./.github/workflows/reusable-workflow.yaml
with:
build_command: make system-images/compat/docker-images/aarch64.tar
artifact_name: compat.tar
artifact_path: system-images/compat/docker-images/aarch64.tar
utils:
uses: ./.github/workflows/reusable-workflow.yaml
with:
build_command: make system-images/utils/docker-images/aarch64.tar
artifact_name: utils.tar
artifact_path: system-images/utils/docker-images/aarch64.tar
binfmt:
uses: ./.github/workflows/reusable-workflow.yaml
with:
build_command: make system-images/binfmt/docker-images/aarch64.tar
artifact_name: binfmt.tar
artifact_path: system-images/binfmt/docker-images/aarch64.tar
backend:
uses: ./.github/workflows/backend.yaml
frontend:
uses: ./.github/workflows/frontend.yaml
image:
name: Build image
runs-on: ubuntu-latest
timeout-minutes: 60
needs: [compat,utils,binfmt,backend,frontend]
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Download compat.tar artifact
uses: actions/download-artifact@v3
with:
name: compat.tar
path: system-images/compat/docker-images/
- name: Download utils.tar artifact
uses: actions/download-artifact@v3
with:
name: utils.tar
path: system-images/utils/docker-images/
- name: Download binfmt.tar artifact
uses: actions/download-artifact@v3
with:
name: binfmt.tar
path: system-images/binfmt/docker-images/
- name: Download js_snapshot artifact
uses: actions/download-artifact@v3
with:
name: js_snapshot
path: libs/js_engine/src/artifacts/
- name: Download arm_js_snapshot artifact
uses: actions/download-artifact@v3
with:
name: arm_js_snapshot
path: libs/js_engine/src/artifacts/
- name: Download backend artifact
uses: actions/download-artifact@v3
with:
name: backend-aarch64
- name: 'Extract backend'
run:
tar -mxvf backend-aarch64.tar
- name: Download frontend artifact
uses: actions/download-artifact@v3
with:
name: frontend
- name: Skip frontend build
run: |
mkdir frontend/node_modules
mkdir frontend/dist
mkdir patch-db/client/node_modules
mkdir patch-db/client/dist
- name: 'Extract frontend'
run: |
tar -mxvf frontend.tar frontend/config.json
tar -mxvf frontend.tar frontend/dist
tar -xvf frontend.tar GIT_HASH.txt
tar -xvf frontend.tar ENVIRONMENT.txt
tar -xvf frontend.tar VERSION.txt
rm frontend.tar
- name: Cache raspiOS
id: cache-raspios
uses: actions/cache@v3
with:
path: raspios.img
key: cache-raspios
- name: Build image
run: |
make V=1 eos_raspberrypi-uninit.img --debug
- uses: actions/upload-artifact@v3
with:
name: image
path: eos_raspberrypi-uninit.img

View File

@@ -1,70 +0,0 @@
name: PureOS Based ISO
on:
workflow_call:
workflow_dispatch:
push:
branches:
- master
- next
pull_request:
branches:
- master
- next
env:
ENVIRONMENT: "dev"
jobs:
dpkg:
uses: ./.github/workflows/debian.yaml
iso:
name: Build iso
runs-on: ubuntu-22.04
needs: [dpkg]
steps:
- uses: actions/checkout@v3
with:
repository: Start9Labs/eos-image-recipes
- name: Install dependencies
run: |
sudo apt update
wget http://ftp.us.debian.org/debian/pool/main/d/debspawn/debspawn_0.6.1-1_all.deb
sha256sum ./debspawn_0.6.1-1_all.deb | grep fb8a3f588438ff9ef51e713ec1d83306db893f0aa97447565e28bbba9c6e90c6
sudo apt-get install -y ./debspawn_0.6.1-1_all.deb
wget https://repo.pureos.net/pureos/pool/main/d/debootstrap/debootstrap_1.0.125pureos1_all.deb
sudo apt-get install -y --allow-downgrades ./debootstrap_1.0.125pureos1_all.deb
wget https://repo.pureos.net/pureos/pool/main/p/pureos-archive-keyring/pureos-archive-keyring_2021.11.0_all.deb
sudo apt-get install -y ./pureos-archive-keyring_2021.11.0_all.deb
- name: Configure debspawn
run: |
sudo mkdir -p /etc/debspawn/
echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
- uses: actions/cache@v3
with:
path: /var/lib/debspawn
key: ${{ runner.os }}-debspawn-init-byzantium
- name: Make build container
run: "debspawn list | grep byzantium || debspawn create --with-init byzantium"
- run: "mkdir -p overlays/vendor/root"
- name: Download dpkg
uses: actions/download-artifact@v3
with:
name: deb
path: overlays/vendor/root
- name: Run build
run: |
./run-local-build.sh --no-fakemachine byzantium none custom "" true
- uses: actions/upload-artifact@v3
with:
name: iso
path: results/*.iso

View File

@@ -1,37 +0,0 @@
name: Reusable Workflow
on:
workflow_call:
inputs:
build_command:
required: true
type: string
artifact_name:
required: true
type: string
artifact_path:
required: true
type: string
env:
ENVIRONMENT: "dev"
jobs:
generic_build_job:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Build image
run: ${{ inputs.build_command }}
- uses: actions/upload-artifact@v3
with:
name: ${{ inputs.artifact_name }}
path: ${{ inputs.artifact_path }}

237
.github/workflows/startos-iso.yaml vendored Normal file
View File

@@ -0,0 +1,237 @@
name: Debian-based ISO and SquashFS
on:
workflow_call:
workflow_dispatch:
inputs:
environment:
type: choice
description: Environment
options:
- NONE
- dev
- unstable
- dev-unstable
- docker
- dev-docker
- dev-unstable-docker
runner:
type: choice
description: Runner
options:
- standard
- fast
platform:
type: choice
description: Platform
options:
- ALL
- x86_64
- x86_64-nonfree
- aarch64
- aarch64-nonfree
- raspberrypi
deploy:
type: choice
description: Deploy
options:
- NONE
- alpha
- beta
push:
branches:
- master
- next
pull_request:
branches:
- master
- next
env:
NODEJS_VERSION: "18.15.0"
ENVIRONMENT: '${{ fromJson(format(''["{0}", ""]'', github.event.inputs.environment || ''dev''))[github.event.inputs.environment == ''NONE''] }}'
jobs:
compile:
name: Compile Base Binaries
strategy:
fail-fast: true
matrix:
arch: >-
${{
fromJson('{
"x86_64": ["x86_64"],
"x86_64-nonfree": ["x86_64"],
"aarch64": ["aarch64"],
"aarch64-nonfree": ["aarch64"],
"raspberrypi": ["aarch64"],
"ALL": ["x86_64", "aarch64"]
}')[github.event.inputs.platform || 'ALL']
}}
runs-on: ${{ fromJson('["ubuntu-22.04", "buildjet-32vcpu-ubuntu-2204"]')[github.event.inputs.runner == 'fast'] }}
steps:
- run: |
sudo mount -t tmpfs tmpfs .
if: ${{ github.event.inputs.runner == 'fast' }}
- uses: actions/checkout@v3
with:
submodules: recursive
- uses: actions/setup-node@v3
with:
node-version: ${{ env.NODEJS_VERSION }}
- name: Set up QEMU
uses: docker/setup-qemu-action@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Make
run: make ARCH=${{ matrix.arch }} compiled-${{ matrix.arch }}.tar
- uses: actions/upload-artifact@v3
with:
name: compiled-${{ matrix.arch }}.tar
path: compiled-${{ matrix.arch }}.tar
image:
name: Build Image
needs: [compile]
strategy:
fail-fast: false
matrix:
platform: >-
${{
fromJson(
format(
'[
["{0}"],
["x86_64", "x86_64-nonfree", "aarch64", "aarch64-nonfree", "raspberrypi"]
]',
github.event.inputs.platform || 'ALL'
)
)[(github.event.inputs.platform || 'ALL') == 'ALL']
}}
runs-on: >-
${{
fromJson(
format(
'["ubuntu-22.04", "{0}"]',
fromJson('{
"x86_64": "buildjet-8vcpu-ubuntu-2204",
"x86_64-nonfree": "buildjet-8vcpu-ubuntu-2204",
"aarch64": "buildjet-8vcpu-ubuntu-2204-arm",
"aarch64-nonfree": "buildjet-8vcpu-ubuntu-2204-arm",
"raspberrypi": "buildjet-8vcpu-ubuntu-2204-arm",
}')[matrix.platform]
)
)[github.event.inputs.runner == 'fast']
}}
env:
ARCH: >-
${{
fromJson('{
"x86_64": "x86_64",
"x86_64-nonfree": "x86_64",
"aarch64": "aarch64",
"aarch64-nonfree": "aarch64",
"raspberrypi": "aarch64",
}')[matrix.platform]
}}
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Install dependencies
run: |
sudo apt-get update
sudo apt-get install -y qemu-user-static
wget https://deb.debian.org/debian/pool/main/d/debspawn/debspawn_0.6.2-1_all.deb
sha256sum ./debspawn_0.6.2-1_all.deb | grep 37ef27458cb1e35e8bce4d4f639b06b4b3866fc0b9191ec6b9bd157afd06a817
sudo apt-get install -y ./debspawn_0.6.2-1_all.deb
- name: Configure debspawn
run: |
sudo mkdir -p /etc/debspawn/
echo "AllowUnsafePermissions=true" | sudo tee /etc/debspawn/global.toml
sudo mkdir -p /var/tmp/debspawn
- run: sudo mount -t tmpfs tmpfs /var/tmp/debspawn
if: ${{ github.event.inputs.runner == 'fast' && (matrix.platform == 'x86_64' || matrix.platform == 'x86_64-nonfree') }}
- name: Download compiled artifacts
uses: actions/download-artifact@v3
with:
name: compiled-${{ env.ARCH }}.tar
- name: Extract compiled artifacts
run: tar -xvf compiled-${{ env.ARCH }}.tar
- name: Prevent rebuild of compiled artifacts
run: |
mkdir -p frontend/dist/raw
PLATFORM=${{ matrix.platform }} make -t compiled-${{ env.ARCH }}.tar
- name: Run iso build
run: PLATFORM=${{ matrix.platform }} make iso
if: ${{ matrix.platform != 'raspberrypi' }}
- name: Run img build
run: PLATFORM=${{ matrix.platform }} make img
if: ${{ matrix.platform == 'raspberrypi' }}
- uses: actions/upload-artifact@v3
with:
name: ${{ matrix.platform }}.squashfs
path: results/*.squashfs
- uses: actions/upload-artifact@v3
with:
name: ${{ matrix.platform }}.iso
path: results/*.iso
if: ${{ matrix.platform != 'raspberrypi' }}
- uses: actions/upload-artifact@v3
with:
name: ${{ matrix.platform }}.img
path: results/*.img
if: ${{ matrix.platform == 'raspberrypi' }}
- name: Upload OTA to registry
run: >-
PLATFORM=${{ matrix.platform }} make upload-ota TARGET="${{
fromJson('{
"alpha": "alpha-registry-x.start9.com",
"beta": "beta-registry.start9.com",
}')[github.event.inputs.deploy]
}}" KEY="${{
fromJson(
format('{{
"alpha": "{0}",
"beta": "{1}",
}}', secrets.ALPHA_INDEX_KEY, secrets.BETA_INDEX_KEY)
)[github.event.inputs.deploy]
}}"
if: ${{ github.event.inputs.deploy != '' && github.event.inputs.deploy != 'NONE' }}
index:
if: ${{ github.event.inputs.deploy != '' && github.event.inputs.deploy != 'NONE' }}
needs: [image]
runs-on: ubuntu-22.04
steps:
- run: >-
curl "https://${{
fromJson('{
"alpha": "alpha-registry-x.start9.com",
"beta": "beta-registry.start9.com",
}')[github.event.inputs.deploy]
}}:8443/resync.cgi?key=${{
fromJson(
format('{{
"alpha": "{0}",
"beta": "{1}",
}}', secrets.ALPHA_INDEX_KEY, secrets.BETA_INDEX_KEY)
)[github.event.inputs.deploy]
}}"

8
.gitignore vendored
View File

@@ -16,13 +16,15 @@ deploy_web.sh
secrets.db secrets.db
.vscode/ .vscode/
/cargo-deps/**/* /cargo-deps/**/*
/PLATFORM.txt
/ENVIRONMENT.txt /ENVIRONMENT.txt
/GIT_HASH.txt /GIT_HASH.txt
/VERSION.txt /VERSION.txt
/embassyos-*.tar.gz
/eos-*.tar.gz /eos-*.tar.gz
/*.deb /*.deb
/target /target
/*.squashfs /*.squashfs
/debian /results
/DEBIAN /dpkg-workdir
/compiled.tar
/compiled-*.tar

View File

@@ -1,6 +1,6 @@
<!-- omit in toc --> <!-- omit in toc -->
# Contributing to Embassy OS # Contributing to StartOS
First off, thanks for taking the time to contribute! ❤️ First off, thanks for taking the time to contribute! ❤️
@@ -19,7 +19,7 @@ forward to your contributions. 🎉
> - Tweet about it > - Tweet about it
> - Refer this project in your project's readme > - Refer this project in your project's readme
> - Mention the project at local meetups and tell your friends/colleagues > - Mention the project at local meetups and tell your friends/colleagues
> - Buy an [Embassy](https://start9labs.com) > - Buy a [Start9 server](https://start9.com)
<!-- omit in toc --> <!-- omit in toc -->
@@ -49,7 +49,7 @@ forward to your contributions. 🎉
> [Documentation](https://docs.start9labs.com). > [Documentation](https://docs.start9labs.com).
Before you ask a question, it is best to search for existing Before you ask a question, it is best to search for existing
[Issues](https://github.com/Start9Labs/embassy-os/issues) that might help you. [Issues](https://github.com/Start9Labs/start-os/issues) that might help you.
In case you have found a suitable issue and still need clarification, you can In case you have found a suitable issue and still need clarification, you can
write your question in this issue. It is also advisable to search the internet write your question in this issue. It is also advisable to search the internet
for answers first. for answers first.
@@ -57,7 +57,7 @@ for answers first.
If you then still feel the need to ask a question and need clarification, we If you then still feel the need to ask a question and need clarification, we
recommend the following: recommend the following:
- Open an [Issue](https://github.com/Start9Labs/embassy-os/issues/new). - Open an [Issue](https://github.com/Start9Labs/start-os/issues/new).
- Provide as much context as you can about what you're running into. - Provide as much context as you can about what you're running into.
- Provide project and platform versions, depending on what seems relevant. - Provide project and platform versions, depending on what seems relevant.
@@ -105,7 +105,7 @@ steps in advance to help us fix any potential bug as fast as possible.
- To see if other users have experienced (and potentially already solved) the - To see if other users have experienced (and potentially already solved) the
same issue you are having, check if there is not already a bug report existing same issue you are having, check if there is not already a bug report existing
for your bug or error in the for your bug or error in the
[bug tracker](https://github.com/Start9Labs/embassy-os/issues?q=label%3Abug). [bug tracker](https://github.com/Start9Labs/start-os/issues?q=label%3Abug).
- Also make sure to search the internet (including Stack Overflow) to see if - Also make sure to search the internet (including Stack Overflow) to see if
users outside of the GitHub community have discussed the issue. users outside of the GitHub community have discussed the issue.
- Collect information about the bug: - Collect information about the bug:
@@ -131,7 +131,7 @@ steps in advance to help us fix any potential bug as fast as possible.
We use GitHub issues to track bugs and errors. If you run into an issue with the We use GitHub issues to track bugs and errors. If you run into an issue with the
project: project:
- Open an [Issue](https://github.com/Start9Labs/embassy-os/issues/new/choose) - Open an [Issue](https://github.com/Start9Labs/start-os/issues/new/choose)
selecting the appropriate type. selecting the appropriate type.
- Explain the behavior you would expect and the actual behavior. - Explain the behavior you would expect and the actual behavior.
- Please provide as much context as possible and describe the _reproduction - Please provide as much context as possible and describe the _reproduction
@@ -155,8 +155,7 @@ Once it's filed:
### Suggesting Enhancements ### Suggesting Enhancements
This section guides you through submitting an enhancement suggestion for Embassy This section guides you through submitting an enhancement suggestion for StartOS, **including completely new features and minor improvements to existing
OS, **including completely new features and minor improvements to existing
functionality**. Following these guidelines will help maintainers and the functionality**. Following these guidelines will help maintainers and the
community to understand your suggestion and find related suggestions. community to understand your suggestion and find related suggestions.
@@ -168,7 +167,7 @@ community to understand your suggestion and find related suggestions.
- Read the [documentation](https://start9.com/latest/user-manual) carefully and - Read the [documentation](https://start9.com/latest/user-manual) carefully and
find out if the functionality is already covered, maybe by an individual find out if the functionality is already covered, maybe by an individual
configuration. configuration.
- Perform a [search](https://github.com/Start9Labs/embassy-os/issues) to see if - Perform a [search](https://github.com/Start9Labs/start-os/issues) to see if
the enhancement has already been suggested. If it has, add a comment to the the enhancement has already been suggested. If it has, add a comment to the
existing issue instead of opening a new one. existing issue instead of opening a new one.
- Find out whether your idea fits with the scope and aims of the project. It's - Find out whether your idea fits with the scope and aims of the project. It's
@@ -182,7 +181,7 @@ community to understand your suggestion and find related suggestions.
#### How Do I Submit a Good Enhancement Suggestion? #### How Do I Submit a Good Enhancement Suggestion?
Enhancement suggestions are tracked as Enhancement suggestions are tracked as
[GitHub issues](https://github.com/Start9Labs/embassy-os/issues). [GitHub issues](https://github.com/Start9Labs/start-os/issues).
- Use a **clear and descriptive title** for the issue to identify the - Use a **clear and descriptive title** for the issue to identify the
suggestion. suggestion.
@@ -197,7 +196,7 @@ Enhancement suggestions are tracked as
macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast)
or [this tool](https://github.com/GNOME/byzanz) on Linux. or [this tool](https://github.com/GNOME/byzanz) on Linux.
<!-- this should only be included if the project has a GUI --> <!-- this should only be included if the project has a GUI -->
- **Explain why this enhancement would be useful** to most Embassy OS users. You - **Explain why this enhancement would be useful** to most StartOS users. You
may also want to point out the other projects that solved it better and which may also want to point out the other projects that solved it better and which
could serve as inspiration. could serve as inspiration.
@@ -205,24 +204,24 @@ Enhancement suggestions are tracked as
### Project Structure ### Project Structure
embassyOS is composed of the following components. Please visit the README for StartOS is composed of the following components. Please visit the README for
each component to understand the dependency requirements and installation each component to understand the dependency requirements and installation
instructions. instructions.
- [`backend`](backend/README.md) (Rust) is a command line utility, daemon, and - [`backend`](backend/README.md) (Rust) is a command line utility, daemon, and
software development kit that sets up and manages services and their software development kit that sets up and manages services and their
environments, provides the interface for the ui, manages system state, and environments, provides the interface for the ui, manages system state, and
provides utilities for packaging services for embassyOS. provides utilities for packaging services for StartOS.
- [`build`](build/README.md) contains scripts and necessary for deploying - [`build`](build/README.md) contains scripts and necessary for deploying
embassyOS to a debian/raspbian system. StartOS to a debian/raspbian system.
- [`frontend`](frontend/README.md) (Typescript Ionic Angular) is the code that - [`frontend`](frontend/README.md) (Typescript Ionic Angular) is the code that
is deployed to the browser to provide the user interface for embassyOS. is deployed to the browser to provide the user interface for StartOS.
- `projects/ui` - Code for the user interface that is displayed when embassyOS - `projects/ui` - Code for the user interface that is displayed when StartOS
is running normally. is running normally.
- `projects/setup-wizard`(frontend/README.md) - Code for the user interface - `projects/setup-wizard`(frontend/README.md) - Code for the user interface
that is displayed during the setup and recovery process for embassyOS. that is displayed during the setup and recovery process for StartOS.
- `projects/diagnostic-ui` - Code for the user interface that is displayed - `projects/diagnostic-ui` - Code for the user interface that is displayed
when something has gone wrong with starting up embassyOS, which provides when something has gone wrong with starting up StartOS, which provides
helpful debugging tools. helpful debugging tools.
- `libs` (Rust) is a set of standalone crates that were separated out of - `libs` (Rust) is a set of standalone crates that were separated out of
`backend` for the purpose of portability `backend` for the purpose of portability
@@ -232,18 +231,18 @@ instructions.
[client](https://github.com/Start9Labs/patch-db/tree/master/client) with its [client](https://github.com/Start9Labs/patch-db/tree/master/client) with its
own dependency and installation requirements. own dependency and installation requirements.
- `system-images` - (Docker, Rust) A suite of utility Docker images that are - `system-images` - (Docker, Rust) A suite of utility Docker images that are
preloaded with embassyOS to assist with functions relating to services (eg. preloaded with StartOS to assist with functions relating to services (eg.
configuration, backups, health checks). configuration, backups, health checks).
### Your First Code Contribution ### Your First Code Contribution
#### Setting Up Your Development Environment #### Setting Up Your Development Environment
First, clone the embassyOS repository and from the project root, pull in the First, clone the StartOS repository and from the project root, pull in the
submodules for dependent libraries. submodules for dependent libraries.
```sh ```sh
git clone https://github.com/Start9Labs/embassy-os.git git clone https://github.com/Start9Labs/start-os.git
git submodule update --init --recursive git submodule update --init --recursive
``` ```
@@ -254,7 +253,7 @@ to, follow the installation requirements listed in that component's README
#### Building The Raspberry Pi Image #### Building The Raspberry Pi Image
This step is for setting up an environment in which to test your code changes if This step is for setting up an environment in which to test your code changes if
you do not yet have a embassyOS. you do not yet have a StartOS.
- Requirements - Requirements
- `ext4fs` (available if running on the Linux kernel) - `ext4fs` (available if running on the Linux kernel)
@@ -262,7 +261,7 @@ you do not yet have a embassyOS.
- GNU Make - GNU Make
- Building - Building
- see setup instructions [here](build/README.md) - see setup instructions [here](build/README.md)
- run `make embassyos-raspi.img ARCH=aarch64` from the project root - run `make startos-raspi.img ARCH=aarch64` from the project root
### Improving The Documentation ### Improving The Documentation
@@ -286,7 +285,7 @@ seamless and intuitive experience.
### Formatting ### Formatting
Each component of embassyOS contains its own style guide. Code must be formatted Each component of StartOS contains its own style guide. Code must be formatted
with the formatter designated for each component. These are outlined within each with the formatter designated for each component. These are outlined within each
component folder's README. component folder's README.
@@ -306,7 +305,7 @@ component. i.e. `backend: update to tokio v0.3`.
The body of a pull request should contain sufficient description of what the The body of a pull request should contain sufficient description of what the
changes do, as well as a justification. You should include references to any changes do, as well as a justification. You should include references to any
relevant [issues](https://github.com/Start9Labs/embassy-os/issues). relevant [issues](https://github.com/Start9Labs/start-os/issues).
### Rebasing Changes ### Rebasing Changes

5607
Cargo.lock generated

File diff suppressed because it is too large Load Diff

21
LICENSE Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2023 Start9 Labs, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,42 +0,0 @@
# START9 NON-COMMERCIAL LICENSE v1
Version 1, 22 September 2022
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
### 1.Definitions
"License" means version 1 of the Start9 Non-Commercial License.
"Licensor" means the Start9 Labs, Inc, or its successor(s) in interest, or a future assignee of the copyright.
"You" (or "Your") means an individual or organization exercising permissions granted by this License.
"Source Code" for a work means the preferred form of the work for making modifications to it.
"Object Code" means any non-source form of a work, including the machine-language output by a compiler or assembler.
"Work" means any work of authorship, whether in Source or Object form, made available under this License.
"Derivative Work" means any work, whether in Source or Object form, that is based on (or derived from) the Work.
"Distribute" means to convey or to publish and generally has the same meaning here as under U.S. Copyright law.
"Sell" means practicing any or all of the rights granted to you under the License to provide to third parties, for a fee or other consideration (including, without limitation, fees for hosting, consulting, or support services), a product or service whose value derives, entirely or substantially, from the functionality of the Work or Derivative Work.
### 2. Grant of Rights
Subject to the terms of this license, the Licensor grants you, the licensee, a non-exclusive, worldwide, royalty-free copyright license to access, audit, copy, modify, compile, run, test, distribute, or otherwise use the Software.
### 3. Limitations
1. The grant of rights under the License does NOT include, and the License does NOT grant You the right to Sell the Work or Derivative Work.
2. If you Distribute the Work or Derivative Work, you expressly undertake not to remove or modify, in any manner, the copyright notices attached to the Work or displayed in any output of the Work when run, and to reproduce these notices, in an identical manner, in any distributed copies of the Work or Derivative Work together with a copy of this License.
3. If you Distribute a Derivative Work, it must carry prominent notices stating that it has been modified from the Work, providing a relevant date.
### 4. Contributions
You hereby grant to Licensor a perpetual, irrevocable, worldwide, non-exclusive, royalty-free license to use and exploit any Derivative Work of which you are the author.
### 5. Disclaimer
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. LICENSOR HAS NO OBLIGATION TO SUPPORT RECIPIENTS OF THE SOFTWARE.

235
Makefile
View File

@@ -1,54 +1,63 @@
RASPI_TARGETS := eos_raspberrypi-uninit.img eos_raspberrypi-uninit.tar.gz PLATFORM_FILE := $(shell ./check-platform.sh)
OS_ARCH := $(shell if echo $(RASPI_TARGETS) | grep -qw "$(MAKECMDGOALS)"; then echo raspberrypi; else uname -m; fi) ENVIRONMENT_FILE := $(shell ./check-environment.sh)
ARCH := $(shell if [ "$(OS_ARCH)" = "raspberrypi" ]; then echo aarch64; else echo $(OS_ARCH); fi) GIT_HASH_FILE := $(shell ./check-git-hash.sh)
ENVIRONMENT_FILE = $(shell ./check-environment.sh) VERSION_FILE := $(shell ./check-version.sh)
GIT_HASH_FILE = $(shell ./check-git-hash.sh) BASENAME := $(shell ./basename.sh)
VERSION_FILE = $(shell ./check-version.sh) PLATFORM := $(shell if [ -f ./PLATFORM.txt ]; then cat ./PLATFORM.txt; else echo unknown; fi)
EMBASSY_BINS := backend/target/$(ARCH)-unknown-linux-gnu/release/embassyd backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-init backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-cli backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-sdk backend/target/$(ARCH)-unknown-linux-gnu/release/avahi-alias libs/target/aarch64-unknown-linux-musl/release/embassy_container_init libs/target/x86_64-unknown-linux-musl/release/embassy_container_init ARCH := $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo aarch64; else echo $(PLATFORM) | sed 's/-nonfree$$//g'; fi)
EMBASSY_UIS := frontend/dist/ui frontend/dist/setup-wizard frontend/dist/diagnostic-ui frontend/dist/install-wizard IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo iso; fi)
BUILD_SRC := $(shell find build) EMBASSY_BINS := backend/target/$(ARCH)-unknown-linux-gnu/release/startbox libs/target/aarch64-unknown-linux-musl/release/embassy_container_init libs/target/x86_64-unknown-linux-musl/release/embassy_container_init
EMBASSY_SRC := backend/embassyd.service backend/embassy-init.service $(EMBASSY_UIS) $(BUILD_SRC) EMBASSY_UIS := frontend/dist/raw/ui frontend/dist/raw/setup-wizard frontend/dist/raw/diagnostic-ui frontend/dist/raw/install-wizard
COMPAT_SRC := $(shell find system-images/compat/ -not -path 'system-images/compat/target/*' -and -not -name *.tar -and -not -name target) BUILD_SRC := $(shell git ls-files build) build/lib/depends build/lib/conflicts
UTILS_SRC := $(shell find system-images/utils/ -not -name *.tar) DEBIAN_SRC := $(shell git ls-files debian/)
BINFMT_SRC := $(shell find system-images/binfmt/ -not -name *.tar) IMAGE_RECIPE_SRC := $(shell git ls-files image-recipe/)
BACKEND_SRC := $(shell find backend/src) $(shell find backend/migrations) $(shell find patch-db/*/src) $(shell find libs/*/src) libs/*/Cargo.toml backend/Cargo.toml backend/Cargo.lock EMBASSY_SRC := backend/startd.service $(BUILD_SRC)
FRONTEND_SHARED_SRC := $(shell find frontend/projects/shared) $(shell ls -p frontend/ | grep -v / | sed 's/^/frontend\//g') frontend/package.json frontend/node_modules frontend/config.json patch-db/client/dist frontend/patchdb-ui-seed.json COMPAT_SRC := $(shell git ls-files system-images/compat/)
FRONTEND_UI_SRC := $(shell find frontend/projects/ui) UTILS_SRC := $(shell git ls-files system-images/utils/)
FRONTEND_SETUP_WIZARD_SRC := $(shell find frontend/projects/setup-wizard) BINFMT_SRC := $(shell git ls-files system-images/binfmt/)
FRONTEND_DIAGNOSTIC_UI_SRC := $(shell find frontend/projects/diagnostic-ui) BACKEND_SRC := $(shell git ls-files backend) $(shell git ls-files --recurse-submodules patch-db) $(shell git ls-files libs) frontend/dist/static
FRONTEND_INSTALL_WIZARD_SRC := $(shell find frontend/projects/install-wizard) FRONTEND_SHARED_SRC := $(shell git ls-files frontend/projects/shared) $(shell ls -p frontend/ | grep -v / | sed 's/^/frontend\//g') frontend/node_modules frontend/config.json patch-db/client/dist frontend/patchdb-ui-seed.json
PATCH_DB_CLIENT_SRC := $(shell find patch-db/client -not -path patch-db/client/dist) FRONTEND_UI_SRC := $(shell git ls-files frontend/projects/ui)
FRONTEND_SETUP_WIZARD_SRC := $(shell git ls-files frontend/projects/setup-wizard)
FRONTEND_DIAGNOSTIC_UI_SRC := $(shell git ls-files frontend/projects/diagnostic-ui)
FRONTEND_INSTALL_WIZARD_SRC := $(shell git ls-files frontend/projects/install-wizard)
PATCH_DB_CLIENT_SRC := $(shell git ls-files --recurse-submodules patch-db/client)
GZIP_BIN := $(shell which pigz || which gzip) GZIP_BIN := $(shell which pigz || which gzip)
ALL_TARGETS := $(EMBASSY_BINS) system-images/compat/docker-images/$(ARCH).tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar $(EMBASSY_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) TAR_BIN := $(shell which gtar || which tar)
COMPILED_TARGETS := $(EMBASSY_BINS) system-images/compat/docker-images/$(ARCH).tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar
ALL_TARGETS := $(EMBASSY_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep; fi) $(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]; then echo cargo-deps/$(ARCH)-unknown-linux-gnu/release/tokio-console; fi') $(PLATFORM_FILE)
ifeq ($(REMOTE),) ifeq ($(REMOTE),)
mkdir = mkdir -p $1 mkdir = mkdir -p $1
rm = rm -rf $1 rm = rm -rf $1
cp = cp -r $1 $2 cp = cp -r $1 $2
ln = ln -sf $1 $2
else else
mkdir = ssh $(REMOTE) 'mkdir -p $1' ifeq ($(SSHPASS),)
rm = ssh $(REMOTE) 'sudo rm -rf $1' ssh = ssh $(REMOTE) $1
else
ssh = sshpass -p $(SSHPASS) ssh $(REMOTE) $1
endif
mkdir = $(call ssh,'sudo mkdir -p $1')
rm = $(call ssh,'sudo rm -rf $1')
ln = $(call ssh,'sudo ln -sf $1 $2')
define cp define cp
tar --transform "s|^$1|x|" -czv -f- $1 | ssh $(REMOTE) "sudo tar --transform 's|^x|$2|' -xzv -f- -C /" $(TAR_BIN) --transform "s|^$1|x|" -czv -f- $1 | $(call ssh,"sudo tar --transform 's|^x|$2|' -xzv -f- -C /")
endef endef
endif endif
.DELETE_ON_ERROR: .DELETE_ON_ERROR:
.PHONY: all gzip install clean format sdk snapshots frontends ui backend reflash eos_raspberrypi.img sudo .PHONY: all metadata install clean format sdk snapshots frontends ui backend reflash deb $(IMAGE_TYPE) squashfs sudo wormhole docker-buildx
all: $(ALL_TARGETS) all: $(ALL_TARGETS)
metadata: $(VERSION_FILE) $(PLATFORM_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE)
sudo: sudo:
sudo true sudo true
clean: clean:
rm -f 2022-01-28-raspios-bullseye-arm64-lite.zip
rm -f raspios.img
rm -f eos_raspberrypi-uninit.img
rm -f eos_raspberrypi-uninit.tar.gz
rm -f ubuntu.img
rm -f product_key.txt
rm -f system-images/**/*.tar rm -f system-images/**/*.tar
rm -rf system-images/compat/target rm -rf system-images/compat/target
rm -rf backend/target rm -rf backend/target
@@ -61,9 +70,13 @@ clean:
rm -rf patch-db/client/dist rm -rf patch-db/client/dist
rm -rf patch-db/target rm -rf patch-db/target
rm -rf cargo-deps rm -rf cargo-deps
rm ENVIRONMENT.txt rm -rf dpkg-workdir
rm GIT_HASH.txt rm -rf image-recipe/deb
rm VERSION.txt rm -rf results
rm -f ENVIRONMENT.txt
rm -f PLATFORM.txt
rm -f GIT_HASH.txt
rm -f VERSION.txt
format: format:
cd backend && cargo +nightly fmt cd backend && cargo +nightly fmt
@@ -72,85 +85,93 @@ format:
sdk: sdk:
cd backend/ && ./install-sdk.sh cd backend/ && ./install-sdk.sh
eos_raspberrypi-uninit.img: $(ALL_TARGETS) raspios.img cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep | sudo deb: results/$(BASENAME).deb
! test -f eos_raspberrypi-uninit.img || rm eos_raspberrypi-uninit.img
./build/raspberry-pi/make-image.sh
lite-upgrade.img: raspios.img cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep $(BUILD_SRC) eos.raspberrypi.squashfs debian/control: build/lib/depends build/lib/conflicts
! test -f lite-upgrade.img || rm lite-upgrade.img ./debuild/control.sh
./build/raspberry-pi/make-upgrade-image.sh
eos_raspberrypi.img: raspios.img $(BUILD_SRC) eos.raspberrypi.squashfs $(VERSION_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) | sudo results/$(BASENAME).deb: dpkg-build.sh $(DEBIAN_SRC) $(VERSION_FILE) $(PLATFORM_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE)
! test -f eos_raspberrypi.img || rm eos_raspberrypi.img PLATFORM=$(PLATFORM) ./dpkg-build.sh
./build/raspberry-pi/make-initialized-image.sh
$(IMAGE_TYPE): results/$(BASENAME).$(IMAGE_TYPE)
squashfs: results/$(BASENAME).squashfs
results/$(BASENAME).$(IMAGE_TYPE) results/$(BASENAME).squashfs: $(IMAGE_RECIPE_SRC) results/$(BASENAME).deb
./image-recipe/run-local-build.sh "results/$(BASENAME).deb"
# For creating os images. DO NOT USE # For creating os images. DO NOT USE
install: $(ALL_TARGETS) install: $(ALL_TARGETS)
$(call mkdir,$(DESTDIR)/usr/bin) $(call mkdir,$(DESTDIR)/usr/bin)
$(call cp,backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-init,$(DESTDIR)/usr/bin/embassy-init) $(call cp,backend/target/$(ARCH)-unknown-linux-gnu/release/startbox,$(DESTDIR)/usr/bin/startbox)
$(call cp,backend/target/$(ARCH)-unknown-linux-gnu/release/embassyd,$(DESTDIR)/usr/bin/embassyd) $(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/startd)
$(call cp,backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-cli,$(DESTDIR)/usr/bin/embassy-cli) $(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-cli)
$(call cp,backend/target/$(ARCH)-unknown-linux-gnu/release/avahi-alias,$(DESTDIR)/usr/bin/avahi-alias) $(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-sdk)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-deno)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/avahi-alias)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/embassy-cli)
if [ "$(PLATFORM)" = "raspberrypi" ]; then $(call cp,cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep,$(DESTDIR)/usr/bin/pi-beep); fi
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then $(call cp,cargo-deps/$(ARCH)-unknown-linux-gnu/release/tokio-console,$(DESTDIR)/usr/bin/tokio-console); fi
$(call mkdir,$(DESTDIR)/lib/systemd/system)
$(call cp,backend/startd.service,$(DESTDIR)/lib/systemd/system/startd.service)
$(call mkdir,$(DESTDIR)/usr/lib) $(call mkdir,$(DESTDIR)/usr/lib)
$(call rm,$(DESTDIR)/usr/lib/embassy) $(call rm,$(DESTDIR)/usr/lib/startos)
$(call cp,build/lib,$(DESTDIR)/usr/lib/embassy) $(call cp,build/lib,$(DESTDIR)/usr/lib/startos)
$(call cp,ENVIRONMENT.txt,$(DESTDIR)/usr/lib/embassy/ENVIRONMENT.txt) $(call cp,PLATFORM.txt,$(DESTDIR)/usr/lib/startos/PLATFORM.txt)
$(call cp,GIT_HASH.txt,$(DESTDIR)/usr/lib/embassy/GIT_HASH.txt) $(call cp,ENVIRONMENT.txt,$(DESTDIR)/usr/lib/startos/ENVIRONMENT.txt)
$(call cp,VERSION.txt,$(DESTDIR)/usr/lib/embassy/VERSION.txt) $(call cp,GIT_HASH.txt,$(DESTDIR)/usr/lib/startos/GIT_HASH.txt)
$(call cp,VERSION.txt,$(DESTDIR)/usr/lib/startos/VERSION.txt)
$(call mkdir,$(DESTDIR)/usr/lib/embassy/container) $(call mkdir,$(DESTDIR)/usr/lib/startos/container)
$(call cp,libs/target/aarch64-unknown-linux-musl/release/embassy_container_init,$(DESTDIR)/usr/lib/embassy/container/embassy_container_init.arm64) $(call cp,libs/target/aarch64-unknown-linux-musl/release/embassy_container_init,$(DESTDIR)/usr/lib/startos/container/embassy_container_init.arm64)
$(call cp,libs/target/x86_64-unknown-linux-musl/release/embassy_container_init,$(DESTDIR)/usr/lib/embassy/container/embassy_container_init.amd64) $(call cp,libs/target/x86_64-unknown-linux-musl/release/embassy_container_init,$(DESTDIR)/usr/lib/startos/container/embassy_container_init.amd64)
$(call mkdir,$(DESTDIR)/usr/lib/embassy/system-images) $(call mkdir,$(DESTDIR)/usr/lib/startos/system-images)
$(call cp,system-images/compat/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/embassy/system-images/compat.tar) $(call cp,system-images/compat/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/compat.tar)
$(call cp,system-images/utils/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/embassy/system-images/utils.tar) $(call cp,system-images/utils/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/utils.tar)
$(call cp,system-images/binfmt/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/embassy/system-images/binfmt.tar) $(call cp,system-images/binfmt/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/binfmt.tar)
$(call mkdir,$(DESTDIR)/var/www/html) update-overlay: $(ALL_TARGETS)
$(call cp,frontend/dist/diagnostic-ui,$(DESTDIR)/var/www/html/diagnostic)
$(call cp,frontend/dist/setup-wizard,$(DESTDIR)/var/www/html/setup)
$(call cp,frontend/dist/install-wizard,$(DESTDIR)/var/www/html/install)
$(call cp,frontend/dist/ui,$(DESTDIR)/var/www/html/main)
$(call cp,index.html,$(DESTDIR)/var/www/html/index.html)
update-overlay:
@echo "\033[33m!!! THIS WILL ONLY REFLASH YOUR DEVICE IN MEMORY !!!\033[0m" @echo "\033[33m!!! THIS WILL ONLY REFLASH YOUR DEVICE IN MEMORY !!!\033[0m"
@echo "\033[33mALL CHANGES WILL BE REVERTED IF YOU RESTART THE DEVICE\033[0m" @echo "\033[33mALL CHANGES WILL BE REVERTED IF YOU RESTART THE DEVICE\033[0m"
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi @if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
@if [ "`ssh $(REMOTE) 'cat /usr/lib/embassy/VERSION.txt'`" != "`cat ./VERSION.txt`" ]; then >&2 echo "Embassy requires migrations: update-overlay is unavailable." && false; fi @if [ "`ssh $(REMOTE) 'cat /usr/lib/startos/VERSION.txt'`" != "`cat ./VERSION.txt`" ]; then >&2 echo "StartOS requires migrations: update-overlay is unavailable." && false; fi
@if ssh $(REMOTE) "pidof embassy-init"; then >&2 echo "Embassy in INIT: update-overlay is unavailable." && false; fi $(call ssh,"sudo systemctl stop startd")
ssh $(REMOTE) "sudo systemctl stop embassyd" $(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) PLATFORM=$(PLATFORM)
$(MAKE) install REMOTE=$(REMOTE) OS_ARCH=$(OS_ARCH) $(call ssh,"sudo systemctl start startd")
ssh $(REMOTE) "sudo systemctl start embassyd"
update: wormhole: backend/target/$(ARCH)-unknown-linux-gnu/release/startbox
@wormhole send backend/target/$(ARCH)-unknown-linux-gnu/release/startbox 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo /usr/lib/startos/scripts/chroot-and-upgrade \"cd /usr/bin && rm startbox && wormhole receive --accept-file %s && chmod +x startbox\"\n", $$3 }'
update: $(ALL_TARGETS)
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi @if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
ssh $(REMOTE) "sudo rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next/" $(call ssh,"sudo rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next/")
$(MAKE) install REMOTE=$(REMOTE) DESTDIR=/media/embassy/next OS_ARCH=$(OS_ARCH) $(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/embassy/next PLATFORM=$(PLATFORM)
ssh $(REMOTE) "sudo touch /media/embassy/config/upgrade && sudo sync && sudo reboot" $(call ssh,'sudo NO_SYNC=1 /media/embassy/next/usr/lib/startos/scripts/chroot-and-upgrade "apt-get install -y $(shell cat ./build/lib/depends)"')
emulate-reflash: emulate-reflash: $(ALL_TARGETS)
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi @if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
ssh $(REMOTE) "sudo rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next/" $(call ssh,"sudo rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next/")
$(MAKE) install REMOTE=$(REMOTE) DESTDIR=/media/embassy/next OS_ARCH=$(OS_ARCH) $(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/embassy/next PLATFORM=$(PLATFORM)
ssh $(REMOTE) "sudo touch /media/embassy/config/upgrade && sudo rm -f /media/embassy/config/disk.guid && sudo sync && sudo reboot" $(call ssh,"sudo touch /media/embassy/config/upgrade && sudo rm -f /media/embassy/config/disk.guid && sudo sync && sudo reboot")
system-images/compat/docker-images/aarch64.tar system-images/compat/docker-images/x86_64.tar: $(COMPAT_SRC) upload-ota: results/$(BASENAME).squashfs
cd system-images/compat && make TARGET=$(TARGET) KEY=$(KEY) ./upload-ota.sh
system-images/utils/docker-images/aarch64.tar system-images/utils/docker-images/x86_64.tar: $(UTILS_SRC) build/lib/depends build/lib/conflicts: build/dpkg-deps/*
cd system-images/utils && make build/dpkg-deps/generate.sh
system-images/binfmt/docker-images/aarch64.tar system-images/binfmt/docker-images/x86_64.tar: $(BINFMT_SRC) system-images/compat/docker-images/$(ARCH).tar: $(COMPAT_SRC) backend/Cargo.lock | docker-buildx
cd system-images/binfmt && make cd system-images/compat && make docker-images/$(ARCH).tar && touch docker-images/$(ARCH).tar
raspios.img: system-images/utils/docker-images/$(ARCH).tar: $(UTILS_SRC) | docker-buildx
wget --continue https://downloads.raspberrypi.org/raspios_lite_arm64/images/raspios_lite_arm64-2022-01-28/2022-01-28-raspios-bullseye-arm64-lite.zip cd system-images/utils && make docker-images/$(ARCH).tar && touch docker-images/$(ARCH).tar
unzip 2022-01-28-raspios-bullseye-arm64-lite.zip
mv 2022-01-28-raspios-bullseye-arm64-lite.img raspios.img system-images/binfmt/docker-images/$(ARCH).tar: $(BINFMT_SRC) | docker-buildx
cd system-images/binfmt && make docker-images/$(ARCH).tar && touch docker-images/$(ARCH).tar
snapshots: libs/snapshot_creator/Cargo.toml snapshots: libs/snapshot_creator/Cargo.toml
cd libs/ && ./build-v8-snapshot.sh cd libs/ && ./build-v8-snapshot.sh
@@ -163,27 +184,26 @@ $(EMBASSY_BINS): $(BACKEND_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) frontend/pa
frontend/node_modules: frontend/package.json frontend/node_modules: frontend/package.json
npm --prefix frontend ci npm --prefix frontend ci
frontend/dist/ui: $(FRONTEND_UI_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE) frontend/dist/raw/ui: $(FRONTEND_UI_SRC) $(FRONTEND_SHARED_SRC)
npm --prefix frontend run build:ui npm --prefix frontend run build:ui
frontend/dist/setup-wizard: $(FRONTEND_SETUP_WIZARD_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE) frontend/dist/raw/setup-wizard: $(FRONTEND_SETUP_WIZARD_SRC) $(FRONTEND_SHARED_SRC)
npm --prefix frontend run build:setup npm --prefix frontend run build:setup
frontend/dist/diagnostic-ui: $(FRONTEND_DIAGNOSTIC_UI_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE) frontend/dist/raw/diagnostic-ui: $(FRONTEND_DIAGNOSTIC_UI_SRC) $(FRONTEND_SHARED_SRC)
npm --prefix frontend run build:dui npm --prefix frontend run build:dui
frontend/dist/install-wizard: $(FRONTEND_INSTALL_WIZARD_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE) frontend/dist/raw/install-wizard: $(FRONTEND_INSTALL_WIZARD_SRC) $(FRONTEND_SHARED_SRC)
npm --prefix frontend run build:install-wiz npm --prefix frontend run build:install-wiz
frontend/dist/static: $(EMBASSY_UIS) $(ENVIRONMENT_FILE)
./compress-uis.sh
frontend/config.json: $(GIT_HASH_FILE) frontend/config-sample.json frontend/config.json: $(GIT_HASH_FILE) frontend/config-sample.json
jq '.useMocks = false' frontend/config-sample.json > frontend/config.json jq '.useMocks = false' frontend/config-sample.json | jq '.gitHash = "$(shell cat GIT_HASH.txt)"' > frontend/config.json
jq '.packageArch = "$(ARCH)"' frontend/config.json > frontend/config.json.tmp
jq '.osArch = "$(OS_ARCH)"' frontend/config.json.tmp > frontend/config.json
rm frontend/config.json.tmp
npm --prefix frontend run-script build-config
frontend/patchdb-ui-seed.json: frontend/package.json frontend/patchdb-ui-seed.json: frontend/package.json
jq '."ack-welcome" = "$(shell yq '.version' frontend/package.json)"' frontend/patchdb-ui-seed.json > ui-seed.tmp jq '."ack-welcome" = $(shell jq '.version' frontend/package.json)' frontend/patchdb-ui-seed.json > ui-seed.tmp
mv ui-seed.tmp frontend/patchdb-ui-seed.json mv ui-seed.tmp frontend/patchdb-ui-seed.json
patch-db/client/node_modules: patch-db/client/package.json patch-db/client/node_modules: patch-db/client/package.json
@@ -194,20 +214,17 @@ patch-db/client/dist: $(PATCH_DB_CLIENT_SRC) patch-db/client/node_modules
npm --prefix frontend run build:deps npm --prefix frontend run build:deps
# used by github actions # used by github actions
backend-$(ARCH).tar: $(EMBASSY_BINS) compiled-$(ARCH).tar: $(COMPILED_TARGETS) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE)
tar -cvf $@ $^ tar -cvf $@ $^
# this is a convenience step to build all frontends - it is not referenced elsewhere in this file # this is a convenience step to build all frontends - it is not referenced elsewhere in this file
frontends: $(EMBASSY_UIS) frontends: $(EMBASSY_UIS)
# this is a convenience step to build the UI # this is a convenience step to build the UI
ui: frontend/dist/ui ui: frontend/dist/raw/ui
# used by github actions
backend: $(EMBASSY_BINS)
cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast:
./build-cargo-dep.sh nc-broadcast
cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep: cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep:
./build-cargo-dep.sh pi-beep ARCH=aarch64 ./build-cargo-dep.sh pi-beep
cargo-deps/$(ARCH)-unknown-linux-gnu/release/tokio-console:
ARCH=$(ARCH) ./build-cargo-dep.sh tokio-console

113
README.md
View File

@@ -1,51 +1,84 @@
# embassyOS <div align="center">
[![version](https://img.shields.io/github/v/tag/Start9Labs/embassy-os?color=success)](https://github.com/Start9Labs/embassy-os/releases) <img src="frontend/projects/shared/assets/img/icon.png" alt="StartOS Logo" width="16%" />
[![build](https://github.com/Start9Labs/embassy-os/actions/workflows/product.yaml/badge.svg)](https://github.com/Start9Labs/embassy-os/actions/workflows/product.yaml) <h1 style="margin-top: 0;">StartOS</h1>
[![community](https://img.shields.io/badge/community-matrix-yellow)](https://matrix.to/#/#community:matrix.start9labs.com) <a href="https://github.com/Start9Labs/start-os/releases">
[![community](https://img.shields.io/badge/community-telegram-informational)](https://t.me/start9_labs) <img alt="GitHub release (with filter)" src="https://img.shields.io/github/v/release/start9labs/start-os?logo=github">
[![support](https://img.shields.io/badge/support-docs-important)](https://docs.start9.com) </a>
[![developer](https://img.shields.io/badge/developer-matrix-blueviolet)](https://matrix.to/#/#community-dev:matrix.start9labs.com) <a href="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml">
[![website](https://img.shields.io/website?down_color=lightgrey&down_message=offline&up_color=green&up_message=online&url=https%3A%2F%2Fstart9.com)](https://start9.com) <img src="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml/badge.svg">
</a>
<a href="https://heyapollo.com/product/startos">
<img alt="Static Badge" src="https://img.shields.io/badge/apollo-review%20%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%20-slateblue">
</a>
<a href="https://twitter.com/start9labs">
<img alt="X (formerly Twitter) Follow" src="https://img.shields.io/twitter/follow/start9labs">
</a>
<a href="https://mastodon.start9labs.com">
<img src="https://img.shields.io/mastodon/follow/000000001?domain=https%3A%2F%2Fmastodon.start9labs.com&label=Follow&style=social">
</a>
<a href="https://matrix.to/#/#community:matrix.start9labs.com">
<img alt="Static Badge" src="https://img.shields.io/badge/community-matrix-yellow?logo=matrix">
</a>
<a href="https://t.me/start9_labs">
<img alt="Static Badge" src="https://img.shields.io/badge/community-telegram-blue?logo=telegram">
</a>
<a href="https://docs.start9.com">
<img alt="Static Badge" src="https://img.shields.io/badge/docs-orange?label=%F0%9F%91%A4%20support">
</a>
<a href="https://matrix.to/#/#community-dev:matrix.start9labs.com">
<img alt="Static Badge" src="https://img.shields.io/badge/developer-matrix-darkcyan?logo=matrix">
</a>
<a href="https://start9.com">
<img alt="Website" src="https://img.shields.io/website?up_message=online&down_message=offline&url=https%3A%2F%2Fstart9.com&logo=website&label=%F0%9F%8C%90%20website">
</a>
</div>
<br />
<div align="center">
<h3>
Welcome to the era of Sovereign Computing
</h3>
<p>
StartOS is an open source Linux distribution optimized for running a personal server. It facilitates the discovery, installation, network configuration, service configuration, data backup, dependency management, and health monitoring of self-hosted software services.
</p>
</div>
<br />
<p align="center">
<img src="assets/StartOS.png" alt="StartOS" width="85%">
</p>
<br />
[![mastodon](https://img.shields.io/mastodon/follow/000000001?domain=https%3A%2F%2Fmastodon.start9labs.com&label=Follow&style=social)](http://mastodon.start9labs.com) ## Running StartOS
[![twitter](https://img.shields.io/twitter/follow/start9labs?label=Follow)](https://twitter.com/start9labs) There are multiple ways to get started with StartOS:
### _Welcome to the era of Sovereign Computing_ ### ### 💰 Buy a Start9 server
This is the most convenient option. Simply [buy a server](https://store.start9.com) from Start9 and plug it in.
embassyOS is a browser-based, graphical operating system for a personal server. embassyOS facilitates the discovery, installation, network configuration, service configuration, data backup, dependency management, and health monitoring of self-hosted software services. It is the most advanced, secure, reliable, and user friendly personal server OS in the world. ### 👷 Build your own server
This option is easier than you might imagine, and there are 4 reasons why you might prefer it:
## Running embassyOS 1. You already have hardware
There are multiple ways to get your hands on embassyOS. 1. You want to save on shipping costs
1. You prefer not to divulge your physical address
### :moneybag: Buy an Embassy 1. You just like building things
This is the most convenient option. Simply [buy an Embassy](https://start9.com) from Start9 and plug it in. Depending on where you live, shipping costs and import duties will vary.
### :construction_worker: Build your own Embassy
While not as convenient as buying an Embassy, this option is easier than you might imagine, and there are 4 reasons why you might prefer it:
1. You already have your own hardware.
1. You want to save on shipping costs.
1. You prefer not to divulge your physical address.
1. You just like building things.
To pursue this option, follow one of our [DIY guides](https://start9.com/latest/diy). To pursue this option, follow one of our [DIY guides](https://start9.com/latest/diy).
### :hammer_and_wrench: Build embassyOS from Source ## ❤️ Contributing
There are multiple ways to contribute: work directly on StartOS, package a service for the marketplace, or help with documentation and guides. To learn more about contributing, see [here](https://start9.com/contribute/).
embassyOS can be built from source, for personal use, for free. To report security issues, please email our security team - security@start9.com.
A detailed guide for doing so can be found [here](https://github.com/Start9Labs/embassy-os/blob/master/build/README.md).
## :heart: Contributing ## 🌎 Marketplace
There are multiple ways to contribute: work directly on embassyOS, package a service for the marketplace, or help with documentation and guides. To learn more about contributing, see [here](https://docs.start9.com/latest/contribute/) or [here](https://github.com/Start9Labs/embassy-os/blob/master/CONTRIBUTING.md). There are dozens of services available for StartOS, and new ones are being added all the time. Check out the full list of available services [here](https://marketplace.start9.com/marketplace). To read more about the Marketplace ecosystem, check out this [blog post](https://blog.start9.com/start9-marketplace-strategy/)
## 🖥️ User Interface Screenshots
## UI Screenshots
<p align="center"> <p align="center">
<img src="assets/embassyOS.png" alt="embassyOS" width="85%"> <img src="assets/registry.png" alt="StartOS Marketplace" width="49%">
</p> <img src="assets/community.png" alt="StartOS Community Registry" width="49%">
<p align="center"> <img src="assets/c-lightning.png" alt="StartOS NextCloud Service" width="49%">
<img src="assets/eOS-preferences.png" alt="Embassy Preferences" width="49%"> <img src="assets/btcpay.png" alt="StartOS BTCPay Service" width="49%">
<img src="assets/eOS-ghost.png" alt="Embassy Ghost Service" width="49%"> <img src="assets/nextcloud.png" alt="StartOS System Settings" width="49%">
</p> <img src="assets/system.png" alt="StartOS System Settings" width="49%">
<p align="center"> <img src="assets/welcome.png" alt="StartOS System Settings" width="49%">
<img src="assets/eOS-synapse-health-check.png" alt="Embassy Synapse Health Checks" width="49%"> <img src="assets/logs.png" alt="StartOS System Settings" width="49%">
<img src="assets/eOS-sideload.png" alt="Embassy Sideload Service" width="49%">
</p> </p>

BIN
assets/StartOS.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 2.1 MiB

BIN
assets/btcpay.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 396 KiB

BIN
assets/c-lightning.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 402 KiB

BIN
assets/community.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 591 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 281 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 266 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 154 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 213 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 191 KiB

BIN
assets/logs.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 1.6 MiB

BIN
assets/nextcloud.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 319 KiB

BIN
assets/registry.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 521 KiB

BIN
assets/system.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 331 KiB

BIN
assets/welcome.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 402 KiB

View File

@@ -0,0 +1,16 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
},
"nullable": []
},
"hash": "1ce5254f27de971fd87f5ab66d300f2b22433c86617a0dbf796bf2170186dd2e"
}

View File

@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM ssh_keys WHERE fingerprint = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text"
]
},
"nullable": []
},
"hash": "21471490cdc3adb206274cc68e1ea745ffa5da4479478c1fd2158a45324b1930"
}

View File

@@ -0,0 +1,40 @@
{
"db_name": "PostgreSQL",
"query": "SELECT hostname, path, username, password FROM cifs_shares WHERE id = $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "hostname",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "path",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "username",
"type_info": "Text"
},
{
"ordinal": 3,
"name": "password",
"type_info": "Text"
}
],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": [
false,
false,
false,
true
]
},
"hash": "28ea34bbde836e0618c5fc9bb7c36e463c20c841a7d6a0eb15be0f24f4a928ec"
}

View File

@@ -0,0 +1,34 @@
{
"db_name": "PostgreSQL",
"query": "SELECT * FROM ssh_keys WHERE fingerprint = $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "fingerprint",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "openssh_pubkey",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "created_at",
"type_info": "Text"
}
],
"parameters": {
"Left": [
"Text"
]
},
"nullable": [
false,
false,
false
]
},
"hash": "4099028a5c0de578255bf54a67cef6cb0f1e9a4e158260700f1639dd4b438997"
}

View File

@@ -0,0 +1,50 @@
{
"db_name": "PostgreSQL",
"query": "SELECT * FROM session WHERE logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "logged_in",
"type_info": "Timestamp"
},
{
"ordinal": 2,
"name": "logged_out",
"type_info": "Timestamp"
},
{
"ordinal": 3,
"name": "last_active",
"type_info": "Timestamp"
},
{
"ordinal": 4,
"name": "user_agent",
"type_info": "Text"
},
{
"ordinal": 5,
"name": "metadata",
"type_info": "Text"
}
],
"parameters": {
"Left": []
},
"nullable": [
false,
false,
true,
false,
true,
false
]
},
"hash": "4691e3a2ce80b59009ac17124f54f925f61dc5ea371903e62cdffa5d7b67ca96"
}

View File

@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text"
]
},
"nullable": []
},
"hash": "4bcfbefb1eb3181343871a1cd7fc3afb81c2be5c681cfa8b4be0ce70610e9c3a"
}

View File

@@ -0,0 +1,20 @@
{
"db_name": "PostgreSQL",
"query": "SELECT password FROM account",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "password",
"type_info": "Text"
}
],
"parameters": {
"Left": []
},
"nullable": [
false
]
},
"hash": "629be61c3c341c131ddbbff0293a83dbc6afd07cae69d246987f62cf0cc35c2a"
}

View File

@@ -0,0 +1,23 @@
{
"db_name": "PostgreSQL",
"query": "SELECT key FROM tor WHERE package = $1 AND interface = $2",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "key",
"type_info": "Bytea"
}
],
"parameters": {
"Left": [
"Text",
"Text"
]
},
"nullable": [
false
]
},
"hash": "687688055e63d27123cdc89a5bbbd8361776290a9411d527eaf1fdb40bef399d"
}

View File

@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "UPDATE session SET last_active = CURRENT_TIMESTAMP WHERE id = $1 AND logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text"
]
},
"nullable": []
},
"hash": "6d35ccf780fb2bb62586dd1d3df9c1550a41ee580dad3f49d35cb843ebef10ca"
}

View File

@@ -0,0 +1,24 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO UPDATE SET package = EXCLUDED.package RETURNING key",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "key",
"type_info": "Bytea"
}
],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
},
"nullable": [
false
]
},
"hash": "770c1017734720453dc87b58c385b987c5af5807151ff71a59000014586752e0"
}

View File

@@ -0,0 +1,65 @@
{
"db_name": "PostgreSQL",
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications WHERE id < $1 ORDER BY id DESC LIMIT $2",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "package_id",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "created_at",
"type_info": "Timestamp"
},
{
"ordinal": 3,
"name": "code",
"type_info": "Int4"
},
{
"ordinal": 4,
"name": "level",
"type_info": "Text"
},
{
"ordinal": 5,
"name": "title",
"type_info": "Text"
},
{
"ordinal": 6,
"name": "message",
"type_info": "Text"
},
{
"ordinal": 7,
"name": "data",
"type_info": "Text"
}
],
"parameters": {
"Left": [
"Int4",
"Int8"
]
},
"nullable": [
false,
true,
false,
false,
false,
false,
false,
true
]
},
"hash": "7b64f032d507e8ffe37c41f4c7ad514a66c421a11ab04c26d89a7aa8f6b67210"
}

View File

@@ -0,0 +1,19 @@
{
"db_name": "PostgreSQL",
"query": "\n INSERT INTO account (\n id,\n server_id,\n hostname,\n password,\n network_key,\n root_ca_key_pem,\n root_ca_cert_pem\n ) VALUES (\n 0, $1, $2, $3, $4, $5, $6\n ) ON CONFLICT (id) DO UPDATE SET\n server_id = EXCLUDED.server_id,\n hostname = EXCLUDED.hostname,\n password = EXCLUDED.password,\n network_key = EXCLUDED.network_key,\n root_ca_key_pem = EXCLUDED.root_ca_key_pem,\n root_ca_cert_pem = EXCLUDED.root_ca_cert_pem\n ",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Bytea",
"Text",
"Text"
]
},
"nullable": []
},
"hash": "7c7a3549c997eb75bf964ea65fbb98a73045adf618696cd838d79203ef5383fb"
}

View File

@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM tor WHERE package = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text"
]
},
"nullable": []
},
"hash": "7e0649d839927e57fa03ee51a2c9f96a8bdb0fc97ee8a3c6df1069e1e2b98576"
}

View File

@@ -0,0 +1,16 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO tor (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
},
"nullable": []
},
"hash": "8951b9126fbf60dbb5997241e11e3526b70bccf3e407327917294a993bc17ed5"
}

View File

@@ -0,0 +1,64 @@
{
"db_name": "PostgreSQL",
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications ORDER BY id DESC LIMIT $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "package_id",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "created_at",
"type_info": "Timestamp"
},
{
"ordinal": 3,
"name": "code",
"type_info": "Int4"
},
{
"ordinal": 4,
"name": "level",
"type_info": "Text"
},
{
"ordinal": 5,
"name": "title",
"type_info": "Text"
},
{
"ordinal": 6,
"name": "message",
"type_info": "Text"
},
{
"ordinal": 7,
"name": "data",
"type_info": "Text"
}
],
"parameters": {
"Left": [
"Int8"
]
},
"nullable": [
false,
true,
false,
false,
false,
false,
false,
true
]
},
"hash": "94d471bb374b4965c6cbedf8c17bbf6bea226d38efaf6559923c79a36d5ca08c"
}

View File

@@ -0,0 +1,44 @@
{
"db_name": "PostgreSQL",
"query": "SELECT id, hostname, path, username, password FROM cifs_shares",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "hostname",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "path",
"type_info": "Text"
},
{
"ordinal": 3,
"name": "username",
"type_info": "Text"
},
{
"ordinal": 4,
"name": "password",
"type_info": "Text"
}
],
"parameters": {
"Left": []
},
"nullable": [
false,
false,
false,
false,
true
]
},
"hash": "95c4ab4c645f3302568c6ff13d85ab58252362694cf0f56999bf60194d20583a"
}

View File

@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM cifs_shares WHERE id = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": []
},
"hash": "a60d6e66719325b08dc4ecfacaf337527233c84eee758ac9be967906e5841d27"
}

View File

@@ -0,0 +1,32 @@
{
"db_name": "PostgreSQL",
"query": "SELECT fingerprint, openssh_pubkey, created_at FROM ssh_keys",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "fingerprint",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "openssh_pubkey",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "created_at",
"type_info": "Text"
}
],
"parameters": {
"Left": []
},
"nullable": [
false,
false,
false
]
},
"hash": "a6b0c8909a3a5d6d9156aebfb359424e6b5a1d1402e028219e21726f1ebd282e"
}

View File

@@ -0,0 +1,18 @@
{
"db_name": "PostgreSQL",
"query": "UPDATE cifs_shares SET hostname = $1, path = $2, username = $3, password = $4 WHERE id = $5",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Text",
"Int4"
]
},
"nullable": []
},
"hash": "b1147beaaabbed89f2ab8c1e13ec4393a9a8fde2833cf096af766a979d94dee6"
}

View File

@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM network_keys WHERE package = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text"
]
},
"nullable": []
},
"hash": "b203820ee1c553a4b246eac74b79bd10d5717b2a0ddecf22330b7d531aac7c5d"
}

View File

@@ -0,0 +1,20 @@
{
"db_name": "PostgreSQL",
"query": "SELECT openssh_pubkey FROM ssh_keys",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "openssh_pubkey",
"type_info": "Text"
}
],
"parameters": {
"Left": []
},
"nullable": [
false
]
},
"hash": "d5117054072476377f3c4f040ea429d4c9b2cf534e76f35c80a2bf60e8599cca"
}

View File

@@ -0,0 +1,19 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO notifications (package_id, code, level, title, message, data) VALUES ($1, $2, $3, $4, $5, $6)",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Int4",
"Text",
"Text",
"Text",
"Text"
]
},
"nullable": []
},
"hash": "da71f94b29798d1738d2b10b9a721ea72db8cfb362e7181c8226d9297507c62b"
}

View File

@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM notifications WHERE id = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": []
},
"hash": "e185203cf84e43b801dfb23b4159e34aeaef1154dcd3d6811ab504915497ccf7"
}

View File

@@ -0,0 +1,20 @@
{
"db_name": "PostgreSQL",
"query": "SELECT tor_key FROM account WHERE id = 0",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "tor_key",
"type_info": "Bytea"
}
],
"parameters": {
"Left": []
},
"nullable": [
true
]
},
"hash": "e545696735f202f9d13cf22a561f3ff3f9aed7f90027a9ba97634bcb47d772f0"
}

View File

@@ -0,0 +1,16 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO session (id, user_agent, metadata) VALUES ($1, $2, $3)",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text"
]
},
"nullable": []
},
"hash": "e5843c5b0e7819b29aa1abf2266799bd4f82e761837b526a0972c3d4439a264d"
}

View File

@@ -0,0 +1,40 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT\n network_keys.package,\n network_keys.interface,\n network_keys.key,\n tor.key AS \"tor_key?\"\n FROM\n network_keys\n LEFT JOIN\n tor\n ON\n network_keys.package = tor.package\n AND\n network_keys.interface = tor.interface\n WHERE\n network_keys.package = $1\n ",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "package",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "interface",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "key",
"type_info": "Bytea"
},
{
"ordinal": 3,
"name": "tor_key?",
"type_info": "Bytea"
}
],
"parameters": {
"Left": [
"Text"
]
},
"nullable": [
false,
false,
false,
false
]
},
"hash": "e95322a8e2ae3b93f1e974b24c0b81803f1e9ec9e8ebbf15cafddfc1c5a028ed"
}

View File

@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM notifications WHERE id < $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": []
},
"hash": "eb750adaa305bdbf3c5b70aaf59139c7b7569602adb58f2d6b3a94da4f167b0a"
}

View File

@@ -0,0 +1,25 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO cifs_shares (hostname, path, username, password) VALUES ($1, $2, $3, $4) RETURNING id",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Int4"
}
],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Text"
]
},
"nullable": [
false
]
},
"hash": "ecc765d8205c0876956f95f76944ac6a5f34dd820c4073b7728c7067aab9fded"
}

View File

@@ -0,0 +1,16 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO ssh_keys (fingerprint, openssh_pubkey, created_at) VALUES ($1, $2, $3)",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text"
]
},
"nullable": []
},
"hash": "f6d1c5ef0f9d9577bea8382318967b9deb46da75788c7fe6082b43821c22d556"
}

View File

@@ -0,0 +1,20 @@
{
"db_name": "PostgreSQL",
"query": "SELECT network_key FROM account WHERE id = 0",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "network_key",
"type_info": "Bytea"
}
],
"parameters": {
"Left": []
},
"nullable": [
false
]
},
"hash": "f7d2dae84613bcef330f7403352cc96547f3f6dbec11bf2eadfaf53ad8ab51b5"
}

View File

@@ -0,0 +1,62 @@
{
"db_name": "PostgreSQL",
"query": "SELECT * FROM account WHERE id = 0",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "password",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "tor_key",
"type_info": "Bytea"
},
{
"ordinal": 3,
"name": "server_id",
"type_info": "Text"
},
{
"ordinal": 4,
"name": "hostname",
"type_info": "Text"
},
{
"ordinal": 5,
"name": "network_key",
"type_info": "Bytea"
},
{
"ordinal": 6,
"name": "root_ca_key_pem",
"type_info": "Text"
},
{
"ordinal": 7,
"name": "root_ca_cert_pem",
"type_info": "Text"
}
],
"parameters": {
"Left": []
},
"nullable": [
false,
false,
true,
true,
true,
false,
false,
false
]
},
"hash": "fe6e4f09f3028e5b6b6259e86cbad285680ce157aae9d7837ac020c8b2945e7f"
}

4190
backend/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -1,7 +1,7 @@
[package] [package]
authors = ["Aiden McClelland <me@drbonez.dev>"] authors = ["Aiden McClelland <me@drbonez.dev>"]
description = "The core of the Start9 Embassy Operating System" description = "The core of StartOS"
documentation = "https://docs.rs/embassy-os" documentation = "https://docs.rs/start-os"
edition = "2021" edition = "2021"
keywords = [ keywords = [
"self-hosted", "self-hosted",
@@ -11,158 +11,160 @@ keywords = [
"full-node", "full-node",
"lightning", "lightning",
] ]
name = "embassy-os" name = "start-os"
readme = "README.md" readme = "README.md"
repository = "https://github.com/Start9Labs/embassy-os" repository = "https://github.com/Start9Labs/start-os"
version = "0.3.4" version = "0.3.5"
license = "MIT"
[lib] [lib]
name = "embassy" name = "startos"
path = "src/lib.rs" path = "src/lib.rs"
[[bin]] [[bin]]
name = "embassyd" name = "startbox"
path = "src/bin/embassyd.rs" path = "src/main.rs"
[[bin]]
name = "embassy-init"
path = "src/bin/embassy-init.rs"
[[bin]]
name = "embassy-sdk"
path = "src/bin/embassy-sdk.rs"
[[bin]]
name = "embassy-cli"
path = "src/bin/embassy-cli.rs"
[[bin]]
name = "avahi-alias"
path = "src/bin/avahi-alias.rs"
[features] [features]
avahi = ["avahi-sys"] avahi = ["avahi-sys"]
default = ["avahi", "js_engine"] avahi-alias = ["avahi"]
cli = []
daemon = []
default = ["cli", "sdk", "daemon", "js_engine"]
dev = [] dev = []
unstable = ["patch-db/unstable"] docker = []
sdk = []
unstable = ["console-subscriber", "tokio/tracing"]
[dependencies] [dependencies]
aes = { version = "0.7.5", features = ["ctr"] } aes = { version = "0.7.5", features = ["ctr"] }
async-compression = { version = "0.3.15", features = [ async-compression = { version = "0.4.4", features = [
"gzip", "gzip",
"brotli", "brotli",
"tokio", "tokio",
] } ] }
async-stream = "0.3.3" async-stream = "0.3.5"
async-trait = "0.1.56" async-trait = "0.1.74"
avahi-sys = { git = "https://github.com/Start9Labs/avahi-sys", version = "0.10.0", branch = "feature/dynamic-linking", features = [ avahi-sys = { git = "https://github.com/Start9Labs/avahi-sys", version = "0.10.0", branch = "feature/dynamic-linking", features = [
"dynamic", "dynamic",
], optional = true } ], optional = true }
base32 = "0.4.0" base32 = "0.4.0"
base64 = "0.13.0" base64 = "0.21.4"
base64ct = "1.5.1" base64ct = "1.6.0"
basic-cookies = "0.1.4" basic-cookies = "0.1.4"
bollard = "0.13.0"
bytes = "1" bytes = "1"
chrono = { version = "0.4.19", features = ["serde"] } chrono = { version = "0.4.31", features = ["serde"] }
clap = "3.2.8" clap = "3.2.25"
color-eyre = "0.6.1" color-eyre = "0.6.2"
cookie = "0.16.2" console = "0.15.7"
cookie_store = "0.19.0" console-subscriber = { version = "0.2", optional = true }
cookie = "0.18.0"
cookie_store = "0.20.0"
current_platform = "0.2.0" current_platform = "0.2.0"
digest = "0.10.3" digest = "0.10.7"
digest-old = { package = "digest", version = "0.9.0" }
divrem = "1.0.0" divrem = "1.0.0"
ed25519 = { version = "1.5.2", features = ["pkcs8", "pem", "alloc"] } ed25519 = { version = "2.2.3", features = ["pkcs8", "pem", "alloc"] }
ed25519-dalek = { version = "1.0.1", features = ["serde"] } ed25519-dalek = { version = "2.0.0", features = [
"serde",
"zeroize",
"rand_core",
"digest",
] }
ed25519-dalek-v1 = { package = "ed25519-dalek", version = "1" }
embassy_container_init = { path = "../libs/embassy_container_init" }
emver = { version = "0.1.7", git = "https://github.com/Start9Labs/emver-rs.git", features = [ emver = { version = "0.1.7", git = "https://github.com/Start9Labs/emver-rs.git", features = [
"serde", "serde",
] } ] }
fd-lock-rs = "0.1.4" fd-lock-rs = "0.1.4"
futures = "0.3.21" futures = "0.3.28"
git-version = "0.3.5" gpt = "3.1.0"
gpt = "3.0.0"
helpers = { path = "../libs/helpers" } helpers = { path = "../libs/helpers" }
embassy_container_init = { path = "../libs/embassy_container_init" }
hex = "0.4.3" hex = "0.4.3"
hmac = "0.12.1" hmac = "0.12.1"
http = "0.2.8" http = "0.2.9"
hyper = { version = "0.14.20", features = ["full"] } hyper = { version = "0.14.27", features = ["full"] }
hyper-ws-listener = "0.2.0" hyper-ws-listener = "0.3.0"
imbl = "2.0.0" imbl = "2.0.2"
indexmap = { version = "1.9.1", features = ["serde"] } imbl-value = { git = "https://github.com/Start9Labs/imbl-value.git" }
ipnet = { version = "2.7.1", features = ["serde"] } include_dir = "0.7.3"
indexmap = { version = "2.0.2", features = ["serde"] }
indicatif = { version = "0.17.7", features = ["tokio"] }
ipnet = { version = "2.8.0", features = ["serde"] }
iprange = { version = "0.6.7", features = ["serde"] } iprange = { version = "0.6.7", features = ["serde"] }
isocountry = "0.3.2" isocountry = "0.3.2"
itertools = "0.10.3" itertools = "0.11.0"
josekit = "0.8.1" jaq-core = "0.10.1"
jaq-std = "0.10.0"
josekit = "0.8.4"
js_engine = { path = '../libs/js_engine', optional = true } js_engine = { path = '../libs/js_engine', optional = true }
jsonpath_lib = "0.3.0" jsonpath_lib = { git = "https://github.com/Start9Labs/jsonpath.git" }
lazy_static = "1.4.0" lazy_static = "1.4.0"
libc = "0.2.126" libc = "0.2.149"
log = "0.4.17" log = "0.4.20"
mbrman = "0.5.0" mbrman = "0.5.2"
models = { version = "*", path = "../libs/models" } models = { version = "*", path = "../libs/models" }
nix = "0.25.0" new_mime_guess = "4"
nom = "7.1.1" nix = { version = "0.27.1", features = ["user", "process", "signal", "fs"] }
num = "0.4.0" nom = "7.1.3"
num_enum = "0.5.7" num = "0.4.1"
openssh-keys = "0.5.0" num_enum = "0.7.0"
openssl = { version = "0.10.41", features = ["vendored"] } openssh-keys = "0.6.2"
openssl = { version = "0.10.57", features = ["vendored"] }
p256 = { version = "0.13.2", features = ["pem"] }
patch-db = { version = "*", path = "../patch-db/patch-db", features = [ patch-db = { version = "*", path = "../patch-db/patch-db", features = [
"trace", "trace",
] } ] }
p256 = { version = "0.12.0", features = ["pem"] } pbkdf2 = "0.12.2"
pbkdf2 = "0.11.0" pin-project = "1.1.3"
pin-project = "1.0.11" pkcs8 = { version = "0.10.2", features = ["std"] }
pkcs8 = { version = "0.9.0", features = ["std"] }
prettytable-rs = "0.10.0" prettytable-rs = "0.10.0"
proptest = "1.0.0" proptest = "1.3.1"
proptest-derive = "0.3.0" proptest-derive = "0.4.0"
rand = { version = "0.8.5", features = ["std"] } rand = { version = "0.8.5", features = ["std"] }
rand-old = { package = "rand", version = "0.7.3" } regex = "1.10.2"
regex = "1.6.0" reqwest = { version = "0.11.22", features = ["stream", "json", "socks"] }
reqwest = { version = "0.11.11", features = ["stream", "json", "socks"] } reqwest_cookie_store = "0.6.0"
reqwest_cookie_store = "0.5.0" rpassword = "7.2.0"
rpassword = "7.0.0"
rpc-toolkit = "0.2.2" rpc-toolkit = "0.2.2"
rust-argon2 = "1.0.0" rust-argon2 = "2.0.0"
scopeguard = "1.1" # because avahi-sys fucks your shit up scopeguard = "1.1" # because avahi-sys fucks your shit up
serde = { version = "1.0.139", features = ["derive", "rc"] } serde = { version = "1.0", features = ["derive", "rc"] }
serde_cbor = { package = "ciborium", version = "0.2.0" } serde_cbor = { package = "ciborium", version = "0.2.1" }
serde_json = "1.0.82" serde_json = "1.0"
serde_toml = { package = "toml", version = "0.5.9" } serde_toml = { package = "toml", version = "0.8.2" }
serde_with = { version = "2.0.1", features = ["macros", "json"] } serde_with = { version = "3.4.0", features = ["macros", "json"] }
serde_yaml = "0.9.11" serde_yaml = "0.9.25"
sha2 = "0.10.2" sha2 = "0.10.2"
sha2-old = { package = "sha2", version = "0.9.9" }
simple-logging = "2.0.2" simple-logging = "2.0.2"
sqlx = { version = "0.6.0", features = [ sqlx = { version = "0.7.2", features = [
"chrono", "chrono",
"offline",
"runtime-tokio-rustls", "runtime-tokio-rustls",
"postgres", "postgres",
] } ] }
ssh-key = { version = "0.5.1", features = ["ed25519"] } sscanf = "0.4.1"
stderrlog = "0.5.3" ssh-key = { version = "0.6.2", features = ["ed25519"] }
tar = "0.4.38" stderrlog = "0.5.4"
thiserror = "1.0.31" tar = "0.4.40"
tokio = { version = "1.23", features = ["full"] } thiserror = "1.0.49"
tokio-stream = { version = "0.1.11", features = ["io-util", "sync", "net"] } tokio = { version = "1", features = ["full"] }
tokio-rustls = "0.24.1"
tokio-socks = "0.5.1"
tokio-stream = { version = "0.1.14", features = ["io-util", "sync", "net"] }
tokio-tar = { git = "https://github.com/dr-bonez/tokio-tar.git" } tokio-tar = { git = "https://github.com/dr-bonez/tokio-tar.git" }
tokio-tungstenite = { version = "0.17.1", features = ["native-tls"] } tokio-tungstenite = { version = "0.20.1", features = ["native-tls"] }
tokio-rustls = "0.23.4" tokio-util = { version = "0.7.9", features = ["io"] }
tokio-util = { version = "0.7.3", features = ["io"] }
torut = "0.2.1" torut = "0.2.1"
tracing = "0.1.35" tracing = "0.1.39"
tracing-error = "0.2.0" tracing-error = "0.2.0"
tracing-futures = "0.2.5" tracing-futures = "0.2.5"
tracing-subscriber = { version = "0.3.14", features = ["env-filter"] } tracing-journald = "0.3.0"
trust-dns-server = "0.22.0" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
typed-builder = "0.10.0" trust-dns-server = "0.23.1"
url = { version = "2.2.2", features = ["serde"] } typed-builder = "0.17.0"
uuid = { version = "1.1.2", features = ["v4"] } url = { version = "2.4.1", features = ["serde"] }
zeroize = "1.5.7" urlencoding = "2.1.3"
uuid = { version = "1.4.1", features = ["v4"] }
zeroize = "1.6.0"
[profile.test] [profile.test]
opt-level = 3 opt-level = 3

View File

@@ -1,36 +1,36 @@
# embassyOS Backend # StartOS Backend
- Requirements: - Requirements:
- [Install Rust](https://rustup.rs) - [Install Rust](https://rustup.rs)
- Recommended: [rust-analyzer](https://rust-analyzer.github.io/) - Recommended: [rust-analyzer](https://rust-analyzer.github.io/)
- [Docker](https://docs.docker.com/get-docker/) - [Docker](https://docs.docker.com/get-docker/)
- [Rust ARM64 Build Container](https://github.com/Start9Labs/rust-arm-builder) - [Rust ARM64 Build Container](https://github.com/Start9Labs/rust-arm-builder)
- Scripts (run withing the `./backend` directory) - Mac `brew install gnu-tar`
- Scripts (run within the `./backend` directory)
- `build-prod.sh` - compiles a release build of the artifacts for running on - `build-prod.sh` - compiles a release build of the artifacts for running on
ARM64 ARM64
- A Linux computer or VM - A Linux computer or VM
## Structure ## Structure
The embassyOS backend is broken up into 4 different binaries: The StartOS backend is packed into a single binary `startbox` that is symlinked under
several different names for different behaviour:
- embassyd: This is the main workhorse of embassyOS - any new functionality you - startd: This is the main workhorse of StartOS - any new functionality you
want will likely go here want will likely go here
- embassy-init: This is the component responsible for allowing you to set up - start-cli: This is a CLI tool that will allow you to issue commands to
your device, and handles system initialization on startup startd and control it similarly to the UI
- embassy-cli: This is a CLI tool that will allow you to issue commands to - start-sdk: This is a CLI tool that aids in building and packaging services
embassyd and control it similarly to the UI you wish to deploy to StartOS
- embassy-sdk: This is a CLI tool that aids in building and packaging services
you wish to deploy to the Embassy
Finally there is a library `embassy` that supports all four of these tools. Finally there is a library `startos` that supports all of these tools.
See [here](/backend/Cargo.toml) for details. See [here](/backend/Cargo.toml) for details.
## Building ## Building
You can build the entire operating system image using `make` from the root of You can build the entire operating system image using `make` from the root of
the embassyOS project. This will subsequently invoke the build scripts above to the StartOS project. This will subsequently invoke the build scripts above to
actually create the requisite binaries and put them onto the final operating actually create the requisite binaries and put them onto the final operating
system image. system image.

View File

@@ -1,24 +0,0 @@
#!/bin/bash
set -e
shopt -s expand_aliases
if [ "$0" != "./build-dev.sh" ]; then
>&2 echo "Must be run from backend directory"
exit 1
fi
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
alias 'rust-arm64-builder'='docker run $USE_TTY --rm -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-arm-cross:aarch64'
cd ..
rust-arm64-builder sh -c "(cd backend && cargo build --locked)"
cd backend
sudo chown -R $USER target
sudo chown -R $USER ~/.cargo
#rust-arm64-builder aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/embassyd

View File

@@ -1,23 +0,0 @@
#!/bin/bash
set -e
shopt -s expand_aliases
if [ "$0" != "./build-portable-dev.sh" ]; then
>&2 echo "Must be run from backend directory"
exit 1
fi
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
alias 'rust-musl-builder'='docker run $USE_TTY --rm -v "$HOME"/.cargo/registry:/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-musl-cross:x86_64-musl'
cd ..
rust-musl-builder sh -c "(cd backend && cargo +beta build --target=x86_64-unknown-linux-musl --no-default-features --locked)"
cd backend
sudo chown -R $USER target
sudo chown -R $USER ~/.cargo

View File

@@ -17,46 +17,28 @@ if tty -s; then
USE_TTY="-it" USE_TTY="-it"
fi fi
alias 'rust-gnu-builder'='docker run $USE_TTY --rm -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src -P start9/rust-arm-cross:aarch64'
alias 'rust-musl-builder'='docker run $USE_TTY --rm -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
cd .. cd ..
FLAGS="" FEATURES="$(echo $ENVIRONMENT | sed 's/-/,/g')"
if [[ "$ENVIRONMENT" =~ (^|-)unstable($|-) ]]; then RUSTFLAGS=""
FLAGS="unstable,$FLAGS"
fi alias 'rust-gnu-builder'='docker run $USE_TTY --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/usr/local/cargo/registry -v "$(pwd)":/home/rust/src -w /home/rust/src -P start9/rust-arm-cross:aarch64'
if [[ "$ENVIRONMENT" =~ (^|-)dev($|-) ]]; then alias 'rust-musl-builder'='docker run $USE_TTY --rm -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
FLAGS="dev,$FLAGS"
fi
set +e set +e
fail= fail=
if [[ "$FLAGS" = "" ]]; then echo "FEATURES=\"$FEATURES\""
rust-gnu-builder sh -c "(git config --global --add safe.directory '*'; cd backend && cargo build --release --locked --target=$ARCH-unknown-linux-gnu)" echo "RUSTFLAGS=\"$RUSTFLAGS\""
if test $? -ne 0; then rust-gnu-builder sh -c "(cd backend && cargo build --release --features avahi-alias,$FEATURES --locked --target=$ARCH-unknown-linux-gnu)"
fail=true if test $? -ne 0; then
fi fail=true
for ARCH in x86_64 aarch64
do
rust-musl-builder sh -c "(git config --global --add safe.directory '*'; cd libs && cargo build --release --locked --bin embassy_container_init )"
if test $? -ne 0; then
fail=true
fi
done
else
echo "FLAGS=$FLAGS"
rust-gnu-builder sh -c "(git config --global --add safe.directory '*'; cd backend && cargo build --release --features $FLAGS --locked --target=$ARCH-unknown-linux-gnu)"
if test $? -ne 0; then
fail=true
fi
for ARCH in x86_64 aarch64
do
rust-musl-builder sh -c "(git config --global --add safe.directory '*'; cd libs && cargo build --release --features $FLAGS --locked --bin embassy_container_init)"
if test $? -ne 0; then
fail=true
fi
done
fi fi
for ARCH in x86_64 aarch64
do
rust-musl-builder sh -c "(cd libs && cargo build --release --locked --bin embassy_container_init)"
if test $? -ne 0; then
fail=true
fi
done
set -e set -e
cd backend cd backend
@@ -67,5 +49,3 @@ sudo chown -R $USER ../libs/target
if [ -n "$fail" ]; then if [ -n "$fail" ]; then
exit 1 exit 1
fi fi
#rust-arm64-builder aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/embassyd

View File

@@ -1,15 +0,0 @@
[Unit]
Description=Embassy Init
After=network-online.target
Requires=network-online.target
Wants=avahi-daemon.service
[Service]
Type=oneshot
Environment=RUST_LOG=embassy_init=debug,embassy=debug,js_engine=debug,patch_db=warn
ExecStart=/usr/bin/embassy-init
RemainAfterExit=true
StandardOutput=append:/var/log/embassy-init.log
[Install]
WantedBy=embassyd.service

View File

@@ -1,17 +0,0 @@
[Unit]
Description=Embassy Daemon
After=embassy-init.service
Requires=embassy-init.service
[Service]
Type=simple
Environment=RUST_LOG=embassyd=debug,embassy=debug,js_engine=debug,patch_db=warn
ExecStart=/usr/bin/embassyd
Restart=always
RestartSec=3
ManagedOOMPreference=avoid
CPUAccounting=true
CPUWeight=1000
[Install]
WantedBy=multi-user.target

View File

@@ -8,4 +8,14 @@ if [ "$0" != "./install-sdk.sh" ]; then
exit 1 exit 1
fi fi
cargo install --bin=embassy-sdk --bin=embassy-cli --path=. --no-default-features --features=js_engine --locked frontend="../frontend/dist/static"
[ -d "$frontend" ] || mkdir -p "$frontend"
if [ -z "$PLATFORM" ]; then
export PLATFORM=$(uname -m)
fi
cargo install --path=. --no-default-features --features=js_engine,sdk,cli --locked
startbox_loc=$(which startbox)
ln -sf $startbox_loc $(dirname $startbox_loc)/start-cli
ln -sf $startbox_loc $(dirname $startbox_loc)/start-sdk

View File

@@ -1,744 +0,0 @@
{
"db": "PostgreSQL",
"1ce5254f27de971fd87f5ab66d300f2b22433c86617a0dbf796bf2170186dd2e": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
}
},
"query": "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING"
},
"21471490cdc3adb206274cc68e1ea745ffa5da4479478c1fd2158a45324b1930": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text"
]
}
},
"query": "DELETE FROM ssh_keys WHERE fingerprint = $1"
},
"28ea34bbde836e0618c5fc9bb7c36e463c20c841a7d6a0eb15be0f24f4a928ec": {
"describe": {
"columns": [
{
"name": "hostname",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "path",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "username",
"ordinal": 2,
"type_info": "Text"
},
{
"name": "password",
"ordinal": 3,
"type_info": "Text"
}
],
"nullable": [
false,
false,
false,
true
],
"parameters": {
"Left": [
"Int4"
]
}
},
"query": "SELECT hostname, path, username, password FROM cifs_shares WHERE id = $1"
},
"4099028a5c0de578255bf54a67cef6cb0f1e9a4e158260700f1639dd4b438997": {
"describe": {
"columns": [
{
"name": "fingerprint",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "openssh_pubkey",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "created_at",
"ordinal": 2,
"type_info": "Text"
}
],
"nullable": [
false,
false,
false
],
"parameters": {
"Left": [
"Text"
]
}
},
"query": "SELECT * FROM ssh_keys WHERE fingerprint = $1"
},
"4691e3a2ce80b59009ac17124f54f925f61dc5ea371903e62cdffa5d7b67ca96": {
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "logged_in",
"ordinal": 1,
"type_info": "Timestamp"
},
{
"name": "logged_out",
"ordinal": 2,
"type_info": "Timestamp"
},
{
"name": "last_active",
"ordinal": 3,
"type_info": "Timestamp"
},
{
"name": "user_agent",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "metadata",
"ordinal": 5,
"type_info": "Text"
}
],
"nullable": [
false,
false,
true,
false,
true,
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT * FROM session WHERE logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP"
},
"4bcfbefb1eb3181343871a1cd7fc3afb81c2be5c681cfa8b4be0ce70610e9c3a": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text"
]
}
},
"query": "UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = $1"
},
"629be61c3c341c131ddbbff0293a83dbc6afd07cae69d246987f62cf0cc35c2a": {
"describe": {
"columns": [
{
"name": "password",
"ordinal": 0,
"type_info": "Text"
}
],
"nullable": [
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT password FROM account"
},
"687688055e63d27123cdc89a5bbbd8361776290a9411d527eaf1fdb40bef399d": {
"describe": {
"columns": [
{
"name": "key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
false
],
"parameters": {
"Left": [
"Text",
"Text"
]
}
},
"query": "SELECT key FROM tor WHERE package = $1 AND interface = $2"
},
"6d35ccf780fb2bb62586dd1d3df9c1550a41ee580dad3f49d35cb843ebef10ca": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text"
]
}
},
"query": "UPDATE session SET last_active = CURRENT_TIMESTAMP WHERE id = $1 AND logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP"
},
"770c1017734720453dc87b58c385b987c5af5807151ff71a59000014586752e0": {
"describe": {
"columns": [
{
"name": "key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
false
],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
}
},
"query": "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO UPDATE SET package = EXCLUDED.package RETURNING key"
},
"7b64f032d507e8ffe37c41f4c7ad514a66c421a11ab04c26d89a7aa8f6b67210": {
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Int4"
},
{
"name": "package_id",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "created_at",
"ordinal": 2,
"type_info": "Timestamp"
},
{
"name": "code",
"ordinal": 3,
"type_info": "Int4"
},
{
"name": "level",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "title",
"ordinal": 5,
"type_info": "Text"
},
{
"name": "message",
"ordinal": 6,
"type_info": "Text"
},
{
"name": "data",
"ordinal": 7,
"type_info": "Text"
}
],
"nullable": [
false,
true,
false,
false,
false,
false,
false,
true
],
"parameters": {
"Left": [
"Int4",
"Int8"
]
}
},
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications WHERE id < $1 ORDER BY id DESC LIMIT $2"
},
"7c7a3549c997eb75bf964ea65fbb98a73045adf618696cd838d79203ef5383fb": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Bytea",
"Text",
"Text"
]
}
},
"query": "\n INSERT INTO account (\n id,\n server_id,\n hostname,\n password,\n network_key,\n root_ca_key_pem,\n root_ca_cert_pem\n ) VALUES (\n 0, $1, $2, $3, $4, $5, $6\n ) ON CONFLICT (id) DO UPDATE SET\n server_id = EXCLUDED.server_id,\n hostname = EXCLUDED.hostname,\n password = EXCLUDED.password,\n network_key = EXCLUDED.network_key,\n root_ca_key_pem = EXCLUDED.root_ca_key_pem,\n root_ca_cert_pem = EXCLUDED.root_ca_cert_pem\n "
},
"7e0649d839927e57fa03ee51a2c9f96a8bdb0fc97ee8a3c6df1069e1e2b98576": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text"
]
}
},
"query": "DELETE FROM tor WHERE package = $1"
},
"8951b9126fbf60dbb5997241e11e3526b70bccf3e407327917294a993bc17ed5": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
}
},
"query": "INSERT INTO tor (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING"
},
"94d471bb374b4965c6cbedf8c17bbf6bea226d38efaf6559923c79a36d5ca08c": {
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Int4"
},
{
"name": "package_id",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "created_at",
"ordinal": 2,
"type_info": "Timestamp"
},
{
"name": "code",
"ordinal": 3,
"type_info": "Int4"
},
{
"name": "level",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "title",
"ordinal": 5,
"type_info": "Text"
},
{
"name": "message",
"ordinal": 6,
"type_info": "Text"
},
{
"name": "data",
"ordinal": 7,
"type_info": "Text"
}
],
"nullable": [
false,
true,
false,
false,
false,
false,
false,
true
],
"parameters": {
"Left": [
"Int8"
]
}
},
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications ORDER BY id DESC LIMIT $1"
},
"95c4ab4c645f3302568c6ff13d85ab58252362694cf0f56999bf60194d20583a": {
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Int4"
},
{
"name": "hostname",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "path",
"ordinal": 2,
"type_info": "Text"
},
{
"name": "username",
"ordinal": 3,
"type_info": "Text"
},
{
"name": "password",
"ordinal": 4,
"type_info": "Text"
}
],
"nullable": [
false,
false,
false,
false,
true
],
"parameters": {
"Left": []
}
},
"query": "SELECT id, hostname, path, username, password FROM cifs_shares"
},
"a60d6e66719325b08dc4ecfacaf337527233c84eee758ac9be967906e5841d27": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Int4"
]
}
},
"query": "DELETE FROM cifs_shares WHERE id = $1"
},
"a6b0c8909a3a5d6d9156aebfb359424e6b5a1d1402e028219e21726f1ebd282e": {
"describe": {
"columns": [
{
"name": "fingerprint",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "openssh_pubkey",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "created_at",
"ordinal": 2,
"type_info": "Text"
}
],
"nullable": [
false,
false,
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT fingerprint, openssh_pubkey, created_at FROM ssh_keys"
},
"b1147beaaabbed89f2ab8c1e13ec4393a9a8fde2833cf096af766a979d94dee6": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Text",
"Int4"
]
}
},
"query": "UPDATE cifs_shares SET hostname = $1, path = $2, username = $3, password = $4 WHERE id = $5"
},
"d5117054072476377f3c4f040ea429d4c9b2cf534e76f35c80a2bf60e8599cca": {
"describe": {
"columns": [
{
"name": "openssh_pubkey",
"ordinal": 0,
"type_info": "Text"
}
],
"nullable": [
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT openssh_pubkey FROM ssh_keys"
},
"da71f94b29798d1738d2b10b9a721ea72db8cfb362e7181c8226d9297507c62b": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Int4",
"Text",
"Text",
"Text",
"Text"
]
}
},
"query": "INSERT INTO notifications (package_id, code, level, title, message, data) VALUES ($1, $2, $3, $4, $5, $6)"
},
"e185203cf84e43b801dfb23b4159e34aeaef1154dcd3d6811ab504915497ccf7": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Int4"
]
}
},
"query": "DELETE FROM notifications WHERE id = $1"
},
"e545696735f202f9d13cf22a561f3ff3f9aed7f90027a9ba97634bcb47d772f0": {
"describe": {
"columns": [
{
"name": "tor_key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
true
],
"parameters": {
"Left": []
}
},
"query": "SELECT tor_key FROM account WHERE id = 0"
},
"e5843c5b0e7819b29aa1abf2266799bd4f82e761837b526a0972c3d4439a264d": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text"
]
}
},
"query": "INSERT INTO session (id, user_agent, metadata) VALUES ($1, $2, $3)"
},
"e95322a8e2ae3b93f1e974b24c0b81803f1e9ec9e8ebbf15cafddfc1c5a028ed": {
"describe": {
"columns": [
{
"name": "package",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "interface",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "key",
"ordinal": 2,
"type_info": "Bytea"
},
{
"name": "tor_key?",
"ordinal": 3,
"type_info": "Bytea"
}
],
"nullable": [
false,
false,
false,
false
],
"parameters": {
"Left": [
"Text"
]
}
},
"query": "\n SELECT\n network_keys.package,\n network_keys.interface,\n network_keys.key,\n tor.key AS \"tor_key?\"\n FROM\n network_keys\n LEFT JOIN\n tor\n ON\n network_keys.package = tor.package\n AND\n network_keys.interface = tor.interface\n WHERE\n network_keys.package = $1\n "
},
"eb750adaa305bdbf3c5b70aaf59139c7b7569602adb58f2d6b3a94da4f167b0a": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Int4"
]
}
},
"query": "DELETE FROM notifications WHERE id < $1"
},
"ecc765d8205c0876956f95f76944ac6a5f34dd820c4073b7728c7067aab9fded": {
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Int4"
}
],
"nullable": [
false
],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Text"
]
}
},
"query": "INSERT INTO cifs_shares (hostname, path, username, password) VALUES ($1, $2, $3, $4) RETURNING id"
},
"f6d1c5ef0f9d9577bea8382318967b9deb46da75788c7fe6082b43821c22d556": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text"
]
}
},
"query": "INSERT INTO ssh_keys (fingerprint, openssh_pubkey, created_at) VALUES ($1, $2, $3)"
},
"f7d2dae84613bcef330f7403352cc96547f3f6dbec11bf2eadfaf53ad8ab51b5": {
"describe": {
"columns": [
{
"name": "network_key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT network_key FROM account WHERE id = 0"
},
"fe6e4f09f3028e5b6b6259e86cbad285680ce157aae9d7837ac020c8b2945e7f": {
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Int4"
},
{
"name": "password",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "tor_key",
"ordinal": 2,
"type_info": "Bytea"
},
{
"name": "server_id",
"ordinal": 3,
"type_info": "Text"
},
{
"name": "hostname",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "network_key",
"ordinal": 5,
"type_info": "Bytea"
},
{
"name": "root_ca_key_pem",
"ordinal": 6,
"type_info": "Text"
},
{
"name": "root_ca_cert_pem",
"ordinal": 7,
"type_info": "Text"
}
],
"nullable": [
false,
false,
true,
true,
true,
false,
false,
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT * FROM account WHERE id = 0"
}
}

View File

@@ -1,5 +1,6 @@
use ed25519_dalek::{ExpandedSecretKey, SecretKey}; use std::time::SystemTime;
use models::ResultExt;
use ed25519_dalek::SecretKey;
use openssl::pkey::{PKey, Private}; use openssl::pkey::{PKey, Private};
use openssl::x509::X509; use openssl::x509::X509;
use sqlx::PgExecutor; use sqlx::PgExecutor;
@@ -7,13 +8,14 @@ use sqlx::PgExecutor;
use crate::hostname::{generate_hostname, generate_id, Hostname}; use crate::hostname::{generate_hostname, generate_id, Hostname};
use crate::net::keys::Key; use crate::net::keys::Key;
use crate::net::ssl::{generate_key, make_root_cert}; use crate::net::ssl::{generate_key, make_root_cert};
use crate::Error; use crate::prelude::*;
use crate::util::crypto::ed25519_expand_key;
fn hash_password(password: &str) -> Result<String, Error> { fn hash_password(password: &str) -> Result<String, Error> {
argon2::hash_encoded( argon2::hash_encoded(
password.as_bytes(), password.as_bytes(),
&rand::random::<[u8; 16]>()[..], &rand::random::<[u8; 16]>()[..],
&argon2::Config::default(), &argon2::Config::rfc9106_low_mem(),
) )
.with_kind(crate::ErrorKind::PasswordHashGeneration) .with_kind(crate::ErrorKind::PasswordHashGeneration)
} }
@@ -28,11 +30,11 @@ pub struct AccountInfo {
pub root_ca_cert: X509, pub root_ca_cert: X509,
} }
impl AccountInfo { impl AccountInfo {
pub fn new(password: &str) -> Result<Self, Error> { pub fn new(password: &str, start_time: SystemTime) -> Result<Self, Error> {
let server_id = generate_id(); let server_id = generate_id();
let hostname = generate_hostname(); let hostname = generate_hostname();
let root_ca_key = generate_key()?; let root_ca_key = generate_key()?;
let root_ca_cert = make_root_cert(&root_ca_key, &hostname)?; let root_ca_cert = make_root_cert(&root_ca_key, &hostname, start_time)?;
Ok(Self { Ok(Self {
server_id, server_id,
hostname, hostname,
@@ -51,13 +53,23 @@ impl AccountInfo {
let server_id = r.server_id.unwrap_or_else(generate_id); let server_id = r.server_id.unwrap_or_else(generate_id);
let hostname = r.hostname.map(Hostname).unwrap_or_else(generate_hostname); let hostname = r.hostname.map(Hostname).unwrap_or_else(generate_hostname);
let password = r.password; let password = r.password;
let network_key = SecretKey::from_bytes(&r.network_key)?; let network_key = SecretKey::try_from(r.network_key).map_err(|e| {
Error::new(
eyre!("expected vec of len 32, got len {}", e.len()),
ErrorKind::ParseDbField,
)
})?;
let tor_key = if let Some(k) = &r.tor_key { let tor_key = if let Some(k) = &r.tor_key {
ExpandedSecretKey::from_bytes(k)? <[u8; 64]>::try_from(&k[..]).map_err(|_| {
Error::new(
eyre!("expected vec of len 64, got len {}", k.len()),
ErrorKind::ParseDbField,
)
})?
} else { } else {
ExpandedSecretKey::from(&network_key) ed25519_expand_key(&network_key)
}; };
let key = Key::from_pair(None, network_key.to_bytes(), tor_key.to_bytes()); let key = Key::from_pair(None, network_key, tor_key);
let root_ca_key = PKey::private_key_from_pem(r.root_ca_key_pem.as_bytes())?; let root_ca_key = PKey::private_key_from_pem(r.root_ca_key_pem.as_bytes())?;
let root_ca_cert = X509::from_pem(r.root_ca_cert_pem.as_bytes())?; let root_ca_cert = X509::from_pem(r.root_ca_cert_pem.as_bytes())?;

View File

@@ -11,6 +11,7 @@ use tracing::instrument;
use crate::config::{Config, ConfigSpec}; use crate::config::{Config, ConfigSpec};
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::prelude::*;
use crate::procedure::docker::DockerContainers; use crate::procedure::docker::DockerContainers;
use crate::procedure::{PackageProcedure, ProcedureName}; use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;
@@ -56,16 +57,16 @@ pub struct Action {
pub input_spec: ConfigSpec, pub input_spec: ConfigSpec,
} }
impl Action { impl Action {
#[instrument] #[instrument(skip_all)]
pub fn validate( pub fn validate(
&self, &self,
container: &Option<DockerContainers>, _container: &Option<DockerContainers>,
eos_version: &Version, eos_version: &Version,
volumes: &Volumes, volumes: &Volumes,
image_ids: &BTreeSet<ImageId>, image_ids: &BTreeSet<ImageId>,
) -> Result<(), Error> { ) -> Result<(), Error> {
self.implementation self.implementation
.validate(container, eos_version, volumes, image_ids, true) .validate(eos_version, volumes, image_ids, true)
.with_ctx(|_| { .with_ctx(|_| {
( (
crate::ErrorKind::ValidateS9pk, crate::ErrorKind::ValidateS9pk,
@@ -74,7 +75,7 @@ impl Action {
}) })
} }
#[instrument(skip(ctx))] #[instrument(skip_all)]
pub async fn execute( pub async fn execute(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
@@ -120,7 +121,7 @@ fn display_action_result(action_result: ActionResult, matches: &ArgMatches) {
} }
#[command(about = "Executes an action", display(display_action_result))] #[command(about = "Executes an action", display(display_action_result))]
#[instrument(skip(ctx))] #[instrument(skip_all)]
pub async fn action( pub async fn action(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[arg(rename = "id")] pkg_id: PackageId, #[arg(rename = "id")] pkg_id: PackageId,
@@ -130,18 +131,17 @@ pub async fn action(
#[arg(long = "format")] #[arg(long = "format")]
format: Option<IoFormat>, format: Option<IoFormat>,
) -> Result<ActionResult, Error> { ) -> Result<ActionResult, Error> {
let mut db = ctx.db.handle(); let manifest = ctx
let manifest = crate::db::DatabaseModel::new() .db
.package_data() .peek()
.idx_model(&pkg_id)
.and_then(|p| p.installed())
.expect(&mut db)
.await .await
.with_kind(crate::ErrorKind::NotFound)? .as_package_data()
.manifest() .as_idx(&pkg_id)
.get(&mut db) .or_not_found(&pkg_id)?
.await? .as_installed()
.to_owned(); .or_not_found(&pkg_id)?
.as_manifest()
.de()?;
if let Some(action) = manifest.actions.0.get(&action_id) { if let Some(action) = manifest.actions.0.get(&action_id) {
action action

View File

@@ -5,7 +5,6 @@ use chrono::{DateTime, Utc};
use clap::ArgMatches; use clap::ArgMatches;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use josekit::jwk::Jwk; use josekit::jwk::Jwk;
use patch_db::{DbHandle, LockReceipt};
use rpc_toolkit::command; use rpc_toolkit::command;
use rpc_toolkit::command_helpers::prelude::{RequestParts, ResponseParts}; use rpc_toolkit::command_helpers::prelude::{RequestParts, ResponseParts};
use rpc_toolkit::yajrc::RpcError; use rpc_toolkit::yajrc::RpcError;
@@ -17,6 +16,7 @@ use tracing::instrument;
use crate::context::{CliContext, RpcContext}; use crate::context::{CliContext, RpcContext};
use crate::middleware::auth::{AsLogoutSessionId, HasLoggedOutSessions, HashSessionToken}; use crate::middleware::auth::{AsLogoutSessionId, HasLoggedOutSessions, HashSessionToken};
use crate::middleware::encrypt::EncryptedWire; use crate::middleware::encrypt::EncryptedWire;
use crate::prelude::*;
use crate::util::display_none; use crate::util::display_none;
use crate::util::serde::{display_serializable, IoFormat}; use crate::util::serde::{display_serializable, IoFormat};
use crate::{ensure_code, Error, ResultExt}; use crate::{ensure_code, Error, ResultExt};
@@ -84,13 +84,13 @@ fn gen_pwd() {
argon2::hash_encoded( argon2::hash_encoded(
b"testing1234", b"testing1234",
&rand::random::<[u8; 16]>()[..], &rand::random::<[u8; 16]>()[..],
&argon2::Config::default() &argon2::Config::rfc9106_low_mem()
) )
.unwrap() .unwrap()
) )
} }
#[instrument(skip(ctx, password))] #[instrument(skip_all)]
async fn cli_login( async fn cli_login(
ctx: CliContext, ctx: CliContext,
password: Option<PasswordType>, password: Option<PasswordType>,
@@ -145,7 +145,7 @@ where
display(display_none), display(display_none),
metadata(authenticated = false) metadata(authenticated = false)
)] )]
#[instrument(skip(ctx, password))] #[instrument(skip_all)]
pub async fn login( pub async fn login(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[request] req: &RequestParts, #[request] req: &RequestParts,
@@ -160,7 +160,7 @@ pub async fn login(
) -> Result<(), Error> { ) -> Result<(), Error> {
let password = password.unwrap_or_default().decrypt(&ctx)?; let password = password.unwrap_or_default().decrypt(&ctx)?;
let mut handle = ctx.secret_store.acquire().await?; let mut handle = ctx.secret_store.acquire().await?;
check_password_against_db(&mut handle, &password).await?; check_password_against_db(handle.as_mut(), &password).await?;
let hash_token = HashSessionToken::new(); let hash_token = HashSessionToken::new();
let user_agent = req.headers.get("user-agent").and_then(|h| h.to_str().ok()); let user_agent = req.headers.get("user-agent").and_then(|h| h.to_str().ok());
@@ -172,7 +172,7 @@ pub async fn login(
user_agent, user_agent,
metadata, metadata,
) )
.execute(&mut handle) .execute(handle.as_mut())
.await?; .await?;
res.headers.insert( res.headers.insert(
"set-cookie", "set-cookie",
@@ -183,7 +183,7 @@ pub async fn login(
} }
#[command(display(display_none), metadata(authenticated = false))] #[command(display(display_none), metadata(authenticated = false))]
#[instrument(skip(ctx))] #[instrument(skip_all)]
pub async fn logout( pub async fn logout(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[request] req: &RequestParts, #[request] req: &RequestParts,
@@ -250,7 +250,7 @@ fn display_sessions(arg: SessionList, matches: &ArgMatches) {
} }
#[command(display(display_sessions))] #[command(display(display_sessions))]
#[instrument(skip(ctx))] #[instrument(skip_all)]
pub async fn list( pub async fn list(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[request] req: &RequestParts, #[request] req: &RequestParts,
@@ -263,7 +263,7 @@ pub async fn list(
sessions: sqlx::query!( sessions: sqlx::query!(
"SELECT * FROM session WHERE logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP" "SELECT * FROM session WHERE logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP"
) )
.fetch_all(&mut ctx.secret_store.acquire().await?) .fetch_all(ctx.secret_store.acquire().await?.as_mut())
.await? .await?
.into_iter() .into_iter()
.map(|row| { .map(|row| {
@@ -296,7 +296,7 @@ impl AsLogoutSessionId for KillSessionId {
} }
#[command(display(display_none))] #[command(display(display_none))]
#[instrument(skip(ctx))] #[instrument(skip_all)]
pub async fn kill( pub async fn kill(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[arg(parse(parse_comma_separated))] ids: Vec<String>, #[arg(parse(parse_comma_separated))] ids: Vec<String>,
@@ -305,7 +305,7 @@ pub async fn kill(
Ok(()) Ok(())
} }
#[instrument(skip(ctx, old_password, new_password))] #[instrument(skip_all)]
async fn cli_reset_password( async fn cli_reset_password(
ctx: CliContext, ctx: CliContext,
old_password: Option<PasswordType>, old_password: Option<PasswordType>,
@@ -343,33 +343,12 @@ async fn cli_reset_password(
Ok(()) Ok(())
} }
pub struct SetPasswordReceipt(LockReceipt<String, ()>);
impl SetPasswordReceipt {
pub async fn new<Db: DbHandle>(db: &mut Db) -> Result<Self, Error> {
let mut locks = Vec::new();
let setup = Self::setup(&mut locks);
Ok(setup(&db.lock_all(locks).await?)?)
}
pub fn setup(
locks: &mut Vec<patch_db::LockTargetId>,
) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
let password_hash = crate::db::DatabaseModel::new()
.server_info()
.password_hash()
.make_locker(patch_db::LockType::Write)
.add_to_keys(locks);
move |skeleton_key| Ok(Self(password_hash.verify(skeleton_key)?))
}
}
#[command( #[command(
rename = "reset-password", rename = "reset-password",
custom_cli(cli_reset_password(async, context(CliContext))), custom_cli(cli_reset_password(async, context(CliContext))),
display(display_none) display(display_none)
)] )]
#[instrument(skip(ctx, old_password, new_password))] #[instrument(skip_all)]
pub async fn reset_password( pub async fn reset_password(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[arg(rename = "old-password")] old_password: Option<PasswordType>, #[arg(rename = "old-password")] old_password: Option<PasswordType>,
@@ -389,13 +368,14 @@ pub async fn reset_password(
} }
account.set_password(&new_password)?; account.set_password(&new_password)?;
account.save(&ctx.secret_store).await?; account.save(&ctx.secret_store).await?;
crate::db::DatabaseModel::new() let account_password = &account.password;
.server_info() ctx.db
.password_hash() .mutate(|d| {
.put(&mut ctx.db.handle(), &account.password) d.as_server_info_mut()
.await?; .as_password_hash_mut()
.ser(account_password)
Ok(()) })
.await
} }
#[command( #[command(
@@ -403,7 +383,7 @@ pub async fn reset_password(
display(display_none), display(display_none),
metadata(authenticated = false) metadata(authenticated = false)
)] )]
#[instrument(skip(ctx))] #[instrument(skip_all)]
pub async fn get_pubkey(#[context] ctx: RpcContext) -> Result<Jwk, RpcError> { pub async fn get_pubkey(#[context] ctx: RpcContext) -> Result<Jwk, RpcError> {
let secret = ctx.as_ref().clone(); let secret = ctx.as_ref().clone();
let pub_key = secret.to_public_key()?; let pub_key = secret.to_public_key()?;

View File

@@ -1,13 +1,17 @@
use std::collections::{BTreeMap, BTreeSet}; use std::collections::BTreeMap;
use std::path::PathBuf; use std::panic::UnwindSafe;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use chrono::Utc; use chrono::Utc;
use clap::ArgMatches; use clap::ArgMatches;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use helpers::AtomicFile; use helpers::AtomicFile;
use patch_db::{DbHandle, LockType, PatchDbHandle}; use imbl::OrdSet;
use models::Version;
use rpc_toolkit::command; use rpc_toolkit::command;
use tokio::io::AsyncWriteExt; use tokio::io::AsyncWriteExt;
use tokio::sync::Mutex;
use tracing::instrument; use tracing::instrument;
use super::target::BackupTargetId; use super::target::BackupTargetId;
@@ -17,20 +21,22 @@ use crate::backup::os::OsBackup;
use crate::backup::{BackupReport, ServerBackupReport}; use crate::backup::{BackupReport, ServerBackupReport};
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::db::model::BackupProgress; use crate::db::model::BackupProgress;
use crate::db::package::get_packages;
use crate::disk::mount::backup::BackupMountGuard; use crate::disk::mount::backup::BackupMountGuard;
use crate::disk::mount::filesystem::ReadWrite; use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::TmpMountGuard; use crate::disk::mount::guard::TmpMountGuard;
use crate::manager::BackupReturn;
use crate::notifications::NotificationLevel; use crate::notifications::NotificationLevel;
use crate::prelude::*;
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;
use crate::status::MainStatus;
use crate::util::display_none; use crate::util::display_none;
use crate::util::io::dir_copy;
use crate::util::serde::IoFormat; use crate::util::serde::IoFormat;
use crate::version::VersionT; use crate::version::VersionT;
use crate::{Error, ErrorKind, ResultExt};
fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<BTreeSet<PackageId>, Error> { fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<OrdSet<PackageId>, Error> {
arg.split(',') arg.split(',')
.map(|s| s.trim().parse().map_err(Error::from)) .map(|s| s.trim().parse::<PackageId>().map_err(Error::from))
.collect() .collect()
} }
@@ -47,55 +53,52 @@ pub async fn backup_all(
long = "package-ids", long = "package-ids",
parse(parse_comma_separated) parse(parse_comma_separated)
)] )]
package_ids: Option<BTreeSet<PackageId>>, package_ids: Option<OrdSet<PackageId>>,
#[arg] password: crate::auth::PasswordType, #[arg] password: crate::auth::PasswordType,
) -> Result<(), Error> { ) -> Result<(), Error> {
let mut db = ctx.db.handle(); let db = ctx.db.peek().await;
let old_password_decrypted = old_password let old_password_decrypted = old_password
.as_ref() .as_ref()
.unwrap_or(&password) .unwrap_or(&password)
.clone() .clone()
.decrypt(&ctx)?; .decrypt(&ctx)?;
let password = password.decrypt(&ctx)?; let password = password.decrypt(&ctx)?;
check_password_against_db(&mut ctx.secret_store.acquire().await?, &password).await?; check_password_against_db(ctx.secret_store.acquire().await?.as_mut(), &password).await?;
let fs = target_id let fs = target_id
.load(&mut ctx.secret_store.acquire().await?) .load(ctx.secret_store.acquire().await?.as_mut())
.await?; .await?;
let mut backup_guard = BackupMountGuard::mount( let mut backup_guard = BackupMountGuard::mount(
TmpMountGuard::mount(&fs, ReadWrite).await?, TmpMountGuard::mount(&fs, ReadWrite).await?,
&old_password_decrypted, &old_password_decrypted,
) )
.await?; .await?;
let all_packages = crate::db::DatabaseModel::new() let package_ids = if let Some(ids) = package_ids {
.package_data() ids.into_iter()
.get(&mut db) .flat_map(|package_id| {
.await? let version = db
.0 .as_package_data()
.keys() .as_idx(&package_id)?
.into_iter() .as_manifest()
.cloned() .as_version()
.collect(); .de()
let package_ids = package_ids.unwrap_or(all_packages); .ok()?;
Some((package_id, version))
})
.collect()
} else {
get_packages(db.clone())?.into_iter().collect()
};
if old_password.is_some() { if old_password.is_some() {
backup_guard.change_password(&password)?; backup_guard.change_password(&password)?;
} }
assure_backing_up(&mut db, &package_ids).await?; assure_backing_up(&ctx.db, &package_ids).await?;
tokio::task::spawn(async move { tokio::task::spawn(async move {
let backup_res = perform_backup(&ctx, &mut db, backup_guard, &package_ids).await; let backup_res = perform_backup(&ctx, backup_guard, &package_ids).await;
let backup_progress = crate::db::DatabaseModel::new()
.server_info()
.status_info()
.backup_progress();
backup_progress
.clone()
.lock(&mut db, LockType::Write)
.await
.expect("failed to lock server status");
match backup_res { match backup_res {
Ok(report) if report.iter().all(|(_, rep)| rep.error.is_none()) => ctx Ok(report) if report.iter().all(|(_, rep)| rep.error.is_none()) => ctx
.notification_manager .notification_manager
.notify( .notify(
&mut db, ctx.db.clone(),
None, None,
NotificationLevel::Success, NotificationLevel::Success,
"Backup Complete".to_owned(), "Backup Complete".to_owned(),
@@ -105,7 +108,10 @@ pub async fn backup_all(
attempted: true, attempted: true,
error: None, error: None,
}, },
packages: report, packages: report
.into_iter()
.map(|((package_id, _), value)| (package_id, value))
.collect(),
}, },
None, None,
) )
@@ -114,7 +120,7 @@ pub async fn backup_all(
Ok(report) => ctx Ok(report) => ctx
.notification_manager .notification_manager
.notify( .notify(
&mut db, ctx.db.clone(),
None, None,
NotificationLevel::Warning, NotificationLevel::Warning,
"Backup Complete".to_owned(), "Backup Complete".to_owned(),
@@ -124,7 +130,10 @@ pub async fn backup_all(
attempted: true, attempted: true,
error: None, error: None,
}, },
packages: report, packages: report
.into_iter()
.map(|((package_id, _), value)| (package_id, value))
.collect(),
}, },
None, None,
) )
@@ -135,7 +144,7 @@ pub async fn backup_all(
tracing::debug!("{:?}", e); tracing::debug!("{:?}", e);
ctx.notification_manager ctx.notification_manager
.notify( .notify(
&mut db, ctx.db.clone(),
None, None,
NotificationLevel::Error, NotificationLevel::Error,
"Backup Failed".to_owned(), "Backup Failed".to_owned(),
@@ -153,196 +162,113 @@ pub async fn backup_all(
.expect("failed to send notification"); .expect("failed to send notification");
} }
} }
backup_progress ctx.db
.delete(&mut db) .mutate(|v| {
.await v.as_server_info_mut()
.expect("failed to change server status"); .as_status_info_mut()
.as_backup_progress_mut()
.ser(&None)
})
.await?;
Ok::<(), Error>(())
}); });
Ok(()) Ok(())
} }
#[instrument(skip(db, packages))] #[instrument(skip(db, packages))]
async fn assure_backing_up( async fn assure_backing_up(
db: &mut PatchDbHandle, db: &PatchDb,
packages: impl IntoIterator<Item = &PackageId>, packages: impl IntoIterator<Item = &(PackageId, Version)> + UnwindSafe + Send,
) -> Result<(), Error> { ) -> Result<(), Error> {
let mut tx = db.begin().await?; db.mutate(|v| {
let mut backing_up = crate::db::DatabaseModel::new() let backing_up = v
.server_info() .as_server_info_mut()
.status_info() .as_status_info_mut()
.backup_progress() .as_backup_progress_mut();
.get_mut(&mut tx) if backing_up
.await?; .clone()
.de()?
if backing_up .iter()
.iter() .flat_map(|x| x.values())
.flat_map(|x| x.values()) .fold(false, |acc, x| {
.fold(false, |acc, x| { if !x.complete {
if !x.complete { return true;
return true; }
} acc
acc })
}) {
{ return Err(Error::new(
return Err(Error::new( eyre!("Server is already backing up!"),
eyre!("Server is already backing up!"), ErrorKind::InvalidRequest,
crate::ErrorKind::InvalidRequest, ));
)); }
} backing_up.ser(&Some(
*backing_up = Some( packages
packages .into_iter()
.into_iter() .map(|(x, _)| (x.clone(), BackupProgress { complete: false }))
.map(|x| (x.clone(), BackupProgress { complete: false })) .collect(),
.collect(), ))?;
); Ok(())
backing_up.save(&mut tx).await?; })
tx.commit().await?; .await
Ok(())
} }
#[instrument(skip(ctx, db, backup_guard))] #[instrument(skip(ctx, backup_guard))]
async fn perform_backup<Db: DbHandle>( async fn perform_backup(
ctx: &RpcContext, ctx: &RpcContext,
mut db: Db, backup_guard: BackupMountGuard<TmpMountGuard>,
mut backup_guard: BackupMountGuard<TmpMountGuard>, package_ids: &OrdSet<(PackageId, Version)>,
package_ids: &BTreeSet<PackageId>, ) -> Result<BTreeMap<(PackageId, Version), PackageBackupReport>, Error> {
) -> Result<BTreeMap<PackageId, PackageBackupReport>, Error> {
let mut backup_report = BTreeMap::new(); let mut backup_report = BTreeMap::new();
for package_id in crate::db::DatabaseModel::new() let backup_guard = Arc::new(Mutex::new(backup_guard));
.package_data()
.keys(&mut db)
.await?
.into_iter()
.filter(|id| package_ids.contains(id))
{
let mut tx = db.begin().await?; // for lock scope
let installed_model = if let Some(installed_model) = crate::db::DatabaseModel::new()
.package_data()
.idx_model(&package_id)
.and_then(|m| m.installed())
.check(&mut tx)
.await?
{
installed_model
} else {
continue;
};
let main_status_model = installed_model.clone().status().main();
main_status_model.lock(&mut tx, LockType::Write).await?; for package_id in package_ids {
let (started, health) = match main_status_model.get(&mut tx).await?.into_owned() { let (response, _report) = match ctx
MainStatus::Starting { .. } => (Some(Utc::now()), Default::default()), .managers
MainStatus::Running { started, health } => (Some(started), health.clone()), .get(package_id)
MainStatus::Stopped | MainStatus::Stopping | MainStatus::Restarting => { .await
(None, Default::default()) .ok_or_else(|| Error::new(eyre!("Manager not found"), ErrorKind::InvalidRequest))?
.backup(backup_guard.clone())
.await
{
BackupReturn::Ran { report, res } => (res, report),
BackupReturn::AlreadyRunning(report) => {
backup_report.insert(package_id.clone(), report);
continue;
} }
MainStatus::BackingUp { .. } => { BackupReturn::Error(error) => {
tracing::warn!("Backup thread error");
tracing::debug!("{error:?}");
backup_report.insert( backup_report.insert(
package_id, package_id.clone(),
PackageBackupReport { PackageBackupReport {
error: Some( error: Some("Backup thread error".to_owned()),
"Can't do backup because service is in a backing up state".to_owned(),
),
}, },
); );
continue; continue;
} }
}; };
main_status_model
.put(
&mut tx,
&MainStatus::BackingUp {
started,
health: health.clone(),
},
)
.await?;
tx.save().await?; // drop locks
let manifest = installed_model.clone().manifest().get(&mut db).await?;
ctx.managers
.get(&(manifest.id.clone(), manifest.version.clone()))
.await
.ok_or_else(|| {
Error::new(eyre!("Manager not found"), crate::ErrorKind::InvalidRequest)
})?
.synchronize()
.await;
let mut tx = db.begin().await?;
installed_model.lock(&mut tx, LockType::Write).await?;
let guard = backup_guard.mount_package_backup(&package_id).await?;
let res = manifest
.backup
.create(
ctx,
&mut tx,
&package_id,
&manifest.title,
&manifest.version,
&manifest.interfaces,
&manifest.volumes,
)
.await;
guard.unmount().await?;
backup_report.insert( backup_report.insert(
package_id.clone(), package_id.clone(),
PackageBackupReport { PackageBackupReport {
error: res.as_ref().err().map(|e| e.to_string()), error: response.as_ref().err().map(|e| e.to_string()),
}, },
); );
if let Ok(pkg_meta) = res { if let Ok(pkg_meta) = response {
installed_model
.last_backup()
.put(&mut tx, &Some(pkg_meta.timestamp))
.await?;
backup_guard backup_guard
.lock()
.await
.metadata .metadata
.package_backups .package_backups
.insert(package_id.clone(), pkg_meta); .insert(package_id.0.clone(), pkg_meta);
} }
main_status_model
.put(
&mut tx,
&match started {
Some(started) => MainStatus::Running { started, health },
None => MainStatus::Stopped,
},
)
.await?;
let mut backup_progress = crate::db::DatabaseModel::new()
.server_info()
.status_info()
.backup_progress()
.get_mut(&mut tx)
.await?;
if backup_progress.is_none() {
*backup_progress = Some(Default::default());
}
if let Some(mut backup_progress) = backup_progress
.as_mut()
.and_then(|bp| bp.get_mut(&package_id))
{
(*backup_progress).complete = true;
}
backup_progress.save(&mut tx).await?;
tx.save().await?;
} }
let ui = crate::db::DatabaseModel::new() let ui = ctx.db.peek().await.into_ui().de()?;
.ui()
.get(&mut db)
.await?
.into_owned();
let mut os_backup_file = AtomicFile::new( let mut os_backup_file = AtomicFile::new(
backup_guard.as_ref().join("os-backup.cbor"), backup_guard.lock().await.as_ref().join("os-backup.cbor"),
None::<PathBuf>, None::<PathBuf>,
) )
.await .await
@@ -358,7 +284,28 @@ async fn perform_backup<Db: DbHandle>(
.await .await
.with_kind(ErrorKind::Filesystem)?; .with_kind(ErrorKind::Filesystem)?;
let luks_folder_old = backup_guard.lock().await.as_ref().join("luks.old");
if tokio::fs::metadata(&luks_folder_old).await.is_ok() {
tokio::fs::remove_dir_all(&luks_folder_old).await?;
}
let luks_folder_bak = backup_guard.lock().await.as_ref().join("luks");
if tokio::fs::metadata(&luks_folder_bak).await.is_ok() {
tokio::fs::rename(&luks_folder_bak, &luks_folder_old).await?;
}
let luks_folder = Path::new("/media/embassy/config/luks");
if tokio::fs::metadata(&luks_folder).await.is_ok() {
dir_copy(&luks_folder, &luks_folder_bak, None).await?;
}
let timestamp = Some(Utc::now()); let timestamp = Some(Utc::now());
let mut backup_guard = Arc::try_unwrap(backup_guard)
.map_err(|_err| {
Error::new(
eyre!("Backup guard could not ensure that the others where dropped"),
ErrorKind::Unknown,
)
})?
.into_inner();
backup_guard.unencrypted_metadata.version = crate::version::Current::new().semver().into(); backup_guard.unencrypted_metadata.version = crate::version::Current::new().semver().into();
backup_guard.unencrypted_metadata.full = true; backup_guard.unencrypted_metadata.full = true;
@@ -367,10 +314,9 @@ async fn perform_backup<Db: DbHandle>(
backup_guard.save_and_unmount().await?; backup_guard.save_and_unmount().await?;
crate::db::DatabaseModel::new() ctx.db
.server_info() .mutate(|v| v.as_server_info_mut().as_last_backup_mut().ser(&timestamp))
.last_backup()
.put(&mut db, &timestamp)
.await?; .await?;
Ok(backup_report) Ok(backup_report)
} }

View File

@@ -1,11 +1,11 @@
use std::collections::{BTreeMap, BTreeSet}; use std::collections::{BTreeMap, BTreeSet};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::Arc;
use chrono::{DateTime, Utc}; use chrono::{DateTime, Utc};
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use helpers::AtomicFile; use helpers::AtomicFile;
use models::ImageId; use models::{ImageId, OptionExt};
use patch_db::{DbHandle, HasModel};
use reqwest::Url; use reqwest::Url;
use rpc_toolkit::command; use rpc_toolkit::command;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
@@ -15,10 +15,11 @@ use tracing::instrument;
use self::target::PackageBackupInfo; use self::target::PackageBackupInfo;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::dependencies::reconfigure_dependents_with_live_pointers;
use crate::install::PKG_ARCHIVE_DIR; use crate::install::PKG_ARCHIVE_DIR;
use crate::net::interface::{InterfaceId, Interfaces}; use crate::manager::manager_seed::ManagerSeed;
use crate::net::interface::InterfaceId;
use crate::net::keys::Key; use crate::net::keys::Key;
use crate::prelude::*;
use crate::procedure::docker::DockerContainers; use crate::procedure::docker::DockerContainers;
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName}; use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;
@@ -47,7 +48,7 @@ pub struct ServerBackupReport {
#[derive(Debug, Deserialize, Serialize)] #[derive(Debug, Deserialize, Serialize)]
pub struct PackageBackupReport { pub struct PackageBackupReport {
error: Option<String>, pub error: Option<String>,
} }
#[command(subcommands(backup_bulk::backup_all, target::target))] #[command(subcommands(backup_bulk::backup_all, target::target))]
@@ -71,6 +72,7 @@ struct BackupMetadata {
} }
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)] #[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
#[model = "Model<Self>"]
pub struct BackupActions { pub struct BackupActions {
pub create: PackageProcedure, pub create: PackageProcedure,
pub restore: PackageProcedure, pub restore: PackageProcedure,
@@ -78,34 +80,29 @@ pub struct BackupActions {
impl BackupActions { impl BackupActions {
pub fn validate( pub fn validate(
&self, &self,
container: &Option<DockerContainers>, _container: &Option<DockerContainers>,
eos_version: &Version, eos_version: &Version,
volumes: &Volumes, volumes: &Volumes,
image_ids: &BTreeSet<ImageId>, image_ids: &BTreeSet<ImageId>,
) -> Result<(), Error> { ) -> Result<(), Error> {
self.create self.create
.validate(container, eos_version, volumes, image_ids, false) .validate(eos_version, volumes, image_ids, false)
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Backup Create"))?; .with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Backup Create"))?;
self.restore self.restore
.validate(container, eos_version, volumes, image_ids, false) .validate(eos_version, volumes, image_ids, false)
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Backup Restore"))?; .with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Backup Restore"))?;
Ok(()) Ok(())
} }
#[instrument(skip(ctx, db))] #[instrument(skip_all)]
pub async fn create<Db: DbHandle>( pub async fn create(&self, seed: Arc<ManagerSeed>) -> Result<PackageBackupInfo, Error> {
&self, let manifest = &seed.manifest;
ctx: &RpcContext, let mut volumes = seed.manifest.volumes.to_readonly();
db: &mut Db, let ctx = &seed.ctx;
pkg_id: &PackageId, let pkg_id = &manifest.id;
pkg_title: &str, let pkg_version = &manifest.version;
pkg_version: &Version,
interfaces: &Interfaces,
volumes: &Volumes,
) -> Result<PackageBackupInfo, Error> {
let mut volumes = volumes.to_readonly();
volumes.insert(VolumeId::Backup, Volume::Backup { readonly: false }); volumes.insert(VolumeId::Backup, Volume::Backup { readonly: false });
let backup_dir = backup_dir(pkg_id); let backup_dir = backup_dir(&manifest.id);
if tokio::fs::metadata(&backup_dir).await.is_err() { if tokio::fs::metadata(&backup_dir).await.is_err() {
tokio::fs::create_dir_all(&backup_dir).await? tokio::fs::create_dir_all(&backup_dir).await?
} }
@@ -122,29 +119,29 @@ impl BackupActions {
.await? .await?
.map_err(|e| eyre!("{}", e.1)) .map_err(|e| eyre!("{}", e.1))
.with_kind(crate::ErrorKind::Backup)?; .with_kind(crate::ErrorKind::Backup)?;
let (network_keys, tor_keys) = Key::for_package(&ctx.secret_store, pkg_id) let (network_keys, tor_keys): (Vec<_>, Vec<_>) =
.await? Key::for_package(&ctx.secret_store, pkg_id)
.into_iter() .await?
.filter_map(|k| { .into_iter()
let interface = k.interface().map(|(_, i)| i)?; .filter_map(|k| {
Some(( let interface = k.interface().map(|(_, i)| i)?;
(interface.clone(), Base64(k.as_bytes())), Some((
(interface, Base32(k.tor_key().as_bytes())), (interface.clone(), Base64(k.as_bytes())),
)) (interface, Base32(k.tor_key().as_bytes())),
}) ))
.unzip(); })
let marketplace_url = crate::db::DatabaseModel::new() .unzip();
.package_data() let marketplace_url = ctx
.idx_model(pkg_id) .db
.expect(db) .peek()
.await? .await
.installed() .as_package_data()
.expect(db) .as_idx(&pkg_id)
.await? .or_not_found(pkg_id)?
.marketplace_url() .expect_as_installed()?
.get(db) .as_installed()
.await? .as_marketplace_url()
.into_owned(); .de()?;
let tmp_path = Path::new(BACKUP_DIR) let tmp_path = Path::new(BACKUP_DIR)
.join(pkg_id) .join(pkg_id)
.join(format!("{}.s9pk", pkg_id)); .join(format!("{}.s9pk", pkg_id));
@@ -172,6 +169,8 @@ impl BackupActions {
let mut outfile = AtomicFile::new(&metadata_path, None::<PathBuf>) let mut outfile = AtomicFile::new(&metadata_path, None::<PathBuf>)
.await .await
.with_kind(ErrorKind::Filesystem)?; .with_kind(ErrorKind::Filesystem)?;
let network_keys = network_keys.into_iter().collect();
let tor_keys = tor_keys.into_iter().collect();
outfile outfile
.write_all(&IoFormat::Cbor.to_vec(&BackupMetadata { .write_all(&IoFormat::Cbor.to_vec(&BackupMetadata {
timestamp, timestamp,
@@ -183,22 +182,20 @@ impl BackupActions {
outfile.save().await.with_kind(ErrorKind::Filesystem)?; outfile.save().await.with_kind(ErrorKind::Filesystem)?;
Ok(PackageBackupInfo { Ok(PackageBackupInfo {
os_version: Current::new().semver().into(), os_version: Current::new().semver().into(),
title: pkg_title.to_owned(), title: manifest.title.clone(),
version: pkg_version.clone(), version: pkg_version.clone(),
timestamp, timestamp,
}) })
} }
#[instrument(skip(ctx, db))] #[instrument(skip_all)]
pub async fn restore<Db: DbHandle>( pub async fn restore(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
db: &mut Db,
pkg_id: &PackageId, pkg_id: &PackageId,
pkg_version: &Version, pkg_version: &Version,
interfaces: &Interfaces,
volumes: &Volumes, volumes: &Volumes,
) -> Result<(), Error> { ) -> Result<Option<Url>, Error> {
let mut volumes = volumes.clone(); let mut volumes = volumes.clone();
volumes.insert(VolumeId::Backup, Volume::Backup { readonly: true }); volumes.insert(VolumeId::Backup, Volume::Backup { readonly: true });
self.restore self.restore
@@ -223,32 +220,7 @@ impl BackupActions {
) )
})?, })?,
)?; )?;
let pde = crate::db::DatabaseModel::new()
.package_data()
.idx_model(pkg_id)
.expect(db)
.await?
.installed()
.expect(db)
.await?;
pde.marketplace_url()
.put(db, &metadata.marketplace_url)
.await?;
let entry = crate::db::DatabaseModel::new() Ok(metadata.marketplace_url)
.package_data()
.idx_model(pkg_id)
.expect(db)
.await?
.installed()
.expect(db)
.await?
.get(db)
.await?;
let receipts = crate::config::ConfigReceipts::new(db).await?;
reconfigure_dependents_with_live_pointers(ctx, db, &receipts, &entry).await?;
Ok(())
} }
} }

View File

@@ -1,12 +1,13 @@
use openssl::pkey::PKey;
use openssl::x509::X509;
use patch_db::Value;
use serde::{Deserialize, Serialize};
use crate::account::AccountInfo; use crate::account::AccountInfo;
use crate::hostname::{generate_hostname, generate_id, Hostname}; use crate::hostname::{generate_hostname, generate_id, Hostname};
use crate::net::keys::Key; use crate::net::keys::Key;
use crate::prelude::*;
use crate::util::serde::Base64; use crate::util::serde::Base64;
use crate::Error;
use openssl::pkey::PKey;
use openssl::x509::X509;
use serde::{Deserialize, Serialize};
use serde_json::Value;
pub struct OsBackup { pub struct OsBackup {
pub account: AccountInfo, pub account: AccountInfo,
@@ -19,11 +20,11 @@ impl<'de> Deserialize<'de> for OsBackup {
{ {
let tagged = OsBackupSerDe::deserialize(deserializer)?; let tagged = OsBackupSerDe::deserialize(deserializer)?;
match tagged.version { match tagged.version {
0 => serde_json::from_value::<OsBackupV0>(tagged.rest) 0 => patch_db::value::from_value::<OsBackupV0>(tagged.rest)
.map_err(serde::de::Error::custom)? .map_err(serde::de::Error::custom)?
.project() .project()
.map_err(serde::de::Error::custom), .map_err(serde::de::Error::custom),
1 => serde_json::from_value::<OsBackupV1>(tagged.rest) 1 => patch_db::value::from_value::<OsBackupV1>(tagged.rest)
.map_err(serde::de::Error::custom)? .map_err(serde::de::Error::custom)?
.project() .project()
.map_err(serde::de::Error::custom), .map_err(serde::de::Error::custom),
@@ -40,7 +41,7 @@ impl Serialize for OsBackup {
{ {
OsBackupSerDe { OsBackupSerDe {
version: 1, version: 1,
rest: serde_json::to_value( rest: patch_db::value::to_value(
&OsBackupV1::unproject(self).map_err(serde::ser::Error::custom)?, &OsBackupV1::unproject(self).map_err(serde::ser::Error::custom)?,
) )
.map_err(serde::ser::Error::custom)?, .map_err(serde::ser::Error::custom)?,

View File

@@ -5,11 +5,9 @@ use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use clap::ArgMatches; use clap::ArgMatches;
use color_eyre::eyre::eyre; use futures::future::BoxFuture;
use futures::{future::BoxFuture, stream}; use futures::{stream, FutureExt, StreamExt};
use futures::{FutureExt, StreamExt};
use openssl::x509::X509; use openssl::x509::X509;
use patch_db::{DbHandle, PatchDbHandle};
use rpc_toolkit::command; use rpc_toolkit::command;
use sqlx::Connection; use sqlx::Connection;
use tokio::fs::File; use tokio::fs::File;
@@ -21,7 +19,7 @@ use crate::backup::os::OsBackup;
use crate::backup::BackupMetadata; use crate::backup::BackupMetadata;
use crate::context::rpc::RpcContextConfig; use crate::context::rpc::RpcContextConfig;
use crate::context::{RpcContext, SetupContext}; use crate::context::{RpcContext, SetupContext};
use crate::db::model::{PackageDataEntry, StaticFiles}; use crate::db::model::{PackageDataEntry, PackageDataEntryRestoring, StaticFiles};
use crate::disk::mount::backup::{BackupMountGuard, PackageBackupMountGuard}; use crate::disk::mount::backup::{BackupMountGuard, PackageBackupMountGuard};
use crate::disk::mount::filesystem::ReadWrite; use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::TmpMountGuard; use crate::disk::mount::guard::TmpMountGuard;
@@ -30,6 +28,7 @@ use crate::init::init;
use crate::install::progress::InstallProgress; use crate::install::progress::InstallProgress;
use crate::install::{download_install_s9pk, PKG_PUBLIC_DIR}; use crate::install::{download_install_s9pk, PKG_PUBLIC_DIR};
use crate::notifications::NotificationLevel; use crate::notifications::NotificationLevel;
use crate::prelude::*;
use crate::s9pk::manifest::{Manifest, PackageId}; use crate::s9pk::manifest::{Manifest, PackageId};
use crate::s9pk::reader::S9pkReader; use crate::s9pk::reader::S9pkReader;
use crate::setup::SetupStatus; use crate::setup::SetupStatus;
@@ -37,7 +36,6 @@ use crate::util::display_none;
use crate::util::io::dir_size; use crate::util::io::dir_size;
use crate::util::serde::IoFormat; use crate::util::serde::IoFormat;
use crate::volume::{backup_dir, BACKUP_DIR, PKG_VOLUME_DIR}; use crate::volume::{backup_dir, BACKUP_DIR, PKG_VOLUME_DIR};
use crate::{Error, ResultExt};
fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<Vec<PackageId>, Error> { fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<Vec<PackageId>, Error> {
arg.split(',') arg.split(',')
@@ -53,26 +51,24 @@ pub async fn restore_packages_rpc(
#[arg(rename = "target-id")] target_id: BackupTargetId, #[arg(rename = "target-id")] target_id: BackupTargetId,
#[arg] password: String, #[arg] password: String,
) -> Result<(), Error> { ) -> Result<(), Error> {
let mut db = ctx.db.handle();
let fs = target_id let fs = target_id
.load(&mut ctx.secret_store.acquire().await?) .load(ctx.secret_store.acquire().await?.as_mut())
.await?; .await?;
let backup_guard = let backup_guard =
BackupMountGuard::mount(TmpMountGuard::mount(&fs, ReadWrite).await?, &password).await?; BackupMountGuard::mount(TmpMountGuard::mount(&fs, ReadWrite).await?, &password).await?;
let (backup_guard, tasks, _) = restore_packages(&ctx, &mut db, backup_guard, ids).await?; let (backup_guard, tasks, _) = restore_packages(&ctx, backup_guard, ids).await?;
tokio::spawn(async move { tokio::spawn(async move {
stream::iter(tasks.into_iter().map(|x| (x, ctx.clone()))) stream::iter(tasks.into_iter().map(|x| (x, ctx.clone())))
.for_each_concurrent(5, |(res, ctx)| async move { .for_each_concurrent(5, |(res, ctx)| async move {
let mut db = ctx.db.handle();
match res.await { match res.await {
(Ok(_), _) => (), (Ok(_), _) => (),
(Err(err), package_id) => { (Err(err), package_id) => {
if let Err(err) = ctx if let Err(err) = ctx
.notification_manager .notification_manager
.notify( .notify(
&mut db, ctx.db.clone(),
Some(package_id.clone()), Some(package_id.clone()),
NotificationLevel::Error, NotificationLevel::Error,
"Restoration Failure".to_string(), "Restoration Failure".to_string(),
@@ -109,7 +105,7 @@ async fn approximate_progress(
if tokio::fs::metadata(&dir).await.is_err() { if tokio::fs::metadata(&dir).await.is_err() {
*size = 0; *size = 0;
} else { } else {
*size = dir_size(&dir).await?; *size = dir_size(&dir, None).await?;
} }
} }
Ok(()) Ok(())
@@ -184,20 +180,18 @@ pub async fn recover_full_embassy(
.await?; .await?;
let os_backup_path = backup_guard.as_ref().join("os-backup.cbor"); let os_backup_path = backup_guard.as_ref().join("os-backup.cbor");
let mut os_backup: OsBackup = let mut os_backup: OsBackup = IoFormat::Cbor.from_slice(
IoFormat::Cbor.from_slice(&tokio::fs::read(&os_backup_path).await.with_ctx(|_| { &tokio::fs::read(&os_backup_path)
( .await
crate::ErrorKind::Filesystem, .with_ctx(|_| (ErrorKind::Filesystem, os_backup_path.display().to_string()))?,
os_backup_path.display().to_string(), )?;
)
})?)?;
os_backup.account.password = argon2::hash_encoded( os_backup.account.password = argon2::hash_encoded(
embassy_password.as_bytes(), embassy_password.as_bytes(),
&rand::random::<[u8; 16]>()[..], &rand::random::<[u8; 16]>()[..],
&argon2::Config::default(), &argon2::Config::rfc9106_low_mem(),
) )
.with_kind(crate::ErrorKind::PasswordHashGeneration)?; .with_kind(ErrorKind::PasswordHashGeneration)?;
let secret_store = ctx.secret_store().await?; let secret_store = ctx.secret_store().await?;
@@ -211,27 +205,24 @@ pub async fn recover_full_embassy(
let rpc_ctx = RpcContext::init(ctx.config_path.clone(), disk_guid.clone()).await?; let rpc_ctx = RpcContext::init(ctx.config_path.clone(), disk_guid.clone()).await?;
let mut db = rpc_ctx.db.handle(); let ids: Vec<_> = backup_guard
let ids = backup_guard
.metadata .metadata
.package_backups .package_backups
.keys() .keys()
.cloned() .cloned()
.collect(); .collect();
let (backup_guard, tasks, progress_info) = let (backup_guard, tasks, progress_info) =
restore_packages(&rpc_ctx, &mut db, backup_guard, ids).await?; restore_packages(&rpc_ctx, backup_guard, ids).await?;
let task_consumer_rpc_ctx = rpc_ctx.clone(); let task_consumer_rpc_ctx = rpc_ctx.clone();
tokio::select! { tokio::select! {
_ = async move { _ = async move {
stream::iter(tasks.into_iter().map(|x| (x, task_consumer_rpc_ctx.clone()))) stream::iter(tasks.into_iter().map(|x| (x, task_consumer_rpc_ctx.clone())))
.for_each_concurrent(5, |(res, ctx)| async move { .for_each_concurrent(5, |(res, ctx)| async move {
let mut db = ctx.db.handle();
match res.await { match res.await {
(Ok(_), _) => (), (Ok(_), _) => (),
(Err(err), package_id) => { (Err(err), package_id) => {
if let Err(err) = ctx.notification_manager.notify( if let Err(err) = ctx.notification_manager.notify(
&mut db, ctx.db.clone(),
Some(package_id.clone()), Some(package_id.clone()),
NotificationLevel::Error, NotificationLevel::Error,
"Restoration Failure".to_string(), format!("Error restoring package {}: {}", package_id,err), (), None).await{ "Restoration Failure".to_string(), format!("Error restoring package {}: {}", package_id,err), (), None).await{
@@ -261,9 +252,9 @@ pub async fn recover_full_embassy(
)) ))
} }
#[instrument(skip(ctx, backup_guard))]
async fn restore_packages( async fn restore_packages(
ctx: &RpcContext, ctx: &RpcContext,
db: &mut PatchDbHandle,
backup_guard: BackupMountGuard<TmpMountGuard>, backup_guard: BackupMountGuard<TmpMountGuard>,
ids: Vec<PackageId>, ids: Vec<PackageId>,
) -> Result< ) -> Result<
@@ -274,7 +265,7 @@ async fn restore_packages(
), ),
Error, Error,
> { > {
let guards = assure_restoring(ctx, db, ids, &backup_guard).await?; let guards = assure_restoring(ctx, ids, &backup_guard).await?;
let mut progress_info = ProgressInfo::default(); let mut progress_info = ProgressInfo::default();
@@ -282,10 +273,12 @@ async fn restore_packages(
for (manifest, guard) in guards { for (manifest, guard) in guards {
let id = manifest.id.clone(); let id = manifest.id.clone();
let (progress, task) = restore_package(ctx.clone(), manifest, guard).await?; let (progress, task) = restore_package(ctx.clone(), manifest, guard).await?;
progress_info.package_installs.insert(id.clone(), progress); progress_info
.package_installs
.insert(id.clone(), progress.clone());
progress_info progress_info
.src_volume_size .src_volume_size
.insert(id.clone(), dir_size(backup_dir(&id)).await?); .insert(id.clone(), dir_size(backup_dir(&id), None).await?);
progress_info.target_volume_size.insert(id.clone(), 0); progress_info.target_volume_size.insert(id.clone(), 0);
let package_id = id.clone(); let package_id = id.clone();
tasks.push( tasks.push(
@@ -306,23 +299,20 @@ async fn restore_packages(
Ok((backup_guard, tasks, progress_info)) Ok((backup_guard, tasks, progress_info))
} }
#[instrument(skip(ctx, db, backup_guard))] #[instrument(skip(ctx, backup_guard))]
async fn assure_restoring( async fn assure_restoring(
ctx: &RpcContext, ctx: &RpcContext,
db: &mut PatchDbHandle,
ids: Vec<PackageId>, ids: Vec<PackageId>,
backup_guard: &BackupMountGuard<TmpMountGuard>, backup_guard: &BackupMountGuard<TmpMountGuard>,
) -> Result<Vec<(Manifest, PackageBackupMountGuard)>, Error> { ) -> Result<Vec<(Manifest, PackageBackupMountGuard)>, Error> {
let mut tx = db.begin().await?;
let mut guards = Vec::with_capacity(ids.len()); let mut guards = Vec::with_capacity(ids.len());
let mut insert_packages = BTreeMap::new();
for id in ids { for id in ids {
let mut model = crate::db::DatabaseModel::new() let peek = ctx.db.peek().await;
.package_data()
.idx_model(&id) let model = peek.as_package_data().as_idx(&id);
.get_mut(&mut tx)
.await?;
if !model.is_none() { if !model.is_none() {
return Err(Error::new( return Err(Error::new(
@@ -330,14 +320,15 @@ async fn assure_restoring(
crate::ErrorKind::InvalidRequest, crate::ErrorKind::InvalidRequest,
)); ));
} }
let guard = backup_guard.mount_package_backup(&id).await?; let guard = backup_guard.mount_package_backup(&id).await?;
let s9pk_path = Path::new(BACKUP_DIR).join(&id).join(format!("{}.s9pk", id)); let s9pk_path = Path::new(BACKUP_DIR).join(&id).join(format!("{}.s9pk", id));
let mut rdr = S9pkReader::open(&s9pk_path, false).await?; let mut rdr = S9pkReader::open(&s9pk_path, false).await?;
let manifest = rdr.manifest().await?; let manifest = rdr.manifest().await?;
let version = manifest.version.clone(); let version = manifest.version.clone();
let progress = InstallProgress::new(Some(tokio::fs::metadata(&s9pk_path).await?.len())); let progress = Arc::new(InstallProgress::new(Some(
tokio::fs::metadata(&s9pk_path).await?.len(),
)));
let public_dir_path = ctx let public_dir_path = ctx
.datadir .datadir
@@ -361,18 +352,25 @@ async fn assure_restoring(
let mut dst = File::create(&icon_path).await?; let mut dst = File::create(&icon_path).await?;
tokio::io::copy(&mut rdr.icon().await?, &mut dst).await?; tokio::io::copy(&mut rdr.icon().await?, &mut dst).await?;
dst.sync_all().await?; dst.sync_all().await?;
insert_packages.insert(
*model = Some(PackageDataEntry::Restoring { id.clone(),
install_progress: progress.clone(), PackageDataEntry::Restoring(PackageDataEntryRestoring {
static_files: StaticFiles::local(&id, &version, manifest.assets.icon_type()), install_progress: progress.clone(),
manifest: manifest.clone(), static_files: StaticFiles::local(&id, &version, manifest.assets.icon_type()),
}); manifest: manifest.clone(),
model.save(&mut tx).await?; }),
);
guards.push((manifest, guard)); guards.push((manifest, guard));
} }
ctx.db
tx.commit().await?; .mutate(|db| {
for (id, package) in insert_packages {
db.as_package_data_mut().insert(&id, &package)?;
}
Ok(())
})
.await?;
Ok(guards) Ok(guards)
} }
@@ -388,13 +386,11 @@ async fn restore_package<'a>(
.join(format!("{}.s9pk", id)); .join(format!("{}.s9pk", id));
let metadata_path = Path::new(BACKUP_DIR).join(&id).join("metadata.cbor"); let metadata_path = Path::new(BACKUP_DIR).join(&id).join("metadata.cbor");
let metadata: BackupMetadata = let metadata: BackupMetadata = IoFormat::Cbor.from_slice(
IoFormat::Cbor.from_slice(&tokio::fs::read(&metadata_path).await.with_ctx(|_| { &tokio::fs::read(&metadata_path)
( .await
crate::ErrorKind::Filesystem, .with_ctx(|_| (ErrorKind::Filesystem, metadata_path.display().to_string()))?,
metadata_path.display().to_string(), )?;
)
})?)?;
let mut secrets = ctx.secret_store.acquire().await?; let mut secrets = ctx.secret_store.acquire().await?;
let mut secrets_tx = secrets.begin().await?; let mut secrets_tx = secrets.begin().await?;
@@ -402,48 +398,59 @@ async fn restore_package<'a>(
let k = key.0.as_slice(); let k = key.0.as_slice();
sqlx::query!( sqlx::query!(
"INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING", "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING",
*id, id.to_string(),
*iface, iface.to_string(),
k, k,
) )
.execute(&mut secrets_tx).await?; .execute(secrets_tx.as_mut()).await?;
} }
// DEPRECATED // DEPRECATED
for (iface, key) in metadata.tor_keys { for (iface, key) in metadata.tor_keys {
let k = key.0.as_slice(); let k = key.0.as_slice();
sqlx::query!( sqlx::query!(
"INSERT INTO tor (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING", "INSERT INTO tor (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING",
*id, id.to_string(),
*iface, iface.to_string(),
k, k,
) )
.execute(&mut secrets_tx).await?; .execute(secrets_tx.as_mut()).await?;
} }
secrets_tx.commit().await?; secrets_tx.commit().await?;
drop(secrets); drop(secrets);
let len = tokio::fs::metadata(&s9pk_path) let len = tokio::fs::metadata(&s9pk_path)
.await .await
.with_ctx(|_| { .with_ctx(|_| (ErrorKind::Filesystem, s9pk_path.display().to_string()))?
(
crate::ErrorKind::Filesystem,
s9pk_path.display().to_string(),
)
})?
.len(); .len();
let file = File::open(&s9pk_path).await.with_ctx(|_| { let file = File::open(&s9pk_path)
( .await
crate::ErrorKind::Filesystem, .with_ctx(|_| (ErrorKind::Filesystem, s9pk_path.display().to_string()))?;
s9pk_path.display().to_string(),
)
})?;
let progress = InstallProgress::new(Some(len)); let progress = InstallProgress::new(Some(len));
let marketplace_url = metadata.marketplace_url;
let progress = Arc::new(progress);
ctx.db
.mutate(|db| {
db.as_package_data_mut().insert(
&id,
&PackageDataEntry::Restoring(PackageDataEntryRestoring {
install_progress: progress.clone(),
static_files: StaticFiles::local(
&id,
&manifest.version,
manifest.assets.icon_type(),
),
manifest: manifest.clone(),
}),
)
})
.await?;
Ok(( Ok((
progress.clone(), progress.clone(),
async move { async move {
download_install_s9pk(&ctx, &manifest, None, progress, file).await?; download_install_s9pk(ctx, manifest, marketplace_url, progress, file, None).await?;
guard.unmount().await?; guard.unmount().await?;

View File

@@ -12,9 +12,9 @@ use crate::disk::mount::filesystem::cifs::Cifs;
use crate::disk::mount::filesystem::ReadOnly; use crate::disk::mount::filesystem::ReadOnly;
use crate::disk::mount::guard::TmpMountGuard; use crate::disk::mount::guard::TmpMountGuard;
use crate::disk::util::{recovery_info, EmbassyOsRecoveryInfo}; use crate::disk::util::{recovery_info, EmbassyOsRecoveryInfo};
use crate::prelude::*;
use crate::util::display_none; use crate::util::display_none;
use crate::util::serde::KeyVal; use crate::util::serde::KeyVal;
use crate::Error;
#[derive(Debug, Deserialize, Serialize)] #[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
@@ -84,7 +84,7 @@ pub async fn update(
} else { } else {
return Err(Error::new( return Err(Error::new(
eyre!("Backup Target ID {} Not Found", id), eyre!("Backup Target ID {} Not Found", id),
crate::ErrorKind::NotFound, ErrorKind::NotFound,
)); ));
}; };
let cifs = Cifs { let cifs = Cifs {
@@ -112,7 +112,7 @@ pub async fn update(
{ {
return Err(Error::new( return Err(Error::new(
eyre!("Backup Target ID {} Not Found", BackupTargetId::Cifs { id }), eyre!("Backup Target ID {} Not Found", BackupTargetId::Cifs { id }),
crate::ErrorKind::NotFound, ErrorKind::NotFound,
)); ));
}; };
Ok(KeyVal { Ok(KeyVal {
@@ -134,7 +134,7 @@ pub async fn remove(#[context] ctx: RpcContext, #[arg] id: BackupTargetId) -> Re
} else { } else {
return Err(Error::new( return Err(Error::new(
eyre!("Backup Target ID {} Not Found", id), eyre!("Backup Target ID {} Not Found", id),
crate::ErrorKind::NotFound, ErrorKind::NotFound,
)); ));
}; };
if sqlx::query!("DELETE FROM cifs_shares WHERE id = $1", id) if sqlx::query!("DELETE FROM cifs_shares WHERE id = $1", id)
@@ -145,7 +145,7 @@ pub async fn remove(#[context] ctx: RpcContext, #[arg] id: BackupTargetId) -> Re
{ {
return Err(Error::new( return Err(Error::new(
eyre!("Backup Target ID {} Not Found", BackupTargetId::Cifs { id }), eyre!("Backup Target ID {} Not Found", BackupTargetId::Cifs { id }),
crate::ErrorKind::NotFound, ErrorKind::NotFound,
)); ));
}; };
Ok(()) Ok(())

View File

@@ -11,6 +11,7 @@ use rpc_toolkit::command;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use sha2::Sha256; use sha2::Sha256;
use sqlx::{Executor, Postgres}; use sqlx::{Executor, Postgres};
use tokio::sync::Mutex;
use tracing::instrument; use tracing::instrument;
use self::cifs::CifsBackupTarget; use self::cifs::CifsBackupTarget;
@@ -21,10 +22,10 @@ use crate::disk::mount::filesystem::cifs::Cifs;
use crate::disk::mount::filesystem::{FileSystem, MountType, ReadWrite}; use crate::disk::mount::filesystem::{FileSystem, MountType, ReadWrite};
use crate::disk::mount::guard::TmpMountGuard; use crate::disk::mount::guard::TmpMountGuard;
use crate::disk::util::PartitionInfo; use crate::disk::util::PartitionInfo;
use crate::prelude::*;
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;
use crate::util::serde::{deserialize_from_str, display_serializable, serialize_display}; use crate::util::serde::{deserialize_from_str, display_serializable, serialize_display};
use crate::util::Version; use crate::util::{display_none, Version};
use crate::Error;
pub mod cifs; pub mod cifs;
@@ -42,7 +43,7 @@ pub enum BackupTarget {
Cifs(CifsBackupTarget), Cifs(CifsBackupTarget),
} }
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)] #[derive(Debug, PartialEq, Eq, PartialOrd, Ord, Clone)]
pub enum BackupTargetId { pub enum BackupTargetId {
Disk { logicalname: PathBuf }, Disk { logicalname: PathBuf },
Cifs { id: i32 }, Cifs { id: i32 },
@@ -71,14 +72,14 @@ impl std::fmt::Display for BackupTargetId {
impl std::str::FromStr for BackupTargetId { impl std::str::FromStr for BackupTargetId {
type Err = Error; type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> { fn from_str(s: &str) -> Result<Self, Self::Err> {
match s.split_once("-") { match s.split_once('-') {
Some(("disk", logicalname)) => Ok(BackupTargetId::Disk { Some(("disk", logicalname)) => Ok(BackupTargetId::Disk {
logicalname: Path::new(logicalname).to_owned(), logicalname: Path::new(logicalname).to_owned(),
}), }),
Some(("cifs", id)) => Ok(BackupTargetId::Cifs { id: id.parse()? }), Some(("cifs", id)) => Ok(BackupTargetId::Cifs { id: id.parse()? }),
_ => Err(Error::new( _ => Err(Error::new(
eyre!("Invalid Backup Target ID"), eyre!("Invalid Backup Target ID"),
crate::ErrorKind::InvalidBackupTargetId, ErrorKind::InvalidBackupTargetId,
)), )),
} }
} }
@@ -129,7 +130,7 @@ impl FileSystem for BackupTargetFS {
} }
} }
#[command(subcommands(cifs::cifs, list, info))] #[command(subcommands(cifs::cifs, list, info, mount, umount))]
pub fn target() -> Result<(), Error> { pub fn target() -> Result<(), Error> {
Ok(()) Ok(())
} }
@@ -141,7 +142,7 @@ pub async fn list(
let mut sql_handle = ctx.secret_store.acquire().await?; let mut sql_handle = ctx.secret_store.acquire().await?;
let (disks_res, cifs) = tokio::try_join!( let (disks_res, cifs) = tokio::try_join!(
crate::disk::util::list(&ctx.os_partitions), crate::disk::util::list(&ctx.os_partitions),
cifs::list(&mut sql_handle), cifs::list(sql_handle.as_mut()),
)?; )?;
Ok(disks_res Ok(disks_res
.into_iter() .into_iter()
@@ -212,7 +213,7 @@ fn display_backup_info(info: BackupInfo, matches: &ArgMatches) {
]); ]);
for (id, info) in info.package_backups { for (id, info) in info.package_backups {
let row = row![ let row = row![
id.as_str(), &*id,
info.version.as_str(), info.version.as_str(),
info.os_version.as_str(), info.os_version.as_str(),
&info.timestamp.to_string(), &info.timestamp.to_string(),
@@ -232,7 +233,7 @@ pub async fn info(
let guard = BackupMountGuard::mount( let guard = BackupMountGuard::mount(
TmpMountGuard::mount( TmpMountGuard::mount(
&target_id &target_id
.load(&mut ctx.secret_store.acquire().await?) .load(ctx.secret_store.acquire().await?.as_mut())
.await?, .await?,
ReadWrite, ReadWrite,
) )
@@ -247,3 +248,60 @@ pub async fn info(
Ok(res) Ok(res)
} }
lazy_static::lazy_static! {
static ref USER_MOUNTS: Mutex<BTreeMap<BackupTargetId, BackupMountGuard<TmpMountGuard>>> =
Mutex::new(BTreeMap::new());
}
#[command]
#[instrument(skip_all)]
pub async fn mount(
#[context] ctx: RpcContext,
#[arg(rename = "target-id")] target_id: BackupTargetId,
#[arg] password: String,
) -> Result<String, Error> {
let mut mounts = USER_MOUNTS.lock().await;
if let Some(existing) = mounts.get(&target_id) {
return Ok(existing.as_ref().display().to_string());
}
let guard = BackupMountGuard::mount(
TmpMountGuard::mount(
&target_id
.clone()
.load(ctx.secret_store.acquire().await?.as_mut())
.await?,
ReadWrite,
)
.await?,
&password,
)
.await?;
let res = guard.as_ref().display().to_string();
mounts.insert(target_id, guard);
Ok(res)
}
#[command(display(display_none))]
#[instrument(skip_all)]
pub async fn umount(
#[context] _ctx: RpcContext,
#[arg(rename = "target-id")] target_id: Option<BackupTargetId>,
) -> Result<(), Error> {
let mut mounts = USER_MOUNTS.lock().await;
if let Some(target_id) = target_id {
if let Some(existing) = mounts.remove(&target_id) {
existing.unmount().await?;
}
} else {
for (_, existing) in std::mem::take(&mut *mounts) {
existing.unmount().await?;
}
}
Ok(())
}

View File

@@ -1,207 +0,0 @@
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use embassy::context::rpc::RpcContextConfig;
use embassy::context::{DiagnosticContext, InstallContext, SetupContext};
use embassy::disk::fsck::RepairStrategy;
use embassy::disk::main::DEFAULT_PASSWORD;
use embassy::disk::REPAIR_DISK_PATH;
use embassy::init::STANDBY_MODE_PATH;
use embassy::net::web_server::WebServer;
use embassy::shutdown::Shutdown;
use embassy::sound::CHIME;
use embassy::util::logger::EmbassyLogger;
use embassy::util::Invoke;
use embassy::{Error, ErrorKind, ResultExt, IS_RASPBERRY_PI};
use tokio::process::Command;
use tracing::instrument;
#[instrument]
async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
if tokio::fs::metadata("/cdrom").await.is_ok() {
let ctx = InstallContext::init(cfg_path).await?;
let server = WebServer::install(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
CHIME.play().await?;
ctx.shutdown
.subscribe()
.recv()
.await
.expect("context dropped");
server.shutdown().await;
Command::new("reboot")
.invoke(embassy::ErrorKind::Unknown)
.await?;
} else if tokio::fs::metadata("/media/embassy/config/disk.guid")
.await
.is_err()
{
let ctx = SetupContext::init(cfg_path).await?;
let server = WebServer::setup(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
CHIME.play().await?;
ctx.shutdown
.subscribe()
.recv()
.await
.expect("context dropped");
server.shutdown().await;
tokio::task::yield_now().await;
if let Err(e) = Command::new("killall")
.arg("firefox-esr")
.invoke(ErrorKind::NotFound)
.await
{
tracing::error!("Failed to kill kiosk: {}", e);
tracing::debug!("{:?}", e);
}
} else {
let cfg = RpcContextConfig::load(cfg_path).await?;
let guid_string = tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?;
let guid = guid_string.trim();
let requires_reboot = embassy::disk::main::import(
guid,
cfg.datadir(),
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
RepairStrategy::Aggressive
} else {
RepairStrategy::Preen
},
DEFAULT_PASSWORD,
)
.await?;
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
tokio::fs::remove_file(REPAIR_DISK_PATH)
.await
.with_ctx(|_| (embassy::ErrorKind::Filesystem, REPAIR_DISK_PATH))?;
}
if requires_reboot.0 {
embassy::disk::main::export(guid, cfg.datadir()).await?;
Command::new("reboot")
.invoke(embassy::ErrorKind::Unknown)
.await?;
}
tracing::info!("Loaded Disk");
embassy::init::init(&cfg).await?;
}
Ok(())
}
async fn run_script_if_exists<P: AsRef<Path>>(path: P) {
let script = path.as_ref();
if script.exists() {
match Command::new("/bin/bash").arg(script).spawn() {
Ok(mut c) => {
if let Err(e) = c.wait().await {
tracing::error!("Error Running {}: {}", script.display(), e);
tracing::debug!("{:?}", e);
}
}
Err(e) => {
tracing::error!("Error Running {}: {}", script.display(), e);
tracing::debug!("{:?}", e);
}
}
}
}
#[instrument]
async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> {
if *IS_RASPBERRY_PI && tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() {
tokio::fs::remove_file(STANDBY_MODE_PATH).await?;
Command::new("sync").invoke(ErrorKind::Filesystem).await?;
embassy::sound::SHUTDOWN.play().await?;
futures::future::pending::<()>().await;
}
embassy::sound::BEP.play().await?;
run_script_if_exists("/media/embassy/config/preinit.sh").await;
let res = if let Err(e) = setup_or_init(cfg_path.clone()).await {
async move {
tracing::error!("{}", e.source);
tracing::debug!("{}", e.source);
embassy::sound::BEETHOVEN.play().await?;
let ctx = DiagnosticContext::init(
cfg_path,
if tokio::fs::metadata("/media/embassy/config/disk.guid")
.await
.is_ok()
{
Some(Arc::new(
tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?
.trim()
.to_owned(),
))
} else {
None
},
e,
)
.await?;
let server = WebServer::diagnostic(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
let shutdown = ctx.shutdown.subscribe().recv().await.unwrap();
server.shutdown().await;
Ok(shutdown)
}
.await
} else {
Ok(None)
};
run_script_if_exists("/media/embassy/config/postinit.sh").await;
res
}
fn main() {
let matches = clap::App::new("embassy-init")
.arg(
clap::Arg::with_name("config")
.short('c')
.long("config")
.takes_value(true),
)
.get_matches();
EmbassyLogger::init();
let cfg_path = matches.value_of("config").map(|p| Path::new(p).to_owned());
let res = {
let rt = tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.expect("failed to initialize runtime");
rt.block_on(inner_main(cfg_path))
};
match res {
Ok(Some(shutdown)) => shutdown.execute(),
Ok(None) => (),
Err(e) => {
eprintln!("{}", e.source);
tracing::debug!("{:?}", e.source);
drop(e.source);
std::process::exit(e.kind as i32)
}
}
}

View File

@@ -14,7 +14,7 @@ fn log_str_error(action: &str, e: i32) {
} }
} }
fn main() { pub fn main() {
let aliases: Vec<_> = std::env::args().skip(1).collect(); let aliases: Vec<_> = std::env::args().skip(1).collect();
unsafe { unsafe {
let simple_poll = avahi_sys::avahi_simple_poll_new(); let simple_poll = avahi_sys::avahi_simple_poll_new();

View File

@@ -0,0 +1,9 @@
pub fn renamed(old: &str, new: &str) -> ! {
eprintln!("{old} has been renamed to {new}");
std::process::exit(1)
}
pub fn removed(name: &str) -> ! {
eprintln!("{name} has been removed");
std::process::exit(1)
}

59
backend/src/bins/mod.rs Normal file
View File

@@ -0,0 +1,59 @@
use std::path::Path;
#[cfg(feature = "avahi-alias")]
pub mod avahi_alias;
pub mod deprecated;
#[cfg(feature = "cli")]
pub mod start_cli;
#[cfg(feature = "js_engine")]
pub mod start_deno;
#[cfg(feature = "daemon")]
pub mod start_init;
#[cfg(feature = "sdk")]
pub mod start_sdk;
#[cfg(feature = "daemon")]
pub mod startd;
fn select_executable(name: &str) -> Option<fn()> {
match name {
#[cfg(feature = "avahi-alias")]
"avahi-alias" => Some(avahi_alias::main),
#[cfg(feature = "js_engine")]
"start-deno" => Some(start_deno::main),
#[cfg(feature = "cli")]
"start-cli" => Some(start_cli::main),
#[cfg(feature = "sdk")]
"start-sdk" => Some(start_sdk::main),
#[cfg(feature = "daemon")]
"startd" => Some(startd::main),
"embassy-cli" => Some(|| deprecated::renamed("embassy-cli", "start-cli")),
"embassy-sdk" => Some(|| deprecated::renamed("embassy-sdk", "start-sdk")),
"embassyd" => Some(|| deprecated::renamed("embassyd", "startd")),
"embassy-init" => Some(|| deprecated::removed("embassy-init")),
_ => None,
}
}
pub fn startbox() {
let args = std::env::args().take(2).collect::<Vec<_>>();
if let Some(x) = args
.get(0)
.and_then(|s| Path::new(&*s).file_name())
.and_then(|s| s.to_str())
.and_then(|s| select_executable(&s))
{
x()
} else if let Some(x) = args.get(1).and_then(|s| select_executable(&s)) {
x()
} else {
eprintln!(
"unknown executable: {}",
args.get(0)
.filter(|x| &**x != "startbox")
.or_else(|| args.get(1))
.map(|s| s.as_str())
.unwrap_or("N/A")
);
std::process::exit(1);
}
}

View File

@@ -1,21 +1,22 @@
use clap::Arg; use clap::Arg;
use embassy::context::CliContext;
use embassy::util::logger::EmbassyLogger;
use embassy::version::{Current, VersionT};
use embassy::Error;
use rpc_toolkit::run_cli; use rpc_toolkit::run_cli;
use rpc_toolkit::yajrc::RpcError; use rpc_toolkit::yajrc::RpcError;
use serde_json::Value; use serde_json::Value;
use crate::context::CliContext;
use crate::util::logger::EmbassyLogger;
use crate::version::{Current, VersionT};
use crate::Error;
lazy_static::lazy_static! { lazy_static::lazy_static! {
static ref VERSION_STRING: String = Current::new().semver().to_string(); static ref VERSION_STRING: String = Current::new().semver().to_string();
} }
fn inner_main() -> Result<(), Error> { fn inner_main() -> Result<(), Error> {
run_cli!({ run_cli!({
command: embassy::main_api, command: crate::main_api,
app: app => app app: app => app
.name("Embassy CLI") .name("StartOS CLI")
.version(&**VERSION_STRING) .version(&**VERSION_STRING)
.arg( .arg(
clap::Arg::with_name("config") clap::Arg::with_name("config")
@@ -48,7 +49,7 @@ fn inner_main() -> Result<(), Error> {
Ok(()) Ok(())
} }
fn main() { pub fn main() {
match inner_main() { match inner_main() {
Ok(_) => (), Ok(_) => (),
Err(e) => { Err(e) => {

View File

@@ -0,0 +1,134 @@
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{command, run_cli, Context};
use serde_json::Value;
use crate::procedure::js_scripts::ExecuteArgs;
use crate::s9pk::manifest::PackageId;
use crate::util::serde::{display_serializable, parse_stdin_deserializable};
use crate::version::{Current, VersionT};
use crate::Error;
lazy_static::lazy_static! {
static ref VERSION_STRING: String = Current::new().semver().to_string();
}
struct DenoContext;
impl Context for DenoContext {}
#[command(subcommands(execute, sandbox))]
fn deno_api() -> Result<(), Error> {
Ok(())
}
#[command(cli_only, display(display_serializable))]
async fn execute(
#[arg(stdin, parse(parse_stdin_deserializable))] arg: ExecuteArgs,
) -> Result<Result<Value, (i32, String)>, Error> {
let ExecuteArgs {
procedure,
directory,
pkg_id,
pkg_version,
name,
volumes,
input,
} = arg;
PackageLogger::init(&pkg_id);
procedure
.execute_impl(&directory, &pkg_id, &pkg_version, name, &volumes, input)
.await
}
#[command(cli_only, display(display_serializable))]
async fn sandbox(
#[arg(stdin, parse(parse_stdin_deserializable))] arg: ExecuteArgs,
) -> Result<Result<Value, (i32, String)>, Error> {
let ExecuteArgs {
procedure,
directory,
pkg_id,
pkg_version,
name,
volumes,
input,
} = arg;
PackageLogger::init(&pkg_id);
procedure
.sandboxed_impl(&directory, &pkg_id, &pkg_version, &volumes, input, name)
.await
}
use tracing::Subscriber;
use tracing_subscriber::util::SubscriberInitExt;
#[derive(Clone)]
struct PackageLogger {}
impl PackageLogger {
fn base_subscriber(id: &PackageId) -> impl Subscriber {
use tracing_error::ErrorLayer;
use tracing_subscriber::prelude::*;
use tracing_subscriber::{fmt, EnvFilter};
let filter_layer = EnvFilter::default().add_directive(
format!("{}=warn", std::module_path!().split("::").next().unwrap())
.parse()
.unwrap(),
);
let fmt_layer = fmt::layer().with_writer(std::io::stderr).with_target(true);
let journald_layer = tracing_journald::layer()
.unwrap()
.with_syslog_identifier(format!("{id}.embassy"));
let sub = tracing_subscriber::registry()
.with(filter_layer)
.with(fmt_layer)
.with(journald_layer)
.with(ErrorLayer::default());
sub
}
pub fn init(id: &PackageId) -> Self {
Self::base_subscriber(id).init();
color_eyre::install().unwrap_or_else(|_| tracing::warn!("tracing too many times"));
Self {}
}
}
fn inner_main() -> Result<(), Error> {
run_cli!({
command: deno_api,
app: app => app
.name("StartOS Deno Executor")
.version(&**VERSION_STRING),
context: _m => DenoContext,
exit: |e: RpcError| {
match e.data {
Some(Value::String(s)) => eprintln!("{}: {}", e.message, s),
Some(Value::Object(o)) => if let Some(Value::String(s)) = o.get("details") {
eprintln!("{}: {}", e.message, s);
if let Some(Value::String(s)) = o.get("debug") {
tracing::debug!("{}", s)
}
}
Some(a) => eprintln!("{}: {}", e.message, a),
None => eprintln!("{}", e.message),
}
std::process::exit(e.code);
}
});
Ok(())
}
pub fn main() {
match inner_main() {
Ok(_) => (),
Err(e) => {
eprintln!("{}", e.source);
tracing::debug!("{:?}", e.source);
drop(e.source);
std::process::exit(e.kind as i32)
}
}
}

View File

@@ -0,0 +1,268 @@
use std::net::{Ipv6Addr, SocketAddr};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use tokio::process::Command;
use tracing::instrument;
use crate::context::rpc::RpcContextConfig;
use crate::context::{DiagnosticContext, InstallContext, SetupContext};
use crate::disk::fsck::RepairStrategy;
use crate::disk::main::DEFAULT_PASSWORD;
use crate::disk::REPAIR_DISK_PATH;
use crate::firmware::update_firmware;
use crate::init::STANDBY_MODE_PATH;
use crate::net::web_server::WebServer;
use crate::shutdown::Shutdown;
use crate::sound::CHIME;
use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt, PLATFORM};
#[instrument(skip_all)]
async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> {
if update_firmware().await?.0 {
return Ok(Some(Shutdown {
export_args: None,
restart: true,
}));
}
Command::new("ln")
.arg("-sf")
.arg("/usr/lib/startos/scripts/fake-apt")
.arg("/usr/local/bin/apt")
.invoke(crate::ErrorKind::OpenSsh)
.await?;
Command::new("ln")
.arg("-sf")
.arg("/usr/lib/startos/scripts/fake-apt")
.arg("/usr/local/bin/apt-get")
.invoke(crate::ErrorKind::OpenSsh)
.await?;
Command::new("ln")
.arg("-sf")
.arg("/usr/lib/startos/scripts/fake-apt")
.arg("/usr/local/bin/aptitude")
.invoke(crate::ErrorKind::OpenSsh)
.await?;
Command::new("make-ssl-cert")
.arg("generate-default-snakeoil")
.arg("--force-overwrite")
.invoke(crate::ErrorKind::OpenSsl)
.await?;
if tokio::fs::metadata("/run/live/medium").await.is_ok() {
Command::new("sed")
.arg("-i")
.arg("s/PasswordAuthentication no/PasswordAuthentication yes/g")
.arg("/etc/ssh/sshd_config")
.invoke(crate::ErrorKind::Filesystem)
.await?;
Command::new("systemctl")
.arg("reload")
.arg("ssh")
.invoke(crate::ErrorKind::OpenSsh)
.await?;
let ctx = InstallContext::init(cfg_path).await?;
let server = WebServer::install(
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
ctx.clone(),
)
.await?;
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
CHIME.play().await?;
ctx.shutdown
.subscribe()
.recv()
.await
.expect("context dropped");
server.shutdown().await;
Command::new("reboot")
.invoke(crate::ErrorKind::Unknown)
.await?;
} else if tokio::fs::metadata("/media/embassy/config/disk.guid")
.await
.is_err()
{
let ctx = SetupContext::init(cfg_path).await?;
let server = WebServer::setup(
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
ctx.clone(),
)
.await?;
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
CHIME.play().await?;
ctx.shutdown
.subscribe()
.recv()
.await
.expect("context dropped");
server.shutdown().await;
tokio::task::yield_now().await;
if let Err(e) = Command::new("killall")
.arg("firefox-esr")
.invoke(ErrorKind::NotFound)
.await
{
tracing::error!("Failed to kill kiosk: {}", e);
tracing::debug!("{:?}", e);
}
} else {
let cfg = RpcContextConfig::load(cfg_path).await?;
let guid_string = tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?;
let guid = guid_string.trim();
let requires_reboot = crate::disk::main::import(
guid,
cfg.datadir(),
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
RepairStrategy::Aggressive
} else {
RepairStrategy::Preen
},
if guid.ends_with("_UNENC") {
None
} else {
Some(DEFAULT_PASSWORD)
},
)
.await?;
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
tokio::fs::remove_file(REPAIR_DISK_PATH)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, REPAIR_DISK_PATH))?;
}
if requires_reboot.0 {
crate::disk::main::export(guid, cfg.datadir()).await?;
Command::new("reboot")
.invoke(crate::ErrorKind::Unknown)
.await?;
}
tracing::info!("Loaded Disk");
crate::init::init(&cfg).await?;
}
Ok(None)
}
async fn run_script_if_exists<P: AsRef<Path>>(path: P) {
let script = path.as_ref();
if script.exists() {
match Command::new("/bin/bash").arg(script).spawn() {
Ok(mut c) => {
if let Err(e) = c.wait().await {
tracing::error!("Error Running {}: {}", script.display(), e);
tracing::debug!("{:?}", e);
}
}
Err(e) => {
tracing::error!("Error Running {}: {}", script.display(), e);
tracing::debug!("{:?}", e);
}
}
}
}
#[instrument(skip_all)]
async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> {
if &*PLATFORM == "raspberrypi" && tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() {
tokio::fs::remove_file(STANDBY_MODE_PATH).await?;
Command::new("sync").invoke(ErrorKind::Filesystem).await?;
crate::sound::SHUTDOWN.play().await?;
futures::future::pending::<()>().await;
}
crate::sound::BEP.play().await?;
run_script_if_exists("/media/embassy/config/preinit.sh").await;
let res = match setup_or_init(cfg_path.clone()).await {
Err(e) => {
async move {
tracing::error!("{}", e.source);
tracing::debug!("{}", e.source);
crate::sound::BEETHOVEN.play().await?;
let ctx = DiagnosticContext::init(
cfg_path,
if tokio::fs::metadata("/media/embassy/config/disk.guid")
.await
.is_ok()
{
Some(Arc::new(
tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await?
.trim()
.to_owned(),
))
} else {
None
},
e,
)
.await?;
let server = WebServer::diagnostic(
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
ctx.clone(),
)
.await?;
let shutdown = ctx.shutdown.subscribe().recv().await.unwrap();
server.shutdown().await;
Ok(shutdown)
}
.await
}
Ok(s) => Ok(s),
};
run_script_if_exists("/media/embassy/config/postinit.sh").await;
res
}
pub fn main() {
let matches = clap::App::new("start-init")
.arg(
clap::Arg::with_name("config")
.short('c')
.long("config")
.takes_value(true),
)
.get_matches();
let cfg_path = matches.value_of("config").map(|p| Path::new(p).to_owned());
let res = {
let rt = tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.expect("failed to initialize runtime");
rt.block_on(inner_main(cfg_path))
};
match res {
Ok(Some(shutdown)) => shutdown.execute(),
Ok(None) => (),
Err(e) => {
eprintln!("{}", e.source);
tracing::debug!("{:?}", e.source);
drop(e.source);
std::process::exit(e.kind as i32)
}
}
}

View File

@@ -1,20 +1,21 @@
use embassy::context::SdkContext;
use embassy::util::logger::EmbassyLogger;
use embassy::version::{Current, VersionT};
use embassy::Error;
use rpc_toolkit::run_cli; use rpc_toolkit::run_cli;
use rpc_toolkit::yajrc::RpcError; use rpc_toolkit::yajrc::RpcError;
use serde_json::Value; use serde_json::Value;
use crate::context::SdkContext;
use crate::util::logger::EmbassyLogger;
use crate::version::{Current, VersionT};
use crate::Error;
lazy_static::lazy_static! { lazy_static::lazy_static! {
static ref VERSION_STRING: String = Current::new().semver().to_string(); static ref VERSION_STRING: String = Current::new().semver().to_string();
} }
fn inner_main() -> Result<(), Error> { fn inner_main() -> Result<(), Error> {
run_cli!({ run_cli!({
command: embassy::portable_api, command: crate::portable_api,
app: app => app app: app => app
.name("Embassy SDK") .name("StartOS SDK")
.version(&**VERSION_STRING) .version(&**VERSION_STRING)
.arg( .arg(
clap::Arg::with_name("config") clap::Arg::with_name("config")
@@ -47,7 +48,7 @@ fn inner_main() -> Result<(), Error> {
Ok(()) Ok(())
} }
fn main() { pub fn main() {
match inner_main() { match inner_main() {
Ok(_) => (), Ok(_) => (),
Err(e) => { Err(e) => {

View File

@@ -1,20 +1,22 @@
use std::net::{Ipv6Addr, SocketAddr};
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::Arc; use std::sync::Arc;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use embassy::context::{DiagnosticContext, RpcContext};
use embassy::net::web_server::WebServer;
use embassy::shutdown::Shutdown;
use embassy::system::launch_metrics_task;
use embassy::util::logger::EmbassyLogger;
use embassy::{Error, ErrorKind, ResultExt};
use futures::{FutureExt, TryFutureExt}; use futures::{FutureExt, TryFutureExt};
use tokio::signal::unix::signal; use tokio::signal::unix::signal;
use tracing::instrument; use tracing::instrument;
#[instrument] use crate::context::{DiagnosticContext, RpcContext};
use crate::net::web_server::WebServer;
use crate::shutdown::Shutdown;
use crate::system::launch_metrics_task;
use crate::util::logger::EmbassyLogger;
use crate::{Error, ErrorKind, ResultExt};
#[instrument(skip_all)]
async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> { async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> {
let (rpc_ctx, server, shutdown) = { let (rpc_ctx, server, shutdown) = async {
let rpc_ctx = RpcContext::init( let rpc_ctx = RpcContext::init(
cfg_path, cfg_path,
Arc::new( Arc::new(
@@ -25,8 +27,12 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
), ),
) )
.await?; .await?;
embassy::hostname::sync_hostname(&*rpc_ctx.account.read().await).await?; crate::hostname::sync_hostname(&rpc_ctx.account.read().await.hostname).await?;
let server = WebServer::main(([0, 0, 0, 0], 80).into(), rpc_ctx.clone()).await?; let server = WebServer::main(
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
rpc_ctx.clone(),
)
.await?;
let mut shutdown_recv = rpc_ctx.shutdown.subscribe(); let mut shutdown_recv = rpc_ctx.shutdown.subscribe();
@@ -66,7 +72,7 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
.await .await
}); });
embassy::sound::CHIME.play().await?; crate::sound::CHIME.play().await?;
metrics_task metrics_task
.map_err(|e| { .map_err(|e| {
@@ -85,8 +91,9 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
sig_handler.abort(); sig_handler.abort();
(rpc_ctx, server, shutdown) Ok::<_, Error>((rpc_ctx, server, shutdown))
}; }
.await?;
server.shutdown().await; server.shutdown().await;
rpc_ctx.shutdown().await?; rpc_ctx.shutdown().await?;
@@ -95,8 +102,15 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
Ok(shutdown) Ok(shutdown)
} }
fn main() { pub fn main() {
let matches = clap::App::new("embassyd") EmbassyLogger::init();
if !Path::new("/run/embassy/initialized").exists() {
super::start_init::main();
std::fs::write("/run/embassy/initialized", "").unwrap();
}
let matches = clap::App::new("startd")
.arg( .arg(
clap::Arg::with_name("config") clap::Arg::with_name("config")
.short('c') .short('c')
@@ -105,8 +119,6 @@ fn main() {
) )
.get_matches(); .get_matches();
EmbassyLogger::init();
let cfg_path = matches.value_of("config").map(|p| Path::new(p).to_owned()); let cfg_path = matches.value_of("config").map(|p| Path::new(p).to_owned());
let res = { let res = {
@@ -121,7 +133,7 @@ fn main() {
async { async {
tracing::error!("{}", e.source); tracing::error!("{}", e.source);
tracing::debug!("{:?}", e.source); tracing::debug!("{:?}", e.source);
embassy::sound::BEETHOVEN.play().await?; crate::sound::BEETHOVEN.play().await?;
let ctx = DiagnosticContext::init( let ctx = DiagnosticContext::init(
cfg_path, cfg_path,
if tokio::fs::metadata("/media/embassy/config/disk.guid") if tokio::fs::metadata("/media/embassy/config/disk.guid")
@@ -141,8 +153,11 @@ fn main() {
) )
.await?; .await?;
let server = let server = WebServer::diagnostic(
WebServer::diagnostic(([0, 0, 0, 0], 80).into(), ctx.clone()).await?; SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
ctx.clone(),
)
.await?;
let mut shutdown = ctx.shutdown.subscribe(); let mut shutdown = ctx.shutdown.subscribe();

View File

@@ -2,7 +2,6 @@ use std::collections::{BTreeMap, BTreeSet};
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use models::ImageId; use models::ImageId;
use nix::sys::signal::Signal;
use patch_db::HasModel; use patch_db::HasModel;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use tracing::instrument; use tracing::instrument;
@@ -10,6 +9,7 @@ use tracing::instrument;
use super::{Config, ConfigSpec}; use super::{Config, ConfigSpec};
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::dependencies::Dependencies; use crate::dependencies::Dependencies;
use crate::prelude::*;
use crate::procedure::docker::DockerContainers; use crate::procedure::docker::DockerContainers;
use crate::procedure::{PackageProcedure, ProcedureName}; use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;
@@ -18,7 +18,7 @@ use crate::util::Version;
use crate::volume::Volumes; use crate::volume::Volumes;
use crate::{Error, ResultExt}; use crate::{Error, ResultExt};
#[derive(Debug, Deserialize, Serialize, HasModel)] #[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
pub struct ConfigRes { pub struct ConfigRes {
pub config: Option<Config>, pub config: Option<Config>,
@@ -26,28 +26,29 @@ pub struct ConfigRes {
} }
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)] #[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
#[model = "Model<Self>"]
pub struct ConfigActions { pub struct ConfigActions {
pub get: PackageProcedure, pub get: PackageProcedure,
pub set: PackageProcedure, pub set: PackageProcedure,
} }
impl ConfigActions { impl ConfigActions {
#[instrument] #[instrument(skip_all)]
pub fn validate( pub fn validate(
&self, &self,
container: &Option<DockerContainers>, _container: &Option<DockerContainers>,
eos_version: &Version, eos_version: &Version,
volumes: &Volumes, volumes: &Volumes,
image_ids: &BTreeSet<ImageId>, image_ids: &BTreeSet<ImageId>,
) -> Result<(), Error> { ) -> Result<(), Error> {
self.get self.get
.validate(container, eos_version, volumes, image_ids, true) .validate(eos_version, volumes, image_ids, true)
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Config Get"))?; .with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Config Get"))?;
self.set self.set
.validate(container, eos_version, volumes, image_ids, true) .validate(eos_version, volumes, image_ids, true)
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Config Set"))?; .with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Config Set"))?;
Ok(()) Ok(())
} }
#[instrument(skip(ctx))] #[instrument(skip_all)]
pub async fn get( pub async fn get(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
@@ -71,7 +72,7 @@ impl ConfigActions {
}) })
} }
#[instrument(skip(ctx))] #[instrument(skip_all)]
pub async fn set( pub async fn set(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
@@ -99,7 +100,6 @@ impl ConfigActions {
}) })
})?; })?;
Ok(SetResult { Ok(SetResult {
signal: res.signal,
depends_on: res depends_on: res
.depends_on .depends_on
.into_iter() .into_iter()
@@ -112,9 +112,5 @@ impl ConfigActions {
#[derive(Debug, Deserialize, Serialize)] #[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
pub struct SetResult { pub struct SetResult {
#[serde(default)]
#[serde(deserialize_with = "crate::util::serde::deserialize_from_str_opt")]
#[serde(serialize_with = "crate::util::serde::serialize_display_opt")]
pub signal: Option<Signal>,
pub depends_on: BTreeMap<PackageId, BTreeSet<HealthCheckId>>, pub depends_on: BTreeMap<PackageId, BTreeSet<HealthCheckId>>,
} }

View File

@@ -1,28 +1,21 @@
use std::collections::{BTreeMap, BTreeSet}; use std::collections::BTreeMap;
use std::path::PathBuf; use std::path::PathBuf;
use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use futures::future::{BoxFuture, FutureExt};
use indexmap::IndexSet; use indexmap::IndexSet;
use itertools::Itertools; use itertools::Itertools;
use patch_db::{DbHandle, LockReceipt, LockTarget, LockTargetId, LockType, Verifier}; use models::{ErrorKind, OptionExt};
use rand::SeedableRng; use patch_db::value::InternedString;
use patch_db::Value;
use regex::Regex; use regex::Regex;
use rpc_toolkit::command; use rpc_toolkit::command;
use serde_json::Value;
use tracing::instrument; use tracing::instrument;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::db::model::{CurrentDependencies, CurrentDependencyInfo, CurrentDependents}; use crate::prelude::*;
use crate::dependencies::{ use crate::s9pk::manifest::PackageId;
add_dependent_to_current_dependents_lists, break_transitive, heal_all_dependents_transitive,
BreakTransitiveReceipts, BreakageRes, Dependencies, DependencyConfig, DependencyError,
DependencyErrors, DependencyReceipt, TaggedDependencyError, TryHealReceipts,
};
use crate::install::cleanup::{remove_from_current_dependents_lists, UpdateDependencyReceipts};
use crate::procedure::docker::DockerContainers;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::util::display_none; use crate::util::display_none;
use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat}; use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat};
use crate::Error; use crate::Error;
@@ -34,10 +27,10 @@ pub mod util;
pub use spec::{ConfigSpec, Defaultable}; pub use spec::{ConfigSpec, Defaultable};
use util::NumRange; use util::NumRange;
use self::action::{ConfigActions, ConfigRes}; use self::action::ConfigRes;
use self::spec::{ConfigPointerReceipts, PackagePointerSpec, ValueSpecPointer}; use self::spec::ValueSpecPointer;
pub type Config = serde_json::Map<String, Value>; pub type Config = patch_db::value::InOMap<InternedString, Value>;
pub trait TypeOf { pub trait TypeOf {
fn type_of(&self) -> &'static str; fn type_of(&self) -> &'static str;
} }
@@ -81,7 +74,7 @@ pub struct TimeoutError;
#[derive(Clone, Debug, thiserror::Error)] #[derive(Clone, Debug, thiserror::Error)]
pub struct NoMatchWithPath { pub struct NoMatchWithPath {
pub path: Vec<String>, pub path: Vec<InternedString>,
pub error: MatchError, pub error: MatchError,
} }
impl NoMatchWithPath { impl NoMatchWithPath {
@@ -91,7 +84,7 @@ impl NoMatchWithPath {
error, error,
} }
} }
pub fn prepend(mut self, seg: String) -> Self { pub fn prepend(mut self, seg: InternedString) -> Self {
self.path.push(seg); self.path.push(seg);
self self
} }
@@ -110,9 +103,9 @@ impl From<NoMatchWithPath> for Error {
#[derive(Clone, Debug, thiserror::Error)] #[derive(Clone, Debug, thiserror::Error)]
pub enum MatchError { pub enum MatchError {
#[error("String {0:?} Does Not Match Pattern {1}")] #[error("String {0:?} Does Not Match Pattern {1}")]
Pattern(String, Regex), Pattern(Arc<String>, Regex),
#[error("String {0:?} Is Not In Enum {1:?}")] #[error("String {0:?} Is Not In Enum {1:?}")]
Enum(String, IndexSet<String>), Enum(Arc<String>, IndexSet<String>),
#[error("Field Is Not Nullable")] #[error("Field Is Not Nullable")]
NotNullable, NotNullable,
#[error("Length Mismatch: expected {0}, actual: {1}")] #[error("Length Mismatch: expected {0}, actual: {1}")]
@@ -124,11 +117,11 @@ pub enum MatchError {
#[error("Number Is Not Integral: {0}")] #[error("Number Is Not Integral: {0}")]
NonIntegral(f64), NonIntegral(f64),
#[error("Variant {0:?} Is Not In Union {1:?}")] #[error("Variant {0:?} Is Not In Union {1:?}")]
Union(String, IndexSet<String>), Union(Arc<String>, IndexSet<String>),
#[error("Variant Is Missing Tag {0:?}")] #[error("Variant Is Missing Tag {0:?}")]
MissingTag(String), MissingTag(InternedString),
#[error("Property {0:?} Of Variant {1:?} Conflicts With Union Tag")] #[error("Property {0:?} Of Variant {1:?} Conflicts With Union Tag")]
PropertyMatchesUnionTag(String, String), PropertyMatchesUnionTag(InternedString, String),
#[error("Name of Property {0:?} Conflicts With Map Tag Name")] #[error("Name of Property {0:?} Conflicts With Map Tag Name")]
PropertyNameMatchesMapTag(String), PropertyNameMatchesMapTag(String),
#[error("Pointer Is Invalid: {0}")] #[error("Pointer Is Invalid: {0}")]
@@ -164,57 +157,8 @@ pub fn config(#[arg] id: PackageId) -> Result<PackageId, Error> {
Ok(id) Ok(id)
} }
pub struct ConfigGetReceipts {
manifest_volumes: LockReceipt<crate::volume::Volumes, ()>,
manifest_version: LockReceipt<crate::util::Version, ()>,
manifest_config: LockReceipt<Option<ConfigActions>, ()>,
}
impl ConfigGetReceipts {
pub async fn new<'a>(db: &'a mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
let mut locks = Vec::new();
let setup = Self::setup(&mut locks, id);
Ok(setup(&db.lock_all(locks).await?)?)
}
pub fn setup(
locks: &mut Vec<LockTargetId>,
id: &PackageId,
) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
let manifest_version = crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.and_then(|x| x.installed())
.map(|x| x.manifest().version())
.make_locker(LockType::Write)
.add_to_keys(locks);
let manifest_volumes = crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.and_then(|x| x.installed())
.map(|x| x.manifest().volumes())
.make_locker(LockType::Write)
.add_to_keys(locks);
let manifest_config = crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.and_then(|x| x.installed())
.map(|x| x.manifest().config())
.make_locker(LockType::Write)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
manifest_volumes: manifest_volumes.verify(skeleton_key)?,
manifest_version: manifest_version.verify(skeleton_key)?,
manifest_config: manifest_config.verify(skeleton_key)?,
})
}
}
}
#[command(display(display_serializable))] #[command(display(display_serializable))]
#[instrument(skip(ctx))] #[instrument(skip_all)]
pub async fn get( pub async fn get(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[parent_data] id: PackageId, #[parent_data] id: PackageId,
@@ -222,16 +166,21 @@ pub async fn get(
#[arg(long = "format")] #[arg(long = "format")]
format: Option<IoFormat>, format: Option<IoFormat>,
) -> Result<ConfigRes, Error> { ) -> Result<ConfigRes, Error> {
let mut db = ctx.db.handle(); let db = ctx.db.peek().await;
let receipts = ConfigGetReceipts::new(&mut db, &id).await?; let manifest = db
let action = receipts .as_package_data()
.manifest_config .as_idx(&id)
.get(&mut db) .or_not_found(&id)?
.await? .as_installed()
.or_not_found(&id)?
.as_manifest();
let action = manifest
.as_config()
.de()?
.ok_or_else(|| Error::new(eyre!("{} has no config", id), crate::ErrorKind::NotFound))?; .ok_or_else(|| Error::new(eyre!("{} has no config", id), crate::ErrorKind::NotFound))?;
let volumes = receipts.manifest_volumes.get(&mut db).await?; let volumes = manifest.as_volumes().de()?;
let version = receipts.manifest_version.get(&mut db).await?; let version = manifest.as_version().de()?;
action.get(&ctx, &id, &version, &volumes).await action.get(&ctx, &id, &version, &volumes).await
} }
@@ -240,7 +189,7 @@ pub async fn get(
display(display_none), display(display_none),
metadata(sync_db = true) metadata(sync_db = true)
)] )]
#[instrument] #[instrument(skip_all)]
pub fn set( pub fn set(
#[parent_data] id: PackageId, #[parent_data] id: PackageId,
#[allow(unused_variables)] #[allow(unused_variables)]
@@ -252,586 +201,87 @@ pub fn set(
Ok((id, config, timeout.map(|d| *d))) Ok((id, config, timeout.map(|d| *d)))
} }
/// So, the new locking finds all the possible locks and lifts them up into a bundle of locks.
/// Then this bundle will be passed down into the functions that will need to touch the db, and
/// instead of doing the locks down in the system, we have already done the locks and can
/// do the operation on the db.
/// An UnlockedLock has two types, the type of setting and getting from the db, and the second type
/// is the keys that we need to insert on getting/setting because we have included wild cards into the paths.
pub struct ConfigReceipts {
pub dependency_receipt: DependencyReceipt,
pub config_receipts: ConfigPointerReceipts,
pub update_dependency_receipts: UpdateDependencyReceipts,
pub try_heal_receipts: TryHealReceipts,
pub break_transitive_receipts: BreakTransitiveReceipts,
configured: LockReceipt<bool, String>,
config_actions: LockReceipt<ConfigActions, String>,
dependencies: LockReceipt<Dependencies, String>,
volumes: LockReceipt<crate::volume::Volumes, String>,
version: LockReceipt<crate::util::Version, String>,
manifest: LockReceipt<Manifest, String>,
system_pointers: LockReceipt<Vec<spec::SystemPointerSpec>, String>,
pub current_dependents: LockReceipt<CurrentDependents, String>,
pub current_dependencies: LockReceipt<CurrentDependencies, String>,
dependency_errors: LockReceipt<DependencyErrors, String>,
manifest_dependencies_config: LockReceipt<DependencyConfig, (String, String)>,
docker_containers: LockReceipt<DockerContainers, String>,
}
impl ConfigReceipts {
pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
let mut locks = Vec::new();
let setup = Self::setup(&mut locks);
Ok(setup(&db.lock_all(locks).await?)?)
}
pub fn setup(locks: &mut Vec<LockTargetId>) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
let dependency_receipt = DependencyReceipt::setup(locks);
let config_receipts = ConfigPointerReceipts::setup(locks);
let update_dependency_receipts = UpdateDependencyReceipts::setup(locks);
let break_transitive_receipts = BreakTransitiveReceipts::setup(locks);
let try_heal_receipts = TryHealReceipts::setup(locks);
let configured: LockTarget<bool, String> = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.status().configured())
.make_locker(LockType::Write)
.add_to_keys(locks);
let config_actions = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.and_then(|x| x.manifest().config())
.make_locker(LockType::Read)
.add_to_keys(locks);
let dependencies = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.manifest().dependencies())
.make_locker(LockType::Read)
.add_to_keys(locks);
let volumes = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.manifest().volumes())
.make_locker(LockType::Read)
.add_to_keys(locks);
let version = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.manifest().version())
.make_locker(LockType::Read)
.add_to_keys(locks);
let manifest = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.manifest())
.make_locker(LockType::Read)
.add_to_keys(locks);
let system_pointers = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.system_pointers())
.make_locker(LockType::Write)
.add_to_keys(locks);
let current_dependents = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.current_dependents())
.make_locker(LockType::Write)
.add_to_keys(locks);
let current_dependencies = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.current_dependencies())
.make_locker(LockType::Write)
.add_to_keys(locks);
let dependency_errors = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.status().dependency_errors())
.make_locker(LockType::Write)
.add_to_keys(locks);
let manifest_dependencies_config = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.and_then(|x| x.manifest().dependencies().star().config())
.make_locker(LockType::Write)
.add_to_keys(locks);
let docker_containers = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.and_then(|x| x.manifest().containers())
.make_locker(LockType::Write)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
dependency_receipt: dependency_receipt(skeleton_key)?,
config_receipts: config_receipts(skeleton_key)?,
try_heal_receipts: try_heal_receipts(skeleton_key)?,
break_transitive_receipts: break_transitive_receipts(skeleton_key)?,
update_dependency_receipts: update_dependency_receipts(skeleton_key)?,
configured: configured.verify(skeleton_key)?,
config_actions: config_actions.verify(skeleton_key)?,
dependencies: dependencies.verify(skeleton_key)?,
volumes: volumes.verify(skeleton_key)?,
version: version.verify(skeleton_key)?,
manifest: manifest.verify(skeleton_key)?,
system_pointers: system_pointers.verify(skeleton_key)?,
current_dependents: current_dependents.verify(skeleton_key)?,
current_dependencies: current_dependencies.verify(skeleton_key)?,
dependency_errors: dependency_errors.verify(skeleton_key)?,
manifest_dependencies_config: manifest_dependencies_config.verify(skeleton_key)?,
docker_containers: docker_containers.verify(skeleton_key)?,
})
}
}
}
#[command(rename = "dry", display(display_serializable))] #[command(rename = "dry", display(display_serializable))]
#[instrument(skip(ctx))] #[instrument(skip_all)]
pub async fn set_dry( pub async fn set_dry(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[parent_data] (id, config, timeout): (PackageId, Option<Config>, Option<Duration>), #[parent_data] (id, config, timeout): (PackageId, Option<Config>, Option<Duration>),
) -> Result<BreakageRes, Error> { ) -> Result<BTreeMap<PackageId, String>, Error> {
let mut db = ctx.db.handle(); let breakages = BTreeMap::new();
let mut tx = db.begin().await?; let overrides = Default::default();
let mut breakages = BTreeMap::new();
let locks = ConfigReceipts::new(&mut tx).await?;
configure(
&ctx,
&mut tx,
&id,
config,
&timeout,
true,
&mut BTreeMap::new(),
&mut breakages,
&locks,
)
.await?;
locks.configured.set(&mut tx, true, &id).await?; let configure_context = ConfigureContext {
tx.abort().await?; breakages,
Ok(BreakageRes(breakages)) timeout,
config,
dry_run: true,
overrides,
};
let breakages = configure(&ctx, &id, configure_context).await?;
Ok(breakages)
} }
#[instrument(skip(ctx))] pub struct ConfigureContext {
pub breakages: BTreeMap<PackageId, String>,
pub timeout: Option<Duration>,
pub config: Option<Config>,
pub overrides: BTreeMap<PackageId, Config>,
pub dry_run: bool,
}
#[instrument(skip_all)]
pub async fn set_impl( pub async fn set_impl(
ctx: RpcContext, ctx: RpcContext,
(id, config, timeout): (PackageId, Option<Config>, Option<Duration>), (id, config, timeout): (PackageId, Option<Config>, Option<Duration>),
) -> Result<(), Error> { ) -> Result<(), Error> {
let mut db = ctx.db.handle(); let breakages = BTreeMap::new();
let mut tx = db.begin().await?; let overrides = Default::default();
let mut breakages = BTreeMap::new();
let locks = ConfigReceipts::new(&mut tx).await?; let configure_context = ConfigureContext {
configure( breakages,
&ctx, timeout,
&mut tx,
&id,
config, config,
&timeout, dry_run: false,
false, overrides,
&mut BTreeMap::new(), };
&mut breakages, configure(&ctx, &id, configure_context).await?;
&locks,
)
.await?;
tx.commit().await?;
Ok(()) Ok(())
} }
#[instrument(skip(ctx, db, receipts))] #[instrument(skip_all)]
pub async fn configure<'a, Db: DbHandle>( pub async fn configure(
ctx: &RpcContext, ctx: &RpcContext,
db: &'a mut Db,
id: &PackageId, id: &PackageId,
config: Option<Config>, configure_context: ConfigureContext,
timeout: &Option<Duration>, ) -> Result<BTreeMap<PackageId, String>, Error> {
dry_run: bool, let db = ctx.db.peek().await;
overrides: &mut BTreeMap<PackageId, Config>, let package = db
breakages: &mut BTreeMap<PackageId, TaggedDependencyError>, .as_package_data()
receipts: &ConfigReceipts, .as_idx(id)
) -> Result<(), Error> { .or_not_found(&id)?
configure_rec( .as_installed()
ctx, db, id, config, timeout, dry_run, overrides, breakages, receipts, .or_not_found(&id)?;
) let version = package.as_manifest().as_version().de()?;
.await?; ctx.managers
receipts.configured.set(db, true, &id).await?; .get(&(id.clone(), version.clone()))
Ok(()) .await
.ok_or_else(|| {
Error::new(
eyre!("There is no manager running for {id:?} and {version:?}"),
ErrorKind::Unknown,
)
})?
.configure(configure_context)
.await
} }
#[instrument(skip(ctx, db, receipts))] macro_rules! not_found {
pub fn configure_rec<'a, Db: DbHandle>( ($x:expr) => {
ctx: &'a RpcContext, crate::Error::new(
db: &'a mut Db, color_eyre::eyre::eyre!("Could not find {} at {}:{}", $x, module_path!(), line!()),
id: &'a PackageId, crate::ErrorKind::Incoherent,
config: Option<Config>,
timeout: &'a Option<Duration>,
dry_run: bool,
overrides: &'a mut BTreeMap<PackageId, Config>,
breakages: &'a mut BTreeMap<PackageId, TaggedDependencyError>,
receipts: &'a ConfigReceipts,
) -> BoxFuture<'a, Result<(), Error>> {
async move {
// fetch data from db
let action = receipts
.config_actions
.get(db, id)
.await?
.ok_or_else(not_found)?;
let dependencies = receipts
.dependencies
.get(db, id)
.await?
.ok_or_else(not_found)?;
let volumes = receipts.volumes.get(db, id).await?.ok_or_else(not_found)?;
let is_needs_config = !receipts
.configured
.get(db, id)
.await?
.ok_or_else(not_found)?;
let version = receipts.version.get(db, id).await?.ok_or_else(not_found)?;
// get current config and current spec
let ConfigRes {
config: old_config,
spec,
} = action.get(ctx, id, &version, &volumes).await?;
// determine new config to use
let mut config = if let Some(config) = config.or_else(|| old_config.clone()) {
config
} else {
spec.gen(&mut rand::rngs::StdRng::from_entropy(), timeout)?
};
let manifest = receipts.manifest.get(db, id).await?.ok_or_else(not_found)?;
spec.validate(&manifest)?;
spec.matches(&config)?; // check that new config matches spec
spec.update(
ctx,
db,
&manifest,
&*overrides,
&mut config,
&receipts.config_receipts,
) )
.await?; // dereference pointers in the new config };
// create backreferences to pointers
let mut sys = receipts
.system_pointers
.get(db, &id)
.await?
.ok_or_else(not_found)?;
sys.truncate(0);
let mut current_dependencies: CurrentDependencies = CurrentDependencies(
dependencies
.0
.iter()
.filter_map(|(id, info)| {
if info.requirement.required() {
Some((id.clone(), CurrentDependencyInfo::default()))
} else {
None
}
})
.collect(),
);
for ptr in spec.pointers(&config)? {
match ptr {
ValueSpecPointer::Package(pkg_ptr) => {
if let Some(current_dependency) =
current_dependencies.0.get_mut(pkg_ptr.package_id())
{
current_dependency.pointers.push(pkg_ptr);
} else {
current_dependencies.0.insert(
pkg_ptr.package_id().to_owned(),
CurrentDependencyInfo {
pointers: vec![pkg_ptr],
health_checks: BTreeSet::new(),
},
);
}
}
ValueSpecPointer::System(s) => sys.push(s),
}
}
receipts.system_pointers.set(db, sys, &id).await?;
let signal = if !dry_run {
// run config action
let res = action
.set(ctx, id, &version, &dependencies, &volumes, &config)
.await?;
// track dependencies with no pointers
for (package_id, health_checks) in res.depends_on.into_iter() {
if let Some(current_dependency) = current_dependencies.0.get_mut(&package_id) {
current_dependency.health_checks.extend(health_checks);
} else {
current_dependencies.0.insert(
package_id,
CurrentDependencyInfo {
pointers: Vec::new(),
health_checks,
},
);
}
}
// track dependency health checks
current_dependencies = current_dependencies.map(|x| {
x.into_iter()
.filter(|(dep_id, _)| {
if dep_id != id && !manifest.dependencies.0.contains_key(dep_id) {
tracing::warn!("Illegal dependency specified: {}", dep_id);
false
} else {
true
}
})
.collect()
});
res.signal
} else {
None
};
// update dependencies
let prev_current_dependencies = receipts
.current_dependencies
.get(db, &id)
.await?
.unwrap_or_default();
remove_from_current_dependents_lists(
db,
id,
&prev_current_dependencies,
&receipts.current_dependents,
)
.await?; // remove previous
add_dependent_to_current_dependents_lists(
db,
id,
&current_dependencies,
&receipts.current_dependents,
)
.await?; // add new
current_dependencies.0.remove(id);
receipts
.current_dependencies
.set(db, current_dependencies.clone(), &id)
.await?;
let errs = receipts
.dependency_errors
.get(db, &id)
.await?
.ok_or_else(not_found)?;
tracing::warn!("Dependency Errors: {:?}", errs);
let errs = DependencyErrors::init(
ctx,
db,
&manifest,
&current_dependencies,
&receipts.dependency_receipt.try_heal,
)
.await?;
receipts.dependency_errors.set(db, errs, &id).await?;
// cache current config for dependents
overrides.insert(id.clone(), config.clone());
// handle dependents
let dependents = receipts
.current_dependents
.get(db, id)
.await?
.ok_or_else(not_found)?;
let prev = if is_needs_config { None } else { old_config }
.map(Value::Object)
.unwrap_or_default();
let next = Value::Object(config.clone());
for (dependent, dep_info) in dependents.0.iter().filter(|(dep_id, _)| dep_id != &id) {
let dependent_container = receipts.docker_containers.get(db, &dependent).await?;
let dependent_container = &dependent_container;
// check if config passes dependent check
if let Some(cfg) = receipts
.manifest_dependencies_config
.get(db, (&dependent, &id))
.await?
{
let manifest = receipts
.manifest
.get(db, &dependent)
.await?
.ok_or_else(not_found)?;
if let Err(error) = cfg
.check(
ctx,
dependent_container,
dependent,
&manifest.version,
&manifest.volumes,
id,
&config,
)
.await?
{
let dep_err = DependencyError::ConfigUnsatisfied { error };
break_transitive(
db,
dependent,
id,
dep_err,
breakages,
&receipts.break_transitive_receipts,
)
.await?;
}
// handle backreferences
for ptr in &dep_info.pointers {
if let PackagePointerSpec::Config(cfg_ptr) = ptr {
if cfg_ptr.select(&next) != cfg_ptr.select(&prev) {
if let Err(e) = configure_rec(
ctx, db, dependent, None, timeout, dry_run, overrides, breakages,
receipts,
)
.await
{
if e.kind == crate::ErrorKind::ConfigRulesViolation {
break_transitive(
db,
dependent,
id,
DependencyError::ConfigUnsatisfied {
error: format!("{}", e),
},
breakages,
&receipts.break_transitive_receipts,
)
.await?;
} else {
return Err(e);
}
}
}
}
}
heal_all_dependents_transitive(ctx, db, id, &receipts.dependency_receipt).await?;
}
}
if let Some(signal) = signal {
match ctx.managers.get(&(id.clone(), version.clone())).await {
None => {
// in theory this should never happen, which indicates this function should be moved behind the
// Manager interface
return Err(Error::new(
eyre!("Manager Not Found for package being configured"),
crate::ErrorKind::Incoherent,
));
}
Some(m) => {
m.signal(&signal).await?;
}
}
}
Ok(())
}
.boxed()
}
#[instrument]
pub fn not_found() -> Error {
Error::new(eyre!("Could not find"), crate::ErrorKind::Incoherent)
}
/// We want to have a double check that the paths are what we expect them to be.
/// Found that earlier the paths where not what we expected them to be.
#[tokio::test]
async fn ensure_creation_of_config_paths_makes_sense() {
let mut fake = patch_db::test_utils::NoOpDb();
let config_locks = ConfigReceipts::new(&mut fake).await.unwrap();
assert_eq!(
&format!("{}", config_locks.configured.lock.glob),
"/package-data/*/installed/status/configured"
);
assert_eq!(
&format!("{}", config_locks.config_actions.lock.glob),
"/package-data/*/installed/manifest/config"
);
assert_eq!(
&format!("{}", config_locks.dependencies.lock.glob),
"/package-data/*/installed/manifest/dependencies"
);
assert_eq!(
&format!("{}", config_locks.volumes.lock.glob),
"/package-data/*/installed/manifest/volumes"
);
assert_eq!(
&format!("{}", config_locks.version.lock.glob),
"/package-data/*/installed/manifest/version"
);
assert_eq!(
&format!("{}", config_locks.volumes.lock.glob),
"/package-data/*/installed/manifest/volumes"
);
assert_eq!(
&format!("{}", config_locks.manifest.lock.glob),
"/package-data/*/installed/manifest"
);
assert_eq!(
&format!("{}", config_locks.manifest.lock.glob),
"/package-data/*/installed/manifest"
);
assert_eq!(
&format!("{}", config_locks.system_pointers.lock.glob),
"/package-data/*/installed/system-pointers"
);
assert_eq!(
&format!("{}", config_locks.current_dependents.lock.glob),
"/package-data/*/installed/current-dependents"
);
assert_eq!(
&format!("{}", config_locks.dependency_errors.lock.glob),
"/package-data/*/installed/status/dependency-errors"
);
assert_eq!(
&format!("{}", config_locks.manifest_dependencies_config.lock.glob),
"/package-data/*/installed/manifest/dependencies/*/config"
);
assert_eq!(
&format!("{}", config_locks.system_pointers.lock.glob),
"/package-data/*/installed/system-pointers"
);
} }
pub(crate) use not_found;

View File

@@ -1,4 +1,4 @@
use std::borrow::{Borrow, Cow}; use std::borrow::Cow;
use std::collections::{BTreeMap, BTreeSet}; use std::collections::{BTreeMap, BTreeSet};
use std::fmt; use std::fmt;
use std::fmt::Debug; use std::fmt::Debug;
@@ -9,15 +9,16 @@ use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use async_trait::async_trait; use async_trait::async_trait;
use imbl::Vector;
use imbl_value::InternedString;
use indexmap::{IndexMap, IndexSet}; use indexmap::{IndexMap, IndexSet};
use itertools::Itertools; use itertools::Itertools;
use jsonpath_lib::Compiled as CompiledJsonPath; use jsonpath_lib::Compiled as CompiledJsonPath;
use patch_db::{DbHandle, LockReceipt, LockType}; use patch_db::value::{Number, Value};
use rand::{CryptoRng, Rng}; use rand::{CryptoRng, Rng};
use regex::Regex; use regex::Regex;
use serde::de::{MapAccess, Visitor}; use serde::de::{MapAccess, Visitor};
use serde::{Deserialize, Deserializer, Serialize, Serializer}; use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_json::{Number, Value};
use sqlx::PgPool; use sqlx::PgPool;
use super::util::{self, CharSet, NumRange, UniqueBy, STATIC_NULL}; use super::util::{self, CharSet, NumRange, UniqueBy, STATIC_NULL};
@@ -26,8 +27,8 @@ use crate::config::ConfigurationError;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::net::interface::InterfaceId; use crate::net::interface::InterfaceId;
use crate::net::keys::Key; use crate::net::keys::Key;
use crate::prelude::*;
use crate::s9pk::manifest::{Manifest, PackageId}; use crate::s9pk::manifest::{Manifest, PackageId};
use crate::Error;
// Config Value Specifications // Config Value Specifications
#[async_trait] #[async_trait]
@@ -39,14 +40,12 @@ pub trait ValueSpec {
// since not all inVariant can be checked by the type // since not all inVariant can be checked by the type
fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath>; fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath>;
// update is to fill in values for environment pointers recursively // update is to fill in values for environment pointers recursively
async fn update<Db: DbHandle>( async fn update(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest, manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>, config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value, value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError>; ) -> Result<(), ConfigurationError>;
// returns all pointers that are live in the provided config // returns all pointers that are live in the provided config
fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath>; fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath>;
@@ -106,7 +105,7 @@ where
rng: &mut R, rng: &mut R,
timeout: &Option<Duration>, timeout: &Option<Duration>,
) -> Result<Value, Self::Error> { ) -> Result<Value, Self::Error> {
self.gen_with(self.default_spec().borrow(), rng, timeout) self.gen_with(self.default_spec(), rng, timeout)
} }
} }
@@ -156,17 +155,15 @@ where
fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath> { fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath> {
self.inner.validate(manifest) self.inner.validate(manifest)
} }
async fn update<Db: DbHandle>( async fn update(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest, manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>, config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value, value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> { ) -> Result<(), ConfigurationError> {
self.inner self.inner
.update(ctx, db, manifest, config_overrides, value, receipts) .update(ctx, manifest, config_overrides, value)
.await .await
} }
fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> { fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
@@ -201,17 +198,15 @@ where
fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath> { fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath> {
self.inner.validate(manifest) self.inner.validate(manifest)
} }
async fn update<Db: DbHandle>( async fn update(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest, manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>, config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value, value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> { ) -> Result<(), ConfigurationError> {
self.inner self.inner
.update(ctx, db, manifest, config_overrides, value, receipts) .update(ctx, manifest, config_overrides, value)
.await .await
} }
fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> { fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
@@ -279,17 +274,15 @@ where
fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath> { fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath> {
self.inner.validate(manifest) self.inner.validate(manifest)
} }
async fn update<Db: DbHandle>( async fn update(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest, manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>, config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value, value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> { ) -> Result<(), ConfigurationError> {
self.inner self.inner
.update(ctx, db, manifest, config_overrides, value, receipts) .update(ctx, manifest, config_overrides, value)
.await .await
} }
fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> { fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
@@ -394,48 +387,22 @@ impl ValueSpec for ValueSpecAny {
ValueSpecAny::Pointer(a) => a.validate(manifest), ValueSpecAny::Pointer(a) => a.validate(manifest),
} }
} }
async fn update<Db: DbHandle>( async fn update(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest, manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>, config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value, value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> { ) -> Result<(), ConfigurationError> {
match self { match self {
ValueSpecAny::Boolean(a) => { ValueSpecAny::Boolean(a) => a.update(ctx, manifest, config_overrides, value).await,
a.update(ctx, db, manifest, config_overrides, value, receipts) ValueSpecAny::Enum(a) => a.update(ctx, manifest, config_overrides, value).await,
.await ValueSpecAny::List(a) => a.update(ctx, manifest, config_overrides, value).await,
} ValueSpecAny::Number(a) => a.update(ctx, manifest, config_overrides, value).await,
ValueSpecAny::Enum(a) => { ValueSpecAny::Object(a) => a.update(ctx, manifest, config_overrides, value).await,
a.update(ctx, db, manifest, config_overrides, value, receipts) ValueSpecAny::String(a) => a.update(ctx, manifest, config_overrides, value).await,
.await ValueSpecAny::Union(a) => a.update(ctx, manifest, config_overrides, value).await,
} ValueSpecAny::Pointer(a) => a.update(ctx, manifest, config_overrides, value).await,
ValueSpecAny::List(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecAny::Number(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecAny::Object(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecAny::String(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecAny::Union(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecAny::Pointer(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
} }
} }
fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> { fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
@@ -513,14 +480,12 @@ impl ValueSpec for ValueSpecBoolean {
fn validate(&self, _manifest: &Manifest) -> Result<(), NoMatchWithPath> { fn validate(&self, _manifest: &Manifest) -> Result<(), NoMatchWithPath> {
Ok(()) Ok(())
} }
async fn update<Db: DbHandle>( async fn update(
&self, &self,
_ctx: &RpcContext, _ctx: &RpcContext,
_db: &mut Db,
_manifest: &Manifest, _manifest: &Manifest,
_config_overrides: &BTreeMap<PackageId, Config>, _config_overrides: &BTreeMap<PackageId, Config>,
_value: &mut Value, _value: &mut Value,
_receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> { ) -> Result<(), ConfigurationError> {
Ok(()) Ok(())
} }
@@ -584,7 +549,7 @@ impl ValueSpec for ValueSpecEnum {
fn matches(&self, val: &Value) -> Result<(), NoMatchWithPath> { fn matches(&self, val: &Value) -> Result<(), NoMatchWithPath> {
match val { match val {
Value::String(b) => { Value::String(b) => {
if self.values.contains(b) { if self.values.contains(&**b) {
Ok(()) Ok(())
} else { } else {
Err(NoMatchWithPath::new(MatchError::Enum( Err(NoMatchWithPath::new(MatchError::Enum(
@@ -603,14 +568,12 @@ impl ValueSpec for ValueSpecEnum {
fn validate(&self, _manifest: &Manifest) -> Result<(), NoMatchWithPath> { fn validate(&self, _manifest: &Manifest) -> Result<(), NoMatchWithPath> {
Ok(()) Ok(())
} }
async fn update<Db: DbHandle>( async fn update(
&self, &self,
_ctx: &RpcContext, _ctx: &RpcContext,
_db: &mut Db,
_manifest: &Manifest, _manifest: &Manifest,
_config_overrides: &BTreeMap<PackageId, Config>, _config_overrides: &BTreeMap<PackageId, Config>,
_value: &mut Value, _value: &mut Value,
_receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> { ) -> Result<(), ConfigurationError> {
Ok(()) Ok(())
} }
@@ -628,7 +591,7 @@ impl ValueSpec for ValueSpecEnum {
} }
} }
impl DefaultableWith for ValueSpecEnum { impl DefaultableWith for ValueSpecEnum {
type DefaultSpec = String; type DefaultSpec = Arc<String>;
type Error = crate::util::Never; type Error = crate::util::Never;
fn gen_with<R: Rng + CryptoRng + Sync + Send + Send>( fn gen_with<R: Rng + CryptoRng + Sync + Send + Send>(
@@ -666,13 +629,13 @@ where
.map(|(i, v)| { .map(|(i, v)| {
self.spec self.spec
.matches(v) .matches(v)
.map_err(|e| e.prepend(format!("{}", i)))?; .map_err(|e| e.prepend(InternedString::from_display(&i)))?;
if l.iter() if l.iter()
.enumerate() .enumerate()
.any(|(i2, v2)| i != i2 && self.spec.eq(v, v2)) .any(|(i2, v2)| i != i2 && self.spec.eq(v, v2))
{ {
Err(NoMatchWithPath::new(MatchError::ListUniquenessViolation) Err(NoMatchWithPath::new(MatchError::ListUniquenessViolation)
.prepend(format!("{}", i))) .prepend(InternedString::from_display(&i)))
} else { } else {
Ok(()) Ok(())
} }
@@ -690,25 +653,19 @@ where
fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath> { fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath> {
self.spec.validate(manifest) self.spec.validate(manifest)
} }
async fn update<Db: DbHandle>( async fn update(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest, manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>, config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value, value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> { ) -> Result<(), ConfigurationError> {
if let Value::Array(ref mut ls) = value { if let Value::Array(ref mut ls) = value {
for (i, val) in ls.into_iter().enumerate() { for (i, val) in ls.iter_mut().enumerate() {
match self match self.spec.update(ctx, manifest, config_overrides, val).await {
.spec Err(ConfigurationError::NoMatch(e)) => Err(ConfigurationError::NoMatch(
.update(ctx, db, manifest, config_overrides, val, receipts) e.prepend(InternedString::from_display(&i)),
.await )),
{
Err(ConfigurationError::NoMatch(e)) => {
Err(ConfigurationError::NoMatch(e.prepend(format!("{}", i))))
}
a => a, a => a,
}?; }?;
} }
@@ -755,9 +712,9 @@ where
rng: &mut R, rng: &mut R,
timeout: &Option<Duration>, timeout: &Option<Duration>,
) -> Result<Value, Self::Error> { ) -> Result<Value, Self::Error> {
let mut res = Vec::new(); let mut res = Vector::new();
for spec_member in spec.iter() { for spec_member in spec.iter() {
res.push(self.spec.gen_with(spec_member, rng, timeout)?); res.push_back(self.spec.gen_with(spec_member, rng, timeout)?);
} }
Ok(Value::Array(res)) Ok(Value::Array(res))
} }
@@ -798,36 +755,19 @@ impl ValueSpec for ValueSpecList {
ValueSpecList::Union(a) => a.validate(manifest), ValueSpecList::Union(a) => a.validate(manifest),
} }
} }
async fn update<Db: DbHandle>( async fn update(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest, manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>, config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value, value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> { ) -> Result<(), ConfigurationError> {
match self { match self {
ValueSpecList::Enum(a) => { ValueSpecList::Enum(a) => a.update(ctx, manifest, config_overrides, value).await,
a.update(ctx, db, manifest, config_overrides, value, receipts) ValueSpecList::Number(a) => a.update(ctx, manifest, config_overrides, value).await,
.await ValueSpecList::Object(a) => a.update(ctx, manifest, config_overrides, value).await,
} ValueSpecList::String(a) => a.update(ctx, manifest, config_overrides, value).await,
ValueSpecList::Number(a) => { ValueSpecList::Union(a) => a.update(ctx, manifest, config_overrides, value).await,
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecList::Object(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecList::String(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
ValueSpecList::Union(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
} }
} }
fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> { fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
@@ -885,7 +825,7 @@ impl Defaultable for ValueSpecList {
) )
.contains(&ret.len()) .contains(&ret.len())
{ {
ret.push( ret.push_back(
a.inner a.inner
.inner .inner
.spec .spec
@@ -941,14 +881,12 @@ impl ValueSpec for ValueSpecNumber {
fn validate(&self, _manifest: &Manifest) -> Result<(), NoMatchWithPath> { fn validate(&self, _manifest: &Manifest) -> Result<(), NoMatchWithPath> {
Ok(()) Ok(())
} }
async fn update<Db: DbHandle>( async fn update(
&self, &self,
_ctx: &RpcContext, _ctx: &RpcContext,
_db: &mut Db,
_manifest: &Manifest, _manifest: &Manifest,
_config_overrides: &BTreeMap<PackageId, Config>, _config_overrides: &BTreeMap<PackageId, Config>,
_value: &mut Value, _value: &mut Value,
_receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> { ) -> Result<(), ConfigurationError> {
Ok(()) Ok(())
} }
@@ -1005,19 +943,15 @@ impl ValueSpec for ValueSpecObject {
fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath> { fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath> {
self.spec.validate(manifest) self.spec.validate(manifest)
} }
async fn update<Db: DbHandle>( async fn update(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest, manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>, config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value, value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> { ) -> Result<(), ConfigurationError> {
if let Value::Object(o) = value { if let Value::Object(o) = value {
self.spec self.spec.update(ctx, manifest, config_overrides, o).await
.update(ctx, db, manifest, config_overrides, o, receipts)
.await
} else { } else {
Err(ConfigurationError::NoMatch(NoMatchWithPath::new( Err(ConfigurationError::NoMatch(NoMatchWithPath::new(
MatchError::InvalidType("object", value.type_of()), MatchError::InvalidType("object", value.type_of()),
@@ -1074,11 +1008,11 @@ impl Defaultable for ValueSpecObject {
} }
#[derive(Clone, Debug, Default, Serialize, Deserialize)] #[derive(Clone, Debug, Default, Serialize, Deserialize)]
pub struct ConfigSpec(pub IndexMap<String, ValueSpecAny>); pub struct ConfigSpec(pub IndexMap<InternedString, ValueSpecAny>);
impl ConfigSpec { impl ConfigSpec {
pub fn matches(&self, value: &Config) -> Result<(), NoMatchWithPath> { pub fn matches(&self, value: &Config) -> Result<(), NoMatchWithPath> {
for (key, val) in self.0.iter() { for (key, val) in self.0.iter() {
if let Some(v) = value.get(key) { if let Some(v) = value.get(&**key) {
val.matches(v).map_err(|e| e.prepend(key.clone()))?; val.matches(v).map_err(|e| e.prepend(key.clone()))?;
} else { } else {
val.matches(&Value::Null) val.matches(&Value::Null)
@@ -1108,27 +1042,21 @@ impl ConfigSpec {
Ok(()) Ok(())
} }
pub async fn update<Db: DbHandle>( pub async fn update(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest, manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>, config_overrides: &BTreeMap<PackageId, Config>,
cfg: &mut Config, cfg: &mut Config,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> { ) -> Result<(), ConfigurationError> {
for (k, vs) in self.0.iter() { for (k, vs) in self.0.iter() {
match cfg.get_mut(k) { match cfg.get_mut(k) {
None => { None => {
let mut v = Value::Null; let mut v = Value::Null;
vs.update(ctx, db, manifest, config_overrides, &mut v, receipts) vs.update(ctx, manifest, config_overrides, &mut v).await?;
.await?;
cfg.insert(k.clone(), v); cfg.insert(k.clone(), v);
} }
Some(v) => match vs Some(v) => match vs.update(ctx, manifest, config_overrides, v).await {
.update(ctx, db, manifest, config_overrides, v, receipts)
.await
{
Err(ConfigurationError::NoMatch(e)) => { Err(ConfigurationError::NoMatch(e)) => {
Err(ConfigurationError::NoMatch(e.prepend(k.clone()))) Err(ConfigurationError::NoMatch(e.prepend(k.clone())))
} }
@@ -1247,7 +1175,7 @@ impl<'de> Deserialize<'de> for ValueSpecString {
}) })
} }
} }
const FIELDS: &'static [&'static str] = &[ const FIELDS: &[&str] = &[
"pattern", "pattern",
"pattern-description", "pattern-description",
"textarea", "textarea",
@@ -1268,7 +1196,7 @@ impl ValueSpec for ValueSpecString {
Ok(()) Ok(())
} else { } else {
Err(NoMatchWithPath::new(MatchError::Pattern( Err(NoMatchWithPath::new(MatchError::Pattern(
s.to_owned(), s.clone(),
pattern.pattern.clone(), pattern.pattern.clone(),
))) )))
} }
@@ -1286,14 +1214,12 @@ impl ValueSpec for ValueSpecString {
fn validate(&self, _manifest: &Manifest) -> Result<(), NoMatchWithPath> { fn validate(&self, _manifest: &Manifest) -> Result<(), NoMatchWithPath> {
Ok(()) Ok(())
} }
async fn update<Db: DbHandle>( async fn update(
&self, &self,
_ctx: &RpcContext, _ctx: &RpcContext,
_db: &mut Db,
_manifest: &Manifest, _manifest: &Manifest,
_config_overrides: &BTreeMap<PackageId, Config>, _config_overrides: &BTreeMap<PackageId, Config>,
_value: &mut Value, _value: &mut Value,
_receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> { ) -> Result<(), ConfigurationError> {
Ok(()) Ok(())
} }
@@ -1352,11 +1278,11 @@ pub enum DefaultString {
Entropy(Entropy), Entropy(Entropy),
} }
impl DefaultString { impl DefaultString {
pub fn gen<R: Rng + CryptoRng + Sync + Send>(&self, rng: &mut R) -> String { pub fn gen<R: Rng + CryptoRng + Sync + Send>(&self, rng: &mut R) -> Arc<String> {
match self { Arc::new(match self {
DefaultString::Literal(s) => s.clone(), DefaultString::Literal(s) => s.clone(),
DefaultString::Entropy(e) => e.gen(rng), DefaultString::Entropy(e) => e.gen(rng),
} })
} }
} }
@@ -1380,7 +1306,7 @@ impl Entropy {
#[derive(Clone, Debug, Serialize, Deserialize)] #[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
pub struct UnionTag { pub struct UnionTag {
pub id: String, pub id: InternedString,
pub name: String, pub name: String,
pub description: Option<String>, pub description: Option<String>,
pub variant_names: BTreeMap<String, String>, pub variant_names: BTreeMap<String, String>,
@@ -1401,7 +1327,7 @@ impl<'de> serde::de::Deserialize<'de> for ValueSpecUnion {
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
#[serde(untagged)] #[serde(untagged)]
pub enum _UnionTag { pub enum _UnionTag {
Old(String), Old(InternedString),
New(UnionTag), New(UnionTag),
} }
#[derive(Deserialize)] #[derive(Deserialize)]
@@ -1419,7 +1345,7 @@ impl<'de> serde::de::Deserialize<'de> for ValueSpecUnion {
tag: match u.tag { tag: match u.tag {
_UnionTag::Old(id) => UnionTag { _UnionTag::Old(id) => UnionTag {
id: id.clone(), id: id.clone(),
name: id, name: id.to_string(),
description: None, description: None,
variant_names: u variant_names: u
.variants .variants
@@ -1461,10 +1387,10 @@ impl ValueSpec for ValueSpecUnion {
fn matches(&self, value: &Value) -> Result<(), NoMatchWithPath> { fn matches(&self, value: &Value) -> Result<(), NoMatchWithPath> {
match value { match value {
Value::Object(o) => { Value::Object(o) => {
if let Some(Value::String(ref tag)) = o.get(&self.tag.id) { if let Some(Value::String(ref tag)) = o.get(&*self.tag.id) {
if let Some(obj_spec) = self.variants.get(tag) { if let Some(obj_spec) = self.variants.get(&**tag) {
let mut without_tag = o.clone(); let mut without_tag = o.clone();
without_tag.remove(&self.tag.id); without_tag.remove(&*self.tag.id);
obj_spec.matches(&without_tag) obj_spec.matches(&without_tag)
} else { } else {
Err(NoMatchWithPath::new(MatchError::Union( Err(NoMatchWithPath::new(MatchError::Union(
@@ -1487,7 +1413,7 @@ impl ValueSpec for ValueSpecUnion {
} }
fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath> { fn validate(&self, manifest: &Manifest) -> Result<(), NoMatchWithPath> {
for (name, variant) in &self.variants { for (name, variant) in &self.variants {
if variant.0.get(&self.tag.id).is_some() { if variant.0.get(&*self.tag.id).is_some() {
return Err(NoMatchWithPath::new(MatchError::PropertyMatchesUnionTag( return Err(NoMatchWithPath::new(MatchError::PropertyMatchesUnionTag(
self.tag.id.clone(), self.tag.id.clone(),
name.clone(), name.clone(),
@@ -1497,28 +1423,23 @@ impl ValueSpec for ValueSpecUnion {
} }
Ok(()) Ok(())
} }
async fn update<Db: DbHandle>( async fn update(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest, manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>, config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value, value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> { ) -> Result<(), ConfigurationError> {
if let Value::Object(o) = value { if let Value::Object(o) = value {
match o.get(&self.tag.id) { match o.get(&*self.tag.id) {
None => Err(ConfigurationError::NoMatch(NoMatchWithPath::new( None => Err(ConfigurationError::NoMatch(NoMatchWithPath::new(
MatchError::MissingTag(self.tag.id.clone()), MatchError::MissingTag(self.tag.id.clone()),
))), ))),
Some(Value::String(tag)) => match self.variants.get(tag) { Some(Value::String(tag)) => match self.variants.get(&**tag) {
None => Err(ConfigurationError::NoMatch(NoMatchWithPath::new( None => Err(ConfigurationError::NoMatch(NoMatchWithPath::new(
MatchError::Union(tag.clone(), self.variants.keys().cloned().collect()), MatchError::Union(tag.clone(), self.variants.keys().cloned().collect()),
))), ))),
Some(spec) => { Some(spec) => spec.update(ctx, manifest, config_overrides, o).await,
spec.update(ctx, db, manifest, config_overrides, o, receipts)
.await
}
}, },
Some(other) => Err(ConfigurationError::NoMatch( Some(other) => Err(ConfigurationError::NoMatch(
NoMatchWithPath::new(MatchError::InvalidType("string", other.type_of())) NoMatchWithPath::new(MatchError::InvalidType("string", other.type_of()))
@@ -1533,11 +1454,11 @@ impl ValueSpec for ValueSpecUnion {
} }
fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> { fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
if let Value::Object(o) = value { if let Value::Object(o) = value {
match o.get(&self.tag.id) { match o.get(&*self.tag.id) {
None => Err(NoMatchWithPath::new(MatchError::MissingTag( None => Err(NoMatchWithPath::new(MatchError::MissingTag(
self.tag.id.clone(), self.tag.id.clone(),
))), ))),
Some(Value::String(tag)) => match self.variants.get(tag) { Some(Value::String(tag)) => match self.variants.get(&**tag) {
None => Err(NoMatchWithPath::new(MatchError::Union( None => Err(NoMatchWithPath::new(MatchError::Union(
tag.clone(), tag.clone(),
self.variants.keys().cloned().collect(), self.variants.keys().cloned().collect(),
@@ -1559,8 +1480,8 @@ impl ValueSpec for ValueSpecUnion {
} }
fn requires(&self, id: &PackageId, value: &Value) -> bool { fn requires(&self, id: &PackageId, value: &Value) -> bool {
if let Value::Object(o) = value { if let Value::Object(o) = value {
match o.get(&self.tag.id) { match o.get(&*self.tag.id) {
Some(Value::String(tag)) => match self.variants.get(tag) { Some(Value::String(tag)) => match self.variants.get(&**tag) {
None => false, None => false,
Some(spec) => spec.requires(id, o), Some(spec) => spec.requires(id, o),
}, },
@@ -1578,7 +1499,7 @@ impl ValueSpec for ValueSpecUnion {
} }
} }
impl DefaultableWith for ValueSpecUnion { impl DefaultableWith for ValueSpecUnion {
type DefaultSpec = String; type DefaultSpec = Arc<String>;
type Error = ConfigurationError; type Error = ConfigurationError;
fn gen_with<R: Rng + CryptoRng + Sync + Send>( fn gen_with<R: Rng + CryptoRng + Sync + Send>(
@@ -1587,7 +1508,7 @@ impl DefaultableWith for ValueSpecUnion {
rng: &mut R, rng: &mut R,
timeout: &Option<Duration>, timeout: &Option<Duration>,
) -> Result<Value, Self::Error> { ) -> Result<Value, Self::Error> {
let variant = if let Some(v) = self.variants.get(spec) { let variant = if let Some(v) = self.variants.get(&**spec) {
v v
} else { } else {
return Err(ConfigurationError::NoMatch(NoMatchWithPath::new( return Err(ConfigurationError::NoMatch(NoMatchWithPath::new(
@@ -1643,24 +1564,16 @@ impl ValueSpec for ValueSpecPointer {
ValueSpecPointer::System(a) => a.validate(manifest), ValueSpecPointer::System(a) => a.validate(manifest),
} }
} }
async fn update<Db: DbHandle>( async fn update(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest, manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>, config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value, value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> { ) -> Result<(), ConfigurationError> {
match self { match self {
ValueSpecPointer::Package(a) => { ValueSpecPointer::Package(a) => a.update(ctx, manifest, config_overrides, value).await,
a.update(ctx, db, manifest, config_overrides, value, receipts) ValueSpecPointer::System(a) => a.update(ctx, manifest, config_overrides, value).await,
.await
}
ValueSpecPointer::System(a) => {
a.update(ctx, db, manifest, config_overrides, value, receipts)
.await
}
} }
} }
fn pointers(&self, _value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> { fn pointers(&self, _value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
@@ -1697,23 +1610,17 @@ impl PackagePointerSpec {
PackagePointerSpec::Config(ConfigPointer { package_id, .. }) => package_id, PackagePointerSpec::Config(ConfigPointer { package_id, .. }) => package_id,
} }
} }
async fn deref<Db: DbHandle>( async fn deref(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest, manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>, config_overrides: &BTreeMap<PackageId, Config>,
receipts: &ConfigPointerReceipts,
) -> Result<Value, ConfigurationError> { ) -> Result<Value, ConfigurationError> {
match &self { match &self {
PackagePointerSpec::TorKey(key) => key.deref(&manifest.id, &ctx.secret_store).await, PackagePointerSpec::TorKey(key) => key.deref(&manifest.id, &ctx.secret_store).await,
PackagePointerSpec::TorAddress(tor) => { PackagePointerSpec::TorAddress(tor) => tor.deref(ctx).await,
tor.deref(db, &receipts.interface_addresses_receipt).await PackagePointerSpec::LanAddress(lan) => lan.deref(ctx).await,
} PackagePointerSpec::Config(cfg) => cfg.deref(ctx, config_overrides).await,
PackagePointerSpec::LanAddress(lan) => {
lan.deref(db, &receipts.interface_addresses_receipt).await
}
PackagePointerSpec::Config(cfg) => cfg.deref(ctx, db, config_overrides, receipts).await,
} }
} }
} }
@@ -1754,18 +1661,14 @@ impl ValueSpec for PackagePointerSpec {
_ => Ok(()), _ => Ok(()),
} }
} }
async fn update<Db: DbHandle>( async fn update(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest, manifest: &Manifest,
config_overrides: &BTreeMap<PackageId, Config>, config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value, value: &mut Value,
receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> { ) -> Result<(), ConfigurationError> {
*value = self *value = self.deref(ctx, manifest, config_overrides).await?;
.deref(ctx, db, manifest, config_overrides, receipts)
.await?;
Ok(()) Ok(())
} }
fn pointers(&self, _value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> { fn pointers(&self, _value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
@@ -1788,18 +1691,19 @@ pub struct TorAddressPointer {
interface: InterfaceId, interface: InterfaceId,
} }
impl TorAddressPointer { impl TorAddressPointer {
async fn deref<Db: DbHandle>( async fn deref(&self, ctx: &RpcContext) -> Result<Value, ConfigurationError> {
&self, let addr = ctx
db: &mut Db, .db
receipt: &InterfaceAddressesReceipt, .peek()
) -> Result<Value, ConfigurationError> {
let addr = receipt
.interface_addresses
.get(db, (&self.package_id, &self.interface))
.await .await
.map_err(|e| ConfigurationError::SystemError(Error::from(e)))? .as_package_data()
.and_then(|addresses| addresses.tor_address); .as_idx(&self.package_id)
Ok(addr.to_owned().map(Value::String).unwrap_or(Value::Null)) .and_then(|pde| pde.as_installed())
.and_then(|i| i.as_interface_addresses().as_idx(&self.interface))
.and_then(|a| a.as_tor_address().de().transpose())
.transpose()
.map_err(|e| ConfigurationError::SystemError(e))?;
Ok(addr.map(Arc::new).map(Value::String).unwrap_or(Value::Null))
} }
} }
impl fmt::Display for TorAddressPointer { impl fmt::Display for TorAddressPointer {
@@ -1813,39 +1717,6 @@ impl fmt::Display for TorAddressPointer {
} }
} }
pub struct InterfaceAddressesReceipt {
interface_addresses: LockReceipt<crate::db::model::InterfaceAddresses, (String, String)>,
}
impl InterfaceAddressesReceipt {
pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
let mut locks = Vec::new();
let setup = Self::setup(&mut locks);
Ok(setup(&db.lock_all(locks).await?)?)
}
pub fn setup(
locks: &mut Vec<patch_db::LockTargetId>,
) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
// let cleanup_receipts = CleanupFailedReceipts::setup(locks);
let interface_addresses = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.interface_addresses().star())
.make_locker(LockType::Read)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
// cleanup_receipts: cleanup_receipts(skeleton_key)?,
interface_addresses: interface_addresses.verify(skeleton_key)?,
})
}
}
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
pub struct LanAddressPointer { pub struct LanAddressPointer {
@@ -1862,73 +1733,26 @@ impl fmt::Display for LanAddressPointer {
} }
} }
impl LanAddressPointer { impl LanAddressPointer {
async fn deref<Db: DbHandle>( async fn deref(&self, ctx: &RpcContext) -> Result<Value, ConfigurationError> {
&self, let addr = ctx
db: &mut Db, .db
receipts: &InterfaceAddressesReceipt, .peek()
) -> Result<Value, ConfigurationError> {
let addr = receipts
.interface_addresses
.get(db, (&self.package_id, &self.interface))
.await .await
.ok() .as_package_data()
.flatten() .as_idx(&self.package_id)
.and_then(|x| x.lan_address); .and_then(|pde| pde.as_installed())
Ok(addr.to_owned().map(Value::String).unwrap_or(Value::Null)) .and_then(|i| i.as_interface_addresses().as_idx(&self.interface))
.and_then(|a| a.as_lan_address().de().transpose())
.transpose()
.map_err(|e| ConfigurationError::SystemError(e))?;
Ok(addr
.to_owned()
.map(Arc::new)
.map(Value::String)
.unwrap_or(Value::Null))
} }
} }
pub struct ConfigPointerReceipts {
interface_addresses_receipt: InterfaceAddressesReceipt,
manifest_volumes: LockReceipt<crate::volume::Volumes, String>,
manifest_version: LockReceipt<crate::util::Version, String>,
config_actions: LockReceipt<super::action::ConfigActions, String>,
}
impl ConfigPointerReceipts {
pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
let mut locks = Vec::new();
let setup = Self::setup(&mut locks);
Ok(setup(&db.lock_all(locks).await?)?)
}
pub fn setup(
locks: &mut Vec<patch_db::LockTargetId>,
) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
let interface_addresses_receipt = InterfaceAddressesReceipt::setup(locks);
let manifest_volumes = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.manifest().volumes())
.make_locker(LockType::Read)
.add_to_keys(locks);
let manifest_version = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.manifest().version())
.make_locker(LockType::Read)
.add_to_keys(locks);
let config_actions = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.and_then(|x| x.manifest().config())
.make_locker(LockType::Read)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
interface_addresses_receipt: interface_addresses_receipt(skeleton_key)?,
manifest_volumes: manifest_volumes.verify(skeleton_key)?,
config_actions: config_actions.verify(skeleton_key)?,
manifest_version: manifest_version.verify(skeleton_key)?,
})
}
}
}
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
pub struct ConfigPointer { pub struct ConfigPointer {
@@ -1940,25 +1764,34 @@ impl ConfigPointer {
pub fn select(&self, val: &Value) -> Value { pub fn select(&self, val: &Value) -> Value {
self.selector.select(self.multi, val) self.selector.select(self.multi, val)
} }
async fn deref<Db: DbHandle>( async fn deref(
&self, &self,
ctx: &RpcContext, ctx: &RpcContext,
db: &mut Db,
config_overrides: &BTreeMap<PackageId, Config>, config_overrides: &BTreeMap<PackageId, Config>,
receipts: &ConfigPointerReceipts,
) -> Result<Value, ConfigurationError> { ) -> Result<Value, ConfigurationError> {
if let Some(cfg) = config_overrides.get(&self.package_id) { if let Some(cfg) = config_overrides.get(&self.package_id) {
Ok(self.select(&Value::Object(cfg.clone()))) Ok(self.select(&Value::Object(cfg.clone())))
} else { } else {
let id = &self.package_id; let id = &self.package_id;
let version = receipts.manifest_version.get(db, id).await.ok().flatten(); let db = ctx.db.peek().await;
let cfg_actions = receipts.config_actions.get(db, id).await.ok().flatten(); let manifest = db.as_package_data().as_idx(id).map(|pde| pde.as_manifest());
let volumes = receipts.manifest_volumes.get(db, id).await.ok().flatten(); let cfg_actions = manifest.and_then(|m| m.as_config().transpose_ref());
if let (Some(version), Some(cfg_actions), Some(volumes)) = if let (Some(manifest), Some(cfg_actions)) = (manifest, cfg_actions) {
(&version, &cfg_actions, &volumes)
{
let cfg_res = cfg_actions let cfg_res = cfg_actions
.get(ctx, &self.package_id, version, volumes) .de()
.map_err(|e| ConfigurationError::SystemError(e))?
.get(
ctx,
&self.package_id,
&manifest
.as_version()
.de()
.map_err(|e| ConfigurationError::SystemError(e))?,
&manifest
.as_volumes()
.de()
.map_err(|e| ConfigurationError::SystemError(e))?,
)
.await .await
.map_err(|e| ConfigurationError::SystemError(e))?; .map_err(|e| ConfigurationError::SystemError(e))?;
if let Some(cfg) = cfg_res.config { if let Some(cfg) = cfg_res.config {
@@ -1990,7 +1823,7 @@ pub struct ConfigSelector {
} }
impl ConfigSelector { impl ConfigSelector {
fn select(&self, multi: bool, val: &Value) -> Value { fn select(&self, multi: bool, val: &Value) -> Value {
let selected = self.compiled.select(&val).ok().unwrap_or_else(Vec::new); let selected = self.compiled.select(&val).ok().unwrap_or_else(Vector::new);
if multi { if multi {
Value::Array(selected.into_iter().cloned().collect()) Value::Array(selected.into_iter().cloned().collect())
} else { } else {
@@ -2061,18 +1894,19 @@ impl TorKeyPointer {
)); ));
} }
let key = Key::for_interface( let key = Key::for_interface(
&mut secrets secrets
.acquire() .acquire()
.await .await
.map_err(|e| ConfigurationError::SystemError(e.into()))?, .map_err(|e| ConfigurationError::SystemError(e.into()))?
.as_mut(),
Some((self.package_id.clone(), self.interface.clone())), Some((self.package_id.clone(), self.interface.clone())),
) )
.await .await
.map_err(ConfigurationError::SystemError)?; .map_err(ConfigurationError::SystemError)?;
Ok(Value::String(base32::encode( Ok(Value::String(Arc::new(base32::encode(
base32::Alphabet::RFC4648 { padding: false }, base32::Alphabet::RFC4648 { padding: false },
&key.tor_key().as_bytes(), &key.tor_key().as_bytes(),
))) ))))
} }
} }
impl fmt::Display for TorKeyPointer { impl fmt::Display for TorKeyPointer {
@@ -2092,7 +1926,7 @@ impl fmt::Display for SystemPointerSpec {
} }
} }
impl SystemPointerSpec { impl SystemPointerSpec {
async fn deref<Db: DbHandle>(&self, _db: &mut Db) -> Result<Value, ConfigurationError> { async fn deref(&self, _ctx: &RpcContext) -> Result<Value, ConfigurationError> {
#[allow(unreachable_code)] #[allow(unreachable_code)]
Ok(match *self {}) Ok(match *self {})
} }
@@ -2115,17 +1949,14 @@ impl ValueSpec for SystemPointerSpec {
fn validate(&self, _manifest: &Manifest) -> Result<(), NoMatchWithPath> { fn validate(&self, _manifest: &Manifest) -> Result<(), NoMatchWithPath> {
Ok(()) Ok(())
} }
async fn update<Db: DbHandle>( async fn update(
&self, &self,
_ctx: &RpcContext, ctx: &RpcContext,
db: &mut Db,
_manifest: &Manifest, _manifest: &Manifest,
_config_overrides: &BTreeMap<PackageId, Config>, _config_overrides: &BTreeMap<PackageId, Config>,
value: &mut Value, value: &mut Value,
_receipts: &ConfigPointerReceipts,
) -> Result<(), ConfigurationError> { ) -> Result<(), ConfigurationError> {
*value = self.deref(db).await?; *value = self.deref(ctx).await?;
Ok(()) Ok(())
} }
fn pointers(&self, _value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> { fn pointers(&self, _value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {

View File

@@ -1,9 +1,9 @@
use std::borrow::Cow; use std::borrow::Cow;
use std::ops::{Bound, RangeBounds, RangeInclusive}; use std::ops::{Bound, RangeBounds, RangeInclusive};
use patch_db::Value;
use rand::distributions::Distribution; use rand::distributions::Distribution;
use rand::Rng; use rand::Rng;
use serde_json::Value;
use super::Config; use super::Config;
@@ -321,7 +321,7 @@ impl UniqueBy {
match self { match self {
UniqueBy::Any(any) => any.iter().any(|u| u.eq(lhs, rhs)), UniqueBy::Any(any) => any.iter().any(|u| u.eq(lhs, rhs)),
UniqueBy::All(all) => all.iter().all(|u| u.eq(lhs, rhs)), UniqueBy::All(all) => all.iter().all(|u| u.eq(lhs, rhs)),
UniqueBy::Exactly(key) => lhs.get(key) == rhs.get(key), UniqueBy::Exactly(key) => lhs.get(&**key) == rhs.get(&**key),
UniqueBy::NotUnique => false, UniqueBy::NotUnique => false,
} }
} }

View File

@@ -6,8 +6,7 @@ use std::sync::Arc;
use clap::ArgMatches; use clap::ArgMatches;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use cookie::Cookie; use cookie_store::{CookieStore, RawCookie};
use cookie_store::CookieStore;
use josekit::jwk::Jwk; use josekit::jwk::Jwk;
use reqwest::Proxy; use reqwest::Proxy;
use reqwest_cookie_store::CookieStoreMutex; use reqwest_cookie_store::CookieStoreMutex;
@@ -17,12 +16,11 @@ use rpc_toolkit::Context;
use serde::Deserialize; use serde::Deserialize;
use tracing::instrument; use tracing::instrument;
use super::setup::CURRENT_SECRET;
use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH; use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH;
use crate::util::config::{load_config_from_paths, local_config_path}; use crate::util::config::{load_config_from_paths, local_config_path};
use crate::ResultExt; use crate::ResultExt;
use super::setup::CURRENT_SECRET;
#[derive(Debug, Default, Deserialize)] #[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
pub struct CliContextConfig { pub struct CliContextConfig {
@@ -54,7 +52,8 @@ impl Drop for CliContextSeed {
true, true,
) )
.unwrap(); .unwrap();
let store = self.cookie_store.lock().unwrap(); let mut store = self.cookie_store.lock().unwrap();
store.remove("localhost", "", "local");
store.save_json(&mut *writer).unwrap(); store.save_json(&mut *writer).unwrap();
writer.sync_all().unwrap(); writer.sync_all().unwrap();
std::fs::rename(tmp, &self.cookie_path).unwrap(); std::fs::rename(tmp, &self.cookie_path).unwrap();
@@ -68,7 +67,7 @@ const DEFAULT_PORT: u16 = 5959;
pub struct CliContext(Arc<CliContextSeed>); pub struct CliContext(Arc<CliContextSeed>);
impl CliContext { impl CliContext {
/// BLOCKING /// BLOCKING
#[instrument(skip(matches))] #[instrument(skip_all)]
pub fn init(matches: &ArgMatches) -> Result<Self, crate::Error> { pub fn init(matches: &ArgMatches) -> Result<Self, crate::Error> {
let local_config_path = local_config_path(); let local_config_path = local_config_path();
let base: CliContextConfig = load_config_from_paths( let base: CliContextConfig = load_config_from_paths(
@@ -101,19 +100,25 @@ impl CliContext {
.unwrap_or(Path::new("/")) .unwrap_or(Path::new("/"))
.join(".cookies.json") .join(".cookies.json")
}); });
let cookie_store = Arc::new(CookieStoreMutex::new(if cookie_path.exists() { let cookie_store = Arc::new(CookieStoreMutex::new({
let mut store = CookieStore::load_json(BufReader::new(File::open(&cookie_path)?)) let mut store = if cookie_path.exists() {
.map_err(|e| eyre!("{}", e)) CookieStore::load_json(BufReader::new(File::open(&cookie_path)?))
.with_kind(crate::ErrorKind::Deserialization)?; .map_err(|e| eyre!("{}", e))
.with_kind(crate::ErrorKind::Deserialization)?
} else {
CookieStore::default()
};
if let Ok(local) = std::fs::read_to_string(LOCAL_AUTH_COOKIE_PATH) { if let Ok(local) = std::fs::read_to_string(LOCAL_AUTH_COOKIE_PATH) {
store store
.insert_raw(&Cookie::new("local", local), &"http://localhost".parse()?) .insert_raw(
&RawCookie::new("local", local),
&"http://localhost".parse()?,
)
.with_kind(crate::ErrorKind::Network)?; .with_kind(crate::ErrorKind::Network)?;
} }
store store
} else {
CookieStore::default()
})); }));
Ok(CliContext(Arc::new(CliContextSeed { Ok(CliContext(Arc::new(CliContextSeed {
base_url: url.clone(), base_url: url.clone(),
rpc_url: { rpc_url: {

View File

@@ -18,7 +18,7 @@ pub struct DiagnosticContextConfig {
pub datadir: Option<PathBuf>, pub datadir: Option<PathBuf>,
} }
impl DiagnosticContextConfig { impl DiagnosticContextConfig {
#[instrument(skip(path))] #[instrument(skip_all)]
pub async fn load<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> { pub async fn load<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
tokio::task::spawn_blocking(move || { tokio::task::spawn_blocking(move || {
load_config_from_paths( load_config_from_paths(
@@ -52,7 +52,7 @@ pub struct DiagnosticContextSeed {
#[derive(Clone)] #[derive(Clone)]
pub struct DiagnosticContext(Arc<DiagnosticContextSeed>); pub struct DiagnosticContext(Arc<DiagnosticContextSeed>);
impl DiagnosticContext { impl DiagnosticContext {
#[instrument(skip(path))] #[instrument(skip_all)]
pub async fn init<P: AsRef<Path> + Send + 'static>( pub async fn init<P: AsRef<Path> + Send + 'static>(
path: Option<P>, path: Option<P>,
disk_guid: Option<Arc<String>>, disk_guid: Option<Arc<String>>,

View File

@@ -15,7 +15,7 @@ use crate::Error;
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
pub struct InstallContextConfig {} pub struct InstallContextConfig {}
impl InstallContextConfig { impl InstallContextConfig {
#[instrument(skip(path))] #[instrument(skip_all)]
pub async fn load<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> { pub async fn load<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
tokio::task::spawn_blocking(move || { tokio::task::spawn_blocking(move || {
load_config_from_paths( load_config_from_paths(
@@ -38,7 +38,7 @@ pub struct InstallContextSeed {
#[derive(Clone)] #[derive(Clone)]
pub struct InstallContext(Arc<InstallContextSeed>); pub struct InstallContext(Arc<InstallContextSeed>);
impl InstallContext { impl InstallContext {
#[instrument(skip(path))] #[instrument(skip_all)]
pub async fn init<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> { pub async fn init<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
let _cfg = InstallContextConfig::load(path.as_ref().map(|p| p.as_ref().to_owned())).await?; let _cfg = InstallContextConfig::load(path.as_ref().map(|p| p.as_ref().to_owned())).await?;
let (shutdown, _) = tokio::sync::broadcast::channel(1); let (shutdown, _) = tokio::sync::broadcast::channel(1);

View File

@@ -4,40 +4,42 @@ use std::ops::Deref;
use std::path::{Path, PathBuf}; use std::path::{Path, PathBuf};
use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration;
use bollard::Docker;
use helpers::to_tmp_path; use helpers::to_tmp_path;
use josekit::jwk::Jwk; use josekit::jwk::Jwk;
use patch_db::json_ptr::JsonPointer; use patch_db::json_ptr::JsonPointer;
use patch_db::{DbHandle, LockReceipt, LockType, PatchDb}; use patch_db::PatchDb;
use reqwest::Url; use reqwest::{Client, Proxy, Url};
use rpc_toolkit::Context; use rpc_toolkit::Context;
use serde::Deserialize; use serde::Deserialize;
use sqlx::postgres::PgConnectOptions; use sqlx::postgres::PgConnectOptions;
use sqlx::PgPool; use sqlx::PgPool;
use tokio::sync::{broadcast, oneshot, Mutex, RwLock}; use tokio::sync::{broadcast, oneshot, Mutex, RwLock};
use tokio::time::Instant;
use tracing::instrument; use tracing::instrument;
use super::setup::CURRENT_SECRET;
use crate::account::AccountInfo; use crate::account::AccountInfo;
use crate::core::rpc_continuations::{RequestGuid, RestHandler, RpcContinuation}; use crate::core::rpc_continuations::{RequestGuid, RestHandler, RpcContinuation};
use crate::db::model::{Database, InstalledPackageDataEntry, PackageDataEntry}; use crate::db::model::{CurrentDependents, Database, PackageDataEntryMatchModelRef};
use crate::db::prelude::PatchDbExt;
use crate::dependencies::compute_dependency_config_errs;
use crate::disk::OsPartitionInfo; use crate::disk::OsPartitionInfo;
use crate::init::{init_postgres, pgloader}; use crate::init::init_postgres;
use crate::install::cleanup::{cleanup_failed, uninstall, CleanupFailedReceipts}; use crate::install::cleanup::{cleanup_failed, uninstall};
use crate::manager::ManagerMap; use crate::manager::ManagerMap;
use crate::middleware::auth::HashSessionToken; use crate::middleware::auth::HashSessionToken;
use crate::net::net_controller::NetController; use crate::net::net_controller::NetController;
use crate::net::ssl::SslManager; use crate::net::ssl::{root_ca_start_time, SslManager};
use crate::net::wifi::WpaCli; use crate::net::wifi::WpaCli;
use crate::notifications::NotificationManager; use crate::notifications::NotificationManager;
use crate::shutdown::Shutdown; use crate::shutdown::Shutdown;
use crate::status::{MainStatus, Status}; use crate::status::MainStatus;
use crate::system::get_mem_info;
use crate::util::config::load_config_from_paths; use crate::util::config::load_config_from_paths;
use crate::util::lshw::{lshw, LshwDevice};
use crate::{Error, ErrorKind, ResultExt}; use crate::{Error, ErrorKind, ResultExt};
use super::setup::CURRENT_SECRET;
#[derive(Debug, Default, Deserialize)] #[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
pub struct RpcContextConfig { pub struct RpcContextConfig {
@@ -86,7 +88,7 @@ impl RpcContextConfig {
} }
Ok(db) Ok(db)
} }
#[instrument] #[instrument(skip_all)]
pub async fn secret_store(&self) -> Result<PgPool, Error> { pub async fn secret_store(&self) -> Result<PgPool, Error> {
init_postgres(self.datadir()).await?; init_postgres(self.datadir()).await?;
let secret_store = let secret_store =
@@ -96,15 +98,6 @@ impl RpcContextConfig {
.run(&secret_store) .run(&secret_store)
.await .await
.with_kind(crate::ErrorKind::Database)?; .with_kind(crate::ErrorKind::Database)?;
let old_db_path = self.datadir().join("main/secrets.db");
if tokio::fs::metadata(&old_db_path).await.is_ok() {
pgloader(
&old_db_path,
self.migration_batch_rows.unwrap_or(25000),
self.migration_prefetch_rows.unwrap_or(100_000),
)
.await?;
}
Ok(secret_store) Ok(secret_store)
} }
} }
@@ -119,7 +112,6 @@ pub struct RpcContextSeed {
pub db: PatchDb, pub db: PatchDb,
pub secret_store: PgPool, pub secret_store: PgPool,
pub account: RwLock<AccountInfo>, pub account: RwLock<AccountInfo>,
pub docker: Docker,
pub net_controller: Arc<NetController>, pub net_controller: Arc<NetController>,
pub managers: ManagerMap, pub managers: ManagerMap,
pub metrics_cache: RwLock<Option<crate::system::Metrics>>, pub metrics_cache: RwLock<Option<crate::system::Metrics>>,
@@ -130,51 +122,21 @@ pub struct RpcContextSeed {
pub rpc_stream_continuations: Mutex<BTreeMap<RequestGuid, RpcContinuation>>, pub rpc_stream_continuations: Mutex<BTreeMap<RequestGuid, RpcContinuation>>,
pub wifi_manager: Option<Arc<RwLock<WpaCli>>>, pub wifi_manager: Option<Arc<RwLock<WpaCli>>>,
pub current_secret: Arc<Jwk>, pub current_secret: Arc<Jwk>,
pub client: Client,
pub hardware: Hardware,
pub start_time: Instant,
} }
pub struct RpcCleanReceipts { pub struct Hardware {
cleanup_receipts: CleanupFailedReceipts, pub devices: Vec<LshwDevice>,
packages: LockReceipt<crate::db::model::AllPackageData, ()>, pub ram: u64,
package: LockReceipt<crate::db::model::PackageDataEntry, String>,
}
impl RpcCleanReceipts {
pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
let mut locks = Vec::new();
let setup = Self::setup(&mut locks);
Ok(setup(&db.lock_all(locks).await?)?)
}
pub fn setup(
locks: &mut Vec<patch_db::LockTargetId>,
) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
let cleanup_receipts = CleanupFailedReceipts::setup(locks);
let packages = crate::db::DatabaseModel::new()
.package_data()
.make_locker(LockType::Write)
.add_to_keys(locks);
let package = crate::db::DatabaseModel::new()
.package_data()
.star()
.make_locker(LockType::Write)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
cleanup_receipts: cleanup_receipts(skeleton_key)?,
packages: packages.verify(skeleton_key)?,
package: package.verify(skeleton_key)?,
})
}
}
} }
#[derive(Clone)] #[derive(Clone)]
pub struct RpcContext(Arc<RpcContextSeed>); pub struct RpcContext(Arc<RpcContextSeed>);
impl RpcContext { impl RpcContext {
#[instrument(skip(cfg_path))] #[instrument(skip_all)]
pub async fn init<P: AsRef<Path> + Send + 'static>( pub async fn init<P: AsRef<Path> + Send + Sync + 'static>(
cfg_path: Option<P>, cfg_path: Option<P>,
disk_guid: Arc<String>, disk_guid: Arc<String>,
) -> Result<Self, Error> { ) -> Result<Self, Error> {
@@ -190,18 +152,15 @@ impl RpcContext {
let account = AccountInfo::load(&secret_store).await?; let account = AccountInfo::load(&secret_store).await?;
let db = base.db(&account).await?; let db = base.db(&account).await?;
tracing::info!("Opened PatchDB"); tracing::info!("Opened PatchDB");
let mut docker = Docker::connect_with_unix_defaults()?;
docker.set_timeout(Duration::from_secs(600));
tracing::info!("Connected to Docker");
let net_controller = Arc::new( let net_controller = Arc::new(
NetController::init( NetController::init(
base.tor_control base.tor_control
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))), .unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
tor_proxy,
base.dns_bind base.dns_bind
.as_ref() .as_deref()
.map(|v| v.as_slice())
.unwrap_or(&[SocketAddr::from(([127, 0, 0, 1], 53))]), .unwrap_or(&[SocketAddr::from(([127, 0, 0, 1], 53))]),
SslManager::new(&account)?, SslManager::new(&account, root_ca_start_time().await?)?,
&account.hostname, &account.hostname,
&account.key, &account.key,
) )
@@ -209,9 +168,12 @@ impl RpcContext {
); );
tracing::info!("Initialized Net Controller"); tracing::info!("Initialized Net Controller");
let managers = ManagerMap::default(); let managers = ManagerMap::default();
let metrics_cache = RwLock::new(None); let metrics_cache = RwLock::<Option<crate::system::Metrics>>::new(None);
let notification_manager = NotificationManager::new(secret_store.clone()); let notification_manager = NotificationManager::new(secret_store.clone());
tracing::info!("Initialized Notification Manager"); tracing::info!("Initialized Notification Manager");
let tor_proxy_url = format!("socks5h://{tor_proxy}");
let devices = lshw().await?;
let ram = get_mem_info().await?.total.0 as u64 * 1024 * 1024;
let seed = Arc::new(RpcContextSeed { let seed = Arc::new(RpcContextSeed {
is_closed: AtomicBool::new(false), is_closed: AtomicBool::new(false),
datadir: base.datadir().to_path_buf(), datadir: base.datadir().to_path_buf(),
@@ -222,7 +184,6 @@ impl RpcContext {
db, db,
secret_store, secret_store,
account: RwLock::new(account), account: RwLock::new(account),
docker,
net_controller, net_controller,
managers, managers,
metrics_cache, metrics_cache,
@@ -244,23 +205,27 @@ impl RpcContext {
) )
})?, })?,
), ),
client: Client::builder()
.proxy(Proxy::custom(move |url| {
if url.host_str().map_or(false, |h| h.ends_with(".onion")) {
Some(tor_proxy_url.clone())
} else {
None
}
}))
.build()
.with_kind(crate::ErrorKind::ParseUrl)?,
hardware: Hardware { devices, ram },
start_time: Instant::now(),
}); });
let res = Self(seed); let res = Self(seed.clone());
res.cleanup().await?; res.cleanup_and_initialize().await?;
tracing::info!("Cleaned up transient states"); tracing::info!("Cleaned up transient states");
res.managers
.init(
&res,
&mut res.db.handle(),
&mut res.secret_store.acquire().await?,
)
.await?;
tracing::info!("Initialized Package Managers");
Ok(res) Ok(res)
} }
#[instrument(skip(self))] #[instrument(skip_all)]
pub async fn shutdown(self) -> Result<(), Error> { pub async fn shutdown(self) -> Result<(), Error> {
self.managers.empty().await?; self.managers.empty().await?;
self.secret_store.close().await; self.secret_store.close().await;
@@ -271,84 +236,153 @@ impl RpcContext {
} }
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn cleanup(&self) -> Result<(), Error> { pub async fn cleanup_and_initialize(&self) -> Result<(), Error> {
let mut db = self.db.handle(); self.db
let receipts = RpcCleanReceipts::new(&mut db).await?; .mutate(|f| {
for (package_id, package) in receipts.packages.get(&mut db).await?.0 { let mut current_dependents = f
if let Err(e) = async { .as_package_data()
match package { .keys()?
PackageDataEntry::Installing { .. } .into_iter()
| PackageDataEntry::Restoring { .. } .map(|k| (k.clone(), BTreeMap::new()))
| PackageDataEntry::Updating { .. } => { .collect::<BTreeMap<_, _>>();
cleanup_failed(self, &mut db, &package_id, &receipts.cleanup_receipts) for (package_id, package) in f.as_package_data_mut().as_entries_mut()? {
.await?; for (k, v) in package
} .as_installed_mut()
PackageDataEntry::Removing { .. } => { .into_iter()
uninstall( .flat_map(|i| i.clone().into_current_dependencies().into_entries())
self, .flatten()
&mut db, {
&mut self.secret_store.acquire().await?, let mut entry: BTreeMap<_, _> =
&package_id, current_dependents.remove(&k).unwrap_or_default();
) entry.insert(package_id.clone(), v.de()?);
.await?; current_dependents.insert(k, entry);
}
PackageDataEntry::Installed {
installed,
static_files,
manifest,
} => {
for (volume_id, volume_info) in &*manifest.volumes {
let tmp_path = to_tmp_path(volume_info.path_for(
&self.datadir,
&package_id,
&manifest.version,
&volume_id,
))
.with_kind(ErrorKind::Filesystem)?;
if tokio::fs::metadata(&tmp_path).await.is_ok() {
tokio::fs::remove_dir_all(&tmp_path).await?;
}
}
let status = installed.status;
let main = match status.main {
MainStatus::BackingUp { started, .. } => {
if let Some(_) = started {
MainStatus::Starting { restarting: false }
} else {
MainStatus::Stopped
}
}
MainStatus::Running { .. } => {
MainStatus::Starting { restarting: false }
}
a => a.clone(),
};
let new_package = PackageDataEntry::Installed {
installed: InstalledPackageDataEntry {
status: Status { main, ..status },
..installed
},
static_files,
manifest,
};
receipts
.package
.set(&mut db, new_package, &package_id)
.await?;
} }
} }
Ok::<_, Error>(()) for (package_id, current_dependents) in current_dependents {
} if let Some(deps) = f
.await .as_package_data_mut()
{ .as_idx_mut(&package_id)
.and_then(|pde| pde.expect_as_installed_mut().ok())
.map(|i| i.as_installed_mut().as_current_dependents_mut())
{
deps.ser(&CurrentDependents(current_dependents))?;
} else if let Some(deps) = f
.as_package_data_mut()
.as_idx_mut(&package_id)
.and_then(|pde| pde.expect_as_removing_mut().ok())
.map(|i| i.as_removing_mut().as_current_dependents_mut())
{
deps.ser(&CurrentDependents(current_dependents))?;
}
}
Ok(())
})
.await?;
let peek = self.db.peek().await;
for (package_id, package) in peek.as_package_data().as_entries()?.into_iter() {
let action = match package.as_match() {
PackageDataEntryMatchModelRef::Installing(_)
| PackageDataEntryMatchModelRef::Restoring(_)
| PackageDataEntryMatchModelRef::Updating(_) => {
cleanup_failed(self, &package_id).await
}
PackageDataEntryMatchModelRef::Removing(_) => {
uninstall(
self,
self.secret_store.acquire().await?.as_mut(),
&package_id,
)
.await
}
PackageDataEntryMatchModelRef::Installed(m) => {
let version = m.as_manifest().as_version().clone().de()?;
let volumes = m.as_manifest().as_volumes().de()?;
for (volume_id, volume_info) in &*volumes {
let tmp_path = to_tmp_path(volume_info.path_for(
&self.datadir,
&package_id,
&version,
volume_id,
))
.with_kind(ErrorKind::Filesystem)?;
if tokio::fs::metadata(&tmp_path).await.is_ok() {
tokio::fs::remove_dir_all(&tmp_path).await?;
}
}
Ok(())
}
_ => continue,
};
if let Err(e) = action {
tracing::error!("Failed to clean up package {}: {}", package_id, e); tracing::error!("Failed to clean up package {}: {}", package_id, e);
tracing::debug!("{:?}", e); tracing::debug!("{:?}", e);
} }
} }
let peek = self
.db
.mutate(|v| {
for (_, pde) in v.as_package_data_mut().as_entries_mut()? {
let status = pde
.expect_as_installed_mut()?
.as_installed_mut()
.as_status_mut()
.as_main_mut();
let running = status.clone().de()?.running();
status.ser(&if running {
MainStatus::Starting
} else {
MainStatus::Stopped
})?;
}
Ok(v.clone())
})
.await?;
self.managers.init(self.clone(), peek.clone()).await?;
tracing::info!("Initialized Package Managers");
let mut all_dependency_config_errs = BTreeMap::new();
for (package_id, package) in peek.as_package_data().as_entries()?.into_iter() {
let package = package.clone();
if let Some(current_dependencies) = package
.as_installed()
.and_then(|x| x.as_current_dependencies().de().ok())
{
let manifest = package.as_manifest().de()?;
all_dependency_config_errs.insert(
package_id.clone(),
compute_dependency_config_errs(
self,
&peek,
&manifest,
&current_dependencies,
&Default::default(),
)
.await?,
);
}
}
self.db
.mutate(|v| {
for (package_id, errs) in all_dependency_config_errs {
if let Some(config_errors) = v
.as_package_data_mut()
.as_idx_mut(&package_id)
.and_then(|pde| pde.as_installed_mut())
.map(|i| i.as_status_mut().as_dependency_config_errors_mut())
{
config_errors.ser(&errs)?;
}
}
Ok(())
})
.await?;
Ok(()) Ok(())
} }
#[instrument(skip(self))] #[instrument(skip_all)]
pub async fn clean_continuations(&self) { pub async fn clean_continuations(&self) {
let mut continuations = self.rpc_stream_continuations.lock().await; let mut continuations = self.rpc_stream_continuations.lock().await;
let mut to_remove = Vec::new(); let mut to_remove = Vec::new();
@@ -362,7 +396,7 @@ impl RpcContext {
} }
} }
#[instrument(skip(self, handler))] #[instrument(skip_all)]
pub async fn add_continuation(&self, guid: RequestGuid, handler: RpcContinuation) { pub async fn add_continuation(&self, guid: RequestGuid, handler: RpcContinuation) {
self.clean_continuations().await; self.clean_continuations().await;
self.rpc_stream_continuations self.rpc_stream_continuations
@@ -402,7 +436,7 @@ impl RpcContext {
} }
impl AsRef<Jwk> for RpcContext { impl AsRef<Jwk> for RpcContext {
fn as_ref(&self) -> &Jwk { fn as_ref(&self) -> &Jwk {
&*CURRENT_SECRET &CURRENT_SECRET
} }
} }
impl Context for RpcContext {} impl Context for RpcContext {}
@@ -416,7 +450,7 @@ impl Deref for RpcContext {
tracing_error::SpanTrace::capture() tracing_error::SpanTrace::capture()
); );
} }
&*self.0 &self.0
} }
} }
impl Drop for RpcContext { impl Drop for RpcContext {

View File

@@ -7,8 +7,8 @@ use rpc_toolkit::Context;
use serde::Deserialize; use serde::Deserialize;
use tracing::instrument; use tracing::instrument;
use crate::prelude::*;
use crate::util::config::{load_config_from_paths, local_config_path}; use crate::util::config::{load_config_from_paths, local_config_path};
use crate::{Error, ResultExt};
#[derive(Debug, Default, Deserialize)] #[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
@@ -25,7 +25,7 @@ pub struct SdkContextSeed {
pub struct SdkContext(Arc<SdkContextSeed>); pub struct SdkContext(Arc<SdkContextSeed>);
impl SdkContext { impl SdkContext {
/// BLOCKING /// BLOCKING
#[instrument(skip(matches))] #[instrument(skip_all)]
pub fn init(matches: &ArgMatches) -> Result<Self, crate::Error> { pub fn init(matches: &ArgMatches) -> Result<Self, crate::Error> {
let local_config_path = local_config_path(); let local_config_path = local_config_path();
let base: SdkContextConfig = load_config_from_paths( let base: SdkContextConfig = load_config_from_paths(
@@ -49,22 +49,22 @@ impl SdkContext {
}))) })))
} }
/// BLOCKING /// BLOCKING
#[instrument] #[instrument(skip_all)]
pub fn developer_key(&self) -> Result<ed25519_dalek::Keypair, Error> { pub fn developer_key(&self) -> Result<ed25519_dalek::SigningKey, Error> {
if !self.developer_key_path.exists() { if !self.developer_key_path.exists() {
return Err(Error::new(eyre!("Developer Key does not exist! Please run `embassy-sdk init` before running this command."), crate::ErrorKind::Uninitialized)); return Err(Error::new(eyre!("Developer Key does not exist! Please run `start-sdk init` before running this command."), crate::ErrorKind::Uninitialized));
} }
let pair = <ed25519::KeypairBytes as ed25519::pkcs8::DecodePrivateKey>::from_pkcs8_pem( let pair = <ed25519::KeypairBytes as ed25519::pkcs8::DecodePrivateKey>::from_pkcs8_pem(
&std::fs::read_to_string(&self.developer_key_path)?, &std::fs::read_to_string(&self.developer_key_path)?,
) )
.with_kind(crate::ErrorKind::Pem)?; .with_kind(crate::ErrorKind::Pem)?;
let secret = ed25519_dalek::SecretKey::from_bytes(&pair.secret_key[..])?; let secret = ed25519_dalek::SecretKey::try_from(&pair.secret_key[..]).map_err(|_| {
let public = if let Some(public) = pair.public_key { Error::new(
ed25519_dalek::PublicKey::from_bytes(&public[..])? eyre!("pkcs8 key is of incorrect length"),
} else { ErrorKind::OpenSsl,
(&secret).into() )
}; })?;
Ok(ed25519_dalek::Keypair { secret, public }) Ok(secret.into())
} }
} }
impl std::ops::Deref for SdkContext { impl std::ops::Deref for SdkContext {

View File

@@ -17,7 +17,7 @@ use tracing::instrument;
use crate::account::AccountInfo; use crate::account::AccountInfo;
use crate::db::model::Database; use crate::db::model::Database;
use crate::disk::OsPartitionInfo; use crate::disk::OsPartitionInfo;
use crate::init::{init_postgres, pgloader}; use crate::init::init_postgres;
use crate::setup::SetupStatus; use crate::setup::SetupStatus;
use crate::util::config::load_config_from_paths; use crate::util::config::load_config_from_paths;
use crate::{Error, ResultExt}; use crate::{Error, ResultExt};
@@ -45,9 +45,11 @@ pub struct SetupContextConfig {
pub migration_batch_rows: Option<usize>, pub migration_batch_rows: Option<usize>,
pub migration_prefetch_rows: Option<usize>, pub migration_prefetch_rows: Option<usize>,
pub datadir: Option<PathBuf>, pub datadir: Option<PathBuf>,
#[serde(default)]
pub disable_encryption: bool,
} }
impl SetupContextConfig { impl SetupContextConfig {
#[instrument(skip(path))] #[instrument(skip_all)]
pub async fn load<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> { pub async fn load<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
tokio::task::spawn_blocking(move || { tokio::task::spawn_blocking(move || {
load_config_from_paths( load_config_from_paths(
@@ -75,6 +77,7 @@ pub struct SetupContextSeed {
pub config_path: Option<PathBuf>, pub config_path: Option<PathBuf>,
pub migration_batch_rows: usize, pub migration_batch_rows: usize,
pub migration_prefetch_rows: usize, pub migration_prefetch_rows: usize,
pub disable_encryption: bool,
pub shutdown: Sender<()>, pub shutdown: Sender<()>,
pub datadir: PathBuf, pub datadir: PathBuf,
pub selected_v2_drive: RwLock<Option<PathBuf>>, pub selected_v2_drive: RwLock<Option<PathBuf>>,
@@ -92,7 +95,7 @@ impl AsRef<Jwk> for SetupContextSeed {
#[derive(Clone)] #[derive(Clone)]
pub struct SetupContext(Arc<SetupContextSeed>); pub struct SetupContext(Arc<SetupContextSeed>);
impl SetupContext { impl SetupContext {
#[instrument(skip(path))] #[instrument(skip_all)]
pub async fn init<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> { pub async fn init<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
let cfg = SetupContextConfig::load(path.as_ref().map(|p| p.as_ref().to_owned())).await?; let cfg = SetupContextConfig::load(path.as_ref().map(|p| p.as_ref().to_owned())).await?;
let (shutdown, _) = tokio::sync::broadcast::channel(1); let (shutdown, _) = tokio::sync::broadcast::channel(1);
@@ -102,6 +105,7 @@ impl SetupContext {
config_path: path.as_ref().map(|p| p.as_ref().to_owned()), config_path: path.as_ref().map(|p| p.as_ref().to_owned()),
migration_batch_rows: cfg.migration_batch_rows.unwrap_or(25000), migration_batch_rows: cfg.migration_batch_rows.unwrap_or(25000),
migration_prefetch_rows: cfg.migration_prefetch_rows.unwrap_or(100_000), migration_prefetch_rows: cfg.migration_prefetch_rows.unwrap_or(100_000),
disable_encryption: cfg.disable_encryption,
shutdown, shutdown,
datadir, datadir,
selected_v2_drive: RwLock::new(None), selected_v2_drive: RwLock::new(None),
@@ -110,7 +114,7 @@ impl SetupContext {
setup_result: RwLock::new(None), setup_result: RwLock::new(None),
}))) })))
} }
#[instrument(skip(self))] #[instrument(skip_all)]
pub async fn db(&self, account: &AccountInfo) -> Result<PatchDb, Error> { pub async fn db(&self, account: &AccountInfo) -> Result<PatchDb, Error> {
let db_path = self.datadir.join("main").join("embassy.db"); let db_path = self.datadir.join("main").join("embassy.db");
let db = PatchDb::open(&db_path) let db = PatchDb::open(&db_path)
@@ -122,7 +126,7 @@ impl SetupContext {
} }
Ok(db) Ok(db)
} }
#[instrument(skip(self))] #[instrument(skip_all)]
pub async fn secret_store(&self) -> Result<PgPool, Error> { pub async fn secret_store(&self) -> Result<PgPool, Error> {
init_postgres(&self.datadir).await?; init_postgres(&self.datadir).await?;
let secret_store = let secret_store =
@@ -132,15 +136,6 @@ impl SetupContext {
.run(&secret_store) .run(&secret_store)
.await .await
.with_kind(crate::ErrorKind::Database)?; .with_kind(crate::ErrorKind::Database)?;
let old_db_path = self.datadir.join("main/secrets.db");
if tokio::fs::metadata(&old_db_path).await.is_ok() {
pgloader(
&old_db_path,
self.migration_batch_rows,
self.migration_prefetch_rows,
)
.await?;
}
Ok(secret_store) Ok(secret_store)
} }
} }

Some files were not shown because too many files have changed in this diff Show More