Compare commits

..

112 Commits

Author SHA1 Message Date
Aiden McClelland
2f8a25ae26 bump version (#1871) 2022-10-11 12:15:42 -06:00
Aiden McClelland
19bf80dfaf increase maximum avahi entry group size (#1869) 2022-10-10 16:50:33 -06:00
Lucy C
fbfaac9859 Update index.html copy and styling (#1855)
* Update index.html

Co-authored-by: Matt Hill <MattDHill@users.noreply.github.com>
2022-10-06 14:35:43 -06:00
Aiden McClelland
0c3d0dd525 disable concurrency and delete tmpdir before retry (#1846)
* disable concurrency and delete tmpdir before retry

* undo retry

* really limit usage of pgloader

* configurable

* no migrate notifications
2022-10-04 15:14:15 -06:00
Mariusz Kogen
1388632562 📸 screenshots update (#1853)
* images update

* proper file rename
2022-10-04 09:36:01 -06:00
Matt Hill
771ecaf3e5 update patch db (#1852) 2022-10-03 12:17:12 -06:00
Mariusz Kogen
2000a8f3ed 💫 Rebranding to embassyOS (#1851)
* name change to embassyOS

* filename change

* motd ASCII update

* some leftovers
2022-10-03 12:13:48 -06:00
Matt Hill
719cd5512c show connection bar right away (#1849) 2022-10-03 09:59:49 -06:00
Aiden McClelland
afb4536247 retry pgloader up to 5x (#1845) 2022-09-29 13:45:28 -06:00
Aiden McClelland
71b19e6582 handle multiple image tags having the same hash and increase timeout (#1844) 2022-09-29 09:52:04 -06:00
Lucy C
f37cfda365 update ts matches to fix properties ordering bug (#1843) 2022-09-29 08:42:26 -06:00
Aiden McClelland
f63a841cb5 reduce patch-db log level to warn (#1840) 2022-09-28 10:12:00 -06:00
Aiden McClelland
d469e802ad update patch db and enable logging (#1837) 2022-09-27 15:36:42 -06:00
Lucy C
1702c07481 Seed patchdb UI data (#1835)
* adjust types
for patchdb ui data and create seed file

* feat: For init and the migration use defaults

* fix update path

* update build for ui seed file

* fix accidential revert

* chore: Convert to do during the init

* chore: Update the commit message

Co-authored-by: BluJ <mogulslayer@gmail.com>
2022-09-26 17:36:37 -06:00
Aiden McClelland
31c5aebe90 play song during update (#1832)
* play song

* change song
2022-09-26 16:52:12 -06:00
Matt Hill
8cf84a6cf2 give name to logs file (#1833)
* give name to logs file
2022-09-26 15:40:38 -06:00
Aiden McClelland
18336e4d0a update patch-db (#1831) 2022-09-26 14:45:16 -06:00
Thomas Moerkerken
abf297d095 Bugfix/correctly package backend job (#1826)
* use makefile to create backend tar in pipeline

* correct for multiple architecture builds

* move duplication to shared workflow
2022-09-26 13:59:14 -06:00
Matt Hill
061a350cc6 Multiple (#1823)
* display preference for suto check and better messaging on properties page

* improve logs by a lot

* clean up

* fix searchbar and url in marketplace
2022-09-23 14:51:28 -06:00
Aiden McClelland
c85491cc71 ignore file not found error for delete (#1822) 2022-09-23 10:08:33 -06:00
Aiden McClelland
8b794c2299 perform system rebuild after updating (#1820)
* perform system rebuild after updating

* cleanup
2022-09-22 12:05:25 -06:00
Matt Hill
11b11375fd update license (#1819)
* update license

* update date
2022-09-22 12:03:44 -06:00
Aiden McClelland
c728f1a694 restructure initialization (#1816)
* reorder enabling of systemd-resolved

* set dns at end

* don't disable interfaces

* let networkmanager manage ifupdown

* restructure initialization

* use pigz when available

* cleanup

* fetch key before adding registry

* fix build

* update patch-db

* fix build

* fix build

* wait for network reinit

* add dynamic wait for up to 60s for network to reinit
2022-09-22 11:40:36 -06:00
Lucy C
28f9fa35e5 Fix/encryption (#1811)
* change encryption to use pubkey and only encrypt specific fields

* adjust script names for convenience

* remove unused fn

* fix build script name

* augment mocks

* remove log

* fix prod build

* feat: backend keys

* fix: Using the correct name with the public key

* chore: Fix the type for the encrypted

* chore: Add some tracing

* remove aes-js from package lock file

Co-authored-by: BluJ <mogulslayer@gmail.com>
2022-09-21 14:03:05 -06:00
Lucy C
f8ea2ebf62 add descriptions to marketplace list page (#1812)
* add descriptions to marketplace list page

* clean up unused styling

* rip descriptions from registry marketplace, use binary choice custom default and alternative messages

* cleanup

* fix selected type and remove uneeded conditional

* conditional color

* cleanup

* better comparision of marketplace url duplicates

* add logic to handle marketplace description display based on url

* decrease font size

* abstract helper fn to get url hostname; add error toast when adding duplicate marketplace

* move helper function to more appropriate file location

* rework marketplace list and don't worry about patch db firing before bootstrapped

* remove aes-js

* reinstall aes just to please things for now

Co-authored-by: Matt Hill <matthewonthemoon@gmail.com>
2022-09-21 12:41:19 -06:00
Mariusz Kogen
7575e8c1de 🏦 Start as embassy hostname from the begining (#1814)
* 🏦 start as embassy hostname from the begining

By this, we are limiting the number of hostname changes (from 3 to 2) during the first initial Embassy startup.

* Hostname change no longer needed
2022-09-21 10:56:52 -06:00
Mariusz Kogen
395db5f1cf 🧅 replace instead of adding tor repository (#1813)
I also removed the src line because we won't use the tor source repository anytime soon if ever.
2022-09-21 10:56:01 -06:00
J M
ee1acda7aa fix: Minor fix that matt wanted (#1808)
* fix: Minor fix that matt wanted

* Update backend/src/hostname.rs
2022-09-19 13:05:38 -06:00
Matt Hill
1150f4c438 clean up code and logs (#1809)
abstract base64 functions and clean up console logs
2022-09-19 12:59:41 -06:00
Matt Hill
f04b90d9c6 fix mrketplace swtiching (#1810) 2022-09-19 12:59:29 -06:00
Lucy C
53463077df Bugfix/marketplace add (#1805)
* remove falsey check when getting marketplaces, as no alts could exist

* filter boolean but start with object

Co-authored-by: Matt Hill <matthewonthemoon@gmail.com>
2022-09-15 13:30:49 -06:00
Matt Hill
e326c5be4a better RPC error message (#1803) 2022-09-15 13:30:27 -06:00
Aiden McClelland
e199dbc37b prevent cfg str generation from running forever (#1804) 2022-09-15 13:29:46 -06:00
J M
2e8bfcc74d fix: Deep is_parent was wrong and could be escapped (#1801)
* fix: Deep is_parent was wrong and could be escapped

* Update lib.rs
2022-09-15 12:53:56 -06:00
Aiden McClelland
ca53793e32 stop leaking avahi clients (#1802)
* stop leaking avahi clients

* separate avahi to its own binary
2022-09-15 12:53:46 -06:00
Mariusz Kogen
a5f31fbf4e 🎚️ reclaiming that precious RAM memory (#1799)
* 🎚️ reclaiming that precious RAM memory

This adds 60MB of usable RAM

* 🎚️ reclaiming that precious RAM memory - sudo

* Update build/write-image.sh

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
2022-09-14 11:24:43 -06:00
Matt Hill
40d47c9f44 fix duplicate patch updates, add scroll button to setup success (#1800)
* fix duplicate patch updates, add scroll button to setup success

* update path

* update patch

* update patch
2022-09-14 11:24:22 -06:00
J M
67743b37bb fix: Bad cert of *.local.local is now fixed to correct. (#1798) 2022-09-12 16:27:46 -06:00
Aiden McClelland
36911d7ed6 use base64 for HTTP headers (#1795)
* use base64 for HTTP headers

* fe for base64 headers

Co-authored-by: Matt Hill <matthewonthemoon@gmail.com>
2022-09-12 13:20:49 -06:00
Aiden McClelland
5564154da2 update backend dependencies (#1796)
* update backend dependencies

* update compat
2022-09-12 13:20:40 -06:00
Matt Hill
27f9869b38 fix search to return more accurate results (#1792) 2022-09-10 13:31:39 -06:00
Aiden McClelland
f274747af3 fix init to exit on failure (#1788) 2022-09-10 13:31:03 -06:00
Matt Hill
05832b8b4b expect ui marketplace to be undefined (#1787) 2022-09-09 12:24:30 -06:00
Aiden McClelland
b9ce2bf2dc 0.3.2 final cleanup (#1782)
* bump version with stubbed release notes

* increase BE timeout

* 032 release notes

* hide developer menu for now

* remove unused sub/import

* remoce reconnect from disks res in setup wiz

* remove quirks

* flatten drives response

Co-authored-by: Matt Hill <matthewonthemoon@gmail.com>
2022-09-08 16:14:42 -06:00
J M
5442459b2d fix: Js deep dir (#1784)
* fix: Js deep dir

* Delete broken.log

* Delete test.log

* fix: Remove  the == parent
2022-09-08 13:57:13 -06:00
Stephen Chavez
f0466aaa56 pinning cargo dep versions for CLI (#1775)
* pinning cargo dep versions for CLI

* add --locked to the workflow

Co-authored-by: Stephen Chavez <stephen@start9labs.com>
2022-09-07 09:25:14 -06:00
Matt Hill
50111e37da remove product key from setup flow (#1750)
* remove product key flow from setup

* feat: backend turned off encryption + new Id + no package id

* implement new encryption scheme in FE

* decode response string

* crypto not working

* update setup wizard closes #1762

* feat: Get the encryption key

* fix: Get to recovery

* remove old code

* fix build

* fix: Install works for now

* fix bug in config for adding new list items

* dismiss action modal on success

* clear button in config

* wip: Currently broken in avahi mdns

* include headers with req/res and refactor patchDB init and usage

* fix: Can now run in the main

* flatline on failed init

* update patch DB

* add last-wifi-region to data model even though not used by FE

* chore: Fix the start.

* wip: Fix wrong order for getting hostname before sql has been
created

* fix edge case where union keys displayed as new when not new

* fix: Can start

* last backup color, markdown links always new tab, fix bug with login

* refactor to remove WithRevision

* resolve circular dep issue

* update submodule

* fix patch-db

* update patchDB

* update patch again

* escape error

* decodeuricomponent

* increase proxy buffer size

* increase proxy buffer size

* fix nginx

Co-authored-by: BluJ <mogulslayer@gmail.com>
Co-authored-by: BluJ <dragondef@gmail.com>
Co-authored-by: Aiden McClelland <me@drbonez.dev>
2022-09-07 09:25:01 -06:00
Aiden McClelland
76682ebef0 switch to postgresql (#1763)
switch sqlx to postgresql
2022-09-01 10:32:01 -06:00
Matt Hill
705653465a use hostname from patchDB as default server name (#1758)
* replace offline toast with global indicator

* use hostname from patchDB as default server name

* add alert to marketplace delete and reword logout alert
2022-08-29 14:59:09 -06:00
Lucy C
8cd2fac9b9 Fix/empty properties (#1764)
* refactor properties utilities with updated ts-matches and allow for a null file

* update pacakges
2022-08-29 09:58:49 -06:00
Aiden McClelland
b2d7f4f606 [feat]: resumable downloads (#1746)
* optimize tests

* add test

* resumable downloads
2022-08-24 15:22:49 -06:00
Stephen Chavez
2dd31fa93f Disable bluetooth properly #862 (#1745)
add disable cmd for uart

Co-authored-by: Stephen Chavez <stephen@start9labs.com>
2022-08-22 14:16:11 -06:00
Thomas Moerkerken
df20d4f100 Set pipeline job timeouts and add ca-certificates to test container (#1753)
* set job timeout

* add ca-cert package to test container

* enable unittest

* include apt update

* set yes

* skip logs::test_logs unittest
2022-08-22 13:43:29 -06:00
Matt Hill
3ddeb5fa94 [Fix] websocket connecting and patchDB connection monitoring (#1738)
* refactor how we handle rpc responses and patchdb connection monitoring

* websockets only

* remove unused global error handlers

* chore: clear storage inside auth service

* feat: convert all global toasts to declarative approach (#1754)

* no more reference to serverID

Co-authored-by: Aiden McClelland <me@drbonez.dev>
Co-authored-by: waterplea <alexander@inkin.ru>
2022-08-22 10:53:52 -06:00
Thomas Moerkerken
70baed88f4 add x86 build and run unittests to backend pipeline (#1682)
* add build backend for x86_64

* remove unnecessary build steps

* install backend dependency

* build and run backend unittest

* fix cache key

* buildx is required, qemu is not.

* Add missing steps from Makefile

* fix build_command var is unused

* select more common test env

* update pipeline names and docs

* use variable for node version

* use correct artifact name

* update pipeline references

* skip unittest that needs ca-cert installed

* correct pipeline name

* use nextest pre-built binary; add term color

* fix cache permissions warning

* update documentation
2022-08-18 10:24:17 -06:00
Aiden McClelland
5ba0d594a2 Bugfix/dns (#1741)
* keep openresolv

* fix init
2022-08-18 10:23:04 -06:00
Stephen Chavez
6505c4054f Feat: HttpReader (#1733)
* The start of implementing http_reader, issue #1644

* structure done

* v1 of http_reader

* first http_header poc

* Add AsyncSeek trait

* remove import

* move import

* fix typo

* add error checking for async seek

* fix handling of positions

* rename variables

* Update backend/src/util/http_reader.rs

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>

* clean up work

* improve error handling, and change if statement.

Co-authored-by: Stephen Chavez <stephen@start9labs.com>
Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
2022-08-18 10:22:51 -06:00
Matt Hill
e1c30a918b highlight instructions if not viewed (#1731) 2022-08-15 11:03:11 -06:00
Chris Guida
f812e208fa fix cli install (#1720) 2022-08-10 18:03:20 -05:00
Lucy C
9e7526c191 fix build for patch-db client for consistency (#1722) 2022-08-10 16:48:44 -06:00
Aiden McClelland
07194e52cd Update README.md (#1728) 2022-08-10 16:35:53 -06:00
Chris Guida
2f8d825970 [Feat] follow logs (#1714)
* tail logs

* add cli

* add FE

* abstract http to shared

* batch new logs

* file download for logs

* fix modal error when no config

Co-authored-by: Chris Guida <chrisguida@users.noreply.github.com>
Co-authored-by: Aiden McClelland <me@drbonez.dev>
Co-authored-by: Matt Hill <matthewonthemoon@gmail.com>
Co-authored-by: BluJ <mogulslayer@gmail.com>
2022-08-03 12:06:25 -06:00
J M
c44eb3a2c3 fix: Add modification to the max_user_watches (#1695)
* fix: Add modification to the max_user_watches

* chore: Move to initialization
2022-08-02 10:44:49 -06:00
Aiden McClelland
8207770369 write image to sparse-aware archive format (#1709) 2022-08-01 15:39:36 -06:00
Chris Guida
365952bbe9 Add build-essential to README.md (#1716)
Update README.md
2022-08-01 15:38:05 -06:00
Matt Hill
5404ebce1c observe response not events in http reqs to effectively use firstValueFrom 2022-07-29 17:43:23 -06:00
Matt Hill
13411f1830 closes #1710 2022-07-29 12:45:21 -06:00
Matt Hill
43090c9873 closes #1706 2022-07-29 12:45:21 -06:00
Matt Hill
34000fb9f0 closes #1711 2022-07-29 12:45:21 -06:00
Matt Hill
c2f9c6a38d display logs in local time while retaining iso format 2022-07-29 12:45:21 -06:00
Alex Inkin
a5c97d4c24 feat: migrate to Angular 14 and RxJS 7 (#1681)
* feat: migrate to Angular 14 and RxJS 7

* chore: update ng-qrcode

* chore: update patch-db

* chore: remove unnecessary generics
2022-07-27 21:31:46 -06:00
Aiden McClelland
9514b97ca0 Update README.md (#1703) 2022-07-27 18:10:52 -06:00
kn0wmad
22e84cc922 Update README.md (#1705)
* Update README.md

Updated release to 0.3.1

* use tag called latest

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2022-07-27 18:10:06 -06:00
Aiden McClelland
13b97296f5 formatting (#1698) 2022-07-27 18:00:48 -06:00
Aiden McClelland
d5f7e15dfb fix typo (#1702) 2022-07-26 17:32:37 -06:00
Aiden McClelland
7bf7b1e71e NO_KEY for CI images (#1700) 2022-07-26 17:32:28 -06:00
Matt Hill
7b17498722 set Matt as default assignee (#1697) 2022-07-26 15:12:31 -06:00
Aiden McClelland
3473633e43 sync blockdev after update (#1694) 2022-07-26 10:34:23 -06:00
Aiden McClelland
f455b8a007 ask for sudo password immediately during make (#1693) 2022-07-26 10:32:32 -06:00
Aiden McClelland
daabba12d3 honor shutdown from diagnostic ui (#1692) 2022-07-25 20:21:15 -06:00
Matt Hill
61864d082f messaging for restart, shutdown, rebuild (#1691)
* messaging for restart, shutdown, rebuild

* fix typo

* better messaging
2022-07-25 15:28:53 -06:00
Aiden McClelland
a7cd1e0ce6 sync data to fs before shutdown (#1690) 2022-07-25 15:23:40 -06:00
Matt Hill
0dd6d3a500 marketplace published at for service (#1689)
* at published timestamp to marketplace package show

* add todo
2022-07-25 12:47:50 -06:00
Aiden McClelland
bdb906bf26 add marketplace_url to backup metadata for service (#1688) 2022-07-25 12:43:40 -06:00
Aiden McClelland
61da050fe8 only validate mounts for inject if eos >=0.3.1.1 (#1686)
only validate mounts for inject if `>=0.3.1.1`
2022-07-25 12:20:24 -06:00
Matt Hill
83fe391796 replace bang with question mark in html (#1683) 2022-07-25 12:05:44 -06:00
Aiden McClelland
37657fa6ad issue notification when individual package restore fails (#1685) 2022-07-25 12:02:41 -06:00
Aiden McClelland
908a945b95 allow falsey rpc response (#1680)
* allow falsey rpc response

* better check for rpc error and remove extra function

Co-authored-by: Matt Hill <matthewonthemoon@gmail.com>
2022-07-25 10:16:04 -06:00
Aiden McClelland
36c720227f allow server.update to update to current version (#1679) 2022-07-22 14:10:09 -06:00
J M
c22c80d3b0 feat: atomic writing (#1673)
* feat: atomic writing

* Apply suggestions from code review

* clean up temp files on error

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2022-07-22 14:08:49 -06:00
Aiden McClelland
15af827cbc add standby mode (#1671)
* add standby mode

* fix standby mode to go before drive mount
2022-07-22 13:13:49 -06:00
Matt Hill
4a54c7ca87 draft releases notes for 0311 (#1677) 2022-07-22 11:17:18 -06:00
Alex Inkin
7b8a0eadf3 chore: enable strict mode (#1569)
* chore: enable strict mode

* refactor: remove sync data access from PatchDbService

* launchable even when no LAN url

Co-authored-by: Matt Hill <matthewonthemoon@gmail.com>
2022-07-22 09:51:08 -06:00
Aiden McClelland
9a01a0df8e refactor build process (#1675)
* add nc-broadcast to view initialization.sh logs

* include stderr

* refactor build

* add frontend/config.json as frontend dependency

* fix nc-broadcast

* always run all workflows

* address dependabot alerts

* fix build caching

* remove registries.json

* more efficient build
2022-07-21 15:18:44 -06:00
Aiden McClelland
ea2d77f536 lower log level for docker deser fallback message (#1672) 2022-07-21 12:13:46 -06:00
Aiden McClelland
e29003539b trust local ca (#1670) 2022-07-21 12:11:47 -06:00
Lucy C
97bdb2dd64 run build checks only when relevant FE changes (#1664) 2022-07-20 20:22:26 -06:00
Aiden McClelland
40d446ba32 fix migration, add logging (#1674)
* fix migration, add logging

* change stack overflow to runtime error
2022-07-20 16:00:25 -06:00
J M
5fa743755d feat: Make the rename effect (#1669)
* feat: Make the rename effect

* chore: Change to dst and src

* chore: update the remove file to use dst src
2022-07-20 13:42:54 -06:00
J M
0f027fefb8 chore: Update to have the new version 0.3.1.1 (#1668) 2022-07-19 10:03:14 -06:00
Matt Hill
56acb3f281 Mask chars beyond 16 (#1666)
fixes #1662
2022-07-18 18:31:53 -06:00
Lucy C
5268185604 add readme to system-images folder (#1665) 2022-07-18 15:48:42 -06:00
J M
635c3627c9 feat: Variable args (#1667)
* feat: Variable args

* chore: Make the assert error message not wrong
2022-07-18 15:46:32 -06:00
Chris Guida
009f7ddf84 sdk: don't allow mounts in inject actions (#1653) 2022-07-18 12:26:00 -06:00
J M
4526618c32 fix: Resolve fighting with NM (#1660) 2022-07-18 09:44:33 -06:00
Matt Hill
6dfd46197d handle case where selected union enum is invalid after migration (#1658)
* handle case where selected union enum is invalid after migration

* revert necessary ternary and fix types

Co-authored-by: Lucy Cifferello <12953208+elvece@users.noreply.github.com>
2022-07-17 13:42:36 -06:00
Aiden McClelland
778471d3cc Update product.yaml (#1638) 2022-07-13 11:15:45 -06:00
Aiden McClelland
bbcf2990f6 fix build (#1639) 2022-07-13 11:14:50 -06:00
Aiden McClelland
ac30ab223b return correct error on failed os download (#1636) 2022-07-12 12:48:18 -06:00
J M
50e7b479b5 Fix/receipts health (#1616)
* release lock on update progress (#1614)

* chore: remove the receipt

* chore: Remove the receipt

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
2022-07-12 12:48:01 -06:00
Aiden McClelland
1367428499 update backend dependencies (#1637) 2022-07-12 12:18:12 -06:00
Mariusz Kogen
e5de91cbe5 🐋 docker stats fix (#1630)
* entry obsolete...

* 🐋 docker stats fix
2022-07-11 18:18:22 -06:00
491 changed files with 26728 additions and 16065 deletions

View File

@@ -1,21 +1,21 @@
name: 🐛 Bug Report
description: Create a report to help us improve EmbassyOS
description: Create a report to help us improve embassyOS
title: '[bug]: '
labels: [Bug, Needs Triage]
assignees:
- dr-bonez
- MattDHill
body:
- type: checkboxes
attributes:
label: Prerequisites
description: Please confirm you have completed the following.
options:
- label: I have searched for [existing issues](https://github.com/start9labs/embassy-os/issues) that already report this problem, without success.
- label: I have searched for [existing issues](https://github.com/start9labs/embassy-os/issues) that already report this problem.
required: true
- type: input
attributes:
label: EmbassyOS Version
description: What version of EmbassyOS are you running?
label: embassyOS Version
description: What version of embassyOS are you running?
placeholder: e.g. 0.3.0
validations:
required: true

View File

@@ -1,16 +1,16 @@
name: 💡 Feature Request
description: Suggest an idea for EmbassyOS
description: Suggest an idea for embassyOS
title: '[feat]: '
labels: [Enhancement]
assignees:
- dr-bonez
- MattDHill
body:
- type: checkboxes
attributes:
label: Prerequisites
description: Please confirm you have completed the following.
options:
- label: I have searched for [existing issues](https://github.com/start9labs/embassy-os/issues) that already suggest this feature, without success.
- label: I have searched for [existing issues](https://github.com/start9labs/embassy-os/issues) that already suggest this feature.
required: true
- type: textarea
attributes:
@@ -27,7 +27,7 @@ body:
- type: textarea
attributes:
label: Describe Preferred Solution
description: How you want this feature added to EmbassyOS?
description: How you want this feature added to embassyOS?
- type: textarea
attributes:
label: Describe Alternatives

View File

@@ -1,20 +1,25 @@
# This folder contains GitHub Actions workflows for building the project
## backend-pr
Runs: when a pull request targets the master branch and changes the libs/ and/or backend/ folders
## backend
Runs: manually (on: workflow_dispatch) or called by product-pipeline (on: workflow_call)
This workflow uses the actions docker/setup-qemu-action@v1 and docker/setup-buildx-action@v1 to prepare the environment for aarch64 cross compilation using docker buildx.
A matrix-strategy has been used for building the v8 snapshot instead of the makefile to allow parallel job execution.
This workflow uses the action docker/setup-buildx-action@v1 to prepare the environment for aarch64 cross compilation using docker buildx.
When execution of aarch64 containers is required the action docker/setup-qemu-action@v1 is added.
A matrix-strategy has been used to build for both x86_64 and aarch64 platforms in parallel.
## frontend-pr
Runs: when a pull request targets the master branch and changes the frontend/ folder
### Running unittests
Unittests are run using [cargo-nextest](https://nexte.st/). First the sources are (cross-)compiled and archived. The archive is then run on the correct platform.
## frontend
Runs: manually (on: workflow_dispatch) or called by product-pipeline (on: workflow_call)
This workflow builds the frontends.
## product
Runs: when a change to the master branch is made
Runs: when a pull request targets the master or next branch and when a change to the master or next branch is made
This workflow builds everything, re-using the backend-pr and frontend-pr workflows.
This workflow builds everything, re-using the backend and frontend workflows.
The download and extraction order of artifacts is relevant to `make`, as it checks the file timestamps to decide which targets need to be executed.
Result: eos.img

View File

@@ -1,104 +0,0 @@
name: Backend PR
on:
workflow_call:
workflow_dispatch:
pull_request:
branches:
- master
paths:
- 'backend/**'
- 'libs/**'
jobs:
libs:
name: Build libs
strategy:
matrix:
target: [amd64, arm64]
include:
- target: amd64
snapshot_command: ./build-v8-snapshot.sh
artifact_name: js_snapshot
artifact_path: libs/js_engine/src/artifacts/JS_SNAPSHOT.bin
- target: arm64
snapshot_command: ./build-arm-v8-snapshot.sh
artifact_name: arm_js_snapshot
artifact_path: libs/js_engine/src/artifacts/ARM_JS_SNAPSHOT.bin
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ runner.os }}-cargo-libs-${{ matrix.target }}-${{ hashFiles('libs/Cargo.lock') }}
- name: Build v8 snapshot
run: ${{ matrix.snapshot_command }}
working-directory: libs
- uses: actions/upload-artifact@v3
with:
name: ${{ matrix.artifact_name }}
path: ${{ matrix.artifact_path }}
backend:
name: Build backend
runs-on: ubuntu-latest
needs: libs
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Download arm_js_snapshot artifact
uses: actions/download-artifact@v3
with:
name: arm_js_snapshot
path: libs/js_engine/src/artifacts/
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ runner.os }}-cargo-backend-${{ hashFiles('backend/Cargo.lock') }}
- name: Build backend
run: make backend
- name: 'Tar files to preserve file permissions'
run: tar -cvf backend.tar backend/target/aarch64-unknown-linux-gnu/release/embassy*
- uses: actions/upload-artifact@v3
with:
name: backend
path: backend.tar

240
.github/workflows/backend.yaml vendored Normal file
View File

@@ -0,0 +1,240 @@
name: Backend
on:
workflow_call:
workflow_dispatch:
env:
RUST_VERSION: "1.62.1"
jobs:
build_libs:
name: Build libs
strategy:
fail-fast: false
matrix:
target: [x86_64, aarch64]
include:
- target: x86_64
snapshot_command: ./build-v8-snapshot.sh
artifact_name: js_snapshot
artifact_path: libs/js_engine/src/artifacts/JS_SNAPSHOT.bin
- target: aarch64
snapshot_command: ./build-arm-v8-snapshot.sh
artifact_name: arm_js_snapshot
artifact_path: libs/js_engine/src/artifacts/ARM_JS_SNAPSHOT.bin
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
if: ${{ matrix.target == 'aarch64' }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
if: ${{ matrix.target == 'aarch64' }}
- uses: actions-rs/toolchain@v1
with:
toolchain: ${{ env.RUST_VERSION }}
override: true
if: ${{ matrix.target == 'x86_64' }}
- uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
libs/target/
key: ${{ runner.os }}-cargo-libs-${{ matrix.target }}-${{ hashFiles('libs/Cargo.lock') }}
- name: Build v8 snapshot
run: ${{ matrix.snapshot_command }}
working-directory: libs
- uses: actions/upload-artifact@v3
with:
name: ${{ matrix.artifact_name }}
path: ${{ matrix.artifact_path }}
build_backend:
name: Build backend
strategy:
fail-fast: false
matrix:
target: [x86_64, aarch64]
include:
- target: x86_64
snapshot_download: js_snapshot
- target: aarch64
snapshot_download: arm_js_snapshot
runs-on: ubuntu-latest
timeout-minutes: 120
needs: build_libs
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Download ${{ matrix.snapshot_download }} artifact
uses: actions/download-artifact@v3
with:
name: ${{ matrix.snapshot_download }}
path: libs/js_engine/src/artifacts/
- uses: actions-rs/toolchain@v1
with:
toolchain: ${{ env.RUST_VERSION }}
override: true
if: ${{ matrix.target == 'x86_64' }}
- uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
backend/target/
key: ${{ runner.os }}-cargo-backend-${{ matrix.target }}-${{ hashFiles('backend/Cargo.lock') }}
- name: Install dependencies
run: sudo apt-get install libavahi-client-dev
if: ${{ matrix.target == 'x86_64' }}
- name: Check Git Hash
run: ./check-git-hash.sh
- name: Check Environment
run: ./check-environment.sh
- name: Build backend
run: cargo build --release --target x86_64-unknown-linux-gnu --locked
working-directory: backend
if: ${{ matrix.target == 'x86_64' }}
- name: Build backend
run: |
docker run --rm \
-v "/home/runner/.cargo/registry":/root/.cargo/registry \
-v "$(pwd)":/home/rust/src \
-P start9/rust-arm-cross:aarch64 \
sh -c 'cd /home/rust/src/backend &&
rustup install ${{ env.RUST_VERSION }} &&
rustup override set ${{ env.RUST_VERSION }} &&
rustup target add aarch64-unknown-linux-gnu &&
cargo build --release --target ${{ matrix.target }}-unknown-linux-gnu --locked'
if: ${{ matrix.target == 'aarch64' }}
- name: 'Tar files to preserve file permissions'
run: make ARCH=${{ matrix.target }} backend-${{ matrix.target }}.tar
- uses: actions/upload-artifact@v3
with:
name: backend-${{ matrix.target }}
path: backend-${{ matrix.target }}.tar
- name: Install nextest
uses: taiki-e/install-action@nextest
- name: Build and archive tests
run: cargo nextest archive --archive-file nextest-archive-${{ matrix.target }}.tar.zst --target ${{ matrix.target }}-unknown-linux-gnu
working-directory: backend
if: ${{ matrix.target == 'x86_64' }}
- name: Build and archive tests
run: |
docker run --rm \
-v "$HOME/.cargo/registry":/root/.cargo/registry \
-v "$(pwd)":/home/rust/src \
-P start9/rust-arm-cross:aarch64 \
sh -c 'cd /home/rust/src/backend &&
rustup install ${{ env.RUST_VERSION }} &&
rustup override set ${{ env.RUST_VERSION }} &&
rustup target add aarch64-unknown-linux-gnu &&
curl -LsSf https://get.nexte.st/latest/linux | tar zxf - -C ${CARGO_HOME:-~/.cargo}/bin &&
cargo nextest archive --archive-file nextest-archive-${{ matrix.target }}.tar.zst --target ${{ matrix.target }}-unknown-linux-gnu'
if: ${{ matrix.target == 'aarch64' }}
- name: Reset permissions
run: sudo chown -R $USER target
working-directory: backend
if: ${{ matrix.target == 'aarch64' }}
- name: Upload archive to workflow
uses: actions/upload-artifact@v3
with:
name: nextest-archive-${{ matrix.target }}
path: backend/nextest-archive-${{ matrix.target }}.tar.zst
run_tests_backend:
name: Test backend
strategy:
fail-fast: false
matrix:
target: [x86_64, aarch64]
include:
- target: x86_64
- target: aarch64
runs-on: ubuntu-latest
timeout-minutes: 60
needs: build_backend
env:
CARGO_TERM_COLOR: always
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
if: ${{ matrix.target == 'aarch64' }}
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
if: ${{ matrix.target == 'aarch64' }}
- run: mkdir -p ~/.cargo/bin
if: ${{ matrix.target == 'x86_64' }}
- name: Install nextest
uses: taiki-e/install-action@nextest
if: ${{ matrix.target == 'x86_64' }}
- name: Download archive
uses: actions/download-artifact@v3
with:
name: nextest-archive-${{ matrix.target }}
- name: Download nextest (aarch64)
run: wget -O nextest-aarch64.tar.gz https://get.nexte.st/latest/linux-arm
if: ${{ matrix.target == 'aarch64' }}
- name: Run tests
run: |
${CARGO_HOME:-~/.cargo}/bin/cargo-nextest nextest run --no-fail-fast --archive-file nextest-archive-${{ matrix.target }}.tar.zst \
--filter-expr 'not (test(system::test_get_temp) | test(net::tor::test) | test(system::test_get_disk_usage) | test(net::ssl::certificate_details_persist) | test(net::ssl::ca_details_persist))'
if: ${{ matrix.target == 'x86_64' }}
- name: Run tests
run: |
docker run --rm --platform linux/arm64/v8 \
-v "/home/runner/.cargo/registry":/usr/local/cargo/registry \
-v "$(pwd)":/home/rust/src \
-e CARGO_TERM_COLOR=${{ env.CARGO_TERM_COLOR }} \
-P ubuntu:20.04 \
sh -c '
apt update &&
apt install -y ca-certificates &&
cd /home/rust/src &&
mkdir -p ~/.cargo/bin &&
tar -zxvf nextest-aarch64.tar.gz -C ${CARGO_HOME:-~/.cargo}/bin &&
${CARGO_HOME:-~/.cargo}/bin/cargo-nextest nextest run --archive-file nextest-archive-${{ matrix.target }}.tar.zst \
--filter-expr "not (test(system::test_get_temp) | test(net::tor::test) | test(system::test_get_disk_usage) | test(net::ssl::certificate_details_persist) | test(net::ssl::ca_details_persist))"'
if: ${{ matrix.target == 'aarch64' }}

View File

@@ -1,18 +1,17 @@
name: Frontend PR
name: Frontend
on:
workflow_call:
workflow_dispatch:
pull_request:
branches:
- master
paths:
- 'frontend/**'
env:
NODEJS_VERSION: '16'
jobs:
frontend:
name: Build frontend
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
with:
@@ -20,7 +19,7 @@ jobs:
- uses: actions/setup-node@v3
with:
node-version: 16
node-version: ${{ env.NODEJS_VERSION }}
- name: Get npm cache directory
id: npm-cache-dir
@@ -38,7 +37,7 @@ jobs:
run: make frontends
- name: 'Tar files to preserve file permissions'
run: tar -cvf frontend.tar frontend/dist frontend/config.json
run: tar -cvf frontend.tar ENVIRONMENT.txt GIT_HASH.txt frontend/dist frontend/config.json
- uses: actions/upload-artifact@v3
with:

View File

@@ -5,77 +5,52 @@ on:
push:
branches:
- master
- next
pull_request:
branches:
- master
- next
jobs:
compat:
name: Build compat.tar
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
override: true
- uses: actions/cache@v3
with:
path: |
~/.cargo/bin/
~/.cargo/registry/index/
~/.cargo/registry/cache/
~/.cargo/git/db/
target/
key: ${{ runner.os }}-cargo-compat-${{ hashFiles('**/system-images/compat/Cargo.lock') }}
- name: Build image
run: make system-images/compat/compat.tar
- uses: actions/upload-artifact@v3
with:
name: compat.tar
path: system-images/compat/compat.tar
uses: ./.github/workflows/reusable-workflow.yaml
with:
build_command: make system-images/compat/compat.tar
artifact_name: compat.tar
artifact_path: system-images/compat/compat.tar
utils:
name: Build utils.tar
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Set up QEMU
uses: docker/setup-qemu-action@v1
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
uses: ./.github/workflows/reusable-workflow.yaml
with:
build_command: make system-images/utils/utils.tar
artifact_name: utils.tar
artifact_path: system-images/utils/utils.tar
- name: Build image
run: make system-images/utils/utils.tar
binfmt:
uses: ./.github/workflows/reusable-workflow.yaml
with:
build_command: make system-images/binfmt/binfmt.tar
artifact_name: binfmt.tar
artifact_path: system-images/binfmt/binfmt.tar
- uses: actions/upload-artifact@v3
with:
name: utils.tar
path: system-images/utils/utils.tar
nc-broadcast:
uses: ./.github/workflows/reusable-workflow.yaml
with:
build_command: make cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast
artifact_name: nc-broadcast.tar
artifact_path: cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast
backend:
uses: ./.github/workflows/backend-pr.yaml
uses: ./.github/workflows/backend.yaml
frontend:
uses: ./.github/workflows/frontend-pr.yaml
uses: ./.github/workflows/frontend.yaml
image:
name: Build image
runs-on: ubuntu-latest
needs: [compat,utils,backend,frontend]
timeout-minutes: 60
needs: [compat,utils,binfmt,nc-broadcast,backend,frontend]
steps:
- uses: actions/checkout@v3
with:
@@ -93,6 +68,18 @@ jobs:
name: utils.tar
path: system-images/utils
- name: Download binfmt.tar artifact
uses: actions/download-artifact@v3
with:
name: binfmt.tar
path: system-images/binfmt
- name: Download nc-broadcast.tar artifact
uses: actions/download-artifact@v3
with:
name: nc-broadcast.tar
path: cargo-deps/aarch64-unknown-linux-gnu/release
- name: Download js_snapshot artifact
uses: actions/download-artifact@v3
with:
@@ -108,11 +95,11 @@ jobs:
- name: Download backend artifact
uses: actions/download-artifact@v3
with:
name: backend
name: backend-aarch64
- name: 'Extract backend'
run:
tar -mxvf backend.tar
tar -mxvf backend-aarch64.tar
- name: Download frontend artifact
uses: actions/download-artifact@v3
@@ -139,4 +126,12 @@ jobs:
key: cache-raspios
- name: Build image
run: "make V=1 eos.img --debug"
run: "make V=1 NO_KEY=1 eos.img --debug"
- name: Compress image
run: "make gzip"
- uses: actions/upload-artifact@v3
with:
name: image
path: eos.tar.gz

View File

@@ -0,0 +1,34 @@
name: Reusable Workflow
on:
workflow_call:
inputs:
build_command:
required: true
type: string
artifact_name:
required: true
type: string
artifact_path:
required: true
type: string
jobs:
generic_build_job:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- uses: actions/checkout@v3
with:
submodules: recursive
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v1
- name: Build image
run: ${{ inputs.build_command }}
- uses: actions/upload-artifact@v3
with:
name: ${{ inputs.artifact_name }}
path: ${{ inputs.artifact_path }}

4
.gitignore vendored
View File

@@ -12,3 +12,7 @@ deploy_web.sh
deploy_web.sh
secrets.db
.vscode/
/cargo-deps/**/*
/ENVIRONMENT.txt
/GIT_HASH.txt
/eos.tar.gz

3
.gitmodules vendored
View File

@@ -1,6 +1,3 @@
[submodule "rpc-toolkit"]
path = rpc-toolkit
url = https://github.com/Start9Labs/rpc-toolkit.git
[submodule "patch-db"]
path = patch-db
url = https://github.com/Start9Labs/patch-db.git

View File

@@ -136,21 +136,21 @@ Enhancement suggestions are tracked as [GitHub issues](https://github.com/Start9
### Project Structure
EmbassyOS is composed of the following components. Please visit the README for each component to understand the dependency requirements and installation instructions.
- [`ui`](frontend/README.md) (Typescript Ionic Angular) is the code that is deployed to the browser to provide the user interface for EmbassyOS.
- [`backend`](backend/README.md) (Rust) is a command line utility, daemon, and software development kit that sets up and manages services and their environments, provides the interface for the ui, manages system state, and provides utilities for packaging services for EmbassyOS.
embassyOS is composed of the following components. Please visit the README for each component to understand the dependency requirements and installation instructions.
- [`ui`](frontend/README.md) (Typescript Ionic Angular) is the code that is deployed to the browser to provide the user interface for embassyOS.
- [`backend`](backend/README.md) (Rust) is a command line utility, daemon, and software development kit that sets up and manages services and their environments, provides the interface for the ui, manages system state, and provides utilities for packaging services for embassyOS.
- `patch-db` - A diff based data store that is used to synchronize data between the front and backend.
- Notably, `patch-db` has a [client](https://github.com/Start9Labs/patch-db/tree/master/client) with its own dependency and installation requirements.
- `rpc-toolkit` - A library for generating an rpc server with cli bindings from Rust functions.
- `system-images` - (Docker, Rust) A suite of utility Docker images that are preloaded with EmbassyOS to assist with functions relating to services (eg. configuration, backups, health checks).
- [`setup-wizard`](frontend/README.md)- Code for the user interface that is displayed during the setup and recovery process for EmbassyOS.
- [`diagnostic-ui`](frontend/README.md) - Code for the user interface that is displayed when something has gone wrong with starting up EmbassyOS, which provides helpful debugging tools.
- `system-images` - (Docker, Rust) A suite of utility Docker images that are preloaded with embassyOS to assist with functions relating to services (eg. configuration, backups, health checks).
- [`setup-wizard`](frontend/README.md)- Code for the user interface that is displayed during the setup and recovery process for embassyOS.
- [`diagnostic-ui`](frontend/README.md) - Code for the user interface that is displayed when something has gone wrong with starting up embassyOS, which provides helpful debugging tools.
### Your First Code Contribution
#### Setting Up Your Development Environment
First, clone the EmbassyOS repository and from the project root, pull in the submodules for dependent libraries.
First, clone the embassyOS repository and from the project root, pull in the submodules for dependent libraries.
```sh
git clone https://github.com/Start9Labs/embassy-os.git
@@ -160,7 +160,7 @@ git submodule update --init --recursive
Depending on which component of the ecosystem you are interested in contributing to, follow the installation requirements listed in that component's README (linked [above](#project-structure))
#### Building The Image
This step is for setting up an environment in which to test your code changes if you do not yet have a EmbassyOS.
This step is for setting up an environment in which to test your code changes if you do not yet have a embassyOS.
- Requirements
- `ext4fs` (available if running on the Linux kernel)
@@ -177,7 +177,7 @@ Contributions in the form of setup guides for integrations with external applica
## Styleguides
### Formatting
Each component of EmbassyOS contains its own style guide. Code must be formatted with the formatter designated for each component. These are outlined within each component folder's README.
Each component of embassyOS contains its own style guide. Code must be formatted with the formatter designated for each component. These are outlined within each component folder's README.
### Atomic Commits
Commits [should be atomic](https://en.wikipedia.org/wiki/Atomic_commit#Atomic_commit_convention) and diffs should be easy to read.

View File

@@ -1,25 +1,42 @@
# START9 PERSONAL USE LICENSE v1.0
# START9 NON-COMMERCIAL LICENSE v1
Version 1, 22 September 2022
This license governs the use of the accompanying Software. If you use the Software, you accept this license. If you do not accept the license, do not use the Software.
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. **Definitions.**
1. “Licensor” means the copyright owner, Start9 Labs, Inc, or its successor(s) in interest, or a future assignee of the copyright.
2. “Source Code” means the preferred form of the Software for making modifications to it.
3. “Object Code” means any non-source form of the Software, including the machine-language output by a compiler or assembler.
4. “Distribute” means to convey or to publish and generally has the same meaning here as under U.S. Copyright law.
5. “Sell” means practicing any or all of the rights granted to you under the License to provide to third parties, for a fee or other consideration (including without limitation fees for hosting or consulting/support services related to the Software), a product or service whose value derives, entirely or substantially, from the functionality of the Software.
### 1.Definitions
2. **Grant of Rights.** Subject to the terms of this license, the Licensor grants you, the licensee, a non-exclusive, worldwide, royalty-free copyright license to:
1. Access, audit, copy, modify, compile, or distribute the Source Code or modifications to the Source Code.
2. Run, test, or otherwise use the Object Code.
"License" means version 1 of the Start9 Non-Commercial License.
3. **Limitations.**
1. The grant of rights under the License will NOT include, and the License does NOT grant you the right to:
1. Sell the Software or any derivative works based thereon.
2. Distribute the Object Code.
2. If you Distribute the Source Code, or if permission is separately granted to Distribute the Object Code, you expressly undertake not to remove, or modify, in any manner, the copyright notices attached to the Source Code, and displayed in any output of the Object Code when run, and to reproduce these notices, in an identical manner, in any distributed copies of the Software together with a copy of this license. If you Distribute a modified copy of the Software, or a derivative work based thereon, the work must carry prominent notices stating that you modified it, and giving a relevant date.
3. The terms of this license will apply to anyone who comes into possession of a copy of the Software, and any modifications or derivative works based thereon, made by anyone.
"Licensor" means the Start9 Labs, Inc, or its successor(s) in interest, or a future assignee of the copyright.
4. **Contributions.** You hereby grant to Licensor a perpetual, irrevocable, worldwide, non-exclusive, royalty-free license to use and exploit any modifications or derivative works based on the Source Code of which you are the author.
"You" (or "Your") means an individual or organization exercising permissions granted by this License.
5. **Disclaimer.** THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. LICENSOR HAS NO OBLIGATION TO SUPPORT RECIPIENTS OF THE SOFTWARE.
"Source Code" for a work means the preferred form of the work for making modifications to it.
"Object Code" means any non-source form of a work, including the machine-language output by a compiler or assembler.
"Work" means any work of authorship, whether in Source or Object form, made available under this License.
"Derivative Work" means any work, whether in Source or Object form, that is based on (or derived from) the Work.
"Distribute" means to convey or to publish and generally has the same meaning here as under U.S. Copyright law.
"Sell" means practicing any or all of the rights granted to you under the License to provide to third parties, for a fee or other consideration (including, without limitation, fees for hosting, consulting, or support services), a product or service whose value derives, entirely or substantially, from the functionality of the Work or Derivative Work.
### 2. Grant of Rights
Subject to the terms of this license, the Licensor grants you, the licensee, a non-exclusive, worldwide, royalty-free copyright license to access, audit, copy, modify, compile, run, test, distribute, or otherwise use the Software.
### 3. Limitations
1. The grant of rights under the License does NOT include, and the License does NOT grant You the right to Sell the Work or Derivative Work.
2. If you Distribute the Work or Derivative Work, you expressly undertake not to remove or modify, in any manner, the copyright notices attached to the Work or displayed in any output of the Work when run, and to reproduce these notices, in an identical manner, in any distributed copies of the Work or Derivative Work together with a copy of this License.
3. If you Distribute a Derivative Work, it must carry prominent notices stating that it has been modified from the Work, providing a relevant date.
### 4. Contributions
You hereby grant to Licensor a perpetual, irrevocable, worldwide, non-exclusive, royalty-free license to use and exploit any Derivative Work of which you are the author.
### 5. Disclaimer
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. LICENSOR HAS NO OBLIGATION TO SUPPORT RECIPIENTS OF THE SOFTWARE.

View File

@@ -1,20 +1,30 @@
EMBASSY_BINS := backend/target/aarch64-unknown-linux-gnu/release/embassyd backend/target/aarch64-unknown-linux-gnu/release/embassy-init backend/target/aarch64-unknown-linux-gnu/release/embassy-cli backend/target/aarch64-unknown-linux-gnu/release/embassy-sdk
ARCH = aarch64
ENVIRONMENT_FILE := $(shell ./check-environment.sh)
GIT_HASH_FILE := $(shell ./check-git-hash.sh)
EMBASSY_BINS := backend/target/$(ARCH)-unknown-linux-gnu/release/embassyd backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-init backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-cli backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-sdk backend/target/$(ARCH)-unknown-linux-gnu/release/avahi-alias
EMBASSY_UIS := frontend/dist/ui frontend/dist/setup-wizard frontend/dist/diagnostic-ui
EMBASSY_SRC := raspios.img product_key.txt $(EMBASSY_BINS) backend/embassyd.service backend/embassy-init.service $(EMBASSY_UIS) $(shell find build)
COMPAT_SRC := $(shell find system-images/compat/src)
UTILS_SRC := $(shell find system-images/utils/Dockerfile)
BACKEND_SRC := $(shell find backend/src) $(shell find patch-db/*/src) $(shell find rpc-toolkit/*/src) backend/Cargo.toml backend/Cargo.lock
FRONTEND_SRC := $(shell find frontend/projects) $(shell find frontend/assets)
PATCH_DB_CLIENT_SRC = $(shell find patch-db/client -not -path patch-db/client/dist)
GIT_REFS := $(shell find .git/refs/heads)
TMP_FILE := $(shell mktemp)
COMPAT_SRC := $(shell find system-images/compat/ -not -path 'system-images/compat/target/*' -and -not -name compat.tar -and -not -name target)
UTILS_SRC := $(shell find system-images/utils/ -not -name utils.tar)
BINFMT_SRC := $(shell find system-images/binfmt/ -not -name binfmt.tar)
BACKEND_SRC := $(shell find backend/src) $(shell find backend/migrations) $(shell find patch-db/*/src) backend/Cargo.toml backend/Cargo.lock
FRONTEND_SHARED_SRC := $(shell find frontend/projects/shared) $(shell find frontend/assets) $(shell ls -p frontend/ | grep -v / | sed 's/^/frontend\//g') frontend/node_modules frontend/config.json patch-db/client/dist frontend/patchdb-ui-seed.json
FRONTEND_UI_SRC := $(shell find frontend/projects/ui)
FRONTEND_SETUP_WIZARD_SRC := $(shell find frontend/projects/setup-wizard)
FRONTEND_DIAGNOSTIC_UI_SRC := $(shell find frontend/projects/diagnostic-ui)
PATCH_DB_CLIENT_SRC := $(shell find patch-db/client -not -path patch-db/client/dist)
GZIP_BIN := $(shell which pigz || which gzip)
$(shell sudo true)
.DELETE_ON_ERROR:
.PHONY: all gzip clean format sdk snapshots frontends ui backend
all: eos.img
gzip: eos.img
gzip -k eos.img
gzip: eos.tar.gz
eos.tar.gz: eos.img
tar --format=posix -cS -f- eos.img | $(GZIP_BIN) > eos.tar.gz
clean:
rm -f eos.img
@@ -27,20 +37,27 @@ clean:
rm -rf frontend/dist
rm -rf patch-db/client/node_modules
rm -rf patch-db/client/dist
sudo rm -rf cargo-deps
format:
cd backend && cargo +nightly fmt
cd libs && cargo +nightly fmt
sdk:
cd backend/ && ./install-sdk.sh
eos.img: $(EMBASSY_SRC) system-images/compat/compat.tar system-images/utils/utils.tar
eos.img: $(EMBASSY_SRC) system-images/compat/compat.tar system-images/utils/utils.tar system-images/binfmt/binfmt.tar cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast $(ENVIRONMENT_FILE) $(GIT_HASH_FILE)
! test -f eos.img || rm eos.img
if [ "$(NO_KEY)" = "1" ]; then NO_KEY=1 ./build/make-image.sh; else ./build/make-image.sh; fi
system-images/compat/compat.tar: $(COMPAT_SRC)
cd system-images/compat && ./build.sh
cd system-images/compat && DOCKER_CLI_EXPERIMENTAL=enabled docker buildx build --tag start9/x_system/compat --platform=linux/arm64 -o type=docker,dest=compat.tar .
cd system-images/compat && make
system-images/utils/utils.tar: $(UTILS_SRC)
cd system-images/utils && DOCKER_CLI_EXPERIMENTAL=enabled docker buildx build --tag start9/x_system/utils --platform=linux/arm64 -o type=docker,dest=utils.tar .
cd system-images/utils && make
system-images/binfmt/binfmt.tar: $(BINFMT_SRC)
cd system-images/binfmt && make
raspios.img:
wget --continue https://downloads.raspberrypi.org/raspios_lite_arm64/images/raspios_lite_arm64-2022-01-28/2022-01-28-raspios-bullseye-arm64-lite.zip
@@ -57,31 +74,49 @@ snapshots: libs/snapshot-creator/Cargo.toml
cd libs/ && ./build-v8-snapshot.sh
cd libs/ && ./build-arm-v8-snapshot.sh
$(EMBASSY_BINS): $(BACKEND_SRC)
$(EMBASSY_BINS): $(BACKEND_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) frontend/patchdb-ui-seed.json
cd backend && ./build-prod.sh
touch $(EMBASSY_BINS)
frontend/node_modules: frontend/package.json
npm --prefix frontend ci
$(EMBASSY_UIS): $(FRONTEND_SRC) frontend/node_modules patch-db/client patch-db/client/dist frontend/config.json
npm --prefix frontend run build:all
frontend/dist/ui: $(FRONTEND_UI_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE)
npm --prefix frontend run build:ui
frontend/config.json: .git/HEAD $(GIT_REFS)
frontend/dist/setup-wizard: $(FRONTEND_SETUP_WIZARD_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE)
npm --prefix frontend run build:setup
frontend/dist/diagnostic-ui: $(FRONTEND_DIAGNOSTIC_UI_SRC) $(FRONTEND_SHARED_SRC) $(ENVIRONMENT_FILE)
npm --prefix frontend run build:dui
frontend/config.json: $(GIT_HASH_FILE) frontend/config-sample.json
jq '.useMocks = false' frontend/config-sample.json > frontend/config.json
npm --prefix frontend run-script build-config
frontend/patchdb-ui-seed.json: frontend/package.json
jq '."ack-welcome" = "$(shell yq '.version' frontend/package.json)"' frontend/patchdb-ui-seed.json > ui-seed.tmp
mv ui-seed.tmp frontend/patchdb-ui-seed.json
patch-db/client/node_modules: patch-db/client/package.json
npm --prefix patch-db/client install
npm --prefix patch-db/client ci
patch-db/client/dist: $(PATCH_DB_CLIENT_SRC) patch-db/client/node_modules
! test -d patch-db/client/dist || rm -rf patch-db/client/dist
npm --prefix patch-db/client run build
npm --prefix frontend run build:deps
# used by github actions
backend-$(ARCH).tar: $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(EMBASSY_BINS)
tar -cvf $@ $^
# this is a convenience step to build all frontends - it is not referenced elsewhere in this file
frontends: frontend/node_modules frontend/config.json $(EMBASSY_UIS)
frontends: $(EMBASSY_UIS)
# this is a convenience step to build the UI
ui: frontend/node_modules frontend/config.json frontend/dist/ui
ui: frontend/dist/ui
# this is a convenience step to build the backend
backend: $(EMBASSY_BINS)
# used by github actions
backend: $(EMBASSY_BINS)
cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast:
./build-cargo-dep.sh nc-broadcast

View File

@@ -1,5 +1,6 @@
# EmbassyOS
[![Version](https://img.shields.io/github/v/tag/Start9Labs/embassy-os?color=success)](https://github.com/Start9Labs/embassy-os/releases)
# embassyOS
[![version](https://img.shields.io/github/v/tag/Start9Labs/embassy-os?color=success)](https://github.com/Start9Labs/embassy-os/releases)
[![build](https://github.com/Start9Labs/embassy-os/actions/workflows/product.yaml/badge.svg)](https://github.com/Start9Labs/embassy-os/actions/workflows/product.yaml)
[![community](https://img.shields.io/badge/community-matrix-yellow)](https://matrix.to/#/#community:matrix.start9labs.com)
[![community](https://img.shields.io/badge/community-telegram-informational)](https://t.me/start9_labs)
[![support](https://img.shields.io/badge/support-docs-important)](https://docs.start9labs.com)
@@ -11,10 +12,10 @@
### _Welcome to the era of Sovereign Computing_ ###
EmbassyOS is a browser-based, graphical operating system for a personal server. EmbassyOS facilitates the discovery, installation, network configuration, service configuration, data backup, dependency management, and health monitoring of self-hosted software services. It is the most advanced, secure, reliable, and user friendly personal server OS in the world.
embassyOS is a browser-based, graphical operating system for a personal server. embassyOS facilitates the discovery, installation, network configuration, service configuration, data backup, dependency management, and health monitoring of self-hosted software services. It is the most advanced, secure, reliable, and user friendly personal server OS in the world.
## Running EmbassyOS
There are multiple ways to get your hands on EmbassyOS.
## Running embassyOS
There are multiple ways to get your hands on embassyOS.
### :moneybag: Buy an Embassy
This is the most convenient option. Simply [buy an Embassy](https://start9.com) from Start9 and plug it in. Depending on where you live, shipping costs and import duties will vary.
@@ -28,22 +29,23 @@ While not as convenient as buying an Embassy, this option is easier than you mig
To pursue this option, follow this [guide](https://start9.com/latest/diy).
### :hammer_and_wrench: Build EmbassyOS from Source
### :hammer_and_wrench: Build embassyOS from Source
EmbassyOS can be built from source, for personal use, for free.
embassyOS can be built from source, for personal use, for free.
A detailed guide for doing so can be found [here](https://github.com/Start9Labs/embassy-os/blob/master/build/README.md).
## :heart: Contributing
There are multiple ways to contribute: work directly on EmbassyOS, package a service for the marketplace, or help with documentation and guides. To learn more about contributing, see [here](https://github.com/Start9Labs/embassy-os/blob/master/CONTRIBUTING.md).
There are multiple ways to contribute: work directly on embassyOS, package a service for the marketplace, or help with documentation and guides. To learn more about contributing, see [here](https://github.com/Start9Labs/embassy-os/blob/master/CONTRIBUTING.md).
## UI Screenshots
<p align="center">
<img src="assets/EmbassyOS.png" alt="EmbassyOS" width="65%">
<img src="assets/embassyOS.png" alt="embassyOS" width="85%">
</p>
<p align="center">
<img src="assets/eos-services.png" alt="Embassy Services" width="45%">
<img src="assets/eos-preferences.png" alt="Embassy Preferences" width="45%">
<img src="assets/eOS-preferences.png" alt="Embassy Preferences" width="49%">
<img src="assets/eOS-ghost.png" alt="Embassy Ghost Service" width="49%">
</p>
<p align="center">
<img src="assets/eos-bitcoind-health-check.png" alt="Embassy Bitcoin Health Checks" width="45%"> <img src="assets/eos-logs.png" alt="Embassy Logs" width="45%">
<img src="assets/eOS-synapse-health-check.png" alt="Embassy Synapse Health Checks" width="49%">
<img src="assets/eOS-sideload.png" alt="Embassy Sideload Service" width="49%">
</p>

Binary file not shown.

Before

Width:  |  Height:  |  Size: 285 KiB

BIN
assets/eOS-ghost.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 281 KiB

BIN
assets/eOS-preferences.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 266 KiB

BIN
assets/eOS-sideload.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 154 KiB

Binary file not shown.

After

Width:  |  Height:  |  Size: 213 KiB

BIN
assets/embassyOS.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 191 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 334 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 1.2 MiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 347 KiB

Binary file not shown.

Before

Width:  |  Height:  |  Size: 599 KiB

1631
backend/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -14,7 +14,7 @@ keywords = [
name = "embassy-os"
readme = "README.md"
repository = "https://github.com/Start9Labs/embassy-os"
version = "0.3.1"
version = "0.3.2-rev.1"
[lib]
name = "embassy"
@@ -36,6 +36,10 @@ path = "src/bin/embassy-sdk.rs"
name = "embassy-cli"
path = "src/bin/embassy-cli.rs"
[[bin]]
name = "avahi-alias"
path = "src/bin/avahi-alias.rs"
[features]
avahi = ["avahi-sys"]
default = ["avahi", "sound", "metal", "js_engine"]
@@ -47,103 +51,108 @@ unstable = ["patch-db/unstable"]
[dependencies]
aes = { version = "0.7.5", features = ["ctr"] }
async-stream = "0.3.3"
async-trait = "0.1.51"
async-trait = "0.1.56"
avahi-sys = { git = "https://github.com/Start9Labs/avahi-sys", version = "0.10.0", branch = "feature/dynamic-linking", features = [
"dynamic",
], optional = true }
base32 = "0.4.0"
base64 = "0.13.0"
base64ct = "1.5.0"
base64ct = "1.5.1"
basic-cookies = "0.1.4"
bollard = "0.11.0"
bollard = "0.13.0"
chrono = { version = "0.4.19", features = ["serde"] }
clap = "2.33"
color-eyre = "0.5"
cookie_store = "0.15.0"
clap = "3.2.8"
color-eyre = "0.6.1"
cookie_store = "0.16.1"
current_platform = "0.2.0"
digest = "0.10.3"
digest-old = { package = "digest", version = "0.9.0" }
divrem = "1.0.0"
ed25519 = { version = "1.5.2", features = ["pkcs8", "pem", "alloc"] }
ed25519-dalek = { version = "1.0.1", features = ["serde"] }
emver = { version = "0.1.6", features = ["serde"] }
fd-lock-rs = "0.1.3"
futures = "0.3.17"
fd-lock-rs = "0.1.4"
futures = "0.3.21"
git-version = "0.3.5"
helpers = { path = "../libs/helpers" }
hex = "0.4.3"
hmac = "0.12.1"
http = "0.2.5"
hyper = "0.14.13"
hyper-ws-listener = { git = "https://github.com/Start9Labs/hyper-ws-listener.git", branch = "main" }
imbl = "1.0.1"
indexmap = { version = "1.8.1", features = ["serde"] }
http = "0.2.8"
hyper = "0.14.20"
hyper-ws-listener = "0.2.0"
imbl = "2.0.0"
indexmap = { version = "1.9.1", features = ["serde"] }
isocountry = "0.3.2"
itertools = "0.10.1"
itertools = "0.10.3"
josekit = "0.8.1"
js_engine = { path = '../libs/js_engine', optional = true }
jsonpath_lib = "0.3.0"
lazy_static = "1.4"
libc = "0.2.103"
log = "0.4.14"
lazy_static = "1.4.0"
libc = "0.2.126"
log = "0.4.17"
models = { version = "*", path = "../libs/models" }
nix = "0.23.0"
nom = "7.0.0"
nix = "0.25.0"
nom = "7.1.1"
num = "0.4.0"
num_enum = "0.5.4"
num_enum = "0.5.7"
openssh-keys = "0.5.0"
openssl = { version = "0.10.36", features = ["vendored"] }
openssl = { version = "0.10.41", features = ["vendored"] }
patch-db = { version = "*", path = "../patch-db/patch-db", features = [
"trace",
] }
pbkdf2 = "0.11.0"
pin-project = "1.0.8"
pin-project = "1.0.11"
pkcs8 = { version = "0.9.0", features = ["std"] }
platforms = "1.1.0"
prettytable-rs = "0.8.0"
prettytable-rs = "0.9.0"
proptest = "1.0.0"
proptest-derive = "0.3.0"
rand = "0.7.3"
regex = "1.5.4"
reqwest = { version = "0.11.4", features = ["stream", "json", "socks"] }
reqwest_cookie_store = "0.2.0"
rpassword = "5.0.1"
rpc-toolkit = { version = "*", path = "../rpc-toolkit/rpc-toolkit" }
rust-argon2 = "0.8.3"
rand = { version = "0.8.5", features = ["std"] }
rand-old = { package = "rand", version = "0.7.3" }
regex = "1.6.0"
reqwest = { version = "0.11.11", features = ["stream", "json", "socks"] }
reqwest_cookie_store = "0.4.0"
rpassword = "7.0.0"
rpc-toolkit = "0.2.1"
rust-argon2 = "1.0.0"
scopeguard = "1.1" # because avahi-sys fucks your shit up
serde = { version = "1.0.130", features = ["derive", "rc"] }
serde = { version = "1.0.139", features = ["derive", "rc"] }
serde_cbor = { package = "ciborium", version = "0.2.0" }
serde_json = "1.0.68"
serde_toml = { package = "toml", version = "0.5.8" }
serde_yaml = "0.8.21"
serde_json = "1.0.82"
serde_toml = { package = "toml", version = "0.5.9" }
serde_with = { version = "2.0.1", features = ["macros", "json"] }
serde_yaml = "0.9.11"
sha2 = "0.10.2"
sha2-old = { package = "sha2", version = "0.9.8" }
simple-logging = "2.0"
sqlx = { version = "0.5.11", features = [
sha2-old = { package = "sha2", version = "0.9.9" }
simple-logging = "2.0.2"
sqlx = { version = "0.6.0", features = [
"chrono",
"offline",
"runtime-tokio-rustls",
"sqlite",
"postgres",
] }
stderrlog = "0.5.1"
tar = "0.4.37"
thiserror = "1.0.29"
tokio = { version = "1.15.0", features = ["full"] }
tokio-compat-02 = "0.2.0"
tokio-stream = { version = "0.1.7", features = ["io-util", "sync"] }
stderrlog = "0.5.3"
tar = "0.4.38"
thiserror = "1.0.31"
tokio = { version = "1.19.2", features = ["full"] }
tokio-stream = { version = "0.1.9", features = ["io-util", "sync"] }
tokio-tar = { git = "https://github.com/dr-bonez/tokio-tar.git" }
tokio-tungstenite = "0.14.0"
tokio-util = { version = "0.6.8", features = ["io"] }
torut = "0.2.0"
tracing = "0.1"
tracing-error = "0.1"
tracing-futures = "0.2"
tracing-subscriber = "0.2"
trust-dns-server = "0.21.2"
typed-builder = "0.9.1"
tokio-tungstenite = { version = "0.17.1", features = ["native-tls"] }
tokio-util = { version = "0.7.3", features = ["io"] }
torut = "0.2.1"
tracing = "0.1.35"
tracing-error = "0.2.0"
tracing-futures = "0.2.5"
tracing-subscriber = { version = "0.3.14", features = ["env-filter"] }
trust-dns-server = "0.22.0"
typed-builder = "0.10.0"
url = { version = "2.2.2", features = ["serde"] }
uuid = { version = "1.1.2", features = ["v4"] }
[dependencies.serde_with]
features = ["macros", "json"]
version = "1.10.0"
[profile.test]
opt-level = 3
[profile.dev.package.backtrace]
opt-level = 3
[profile.dev.package.sqlx-macros]
opt-level = 3

View File

@@ -1,4 +1,4 @@
# EmbassyOS Backend
# embassyOS Backend
- Requirements:
- [Install Rust](https://rustup.rs)
@@ -12,9 +12,9 @@
## Structure
The EmbassyOS backend is broken up into 4 different binaries:
The embassyOS backend is broken up into 4 different binaries:
- embassyd: This is the main workhorse of EmbassyOS - any new functionality you want will likely go here
- embassyd: This is the main workhorse of embassyOS - any new functionality you want will likely go here
- embassy-init: This is the component responsible for allowing you to set up your device, and handles system initialization on startup
- embassy-cli: This is a CLI tool that will allow you to issue commands to embassyd and control it similarly to the UI
- embassy-sdk: This is a CLI tool that aids in building and packaging services you wish to deploy to the Embassy
@@ -25,7 +25,7 @@ See [here](/backend/Cargo.toml) for details.
## Building
You can build the entire operating system image using `make` from the root of the EmbassyOS project. This will subsequently invoke the build scripts above to actually create the requisite binaries and put them onto the final operating system image.
You can build the entire operating system image using `make` from the root of the embassyOS project. This will subsequently invoke the build scripts above to actually create the requisite binaries and put them onto the final operating system image.
## Questions

View File

@@ -16,6 +16,9 @@ fi
alias 'rust-arm64-builder'='docker run $USE_TTY --rm -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-arm-cross:aarch64'
cd ..
rust-arm64-builder sh -c "(cd backend && cargo build)"
rust-arm64-builder sh -c "(cd backend && cargo build --locked)"
cd backend
sudo chown -R $USER target
sudo chown -R $USER ~/.cargo
#rust-arm64-builder aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/embassyd

View File

@@ -16,5 +16,8 @@ fi
alias 'rust-musl-builder'='docker run $USE_TTY --rm -v "$HOME"/.cargo/registry:/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-musl-cross:x86_64-musl'
cd ..
rust-musl-builder sh -c "(cd backend && cargo +beta build --target=x86_64-unknown-linux-musl --no-default-features)"
rust-musl-builder sh -c "(cd backend && cargo +beta build --target=x86_64-unknown-linux-musl --no-default-features --locked)"
cd backend
sudo chown -R $USER target
sudo chown -R $USER ~/.cargo

View File

@@ -16,5 +16,8 @@ fi
alias 'rust-musl-builder'='docker run $USE_TTY --rm -v "$HOME"/.cargo/registry:/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-musl-cross:x86_64-musl'
cd ..
rust-musl-builder sh -c "(cd backend && cargo +beta build --release --target=x86_64-unknown-linux-musl --no-default-features)"
rust-musl-builder sh -c "(cd backend && cargo +beta build --release --target=x86_64-unknown-linux-musl --no-default-features --locked)"
cd backend
sudo chown -R $USER target
sudo chown -R $USER ~/.cargo

View File

@@ -24,10 +24,14 @@ if [[ "$ENVIRONMENT" =~ (^|-)dev($|-) ]]; then
FLAGS="dev,$FLAGS"
fi
if [[ "$FLAGS" = "" ]]; then
rust-arm64-builder sh -c "(git config --global --add safe.directory '*'; cd backend && cargo build --release)"
rust-arm64-builder sh -c "(git config --global --add safe.directory '*'; cd backend && cargo build --release --locked)"
else
echo "FLAGS=$FLAGS"
rust-arm64-builder sh -c "(git config --global --add safe.directory '*'; cd backend && cargo build --release --features $FLAGS)"
rust-arm64-builder sh -c "(git config --global --add safe.directory '*'; cd backend && cargo build --release --features $FLAGS --locked)"
fi
cd backend
sudo chown -R $USER target
sudo chown -R $USER ~/.cargo
#rust-arm64-builder aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/embassyd

View File

@@ -6,9 +6,10 @@ Wants=avahi-daemon.service nginx.service tor.service
[Service]
Type=oneshot
Environment=RUST_LOG=embassy_init=debug,embassy=debug,js_engine=debug
Environment=RUST_LOG=embassy_init=debug,embassy=debug,js_engine=debug,patch_db=warn
ExecStart=/usr/local/bin/embassy-init
RemainAfterExit=true
StandardOutput=append:/var/log/embassy-init.log
[Install]
WantedBy=embassyd.service

View File

@@ -5,7 +5,7 @@ Requires=embassy-init.service
[Service]
Type=simple
Environment=RUST_LOG=embassyd=debug,embassy=debug,js_engine=debug
Environment=RUST_LOG=embassyd=debug,embassy=debug,js_engine=debug,patch_db=warn
ExecStart=/usr/local/bin/embassyd
Restart=always
RestartSec=3

View File

@@ -8,4 +8,4 @@ if [ "$0" != "./install-sdk.sh" ]; then
exit 1
fi
cargo install --bin=embassy-sdk --bin=embassy-cli --path=. --no-default-features --features=js_engine
cargo install --bin=embassy-sdk --bin=embassy-cli --path=. --no-default-features --features=js_engine --locked

View File

@@ -1,45 +1,47 @@
-- Add migration script here
CREATE TABLE IF NOT EXISTS tor
(
package TEXT NOT NULL,
interface TEXT NOT NULL,
key BLOB NOT NULL CHECK (length(key) = 64),
CREATE TABLE IF NOT EXISTS tor (
package TEXT NOT NULL,
interface TEXT NOT NULL,
key BYTEA NOT NULL CHECK (length(key) = 64),
PRIMARY KEY (package, interface)
);
CREATE TABLE IF NOT EXISTS session
(
id TEXT NOT NULL PRIMARY KEY,
CREATE TABLE IF NOT EXISTS session (
id TEXT NOT NULL PRIMARY KEY,
logged_in TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
logged_out TIMESTAMP,
last_active TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
user_agent TEXT,
metadata TEXT NOT NULL DEFAULT 'null'
metadata TEXT NOT NULL DEFAULT 'null'
);
CREATE TABLE IF NOT EXISTS account
(
id INTEGER PRIMARY KEY CHECK (id = 0),
CREATE TABLE IF NOT EXISTS account (
id SERIAL PRIMARY KEY CHECK (id = 0),
password TEXT NOT NULL,
tor_key BLOB NOT NULL CHECK (length(tor_key) = 64)
tor_key BYTEA NOT NULL CHECK (length(tor_key) = 64)
);
CREATE TABLE IF NOT EXISTS ssh_keys
(
fingerprint TEXT NOT NULL,
openssh_pubkey TEXT NOT NULL,
created_at TEXT NOT NULL,
CREATE TABLE IF NOT EXISTS ssh_keys (
fingerprint TEXT NOT NULL,
openssh_pubkey TEXT NOT NULL,
created_at TEXT NOT NULL,
PRIMARY KEY (fingerprint)
);
CREATE TABLE IF NOT EXISTS certificates
(
id INTEGER PRIMARY KEY, -- Root = 0, Int = 1, Other = 2..
CREATE TABLE IF NOT EXISTS certificates (
id SERIAL PRIMARY KEY,
-- Root = 0, Int = 1, Other = 2..
priv_key_pem TEXT NOT NULL,
certificate_pem TEXT NOT NULL,
lookup_string TEXT UNIQUE,
created_at TEXT,
updated_at TEXT
);
CREATE TABLE IF NOT EXISTS notifications
(
id INTEGER PRIMARY KEY,
ALTER SEQUENCE certificates_id_seq START 2 RESTART 2;
CREATE TABLE IF NOT EXISTS notifications (
id SERIAL PRIMARY KEY,
package_id TEXT,
created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,
code INTEGER NOT NULL,
@@ -48,9 +50,9 @@ CREATE TABLE IF NOT EXISTS notifications
message TEXT NOT NULL,
data TEXT
);
CREATE TABLE IF NOT EXISTS cifs_shares
(
id INTEGER PRIMARY KEY,
CREATE TABLE IF NOT EXISTS cifs_shares (
id SERIAL PRIMARY KEY,
hostname TEXT NOT NULL,
path TEXT NOT NULL,
username TEXT NOT NULL,

File diff suppressed because it is too large Load Diff

View File

@@ -3,21 +3,20 @@ use std::collections::{BTreeMap, BTreeSet};
use clap::ArgMatches;
use color_eyre::eyre::eyre;
use indexmap::IndexSet;
pub use models::ActionId;
use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use tracing::instrument;
use crate::config::{Config, ConfigSpec};
use crate::context::RpcContext;
use crate::id::{ ImageId};
use crate::id::ImageId;
use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat};
use crate::util::Version;
use crate::volume::Volumes;
use crate::{Error, ResultExt};
pub use models::ActionId;
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct Actions(pub BTreeMap<ActionId, Action>);
@@ -57,9 +56,14 @@ pub struct Action {
}
impl Action {
#[instrument]
pub fn validate(&self, volumes: &Volumes, image_ids: &BTreeSet<ImageId>) -> Result<(), Error> {
pub fn validate(
&self,
eos_version: &Version,
volumes: &Volumes,
image_ids: &BTreeSet<ImageId>,
) -> Result<(), Error> {
self.implementation
.validate(volumes, image_ids, true)
.validate(eos_version, volumes, image_ids, true)
.with_ctx(|_| {
(
crate::ErrorKind::ValidateS9pk,
@@ -99,7 +103,7 @@ impl Action {
}
}
fn display_action_result(action_result: ActionResult, matches: &ArgMatches<'_>) {
fn display_action_result(action_result: ActionResult, matches: &ArgMatches) {
if matches.is_present("format") {
return display_serializable(action_result, matches);
}

File diff suppressed because it is too large Load Diff

7776
backend/src/assets/nouns.txt Normal file

File diff suppressed because it is too large Load Diff

View File

@@ -10,7 +10,7 @@ use rpc_toolkit::command_helpers::prelude::{RequestParts, ResponseParts};
use rpc_toolkit::yajrc::RpcError;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use sqlx::{Executor, Sqlite};
use sqlx::{Executor, Postgres};
use tracing::instrument;
use crate::context::{CliContext, RpcContext};
@@ -24,10 +24,14 @@ pub fn auth() -> Result<(), Error> {
Ok(())
}
pub fn parse_metadata(_: &str, _: &ArgMatches<'_>) -> Result<Value, Error> {
Ok(serde_json::json!({
pub fn cli_metadata() -> Value {
serde_json::json!({
"platforms": ["cli"],
}))
})
}
pub fn parse_metadata(_: &str, _: &ArgMatches) -> Result<Value, Error> {
Ok(cli_metadata())
}
#[test]
@@ -52,7 +56,7 @@ async fn cli_login(
let password = if let Some(password) = password {
password
} else {
rpassword::prompt_password_stdout("Password: ")?
rpassword::prompt_password("Password: ")?
};
rpc_toolkit::command_helpers::call_remote(
@@ -83,7 +87,7 @@ pub fn check_password(hash: &str, password: &str) -> Result<(), Error> {
pub async fn check_password_against_db<Ex>(secrets: &mut Ex, password: &str) -> Result<(), Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
let pw_hash = sqlx::query!("SELECT password FROM account")
.fetch_one(secrets)
@@ -106,7 +110,7 @@ pub async fn login(
#[arg] password: Option<String>,
#[arg(
parse(parse_metadata),
default = "",
default = "cli_metadata",
help = "RPC Only: This value cannot be overidden from the cli"
)]
metadata: Value,
@@ -120,7 +124,7 @@ pub async fn login(
let metadata = serde_json::to_string(&metadata).with_kind(crate::ErrorKind::Database)?;
let hash_token_hashed = hash_token.hashed();
sqlx::query!(
"INSERT INTO session (id, user_agent, metadata) VALUES (?, ?, ?)",
"INSERT INTO session (id, user_agent, metadata) VALUES ($1, $2, $3)",
hash_token_hashed,
user_agent,
metadata,
@@ -169,7 +173,7 @@ pub async fn session() -> Result<(), Error> {
Ok(())
}
fn display_sessions(arg: SessionList, matches: &ArgMatches<'_>) {
fn display_sessions(arg: SessionList, matches: &ArgMatches) {
use prettytable::*;
if matches.is_present("format") {
@@ -199,7 +203,7 @@ fn display_sessions(arg: SessionList, matches: &ArgMatches<'_>) {
}
table.add_row(row);
}
table.print_tty(false);
table.print_tty(false).unwrap();
}
#[command(display(display_sessions))]
@@ -235,7 +239,7 @@ pub async fn list(
})
}
fn parse_comma_separated(arg: &str, _: &ArgMatches<'_>) -> Result<Vec<String>, RpcError> {
fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<Vec<String>, RpcError> {
Ok(arg.split(",").map(|s| s.trim().to_owned()).collect())
}
@@ -267,14 +271,14 @@ async fn cli_reset_password(
let old_password = if let Some(old_password) = old_password {
old_password
} else {
rpassword::prompt_password_stdout("Current Password: ")?
rpassword::prompt_password("Current Password: ")?
};
let new_password = if let Some(new_password) = new_password {
new_password
} else {
let new_password = rpassword::prompt_password_stdout("New Password: ")?;
if new_password != rpassword::prompt_password_stdout("Confirm: ")? {
let new_password = rpassword::prompt_password("New Password: ")?;
if new_password != rpassword::prompt_password("Confirm: ")? {
return Err(Error::new(
eyre!("Passwords do not match"),
crate::ErrorKind::IncorrectPassword,
@@ -324,7 +328,7 @@ pub async fn set_password<Db: DbHandle, Ex>(
password: &str,
) -> Result<(), Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
let password = argon2::hash_encoded(
password.as_bytes(),
@@ -333,7 +337,7 @@ where
)
.with_kind(crate::ErrorKind::PasswordHashGeneration)?;
sqlx::query!("UPDATE account SET password = ?", password,)
sqlx::query!("UPDATE account SET password = $1", password,)
.execute(secrets)
.await?;

View File

@@ -1,12 +1,13 @@
use std::collections::{BTreeMap, BTreeSet};
use std::sync::Arc;
use std::path::PathBuf;
use chrono::Utc;
use clap::ArgMatches;
use color_eyre::eyre::eyre;
use helpers::AtomicFile;
use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
use patch_db::{DbHandle, LockType, PatchDbHandle, Revision};
use patch_db::{DbHandle, LockType, PatchDbHandle};
use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use serde_json::Value;
@@ -20,17 +21,16 @@ use crate::auth::check_password_against_db;
use crate::backup::{BackupReport, ServerBackupReport};
use crate::context::RpcContext;
use crate::db::model::BackupProgress;
use crate::db::util::WithRevision;
use crate::disk::mount::backup::BackupMountGuard;
use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::TmpMountGuard;
use crate::notifications::NotificationLevel;
use crate::s9pk::manifest::PackageId;
use crate::status::MainStatus;
use crate::util::display_none;
use crate::util::serde::IoFormat;
use crate::util::{display_none, AtomicFile};
use crate::version::VersionT;
use crate::Error;
use crate::{Error, ErrorKind, ResultExt};
#[derive(Debug)]
pub struct OsBackup {
@@ -114,7 +114,7 @@ impl Serialize for OsBackup {
}
}
fn parse_comma_separated(arg: &str, _: &ArgMatches<'_>) -> Result<BTreeSet<PackageId>, Error> {
fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<BTreeSet<PackageId>, Error> {
arg.split(',')
.map(|s| s.trim().parse().map_err(Error::from))
.collect()
@@ -133,7 +133,7 @@ pub async fn backup_all(
)]
package_ids: Option<BTreeSet<PackageId>>,
#[arg] password: String,
) -> Result<WithRevision<()>, Error> {
) -> Result<(), Error> {
let mut db = ctx.db.handle();
check_password_against_db(&mut ctx.secret_store.acquire().await?, &password).await?;
let fs = target_id
@@ -157,7 +157,7 @@ pub async fn backup_all(
if old_password.is_some() {
backup_guard.change_password(&password)?;
}
let revision = assure_backing_up(&mut db, &package_ids).await?;
assure_backing_up(&mut db, &package_ids).await?;
tokio::task::spawn(async move {
let backup_res = perform_backup(&ctx, &mut db, backup_guard, &package_ids).await;
let backup_progress = crate::db::DatabaseModel::new()
@@ -236,17 +236,14 @@ pub async fn backup_all(
.await
.expect("failed to change server status");
});
Ok(WithRevision {
response: (),
revision,
})
Ok(())
}
#[instrument(skip(db, packages))]
async fn assure_backing_up(
db: &mut PatchDbHandle,
packages: impl IntoIterator<Item = &PackageId>,
) -> Result<Option<Arc<Revision>>, Error> {
) -> Result<(), Error> {
let mut tx = db.begin().await?;
let mut backing_up = crate::db::DatabaseModel::new()
.server_info()
@@ -277,7 +274,8 @@ async fn assure_backing_up(
.collect(),
);
backing_up.save(&mut tx).await?;
Ok(tx.commit(None).await?)
tx.commit().await?;
Ok(())
}
#[instrument(skip(ctx, db, backup_guard))]
@@ -364,6 +362,7 @@ async fn perform_backup<Db: DbHandle>(
.backup
.create(
ctx,
&mut tx,
&package_id,
&manifest.title,
&manifest.version,
@@ -424,7 +423,12 @@ async fn perform_backup<Db: DbHandle>(
.await?;
let (root_ca_key, root_ca_cert) = ctx.net_controller.ssl.export_root_ca().await?;
let mut os_backup_file = AtomicFile::new(backup_guard.as_ref().join("os-backup.cbor")).await?;
let mut os_backup_file = AtomicFile::new(
backup_guard.as_ref().join("os-backup.cbor"),
None::<PathBuf>,
)
.await
.with_kind(ErrorKind::Filesystem)?;
os_backup_file
.write_all(
&IoFormat::Cbor.to_vec(&OsBackup {
@@ -439,7 +443,10 @@ async fn perform_backup<Db: DbHandle>(
})?,
)
.await?;
os_backup_file.save().await?;
os_backup_file
.save()
.await
.with_kind(ErrorKind::Filesystem)?;
let timestamp = Some(Utc::now());

View File

@@ -1,12 +1,14 @@
use std::collections::{BTreeMap, BTreeSet};
use std::path::Path;
use std::path::{Path, PathBuf};
use chrono::{DateTime, Utc};
use color_eyre::eyre::eyre;
use helpers::AtomicFile;
use patch_db::{DbHandle, HasModel, LockType};
use reqwest::Url;
use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use sqlx::{Executor, Sqlite};
use sqlx::{Executor, Postgres};
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use tracing::instrument;
@@ -20,10 +22,10 @@ use crate::net::interface::{InterfaceId, Interfaces};
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
use crate::util::serde::IoFormat;
use crate::util::{AtomicFile, Version};
use crate::util::Version;
use crate::version::{Current, VersionT};
use crate::volume::{backup_dir, Volume, VolumeId, Volumes, BACKUP_DIR};
use crate::{Error, ResultExt};
use crate::{Error, ErrorKind, ResultExt};
pub mod backup_bulk;
pub mod restore;
@@ -60,6 +62,7 @@ pub fn package_backup() -> Result<(), Error> {
struct BackupMetadata {
pub timestamp: DateTime<Utc>,
pub tor_keys: BTreeMap<InterfaceId, String>,
pub marketplace_url: Option<Url>,
}
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
@@ -68,20 +71,26 @@ pub struct BackupActions {
pub restore: PackageProcedure,
}
impl BackupActions {
pub fn validate(&self, volumes: &Volumes, image_ids: &BTreeSet<ImageId>) -> Result<(), Error> {
pub fn validate(
&self,
eos_version: &Version,
volumes: &Volumes,
image_ids: &BTreeSet<ImageId>,
) -> Result<(), Error> {
self.create
.validate(volumes, image_ids, false)
.validate(eos_version, volumes, image_ids, false)
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Backup Create"))?;
self.restore
.validate(volumes, image_ids, false)
.validate(eos_version, volumes, image_ids, false)
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Backup Restore"))?;
Ok(())
}
#[instrument(skip(ctx))]
pub async fn create(
#[instrument(skip(ctx, db))]
pub async fn create<Db: DbHandle>(
&self,
ctx: &RpcContext,
db: &mut Db,
pkg_id: &PackageId,
pkg_title: &str,
pkg_version: &Version,
@@ -119,6 +128,18 @@ impl BackupActions {
)
})
.collect();
let marketplace_url = crate::db::DatabaseModel::new()
.package_data()
.idx_model(pkg_id)
.expect(db)
.await?
.installed()
.expect(db)
.await?
.marketplace_url()
.get(db, true)
.await?
.into_owned();
let tmp_path = Path::new(BACKUP_DIR)
.join(pkg_id)
.join(format!("{}.s9pk", pkg_id));
@@ -129,7 +150,9 @@ impl BackupActions {
.join(pkg_version.as_str())
.join(format!("{}.s9pk", pkg_id));
let mut infile = File::open(&s9pk_path).await?;
let mut outfile = AtomicFile::new(&tmp_path).await?;
let mut outfile = AtomicFile::new(&tmp_path, None::<PathBuf>)
.await
.with_kind(ErrorKind::Filesystem)?;
tokio::io::copy(&mut infile, &mut *outfile)
.await
.with_ctx(|_| {
@@ -138,17 +161,20 @@ impl BackupActions {
format!("cp {} -> {}", s9pk_path.display(), tmp_path.display()),
)
})?;
outfile.save().await?;
outfile.save().await.with_kind(ErrorKind::Filesystem)?;
let timestamp = Utc::now();
let metadata_path = Path::new(BACKUP_DIR).join(pkg_id).join("metadata.cbor");
let mut outfile = AtomicFile::new(&metadata_path).await?;
let mut outfile = AtomicFile::new(&metadata_path, None::<PathBuf>)
.await
.with_kind(ErrorKind::Filesystem)?;
outfile
.write_all(&IoFormat::Cbor.to_vec(&BackupMetadata {
timestamp,
tor_keys,
marketplace_url,
})?)
.await?;
outfile.save().await?;
outfile.save().await.with_kind(ErrorKind::Filesystem)?;
Ok(PackageBackupInfo {
os_version: Current::new().semver().into(),
title: pkg_title.to_owned(),
@@ -169,7 +195,7 @@ impl BackupActions {
volumes: &Volumes,
) -> Result<(), Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
let mut volumes = volumes.clone();
volumes.insert(VolumeId::Backup, Volume::Backup { readonly: true });
@@ -205,7 +231,7 @@ impl BackupActions {
)
})?;
sqlx::query!(
"REPLACE INTO tor (package, interface, key) VALUES (?, ?, ?)",
"INSERT INTO tor (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO UPDATE SET key = $3",
**pkg_id,
*iface,
key_vec,
@@ -217,17 +243,21 @@ impl BackupActions {
.package_data()
.lock(db, LockType::Write)
.await?;
crate::db::DatabaseModel::new()
let pde = crate::db::DatabaseModel::new()
.package_data()
.idx_model(pkg_id)
.expect(db)
.await?
.installed()
.expect(db)
.await?
.await?;
pde.clone()
.interface_addresses()
.put(db, &interfaces.install(&mut *secrets, pkg_id).await?)
.await?;
pde.marketplace_url()
.put(db, &metadata.marketplace_url)
.await?;
let entry = crate::db::DatabaseModel::new()
.package_data()

View File

@@ -9,7 +9,7 @@ use color_eyre::eyre::eyre;
use futures::future::BoxFuture;
use futures::FutureExt;
use openssl::x509::X509;
use patch_db::{DbHandle, PatchDbHandle, Revision};
use patch_db::{DbHandle, PatchDbHandle};
use rpc_toolkit::command;
use tokio::fs::File;
use tokio::task::JoinHandle;
@@ -20,7 +20,6 @@ use super::target::BackupTargetId;
use crate::backup::backup_bulk::OsBackup;
use crate::context::{RpcContext, SetupContext};
use crate::db::model::{PackageDataEntry, StaticFiles};
use crate::db::util::WithRevision;
use crate::disk::mount::backup::{BackupMountGuard, PackageBackupMountGuard};
use crate::disk::mount::filesystem::ReadOnly;
use crate::disk::mount::guard::TmpMountGuard;
@@ -37,7 +36,7 @@ use crate::util::serde::IoFormat;
use crate::volume::{backup_dir, BACKUP_DIR, PKG_VOLUME_DIR};
use crate::{Error, ResultExt};
fn parse_comma_separated(arg: &str, _: &ArgMatches<'_>) -> Result<Vec<PackageId>, Error> {
fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<Vec<PackageId>, Error> {
arg.split(',')
.map(|s| s.trim().parse().map_err(Error::from))
.collect()
@@ -50,32 +49,70 @@ pub async fn restore_packages_rpc(
#[arg(parse(parse_comma_separated))] ids: Vec<PackageId>,
#[arg(rename = "target-id")] target_id: BackupTargetId,
#[arg] password: String,
) -> Result<WithRevision<()>, Error> {
) -> Result<(), Error> {
let mut db = ctx.db.handle();
let fs = target_id
.load(&mut ctx.secret_store.acquire().await?)
.await?;
let backup_guard = BackupMountGuard::mount(
TmpMountGuard::mount(&fs, ReadOnly).await?,
&password,
)
.await?;
let backup_guard =
BackupMountGuard::mount(TmpMountGuard::mount(&fs, ReadOnly).await?, &password).await?;
let (revision, backup_guard, tasks, _) =
restore_packages(&ctx, &mut db, backup_guard, ids).await?;
let (backup_guard, tasks, _) = restore_packages(&ctx, &mut db, backup_guard, ids).await?;
tokio::spawn(async {
futures::future::join_all(tasks).await;
tokio::spawn(async move {
let res = futures::future::join_all(tasks).await;
for res in res {
match res.with_kind(crate::ErrorKind::Unknown) {
Ok((Ok(_), _)) => (),
Ok((Err(err), package_id)) => {
if let Err(err) = ctx
.notification_manager
.notify(
&mut db,
Some(package_id.clone()),
NotificationLevel::Error,
"Restoration Failure".to_string(),
format!("Error restoring package {}: {}", package_id, err),
(),
None,
)
.await
{
tracing::error!("Failed to notify: {}", err);
tracing::debug!("{:?}", err);
};
tracing::error!("Error restoring package {}: {}", package_id, err);
tracing::debug!("{:?}", err);
}
Err(e) => {
if let Err(err) = ctx
.notification_manager
.notify(
&mut db,
None,
NotificationLevel::Error,
"Restoration Failure".to_string(),
format!("Error during restoration: {}", e),
(),
None,
)
.await
{
tracing::error!("Failed to notify: {}", err);
tracing::debug!("{:?}", err);
}
tracing::error!("Error restoring packages: {}", e);
tracing::debug!("{:?}", e);
}
}
}
if let Err(e) = backup_guard.unmount().await {
tracing::error!("Error unmounting backup drive: {}", e);
tracing::debug!("{:?}", e);
}
});
Ok(WithRevision {
response: (),
revision,
})
Ok(())
}
async fn approximate_progress(
@@ -179,7 +216,7 @@ pub async fn recover_full_embassy(
let key_vec = os_backup.tor_key.as_bytes().to_vec();
let secret_store = ctx.secret_store().await?;
sqlx::query!(
"REPLACE INTO account (id, password, tor_key) VALUES (?, ?, ?)",
"INSERT INTO account (id, password, tor_key) VALUES ($1, $2, $3) ON CONFLICT (id) DO UPDATE SET password = $2, tor_key = $3",
0,
password,
key_vec,
@@ -208,7 +245,7 @@ pub async fn recover_full_embassy(
.keys()
.cloned()
.collect();
let (_, backup_guard, tasks, progress_info) = restore_packages(
let (backup_guard, tasks, progress_info) = restore_packages(
&rpc_ctx,
&mut db,
backup_guard,
@@ -225,7 +262,7 @@ pub async fn recover_full_embassy(
if let Err(err) = rpc_ctx.notification_manager.notify(
&mut db,
Some(package_id.clone()),
NotificationLevel::Error,
NotificationLevel::Error,
"Restoration Failure".to_string(), format!("Error restoring package {}: {}", package_id,err), (), None).await{
tracing::error!("Failed to notify: {}", err);
tracing::debug!("{:?}", err);
@@ -237,8 +274,8 @@ pub async fn recover_full_embassy(
if let Err(err) = rpc_ctx.notification_manager.notify(
&mut db,
None,
NotificationLevel::Error,
"Restoration Failure".to_string(), format!("Error restoring ?: {}", e), (), None).await {
NotificationLevel::Error,
"Restoration Failure".to_string(), format!("Error during restoration: {}", e), (), None).await {
tracing::error!("Failed to notify: {}", err);
tracing::debug!("{:?}", err);
@@ -266,14 +303,13 @@ async fn restore_packages(
ids: Vec<PackageId>,
) -> Result<
(
Option<Arc<Revision>>,
BackupMountGuard<TmpMountGuard>,
Vec<JoinHandle<(Result<(), Error>, PackageId)>>,
ProgressInfo,
),
Error,
> {
let (revision, guards) = assure_restoring(ctx, db, ids, &backup_guard).await?;
let guards = assure_restoring(ctx, db, ids, &backup_guard).await?;
let mut progress_info = ProgressInfo::default();
@@ -301,7 +337,7 @@ async fn restore_packages(
));
}
Ok((revision, backup_guard, tasks, progress_info))
Ok((backup_guard, tasks, progress_info))
}
#[instrument(skip(ctx, db, backup_guard))]
@@ -310,13 +346,7 @@ async fn assure_restoring(
db: &mut PatchDbHandle,
ids: Vec<PackageId>,
backup_guard: &BackupMountGuard<TmpMountGuard>,
) -> Result<
(
Option<Arc<Revision>>,
Vec<(Manifest, PackageBackupMountGuard)>,
),
Error,
> {
) -> Result<Vec<(Manifest, PackageBackupMountGuard)>, Error> {
let mut tx = db.begin().await?;
let mut guards = Vec::with_capacity(ids.len());
@@ -376,7 +406,8 @@ async fn assure_restoring(
guards.push((manifest, guard));
}
Ok((tx.commit(None).await?, guards))
tx.commit().await?;
Ok(guards)
}
#[instrument(skip(ctx, guard))]

View File

@@ -4,7 +4,7 @@ use color_eyre::eyre::eyre;
use futures::TryStreamExt;
use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use sqlx::{Executor, Sqlite};
use sqlx::{Executor, Postgres};
use super::{BackupTarget, BackupTargetId};
use crate::context::RpcContext;
@@ -49,8 +49,8 @@ pub async fn add(
let embassy_os = recovery_info(&guard).await?;
guard.unmount().await?;
let path_string = Path::new("/").join(&cifs.path).display().to_string();
let id: u32 = sqlx::query!(
"INSERT INTO cifs_shares (hostname, path, username, password) VALUES (?, ?, ?, ?) RETURNING id AS \"id: u32\"",
let id: i32 = sqlx::query!(
"INSERT INTO cifs_shares (hostname, path, username, password) VALUES ($1, $2, $3, $4) RETURNING id",
cifs.hostname,
path_string,
cifs.username,
@@ -98,7 +98,7 @@ pub async fn update(
guard.unmount().await?;
let path_string = Path::new("/").join(&cifs.path).display().to_string();
if sqlx::query!(
"UPDATE cifs_shares SET hostname = ?, path = ?, username = ?, password = ? WHERE id = ?",
"UPDATE cifs_shares SET hostname = $1, path = $2, username = $3, password = $4 WHERE id = $5",
cifs.hostname,
path_string,
cifs.username,
@@ -137,7 +137,7 @@ pub async fn remove(#[context] ctx: RpcContext, #[arg] id: BackupTargetId) -> Re
crate::ErrorKind::NotFound,
));
};
if sqlx::query!("DELETE FROM cifs_shares WHERE id = ?", id)
if sqlx::query!("DELETE FROM cifs_shares WHERE id = $1", id)
.execute(&ctx.secret_store)
.await?
.rows_affected()
@@ -151,12 +151,12 @@ pub async fn remove(#[context] ctx: RpcContext, #[arg] id: BackupTargetId) -> Re
Ok(())
}
pub async fn load<Ex>(secrets: &mut Ex, id: u32) -> Result<Cifs, Error>
pub async fn load<Ex>(secrets: &mut Ex, id: i32) -> Result<Cifs, Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
let record = sqlx::query!(
"SELECT hostname, path, username, password FROM cifs_shares WHERE id = ?",
"SELECT hostname, path, username, password FROM cifs_shares WHERE id = $1",
id
)
.fetch_one(secrets)
@@ -170,14 +170,13 @@ where
})
}
pub async fn list<Ex>(secrets: &mut Ex) -> Result<Vec<(u32, CifsBackupTarget)>, Error>
pub async fn list<Ex>(secrets: &mut Ex) -> Result<Vec<(i32, CifsBackupTarget)>, Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
let mut records = sqlx::query!(
"SELECT id AS \"id: u32\", hostname, path, username, password FROM cifs_shares"
)
.fetch_many(secrets);
let mut records =
sqlx::query!("SELECT id, hostname, path, username, password FROM cifs_shares")
.fetch_many(secrets);
let mut cifs = Vec::new();
while let Some(query_result) = records.try_next().await? {

View File

@@ -6,11 +6,11 @@ use chrono::{DateTime, Utc};
use clap::ArgMatches;
use color_eyre::eyre::eyre;
use digest::generic_array::GenericArray;
use digest::{Digest, OutputSizeUser};
use digest::OutputSizeUser;
use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use sqlx::{Executor, Sqlite};
use sqlx::{Executor, Postgres};
use tracing::instrument;
use self::cifs::CifsBackupTarget;
@@ -45,12 +45,12 @@ pub enum BackupTarget {
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum BackupTargetId {
Disk { logicalname: PathBuf },
Cifs { id: u32 },
Cifs { id: i32 },
}
impl BackupTargetId {
pub async fn load<Ex>(self, secrets: &mut Ex) -> Result<BackupTargetFS, Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
Ok(match self {
BackupTargetId::Disk { logicalname } => {
@@ -134,7 +134,6 @@ pub fn target() -> Result<(), Error> {
Ok(())
}
// TODO: incorporate reconnect into this response as well
#[command(display(display_serializable))]
pub async fn list(
#[context] ctx: RpcContext,
@@ -143,7 +142,6 @@ pub async fn list(
let (disks_res, cifs) =
tokio::try_join!(crate::disk::util::list(), cifs::list(&mut sql_handle),)?;
Ok(disks_res
.disks
.into_iter()
.flat_map(|mut disk| {
std::mem::take(&mut disk.partitions)
@@ -186,7 +184,7 @@ pub struct PackageBackupInfo {
pub timestamp: DateTime<Utc>,
}
fn display_backup_info(info: BackupInfo, matches: &ArgMatches<'_>) {
fn display_backup_info(info: BackupInfo, matches: &ArgMatches) {
use prettytable::*;
if matches.is_present("format") {
@@ -219,7 +217,7 @@ fn display_backup_info(info: BackupInfo, matches: &ArgMatches<'_>) {
];
table.add_row(row);
}
table.print_tty(false);
table.print_tty(false).unwrap();
}
#[command(display(display_backup_info))]

View File

@@ -0,0 +1,163 @@
use avahi_sys::{
self, avahi_client_errno, avahi_entry_group_add_service, avahi_entry_group_commit,
avahi_strerror, AvahiClient,
};
fn log_str_error(action: &str, e: i32) {
unsafe {
let e_str = avahi_strerror(e);
eprintln!(
"Could not {}: {:?}",
action,
std::ffi::CStr::from_ptr(e_str)
);
}
}
/// Publish this host's mDNS records via Avahi.
///
/// Registers the system FQDN as an `_http._tcp` service on port 443, then adds
/// a CNAME record pointing each command-line alias (`<alias>.local`) at the
/// primary hostname, commits the entry group, and parks forever so the
/// registrations stay alive for the lifetime of the process.
///
/// Panics (after logging the Avahi error string) if any Avahi call fails,
/// since the process has no useful work to do without its registrations.
fn main() {
    let aliases: Vec<_> = std::env::args().skip(1).collect();
    unsafe {
        let simple_poll = avahi_sys::avahi_simple_poll_new();
        let poll = avahi_sys::avahi_simple_poll_get(simple_poll);
        // Out-parameter for the client-creation errno; written synchronously
        // by avahi_client_new, so a plain stack local suffices.
        let mut client_err: i32 = 0;
        let avahi_client = avahi_sys::avahi_client_new(
            poll,
            avahi_sys::AvahiClientFlags::AVAHI_CLIENT_NO_FAIL,
            Some(client_callback),
            std::ptr::null_mut(),
            &mut client_err,
        );
        if avahi_client.is_null() {
            log_str_error("create Avahi client", client_err);
            panic!("Failed to create Avahi Client");
        }
        let group = avahi_sys::avahi_entry_group_new(
            avahi_client,
            Some(entry_group_callback),
            std::ptr::null_mut(),
        );
        if group.is_null() {
            log_str_error("create Avahi entry group", avahi_client_errno(avahi_client));
            panic!("Failed to create Avahi Entry Group");
        }
        // Build the hostname in DNS wire format (length-prefixed labels) for
        // use as CNAME record data below. The leading 0 is a placeholder for
        // the first label's length byte.
        let mut hostname_buf = vec![0];
        let hostname_raw = avahi_sys::avahi_client_get_host_name_fqdn(avahi_client);
        hostname_buf.extend_from_slice(std::ffi::CStr::from_ptr(hostname_raw).to_bytes_with_nul());
        let buflen = hostname_buf.len();
        debug_assert!(hostname_buf.ends_with(b".local\0"));
        debug_assert!(!hostname_buf[..(buflen - 7)].contains(&b'.'));
        // assume fixed length prefix on hostname due to local address
        hostname_buf[0] = (buflen - 8) as u8; // set the prefix length to len - 8 (leading byte, .local, nul) for the main address
        hostname_buf[buflen - 7] = 5; // set the prefix length to 5 for "local"
        let mut res;
        let http_tcp_cstr =
            std::ffi::CString::new("_http._tcp").expect("Could not cast _http._tcp to c string");
        res = avahi_entry_group_add_service(
            group,
            avahi_sys::AVAHI_IF_UNSPEC,
            avahi_sys::AVAHI_PROTO_UNSPEC,
            avahi_sys::AvahiPublishFlags_AVAHI_PUBLISH_USE_MULTICAST,
            hostname_raw,
            http_tcp_cstr.as_ptr(),
            std::ptr::null(),
            std::ptr::null(),
            443,
            // avahi_entry_group_add_service is variadic in C: the trailing
            // arguments are optional TXT record strings, terminated by a NULL
            // pointer. The Rust signature cannot express this, but omitting
            // the NULL sentinel causes the C side to walk off the argument
            // list — undefined behavior (typically a segfault). Do not remove.
            std::ptr::null::<std::os::raw::c_char>(),
        );
        if res < avahi_sys::AVAHI_OK {
            log_str_error("add service to Avahi entry group", res);
            panic!("Failed to load Avahi services");
        }
        eprintln!("Published {:?}", std::ffi::CStr::from_ptr(hostname_raw));
        for alias in aliases {
            let lan_address = alias + ".local";
            let lan_address_ptr = std::ffi::CString::new(lan_address)
                .expect("Could not cast lan address to c string");
            res = avahi_sys::avahi_entry_group_add_record(
                group,
                avahi_sys::AVAHI_IF_UNSPEC,
                avahi_sys::AVAHI_PROTO_UNSPEC,
                avahi_sys::AvahiPublishFlags_AVAHI_PUBLISH_USE_MULTICAST
                    | avahi_sys::AvahiPublishFlags_AVAHI_PUBLISH_ALLOW_MULTIPLE,
                lan_address_ptr.as_ptr(),
                avahi_sys::AVAHI_DNS_CLASS_IN as u16,
                avahi_sys::AVAHI_DNS_TYPE_CNAME as u16,
                avahi_sys::AVAHI_DEFAULT_TTL,
                hostname_buf.as_ptr().cast(),
                hostname_buf.len(),
            );
            if res < avahi_sys::AVAHI_OK {
                log_str_error("add CNAME record to Avahi entry group", res);
                panic!("Failed to load Avahi services");
            }
            eprintln!("Published {:?}", lan_address_ptr);
        }
        // NOTE: the operation here is a *commit*; the previous error strings
        // incorrectly said "reset".
        let commit_err = avahi_entry_group_commit(group);
        if commit_err < avahi_sys::AVAHI_OK {
            log_str_error("commit Avahi entry group", commit_err);
            panic!("Failed to load Avahi services: commit");
        }
    }
    // Keep the process (and thus the mDNS registrations) alive indefinitely.
    // park() may return spuriously, so loop around it.
    loop {
        std::thread::park();
    }
}
/// FFI callback invoked by Avahi whenever the entry group changes state.
/// Logs the new state to stderr for diagnostics; known states are printed by
/// name, anything else by its raw numeric value.
unsafe extern "C" fn entry_group_callback(
    _group: *mut avahi_sys::AvahiEntryGroup,
    state: avahi_sys::AvahiEntryGroupState,
    _userdata: *mut core::ffi::c_void,
) {
    let label = match state {
        avahi_sys::AvahiEntryGroupState_AVAHI_ENTRY_GROUP_FAILURE => "AVAHI_ENTRY_GROUP_FAILURE",
        avahi_sys::AvahiEntryGroupState_AVAHI_ENTRY_GROUP_COLLISION => {
            "AVAHI_ENTRY_GROUP_COLLISION"
        }
        avahi_sys::AvahiEntryGroupState_AVAHI_ENTRY_GROUP_UNCOMMITED => {
            "AVAHI_ENTRY_GROUP_UNCOMMITED"
        }
        avahi_sys::AvahiEntryGroupState_AVAHI_ENTRY_GROUP_ESTABLISHED => {
            "AVAHI_ENTRY_GROUP_ESTABLISHED"
        }
        avahi_sys::AvahiEntryGroupState_AVAHI_ENTRY_GROUP_REGISTERING => {
            "AVAHI_ENTRY_GROUP_REGISTERING"
        }
        other => {
            eprintln!("AvahiCallback: EntryGroupState = {}", other);
            return;
        }
    };
    eprintln!("AvahiCallback: EntryGroupState = {}", label);
}
/// FFI callback invoked by Avahi whenever the client connection changes state.
/// Logs the new state to stderr for diagnostics; known states are printed by
/// name, anything else by its raw numeric value.
unsafe extern "C" fn client_callback(
    _group: *mut avahi_sys::AvahiClient,
    state: avahi_sys::AvahiClientState,
    _userdata: *mut core::ffi::c_void,
) {
    let label = match state {
        avahi_sys::AvahiClientState_AVAHI_CLIENT_FAILURE => "AVAHI_CLIENT_FAILURE",
        avahi_sys::AvahiClientState_AVAHI_CLIENT_S_RUNNING => "AVAHI_CLIENT_S_RUNNING",
        avahi_sys::AvahiClientState_AVAHI_CLIENT_CONNECTING => "AVAHI_CLIENT_CONNECTING",
        avahi_sys::AvahiClientState_AVAHI_CLIENT_S_COLLISION => "AVAHI_CLIENT_S_COLLISION",
        avahi_sys::AvahiClientState_AVAHI_CLIENT_S_REGISTERING => "AVAHI_CLIENT_S_REGISTERING",
        other => {
            eprintln!("AvahiCallback: ClientState = {}", other);
            return;
        }
    };
    eprintln!("AvahiCallback: ClientState = {}", label);
}

View File

@@ -7,20 +7,24 @@ use rpc_toolkit::run_cli;
use rpc_toolkit::yajrc::RpcError;
use serde_json::Value;
lazy_static::lazy_static! {
static ref VERSION_STRING: String = Current::new().semver().to_string();
}
fn inner_main() -> Result<(), Error> {
run_cli!({
command: embassy::main_api,
app: app => app
.name("Embassy CLI")
.version(Current::new().semver().to_string().as_str())
.version(&**VERSION_STRING)
.arg(
clap::Arg::with_name("config")
.short("c")
.short('c')
.long("config")
.takes_value(true),
)
.arg(Arg::with_name("host").long("host").short("h").takes_value(true))
.arg(Arg::with_name("proxy").long("proxy").short("p").takes_value(true)),
.arg(Arg::with_name("host").long("host").short('h').takes_value(true))
.arg(Arg::with_name("proxy").long("proxy").short('p').takes_value(true)),
context: matches => {
EmbassyLogger::init();
CliContext::init(matches)?

View File

@@ -7,17 +7,16 @@ use embassy::context::{DiagnosticContext, SetupContext};
use embassy::disk::fsck::RepairStrategy;
use embassy::disk::main::DEFAULT_PASSWORD;
use embassy::disk::REPAIR_DISK_PATH;
use embassy::hostname::get_product_key;
use embassy::init::STANDBY_MODE_PATH;
use embassy::middleware::cors::cors;
use embassy::middleware::diagnostic::diagnostic;
use embassy::middleware::encrypt::encrypt;
#[cfg(feature = "avahi")]
use embassy::net::mdns::MdnsController;
use embassy::shutdown::Shutdown;
use embassy::sound::CHIME;
use embassy::util::logger::EmbassyLogger;
use embassy::util::Invoke;
use embassy::{Error, ResultExt};
use embassy::{Error, ErrorKind, ResultExt};
use http::StatusCode;
use rpc_toolkit::rpc_server;
use tokio::process::Command;
@@ -49,12 +48,6 @@ async fn setup_or_init(cfg_path: Option<&str>) -> Result<(), Error> {
.invoke(embassy::ErrorKind::Nginx)
.await?;
let ctx = SetupContext::init(cfg_path).await?;
let keysource_ctx = ctx.clone();
let keysource = move || {
let ctx = keysource_ctx.clone();
async move { ctx.product_key().await }
};
let encrypt = encrypt(keysource);
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
CHIME.play().await?;
rpc_server!({
@@ -63,7 +56,6 @@ async fn setup_or_init(cfg_path: Option<&str>) -> Result<(), Error> {
status: status_fn,
middleware: [
cors,
encrypt,
]
})
.with_graceful_shutdown({
@@ -102,7 +94,7 @@ async fn setup_or_init(cfg_path: Option<&str>) -> Result<(), Error> {
.await?;
}
tracing::info!("Loaded Disk");
embassy::init::init(&cfg, &get_product_key().await?).await?;
embassy::init::init(&cfg).await?;
}
Ok(())
@@ -128,6 +120,13 @@ async fn run_script_if_exists<P: AsRef<Path>>(path: P) {
#[instrument]
async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
if tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() {
tokio::fs::remove_file(STANDBY_MODE_PATH).await?;
Command::new("sync").invoke(ErrorKind::Filesystem).await?;
embassy::sound::SHUTDOWN.play().await?;
futures::future::pending::<()>().await;
}
embassy::sound::BEP.play().await?;
run_script_if_exists("/embassy-os/preinit.sh").await;
@@ -210,7 +209,7 @@ fn main() {
let matches = clap::App::new("embassyd")
.arg(
clap::Arg::with_name("config")
.short("c")
.short('c')
.long("config")
.takes_value(true),
)

View File

@@ -6,15 +6,19 @@ use rpc_toolkit::run_cli;
use rpc_toolkit::yajrc::RpcError;
use serde_json::Value;
lazy_static::lazy_static! {
static ref VERSION_STRING: String = Current::new().semver().to_string();
}
fn inner_main() -> Result<(), Error> {
run_cli!({
command: embassy::portable_api,
app: app => app
.name("Embassy SDK")
.version(Current::new().semver().to_string().as_str())
.version(&**VERSION_STRING)
.arg(
clap::Arg::with_name("config")
.short("c")
.short('c')
.long("config")
.takes_value(true),
),

View File

@@ -7,6 +7,7 @@ use embassy::core::rpc_continuations::RequestGuid;
use embassy::db::subscribe;
use embassy::middleware::auth::auth;
use embassy::middleware::cors::cors;
use embassy::middleware::db::db as db_middleware;
use embassy::middleware::diagnostic::diagnostic;
#[cfg(feature = "avahi")]
use embassy::net::mdns::MdnsController;
@@ -40,7 +41,6 @@ fn err_to_500(e: Error) -> Response<Body> {
#[instrument]
async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
let (rpc_ctx, shutdown) = {
embassy::hostname::sync_hostname().await?;
let rpc_ctx = RpcContext::init(
cfg_path,
Arc::new(
@@ -82,11 +82,13 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
});
let mut db = rpc_ctx.db.handle();
embassy::hostname::sync_hostname(&mut db).await?;
let receipts = embassy::context::rpc::RpcSetNginxReceipts::new(&mut db).await?;
rpc_ctx.set_nginx_conf(&mut db, receipts).await?;
drop(db);
let auth = auth(rpc_ctx.clone());
let db_middleware = db_middleware(rpc_ctx.clone());
let ctx = rpc_ctx.clone();
let server = rpc_server!({
command: embassy::main_api,
@@ -95,6 +97,7 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
middleware: [
cors,
auth,
db_middleware,
]
})
.with_graceful_shutdown({
@@ -112,29 +115,6 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
.await
});
let rev_cache_ctx = rpc_ctx.clone();
let revision_cache_task = tokio::spawn(async move {
let mut sub = rev_cache_ctx.db.subscribe();
let mut shutdown = rev_cache_ctx.shutdown.subscribe();
loop {
let rev = match tokio::select! {
a = sub.recv() => a,
_ = shutdown.recv() => break,
} {
Ok(a) => a,
Err(_) => {
rev_cache_ctx.revision_cache.write().await.truncate(0);
continue;
}
}; // TODO: handle falling behind
let mut cache = rev_cache_ctx.revision_cache.write().await;
cache.push_back(rev);
if cache.len() > rev_cache_ctx.revision_cache_size {
cache.pop_front();
}
}
});
let ws_ctx = rpc_ctx.clone();
let ws_server = {
let builder = Server::bind(&ws_ctx.bind_ws);
@@ -151,6 +131,33 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
"/ws/db" => {
Ok(subscribe(ctx, req).await.unwrap_or_else(err_to_500))
}
path if path.starts_with("/ws/rpc/") => {
match RequestGuid::from(
path.strip_prefix("/ws/rpc/").unwrap(),
) {
None => {
tracing::debug!("No Guid Path");
Response::builder()
.status(StatusCode::BAD_REQUEST)
.body(Body::empty())
}
Some(guid) => {
match ctx.get_ws_continuation_handler(&guid).await {
Some(cont) => match cont(req).await {
Ok(r) => Ok(r),
Err(e) => Response::builder()
.status(
StatusCode::INTERNAL_SERVER_ERROR,
)
.body(Body::from(format!("{}", e))),
},
_ => Response::builder()
.status(StatusCode::NOT_FOUND)
.body(Body::empty()),
}
}
}
}
path if path.starts_with("/rest/rpc/") => {
match RequestGuid::from(
path.strip_prefix("/rest/rpc/").unwrap(),
@@ -162,16 +169,12 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
.body(Body::empty())
}
Some(guid) => {
match ctx
.rpc_stream_continuations
.lock()
.await
.remove(&guid)
match ctx.get_rest_continuation_handler(&guid).await
{
None => Response::builder()
.status(StatusCode::NOT_FOUND)
.body(Body::empty()),
Some(cont) => match (cont.handler)(req).await {
Some(cont) => match cont(req).await {
Ok(r) => Ok(r),
Err(e) => Response::builder()
.status(
@@ -245,12 +248,6 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
ErrorKind::Unknown
))
.map_ok(|_| tracing::debug!("Metrics daemon Shutdown")),
revision_cache_task
.map_err(|e| Error::new(
eyre!("{}", e).wrap_err("Revision Cache daemon panicked!"),
ErrorKind::Unknown
))
.map_ok(|_| tracing::debug!("Revision Cache daemon Shutdown")),
ws_server
.map_err(|e| Error::new(e, ErrorKind::Network))
.map_ok(|_| tracing::debug!("WebSocket Server Shutdown")),
@@ -287,7 +284,7 @@ fn main() {
let matches = clap::App::new("embassyd")
.arg(
clap::Arg::with_name("config")
.short("c")
.short('c')
.long("config")
.takes_value(true),
)
@@ -343,6 +340,7 @@ fn main() {
e,
)
.await?;
let mut shutdown = ctx.shutdown.subscribe();
rpc_server!({
command: embassy::diagnostic_api,
context: ctx.clone(),
@@ -360,7 +358,7 @@ fn main() {
})
.await
.with_kind(embassy::ErrorKind::Network)?;
Ok::<_, Error>(None)
Ok::<_, Error>(shutdown.recv().await.with_kind(crate::ErrorKind::Unknown)?)
})()
.await
}

View File

@@ -31,12 +31,17 @@ pub struct ConfigActions {
}
impl ConfigActions {
#[instrument]
pub fn validate(&self, volumes: &Volumes, image_ids: &BTreeSet<ImageId>) -> Result<(), Error> {
pub fn validate(
&self,
eos_version: &Version,
volumes: &Volumes,
image_ids: &BTreeSet<ImageId>,
) -> Result<(), Error> {
self.get
.validate(volumes, image_ids, true)
.validate(eos_version, volumes, image_ids, true)
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Config Get"))?;
self.set
.validate(volumes, image_ids, true)
.validate(eos_version, volumes, image_ids, true)
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Config Set"))?;
Ok(())
}

View File

@@ -15,7 +15,6 @@ use tracing::instrument;
use crate::context::RpcContext;
use crate::db::model::{CurrentDependencies, CurrentDependencyInfo, CurrentDependents};
use crate::db::util::WithRevision;
use crate::dependencies::{
add_dependent_to_current_dependents_lists, break_transitive, heal_all_dependents_transitive,
BreakTransitiveReceipts, BreakageRes, Dependencies, DependencyConfig, DependencyError,
@@ -237,7 +236,8 @@ pub async fn get(
#[command(
subcommands(self(set_impl(async, context(RpcContext))), set_dry),
display(display_none)
display(display_none),
metadata(sync_db = true)
)]
#[instrument]
pub fn set(
@@ -247,9 +247,8 @@ pub fn set(
format: Option<IoFormat>,
#[arg(long = "timeout")] timeout: Option<crate::util::serde::Duration>,
#[arg(stdin, parse(parse_stdin_deserializable))] config: Option<Config>,
#[arg(rename = "expire-id", long = "expire-id")] expire_id: Option<String>,
) -> Result<(PackageId, Option<Config>, Option<Duration>, Option<String>), Error> {
Ok((id, config, timeout.map(|d| *d), expire_id))
) -> Result<(PackageId, Option<Config>, Option<Duration>), Error> {
Ok((id, config, timeout.map(|d| *d)))
}
/// So, the new locking finds all the possible locks and lifts them up into a bundle of locks.
@@ -407,12 +406,7 @@ impl ConfigReceipts {
#[instrument(skip(ctx))]
pub async fn set_dry(
#[context] ctx: RpcContext,
#[parent_data] (id, config, timeout, _): (
PackageId,
Option<Config>,
Option<Duration>,
Option<String>,
),
#[parent_data] (id, config, timeout): (PackageId, Option<Config>, Option<Duration>),
) -> Result<BreakageRes, Error> {
let mut db = ctx.db.handle();
let mut tx = db.begin().await?;
@@ -439,8 +433,8 @@ pub async fn set_dry(
#[instrument(skip(ctx))]
pub async fn set_impl(
ctx: RpcContext,
(id, config, timeout, expire_id): (PackageId, Option<Config>, Option<Duration>, Option<String>),
) -> Result<WithRevision<()>, Error> {
(id, config, timeout): (PackageId, Option<Config>, Option<Duration>),
) -> Result<(), Error> {
let mut db = ctx.db.handle();
let mut tx = db.begin().await?;
let mut breakages = BTreeMap::new();
@@ -457,10 +451,8 @@ pub async fn set_impl(
&locks,
)
.await?;
Ok(WithRevision {
response: (),
revision: tx.commit(expire_id).await?,
})
tx.commit().await?;
Ok(())
}
#[instrument(skip(ctx, db, receipts))]

View File

@@ -18,7 +18,7 @@ use regex::Regex;
use serde::de::{MapAccess, Visitor};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde_json::{Number, Value};
use sqlx::SqlitePool;
use sqlx::PgPool;
use super::util::{self, CharSet, NumRange, UniqueBy, STATIC_NULL};
use super::{Config, MatchError, NoMatchWithPath, TimeoutError, TypeOf};
@@ -1334,6 +1334,8 @@ impl DefaultableWith for ValueSpecString {
if &now.elapsed() > timeout {
return Err(TimeoutError);
}
} else {
return Ok(Value::String(candidate));
}
}
} else {
@@ -2050,7 +2052,7 @@ impl TorKeyPointer {
async fn deref(
&self,
source_package: &PackageId,
secrets: &SqlitePool,
secrets: &PgPool,
) -> Result<Value, ConfigurationError> {
if &self.package_id != source_package {
return Err(ConfigurationError::PermissionDenied(
@@ -2058,7 +2060,7 @@ impl TorKeyPointer {
));
}
let x = sqlx::query!(
"SELECT key FROM tor WHERE package = ? AND interface = ?",
"SELECT key FROM tor WHERE package = $1 AND interface = $2",
*self.package_id,
*self.interface
)

View File

@@ -16,7 +16,7 @@ impl CharSet {
self.0.iter().any(|r| r.0.contains(c))
}
pub fn gen<R: Rng>(&self, rng: &mut R) -> char {
let mut idx = rng.gen_range(0, self.1);
let mut idx = rng.gen_range(0..self.1);
for r in &self.0 {
if idx < r.1 {
return std::convert::TryFrom::try_from(
@@ -147,6 +147,44 @@ impl serde::ser::Serialize for CharSet {
}
}
pub trait MergeWith {
fn merge_with(&mut self, other: &serde_json::Value);
}
impl MergeWith for serde_json::Value {
fn merge_with(&mut self, other: &serde_json::Value) {
use serde_json::Value::Object;
if let (Object(orig), Object(ref other)) = (self, other) {
for (key, val) in other.into_iter() {
match (orig.get_mut(key), val) {
(Some(new_orig @ Object(_)), other @ Object(_)) => {
new_orig.merge_with(other);
}
(None, _) => {
orig.insert(key.clone(), val.clone());
}
_ => (),
}
}
}
}
}
#[test]
fn merge_with_tests() {
use serde_json::json;
let mut a = json!(
{"a": 1, "c": {"d": "123"}, "i": [1,2,3], "j": {}, "k":[1,2,3], "l": "test"}
);
a.merge_with(
&json!({"a":"a", "b": "b", "c":{"d":"d", "e":"e"}, "f":{"g":"g"}, "h": [1,2,3], "i":"i", "j":[1,2,3], "k":{}}),
);
assert_eq!(
a,
json!({"a": 1, "c": {"d": "123", "e":"e"}, "b":"b", "f": {"g":"g"}, "h":[1,2,3], "i":[1,2,3], "j": {}, "k":[1,2,3], "l": "test"})
)
}
pub mod serde_regex {
use regex::Regex;
use serde::*;

View File

@@ -7,22 +7,23 @@ use std::sync::Arc;
use std::time::Duration;
use bollard::Docker;
use helpers::to_tmp_path;
use patch_db::json_ptr::JsonPointer;
use patch_db::{DbHandle, LockReceipt, LockType, PatchDb, Revision};
use reqwest::Url;
use rpc_toolkit::url::Host;
use rpc_toolkit::Context;
use serde::Deserialize;
use sqlx::sqlite::SqliteConnectOptions;
use sqlx::SqlitePool;
use sqlx::postgres::PgConnectOptions;
use sqlx::PgPool;
use tokio::fs::File;
use tokio::process::Command;
use tokio::sync::{broadcast, oneshot, Mutex, RwLock};
use tracing::instrument;
use crate::core::rpc_continuations::{RequestGuid, RpcContinuation};
use crate::core::rpc_continuations::{RequestGuid, RestHandler, RpcContinuation};
use crate::db::model::{Database, InstalledPackageDataEntry, PackageDataEntry};
use crate::hostname::{derive_hostname, derive_id, get_product_key};
use crate::init::{init_postgres, pgloader};
use crate::install::cleanup::{cleanup_failed, uninstall, CleanupFailedReceipts};
use crate::manager::ManagerMap;
use crate::middleware::auth::HashSessionToken;
@@ -35,11 +36,13 @@ use crate::shutdown::Shutdown;
use crate::status::{MainStatus, Status};
use crate::util::io::from_yaml_async_reader;
use crate::util::{AsyncFileExt, Invoke};
use crate::{Error, ResultExt};
use crate::{Error, ErrorKind, ResultExt};
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct RpcContextConfig {
pub migration_batch_rows: Option<usize>,
pub migration_prefetch_rows: Option<usize>,
pub bind_rpc: Option<SocketAddr>,
pub bind_ws: Option<SocketAddr>,
pub bind_static: Option<SocketAddr>,
@@ -70,41 +73,42 @@ impl RpcContextConfig {
.as_deref()
.unwrap_or_else(|| Path::new("/embassy-data"))
}
pub async fn db(&self, secret_store: &SqlitePool, product_key: &str) -> Result<PatchDb, Error> {
let sid = derive_id(product_key);
let hostname = derive_hostname(&sid);
pub async fn db(&self, secret_store: &PgPool) -> Result<PatchDb, Error> {
let db_path = self.datadir().join("main").join("embassy.db");
let db = PatchDb::open(&db_path)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
if !db.exists(&<JsonPointer>::default()).await? {
if !db.exists(&<JsonPointer>::default()).await {
db.put(
&<JsonPointer>::default(),
&Database::init(
sid,
&hostname,
&os_key(&mut secret_store.acquire().await?).await?,
password_hash(&mut secret_store.acquire().await?).await?,
),
None,
)
.await?;
}
Ok(db)
}
#[instrument]
pub async fn secret_store(&self) -> Result<SqlitePool, Error> {
let secret_store = SqlitePool::connect_with(
SqliteConnectOptions::new()
.filename(self.datadir().join("main").join("secrets.db"))
.create_if_missing(true)
.busy_timeout(Duration::from_secs(30)),
)
.await?;
pub async fn secret_store(&self) -> Result<PgPool, Error> {
init_postgres(self.datadir()).await?;
let secret_store =
PgPool::connect_with(PgConnectOptions::new().database("secrets").username("root"))
.await?;
sqlx::migrate!()
.run(&secret_store)
.await
.with_kind(crate::ErrorKind::Database)?;
let old_db_path = self.datadir().join("main/secrets.db");
if tokio::fs::metadata(&old_db_path).await.is_ok() {
pgloader(
&old_db_path,
self.migration_batch_rows.unwrap_or(25000),
self.migration_prefetch_rows.unwrap_or(100_000),
)
.await?;
}
Ok(secret_store)
}
}
@@ -117,7 +121,7 @@ pub struct RpcContextSeed {
pub datadir: PathBuf,
pub disk_guid: Arc<String>,
pub db: PatchDb,
pub secret_store: SqlitePool,
pub secret_store: PgPool,
pub docker: Docker,
pub net_controller: NetController,
pub managers: ManagerMap,
@@ -213,10 +217,11 @@ impl RpcContext {
)));
let (shutdown, _) = tokio::sync::broadcast::channel(1);
let secret_store = base.secret_store().await?;
tracing::info!("Opened Sqlite DB");
let db = base.db(&secret_store, &get_product_key().await?).await?;
tracing::info!("Opened Pg DB");
let db = base.db(&secret_store).await?;
tracing::info!("Opened PatchDB");
let docker = Docker::connect_with_unix_defaults()?;
let mut docker = Docker::connect_with_unix_defaults()?;
docker.set_timeout(Duration::from_secs(600));
tracing::info!("Connected to Docker");
let net_controller = NetController::init(
([127, 0, 0, 1], 80).into(),
@@ -229,6 +234,7 @@ impl RpcContext {
.unwrap_or(&[SocketAddr::from(([127, 0, 0, 1], 53))]),
secret_store.clone(),
None,
&mut db.handle(),
)
.await?;
tracing::info!("Initialized Net Controller");
@@ -336,6 +342,18 @@ impl RpcContext {
static_files,
manifest,
} => {
for (volume_id, volume_info) in &*manifest.volumes {
let tmp_path = to_tmp_path(volume_info.path_for(
&self.datadir,
&package_id,
&manifest.version,
&volume_id,
))
.with_kind(ErrorKind::Filesystem)?;
if tokio::fs::metadata(&tmp_path).await.is_ok() {
tokio::fs::remove_dir_all(&tmp_path).await?;
}
}
let status = installed.status;
let main = match status.main {
MainStatus::BackingUp { started, .. } => {
@@ -374,6 +392,58 @@ impl RpcContext {
}
Ok(())
}
#[instrument(skip(self))]
pub async fn clean_continuations(&self) {
let mut continuations = self.rpc_stream_continuations.lock().await;
let mut to_remove = Vec::new();
for (guid, cont) in &*continuations {
if cont.is_timed_out() {
to_remove.push(guid.clone());
}
}
for guid in to_remove {
continuations.remove(&guid);
}
}
#[instrument(skip(self, handler))]
pub async fn add_continuation(&self, guid: RequestGuid, handler: RpcContinuation) {
self.clean_continuations().await;
self.rpc_stream_continuations
.lock()
.await
.insert(guid, handler);
}
pub async fn get_continuation_handler(&self, guid: &RequestGuid) -> Option<RestHandler> {
let mut continuations = self.rpc_stream_continuations.lock().await;
if let Some(cont) = continuations.remove(guid) {
cont.into_handler().await
} else {
None
}
}
pub async fn get_ws_continuation_handler(&self, guid: &RequestGuid) -> Option<RestHandler> {
let continuations = self.rpc_stream_continuations.lock().await;
if matches!(continuations.get(guid), Some(RpcContinuation::WebSocket(_))) {
drop(continuations);
self.get_continuation_handler(guid).await
} else {
None
}
}
pub async fn get_rest_continuation_handler(&self, guid: &RequestGuid) -> Option<RestHandler> {
let continuations = self.rpc_stream_continuations.lock().await;
if matches!(continuations.get(guid), Some(RpcContinuation::Rest(_))) {
drop(continuations);
self.get_continuation_handler(guid).await
} else {
None
}
}
}
impl Context for RpcContext {
fn host(&self) -> Host<&str> {

View File

@@ -2,15 +2,15 @@ use std::net::{IpAddr, SocketAddr};
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use josekit::jwk::Jwk;
use patch_db::json_ptr::JsonPointer;
use patch_db::PatchDb;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::Context;
use serde::{Deserialize, Serialize};
use sqlx::sqlite::SqliteConnectOptions;
use sqlx::SqlitePool;
use sqlx::postgres::PgConnectOptions;
use sqlx::PgPool;
use tokio::fs::File;
use tokio::sync::broadcast::Sender;
use tokio::sync::RwLock;
@@ -18,7 +18,7 @@ use tracing::instrument;
use url::Host;
use crate::db::model::Database;
use crate::hostname::{derive_hostname, derive_id, get_product_key};
use crate::init::{init_postgres, pgloader};
use crate::net::tor::os_key;
use crate::setup::{password_hash, RecoveryStatus};
use crate::util::io::from_yaml_async_reader;
@@ -36,6 +36,8 @@ pub struct SetupResult {
#[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct SetupContextConfig {
pub migration_batch_rows: Option<usize>,
pub migration_prefetch_rows: Option<usize>,
pub bind_rpc: Option<SocketAddr>,
pub datadir: Option<PathBuf>,
}
@@ -64,15 +66,26 @@ impl SetupContextConfig {
pub struct SetupContextSeed {
pub config_path: Option<PathBuf>,
pub migration_batch_rows: usize,
pub migration_prefetch_rows: usize,
pub bind_rpc: SocketAddr,
pub shutdown: Sender<()>,
pub datadir: PathBuf,
/// Used to encrypt the password created during setup, hiding it from snoopers
/// Set via path
pub current_secret: Arc<Jwk>,
pub selected_v2_drive: RwLock<Option<PathBuf>>,
pub cached_product_key: RwLock<Option<Arc<String>>>,
pub recovery_status: RwLock<Option<Result<RecoveryStatus, RpcError>>>,
pub setup_result: RwLock<Option<(Arc<String>, SetupResult)>>,
}
impl AsRef<Jwk> for SetupContextSeed {
fn as_ref(&self) -> &Jwk {
&self.current_secret
}
}
#[derive(Clone)]
pub struct SetupContext(Arc<SetupContextSeed>);
impl SetupContext {
@@ -83,9 +96,21 @@ impl SetupContext {
let datadir = cfg.datadir().to_owned();
Ok(Self(Arc::new(SetupContextSeed {
config_path: path.as_ref().map(|p| p.as_ref().to_owned()),
migration_batch_rows: cfg.migration_batch_rows.unwrap_or(25000),
migration_prefetch_rows: cfg.migration_prefetch_rows.unwrap_or(100_000),
bind_rpc: cfg.bind_rpc.unwrap_or(([127, 0, 0, 1], 5959).into()),
shutdown,
datadir,
current_secret: Arc::new(
Jwk::generate_ec_key(josekit::jwk::alg::ec::EcCurve::P256).map_err(|e| {
tracing::debug!("{:?}", e);
tracing::error!("Couldn't generate ec key");
Error::new(
color_eyre::eyre::eyre!("Couldn't generate ec key"),
crate::ErrorKind::Unknown,
)
})?,
),
selected_v2_drive: RwLock::new(None),
cached_product_key: RwLock::new(None),
recovery_status: RwLock::new(None),
@@ -93,61 +118,44 @@ impl SetupContext {
})))
}
#[instrument(skip(self))]
pub async fn db(&self, secret_store: &SqlitePool) -> Result<PatchDb, Error> {
pub async fn db(&self, secret_store: &PgPool) -> Result<PatchDb, Error> {
let db_path = self.datadir.join("main").join("embassy.db");
let db = PatchDb::open(&db_path)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
if !db.exists(&<JsonPointer>::default()).await? {
let pkey = self.product_key().await?;
let sid = derive_id(&*pkey);
let hostname = derive_hostname(&sid);
if !db.exists(&<JsonPointer>::default()).await {
db.put(
&<JsonPointer>::default(),
&Database::init(
sid,
&hostname,
&os_key(&mut secret_store.acquire().await?).await?,
password_hash(&mut secret_store.acquire().await?).await?,
),
None,
)
.await?;
}
Ok(db)
}
#[instrument(skip(self))]
pub async fn secret_store(&self) -> Result<SqlitePool, Error> {
let secret_store = SqlitePool::connect_with(
SqliteConnectOptions::new()
.filename(self.datadir.join("main").join("secrets.db"))
.create_if_missing(true)
.busy_timeout(Duration::from_secs(30)),
)
.await?;
pub async fn secret_store(&self) -> Result<PgPool, Error> {
init_postgres(&self.datadir).await?;
let secret_store =
PgPool::connect_with(PgConnectOptions::new().database("secrets").username("root"))
.await?;
sqlx::migrate!()
.run(&secret_store)
.await
.with_kind(crate::ErrorKind::Database)?;
let old_db_path = self.datadir.join("main/secrets.db");
if tokio::fs::metadata(&old_db_path).await.is_ok() {
pgloader(
&old_db_path,
self.migration_batch_rows,
self.migration_prefetch_rows,
)
.await?;
}
Ok(secret_store)
}
#[instrument(skip(self))]
pub async fn product_key(&self) -> Result<Arc<String>, Error> {
Ok(
if let Some(k) = {
let guard = self.cached_product_key.read().await;
let res = guard.clone();
drop(guard);
res
} {
k
} else {
let k = Arc::new(get_product_key().await?);
*self.cached_product_key.write().await = Some(k.clone());
k
},
)
}
}
impl Context for SetupContext {

View File

@@ -6,7 +6,6 @@ use rpc_toolkit::command;
use tracing::instrument;
use crate::context::RpcContext;
use crate::db::util::WithRevision;
use crate::dependencies::{
break_all_dependents_transitive, heal_all_dependents_transitive, BreakageRes, DependencyError,
DependencyReceipt, TaggedDependencyError,
@@ -61,12 +60,9 @@ impl StartReceipts {
}
}
#[command(display(display_none))]
#[command(display(display_none), metadata(sync_db = true))]
#[instrument(skip(ctx))]
pub async fn start(
#[context] ctx: RpcContext,
#[arg] id: PackageId,
) -> Result<WithRevision<()>, Error> {
pub async fn start(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result<(), Error> {
let mut db = ctx.db.handle();
let mut tx = db.begin().await?;
let receipts = StartReceipts::new(&mut tx, &id).await?;
@@ -77,7 +73,7 @@ pub async fn start(
.await?;
heal_all_dependents_transitive(&ctx, &mut tx, &id, &receipts.dependency_receipt).await?;
let revision = tx.commit(None).await?;
tx.commit().await?;
drop(receipts);
ctx.managers
@@ -87,10 +83,7 @@ pub async fn start(
.synchronize()
.await;
Ok(WithRevision {
revision,
response: (),
})
Ok(())
}
#[derive(Clone)]
pub struct StopReceipts {
@@ -150,7 +143,11 @@ async fn stop_common<Db: DbHandle>(
Ok(())
}
#[command(subcommands(self(stop_impl(async)), stop_dry), display(display_none))]
#[command(
subcommands(self(stop_impl(async)), stop_dry),
display(display_none),
metadata(sync_db = true)
)]
pub fn stop(#[arg] id: PackageId) -> Result<PackageId, Error> {
Ok(id)
}
@@ -173,23 +170,19 @@ pub async fn stop_dry(
}
#[instrument(skip(ctx))]
pub async fn stop_impl(ctx: RpcContext, id: PackageId) -> Result<WithRevision<()>, Error> {
pub async fn stop_impl(ctx: RpcContext, id: PackageId) -> Result<(), Error> {
let mut db = ctx.db.handle();
let mut tx = db.begin().await?;
stop_common(&mut tx, &id, &mut BTreeMap::new()).await?;
Ok(WithRevision {
revision: tx.commit(None).await?,
response: (),
})
tx.commit().await?;
Ok(())
}
#[command(display(display_none))]
pub async fn restart(
#[context] ctx: RpcContext,
#[arg] id: PackageId,
) -> Result<WithRevision<()>, Error> {
#[command(display(display_none), metadata(sync_db = true))]
pub async fn restart(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result<(), Error> {
let mut db = ctx.db.handle();
let mut tx = db.begin().await?;
@@ -208,9 +201,7 @@ pub async fn restart(
}
*status = Some(MainStatus::Restarting);
status.save(&mut tx).await?;
tx.commit().await?;
Ok(WithRevision {
revision: tx.commit(None).await?,
response: (),
})
Ok(())
}

View File

@@ -1,20 +1,27 @@
use std::time::Instant;
use std::sync::Arc;
use std::time::Duration;
use futures::future::BoxFuture;
use http::{Request, Response};
use hyper::Body;
use futures::FutureExt;
use helpers::TimedResource;
use hyper::upgrade::Upgraded;
use hyper::{Body, Error as HyperError, Request, Response};
use rand::RngCore;
use tokio::task::JoinError;
use tokio_tungstenite::WebSocketStream;
use crate::{Error, ResultExt};
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, serde::Serialize, serde::Deserialize)]
pub struct RequestGuid<T: AsRef<str> = String>(T);
pub struct RequestGuid<T: AsRef<str> = String>(Arc<T>);
impl RequestGuid {
pub fn new() -> Self {
let mut buf = [0; 40];
rand::thread_rng().fill_bytes(&mut buf);
RequestGuid(base32::encode(
RequestGuid(Arc::new(base32::encode(
base32::Alphabet::RFC4648 { padding: false },
&buf,
))
)))
}
pub fn from(r: &str) -> Option<RequestGuid> {
@@ -26,7 +33,7 @@ impl RequestGuid {
return None;
}
}
Some(RequestGuid(r.to_owned()))
Some(RequestGuid(Arc::new(r.to_owned())))
}
}
#[test]
@@ -39,15 +46,71 @@ fn parse_guid() {
impl<T: AsRef<str>> std::fmt::Display for RequestGuid<T> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0.as_ref().fmt(f)
(&*self.0).as_ref().fmt(f)
}
}
pub struct RpcContinuation {
pub created_at: Instant,
pub handler: Box<
dyn FnOnce(Request<Body>) -> BoxFuture<'static, Result<Response<Body>, crate::Error>>
+ Send
+ Sync,
>,
pub type RestHandler = Box<
dyn FnOnce(Request<Body>) -> BoxFuture<'static, Result<Response<Body>, crate::Error>> + Send,
>;
pub type WebSocketHandler = Box<
dyn FnOnce(
BoxFuture<'static, Result<Result<WebSocketStream<Upgraded>, HyperError>, JoinError>>,
) -> BoxFuture<'static, Result<(), Error>>
+ Send,
>;
pub enum RpcContinuation {
Rest(TimedResource<RestHandler>),
WebSocket(TimedResource<WebSocketHandler>),
}
impl RpcContinuation {
pub fn rest(handler: RestHandler, timeout: Duration) -> Self {
RpcContinuation::Rest(TimedResource::new(handler, timeout))
}
pub fn ws(handler: WebSocketHandler, timeout: Duration) -> Self {
RpcContinuation::WebSocket(TimedResource::new(handler, timeout))
}
pub fn is_timed_out(&self) -> bool {
match self {
RpcContinuation::Rest(a) => a.is_timed_out(),
RpcContinuation::WebSocket(a) => a.is_timed_out(),
}
}
pub async fn into_handler(self) -> Option<RestHandler> {
match self {
RpcContinuation::Rest(handler) => handler.get().await,
RpcContinuation::WebSocket(handler) => {
if let Some(handler) = handler.get().await {
Some(Box::new(
|req: Request<Body>| -> BoxFuture<'static, Result<Response<Body>, Error>> {
async move {
let (parts, body) = req.into_parts();
let req = Request::from_parts(parts, body);
let (res, ws_fut) = hyper_ws_listener::create_ws(req)
.with_kind(crate::ErrorKind::Network)?;
if let Some(ws_fut) = ws_fut {
tokio::task::spawn(async move {
match handler(ws_fut.boxed()).await {
Ok(()) => (),
Err(e) => {
tracing::error!("WebSocket Closed: {}", e);
tracing::debug!("{:?}", e);
}
}
});
}
Ok(res)
}
.boxed()
},
))
} else {
None
}
}
}
}
}

View File

@@ -1,92 +1,61 @@
pub mod model;
pub mod package;
pub mod util;
use std::borrow::Cow;
use std::future::Future;
use std::sync::Arc;
use std::time::Duration;
use color_eyre::eyre::eyre;
use futures::{FutureExt, SinkExt, StreamExt};
use patch_db::json_ptr::JsonPointer;
use patch_db::{Dump, Revision};
use rpc_toolkit::command;
use rpc_toolkit::hyper::upgrade::Upgraded;
use rpc_toolkit::hyper::{Body, Error as HyperError, Request, Response};
use rpc_toolkit::yajrc::{GenericRpcMethod, RpcError, RpcResponse};
use rpc_toolkit::yajrc::RpcError;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use tokio::sync::{broadcast, oneshot};
use tokio::sync::oneshot;
use tokio::task::JoinError;
use tokio_tungstenite::tungstenite::protocol::frame::coding::CloseCode;
use tokio_tungstenite::tungstenite::protocol::CloseFrame;
use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::WebSocketStream;
use tracing::instrument;
pub use self::model::DatabaseModel;
use self::util::WithRevision;
use crate::context::RpcContext;
use crate::middleware::auth::{HasValidSession, HashSessionToken};
use crate::util::serde::{display_serializable, IoFormat};
use crate::{Error, ResultExt};
#[instrument(skip(ctx, ws_fut))]
#[instrument(skip(ctx, session, ws_fut))]
async fn ws_handler<
WSFut: Future<Output = Result<Result<WebSocketStream<Upgraded>, HyperError>, JoinError>>,
>(
ctx: RpcContext,
session: Option<(HasValidSession, HashSessionToken)>,
ws_fut: WSFut,
) -> Result<(), Error> {
let (dump, sub) = ctx.db.dump_and_sub().await;
let (dump, sub) = ctx.db.dump_and_sub().await?;
let mut stream = ws_fut
.await
.with_kind(crate::ErrorKind::Network)?
.with_kind(crate::ErrorKind::Unknown)?;
let (has_valid_session, token) = loop {
if let Some(Message::Text(cookie)) = stream
.next()
if let Some((session, token)) = session {
let kill = subscribe_to_session_kill(&ctx, token).await;
send_dump(session, &mut stream, dump).await?;
deal_with_messages(session, kill, sub, stream).await?;
} else {
stream
.close(Some(CloseFrame {
code: CloseCode::Error,
reason: "UNAUTHORIZED".into(),
}))
.await
.transpose()
.with_kind(crate::ErrorKind::Network)?
{
let cookie_str = serde_json::from_str::<Cow<str>>(&cookie)
.with_kind(crate::ErrorKind::Deserialization)?;
.with_kind(crate::ErrorKind::Network)?;
}
let id = basic_cookies::Cookie::parse(&cookie_str)
.with_kind(crate::ErrorKind::Authorization)?
.into_iter()
.find(|c| c.get_name() == "session")
.ok_or_else(|| {
Error::new(eyre!("UNAUTHORIZED"), crate::ErrorKind::Authorization)
})?;
let authenticated_session = HashSessionToken::from_cookie(&id);
match HasValidSession::from_session(&authenticated_session, &ctx).await {
Err(e) => {
stream
.send(Message::Text(
serde_json::to_string(
&RpcResponse::<GenericRpcMethod<String>>::from_result(Err::<
_,
RpcError,
>(
e.into()
)),
)
.with_kind(crate::ErrorKind::Serialization)?,
))
.await
.with_kind(crate::ErrorKind::Network)?;
return Ok(());
}
Ok(has_validation) => break (has_validation, authenticated_session),
}
}
};
let kill = subscribe_to_session_kill(&ctx, token).await;
send_dump(has_valid_session, &mut stream, dump).await?;
deal_with_messages(has_valid_session, kill, sub, stream).await?;
Ok(())
}
@@ -108,46 +77,32 @@ async fn subscribe_to_session_kill(
async fn deal_with_messages(
_has_valid_authentication: HasValidSession,
mut kill: oneshot::Receiver<()>,
mut sub: broadcast::Receiver<Arc<Revision>>,
mut sub: patch_db::Subscriber,
mut stream: WebSocketStream<Upgraded>,
) -> Result<(), Error> {
loop {
futures::select! {
_ = (&mut kill).fuse() => {
tracing::info!("Closing WebSocket: Reason: Session Terminated");
stream
.close(Some(CloseFrame {
code: CloseCode::Error,
reason: "UNAUTHORIZED".into(),
}))
.await
.with_kind(crate::ErrorKind::Network)?;
return Ok(())
}
new_rev = sub.recv().fuse() => {
let rev = new_rev.with_kind(crate::ErrorKind::Database)?;
let rev = new_rev.expect("UNREACHABLE: patch-db is dropped");
stream
.send(Message::Text(
serde_json::to_string(
&RpcResponse::<GenericRpcMethod<String>>::from_result(Ok::<_, RpcError>(
serde_json::to_value(&rev).with_kind(crate::ErrorKind::Serialization)?,
)),
)
.with_kind(crate::ErrorKind::Serialization)?,
))
.send(Message::Text(serde_json::to_string(&rev).with_kind(crate::ErrorKind::Serialization)?))
.await
.with_kind(crate::ErrorKind::Network)?;
}
message = stream.next().fuse() => {
let message = message.transpose().with_kind(crate::ErrorKind::Network)?;
match message {
Some(Message::Ping(a)) => {
stream
.send(Message::Pong(a))
.await
.with_kind(crate::ErrorKind::Network)?;
}
Some(Message::Close(frame)) => {
if let Some(reason) = frame.as_ref() {
tracing::info!("Closing WebSocket: Reason: {} {}", reason.code, reason.reason);
} else {
tracing::info!("Closing WebSocket: Reason: Unknown");
}
return Ok(())
}
None => {
tracing::info!("Closing WebSocket: Stream Finished");
return Ok(())
@@ -155,12 +110,6 @@ async fn deal_with_messages(
_ => (),
}
}
_ = tokio::time::sleep(Duration::from_secs(10)).fuse() => {
stream
.send(Message::Ping(Vec::new()))
.await
.with_kind(crate::ErrorKind::Network)?;
}
}
}
}
@@ -172,13 +121,7 @@ async fn send_dump(
) -> Result<(), Error> {
stream
.send(Message::Text(
serde_json::to_string(&RpcResponse::<GenericRpcMethod<String>>::from_result(Ok::<
_,
RpcError,
>(
serde_json::to_value(&dump).with_kind(crate::ErrorKind::Serialization)?,
)))
.with_kind(crate::ErrorKind::Serialization)?,
serde_json::to_string(&dump).with_kind(crate::ErrorKind::Serialization)?,
))
.await
.with_kind(crate::ErrorKind::Network)?;
@@ -187,11 +130,27 @@ async fn send_dump(
pub async fn subscribe(ctx: RpcContext, req: Request<Body>) -> Result<Response<Body>, Error> {
let (parts, body) = req.into_parts();
let session = match async {
let token = HashSessionToken::from_request_parts(&parts)?;
let session = HasValidSession::from_session(&token, &ctx).await?;
Ok::<_, Error>((session, token))
}
.await
{
Ok(a) => Some(a),
Err(e) => {
if e.kind != crate::ErrorKind::Authorization {
tracing::error!("Error Authenticating Websocket: {}", e);
tracing::debug!("{:?}", e);
}
None
}
};
let req = Request::from_parts(parts, body);
let (res, ws_fut) = hyper_ws_listener::create_ws(req).with_kind(crate::ErrorKind::Network)?;
if let Some(ws_fut) = ws_fut {
tokio::task::spawn(async move {
match ws_handler(ctx, ws_fut).await {
match ws_handler(ctx, session, ws_fut).await {
Ok(()) => (),
Err(e) => {
tracing::error!("WebSocket Closed: {}", e);
@@ -223,24 +182,11 @@ pub async fn revisions(
#[allow(unused_variables)]
#[arg(long = "format")]
format: Option<IoFormat>,
) -> Result<RevisionsRes, RpcError> {
let cache = ctx.revision_cache.read().await;
if cache
.front()
.map(|rev| rev.id <= since + 1)
.unwrap_or(false)
{
Ok(RevisionsRes::Revisions(
cache
.iter()
.skip_while(|rev| rev.id < since + 1)
.cloned()
.collect(),
))
} else {
drop(cache);
Ok(RevisionsRes::Dump(ctx.db.dump().await))
}
) -> Result<RevisionsRes, Error> {
Ok(match ctx.db.sync(since).await? {
Ok(revs) => RevisionsRes::Revisions(revs),
Err(dump) => RevisionsRes::Dump(dump),
})
}
#[command(display(display_serializable))]
@@ -249,8 +195,8 @@ pub async fn dump(
#[allow(unused_variables)]
#[arg(long = "format")]
format: Option<IoFormat>,
) -> Result<Dump, RpcError> {
Ok(ctx.db.dump().await)
) -> Result<Dump, Error> {
Ok(ctx.db.dump().await?)
}
#[command(subcommands(ui))]
@@ -267,13 +213,11 @@ pub async fn ui(
#[allow(unused_variables)]
#[arg(long = "format")]
format: Option<IoFormat>,
) -> Result<WithRevision<()>, Error> {
) -> Result<(), Error> {
let ptr = "/ui"
.parse::<JsonPointer>()
.with_kind(crate::ErrorKind::Database)?
+ &pointer;
Ok(WithRevision {
response: (),
revision: ctx.db.put(&ptr, &value, None).await?,
})
ctx.db.put(&ptr, &value).await?;
Ok(())
}

View File

@@ -12,6 +12,7 @@ use serde_json::Value;
use torut::onion::TorSecretKeyV3;
use crate::config::spec::{PackagePointerSpec, SystemPointerSpec};
use crate::hostname::{generate_hostname, generate_id};
use crate::install::progress::InstallProgress;
use crate::net::interface::InterfaceId;
use crate::s9pk::manifest::{Manifest, ManifestModel, PackageId};
@@ -32,21 +33,20 @@ pub struct Database {
pub ui: Value,
}
impl Database {
pub fn init(
id: String,
hostname: &str,
tor_key: &TorSecretKeyV3,
password_hash: String,
) -> Self {
pub fn init(tor_key: &TorSecretKeyV3, password_hash: String) -> Self {
let id = generate_id();
let my_hostname = generate_hostname();
let lan_address = my_hostname.lan_address().parse().unwrap();
// TODO
Database {
server_info: ServerInfo {
id,
version: Current::new().semver().into(),
hostname: Some(my_hostname.0),
last_backup: None,
last_wifi_region: None,
eos_version_compat: Current::new().compat().clone(),
lan_address: format!("https://{}.local", hostname).parse().unwrap(),
lan_address,
tor_address: format!("http://{}", tor_key.public().get_onion_address())
.parse()
.unwrap(),
@@ -69,7 +69,8 @@ impl Database {
},
package_data: AllPackageData::default(),
recovered_packages: BTreeMap::new(),
ui: Value::Object(Default::default()),
ui: serde_json::from_str(include_str!("../../../frontend/patchdb-ui-seed.json"))
.unwrap(),
}
}
}
@@ -83,6 +84,7 @@ impl DatabaseModel {
#[serde(rename_all = "kebab-case")]
pub struct ServerInfo {
pub id: String,
pub hostname: Option<String>,
pub version: Version,
pub last_backup: Option<DateTime<Utc>>,
/// Used in the wifi to determine the region to set the system to

View File

@@ -1,10 +0,0 @@
use std::sync::Arc;
use patch_db::Revision;
use serde::{Deserialize, Serialize};
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct WithRevision<T> {
pub response: T,
pub revision: Option<Arc<Revision>>,
}

View File

@@ -21,7 +21,7 @@ pub fn init(#[context] ctx: SdkContext) -> Result<(), Error> {
.with_ctx(|_| (crate::ErrorKind::Filesystem, parent.display().to_string()))?;
}
tracing::info!("Generating new developer key...");
let keypair = Keypair::generate(&mut rand::thread_rng());
let keypair = Keypair::generate(&mut rand_old::thread_rng());
tracing::info!("Writing key to {}", ctx.developer_key_path.display());
let keypair_bytes = ed25519::KeypairBytes {
secret_key: keypair.secret.to_bytes(),

View File

@@ -6,7 +6,7 @@ use rpc_toolkit::yajrc::RpcError;
use crate::context::DiagnosticContext;
use crate::disk::repair;
use crate::logs::{display_logs, fetch_logs, LogResponse, LogSource};
use crate::logs::{fetch_logs, LogResponse, LogSource};
use crate::shutdown::Shutdown;
use crate::util::display_none;
use crate::Error;
@@ -23,19 +23,13 @@ pub fn error(#[context] ctx: DiagnosticContext) -> Result<Arc<RpcError>, Error>
Ok(ctx.error.clone())
}
#[command(display(display_logs))]
#[command(rpc_only)]
pub async fn logs(
#[arg] limit: Option<usize>,
#[arg] cursor: Option<String>,
#[arg] before_flag: Option<bool>,
#[arg] before: bool,
) -> Result<LogResponse, Error> {
Ok(fetch_logs(
LogSource::Service(SYSTEMD_UNIT),
limit,
cursor,
before_flag.unwrap_or(false),
)
.await?)
Ok(fetch_logs(LogSource::Service(SYSTEMD_UNIT), limit, cursor, before).await?)
}
#[command(display(display_none))]

View File

@@ -11,7 +11,7 @@ use crate::disk::mount::filesystem::block_dev::mount;
use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::util::unmount;
use crate::util::Invoke;
use crate::{Error, ResultExt};
use crate::{Error, ErrorKind, ResultExt};
pub const PASSWORD_PATH: &'static str = "/etc/embassy/password";
pub const DEFAULT_PASSWORD: &'static str = "password";
@@ -183,6 +183,7 @@ pub async fn unmount_all_fs<P: AsRef<Path>>(guid: &str, datadir: P) -> Result<()
#[instrument(skip(datadir))]
pub async fn export<P: AsRef<Path>>(guid: &str, datadir: P) -> Result<(), Error> {
Command::new("sync").invoke(ErrorKind::Filesystem).await?;
unmount_all_fs(guid, datadir).await?;
Command::new("vgchange")
.arg("-an")

View File

@@ -1,7 +1,7 @@
use clap::ArgMatches;
use rpc_toolkit::command;
use self::util::DiskListResponse;
use crate::disk::util::DiskInfo;
use crate::util::display_none;
use crate::util::serde::{display_serializable, IoFormat};
use crate::Error;
@@ -9,7 +9,6 @@ use crate::Error;
pub mod fsck;
pub mod main;
pub mod mount;
pub mod quirks;
pub mod util;
pub const BOOT_RW_PATH: &str = "/media/boot-rw";
@@ -20,7 +19,7 @@ pub fn disk() -> Result<(), Error> {
Ok(())
}
fn display_disk_info(info: DiskListResponse, matches: &ArgMatches<'_>) {
fn display_disk_info(info: Vec<DiskInfo>, matches: &ArgMatches) {
use prettytable::*;
if matches.is_present("format") {
@@ -35,7 +34,7 @@ fn display_disk_info(info: DiskListResponse, matches: &ArgMatches<'_>) {
"USED",
"EMBASSY OS VERSION"
]);
for disk in info.disks {
for disk in info {
let row = row![
disk.logicalname.display(),
"N/A",
@@ -71,7 +70,7 @@ fn display_disk_info(info: DiskListResponse, matches: &ArgMatches<'_>) {
table.add_row(row);
}
}
table.print_tty(false);
table.print_tty(false).unwrap();
}
#[command(display(display_disk_info))]
@@ -79,7 +78,7 @@ pub async fn list(
#[allow(unused_variables)]
#[arg]
format: Option<IoFormat>,
) -> Result<DiskListResponse, Error> {
) -> Result<Vec<DiskInfo>, Error> {
crate::disk::util::list().await
}

View File

@@ -1,6 +1,7 @@
use std::path::{Path, PathBuf};
use color_eyre::eyre::eyre;
use helpers::AtomicFile;
use tokio::io::AsyncWriteExt;
use tracing::instrument;
@@ -14,9 +15,9 @@ use crate::disk::util::EmbassyOsRecoveryInfo;
use crate::middleware::encrypt::{decrypt_slice, encrypt_slice};
use crate::s9pk::manifest::PackageId;
use crate::util::serde::IoFormat;
use crate::util::{AtomicFile, FileLock};
use crate::util::FileLock;
use crate::volume::BACKUP_DIR;
use crate::{Error, ResultExt};
use crate::{Error, ErrorKind, ResultExt};
pub struct BackupMountGuard<G: GenericMountGuard> {
backup_disk_mount_guard: Option<G>,
@@ -162,16 +163,20 @@ impl<G: GenericMountGuard> BackupMountGuard<G> {
pub async fn save(&self) -> Result<(), Error> {
let metadata_path = self.as_ref().join("metadata.cbor");
let backup_disk_path = self.backup_disk_path();
let mut file = AtomicFile::new(&metadata_path).await?;
let mut file = AtomicFile::new(&metadata_path, None::<PathBuf>)
.await
.with_kind(ErrorKind::Filesystem)?;
file.write_all(&IoFormat::Cbor.to_vec(&self.metadata)?)
.await?;
file.save().await?;
file.save().await.with_kind(ErrorKind::Filesystem)?;
let unencrypted_metadata_path =
backup_disk_path.join("EmbassyBackups/unencrypted-metadata.cbor");
let mut file = AtomicFile::new(&unencrypted_metadata_path).await?;
let mut file = AtomicFile::new(&unencrypted_metadata_path, None::<PathBuf>)
.await
.with_kind(ErrorKind::Filesystem)?;
file.write_all(&IoFormat::Cbor.to_vec(&self.unencrypted_metadata)?)
.await?;
file.save().await?;
file.save().await.with_kind(ErrorKind::Filesystem)?;
Ok(())
}

View File

@@ -48,13 +48,15 @@ pub async fn unmount<P: AsRef<Path>>(mountpoint: P) -> Result<(), Error> {
.arg(mountpoint.as_ref())
.invoke(crate::ErrorKind::Filesystem)
.await?;
tokio::fs::remove_dir_all(mountpoint.as_ref())
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
format!("rm {}", mountpoint.as_ref().display()),
)
})?;
match tokio::fs::remove_dir(mountpoint.as_ref()).await {
Err(e) if e.raw_os_error() == Some(39) => Ok(()), // directory not empty
a => a,
}
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
format!("rm {}", mountpoint.as_ref().display()),
)
})?;
Ok(())
}

View File

@@ -1,170 +0,0 @@
use std::collections::BTreeSet;
use std::num::ParseIntError;
use std::path::Path;
use color_eyre::eyre::eyre;
use tokio::io::AsyncWriteExt;
use tracing::instrument;
use super::BOOT_RW_PATH;
use crate::util::AtomicFile;
use crate::Error;
pub const QUIRK_PATH: &'static str = "/sys/module/usb_storage/parameters/quirks";
pub const WHITELIST: [(VendorId, ProductId); 5] = [
(VendorId(0x1d6b), ProductId(0x0002)), // root hub usb2
(VendorId(0x1d6b), ProductId(0x0003)), // root hub usb3
(VendorId(0x2109), ProductId(0x3431)),
(VendorId(0x1058), ProductId(0x262f)), // western digital black HDD
(VendorId(0x04e8), ProductId(0x4001)), // Samsung T7
];
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct VendorId(u16);
impl std::str::FromStr for VendorId {
type Err = ParseIntError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
u16::from_str_radix(s.trim(), 16).map(VendorId)
}
}
impl std::fmt::Display for VendorId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:04x}", self.0)
}
}
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct ProductId(u16);
impl std::str::FromStr for ProductId {
type Err = ParseIntError;
fn from_str(s: &str) -> Result<Self, Self::Err> {
u16::from_str_radix(s.trim(), 16).map(ProductId)
}
}
impl std::fmt::Display for ProductId {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{:04x}", self.0)
}
}
#[derive(Clone, Debug)]
pub struct Quirks(BTreeSet<(VendorId, ProductId)>);
impl Quirks {
pub fn add(&mut self, vendor: VendorId, product: ProductId) {
self.0.insert((vendor, product));
}
pub fn remove(&mut self, vendor: VendorId, product: ProductId) {
self.0.remove(&(vendor, product));
}
pub fn contains(&self, vendor: VendorId, product: ProductId) -> bool {
self.0.contains(&(vendor, product))
}
}
impl std::fmt::Display for Quirks {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut comma = false;
for (vendor, product) in &self.0 {
if comma {
write!(f, ",")?;
} else {
comma = true;
}
write!(f, "{}:{}:u", vendor, product)?;
}
Ok(())
}
}
impl std::str::FromStr for Quirks {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
let s = s.trim();
let mut quirks = BTreeSet::new();
for item in s.split(",") {
if let [vendor, product, "u"] = item.splitn(3, ":").collect::<Vec<_>>().as_slice() {
quirks.insert((vendor.parse()?, product.parse()?));
} else {
return Err(Error::new(
eyre!("Invalid quirk: `{}`", item),
crate::ErrorKind::DiskManagement,
));
}
}
Ok(Quirks(quirks))
}
}
#[instrument]
pub async fn update_quirks(quirks: &mut Quirks) -> Result<Vec<String>, Error> {
let mut usb_devices = tokio::fs::read_dir("/sys/bus/usb/devices/").await?;
let mut to_reconnect = Vec::new();
while let Some(usb_device) = usb_devices.next_entry().await? {
if tokio::fs::metadata(usb_device.path().join("idVendor"))
.await
.is_err()
{
continue;
}
let vendor = tokio::fs::read_to_string(usb_device.path().join("idVendor"))
.await?
.parse()?;
let product = tokio::fs::read_to_string(usb_device.path().join("idProduct"))
.await?
.parse()?;
if WHITELIST.contains(&(vendor, product)) {
quirks.remove(vendor, product);
continue;
}
if quirks.contains(vendor, product) {
continue;
}
quirks.add(vendor, product);
{
// write quirks to sysfs
let mut quirk_file = tokio::fs::File::create(QUIRK_PATH).await?;
quirk_file.write_all(quirks.to_string().as_bytes()).await?;
quirk_file.sync_all().await?;
drop(quirk_file);
}
disconnect_usb(usb_device.path()).await?;
let (vendor_name, product_name) = tokio::try_join!(
tokio::fs::read_to_string(usb_device.path().join("manufacturer")),
tokio::fs::read_to_string(usb_device.path().join("product")),
)?;
to_reconnect.push(format!("{} {}", vendor_name, product_name));
}
Ok(to_reconnect)
}
#[instrument(skip(usb_device_path))]
pub async fn disconnect_usb(usb_device_path: impl AsRef<Path>) -> Result<(), Error> {
let authorized_path = usb_device_path.as_ref().join("bConfigurationValue");
let mut authorized_file = tokio::fs::File::create(&authorized_path).await?;
authorized_file.write_all(b"0").await?;
authorized_file.sync_all().await?;
drop(authorized_file);
Ok(())
}
#[instrument]
pub async fn fetch_quirks() -> Result<Quirks, Error> {
Ok(tokio::fs::read_to_string(QUIRK_PATH).await?.parse()?)
}
#[instrument]
pub async fn save_quirks(quirks: &Quirks) -> Result<(), Error> {
let orig_path = Path::new(BOOT_RW_PATH).join("cmdline.txt.orig");
let target_path = Path::new(BOOT_RW_PATH).join("cmdline.txt");
if tokio::fs::metadata(&orig_path).await.is_err() {
tokio::fs::copy(&target_path, &orig_path).await?;
}
let cmdline = tokio::fs::read_to_string(&orig_path).await?;
let mut target = AtomicFile::new(&target_path).await?;
target
.write_all(format!("usb-storage.quirks={} {}", quirks, cmdline).as_bytes())
.await?;
target.save().await?;
Ok(())
}

View File

@@ -19,19 +19,11 @@ use tracing::instrument;
use super::mount::filesystem::block_dev::BlockDev;
use super::mount::filesystem::ReadOnly;
use super::mount::guard::TmpMountGuard;
use super::quirks::{fetch_quirks, save_quirks, update_quirks};
use crate::util::io::from_yaml_async_reader;
use crate::util::serde::IoFormat;
use crate::util::{Invoke, Version};
use crate::{Error, ResultExt as _};
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct DiskListResponse {
pub disks: Vec<DiskInfo>,
pub reconnect: Vec<String>,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct DiskInfo {
@@ -240,10 +232,7 @@ pub async fn recovery_info(
}
#[instrument]
pub async fn list() -> Result<DiskListResponse, Error> {
let mut quirks = fetch_quirks().await?;
let reconnect = update_quirks(&mut quirks).await?;
save_quirks(&mut quirks).await?;
pub async fn list() -> Result<Vec<DiskInfo>, Error> {
let disk_guids = pvscan().await?;
let disks = tokio_stream::wrappers::ReadDirStream::new(
tokio::fs::read_dir(DISK_PATH)
@@ -374,10 +363,7 @@ pub async fn list() -> Result<DiskListResponse, Error> {
})
}
Ok(DiskListResponse {
disks: res,
reconnect,
})
Ok(res)
}
fn parse_pvscan_output(pvscan_output: &str) -> BTreeMap<PathBuf, Option<String>> {

View File

@@ -67,6 +67,10 @@ pub enum ErrorKind {
LanPortConflict = 58,
Javascript = 59,
Pem = 60,
TLSInit = 61,
HttpRange = 62,
ContentLength = 63,
BytesError = 64
}
impl ErrorKind {
pub fn as_str(&self) -> &'static str {
@@ -132,6 +136,10 @@ impl ErrorKind {
LanPortConflict => "Incompatible LAN Port Configuration",
Javascript => "Javascript Engine Error",
Pem => "PEM Encoding Error",
TLSInit => "TLS Backend Initialize Error",
HttpRange => "No Support for Web Server HTTP Ranges",
ContentLength => "Request has no content length header",
BytesError => "Could not get the bytes for this request"
}
}
}
@@ -236,6 +244,8 @@ impl From<openssl::error::ErrorStack> for Error {
fn from(e: openssl::error::ErrorStack) -> Self {
Error::new(eyre!("OpenSSL ERROR:\n{}", e), ErrorKind::OpenSsl)
}
}
impl From<Error> for RpcError {
fn from(e: Error) -> Self {

View File

@@ -1,34 +1,53 @@
use digest::Digest;
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use patch_db::DbHandle;
use rand::{thread_rng, Rng};
use tokio::process::Command;
use tracing::instrument;
use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt};
use crate::{Error, ErrorKind};
#[derive(Clone, serde::Deserialize, serde::Serialize, Debug)]
pub struct Hostname(pub String);
pub const PRODUCT_KEY_PATH: &'static str = "/embassy-os/product_key.txt";
#[instrument]
pub async fn get_hostname() -> Result<String, Error> {
Ok(derive_hostname(&get_id().await?))
lazy_static::lazy_static! {
static ref ADJECTIVES: Vec<String> = include_str!("./assets/adjectives.txt").lines().map(|x| x.to_string()).collect();
static ref NOUNS: Vec<String> = include_str!("./assets/nouns.txt").lines().map(|x| x.to_string()).collect();
}
impl AsRef<str> for Hostname {
fn as_ref(&self) -> &str {
&self.0
}
}
pub fn derive_hostname(id: &str) -> String {
format!("embassy-{}", id)
impl Hostname {
pub fn lan_address(&self) -> String {
format!("https://{}.local", self.0)
}
}
pub fn generate_hostname() -> Hostname {
let mut rng = thread_rng();
let adjective = &ADJECTIVES[rng.gen_range(0..ADJECTIVES.len())];
let noun = &NOUNS[rng.gen_range(0..NOUNS.len())];
Hostname(format!("embassy-{adjective}-{noun}"))
}
pub fn generate_id() -> String {
let id = uuid::Uuid::new_v4();
id.to_string()
}
#[instrument]
pub async fn get_current_hostname() -> Result<String, Error> {
pub async fn get_current_hostname() -> Result<Hostname, Error> {
let out = Command::new("hostname")
.invoke(ErrorKind::ParseSysInfo)
.await?;
let out_string = String::from_utf8(out)?;
Ok(out_string.trim().to_owned())
Ok(Hostname(out_string.trim().to_owned()))
}
#[instrument]
pub async fn set_hostname(hostname: &str) -> Result<(), Error> {
pub async fn set_hostname(hostname: &Hostname) -> Result<(), Error> {
let hostname: &String = &hostname.0;
let _out = Command::new("hostnamectl")
.arg("set-hostname")
.arg(hostname)
@@ -37,38 +56,36 @@ pub async fn set_hostname(hostname: &str) -> Result<(), Error> {
Ok(())
}
#[instrument]
pub async fn get_product_key() -> Result<String, Error> {
let out = tokio::fs::read_to_string(PRODUCT_KEY_PATH)
#[instrument(skip(handle))]
pub async fn get_id<Db: DbHandle>(handle: &mut Db) -> Result<String, Error> {
let id = crate::db::DatabaseModel::new()
.server_info()
.id()
.get(handle, false)
.await?;
Ok(id.to_string())
}
pub async fn get_hostname<Db: DbHandle>(handle: &mut Db) -> Result<Hostname, Error> {
if let Ok(hostname) = crate::db::DatabaseModel::new()
.server_info()
.hostname()
.get(handle, false)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, PRODUCT_KEY_PATH))?;
Ok(out.trim().to_owned())
{
if let Some(hostname) = hostname.to_owned() {
return Ok(Hostname(hostname));
}
}
let id = get_id(handle).await?;
if id.len() != 8 {
return Ok(generate_hostname());
}
return Ok(Hostname(format!("embassy-{}", id)));
}
#[instrument]
pub async fn set_product_key(key: &str) -> Result<(), Error> {
let mut pkey_file = File::create(PRODUCT_KEY_PATH).await?;
pkey_file.write_all(key.as_bytes()).await?;
Ok(())
}
pub fn derive_id(key: &str) -> String {
let mut hasher = sha2::Sha256::new();
hasher.update(key.as_bytes());
let res = hasher.finalize();
hex::encode(&res[0..4])
}
#[instrument]
pub async fn get_id() -> Result<String, Error> {
let key = get_product_key().await?;
Ok(derive_id(&key))
}
// cat /embassy-os/product_key.txt | shasum -a 256 | head -c 8 | awk '{print "embassy-"$1}' | xargs hostnamectl set-hostname && systemctl restart avahi-daemon
#[instrument]
pub async fn sync_hostname() -> Result<(), Error> {
set_hostname(&format!("embassy-{}", get_id().await?)).await?;
#[instrument(skip(handle))]
pub async fn sync_hostname<Db: DbHandle>(handle: &mut Db) -> Result<(), Error> {
set_hostname(&get_hostname(handle).await?).await?;
Command::new("systemctl")
.arg("restart")
.arg("avahi-daemon")

View File

@@ -1,14 +1,11 @@
use std::fmt::Debug;
use std::str::FromStr;
pub use models::{Id, IdUnchecked, InvalidId, SYSTEM_ID};
use serde::{Deserialize, Deserializer, Serialize};
use crate::util::Version;
pub use models::{Id, InvalidId, IdUnchecked, SYSTEM_ID};
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)]
pub struct ImageId<S: AsRef<str> = String>(Id<S>);
impl<S: AsRef<str>> std::fmt::Display for ImageId<S> {

View File

@@ -1,15 +1,24 @@
use std::collections::HashMap;
use std::path::Path;
use std::process::Stdio;
use std::time::Duration;
use color_eyre::eyre::eyre;
use helpers::NonDetachingJoinHandle;
use patch_db::{DbHandle, LockReceipt, LockType};
use tokio::process::Command;
use crate::config::util::MergeWith;
use crate::context::rpc::RpcContextConfig;
use crate::db::model::ServerStatus;
use crate::install::PKG_DOCKER_DIR;
use crate::sound::CIRCLE_OF_5THS_SHORT;
use crate::util::Invoke;
use crate::version::VersionT;
use crate::Error;
pub const SYSTEM_REBUILD_PATH: &str = "/embassy-os/system-rebuild";
pub const STANDBY_MODE_PATH: &str = "/embassy-os/standby";
pub async fn check_time_is_synchronized() -> Result<bool, Error> {
Ok(String::from_utf8(
@@ -66,10 +75,171 @@ impl InitReceipts {
}
}
pub async fn init(cfg: &RpcContextConfig, product_key: &str) -> Result<(), Error> {
let should_rebuild = tokio::fs::metadata(SYSTEM_REBUILD_PATH).await.is_ok();
pub async fn pgloader(
old_db_path: impl AsRef<Path>,
batch_rows: usize,
prefetch_rows: usize,
) -> Result<(), Error> {
tokio::fs::write(
"/etc/embassy/migrate.load",
format!(
include_str!("migrate.load"),
sqlite_path = old_db_path.as_ref().display(),
batch_rows = batch_rows,
prefetch_rows = prefetch_rows
),
)
.await?;
match tokio::fs::remove_dir_all("/tmp/pgloader").await {
Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(()),
a => a,
}?;
tracing::info!("Running pgloader");
let out = Command::new("pgloader")
.arg("-v")
.arg("/etc/embassy/migrate.load")
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.output()
.await?;
let stdout = String::from_utf8(out.stdout)?;
for line in stdout.lines() {
tracing::debug!("pgloader: {}", line);
}
let stderr = String::from_utf8(out.stderr)?;
for line in stderr.lines() {
tracing::debug!("pgloader err: {}", line);
}
tracing::debug!("pgloader exited with code {:?}", out.status);
if let Some(err) = stdout.lines().chain(stderr.lines()).find_map(|l| {
if l.split_ascii_whitespace()
.any(|word| word == "ERROR" || word == "FATAL")
{
Some(l)
} else {
None
}
}) {
return Err(Error::new(
eyre!("pgloader error: {}", err),
crate::ErrorKind::Database,
));
}
tokio::fs::rename(
old_db_path.as_ref(),
old_db_path.as_ref().with_extension("bak"),
)
.await?;
Ok(())
}
// must be idempotent
pub async fn init_postgres(datadir: impl AsRef<Path>) -> Result<(), Error> {
let db_dir = datadir.as_ref().join("main/postgresql");
let is_mountpoint = || async {
Ok::<_, Error>(
tokio::process::Command::new("mountpoint")
.arg("/var/lib/postgresql")
.stdout(std::process::Stdio::null())
.stderr(std::process::Stdio::null())
.status()
.await?
.success(),
)
};
let exists = tokio::fs::metadata(&db_dir).await.is_ok();
if !exists {
Command::new("cp")
.arg("-ra")
.arg("/var/lib/postgresql")
.arg(&db_dir)
.invoke(crate::ErrorKind::Filesystem)
.await?;
}
if !is_mountpoint().await? {
crate::disk::mount::util::bind(&db_dir, "/var/lib/postgresql", false).await?;
}
Command::new("chown")
.arg("-R")
.arg("postgres")
.arg("/var/lib/postgresql")
.invoke(crate::ErrorKind::Database)
.await?;
Command::new("systemctl")
.arg("start")
.arg("postgresql")
.invoke(crate::ErrorKind::Database)
.await?;
if !exists {
Command::new("sudo")
.arg("-u")
.arg("postgres")
.arg("createuser")
.arg("root")
.invoke(crate::ErrorKind::Database)
.await?;
Command::new("sudo")
.arg("-u")
.arg("postgres")
.arg("createdb")
.arg("secrets")
.arg("-O")
.arg("root")
.invoke(crate::ErrorKind::Database)
.await?;
}
Ok(())
}
/// Values produced by a successful `init` that the caller needs to
/// continue booting (currently just the opened patch-db handle).
pub struct InitResult {
    pub db: patch_db::PatchDb,
}
pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
let secret_store = cfg.secret_store().await?;
let log_dir = cfg.datadir().join("main").join("logs");
let db = cfg.db(&secret_store).await?;
let mut handle = db.handle();
crate::db::DatabaseModel::new()
.server_info()
.lock(&mut handle, LockType::Write)
.await?;
let defaults: serde_json::Value =
serde_json::from_str(include_str!("../../frontend/patchdb-ui-seed.json")).map_err(|x| {
Error::new(
eyre!("Deserialization error {:?}", x),
crate::ErrorKind::Deserialization,
)
})?;
let mut ui = crate::db::DatabaseModel::new()
.ui()
.get(&mut handle, false)
.await?
.clone();
ui.merge_with(&defaults);
crate::db::DatabaseModel::new()
.ui()
.put(&mut handle, &ui)
.await?;
let receipts = InitReceipts::new(&mut handle).await?;
let should_rebuild = tokio::fs::metadata(SYSTEM_REBUILD_PATH).await.is_ok()
|| &*receipts.server_version.get(&mut handle).await?
< &crate::version::Current::new().semver();
let song = if should_rebuild {
Some(NonDetachingJoinHandle::from(tokio::spawn(async {
loop {
CIRCLE_OF_5THS_SHORT.play().await.unwrap();
tokio::time::sleep(Duration::from_secs(10)).await;
}
})))
} else {
None
};
let log_dir = cfg.datadir().join("main/logs");
if tokio::fs::metadata(&log_dir).await.is_err() {
tokio::fs::create_dir_all(&log_dir).await?;
}
@@ -91,7 +261,7 @@ pub async fn init(cfg: &RpcContextConfig, product_key: &str) -> Result<(), Error
tokio::fs::remove_dir_all(&tmp_docker).await?;
}
Command::new("cp")
.arg("-r")
.arg("-ra")
.arg("/var/lib/docker")
.arg(&tmp_docker)
.invoke(crate::ErrorKind::Filesystem)
@@ -116,6 +286,28 @@ pub async fn init(cfg: &RpcContextConfig, product_key: &str) -> Result<(), Error
tracing::info!("Mounted Docker Data");
if should_rebuild || !tmp_docker_exists {
tracing::info!("Creating Docker Network");
bollard::Docker::connect_with_unix_defaults()?
.create_network(bollard::network::CreateNetworkOptions {
name: "start9",
driver: "bridge",
ipam: bollard::models::Ipam {
config: Some(vec![bollard::models::IpamConfig {
subnet: Some("172.18.0.1/24".into()),
..Default::default()
}]),
..Default::default()
},
options: {
let mut m = HashMap::new();
m.insert("com.docker.network.bridge.name", "br-start9");
m
},
..Default::default()
})
.await?;
tracing::info!("Created Docker Network");
tracing::info!("Loading System Docker Images");
crate::install::load_images("/var/lib/embassy/system-images").await?;
tracing::info!("Loaded System Docker Images");
@@ -125,12 +317,20 @@ pub async fn init(cfg: &RpcContextConfig, product_key: &str) -> Result<(), Error
tracing::info!("Loaded Package Docker Images");
}
tracing::info!("Enabling Docker QEMU Emulation");
Command::new("docker")
.arg("run")
.arg("--privileged")
.arg("--rm")
.arg("start9/x_system/binfmt")
.arg("--install")
.arg("all")
.invoke(crate::ErrorKind::Docker)
.await?;
tracing::info!("Enabled Docker QEMU Emulation");
crate::ssh::sync_keys_from_db(&secret_store, "/home/start9/.ssh/authorized_keys").await?;
tracing::info!("Synced SSH Keys");
let db = cfg.db(&secret_store, product_key).await?;
let mut handle = db.handle();
let receipts = InitReceipts::new(&mut handle).await?;
crate::net::wifi::synchronize_wpa_supplicant_conf(
&cfg.datadir().join("main"),
@@ -160,13 +360,23 @@ pub async fn init(cfg: &RpcContextConfig, product_key: &str) -> Result<(), Error
}
if warn_time_not_synced {
tracing::warn!("Timed out waiting for system time to synchronize");
} else {
tracing::info!("Syncronized system clock");
}
crate::version::init(&mut handle, &receipts).await?;
if should_rebuild {
tokio::fs::remove_file(SYSTEM_REBUILD_PATH).await?;
match tokio::fs::remove_file(SYSTEM_REBUILD_PATH).await {
Ok(()) => Ok(()),
Err(e) if e.kind() == std::io::ErrorKind::NotFound => Ok(()),
Err(e) => Err(e),
}?;
}
Ok(())
drop(song);
tracing::info!("System initialized.");
Ok(InitResult { db })
}

View File

@@ -2,7 +2,7 @@ use std::collections::HashMap;
use bollard::image::ListImagesOptions;
use patch_db::{DbHandle, LockReceipt, LockTargetId, LockType, PatchDbHandle, Verifier};
use sqlx::{Executor, Sqlite};
use sqlx::{Executor, Postgres};
use tracing::instrument;
use super::{PKG_ARCHIVE_DIR, PKG_DOCKER_DIR};
@@ -119,10 +119,20 @@ pub async fn cleanup(ctx: &RpcContext, id: &PackageId, version: &Version) -> Res
.await
.apply(|res| errors.handle(res));
errors.extend(
futures::future::join_all(images.into_iter().flatten().map(|image| async {
let image = image; // move into future
ctx.docker.remove_image(&image.id, None, None).await
}))
futures::future::join_all(
images
.into_iter()
.flatten()
.flat_map(|image| image.repo_tags)
.filter(|tag| {
tag.starts_with(&format!("start9/{}/", id))
&& tag.ends_with(&format!(":{}", version))
})
.map(|tag| async {
let tag = tag; // move into future
ctx.docker.remove_image(&tag, None, None).await
}),
)
.await,
);
let pkg_archive_dir = ctx
@@ -337,7 +347,7 @@ pub async fn uninstall<Ex>(
id: &PackageId,
) -> Result<(), Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
let mut tx = db.begin().await?;
let receipts = UninstallReceipts::new(&mut tx, id).await?;
@@ -375,7 +385,7 @@ where
if tokio::fs::metadata(&volumes).await.is_ok() {
tokio::fs::remove_dir_all(&volumes).await?;
}
tx.commit(None).await?;
tx.commit().await?;
remove_tor_keys(secrets, &entry.manifest.id).await?;
Ok(())
}
@@ -383,10 +393,10 @@ where
#[instrument(skip(secrets))]
pub async fn remove_tor_keys<Ex>(secrets: &mut Ex, id: &PackageId) -> Result<(), Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
let id_str = id.as_str();
sqlx::query!("DELETE FROM tor WHERE package = ?", id_str)
sqlx::query!("DELETE FROM tor WHERE package = $1", id_str)
.execute(secrets)
.await?;
Ok(())

View File

@@ -5,7 +5,7 @@ use std::path::{Path, PathBuf};
use std::process::Stdio;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::time::{Duration, Instant};
use std::time::Duration;
use color_eyre::eyre::eyre;
use emver::VersionRange;
@@ -16,8 +16,8 @@ use http::{Request, Response, StatusCode};
use hyper::Body;
use patch_db::{DbHandle, LockReceipt, LockType};
use reqwest::Url;
use rpc_toolkit::command;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{command, Context};
use tokio::fs::{File, OpenOptions};
use tokio::io::{AsyncRead, AsyncSeek, AsyncSeekExt};
use tokio::process::Command;
@@ -32,7 +32,6 @@ use crate::db::model::{
CurrentDependencies, CurrentDependencyInfo, CurrentDependents, InstalledPackageDataEntry,
PackageDataEntry, RecoveredPackageInfo, StaticDependencyInfo, StaticFiles,
};
use crate::db::util::WithRevision;
use crate::dependencies::{
add_dependent_to_current_dependents_lists, break_all_dependents_transitive,
reconfigure_dependents_with_live_pointers, BreakTransitiveReceipts, BreakageRes,
@@ -45,7 +44,7 @@ use crate::s9pk::manifest::{Manifest, PackageId};
use crate::s9pk::reader::S9pkReader;
use crate::status::{MainStatus, Status};
use crate::util::io::{copy_and_shutdown, response_to_reader};
use crate::util::serde::{display_serializable, IoFormat, Port};
use crate::util::serde::{display_serializable, Port};
use crate::util::{display_none, AsyncFileExt, Version};
use crate::version::{Current, VersionT};
use crate::volume::{asset_dir, script_dir};
@@ -115,19 +114,20 @@ impl std::fmt::Display for MinMax {
#[command(
custom_cli(cli_install(async, context(CliContext))),
display(display_none)
display(display_none),
metadata(sync_db = true)
)]
#[instrument(skip(ctx))]
pub async fn install(
#[context] ctx: RpcContext,
#[arg] id: String,
#[arg(short = "m", long = "marketplace-url", rename = "marketplace-url")]
#[arg(short = 'm', long = "marketplace-url", rename = "marketplace-url")]
marketplace_url: Option<Url>,
#[arg(short = "v", long = "version-spec", rename = "version-spec")] version_spec: Option<
#[arg(short = 'v', long = "version-spec", rename = "version-spec")] version_spec: Option<
String,
>,
#[arg(long = "version-priority", rename = "version-priority")] version_priority: Option<MinMax>,
) -> Result<WithRevision<()>, Error> {
) -> Result<(), Error> {
let version_str = match &version_spec {
None => "*",
Some(v) => &*v,
@@ -143,7 +143,7 @@ pub async fn install(
version,
version_priority,
Current::new().compat(),
platforms::TARGET_ARCH,
&*crate::ARCH,
))
.await
.with_kind(crate::ErrorKind::Registry)?
@@ -159,7 +159,7 @@ pub async fn install(
man.version,
version_priority,
Current::new().compat(),
platforms::TARGET_ARCH,
&*crate::ARCH,
))
.await
.with_kind(crate::ErrorKind::Registry)?
@@ -191,7 +191,7 @@ pub async fn install(
id,
man.version,
Current::new().compat(),
platforms::TARGET_ARCH,
&*crate::ARCH,
))
.await?
.error_for_status()?,
@@ -210,7 +210,7 @@ pub async fn install(
id,
man.version,
Current::new().compat(),
platforms::TARGET_ARCH,
&*crate::ARCH,
))
.await?
.error_for_status()?,
@@ -229,7 +229,7 @@ pub async fn install(
id,
man.version,
Current::new().compat(),
platforms::TARGET_ARCH,
&*crate::ARCH,
))
.await?
.error_for_status()?,
@@ -287,7 +287,7 @@ pub async fn install(
}
}
pde.save(&mut tx).await?;
let res = tx.commit(None).await?;
tx.commit().await?;
drop(db_handle);
tokio::spawn(async move {
@@ -323,10 +323,7 @@ pub async fn install(
}
});
Ok(WithRevision {
revision: res,
response: (),
})
Ok(())
}
#[command(rpc_only, display(display_none))]
@@ -427,7 +424,7 @@ pub async fn sideload(
}
}
pde.save(&mut tx).await?;
tx.commit(None).await?;
tx.commit().await?;
if let Err(e) = download_install_s9pk(
&new_ctx,
@@ -478,23 +475,11 @@ pub async fn sideload(
}
.boxed()
});
let cont = RpcContinuation {
created_at: Instant::now(), // TODO
handler,
};
// gc the map
let mut guard = ctx.rpc_stream_continuations.lock().await;
let garbage_collected = std::mem::take(&mut *guard)
.into_iter()
.filter(|(_, v)| v.created_at.elapsed() < Duration::from_secs(30))
.collect::<BTreeMap<RequestGuid, RpcContinuation>>();
*guard = garbage_collected;
drop(guard);
// insert the new continuation
ctx.rpc_stream_continuations
.lock()
.await
.insert(guid.clone(), cont);
ctx.add_continuation(
guid.clone(),
RpcContinuation::rest(handler, Duration::from_secs(30)),
)
.await;
Ok(guid)
}
@@ -537,12 +522,7 @@ async fn cli_install(
let body = Body::wrap_stream(tokio_util::io::ReaderStream::new(file));
let res = ctx
.client
.post(format!(
"{}://{}/rest/rpc/{}",
ctx.protocol(),
ctx.host(),
guid
))
.post(format!("{}rest/rpc/{}", ctx.base_url, guid,))
.header(CONTENT_LENGTH, content_length)
.body(body)
.send()
@@ -576,7 +556,7 @@ async fn cli_install(
ctx,
"package.install",
params,
PhantomData::<WithRevision<()>>,
PhantomData::<()>,
)
.await?
.result?;
@@ -587,7 +567,8 @@ async fn cli_install(
#[command(
subcommands(self(uninstall_impl(async)), uninstall_dry),
display(display_none)
display(display_none),
metadata(sync_db = true)
)]
pub async fn uninstall(#[arg] id: PackageId) -> Result<PackageId, Error> {
Ok(id)
@@ -618,7 +599,7 @@ pub async fn uninstall_dry(
}
#[instrument(skip(ctx))]
pub async fn uninstall_impl(ctx: RpcContext, id: PackageId) -> Result<WithRevision<()>, Error> {
pub async fn uninstall_impl(ctx: RpcContext, id: PackageId) -> Result<(), Error> {
let mut handle = ctx.db.handle();
let mut tx = handle.begin().await?;
@@ -646,7 +627,7 @@ pub async fn uninstall_impl(ctx: RpcContext, id: PackageId) -> Result<WithRevisi
removing: installed,
});
pde.save(&mut tx).await?;
let res = tx.commit(None).await?;
tx.commit().await?;
drop(handle);
tokio::spawn(async move {
@@ -683,17 +664,18 @@ pub async fn uninstall_impl(ctx: RpcContext, id: PackageId) -> Result<WithRevisi
}
});
Ok(WithRevision {
revision: res,
response: (),
})
Ok(())
}
#[command(rename = "delete-recovered", display(display_none))]
#[command(
rename = "delete-recovered",
display(display_none),
metadata(sync_db = true)
)]
pub async fn delete_recovered(
#[context] ctx: RpcContext,
#[arg] id: PackageId,
) -> Result<WithRevision<()>, Error> {
) -> Result<(), Error> {
let mut handle = ctx.db.handle();
let mut tx = handle.begin().await?;
let mut sql_tx = ctx.secret_store.begin().await?;
@@ -716,13 +698,10 @@ pub async fn delete_recovered(
}
cleanup::remove_tor_keys(&mut sql_tx, &id).await?;
let res = tx.commit(None).await?;
tx.commit().await?;
sql_tx.commit().await?;
Ok(WithRevision {
revision: res,
response: (),
})
Ok(())
}
pub struct DownloadInstallReceipts {
@@ -788,7 +767,7 @@ pub async fn download_install_s9pk(
for (p, lan) in cfg {
if p.0 == 80 && lan.ssl || p.0 == 443 && !lan.ssl {
return Err(Error::new(
eyre!("SSL Conflict with EmbassyOS"),
eyre!("SSL Conflict with embassyOS"),
ErrorKind::LanPortConflict,
));
}
@@ -875,7 +854,7 @@ pub async fn download_install_s9pk(
tracing::error!("Failed to clean up {}@{}: {}", pkg_id, version, e);
tracing::debug!("{:?}", e);
} else {
tx.commit(None).await?;
tx.commit().await?;
}
Err(e)
} else {
@@ -959,7 +938,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
dep,
info.version,
Current::new().compat(),
platforms::TARGET_ARCH,
&*crate::ARCH,
))
.await
.with_kind(crate::ErrorKind::Registry)?
@@ -994,7 +973,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
dep,
info.version,
Current::new().compat(),
platforms::TARGET_ARCH,
&*crate::ARCH,
))
.await
.with_kind(crate::ErrorKind::Registry)?;
@@ -1164,7 +1143,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
if let Some(mut hdl) = rdr.scripts().await? {
tokio::io::copy(
&mut hdl,
&mut File::create(dbg!(script_dir.join("embassy.js"))).await?,
&mut File::create(script_dir.join("embassy.js")).await?,
)
.await?;
}
@@ -1522,7 +1501,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
}
sql_tx.commit().await?;
tx.commit(None).await?;
tx.commit().await?;
tracing::info!("Install {}@{}: Complete", pkg_id, version);

View File

@@ -1,6 +1,13 @@
pub const DEFAULT_MARKETPLACE: &str = "https://marketplace.start9.com";
pub const DEFAULT_MARKETPLACE: &str = "https://registry.start9.com";
pub const BUFFER_SIZE: usize = 1024;
pub const HOST_IP: [u8; 4] = [172, 18, 0, 1];
pub const TARGET: &str = current_platform::CURRENT_PLATFORM;
lazy_static::lazy_static! {
    /// CPU architecture component of the compile-time target triple
    /// (e.g. "x86_64" out of "x86_64-unknown-linux-gnu").
    pub static ref ARCH: &'static str = {
        // Target triples always start with "<arch>-", so a missing '-'
        // means CURRENT_PLATFORM is malformed — a build-environment bug.
        let (arch, _) = TARGET
            .split_once('-')
            .expect("target triple must contain '-'");
        arch
    };
}
pub mod action;
pub mod auth;

View File

@@ -1,22 +1,116 @@
use std::future::Future;
use std::marker::PhantomData;
use std::ops::{Deref, DerefMut};
use std::process::Stdio;
use std::time::{Duration, UNIX_EPOCH};
use chrono::{DateTime, Utc};
use clap::ArgMatches;
use color_eyre::eyre::eyre;
use futures::TryStreamExt;
use futures::stream::BoxStream;
use futures::{FutureExt, SinkExt, Stream, StreamExt, TryStreamExt};
use hyper::upgrade::Upgraded;
use hyper::Error as HyperError;
use rpc_toolkit::command;
use rpc_toolkit::yajrc::RpcError;
use serde::{Deserialize, Serialize};
use tokio::io::{AsyncBufReadExt, BufReader};
use tokio::process::Command;
use tokio::process::{Child, Command};
use tokio::task::JoinError;
use tokio_stream::wrappers::LinesStream;
use tokio_tungstenite::tungstenite::protocol::frame::coding::CloseCode;
use tokio_tungstenite::tungstenite::protocol::CloseFrame;
use tokio_tungstenite::tungstenite::Message;
use tokio_tungstenite::WebSocketStream;
use tracing::instrument;
use crate::context::{CliContext, RpcContext};
use crate::core::rpc_continuations::{RequestGuid, RpcContinuation};
use crate::error::ResultExt;
use crate::procedure::docker::DockerProcedure;
use crate::s9pk::manifest::PackageId;
use crate::util::display_none;
use crate::util::serde::Reversible;
use crate::Error;
use crate::{Error, ErrorKind};
/// A live stream of journald entries backed by a spawned `journalctl`
/// child process.
///
/// The child handle is stored alongside the entry stream so that dropping
/// the `LogStream` also drops the `journalctl` process feeding it (the
/// child is spawned with `kill_on_drop`).
#[pin_project::pin_project]
struct LogStream {
    // Held only to tie the child's lifetime to the stream; never polled.
    _child: Child,
    #[pin]
    entries: BoxStream<'static, Result<JournalctlEntry, Error>>,
}

impl Deref for LogStream {
    type Target = BoxStream<'static, Result<JournalctlEntry, Error>>;
    fn deref(&self) -> &Self::Target {
        &self.entries
    }
}

impl DerefMut for LogStream {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.entries
    }
}

impl Stream for LogStream {
    type Item = Result<JournalctlEntry, Error>;
    // Delegates polling to the inner boxed stream via pin projection.
    fn poll_next(
        self: std::pin::Pin<&mut Self>,
        cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Self::Item>> {
        let this = self.project();
        Stream::poll_next(this.entries, cx)
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.entries.size_hint()
    }
}
/// Drive a websocket session that forwards journald log entries to the
/// client.
///
/// `first_entry`, if present, is the entry that was consumed while
/// computing the start cursor; it is re-sent first so the client sees a
/// gapless stream. The loop then races reading from `logs` against
/// reading from the socket: any client message (or disconnect) observed
/// via `stream.try_next()` ends the session. On normal completion the
/// socket is closed with a `Normal` close frame.
#[instrument(skip(logs, ws_fut))]
async fn ws_handler<
    WSFut: Future<Output = Result<Result<WebSocketStream<Upgraded>, HyperError>, JoinError>>,
>(
    first_entry: Option<LogEntry>,
    mut logs: LogStream,
    ws_fut: WSFut,
) -> Result<(), Error> {
    // Outer Result: the upgrade task join failed; inner: the HTTP
    // upgrade itself failed.
    let mut stream = ws_fut
        .await
        .with_kind(crate::ErrorKind::Network)?
        .with_kind(crate::ErrorKind::Unknown)?;
    if let Some(first_entry) = first_entry {
        stream
            .send(Message::Text(
                serde_json::to_string(&first_entry).with_kind(ErrorKind::Serialization)?,
            ))
            .await
            .with_kind(ErrorKind::Network)?;
    }
    // The select! yields None when the client side completes, which
    // terminates the while-let loop.
    while let Some(entry) = tokio::select! {
        a = logs.try_next() => Some(a?),
        a = stream.try_next() => { a.with_kind(crate::ErrorKind::Network)?; None }
    } {
        if let Some(entry) = entry {
            let (_, log_entry) = entry.log_entry()?;
            stream
                .send(Message::Text(
                    serde_json::to_string(&log_entry).with_kind(ErrorKind::Serialization)?,
                ))
                .await
                .with_kind(ErrorKind::Network)?;
        }
    }
    stream
        .close(Some(CloseFrame {
            code: CloseCode::Normal,
            reason: "Log Stream Finished".into(),
        }))
        .await
        .with_kind(ErrorKind::Network)?;
    Ok(())
}
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)]
#[serde(rename_all = "kebab-case")]
@@ -25,6 +119,12 @@ pub struct LogResponse {
start_cursor: Option<String>,
end_cursor: Option<String>,
}
/// Response for the `logs.follow` RPC: where the live stream starts and
/// the continuation guid the client should open a websocket against.
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)]
#[serde(rename_all = "kebab-case")]
pub struct LogFollowResponse {
    // Cursor of the first entry that will be delivered; None if no entry
    // was immediately available when the follow began.
    start_cursor: Option<String>,
    guid: RequestGuid,
}
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)]
pub struct LogEntry {
@@ -111,38 +211,145 @@ pub enum LogSource {
Container(PackageId),
}
/// CLI display hook: print each fetched log entry on its own line.
pub fn display_logs(all: LogResponse, _: &ArgMatches<'_>) {
    all.entries.iter().for_each(|entry| println!("{}", entry));
}
#[command(display(display_logs))]
#[command(
custom_cli(cli_logs(async, context(CliContext))),
subcommands(self(logs_nofollow(async)), logs_follow),
display(display_none)
)]
pub async fn logs(
#[arg] id: PackageId,
#[arg] limit: Option<usize>,
#[arg] cursor: Option<String>,
#[arg] before_flag: Option<bool>,
#[arg(short = 'l', long = "limit")] limit: Option<usize>,
#[arg(short = 'c', long = "cursor")] cursor: Option<String>,
#[arg(short = 'B', long = "before", default)] before: bool,
#[arg(short = 'f', long = "follow", default)] follow: bool,
) -> Result<(PackageId, Option<usize>, Option<String>, bool, bool), Error> {
Ok((id, limit, cursor, before, follow))
}
/// CLI-side dispatcher for `package.logs`: validates flag combinations,
/// then delegates to the generic follow or no-follow handler.
pub async fn cli_logs(
    ctx: CliContext,
    (id, limit, cursor, before, follow): (PackageId, Option<usize>, Option<String>, bool, bool),
) -> Result<(), RpcError> {
    if !follow {
        return cli_logs_generic_nofollow(ctx, "package.logs", Some(id), limit, cursor, before)
            .await;
    }
    // --cursor and --before only make sense for bounded (non-follow)
    // queries; reject them up front.
    if cursor.is_some() {
        return Err(RpcError::from(Error::new(
            eyre!("The argument '--cursor <cursor>' cannot be used with '--follow'"),
            crate::ErrorKind::InvalidRequest,
        )));
    }
    if before {
        return Err(RpcError::from(Error::new(
            eyre!("The argument '--before' cannot be used with '--follow'"),
            crate::ErrorKind::InvalidRequest,
        )));
    }
    cli_logs_generic_follow(ctx, "package.logs.follow", Some(id), limit).await
}
pub async fn logs_nofollow(
_ctx: (),
(id, limit, cursor, before, _): (PackageId, Option<usize>, Option<String>, bool, bool),
) -> Result<LogResponse, Error> {
Ok(fetch_logs(
LogSource::Container(id),
limit,
cursor,
before_flag.unwrap_or(false),
)
.await?)
fetch_logs(LogSource::Container(id), limit, cursor, before).await
}
/// `package.logs.follow` RPC: start a live journald tail for the
/// package's container and return the websocket continuation info.
#[command(rpc_only, rename = "follow", display(display_none))]
pub async fn logs_follow(
    #[context] ctx: RpcContext,
    // Only the id and limit from the parent `logs` command matter here;
    // cursor/before/follow were validated CLI-side.
    #[parent_data] (id, limit, _, _, _): (PackageId, Option<usize>, Option<String>, bool, bool),
) -> Result<LogFollowResponse, Error> {
    follow_logs(ctx, LogSource::Container(id), limit).await
}
#[instrument]
pub async fn fetch_logs(
id: LogSource,
pub async fn cli_logs_generic_nofollow(
ctx: CliContext,
method: &str,
id: Option<PackageId>,
limit: Option<usize>,
cursor: Option<String>,
before_flag: bool,
) -> Result<LogResponse, Error> {
let mut cmd = Command::new("journalctl");
before: bool,
) -> Result<(), RpcError> {
let res = rpc_toolkit::command_helpers::call_remote(
ctx.clone(),
method,
serde_json::json!({
"id": id,
"limit": limit,
"cursor": cursor,
"before": before,
}),
PhantomData::<LogResponse>,
)
.await?
.result?;
let limit = limit.unwrap_or(50);
for entry in res.entries.iter() {
println!("{}", entry);
}
Ok(())
}
pub async fn cli_logs_generic_follow(
ctx: CliContext,
method: &str,
id: Option<PackageId>,
limit: Option<usize>,
) -> Result<(), RpcError> {
let res = rpc_toolkit::command_helpers::call_remote(
ctx.clone(),
method,
serde_json::json!({
"id": id,
"limit": limit,
}),
PhantomData::<LogFollowResponse>,
)
.await?
.result?;
let mut base_url = ctx.base_url.clone();
let ws_scheme = match base_url.scheme() {
"https" => "wss",
"http" => "ws",
_ => {
return Err(Error::new(
eyre!("Cannot parse scheme from base URL"),
crate::ErrorKind::ParseUrl,
)
.into())
}
};
base_url.set_scheme(ws_scheme).or_else(|_| {
Err(Error::new(
eyre!("Cannot set URL scheme"),
crate::ErrorKind::ParseUrl,
))
})?;
let (mut stream, _) =
// base_url is "http://127.0.0.1/", with a trailing slash, so we don't put a leading slash in this path:
tokio_tungstenite::connect_async(format!("{}ws/rpc/{}", base_url, res.guid)).await?;
while let Some(log) = stream.try_next().await? {
match log {
Message::Text(log) => {
println!("{}", serde_json::from_str::<LogEntry>(&log)?);
}
_ => (),
}
}
Ok(())
}
async fn journalctl(
id: LogSource,
limit: usize,
cursor: Option<&str>,
before: bool,
follow: bool,
) -> Result<LogStream, Error> {
let mut cmd = Command::new("journalctl");
cmd.kill_on_drop(true);
cmd.arg("--output=json");
cmd.arg("--output-fields=MESSAGE");
@@ -163,16 +370,15 @@ pub async fn fetch_logs(
}
};
let cursor_formatted = format!("--after-cursor={}", cursor.clone().unwrap_or("".to_owned()));
let mut get_prev_logs_and_reverse = false;
let cursor_formatted = format!("--after-cursor={}", cursor.clone().unwrap_or(""));
if cursor.is_some() {
cmd.arg(&cursor_formatted);
if before_flag {
get_prev_logs_and_reverse = true;
if before {
cmd.arg("--reverse");
}
}
if get_prev_logs_and_reverse {
cmd.arg("--reverse");
if follow {
cmd.arg("--follow");
}
let mut child = cmd.stdout(Stdio::piped()).spawn()?;
@@ -185,7 +391,7 @@ pub async fn fetch_logs(
let journalctl_entries = LinesStream::new(out.lines());
let mut deserialized_entries = journalctl_entries
let deserialized_entries = journalctl_entries
.map_err(|e| Error::new(e, crate::ErrorKind::Journald))
.and_then(|s| {
futures::future::ready(
@@ -194,16 +400,37 @@ pub async fn fetch_logs(
)
});
Ok(LogStream {
_child: child,
entries: deserialized_entries.boxed(),
})
}
#[instrument]
pub async fn fetch_logs(
id: LogSource,
limit: Option<usize>,
cursor: Option<String>,
before: bool,
) -> Result<LogResponse, Error> {
let limit = limit.unwrap_or(50);
let mut stream = journalctl(id, limit, cursor.as_deref(), before, false).await?;
let mut entries = Vec::with_capacity(limit);
let mut start_cursor = None;
if let Some(first) = deserialized_entries.try_next().await? {
if let Some(first) = tokio::time::timeout(Duration::from_secs(1), stream.try_next())
.await
.ok()
.transpose()?
.flatten()
{
let (cursor, entry) = first.log_entry()?;
start_cursor = Some(cursor);
entries.push(entry);
}
let (mut end_cursor, entries) = deserialized_entries
let (mut end_cursor, entries) = stream
.try_fold(
(start_cursor.clone(), entries),
|(_, mut acc), entry| async move {
@@ -215,7 +442,7 @@ pub async fn fetch_logs(
.await?;
let mut entries = Reversible::new(entries);
// reverse again so output is always in increasing chronological order
if get_prev_logs_and_reverse {
if cursor.is_some() && before {
entries.reverse();
std::mem::swap(&mut start_cursor, &mut end_cursor);
}
@@ -226,21 +453,81 @@ pub async fn fetch_logs(
})
}
#[tokio::test]
pub async fn test_logs() {
let response = fetch_logs(
// change `tor.service` to an actual journald unit on your machine
// LogSource::Service("tor.service"),
// first run `docker run --name=hello-world.embassy --log-driver=journald hello-world`
LogSource::Container("hello-world".parse().unwrap()),
// Some(5),
None,
None,
// Some("s=1b8c418e28534400856c27b211dd94fd;i=5a7;b=97571c13a1284f87bc0639b5cff5acbe;m=740e916;t=5ca073eea3445;x=f45bc233ca328348".to_owned()),
false,
#[instrument(skip(ctx))]
pub async fn follow_logs(
ctx: RpcContext,
id: LogSource,
limit: Option<usize>,
) -> Result<LogFollowResponse, Error> {
let limit = limit.unwrap_or(50);
let mut stream = journalctl(id, limit, None, false, true).await?;
let mut start_cursor = None;
let mut first_entry = None;
if let Some(first) = tokio::time::timeout(Duration::from_secs(1), stream.try_next())
.await
.ok()
.transpose()?
.flatten()
{
let (cursor, entry) = first.log_entry()?;
start_cursor = Some(cursor);
first_entry = Some(entry);
}
let guid = RequestGuid::new();
ctx.add_continuation(
guid.clone(),
RpcContinuation::ws(
Box::new(move |ws_fut| ws_handler(first_entry, stream, ws_fut).boxed()),
Duration::from_secs(30),
),
)
.await
.unwrap();
let serialized = serde_json::to_string_pretty(&response).unwrap();
println!("{}", serialized);
.await;
Ok(LogFollowResponse { start_cursor, guid })
}
// #[tokio::test]
// pub async fn test_logs() {
// let response = fetch_logs(
// // change `tor.service` to an actual journald unit on your machine
// // LogSource::Service("tor.service"),
// // first run `docker run --name=hello-world.embassy --log-driver=journald hello-world`
// LogSource::Container("hello-world".parse().unwrap()),
// // Some(5),
// None,
// None,
// // Some("s=1b8c418e28534400856c27b211dd94fd;i=5a7;b=97571c13a1284f87bc0639b5cff5acbe;m=740e916;t=5ca073eea3445;x=f45bc233ca328348".to_owned()),
// false,
// true,
// )
// .await
// .unwrap();
// let serialized = serde_json::to_string_pretty(&response).unwrap();
// println!("{}", serialized);
// }
// #[tokio::test]
// pub async fn test_logs() {
// let mut cmd = Command::new("journalctl");
// cmd.kill_on_drop(true);
// cmd.arg("-f");
// cmd.arg("CONTAINER_NAME=hello-world.embassy");
// let mut child = cmd.stdout(Stdio::piped()).spawn().unwrap();
// let out = BufReader::new(
// child
// .stdout
// .take()
// .ok_or_else(|| Error::new(eyre!("No stdout available"), crate::ErrorKind::Journald))
// .unwrap(),
// );
// let mut journalctl_entries = LinesStream::new(out.lines());
// while let Some(line) = journalctl_entries.try_next().await.unwrap() {
// dbg!(line);
// }
// }

View File

@@ -1,16 +1,95 @@
use std::collections::BTreeMap;
use std::sync::atomic::{AtomicBool, Ordering};
use patch_db::{DbHandle, LockType};
use patch_db::{DbHandle, LockReceipt, LockType};
use tracing::instrument;
use crate::context::RpcContext;
use crate::db::model::CurrentDependents;
use crate::dependencies::{break_transitive, heal_transitive, DependencyError};
use crate::s9pk::manifest::PackageId;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::status::health_check::{HealthCheckId, HealthCheckResult};
use crate::status::MainStatus;
use crate::Error;
/// Read-locked receipts for the data needed *before* running a package's
/// health checks: its current main status and its manifest.
struct HealthCheckPreInformationReceipt {
    status_model: LockReceipt<MainStatus, ()>,
    manifest: LockReceipt<Manifest, ()>,
}
impl HealthCheckPreInformationReceipt {
    /// Acquire all required locks in a single `lock_all` call and verify
    /// them into typed receipts.
    pub async fn new(db: &'_ mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
        let mut locks = Vec::new();
        let setup = Self::setup(&mut locks, id);
        setup(&db.lock_all(locks).await?)
    }

    /// Register the lock targets on `locks` and return a closure that,
    /// given the verifier produced by `lock_all`, builds the receipt.
    pub fn setup(
        locks: &mut Vec<patch_db::LockTargetId>,
        id: &PackageId,
    ) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
        // package-data.<id>.installed.status.main — read lock
        let status_model = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .and_then(|x| x.installed())
            .map(|x| x.status().main())
            .make_locker(LockType::Read)
            .add_to_keys(locks);
        // package-data.<id>.installed.manifest — read lock
        let manifest = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .and_then(|x| x.installed())
            .map(|x| x.manifest())
            .make_locker(LockType::Read)
            .add_to_keys(locks);
        move |skeleton_key| {
            Ok(Self {
                status_model: status_model.verify(skeleton_key)?,
                manifest: manifest.verify(skeleton_key)?,
            })
        }
    }
}
/// Receipts used *after* health checks run: a write lock on the package's
/// main status (to record results) and a read lock on its current
/// dependents (to propagate breakage).
struct HealthCheckStatusReceipt {
    status: LockReceipt<MainStatus, ()>,
    current_dependents: LockReceipt<CurrentDependents, ()>,
}
impl HealthCheckStatusReceipt {
    /// Acquire all required locks in a single `lock_all` call and verify
    /// them into typed receipts.
    pub async fn new(db: &'_ mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
        let mut locks = Vec::new();
        let setup = Self::setup(&mut locks, id);
        setup(&db.lock_all(locks).await?)
    }

    /// Register the lock targets on `locks` and return a closure that,
    /// given the verifier produced by `lock_all`, builds the receipt.
    pub fn setup(
        locks: &mut Vec<patch_db::LockTargetId>,
        id: &PackageId,
    ) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
        // package-data.<id>.installed.status.main — write lock (results
        // are stored back into it)
        let status = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .and_then(|x| x.installed())
            .map(|x| x.status().main())
            .make_locker(LockType::Write)
            .add_to_keys(locks);
        // package-data.<id>.installed.current-dependents — read lock
        let current_dependents = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .and_then(|x| x.installed())
            .map(|x| x.current_dependents())
            .make_locker(LockType::Read)
            .add_to_keys(locks);
        move |skeleton_key| {
            Ok(Self {
                status: status.verify(skeleton_key)?,
                current_dependents: current_dependents.verify(skeleton_key)?,
            })
        }
    }
}
#[instrument(skip(ctx, db))]
pub async fn check<Db: DbHandle>(
ctx: &RpcContext,
@@ -19,35 +98,17 @@ pub async fn check<Db: DbHandle>(
should_commit: &AtomicBool,
) -> Result<(), Error> {
let mut tx = db.begin().await?;
let (manifest, started) = {
let mut checkpoint = tx.begin().await?;
let receipts = HealthCheckPreInformationReceipt::new(&mut checkpoint, id).await?;
let mut checkpoint = tx.begin().await?;
let manifest = receipts.manifest.get(&mut checkpoint).await?;
let installed_model = crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.expect(&mut checkpoint)
.await?
.installed()
.expect(&mut checkpoint)
.await?;
let started = receipts.status_model.get(&mut checkpoint).await?.started();
let manifest = installed_model
.clone()
.manifest()
.get(&mut checkpoint, true)
.await?
.into_owned();
let started = installed_model
.clone()
.status()
.main()
.started()
.get(&mut checkpoint, true)
.await?
.into_owned();
checkpoint.save().await?;
checkpoint.save().await?;
(manifest, started)
};
let health_results = if let Some(started) = started {
manifest
@@ -61,48 +122,35 @@ pub async fn check<Db: DbHandle>(
if !should_commit.load(Ordering::SeqCst) {
return Ok(());
}
let current_dependents = {
let mut checkpoint = tx.begin().await?;
let receipts = HealthCheckStatusReceipt::new(&mut checkpoint, id).await?;
let mut checkpoint = tx.begin().await?;
let status = receipts.status.get(&mut checkpoint).await?;
crate::db::DatabaseModel::new()
.package_data()
.lock(&mut checkpoint, LockType::Write)
.await?;
let mut status = crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.expect(&mut checkpoint)
.await?
.installed()
.expect(&mut checkpoint)
.await?
.status()
.main()
.get_mut(&mut checkpoint)
.await?;
match &mut *status {
MainStatus::Running { health, .. } => {
*health = health_results.clone();
if let MainStatus::Running { health: _, started } = status {
receipts
.status
.set(
&mut checkpoint,
MainStatus::Running {
health: health_results.clone(),
started,
},
)
.await?;
}
_ => (),
}
let current_dependents = receipts.current_dependents.get(&mut checkpoint).await?;
status.save(&mut checkpoint).await?;
let current_dependents = installed_model
.current_dependents()
.get(&mut checkpoint, true)
.await?;
checkpoint.save().await?;
checkpoint.save().await?;
current_dependents
};
tracing::debug!("Checking health of {}", id);
let receipts = crate::dependencies::BreakTransitiveReceipts::new(&mut tx).await?;
tracing::debug!("Got receipts {}", id);
for (dependent, info) in (*current_dependents).0.iter() {
for (dependent, info) in (current_dependents).0.iter() {
let failures: BTreeMap<HealthCheckId, HealthCheckResult> = health_results
.iter()
.filter(|(_, hc_res)| !matches!(hc_res, HealthCheckResult::Success { .. }))

View File

@@ -12,7 +12,7 @@ use color_eyre::eyre::eyre;
use nix::sys::signal::Signal;
use num_enum::TryFromPrimitive;
use patch_db::DbHandle;
use sqlx::{Executor, Sqlite};
use sqlx::{Executor, Postgres};
use tokio::sync::watch::error::RecvError;
use tokio::sync::watch::{channel, Receiver, Sender};
use tokio::sync::{Notify, RwLock};
@@ -47,7 +47,7 @@ impl ManagerMap {
secrets: &mut Ex,
) -> Result<(), Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
let mut res = BTreeMap::new();
for package in crate::db::DatabaseModel::new()
@@ -229,7 +229,10 @@ async fn run_main(
break;
}
}
Err(bollard::errors::Error::DockerResponseNotFoundError { .. }) => (),
Err(bollard::errors::Error::DockerResponseServerError {
status_code: 404, // NOT FOUND
..
}) => (),
Err(e) => Err(e)?,
}
match futures::poll!(&mut runtime) {
@@ -375,8 +378,13 @@ impl Manager {
.or_else(|e| {
if matches!(
e,
bollard::errors::Error::DockerResponseConflictError { .. }
| bollard::errors::Error::DockerResponseNotFoundError { .. }
bollard::errors::Error::DockerResponseServerError {
status_code: 409, // CONFLICT
..
} | bollard::errors::Error::DockerResponseServerError {
status_code: 404, // NOT FOUND
..
}
) {
Ok(())
} else {
@@ -413,9 +421,18 @@ impl Manager {
)
.await
{
Err(bollard::errors::Error::DockerResponseNotFoundError { .. })
| Err(bollard::errors::Error::DockerResponseConflictError { .. })
| Err(bollard::errors::Error::DockerResponseNotModifiedError { .. }) => (), // Already stopped
Err(bollard::errors::Error::DockerResponseServerError {
status_code: 404, // NOT FOUND
..
})
| Err(bollard::errors::Error::DockerResponseServerError {
status_code: 409, // CONFLICT
..
})
| Err(bollard::errors::Error::DockerResponseServerError {
status_code: 304, // NOT MODIFIED
..
}) => (), // Already stopped
a => a?,
};
self.shared.status.store(
@@ -566,9 +583,18 @@ async fn stop(shared: &ManagerSharedState) -> Result<(), Error> {
)
.await
{
Err(bollard::errors::Error::DockerResponseNotFoundError { .. })
| Err(bollard::errors::Error::DockerResponseConflictError { .. })
| Err(bollard::errors::Error::DockerResponseNotModifiedError { .. }) => (), // Already stopped
Err(bollard::errors::Error::DockerResponseServerError {
status_code: 404, // NOT FOUND
..
})
| Err(bollard::errors::Error::DockerResponseServerError {
status_code: 409, // CONFLICT
..
})
| Err(bollard::errors::Error::DockerResponseServerError {
status_code: 304, // NOT MODIFIED
..
}) => (), // Already stopped
a => a?,
};
shared.status.store(

View File

@@ -12,7 +12,9 @@ use rpc_toolkit::command_helpers::prelude::RequestParts;
use rpc_toolkit::hyper::header::COOKIE;
use rpc_toolkit::hyper::http::Error as HttpError;
use rpc_toolkit::hyper::{Body, Request, Response};
use rpc_toolkit::rpc_server_helpers::{noop3, to_response, DynMiddleware, DynMiddlewareStage2};
use rpc_toolkit::rpc_server_helpers::{
noop4, to_response, DynMiddleware, DynMiddlewareStage2, DynMiddlewareStage3,
};
use rpc_toolkit::yajrc::RpcMethod;
use rpc_toolkit::Metadata;
use serde::{Deserialize, Serialize};
@@ -39,7 +41,7 @@ impl HasLoggedOutSessions {
for session in logged_out_sessions {
let session = session.as_logout_session_id();
sqlx::query!(
"UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = ?",
"UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = $1",
session
)
.execute(&mut sqlx_conn)
@@ -66,7 +68,7 @@ impl HasValidSession {
pub async fn from_session(session: &HashSessionToken, ctx: &RpcContext) -> Result<Self, Error> {
let session_hash = session.hashed();
let session = sqlx::query!("UPDATE session SET last_active = CURRENT_TIMESTAMP WHERE id = ? AND logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP", session_hash)
let session = sqlx::query!("UPDATE session SET last_active = CURRENT_TIMESTAMP WHERE id = $1 AND logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP", session_hash)
.execute(&mut ctx.secret_store.acquire().await?)
.await?;
if session.rows_affected() == 0 {
@@ -198,8 +200,7 @@ pub fn auth<M: Metadata>(ctx: RpcContext) -> DynMiddleware<M> {
|_| StatusCode::OK,
)?));
} else if rpc_req.method.as_str() == "auth.login" {
let mut guard = rate_limiter.lock().await;
guard.0 += 1;
let guard = rate_limiter.lock().await;
if guard.1.elapsed() < Duration::from_secs(20) {
if guard.0 >= 3 {
let (res_parts, _) = Response::new(()).into_parts();
@@ -216,13 +217,25 @@ pub fn auth<M: Metadata>(ctx: RpcContext) -> DynMiddleware<M> {
|_| StatusCode::OK,
)?));
}
}
}
}
let m3: DynMiddlewareStage3 = Box::new(move |_, res| {
async move {
let mut guard = rate_limiter.lock().await;
if guard.1.elapsed() < Duration::from_secs(20) {
if res.is_err() {
guard.0 += 1;
}
} else {
guard.0 = 0;
}
guard.1 = Instant::now();
Ok(Ok(noop4()))
}
}
Ok(Ok(noop3()))
.boxed()
});
Ok(Ok(m3))
}
.boxed()
});

View File

@@ -0,0 +1,84 @@
use color_eyre::eyre::eyre;
use futures::future::BoxFuture;
use futures::FutureExt;
use http::HeaderValue;
use rpc_toolkit::hyper::http::Error as HttpError;
use rpc_toolkit::hyper::{Body, Request, Response};
use rpc_toolkit::rpc_server_helpers::{
noop4, DynMiddleware, DynMiddlewareStage2, DynMiddlewareStage3,
};
use rpc_toolkit::yajrc::RpcMethod;
use rpc_toolkit::Metadata;
use crate::context::RpcContext;
use crate::{Error, ResultExt};
pub fn db<M: Metadata>(ctx: RpcContext) -> DynMiddleware<M> {
Box::new(
move |_: &mut Request<Body>,
metadata: M|
-> BoxFuture<Result<Result<DynMiddlewareStage2, Response<Body>>, HttpError>> {
let ctx = ctx.clone();
async move {
let m2: DynMiddlewareStage2 = Box::new(move |req, rpc_req| {
async move {
let seq = req.headers.remove("x-patch-sequence");
let sync_db = metadata
.get(rpc_req.method.as_str(), "sync_db")
.unwrap_or(false);
let m3: DynMiddlewareStage3 = Box::new(move |res, _| {
async move {
if sync_db && seq.is_some() {
match async {
let seq = seq
.ok_or_else(|| {
Error::new(
eyre!("Missing X-Patch-Sequence"),
crate::ErrorKind::InvalidRequest,
)
})?
.to_str()
.with_kind(crate::ErrorKind::InvalidRequest)?
.parse()?;
let res = ctx.db.sync(seq).await?;
let json = match res {
Ok(revs) => serde_json::to_vec(&revs),
Err(dump) => serde_json::to_vec(&[dump]),
}
.with_kind(crate::ErrorKind::Serialization)?;
Ok::<_, Error>(base64::encode_config(
&json,
base64::URL_SAFE,
))
}
.await
{
Ok(a) => res
.headers
.append("X-Patch-Updates", HeaderValue::from_str(&a)?),
Err(e) => res.headers.append(
"X-Patch-Error",
HeaderValue::from_str(
&base64::encode_config(
&e.to_string(),
base64::URL_SAFE,
),
)?,
),
};
}
Ok(Ok(noop4()))
}
.boxed()
});
Ok(Ok(m3))
}
.boxed()
});
Ok(Ok(m2))
}
.boxed()
},
)
}

View File

@@ -1,24 +1,14 @@
use std::future::Future;
use std::sync::Arc;
use aes::cipher::{CipherKey, NewCipher, Nonce, StreamCipher};
use aes::Aes256Ctr;
use color_eyre::eyre::eyre;
use futures::future::BoxFuture;
use futures::{FutureExt, Stream};
use futures::Stream;
use hmac::Hmac;
use http::{HeaderMap, HeaderValue};
use rpc_toolkit::hyper::http::Error as HttpError;
use rpc_toolkit::hyper::{self, Body, Request, Response, StatusCode};
use rpc_toolkit::rpc_server_helpers::{
to_response, DynMiddleware, DynMiddlewareStage2, DynMiddlewareStage3, DynMiddlewareStage4,
};
use rpc_toolkit::yajrc::RpcMethod;
use rpc_toolkit::Metadata;
use josekit::jwk::Jwk;
use rpc_toolkit::hyper::{self, Body};
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use crate::util::Apply;
use crate::Error;
use tracing::instrument;
pub fn pbkdf2(password: impl AsRef<[u8]>, salt: impl AsRef<[u8]>) -> CipherKey<Aes256Ctr> {
let mut aeskey = CipherKey::<Aes256Ctr>::default();
@@ -35,7 +25,7 @@ pub fn encrypt_slice(input: impl AsRef<[u8]>, password: impl AsRef<[u8]>) -> Vec
let prefix: [u8; 32] = rand::random();
let aeskey = pbkdf2(password.as_ref(), &prefix[16..]);
let ctr = Nonce::<Aes256Ctr>::from_slice(&prefix[..16]);
let mut aes = Aes256Ctr::new(&aeskey, &ctr);
let mut aes = Aes256Ctr::new(&aeskey, ctr);
let mut res = Vec::with_capacity(32 + input.as_ref().len());
res.extend_from_slice(&prefix[..]);
res.extend_from_slice(input.as_ref());
@@ -50,225 +40,79 @@ pub fn decrypt_slice(input: impl AsRef<[u8]>, password: impl AsRef<[u8]>) -> Vec
let (prefix, rest) = input.as_ref().split_at(32);
let aeskey = pbkdf2(password.as_ref(), &prefix[16..]);
let ctr = Nonce::<Aes256Ctr>::from_slice(&prefix[..16]);
let mut aes = Aes256Ctr::new(&aeskey, &ctr);
let mut aes = Aes256Ctr::new(&aeskey, ctr);
let mut res = rest.to_vec();
aes.apply_keystream(&mut res);
res
}
#[pin_project::pin_project]
pub struct DecryptStream {
key: Arc<String>,
#[pin]
body: Body,
ctr: Vec<u8>,
salt: Vec<u8>,
aes: Option<Aes256Ctr>,
}
impl DecryptStream {
pub fn new(key: Arc<String>, body: Body) -> Self {
DecryptStream {
key,
body,
ctr: Vec::new(),
salt: Vec::new(),
aes: None,
}
}
}
impl Stream for DecryptStream {
type Item = hyper::Result<hyper::body::Bytes>;
fn poll_next(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Option<Self::Item>> {
let this = self.project();
match this.body.poll_next(cx) {
std::task::Poll::Pending => std::task::Poll::Pending,
std::task::Poll::Ready(Some(Ok(bytes))) => std::task::Poll::Ready(Some(Ok({
let mut buf = &*bytes;
if let Some(aes) = this.aes.as_mut() {
let mut res = buf.to_vec();
aes.apply_keystream(&mut res);
res.into()
} else {
if this.ctr.len() < 16 && buf.len() > 0 {
let to_read = std::cmp::min(16 - this.ctr.len(), buf.len());
this.ctr.extend_from_slice(&buf[0..to_read]);
buf = &buf[to_read..];
}
if this.salt.len() < 16 && buf.len() > 0 {
let to_read = std::cmp::min(16 - this.salt.len(), buf.len());
this.salt.extend_from_slice(&buf[0..to_read]);
buf = &buf[to_read..];
}
if this.ctr.len() == 16 && this.salt.len() == 16 {
let aeskey = pbkdf2(this.key.as_bytes(), &this.salt);
let ctr = Nonce::<Aes256Ctr>::from_slice(&this.ctr);
let mut aes = Aes256Ctr::new(&aeskey, &ctr);
let mut res = buf.to_vec();
aes.apply_keystream(&mut res);
*this.aes = Some(aes);
res.into()
} else {
hyper::body::Bytes::new()
}
}
}))),
std::task::Poll::Ready(a) => std::task::Poll::Ready(a),
}
}
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct EncryptedWire {
encrypted: serde_json::Value,
}
impl EncryptedWire {
#[instrument(skip(current_secret))]
pub fn decrypt(self, current_secret: impl AsRef<Jwk>) -> Option<String> {
let current_secret = current_secret.as_ref();
#[pin_project::pin_project]
pub struct EncryptStream {
#[pin]
body: Body,
aes: Aes256Ctr,
prefix: Option<[u8; 32]>,
}
impl EncryptStream {
pub fn new(key: &str, body: Body) -> Self {
let prefix: [u8; 32] = rand::random();
let aeskey = pbkdf2(key.as_bytes(), &prefix[16..]);
let ctr = Nonce::<Aes256Ctr>::from_slice(&prefix[..16]);
let aes = Aes256Ctr::new(&aeskey, &ctr);
EncryptStream {
body,
aes,
prefix: Some(prefix),
}
}
}
impl Stream for EncryptStream {
type Item = hyper::Result<hyper::body::Bytes>;
fn poll_next(
self: std::pin::Pin<&mut Self>,
cx: &mut std::task::Context<'_>,
) -> std::task::Poll<Option<Self::Item>> {
let this = self.project();
if let Some(prefix) = this.prefix.take() {
std::task::Poll::Ready(Some(Ok(prefix.to_vec().into())))
} else {
match this.body.poll_next(cx) {
std::task::Poll::Pending => std::task::Poll::Pending,
std::task::Poll::Ready(Some(Ok(bytes))) => std::task::Poll::Ready(Some(Ok({
let mut res = bytes.to_vec();
this.aes.apply_keystream(&mut res);
res.into()
}))),
std::task::Poll::Ready(a) => std::task::Poll::Ready(a),
let decrypter = match josekit::jwe::alg::ecdh_es::EcdhEsJweAlgorithm::EcdhEs
.decrypter_from_jwk(current_secret)
{
Ok(a) => a,
Err(e) => {
tracing::warn!("Could not setup awk");
tracing::debug!("{:?}", e);
return None;
}
};
let encrypted = match serde_json::to_string(&self.encrypted) {
Ok(a) => a,
Err(e) => {
tracing::warn!("Could not deserialize");
tracing::debug!("{:?}", e);
return None;
}
};
let (decoded, _) = match josekit::jwe::deserialize_json(&encrypted, &decrypter) {
Ok(a) => a,
Err(e) => {
tracing::warn!("Could not decrypt");
tracing::debug!("{:?}", e);
return None;
}
};
match String::from_utf8(decoded) {
Ok(a) => Some(a),
Err(e) => {
tracing::warn!("Could not decrypt into utf8");
tracing::debug!("{:?}", e);
return None;
}
}
}
}
fn encrypted(headers: &HeaderMap) -> bool {
headers
.get("Content-Encoding")
.and_then(|h| {
h.to_str()
.ok()?
.split(",")
.any(|s| s == "aesctr256")
.apply(Some)
})
.unwrap_or_default()
}
pub fn encrypt<
F: Fn() -> Fut + Send + Sync + Clone + 'static,
Fut: Future<Output = Result<Arc<String>, Error>> + Send + Sync + 'static,
M: Metadata,
>(
keysource: F,
) -> DynMiddleware<M> {
Box::new(
move |req: &mut Request<Body>,
metadata: M|
-> BoxFuture<Result<Result<DynMiddlewareStage2, Response<Body>>, HttpError>> {
let keysource = keysource.clone();
async move {
let encrypted = encrypted(req.headers());
let key = if encrypted {
let key = match keysource().await {
Ok(s) => s,
Err(e) => {
let (res_parts, _) = Response::new(()).into_parts();
return Ok(Err(to_response(
req.headers(),
res_parts,
Err(e.into()),
|_| StatusCode::OK,
)?));
}
};
let body = std::mem::take(req.body_mut());
*req.body_mut() = Body::wrap_stream(DecryptStream::new(key.clone(), body));
Some(key)
} else {
None
};
let res: DynMiddlewareStage2 = Box::new(move |req, rpc_req| {
async move {
if !encrypted
&& metadata
.get(&rpc_req.method.as_str(), "authenticated")
.unwrap_or(true)
{
let (res_parts, _) = Response::new(()).into_parts();
Ok(Err(to_response(
&req.headers,
res_parts,
Err(Error::new(
eyre!("Must be encrypted"),
crate::ErrorKind::Authorization,
)
.into()),
|_| StatusCode::OK,
)?))
} else {
let res: DynMiddlewareStage3 = Box::new(move |_, _| {
async move {
let res: DynMiddlewareStage4 = Box::new(move |res| {
async move {
if let Some(key) = key {
res.headers_mut().insert(
"Content-Encoding",
HeaderValue::from_static("aesctr256"),
);
if let Some(len_header) =
res.headers_mut().get_mut("Content-Length")
{
if let Some(len) = len_header
.to_str()
.ok()
.and_then(|l| l.parse::<u64>().ok())
{
*len_header = HeaderValue::from(len + 32);
}
}
let body = std::mem::take(res.body_mut());
*res.body_mut() = Body::wrap_stream(
EncryptStream::new(key.as_ref(), body),
);
}
Ok(())
}
.boxed()
});
Ok(Ok(res))
}
.boxed()
});
Ok(Ok(res))
}
}
.boxed()
});
Ok(Ok(res))
}
.boxed()
},
/// We created this test by first making the private key, then restoring from this private key for recreatability.
/// After this the frontend then encoded an password, then we are testing that the output that we got (hand coded)
/// will be the shape we want.
#[test]
fn test_gen_awk() {
let private_key: Jwk = serde_json::from_str(
r#"{
"kty": "EC",
"crv": "P-256",
"d": "3P-MxbUJtEhdGGpBCRFXkUneGgdyz_DGZWfIAGSCHOU",
"x": "yHTDYSfjU809fkSv9MmN4wuojf5c3cnD7ZDN13n-jz4",
"y": "8Mpkn744A5KDag0DmX2YivB63srjbugYZzWc3JOpQXI"
}"#,
)
.unwrap();
let encrypted: EncryptedWire = serde_json::from_str(r#"{
"encrypted": { "protected": "eyJlbmMiOiJBMTI4Q0JDLUhTMjU2IiwiYWxnIjoiRUNESC1FUyIsImtpZCI6ImgtZnNXUVh2Tm95dmJEazM5dUNsQ0NUdWc5N3MyZnJockJnWUVBUWVtclUiLCJlcGsiOnsia3R5IjoiRUMiLCJjcnYiOiJQLTI1NiIsIngiOiJmRkF0LXNWYWU2aGNkdWZJeUlmVVdUd3ZvWExaTkdKRHZIWVhIckxwOXNNIiwieSI6IjFvVFN6b00teHlFZC1SLUlBaUFHdXgzS1dJZmNYZHRMQ0JHLUh6MVkzY2sifX0", "iv": "NbwvfvWOdLpZfYRIZUrkcw", "ciphertext": "Zc5Br5kYOlhPkIjQKOLMJw", "tag": "EPoch52lDuCsbUUulzZGfg" }
}"#).unwrap();
assert_eq!(
"testing12345",
&encrypted.decrypt(Arc::new(private_key)).unwrap()
);
}

View File

@@ -1,4 +1,5 @@
pub mod auth;
pub mod cors;
pub mod db;
pub mod diagnostic;
pub mod encrypt;

7
backend/src/migrate.load Normal file
View File

@@ -0,0 +1,7 @@
load database
from sqlite://{sqlite_path}
into postgresql://root@unix:/var/run/postgresql:5432/secrets
with include no drop, truncate, reset sequences, data only, workers = 1, concurrency = 1, max parallel create index = 1, batch rows = {batch_rows}, prefetch rows = {prefetch_rows}
excluding table names like '_sqlx_migrations', 'notifications';

View File

@@ -10,8 +10,7 @@ use tracing::instrument;
use crate::context::RpcContext;
use crate::id::ImageId;
use crate::procedure::PackageProcedure;
use crate::procedure::ProcedureName;
use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
use crate::util::Version;
use crate::volume::Volumes;
@@ -25,22 +24,31 @@ pub struct Migrations {
}
impl Migrations {
#[instrument]
pub fn validate(&self, volumes: &Volumes, image_ids: &BTreeSet<ImageId>) -> Result<(), Error> {
pub fn validate(
&self,
eos_version: &Version,
volumes: &Volumes,
image_ids: &BTreeSet<ImageId>,
) -> Result<(), Error> {
for (version, migration) in &self.from {
migration.validate(volumes, image_ids, true).with_ctx(|_| {
(
crate::ErrorKind::ValidateS9pk,
format!("Migration from {}", version),
)
})?;
migration
.validate(eos_version, volumes, image_ids, true)
.with_ctx(|_| {
(
crate::ErrorKind::ValidateS9pk,
format!("Migration from {}", version),
)
})?;
}
for (version, migration) in &self.to {
migration.validate(volumes, image_ids, true).with_ctx(|_| {
(
crate::ErrorKind::ValidateS9pk,
format!("Migration to {}", version),
)
})?;
migration
.validate(eos_version, volumes, image_ids, true)
.with_ctx(|_| {
(
crate::ErrorKind::ValidateS9pk,
format!("Migration to {}", version),
)
})?;
}
Ok(())
}

View File

@@ -8,6 +8,7 @@ use futures::TryFutureExt;
use helpers::NonDetachingJoinHandle;
use models::PackageId;
use tokio::net::{TcpListener, UdpSocket};
use tokio::process::Command;
use tokio::sync::RwLock;
use trust_dns_server::authority::MessageResponseBuilder;
use trust_dns_server::client::op::{Header, ResponseCode};
@@ -15,9 +16,8 @@ use trust_dns_server::client::rr::{Name, Record, RecordType};
use trust_dns_server::server::{Request, RequestHandler, ResponseHandler, ResponseInfo};
use trust_dns_server::ServerFuture;
#[cfg(feature = "avahi")]
use crate::net::mdns::resolve_mdns;
use crate::{Error, ErrorKind, ResultExt};
use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt, HOST_IP};
pub struct DnsController {
services: Arc<RwLock<BTreeMap<PackageId, BTreeSet<Ipv4Addr>>>>,
@@ -31,25 +31,6 @@ struct Resolver {
impl Resolver {
async fn resolve(&self, name: &Name) -> Option<Vec<Ipv4Addr>> {
match name.iter().next_back() {
#[cfg(feature = "avahi")]
Some(b"local") => match resolve_mdns(&format!(
"{}.local",
name.iter()
.rev()
.skip(1)
.next()
.and_then(|v| std::str::from_utf8(v).ok())
.unwrap_or_default()
))
.await
{
Ok(ip) => Some(vec![ip]),
Err(e) => {
tracing::error!("{}", e);
tracing::debug!("{:?}", e);
None
}
},
Some(b"embassy") => {
if let Some(pkg) = name.iter().rev().skip(1).next() {
if let Some(ip) = self
@@ -63,7 +44,7 @@ impl Resolver {
None
}
} else {
None
Some(vec![HOST_IP.into()])
}
}
_ => None,
@@ -81,7 +62,11 @@ impl RequestHandler for Resolver {
let query = request.request_info().query;
if let Some(ip) = self.resolve(query.name().borrow()).await {
if query.query_type() != RecordType::A {
tracing::warn!("Non A-Record requested for {}", query.name());
tracing::warn!(
"Non A-Record requested for {}: {:?}",
query.name(),
query.query_type()
);
}
response_handle
.send_response(
@@ -142,6 +127,13 @@ impl DnsController {
);
server.register_socket(UdpSocket::bind(bind).await.with_kind(ErrorKind::Network)?);
Command::new("systemd-resolve")
.arg("--set-dns=127.0.0.1")
.arg("--interface=br-start9")
.arg("--set-domain=embassy")
.invoke(ErrorKind::Network)
.await?;
let dns_server = tokio::spawn(
server
.block_until_done()

View File

@@ -4,8 +4,9 @@ use color_eyre::eyre::eyre;
use futures::TryStreamExt;
use indexmap::IndexSet;
use itertools::Either;
pub use models::InterfaceId;
use serde::{Deserialize, Deserializer, Serialize};
use sqlx::{Executor, Sqlite};
use sqlx::{Executor, Postgres};
use torut::onion::TorSecretKeyV3;
use tracing::instrument;
@@ -15,8 +16,6 @@ use crate::s9pk::manifest::PackageId;
use crate::util::serde::Port;
use crate::{Error, ResultExt};
pub use models::InterfaceId;
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct Interfaces(pub BTreeMap<InterfaceId, Interface>); // TODO
@@ -40,7 +39,7 @@ impl Interfaces {
package_id: &PackageId,
) -> Result<InterfaceAddressMap, Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
let mut interface_addresses = InterfaceAddressMap(BTreeMap::new());
for (id, iface) in &self.0 {
@@ -52,7 +51,7 @@ impl Interfaces {
let key = TorSecretKeyV3::generate();
let key_vec = key.as_bytes().to_vec();
sqlx::query!(
"INSERT OR IGNORE INTO tor (package, interface, key) VALUES (?, ?, ?)",
"INSERT INTO tor (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING",
**package_id,
**id,
key_vec,
@@ -60,7 +59,7 @@ impl Interfaces {
.execute(&mut *secrets)
.await?;
let key_row = sqlx::query!(
"SELECT key FROM tor WHERE package = ? AND interface = ?",
"SELECT key FROM tor WHERE package = $1 AND interface = $2",
**package_id,
**id,
)
@@ -90,10 +89,10 @@ impl Interfaces {
package_id: &PackageId,
) -> Result<BTreeMap<InterfaceId, TorSecretKeyV3>, Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
Ok(sqlx::query!(
"SELECT interface, key FROM tor WHERE package = ?",
"SELECT interface, key FROM tor WHERE package = $1",
**package_id
)
.fetch_many(secrets)
@@ -114,7 +113,6 @@ impl Interfaces {
}
}
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct Interface {

View File

@@ -1,25 +1,20 @@
use std::collections::BTreeMap;
use std::net::Ipv4Addr;
use avahi_sys::{
self, avahi_client_errno, avahi_entry_group_add_service, avahi_entry_group_commit,
avahi_entry_group_free, avahi_entry_group_reset, avahi_free, avahi_strerror, AvahiClient,
AvahiEntryGroup,
};
use color_eyre::eyre::eyre;
use libc::c_void;
use tokio::process::Command;
use tokio::process::{Child, Command};
use tokio::sync::Mutex;
use torut::onion::TorSecretKeyV3;
use super::interface::InterfaceId;
use crate::s9pk::manifest::PackageId;
use crate::util::Invoke;
use crate::Error;
use crate::{Error, ResultExt};
pub async fn resolve_mdns(hostname: &str) -> Result<Ipv4Addr, Error> {
Ok(String::from_utf8(
Command::new("avahi-resolve-host-name")
.kill_on_drop(true)
.arg("-4")
.arg(hostname)
.invoke(crate::ErrorKind::Network)
@@ -39,258 +34,79 @@ pub async fn resolve_mdns(hostname: &str) -> Result<Ipv4Addr, Error> {
pub struct MdnsController(Mutex<MdnsControllerInner>);
impl MdnsController {
pub fn init() -> Self {
MdnsController(Mutex::new(MdnsControllerInner::init()))
pub async fn init() -> Result<Self, Error> {
Ok(MdnsController(Mutex::new(
MdnsControllerInner::init().await?,
)))
}
pub async fn add<'a, I: IntoIterator<Item = (InterfaceId, TorSecretKeyV3)>>(
&self,
pkg_id: &PackageId,
interfaces: I,
) {
self.0.lock().await.add(pkg_id, interfaces)
) -> Result<(), Error> {
self.0.lock().await.add(pkg_id, interfaces).await
}
pub async fn remove<I: IntoIterator<Item = InterfaceId>>(
&self,
pkg_id: &PackageId,
interfaces: I,
) {
self.0.lock().await.remove(pkg_id, interfaces)
) -> Result<(), Error> {
self.0.lock().await.remove(pkg_id, interfaces).await
}
}
pub struct MdnsControllerInner {
hostname: Vec<u8>,
hostname_raw: *const libc::c_char,
entry_group: *mut AvahiEntryGroup,
alias_cmd: Option<Child>,
services: BTreeMap<(PackageId, InterfaceId), TorSecretKeyV3>,
_client_error: std::pin::Pin<Box<i32>>,
}
unsafe impl Send for MdnsControllerInner {}
unsafe impl Sync for MdnsControllerInner {}
impl MdnsControllerInner {
fn load_services(&mut self) {
unsafe {
tracing::debug!("Loading services for mDNS");
let mut res;
let http_tcp_cstr = std::ffi::CString::new("_http._tcp")
.expect("Could not cast _http._tcp to c string");
res = avahi_entry_group_add_service(
self.entry_group,
avahi_sys::AVAHI_IF_UNSPEC,
avahi_sys::AVAHI_PROTO_UNSPEC,
avahi_sys::AvahiPublishFlags_AVAHI_PUBLISH_USE_MULTICAST,
self.hostname_raw,
http_tcp_cstr.as_ptr(),
std::ptr::null(),
std::ptr::null(),
443,
// below is a secret final argument that the type signature of this function does not tell you that it
// needs. This is because the C lib function takes a variable number of final arguments indicating the
// desired TXT records to add to this service entry. The way it decides when to stop taking arguments
// from the stack and dereferencing them is when it finds a null pointer...because fuck you, that's why.
// The consequence of this is that forgetting this last argument will cause segfaults or other undefined
// behavior. Welcome back to the stone age motherfucker.
std::ptr::null::<libc::c_char>(),
);
if res < avahi_sys::AVAHI_OK {
log_str_error("add service to Avahi entry group", res);
panic!("Failed to load Avahi services");
}
tracing::info!(
"Published {:?}",
std::ffi::CStr::from_ptr(self.hostname_raw)
);
for key in self.services.values() {
let lan_address = key
.public()
.get_onion_address()
.get_address_without_dot_onion()
+ ".local";
tracing::debug!("Adding mdns CNAME entry for {}", &lan_address);
let lan_address_ptr = std::ffi::CString::new(lan_address)
.expect("Could not cast lan address to c string");
res = avahi_sys::avahi_entry_group_add_record(
self.entry_group,
avahi_sys::AVAHI_IF_UNSPEC,
avahi_sys::AVAHI_PROTO_UNSPEC,
avahi_sys::AvahiPublishFlags_AVAHI_PUBLISH_USE_MULTICAST
| avahi_sys::AvahiPublishFlags_AVAHI_PUBLISH_ALLOW_MULTIPLE,
lan_address_ptr.as_ptr(),
avahi_sys::AVAHI_DNS_CLASS_IN as u16,
avahi_sys::AVAHI_DNS_TYPE_CNAME as u16,
avahi_sys::AVAHI_DEFAULT_TTL,
self.hostname.as_ptr().cast(),
self.hostname.len(),
);
if res < avahi_sys::AVAHI_OK {
log_str_error("add CNAME record to Avahi entry group", res);
panic!("Failed to load Avahi services");
}
tracing::info!("Published {:?}", lan_address_ptr);
}
}
async fn init() -> Result<Self, Error> {
let mut res = MdnsControllerInner {
alias_cmd: None,
services: BTreeMap::new(),
};
res.sync().await?;
Ok(res)
}
fn init() -> Self {
unsafe {
tracing::debug!("Initializing mDNS controller");
let simple_poll = avahi_sys::avahi_simple_poll_new();
let poll = avahi_sys::avahi_simple_poll_get(simple_poll);
let mut box_err = Box::pin(0 as i32);
let err_c: *mut i32 = box_err.as_mut().get_mut();
let avahi_client = avahi_sys::avahi_client_new(
poll,
avahi_sys::AvahiClientFlags::AVAHI_CLIENT_NO_FAIL,
Some(client_callback),
std::ptr::null_mut(),
err_c,
);
if avahi_client == std::ptr::null_mut::<AvahiClient>() {
log_str_error("create Avahi client", *box_err);
panic!("Failed to create Avahi Client");
}
let group = avahi_sys::avahi_entry_group_new(
avahi_client,
Some(entry_group_callback),
std::ptr::null_mut(),
);
if group == std::ptr::null_mut() {
log_str_error("create Avahi entry group", avahi_client_errno(avahi_client));
panic!("Failed to create Avahi Entry Group");
}
let mut hostname_buf = vec![0];
let hostname_raw = avahi_sys::avahi_client_get_host_name_fqdn(avahi_client);
hostname_buf
.extend_from_slice(std::ffi::CStr::from_ptr(hostname_raw).to_bytes_with_nul());
let buflen = hostname_buf.len();
debug_assert!(hostname_buf.ends_with(b".local\0"));
debug_assert!(!hostname_buf[..(buflen - 7)].contains(&b'.'));
// assume fixed length prefix on hostname due to local address
hostname_buf[0] = (buflen - 8) as u8; // set the prefix length to len - 8 (leading byte, .local, nul) for the main address
hostname_buf[buflen - 7] = 5; // set the prefix length to 5 for "local"
let mut res = MdnsControllerInner {
hostname: hostname_buf,
hostname_raw,
entry_group: group,
services: BTreeMap::new(),
_client_error: box_err,
};
res.load_services();
let commit_err = avahi_entry_group_commit(res.entry_group);
if commit_err < avahi_sys::AVAHI_OK {
log_str_error("reset Avahi entry group", commit_err);
panic!("Failed to load Avahi services: reset");
}
res
async fn sync(&mut self) -> Result<(), Error> {
if let Some(mut cmd) = self.alias_cmd.take() {
cmd.kill().await.with_kind(crate::ErrorKind::Network)?;
}
self.alias_cmd = Some(
Command::new("avahi-alias")
.kill_on_drop(true)
.args(self.services.iter().map(|(_, key)| {
key.public()
.get_onion_address()
.get_address_without_dot_onion()
}))
.spawn()?,
);
Ok(())
}
fn sync(&mut self) {
unsafe {
let mut res;
res = avahi_entry_group_reset(self.entry_group);
if res < avahi_sys::AVAHI_OK {
log_str_error("reset Avahi entry group", res);
panic!("Failed to load Avahi services: reset");
}
self.load_services();
res = avahi_entry_group_commit(self.entry_group);
if res < avahi_sys::AVAHI_OK {
log_str_error("commit Avahi entry group", res);
panic!("Failed to load Avahi services: commit");
}
}
}
fn add<'a, I: IntoIterator<Item = (InterfaceId, TorSecretKeyV3)>>(
async fn add<'a, I: IntoIterator<Item = (InterfaceId, TorSecretKeyV3)>>(
&mut self,
pkg_id: &PackageId,
interfaces: I,
) {
) -> Result<(), Error> {
self.services.extend(
interfaces
.into_iter()
.map(|(interface_id, key)| ((pkg_id.clone(), interface_id), key)),
);
self.sync();
self.sync().await?;
Ok(())
}
fn remove<I: IntoIterator<Item = InterfaceId>>(&mut self, pkg_id: &PackageId, interfaces: I) {
async fn remove<I: IntoIterator<Item = InterfaceId>>(
&mut self,
pkg_id: &PackageId,
interfaces: I,
) -> Result<(), Error> {
for interface_id in interfaces {
self.services.remove(&(pkg_id.clone(), interface_id));
}
self.sync();
}
}
impl Drop for MdnsControllerInner {
fn drop(&mut self) {
unsafe {
avahi_free(self.hostname_raw as *mut c_void);
avahi_entry_group_free(self.entry_group);
}
}
}
fn log_str_error(action: &str, e: i32) {
unsafe {
let e_str = avahi_strerror(e);
tracing::error!(
"Could not {}: {:?}",
action,
std::ffi::CStr::from_ptr(e_str)
);
avahi_free(e_str as *mut c_void);
}
}
unsafe extern "C" fn entry_group_callback(
_group: *mut avahi_sys::AvahiEntryGroup,
state: avahi_sys::AvahiEntryGroupState,
_userdata: *mut core::ffi::c_void,
) {
match state {
avahi_sys::AvahiEntryGroupState_AVAHI_ENTRY_GROUP_FAILURE => {
tracing::warn!("AvahiCallback: EntryGroupState = AVAHI_ENTRY_GROUP_FAILURE");
}
avahi_sys::AvahiEntryGroupState_AVAHI_ENTRY_GROUP_COLLISION => {
tracing::warn!("AvahiCallback: EntryGroupState = AVAHI_ENTRY_GROUP_COLLISION");
}
avahi_sys::AvahiEntryGroupState_AVAHI_ENTRY_GROUP_UNCOMMITED => {
tracing::warn!("AvahiCallback: EntryGroupState = AVAHI_ENTRY_GROUP_UNCOMMITED");
}
avahi_sys::AvahiEntryGroupState_AVAHI_ENTRY_GROUP_ESTABLISHED => {
tracing::warn!("AvahiCallback: EntryGroupState = AVAHI_ENTRY_GROUP_ESTABLISHED");
}
avahi_sys::AvahiEntryGroupState_AVAHI_ENTRY_GROUP_REGISTERING => {
tracing::warn!("AvahiCallback: EntryGroupState = AVAHI_ENTRY_GROUP_REGISTERING");
}
other => {
tracing::warn!("AvahiCallback: EntryGroupState = {}", other);
}
}
}
unsafe extern "C" fn client_callback(
_group: *mut avahi_sys::AvahiClient,
state: avahi_sys::AvahiClientState,
_userdata: *mut core::ffi::c_void,
) {
match state {
avahi_sys::AvahiClientState_AVAHI_CLIENT_FAILURE => {
tracing::warn!("AvahiCallback: ClientState = AVAHI_CLIENT_FAILURE");
}
avahi_sys::AvahiClientState_AVAHI_CLIENT_S_RUNNING => {
tracing::warn!("AvahiCallback: ClientState = AVAHI_CLIENT_S_RUNNING");
}
avahi_sys::AvahiClientState_AVAHI_CLIENT_CONNECTING => {
tracing::warn!("AvahiCallback: ClientState = AVAHI_CLIENT_CONNECTING");
}
avahi_sys::AvahiClientState_AVAHI_CLIENT_S_COLLISION => {
tracing::warn!("AvahiCallback: ClientState = AVAHI_CLIENT_S_COLLISION");
}
avahi_sys::AvahiClientState_AVAHI_CLIENT_S_REGISTERING => {
tracing::warn!("AvahiCallback: ClientState = AVAHI_CLIENT_S_REGISTERING");
}
other => {
tracing::warn!("AvahiCallback: ClientState = {}", other);
}
self.sync().await?;
Ok(())
}
}

View File

@@ -3,8 +3,9 @@ use std::path::PathBuf;
use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
use patch_db::DbHandle;
use rpc_toolkit::command;
use sqlx::SqlitePool;
use sqlx::PgPool;
use torut::onion::{OnionAddressV3, TorSecretKeyV3};
use tracing::instrument;
@@ -14,6 +15,7 @@ use self::mdns::MdnsController;
use self::nginx::NginxController;
use self::ssl::SslManager;
use self::tor::TorController;
use crate::hostname::get_hostname;
use crate::net::dns::DnsController;
use crate::net::interface::TorConfig;
use crate::net::nginx::InterfaceMetadata;
@@ -50,24 +52,26 @@ pub struct NetController {
pub dns: DnsController,
}
impl NetController {
#[instrument(skip(db))]
pub async fn init(
#[instrument(skip(db, handle))]
pub async fn init<Db: DbHandle>(
embassyd_addr: SocketAddr,
embassyd_tor_key: TorSecretKeyV3,
tor_control: SocketAddr,
dns_bind: &[SocketAddr],
db: SqlitePool,
db: PgPool,
import_root_ca: Option<(PKey<Private>, X509)>,
handle: &mut Db,
) -> Result<Self, Error> {
let ssl = match import_root_ca {
None => SslManager::init(db).await,
None => SslManager::init(db, handle).await,
Some(a) => SslManager::import_root_ca(db, a.0, a.1).await,
}?;
let hostname = get_hostname(handle).await?;
Ok(Self {
tor: TorController::init(embassyd_addr, embassyd_tor_key, tor_control).await?,
#[cfg(feature = "avahi")]
mdns: MdnsController::init(),
nginx: NginxController::init(PathBuf::from("/etc/nginx"), &ssl).await?,
mdns: MdnsController::init().await?,
nginx: NginxController::init(PathBuf::from("/etc/nginx"), &ssl, &hostname).await?,
ssl,
dns: DnsController::init(dns_bind).await?,
})

View File

@@ -9,7 +9,7 @@ use tracing::instrument;
use super::interface::{InterfaceId, LanPortConfig};
use super::ssl::SslManager;
use crate::hostname::get_hostname;
use crate::hostname::Hostname;
use crate::s9pk::manifest::PackageId;
use crate::util::serde::Port;
use crate::util::Invoke;
@@ -20,9 +20,15 @@ pub struct NginxController {
inner: Mutex<NginxControllerInner>,
}
impl NginxController {
pub async fn init(nginx_root: PathBuf, ssl_manager: &SslManager) -> Result<Self, Error> {
pub async fn init(
nginx_root: PathBuf,
ssl_manager: &SslManager,
host_name: &Hostname,
) -> Result<Self, Error> {
Ok(NginxController {
inner: Mutex::new(NginxControllerInner::init(&nginx_root, ssl_manager).await?),
inner: Mutex::new(
NginxControllerInner::init(&nginx_root, ssl_manager, host_name).await?,
),
nginx_root,
})
}
@@ -53,13 +59,17 @@ pub struct NginxControllerInner {
}
impl NginxControllerInner {
#[instrument]
async fn init(nginx_root: &Path, ssl_manager: &SslManager) -> Result<Self, Error> {
async fn init(
nginx_root: &Path,
ssl_manager: &SslManager,
host_name: &Hostname,
) -> Result<Self, Error> {
let inner = NginxControllerInner {
interfaces: BTreeMap::new(),
};
// write main ssl key/cert to fs location
let (key, cert) = ssl_manager
.certificate_for(&get_hostname().await?, &"embassy".parse().unwrap())
.certificate_for(host_name.as_ref(), &"embassy".parse().unwrap())
.await?;
let ssl_path_key = nginx_root.join(format!("ssl/embassy_main.key.pem"));
let ssl_path_cert = nginx_root.join(format!("ssl/embassy_main.cert.pem"));
@@ -91,35 +101,44 @@ impl NginxControllerInner {
for (id, meta) in interface_map.iter() {
for (port, lan_port_config) in meta.lan_config.iter() {
// get ssl certificate chain
let (listen_args, ssl_certificate_line, ssl_certificate_key_line, proxy_redirect_directive) =
if lan_port_config.ssl {
// these have already been written by the net controller
let package_path = nginx_root.join(format!("ssl/{}", package));
if tokio::fs::metadata(&package_path).await.is_err() {
tokio::fs::create_dir_all(&package_path)
.await
.with_ctx(|_| {
(ErrorKind::Filesystem, package_path.display().to_string())
})?;
}
let ssl_path_key = package_path.join(format!("{}.key.pem", id));
let ssl_path_cert = package_path.join(format!("{}.cert.pem", id));
let (key, chain) = ssl_manager
.certificate_for(&meta.dns_base, &package)
.await?;
tokio::try_join!(
crate::net::ssl::export_key(&key, &ssl_path_key),
crate::net::ssl::export_cert(&chain, &ssl_path_cert)
)?;
(
format!("{} ssl", port.0),
format!("ssl_certificate {};", ssl_path_cert.to_str().unwrap()),
format!("ssl_certificate_key {};", ssl_path_key.to_str().unwrap()),
format!("proxy_redirect http://$host/ https://$host/;"),
)
} else {
(format!("{}", port.0), String::from(""), String::from(""), String::from(""))
};
let (
listen_args,
ssl_certificate_line,
ssl_certificate_key_line,
proxy_redirect_directive,
) = if lan_port_config.ssl {
// these have already been written by the net controller
let package_path = nginx_root.join(format!("ssl/{}", package));
if tokio::fs::metadata(&package_path).await.is_err() {
tokio::fs::create_dir_all(&package_path)
.await
.with_ctx(|_| {
(ErrorKind::Filesystem, package_path.display().to_string())
})?;
}
let ssl_path_key = package_path.join(format!("{}.key.pem", id));
let ssl_path_cert = package_path.join(format!("{}.cert.pem", id));
let (key, chain) = ssl_manager
.certificate_for(&meta.dns_base, &package)
.await?;
tokio::try_join!(
crate::net::ssl::export_key(&key, &ssl_path_key),
crate::net::ssl::export_cert(&chain, &ssl_path_cert)
)?;
(
format!("{} ssl", port.0),
format!("ssl_certificate {};", ssl_path_cert.to_str().unwrap()),
format!("ssl_certificate_key {};", ssl_path_key.to_str().unwrap()),
format!("proxy_redirect http://$host/ https://$host/;"),
)
} else {
(
format!("{}", port.0),
String::from(""),
String::from(""),
String::from(""),
)
};
// write nginx configs
let nginx_conf_path = nginx_root.join(format!(
"sites-available/{}_{}_{}.conf",

View File

@@ -11,11 +11,14 @@ use openssl::nid::Nid;
use openssl::pkey::{PKey, Private};
use openssl::x509::{X509Builder, X509Extension, X509NameBuilder, X509};
use openssl::*;
use sqlx::SqlitePool;
use patch_db::DbHandle;
use sqlx::PgPool;
use tokio::process::Command;
use tokio::sync::Mutex;
use tracing::instrument;
use crate::s9pk::manifest::PackageId;
use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt};
static CERTIFICATE_VERSION: i32 = 2; // X509 version 3 is actually encoded as '2' in the cert because fuck you.
@@ -31,17 +34,17 @@ pub struct SslManager {
#[derive(Debug)]
struct SslStore {
secret_store: SqlitePool,
secret_store: PgPool,
}
impl SslStore {
fn new(db: SqlitePool) -> Result<Self, Error> {
fn new(db: PgPool) -> Result<Self, Error> {
Ok(SslStore { secret_store: db })
}
#[instrument(skip(self))]
async fn save_root_certificate(&self, key: &PKey<Private>, cert: &X509) -> Result<(), Error> {
let key_str = String::from_utf8(key.private_key_to_pem_pkcs8()?)?;
let cert_str = String::from_utf8(cert.to_pem()?)?;
let _n = sqlx::query!("INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (0, ?, ?, NULL, datetime('now'), datetime('now'))", key_str, cert_str).execute(&self.secret_store).await?;
let _n = sqlx::query!("INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (0, $1, $2, NULL, now(), now())", key_str, cert_str).execute(&self.secret_store).await?;
Ok(())
}
#[instrument(skip(self))]
@@ -67,7 +70,7 @@ impl SslStore {
) -> Result<(), Error> {
let key_str = String::from_utf8(key.private_key_to_pem_pkcs8()?)?;
let cert_str = String::from_utf8(cert.to_pem()?)?;
let _n = sqlx::query!("INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (1, ?, ?, NULL, datetime('now'), datetime('now'))", key_str, cert_str).execute(&self.secret_store).await?;
let _n = sqlx::query!("INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (1, $1, $2, NULL, now(), now())", key_str, cert_str).execute(&self.secret_store).await?;
Ok(())
}
async fn load_intermediate_certificate(&self) -> Result<Option<(PKey<Private>, X509)>, Error> {
@@ -106,7 +109,7 @@ impl SslStore {
) -> Result<(), Error> {
let key_str = String::from_utf8(key.private_key_to_pem_pkcs8()?)?;
let cert_str = String::from_utf8(cert.to_pem()?)?;
let _n = sqlx::query!("INSERT INTO certificates (priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (?, ?, ?, datetime('now'), datetime('now'))", key_str, cert_str, lookup_string).execute(&self.secret_store).await?;
let _n = sqlx::query!("INSERT INTO certificates (priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES ($1, $2, $3, now(), now())", key_str, cert_str, lookup_string).execute(&self.secret_store).await?;
Ok(())
}
async fn load_certificate(
@@ -114,7 +117,7 @@ impl SslStore {
lookup_string: &str,
) -> Result<Option<(PKey<Private>, X509)>, Error> {
let m_row = sqlx::query!(
"SELECT priv_key_pem, certificate_pem FROM certificates WHERE lookup_string = ?",
"SELECT priv_key_pem, certificate_pem FROM certificates WHERE lookup_string = $1",
lookup_string
)
.fetch_optional(&self.secret_store)
@@ -137,7 +140,8 @@ impl SslStore {
) -> Result<(), Error> {
let key_str = String::from_utf8(key.private_key_to_pem_pkcs8()?)?;
let cert_str = String::from_utf8(cert.to_pem()?)?;
let n = sqlx::query!("UPDATE certificates SET priv_key_pem = ?, certificate_pem = ?, updated_at = datetime('now') WHERE lookup_string = ?", key_str, cert_str, lookup_string).execute(&self.secret_store).await?;
let n = sqlx::query!("UPDATE certificates SET priv_key_pem = $1, certificate_pem = $2, updated_at = now() WHERE lookup_string = $3", key_str, cert_str, lookup_string)
.execute(&self.secret_store).await?;
if n.rows_affected() == 0 {
return Err(Error::new(
eyre!(
@@ -158,13 +162,14 @@ lazy_static::lazy_static! {
}
impl SslManager {
#[instrument(skip(db))]
pub async fn init(db: SqlitePool) -> Result<Self, Error> {
#[instrument(skip(db, handle))]
pub async fn init<Db: DbHandle>(db: PgPool, handle: &mut Db) -> Result<Self, Error> {
let store = SslStore::new(db)?;
let id = crate::hostname::get_id(handle).await?;
let (root_key, root_cert) = match store.load_root_certificate().await? {
None => {
let root_key = generate_key()?;
let server_id = crate::hostname::get_id().await?;
let server_id = id;
let root_cert = make_root_cert(&root_key, &server_id)?;
store.save_root_certificate(&root_key, &root_cert).await?;
Ok::<_, Error>((root_key, root_cert))
@@ -180,6 +185,17 @@ impl SslManager {
)
.await?;
tokio::fs::write(ROOT_CA_STATIC_PATH, root_cert.to_pem()?).await?;
// write to ca cert store
tokio::fs::write(
"/usr/local/share/ca-certificates/embassy-root-ca.crt",
root_cert.to_pem()?,
)
.await?;
Command::new("update-ca-certificates")
.invoke(crate::ErrorKind::OpenSsl)
.await?;
let (int_key, int_cert) = match store.load_intermediate_certificate().await? {
None => {
let int_key = generate_key()?;
@@ -191,6 +207,10 @@ impl SslManager {
}
Some((key, cert)) => Ok((key, cert)),
}?;
sqlx::query!("SELECT setval('certificates_id_seq', GREATEST(MAX(id) + 1, nextval('certificates_id_seq') - 1)) FROM certificates")
.fetch_one(&store.secret_store).await?;
Ok(SslManager {
store,
root_cert,
@@ -208,7 +228,7 @@ impl SslManager {
// the intermediate certificate
#[instrument(skip(db))]
pub async fn import_root_ca(
db: SqlitePool,
db: PgPool,
root_key: PKey<Private>,
root_cert: X509,
) -> Result<Self, Error> {
@@ -493,54 +513,56 @@ fn make_leaf_cert(
Ok(cert)
}
#[tokio::test]
async fn ca_details_persist() -> Result<(), Error> {
let pool = sqlx::Pool::<sqlx::Sqlite>::connect("sqlite::memory:").await?;
sqlx::query_file!("migrations/20210629193146_Init.sql")
.execute(&pool)
.await?;
let mgr = SslManager::init(pool.clone()).await?;
let root_cert0 = mgr.root_cert;
let int_key0 = mgr.int_key;
let int_cert0 = mgr.int_cert;
let mgr = SslManager::init(pool).await?;
let root_cert1 = mgr.root_cert;
let int_key1 = mgr.int_key;
let int_cert1 = mgr.int_cert;
assert_eq!(root_cert0.to_pem()?, root_cert1.to_pem()?);
assert_eq!(
int_key0.private_key_to_pem_pkcs8()?,
int_key1.private_key_to_pem_pkcs8()?
);
assert_eq!(int_cert0.to_pem()?, int_cert1.to_pem()?);
Ok(())
}
#[tokio::test]
async fn certificate_details_persist() -> Result<(), Error> {
let pool = sqlx::Pool::<sqlx::Sqlite>::connect("sqlite::memory:").await?;
sqlx::query_file!("migrations/20210629193146_Init.sql")
.execute(&pool)
.await?;
let mgr = SslManager::init(pool.clone()).await?;
let package_id = "bitcoind".parse().unwrap();
let (key0, cert_chain0) = mgr.certificate_for("start9", &package_id).await?;
let (key1, cert_chain1) = mgr.certificate_for("start9", &package_id).await?;
assert_eq!(
key0.private_key_to_pem_pkcs8()?,
key1.private_key_to_pem_pkcs8()?
);
assert_eq!(
cert_chain0
.iter()
.map(|cert| cert.to_pem().unwrap())
.collect::<Vec<Vec<u8>>>(),
cert_chain1
.iter()
.map(|cert| cert.to_pem().unwrap())
.collect::<Vec<Vec<u8>>>()
);
Ok(())
}
// #[tokio::test]
// async fn ca_details_persist() -> Result<(), Error> {
// let pool = sqlx::Pool::<sqlx::Postgres>::connect("postgres::memory:").await?;
// sqlx::migrate!()
// .run(&pool)
// .await
// .with_kind(crate::ErrorKind::Database)?;
// let mgr = SslManager::init(pool.clone()).await?;
// let root_cert0 = mgr.root_cert;
// let int_key0 = mgr.int_key;
// let int_cert0 = mgr.int_cert;
// let mgr = SslManager::init(pool).await?;
// let root_cert1 = mgr.root_cert;
// let int_key1 = mgr.int_key;
// let int_cert1 = mgr.int_cert;
//
// assert_eq!(root_cert0.to_pem()?, root_cert1.to_pem()?);
// assert_eq!(
// int_key0.private_key_to_pem_pkcs8()?,
// int_key1.private_key_to_pem_pkcs8()?
// );
// assert_eq!(int_cert0.to_pem()?, int_cert1.to_pem()?);
// Ok(())
// }
//
// #[tokio::test]
// async fn certificate_details_persist() -> Result<(), Error> {
// let pool = sqlx::Pool::<sqlx::Postgres>::connect("postgres::memory:").await?;
// sqlx::migrate!()
// .run(&pool)
// .await
// .with_kind(crate::ErrorKind::Database)?;
// let mgr = SslManager::init(pool.clone()).await?;
// let package_id = "bitcoind".parse().unwrap();
// let (key0, cert_chain0) = mgr.certificate_for("start9", &package_id).await?;
// let (key1, cert_chain1) = mgr.certificate_for("start9", &package_id).await?;
//
// assert_eq!(
// key0.private_key_to_pem_pkcs8()?,
// key1.private_key_to_pem_pkcs8()?
// );
// assert_eq!(
// cert_chain0
// .iter()
// .map(|cert| cert.to_pem().unwrap())
// .collect::<Vec<Vec<u8>>>(),
// cert_chain1
// .iter()
// .map(|cert| cert.to_pem().unwrap())
// .collect::<Vec<Vec<u8>>>()
// );
// Ok(())
// }

View File

@@ -9,7 +9,7 @@ use futures::FutureExt;
use reqwest::Client;
use rpc_toolkit::command;
use serde_json::json;
use sqlx::{Executor, Sqlite};
use sqlx::{Executor, Postgres};
use tokio::net::TcpStream;
use tokio::sync::Mutex;
use torut::control::{AsyncEvent, AuthenticatedConn, ConnError};
@@ -32,7 +32,7 @@ pub fn tor() -> Result<(), Error> {
Ok(())
}
fn display_services(services: Vec<OnionAddressV3>, matches: &ArgMatches<'_>) {
fn display_services(services: Vec<OnionAddressV3>, matches: &ArgMatches) {
use prettytable::*;
if matches.is_present("format") {
@@ -44,7 +44,7 @@ fn display_services(services: Vec<OnionAddressV3>, matches: &ArgMatches<'_>) {
let row = row![&service.to_string()];
table.add_row(row);
}
table.print_tty(false);
table.print_tty(false).unwrap();
}
#[command(rename = "list-services", display(display_services))]
@@ -60,7 +60,7 @@ pub async fn list_services(
#[instrument(skip(secrets))]
pub async fn os_key<Ex>(secrets: &mut Ex) -> Result<TorSecretKeyV3, Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
let key = sqlx::query!("SELECT tor_key FROM account")
.fetch_one(secrets)

View File

@@ -188,7 +188,7 @@ pub struct WifiListOut {
security: Vec<String>,
}
pub type WifiList = HashMap<Ssid, WifiListInfo>;
fn display_wifi_info(info: WiFiInfo, matches: &ArgMatches<'_>) {
fn display_wifi_info(info: WiFiInfo, matches: &ArgMatches) {
use prettytable::*;
if matches.is_present("format") {
@@ -215,7 +215,7 @@ fn display_wifi_info(info: WiFiInfo, matches: &ArgMatches<'_>) {
&info.country.alpha2(),
&format!("{}", info.ethernet)
]);
table_global.print_tty(false);
table_global.print_tty(false).unwrap();
let mut table_ssids = Table::new();
table_ssids.add_row(row![bc => "SSID", "STRENGTH"]);
@@ -233,7 +233,7 @@ fn display_wifi_info(info: WiFiInfo, matches: &ArgMatches<'_>) {
.for_each(drop);
table_ssids.add_row(row);
}
table_ssids.print_tty(false);
table_ssids.print_tty(false).unwrap();
let mut table_global = Table::new();
table_global.add_row(row![bc =>
@@ -249,10 +249,10 @@ fn display_wifi_info(info: WiFiInfo, matches: &ArgMatches<'_>) {
]);
}
table_global.print_tty(false);
table_global.print_tty(false).unwrap();
}
fn display_wifi_list(info: Vec<WifiListOut>, matches: &ArgMatches<'_>) {
fn display_wifi_list(info: Vec<WifiListOut>, matches: &ArgMatches) {
use prettytable::*;
if matches.is_present("format") {
@@ -273,7 +273,7 @@ fn display_wifi_list(info: Vec<WifiListOut>, matches: &ArgMatches<'_>) {
]);
}
table_global.print_tty(false);
table_global.print_tty(false).unwrap();
}
#[command(display(display_wifi_info))]
@@ -764,7 +764,7 @@ pub async fn interface_connected(interface: &str) -> Result<bool, Error> {
Ok(v.is_some())
}
pub fn country_code_parse(code: &str, _matches: &ArgMatches<'_>) -> Result<CountryCode, Error> {
pub fn country_code_parse(code: &str, _matches: &ArgMatches) -> Result<CountryCode, Error> {
CountryCode::for_alpha2(code).map_err(|_| {
Error::new(
color_eyre::eyre::eyre!("Invalid Country Code: {}", code),
@@ -801,7 +801,6 @@ pub async fn synchronize_wpa_supplicant_conf<P: AsRef<Path>>(
.arg("up")
.invoke(ErrorKind::Wifi)
.await?;
Command::new("dhclient").invoke(ErrorKind::Wifi).await?;
if let Some(last_country_code) = last_country_code {
tracing::info!("Setting the region");
let _ = Command::new("iw")

View File

@@ -16,6 +16,8 @@ server {{
server_name .{lan_hostname};
proxy_buffers 4 512k;
proxy_buffer_size 512k;
proxy_buffering off;
proxy_request_buffering off;
proxy_socket_keepalive on;
@@ -71,6 +73,8 @@ server {{
server_name .{tor_hostname};
proxy_buffers 4 512k;
proxy_buffer_size 512k;
proxy_buffering off;
proxy_request_buffering off;
proxy_socket_keepalive on;

View File

@@ -6,13 +6,12 @@ use chrono::{DateTime, Utc};
use color_eyre::eyre::eyre;
use patch_db::{DbHandle, LockType};
use rpc_toolkit::command;
use sqlx::SqlitePool;
use sqlx::PgPool;
use tokio::sync::Mutex;
use tracing::instrument;
use crate::backup::BackupReport;
use crate::context::RpcContext;
use crate::db::util::WithRevision;
use crate::s9pk::manifest::PackageId;
use crate::util::display_none;
use crate::util::serde::display_serializable;
@@ -27,9 +26,9 @@ pub async fn notification() -> Result<(), Error> {
#[instrument(skip(ctx))]
pub async fn list(
#[context] ctx: RpcContext,
#[arg] before: Option<u32>,
#[arg] before: Option<i32>,
#[arg] limit: Option<u32>,
) -> Result<WithRevision<Vec<Notification>>, Error> {
) -> Result<Vec<Notification>, Error> {
let limit = limit.unwrap_or(40);
let mut handle = ctx.db.handle();
match before {
@@ -39,8 +38,8 @@ pub async fn list(
.unread_notification_count();
model.lock(&mut handle, LockType::Write).await?;
let records = sqlx::query!(
"SELECT id, package_id, created_at, code, level, title, message, data FROM notifications ORDER BY id DESC LIMIT ?",
limit
"SELECT id, package_id, created_at, code, level, title, message, data FROM notifications ORDER BY id DESC LIMIT $1",
limit as i64
).fetch_all(&ctx.secret_store).await?;
let notifs = records
.into_iter()
@@ -72,17 +71,14 @@ pub async fn list(
})
.collect::<Result<Vec<Notification>, Error>>()?;
// set notification count to zero
let r = model.put(&mut handle, &0).await?;
Ok(WithRevision {
response: notifs,
revision: r,
})
model.put(&mut handle, &0).await?;
Ok(notifs)
}
Some(before) => {
let records = sqlx::query!(
"SELECT id, package_id, created_at, code, level, title, message, data FROM notifications WHERE id < ? ORDER BY id DESC LIMIT ?",
"SELECT id, package_id, created_at, code, level, title, message, data FROM notifications WHERE id < $1 ORDER BY id DESC LIMIT $2",
before,
limit
limit as i64
).fetch_all(&ctx.secret_store).await?;
let res = records
.into_iter()
@@ -113,25 +109,22 @@ pub async fn list(
})
})
.collect::<Result<Vec<Notification>, Error>>()?;
Ok(WithRevision {
response: res,
revision: None,
})
Ok(res)
}
}
}
#[command(display(display_none))]
pub async fn delete(#[context] ctx: RpcContext, #[arg] id: u32) -> Result<(), Error> {
sqlx::query!("DELETE FROM notifications WHERE id = ?", id)
pub async fn delete(#[context] ctx: RpcContext, #[arg] id: i32) -> Result<(), Error> {
sqlx::query!("DELETE FROM notifications WHERE id = $1", id)
.execute(&ctx.secret_store)
.await?;
Ok(())
}
#[command(rename = "delete-before", display(display_none))]
pub async fn delete_before(#[context] ctx: RpcContext, #[arg] before: u32) -> Result<(), Error> {
sqlx::query!("DELETE FROM notifications WHERE id < ?", before)
pub async fn delete_before(#[context] ctx: RpcContext, #[arg] before: i32) -> Result<(), Error> {
sqlx::query!("DELETE FROM notifications WHERE id < $1", before)
.execute(&ctx.secret_store)
.await?;
Ok(())
@@ -218,22 +211,22 @@ pub struct Notification {
pub trait NotificationType:
serde::Serialize + for<'de> serde::Deserialize<'de> + std::fmt::Debug
{
const CODE: u32;
const CODE: i32;
}
impl NotificationType for () {
const CODE: u32 = 0;
const CODE: i32 = 0;
}
impl NotificationType for BackupReport {
const CODE: u32 = 1;
const CODE: i32 = 1;
}
pub struct NotificationManager {
sqlite: SqlitePool,
sqlite: PgPool,
cache: Mutex<HashMap<(Option<PackageId>, NotificationLevel, String), i64>>,
}
impl NotificationManager {
pub fn new(sqlite: SqlitePool) -> Self {
pub fn new(sqlite: PgPool) -> Self {
NotificationManager {
sqlite,
cache: Mutex::new(HashMap::new()),
@@ -267,9 +260,9 @@ impl NotificationManager {
let sql_data =
serde_json::to_string(&subtype).with_kind(crate::ErrorKind::Serialization)?;
sqlx::query!(
"INSERT INTO notifications (package_id, code, level, title, message, data) VALUES (?, ?, ?, ?, ?, ?)",
"INSERT INTO notifications (package_id, code, level, title, message, data) VALUES ($1, $2, $3, $4, $5, $6)",
sql_package_id,
sql_code,
sql_code as i32,
sql_level,
title,
message,

View File

@@ -64,6 +64,7 @@ pub struct DockerProcedure {
impl DockerProcedure {
pub fn validate(
&self,
eos_version: &Version,
volumes: &Volumes,
image_ids: &BTreeSet<ImageId>,
expected_io: bool,
@@ -85,6 +86,12 @@ impl DockerProcedure {
if expected_io && self.io_format.is_none() {
color_eyre::eyre::bail!("expected io-format");
}
if &**eos_version >= &emver::Version::new(0, 3, 1, 1)
&& self.inject
&& !self.mounts.is_empty()
{
color_eyre::eyre::bail!("mounts not allowed in inject actions");
}
Ok(())
}
@@ -127,7 +134,11 @@ impl DockerProcedure {
)
.await
{
Ok(()) | Err(bollard::errors::Error::DockerResponseNotFoundError { .. }) => Ok(()),
Ok(())
| Err(bollard::errors::Error::DockerResponseServerError {
status_code: 404, // NOT FOUND
..
}) => Ok(()),
Err(e) => Err(e),
}?;
}
@@ -193,7 +204,7 @@ impl DockerProcedure {
match format.from_slice(buffer.as_bytes()) {
Ok(a) => a,
Err(e) => {
tracing::warn!(
tracing::trace!(
"Failed to deserialize stdout from {}: {}, falling back to UTF-8 string.",
format,
e
@@ -334,7 +345,7 @@ impl DockerProcedure {
match format.from_slice(buffer.as_bytes()) {
Ok(a) => a,
Err(e) => {
tracing::warn!(
tracing::trace!(
"Failed to deserialize stdout from {}: {}, falling back to UTF-8 string.",
format,
e

View File

@@ -1,21 +1,18 @@
use std::{
path::{Path, PathBuf},
time::Duration,
};
use std::path::{Path, PathBuf};
use std::time::Duration;
pub use js_engine::JsError;
use js_engine::{JsExecutionEnvironment, PathForVolumeId};
use models::VolumeId;
use serde::{Deserialize, Serialize};
use tracing::instrument;
use crate::{
context::RpcContext, s9pk::manifest::PackageId, util::Version, volume::Volumes, Error,
};
use js_engine::{JsExecutionEnvironment, PathForVolumeId};
use super::ProcedureName;
pub use js_engine::JsError;
use crate::context::RpcContext;
use crate::s9pk::manifest::PackageId;
use crate::util::Version;
use crate::volume::Volumes;
use crate::Error;
#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "kebab-case")]
@@ -45,7 +42,10 @@ impl PathForVolumeId for Volumes {
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct JsProcedure {}
pub struct JsProcedure {
#[serde(default)]
args: Vec<serde_json::Value>,
}
impl JsProcedure {
pub fn validate(&self, _volumes: &Volumes) -> Result<(), color_eyre::eyre::Report> {
@@ -71,7 +71,7 @@ impl JsProcedure {
Box::new(volumes.clone()),
)
.await?
.run_action(name, input);
.run_action(name, input, self.args.clone());
let output: ErrorValue = match timeout {
Some(timeout_duration) => tokio::time::timeout(timeout_duration, running_action)
.await
@@ -105,7 +105,7 @@ impl JsProcedure {
)
.await?
.read_only_effects()
.run_action(name, input);
.run_action(name, input, self.args.clone());
let output: ErrorValue = match timeout {
Some(timeout_duration) => tokio::time::timeout(timeout_duration, running_action)
.await
@@ -145,7 +145,7 @@ fn unwrap_known_error<O: for<'de> Deserialize<'de>>(
#[tokio::test]
async fn js_action_execute() {
let js_action = JsProcedure {};
let js_action = JsProcedure { args: vec![] };
let path: PathBuf = "test/js_action_execute/"
.parse::<PathBuf>()
.unwrap()
@@ -200,7 +200,7 @@ async fn js_action_execute() {
#[tokio::test]
async fn js_action_execute_error() {
let js_action = JsProcedure {};
let js_action = JsProcedure { args: vec![] };
let path: PathBuf = "test/js_action_execute/"
.parse::<PathBuf>()
.unwrap()
@@ -244,7 +244,7 @@ async fn js_action_execute_error() {
#[tokio::test]
async fn js_action_fetch() {
let js_action = JsProcedure {};
let js_action = JsProcedure { args: vec![] };
let path: PathBuf = "test/js_action_execute/"
.parse::<PathBuf>()
.unwrap()
@@ -285,3 +285,179 @@ async fn js_action_fetch() {
.unwrap()
.unwrap();
}
#[tokio::test]
async fn js_action_var_arg() {
let js_action = JsProcedure {
args: vec![42.into()],
};
let path: PathBuf = "test/js_action_execute/"
.parse::<PathBuf>()
.unwrap()
.canonicalize()
.unwrap();
let package_id = "test-package".parse().unwrap();
let package_version: Version = "0.3.0.3".parse().unwrap();
let name = ProcedureName::Action("js-action-var-arg".parse().unwrap());
let volumes: Volumes = serde_json::from_value(serde_json::json!({
"main": {
"type": "data"
},
"compat": {
"type": "assets"
},
"filebrowser" :{
"package-id": "filebrowser",
"path": "data",
"readonly": true,
"type": "pointer",
"volume-id": "main",
}
}))
.unwrap();
let input: Option<serde_json::Value> = None;
let timeout = Some(Duration::from_secs(10));
js_action
.execute::<serde_json::Value, serde_json::Value>(
&path,
&package_id,
&package_version,
name,
&volumes,
input,
timeout,
)
.await
.unwrap()
.unwrap();
}
#[tokio::test]
async fn js_action_test_rename() {
let js_action = JsProcedure { args: vec![] };
let path: PathBuf = "test/js_action_execute/"
.parse::<PathBuf>()
.unwrap()
.canonicalize()
.unwrap();
let package_id = "test-package".parse().unwrap();
let package_version: Version = "0.3.0.3".parse().unwrap();
let name = ProcedureName::Action("test-rename".parse().unwrap());
let volumes: Volumes = serde_json::from_value(serde_json::json!({
"main": {
"type": "data"
},
"compat": {
"type": "assets"
},
"filebrowser" :{
"package-id": "filebrowser",
"path": "data",
"readonly": true,
"type": "pointer",
"volume-id": "main",
}
}))
.unwrap();
let input: Option<serde_json::Value> = None;
let timeout = Some(Duration::from_secs(10));
js_action
.execute::<serde_json::Value, serde_json::Value>(
&path,
&package_id,
&package_version,
name,
&volumes,
input,
timeout,
)
.await
.unwrap()
.unwrap();
}
#[tokio::test]
async fn js_action_test_deep_dir() {
let js_action = JsProcedure { args: vec![] };
let path: PathBuf = "test/js_action_execute/"
.parse::<PathBuf>()
.unwrap()
.canonicalize()
.unwrap();
let package_id = "test-package".parse().unwrap();
let package_version: Version = "0.3.0.3".parse().unwrap();
let name = ProcedureName::Action("test-deep-dir".parse().unwrap());
let volumes: Volumes = serde_json::from_value(serde_json::json!({
"main": {
"type": "data"
},
"compat": {
"type": "assets"
},
"filebrowser" :{
"package-id": "filebrowser",
"path": "data",
"readonly": true,
"type": "pointer",
"volume-id": "main",
}
}))
.unwrap();
let input: Option<serde_json::Value> = None;
let timeout = Some(Duration::from_secs(10));
js_action
.execute::<serde_json::Value, serde_json::Value>(
&path,
&package_id,
&package_version,
name,
&volumes,
input,
timeout,
)
.await
.unwrap()
.unwrap();
}
#[tokio::test]
async fn js_action_test_deep_dir_escape() {
let js_action = JsProcedure { args: vec![] };
let path: PathBuf = "test/js_action_execute/"
.parse::<PathBuf>()
.unwrap()
.canonicalize()
.unwrap();
let package_id = "test-package".parse().unwrap();
let package_version: Version = "0.3.0.3".parse().unwrap();
let name = ProcedureName::Action("test-deep-dir-escape".parse().unwrap());
let volumes: Volumes = serde_json::from_value(serde_json::json!({
"main": {
"type": "data"
},
"compat": {
"type": "assets"
},
"filebrowser" :{
"package-id": "filebrowser",
"path": "data",
"readonly": true,
"type": "pointer",
"volume-id": "main",
}
}))
.unwrap();
let input: Option<serde_json::Value> = None;
let timeout = Some(Duration::from_secs(10));
js_action
.execute::<serde_json::Value, serde_json::Value>(
&path,
&package_id,
&package_version,
name,
&volumes,
input,
timeout,
)
.await
.unwrap()
.unwrap();
}

View File

@@ -40,12 +40,15 @@ impl PackageProcedure {
#[instrument]
pub fn validate(
&self,
eos_version: &Version,
volumes: &Volumes,
image_ids: &BTreeSet<ImageId>,
expected_io: bool,
) -> Result<(), color_eyre::eyre::Report> {
match self {
PackageProcedure::Docker(action) => action.validate(volumes, image_ids, expected_io),
PackageProcedure::Docker(action) => {
action.validate(eos_version, volumes, image_ids, expected_io)
}
#[cfg(feature = "js_engine")]
PackageProcedure::Script(action) => action.validate(volumes),

View File

@@ -9,7 +9,7 @@ use crate::procedure::ProcedureName;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::{Error, ErrorKind};
pub fn display_properties(response: Value, _: &ArgMatches<'_>) {
pub fn display_properties(response: Value, _: &ArgMatches) {
println!("{}", response);
}

View File

@@ -1,6 +1,7 @@
use std::path::{Path, PathBuf};
use color_eyre::eyre::eyre;
pub use models::{PackageId, SYSTEM_PACKAGE_ID};
use patch_db::HasModel;
use serde::{Deserialize, Serialize};
use url::Url;
@@ -18,8 +19,6 @@ use crate::version::{Current, VersionT};
use crate::volume::Volumes;
use crate::Error;
pub use models::{PackageId, SYSTEM_PACKAGE_ID};
fn current_version() -> Version {
Current::new().semver().into()
}

Some files were not shown because too many files have changed in this diff Show More