Compare commits

...

75 Commits

Author SHA1 Message Date
Matt Hill
d0ba0936ca remove taiga icons (#2222) 2023-03-15 12:29:24 -06:00
Matt Hill
b08556861f Fix/stupid updates (#2221)
one more thing
2023-03-15 12:23:25 -06:00
Aiden McClelland
c96628ad49 do not log parameters 2023-03-15 12:19:11 -06:00
Matt Hill
a615882b3f fix more bugs with updates tab... (#2219) 2023-03-15 11:33:54 -06:00
Matt Hill
2bcc8e0d30 only when version higher and show after login (#2217)
* only when version higher and show after login

* unused import
2023-03-14 11:24:54 -06:00
Aiden McClelland
de519edf78 fix caching (#2216) 2023-03-13 17:25:10 -06:00
Lucy C
caf47943c3 Fix/misc UI (#2215)
* add courier new as asset

* fix login button on mobile
2023-03-13 17:24:59 -06:00
Aiden McClelland
427ab12724 wait for time sync before starting tor (#2209)
* wait for time sync before starting tor

* don't initialize /var/lib/docker
2023-03-13 15:45:36 -06:00
Matt Hill
eba16c0cc3 Fix/more UI (#2213)
* fix badge counter...again

* remove taiga styles from angular json

* better syntax
2023-03-13 15:29:39 -06:00
Aiden McClelland
a485de6359 let final build-image step create nc-broadcast (#2210) 2023-03-13 15:23:46 -06:00
Matt Hill
1a985f7e82 Fix updates badge and rework updates page (#2205)
* fix updates badge and rework updates page

* resize icons

* better language around browser tab title

* no period

* updates tab more fixes
2023-03-13 12:15:43 -06:00
Aiden McClelland
7867411095 fix stack overflow on shutdown (#2208) 2023-03-13 12:13:14 -06:00
Aiden McClelland
2f6ebd16c1 use yesterday for not_before on ssl certs (#2204) 2023-03-13 11:43:10 -06:00
Aiden McClelland
878b235614 x86 build for compat (#2203) 2023-03-10 17:11:15 -07:00
Aiden McClelland
75f9c6b0fb fix raspi kernel upgrades (#2202)
* fix build

* use same node versions

* lock kernel to 5.15.76
2023-03-10 17:11:06 -07:00
Lucy C
7c1e2bf96f fix spacing and font size (#2199)
* fix spacing and font size

* fix spacing reverted by linting

* fix styles and event propagation on updates tab

* fix login theme

* remove global font setting

* remove taiga theming for now

* move website button and bump shared and marketplace libs

---------

Co-authored-by: Matt Hill <matthewonthemoon@gmail.com>
2023-03-10 11:24:58 -07:00
Alex Inkin
181b44e117 chore: update Taiga UI and remove unnecessary styles (#2200)
* chore: update Taiga UI and remove unnecessary styles

* update workflow and package lock

---------

Co-authored-by: Lucy Cifferello <12953208+elvece@users.noreply.github.com>
2023-03-10 10:15:38 -07:00
Aiden McClelland
f7793976fb export cert correctly 2023-03-09 18:10:52 -07:00
J H
8ffcd9b60a fix: Br is too slow, removed from encoding (#2197) 2023-03-09 15:37:41 -07:00
kn0wmad
52d3c4d62d Update CHANGELOG to v0.3.3 (#2196) 2023-03-09 15:33:45 -07:00
Aiden McClelland
0fb3e75253 fix display for SANInfo 2023-03-09 15:08:39 -07:00
Matt Hill
2c40e403c4 misc 0.3.4 bugfixes (#2193)
* display message not object on login page

* more release notes

* fix firefox ssl issue

* fix no pubkey error

* Fix/missing main (#2194)

fix: Main during migration

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
Co-authored-by: J H <2364004+Blu-J@users.noreply.github.com>
2023-03-09 12:34:48 -07:00
Aiden McClelland
d1c519ed0d remove system rebuild 2023-03-09 10:56:55 -07:00
Aiden McClelland
27470ef934 fix http -> https redirect 2023-03-09 10:45:22 -07:00
Aiden McClelland
8a1da87702 fix img naming 2023-03-09 10:12:03 -07:00
Lucy C
c8d89f805b Update/misc frontend (#2191)
* update version to 0.3.4

* update release guide with sdk instructions

* remove comment

* update page styling

* closes #2152, closes #2155, closes #2157

* move marketing site link to description block

* re-arrange setup wizard recovery options

* move divider for update list item

* fix bug in mocks to display lnd as available for update

---------

Co-authored-by: Matt Hill <matthewonthemoon@gmail.com>
2023-03-09 07:02:48 -07:00
Aiden McClelland
c9fceafc16 Feature/efi (#2192)
* update makefile

* fix

* add efi support

* fix efi

* clean up

* add `make update` and `make update-overlay`

* more protections

* update package lock

* rename reflash to indicate it isn't real

* fix authcookie

* Update product.yaml
2023-03-09 00:10:37 -07:00
Aiden McClelland
bbb9980941 Refactor/networking (#2189)
* refactor networking and account

* add interfaces from manifest automatically

* use nistp256 to satisfy firefox

* use ed25519 if available

* fix ip signing

* fix SQL error

* update prettytable to fix segfault

* fix migration

* fix migration

* bump welcome-ack

* add redirect if connecting to https over http

* misc rebase fixes

* fix compression

* bump rustc version
2023-03-08 19:30:46 -07:00
J H
da55d6f7cd feat: Add in the chmod + chown to libs::js_engine (#2185)
* feat: Add in the chmod + chown to libs::js_engine

* fix: Build
2023-03-08 14:50:56 -07:00
Aiden McClelland
eeacdc1359 support path routing (#2188) 2023-03-08 14:50:27 -07:00
J H
ee1e92e1cb feat: No permissions for the rsync (#2187)
* feat: No permissions for the rsync

* chore: Fix the build for missing a property
2023-03-08 12:48:47 -07:00
Aiden McClelland
705802e584 gzip and brotli (#2186) 2023-03-08 12:48:21 -07:00
J H
b2e509f055 chore: Update version to 0.3.4" (#2184)
* chore: Update version to 0.3.4"

* chore: Update others to the latest code

* release notes

* registry not marketplace

---------

Co-authored-by: Matt Hill <matthewonthemoon@gmail.com>
2023-03-08 10:58:03 -07:00
Lucy C
cca70764d4 update packages (#2183) 2023-03-08 09:08:23 -07:00
Aiden McClelland
3ac94710fb relocate usermod 2023-03-08 08:22:25 -07:00
Aiden McClelland
ca73a47785 Update pureos-iso.yaml 2023-03-08 07:55:45 -07:00
Aiden McClelland
1ef67fc8e9 fix typo and update hash 2023-03-08 00:00:48 -07:00
Aiden McClelland
8f3c2f4f3d bump version 2023-03-07 21:00:43 -07:00
Aiden McClelland
e42b98ec17 new debspawn version 2023-03-07 20:18:32 -07:00
Alex Inkin
efb318a979 feat: lazy loading node-jose (#2177) 2023-03-07 19:09:10 -07:00
Alex Inkin
3c0a82293c Night theme (#2137)
* feat: add themes

* fix: remove obvious issues with light theme

* chore: improve light theme a bit

* comment out theme switcher

* chore: make login dark

* add theme and widgets to seeds

* add theme and widgets to migration

---------

Co-authored-by: Matt Hill <matthewonthemoon@gmail.com>
2023-03-07 19:09:10 -07:00
Matt Hill
e867f31c31 Next (#2170)
* feat: add widgets (#2034)

* feat: add Taiga UI library (#1992)

* feat: add widgets

* update patchdb

* right resizable sidebar with widgets

* feat: add resizing directive

* chore: remove unused code

* chore: remove unnecessary dep

* feat: `ResponsiveCol` add directive for responsive grid

* feat: add widgets edit mode and dialogs

* feat: add widgets model and modal

* chore: fix import

* chore: hide mobile widgets behind flag

* chore: add dummy widgets

* chore: start working on heath widget and implement other comments

* feat: health widget

* feat: add saving widgets and sidebar params to patch

* feat: preemptive UI update for widgets

* update health widget with more accurate states and styling (#2127)

* feat: `ResponsiveCol` add directive for responsive grid

* chore: some changes after merge

Co-authored-by: Matt Hill <matthewonthemoon@gmail.com>
Co-authored-by: Lucy C <12953208+elvece@users.noreply.github.com>

* fix(shared): `ElasticContainer` fix collapsing margin (#2150)

* fix(shared): `ElasticContainer` fix collapsing margin

* fix toolbar height so titles not chopped

---------

Co-authored-by: Matt Hill <matthewonthemoon@gmail.com>

* feat: make widgets sidebar width togglable (#2146)

* feat: make widgets sidebar width togglable

* feat: move widgets under header

* chore: fix wide layout

* fix(shared): `ResponsiveCol` fix missing grid steps (#2153)

* fix widget flag and refactor for non-persistence

* default widget flag to false

* fix(shared): fix responsive column size (#2159)

* fix(shared): fix responsive column size

* fix: add responsiveness to all pages

* fix responsiveness on more pages

* fix: comments

* revert some padding changes

---------

Co-authored-by: Lucy Cifferello <12953208+elvece@users.noreply.github.com>
Co-authored-by: Matt Hill <matthewonthemoon@gmail.com>

* chore: add analyzer (#2165)

* fix list styling to previous default (#2173)

* fix list styling to previous default

* dont need important flag

---------

Co-authored-by: Alex Inkin <alexander@inkin.ru>
Co-authored-by: Lucy C <12953208+elvece@users.noreply.github.com>
2023-03-07 19:09:10 -07:00
kn0wmad
aeb6da111b Minor README Update (#2158) 2023-03-07 19:09:10 -07:00
J H
2736fa5202 feat: Add in the read dir. (#2141)
* feat: Add in the read dir.

Have a test that proves that this is working.

* chore: Let the read dir work while in a read only mode

* revert: To old sync
2023-03-07 19:09:10 -07:00
Matt Hill
4d3df867da Better Updates Tab and updates count (#2151)
* wip

* should be working now

* delete unused function

* delete 2 more unused functions

* update fixture to include beta registry

* address comments

* wait for connection to get local packages
2023-03-07 19:09:10 -07:00
Matt Hill
62f78e4312 invert conditional (#2138) 2023-03-07 19:09:10 -07:00
Matt Hill
d223ac4675 Config refactor (#2128)
* prevent excessive nesting for unions, closes #2107, and generally refactor config

* a little cleaner

* working but with inefficiencies

* remove warning from union list

* introduce messaging for config with only pointers

* feat(shared): `ElasticContainer` add new component (#2134)

* feat(shared): `ElasticContainer` add new component

* chore: fix imports

* revert to 250 for resize

* remove logs

Co-authored-by: Alex Inkin <alexander@inkin.ru>
2023-03-07 19:09:10 -07:00
Matt Hill
c16404bb2d dont hard code alpha and beta, use substring detection instead (#2135) 2023-03-07 19:09:10 -07:00
Matt Hill
cf70933e21 Only show alpha and beta in updates with dev tools enabled (#2132)
only show alpha and beta in updates with dev tools
2023-03-07 19:09:10 -07:00
Matt Hill
46222e9352 Feat/marketplace show links (#2105)
* closes #2084, rearranges marketplace show, app show, and donation link for Start9

* use url query param if present when fetching license and instructions

* remove log

* chore: Add some checking

* chore: Update something about validation

* chore: Update to use correct default

Co-authored-by: BluJ <mogulslayer@gmail.com>
2023-03-07 19:09:10 -07:00
J M
212e94756b fix: Zero op new dir dies. (#2122) 2023-03-07 19:09:10 -07:00
gStart9
b42abbd4a2 Always invoke fdisk with sudo fdisk in build/raspberry-pi/make-image.sh (#2123)
Always invoke fdisk with sudo fdisk
2023-03-07 19:09:10 -07:00
Matt Hill
730a55e721 re-add community marketplace and handle missing dependency (#2110)
* re-add community marketplace and handle missing dependency

* feat: Add in the community to migration

* chore: Add in the community marketplace_url update

* change var name to hidden

* chore: Add in the down

Co-authored-by: BluJ <mogulslayer@gmail.com>
2023-03-07 19:09:10 -07:00
Matt Hill
06cf83b901 WIP: IP, pubkey, system time, system uptime, ca fingerprint (#2091)
* closes #923, #2063, #2012, #1153

* add ca fingerprint

* add `server.time`

* add `ip-info` to `server-info`

* add ssh pubkey

* support multiple IPs

* rename key

* add `ca-fingerprint` and `system-start-time`

* fix off-by-one

* update compat cargo lock

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-03-07 19:09:10 -07:00
J M
673e5af030 Feat/logging local (#2103)
* wip: Working on sockets, but can't connect?

* simplify unix socket connection

* wip: Get responses back from the server at least once.

* WIP: Get the sockets working'

* feat: Sockets can start/ stop/ config/ properties/ uninstall

* fix: Restart services

* Fix: Sockets work and can stop main and not kill client

* chore: Add logging to service

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-03-07 19:09:10 -07:00
Aiden McClelland
a0bc16c255 risk mitigation (#2115)
* don't lock db on shutdown

* reorder init
2023-03-07 19:09:10 -07:00
Matt Hill
76b5234f7b alphabetize backup select and recovery select (#2113) 2023-03-07 19:09:10 -07:00
J M
928de47d1d Feat/long running sockets (#2090)
* wip: Working on sockets, but can't connect?

* simplify unix socket connection

* wip: Get responses back from the server at least once.

* WIP: Get the sockets working'

* feat: Sockets can start/ stop/ config/ properties/ uninstall

* fix: Restart services

* Fix: Sockets work and can stop main and not kill client

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-03-07 19:09:10 -07:00
Aiden McClelland
274db6f606 use a vec instead of set for ip (#2112) 2023-03-07 19:08:59 -07:00
Aiden McClelland
89ca0ca927 fix docker storage driver (#2111) 2023-01-12 09:56:54 -07:00
Matt Hill
8047008fa5 Add system rebuild and disk repair to Diagnostic UI (#2093)
* add system rebuild and disk repair to diagnostic

* add `diagnostic.rebuild`

Co-authored-by: Aiden McClelland <me@drbonez.dev>
2023-01-10 15:02:16 -07:00
Matt Hill
f914110626 Feat/logs revamp (#2075)
auto reconnect to logs websocket and hopefully fix scrolling issues
2023-01-10 14:55:11 -07:00
Matt Hill
5656fd0b96 fix config bug for number 0 and also maybe connection icon (#2098)
* fix config bug for number 0 and also maybe connection icon

* add max password length to confirmation input
2023-01-10 14:53:57 -07:00
Aiden McClelland
c3d8c72302 remove tor health daemon (#2101) 2023-01-10 14:34:48 -07:00
J M
1eefff9025 fix: Manager's exit not stopping (#2104) 2023-01-09 16:33:17 -07:00
Aiden McClelland
1dc7c7b0a4 only do standby mode for pi (#2102) 2023-01-09 16:21:30 -07:00
Aiden McClelland
011bac7b4f pin nextest to 0.9.47 (#2099) 2023-01-06 17:24:29 -07:00
Aiden McClelland
dc2d6e60d8 double beep instead of circle of 5ths (#2085) 2022-12-23 11:55:44 -07:00
Aiden McClelland
7809b6e50f delete logs from other machine ids (#2086) 2022-12-23 11:55:33 -07:00
Matt Hill
f7f0370bf5 add nyx (#2064) 2022-12-19 13:42:31 -07:00
Matt Hill
6300fc5364 UI multiple bug fixes (#2072)
* fixes #2071 #2068

* closes #2070

* closes #2046

* fixes #2074

* closes #2045

* closes #2077. Use LAN address instead of IP when opening https
2022-12-19 13:42:05 -07:00
Lucy C
16270cbd1a fix typo for os arch default type (#2079) 2022-12-19 13:41:14 -07:00
Aiden McClelland
3b226dd2c0 fix 0.3.3 OTA update flow for pi (#2048)
* feat: Convert from the copy all bytes over to file-file transfer

* use pi-beep

* fix minor mistakes

* recursive rm

* add fs resize

Co-authored-by: BluJ <mogulslayer@gmail.com>
2022-12-15 13:50:21 -07:00
Mariusz Kogen
4ac61d18ff 📊 include htop (#2042) 2022-12-15 12:04:11 -07:00
Matt Hill
fd7abdb8a4 don't be so fragile when comparing marketplace URLs (#2040)
* don't be so fragile when comparing marketplace URLs

* handle more edges

* minor

* clean up a little
2022-12-15 12:00:01 -07:00
370 changed files with 12766 additions and 6956 deletions

View File

@@ -5,7 +5,7 @@ on:
workflow_dispatch:
env:
RUST_VERSION: "1.62.1"
RUST_VERSION: "1.67.1"
ENVIRONMENT: "dev"
jobs:
@@ -194,7 +194,9 @@ jobs:
if: ${{ matrix.target == 'x86_64' }}
- name: Install nextest
uses: taiki-e/install-action@nextest
uses: taiki-e/install-action@v2
with:
tool: nextest@0.9.47
if: ${{ matrix.target == 'x86_64' }}
- name: Download archive
@@ -203,7 +205,7 @@ jobs:
name: nextest-archive-${{ matrix.target }}
- name: Download nextest (aarch64)
run: wget -O nextest-aarch64.tar.gz https://get.nexte.st/latest/linux-arm
run: wget -O nextest-aarch64.tar.gz https://get.nexte.st/0.9.47/linux-arm
if: ${{ matrix.target == 'aarch64' }}
- name: Run tests

View File

@@ -5,7 +5,7 @@ on:
workflow_dispatch:
env:
NODEJS_VERSION: '16'
NODEJS_VERSION: '16.11.0'
ENVIRONMENT: "dev"
jobs:

View File

@@ -5,7 +5,7 @@ on:
workflow_dispatch:
env:
NODEJS_VERSION: '16'
NODEJS_VERSION: '16.11.0'
ENVIRONMENT: "dev"
jobs:

View File

@@ -36,13 +36,6 @@ jobs:
artifact_name: binfmt.tar
artifact_path: system-images/binfmt/docker-images/aarch64.tar
nc-broadcast:
uses: ./.github/workflows/reusable-workflow.yaml
with:
build_command: make cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast
artifact_name: nc-broadcast.tar
artifact_path: cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast
backend:
uses: ./.github/workflows/backend.yaml
@@ -53,7 +46,7 @@ jobs:
name: Build image
runs-on: ubuntu-latest
timeout-minutes: 60
needs: [compat,utils,binfmt,nc-broadcast,backend,frontend]
needs: [compat,utils,binfmt,backend,frontend]
steps:
- uses: actions/checkout@v3
with:
@@ -77,12 +70,6 @@ jobs:
name: binfmt.tar
path: system-images/binfmt/docker-images/
- name: Download nc-broadcast.tar artifact
uses: actions/download-artifact@v3
with:
name: nc-broadcast.tar
path: cargo-deps/aarch64-unknown-linux-gnu/release
- name: Download js_snapshot artifact
uses: actions/download-artifact@v3
with:
@@ -134,9 +121,9 @@ jobs:
- name: Build image
run: |
make V=1 embassyos-raspi.img --debug
make V=1 eos_raspberrypi-uninit.img --debug
- uses: actions/upload-artifact@v3
with:
name: image
path: embassyos-raspi.img
path: eos_raspberrypi-uninit.img

View File

@@ -31,9 +31,9 @@ jobs:
- name: Install dependencies
run: |
sudo apt update
wget http://ftp.us.debian.org/debian/pool/main/d/debspawn/debspawn_0.6.0-1.1_all.deb
sha256sum ./debspawn_0.6.0-1.1_all.deb | grep 6cdb444844825b82ef388378f2c508fd8886f79a49800d4547a353ea6772d493
sudo apt-get install -y ./debspawn_0.6.0-1.1_all.deb
wget http://ftp.us.debian.org/debian/pool/main/d/debspawn/debspawn_0.6.1-1_all.deb
sha256sum ./debspawn_0.6.1-1_all.deb | grep fb8a3f588438ff9ef51e713ec1d83306db893f0aa97447565e28bbba9c6e90c6
sudo apt-get install -y ./debspawn_0.6.1-1_all.deb
wget https://repo.pureos.net/pureos/pool/main/d/debootstrap/debootstrap_1.0.125pureos1_all.deb
sudo apt-get install -y --allow-downgrades ./debootstrap_1.0.125pureos1_all.deb
wget https://repo.pureos.net/pureos/pool/main/p/pureos-archive-keyring/pureos-archive-keyring_2021.11.0_all.deb

429
CHANGELOG.md Normal file
View File

@@ -0,0 +1,429 @@
# v0.3.3
## Highlights
- x86_64 architecture compatibility
- Kiosk mode - use your Embassy with monitor, keyboard, and mouse (available on x86 builds only, disabled on Raspberry Pi)
- "Updates" tab - view all service updates from all registries in one place
- Various UI/UX improvements
- Various bugfixes and optimizations
## What's Changed
- Minor typo fixes by @kn0wmad in #1887
- Update build pipeline by @moerketh in #1896
- Feature/setup migrate by @elvece in #1841
- Feat/patch migration by @Blu-J in #1890
- make js cancellable by @dr-bonez in #1901
- wip: Making Injectable exec by @Blu-J in #1897
- Fix/debug by @Blu-J in #1909
- chore: Fix on the rsync not having stdout. by @Blu-J in #1911
- install wizard project by @MattDHill in #1893
- chore: Remove the duplicate logging information that is making usele… by @Blu-J in #1912
- Http proxy by @redragonx in #1772
- fix(marketplace): loosen type in categories component by @waterplea in #1918
- set custom meta title by @MattDHill in #1915
- Feature/git hash by @dr-bonez in #1919
- closes #1900 by @dr-bonez in #1920
- feature/marketplace icons by @dr-bonez in #1921
- Bugfix/0.3.3 migration by @dr-bonez in #1922
- feat: Exposing the rsync that we have to the js by @Blu-J in #1907
- Feature/install wizard disk info by @dr-bonez in #1923
- bump shared and marketplace npm versions by @dr-bonez in #1924
- fix error handling when store unreachable by @dr-bonez in #1925
- wait for network online before launching init by @dr-bonez in #1930
- silence service crash notifications by @dr-bonez in #1929
- disable efi by @dr-bonez in #1931
- Tor daemon fix by @redragonx in #1934
- wait for url to be available before launching kiosk by @dr-bonez in #1933
- fix migration to support portable fatties by @dr-bonez in #1935
- Add guid to partition type by @MattDHill in #1932
- add localhost support to the http server by @redragonx in #1939
- refactor setup wizard by @dr-bonez in #1937
- feat(shared): Ticker add new component and use it in marketplace by @waterplea in #1940
- feat: For ota update using rsyncd by @Blu-J in #1938
- Feat/update progress by @MattDHill in #1944
- Fix/app show hidden by @MattDHill in #1948
- create dpkg and iso workflows by @dr-bonez in #1941
- changing ip addr type by @redragonx in #1950
- Create mountpoints first by @k0gen in #1949
- Hard code registry icons by @MattDHill in #1951
- fix: Cleanup by sending a command and kill when dropped by @Blu-J in #1945
- Update setup wizard styling by @elvece in #1954
- Feature/homepage by @elvece in #1956
- Fix millis by @Blu-J in #1960
- fix accessing dev tools by @MattDHill in #1966
- Update/misc UI fixes by @elvece in #1961
- Embassy-init typo by @redragonx in #1959
- feature: 0.3.2 -> 0.3.3 upgrade by @dr-bonez in #1958
- Fix/migrate by @Blu-J in #1962
- chore: Make validation reject containers by @Blu-J in #1970
- get pubkey and encrypt password on login by @elvece in #1965
- Multiple bugs and styling by @MattDHill in #1975
- filter out usb stick during install by @dr-bonez in #1974
- fix http upgrades by @dr-bonez in #1980
- restore interfaces before creating manager by @dr-bonez in #1982
- fuckit: no patch db locks by @dr-bonez in #1969
- fix websocket hangup error by @dr-bonez in #1981
- revert app show to use header and fix back button by @MattDHill in #1984
- Update/marketplace info by @elvece in #1983
- force docker image removal by @dr-bonez in #1985
- do not error if cannot determine live usb device by @dr-bonez in #1986
- remove community registry from FE defaults by @MattDHill in #1988
- check environment by @dr-bonez in #1990
- fix marketplace search and better category disabling by @MattDHill in #1991
- better migration progress bar by @dr-bonez in #1993
- bump cargo version by @dr-bonez in #1995
- preload icons and pause on setup complete for kiosk mode by @MattDHill in #1997
- use squashfs for rpi updates by @dr-bonez in #1998
- do not start progress at 0 before diff complete by @dr-bonez in #1999
- user must click continue in kiosk on success page by @MattDHill in #2001
- fix regex in image rip script by @dr-bonez in #2002
- fix bug with showing embassy drives and center error text by @MattDHill in #2006
- fix partition type by @dr-bonez in #2007
- lowercase service for alphabetic sorting by @MattDHill in #2008
- dont add updates cat by @MattDHill in #2009
- make downloaded page a full html doc by @MattDHill in #2011
- wait for monitor to be attached before launching firefox by @chrisguida in #2005
- UI fixes by @elvece in #2014
- fix: Stop service before by @Blu-J in #2019
- shield links update by @k0gen in #2018
- fix: Undoing the breaking introduced by trying to stop by @Blu-J in #2023
- update link rename from embassy -> system by @elvece in #2027
- initialize embassy before restoring packages by @dr-bonez in #2029
- make procfs an optional dependency so sdk can build on macos by @elvece in #2028
- take(1) for recover select by @MattDHill in #2030
- take one from server info to prevent multiple reqs to registries by @MattDHill in #2032
- remove write lock during backup by @MattDHill in #2033
- fix: Ensure that during migration we make the urls have a trailing slash by @Blu-J in #2036
- fix: Make the restores limited # restore at a time by @Blu-J in #2037
- fix error and display of unknown font weight on success page by @elvece in #2038
## Checksums
```
8602e759d3ece7cf503b9ca43e8419109f14e424617c2703b3771c8801483d7e embassyos_amd64.deb
b5c0d8d1af760881a1b5cf32bd7c5b1d1cf6468f6da594a1b4895a866d03a58c embassyos_amd64.iso
fe518453a7e1a8d8c2be43223a1a12adff054468f8082df0560e1ec50df3dbfd embassyos_raspberrypi.img
7b1ff0ada27b6714062aa991ec31c2d95ac4edf254cd464a4fa251905aa47ebd embassyos_raspberrypi.tar.gz
```
# v0.3.2.1
## What's Changed
- Update index.html copy and styling by @elvece in #1855
- increase maximum avahi entry group size by @dr-bonez in #1869
- bump version by @dr-bonez in #1871
### Linux and Mac
Download the `eos.tar.gz` file, then extract and flash the resulting eos.img to your SD Card
Windows
Download the `eos.zip` file, then extract and flash the resulting eos.img to your SD Card
## SHA-256 Checksums
```
c4b17658910dd10c37df134d5d5fdd6478f962ba1b803d24477d563d44430f96 eos.tar.gz
3a8b29878fe222a9d7cbf645c975b12805704b0f39c7daa46033d22380f9828c eos.zip
dedff3eb408ea411812b8f46e6c6ed32bfbd97f61ec2b85a6be40373c0528256 eos.img
```
# v0.3.2
## Highlights
- Autoscrolling for logs
- Improved connectivity between browser and Embassy
- Switch to Postgres for EOS database for better performance
- Multiple bug fixes and under-the-hood improvements
- Various UI/UX enhancements
- Removal of product keys
Update Hash (SHA256): `d8ce908b06baee6420b45be1119e5eb9341ba8df920d1e255f94d1ffb7cc4de9`
Image Hash (SHA256): `e035cd764e5ad9eb1c60e2f7bc3b9bd7248f42a91c69015c8a978a0f94b90bbb`
Note: This image was uploaded as a gzipped POSIX sparse TAR file. The recommended command for unpacking it on systems that support sparse files is `tar --format=posix --sparse -zxvf eos.tar.gz`
## What's Changed
- formatting by @dr-bonez in #1698
- Update README.md by @kn0wmad in #1705
- Update README.md by @dr-bonez in #1703
- feat: migrate to Angular 14 and RxJS 7 by @waterplea in #1681
- 0312 multiple FE by @MattDHill in #1712
- Fix http requests by @MattDHill in #1717
- Add build-essential to README.md by @chrisguida in #1716
- write image to sparse-aware archive format by @dr-bonez in #1709
- fix: Add modification to the max_user_watches by @Blu-J in #1695
- [Feat] follow logs by @chrisguida in #1714
- Update README.md by @dr-bonez in #1728
- fix build for patch-db client for consistency by @elvece in #1722
- fix cli install by @chrisguida in #1720
- highlight instructions if not viewed by @MattDHill in #1731
- Feat: HttpReader by @redragonx in #1733
- Bugfix/dns by @dr-bonez in #1741
- add x86 build and run unittests to backend pipeline by @moerketh in #1682
- [Fix] websocket connecting and patchDB connection monitoring by @MattDHill in #1738
- Set pipeline job timeouts and add ca-certificates to test container by @moerketh in #1753
- Disable bluetooth properly #862 by @redragonx in #1745
- [feat]: resumable downloads by @dr-bonez in #1746
- Fix/empty properties by @elvece in #1764
- use hostname from patchDB as default server name by @MattDHill in #1758
- switch to postgresql by @dr-bonez in #1763
- remove product key from setup flow by @MattDHill in #1750
- pinning cargo dep versions for CLI by @redragonx in #1775
- fix: Js deep dir by @Blu-J in #1784
- 0.3.2 final cleanup by @dr-bonez in #1782
- expect ui marketplace to be undefined by @MattDHill in #1787
- fix init to exit on failure by @dr-bonez in #1788
- fix search to return more accurate results by @MattDHill in #1792
- update backend dependencies by @dr-bonez in #1796
- use base64 for HTTP headers by @dr-bonez in #1795
- fix: Bad cert of *.local.local is now fixed to correct. by @Blu-J in #1798
- fix duplicate patch updates, add scroll button to setup success by @MattDHill in #1800
- level_slider reclaiming that precious RAM memory by @k0gen in #1799
- stop leaking avahi clients by @dr-bonez in #1802
- fix: Deep is_parent was wrong and could be escaped by @Blu-J in #1801
- prevent cfg str generation from running forever by @dr-bonez in #1804
- better RPC error message by @MattDHill in #1803
- Bugfix/marketplace add by @elvece in #1805
- fix marketplace switching by @MattDHill in #1810
- clean up code and logs by @MattDHill in #1809
- fix: Minor fix that matt wanted by @Blu-J in #1808
- onion replace instead of adding tor repository by @k0gen in #1813
- bank Start as embassy hostname from the beginning by @k0gen in #1814
- add descriptions to marketplace list page by @elvece in #1812
- Fix/encryption by @elvece in #1811
- restructure initialization by @dr-bonez in #1816
- update license by @MattDHill in #1819
- perform system rebuild after updating by @dr-bonez in #1820
- ignore file not found error for delete by @dr-bonez in #1822
- Multiple by @MattDHill in #1823
- Bugfix/correctly package backend job by @moerketh in #1826
- update patch-db by @dr-bonez in #1831
- give name to logs file by @MattDHill in #1833
- play song during update by @dr-bonez in #1832
- Seed patchdb UI data by @elvece in #1835
- update patch db and enable logging by @dr-bonez in #1837
- reduce patch-db log level to warn by @dr-bonez in #1840
- update ts matches to fix properties ordering bug by @elvece in #1843
- handle multiple image tags having the same hash and increase timeout by @dr-bonez in #1844
- retry pgloader up to 5x by @dr-bonez in #1845
- show connection bar right away by @MattDHill in #1849
- dizzy Rebranding to embassyOS by @k0gen in #1851
- update patch db by @MattDHill in #1852
- camera_flash screenshots update by @k0gen in #1853
- disable concurrency and delete tmpdir before retry by @dr-bonez in #1846
## New Contributors
- @redragonx made their first contribution in #1733
# v0.3.1.1
## What's Changed
- whale2 docker stats fix by @k0gen in #1630
- update backend dependencies by @dr-bonez in #1637
- Fix/receipts health by @Blu-J in #1616
- return correct error on failed os download by @dr-bonez in #1636
- fix build by @dr-bonez in #1639
- Update product.yaml by @dr-bonez in #1638
- handle case where selected union enum is invalid after migration by @MattDHill in #1658
- fix: Resolve fighting with NM by @Blu-J in #1660
- sdk: don't allow mounts in inject actions by @chrisguida in #1653
- feat: Variable args by @Blu-J in #1667
- add readme to system-images folder by @elvece in #1665
- Mask chars beyond 16 by @MattDHill in #1666
- chore: Update to have the new version 0.3.1.1 by @Blu-J in #1668
- feat: Make the rename effect by @Blu-J in #1669
- fix migration, add logging by @dr-bonez in #1674
- run build checks only when relevant FE changes by @elvece in #1664
- trust local ca by @dr-bonez in #1670
- lower log level for docker deser fallback message by @dr-bonez in #1672
- refactor build process by @dr-bonez in #1675
- chore: enable strict mode by @waterplea in #1569
- draft releases notes for 0311 by @MattDHill in #1677
- add standby mode by @dr-bonez in #1671
- feat: atomic writing by @Blu-J in #1673
- allow server.update to update to current version by @dr-bonez in #1679
- allow falsey rpc response by @dr-bonez in #1680
- issue notification when individual package restore fails by @dr-bonez in #1685
- replace bang with question mark in html by @MattDHill in #1683
- only validate mounts for inject if eos >=0.3.1.1 by @dr-bonez in #1686
- add marketplace_url to backup metadata for service by @dr-bonez in #1688
- marketplace published at for service by @MattDHill in #1689
- sync data to fs before shutdown by @dr-bonez in #1690
- messaging for restart, shutdown, rebuild by @MattDHill in #1691
- honor shutdown from diagnostic ui by @dr-bonez in #1692
- ask for sudo password immediately during make by @dr-bonez in #1693
- sync blockdev after update by @dr-bonez in #1694
- set Matt as default assignee by @MattDHill in #1697
- NO_KEY for CI images by @dr-bonez in #1700
- fix typo by @dr-bonez in #1702
# v0.3.1
## What's Changed
- Feat bulk locking by @Blu-J in #1422
- Switching SSH keys to start9 user by @k0gen in #1321
- chore: Convert from ajv to ts-matches by @Blu-J in #1415
- Fix/id params by @elvece in #1414
- make nicer update sound by @ProofOfKeags in #1438
- adds product key to error message in setup flow when there is mismatch by @dr-bonez in #1436
- Update README.md to include yq by @cryptodread in #1385
- yin_yang For the peace of mind yin_yang by @k0gen in #1444
- Feature/update sound by @ProofOfKeags in #1439
- Feature/script packing by @ProofOfKeags in #1435
- rename ActionImplementation to PackageProcedure by @dr-bonez in #1448
- Chore/warning cleanse by @ProofOfKeags in #1447
- refactor packing to async by @ProofOfKeags in #1453
- Add nginx config for proxy redirect by @yzernik in #1421
- Proxy local frontend to remote backend by @elvece in #1452
- Feat/js action by @Blu-J in #1437
- Fix/making js work by @Blu-J in #1456
- fix: Dependency vs dependents by @Blu-J in #1462
- refactor: isolate network toast and login redirect to separate services by @waterplea in #1412
- Fix links in CONTRIBUTING.md, update ToC by @BBlackwo in #1463
- Feature/require script consistency by @ProofOfKeags in #1451
- Chore/version 0 3 1 0 by @Blu-J in #1475
- remove interactive TTY requirement from scripts by @moerketh in #1469
- Disable view in marketplace button when side-loaded by @BBlackwo in #1471
- Link to tor address on LAN setup page (#1277) by @BBlackwo in #1466
- UI version updates and welcome message for 0.3.1 by @elvece in #1479
- Update contribution and frontend readme by @BBlackwo in #1467
- Clean up config by @MattDHill in #1484
- Enable Control Groups for Docker containers by @k0gen in #1468
- Fix/patch db unwrap remove by @Blu-J in #1481
- handles spaces in working dir in make-image.sh by @moerketh in #1487
- UI cosmetic improvements by @MattDHill in #1486
- chore: fix the master by @Blu-J in #1495
- generate unique ca names based off of server id by @ProofOfKeags in #1500
- allow embassy-cli not as root by @dr-bonez in #1501
- fix: potential fix for the docker leaking the errors and such by @Blu-J in #1496
- Fix/memory leak docker by @Blu-J in #1505
- fixes serialization of regex pattern + description by @ProofOfKeags in #1509
- allow interactive TTY if available by @dr-bonez in #1508
- fix "missing proxy" error in embassy-cli by @dr-bonez in #1516
- Feat/js known errors by @Blu-J in #1514
- fixes a bug where nginx will crash if eos goes into diagnostic mode a… by @dr-bonez in #1506
- fix: restart/ uninstall sometimes didn't work by @Blu-J in #1527
- add "error_for_status" to static file downloads by @dr-bonez in #1532
- fixes #1169 by @dr-bonez in #1533
- disable unnecessary services by @dr-bonez in #1535
- chore: Update types to match embassyd by @Blu-J in #1539
- fix: found an unsaturated args fix by @Blu-J in #1540
- chore: Update the lite types to include the union and enum by @Blu-J in #1542
- Feat: Make the js check for health by @Blu-J in #1543
- fix incorrect error message for deserialization in ValueSpecString by @dr-bonez in #1547
- fix dependency/dependent id issue by @dr-bonez in #1546
- add textarea to ValueSpecString by @dr-bonez in #1534
- Feat/js metadata by @Blu-J in #1548
- feat: uid/gid/mode added to metadata by @Blu-J in #1551
- Strict null checks by @waterplea in #1464
- fix backend builds for safe git config by @elvece in #1549
- update should send version not version spec by @elvece in #1559
- chore: Add tracing for debuging the js procedure slowness by @Blu-J in #1552
- Reset password through setup wizard by @MattDHill in #1490
- feat: Make sdk by @Blu-J in #1564
- fix: Missing a feature flat cfg by @Blu-J in #1563
- fixed sentence that didn't make sense by @BitcoinMechanic in #1565
- refactor(patch-db): use PatchDB class declaratively by @waterplea in #1562
- fix bugs with config and clean up dev options by @MattDHill in #1558
- fix: Make it so we only need the password on the backup by @Blu-J in #1566
- kill all sessions and remove ripple effect by @MattDHill in #1567
- adjust service marketplace button for installation source relevance by @elvece in #1571
- fix connection failure display monitoring and other style changes by @MattDHill in #1573
- add dns server to embassy-os by @dr-bonez in #1572
- Fix/mask generic inputs by @elvece in #1570
- Fix/sideload icon type by @elvece in #1577
- add avahi conditional compilation flags to dns by @dr-bonez in #1579
- selective backups and better drive selection interface by @MattDHill in #1576
- Feat/use modern tor by @kn0wmad in #1575
- update welcome notes for 031 by @MattDHill in #1580
- fix: Properties had a null description by @Blu-J in #1581
- fix backup lock ordering by @dr-bonez in #1582
- Bugfix/backup lock order by @dr-bonez in #1583
- preload redacted and visibility hidden by @MattDHill in #1584
- turn chevron red in config if error by @MattDHill in #1586
- switch to utc by @dr-bonez in #1587
- update patchdb for array patch fix by @elvece in #1588
- filter package ids when backing up by @dr-bonez in #1589
- add select/deselect all to backups and enum lists by @elvece in #1590
- fix: Stop the buffer from being dropped prematurely by @Blu-J in #1591
- chore: commit the snapshots by @Blu-J in #1592
- nest new entries and message updates better by @MattDHill in #1595
- fix html parsing in logs by @elvece in #1598
- don't crash service if io-format is set for main by @dr-bonez in #1599
- strip html from colors from logs by @elvece in #1604
- feat: fetch effect by @Blu-J in #1605
- Fix/UI misc by @elvece in #1606
- display bottom item in backup list and refactor for cleanliness by @MattDHill in #1609
# v0.3.0.3
## What's Changed
- refactor: decompose app component by @waterplea in #1359
- Update Makefile by @kn0wmad in #1400
- ⬐ smarter wget by @k0gen in #1401
- prevent the kernel from OOMKilling embassyd by @dr-bonez in #1402
- attempt to heal when health check passes by @dr-bonez in #1420
- Feat new locking by @Blu-J in #1384
- version bump by @dr-bonez in #1423
- Update server-show.page.ts by @chrisguida in #1424
- Bump async from 2.6.3 to 2.6.4 in /frontend by @dependabot in #1426
- Update index.html by @mirkoRainer in #1419
## New Contributors
- @dependabot made their first contribution in #1426
- @mirkoRainer made their first contribution in #1419
# v0.3.0.2
- Minor compatibility fixes
- #1392
- #1390
- #1388
# v0.3.0.1
Minor bugfixes and performance improvements
# v0.3.0
- Websockets
- Real-time sync
- Patch DB
- Keeps frontend and backend state closely mirrored. Most operating systems run their GUI locally; here the GUI is served over the web, and Patch DB together with websockets closes the resulting perceptual gap.
- Switch kernel from Raspbian to Ubuntu
- 64 bit
- Possibility for alternative hardware
- Merging of lifeline, agent, and appmgr into embassyd
- Elimination of Haskell in favor of pure Rust
- Unified API for interacting with the OS
- Easier to build from source
- OS (quarantined from OS and service data)
- Kernel/boot
- Persistent metadata (disk guid, product key)
- Rootfs (the os)
- Reserved (for updates) - swaps with rootfs
- Revamped OS updates
- Progress indicators
- Non-blocking
- Simple swap on reboot
- Revamped setup flow
- Elimination of Setup App (Apple/Google dependencies gone)
- Setup Wizard on http://embassy.local
- Revamped service config
- Dynamic, validated forms
- Diagnostic UI
- Missing disk, wrong disk, corrupt disk
- Turing complete API for actions, backup/restore, config, properties, notifications, health checks, and dependency requirements
- Optional, arbitrary inputs for actions
- Install, update, recover progress for apps
- Multiple interfaces
- E.g. rpc, p2p, ui
- Health checks
- Developer defined
- Internal, dependencies, and/or external
- Full Embassy backup (diff-based)
- External drive support/requirement
- Single at first
- Groundwork for extension and mirror drives
- Disk encryption
- Random key encrypted with static value
- Groundwork for swapping static value with chosen password
- Session Management
- List all active sessions
- Option to kill
- More robust and extensive logs
- Donations

117
Makefile
View File

@@ -1,6 +1,6 @@
RASPI_TARGETS := embassyos-raspi.img embassyos-raspi.tar.gz gzip lite-upgrade.img
ARCH := $(shell if echo $(RASPI_TARGETS) | grep -qw "$(MAKECMDGOALS)"; then echo aarch64; else uname -m; fi)
RASPI_TARGETS := eos_raspberrypi-uninit.img eos_raspberrypi-uninit.tar.gz
OS_ARCH := $(shell if echo $(RASPI_TARGETS) | grep -qw "$(MAKECMDGOALS)"; then echo raspberrypi; else uname -m; fi)
ARCH := $(shell if [ "$(OS_ARCH)" = "raspberrypi" ]; then echo aarch64; else echo $(OS_ARCH); fi)
ENVIRONMENT_FILE = $(shell ./check-environment.sh)
GIT_HASH_FILE = $(shell ./check-git-hash.sh)
VERSION_FILE = $(shell ./check-version.sh)
@@ -19,24 +19,34 @@ FRONTEND_DIAGNOSTIC_UI_SRC := $(shell find frontend/projects/diagnostic-ui)
FRONTEND_INSTALL_WIZARD_SRC := $(shell find frontend/projects/install-wizard)
PATCH_DB_CLIENT_SRC := $(shell find patch-db/client -not -path patch-db/client/dist)
GZIP_BIN := $(shell which pigz || which gzip)
$(shell sudo true)
ALL_TARGETS := $(EMBASSY_BINS) system-images/compat/docker-images/$(ARCH).tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar $(EMBASSY_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE)
ifeq ($(REMOTE),)
mkdir = mkdir -p $1
rm = rm -rf $1
cp = cp -r $1 $2
else
mkdir = ssh $(REMOTE) 'mkdir -p $1'
rm = ssh $(REMOTE) 'sudo rm -rf $1'
define cp
tar --transform "s|^$1|x|" -czv -f- $1 | ssh $(REMOTE) "sudo tar --transform 's|^x|$2|' -xzv -f- -C /"
endef
endif
.DELETE_ON_ERROR:
.PHONY: all gzip install clean format sdk snapshots frontends ui backend
.PHONY: all gzip install clean format sdk snapshots frontends ui backend reflash eos_raspberrypi.img sudo
all: $(EMBASSY_SRC) $(EMBASSY_BINS) system-images/compat/docker-images/aarch64.tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE)
all: $(ALL_TARGETS)
gzip: embassyos-raspi.tar.gz
embassyos-raspi.tar.gz: embassyos-raspi.img
tar --format=posix -cS -f- embassyos-raspi.img | $(GZIP_BIN) > embassyos-raspi.tar.gz
sudo:
sudo true
clean:
rm -f 2022-01-28-raspios-bullseye-arm64-lite.zip
rm -f raspios.img
rm -f embassyos-raspi.img
rm -f embassyos-raspi.tar.gz
rm -f eos_raspberrypi-uninit.img
rm -f eos_raspberrypi-uninit.tar.gz
rm -f ubuntu.img
rm -f product_key.txt
rm -f system-images/**/*.tar
@@ -62,51 +72,73 @@ format:
sdk:
cd backend/ && ./install-sdk.sh
embassyos-raspi.img: all raspios.img cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast
! test -f embassyos-raspi.img || rm embassyos-raspi.img
eos_raspberrypi-uninit.img: $(ALL_TARGETS) raspios.img cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep | sudo
! test -f eos_raspberrypi-uninit.img || rm eos_raspberrypi-uninit.img
./build/raspberry-pi/make-image.sh
lite-upgrade.img: raspios.img cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast $(BUILD_SRC) eos.raspberrypi.squashfs
lite-upgrade.img: raspios.img cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep $(BUILD_SRC) eos.raspberrypi.squashfs
! test -f lite-upgrade.img || rm lite-upgrade.img
./build/raspberry-pi/make-upgrade-image.sh
eos_raspberrypi.img: raspios.img $(BUILD_SRC) eos.raspberrypi.squashfs $(VERSION_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE)
eos_raspberrypi.img: raspios.img $(BUILD_SRC) eos.raspberrypi.squashfs $(VERSION_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) | sudo
! test -f eos_raspberrypi.img || rm eos_raspberrypi.img
./build/raspberry-pi/make-initialized-image.sh
# For creating os images. DO NOT USE
install: all
mkdir -p $(DESTDIR)/usr/bin
cp backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-init $(DESTDIR)/usr/bin/
cp backend/target/$(ARCH)-unknown-linux-gnu/release/embassyd $(DESTDIR)/usr/bin/
cp backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-cli $(DESTDIR)/usr/bin/
cp backend/target/$(ARCH)-unknown-linux-gnu/release/avahi-alias $(DESTDIR)/usr/bin/
install: $(ALL_TARGETS)
$(call mkdir,$(DESTDIR)/usr/bin)
$(call cp,backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-init,$(DESTDIR)/usr/bin/embassy-init)
$(call cp,backend/target/$(ARCH)-unknown-linux-gnu/release/embassyd,$(DESTDIR)/usr/bin/embassyd)
$(call cp,backend/target/$(ARCH)-unknown-linux-gnu/release/embassy-cli,$(DESTDIR)/usr/bin/embassy-cli)
$(call cp,backend/target/$(ARCH)-unknown-linux-gnu/release/avahi-alias,$(DESTDIR)/usr/bin/avahi-alias)
mkdir -p $(DESTDIR)/usr/lib
rm -rf $(DESTDIR)/usr/lib/embassy
cp -r build/lib $(DESTDIR)/usr/lib/embassy
$(call mkdir,$(DESTDIR)/usr/lib)
$(call rm,$(DESTDIR)/usr/lib/embassy)
$(call cp,build/lib,$(DESTDIR)/usr/lib/embassy)
cp ENVIRONMENT.txt $(DESTDIR)/usr/lib/embassy/
cp GIT_HASH.txt $(DESTDIR)/usr/lib/embassy/
cp VERSION.txt $(DESTDIR)/usr/lib/embassy/
$(call cp,ENVIRONMENT.txt,$(DESTDIR)/usr/lib/embassy/ENVIRONMENT.txt)
$(call cp,GIT_HASH.txt,$(DESTDIR)/usr/lib/embassy/GIT_HASH.txt)
$(call cp,VERSION.txt,$(DESTDIR)/usr/lib/embassy/VERSION.txt)
mkdir -p $(DESTDIR)/usr/lib/embassy/container
cp libs/target/aarch64-unknown-linux-musl/release/embassy_container_init $(DESTDIR)/usr/lib/embassy/container/embassy_container_init.arm64
cp libs/target/x86_64-unknown-linux-musl/release/embassy_container_init $(DESTDIR)/usr/lib/embassy/container/embassy_container_init.amd64
$(call mkdir,$(DESTDIR)/usr/lib/embassy/container)
$(call cp,libs/target/aarch64-unknown-linux-musl/release/embassy_container_init,$(DESTDIR)/usr/lib/embassy/container/embassy_container_init.arm64)
$(call cp,libs/target/x86_64-unknown-linux-musl/release/embassy_container_init,$(DESTDIR)/usr/lib/embassy/container/embassy_container_init.amd64)
mkdir -p $(DESTDIR)/usr/lib/embassy/system-images
cp system-images/compat/docker-images/aarch64.tar $(DESTDIR)/usr/lib/embassy/system-images/compat.tar
cp system-images/utils/docker-images/$(ARCH).tar $(DESTDIR)/usr/lib/embassy/system-images/utils.tar
cp system-images/binfmt/docker-images/$(ARCH).tar $(DESTDIR)/usr/lib/embassy/system-images/binfmt.tar
$(call mkdir,$(DESTDIR)/usr/lib/embassy/system-images)
$(call cp,system-images/compat/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/embassy/system-images/compat.tar)
$(call cp,system-images/utils/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/embassy/system-images/utils.tar)
$(call cp,system-images/binfmt/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/embassy/system-images/binfmt.tar)
mkdir -p $(DESTDIR)/var/www/html
cp -r frontend/dist/diagnostic-ui $(DESTDIR)/var/www/html/diagnostic
cp -r frontend/dist/setup-wizard $(DESTDIR)/var/www/html/setup
cp -r frontend/dist/install-wizard $(DESTDIR)/var/www/html/install
cp -r frontend/dist/ui $(DESTDIR)/var/www/html/main
cp index.html $(DESTDIR)/var/www/html/
$(call mkdir,$(DESTDIR)/var/www/html)
$(call cp,frontend/dist/diagnostic-ui,$(DESTDIR)/var/www/html/diagnostic)
$(call cp,frontend/dist/setup-wizard,$(DESTDIR)/var/www/html/setup)
$(call cp,frontend/dist/install-wizard,$(DESTDIR)/var/www/html/install)
$(call cp,frontend/dist/ui,$(DESTDIR)/var/www/html/main)
$(call cp,index.html,$(DESTDIR)/var/www/html/index.html)
system-images/compat/docker-images/aarch64.tar: $(COMPAT_SRC)
update-overlay:
@echo "\033[33m!!! THIS WILL ONLY REFLASH YOUR DEVICE IN MEMORY !!!\033[0m"
@echo "\033[33mALL CHANGES WILL BE REVERTED IF YOU RESTART THE DEVICE\033[0m"
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
@if [ "`ssh $(REMOTE) 'cat /usr/lib/embassy/VERSION.txt'`" != "`cat ./VERSION.txt`" ]; then >&2 echo "Embassy requires migrations: update-overlay is unavailable." && false; fi
@if ssh $(REMOTE) "pidof embassy-init"; then >&2 echo "Embassy in INIT: update-overlay is unavailable." && false; fi
ssh $(REMOTE) "sudo systemctl stop embassyd"
$(MAKE) install REMOTE=$(REMOTE) OS_ARCH=$(OS_ARCH)
ssh $(REMOTE) "sudo systemctl start embassyd"
update:
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
ssh $(REMOTE) "sudo rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next/"
$(MAKE) install REMOTE=$(REMOTE) DESTDIR=/media/embassy/next OS_ARCH=$(OS_ARCH)
ssh $(REMOTE) "sudo touch /media/embassy/config/upgrade && sudo sync && sudo reboot"
emulate-reflash:
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
ssh $(REMOTE) "sudo rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next/"
$(MAKE) install REMOTE=$(REMOTE) DESTDIR=/media/embassy/next OS_ARCH=$(OS_ARCH)
ssh $(REMOTE) "sudo touch /media/embassy/config/upgrade && sudo rm -f /media/embassy/config/disk.guid && sudo sync && sudo reboot"
system-images/compat/docker-images/aarch64.tar system-images/compat/docker-images/x86_64.tar: $(COMPAT_SRC)
cd system-images/compat && make
system-images/utils/docker-images/aarch64.tar system-images/utils/docker-images/x86_64.tar: $(UTILS_SRC)
@@ -176,3 +208,6 @@ backend: $(EMBASSY_BINS)
cargo-deps/aarch64-unknown-linux-gnu/release/nc-broadcast:
./build-cargo-dep.sh nc-broadcast
cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep:
./build-cargo-dep.sh pi-beep

View File

@@ -22,12 +22,12 @@ This is the most convenient option. Simply [buy an Embassy](https://start9.com)
### :construction_worker: Build your own Embassy
While not as convenient as buying an Embassy, this option is easier than you might imagine, and there are 4 reasons why you might prefer it:
1. You already have a Raspberry Pi and would like to re-purpose it.
1. You already have your own hardware.
1. You want to save on shipping costs.
1. You prefer not to divulge your physical address.
1. You just like building things.
To pursue this option, follow this [guide](https://start9.com/latest/diy).
To pursue this option, follow one of our [DIY guides](https://start9.com/latest/diy).
### :hammer_and_wrench: Build embassyOS from Source
@@ -35,7 +35,7 @@ embassyOS can be built from source, for personal use, for free.
A detailed guide for doing so can be found [here](https://github.com/Start9Labs/embassy-os/blob/master/build/README.md).
## :heart: Contributing
There are multiple ways to contribute: work directly on embassyOS, package a service for the marketplace, or help with documentation and guides. To learn more about contributing, see [here](https://github.com/Start9Labs/embassy-os/blob/master/CONTRIBUTING.md).
There are multiple ways to contribute: work directly on embassyOS, package a service for the marketplace, or help with documentation and guides. To learn more about contributing, see [here](https://docs.start9.com/latest/contribute/) or [here](https://github.com/Start9Labs/embassy-os/blob/master/CONTRIBUTING.md).
## UI Screenshots
<p align="center">

1615
backend/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -14,7 +14,7 @@ keywords = [
name = "embassy-os"
readme = "README.md"
repository = "https://github.com/Start9Labs/embassy-os"
version = "0.3.3"
version = "0.3.4"
[lib]
name = "embassy"
@@ -48,6 +48,11 @@ unstable = ["patch-db/unstable"]
[dependencies]
aes = { version = "0.7.5", features = ["ctr"] }
async-compression = { version = "0.3.15", features = [
"gzip",
"brotli",
"tokio",
] }
async-stream = "0.3.3"
async-trait = "0.1.56"
avahi-sys = { git = "https://github.com/Start9Labs/avahi-sys", version = "0.10.0", branch = "feature/dynamic-linking", features = [
@@ -62,7 +67,8 @@ bytes = "1"
chrono = { version = "0.4.19", features = ["serde"] }
clap = "3.2.8"
color-eyre = "0.6.1"
cookie_store = "0.16.1"
cookie = "0.16.2"
cookie_store = "0.19.0"
current_platform = "0.2.0"
digest = "0.10.3"
digest-old = { package = "digest", version = "0.9.0" }
@@ -75,6 +81,7 @@ emver = { version = "0.1.7", git = "https://github.com/Start9Labs/emver-rs.git",
fd-lock-rs = "0.1.4"
futures = "0.3.21"
git-version = "0.3.5"
gpt = "3.0.0"
helpers = { path = "../libs/helpers" }
embassy_container_init = { path = "../libs/embassy_container_init" }
hex = "0.4.3"
@@ -84,6 +91,8 @@ hyper = { version = "0.14.20", features = ["full"] }
hyper-ws-listener = "0.2.0"
imbl = "2.0.0"
indexmap = { version = "1.9.1", features = ["serde"] }
ipnet = { version = "2.7.1", features = ["serde"] }
iprange = { version = "0.6.7", features = ["serde"] }
isocountry = "0.3.2"
itertools = "0.10.3"
josekit = "0.8.1"
@@ -103,17 +112,18 @@ openssl = { version = "0.10.41", features = ["vendored"] }
patch-db = { version = "*", path = "../patch-db/patch-db", features = [
"trace",
] }
p256 = { version = "0.12.0", features = ["pem"] }
pbkdf2 = "0.11.0"
pin-project = "1.0.11"
pkcs8 = { version = "0.9.0", features = ["std"] }
prettytable-rs = "0.9.0"
prettytable-rs = "0.10.0"
proptest = "1.0.0"
proptest-derive = "0.3.0"
rand = { version = "0.8.5", features = ["std"] }
rand-old = { package = "rand", version = "0.7.3" }
regex = "1.6.0"
reqwest = { version = "0.11.11", features = ["stream", "json", "socks"] }
reqwest_cookie_store = "0.4.0"
reqwest_cookie_store = "0.5.0"
rpassword = "7.0.0"
rpc-toolkit = "0.2.2"
rust-argon2 = "1.0.0"
@@ -133,11 +143,12 @@ sqlx = { version = "0.6.0", features = [
"runtime-tokio-rustls",
"postgres",
] }
ssh-key = { version = "0.5.1", features = ["ed25519"] }
stderrlog = "0.5.3"
tar = "0.4.38"
thiserror = "1.0.31"
tokio = { version = "1.21.2", features = ["full"] }
tokio-stream = { version = "0.1.9", features = ["io-util", "sync"] }
tokio = { version = "1.23", features = ["full"] }
tokio-stream = { version = "0.1.11", features = ["io-util", "sync", "net"] }
tokio-tar = { git = "https://github.com/dr-bonez/tokio-tar.git" }
tokio-tungstenite = { version = "0.17.1", features = ["native-tls"] }
tokio-rustls = "0.23.4"
@@ -151,6 +162,7 @@ trust-dns-server = "0.22.0"
typed-builder = "0.10.0"
url = { version = "2.2.2", features = ["serde"] }
uuid = { version = "1.1.2", features = ["v4"] }
zeroize = "1.5.7"
[profile.test]
opt-level = 3

View File

@@ -2,7 +2,7 @@
Description=Embassy Init
After=network-online.target
Requires=network-online.target
Wants=avahi-daemon.service tor.service
Wants=avahi-daemon.service
[Service]
Type=oneshot

View File

@@ -0,0 +1,62 @@
-- Add migration script here
-- Migration: consolidate server identity and crypto material into the
-- `account` row (id = 0) and introduce per-(package, interface) network keys.

-- pgcrypto supplies gen_random_bytes(), used below to seed network_key.
CREATE EXTENSION pgcrypto;

-- New identity/crypto columns on the singleton account row.
-- network_key is constrained to exactly 32 bytes.
ALTER TABLE
    account
ADD
    COLUMN server_id TEXT,
ADD
    COLUMN hostname TEXT,
ADD
    COLUMN network_key BYTEA CHECK (length(network_key) = 32),
ADD
    COLUMN root_ca_key_pem TEXT,
ADD
    COLUMN root_ca_cert_pem TEXT;

-- Backfill the account row: generate a fresh 32-byte network key and copy
-- the existing root CA key/cert out of the certificates table (row id = 0).
-- NOTE(review): assumes certificates id = 0 holds the root CA — confirm
-- against the pre-migration schema.
UPDATE
    account
SET
    network_key = gen_random_bytes(32),
    root_ca_key_pem = (
        SELECT
            priv_key_pem
        FROM
            certificates
        WHERE
            id = 0
    ),
    root_ca_cert_pem = (
        SELECT
            certificate_pem
        FROM
            certificates
        WHERE
            id = 0
    )
WHERE
    id = 0;

-- tor_key becomes optional (superseded by network_key); the new columns are
-- made NOT NULL now that the backfill above has populated them.
ALTER TABLE
    account
ALTER COLUMN
    tor_key DROP NOT NULL,
ALTER COLUMN
    network_key
SET
    NOT NULL,
ALTER COLUMN
    root_ca_key_pem
SET
    NOT NULL,
ALTER COLUMN
    root_ca_cert_pem
SET
    NOT NULL;

-- One 32-byte key per (package, interface) pair, replacing the tor-specific
-- key storage for general network use.
CREATE TABLE IF NOT EXISTS network_keys (
    package TEXT NOT NULL,
    interface TEXT NOT NULL,
    key BYTEA NOT NULL CHECK (length(key) = 32),
    PRIMARY KEY (package, interface)
);

View File

@@ -1,6 +1,6 @@
{
"db": "PostgreSQL",
"094882d4d46d52e814f9aaf5fae172a5dd745b06cbde347f47b18e6498167269": {
"1ce5254f27de971fd87f5ab66d300f2b22433c86617a0dbf796bf2170186dd2e": {
"describe": {
"columns": [],
"nullable": [],
@@ -8,35 +8,11 @@
"Left": [
"Text",
"Text",
"Text"
"Bytea"
]
}
},
"query": "UPDATE certificates SET priv_key_pem = $1, certificate_pem = $2, updated_at = now() WHERE lookup_string = $3"
},
"165daa7d6a60cb42122373b2c5ac7d39399bcc99992f0002ee7bfef50a8daceb": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": []
}
},
"query": "DELETE FROM certificates WHERE id = 0 OR id = 1;"
},
"1f7936d27d63f01118ecd6f824e8a79607ed2b6e6def23f3e2487466dd2ddfe1": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text"
]
}
},
"query": "INSERT INTO certificates (priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES ($1, $2, $3, now(), now())"
"query": "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING"
},
"21471490cdc3adb206274cc68e1ea745ffa5da4479478c1fd2158a45324b1930": {
"describe": {
@@ -50,20 +26,6 @@
},
"query": "DELETE FROM ssh_keys WHERE fingerprint = $1"
},
"22613628ff50341fdc35366e194fdcd850118824763cfe0dfff68dadc72167e9": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
}
},
"query": "INSERT INTO tor (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO UPDATE SET key = $3"
},
"28ea34bbde836e0618c5fc9bb7c36e463c20c841a7d6a0eb15be0f24f4a928ec": {
"describe": {
"columns": [
@@ -102,24 +64,6 @@
},
"query": "SELECT hostname, path, username, password FROM cifs_shares WHERE id = $1"
},
"3502e58f2ab48fb4566d21c920c096f81acfa3ff0d02f970626a4dcd67bac71d": {
"describe": {
"columns": [
{
"name": "tor_key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT tor_key FROM account"
},
"4099028a5c0de578255bf54a67cef6cb0f1e9a4e158260700f1639dd4b438997": {
"describe": {
"columns": [
@@ -152,32 +96,6 @@
},
"query": "SELECT * FROM ssh_keys WHERE fingerprint = $1"
},
"46815a4ac2c43e1dfbab3c0017ed09d5c833062e523205db4245a5218b2562b8": {
"describe": {
"columns": [
{
"name": "priv_key_pem",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "certificate_pem",
"ordinal": 1,
"type_info": "Text"
}
],
"nullable": [
false,
false
],
"parameters": {
"Left": [
"Text"
]
}
},
"query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE lookup_string = $1"
},
"4691e3a2ce80b59009ac17124f54f925f61dc5ea371903e62cdffa5d7b67ca96": {
"describe": {
"columns": [
@@ -238,24 +156,6 @@
},
"query": "UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = $1"
},
"548448e8ed8bcdf9efdc813d65af2cc55064685293b936f0f09e07f91a328eb9": {
"describe": {
"columns": [
{
"name": "setval",
"ordinal": 0,
"type_info": "Int8"
}
],
"nullable": [
null
],
"parameters": {
"Left": []
}
},
"query": "SELECT setval('certificates_id_seq', GREATEST(MAX(id) + 1, nextval('certificates_id_seq') - 1)) FROM certificates"
},
"629be61c3c341c131ddbbff0293a83dbc6afd07cae69d246987f62cf0cc35c2a": {
"describe": {
"columns": [
@@ -295,30 +195,6 @@
},
"query": "SELECT key FROM tor WHERE package = $1 AND interface = $2"
},
"6c96d76bffcc5f03290d8d8544a58521345ed2a843a509b17bbcd6257bb81821": {
"describe": {
"columns": [
{
"name": "priv_key_pem",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "certificate_pem",
"ordinal": 1,
"type_info": "Text"
}
],
"nullable": [
false,
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE id = 1;"
},
"6d35ccf780fb2bb62586dd1d3df9c1550a41ee580dad3f49d35cb843ebef10ca": {
"describe": {
"columns": [],
@@ -331,6 +207,28 @@
},
"query": "UPDATE session SET last_active = CURRENT_TIMESTAMP WHERE id = $1 AND logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP"
},
"770c1017734720453dc87b58c385b987c5af5807151ff71a59000014586752e0": {
"describe": {
"columns": [
{
"name": "key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
false
],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
}
},
"query": "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO UPDATE SET package = EXCLUDED.package RETURNING key"
},
"7b64f032d507e8ffe37c41f4c7ad514a66c421a11ab04c26d89a7aa8f6b67210": {
"describe": {
"columns": [
@@ -394,6 +292,23 @@
},
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications WHERE id < $1 ORDER BY id DESC LIMIT $2"
},
"7c7a3549c997eb75bf964ea65fbb98a73045adf618696cd838d79203ef5383fb": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Bytea",
"Text",
"Text"
]
}
},
"query": "\n INSERT INTO account (\n id,\n server_id,\n hostname,\n password,\n network_key,\n root_ca_key_pem,\n root_ca_cert_pem\n ) VALUES (\n 0, $1, $2, $3, $4, $5, $6\n ) ON CONFLICT (id) DO UPDATE SET\n server_id = EXCLUDED.server_id,\n hostname = EXCLUDED.hostname,\n password = EXCLUDED.password,\n network_key = EXCLUDED.network_key,\n root_ca_key_pem = EXCLUDED.root_ca_key_pem,\n root_ca_cert_pem = EXCLUDED.root_ca_cert_pem\n "
},
"7e0649d839927e57fa03ee51a2c9f96a8bdb0fc97ee8a3c6df1069e1e2b98576": {
"describe": {
"columns": [],
@@ -536,33 +451,6 @@
},
"query": "DELETE FROM cifs_shares WHERE id = $1"
},
"a645d636be810a4ba61dcadf22e90de6e9baf3614aa9e97f053ff480cb3118a2": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Bytea"
]
}
},
"query": "INSERT INTO tor (package, interface, key) VALUES ($1, 'main', $2) ON CONFLICT (package, interface) DO UPDATE SET key = $2"
},
"a6645d91f76b3d5fac2191ea3bec5dab7d7d124715fde02e6a816fa5dbc7acf2": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Int4",
"Text",
"Bytea"
]
}
},
"query": "INSERT INTO account (id, password, tor_key) VALUES ($1, $2, $3) ON CONFLICT (id) DO UPDATE SET password = $2, tor_key = $3"
},
"a6b0c8909a3a5d6d9156aebfb359424e6b5a1d1402e028219e21726f1ebd282e": {
"describe": {
"columns": [
@@ -609,19 +497,6 @@
},
"query": "UPDATE cifs_shares SET hostname = $1, path = $2, username = $3, password = $4 WHERE id = $5"
},
"cec8112be0ebc02ef7e651631be09efe26d1677b5b8aa95ceb3a92aff1afdbcc": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text"
]
}
},
"query": "INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (1, $1, $2, NULL, now(), now())"
},
"d5117054072476377f3c4f040ea429d4c9b2cf534e76f35c80a2bf60e8599cca": {
"describe": {
"columns": [
@@ -657,19 +532,6 @@
},
"query": "INSERT INTO notifications (package_id, code, level, title, message, data) VALUES ($1, $2, $3, $4, $5, $6)"
},
"df4428ccb891bd791824bcd7990550cc9651e1cfaab1db33833ff7959d113b2c": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text"
]
}
},
"query": "INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (0, $1, $2, NULL, now(), now())"
},
"e185203cf84e43b801dfb23b4159e34aeaef1154dcd3d6811ab504915497ccf7": {
"describe": {
"columns": [],
@@ -682,17 +544,23 @@
},
"query": "DELETE FROM notifications WHERE id = $1"
},
"e25e53c45c5a494a45cdb4d145de507df6f322ac6706e71b86895f1c64195f41": {
"e545696735f202f9d13cf22a561f3ff3f9aed7f90027a9ba97634bcb47d772f0": {
"describe": {
"columns": [],
"nullable": [],
"columns": [
{
"name": "tor_key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
true
],
"parameters": {
"Left": [
"Text"
]
"Left": []
}
},
"query": "UPDATE account SET password = $1"
"query": "SELECT tor_key FROM account WHERE id = 0"
},
"e5843c5b0e7819b29aa1abf2266799bd4f82e761837b526a0972c3d4439a264d": {
"describe": {
@@ -708,21 +576,33 @@
},
"query": "INSERT INTO session (id, user_agent, metadata) VALUES ($1, $2, $3)"
},
"e85749336fce4afaf16627bee8cfcb70be6f189ea7d1f05f9a26bead4be11839": {
"e95322a8e2ae3b93f1e974b24c0b81803f1e9ec9e8ebbf15cafddfc1c5a028ed": {
"describe": {
"columns": [
{
"name": "interface",
"name": "package",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "key",
"name": "interface",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "key",
"ordinal": 2,
"type_info": "Bytea"
},
{
"name": "tor_key?",
"ordinal": 3,
"type_info": "Bytea"
}
],
"nullable": [
false,
false,
false,
false
],
@@ -732,7 +612,7 @@
]
}
},
"query": "SELECT interface, key FROM tor WHERE package = $1"
"query": "\n SELECT\n network_keys.package,\n network_keys.interface,\n network_keys.key,\n tor.key AS \"tor_key?\"\n FROM\n network_keys\n LEFT JOIN\n tor\n ON\n network_keys.package = tor.package\n AND\n network_keys.interface = tor.interface\n WHERE\n network_keys.package = $1\n "
},
"eb750adaa305bdbf3c5b70aaf59139c7b7569602adb58f2d6b3a94da4f167b0a": {
"describe": {
@@ -769,30 +649,6 @@
},
"query": "INSERT INTO cifs_shares (hostname, path, username, password) VALUES ($1, $2, $3, $4) RETURNING id"
},
"ed848affa5bf92997cd441e3a50b3616b6724df3884bd9d199b3225e0bea8a54": {
"describe": {
"columns": [
{
"name": "priv_key_pem",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "certificate_pem",
"ordinal": 1,
"type_info": "Text"
}
],
"nullable": [
false,
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE id = 0;"
},
"f6d1c5ef0f9d9577bea8382318967b9deb46da75788c7fe6082b43821c22d556": {
"describe": {
"columns": [],
@@ -806,5 +662,83 @@
}
},
"query": "INSERT INTO ssh_keys (fingerprint, openssh_pubkey, created_at) VALUES ($1, $2, $3)"
},
"f7d2dae84613bcef330f7403352cc96547f3f6dbec11bf2eadfaf53ad8ab51b5": {
"describe": {
"columns": [
{
"name": "network_key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT network_key FROM account WHERE id = 0"
},
"fe6e4f09f3028e5b6b6259e86cbad285680ce157aae9d7837ac020c8b2945e7f": {
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Int4"
},
{
"name": "password",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "tor_key",
"ordinal": 2,
"type_info": "Bytea"
},
{
"name": "server_id",
"ordinal": 3,
"type_info": "Text"
},
{
"name": "hostname",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "network_key",
"ordinal": 5,
"type_info": "Bytea"
},
{
"name": "root_ca_key_pem",
"ordinal": 6,
"type_info": "Text"
},
{
"name": "root_ca_cert_pem",
"ordinal": 7,
"type_info": "Text"
}
],
"nullable": [
false,
false,
true,
true,
true,
false,
false,
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT * FROM account WHERE id = 0"
}
}

120
backend/src/account.rs Normal file
View File

@@ -0,0 +1,120 @@
use ed25519_dalek::{ExpandedSecretKey, SecretKey};
use models::ResultExt;
use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
use sqlx::PgExecutor;
use crate::hostname::{generate_hostname, generate_id, Hostname};
use crate::net::keys::Key;
use crate::net::ssl::{generate_key, make_root_cert};
use crate::Error;
/// Hashes `password` with argon2 using a freshly generated 16-byte random
/// salt and the default argon2 configuration.
///
/// Returns the encoded hash string, or an error of kind
/// `PasswordHashGeneration` if hashing fails.
fn hash_password(password: &str) -> Result<String, Error> {
    let salt = rand::random::<[u8; 16]>();
    let config = argon2::Config::default();
    argon2::hash_encoded(password.as_bytes(), &salt[..], &config)
        .with_kind(crate::ErrorKind::PasswordHashGeneration)
}
/// The server's core identity and credentials, persisted as the single
/// `account` row (id = 0) in the secret store.
#[derive(Debug, Clone)]
pub struct AccountInfo {
    /// Server identifier (generated via `generate_id` for new accounts).
    pub server_id: String,
    /// Device hostname (generated via `generate_hostname` for new accounts).
    pub hostname: Hostname,
    /// argon2-encoded password hash (see `hash_password`).
    pub password: String,
    /// Network key; the tor identity can be derived from it (see `load`).
    pub key: Key,
    /// Root CA private key (PEM-encodable via openssl).
    pub root_ca_key: PKey<Private>,
    /// Root CA certificate.
    pub root_ca_cert: X509,
}
impl AccountInfo {
    /// Creates a brand-new account: generated server id and hostname, a new
    /// network key, a fresh self-signed root CA, and `password` stored as an
    /// argon2-encoded hash.
    pub fn new(password: &str) -> Result<Self, Error> {
        let server_id = generate_id();
        let hostname = generate_hostname();
        let root_ca_key = generate_key()?;
        let root_ca_cert = make_root_cert(&root_ca_key, &hostname)?;
        Ok(Self {
            server_id,
            hostname,
            password: hash_password(password)?,
            key: Key::new(None),
            root_ca_key,
            root_ca_cert,
        })
    }

    /// Loads the singleton account row (id = 0) from the secret store.
    ///
    /// NULL `server_id` / `hostname` columns are replaced with freshly
    /// generated values — note these are NOT written back here; call
    /// [`Self::save`] to persist them. When no dedicated `tor_key` is stored,
    /// the tor identity is derived by expanding the network key.
    pub async fn load(secrets: impl PgExecutor<'_>) -> Result<Self, Error> {
        let r = sqlx::query!("SELECT * FROM account WHERE id = 0")
            .fetch_one(secrets)
            .await?;
        let server_id = r.server_id.unwrap_or_else(generate_id);
        let hostname = r.hostname.map(Hostname).unwrap_or_else(generate_hostname);
        let password = r.password;
        let network_key = SecretKey::from_bytes(&r.network_key)?;
        // Prefer an explicitly stored tor key (legacy accounts); otherwise
        // expand the network key into one.
        let tor_key = if let Some(k) = &r.tor_key {
            ExpandedSecretKey::from_bytes(k)?
        } else {
            ExpandedSecretKey::from(&network_key)
        };
        let key = Key::from_pair(None, network_key.to_bytes(), tor_key.to_bytes());
        let root_ca_key = PKey::private_key_from_pem(r.root_ca_key_pem.as_bytes())?;
        let root_ca_cert = X509::from_pem(r.root_ca_cert_pem.as_bytes())?;
        Ok(Self {
            server_id,
            hostname,
            password,
            key,
            root_ca_key,
            root_ca_cert,
        })
    }

    /// Upserts this account as row id = 0 in the secret store.
    ///
    /// Note: the `tor_key` column is not written by this statement — only the
    /// network key is persisted (the tor identity is re-derived on `load`
    /// when `tor_key` is NULL).
    pub async fn save(&self, secrets: impl PgExecutor<'_>) -> Result<(), Error> {
        let server_id = self.server_id.as_str();
        let hostname = self.hostname.0.as_str();
        let password = self.password.as_str();
        let network_key = self.key.as_bytes();
        let network_key = network_key.as_slice();
        let root_ca_key = String::from_utf8(self.root_ca_key.private_key_to_pem_pkcs8()?)?;
        let root_ca_cert = String::from_utf8(self.root_ca_cert.to_pem()?)?;
        sqlx::query!(
            r#"
            INSERT INTO account (
                id,
                server_id,
                hostname,
                password,
                network_key,
                root_ca_key_pem,
                root_ca_cert_pem
            ) VALUES (
                0, $1, $2, $3, $4, $5, $6
            ) ON CONFLICT (id) DO UPDATE SET
                server_id = EXCLUDED.server_id,
                hostname = EXCLUDED.hostname,
                password = EXCLUDED.password,
                network_key = EXCLUDED.network_key,
                root_ca_key_pem = EXCLUDED.root_ca_key_pem,
                root_ca_cert_pem = EXCLUDED.root_ca_cert_pem
            "#,
            server_id,
            hostname,
            password,
            network_key,
            root_ca_key,
            root_ca_cert,
        )
        .execute(secrets)
        .await?;
        Ok(())
    }

    /// Replaces the stored password hash with the argon2 hash of `password`.
    /// Call [`Self::save`] afterwards to persist the change.
    pub fn set_password(&mut self, password: &str) -> Result<(), Error> {
        self.password = hash_password(password)?;
        Ok(())
    }
}

View File

@@ -4,13 +4,13 @@ use clap::ArgMatches;
use color_eyre::eyre::eyre;
use indexmap::IndexSet;
pub use models::ActionId;
use models::ImageId;
use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use tracing::instrument;
use crate::config::{Config, ConfigSpec};
use crate::context::RpcContext;
use crate::id::ImageId;
use crate::procedure::docker::DockerContainers;
use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
@@ -56,7 +56,7 @@ pub struct Action {
pub input_spec: ConfigSpec,
}
impl Action {
#[instrument]
#[instrument(skip_all)]
pub fn validate(
&self,
container: &Option<DockerContainers>,
@@ -74,7 +74,7 @@ impl Action {
})
}
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn execute(
&self,
ctx: &RpcContext,
@@ -120,7 +120,7 @@ fn display_action_result(action_result: ActionResult, matches: &ArgMatches) {
}
#[command(about = "Executes an action", display(display_action_result))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn action(
#[context] ctx: RpcContext,
#[arg(rename = "id")] pkg_id: PackageId,

View File

@@ -90,7 +90,7 @@ fn gen_pwd() {
)
}
#[instrument(skip(ctx, password))]
#[instrument(skip_all)]
async fn cli_login(
ctx: CliContext,
password: Option<PasswordType>,
@@ -145,7 +145,7 @@ where
display(display_none),
metadata(authenticated = false)
)]
#[instrument(skip(ctx, password))]
#[instrument(skip_all)]
pub async fn login(
#[context] ctx: RpcContext,
#[request] req: &RequestParts,
@@ -183,7 +183,7 @@ pub async fn login(
}
#[command(display(display_none), metadata(authenticated = false))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn logout(
#[context] ctx: RpcContext,
#[request] req: &RequestParts,
@@ -250,7 +250,7 @@ fn display_sessions(arg: SessionList, matches: &ArgMatches) {
}
#[command(display(display_sessions))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn list(
#[context] ctx: RpcContext,
#[request] req: &RequestParts,
@@ -296,7 +296,7 @@ impl AsLogoutSessionId for KillSessionId {
}
#[command(display(display_none))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn kill(
#[context] ctx: RpcContext,
#[arg(parse(parse_comma_separated))] ids: Vec<String>,
@@ -305,7 +305,7 @@ pub async fn kill(
Ok(())
}
#[instrument(skip(ctx, old_password, new_password))]
#[instrument(skip_all)]
async fn cli_reset_password(
ctx: CliContext,
old_password: Option<PasswordType>,
@@ -364,37 +364,12 @@ impl SetPasswordReceipt {
}
}
pub async fn set_password<Db: DbHandle, Ex>(
db: &mut Db,
receipt: &SetPasswordReceipt,
secrets: &mut Ex,
password: &str,
) -> Result<(), Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
let password = argon2::hash_encoded(
password.as_bytes(),
&rand::random::<[u8; 16]>()[..],
&argon2::Config::default(),
)
.with_kind(crate::ErrorKind::PasswordHashGeneration)?;
sqlx::query!("UPDATE account SET password = $1", password,)
.execute(secrets)
.await?;
receipt.0.set(db, password).await?;
Ok(())
}
#[command(
rename = "reset-password",
custom_cli(cli_reset_password(async, context(CliContext))),
display(display_none)
)]
#[instrument(skip(ctx, old_password, new_password))]
#[instrument(skip_all)]
pub async fn reset_password(
#[context] ctx: RpcContext,
#[arg(rename = "old-password")] old_password: Option<PasswordType>,
@@ -403,20 +378,32 @@ pub async fn reset_password(
let old_password = old_password.unwrap_or_default().decrypt(&ctx)?;
let new_password = new_password.unwrap_or_default().decrypt(&ctx)?;
let mut secrets = ctx.secret_store.acquire().await?;
check_password_against_db(&mut secrets, &old_password).await?;
let mut db = ctx.db.handle();
let set_password_receipt = SetPasswordReceipt::new(&mut db).await?;
set_password(&mut db, &set_password_receipt, &mut secrets, &new_password).await?;
let mut account = ctx.account.write().await;
if !argon2::verify_encoded(&account.password, old_password.as_bytes())
.with_kind(crate::ErrorKind::IncorrectPassword)?
{
return Err(Error::new(
eyre!("Incorrect Password"),
crate::ErrorKind::IncorrectPassword,
));
}
account.set_password(&new_password)?;
account.save(&ctx.secret_store).await?;
crate::db::DatabaseModel::new()
.server_info()
.password_hash()
.put(&mut ctx.db.handle(), &account.password)
.await?;
Ok(())
}
#[command(rename = "get-pubkey", display(display_none))]
#[instrument(skip(ctx))]
#[command(
rename = "get-pubkey",
display(display_none),
metadata(authenticated = false)
)]
#[instrument(skip_all)]
pub async fn get_pubkey(#[context] ctx: RpcContext) -> Result<Jwk, RpcError> {
let secret = ctx.as_ref().clone();
let pub_key = secret.to_public_key()?;

View File

@@ -5,19 +5,15 @@ use chrono::Utc;
use clap::ArgMatches;
use color_eyre::eyre::eyre;
use helpers::AtomicFile;
use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
use patch_db::{DbHandle, LockType, PatchDbHandle};
use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use tokio::io::AsyncWriteExt;
use torut::onion::TorSecretKeyV3;
use tracing::instrument;
use super::target::BackupTargetId;
use super::PackageBackupReport;
use crate::auth::check_password_against_db;
use crate::backup::os::OsBackup;
use crate::backup::{BackupReport, ServerBackupReport};
use crate::context::RpcContext;
use crate::db::model::BackupProgress;
@@ -32,88 +28,6 @@ use crate::util::serde::IoFormat;
use crate::version::VersionT;
use crate::{Error, ErrorKind, ResultExt};
#[derive(Debug)]
pub struct OsBackup {
pub tor_key: TorSecretKeyV3,
pub root_ca_key: PKey<Private>,
pub root_ca_cert: X509,
pub ui: Value,
}
impl<'de> Deserialize<'de> for OsBackup {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
#[derive(Deserialize)]
#[serde(rename = "kebab-case")]
struct OsBackupDe {
tor_key: String,
root_ca_key: String,
root_ca_cert: String,
ui: Value,
}
let int = OsBackupDe::deserialize(deserializer)?;
let key_vec = base32::decode(base32::Alphabet::RFC4648 { padding: true }, &int.tor_key)
.ok_or_else(|| {
serde::de::Error::invalid_value(
serde::de::Unexpected::Str(&int.tor_key),
&"an RFC4648 encoded string",
)
})?;
if key_vec.len() != 64 {
return Err(serde::de::Error::invalid_value(
serde::de::Unexpected::Str(&int.tor_key),
&"a 64 byte value encoded as an RFC4648 string",
));
}
let mut key_slice = [0; 64];
key_slice.clone_from_slice(&key_vec);
Ok(OsBackup {
tor_key: TorSecretKeyV3::from(key_slice),
root_ca_key: PKey::<Private>::private_key_from_pem(int.root_ca_key.as_bytes())
.map_err(serde::de::Error::custom)?,
root_ca_cert: X509::from_pem(int.root_ca_cert.as_bytes())
.map_err(serde::de::Error::custom)?,
ui: int.ui,
})
}
}
impl Serialize for OsBackup {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
#[derive(Serialize)]
#[serde(rename = "kebab-case")]
struct OsBackupSer<'a> {
tor_key: String,
root_ca_key: String,
root_ca_cert: String,
ui: &'a Value,
}
OsBackupSer {
tor_key: base32::encode(
base32::Alphabet::RFC4648 { padding: true },
&self.tor_key.as_bytes(),
),
root_ca_key: String::from_utf8(
self.root_ca_key
.private_key_to_pem_pkcs8()
.map_err(serde::ser::Error::custom)?,
)
.map_err(serde::ser::Error::custom)?,
root_ca_cert: String::from_utf8(
self.root_ca_cert
.to_pem()
.map_err(serde::ser::Error::custom)?,
)
.map_err(serde::ser::Error::custom)?,
ui: &self.ui,
}
.serialize(serializer)
}
}
fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<BTreeSet<PackageId>, Error> {
arg.split(',')
.map(|s| s.trim().parse().map_err(Error::from))
@@ -121,7 +35,7 @@ fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<BTreeSet<PackageId
}
#[command(rename = "create", display(display_none))]
#[instrument(skip(ctx, old_password, password))]
#[instrument(skip_all)]
pub async fn backup_all(
#[context] ctx: RpcContext,
#[arg(rename = "target-id")] target_id: BackupTargetId,
@@ -247,7 +161,7 @@ pub async fn backup_all(
Ok(())
}
#[instrument(skip(db, packages))]
#[instrument(skip_all)]
async fn assure_backing_up(
db: &mut PatchDbHandle,
packages: impl IntoIterator<Item = &PackageId>,
@@ -286,7 +200,7 @@ async fn assure_backing_up(
Ok(())
}
#[instrument(skip(ctx, db, backup_guard))]
#[instrument(skip_all)]
async fn perform_backup<Db: DbHandle>(
ctx: &RpcContext,
mut db: Db,
@@ -294,7 +208,6 @@ async fn perform_backup<Db: DbHandle>(
package_ids: &BTreeSet<PackageId>,
) -> Result<BTreeMap<PackageId, PackageBackupReport>, Error> {
let mut backup_report = BTreeMap::new();
for package_id in crate::db::DatabaseModel::new()
.package_data()
.keys(&mut db)
@@ -422,11 +335,12 @@ async fn perform_backup<Db: DbHandle>(
tx.save().await?;
}
crate::db::DatabaseModel::new()
.lock(&mut db, LockType::Write)
.await?;
let ui = crate::db::DatabaseModel::new()
.ui()
.get(&mut db)
.await?
.into_owned();
let (root_ca_key, root_ca_cert) = ctx.net_controller.ssl.export_root_ca().await?;
let mut os_backup_file = AtomicFile::new(
backup_guard.as_ref().join("os-backup.cbor"),
None::<PathBuf>,
@@ -434,18 +348,10 @@ async fn perform_backup<Db: DbHandle>(
.await
.with_kind(ErrorKind::Filesystem)?;
os_backup_file
.write_all(
&IoFormat::Cbor.to_vec(&OsBackup {
tor_key: ctx.net_controller.tor.embassyd_tor_key().await,
root_ca_key,
root_ca_cert,
ui: crate::db::DatabaseModel::new()
.ui()
.get(&mut db)
.await?
.into_owned(),
})?,
)
.write_all(&IoFormat::Cbor.to_vec(&OsBackup {
account: ctx.account.read().await.clone(),
ui,
})?)
.await?;
os_backup_file
.save()

View File

@@ -4,6 +4,7 @@ use std::path::{Path, PathBuf};
use chrono::{DateTime, Utc};
use color_eyre::eyre::eyre;
use helpers::AtomicFile;
use models::ImageId;
use patch_db::{DbHandle, HasModel};
use reqwest::Url;
use rpc_toolkit::command;
@@ -15,19 +16,20 @@ use tracing::instrument;
use self::target::PackageBackupInfo;
use crate::context::RpcContext;
use crate::dependencies::reconfigure_dependents_with_live_pointers;
use crate::id::ImageId;
use crate::install::PKG_ARCHIVE_DIR;
use crate::net::interface::{InterfaceId, Interfaces};
use crate::net::keys::Key;
use crate::procedure::docker::DockerContainers;
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
use crate::util::serde::IoFormat;
use crate::util::serde::{Base32, Base64, IoFormat};
use crate::util::Version;
use crate::version::{Current, VersionT};
use crate::volume::{backup_dir, Volume, VolumeId, Volumes, BACKUP_DIR};
use crate::{Error, ErrorKind, ResultExt};
pub mod backup_bulk;
pub mod os;
pub mod restore;
pub mod target;
@@ -61,7 +63,10 @@ pub fn package_backup() -> Result<(), Error> {
#[derive(Deserialize, Serialize)]
struct BackupMetadata {
pub timestamp: DateTime<Utc>,
pub tor_keys: BTreeMap<InterfaceId, String>,
#[serde(default)]
pub network_keys: BTreeMap<InterfaceId, Base64<[u8; 32]>>,
#[serde(default)]
pub tor_keys: BTreeMap<InterfaceId, Base32<[u8; 64]>>, // DEPRECATED
pub marketplace_url: Option<Url>,
}
@@ -87,7 +92,7 @@ impl BackupActions {
Ok(())
}
#[instrument(skip(ctx, db))]
#[instrument(skip_all)]
pub async fn create<Db: DbHandle>(
&self,
ctx: &RpcContext,
@@ -117,17 +122,17 @@ impl BackupActions {
.await?
.map_err(|e| eyre!("{}", e.1))
.with_kind(crate::ErrorKind::Backup)?;
let tor_keys = interfaces
.tor_keys(&mut ctx.secret_store.acquire().await?, pkg_id)
let (network_keys, tor_keys) = Key::for_package(&ctx.secret_store, pkg_id)
.await?
.into_iter()
.map(|(id, key)| {
(
id,
base32::encode(base32::Alphabet::RFC4648 { padding: true }, &key.as_bytes()),
)
.filter_map(|k| {
let interface = k.interface().map(|(_, i)| i)?;
Some((
(interface.clone(), Base64(k.as_bytes())),
(interface, Base32(k.tor_key().as_bytes())),
))
})
.collect();
.unzip();
let marketplace_url = crate::db::DatabaseModel::new()
.package_data()
.idx_model(pkg_id)
@@ -170,6 +175,7 @@ impl BackupActions {
outfile
.write_all(&IoFormat::Cbor.to_vec(&BackupMetadata {
timestamp,
network_keys,
tor_keys,
marketplace_url,
})?)
@@ -183,7 +189,7 @@ impl BackupActions {
})
}
#[instrument(skip(ctx, db))]
#[instrument(skip_all)]
pub async fn restore<Db: DbHandle>(
&self,
ctx: &RpcContext,

121
backend/src/backup/os.rs Normal file
View File

@@ -0,0 +1,121 @@
use crate::account::AccountInfo;
use crate::hostname::{generate_hostname, generate_id, Hostname};
use crate::net::keys::Key;
use crate::util::serde::Base64;
use crate::Error;
use openssl::pkey::PKey;
use openssl::x509::X509;
use serde::{Deserialize, Serialize};
use serde_json::Value;
/// OS-level state captured in (and restored from) a backup: the account
/// (identity, keys, root CA, password hash) plus the UI database contents.
pub struct OsBackup {
    /// Server identity and credentials to restore.
    pub account: AccountInfo,
    /// UI state as a raw JSON value.
    pub ui: Value,
}
impl<'de> Deserialize<'de> for OsBackup {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
let tagged = OsBackupSerDe::deserialize(deserializer)?;
match tagged.version {
0 => serde_json::from_value::<OsBackupV0>(tagged.rest)
.map_err(serde::de::Error::custom)?
.project()
.map_err(serde::de::Error::custom),
1 => serde_json::from_value::<OsBackupV1>(tagged.rest)
.map_err(serde::de::Error::custom)?
.project()
.map_err(serde::de::Error::custom),
v => Err(serde::de::Error::custom(&format!(
"Unknown backup version {v}"
))),
}
}
}
impl Serialize for OsBackup {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
OsBackupSerDe {
version: 1,
rest: serde_json::to_value(
&OsBackupV1::unproject(self).map_err(serde::ser::Error::custom)?,
)
.map_err(serde::ser::Error::custom)?,
}
.serialize(serializer)
}
}
/// Version-tagged envelope: `version` selects the concrete schema, and the
/// remaining (flattened) fields are held as an untyped `Value` until the
/// version is known.
#[derive(Deserialize, Serialize)]
struct OsBackupSerDe {
    /// Schema version; defaults to 0 for legacy backups written before the
    /// version tag existed.
    #[serde(default)]
    version: usize,
    /// All remaining fields, interpreted according to `version`.
    #[serde(flatten)]
    rest: Value,
}
/// V0: legacy (pre-versioned) backup schema.
///
/// NOTE(review): the original declaration carried
/// `#[serde(rename = "kebab-case")]`, which renames the *container* (a no-op
/// for map deserialization) rather than the fields — kebab-casing fields
/// would be `rename_all`. Since real V0 backups were written by code with the
/// same no-op attribute, their on-disk field names are snake_case, so the
/// attribute is removed here instead of being "fixed" to `rename_all`, which
/// would break restoring existing backups.
#[derive(Deserialize)]
struct OsBackupV0 {
    // tor_key: Base32<[u8; 64]>,
    root_ca_key: String,  // PEM Encoded OpenSSL Key
    root_ca_cert: String, // PEM Encoded OpenSSL X509 Certificate
    ui: Value,            // JSON Value
}
impl OsBackupV0 {
    /// Converts the legacy schema into the current `OsBackup`.
    ///
    /// V0 backups carry no identity information, so a fresh server id,
    /// hostname, and network key are generated here; the password is left
    /// empty (the restore flow replaces it with the user-supplied one).
    fn project(self) -> Result<OsBackup, Error> {
        let root_ca_key = PKey::private_key_from_pem(self.root_ca_key.as_bytes())?;
        let root_ca_cert = X509::from_pem(self.root_ca_cert.as_bytes())?;
        let account = AccountInfo {
            server_id: generate_id(),
            hostname: generate_hostname(),
            password: Default::default(),
            key: Key::new(None),
            root_ca_key,
            root_ca_cert,
        };
        Ok(OsBackup {
            account,
            ui: self.ui,
        })
    }
}
/// V1: current backup schema.
///
/// NOTE(review): the original declaration carried
/// `#[serde(rename = "kebab-case")]`, which renames the container rather than
/// the fields (`rename_all` does that) — so field names serialize as
/// snake_case. The no-op attribute is removed rather than changed to
/// `rename_all`, keeping the wire format exactly what this code already
/// produced.
#[derive(Deserialize, Serialize)]
struct OsBackupV1 {
    server_id: String,         // uuidv4
    hostname: String,          // embassy-<adjective>-<noun>
    net_key: Base64<[u8; 32]>, // Ed25519 Secret Key
    root_ca_key: String,       // PEM Encoded OpenSSL Key
    root_ca_cert: String,      // PEM Encoded OpenSSL X509 Certificate
    ui: Value,                 // JSON Value
    // TODO add more
}
impl OsBackupV1 {
    /// Rebuilds the in-memory `OsBackup` from the V1 wire form.
    ///
    /// The password is left empty here; the restore flow replaces it with
    /// the hash of the password the user supplies at recovery time.
    fn project(self) -> Result<OsBackup, Error> {
        let account = AccountInfo {
            server_id: self.server_id,
            hostname: Hostname(self.hostname),
            password: Default::default(),
            key: Key::from_bytes(None, self.net_key.0),
            root_ca_key: PKey::private_key_from_pem(self.root_ca_key.as_bytes())?,
            root_ca_cert: X509::from_pem(self.root_ca_cert.as_bytes())?,
        };
        Ok(OsBackup {
            account,
            ui: self.ui,
        })
    }

    /// Captures the given `OsBackup` into the V1 wire form (keys and CA
    /// material PEM-encoded, network key as raw bytes).
    fn unproject(backup: &OsBackup) -> Result<Self, Error> {
        let acct = &backup.account;
        let root_ca_key = String::from_utf8(acct.root_ca_key.private_key_to_pem_pkcs8()?)?;
        let root_ca_cert = String::from_utf8(acct.root_ca_cert.to_pem()?)?;
        Ok(Self {
            server_id: acct.server_id.clone(),
            hostname: acct.hostname.0.clone(),
            net_key: Base64(acct.key.as_bytes()),
            root_ca_key,
            root_ca_cert,
            ui: backup.ui.clone(),
        })
    }
}

View File

@@ -1,23 +1,23 @@
use std::collections::BTreeMap;
use std::path::Path;
use std::sync::atomic::Ordering;
use std::sync::Arc;
use std::time::Duration;
use std::{collections::BTreeMap, pin::Pin};
use clap::ArgMatches;
use color_eyre::eyre::eyre;
use futures::{future::BoxFuture, stream, Future};
use futures::{future::BoxFuture, stream};
use futures::{FutureExt, StreamExt};
use openssl::x509::X509;
use patch_db::{DbHandle, PatchDbHandle};
use rpc_toolkit::command;
use sqlx::Connection;
use tokio::fs::File;
use tokio::task::JoinHandle;
use torut::onion::OnionAddressV3;
use tracing::instrument;
use super::target::BackupTargetId;
use crate::backup::backup_bulk::OsBackup;
use crate::backup::os::OsBackup;
use crate::backup::BackupMetadata;
use crate::context::rpc::RpcContextConfig;
use crate::context::{RpcContext, SetupContext};
@@ -25,11 +25,10 @@ use crate::db::model::{PackageDataEntry, StaticFiles};
use crate::disk::mount::backup::{BackupMountGuard, PackageBackupMountGuard};
use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::TmpMountGuard;
use crate::hostname::{get_hostname, Hostname};
use crate::hostname::Hostname;
use crate::init::init;
use crate::install::progress::InstallProgress;
use crate::install::{download_install_s9pk, PKG_PUBLIC_DIR};
use crate::net::ssl::SslManager;
use crate::notifications::NotificationLevel;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::s9pk::reader::S9pkReader;
@@ -47,7 +46,7 @@ fn parse_comma_separated(arg: &str, _: &ArgMatches) -> Result<Vec<PackageId>, Er
}
#[command(rename = "restore", display(display_none))]
#[instrument(skip(ctx, password))]
#[instrument(skip_all)]
pub async fn restore_packages_rpc(
#[context] ctx: RpcContext,
#[arg(parse(parse_comma_separated))] ids: Vec<PackageId>,
@@ -170,7 +169,7 @@ impl ProgressInfo {
}
}
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn recover_full_embassy(
ctx: SetupContext,
disk_guid: Arc<String>,
@@ -185,7 +184,7 @@ pub async fn recover_full_embassy(
.await?;
let os_backup_path = backup_guard.as_ref().join("os-backup.cbor");
let os_backup: OsBackup =
let mut os_backup: OsBackup =
IoFormat::Cbor.from_slice(&tokio::fs::read(&os_backup_path).await.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
@@ -193,29 +192,17 @@ pub async fn recover_full_embassy(
)
})?)?;
let password = argon2::hash_encoded(
os_backup.account.password = argon2::hash_encoded(
embassy_password.as_bytes(),
&rand::random::<[u8; 16]>()[..],
&argon2::Config::default(),
)
.with_kind(crate::ErrorKind::PasswordHashGeneration)?;
let key_vec = os_backup.tor_key.as_bytes().to_vec();
let secret_store = ctx.secret_store().await?;
sqlx::query!(
"INSERT INTO account (id, password, tor_key) VALUES ($1, $2, $3) ON CONFLICT (id) DO UPDATE SET password = $2, tor_key = $3",
0,
password,
key_vec,
)
.execute(&mut secret_store.acquire().await?)
.await?;
SslManager::import_root_ca(
secret_store.clone(),
os_backup.root_ca_key,
os_backup.root_ca_cert.clone(),
)
.await?;
let secret_store = ctx.secret_store().await?;
os_backup.account.save(&secret_store).await?;
secret_store.close().await;
let cfg = RpcContextConfig::load(ctx.config_path.clone()).await?;
@@ -223,12 +210,7 @@ pub async fn recover_full_embassy(
init(&cfg).await?;
let rpc_ctx = RpcContext::init(ctx.config_path.clone(), disk_guid.clone()).await?;
let mut db = rpc_ctx.db.handle();
let receipts = crate::hostname::HostNameReceipt::new(&mut db).await?;
let hostname = get_hostname(&mut db, &receipts).await?;
drop(db);
let mut db = rpc_ctx.db.handle();
let ids = backup_guard
@@ -273,9 +255,9 @@ pub async fn recover_full_embassy(
Ok((
disk_guid,
hostname,
os_backup.tor_key.public().get_onion_address(),
os_backup.root_ca_cert,
os_backup.account.hostname,
os_backup.account.key.tor_address(),
os_backup.account.root_ca_cert,
))
}
@@ -324,7 +306,7 @@ async fn restore_packages(
Ok((backup_guard, tasks, progress_info))
}
#[instrument(skip(ctx, db, backup_guard))]
#[instrument(skip_all)]
async fn assure_restoring(
ctx: &RpcContext,
db: &mut PatchDbHandle,
@@ -394,7 +376,7 @@ async fn assure_restoring(
Ok(guards)
}
#[instrument(skip(ctx, guard))]
#[instrument(skip_all)]
async fn restore_package<'a>(
ctx: RpcContext,
manifest: Manifest,
@@ -413,23 +395,32 @@ async fn restore_package<'a>(
metadata_path.display().to_string(),
)
})?)?;
for (iface, key) in metadata.tor_keys {
let key_vec = base32::decode(base32::Alphabet::RFC4648 { padding: true }, &key)
.ok_or_else(|| {
Error::new(
eyre!("invalid base32 string"),
crate::ErrorKind::Deserialization,
)
})?;
let mut secrets = ctx.secret_store.acquire().await?;
let mut secrets_tx = secrets.begin().await?;
for (iface, key) in metadata.network_keys {
let k = key.0.as_slice();
sqlx::query!(
"INSERT INTO tor (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO UPDATE SET key = $3",
*id,
*iface,
key_vec,
)
.execute(&ctx.secret_store)
.await?;
"INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING",
*id,
*iface,
k,
)
.execute(&mut secrets_tx).await?;
}
// DEPRECATED
for (iface, key) in metadata.tor_keys {
let k = key.0.as_slice();
sqlx::query!(
"INSERT INTO tor (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING",
*id,
*iface,
k,
)
.execute(&mut secrets_tx).await?;
}
secrets_tx.commit().await?;
drop(secrets);
let len = tokio::fs::metadata(&s9pk_path)
.await

View File

@@ -223,7 +223,7 @@ fn display_backup_info(info: BackupInfo, matches: &ArgMatches) {
}
#[command(display(display_backup_info))]
#[instrument(skip(ctx, password))]
#[instrument(skip_all)]
pub async fn info(
#[context] ctx: RpcContext,
#[arg(rename = "target-id")] target_id: BackupTargetId,

View File

@@ -8,48 +8,21 @@ use embassy::disk::fsck::RepairStrategy;
use embassy::disk::main::DEFAULT_PASSWORD;
use embassy::disk::REPAIR_DISK_PATH;
use embassy::init::STANDBY_MODE_PATH;
use embassy::net::embassy_service_http_server::EmbassyServiceHTTPServer;
#[cfg(feature = "avahi")]
use embassy::net::mdns::MdnsController;
use embassy::net::net_utils::ResourceFqdn;
use embassy::net::static_server::{
diag_ui_file_router, install_ui_file_router, setup_ui_file_router,
};
use embassy::net::web_server::WebServer;
use embassy::shutdown::Shutdown;
use embassy::sound::CHIME;
use embassy::util::logger::EmbassyLogger;
use embassy::util::Invoke;
use embassy::{Error, ErrorKind, ResultExt};
use embassy::{Error, ErrorKind, ResultExt, IS_RASPBERRY_PI};
use tokio::process::Command;
use tracing::instrument;
#[instrument]
#[instrument(skip_all)]
async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
if tokio::fs::metadata("/cdrom").await.is_ok() {
#[cfg(feature = "avahi")]
let _mdns = MdnsController::init().await?;
let ctx = InstallContext::init(cfg_path).await?;
let embassy_ip_fqdn: ResourceFqdn = ResourceFqdn::IpAddr;
let embassy_fqdn: ResourceFqdn = "embassy.local".parse()?;
let localhost_fqdn = ResourceFqdn::LocalHost;
let install_ui_handler = install_ui_file_router(ctx.clone()).await?;
let mut install_http_server =
EmbassyServiceHTTPServer::new([0, 0, 0, 0].into(), 80, None).await?;
install_http_server
.add_svc_handler_mapping(embassy_ip_fqdn, install_ui_handler.clone())
.await?;
install_http_server
.add_svc_handler_mapping(embassy_fqdn, install_ui_handler.clone())
.await?;
install_http_server
.add_svc_handler_mapping(localhost_fqdn, install_ui_handler.clone())
.await?;
let server = WebServer::install(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
CHIME.play().await?;
@@ -59,7 +32,9 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
.recv()
.await
.expect("context dropped");
install_http_server.shutdown.send(()).unwrap();
server.shutdown().await;
Command::new("reboot")
.invoke(embassy::ErrorKind::Unknown)
.await?;
@@ -67,29 +42,9 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
.await
.is_err()
{
#[cfg(feature = "avahi")]
let _mdns = MdnsController::init().await?;
let ctx = SetupContext::init(cfg_path).await?;
let embassy_ip_fqdn: ResourceFqdn = ResourceFqdn::IpAddr;
let embassy_fqdn: ResourceFqdn = "embassy.local".parse()?;
let localhost_fqdn = ResourceFqdn::LocalHost;
let setup_ui_handler = setup_ui_file_router(ctx.clone()).await?;
let mut setup_http_server =
EmbassyServiceHTTPServer::new([0, 0, 0, 0].into(), 80, None).await?;
setup_http_server
.add_svc_handler_mapping(embassy_ip_fqdn, setup_ui_handler.clone())
.await?;
setup_http_server
.add_svc_handler_mapping(embassy_fqdn, setup_ui_handler.clone())
.await?;
setup_http_server
.add_svc_handler_mapping(localhost_fqdn, setup_ui_handler.clone())
.await?;
let server = WebServer::setup(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
tokio::time::sleep(Duration::from_secs(1)).await; // let the record state that I hate this
CHIME.play().await?;
@@ -98,7 +53,9 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
.recv()
.await
.expect("context dropped");
setup_http_server.shutdown.send(()).unwrap();
server.shutdown().await;
tokio::task::yield_now().await;
if let Err(e) = Command::new("killall")
.arg("firefox-esr")
@@ -160,9 +117,9 @@ async fn run_script_if_exists<P: AsRef<Path>>(path: P) {
}
}
#[instrument]
#[instrument(skip_all)]
async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> {
if tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() {
if *IS_RASPBERRY_PI && tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() {
tokio::fs::remove_file(STANDBY_MODE_PATH).await?;
Command::new("sync").invoke(ErrorKind::Filesystem).await?;
embassy::sound::SHUTDOWN.play().await?;
@@ -178,8 +135,6 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
tracing::error!("{}", e.source);
tracing::debug!("{}", e.source);
embassy::sound::BEETHOVEN.play().await?;
#[cfg(feature = "avahi")]
let _mdns = MdnsController::init().await?;
let ctx = DiagnosticContext::init(
cfg_path,
@@ -200,28 +155,12 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
)
.await?;
let embassy_ip_fqdn: ResourceFqdn = ResourceFqdn::IpAddr;
let embassy_fqdn: ResourceFqdn = "embassy.local".parse()?;
let localhost_fqdn = ResourceFqdn::LocalHost;
let diag_ui_handler = diag_ui_file_router(ctx.clone()).await?;
let mut diag_http_server =
EmbassyServiceHTTPServer::new([0, 0, 0, 0].into(), 80, None).await?;
diag_http_server
.add_svc_handler_mapping(embassy_ip_fqdn, diag_ui_handler.clone())
.await?;
diag_http_server
.add_svc_handler_mapping(embassy_fqdn, diag_ui_handler.clone())
.await?;
diag_http_server
.add_svc_handler_mapping(localhost_fqdn, diag_ui_handler.clone())
.await?;
let server = WebServer::diagnostic(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
let shutdown = ctx.shutdown.subscribe().recv().await.unwrap();
diag_http_server.shutdown.send(()).unwrap();
server.shutdown().await;
Ok(shutdown)
}
.await

View File

@@ -1,29 +1,20 @@
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use color_eyre::eyre::eyre;
use embassy::context::{DiagnosticContext, RpcContext};
use embassy::net::embassy_service_http_server::EmbassyServiceHTTPServer;
#[cfg(feature = "avahi")]
use embassy::net::mdns::MdnsController;
use embassy::net::net_controller::NetController;
use embassy::net::net_utils::ResourceFqdn;
use embassy::net::static_server::diag_ui_file_router;
use embassy::net::tor::tor_health_check;
use embassy::net::web_server::WebServer;
use embassy::shutdown::Shutdown;
use embassy::system::launch_metrics_task;
use embassy::util::daemon;
use embassy::util::logger::EmbassyLogger;
use embassy::{Error, ErrorKind, ResultExt};
use futures::{FutureExt, TryFutureExt};
use reqwest::{Client, Proxy};
use tokio::signal::unix::signal;
use tracing::instrument;
#[instrument]
#[instrument(skip_all)]
async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> {
let (rpc_ctx, shutdown) = {
let (rpc_ctx, server, shutdown) = {
let rpc_ctx = RpcContext::init(
cfg_path,
Arc::new(
@@ -34,7 +25,8 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
),
)
.await?;
NetController::setup_embassy_ui(rpc_ctx.clone()).await?;
embassy::hostname::sync_hostname(&*rpc_ctx.account.read().await).await?;
let server = WebServer::main(([0, 0, 0, 0], 80).into(), rpc_ctx.clone()).await?;
let mut shutdown_recv = rpc_ctx.shutdown.subscribe();
@@ -66,12 +58,6 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
.expect("send shutdown signal");
});
{
let mut db = rpc_ctx.db.handle();
let receipts = embassy::context::rpc::RpcSetHostNameReceipts::new(&mut db).await?;
embassy::hostname::sync_hostname(&mut db, &receipts.hostname_receipts).await?;
}
let metrics_ctx = rpc_ctx.clone();
let metrics_task = tokio::spawn(async move {
launch_metrics_task(&metrics_ctx.metrics_cache, || {
@@ -80,60 +66,32 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
.await
});
let tor_health_ctx = rpc_ctx.clone();
let tor_client = Client::builder()
.proxy(
Proxy::http(format!(
"socks5h://{}:{}",
rpc_ctx.tor_socks.ip(),
rpc_ctx.tor_socks.port()
))
.with_kind(crate::ErrorKind::Network)?,
)
.build()
.with_kind(crate::ErrorKind::Network)?;
let tor_health_daemon = daemon(
move || {
let ctx = tor_health_ctx.clone();
let client = tor_client.clone();
async move { tor_health_check(&client, &ctx.net_controller.tor).await }
},
Duration::from_secs(300),
rpc_ctx.shutdown.subscribe(),
);
embassy::sound::CHIME.play().await?;
futures::try_join!(
metrics_task
.map_err(|e| Error::new(
metrics_task
.map_err(|e| {
Error::new(
eyre!("{}", e).wrap_err("Metrics daemon panicked!"),
ErrorKind::Unknown
))
.map_ok(|_| tracing::debug!("Metrics daemon Shutdown")),
tor_health_daemon
.map_err(|e| Error::new(
e.wrap_err("Tor Health daemon panicked!"),
ErrorKind::Unknown
))
.map_ok(|_| tracing::debug!("Tor Health daemon Shutdown")),
)?;
ErrorKind::Unknown,
)
})
.map_ok(|_| tracing::debug!("Metrics daemon Shutdown"))
.await?;
let mut shutdown = shutdown_recv
let shutdown = shutdown_recv
.recv()
.await
.with_kind(crate::ErrorKind::Unknown)?;
sig_handler.abort();
if let Some(shutdown) = &mut shutdown {
drop(shutdown.db_handle.take());
}
(rpc_ctx, shutdown)
(rpc_ctx, server, shutdown)
};
server.shutdown().await;
rpc_ctx.shutdown().await?;
tracing::info!("RPC Context is dropped");
Ok(shutdown)
}
@@ -160,12 +118,10 @@ fn main() {
match inner_main(cfg_path.clone()).await {
Ok(a) => Ok(a),
Err(e) => {
(|| async {
async {
tracing::error!("{}", e.source);
tracing::debug!("{:?}", e.source);
embassy::sound::BEETHOVEN.play().await?;
#[cfg(feature = "avahi")]
let _mdns = MdnsController::init().await?;
let ctx = DiagnosticContext::init(
cfg_path,
if tokio::fs::metadata("/media/embassy/config/disk.guid")
@@ -185,24 +141,18 @@ fn main() {
)
.await?;
let embassy_ip_fqdn: ResourceFqdn = ResourceFqdn::IpAddr;
let embassy_fqdn: ResourceFqdn = "embassy.local".parse()?;
let diag_ui_handler = diag_ui_file_router(ctx.clone()).await?;
let mut diag_http_server =
EmbassyServiceHTTPServer::new([0, 0, 0, 0].into(), 80, None).await?;
diag_http_server
.add_svc_handler_mapping(embassy_ip_fqdn, diag_ui_handler.clone())
.await?;
diag_http_server
.add_svc_handler_mapping(embassy_fqdn, diag_ui_handler)
.await?;
let server =
WebServer::diagnostic(([0, 0, 0, 0], 80).into(), ctx.clone()).await?;
let mut shutdown = ctx.shutdown.subscribe();
shutdown.recv().await.with_kind(crate::ErrorKind::Unknown)
})()
let shutdown =
shutdown.recv().await.with_kind(crate::ErrorKind::Unknown)?;
server.shutdown().await;
Ok::<_, Error>(shutdown)
}
.await
}
}

View File

@@ -1,6 +1,7 @@
use std::collections::{BTreeMap, BTreeSet};
use color_eyre::eyre::eyre;
use models::ImageId;
use nix::sys::signal::Signal;
use patch_db::HasModel;
use serde::{Deserialize, Serialize};
@@ -9,7 +10,6 @@ use tracing::instrument;
use super::{Config, ConfigSpec};
use crate::context::RpcContext;
use crate::dependencies::Dependencies;
use crate::id::ImageId;
use crate::procedure::docker::DockerContainers;
use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
@@ -31,7 +31,7 @@ pub struct ConfigActions {
pub set: PackageProcedure,
}
impl ConfigActions {
#[instrument]
#[instrument(skip_all)]
pub fn validate(
&self,
container: &Option<DockerContainers>,
@@ -47,7 +47,7 @@ impl ConfigActions {
.with_ctx(|_| (crate::ErrorKind::ValidateS9pk, "Config Set"))?;
Ok(())
}
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn get(
&self,
ctx: &RpcContext,
@@ -71,7 +71,7 @@ impl ConfigActions {
})
}
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn set(
&self,
ctx: &RpcContext,

View File

@@ -214,7 +214,7 @@ impl ConfigGetReceipts {
}
#[command(display(display_serializable))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn get(
#[context] ctx: RpcContext,
#[parent_data] id: PackageId,
@@ -240,7 +240,7 @@ pub async fn get(
display(display_none),
metadata(sync_db = true)
)]
#[instrument]
#[instrument(skip_all)]
pub fn set(
#[parent_data] id: PackageId,
#[allow(unused_variables)]
@@ -413,7 +413,7 @@ impl ConfigReceipts {
}
#[command(rename = "dry", display(display_serializable))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn set_dry(
#[context] ctx: RpcContext,
#[parent_data] (id, config, timeout): (PackageId, Option<Config>, Option<Duration>),
@@ -440,7 +440,7 @@ pub async fn set_dry(
Ok(BreakageRes(breakages))
}
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn set_impl(
ctx: RpcContext,
(id, config, timeout): (PackageId, Option<Config>, Option<Duration>),
@@ -465,7 +465,7 @@ pub async fn set_impl(
Ok(())
}
#[instrument(skip(ctx, db, receipts))]
#[instrument(skip_all)]
pub async fn configure<'a, Db: DbHandle>(
ctx: &RpcContext,
db: &'a mut Db,
@@ -485,7 +485,7 @@ pub async fn configure<'a, Db: DbHandle>(
Ok(())
}
#[instrument(skip(ctx, db, receipts))]
#[instrument(skip_all)]
pub fn configure_rec<'a, Db: DbHandle>(
ctx: &'a RpcContext,
db: &'a mut Db,
@@ -771,7 +771,7 @@ pub fn configure_rec<'a, Db: DbHandle>(
}
.boxed()
}
#[instrument]
#[instrument(skip_all)]
pub fn not_found() -> Error {
Error::new(eyre!("Could not find"), crate::ErrorKind::Incoherent)
}

View File

@@ -25,6 +25,7 @@ use super::{Config, MatchError, NoMatchWithPath, TimeoutError, TypeOf};
use crate::config::ConfigurationError;
use crate::context::RpcContext;
use crate::net::interface::InterfaceId;
use crate::net::keys::Key;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::Error;
@@ -2059,22 +2060,19 @@ impl TorKeyPointer {
ValueSpecPointer::Package(PackagePointerSpec::TorKey(self.clone())),
));
}
let x = sqlx::query!(
"SELECT key FROM tor WHERE package = $1 AND interface = $2",
*self.package_id,
*self.interface
let key = Key::for_interface(
&mut secrets
.acquire()
.await
.map_err(|e| ConfigurationError::SystemError(e.into()))?,
Some((self.package_id.clone(), self.interface.clone())),
)
.fetch_optional(secrets)
.await
.map_err(|e| ConfigurationError::SystemError(e.into()))?;
if let Some(x) = x {
Ok(Value::String(base32::encode(
base32::Alphabet::RFC4648 { padding: false },
&x.key,
)))
} else {
Ok(Value::Null)
}
.map_err(ConfigurationError::SystemError)?;
Ok(Value::String(base32::encode(
base32::Alphabet::RFC4648 { padding: false },
&key.tor_key().as_bytes(),
)))
}
}
impl fmt::Display for TorKeyPointer {

View File

@@ -6,6 +6,7 @@ use std::sync::Arc;
use clap::ArgMatches;
use color_eyre::eyre::eyre;
use cookie::Cookie;
use cookie_store::CookieStore;
use josekit::jwk::Jwk;
use reqwest::Proxy;
@@ -16,6 +17,7 @@ use rpc_toolkit::Context;
use serde::Deserialize;
use tracing::instrument;
use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH;
use crate::util::config::{load_config_from_paths, local_config_path};
use crate::ResultExt;
@@ -66,7 +68,7 @@ const DEFAULT_PORT: u16 = 5959;
pub struct CliContext(Arc<CliContextSeed>);
impl CliContext {
/// BLOCKING
#[instrument(skip(matches))]
#[instrument(skip_all)]
pub fn init(matches: &ArgMatches) -> Result<Self, crate::Error> {
let local_config_path = local_config_path();
let base: CliContextConfig = load_config_from_paths(
@@ -83,7 +85,7 @@ impl CliContext {
} else if let Some(host) = base.host {
host
} else {
format!("http://localhost").parse()?
"http://localhost".parse()?
};
let proxy = if let Some(proxy) = matches.value_of("proxy") {
Some(proxy.parse()?)
@@ -100,9 +102,15 @@ impl CliContext {
.join(".cookies.json")
});
let cookie_store = Arc::new(CookieStoreMutex::new(if cookie_path.exists() {
CookieStore::load_json(BufReader::new(File::open(&cookie_path)?))
let mut store = CookieStore::load_json(BufReader::new(File::open(&cookie_path)?))
.map_err(|e| eyre!("{}", e))
.with_kind(crate::ErrorKind::Deserialization)?
.with_kind(crate::ErrorKind::Deserialization)?;
if let Ok(local) = std::fs::read_to_string(LOCAL_AUTH_COOKIE_PATH) {
store
.insert_raw(&Cookie::new("local", local), &"http://localhost".parse()?)
.with_kind(crate::ErrorKind::Network)?;
}
store
} else {
CookieStore::default()
}));

View File

@@ -18,7 +18,7 @@ pub struct DiagnosticContextConfig {
pub datadir: Option<PathBuf>,
}
impl DiagnosticContextConfig {
#[instrument(skip(path))]
#[instrument(skip_all)]
pub async fn load<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
tokio::task::spawn_blocking(move || {
load_config_from_paths(
@@ -52,7 +52,7 @@ pub struct DiagnosticContextSeed {
#[derive(Clone)]
pub struct DiagnosticContext(Arc<DiagnosticContextSeed>);
impl DiagnosticContext {
#[instrument(skip(path))]
#[instrument(skip_all)]
pub async fn init<P: AsRef<Path> + Send + 'static>(
path: Option<P>,
disk_guid: Option<Arc<String>>,

View File

@@ -7,7 +7,7 @@ use serde::Deserialize;
use tokio::sync::broadcast::Sender;
use tracing::instrument;
use crate::os_install::find_eth_iface;
use crate::net::utils::find_eth_iface;
use crate::util::config::load_config_from_paths;
use crate::Error;
@@ -15,7 +15,7 @@ use crate::Error;
#[serde(rename_all = "kebab-case")]
pub struct InstallContextConfig {}
impl InstallContextConfig {
#[instrument(skip(path))]
#[instrument(skip_all)]
pub async fn load<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
tokio::task::spawn_blocking(move || {
load_config_from_paths(
@@ -38,7 +38,7 @@ pub struct InstallContextSeed {
#[derive(Clone)]
pub struct InstallContext(Arc<InstallContextSeed>);
impl InstallContext {
#[instrument(skip(path))]
#[instrument(skip_all)]
pub async fn init<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
let _cfg = InstallContextConfig::load(path.as_ref().map(|p| p.as_ref().to_owned())).await?;
let (shutdown, _) = tokio::sync::broadcast::channel(1);

View File

@@ -1,4 +1,4 @@
use std::collections::{BTreeMap, VecDeque};
use std::collections::BTreeMap;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::ops::Deref;
use std::path::{Path, PathBuf};
@@ -10,7 +10,7 @@ use bollard::Docker;
use helpers::to_tmp_path;
use josekit::jwk::Jwk;
use patch_db::json_ptr::JsonPointer;
use patch_db::{DbHandle, LockReceipt, LockType, PatchDb, Revision};
use patch_db::{DbHandle, LockReceipt, LockType, PatchDb};
use reqwest::Url;
use rpc_toolkit::Context;
use serde::Deserialize;
@@ -19,19 +19,18 @@ use sqlx::PgPool;
use tokio::sync::{broadcast, oneshot, Mutex, RwLock};
use tracing::instrument;
use crate::account::AccountInfo;
use crate::core::rpc_continuations::{RequestGuid, RestHandler, RpcContinuation};
use crate::db::model::{Database, InstalledPackageDataEntry, PackageDataEntry};
use crate::disk::OsPartitionInfo;
use crate::hostname::HostNameReceipt;
use crate::init::{init_postgres, pgloader};
use crate::install::cleanup::{cleanup_failed, uninstall, CleanupFailedReceipts};
use crate::manager::ManagerMap;
use crate::middleware::auth::HashSessionToken;
use crate::net::net_controller::NetController;
use crate::net::tor::os_key;
use crate::net::ssl::SslManager;
use crate::net::wifi::WpaCli;
use crate::notifications::NotificationManager;
use crate::setup::password_hash;
use crate::shutdown::Shutdown;
use crate::status::{MainStatus, Status};
use crate::util::config::load_config_from_paths;
@@ -76,24 +75,18 @@ impl RpcContextConfig {
.as_deref()
.unwrap_or_else(|| Path::new("/embassy-data"))
}
pub async fn db(&self, secret_store: &PgPool) -> Result<PatchDb, Error> {
pub async fn db(&self, account: &AccountInfo) -> Result<PatchDb, Error> {
let db_path = self.datadir().join("main").join("embassy.db");
let db = PatchDb::open(&db_path)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
if !db.exists(&<JsonPointer>::default()).await {
db.put(
&<JsonPointer>::default(),
&Database::init(
&os_key(&mut secret_store.acquire().await?).await?,
password_hash(&mut secret_store.acquire().await?).await?,
),
)
.await?;
db.put(&<JsonPointer>::default(), &Database::init(account))
.await?;
}
Ok(db)
}
#[instrument]
#[instrument(skip_all)]
pub async fn secret_store(&self) -> Result<PgPool, Error> {
init_postgres(self.datadir()).await?;
let secret_store =
@@ -125,11 +118,10 @@ pub struct RpcContextSeed {
pub disk_guid: Arc<String>,
pub db: PatchDb,
pub secret_store: PgPool,
pub account: RwLock<AccountInfo>,
pub docker: Docker,
pub net_controller: NetController,
pub net_controller: Arc<NetController>,
pub managers: ManagerMap,
pub revision_cache_size: usize,
pub revision_cache: RwLock<VecDeque<Arc<Revision>>>,
pub metrics_cache: RwLock<Option<crate::system::Metrics>>,
pub shutdown: broadcast::Sender<Option<Shutdown>>,
pub tor_socks: SocketAddr,
@@ -178,41 +170,10 @@ impl RpcCleanReceipts {
}
}
pub struct RpcSetHostNameReceipts {
pub hostname_receipts: HostNameReceipt,
#[allow(dead_code)]
server_info: LockReceipt<crate::db::model::ServerInfo, ()>,
}
impl RpcSetHostNameReceipts {
pub async fn new(db: &'_ mut impl DbHandle) -> Result<Self, Error> {
let mut locks = Vec::new();
let setup = Self::setup(&mut locks);
Ok(setup(&db.lock_all(locks).await?)?)
}
pub fn setup(
locks: &mut Vec<patch_db::LockTargetId>,
) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
let hostname_receipts = HostNameReceipt::setup(locks);
let server_info = crate::db::DatabaseModel::new()
.server_info()
.make_locker(LockType::Read)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
hostname_receipts: hostname_receipts(skeleton_key)?,
server_info: server_info.verify(skeleton_key)?,
})
}
}
}
#[derive(Clone)]
pub struct RpcContext(Arc<RpcContextSeed>);
impl RpcContext {
#[instrument(skip(cfg_path))]
#[instrument(skip_all)]
pub async fn init<P: AsRef<Path> + Send + 'static>(
cfg_path: Option<P>,
disk_guid: Arc<String>,
@@ -226,25 +187,26 @@ impl RpcContext {
let (shutdown, _) = tokio::sync::broadcast::channel(1);
let secret_store = base.secret_store().await?;
tracing::info!("Opened Pg DB");
let db = base.db(&secret_store).await?;
let account = AccountInfo::load(&secret_store).await?;
let db = base.db(&account).await?;
tracing::info!("Opened PatchDB");
let mut docker = Docker::connect_with_unix_defaults()?;
docker.set_timeout(Duration::from_secs(600));
tracing::info!("Connected to Docker");
let net_controller = NetController::init(
([0, 0, 0, 0], 80).into(),
crate::net::tor::os_key(&mut secret_store.acquire().await?).await?,
base.tor_control
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
base.dns_bind
.as_ref()
.map(|v| v.as_slice())
.unwrap_or(&[SocketAddr::from(([127, 0, 0, 1], 53))]),
secret_store.clone(),
&mut db.handle(),
None,
)
.await?;
let net_controller = Arc::new(
NetController::init(
base.tor_control
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
base.dns_bind
.as_ref()
.map(|v| v.as_slice())
.unwrap_or(&[SocketAddr::from(([127, 0, 0, 1], 53))]),
SslManager::new(&account)?,
&account.hostname,
&account.key,
)
.await?,
);
tracing::info!("Initialized Net Controller");
let managers = ManagerMap::default();
let metrics_cache = RwLock::new(None);
@@ -259,11 +221,10 @@ impl RpcContext {
disk_guid,
db,
secret_store,
account: RwLock::new(account),
docker,
net_controller,
managers,
revision_cache_size: base.revision_cache_size.unwrap_or(512),
revision_cache: RwLock::new(VecDeque::new()),
metrics_cache,
shutdown,
tor_socks: tor_proxy,
@@ -299,16 +260,17 @@ impl RpcContext {
Ok(res)
}
#[instrument(skip(self))]
#[instrument(skip_all)]
pub async fn shutdown(self) -> Result<(), Error> {
self.managers.empty().await?;
self.secret_store.close().await;
self.is_closed.store(true, Ordering::SeqCst);
tracing::info!("RPC Context is shutdown");
// TODO: shutdown http servers
Ok(())
}
#[instrument(skip(self))]
#[instrument(skip_all)]
pub async fn cleanup(&self) -> Result<(), Error> {
let mut db = self.db.handle();
let receipts = RpcCleanReceipts::new(&mut db).await?;
@@ -386,7 +348,7 @@ impl RpcContext {
Ok(())
}
#[instrument(skip(self))]
#[instrument(skip_all)]
pub async fn clean_continuations(&self) {
let mut continuations = self.rpc_stream_continuations.lock().await;
let mut to_remove = Vec::new();
@@ -400,7 +362,7 @@ impl RpcContext {
}
}
#[instrument(skip(self, handler))]
#[instrument(skip_all)]
pub async fn add_continuation(&self, guid: RequestGuid, handler: RpcContinuation) {
self.clean_continuations().await;
self.rpc_stream_continuations

View File

@@ -25,7 +25,7 @@ pub struct SdkContextSeed {
pub struct SdkContext(Arc<SdkContextSeed>);
impl SdkContext {
/// BLOCKING
#[instrument(skip(matches))]
#[instrument(skip_all)]
pub fn init(matches: &ArgMatches) -> Result<Self, crate::Error> {
let local_config_path = local_config_path();
let base: SdkContextConfig = load_config_from_paths(
@@ -49,7 +49,7 @@ impl SdkContext {
})))
}
/// BLOCKING
#[instrument]
#[instrument(skip_all)]
pub fn developer_key(&self) -> Result<ed25519_dalek::Keypair, Error> {
if !self.developer_key_path.exists() {
return Err(Error::new(eyre!("Developer Key does not exist! Please run `embassy-sdk init` before running this command."), crate::ErrorKind::Uninitialized));

View File

@@ -14,11 +14,11 @@ use tokio::sync::broadcast::Sender;
use tokio::sync::RwLock;
use tracing::instrument;
use crate::account::AccountInfo;
use crate::db::model::Database;
use crate::disk::OsPartitionInfo;
use crate::init::{init_postgres, pgloader};
use crate::net::tor::os_key;
use crate::setup::{password_hash, SetupStatus};
use crate::setup::SetupStatus;
use crate::util::config::load_config_from_paths;
use crate::{Error, ResultExt};
@@ -47,7 +47,7 @@ pub struct SetupContextConfig {
pub datadir: Option<PathBuf>,
}
impl SetupContextConfig {
#[instrument(skip(path))]
#[instrument(skip_all)]
pub async fn load<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
tokio::task::spawn_blocking(move || {
load_config_from_paths(
@@ -92,7 +92,7 @@ impl AsRef<Jwk> for SetupContextSeed {
#[derive(Clone)]
pub struct SetupContext(Arc<SetupContextSeed>);
impl SetupContext {
#[instrument(skip(path))]
#[instrument(skip_all)]
pub async fn init<P: AsRef<Path> + Send + 'static>(path: Option<P>) -> Result<Self, Error> {
let cfg = SetupContextConfig::load(path.as_ref().map(|p| p.as_ref().to_owned())).await?;
let (shutdown, _) = tokio::sync::broadcast::channel(1);
@@ -110,25 +110,19 @@ impl SetupContext {
setup_result: RwLock::new(None),
})))
}
#[instrument(skip(self))]
pub async fn db(&self, secret_store: &PgPool) -> Result<PatchDb, Error> {
#[instrument(skip_all)]
pub async fn db(&self, account: &AccountInfo) -> Result<PatchDb, Error> {
let db_path = self.datadir.join("main").join("embassy.db");
let db = PatchDb::open(&db_path)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
if !db.exists(&<JsonPointer>::default()).await {
db.put(
&<JsonPointer>::default(),
&Database::init(
&os_key(&mut secret_store.acquire().await?).await?,
password_hash(&mut secret_store.acquire().await?).await?,
),
)
.await?;
db.put(&<JsonPointer>::default(), &Database::init(account))
.await?;
}
Ok(db)
}
#[instrument(skip(self))]
#[instrument(skip_all)]
pub async fn secret_store(&self) -> Result<PgPool, Error> {
init_postgres(&self.datadir).await?;
let secret_store =

View File

@@ -61,7 +61,7 @@ impl StartReceipts {
}
#[command(display(display_none), metadata(sync_db = true))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn start(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result<(), Error> {
let mut db = ctx.db.handle();
let mut tx = db.begin().await?;
@@ -120,7 +120,7 @@ impl StopReceipts {
}
}
#[instrument(skip(db))]
#[instrument(skip_all)]
pub async fn stop_common<Db: DbHandle>(
db: &mut Db,
id: &PackageId,
@@ -154,7 +154,7 @@ pub fn stop(#[arg] id: PackageId) -> Result<PackageId, Error> {
}
#[command(rename = "dry", display(display_serializable))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn stop_dry(
#[context] ctx: RpcContext,
#[parent_data] id: PackageId,
@@ -170,7 +170,7 @@ pub async fn stop_dry(
Ok(BreakageRes(breakages))
}
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn stop_impl(ctx: RpcContext, id: PackageId) -> Result<MainStatus, Error> {
let mut db = ctx.db.handle();
let mut tx = db.begin().await?;

View File

@@ -27,7 +27,7 @@ use crate::middleware::auth::{HasValidSession, HashSessionToken};
use crate::util::serde::{display_serializable, IoFormat};
use crate::{Error, ResultExt};
#[instrument(skip(ctx, session, ws_fut))]
#[instrument(skip_all)]
async fn ws_handler<
WSFut: Future<Output = Result<Result<WebSocketStream<Upgraded>, HyperError>, JoinError>>,
>(
@@ -73,7 +73,7 @@ async fn subscribe_to_session_kill(
recv
}
#[instrument(skip(_has_valid_authentication, kill, sub, stream))]
#[instrument(skip_all)]
async fn deal_with_messages(
_has_valid_authentication: HasValidSession,
mut kill: oneshot::Receiver<()>,
@@ -132,7 +132,7 @@ pub async fn subscribe(ctx: RpcContext, req: Request<Body>) -> Result<Response<B
let (parts, body) = req.into_parts();
let session = match async {
let token = HashSessionToken::from_request_parts(&parts)?;
let session = HasValidSession::from_session(&token, &ctx).await?;
let session = HasValidSession::from_request_parts(&parts, &ctx).await?;
Ok::<_, Error>((session, token))
}
.await
@@ -205,7 +205,7 @@ pub fn put() -> Result<(), RpcError> {
}
#[command(display(display_serializable))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn ui(
#[context] ctx: RpcContext,
#[arg] pointer: JsonPointer,

View File

@@ -1,25 +1,31 @@
use std::collections::{BTreeMap, BTreeSet};
use std::net::{Ipv4Addr, Ipv6Addr};
use std::sync::Arc;
use chrono::{DateTime, Utc};
use emver::VersionRange;
use ipnet::{Ipv4Net, Ipv6Net};
use isocountry::CountryCode;
use itertools::Itertools;
use openssl::hash::MessageDigest;
use patch_db::json_ptr::JsonPointer;
use patch_db::{HasModel, Map, MapModel, OptionModel};
use reqwest::Url;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use torut::onion::TorSecretKeyV3;
use ssh_key::public::Ed25519PublicKey;
use crate::account::AccountInfo;
use crate::config::spec::{PackagePointerSpec, SystemPointerSpec};
use crate::hostname::{generate_hostname, generate_id};
use crate::install::progress::InstallProgress;
use crate::net::interface::InterfaceId;
use crate::net::utils::{get_iface_ipv4_addr, get_iface_ipv6_addr};
use crate::s9pk::manifest::{Manifest, ManifestModel, PackageId};
use crate::status::health_check::HealthCheckId;
use crate::status::Status;
use crate::util::Version;
use crate::version::{Current, VersionT};
use crate::Error;
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
@@ -31,23 +37,22 @@ pub struct Database {
pub ui: Value,
}
impl Database {
pub fn init(tor_key: &TorSecretKeyV3, password_hash: String) -> Self {
let id = generate_id();
let my_hostname = generate_hostname();
let lan_address = my_hostname.lan_address().parse().unwrap();
pub fn init(account: &AccountInfo) -> Self {
let lan_address = account.hostname.lan_address().parse().unwrap();
// TODO
Database {
server_info: ServerInfo {
id,
id: account.server_id.clone(),
version: Current::new().semver().into(),
hostname: Some(my_hostname.0),
hostname: Some(account.hostname.no_dot_host_name()),
last_backup: None,
last_wifi_region: None,
eos_version_compat: Current::new().compat().clone(),
lan_address,
tor_address: format!("http://{}", tor_key.public().get_onion_address())
tor_address: format!("http://{}", account.key.tor_address())
.parse()
.unwrap(),
ip_info: BTreeMap::new(),
status_info: ServerStatus {
backup_progress: None,
updated: false,
@@ -63,7 +68,18 @@ impl Database {
tor: Vec::new(),
clearnet: Vec::new(),
},
password_hash,
password_hash: account.password.clone(),
pubkey: ssh_key::PublicKey::from(Ed25519PublicKey::from(&account.key.ssh_key()))
.to_openssh()
.unwrap(),
ca_fingerprint: account
.root_ca_cert
.digest(MessageDigest::sha256())
.unwrap()
.iter()
.map(|x| format!("{x:X}"))
.join(":"),
system_start_time: Utc::now().to_rfc3339(),
},
package_data: AllPackageData::default(),
ui: serde_json::from_str(include_str!("../../../frontend/patchdb-ui-seed.json"))
@@ -90,12 +106,38 @@ pub struct ServerInfo {
pub lan_address: Url,
pub tor_address: Url,
#[model]
pub ip_info: BTreeMap<String, IpInfo>,
#[model]
#[serde(default)]
pub status_info: ServerStatus,
pub wifi: WifiInfo,
pub unread_notification_count: u64,
pub connection_addresses: ConnectionAddresses,
pub password_hash: String,
pub pubkey: String,
pub ca_fingerprint: String,
pub system_start_time: String,
}
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct IpInfo {
pub ipv4_range: Option<Ipv4Net>,
pub ipv4: Option<Ipv4Addr>,
pub ipv6_range: Option<Ipv6Net>,
pub ipv6: Option<Ipv6Addr>,
}
impl IpInfo {
pub async fn for_interface(iface: &str) -> Result<Self, Error> {
let (ipv4, ipv4_range) = get_iface_ipv4_addr(iface).await?.unzip();
let (ipv6, ipv6_range) = get_iface_ipv6_addr(iface).await?.unzip();
Ok(Self {
ipv4_range,
ipv4,
ipv6_range,
ipv6,
})
}
}
#[derive(Debug, Default, Deserialize, Serialize, HasModel)]

View File

@@ -191,7 +191,7 @@ impl DependencyError {
(DependencyError::Transitive, _) => DependencyError::Transitive,
}
}
#[instrument(skip(ctx, db, receipts))]
#[instrument(skip_all)]
pub fn try_heal<'a, Db: DbHandle>(
self,
ctx: &'a RpcContext,
@@ -693,7 +693,7 @@ pub struct ConfigDryRes {
}
#[command(rename = "dry", display(display_serializable))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn configure_dry(
#[context] ctx: RpcContext,
#[parent_data] (pkg_id, dependency_id): (PackageId, PackageId),
@@ -784,7 +784,7 @@ pub async fn configure_logic(
spec,
})
}
#[instrument(skip(db, current_dependencies, current_dependent_receipt))]
#[instrument(skip_all)]
pub async fn add_dependent_to_current_dependents_lists<'a, Db: DbHandle>(
db: &mut Db,
dependent_id: &PackageId,
@@ -919,7 +919,7 @@ impl BreakTransitiveReceipts {
}
}
#[instrument(skip(db, receipts))]
#[instrument(skip_all)]
pub fn break_transitive<'a, Db: DbHandle>(
db: &'a mut Db,
id: &'a PackageId,
@@ -986,7 +986,7 @@ pub fn break_transitive<'a, Db: DbHandle>(
.boxed()
}
#[instrument(skip(ctx, db, locks))]
#[instrument(skip_all)]
pub async fn heal_all_dependents_transitive<'a, Db: DbHandle>(
ctx: &'a RpcContext,
db: &'a mut Db,
@@ -1004,7 +1004,7 @@ pub async fn heal_all_dependents_transitive<'a, Db: DbHandle>(
Ok(())
}
#[instrument(skip(ctx, db, receipts))]
#[instrument(skip_all)]
pub fn heal_transitive<'a, Db: DbHandle>(
ctx: &'a RpcContext,
db: &'a mut Db,

View File

@@ -12,7 +12,7 @@ use crate::util::display_none;
use crate::{Error, ResultExt};
#[command(cli_only, blocking, display(display_none))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub fn init(#[context] ctx: SdkContext) -> Result<(), Error> {
if !ctx.developer_key_path.exists() {
let parent = ctx.developer_key_path.parent().unwrap_or(Path::new("/"));

View File

@@ -6,6 +6,7 @@ use rpc_toolkit::yajrc::RpcError;
use crate::context::DiagnosticContext;
use crate::disk::repair;
use crate::init::SYSTEM_REBUILD_PATH;
use crate::logs::{fetch_logs, LogResponse, LogSource};
use crate::shutdown::Shutdown;
use crate::util::display_none;
@@ -13,7 +14,7 @@ use crate::Error;
pub const SYSTEMD_UNIT: &'static str = "embassy-init";
#[command(subcommands(error, logs, exit, restart, forget_disk, disk))]
#[command(subcommands(error, logs, exit, restart, forget_disk, disk, rebuild))]
pub fn diagnostic() -> Result<(), Error> {
Ok(())
}
@@ -44,13 +45,18 @@ pub fn restart(#[context] ctx: DiagnosticContext) -> Result<(), Error> {
.send(Some(Shutdown {
datadir: ctx.datadir.clone(),
disk_guid: ctx.disk_guid.clone(),
db_handle: None,
restart: true,
}))
.expect("receiver dropped");
Ok(())
}
#[command(display(display_none))]
pub async fn rebuild(#[context] ctx: DiagnosticContext) -> Result<(), Error> {
tokio::fs::write(SYSTEM_REBUILD_PATH, b"").await?;
restart(ctx)
}
#[command(subcommands(forget_disk, repair))]
pub fn disk() -> Result<(), Error> {
Ok(())

View File

@@ -35,7 +35,7 @@ impl RepairStrategy {
}
}
#[instrument]
#[instrument(skip_all)]
pub async fn e2fsck_preen(
logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> {
@@ -59,7 +59,7 @@ fn backup_existing_undo_file<'a>(path: &'a Path) -> BoxFuture<'a, Result<(), Err
.boxed()
}
#[instrument]
#[instrument(skip_all)]
pub async fn e2fsck_aggressive(
logicalname: impl AsRef<Path> + std::fmt::Debug,
) -> Result<RequiresReboot, Error> {

View File

@@ -17,7 +17,7 @@ pub const PASSWORD_PATH: &'static str = "/etc/embassy/password";
pub const DEFAULT_PASSWORD: &'static str = "password";
pub const MAIN_FS_SIZE: FsSize = FsSize::Gigabytes(8);
#[instrument(skip(disks, datadir, password))]
#[instrument(skip_all)]
pub async fn create<I, P>(
disks: &I,
pvscan: &BTreeMap<PathBuf, Option<String>>,
@@ -34,7 +34,7 @@ where
Ok(guid)
}
#[instrument(skip(disks))]
#[instrument(skip_all)]
pub async fn create_pool<I, P>(
disks: &I,
pvscan: &BTreeMap<PathBuf, Option<String>>,
@@ -84,7 +84,7 @@ pub enum FsSize {
FreePercentage(usize),
}
#[instrument(skip(datadir, password))]
#[instrument(skip_all)]
pub async fn create_fs<P: AsRef<Path>>(
guid: &str,
datadir: P,
@@ -139,7 +139,7 @@ pub async fn create_fs<P: AsRef<Path>>(
Ok(())
}
#[instrument(skip(datadir, password))]
#[instrument(skip_all)]
pub async fn create_all_fs<P: AsRef<Path>>(
guid: &str,
datadir: P,
@@ -157,7 +157,7 @@ pub async fn create_all_fs<P: AsRef<Path>>(
Ok(())
}
#[instrument(skip(datadir))]
#[instrument(skip_all)]
pub async fn unmount_fs<P: AsRef<Path>>(guid: &str, datadir: P, name: &str) -> Result<(), Error> {
unmount(datadir.as_ref().join(name)).await?;
Command::new("cryptsetup")
@@ -170,7 +170,7 @@ pub async fn unmount_fs<P: AsRef<Path>>(guid: &str, datadir: P, name: &str) -> R
Ok(())
}
#[instrument(skip(datadir))]
#[instrument(skip_all)]
pub async fn unmount_all_fs<P: AsRef<Path>>(guid: &str, datadir: P) -> Result<(), Error> {
unmount_fs(guid, &datadir, "main").await?;
unmount_fs(guid, &datadir, "package-data").await?;
@@ -181,7 +181,7 @@ pub async fn unmount_all_fs<P: AsRef<Path>>(guid: &str, datadir: P) -> Result<()
Ok(())
}
#[instrument(skip(datadir))]
#[instrument(skip_all)]
pub async fn export<P: AsRef<Path>>(guid: &str, datadir: P) -> Result<(), Error> {
Command::new("sync").invoke(ErrorKind::Filesystem).await?;
unmount_all_fs(guid, datadir).await?;
@@ -197,7 +197,7 @@ pub async fn export<P: AsRef<Path>>(guid: &str, datadir: P) -> Result<(), Error>
Ok(())
}
#[instrument(skip(datadir, password))]
#[instrument(skip_all)]
pub async fn import<P: AsRef<Path>>(
guid: &str,
datadir: P,
@@ -254,7 +254,7 @@ pub async fn import<P: AsRef<Path>>(
mount_all_fs(guid, datadir, repair, password).await
}
#[instrument(skip(datadir, password))]
#[instrument(skip_all)]
pub async fn mount_fs<P: AsRef<Path>>(
guid: &str,
datadir: P,
@@ -285,7 +285,7 @@ pub async fn mount_fs<P: AsRef<Path>>(
Ok(reboot)
}
#[instrument(skip(datadir, password))]
#[instrument(skip_all)]
pub async fn mount_all_fs<P: AsRef<Path>>(
guid: &str,
datadir: P,

View File

@@ -18,15 +18,21 @@ pub mod util;
pub const BOOT_RW_PATH: &str = "/media/boot-rw";
pub const REPAIR_DISK_PATH: &str = "/media/embassy/config/repair-disk";
#[derive(Debug, Default, Deserialize, Serialize)]
#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct OsPartitionInfo {
pub efi: Option<PathBuf>,
pub boot: PathBuf,
pub root: PathBuf,
}
impl OsPartitionInfo {
pub fn contains(&self, logicalname: impl AsRef<Path>) -> bool {
&*self.boot == logicalname.as_ref() || &*self.root == logicalname.as_ref()
self.efi
.as_ref()
.map(|p| p == logicalname.as_ref())
.unwrap_or(false)
|| &*self.boot == logicalname.as_ref()
|| &*self.root == logicalname.as_ref()
}
}

View File

@@ -35,7 +35,7 @@ impl<G: GenericMountGuard> BackupMountGuard<G> {
}
}
#[instrument(skip(password))]
#[instrument(skip_all)]
pub async fn mount(backup_disk_mount_guard: G, password: &str) -> Result<Self, Error> {
let backup_disk_path = backup_disk_mount_guard.as_ref();
let unencrypted_metadata_path =
@@ -145,7 +145,7 @@ impl<G: GenericMountGuard> BackupMountGuard<G> {
Ok(())
}
#[instrument(skip(self))]
#[instrument(skip_all)]
pub async fn mount_package_backup(
&self,
id: &PackageId,
@@ -159,7 +159,7 @@ impl<G: GenericMountGuard> BackupMountGuard<G> {
})
}
#[instrument(skip(self))]
#[instrument(skip_all)]
pub async fn save(&self) -> Result<(), Error> {
let metadata_path = self.as_ref().join("metadata.cbor");
let backup_disk_path = self.backup_disk_path();
@@ -180,7 +180,7 @@ impl<G: GenericMountGuard> BackupMountGuard<G> {
Ok(())
}
#[instrument(skip(self))]
#[instrument(skip_all)]
pub async fn unmount(mut self) -> Result<(), Error> {
if let Some(guard) = self.encrypted_guard.take() {
guard.unmount().await?;
@@ -191,7 +191,7 @@ impl<G: GenericMountGuard> BackupMountGuard<G> {
Ok(())
}
#[instrument(skip(self))]
#[instrument(skip_all)]
pub async fn save_and_unmount(self) -> Result<(), Error> {
self.save().await?;
self.unmount().await?;

View File

@@ -33,7 +33,7 @@ async fn resolve_hostname(hostname: &str) -> Result<IpAddr, Error> {
.parse()?)
}
#[instrument(skip(path, password, mountpoint))]
#[instrument(skip_all)]
pub async fn mount_cifs(
hostname: &str,
path: impl AsRef<Path>,

View File

@@ -0,0 +1,40 @@
use std::os::unix::ffi::OsStrExt;
use std::path::Path;
use async_trait::async_trait;
use digest::generic_array::GenericArray;
use digest::{Digest, OutputSizeUser};
use sha2::Sha256;
use super::{FileSystem, MountType, ReadOnly};
use crate::util::Invoke;
use crate::{Error, ResultExt};
/// Marker type for the UEFI `efivarfs` pseudo-filesystem, which exposes
/// EFI variables under its mountpoint. Carries no state of its own.
pub struct EfiVarFs;
#[async_trait]
impl FileSystem for EfiVarFs {
/// Mounts `efivarfs` at `mountpoint` via the `mount(8)` command,
/// creating the mountpoint directory first. Passes `-o ro` when a
/// read-only mount is requested.
async fn mount<P: AsRef<Path> + Send + Sync>(
&self,
mountpoint: P,
mount_type: MountType,
) -> Result<(), Error> {
// Ensure the target directory exists before invoking mount(8).
tokio::fs::create_dir_all(mountpoint.as_ref()).await?;
let mut cmd = tokio::process::Command::new("mount");
// `mount -t efivarfs efivarfs <mountpoint>`: the second "efivarfs"
// is the (dummy) source device name for this pseudo-filesystem.
cmd.arg("-t")
.arg("efivarfs")
.arg("efivarfs")
.arg(mountpoint.as_ref());
if mount_type == ReadOnly {
cmd.arg("-o").arg("ro");
}
cmd.invoke(crate::ErrorKind::Filesystem).await?;
Ok(())
}
/// Returns a SHA-256 digest identifying this filesystem's source.
/// efivarfs has no backing device, so the hash is taken over the
/// constant string "EfiVarFs" — all instances hash identically.
async fn source_hash(
&self,
) -> Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error> {
let mut sha = Sha256::new();
sha.update("EfiVarFs");
Ok(sha.finalize())
}
}

View File

@@ -11,6 +11,7 @@ pub mod bind;
pub mod block_dev;
pub mod cifs;
pub mod ecryptfs;
pub mod efivarfs;
pub mod httpdirfs;
pub mod label;

View File

@@ -3,6 +3,7 @@ use std::path::{Path, PathBuf};
use std::sync::{Arc, Weak};
use lazy_static::lazy_static;
use models::ResultExt;
use tokio::sync::Mutex;
use tracing::instrument;
@@ -36,9 +37,21 @@ impl MountGuard {
mounted: true,
})
}
pub async fn unmount(mut self) -> Result<(), Error> {
pub async fn unmount(mut self, delete_mountpoint: bool) -> Result<(), Error> {
if self.mounted {
unmount(&self.mountpoint).await?;
if delete_mountpoint {
match tokio::fs::remove_dir(&self.mountpoint).await {
Err(e) if e.raw_os_error() == Some(39) => Ok(()), // directory not empty
a => a,
}
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
format!("rm {}", self.mountpoint.display()),
)
})?;
}
self.mounted = false;
}
Ok(())
@@ -60,7 +73,7 @@ impl Drop for MountGuard {
#[async_trait::async_trait]
impl GenericMountGuard for MountGuard {
async fn unmount(mut self) -> Result<(), Error> {
MountGuard::unmount(self).await
MountGuard::unmount(self, false).await
}
}
@@ -82,7 +95,7 @@ pub struct TmpMountGuard {
}
impl TmpMountGuard {
/// DRAGONS: if you try to mount something as ro and rw at the same time, the ro mount will be upgraded to rw.
#[instrument(skip(filesystem))]
#[instrument(skip_all)]
pub async fn mount(filesystem: &impl FileSystem, mount_type: MountType) -> Result<Self, Error> {
let mountpoint = tmp_mountpoint(filesystem).await?;
let mut tmp_mounts = TMP_MOUNTS.lock().await;
@@ -111,7 +124,7 @@ impl TmpMountGuard {
}
pub async fn unmount(self) -> Result<(), Error> {
if let Ok(guard) = Arc::try_unwrap(self.guard) {
guard.unmount().await?;
guard.unmount(true).await?;
}
Ok(())
}

View File

@@ -5,7 +5,7 @@ use tracing::instrument;
use crate::util::Invoke;
use crate::{Error, ResultExt};
#[instrument(skip(src, dst))]
#[instrument(skip_all)]
pub async fn bind<P0: AsRef<Path>, P1: AsRef<Path>>(
src: P0,
dst: P1,
@@ -40,7 +40,7 @@ pub async fn bind<P0: AsRef<Path>, P1: AsRef<Path>>(
Ok(())
}
#[instrument(skip(mountpoint))]
#[instrument(skip_all)]
pub async fn unmount<P: AsRef<Path>>(mountpoint: P) -> Result<(), Error> {
tracing::debug!("Unmounting {}.", mountpoint.as_ref().display());
tokio::process::Command::new("umount")
@@ -48,15 +48,5 @@ pub async fn unmount<P: AsRef<Path>>(mountpoint: P) -> Result<(), Error> {
.arg(mountpoint.as_ref())
.invoke(crate::ErrorKind::Filesystem)
.await?;
match tokio::fs::remove_dir(mountpoint.as_ref()).await {
Err(e) if e.raw_os_error() == Some(39) => Ok(()), // directory not empty
a => a,
}
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
format!("rm {}", mountpoint.as_ref().display()),
)
})?;
Ok(())
}

View File

@@ -23,10 +23,18 @@ use crate::util::serde::IoFormat;
use crate::util::{Invoke, Version};
use crate::{Error, ResultExt as _};
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub enum PartitionTable {
Mbr,
Gpt,
}
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct DiskInfo {
pub logicalname: PathBuf,
pub partition_table: Option<PartitionTable>,
pub vendor: Option<String>,
pub model: Option<String>,
pub partitions: Vec<PartitionInfo>,
@@ -61,7 +69,25 @@ lazy_static::lazy_static! {
static ref PARTITION_REGEX: Regex = Regex::new("-part[0-9]+$").unwrap();
}
#[instrument(skip(path))]
#[instrument(skip_all)]
/// Determines the partition table type of the block device at `path`
/// by parsing the output of `fdisk -l`.
///
/// Returns `Ok(Some(PartitionTable::Mbr))` for a "dos" disklabel,
/// `Ok(Some(PartitionTable::Gpt))` for "gpt", and `Ok(None)` when fdisk
/// reports no (or an unrecognized) "Disklabel type" line.
pub async fn get_partition_table<P: AsRef<Path>>(path: P) -> Result<Option<PartitionTable>, Error> {
Ok(String::from_utf8(
Command::new("fdisk")
.arg("-l")
.arg(path.as_ref())
.invoke(crate::ErrorKind::BlockDevice)
.await?,
)?
// fdisk prints a line such as "Disklabel type: gpt" / "Disklabel type: dos".
.lines()
.find_map(|l| l.strip_prefix("Disklabel type:"))
.and_then(|t| match t.trim() {
"dos" => Some(PartitionTable::Mbr),
"gpt" => Some(PartitionTable::Gpt),
_ => None,
}))
}
#[instrument(skip_all)]
pub async fn get_vendor<P: AsRef<Path>>(path: P) -> Result<Option<String>, Error> {
let vendor = tokio::fs::read_to_string(
Path::new(SYS_BLOCK_PATH)
@@ -84,7 +110,7 @@ pub async fn get_vendor<P: AsRef<Path>>(path: P) -> Result<Option<String>, Error
})
}
#[instrument(skip(path))]
#[instrument(skip_all)]
pub async fn get_model<P: AsRef<Path>>(path: P) -> Result<Option<String>, Error> {
let model = tokio::fs::read_to_string(
Path::new(SYS_BLOCK_PATH)
@@ -103,7 +129,7 @@ pub async fn get_model<P: AsRef<Path>>(path: P) -> Result<Option<String>, Error>
Ok(if model.is_empty() { None } else { Some(model) })
}
#[instrument(skip(path))]
#[instrument(skip_all)]
pub async fn get_capacity<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
Ok(String::from_utf8(
Command::new("blockdev")
@@ -116,7 +142,7 @@ pub async fn get_capacity<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
.parse::<u64>()?)
}
#[instrument(skip(path))]
#[instrument(skip_all)]
pub async fn get_label<P: AsRef<Path>>(path: P) -> Result<Option<String>, Error> {
let label = String::from_utf8(
Command::new("lsblk")
@@ -131,7 +157,7 @@ pub async fn get_label<P: AsRef<Path>>(path: P) -> Result<Option<String>, Error>
Ok(if label.is_empty() { None } else { Some(label) })
}
#[instrument(skip(path))]
#[instrument(skip_all)]
pub async fn get_used<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
Ok(String::from_utf8(
Command::new("df")
@@ -149,7 +175,7 @@ pub async fn get_used<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
.parse::<u64>()?)
}
#[instrument(skip(path))]
#[instrument(skip_all)]
pub async fn get_available<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
Ok(String::from_utf8(
Command::new("df")
@@ -167,7 +193,7 @@ pub async fn get_available<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
.parse::<u64>()?)
}
#[instrument(skip(path))]
#[instrument(skip_all)]
pub async fn get_percentage<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
Ok(String::from_utf8(
Command::new("df")
@@ -186,7 +212,7 @@ pub async fn get_percentage<P: AsRef<Path>>(path: P) -> Result<u64, Error> {
.parse::<u64>()?)
}
#[instrument]
#[instrument(skip_all)]
pub async fn pvscan() -> Result<BTreeMap<PathBuf, Option<String>>, Error> {
let pvscan_out = Command::new("pvscan")
.invoke(crate::ErrorKind::DiskManagement)
@@ -222,7 +248,7 @@ pub async fn recovery_info(
Ok(None)
}
#[instrument]
#[instrument(skip_all)]
pub async fn list(os: &OsPartitionInfo) -> Result<Vec<DiskInfo>, Error> {
struct DiskIndex {
parts: IndexSet<PathBuf>,
@@ -328,6 +354,16 @@ pub async fn list(os: &OsPartitionInfo) -> Result<Vec<DiskInfo>, Error> {
}
async fn disk_info(disk: PathBuf) -> DiskInfo {
let partition_table = get_partition_table(&disk)
.await
.map_err(|e| {
tracing::warn!(
"Could not get partition table of {}: {}",
disk.display(),
e.source
)
})
.unwrap_or_default();
let vendor = get_vendor(&disk)
.await
.map_err(|e| tracing::warn!("Could not get vendor of {}: {}", disk.display(), e.source))
@@ -342,6 +378,7 @@ async fn disk_info(disk: PathBuf) -> DiskInfo {
.unwrap_or_default();
DiskInfo {
logicalname: disk,
partition_table,
vendor,
model,
partitions: Vec::new(),

View File

@@ -1,2 +0,0 @@
{boot} /boot vfat defaults 0 2
{root} / ext4 defaults 0 1

View File

@@ -1,10 +1,9 @@
use patch_db::DbHandle;
use rand::{thread_rng, Rng};
use sqlx::Connection;
use tokio::process::Command;
use tracing::instrument;
use crate::context::RpcContext;
use crate::account::AccountInfo;
use crate::util::Invoke;
use crate::{Error, ErrorKind};
#[derive(Clone, serde::Deserialize, serde::Serialize, Debug)]
@@ -33,33 +32,11 @@ impl Hostname {
}
}
pub async fn get_current_ip(eth: String) -> Result<String, Error> {
let cmd = format!(r"ifconfig {} | awk '/inet / {{print $2}}'", eth);
let out = Command::new("bash")
.arg("-c")
.arg(cmd)
.invoke(ErrorKind::ParseSysInfo)
.await?;
let out_string = String::from_utf8(out)?;
Ok(out_string.trim().to_owned())
}
pub async fn get_embassyd_tor_addr(rpc_ctx: RpcContext) -> Result<String, Error> {
let mut secrets_handle = rpc_ctx.secret_store.acquire().await?;
let mut secrets_tx = secrets_handle.begin().await?;
let tor_key = crate::net::tor::os_key(&mut secrets_tx).await?;
Ok(tor_key.public().get_onion_address().to_string())
}
pub fn generate_hostname() -> Hostname {
let mut rng = thread_rng();
let adjective = &ADJECTIVES[rng.gen_range(0..ADJECTIVES.len())];
let noun = &NOUNS[rng.gen_range(0..NOUNS.len())];
Hostname(format!("embassy-{adjective}-{noun}"))
Hostname(format!("{adjective}-{noun}"))
}
pub fn generate_id() -> String {
@@ -67,7 +44,7 @@ pub fn generate_id() -> String {
id.to_string()
}
#[instrument]
#[instrument(skip_all)]
pub async fn get_current_hostname() -> Result<Hostname, Error> {
let out = Command::new("hostname")
.invoke(ErrorKind::ParseSysInfo)
@@ -76,7 +53,7 @@ pub async fn get_current_hostname() -> Result<Hostname, Error> {
Ok(Hostname(out_string.trim().to_owned()))
}
#[instrument]
#[instrument(skip_all)]
pub async fn set_hostname(hostname: &Hostname) -> Result<(), Error> {
let hostname: &String = &hostname.0;
let _out = Command::new("hostnamectl")
@@ -87,83 +64,9 @@ pub async fn set_hostname(hostname: &Hostname) -> Result<(), Error> {
Ok(())
}
#[instrument(skip(handle, receipts))]
pub async fn get_id<Db: DbHandle>(
handle: &mut Db,
receipts: &HostNameReceipt,
) -> Result<String, Error> {
let id = receipts.id.get(handle).await?;
Ok(id)
}
pub async fn get_hostname<Db: DbHandle>(
handle: &mut Db,
receipts: &HostNameReceipt,
) -> Result<Hostname, Error> {
if let Ok(hostname) = receipts.hostname.get(handle).await {
if let Some(hostname) = hostname.to_owned() {
return Ok(Hostname(hostname));
}
}
let id = get_id(handle, receipts).await?;
if id.len() != 8 {
return Ok(generate_hostname());
}
return Ok(Hostname(format!("embassy-{}", id)));
}
pub async fn ensure_hostname_is_set<Db: DbHandle>(
handle: &mut Db,
receipts: &HostNameReceipt,
) -> Result<(), Error> {
let hostname = get_hostname(handle, &receipts).await?;
receipts.hostname.set(handle, Some(hostname.0)).await?;
Ok(())
}
#[derive(Clone)]
pub struct HostNameReceipt {
hostname: patch_db::LockReceipt<Option<String>, ()>,
pub id: patch_db::LockReceipt<String, ()>,
}
impl HostNameReceipt {
pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
let mut locks = Vec::new();
let setup = Self::setup(&mut locks);
setup(&db.lock_all(locks).await?)
}
pub fn setup(
locks: &mut Vec<patch_db::LockTargetId>,
) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
use patch_db::LockType;
let hostname = crate::db::DatabaseModel::new()
.server_info()
.hostname()
.make_locker(LockType::Write)
.add_to_keys(locks);
let id = crate::db::DatabaseModel::new()
.server_info()
.id()
.make_locker(LockType::Write)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
hostname: hostname.verify(skeleton_key)?,
id: id.verify(skeleton_key)?,
})
}
}
}
#[instrument(skip(handle, receipts))]
pub async fn sync_hostname<Db: DbHandle>(
handle: &mut Db,
receipts: &HostNameReceipt,
) -> Result<(), Error> {
set_hostname(&get_hostname(handle, receipts).await?).await?;
#[instrument(skip_all)]
pub async fn sync_hostname(account: &AccountInfo) -> Result<(), Error> {
set_hostname(&account.hostname).await?;
Command::new("systemctl")
.arg("restart")
.arg("avahi-daemon")

View File

@@ -1,20 +1,27 @@
use std::collections::HashMap;
use std::collections::{BTreeMap, HashMap};
use std::fs::Permissions;
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
use std::process::Stdio;
use std::time::Duration;
use color_eyre::eyre::eyre;
use helpers::NonDetachingJoinHandle;
use models::ResultExt;
use patch_db::{DbHandle, LockReceipt, LockType};
use rand::random;
use sqlx::{Pool, Postgres};
use tokio::process::Command;
use crate::account::AccountInfo;
use crate::context::rpc::RpcContextConfig;
use crate::db::model::ServerStatus;
use crate::db::model::{IpInfo, ServerStatus};
use crate::install::PKG_ARCHIVE_DIR;
use crate::sound::CIRCLE_OF_5THS_SHORT;
use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH;
use crate::sound::BEP;
use crate::system::time;
use crate::util::Invoke;
use crate::Error;
use crate::{Error, ARCH};
pub const SYSTEM_REBUILD_PATH: &str = "/media/embassy/config/system-rebuild";
pub const STANDBY_MODE_PATH: &str = "/media/embassy/config/standby";
@@ -37,6 +44,8 @@ pub struct InitReceipts {
pub version_range: LockReceipt<emver::VersionRange, ()>,
pub last_wifi_region: LockReceipt<Option<isocountry::CountryCode>, ()>,
pub status_info: LockReceipt<ServerStatus, ()>,
pub ip_info: LockReceipt<BTreeMap<String, IpInfo>, ()>,
pub system_start_time: LockReceipt<String, ()>,
}
impl InitReceipts {
pub async fn new(db: &mut impl DbHandle) -> Result<Self, Error> {
@@ -57,19 +66,31 @@ impl InitReceipts {
.last_wifi_region()
.make_locker(LockType::Write)
.add_to_keys(&mut locks);
let ip_info = crate::db::DatabaseModel::new()
.server_info()
.ip_info()
.make_locker(LockType::Write)
.add_to_keys(&mut locks);
let status_info = crate::db::DatabaseModel::new()
.server_info()
.status_info()
.into_model()
.make_locker(LockType::Write)
.add_to_keys(&mut locks);
let system_start_time = crate::db::DatabaseModel::new()
.server_info()
.system_start_time()
.make_locker(LockType::Write)
.add_to_keys(&mut locks);
let skeleton_key = db.lock_all(locks).await?;
Ok(Self {
server_version: server_version.verify(&skeleton_key)?,
version_range: version_range.verify(&skeleton_key)?,
ip_info: ip_info.verify(&skeleton_key)?,
status_info: status_info.verify(&skeleton_key)?,
last_wifi_region: last_wifi_region.verify(&skeleton_key)?,
system_start_time: system_start_time.verify(&skeleton_key)?,
})
}
}
@@ -196,24 +217,77 @@ pub struct InitResult {
}
pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
tokio::fs::create_dir_all("/run/embassy")
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, "mkdir -p /run/embassy"))?;
if tokio::fs::metadata(LOCAL_AUTH_COOKIE_PATH).await.is_err() {
tokio::fs::write(
LOCAL_AUTH_COOKIE_PATH,
base64::encode(random::<[u8; 32]>()).as_bytes(),
)
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
format!("write {}", LOCAL_AUTH_COOKIE_PATH),
)
})?;
tokio::fs::set_permissions(LOCAL_AUTH_COOKIE_PATH, Permissions::from_mode(0o046)).await?;
Command::new("chown")
.arg("root:embassy")
.arg(LOCAL_AUTH_COOKIE_PATH)
.invoke(crate::ErrorKind::Filesystem)
.await?;
}
let secret_store = cfg.secret_store().await?;
let db = cfg.db(&secret_store).await?;
tracing::info!("Opened Postgres");
crate::ssh::sync_keys_from_db(&secret_store, "/home/start9/.ssh/authorized_keys").await?;
tracing::info!("Synced SSH Keys");
let account = AccountInfo::load(&secret_store).await?;
let db = cfg.db(&account).await?;
tracing::info!("Opened PatchDB");
let mut handle = db.handle();
crate::db::DatabaseModel::new()
.server_info()
.lock(&mut handle, LockType::Write)
.await?;
let receipts = InitReceipts::new(&mut handle).await?;
// write to ca cert store
tokio::fs::write(
"/usr/local/share/ca-certificates/embassy-root-ca.crt",
account.root_ca_cert.to_pem()?,
)
.await?;
Command::new("update-ca-certificates")
.invoke(crate::ErrorKind::OpenSsl)
.await?;
if let Some(wifi_interface) = &cfg.wifi_interface {
crate::net::wifi::synchronize_wpa_supplicant_conf(
&cfg.datadir().join("main"),
wifi_interface,
&receipts.last_wifi_region.get(&mut handle).await?,
)
.await?;
tracing::info!("Synchronized WiFi");
}
let should_rebuild = tokio::fs::metadata(SYSTEM_REBUILD_PATH).await.is_ok()
|| &*receipts.server_version.get(&mut handle).await? < &emver::Version::new(0, 3, 2, 0);
|| &*receipts.server_version.get(&mut handle).await? < &emver::Version::new(0, 3, 2, 0)
|| (*ARCH == "x86_64"
&& &*receipts.server_version.get(&mut handle).await?
< &emver::Version::new(0, 3, 4, 0));
let song = if should_rebuild {
Some(NonDetachingJoinHandle::from(tokio::spawn(async {
loop {
CIRCLE_OF_5THS_SHORT.play().await.unwrap();
tokio::time::sleep(Duration::from_secs(10)).await;
BEP.play().await.unwrap();
BEP.play().await.unwrap();
tokio::time::sleep(Duration::from_secs(60)).await;
}
})))
} else {
@@ -224,6 +298,13 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
if tokio::fs::metadata(&log_dir).await.is_err() {
tokio::fs::create_dir_all(&log_dir).await?;
}
let current_machine_id = tokio::fs::read_to_string("/etc/machine-id").await?;
let mut machine_ids = tokio::fs::read_dir(&log_dir).await?;
while let Some(machine_id) = machine_ids.next_entry().await? {
if machine_id.file_name().to_string_lossy().trim() != current_machine_id.trim() {
tokio::fs::remove_dir_all(machine_id.path()).await?;
}
}
crate::disk::mount::util::bind(&log_dir, "/var/log/journal", false).await?;
Command::new("systemctl")
.arg("restart")
@@ -231,22 +312,15 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
.invoke(crate::ErrorKind::Journald)
.await?;
tracing::info!("Mounted Logs");
let tmp_dir = cfg.datadir().join("package-data/tmp");
if tokio::fs::metadata(&tmp_dir).await.is_err() {
tokio::fs::create_dir_all(&tmp_dir).await?;
}
let tmp_docker = cfg.datadir().join("package-data/tmp/docker");
let tmp_docker_exists = tokio::fs::metadata(&tmp_docker).await.is_ok();
if should_rebuild || !tmp_docker_exists {
if tmp_docker_exists {
tokio::fs::remove_dir_all(&tmp_docker).await?;
}
Command::new("cp")
.arg("-ra")
.arg("/var/lib/docker")
.arg(&tmp_docker)
.invoke(crate::ErrorKind::Filesystem)
.await?;
if should_rebuild && tmp_docker_exists {
tokio::fs::remove_dir_all(&tmp_docker).await?;
}
Command::new("systemctl")
.arg("stop")
@@ -310,30 +384,6 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
.await?;
tracing::info!("Enabled Docker QEMU Emulation");
crate::ssh::sync_keys_from_db(&secret_store, "/home/start9/.ssh/authorized_keys").await?;
tracing::info!("Synced SSH Keys");
if let Some(wifi_interface) = &cfg.wifi_interface {
crate::net::wifi::synchronize_wpa_supplicant_conf(
&cfg.datadir().join("main"),
wifi_interface,
&receipts.last_wifi_region.get(&mut handle).await?,
)
.await?;
tracing::info!("Synchronized WiFi");
}
receipts
.status_info
.set(
&mut handle,
ServerStatus {
updated: false,
update_progress: None,
backup_progress: None,
},
)
.await?;
let mut warn_time_not_synced = true;
for _ in 0..60 {
if check_time_is_synchronized().await? {
@@ -348,7 +398,34 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
tracing::info!("Syncronized system clock");
}
crate::version::init(&mut handle, &receipts).await?;
Command::new("systemctl")
.arg("start")
.arg("tor")
.invoke(crate::ErrorKind::Tor)
.await?;
receipts
.ip_info
.set(&mut handle, crate::net::dhcp::init_ips().await?)
.await?;
receipts
.status_info
.set(
&mut handle,
ServerStatus {
updated: false,
update_progress: None,
backup_progress: None,
},
)
.await?;
receipts
.system_start_time
.set(&mut handle, time().await?)
.await?;
crate::version::init(&mut handle, &secret_store, &receipts).await?;
if should_rebuild {
match tokio::fs::remove_file(SYSTEM_REBUILD_PATH).await {

View File

@@ -62,7 +62,7 @@ impl UpdateDependencyReceipts {
}
}
#[instrument(skip(ctx, db, deps, receipts))]
#[instrument(skip_all)]
pub async fn update_dependency_errors_of_dependents<'a, Db: DbHandle>(
ctx: &RpcContext,
db: &mut Db,
@@ -99,7 +99,7 @@ pub async fn update_dependency_errors_of_dependents<'a, Db: DbHandle>(
Ok(())
}
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn cleanup(ctx: &RpcContext, id: &PackageId, version: &Version) -> Result<(), Error> {
let mut errors = ErrorCollection::new();
ctx.managers.remove(&(id.clone(), version.clone())).await;
@@ -204,7 +204,7 @@ impl CleanupFailedReceipts {
}
}
#[instrument(skip(ctx, db, receipts))]
#[instrument(skip_all)]
pub async fn cleanup_failed<Db: DbHandle>(
ctx: &RpcContext,
db: &mut Db,
@@ -272,7 +272,7 @@ pub async fn cleanup_failed<Db: DbHandle>(
Ok(())
}
#[instrument(skip(db, current_dependencies, current_dependent_receipt))]
#[instrument(skip_all)]
pub async fn remove_from_current_dependents_lists<'a, Db: DbHandle>(
db: &mut Db,
id: &'a PackageId,
@@ -340,7 +340,7 @@ impl UninstallReceipts {
}
}
}
#[instrument(skip(ctx, secrets, db))]
#[instrument(skip_all)]
pub async fn uninstall<Ex>(
ctx: &RpcContext,
db: &mut PatchDbHandle,
@@ -404,7 +404,7 @@ where
Ok(())
}
#[instrument(skip(secrets))]
#[instrument(skip_all)]
pub async fn remove_tor_keys<Ex>(secrets: &mut Ex, id: &PackageId) -> Result<(), Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,

View File

@@ -45,7 +45,7 @@ use crate::s9pk::reader::S9pkReader;
use crate::status::{MainStatus, Status};
use crate::util::io::{copy_and_shutdown, response_to_reader};
use crate::util::serde::{display_serializable, Port};
use crate::util::{display_none, AsyncFileExt, Version};
use crate::util::{assure_send, display_none, AsyncFileExt, Version};
use crate::version::{Current, VersionT};
use crate::volume::{asset_dir, script_dir};
use crate::{Error, ErrorKind, ResultExt};
@@ -116,7 +116,7 @@ impl std::fmt::Display for MinMax {
display(display_none),
metadata(sync_db = true)
)]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn install(
#[context] ctx: RpcContext,
#[arg] id: String,
@@ -326,7 +326,7 @@ pub async fn install(
}
#[command(rpc_only, display(display_none))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn sideload(
#[context] ctx: RpcContext,
#[arg] manifest: Manifest,
@@ -482,7 +482,7 @@ pub async fn sideload(
Ok(guid)
}
#[instrument(skip(ctx))]
#[instrument(skip_all)]
async fn cli_install(
ctx: CliContext,
target: String,
@@ -574,7 +574,7 @@ pub async fn uninstall(#[arg] id: PackageId) -> Result<PackageId, Error> {
}
#[command(rename = "dry", display(display_serializable))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn uninstall_dry(
#[context] ctx: RpcContext,
#[parent_data] id: PackageId,
@@ -597,7 +597,7 @@ pub async fn uninstall_dry(
Ok(BreakageRes(breakages))
}
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn uninstall_impl(ctx: RpcContext, id: PackageId) -> Result<(), Error> {
let mut handle = ctx.db.handle();
let mut tx = handle.begin().await?;
@@ -700,7 +700,7 @@ impl DownloadInstallReceipts {
}
}
#[instrument(skip(ctx, temp_manifest, s9pk))]
#[instrument(skip_all)]
pub async fn download_install_s9pk(
ctx: &RpcContext,
temp_manifest: &Manifest,
@@ -873,7 +873,7 @@ impl InstallS9Receipts {
}
}
#[instrument(skip(ctx, rdr))]
#[instrument(skip_all)]
pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
ctx: &RpcContext,
pkg_id: &PackageId,
@@ -1116,13 +1116,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
tracing::info!("Install {}@{}: Installed interfaces", pkg_id, version);
tracing::info!("Install {}@{}: Creating manager", pkg_id, version);
ctx.managers
.add(
ctx.clone(),
manifest.clone(),
manifest.interfaces.tor_keys(&mut sql_tx, pkg_id).await?,
)
.await?;
ctx.managers.add(ctx.clone(), manifest.clone()).await?;
tracing::info!("Install {}@{}: Created manager", pkg_id, version);
let static_files = StaticFiles::local(pkg_id, version, manifest.assets.icon_type());
@@ -1408,7 +1402,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
Ok(())
}
#[instrument(skip(datadir))]
#[instrument(skip_all)]
pub fn load_images<'a, P: AsRef<Path> + 'a + Send + Sync>(
datadir: P,
) -> BoxFuture<'a, Result<(), Error>> {

View File

@@ -60,7 +60,7 @@ pub async fn update() -> Result<(), Error> {
Ok(())
}
#[instrument(skip(ctx))]
#[instrument(skip_all)]
#[command(display(display_serializable))]
pub async fn dry(
#[context] ctx: RpcContext,

View File

@@ -15,6 +15,7 @@ lazy_static::lazy_static! {
};
}
pub mod account;
pub mod action;
pub mod auth;
pub mod backup;
@@ -29,7 +30,6 @@ pub mod diagnostic;
pub mod disk;
pub mod error;
pub mod hostname;
pub mod id;
pub mod init;
pub mod inspect;
pub mod install;
@@ -86,6 +86,7 @@ pub fn main_api() -> Result<(), RpcError> {
}
#[command(subcommands(
system::time,
system::logs,
system::kernel_logs,
system::metrics,

View File

@@ -64,7 +64,7 @@ impl Stream for LogStream {
}
}
#[instrument(skip(logs, ws_fut))]
#[instrument(skip_all)]
async fn ws_handler<
WSFut: Future<Output = Result<Result<WebSocketStream<Upgraded>, HyperError>, JoinError>>,
>(
@@ -409,7 +409,7 @@ async fn journalctl(
})
}
#[instrument]
#[instrument(skip_all)]
pub async fn fetch_logs(
id: LogSource,
limit: Option<usize>,
@@ -456,7 +456,7 @@ pub async fn fetch_logs(
})
}
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn follow_logs(
ctx: RpcContext,
id: LogSource,

View File

@@ -90,7 +90,7 @@ impl HealthCheckStatusReceipt {
}
}
#[instrument(skip(ctx, db))]
#[instrument(skip_all)]
pub async fn check<Db: DbHandle>(
ctx: &RpcContext,
db: &mut Db,

View File

@@ -9,26 +9,25 @@ use std::time::Duration;
use bollard::container::{KillContainerOptions, StopContainerOptions};
use color_eyre::eyre::eyre;
use embassy_container_init::{ProcessGroupId, SignalGroupParams};
use helpers::RpcClient;
use helpers::UnixRpcClient;
use nix::sys::signal::Signal;
use patch_db::DbHandle;
use sqlx::{Executor, Postgres};
use sqlx::{Connection, Executor, Postgres};
use tokio::sync::watch::error::RecvError;
use tokio::sync::watch::{channel, Receiver, Sender};
use tokio::sync::{oneshot, Notify, RwLock};
use torut::onion::TorSecretKeyV3;
use tracing::instrument;
use crate::context::RpcContext;
use crate::manager::sync::synchronizer;
use crate::net::interface::InterfaceId;
use crate::net::GeneratedCertificateMountPoint;
use crate::net::net_controller::NetService;
use crate::procedure::docker::{DockerContainer, DockerProcedure, LongRunning};
#[cfg(feature = "js_engine")]
use crate::procedure::js_scripts::JsProcedure;
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::util::{ApplyRef, Container, NonDetachingJoinHandle, Version};
use crate::volume::Volume;
use crate::Error;
pub mod health;
@@ -40,7 +39,7 @@ pub const HEALTH_CHECK_GRACE_PERIOD_SECONDS: u64 = 5;
#[derive(Default)]
pub struct ManagerMap(RwLock<BTreeMap<(PackageId, Version), Arc<Manager>>>);
impl ManagerMap {
#[instrument(skip(self, ctx, db, secrets))]
#[instrument(skip_all)]
pub async fn init<Db: DbHandle, Ex>(
&self,
ctx: &RpcContext,
@@ -70,23 +69,17 @@ impl ManagerMap {
continue;
};
let tor_keys = man.interfaces.tor_keys(secrets, &package).await?;
res.insert(
(package, man.version.clone()),
Arc::new(Manager::create(ctx.clone(), man, tor_keys).await?),
Arc::new(Manager::create(ctx.clone(), man).await?),
);
}
*self.0.write().await = res;
Ok(())
}
#[instrument(skip(self, ctx))]
pub async fn add(
&self,
ctx: RpcContext,
manifest: Manifest,
tor_keys: BTreeMap<InterfaceId, TorSecretKeyV3>,
) -> Result<(), Error> {
#[instrument(skip_all)]
pub async fn add(&self, ctx: RpcContext, manifest: Manifest) -> Result<(), Error> {
let mut lock = self.0.write().await;
let id = (manifest.id.clone(), manifest.version.clone());
if let Some(man) = lock.remove(&id) {
@@ -94,14 +87,11 @@ impl ManagerMap {
man.exit().await?;
}
}
lock.insert(
id,
Arc::new(Manager::create(ctx, manifest, tor_keys).await?),
);
lock.insert(id, Arc::new(Manager::create(ctx, manifest).await?));
Ok(())
}
#[instrument(skip(self))]
#[instrument(skip_all)]
pub async fn remove(&self, id: &(PackageId, Version)) {
if let Some(man) = self.0.write().await.remove(id) {
if let Err(e) = man.exit().await {
@@ -111,7 +101,7 @@ impl ManagerMap {
}
}
#[instrument(skip(self))]
#[instrument(skip_all)]
pub async fn empty(&self) -> Result<(), Error> {
let res =
futures::future::join_all(std::mem::take(&mut *self.0.write().await).into_iter().map(
@@ -138,7 +128,7 @@ impl ManagerMap {
})
}
#[instrument(skip(self))]
#[instrument(skip_all)]
pub async fn get(&self, id: &(PackageId, Version)) -> Option<Arc<Manager>> {
self.0.read().await.get(id).cloned()
}
@@ -162,7 +152,6 @@ struct ManagerSeed {
ctx: RpcContext,
manifest: Manifest,
container_name: String,
tor_keys: BTreeMap<InterfaceId, TorSecretKeyV3>,
}
pub struct ManagerSharedState {
@@ -185,18 +174,13 @@ pub enum OnStop {
Exit,
}
#[instrument(skip(state))]
#[instrument(skip_all)]
async fn run_main(
state: &Arc<ManagerSharedState>,
) -> Result<Result<NoOutput, (i32, String)>, Error> {
let rt_state = state.clone();
let interfaces = main_interfaces(&*state.seed)?;
let generated_certificate = generate_certificate(&*state.seed, &interfaces).await?;
let mut runtime = NonDetachingJoinHandle::from(tokio::spawn(start_up_image(
rt_state,
generated_certificate,
)));
let mut runtime = NonDetachingJoinHandle::from(tokio::spawn(start_up_image(rt_state)));
let ip = match state.persistent_container.is_some() {
false => Some(match get_running_ip(state, &mut runtime).await {
GetRunningIp::Ip(x) => x,
@@ -206,9 +190,11 @@ async fn run_main(
true => None,
};
if let Some(ip) = ip {
add_network_for_main(&*state.seed, ip, interfaces, generated_certificate).await?;
}
let svc = if let Some(ip) = ip {
Some(add_network_for_main(&*state.seed, ip).await?)
} else {
None
};
set_commit_health_true(state);
let health = main_health_check_daemon(state.clone());
@@ -218,8 +204,8 @@ async fn run_main(
_ = health => Err(Error::new(eyre!("Health check daemon exited!"), crate::ErrorKind::Unknown)),
_ = state.killer.notified() => Ok(Err((137, "Killed".to_string())))
};
if let Some(ip) = ip {
remove_network_for_main(&*state.seed, ip).await?;
if let Some(svc) = svc {
remove_network_for_main(svc).await?;
}
res
}
@@ -228,7 +214,6 @@ async fn run_main(
/// Note for _generated_certificate: Needed to know that before we start the state we have generated the certificate
async fn start_up_image(
rt_state: Arc<ManagerSharedState>,
_generated_certificate: GeneratedCertificateMountPoint,
) -> Result<Result<NoOutput, (i32, String)>, Error> {
rt_state
.seed
@@ -247,18 +232,13 @@ async fn start_up_image(
}
impl Manager {
#[instrument(skip(ctx))]
async fn create(
ctx: RpcContext,
manifest: Manifest,
tor_keys: BTreeMap<InterfaceId, TorSecretKeyV3>,
) -> Result<Self, Error> {
#[instrument(skip_all)]
async fn create(ctx: RpcContext, manifest: Manifest) -> Result<Self, Error> {
let (on_stop, recv) = channel(OnStop::Sleep);
let seed = Arc::new(ManagerSeed {
ctx,
container_name: DockerProcedure::container_name(&manifest.id, None),
manifest,
tor_keys,
});
let persistent_container = PersistentContainer::init(&seed).await?;
let shared = Arc::new(ManagerSharedState {
@@ -291,7 +271,7 @@ impl Manager {
send_signal(&self.shared, signal).await
}
#[instrument(skip(self))]
#[instrument(skip_all)]
async fn exit(&self) -> Result<(), Error> {
self.shared
.commit_health_check_results
@@ -327,7 +307,6 @@ impl Manager {
}) => (), // Already stopped
a => a?,
};
self.shared.killer.notify_waiters();
if let Some(thread) = self.thread.take().await {
@@ -360,7 +339,7 @@ impl Manager {
gid
}
pub fn rpc_client(&self) -> Option<Arc<RpcClient>> {
pub fn rpc_client(&self) -> Option<Arc<UnixRpcClient>> {
self.shared
.persistent_container
.as_ref()
@@ -450,11 +429,11 @@ async fn manager_thread_loop(mut recv: Receiver<OnStop>, thread_shared: &Arc<Man
pub struct PersistentContainer {
_running_docker: NonDetachingJoinHandle<()>,
rpc_client: Receiver<Arc<RpcClient>>,
rpc_client: Receiver<Arc<UnixRpcClient>>,
}
impl PersistentContainer {
#[instrument(skip(seed))]
#[instrument(skip_all)]
async fn init(seed: &Arc<ManagerSeed>) -> Result<Option<Self>, Error> {
Ok(if let Some(containers) = &seed.manifest.containers {
let (running_docker, rpc_client) =
@@ -472,16 +451,14 @@ impl PersistentContainer {
async fn spawn_persistent_container(
seed: Arc<ManagerSeed>,
container: DockerContainer,
) -> Result<(NonDetachingJoinHandle<()>, Receiver<Arc<RpcClient>>), Error> {
) -> Result<(NonDetachingJoinHandle<()>, Receiver<Arc<UnixRpcClient>>), Error> {
let (send_inserter, inserter) = oneshot::channel();
Ok((
tokio::task::spawn(async move {
let mut inserter_send: Option<Sender<Arc<RpcClient>>> = None;
let mut send_inserter: Option<oneshot::Sender<Receiver<Arc<RpcClient>>>> = Some(send_inserter);
let mut inserter_send: Option<Sender<Arc<UnixRpcClient>>> = None;
let mut send_inserter: Option<oneshot::Sender<Receiver<Arc<UnixRpcClient>>>> = Some(send_inserter);
loop {
if let Err(e) = async {
let interfaces = main_interfaces(&*seed)?;
let generated_certificate = generate_certificate(&*seed, &interfaces).await?;
let (mut runtime, inserter) =
long_running_docker(&seed, &container).await?;
@@ -494,7 +471,7 @@ async fn spawn_persistent_container(
return Ok(());
}
};
add_network_for_main(&*seed, ip, interfaces, generated_certificate).await?;
let svc = add_network_for_main(&*seed, ip).await?;
if let Some(inserter_send) = inserter_send.as_mut() {
let _ = inserter_send.send(Arc::new(inserter));
@@ -510,7 +487,7 @@ async fn spawn_persistent_container(
a = runtime.running_output => a.map_err(|_| Error::new(eyre!("Manager runtime panicked!"), crate::ErrorKind::Docker)).map(|_| ()),
};
remove_network_for_main(&*seed, ip).await?;
remove_network_for_main(svc).await?;
res
}.await {
@@ -519,6 +496,7 @@ async fn spawn_persistent_container(
} else {
break;
}
tokio::time::sleep(Duration::from_millis(200)).await;
}
})
.into(),
@@ -529,7 +507,7 @@ async fn spawn_persistent_container(
async fn long_running_docker(
seed: &ManagerSeed,
container: &DockerContainer,
) -> Result<(LongRunning, RpcClient), Error> {
) -> Result<(LongRunning, UnixRpcClient), Error> {
container
.long_running_execute(
&seed.ctx,
@@ -540,16 +518,8 @@ async fn long_running_docker(
.await
}
async fn remove_network_for_main(seed: &ManagerSeed, ip: std::net::Ipv4Addr) -> Result<(), Error> {
seed.ctx
.net_controller
.remove(
&seed.manifest.id,
ip,
seed.manifest.interfaces.0.keys().cloned(),
)
.await?;
Ok(())
/// Tear down the NetService created for this package's main container,
/// delegating all cleanup to `NetService::remove_all`.
async fn remove_network_for_main(svc: NetService) -> Result<(), Error> {
    svc.remove_all().await
}
fn fetch_starting_to_running(state: &Arc<ManagerSharedState>) {
@@ -592,18 +562,32 @@ fn set_commit_health_true(state: &Arc<ManagerSharedState>) {
async fn add_network_for_main(
seed: &ManagerSeed,
ip: std::net::Ipv4Addr,
interfaces: Vec<(
InterfaceId,
&crate::net::interface::Interface,
TorSecretKeyV3,
)>,
generated_certificate: GeneratedCertificateMountPoint,
) -> Result<(), Error> {
seed.ctx
) -> Result<NetService, Error> {
let mut svc = seed
.ctx
.net_controller
.add(&seed.manifest.id, ip, interfaces, generated_certificate)
.create_service(seed.manifest.id.clone(), ip)
.await?;
Ok(())
// DEPRECATED
let mut secrets = seed.ctx.secret_store.acquire().await?;
let mut tx = secrets.begin().await?;
for (id, interface) in &seed.manifest.interfaces.0 {
for (external, internal) in interface.lan_config.iter().flatten() {
svc.add_lan(&mut tx, id.clone(), external.0, internal.internal, false)
.await?;
}
for (external, internal) in interface.tor_config.iter().flat_map(|t| &t.port_mapping) {
svc.add_tor(&mut tx, id.clone(), external.0, internal.0)
.await?;
}
}
for volume in seed.manifest.volumes.values() {
if let Volume::Certificate { interface_id } = volume {
svc.export_cert(&mut tx, interface_id, ip.into()).await?;
}
}
tx.commit().await?;
Ok(svc)
}
enum GetRunningIp {
@@ -644,9 +628,23 @@ async fn get_running_ip(
if let Poll::Ready(res) = futures::poll!(&mut runtime) {
match res {
Ok(Ok(response)) => return GetRunningIp::EarlyExit(response),
Err(_) | Ok(Err(_)) => {
Err(e) => {
return GetRunningIp::Error(Error::new(
eyre!("Manager runtime panicked!"),
match e.try_into_panic() {
Ok(e) => {
eyre!(
"Manager runtime panicked: {}",
e.downcast_ref::<&'static str>().unwrap_or(&"UNKNOWN")
)
}
_ => eyre!("Manager runtime cancelled!"),
},
crate::ErrorKind::Docker,
))
}
Ok(Err(e)) => {
return GetRunningIp::Error(Error::new(
eyre!("Manager runtime returned error: {}", e),
crate::ErrorKind::Docker,
))
}
@@ -702,49 +700,6 @@ async fn container_inspect(
.await
}
async fn generate_certificate(
seed: &ManagerSeed,
interfaces: &Vec<(
InterfaceId,
&crate::net::interface::Interface,
TorSecretKeyV3,
)>,
) -> Result<GeneratedCertificateMountPoint, Error> {
seed.ctx
.net_controller
.generate_certificate_mountpoint(&seed.manifest.id, interfaces)
.await
}
fn main_interfaces(
seed: &ManagerSeed,
) -> Result<
Vec<(
InterfaceId,
&crate::net::interface::Interface,
TorSecretKeyV3,
)>,
Error,
> {
seed.manifest
.interfaces
.0
.iter()
.map(|(id, info)| {
Ok((
id.clone(),
info,
seed.tor_keys
.get(id)
.ok_or_else(|| {
Error::new(eyre!("interface {} missing key", id), crate::ErrorKind::Tor)
})?
.clone(),
))
})
.collect::<Result<Vec<_>, Error>>()
}
async fn wait_for_status(shared: &ManagerSharedState, status: Status) {
let mut recv = shared.status.0.subscribe();
while {
@@ -767,17 +722,16 @@ fn sigterm_timeout(manifest: &Manifest) -> Option<Duration> {
}
}
#[instrument(skip(shared))]
#[instrument(skip_all)]
async fn stop(shared: &ManagerSharedState) -> Result<(), Error> {
shared
.commit_health_check_results
.store(false, Ordering::SeqCst);
shared.on_stop.send(OnStop::Sleep).map_err(|_| {
Error::new(
eyre!("Manager has already been shutdown"),
crate::ErrorKind::Docker,
)
})?;
shared.on_stop.send_modify(|status| {
if matches!(*status, OnStop::Restart) {
*status = OnStop::Sleep;
}
});
if *shared.status.1.borrow() == Status::Paused {
resume(shared).await?;
}
@@ -792,14 +746,13 @@ async fn stop(shared: &ManagerSharedState) -> Result<(), Error> {
Ok(())
}
#[instrument(skip(shared))]
#[instrument(skip_all)]
async fn start(shared: &ManagerSharedState) -> Result<(), Error> {
shared.on_stop.send(OnStop::Restart).map_err(|_| {
Error::new(
eyre!("Manager has already been shutdown"),
crate::ErrorKind::Docker,
)
})?;
shared.on_stop.send_modify(|status| {
if matches!(*status, OnStop::Sleep) {
*status = OnStop::Restart;
}
});
let _ = shared.status.0.send_modify(|x| {
if *x != Status::Running {
*x = Status::Starting
@@ -808,7 +761,7 @@ async fn start(shared: &ManagerSharedState) -> Result<(), Error> {
Ok(())
}
#[instrument(skip(shared))]
#[instrument(skip_all)]
async fn pause(shared: &ManagerSharedState) -> Result<(), Error> {
if let Err(e) = shared
.seed
@@ -825,7 +778,7 @@ async fn pause(shared: &ManagerSharedState) -> Result<(), Error> {
Ok(())
}
#[instrument(skip(shared))]
#[instrument(skip_all)]
async fn resume(shared: &ManagerSharedState) -> Result<(), Error> {
shared
.seed

View File

@@ -23,6 +23,9 @@ use tokio::sync::Mutex;
use crate::context::RpcContext;
use crate::{Error, ResultExt};
// Filesystem location of the token that grants local (on-device) RPC
// access via the "local" cookie, without a database-backed session.
pub const LOCAL_AUTH_COOKIE_PATH: &str = "/run/embassy/rpc.authcookie";
// Anything that can be turned into the string id used to log a session out.
pub trait AsLogoutSessionId {
    fn as_logout_session_id(self) -> String;
}
@@ -63,7 +66,29 @@ impl HasValidSession {
request_parts: &RequestParts,
ctx: &RpcContext,
) -> Result<Self, Error> {
Self::from_session(&HashSessionToken::from_request_parts(request_parts)?, ctx).await
if let Some(cookie_header) = request_parts.headers.get(COOKIE) {
let cookies = Cookie::parse(
cookie_header
.to_str()
.with_kind(crate::ErrorKind::Authorization)?,
)
.with_kind(crate::ErrorKind::Authorization)?;
if let Some(cookie) = cookies.iter().find(|c| c.get_name() == "local") {
if let Ok(s) = Self::from_local(cookie).await {
return Ok(s);
}
}
if let Some(cookie) = cookies.iter().find(|c| c.get_name() == "session") {
if let Ok(s) = Self::from_session(&HashSessionToken::from_cookie(cookie), ctx).await
{
return Ok(s);
}
}
}
Err(Error::new(
eyre!("UNAUTHORIZED"),
crate::ErrorKind::Authorization,
))
}
pub async fn from_session(session: &HashSessionToken, ctx: &RpcContext) -> Result<Self, Error> {
@@ -79,6 +104,18 @@ impl HasValidSession {
}
Ok(Self(()))
}
/// Authenticate via the local auth cookie: the "local" cookie's value must
/// exactly equal the contents of LOCAL_AUTH_COOKIE_PATH.
/// NOTE(review): the comparison is exact — if the token file is ever
/// written with a trailing newline this will always fail; confirm the
/// writer strips it.
pub async fn from_local(local: &Cookie<'_>) -> Result<Self, Error> {
    let token = tokio::fs::read_to_string(LOCAL_AUTH_COOKIE_PATH).await?;
    if local.get_value() == &*token {
        Ok(Self(()))
    } else {
        Err(Error::new(
            eyre!("UNAUTHORIZED"),
            crate::ErrorKind::Authorization,
        ))
    }
}
}
/// When we have a need to create a new session,

View File

@@ -47,7 +47,7 @@ pub struct EncryptedWire {
encrypted: serde_json::Value,
}
impl EncryptedWire {
#[instrument(skip(current_secret))]
#[instrument(skip_all)]
pub fn decrypt(self, current_secret: impl AsRef<Jwk>) -> Option<String> {
let current_secret = current_secret.as_ref();

View File

@@ -4,12 +4,12 @@ use color_eyre::eyre::eyre;
use emver::VersionRange;
use futures::{Future, FutureExt};
use indexmap::IndexMap;
use models::ImageId;
use patch_db::HasModel;
use serde::{Deserialize, Serialize};
use tracing::instrument;
use crate::context::RpcContext;
use crate::id::ImageId;
use crate::procedure::docker::DockerContainers;
use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
@@ -24,7 +24,7 @@ pub struct Migrations {
pub to: IndexMap<VersionRange, PackageProcedure>,
}
impl Migrations {
#[instrument]
#[instrument(skip_all)]
pub fn validate(
&self,
container: &Option<DockerContainers>,
@@ -55,7 +55,7 @@ impl Migrations {
Ok(())
}
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub fn from<'a>(
&'a self,
container: &'a Option<DockerContainers>,
@@ -95,7 +95,7 @@ impl Migrations {
}
}
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub fn to<'a>(
&'a self,
ctx: &'a RpcContext,

View File

@@ -1,215 +0,0 @@
use std::collections::BTreeMap;
use std::io;
use std::pin::Pin;
use std::str::FromStr;
use std::sync::{Arc, RwLock};
use std::task::{Context, Poll};
use color_eyre::eyre::eyre;
use futures::{ready, Future};
use hyper::server::accept::Accept;
use hyper::server::conn::{AddrIncoming, AddrStream};
use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio_rustls::rustls::server::ResolvesServerCert;
use tokio_rustls::rustls::sign::{any_supported_type, CertifiedKey};
use tokio_rustls::rustls::{Certificate, PrivateKey, ServerConfig};
use crate::net::net_utils::ResourceFqdn;
use crate::Error;
// Lifecycle of an accepted TLS socket: first the pending rustls handshake
// future, then the established stream.
enum State {
    Handshaking(tokio_rustls::Accept<AddrStream>),
    Streaming(tokio_rustls::server::TlsStream<AddrStream>),
}
// tokio_rustls::server::TlsStream doesn't expose constructor methods,
// so we have to TlsAcceptor::accept and handshake to have access to it
// TlsStream implements AsyncRead/AsyncWrite handshaking tokio_rustls::Accept first
pub struct TlsStream {
    // Current phase; flips from Handshaking to Streaming on the first
    // successful read/write poll.
    state: State,
}
impl TlsStream {
    /// Start a TLS handshake over `stream` using `config`. The handshake
    /// completes lazily: it is driven by the first read/write poll.
    fn new(stream: AddrStream, config: Arc<ServerConfig>) -> TlsStream {
        TlsStream {
            state: State::Handshaking(tokio_rustls::TlsAcceptor::from(config).accept(stream)),
        }
    }
}
impl AsyncRead for TlsStream {
    // Drives the handshake to completion (if still pending) before
    // delegating reads to the underlying TLS stream.
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context,
        buf: &mut ReadBuf,
    ) -> Poll<io::Result<()>> {
        let pin = self.get_mut();
        match pin.state {
            State::Handshaking(ref mut accept) => match ready!(Pin::new(accept).poll(cx)) {
                Ok(mut stream) => {
                    // Handshake finished: issue the read right away and
                    // remember the established stream for future polls.
                    let result = Pin::new(&mut stream).poll_read(cx, buf);
                    pin.state = State::Streaming(stream);
                    result
                }
                Err(err) => Poll::Ready(Err(err)),
            },
            State::Streaming(ref mut stream) => Pin::new(stream).poll_read(cx, buf),
        }
    }
}
impl AsyncWrite for TlsStream {
    // Same pattern as poll_read: finish the handshake on demand, then
    // forward writes to the established TLS stream.
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        let pin = self.get_mut();
        match pin.state {
            State::Handshaking(ref mut accept) => match ready!(Pin::new(accept).poll(cx)) {
                Ok(mut stream) => {
                    let result = Pin::new(&mut stream).poll_write(cx, buf);
                    pin.state = State::Streaming(stream);
                    result
                }
                Err(err) => Poll::Ready(Err(err)),
            },
            State::Streaming(ref mut stream) => Pin::new(stream).poll_write(cx, buf),
        }
    }
    // Nothing to flush until the handshake has produced a stream.
    fn poll_flush(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        match self.state {
            State::Handshaking(_) => Poll::Ready(Ok(())),
            State::Streaming(ref mut stream) => Pin::new(stream).poll_flush(cx),
        }
    }
    // Shutdown mid-handshake is reported as an immediate success; the
    // pending Accept future is simply dropped along with self.
    fn poll_shutdown(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        match self.state {
            State::Handshaking(_) => Poll::Ready(Ok(())),
            State::Streaming(ref mut stream) => Pin::new(stream).poll_shutdown(cx),
        }
    }
}
impl ResolvesServerCert for EmbassyCertResolver {
    // rustls SNI callback: pick the certificate matching the client's
    // requested server name, or None when no mapping exists.
    fn resolve(
        &self,
        client_hello: tokio_rustls::rustls::server::ClientHello,
    ) -> Option<Arc<tokio_rustls::rustls::sign::CertifiedKey>> {
        let hostname_raw = client_hello.server_name();
        match hostname_raw {
            Some(hostname_str) => {
                let full_fqdn = match ResourceFqdn::from_str(hostname_str) {
                    Ok(fqdn) => fqdn,
                    Err(_) => {
                        tracing::error!("Error converting {} to fqdn struct", hostname_str);
                        return None;
                    }
                };
                // A poisoned lock is logged and treated as "no certificate".
                let lock = self.cert_mapping.read();
                match lock {
                    Ok(lock) => lock
                        .get(&full_fqdn)
                        .map(|cert_key| Arc::new(cert_key.to_owned())),
                    Err(err) => {
                        tracing::error!("resolve fn Error: {}", err);
                        None
                    }
                }
            }
            // Client sent no SNI: nothing to resolve against.
            None => None,
        }
    }
}
// SNI-based certificate resolver: maps a requested FQDN to the
// CertifiedKey to present. Cloning shares the underlying table (Arc),
// so the TLS config and the code updating certificates stay in sync.
#[derive(Clone, Default)]
pub struct EmbassyCertResolver {
    cert_mapping: Arc<RwLock<BTreeMap<ResourceFqdn, CertifiedKey>>>,
}
impl EmbassyCertResolver {
pub fn new() -> Self {
Self::default()
}
pub async fn add_certificate_to_resolver(
&mut self,
service_resource_fqdn: ResourceFqdn,
package_cert_data: (PKey<Private>, Vec<X509>),
) -> Result<(), Error> {
let x509_cert_chain = package_cert_data.1;
let private_keys = package_cert_data
.0
.private_key_to_der()
.map_err(|err| Error::new(eyre!("{}", err), crate::ErrorKind::OpenSsl))?;
let mut full_rustls_certs = Vec::new();
for cert in x509_cert_chain.iter() {
let cert = Certificate(
cert.to_der()
.map_err(|err| Error::new(eyre!("{}", err), crate::ErrorKind::OpenSsl))?,
);
full_rustls_certs.push(cert);
}
let pre_sign_key = PrivateKey(private_keys);
let actual_sign_key = any_supported_type(&pre_sign_key)
.map_err(|err| Error::new(eyre!("{}", err), crate::ErrorKind::OpenSsl))?;
let cert_key = CertifiedKey::new(full_rustls_certs, actual_sign_key);
let mut lock = self
.cert_mapping
.write()
.map_err(|err| Error::new(eyre!("{}", err), crate::ErrorKind::Network))?;
lock.insert(service_resource_fqdn, cert_key);
Ok(())
}
pub async fn remove_cert(&mut self, hostname: ResourceFqdn) -> Result<(), Error> {
let mut lock = self
.cert_mapping
.write()
.map_err(|err| Error::new(eyre!("{}", err), crate::ErrorKind::Network))?;
lock.remove(&hostname);
Ok(())
}
}
// hyper `Accept` implementation that upgrades every incoming TCP
// connection to TLS using a shared rustls ServerConfig.
pub struct TlsAcceptor {
    config: Arc<ServerConfig>,
    incoming: AddrIncoming,
}
impl TlsAcceptor {
pub fn new(config: Arc<ServerConfig>, incoming: AddrIncoming) -> TlsAcceptor {
TlsAcceptor { config, incoming }
}
}
impl Accept for TlsAcceptor {
    type Conn = TlsStream;
    type Error = io::Error;

    // Accept the next TCP connection and wrap it in a lazily-handshaking
    // TlsStream; TCP accept errors are passed straight through to hyper.
    fn poll_accept(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Self::Conn, Self::Error>>> {
        let pin = self.get_mut();
        match ready!(Pin::new(&mut pin.incoming).poll_accept(cx)) {
            Some(Ok(sock)) => Poll::Ready(Some(Ok(TlsStream::new(sock, pin.config.clone())))),
            Some(Err(e)) => Poll::Ready(Some(Err(e))),
            None => Poll::Ready(None),
        }
    }
}

79
backend/src/net/dhcp.rs Normal file
View File

@@ -0,0 +1,79 @@
use std::collections::{BTreeMap, BTreeSet};
use std::net::IpAddr;
use futures::TryStreamExt;
use rpc_toolkit::command;
use tokio::sync::RwLock;
use crate::context::RpcContext;
use crate::db::model::IpInfo;
use crate::net::utils::{iface_is_physical, list_interfaces};
use crate::util::display_none;
use crate::Error;
lazy_static::lazy_static! {
static ref CACHED_IPS: RwLock<BTreeSet<IpAddr>> = RwLock::new(BTreeSet::new());
}
/// Enumerate every IP address currently assigned to a physical interface.
///
/// Builds the interface map fresh via `init_ips` and flattens each
/// entry's v4/v6 addresses into a single set.
async fn _ips() -> Result<BTreeSet<IpAddr>, Error> {
    let interfaces = init_ips().await?;
    let mut all = BTreeSet::new();
    for info in interfaces.values() {
        if let Some(v4) = info.ipv4 {
            all.insert(IpAddr::from(v4));
        }
        if let Some(v6) = info.ipv6 {
            all.insert(IpAddr::from(v6));
        }
    }
    Ok(all)
}
/// Return the set of known host IPs, serving from the cache when it is
/// already populated.
///
/// A cache miss (empty set) triggers a recompute, and the cache is
/// refreshed before the fresh set is returned.
pub async fn ips() -> Result<BTreeSet<IpAddr>, Error> {
    let cached = CACHED_IPS.read().await.clone();
    if cached.is_empty() {
        let fresh = _ips().await?;
        *CACHED_IPS.write().await = fresh.clone();
        Ok(fresh)
    } else {
        Ok(cached)
    }
}
/// Probe every physical network interface and collect its address info,
/// keyed by interface name.
pub async fn init_ips() -> Result<BTreeMap<String, IpInfo>, Error> {
    let mut by_interface = BTreeMap::new();
    let mut stream = list_interfaces();
    while let Some(name) = stream.try_next().await? {
        // Virtual interfaces (bridges, veths, ...) are skipped.
        if !iface_is_physical(&name).await {
            continue;
        }
        let info = IpInfo::for_interface(&name).await?;
        by_interface.insert(name, info);
    }
    Ok(by_interface)
}
// RPC entrypoint for the `dhcp` command group; all real work happens in
// the `update` subcommand, so the parent command itself is a no-op.
#[command(subcommands(update))]
pub async fn dhcp() -> Result<(), Error> {
    Ok(())
}
// Handle a DHCP lease change for `interface`: re-read its address info,
// persist it to the database, and refresh the in-memory IP cache.
#[command(display(display_none))]
pub async fn update(#[context] ctx: RpcContext, #[arg] interface: String) -> Result<(), Error> {
    // Non-physical (virtual) interfaces are ignored entirely.
    if iface_is_physical(&interface).await {
        let ip_info = IpInfo::for_interface(&interface).await?;
        crate::db::DatabaseModel::new()
            .server_info()
            .ip_info()
            .idx_model(&interface)
            .put(&mut ctx.db.handle(), &ip_info)
            .await?;
        let mut cached = CACHED_IPS.write().await;
        if cached.is_empty() {
            // Cache was never populated: rebuild it from scratch.
            *cached = _ips().await?;
        } else {
            // Cache already warm: add this interface's addresses.
            // NOTE(review): stale addresses previously held by this
            // interface are not removed here — confirm that is intended.
            cached.extend(
                std::iter::empty()
                    .chain(ip_info.ipv4.map(IpAddr::from))
                    .chain(ip_info.ipv6.map(IpAddr::from)),
            );
        }
    }
    Ok(())
}

View File

@@ -1,9 +1,10 @@
use std::borrow::Borrow;
use std::collections::{BTreeMap, BTreeSet};
use std::collections::BTreeMap;
use std::net::{Ipv4Addr, SocketAddr};
use std::sync::Arc;
use std::sync::{Arc, Weak};
use std::time::Duration;
use color_eyre::eyre::eyre;
use futures::TryFutureExt;
use helpers::NonDetachingJoinHandle;
use models::PackageId;
@@ -17,34 +18,48 @@ use trust_dns_server::server::{Request, RequestHandler, ResponseHandler, Respons
use trust_dns_server::ServerFuture;
use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt, HOST_IP};
use crate::{Error, ErrorKind, ResultExt};
pub struct DnsController {
services: Arc<RwLock<BTreeMap<PackageId, BTreeSet<Ipv4Addr>>>>,
services: Weak<RwLock<BTreeMap<Option<PackageId>, BTreeMap<Ipv4Addr, Weak<()>>>>>,
#[allow(dead_code)]
dns_server: NonDetachingJoinHandle<Result<(), Error>>,
}
struct Resolver {
services: Arc<RwLock<BTreeMap<PackageId, BTreeSet<Ipv4Addr>>>>,
services: Arc<RwLock<BTreeMap<Option<PackageId>, BTreeMap<Ipv4Addr, Weak<()>>>>>,
}
impl Resolver {
async fn resolve(&self, name: &Name) -> Option<Vec<Ipv4Addr>> {
match name.iter().next_back() {
Some(b"embassy") => {
if let Some(pkg) = name.iter().rev().skip(1).next() {
if let Some(ip) = self
.services
.read()
.await
.get(std::str::from_utf8(pkg).unwrap_or_default())
{
Some(ip.iter().copied().collect())
if let Some(ip) = self.services.read().await.get(&Some(
std::str::from_utf8(pkg)
.unwrap_or_default()
.parse()
.unwrap_or_default(),
)) {
Some(
ip.iter()
.filter(|(_, rc)| rc.strong_count() > 0)
.map(|(ip, _)| *ip)
.collect(),
)
} else {
None
}
} else {
Some(vec![HOST_IP.into()])
if let Some(ip) = self.services.read().await.get(&None) {
Some(
ip.iter()
.filter(|(_, rc)| rc.strong_count() > 0)
.map(|(ip, _)| *ip)
.collect(),
)
} else {
None
}
}
}
_ => None,
@@ -61,32 +76,51 @@ impl RequestHandler for Resolver {
) -> ResponseInfo {
let query = request.request_info().query;
if let Some(ip) = self.resolve(query.name().borrow()).await {
if query.query_type() != RecordType::A {
tracing::warn!(
"Non A-Record requested for {}: {:?}",
query.name(),
query.query_type()
);
match query.query_type() {
RecordType::A => {
response_handle
.send_response(
MessageResponseBuilder::from_message_request(&*request).build(
Header::response_from_request(request.header()),
&ip.into_iter()
.map(|ip| {
Record::from_rdata(
request.request_info().query.name().to_owned().into(),
0,
trust_dns_server::client::rr::RData::A(ip),
)
})
.collect::<Vec<_>>(),
[],
[],
[],
),
)
.await
}
a => {
if a != RecordType::AAAA {
tracing::warn!(
"Non A-Record requested for {}: {:?}",
query.name(),
query.query_type()
);
}
let mut res = Header::response_from_request(request.header());
res.set_response_code(ResponseCode::NXDomain);
response_handle
.send_response(
MessageResponseBuilder::from_message_request(&*request).build(
res.into(),
[],
[],
[],
[],
),
)
.await
}
}
response_handle
.send_response(
MessageResponseBuilder::from_message_request(&*request).build(
Header::response_from_request(request.header()),
&ip.into_iter()
.map(|ip| {
Record::from_rdata(
request.request_info().query.name().to_owned().into(),
0,
trust_dns_server::client::rr::RData::A(ip),
)
})
.collect::<Vec<_>>(),
[],
[],
[],
),
)
.await
} else {
let mut res = Header::response_from_request(request.header());
res.set_response_code(ResponseCode::NXDomain);
@@ -142,24 +176,47 @@ impl DnsController {
.into();
Ok(Self {
services,
services: Arc::downgrade(&services),
dns_server,
})
}
pub async fn add(&self, pkg_id: &PackageId, ip: Ipv4Addr) {
let mut writable = self.services.write().await;
let mut ips = writable.remove(pkg_id).unwrap_or_default();
ips.insert(ip);
writable.insert(pkg_id.clone(), ips);
pub async fn add(&self, pkg_id: Option<PackageId>, ip: Ipv4Addr) -> Result<Arc<()>, Error> {
if let Some(services) = Weak::upgrade(&self.services) {
let mut writable = services.write().await;
let mut ips = writable.remove(&pkg_id).unwrap_or_default();
let rc = if let Some(rc) = Weak::upgrade(&ips.remove(&ip).unwrap_or_default()) {
rc
} else {
Arc::new(())
};
ips.insert(ip, Arc::downgrade(&rc));
writable.insert(pkg_id, ips);
Ok(rc)
} else {
Err(Error::new(
eyre!("DNS Server Thread has exited"),
crate::ErrorKind::Network,
))
}
}
pub async fn remove(&self, pkg_id: &PackageId, ip: Ipv4Addr) {
let mut writable = self.services.write().await;
let mut ips = writable.remove(pkg_id).unwrap_or_default();
ips.remove(&ip);
if !ips.is_empty() {
writable.insert(pkg_id.clone(), ips);
pub async fn gc(&self, pkg_id: Option<PackageId>, ip: Ipv4Addr) -> Result<(), Error> {
if let Some(services) = Weak::upgrade(&self.services) {
let mut writable = services.write().await;
let mut ips = writable.remove(&pkg_id).unwrap_or_default();
if let Some(rc) = Weak::upgrade(&ips.remove(&ip).unwrap_or_default()) {
ips.insert(ip, Arc::downgrade(&rc));
}
if !ips.is_empty() {
writable.insert(pkg_id, ips);
}
Ok(())
} else {
Err(Error::new(
eyre!("DNS Server Thread has exited"),
crate::ErrorKind::Network,
))
}
}
}

View File

@@ -1,173 +0,0 @@
use std::collections::BTreeMap;
use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;
use helpers::NonDetachingJoinHandle;
use http::StatusCode;
use hyper::server::conn::AddrIncoming;
use hyper::service::{make_service_fn, service_fn};
use hyper::{Body, Error as HyperError, Response, Server};
use tokio::sync::oneshot;
use tokio_rustls::rustls::ServerConfig;
use tracing::error;
use crate::net::cert_resolver::TlsAcceptor;
use crate::net::net_utils::{host_addr_fqdn, ResourceFqdn};
use crate::net::HttpHandler;
use crate::Error;
// Canned response bodies for proxy-level failures.
static RES_NOT_FOUND: &[u8] = b"503 Service Unavailable";
static NO_HOST: &[u8] = b"No host header found";
// One running reverse-proxy server (one per listener address/port).
pub struct EmbassyServiceHTTPServer {
    // Host-FQDN → handler routing table, shared with the server task.
    pub svc_mapping: Arc<tokio::sync::RwLock<BTreeMap<ResourceFqdn, HttpHandler>>>,
    // Firing this channel gracefully shuts the server down.
    pub shutdown: oneshot::Sender<()>,
    pub handle: NonDetachingJoinHandle<()>,
    // TLS configuration this server was started with, if any.
    pub ssl_cfg: Option<Arc<ServerConfig>>,
}
impl EmbassyServiceHTTPServer {
pub async fn new(
listener_addr: IpAddr,
port: u16,
ssl_cfg: Option<Arc<ServerConfig>>,
) -> Result<Self, Error> {
let (tx, rx) = tokio::sync::oneshot::channel::<()>();
let listener_socket_addr = SocketAddr::from((listener_addr, port));
let server_service_mapping = Arc::new(tokio::sync::RwLock::new(BTreeMap::<
ResourceFqdn,
HttpHandler,
>::new()));
let server_service_mapping1 = server_service_mapping.clone();
let bare_make_service_fn = move || {
let server_service_mapping = server_service_mapping.clone();
async move {
Ok::<_, HyperError>(service_fn(move |req| {
let mut server_service_mapping = server_service_mapping.clone();
async move {
server_service_mapping = server_service_mapping.clone();
let host = host_addr_fqdn(&req);
match host {
Ok(host_uri) => {
let res = {
let mapping = server_service_mapping.read().await;
let opt_handler = mapping.get(&host_uri).cloned();
opt_handler
};
match res {
Some(opt_handler) => {
let response = opt_handler(req).await;
match response {
Ok(resp) => Ok::<Response<Body>, hyper::Error>(resp),
Err(err) => Ok(respond_hyper_error(err)),
}
}
None => Ok(res_not_found()),
}
}
Err(e) => Ok(no_host_found(e)),
}
}
}))
}
};
let inner_ssl_cfg = ssl_cfg.clone();
let handle = tokio::spawn(async move {
match inner_ssl_cfg {
Some(cfg) => {
let incoming = AddrIncoming::bind(&listener_socket_addr).unwrap();
let server = Server::builder(TlsAcceptor::new(cfg, incoming))
.http1_preserve_header_case(true)
.http1_title_case_headers(true)
.serve(make_service_fn(|_| bare_make_service_fn()))
.with_graceful_shutdown({
async {
rx.await.ok();
}
});
if let Err(e) = server.await {
error!("Spawning hyper server errorr: {}", e);
}
}
None => {
let server = Server::bind(&listener_socket_addr)
.http1_preserve_header_case(true)
.http1_title_case_headers(true)
.serve(make_service_fn(|_| bare_make_service_fn()))
.with_graceful_shutdown({
async {
rx.await.ok();
}
});
if let Err(e) = server.await {
error!("Spawning hyper server errorr: {}", e);
}
}
};
});
Ok(Self {
svc_mapping: server_service_mapping1,
handle: handle.into(),
shutdown: tx,
ssl_cfg,
})
}
pub async fn add_svc_handler_mapping(
&mut self,
fqdn: ResourceFqdn,
svc_handle: HttpHandler,
) -> Result<(), Error> {
let mut mapping = self.svc_mapping.write().await;
mapping.insert(fqdn.clone(), svc_handle);
Ok(())
}
pub async fn remove_svc_handler_mapping(&mut self, fqdn: ResourceFqdn) -> Result<(), Error> {
let mut mapping = self.svc_mapping.write().await;
mapping.remove(&fqdn);
Ok(())
}
}
/// Canned 503 Service Unavailable response, returned when no handler is
/// registered for the requested host.
fn res_not_found() -> Response<Body> {
    let body: Body = RES_NOT_FOUND.into();
    Response::builder()
        .status(StatusCode::SERVICE_UNAVAILABLE)
        .body(body)
        .unwrap()
}
fn no_host_found(err: Error) -> Response<Body> {
let err_txt = format!("{}: Error {}", String::from_utf8_lossy(NO_HOST), err);
Response::builder()
.status(StatusCode::BAD_REQUEST)
.body(err_txt.into())
.unwrap()
}
/// 400 response for a hyper-level error raised while handling a proxied
/// request.
///
/// Fix over original: the body reused the "No host header found" prefix
/// (NO_HOST), mislabelling unrelated proxy failures; the message now
/// describes what actually happened.
/// NOTE(review): a 502 Bad Gateway may be more appropriate than 400 here —
/// left unchanged to preserve observable status-code behavior.
fn respond_hyper_error(err: hyper::Error) -> Response<Body> {
    let err_txt = format!("Error handling proxied request: {}", err);
    Response::builder()
        .status(StatusCode::BAD_REQUEST)
        .body(err_txt.into())
        .unwrap()
}

View File

@@ -1,9 +1,6 @@
use std::collections::BTreeMap;
use color_eyre::eyre::eyre;
use futures::TryStreamExt;
use indexmap::IndexSet;
use itertools::Either;
pub use models::InterfaceId;
use serde::{Deserialize, Deserializer, Serialize};
use sqlx::{Executor, Postgres};
@@ -11,7 +8,6 @@ use torut::onion::TorSecretKeyV3;
use tracing::instrument;
use crate::db::model::{InterfaceAddressMap, InterfaceAddresses};
use crate::id::Id;
use crate::s9pk::manifest::PackageId;
use crate::util::serde::Port;
use crate::{Error, ResultExt};
@@ -20,7 +16,7 @@ use crate::{Error, ResultExt};
#[serde(rename_all = "kebab-case")]
pub struct Interfaces(pub BTreeMap<InterfaceId, Interface>); // TODO
impl Interfaces {
#[instrument]
#[instrument(skip_all)]
pub fn validate(&self) -> Result<(), Error> {
for (_, interface) in &self.0 {
interface.validate().with_ctx(|_| {
@@ -32,7 +28,7 @@ impl Interfaces {
}
Ok(())
}
#[instrument(skip(secrets))]
#[instrument(skip_all)]
pub async fn install<Ex>(
&self,
secrets: &mut Ex,
@@ -81,36 +77,6 @@ impl Interfaces {
}
Ok(interface_addresses)
}
#[instrument(skip(secrets))]
pub async fn tor_keys<Ex>(
&self,
secrets: &mut Ex,
package_id: &PackageId,
) -> Result<BTreeMap<InterfaceId, TorSecretKeyV3>, Error>
where
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
Ok(sqlx::query!(
"SELECT interface, key FROM tor WHERE package = $1",
**package_id
)
.fetch_many(secrets)
.map_err(Error::from)
.try_filter_map(|qr| async move {
Ok(if let Either::Right(r) = qr {
let mut buf = [0; 64];
buf.clone_from_slice(r.key.get(0..64).ok_or_else(|| {
Error::new(eyre!("Invalid Tor Key Length"), crate::ErrorKind::Database)
})?);
Some((InterfaceId::from(Id::try_from(r.interface)?), buf.into()))
} else {
None
})
})
.try_collect()
.await?)
}
}
#[derive(Clone, Debug, Deserialize, Serialize)]
@@ -124,7 +90,7 @@ pub struct Interface {
pub protocols: IndexSet<String>,
}
impl Interface {
#[instrument]
#[instrument(skip_all)]
pub fn validate(&self) -> Result<(), color_eyre::eyre::Report> {
if self.tor_config.is_some() && !self.protocols.contains("tcp") {
color_eyre::eyre::bail!("must support tcp to set up a tor hidden service");

272
backend/src/net/keys.rs Normal file
View File

@@ -0,0 +1,272 @@
use color_eyre::eyre::eyre;
use ed25519_dalek::{ExpandedSecretKey, SecretKey};
use models::{Id, InterfaceId, PackageId};
use openssl::pkey::{PKey, Private};
use openssl::sha::Sha256;
use openssl::x509::X509;
use p256::elliptic_curve::pkcs8::EncodePrivateKey;
use sqlx::PgExecutor;
use ssh_key::private::Ed25519PrivateKey;
use torut::onion::{OnionAddressV3, TorSecretKeyV3};
use zeroize::Zeroize;
use crate::net::ssl::CertPair;
use crate::Error;
// TODO: delete once we may change tor addresses
/// Legacy lookup for a pre-existing tor key: per-interface keys live in
/// the `tor` table, the server's own key on the `account` row (id 0).
/// Returns `Ok(None)` when no legacy key exists for the requested scope.
async fn compat(
    secrets: impl PgExecutor<'_>,
    interface: &Option<(PackageId, InterfaceId)>,
) -> Result<Option<ExpandedSecretKey>, Error> {
    match interface {
        Some((package, interface)) => {
            let row = sqlx::query!(
                "SELECT key FROM tor WHERE package = $1 AND interface = $2",
                **package,
                **interface
            )
            .fetch_optional(secrets)
            .await?;
            match row {
                Some(r) => Ok(Some(ExpandedSecretKey::from_bytes(&r.key)?)),
                None => Ok(None),
            }
        }
        None => {
            let row = sqlx::query!("SELECT tor_key FROM account WHERE id = 0")
                .fetch_one(secrets)
                .await?;
            match row.tor_key {
                Some(key) => Ok(Some(ExpandedSecretKey::from_bytes(&key)?)),
                None => Ok(None),
            }
        }
    }
}
// A network identity key, scoped either to a (package, interface) pair or
// (when `interface` is None) to the server itself.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Key {
    interface: Option<(PackageId, InterfaceId)>,
    // 32-byte seed from which the ssh/openssl keys are derived.
    base: [u8; 32],
    tor_key: [u8; 64], // Does NOT necessarily match base
}
impl Key {
/// Scope of this key: `Some((package, interface))` for a service key,
/// `None` for the server's own key.
pub fn interface(&self) -> Option<(PackageId, InterfaceId)> {
    self.interface.clone()
}
/// Raw 32-byte base seed for this key.
pub fn as_bytes(&self) -> [u8; 32] {
    self.base
}
/// Hostname used on the internal network: `<package>.embassy` for a
/// service key, plain `embassy` for the server key.
pub fn internal_address(&self) -> String {
    match &self.interface {
        Some((pkg_id, _)) => format!("{}.embassy", pkg_id),
        None => "embassy".to_owned(),
    }
}
/// Reconstruct the torut hidden-service key from the stored expanded key
/// bytes.
/// NOTE(review): the unwrap assumes `tor_key` always holds a valid
/// 64-byte expanded ed25519 key — confirm this holds for keys loaded
/// from the database via `from_pair`.
pub fn tor_key(&self) -> TorSecretKeyV3 {
    ed25519_dalek::ExpandedSecretKey::from_bytes(&self.tor_key)
        .unwrap()
        .to_bytes()
        .into()
}
/// The .onion address derived from this key's tor key.
pub fn tor_address(&self) -> OnionAddressV3 {
    self.tor_key().public().get_onion_address()
}
/// The onion address without its ".onion" suffix; used as the stem for
/// other host names (see `local_address`).
pub fn base_address(&self) -> String {
    self.tor_key()
        .public()
        .get_onion_address()
        .get_address_without_dot_onion()
}
/// LAN host name: the onion base address with a ".local" suffix.
pub fn local_address(&self) -> String {
    self.base_address() + ".local"
}
/// The base seed as an openssl ed25519 private key.
/// NOTE(review): unwrap assumes openssl accepts any 32-byte value as an
/// ED25519 raw private key — confirm.
pub fn openssl_key_ed25519(&self) -> PKey<Private> {
    PKey::private_key_from_raw_bytes(&self.base, openssl::pkey::Id::ED25519).unwrap()
}
/// Derive a NIST P-256 private key from the base seed.
/// Not every 32-byte string is a valid P-256 scalar, so the buffer is
/// repeatedly SHA-256 hashed until a valid key is found; the result is
/// deterministic for a given seed.
pub fn openssl_key_nistp256(&self) -> PKey<Private> {
    let mut buf = self.base;
    loop {
        if let Ok(k) = p256::SecretKey::from_be_bytes(&buf) {
            return PKey::private_key_from_pkcs8(&*k.to_pkcs8_der().unwrap().as_bytes())
                .unwrap();
        }
        // Invalid scalar: re-hash and try again.
        let mut sha = Sha256::new();
        sha.update(&buf);
        buf = sha.finish();
    }
}
pub fn ssh_key(&self) -> Ed25519PrivateKey {
Ed25519PrivateKey::from_bytes(&self.base)
}
pub(crate) fn from_pair(
interface: Option<(PackageId, InterfaceId)>,
bytes: [u8; 32],
tor_key: [u8; 64],
) -> Self {
Self {
interface,
tor_key,
base: bytes,
}
}
pub fn from_bytes(interface: Option<(PackageId, InterfaceId)>, bytes: [u8; 32]) -> Self {
Self::from_pair(
interface,
bytes,
ExpandedSecretKey::from(&SecretKey::from_bytes(&bytes).unwrap()).to_bytes(),
)
}
pub fn new(interface: Option<(PackageId, InterfaceId)>) -> Self {
Self::from_bytes(interface, rand::random())
}
pub(super) fn with_certs(self, certs: CertPair, int: X509, root: X509) -> KeyInfo {
KeyInfo {
key: self,
certs,
int,
root,
}
}
pub async fn for_package(
secrets: impl PgExecutor<'_>,
package: &PackageId,
) -> Result<Vec<Self>, Error> {
sqlx::query!(
r#"
SELECT
network_keys.package,
network_keys.interface,
network_keys.key,
tor.key AS "tor_key?"
FROM
network_keys
LEFT JOIN
tor
ON
network_keys.package = tor.package
AND
network_keys.interface = tor.interface
WHERE
network_keys.package = $1
"#,
**package
)
.fetch_all(secrets)
.await?
.into_iter()
.map(|row| {
let interface = Some((
package.clone(),
InterfaceId::from(Id::try_from(row.interface)?),
));
let bytes = row.key.try_into().map_err(|e: Vec<u8>| {
Error::new(
eyre!("Invalid length for network key {} expected 32", e.len()),
crate::ErrorKind::Database,
)
})?;
Ok(match row.tor_key {
Some(tor_key) => Key::from_pair(
interface,
bytes,
tor_key.try_into().map_err(|e: Vec<u8>| {
Error::new(
eyre!("Invalid length for tor key {} expected 64", e.len()),
crate::ErrorKind::Database,
)
})?,
),
None => Key::from_bytes(interface, bytes),
})
})
.collect()
}
pub async fn for_interface<Ex>(
secrets: &mut Ex,
interface: Option<(PackageId, InterfaceId)>,
) -> Result<Self, Error>
where
for<'a> &'a mut Ex: PgExecutor<'a>,
{
let tentative = rand::random::<[u8; 32]>();
let actual = if let Some((pkg, iface)) = &interface {
let k = tentative.as_slice();
let actual = sqlx::query!(
"INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO UPDATE SET package = EXCLUDED.package RETURNING key",
**pkg,
**iface,
k,
)
.fetch_one(&mut *secrets)
.await?.key;
let mut bytes = tentative;
bytes.clone_from_slice(actual.get(0..32).ok_or_else(|| {
Error::new(
eyre!("Invalid key size returned from DB"),
crate::ErrorKind::Database,
)
})?);
bytes
} else {
let actual = sqlx::query!("SELECT network_key FROM account WHERE id = 0")
.fetch_one(&mut *secrets)
.await?
.network_key;
let mut bytes = tentative;
bytes.clone_from_slice(actual.get(0..32).ok_or_else(|| {
Error::new(
eyre!("Invalid key size returned from DB"),
crate::ErrorKind::Database,
)
})?);
bytes
};
let mut res = Self::from_bytes(interface, actual);
if let Some(tor_key) = compat(secrets, &res.interface).await? {
res.tor_key = tor_key.to_bytes();
}
Ok(res)
}
}
impl Drop for Key {
    fn drop(&mut self) {
        // Scrub secret material from memory as soon as the key is released.
        self.tor_key.zeroize();
        self.base.zeroize();
    }
}
/// A network [`Key`] bundled with the TLS certificates issued for it.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct KeyInfo {
    /// The key the certificates were issued for.
    key: Key,
    /// Leaf certificates (one ed25519, one NIST P-256) for this key.
    certs: CertPair,
    /// Intermediate CA certificate that sits between leaf and root.
    int: X509,
    /// Root CA certificate.
    root: X509,
}
impl KeyInfo {
    /// The network key these certificates were issued for.
    pub fn key(&self) -> &Key {
        &self.key
    }
    /// The leaf certificate pair (ed25519 + NIST P-256).
    pub fn certs(&self) -> &CertPair {
        &self.certs
    }
    /// The intermediate CA certificate.
    pub fn int_ca(&self) -> &X509 {
        &self.int
    }
    /// The root CA certificate.
    pub fn root_ca(&self) -> &X509 {
        &self.root
    }
    /// Certificate chain for the ed25519 leaf, ordered leaf → intermediate → root.
    pub fn fullchain_ed25519(&self) -> Vec<&X509> {
        let mut chain = Vec::with_capacity(3);
        chain.push(&self.certs.ed25519);
        chain.push(&self.int);
        chain.push(&self.root);
        chain
    }
    /// Certificate chain for the NIST P-256 leaf, ordered leaf → intermediate → root.
    pub fn fullchain_nistp256(&self) -> Vec<&X509> {
        let mut chain = Vec::with_capacity(3);
        chain.push(&self.certs.nistp256);
        chain.push(&self.int);
        chain.push(&self.root);
        chain
    }
}
#[test]
pub fn test_keygen() {
    // Generate a fresh OS-scoped key and exercise every derived key form so
    // that none of the internal `unwrap`s (tor expansion, openssl raw-byte
    // import, PKCS#8 encoding) can regress silently. The original test only
    // covered the tor and P-256 paths.
    let key = Key::new(None);
    key.tor_key();
    key.openssl_key_nistp256();
    key.openssl_key_ed25519();
    key.ssh_key();
    // Hostname derivations must stay internally consistent.
    assert_eq!(key.local_address(), key.base_address() + ".local");
    // Key derivation must be deterministic: rebuilding from the same 32
    // bytes yields the same tor address.
    let rebuilt = Key::from_bytes(None, key.as_bytes());
    assert_eq!(
        key.tor_address().to_string(),
        rebuilt.tor_address().to_string()
    );
}

View File

@@ -1,13 +1,11 @@
use std::collections::BTreeMap;
use std::net::Ipv4Addr;
use std::sync::{Arc, Weak};
use color_eyre::eyre::eyre;
use tokio::process::{Child, Command};
use tokio::sync::Mutex;
use torut::onion::TorSecretKeyV3;
use super::interface::InterfaceId;
use crate::s9pk::manifest::PackageId;
use crate::util::Invoke;
use crate::{Error, ResultExt};
@@ -39,25 +37,17 @@ impl MdnsController {
MdnsControllerInner::init().await?,
)))
}
pub async fn add<'a, I: IntoIterator<Item = (InterfaceId, TorSecretKeyV3)>>(
&self,
pkg_id: &PackageId,
interfaces: I,
) -> Result<(), Error> {
self.0.lock().await.add(pkg_id, interfaces).await
pub async fn add(&self, alias: String) -> Result<Arc<()>, Error> {
self.0.lock().await.add(alias).await
}
pub async fn remove<I: IntoIterator<Item = InterfaceId>>(
&self,
pkg_id: &PackageId,
interfaces: I,
) -> Result<(), Error> {
self.0.lock().await.remove(pkg_id, interfaces).await
pub async fn gc(&self, alias: String) -> Result<(), Error> {
self.0.lock().await.gc(alias).await
}
}
pub struct MdnsControllerInner {
alias_cmd: Option<Child>,
services: BTreeMap<(PackageId, InterfaceId), TorSecretKeyV3>,
services: BTreeMap<String, Weak<()>>,
}
impl MdnsControllerInner {
@@ -76,35 +66,30 @@ impl MdnsControllerInner {
self.alias_cmd = Some(
Command::new("avahi-alias")
.kill_on_drop(true)
.args(self.services.iter().map(|(_, key)| {
key.public()
.get_onion_address()
.get_address_without_dot_onion()
}))
.args(
self.services
.iter()
.filter(|(_, rc)| rc.strong_count() > 0)
.map(|(s, _)| s),
)
.spawn()?,
);
Ok(())
}
async fn add<'a, I: IntoIterator<Item = (InterfaceId, TorSecretKeyV3)>>(
&mut self,
pkg_id: &PackageId,
interfaces: I,
) -> Result<(), Error> {
self.services.extend(
interfaces
.into_iter()
.map(|(interface_id, key)| ((pkg_id.clone(), interface_id), key)),
);
async fn add(&mut self, alias: String) -> Result<Arc<()>, Error> {
let rc = if let Some(rc) = Weak::upgrade(&self.services.remove(&alias).unwrap_or_default())
{
rc
} else {
Arc::new(())
};
self.services.insert(alias, Arc::downgrade(&rc));
self.sync().await?;
Ok(())
Ok(rc)
}
async fn remove<I: IntoIterator<Item = InterfaceId>>(
&mut self,
pkg_id: &PackageId,
interfaces: I,
) -> Result<(), Error> {
for interface_id in interfaces {
self.services.remove(&(pkg_id.clone(), interface_id));
async fn gc(&mut self, alias: String) -> Result<(), Error> {
if let Some(rc) = Weak::upgrade(&self.services.remove(&alias).unwrap_or_default()) {
self.services.insert(alias, Arc::downgrade(&rc));
}
self.sync().await?;
Ok(())

View File

@@ -1,53 +1,33 @@
use std::collections::BTreeMap;
use std::sync::Arc;
use futures::future::BoxFuture;
use hyper::{Body, Error as HyperError, Request, Response};
use indexmap::IndexSet;
use rpc_toolkit::command;
use self::interface::InterfaceId;
use crate::net::interface::LanPortConfig;
use crate::util::serde::Port;
use crate::Error;
pub mod cert_resolver;
pub mod dhcp;
pub mod dns;
pub mod embassy_service_http_server;
pub mod interface;
pub mod keys;
#[cfg(feature = "avahi")]
pub mod mdns;
pub mod net_controller;
pub mod net_utils;
pub mod proxy_controller;
pub mod ssl;
pub mod static_server;
pub mod tor;
pub mod vhost_controller;
pub mod utils;
pub mod vhost;
pub mod web_server;
pub mod wifi;
const PACKAGE_CERT_PATH: &str = "/var/lib/embassy/ssl";
pub const PACKAGE_CERT_PATH: &str = "/var/lib/embassy/ssl";
#[command(subcommands(tor::tor))]
#[command(subcommands(tor::tor, dhcp::dhcp))]
pub fn net() -> Result<(), Error> {
Ok(())
}
#[derive(Default)]
struct PackageNetInfo {
interfaces: BTreeMap<InterfaceId, InterfaceMetadata>,
}
pub struct InterfaceMetadata {
pub fqdn: String,
pub lan_config: BTreeMap<Port, LanPortConfig>,
pub protocols: IndexSet<String>,
}
/// Indicates that the net controller has created the
/// SSL keys
#[derive(Clone, Copy)]
pub struct GeneratedCertificateMountPoint(());
pub type HttpHandler = Arc<
dyn Fn(Request<Body>) -> BoxFuture<'static, Result<Response<Body>, HyperError>> + Send + Sync,
>;

View File

@@ -1,278 +1,361 @@
use std::net::{Ipv4Addr, SocketAddr};
use std::path::PathBuf;
use std::str::FromStr;
use std::collections::BTreeMap;
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::sync::{Arc, Weak};
use color_eyre::eyre::eyre;
use models::InterfaceId;
use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
use patch_db::DbHandle;
use sqlx::PgPool;
use torut::onion::{OnionAddressV3, TorSecretKeyV3};
use sqlx::PgExecutor;
use tracing::instrument;
use crate::context::RpcContext;
use crate::hostname::{get_embassyd_tor_addr, get_hostname, HostNameReceipt};
use crate::error::ErrorCollection;
use crate::hostname::Hostname;
use crate::net::dns::DnsController;
use crate::net::interface::{Interface, TorConfig};
use crate::net::keys::Key;
#[cfg(feature = "avahi")]
use crate::net::mdns::MdnsController;
use crate::net::net_utils::ResourceFqdn;
use crate::net::proxy_controller::ProxyController;
use crate::net::ssl::SslManager;
use crate::net::ssl::{export_cert, export_key, SslManager};
use crate::net::tor::TorController;
use crate::net::{
GeneratedCertificateMountPoint, HttpHandler, InterfaceMetadata, PACKAGE_CERT_PATH,
};
use crate::net::vhost::VHostController;
use crate::s9pk::manifest::PackageId;
use crate::Error;
use crate::volume::cert_dir;
use crate::{Error, HOST_IP};
pub struct NetController {
pub tor: TorController,
pub(super) tor: TorController,
#[cfg(feature = "avahi")]
pub mdns: MdnsController,
pub proxy: ProxyController,
pub ssl: SslManager,
pub dns: DnsController,
pub(super) mdns: MdnsController,
pub(super) vhost: VHostController,
pub(super) dns: DnsController,
pub(super) ssl: Arc<SslManager>,
pub(super) os_bindings: Vec<Arc<()>>,
}
impl NetController {
#[instrument(skip(db, db_handle))]
pub async fn init<Db: DbHandle>(
embassyd_addr: SocketAddr,
embassyd_tor_key: TorSecretKeyV3,
#[instrument(skip_all)]
pub async fn init(
tor_control: SocketAddr,
dns_bind: &[SocketAddr],
db: PgPool,
db_handle: &mut Db,
import_root_ca: Option<(PKey<Private>, X509)>,
ssl: SslManager,
hostname: &Hostname,
os_key: &Key,
) -> Result<Self, Error> {
let receipts = HostNameReceipt::new(db_handle).await?;
let embassy_host_name = get_hostname(db_handle, &receipts).await?;
let embassy_name = embassy_host_name.local_domain_name();
let fqdn_name = ResourceFqdn::from_str(&embassy_name)?;
let ssl = match import_root_ca {
None => SslManager::init(db.clone(), db_handle).await,
Some(a) => SslManager::import_root_ca(db.clone(), a.0, a.1).await,
}?;
Ok(Self {
tor: TorController::init(embassyd_addr, embassyd_tor_key, tor_control).await?,
let ssl = Arc::new(ssl);
let mut res = Self {
tor: TorController::init(tor_control).await?,
#[cfg(feature = "avahi")]
mdns: MdnsController::init().await?,
proxy: ProxyController::init(embassyd_addr, fqdn_name, ssl.clone()).await?,
ssl,
vhost: VHostController::new(ssl.clone()),
dns: DnsController::init(dns_bind).await?,
ssl,
os_bindings: Vec::new(),
};
res.add_os_bindings(hostname, os_key).await?;
Ok(res)
}
async fn add_os_bindings(&mut self, hostname: &Hostname, key: &Key) -> Result<(), Error> {
// Internal DNS
self.vhost
.add(
key.clone(),
Some("embassy".into()),
443,
([127, 0, 0, 1], 80).into(),
false,
)
.await?;
self.os_bindings
.push(self.dns.add(None, HOST_IP.into()).await?);
// LAN IP
self.os_bindings.push(
self.vhost
.add(key.clone(), None, 443, ([127, 0, 0, 1], 80).into(), false)
.await?,
);
// localhost
self.os_bindings.push(
self.vhost
.add(
key.clone(),
Some("localhost".into()),
443,
([127, 0, 0, 1], 80).into(),
false,
)
.await?,
);
self.os_bindings.push(
self.vhost
.add(
key.clone(),
Some(hostname.no_dot_host_name()),
443,
([127, 0, 0, 1], 80).into(),
false,
)
.await?,
);
// LAN mDNS
self.os_bindings.push(
self.vhost
.add(
key.clone(),
Some(hostname.local_domain_name()),
443,
([127, 0, 0, 1], 80).into(),
false,
)
.await?,
);
// Tor (http)
self.os_bindings.push(
self.tor
.add(&key.tor_key(), 80, ([127, 0, 0, 1], 80).into())
.await?,
);
// Tor (https)
self.os_bindings.push(
self.vhost
.add(
key.clone(),
Some(key.tor_address().to_string()),
443,
([127, 0, 0, 1], 80).into(),
false,
)
.await?,
);
self.os_bindings.push(
self.tor
.add(&key.tor_key(), 443, ([127, 0, 0, 1], 443).into())
.await?,
);
Ok(())
}
#[instrument(skip_all)]
pub async fn create_service(
self: &Arc<Self>,
package: PackageId,
ip: Ipv4Addr,
) -> Result<NetService, Error> {
let dns = self.dns.add(Some(package.clone()), ip).await?;
Ok(NetService {
id: package,
ip,
dns,
controller: Arc::downgrade(self),
tor: BTreeMap::new(),
lan: BTreeMap::new(),
})
}
pub async fn setup_embassy_ui(rpc_ctx: RpcContext) -> Result<(), Error> {
NetController::setup_embassy_http_ui_handle(rpc_ctx.clone()).await?;
NetController::setup_embassy_https_ui_handle(rpc_ctx.clone()).await?;
Ok(())
}
async fn setup_embassy_https_ui_handle(rpc_ctx: RpcContext) -> Result<(), Error> {
let host_name = rpc_ctx.net_controller.proxy.get_hostname().await;
let host_name_fqdn: ResourceFqdn = host_name.parse()?;
let handler: HttpHandler =
crate::net::static_server::main_ui_server_router(rpc_ctx.clone()).await?;
let eos_pkg_id: PackageId = "embassy".parse().unwrap();
if let ResourceFqdn::Uri {
full_uri: _,
root,
tld: _,
} = host_name_fqdn.clone()
{
let root_cert = rpc_ctx
.net_controller
.ssl
.certificate_for(&root, &eos_pkg_id)
.await?;
rpc_ctx
.net_controller
.proxy
.add_certificate_to_resolver(host_name_fqdn.clone(), root_cert.clone())
.await?;
rpc_ctx
.net_controller
.proxy
.add_handle(443, host_name_fqdn.clone(), handler.clone(), true)
.await?;
};
// serving ip https is not yet supported
Ok(())
}
async fn setup_embassy_http_ui_handle(rpc_ctx: RpcContext) -> Result<(), Error> {
let host_name = rpc_ctx.net_controller.proxy.get_hostname().await;
let embassy_tor_addr = get_embassyd_tor_addr(rpc_ctx.clone()).await?;
let embassy_tor_fqdn: ResourceFqdn = embassy_tor_addr.parse()?;
let host_name_fqdn: ResourceFqdn = host_name.parse()?;
let ip_fqdn: ResourceFqdn = ResourceFqdn::IpAddr;
let localhost_fqdn = ResourceFqdn::LocalHost;
let handler: HttpHandler =
crate::net::static_server::main_ui_server_router(rpc_ctx.clone()).await?;
rpc_ctx
.net_controller
.proxy
.add_handle(80, embassy_tor_fqdn.clone(), handler.clone(), false)
.await?;
rpc_ctx
.net_controller
.proxy
.add_handle(80, host_name_fqdn.clone(), handler.clone(), false)
.await?;
rpc_ctx
.net_controller
.proxy
.add_handle(80, ip_fqdn.clone(), handler.clone(), false)
.await?;
rpc_ctx
.net_controller
.proxy
.add_handle(80, localhost_fqdn.clone(), handler.clone(), false)
.await?;
Ok(())
}
pub fn ssl_directory_for(pkg_id: &PackageId) -> PathBuf {
PathBuf::from(PACKAGE_CERT_PATH).join(pkg_id)
}
#[instrument(skip(self, interfaces, _generated_certificate))]
pub async fn add<'a, I>(
async fn add_tor(
&self,
pkg_id: &PackageId,
ip: Ipv4Addr,
interfaces: I,
_generated_certificate: GeneratedCertificateMountPoint,
) -> Result<(), Error>
where
I: IntoIterator<Item = (InterfaceId, &'a Interface, TorSecretKeyV3)> + Clone,
for<'b> &'b I: IntoIterator<Item = &'b (InterfaceId, &'a Interface, TorSecretKeyV3)>,
{
let interfaces_tor = interfaces
.clone()
.into_iter()
.filter_map(|i| match i.1.tor_config.clone() {
None => None,
Some(cfg) => Some((i.0, cfg, i.2)),
})
.collect::<Vec<(InterfaceId, TorConfig, TorSecretKeyV3)>>();
let (tor_res, _, proxy_res, _) = tokio::join!(
self.tor.add(pkg_id, ip, interfaces_tor),
{
#[cfg(feature = "avahi")]
let mdns_fut = self.mdns.add(
pkg_id,
interfaces
.clone()
.into_iter()
.map(|(interface_id, _, key)| (interface_id, key)),
);
key: &Key,
external: u16,
target: SocketAddr,
) -> Result<Vec<Arc<()>>, Error> {
let mut rcs = Vec::with_capacity(1);
rcs.push(self.tor.add(&key.tor_key(), external, target).await?);
Ok(rcs)
}
#[cfg(not(feature = "avahi"))]
let mdns_fut = futures::future::ready(());
mdns_fut
},
{
let interfaces =
interfaces
.clone()
.into_iter()
.filter_map(|(id, interface, tor_key)| {
interface.lan_config.as_ref().map(|cfg| {
(
id,
InterfaceMetadata {
fqdn: OnionAddressV3::from(&tor_key.public())
.get_address_without_dot_onion()
+ ".local",
lan_config: cfg.clone(),
protocols: interface.protocols.clone(),
},
)
})
});
self.proxy
.add_docker_service(pkg_id.clone(), ip, interfaces)
},
self.dns.add(pkg_id, ip),
async fn remove_tor(&self, key: &Key, external: u16, rcs: Vec<Arc<()>>) -> Result<(), Error> {
drop(rcs);
self.tor.gc(&key.tor_key(), external).await
}
async fn add_lan(
&self,
key: Key,
external: u16,
target: SocketAddr,
connect_ssl: bool,
) -> Result<Vec<Arc<()>>, Error> {
let mut rcs = Vec::with_capacity(2);
rcs.push(
self.vhost
.add(
key.clone(),
Some(key.local_address()),
external,
target.into(),
connect_ssl,
)
.await?,
);
tor_res?;
proxy_res?;
Ok(())
#[cfg(feature = "avahi")]
rcs.push(self.mdns.add(key.base_address()).await?);
Ok(rcs)
}
#[instrument(skip(self, interfaces))]
pub async fn remove<I: IntoIterator<Item = InterfaceId> + Clone>(
&self,
pkg_id: &PackageId,
ip: Ipv4Addr,
interfaces: I,
) -> Result<(), Error> {
let (tor_res, _, proxy_res, _) = tokio::join!(
self.tor.remove(pkg_id, interfaces.clone()),
{
#[cfg(feature = "avahi")]
let mdns_fut = self.mdns.remove(pkg_id, interfaces);
#[cfg(not(feature = "avahi"))]
let mdns_fut = futures::future::ready(());
mdns_fut
},
self.proxy.remove_docker_service(pkg_id),
self.dns.remove(pkg_id, ip),
);
tor_res?;
proxy_res?;
Ok(())
}
pub async fn generate_certificate_mountpoint<'a, I>(
&self,
pkg_id: &PackageId,
interfaces: &I,
) -> Result<GeneratedCertificateMountPoint, Error>
where
I: IntoIterator<Item = (InterfaceId, &'a Interface, TorSecretKeyV3)> + Clone,
for<'b> &'b I: IntoIterator<Item = &'b (InterfaceId, &'a Interface, TorSecretKeyV3)>,
{
tracing::info!("Generating SSL Certificate mountpoints for {}", pkg_id);
let package_path = PathBuf::from(PACKAGE_CERT_PATH).join(pkg_id);
tokio::fs::create_dir_all(&package_path).await?;
for (id, _, key) in interfaces {
let dns_base = OnionAddressV3::from(&key.public()).get_address_without_dot_onion();
let ssl_path_key = package_path.join(format!("{}.key.pem", id));
let ssl_path_cert = package_path.join(format!("{}.cert.pem", id));
let (key, chain) = self.ssl.certificate_for(&dns_base, pkg_id).await?;
tokio::try_join!(
crate::net::ssl::export_key(&key, &ssl_path_key),
crate::net::ssl::export_cert(&chain, &ssl_path_cert)
)?;
}
Ok(GeneratedCertificateMountPoint(()))
}
pub async fn export_root_ca(&self) -> Result<(PKey<Private>, X509), Error> {
self.ssl.export_root_ca().await
async fn remove_lan(&self, key: &Key, external: u16, rcs: Vec<Arc<()>>) -> Result<(), Error> {
drop(rcs);
#[cfg(feature = "avahi")]
self.mdns.gc(key.base_address()).await?;
self.vhost.gc(Some(key.local_address()), external).await
}
}
pub struct NetService {
id: PackageId,
ip: Ipv4Addr,
dns: Arc<()>,
controller: Weak<NetController>,
tor: BTreeMap<(InterfaceId, u16), (Key, Vec<Arc<()>>)>,
lan: BTreeMap<(InterfaceId, u16), (Key, Vec<Arc<()>>)>,
}
impl NetService {
fn net_controller(&self) -> Result<Arc<NetController>, Error> {
Weak::upgrade(&self.controller).ok_or_else(|| {
Error::new(
eyre!("NetController is shutdown"),
crate::ErrorKind::Network,
)
})
}
pub async fn add_tor<Ex>(
&mut self,
secrets: &mut Ex,
id: InterfaceId,
external: u16,
internal: u16,
) -> Result<(), Error>
where
for<'a> &'a mut Ex: PgExecutor<'a>,
{
let key = Key::for_interface(secrets, Some((self.id.clone(), id.clone()))).await?;
let ctrl = self.net_controller()?;
let tor_idx = (id, external);
let mut tor = self
.tor
.remove(&tor_idx)
.unwrap_or_else(|| (key.clone(), Vec::new()));
tor.1.append(
&mut ctrl
.add_tor(&key, external, SocketAddr::new(self.ip.into(), internal))
.await?,
);
self.tor.insert(tor_idx, tor);
Ok(())
}
pub async fn remove_tor(&mut self, id: InterfaceId, external: u16) -> Result<(), Error> {
let ctrl = self.net_controller()?;
if let Some((key, rcs)) = self.tor.remove(&(id, external)) {
ctrl.remove_tor(&key, external, rcs).await?;
}
Ok(())
}
pub async fn add_lan<Ex>(
&mut self,
secrets: &mut Ex,
id: InterfaceId,
external: u16,
internal: u16,
connect_ssl: bool,
) -> Result<(), Error>
where
for<'a> &'a mut Ex: PgExecutor<'a>,
{
let key = Key::for_interface(secrets, Some((self.id.clone(), id.clone()))).await?;
let ctrl = self.net_controller()?;
let lan_idx = (id, external);
let mut lan = self
.lan
.remove(&lan_idx)
.unwrap_or_else(|| (key.clone(), Vec::new()));
lan.1.append(
&mut ctrl
.add_lan(
key,
external,
SocketAddr::new(self.ip.into(), internal),
connect_ssl,
)
.await?,
);
self.lan.insert(lan_idx, lan);
Ok(())
}
pub async fn remove_lan(&mut self, id: InterfaceId, external: u16) -> Result<(), Error> {
let ctrl = self.net_controller()?;
if let Some((key, rcs)) = self.lan.remove(&(id, external)) {
ctrl.remove_lan(&key, external, rcs).await?;
}
Ok(())
}
pub async fn export_cert<Ex>(
&self,
secrets: &mut Ex,
id: &InterfaceId,
ip: IpAddr,
) -> Result<(), Error>
where
for<'a> &'a mut Ex: PgExecutor<'a>,
{
let key = Key::for_interface(secrets, Some((self.id.clone(), id.clone()))).await?;
let ctrl = self.net_controller()?;
let cert = ctrl.ssl.with_certs(key, ip).await?;
let cert_dir = cert_dir(&self.id, id);
tokio::fs::create_dir_all(&cert_dir).await?;
export_key(
&cert.key().openssl_key_nistp256(),
&cert_dir.join(format!("{id}.key.pem")),
)
.await?;
export_cert(
&cert.fullchain_nistp256(),
&cert_dir.join(format!("{id}.cert.pem")),
)
.await?; // TODO: can upgrade to ed25519?
Ok(())
}
pub async fn remove_all(mut self) -> Result<(), Error> {
let mut errors = ErrorCollection::new();
if let Some(ctrl) = Weak::upgrade(&self.controller) {
for ((_, external), (key, rcs)) in std::mem::take(&mut self.lan) {
errors.handle(ctrl.remove_lan(&key, external, rcs).await);
}
for ((_, external), (key, rcs)) in std::mem::take(&mut self.tor) {
errors.handle(ctrl.remove_tor(&key, external, rcs).await);
}
std::mem::take(&mut self.dns);
errors.handle(ctrl.dns.gc(Some(self.id.clone()), self.ip).await);
self.ip = Ipv4Addr::new(0, 0, 0, 0);
errors.into_result()
} else {
Err(Error::new(
eyre!("NetController is shutdown"),
crate::ErrorKind::Network,
))
}
}
}
impl Drop for NetService {
fn drop(&mut self) {
if self.ip != Ipv4Addr::new(0, 0, 0, 0) {
tracing::debug!("Dropping NetService for {}", self.id);
let svc = std::mem::replace(
self,
NetService {
id: Default::default(),
ip: Ipv4Addr::new(0, 0, 0, 0),
dns: Default::default(),
controller: Default::default(),
tor: Default::default(),
lan: Default::default(),
},
);
tokio::spawn(async move { svc.remove_all().await.unwrap() });
}
}
}

View File

@@ -1,139 +0,0 @@
use std::fmt;
use std::net::IpAddr;
use std::str::FromStr;
use color_eyre::eyre::eyre;
use http::{Request, Uri};
use hyper::Body;
use crate::Error;
pub fn host_addr_fqdn(req: &Request<Body>) -> Result<ResourceFqdn, Error> {
let host = req.headers().get(http::header::HOST);
match host {
Some(host) => {
let host_str = host
.to_str()
.map_err(|e| Error::new(eyre!("{}", e), crate::ErrorKind::Ascii))?
.to_string();
let host_uri: ResourceFqdn = host_str.split(':').next().unwrap().parse()?;
Ok(host_uri)
}
None => Err(Error::new(
eyre!("No Host header"),
crate::ErrorKind::MissingHeader,
)),
}
}
#[derive(Eq, PartialEq, PartialOrd, Ord, Debug, Clone)]
pub enum ResourceFqdn {
IpAddr,
Uri {
full_uri: String,
root: String,
tld: Tld,
},
LocalHost,
}
impl fmt::Display for ResourceFqdn {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
ResourceFqdn::Uri {
full_uri,
root: _,
tld: _,
} => {
write!(f, "{}", full_uri)
}
ResourceFqdn::LocalHost => write!(f, "localhost"),
ResourceFqdn::IpAddr => write!(f, "ip-address"),
}
}
}
#[derive(PartialEq, Eq, PartialOrd, Ord, Debug, Clone)]
pub enum Tld {
Local,
Onion,
Embassy,
}
impl fmt::Display for Tld {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
Tld::Local => write!(f, ".local"),
Tld::Onion => write!(f, ".onion"),
Tld::Embassy => write!(f, ".embassy"),
}
}
}
impl FromStr for ResourceFqdn {
type Err = Error;
fn from_str(input: &str) -> Result<ResourceFqdn, Self::Err> {
if input.parse::<IpAddr>().is_ok() {
return Ok(ResourceFqdn::IpAddr);
}
if input == "localhost" {
return Ok(ResourceFqdn::LocalHost);
}
let hostname_split: Vec<&str> = input.split('.').collect();
if hostname_split.len() != 2 {
return Err(Error::new(
eyre!("invalid url tld number: add support for tldextract to parse complex urls like blah.domain.co.uk and etc?"),
crate::ErrorKind::ParseUrl,
));
}
match hostname_split[1] {
"local" => Ok(ResourceFqdn::Uri {
full_uri: input.to_owned(),
root: hostname_split[0].to_owned(),
tld: Tld::Local,
}),
"embassy" => Ok(ResourceFqdn::Uri {
full_uri: input.to_owned(),
root: hostname_split[0].to_owned(),
tld: Tld::Embassy,
}),
"onion" => Ok(ResourceFqdn::Uri {
full_uri: input.to_owned(),
root: hostname_split[0].to_owned(),
tld: Tld::Onion,
}),
_ => Err(Error::new(
eyre!("Unknown TLD for enum"),
crate::ErrorKind::ParseUrl,
)),
}
}
}
impl TryFrom<Uri> for ResourceFqdn {
type Error = Error;
fn try_from(value: Uri) -> Result<Self, Self::Error> {
Self::from_str(&value.to_string())
}
}
pub fn is_upgrade_req(req: &Request<Body>) -> bool {
req.headers()
.get("connection")
.and_then(|c| c.to_str().ok())
.map(|c| {
c.split(",")
.any(|c| c.trim().eq_ignore_ascii_case("upgrade"))
})
.unwrap_or(false)
}

View File

@@ -1,334 +0,0 @@
use std::collections::BTreeMap;
use std::net::{Ipv4Addr, SocketAddr};
use std::str::FromStr;
use std::sync::Arc;
use color_eyre::eyre::eyre;
use futures::FutureExt;
use http::uri::{Authority, Scheme};
use http::{Request, Response, Uri};
use hyper::{Body, Error as HyperError};
use models::{InterfaceId, PackageId};
use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
use tokio::sync::Mutex;
use tracing::{error, instrument};
use crate::net::net_utils::{is_upgrade_req, ResourceFqdn};
use crate::net::ssl::SslManager;
use crate::net::vhost_controller::VHOSTController;
use crate::net::{HttpHandler, InterfaceMetadata, PackageNetInfo};
use crate::{Error, ResultExt};
pub struct ProxyController {
inner: Mutex<ProxyControllerInner>,
}
impl ProxyController {
pub async fn init(
embassyd_socket_addr: SocketAddr,
embassy_fqdn: ResourceFqdn,
ssl_manager: SslManager,
) -> Result<Self, Error> {
Ok(ProxyController {
inner: Mutex::new(
ProxyControllerInner::init(embassyd_socket_addr, embassy_fqdn, ssl_manager).await?,
),
})
}
pub async fn add_docker_service<I: IntoIterator<Item = (InterfaceId, InterfaceMetadata)>>(
&self,
package: PackageId,
ipv4: Ipv4Addr,
interfaces: I,
) -> Result<(), Error> {
self.inner
.lock()
.await
.add_docker_service(package, ipv4, interfaces)
.await
}
pub async fn remove_docker_service(&self, package: &PackageId) -> Result<(), Error> {
self.inner.lock().await.remove_docker_service(package).await
}
pub async fn add_certificate_to_resolver(
&self,
fqdn: ResourceFqdn,
cert_data: (PKey<Private>, Vec<X509>),
) -> Result<(), Error> {
self.inner
.lock()
.await
.add_certificate_to_resolver(fqdn, cert_data)
.await
}
pub async fn add_handle(
&self,
ext_port: u16,
fqdn: ResourceFqdn,
handler: HttpHandler,
is_ssl: bool,
) -> Result<(), Error> {
self.inner
.lock()
.await
.add_handle(ext_port, fqdn, handler, is_ssl)
.await
}
pub async fn get_hostname(&self) -> String {
self.inner.lock().await.get_embassy_hostname()
}
async fn proxy(
client: &hyper::Client<hyper::client::HttpConnector>,
mut req: Request<Body>,
addr: SocketAddr,
) -> Result<Response<Body>, HyperError> {
let mut uri = std::mem::take(req.uri_mut()).into_parts();
uri.scheme = Some(Scheme::HTTP);
uri.authority = Authority::from_str(&addr.to_string()).ok();
match Uri::from_parts(uri) {
Ok(uri) => *req.uri_mut() = uri,
Err(e) => error!("Error rewriting uri: {}", e),
}
let addr = req.uri().to_string();
if is_upgrade_req(&req) {
let upgraded_req = hyper::upgrade::on(&mut req);
let mut res = client.request(req).await?;
let upgraded_res = hyper::upgrade::on(&mut res);
tokio::spawn(async move {
if let Err(e) = async {
let mut req = upgraded_req.await?;
let mut res = upgraded_res.await?;
tokio::io::copy_bidirectional(&mut req, &mut res).await?;
Ok::<_, color_eyre::eyre::Report>(())
}
.await
{
error!("error binding together tcp streams for {}: {}", addr, e);
}
});
Ok(res)
} else {
client.request(req).await
}
}
}
struct ProxyControllerInner {
ssl_manager: SslManager,
vhosts: VHOSTController,
embassyd_fqdn: ResourceFqdn,
docker_interfaces: BTreeMap<PackageId, PackageNetInfo>,
docker_iface_lookups: BTreeMap<(PackageId, InterfaceId), ResourceFqdn>,
}
impl ProxyControllerInner {
#[instrument]
async fn init(
embassyd_socket_addr: SocketAddr,
embassyd_fqdn: ResourceFqdn,
ssl_manager: SslManager,
) -> Result<Self, Error> {
let inner = ProxyControllerInner {
vhosts: VHOSTController::init(embassyd_socket_addr),
ssl_manager,
embassyd_fqdn,
docker_interfaces: BTreeMap::new(),
docker_iface_lookups: BTreeMap::new(),
};
Ok(inner)
}
async fn add_certificate_to_resolver(
&mut self,
hostname: ResourceFqdn,
cert_data: (PKey<Private>, Vec<X509>),
) -> Result<(), Error> {
self.vhosts
.cert_resolver
.add_certificate_to_resolver(hostname, cert_data)
.await
.map_err(|err| {
Error::new(
eyre!("Unable to add ssl cert to the resolver: {}", err),
crate::ErrorKind::Network,
)
})?;
Ok(())
}
async fn add_package_certificate_to_resolver(
&mut self,
resource_fqdn: ResourceFqdn,
pkg_id: PackageId,
) -> Result<(), Error> {
let package_cert = match resource_fqdn.clone() {
ResourceFqdn::IpAddr => {
return Err(Error::new(
eyre!("ssl not supported for ip addresses"),
crate::ErrorKind::Network,
))
}
ResourceFqdn::Uri {
full_uri: _,
root,
tld: _,
} => self.ssl_manager.certificate_for(&root, &pkg_id).await?,
ResourceFqdn::LocalHost => {
return Err(Error::new(
eyre!("ssl not supported for localhost"),
crate::ErrorKind::Network,
))
}
};
self.vhosts
.cert_resolver
.add_certificate_to_resolver(resource_fqdn, package_cert)
.await
.map_err(|err| {
Error::new(
eyre!("Unable to add ssl cert to the resolver: {}", err),
crate::ErrorKind::Network,
)
})?;
Ok(())
}
pub async fn add_handle(
&mut self,
external_svc_port: u16,
fqdn: ResourceFqdn,
svc_handler: HttpHandler,
is_ssl: bool,
) -> Result<(), Error> {
self.vhosts
.add_server_or_handle(external_svc_port, fqdn, svc_handler, is_ssl)
.await
}
#[instrument(skip(self, interfaces))]
pub async fn add_docker_service<I: IntoIterator<Item = (InterfaceId, InterfaceMetadata)>>(
&mut self,
package: PackageId,
docker_ipv4: Ipv4Addr,
interfaces: I,
) -> Result<(), Error> {
let mut interface_map = interfaces
.into_iter()
.filter(|(_, meta)| {
// don't add stuff for anything we can't connect to over some flavor of http
(meta.protocols.contains("http") || meta.protocols.contains("https"))
// also don't add anything unless it has at least one exposed port
&& !meta.lan_config.is_empty()
})
.collect::<BTreeMap<InterfaceId, InterfaceMetadata>>();
for (id, meta) in interface_map.iter() {
for (external_svc_port, lan_port_config) in meta.lan_config.iter() {
let full_fqdn = ResourceFqdn::from_str(&meta.fqdn).unwrap();
self.docker_iface_lookups
.insert((package.clone(), id.clone()), full_fqdn.clone());
self.add_package_certificate_to_resolver(full_fqdn.clone(), package.clone())
.await?;
let svc_handler =
Self::create_docker_handle((docker_ipv4, lan_port_config.internal).into())
.await;
self.add_handle(
external_svc_port.0,
full_fqdn.clone(),
svc_handler,
lan_port_config.ssl,
)
.await?;
}
}
let docker_interface = self.docker_interfaces.entry(package.clone()).or_default();
docker_interface.interfaces.append(&mut interface_map);
Ok(())
}
async fn create_docker_handle(internal_addr: SocketAddr) -> HttpHandler {
let svc_handler: HttpHandler = Arc::new(move |req| {
let client = hyper::client::Client::builder()
.set_host(false)
.build_http();
async move { ProxyController::proxy(&client, req, internal_addr).await }.boxed()
});
svc_handler
}
#[instrument(skip(self))]
/// Tears down all vhost routing state registered for `package`: removes its
/// handler mappings and certificates, and shuts down any per-port server that
/// ends up with no handlers left. No-op if the package was never registered.
pub async fn remove_docker_service(&mut self, package: &PackageId) -> Result<(), Error> {
    // (external port, interface) pairs whose vhost server became empty and
    // should be fully shut down in the second pass below.
    let mut server_removals: Vec<(u16, InterfaceId)> = Default::default();
    let net_info = match self.docker_interfaces.get(package) {
        Some(a) => a,
        None => return Ok(()), // nothing registered for this package
    };
    for (id, meta) in &net_info.interfaces {
        for (service_ext_port, _lan_port_config) in meta.lan_config.iter() {
            if let Some(server) = self.vhosts.service_servers.get_mut(&service_ext_port.0) {
                if let Some(fqdn) = self
                    .docker_iface_lookups
                    .get(&(package.clone(), id.clone()))
                {
                    server.remove_svc_handler_mapping(fqdn.to_owned()).await?;
                    self.vhosts
                        .cert_resolver
                        .remove_cert(fqdn.to_owned())
                        .await?;
                    // If that was the last mapping on this port, schedule the
                    // whole server for teardown.
                    let mapping = server.svc_mapping.read().await;
                    if mapping.is_empty() {
                        server_removals.push((service_ext_port.0, id.to_owned()));
                    }
                }
            }
        }
    }
    for (port, interface_id) in server_removals {
        if let Some(removed_server) = self.vhosts.service_servers.remove(&port) {
            removed_server.shutdown.send(()).map_err(|_| {
                Error::new(
                    eyre!("Hyper server did not quit properly"),
                    crate::ErrorKind::Unknown,
                )
            })?;
            // Wait for the server task to actually finish before dropping it.
            removed_server
                .handle
                .await
                .with_kind(crate::ErrorKind::Unknown)?;
            // Fix: the original wrote `remove(&package.clone())`, allocating a
            // fresh PackageId per iteration just to borrow it; `package` is
            // already the borrow the map lookup needs.
            self.docker_interfaces.remove(package);
            self.docker_iface_lookups
                .remove(&(package.clone(), interface_id));
        }
    }
    Ok(())
}
/// Returns the server's own fully-qualified domain name as a `String`.
pub fn get_embassy_hostname(&self) -> String {
    format!("{}", self.embassyd_fqdn)
}
}

View File

@@ -1,7 +1,9 @@
use std::cmp::Ordering;
use std::collections::{BTreeMap, BTreeSet};
use std::net::IpAddr;
use std::path::Path;
use std::time::{SystemTime, UNIX_EPOCH};
use color_eyre::eyre::eyre;
use futures::FutureExt;
use openssl::asn1::{Asn1Integer, Asn1Time};
use openssl::bn::{BigNum, MsbOption};
@@ -11,147 +13,109 @@ use openssl::nid::Nid;
use openssl::pkey::{PKey, Private};
use openssl::x509::{X509Builder, X509Extension, X509NameBuilder, X509};
use openssl::*;
use patch_db::DbHandle;
use sqlx::PgPool;
use tokio::process::Command;
use tokio::sync::Mutex;
use tokio::sync::{Mutex, RwLock};
use tracing::instrument;
use crate::s9pk::manifest::PackageId;
use crate::util::Invoke;
use crate::account::AccountInfo;
use crate::hostname::Hostname;
use crate::net::dhcp::ips;
use crate::net::keys::{Key, KeyInfo};
use crate::{Error, ErrorKind, ResultExt};
static CERTIFICATE_VERSION: i32 = 2; // X509 version 3 is actually encoded as '2' in the cert because fuck you.
pub const ROOT_CA_STATIC_PATH: &str = "/var/lib/embassy/ssl/root-ca.crt";
#[derive(Debug, Clone)]
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct CertPair {
pub ed25519: X509,
pub nistp256: X509,
}
impl CertPair {
    // Returns a (possibly refreshed) cert pair for `applicant`, plus a flag
    // saying whether anything was actually re-issued. An existing cert is
    // reused only while it stays valid for more than 30 days AND already
    // covers every requested IP address.
    fn updated(
        pair: Option<&Self>,
        hostname: &Hostname,
        signer: (&PKey<Private>, &X509),
        applicant: &Key,
        ip: BTreeSet<IpAddr>,
    ) -> Result<(Self, bool), Error> {
        let mut updated = false;
        // Checks one algorithm's cert and re-signs with `osk` when it cannot be reused.
        // Note: captures `updated` mutably, so a re-issue for either algorithm flips the flag.
        let mut updated_cert = |cert: Option<&X509>, osk: PKey<Private>| -> Result<X509, Error> {
            let mut ips = BTreeSet::new();
            if let Some(cert) = cert {
                // Collect the IPs already baked into the existing cert's SAN list
                // (4 bytes = IPv4, 16 bytes = IPv6; anything else is ignored).
                ips.extend(
                    cert.subject_alt_names()
                        .iter()
                        .flatten()
                        .filter_map(|a| a.ipaddress())
                        .filter_map(|a| match a.len() {
                            4 => Some::<IpAddr>(<[u8; 4]>::try_from(a).unwrap().into()),
                            16 => Some::<IpAddr>(<[u8; 16]>::try_from(a).unwrap().into()),
                            _ => None,
                        }),
                );
                // Reuse iff expiry is more than 30 days out and no new IPs are needed.
                if cert
                    .not_after()
                    .compare(Asn1Time::days_from_now(30)?.as_ref())?
                    == Ordering::Greater
                    && ips.is_superset(&ip)
                {
                    return Ok(cert.clone());
                }
            }
            // Re-issue covering both the old cert's IPs and the newly requested ones.
            ips.extend(ip.iter().copied());
            updated = true;
            make_leaf_cert(signer, (&osk, &SANInfo::new(&applicant, hostname, ips)))
        };
        Ok((
            Self {
                ed25519: updated_cert(pair.map(|c| &c.ed25519), applicant.openssl_key_ed25519())?,
                nistp256: updated_cert(
                    pair.map(|c| &c.nistp256),
                    applicant.openssl_key_nistp256(),
                )?,
            },
            updated,
        ))
    }
}
#[derive(Debug)]
pub struct SslManager {
store: SslStore,
hostname: Hostname,
root_cert: X509,
int_key: PKey<Private>,
int_cert: X509,
cert_cache: RwLock<BTreeMap<Key, CertPair>>,
}
impl SslManager {
/// Builds an `SslManager` from the account's root CA: mints a fresh
/// intermediate signing key + certificate and starts with an empty leaf cache.
pub fn new(account: &AccountInfo) -> Result<Self, Error> {
    let intermediate_key = generate_key()?;
    let intermediate_cert =
        make_int_cert((&account.root_ca_key, &account.root_ca_cert), &intermediate_key)?;
    Ok(Self {
        hostname: account.hostname.clone(),
        root_cert: account.root_ca_cert.clone(),
        int_key: intermediate_key,
        int_cert: intermediate_cert,
        cert_cache: RwLock::new(BTreeMap::new()),
    })
}
pub async fn with_certs(&self, key: Key, ip: IpAddr) -> Result<KeyInfo, Error> {
let mut ips = ips().await?;
ips.insert(ip);
let (pair, updated) = CertPair::updated(
self.cert_cache.read().await.get(&key),
&self.hostname,
(&self.int_key, &self.int_cert),
&key,
ips,
)?;
if updated {
self.cert_cache
.write()
.await
.insert(key.clone(), pair.clone());
}
#[derive(Debug, Clone)]
struct SslStore {
    // Postgres pool backing the `certificates` table (keys + certs as PEM text).
    secret_store: PgPool,
}
impl SslStore {
fn new(db: PgPool) -> Result<Self, Error> {
Ok(SslStore { secret_store: db })
}
#[instrument(skip(self))]
// Persists the root CA under the fixed row id 0 (paired with `load_root_certificate`).
async fn save_root_certificate(&self, key: &PKey<Private>, cert: &X509) -> Result<(), Error> {
    // PEM-encode both halves so they can live in plain text columns.
    let key_str = String::from_utf8(key.private_key_to_pem_pkcs8()?)?;
    let cert_str = String::from_utf8(cert.to_pem()?)?;
    let _n = sqlx::query!("INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (0, $1, $2, NULL, now(), now())", key_str, cert_str).execute(&self.secret_store).await?;
    Ok(())
}
#[instrument(skip(self))]
// Loads the root CA key/cert pair from the fixed row id 0.
// Returns `Ok(None)` when no root CA has ever been saved.
async fn load_root_certificate(&self) -> Result<Option<(PKey<Private>, X509)>, Error> {
    let row = sqlx::query!("SELECT priv_key_pem, certificate_pem FROM certificates WHERE id = 0;")
        .fetch_optional(&self.secret_store)
        .await?;
    if let Some(record) = row {
        // Both columns are PEM text; decode them back into openssl objects.
        let priv_key = PKey::private_key_from_pem(&record.priv_key_pem.into_bytes())?;
        let certificate = X509::from_pem(&record.certificate_pem.into_bytes())?;
        Ok(Some((priv_key, certificate)))
    } else {
        Ok(None)
    }
}
#[instrument(skip(self))]
// Persists the intermediate CA under the fixed row id 1
// (paired with `load_intermediate_certificate`).
async fn save_intermediate_certificate(
    &self,
    key: &PKey<Private>,
    cert: &X509,
) -> Result<(), Error> {
    // PEM-encode both halves so they can live in plain text columns.
    let key_str = String::from_utf8(key.private_key_to_pem_pkcs8()?)?;
    let cert_str = String::from_utf8(cert.to_pem()?)?;
    let _n = sqlx::query!("INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (1, $1, $2, NULL, now(), now())", key_str, cert_str).execute(&self.secret_store).await?;
    Ok(())
}
// Loads the intermediate CA key/cert pair from the fixed row id 1.
// Returns `Ok(None)` when no intermediate has ever been saved.
async fn load_intermediate_certificate(&self) -> Result<Option<(PKey<Private>, X509)>, Error> {
    let m_row =
        sqlx::query!("SELECT priv_key_pem, certificate_pem FROM certificates WHERE id = 1;")
            .fetch_optional(&self.secret_store)
            .await?;
    match m_row {
        None => Ok(None),
        Some(row) => {
            // Decode the stored PEM text back into openssl objects.
            let priv_key = PKey::private_key_from_pem(&row.priv_key_pem.into_bytes())?;
            let certificate = X509::from_pem(&row.certificate_pem.into_bytes())?;
            Ok(Some((priv_key, certificate)))
        }
    }
}
#[instrument(skip(self))]
// Replaces the stored root CA with an externally supplied one. The old
// intermediate (id 1) is also deleted, since it was signed by the old root;
// a new one must be generated afterwards.
async fn import_root_certificate(
    &self,
    root_key: &PKey<Private>,
    root_cert: &X509,
) -> Result<(), Error> {
    // remove records for both root and intermediate CA
    sqlx::query!("DELETE FROM certificates WHERE id = 0 OR id = 1;")
        .execute(&self.secret_store)
        .await?;
    self.save_root_certificate(root_key, root_cert).await?;
    Ok(())
}
#[instrument(skip(self))]
// Persists a leaf certificate keyed by `lookup_string` (auto-assigned row id;
// contrast with the fixed ids used for the root/intermediate CAs).
async fn save_certificate(
    &self,
    key: &PKey<Private>,
    cert: &X509,
    lookup_string: &str,
) -> Result<(), Error> {
    // PEM-encode both halves so they can live in plain text columns.
    let key_str = String::from_utf8(key.private_key_to_pem_pkcs8()?)?;
    let cert_str = String::from_utf8(cert.to_pem()?)?;
    let _n = sqlx::query!("INSERT INTO certificates (priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES ($1, $2, $3, now(), now())", key_str, cert_str, lookup_string).execute(&self.secret_store).await?;
    Ok(())
}
// Loads the leaf certificate stored under `lookup_string`, if any.
async fn load_certificate(
    &self,
    lookup_string: &str,
) -> Result<Option<(PKey<Private>, X509)>, Error> {
    let m_row = sqlx::query!(
        "SELECT priv_key_pem, certificate_pem FROM certificates WHERE lookup_string = $1",
        lookup_string
    )
    .fetch_optional(&self.secret_store)
    .await?;
    match m_row {
        None => Ok(None),
        Some(row) => {
            // Decode the stored PEM text back into openssl objects.
            let priv_key = PKey::private_key_from_pem(&row.priv_key_pem.into_bytes())?;
            let certificate = X509::from_pem(&row.certificate_pem.into_bytes())?;
            Ok(Some((priv_key, certificate)))
        }
    }
}
#[instrument(skip(self))]
// Overwrites the leaf certificate stored under `lookup_string`. Errors if the
// row does not exist — use `save_certificate` for first-time inserts.
async fn update_certificate(
    &self,
    key: &PKey<Private>,
    cert: &X509,
    lookup_string: &str,
) -> Result<(), Error> {
    // PEM-encode both halves so they can live in plain text columns.
    let key_str = String::from_utf8(key.private_key_to_pem_pkcs8()?)?;
    let cert_str = String::from_utf8(cert.to_pem()?)?;
    let n = sqlx::query!("UPDATE certificates SET priv_key_pem = $1, certificate_pem = $2, updated_at = now() WHERE lookup_string = $3", key_str, cert_str, lookup_string)
        .execute(&self.secret_store).await?;
    // rows_affected == 0 means the lookup_string was never saved.
    if n.rows_affected() == 0 {
        return Err(Error::new(
            eyre!(
                "Attempted to update non-existent certificate: {}",
                lookup_string
            ),
            ErrorKind::OpenSsl,
        ));
    }
    Ok(())
Ok(key.with_certs(pair, self.int_cert.clone(), self.root_cert.clone()))
}
}
@@ -161,150 +125,13 @@ lazy_static::lazy_static! {
static ref SSL_MUTEX: Mutex<()> = Mutex::new(()); // TODO: make thread safe
}
impl SslManager {
#[instrument(skip(db, handle))]
// Initializes the SSL subsystem: loads (or generates and persists) the root
// and intermediate CAs, publishes the root CA to the static download path and
// the system trust store, and repairs the certificates id sequence.
pub async fn init<Db: DbHandle>(db: PgPool, handle: &mut Db) -> Result<Self, Error> {
    let store = SslStore::new(db)?;
    let receipts = crate::hostname::HostNameReceipt::new(handle).await?;
    let id = crate::hostname::get_id(handle, &receipts).await?;
    // First boot: no root CA yet, so mint one and persist it.
    let (root_key, root_cert) = match store.load_root_certificate().await? {
        None => {
            let root_key = generate_key()?;
            let server_id = id;
            let root_cert = make_root_cert(&root_key, &server_id)?;
            store.save_root_certificate(&root_key, &root_cert).await?;
            Ok::<_, Error>((root_key, root_cert))
        }
        Some((key, cert)) => Ok((key, cert)),
    }?;
    // generate static file for download, this will get blown up on embassy restart so it's good to write it on
    // every ssl manager init
    tokio::fs::create_dir_all(
        Path::new(ROOT_CA_STATIC_PATH)
            .parent()
            .unwrap_or(Path::new("/")),
    )
    .await?;
    tokio::fs::write(ROOT_CA_STATIC_PATH, root_cert.to_pem()?).await?;
    // write to ca cert store
    tokio::fs::write(
        "/usr/local/share/ca-certificates/embassy-root-ca.crt",
        root_cert.to_pem()?,
    )
    .await?;
    // Refresh the OS trust store so local tools trust our root CA.
    Command::new("update-ca-certificates")
        .invoke(crate::ErrorKind::OpenSsl)
        .await?;
    // Same lazy-initialization dance for the intermediate signing CA.
    let (int_key, int_cert) = match store.load_intermediate_certificate().await? {
        None => {
            let int_key = generate_key()?;
            let int_cert = make_int_cert((&root_key, &root_cert), &int_key)?;
            store
                .save_intermediate_certificate(&int_key, &int_cert)
                .await?;
            Ok::<_, Error>((int_key, int_cert))
        }
        Some((key, cert)) => Ok((key, cert)),
    }?;
    // Bump the id sequence past any manually-inserted rows (ids 0 and 1 above)
    // so future auto-assigned ids never collide.
    sqlx::query!("SELECT setval('certificates_id_seq', GREATEST(MAX(id) + 1, nextval('certificates_id_seq') - 1)) FROM certificates")
        .fetch_one(&store.secret_store).await?;
    Ok(SslManager {
        store,
        root_cert,
        int_key,
        int_cert,
    })
}
// TODO: currently the burden of proof is on the caller to ensure that all of the arguments to this function are
// consistent. The following properties are assumed and not verified:
// 1. `root_cert` is self-signed and contains the public key that matches the private key `root_key`
// 2. certificate is not past its expiration date
// Warning: If this function ever fails, you must either call it again or regenerate your certificates from scratch,
// since it is possible for it to fail after successfully saving the root certificate but before successfully saving
// the intermediate certificate (the two writes are not transactional).
#[instrument(skip(db))]
// Replaces the stored root CA with a user-supplied one and mints a fresh
// intermediate signed by it.
pub async fn import_root_ca(
    db: PgPool,
    root_key: PKey<Private>,
    root_cert: X509,
) -> Result<Self, Error> {
    let store = SslStore::new(db)?;
    store.import_root_certificate(&root_key, &root_cert).await?;
    // The old intermediate was signed by the old root; generate a new one.
    let int_key = generate_key()?;
    let int_cert = make_int_cert((&root_key, &root_cert), &int_key)?;
    store
        .save_intermediate_certificate(&int_key, &int_cert)
        .await?;
    Ok(SslManager {
        store,
        root_cert,
        int_key,
        int_cert,
    })
}
#[instrument(skip(self))]
pub async fn export_root_ca(&self) -> Result<(PKey<Private>, X509), Error> {
match self.store.load_root_certificate().await? {
None => Err(Error::new(
eyre!("Failed to export root certificate: root certificate has not been generated"),
ErrorKind::OpenSsl,
)),
Some(a) => Ok(a),
}
}
#[instrument(skip(self))]
// Returns a leaf key plus its full chain (leaf, intermediate, root) for
// `dns_base`, generating a certificate on first use and re-issuing it when it
// is within 30 days of expiry.
pub async fn certificate_for(
    &self,
    dns_base: &str,
    package_id: &PackageId,
) -> Result<(PKey<Private>, Vec<X509>), Error> {
    let (key, cert) = match self.store.load_certificate(dns_base).await? {
        // First request for this dns_base: mint and persist a new leaf.
        None => {
            let key = generate_key()?;
            let cert = make_leaf_cert(
                (&self.int_key, &self.int_cert),
                (&key, dns_base, package_id),
            )?;
            self.store.save_certificate(&key, &cert, dns_base).await?;
            Ok::<_, Error>((key, cert))
        }
        Some((key, cert)) => {
            // Renew early: re-issue once less than 30 days of validity remain.
            let window_end = Asn1Time::days_from_now(30)?;
            let expiration = cert.not_after();
            if expiration.compare(&window_end)? == Ordering::Less {
                let key = generate_key()?;
                let cert = make_leaf_cert(
                    (&self.int_key, &self.int_cert),
                    (&key, dns_base, package_id),
                )?;
                self.store.update_certificate(&key, &cert, dns_base).await?;
                Ok((key, cert))
            } else {
                Ok((key, cert))
            }
        }
    }?;
    // Chain order: leaf first, then intermediate, then root.
    Ok((
        key,
        vec![cert, self.int_cert.clone(), self.root_cert.clone()],
    ))
}
}
/// Writes `key` to `target` as PKCS#8 PEM, tagging any I/O failure with the
/// destination path for easier diagnosis.
pub async fn export_key(key: &PKey<Private>, target: &Path) -> Result<(), Error> {
    let pem = key.private_key_to_pem_pkcs8()?;
    tokio::fs::write(target, pem)
        .await
        .with_ctx(|_| (ErrorKind::Filesystem, target.display().to_string()))?;
    Ok(())
}
pub async fn export_cert(chain: &Vec<X509>, target: &Path) -> Result<(), Error> {
pub async fn export_cert(chain: &[&X509], target: &Path) -> Result<(), Error> {
tokio::fs::write(
target,
chain
@@ -315,21 +142,23 @@ pub async fn export_cert(chain: &Vec<X509>, target: &Path) -> Result<(), Error>
.await?;
Ok(())
}
#[instrument]
#[instrument(skip_all)]
// Produces a random 64-bit X.509 serial number (top bit may be zero, always
// non-negative), as required for certificate issuance.
fn rand_serial() -> Result<Asn1Integer, Error> {
    let mut serial = BigNum::new()?;
    serial.rand(64, MsbOption::MAYBE_ZERO, false)?;
    Ok(Asn1Integer::from_bn(&serial)?)
}
#[instrument]
fn generate_key() -> Result<PKey<Private>, Error> {
#[instrument(skip_all)]
pub fn generate_key() -> Result<PKey<Private>, Error> {
let new_key = EcKey::generate(EC_GROUP.as_ref())?;
let key = PKey::from_ec_key(new_key)?;
Ok(key)
}
#[instrument]
fn make_root_cert(root_key: &PKey<Private>, server_id: &str) -> Result<X509, Error> {
#[instrument(skip_all)]
pub fn make_root_cert(root_key: &PKey<Private>, hostname: &Hostname) -> Result<X509, Error> {
let mut builder = X509Builder::new()?;
builder.set_version(CERTIFICATE_VERSION)?;
@@ -342,8 +171,7 @@ fn make_root_cert(root_key: &PKey<Private>, server_id: &str) -> Result<X509, Err
builder.set_serial_number(&*rand_serial()?)?;
let mut subject_name_builder = X509NameBuilder::new()?;
subject_name_builder
.append_entry_by_text("CN", &format!("Embassy Local Root CA ({})", server_id))?;
subject_name_builder.append_entry_by_text("CN", &format!("{} Local Root CA", &*hostname.0))?;
subject_name_builder.append_entry_by_text("O", "Start9")?;
subject_name_builder.append_entry_by_text("OU", "Embassy")?;
let subject_name = subject_name_builder.build();
@@ -380,8 +208,8 @@ fn make_root_cert(root_key: &PKey<Private>, server_id: &str) -> Result<X509, Err
let cert = builder.build();
Ok(cert)
}
#[instrument]
fn make_int_cert(
#[instrument(skip_all)]
pub fn make_int_cert(
signer: (&PKey<Private>, &X509),
applicant: &PKey<Private>,
) -> Result<X509, Error> {
@@ -442,15 +270,86 @@ fn make_int_cert(
Ok(cert)
}
#[instrument]
fn make_leaf_cert(
/// A DNS name destined for a certificate SAN list, optionally accompanied by
/// its `*.` wildcard twin when rendered.
#[derive(Debug, PartialEq, Eq, PartialOrd, Ord)]
pub enum MaybeWildcard {
    WithWildcard(String),
    WithoutWildcard(String),
}
impl MaybeWildcard {
    /// The bare domain, without any wildcard decoration.
    pub fn as_str(&self) -> &str {
        // Both variants carry the same payload shape, so a single or-pattern
        // binding covers the whole enum.
        let (MaybeWildcard::WithWildcard(s) | MaybeWildcard::WithoutWildcard(s)) = self;
        s.as_str()
    }
}
impl std::fmt::Display for MaybeWildcard {
    // Renders openssl SAN-config syntax: `DNS:dom`, plus `DNS:*.dom` for wildcards.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            MaybeWildcard::WithWildcard(dns) => write!(f, "DNS:{dns},DNS:*.{dns}"),
            MaybeWildcard::WithoutWildcard(dns) => write!(f, "DNS:{dns}"),
        }
    }
}
#[derive(Debug)]
pub struct SANInfo {
    // DNS entries (some with wildcard twins) for the certificate's SAN extension.
    pub dns: BTreeSet<MaybeWildcard>,
    // Literal IP addresses to include alongside the DNS names.
    pub ips: BTreeSet<IpAddr>,
}
impl SANInfo {
    // Builds the SAN name set for `key`.
    // NOTE(review): depends on `Key::interface` / `local_address` / `tor_address`
    // defined elsewhere — a key tied to a package interface appears to get
    // per-interface `.embassy`/local names, while the bare server key gets the
    // host's own names plus `localhost`; confirm against `Key`'s definition.
    pub fn new(key: &Key, hostname: &Hostname, ips: BTreeSet<IpAddr>) -> Self {
        let mut dns = BTreeSet::new();
        if let Some((id, _)) = key.interface() {
            dns.insert(MaybeWildcard::WithWildcard(format!("{id}.embassy")));
            dns.insert(MaybeWildcard::WithWildcard(key.local_address().to_string()));
        } else {
            dns.insert(MaybeWildcard::WithoutWildcard("embassy".to_owned()));
            dns.insert(MaybeWildcard::WithWildcard(hostname.local_domain_name()));
            dns.insert(MaybeWildcard::WithoutWildcard(hostname.no_dot_host_name()));
            dns.insert(MaybeWildcard::WithoutWildcard("localhost".to_owned()));
        }
        // Every key, interface-bound or not, also gets its onion address.
        dns.insert(MaybeWildcard::WithWildcard(key.tor_address().to_string()));
        Self { dns, ips }
    }
}
impl std::fmt::Display for SANInfo {
    // Renders the full comma-separated SAN config string: DNS entries, then IPs.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // `written` tracks whether a separator comma is needed before the next entry.
        let mut written = false;
        for dns in &self.dns {
            if written {
                write!(f, ",")?;
            }
            written = true;
            write!(f, "{dns}")?;
        }
        for ip in &self.ips {
            if written {
                write!(f, ",")?;
            }
            written = true;
            write!(f, "IP:{ip}")?;
        }
        Ok(())
    }
}
#[instrument(skip_all)]
pub fn make_leaf_cert(
signer: (&PKey<Private>, &X509),
applicant: (&PKey<Private>, &str, &PackageId),
applicant: (&PKey<Private>, &SANInfo),
) -> Result<X509, Error> {
let mut builder = X509Builder::new()?;
builder.set_version(CERTIFICATE_VERSION)?;
let embargo = Asn1Time::days_from_now(0)?;
let embargo = Asn1Time::from_unix(
SystemTime::now()
.duration_since(UNIX_EPOCH)
.map(|d| d.as_secs() as i64)
.or_else(|_| UNIX_EPOCH.elapsed().map(|d| -(d.as_secs() as i64)))
.unwrap_or_default()
- 86400,
)?;
builder.set_not_before(&embargo)?;
// Google Apple and Mozilla reject certificate horizons longer than 397 days
@@ -461,7 +360,15 @@ fn make_leaf_cert(
builder.set_serial_number(&*rand_serial()?)?;
let mut subject_name_builder = X509NameBuilder::new()?;
subject_name_builder.append_entry_by_text("CN", &format!("{}.local", &applicant.1))?;
subject_name_builder.append_entry_by_text(
"CN",
applicant
.1
.dns
.first()
.map(MaybeWildcard::as_str)
.unwrap_or("localhost"),
)?;
subject_name_builder.append_entry_by_text("O", "Start9")?;
subject_name_builder.append_entry_by_text("OU", "Embassy")?;
let subject_name = subject_name_builder.build();
@@ -493,15 +400,9 @@ fn make_leaf_cert(
"critical,digitalSignature,keyEncipherment",
)?;
let subject_alt_name = X509Extension::new_nid(
Some(&cfg),
Some(&ctx),
Nid::SUBJECT_ALT_NAME,
&format!(
"DNS:{}.local,DNS:*.{}.local,DNS:{}.onion,DNS:*.{}.onion,DNS:{}.embassy,DNS:*.{}.embassy",
&applicant.1, &applicant.1, &applicant.1, &applicant.1, &applicant.2, &applicant.2,
),
)?;
let san_string = applicant.1.to_string();
let subject_alt_name =
X509Extension::new_nid(Some(&cfg), Some(&ctx), Nid::SUBJECT_ALT_NAME, &san_string)?;
builder.append_extension(subject_key_identifier)?;
builder.append_extension(authority_key_identifier)?;
builder.append_extension(subject_alt_name)?;

View File

@@ -1,26 +1,38 @@
use std::fs::Metadata;
use std::path::{Path, PathBuf};
use std::path::Path;
use std::sync::Arc;
use std::time::UNIX_EPOCH;
use async_compression::tokio::bufread::BrotliEncoder;
use async_compression::tokio::bufread::GzipEncoder;
use color_eyre::eyre::eyre;
use digest::Digest;
use futures::FutureExt;
use http::header::ACCEPT_ENCODING;
use http::header::CONTENT_ENCODING;
use http::request::Parts as RequestParts;
use http::response::Builder;
use hyper::{Body, Method, Request, Response, StatusCode};
use openssl::hash::MessageDigest;
use openssl::x509::X509;
use rpc_toolkit::rpc_handler;
use tokio::fs::File;
use tokio_util::codec::{BytesCodec, FramedRead};
use tokio::io::BufReader;
use tokio_util::io::ReaderStream;
use crate::context::{DiagnosticContext, InstallContext, RpcContext, SetupContext};
use crate::core::rpc_continuations::RequestGuid;
use crate::db::subscribe;
use crate::install::PKG_PUBLIC_DIR;
use crate::middleware::auth::HasValidSession;
use crate::middleware::auth::{auth as auth_middleware, HasValidSession};
use crate::middleware::cors::cors;
use crate::middleware::db::db as db_middleware;
use crate::middleware::diagnostic::diagnostic as diagnostic_middleware;
use crate::net::HttpHandler;
use crate::{diagnostic_api, install_api, main_api, setup_api, Error, ErrorKind, ResultExt};
static NOT_FOUND: &[u8] = b"Not Found";
static METHOD_NOT_ALLOWED: &[u8] = b"Method Not Allowed";
static NOT_AUTHORIZED: &[u8] = b"Not Authorized";
pub const MAIN_UI_WWW_DIR: &str = "/var/www/html/main";
@@ -48,8 +60,14 @@ pub async fn setup_ui_file_router(ctx: SetupContext) -> Result<HttpHandler, Erro
async move {
let res = match req.uri().path() {
path if path.starts_with("/rpc/") => {
let rpc_handler =
rpc_handler!({command: setup_api, context: ctx, status: status_fn});
let rpc_handler = rpc_handler!({
command: setup_api,
context: ctx,
status: status_fn,
middleware: [
cors,
]
});
rpc_handler(req)
.await
@@ -76,8 +94,15 @@ pub async fn diag_ui_file_router(ctx: DiagnosticContext) -> Result<HttpHandler,
async move {
let res = match req.uri().path() {
path if path.starts_with("/rpc/") => {
let rpc_handler =
rpc_handler!({command: diagnostic_api, context: ctx, status: status_fn});
let rpc_handler = rpc_handler!({
command: diagnostic_api,
context: ctx,
status: status_fn,
middleware: [
cors,
diagnostic_middleware,
]
});
rpc_handler(req)
.await
@@ -104,8 +129,14 @@ pub async fn install_ui_file_router(ctx: InstallContext) -> Result<HttpHandler,
async move {
let res = match req.uri().path() {
path if path.starts_with("/rpc/") => {
let rpc_handler =
rpc_handler!({command: install_api, context: ctx, status: status_fn});
let rpc_handler = rpc_handler!({
command: install_api,
context: ctx,
status: status_fn,
middleware: [
cors,
]
});
rpc_handler(req)
.await
@@ -132,8 +163,18 @@ pub async fn main_ui_server_router(ctx: RpcContext) -> Result<HttpHandler, Error
async move {
let res = match req.uri().path() {
path if path.starts_with("/rpc/") => {
let rpc_handler =
rpc_handler!({command: main_api, context: ctx, status: status_fn});
let auth_middleware = auth_middleware(ctx.clone());
let db_middleware = db_middleware(ctx.clone());
let rpc_handler = rpc_handler!({
command: main_api,
context: ctx,
status: status_fn,
middleware: [
cors,
auth_middleware,
db_middleware,
]
});
rpc_handler(req)
.await
@@ -193,41 +234,41 @@ async fn alt_ui(req: Request<Body>, ui_mode: UiMode) -> Result<Response<Body>, E
};
let (request_parts, _body) = req.into_parts();
match request_parts.uri.path() {
"/" => {
let full_path = PathBuf::from(selected_root_dir).join("index.html");
let accept_encoding = request_parts
.headers
.get_all(ACCEPT_ENCODING)
.into_iter()
.filter_map(|h| h.to_str().ok())
.flat_map(|s| s.split(","))
.filter_map(|s| s.split(";").next())
.map(|s| s.trim())
.collect::<Vec<_>>();
match &request_parts.method {
&Method::GET => {
let uri_path = request_parts
.uri
.path()
.strip_prefix('/')
.unwrap_or(request_parts.uri.path());
file_send(full_path).await
}
_ => {
match (
request_parts.method,
request_parts
.uri
.path()
.strip_prefix('/')
.unwrap_or(request_parts.uri.path())
.split_once('/'),
) {
(Method::GET, None) => {
let uri_path = request_parts
.uri
.path()
.strip_prefix('/')
.unwrap_or(request_parts.uri.path());
let full_path = PathBuf::from(selected_root_dir).join(uri_path);
file_send(full_path).await
}
(Method::GET, Some((dir, file))) => {
let full_path = PathBuf::from(selected_root_dir).join(dir).join(file);
file_send(full_path).await
}
_ => Ok(not_found()),
}
let full_path = Path::new(selected_root_dir).join(uri_path);
file_send(
&request_parts,
if tokio::fs::metadata(&full_path)
.await
.ok()
.map(|f| f.is_file())
.unwrap_or(false)
{
full_path
} else {
Path::new(selected_root_dir).join("index.html")
},
&accept_encoding,
)
.await
}
_ => Ok(method_not_allowed()),
}
}
@@ -235,102 +276,79 @@ async fn main_embassy_ui(req: Request<Body>, ctx: RpcContext) -> Result<Response
let selected_root_dir = MAIN_UI_WWW_DIR;
let (request_parts, _body) = req.into_parts();
match request_parts.uri.path() {
"/" => {
let full_path = PathBuf::from(selected_root_dir).join("index.html");
file_send(full_path).await
}
_ => {
let valid_session = HasValidSession::from_request_parts(&request_parts, &ctx).await;
match valid_session {
Ok(_valid) => {
match (
request_parts.method,
request_parts
.uri
.path()
.strip_prefix('/')
.unwrap_or(request_parts.uri.path())
.split_once('/'),
) {
(Method::GET, Some(("public", path))) => {
let sub_path = Path::new(path);
if let Ok(rest) = sub_path.strip_prefix("package-data") {
file_send(ctx.datadir.join(PKG_PUBLIC_DIR).join(rest)).await
} else if let Ok(rest) = sub_path.strip_prefix("eos") {
match rest.to_str() {
Some("local.crt") => {
file_send(crate::net::ssl::ROOT_CA_STATIC_PATH).await
}
None => Ok(bad_request()),
_ => Ok(not_found()),
}
} else {
Ok(not_found())
}
let accept_encoding = request_parts
.headers
.get_all(ACCEPT_ENCODING)
.into_iter()
.filter_map(|h| h.to_str().ok())
.flat_map(|s| s.split(","))
.filter_map(|s| s.split(";").next())
.map(|s| s.trim())
.collect::<Vec<_>>();
match (
&request_parts.method,
request_parts
.uri
.path()
.strip_prefix('/')
.unwrap_or(request_parts.uri.path())
.split_once('/'),
) {
(&Method::GET, Some(("public", path))) => {
match HasValidSession::from_request_parts(&request_parts, &ctx).await {
Ok(_) => {
let sub_path = Path::new(path);
if let Ok(rest) = sub_path.strip_prefix("package-data") {
file_send(
&request_parts,
ctx.datadir.join(PKG_PUBLIC_DIR).join(rest),
&accept_encoding,
)
.await
} else if let Ok(rest) = sub_path.strip_prefix("eos") {
match rest.to_str() {
Some("local.crt") => cert_send(&ctx.account.read().await.root_ca_cert),
None => Ok(bad_request()),
_ => Ok(not_found()),
}
(Method::GET, Some(("eos", "local.crt"))) => {
file_send(PathBuf::from(crate::net::ssl::ROOT_CA_STATIC_PATH)).await
}
(Method::GET, None) => {
let uri_path = request_parts
.uri
.path()
.strip_prefix('/')
.unwrap_or(request_parts.uri.path());
let full_path = PathBuf::from(selected_root_dir).join(uri_path);
file_send(full_path).await
}
(Method::GET, Some((dir, file))) => {
let full_path = PathBuf::from(selected_root_dir).join(dir).join(file);
file_send(full_path).await
}
_ => Ok(not_found()),
}
}
Err(err) => {
match (
request_parts.method,
request_parts
.uri
.path()
.strip_prefix('/')
.unwrap_or(request_parts.uri.path())
.split_once('/'),
) {
(Method::GET, Some(("public", _path))) => {
un_authorized(err, request_parts.uri.path())
}
(Method::GET, Some(("eos", "local.crt"))) => {
un_authorized(err, request_parts.uri.path())
}
(Method::GET, None) => {
let uri_path = request_parts
.uri
.path()
.strip_prefix('/')
.unwrap_or(request_parts.uri.path());
let full_path = PathBuf::from(selected_root_dir).join(uri_path);
file_send(full_path).await
}
(Method::GET, Some((dir, file))) => {
let full_path = PathBuf::from(selected_root_dir).join(dir).join(file);
file_send(full_path).await
}
_ => Ok(not_found()),
} else {
Ok(not_found())
}
}
Err(e) => un_authorized(e, &format!("public/{path}")),
}
}
(&Method::GET, Some(("eos", "local.crt"))) => {
match HasValidSession::from_request_parts(&request_parts, &ctx).await {
Ok(_) => cert_send(&ctx.account.read().await.root_ca_cert),
Err(e) => un_authorized(e, "eos/local.crt"),
}
}
(&Method::GET, _) => {
let uri_path = request_parts
.uri
.path()
.strip_prefix('/')
.unwrap_or(request_parts.uri.path());
let full_path = Path::new(selected_root_dir).join(uri_path);
file_send(
&request_parts,
if tokio::fs::metadata(&full_path)
.await
.ok()
.map(|f| f.is_file())
.unwrap_or(false)
{
full_path
} else {
Path::new(selected_root_dir).join("index.html")
},
&accept_encoding,
)
.await
}
_ => Ok(method_not_allowed()),
}
}
@@ -351,6 +369,14 @@ fn not_found() -> Response<Body> {
.unwrap()
}
/// HTTP status code 405
fn method_not_allowed() -> Response<Body> {
Response::builder()
.status(StatusCode::METHOD_NOT_ALLOWED)
.body(METHOD_NOT_ALLOWED.into())
.unwrap()
}
fn server_error(err: Error) -> Response<Body> {
Response::builder()
.status(StatusCode::INTERNAL_SERVER_ERROR)
@@ -365,45 +391,87 @@ fn bad_request() -> Response<Body> {
.unwrap()
}
async fn file_send(path: impl AsRef<Path>) -> Result<Response<Body>, Error> {
// Serves `cert` as a PEM download, with an ETag derived from the cert's
// SHA-256 digest so clients can cache it until the cert changes.
fn cert_send(cert: &X509) -> Result<Response<Body>, Error> {
    let pem = cert.to_pem()?;
    Response::builder()
        .status(StatusCode::OK)
        .header(
            http::header::ETAG,
            // Lowercase unpadded base32 of the cert digest.
            base32::encode(
                base32::Alphabet::RFC4648 { padding: false },
                &*cert.digest(MessageDigest::sha256())?,
            )
            .to_lowercase(),
        )
        .header(http::header::CONTENT_TYPE, "application/x-pem-file")
        .header(http::header::CONTENT_LENGTH, pem.len())
        .body(Body::from(pem))
        .with_kind(ErrorKind::Network)
}
async fn file_send(
req: &RequestParts,
path: impl AsRef<Path>,
accept_encoding: &[&str],
) -> Result<Response<Body>, Error> {
// Serve a file by asynchronously reading it by chunks using tokio-util crate.
let path = path.as_ref();
if let Ok(file) = File::open(path).await {
let metadata = file.metadata().await.with_kind(ErrorKind::Filesystem)?;
let file = File::open(path)
.await
.with_ctx(|_| (ErrorKind::Filesystem, path.display().to_string()))?;
let metadata = file
.metadata()
.await
.with_ctx(|_| (ErrorKind::Filesystem, path.display().to_string()))?;
match IsNonEmptyFile::new(&metadata, path) {
Some(a) => a,
None => return Ok(not_found()),
let e_tag = e_tag(path, &metadata)?;
let mut builder = Response::builder();
builder = with_content_type(path, builder);
builder = builder.header(http::header::ETAG, &e_tag);
builder = builder.header(
http::header::CACHE_CONTROL,
"public, max-age=21000000, immutable",
);
if req
.headers
.get_all(http::header::CONNECTION)
.iter()
.flat_map(|s| s.to_str().ok())
.flat_map(|s| s.split(","))
.any(|s| s.trim() == "keep-alive")
{
builder = builder.header(http::header::CONNECTION, "keep-alive");
}
if req
.headers
.get("if-none-match")
.and_then(|h| h.to_str().ok())
== Some(e_tag.as_str())
{
builder = builder.status(StatusCode::NOT_MODIFIED);
builder.body(Body::empty())
} else {
let body = if false && accept_encoding.contains(&"br") && metadata.len() > u16::MAX as u64 {
builder = builder.header(CONTENT_ENCODING, "br");
Body::wrap_stream(ReaderStream::new(BrotliEncoder::new(BufReader::new(file))))
} else if accept_encoding.contains(&"gzip") && metadata.len() > u16::MAX as u64 {
builder = builder.header(CONTENT_ENCODING, "gzip");
Body::wrap_stream(ReaderStream::new(GzipEncoder::new(BufReader::new(file))))
} else {
builder = with_content_length(&metadata, builder);
Body::wrap_stream(ReaderStream::new(file))
};
let mut builder = Response::builder().status(StatusCode::OK);
builder = with_e_tag(path, &metadata, builder)?;
builder = with_content_type(path, builder);
builder = with_content_length(&metadata, builder);
let stream = FramedRead::new(file, BytesCodec::new());
let body = Body::wrap_stream(stream);
return builder.body(body).with_kind(ErrorKind::Network);
builder.body(body)
}
tracing::debug!("File not found: {:?}", path);
Ok(not_found())
.with_kind(ErrorKind::Network)
}
// Zero-sized proof token: constructing one asserts the metadata describes a
// regular, non-empty file.
struct IsNonEmptyFile(());
impl IsNonEmptyFile {
    // Returns `Some` only for a regular file with length > 0; logs and returns
    // `None` otherwise (directories, empty files, special files).
    fn new(metadata: &Metadata, path: &Path) -> Option<Self> {
        let length = metadata.len();
        if !metadata.is_file() || length == 0 {
            tracing::debug!("File is empty: {:?}", path);
            return None;
        }
        Some(Self(()))
    }
}
fn with_e_tag(path: &Path, metadata: &Metadata, builder: Builder) -> Result<Builder, Error> {
fn e_tag(path: &Path, metadata: &Metadata) -> Result<String, Error> {
let modified = metadata.modified().with_kind(ErrorKind::Filesystem)?;
let mut hasher = sha2::Sha256::new();
hasher.update(format!("{:?}", path).as_bytes());
@@ -418,11 +486,12 @@ fn with_e_tag(path: &Path, metadata: &Metadata, builder: Builder) -> Result<Buil
.as_bytes(),
);
let res = hasher.finalize();
Ok(builder.header(
"ETag",
base32::encode(base32::Alphabet::RFC4648 { padding: false }, res.as_slice()).to_lowercase(),
Ok(format!(
"\"{}\"",
base32::encode(base32::Alphabet::RFC4648 { padding: false }, res.as_slice()).to_lowercase()
))
}
///https://en.wikipedia.org/wiki/Media_type
fn with_content_type(path: &Path, builder: Builder) -> Builder {
let content_type = match path.extension() {
@@ -449,7 +518,7 @@ fn with_content_type(path: &Path, builder: Builder) -> Builder {
},
None => "text/plain",
};
builder.header("Content-Type", content_type)
builder.header(http::header::CONTENT_TYPE, content_type)
}
fn with_content_length(metadata: &Metadata, builder: Builder) -> Builder {

View File

@@ -1,30 +1,25 @@
use std::collections::BTreeMap;
use std::net::{Ipv4Addr, SocketAddr};
use std::time::Duration;
use std::net::SocketAddr;
use std::sync::{Arc, Weak};
use clap::ArgMatches;
use color_eyre::eyre::eyre;
use futures::future::BoxFuture;
use futures::FutureExt;
use reqwest::Client;
use rpc_toolkit::command;
use serde_json::json;
use sqlx::{Executor, Postgres};
use tokio::net::TcpStream;
use tokio::sync::Mutex;
use torut::control::{AsyncEvent, AuthenticatedConn, ConnError};
use torut::onion::{OnionAddressV3, TorSecretKeyV3};
use tracing::instrument;
use super::interface::{InterfaceId, TorConfig};
use crate::context::RpcContext;
use crate::s9pk::manifest::PackageId;
use crate::util::serde::{display_serializable, IoFormat};
use crate::{Error, ErrorKind, ResultExt as _};
#[test]
fn random_key() {
println!("x'{}'", hex::encode(TorSecretKeyV3::generate().as_bytes()));
println!("x'{}'", hex::encode(rand::random::<[u8; 32]>()));
}
#[command(subcommands(list_services))]
@@ -57,68 +52,29 @@ pub async fn list_services(
ctx.net_controller.tor.list_services().await
}
/// Loads the OS's Tor hidden-service secret key from the `account` table.
///
/// The stored blob must contain at least 64 bytes (the expanded ed25519
/// secret key); anything shorter is reported as a database error rather
/// than panicking in `clone_from_slice`.
#[instrument(skip(secrets))]
pub async fn os_key<Ex>(secrets: &mut Ex) -> Result<TorSecretKeyV3, Error>
where
    for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
    let key = sqlx::query!("SELECT tor_key FROM account")
        .fetch_one(secrets)
        .await?
        .tor_key;
    // Copy exactly the first 64 bytes; `get(0..64)` returns None (-> error)
    // when the column is shorter than expected.
    let mut buf = [0; 64];
    buf.clone_from_slice(
        key.get(0..64).ok_or_else(|| {
            Error::new(eyre!("Invalid Tor Key Length"), crate::ErrorKind::Database)
        })?,
    );
    Ok(buf.into())
}
fn event_handler(_event: AsyncEvent<'static>) -> BoxFuture<'static, Result<(), ConnError>> {
async move { Ok(()) }.boxed()
}
pub struct TorController(Mutex<TorControllerInner>);
impl TorController {
pub async fn init(
embassyd_addr: SocketAddr,
embassyd_tor_key: TorSecretKeyV3,
tor_control: SocketAddr,
) -> Result<Self, Error> {
pub async fn init(tor_control: SocketAddr) -> Result<Self, Error> {
Ok(TorController(Mutex::new(
TorControllerInner::init(embassyd_addr, embassyd_tor_key, tor_control).await?,
TorControllerInner::init(tor_control).await?,
)))
}
pub async fn add<I: IntoIterator<Item = (InterfaceId, TorConfig, TorSecretKeyV3)> + Clone>(
pub async fn add(
&self,
pkg_id: &PackageId,
ip: Ipv4Addr,
interfaces: I,
) -> Result<(), Error> {
self.0.lock().await.add(pkg_id, ip, interfaces).await
key: &TorSecretKeyV3,
external: u16,
target: SocketAddr,
) -> Result<Arc<()>, Error> {
self.0.lock().await.add(key, external, target).await
}
pub async fn remove<I: IntoIterator<Item = InterfaceId> + Clone>(
&self,
pkg_id: &PackageId,
interfaces: I,
) -> Result<(), Error> {
self.0.lock().await.remove(pkg_id, interfaces).await
}
pub async fn replace(&self) -> Result<bool, Error> {
self.0.lock().await.replace().await
}
pub async fn embassyd_tor_key(&self) -> TorSecretKeyV3 {
self.0.lock().await.embassyd_tor_key.clone()
}
pub async fn embassyd_onion(&self) -> OnionAddressV3 {
self.0.lock().await.embassyd_onion()
pub async fn gc(&self, key: &TorSecretKeyV3, external: u16) -> Result<(), Error> {
self.0.lock().await.gc(key, external).await
}
pub async fn list_services(&self) -> Result<Vec<OnionAddressV3>, Error> {
@@ -131,92 +87,95 @@ type AuthenticatedConnection = AuthenticatedConn<
fn(AsyncEvent<'static>) -> BoxFuture<'static, Result<(), ConnError>>,
>;
#[derive(Clone, Debug, PartialEq, Eq)]
struct HiddenServiceConfig {
ip: Ipv4Addr,
cfg: TorConfig,
}
pub struct TorControllerInner {
embassyd_addr: SocketAddr,
embassyd_tor_key: TorSecretKeyV3,
control_addr: SocketAddr,
connection: Option<AuthenticatedConnection>,
services: BTreeMap<(PackageId, InterfaceId), (TorSecretKeyV3, TorConfig, Ipv4Addr)>,
connection: AuthenticatedConnection,
services: BTreeMap<String, BTreeMap<u16, BTreeMap<SocketAddr, Weak<()>>>>,
}
impl TorControllerInner {
#[instrument(skip(self, interfaces))]
async fn add<'a, I: IntoIterator<Item = (InterfaceId, TorConfig, TorSecretKeyV3)>>(
#[instrument(skip_all)]
async fn add(
&mut self,
pkg_id: &PackageId,
ip: Ipv4Addr,
interfaces: I,
) -> Result<(), Error> {
for (interface_id, tor_cfg, key) in interfaces {
let id = (pkg_id.clone(), interface_id);
match self.services.get(&id) {
Some(k) if k.0 != key => {
self.remove(pkg_id, std::iter::once(id.1.clone())).await?;
key: &TorSecretKeyV3,
external: u16,
target: SocketAddr,
) -> Result<Arc<()>, Error> {
let mut rm_res = Ok(());
let onion_base = key
.public()
.get_onion_address()
.get_address_without_dot_onion();
let mut service = if let Some(service) = self.services.remove(&onion_base) {
rm_res = self.connection.del_onion(&onion_base).await;
service
} else {
BTreeMap::new()
};
let mut binding = service.remove(&external).unwrap_or_default();
let rc = if let Some(rc) = Weak::upgrade(&binding.remove(&target).unwrap_or_default()) {
rc
} else {
Arc::new(())
};
binding.insert(target, Arc::downgrade(&rc));
service.insert(external, binding);
let bindings = service
.iter()
.flat_map(|(ext, int)| {
int.iter()
.find(|(_, rc)| rc.strong_count() > 0)
.map(|(addr, _)| (*ext, SocketAddr::from(*addr)))
})
.collect::<Vec<_>>();
self.services.insert(onion_base, service);
rm_res?;
self.connection
.add_onion_v3(key, false, false, false, None, &mut bindings.iter())
.await?;
Ok(rc)
}
#[instrument(skip_all)]
async fn gc(&mut self, key: &TorSecretKeyV3, external: u16) -> Result<(), Error> {
let onion_base = key
.public()
.get_onion_address()
.get_address_without_dot_onion();
if let Some(mut service) = self.services.remove(&onion_base) {
if let Some(mut binding) = service.remove(&external) {
binding = binding
.into_iter()
.filter(|(_, rc)| rc.strong_count() > 0)
.collect();
if !binding.is_empty() {
service.insert(external, binding);
}
Some(_) => continue,
None => (),
}
self.connection
.as_mut()
.ok_or_else(|| {
Error::new(eyre!("Missing Tor Control Connection"), ErrorKind::Unknown)
})?
.add_onion_v3(
&key,
false,
false,
false,
None,
&mut tor_cfg
.port_mapping
.iter()
.map(|(external, internal)| {
(external.0, SocketAddr::from((ip, internal.0)))
})
.collect::<Vec<_>>()
.iter(),
)
.await?;
self.services.insert(id, (key, tor_cfg, ip));
}
Ok(())
}
#[instrument(skip(self, interfaces))]
async fn remove<I: IntoIterator<Item = InterfaceId>>(
&mut self,
pkg_id: &PackageId,
interfaces: I,
) -> Result<(), Error> {
for interface_id in interfaces {
if let Some((key, _cfg, _ip)) = self.services.remove(&(pkg_id.clone(), interface_id)) {
let rm_res = self.connection.del_onion(&onion_base).await;
if !service.is_empty() {
let bindings = service
.iter()
.flat_map(|(ext, int)| {
int.iter()
.find(|(_, rc)| rc.strong_count() > 0)
.map(|(addr, _)| (*ext, SocketAddr::from(*addr)))
})
.collect::<Vec<_>>();
self.services.insert(onion_base, service);
rm_res?;
self.connection
.as_mut()
.ok_or_else(|| {
Error::new(eyre!("Missing Tor Control Connection"), ErrorKind::Tor)
})?
.del_onion(
&key.public()
.get_onion_address()
.get_address_without_dot_onion(),
)
.add_onion_v3(&key, false, false, false, None, &mut bindings.iter())
.await?;
} else {
rm_res?;
}
}
Ok(())
}
#[instrument]
async fn init(
embassyd_addr: SocketAddr,
embassyd_tor_key: TorSecretKeyV3,
tor_control: SocketAddr,
) -> Result<Self, Error> {
#[instrument(skip_all)]
async fn init(tor_control: SocketAddr) -> Result<Self, Error> {
let mut conn = torut::control::UnauthenticatedConn::new(
TcpStream::connect(tor_control).await?, // TODO
);
@@ -230,125 +189,16 @@ impl TorControllerInner {
let mut connection: AuthenticatedConnection = conn.into_authenticated().await;
connection.set_async_event_handler(Some(event_handler));
let mut controller = TorControllerInner {
embassyd_addr,
embassyd_tor_key,
Ok(Self {
control_addr: tor_control,
connection: Some(connection),
connection,
services: BTreeMap::new(),
};
controller.add_embassyd_onion().await?;
Ok(controller)
})
}
#[instrument(skip(self))]
async fn add_embassyd_onion(&mut self) -> Result<(), Error> {
tracing::info!(
"Registering Main Tor Service: {}",
self.embassyd_tor_key.public().get_onion_address()
);
self.connection
.as_mut()
.ok_or_else(|| Error::new(eyre!("Missing Tor Control Connection"), ErrorKind::Tor))?
.add_onion_v3(
&self.embassyd_tor_key,
false,
false,
false,
None,
&mut std::iter::once(&(self.embassyd_addr.port(), self.embassyd_addr)),
)
.await?;
tracing::info!(
"Registered Main Tor Service: {}",
self.embassyd_tor_key.public().get_onion_address()
);
Ok(())
}
#[instrument(skip(self))]
async fn replace(&mut self) -> Result<bool, Error> {
let connection = self.connection.take();
let uptime = if let Some(mut c) = connection {
// this should be unreachable because the only time when this should be none is for the duration of tor's
// restart lower down in this method, which is held behind a Mutex
let uptime = c.get_info("uptime").await?.parse::<u64>()?;
// we never want to restart the tor daemon if it hasn't been up for at least a half hour
if uptime < 1800 {
self.connection = Some(c); // put it back
return Ok(false);
}
// when connection closes below, tor daemon is restarted
c.take_ownership().await?;
// this should close the connection
drop(c);
Some(uptime)
} else {
None
};
// attempt to reconnect to the control socket, not clear how long this should take
let mut new_connection: AuthenticatedConnection;
loop {
match TcpStream::connect(self.control_addr).await {
Ok(stream) => {
let mut new_conn = torut::control::UnauthenticatedConn::new(stream);
let auth = new_conn
.load_protocol_info()
.await?
.make_auth_data()?
.ok_or_else(|| eyre!("Cookie Auth Not Available"))
.with_kind(crate::ErrorKind::Tor)?;
new_conn.authenticate(&auth).await?;
new_connection = new_conn.into_authenticated().await;
let uptime_new = new_connection.get_info("uptime").await?.parse::<u64>()?;
// if the new uptime exceeds the one we got at the beginning, it's the same tor daemon, do not proceed
match uptime {
Some(uptime) if uptime_new > uptime => (),
_ => {
new_connection.set_async_event_handler(Some(event_handler));
break;
}
}
}
Err(e) => {
tracing::info!("Failed to reconnect to tor control socket: {}", e);
tracing::info!("Trying again in one second");
}
}
tokio::time::sleep(Duration::from_secs(1)).await;
}
// replace the connection object here on the new copy of the tor daemon
self.connection.replace(new_connection);
// swap empty map for owned old service map
let old_services = std::mem::take(&mut self.services);
// re add all of the services on the new control socket
for ((package_id, interface_id), (tor_key, tor_cfg, ipv4)) in old_services {
self.add(
&package_id,
ipv4,
std::iter::once((interface_id, tor_cfg, tor_key)),
)
.await?;
}
// add embassyd hidden service again
self.add_embassyd_onion().await?;
Ok(true)
}
fn embassyd_onion(&self) -> OnionAddressV3 {
self.embassyd_tor_key.public().get_onion_address()
}
#[instrument(skip(self))]
#[instrument(skip_all)]
async fn list_services(&mut self) -> Result<Vec<OnionAddressV3>, Error> {
self.connection
.as_mut()
.ok_or_else(|| Error::new(eyre!("Missing Tor Control Connection"), ErrorKind::Tor))?
.get_info("onions/current")
.await?
.lines()
@@ -359,54 +209,6 @@ impl TorControllerInner {
}
}
pub async fn tor_health_check(client: &Client, tor_controller: &TorController) {
tracing::debug!("Attempting to self-check tor address");
let onion_addr = tor_controller.embassyd_onion().await;
let result = client
.post(format!("http://{}/rpc/v1", onion_addr))
.body(
json!({
"jsonrpc": "2.0",
"method": "echo",
"params": { "message": "Follow the orange rabbit" },
})
.to_string()
.into_bytes(),
)
.send()
.await;
if let Err(e) = result {
let mut num_attempt = 1;
tracing::error!("Unable to reach self over tor, we will retry now...");
tracing::error!("The first TOR error: {}", e);
loop {
tracing::debug!("TOR Reconnecting retry number: {num_attempt}");
match tor_controller.replace().await {
Ok(restarted) => {
if restarted {
tracing::error!("Tor has been recently restarted, refusing to restart again right now...");
}
break;
}
Err(e) => {
tracing::error!("TOR retry error: {}", e);
tracing::error!("Unable to restart tor on attempt {num_attempt}...Retrying");
num_attempt += 1;
continue;
}
}
}
} else {
tracing::debug!(
"Successfully verified main tor address liveness at {}",
onion_addr
)
}
}
#[tokio::test]
async fn test() {
let mut conn = torut::control::UnauthenticatedConn::new(
@@ -441,6 +243,15 @@ async fn test() {
)
.await
.unwrap();
connection
.del_onion(
&tor_key
.public()
.get_onion_address()
.get_address_without_dot_onion(),
)
.await
.unwrap();
connection
.add_onion_v3(
&tor_key,

123
backend/src/net/utils.rs Normal file
View File

@@ -0,0 +1,123 @@
use std::convert::Infallible;
use std::net::{Ipv4Addr, Ipv6Addr};
use std::path::Path;
use async_stream::try_stream;
use color_eyre::eyre::eyre;
use futures::stream::BoxStream;
use futures::{StreamExt, TryStreamExt};
use ipnet::{Ipv4Net, Ipv6Net};
use tokio::process::Command;
use crate::util::Invoke;
use crate::Error;
fn parse_iface_ip(output: &str) -> Result<Option<&str>, Error> {
let output = output.trim();
if output.is_empty() {
return Ok(None);
}
if let Some(ip) = output.split_ascii_whitespace().nth(3) {
Ok(Some(ip))
} else {
Err(Error::new(
eyre!("malformed output from `ip`"),
crate::ErrorKind::Network,
))
}
}
/// Looks up the IPv4 address and subnet of `iface` via the `ip` command.
///
/// Returns `Ok(None)` when the interface has no IPv4 address assigned.
pub async fn get_iface_ipv4_addr(iface: &str) -> Result<Option<(Ipv4Addr, Ipv4Net)>, Error> {
    let raw = String::from_utf8(
        Command::new("ip")
            .arg("-4")
            .arg("-o")
            .arg("addr")
            .arg("show")
            .arg(iface)
            .invoke(crate::ErrorKind::Network)
            .await?,
    )?;
    match parse_iface_ip(&raw)? {
        None => Ok(None),
        Some(cidr) => {
            // The field is CIDR notation ("a.b.c.d/prefix"): the bare address
            // is everything before the slash; the network parses from the
            // full string. `split` always yields at least one item, so the
            // unwrap cannot panic.
            let addr = cidr.split("/").next().unwrap().parse()?;
            let net = cidr.parse()?;
            Ok(Some((addr, net)))
        }
    }
}
/// Looks up the IPv6 address and subnet of `iface` via the `ip` command.
///
/// Returns `Ok(None)` when the interface has no IPv6 address assigned.
pub async fn get_iface_ipv6_addr(iface: &str) -> Result<Option<(Ipv6Addr, Ipv6Net)>, Error> {
    let raw = String::from_utf8(
        Command::new("ip")
            .arg("-6")
            .arg("-o")
            .arg("addr")
            .arg("show")
            .arg(iface)
            .invoke(crate::ErrorKind::Network)
            .await?,
    )?;
    match parse_iface_ip(&raw)? {
        None => Ok(None),
        Some(cidr) => {
            // Same shape as the IPv4 variant: address before the slash,
            // the whole "<addr>/<prefix>" string parses as the network.
            let addr = cidr.split("/").next().unwrap().parse()?;
            let net = cidr.parse()?;
            Ok(Some((addr, net)))
        }
    }
}
/// A network interface is considered physical when sysfs exposes a backing
/// device entry for it (`/sys/class/net/<iface>/device`).
pub async fn iface_is_physical(iface: &str) -> bool {
    let device_entry = Path::new("/sys/class/net").join(iface).join("device");
    tokio::fs::metadata(device_entry).await.is_ok()
}
/// A network interface is considered wireless when sysfs exposes a
/// `wireless` entry for it (`/sys/class/net/<iface>/wireless`).
pub async fn iface_is_wireless(iface: &str) -> bool {
    let wireless_entry = Path::new("/sys/class/net").join(iface).join("wireless");
    tokio::fs::metadata(wireless_entry).await.is_ok()
}
/// Streams the names of all network interfaces known to the kernel, read
/// from `/sys/class/net`.
///
/// Entries whose names are not valid UTF-8 are silently skipped; any I/O
/// error while reading the directory terminates the stream with `Err`.
pub fn list_interfaces() -> BoxStream<'static, Result<String, Error>> {
    try_stream! {
        let mut ifaces = tokio::fs::read_dir("/sys/class/net").await?;
        while let Some(iface) = ifaces.next_entry().await? {
            // `if let Ok(..)` instead of `.ok()` + `if let Some(..)`:
            // same behavior, idiomatic form (flagged by clippy).
            if let Ok(iface) = iface.file_name().into_string() {
                yield iface;
            }
        }
    }
    .boxed()
}
/// Returns the name of the first wireless interface found, or `None` when
/// the system has no wireless interface.
pub async fn find_wifi_iface() -> Result<Option<String>, Error> {
    let mut all = list_interfaces();
    loop {
        match all.try_next().await? {
            Some(name) if iface_is_wireless(&name).await => return Ok(Some(name)),
            Some(_) => continue,
            None => return Ok(None),
        }
    }
}
pub async fn find_eth_iface() -> Result<String, Error> {
let mut ifaces = list_interfaces();
while let Some(iface) = ifaces.try_next().await? {
if iface_is_physical(&iface).await && !iface_is_wireless(&iface).await {
return Ok(iface);
}
}
Err(Error::new(
eyre!("Could not detect ethernet interface"),
crate::ErrorKind::Network,
))
}
/// A [`hyper::server::accept::Accept`] implementation that yields exactly one
/// pre-established connection and then reports the accept stream as exhausted.
///
/// Used to drive a hyper server over a single already-accepted stream (e.g.
/// to answer one plain-HTTP request on a connection that failed the TLS
/// handshake in the vhost listener).
#[pin_project::pin_project]
pub struct SingleAccept<T>(Option<T>);
impl<T> SingleAccept<T> {
    /// Wraps an existing connection so hyper can serve it.
    pub fn new(conn: T) -> Self {
        Self(Some(conn))
    }
}
impl<T> hyper::server::accept::Accept for SingleAccept<T> {
    type Conn = T;
    type Error = Infallible;
    fn poll_accept(
        self: std::pin::Pin<&mut Self>,
        _cx: &mut std::task::Context<'_>,
    ) -> std::task::Poll<Option<Result<Self::Conn, Self::Error>>> {
        // First poll yields the stored connection; every subsequent poll
        // yields None (end of accept stream). Never returns Pending.
        std::task::Poll::Ready(self.project().0.take().map(Ok))
    }
}

329
backend/src/net/vhost.rs Normal file
View File

@@ -0,0 +1,329 @@
use std::collections::BTreeMap;
use std::convert::Infallible;
use std::net::{IpAddr, SocketAddr};
use std::str::FromStr;
use std::sync::{Arc, Weak};
use color_eyre::eyre::eyre;
use helpers::NonDetachingJoinHandle;
use http::{Response, Uri};
use hyper::service::{make_service_fn, service_fn};
use hyper::Body;
use models::ResultExt;
use tokio::net::{TcpListener, TcpStream};
use tokio::sync::{Mutex, RwLock};
use tokio_rustls::rustls::server::Acceptor;
use tokio_rustls::rustls::{RootCertStore, ServerConfig};
use tokio_rustls::{LazyConfigAcceptor, TlsConnector};
use crate::net::keys::Key;
use crate::net::ssl::SslManager;
use crate::net::utils::SingleAccept;
use crate::util::io::BackTrackingReader;
use crate::Error;
// not allowed: <=1024, >=32768, 5355, 5432, 9050, 6010, 9051, 5353
/// Routes incoming TLS connections on external ports to backend targets,
/// selected by SNI hostname. One `VHostServer` listener is kept per port.
pub struct VHostController {
    // Certificate manager used to obtain the cert chain for each target key.
    ssl: Arc<SslManager>,
    // Active listeners, keyed by external port.
    servers: Mutex<BTreeMap<u16, VHostServer>>,
}
impl VHostController {
    /// Creates a controller with no active listeners.
    pub fn new(ssl: Arc<SslManager>) -> Self {
        Self {
            ssl,
            servers: Mutex::new(BTreeMap::new()),
        }
    }
    /// Registers `target` as the backend for `hostname` on port `external`,
    /// binding a listener for that port if one is not already running.
    ///
    /// The returned `Arc` is a liveness handle: once every clone of it has
    /// been dropped, the binding becomes eligible for [`Self::gc`].
    pub async fn add(
        &self,
        key: Key,
        hostname: Option<String>,
        external: u16,
        target: SocketAddr,
        connect_ssl: bool,
    ) -> Result<Arc<()>, Error> {
        let mut servers = self.servers.lock().await;
        // Take the existing listener for this port, or bind a fresh one.
        let server = match servers.remove(&external) {
            Some(existing) => existing,
            None => VHostServer::new(external, self.ssl.clone()).await?,
        };
        let info = TargetInfo {
            addr: target,
            connect_ssl,
            key,
        };
        let registration = server.add(hostname, info).await;
        // The listener goes back into the map even when registration failed,
        // so a transient error does not tear down the whole port.
        servers.insert(external, server);
        registration
    }
    /// Drops stale bindings for `hostname` on `external`; when nothing
    /// remains bound to the port, its listener is shut down entirely.
    pub async fn gc(&self, hostname: Option<String>, external: u16) -> Result<(), Error> {
        let mut servers = self.servers.lock().await;
        match servers.remove(&external) {
            None => Ok(()),
            Some(server) => {
                server.gc(hostname).await?;
                // Only keep the listener alive while it still has bindings.
                if !server.is_empty().await? {
                    servers.insert(external, server);
                }
                Ok(())
            }
        }
    }
}
/// Describes one backend a virtual host forwards to.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord)]
struct TargetInfo {
    // Backend socket address the proxied bytes are sent to.
    addr: SocketAddr,
    // Whether to re-encrypt: connect to the backend over TLS instead of
    // forwarding plaintext after terminating the client's TLS.
    connect_ssl: bool,
    // Key identifying which certificate chain to present for this host.
    key: Key,
}
/// A single-port TLS virtual-host listener.
struct VHostServer {
    // SNI hostname (None = catch-all) -> registered targets and their
    // liveness handles. Held weakly: the accept task owns the strong Arc,
    // so if that task is gone, mutation methods report an error.
    mapping: Weak<RwLock<BTreeMap<Option<String>, BTreeMap<TargetInfo, Weak<()>>>>>,
    // Accept-loop task; owns the strong reference to `mapping`.
    _thread: NonDetachingJoinHandle<()>,
}
impl VHostServer {
    /// Binds `port` on all IPv4 interfaces and spawns the accept loop.
    ///
    /// The accept task holds the only strong `Arc` to the hostname->target
    /// mapping; the returned handle keeps a `Weak`, so dropping the server
    /// (aborting the task) tears the mapping down with it.
    async fn new(port: u16, ssl: Arc<SslManager>) -> Result<Self, Error> {
        // check if port allowed
        let listener = TcpListener::bind(SocketAddr::new([0, 0, 0, 0].into(), port))
            .await
            .with_kind(crate::ErrorKind::Network)?;
        let mapping = Arc::new(RwLock::new(BTreeMap::new()));
        Ok(Self {
            mapping: Arc::downgrade(&mapping),
            _thread: tokio::spawn(async move {
                loop {
                    match listener.accept().await {
                        Ok((stream, _)) => {
                            // Buffer everything read from the socket so the bytes
                            // can be replayed if this turns out not to be TLS.
                            let mut stream = BackTrackingReader::new(stream);
                            stream.start_buffering();
                            let mapping = mapping.clone();
                            let ssl = ssl.clone();
                            tokio::spawn(async move {
                                if let Err(e) = async {
                                    // Peek at the TLS ClientHello without committing
                                    // to a server config yet.
                                    let mid = match LazyConfigAcceptor::new(
                                        Acceptor::default(),
                                        &mut stream,
                                    )
                                    .await
                                    {
                                        Ok(a) => a,
                                        Err(_) => {
                                            // Not TLS: replay the buffered bytes and
                                            // answer as plain HTTP with a redirect to
                                            // the same URI.
                                            stream.rewind();
                                            return hyper::server::Server::builder(
                                                SingleAccept::new(stream),
                                            )
                                            .serve(make_service_fn(|_| async {
                                                Ok::<_, Infallible>(service_fn(|req| async move {
                                                    let host = req
                                                        .headers()
                                                        .get(http::header::HOST)
                                                        .and_then(|host| host.to_str().ok());
                                                    // NOTE(review): `parts.scheme` is left
                                                    // unset while an authority is supplied;
                                                    // `Uri::from_parts` is strict about
                                                    // scheme/authority combinations — confirm
                                                    // this redirect path actually succeeds.
                                                    let uri = Uri::from_parts({
                                                        let mut parts =
                                                            req.uri().to_owned().into_parts();
                                                        parts.authority = host
                                                            .map(FromStr::from_str)
                                                            .transpose()?;
                                                        parts
                                                    })?;
                                                    Response::builder()
                                                        .status(
                                                            http::StatusCode::TEMPORARY_REDIRECT,
                                                        )
                                                        .header(
                                                            http::header::LOCATION,
                                                            uri.to_string(),
                                                        )
                                                        .body(Body::default())
                                                }))
                                            }))
                                            .await
                                            .with_kind(crate::ErrorKind::Network);
                                        }
                                    };
                                    // Route on the SNI server name from the ClientHello.
                                    let target_name =
                                        mid.client_hello().server_name().map(|s| s.to_owned());
                                    let target = {
                                        let mapping = mapping.read().await;
                                        mapping
                                            .get(&target_name)
                                            .into_iter()
                                            .flatten()
                                            .find(|(_, rc)| rc.strong_count() > 0)
                                            .or_else(|| {
                                                // Fall back to the catch-all (None) entry,
                                                // but only when no SNI was sent or the
                                                // "name" is actually an IP address.
                                                if target_name
                                                    .map(|s| s.parse::<IpAddr>().is_ok())
                                                    .unwrap_or(true)
                                                {
                                                    mapping
                                                        .get(&None)
                                                        .into_iter()
                                                        .flatten()
                                                        .find(|(_, rc)| rc.strong_count() > 0)
                                                } else {
                                                    None
                                                }
                                            })
                                            .map(|(target, _)| target.clone())
                                    };
                                    if let Some(target) = target {
                                        let mut tcp_stream =
                                            TcpStream::connect(target.addr).await?;
                                        let key =
                                            ssl.with_certs(target.key, target.addr.ip()).await?;
                                        let cfg = ServerConfig::builder()
                                            .with_safe_defaults()
                                            .with_no_client_auth();
                                        // Present the ed25519 chain when the client
                                        // advertises support for it, else NIST P-256.
                                        let cfg =
                                            if mid.client_hello().signature_schemes().contains(
                                                &tokio_rustls::rustls::SignatureScheme::ED25519,
                                            ) {
                                                cfg.with_single_cert(
                                                    key.fullchain_ed25519()
                                                        .into_iter()
                                                        .map(|c| {
                                                            Ok(tokio_rustls::rustls::Certificate(
                                                                c.to_der()?,
                                                            ))
                                                        })
                                                        .collect::<Result<_, Error>>()?,
                                                    tokio_rustls::rustls::PrivateKey(
                                                        key.key()
                                                            .openssl_key_ed25519()
                                                            .private_key_to_der()?,
                                                    ),
                                                )
                                            } else {
                                                cfg.with_single_cert(
                                                    key.fullchain_nistp256()
                                                        .into_iter()
                                                        .map(|c| {
                                                            Ok(tokio_rustls::rustls::Certificate(
                                                                c.to_der()?,
                                                            ))
                                                        })
                                                        .collect::<Result<_, Error>>()?,
                                                    tokio_rustls::rustls::PrivateKey(
                                                        key.key()
                                                            .openssl_key_nistp256()
                                                            .private_key_to_der()?,
                                                    ),
                                                )
                                            };
                                        let mut tls_stream = mid
                                            .into_stream(Arc::new(
                                                cfg.with_kind(crate::ErrorKind::OpenSsl)?,
                                            ))
                                            .await?;
                                        // Handshake complete; stop retaining bytes.
                                        tls_stream.get_mut().0.stop_buffering();
                                        if target.connect_ssl {
                                            // Re-encrypt: dial the backend over TLS,
                                            // trusting only our own root CA.
                                            tokio::io::copy_bidirectional(
                                                &mut tls_stream,
                                                &mut TlsConnector::from(Arc::new(
                                                    tokio_rustls::rustls::ClientConfig::builder()
                                                        .with_safe_defaults()
                                                        .with_root_certificates({
                                                            let mut store = RootCertStore::empty();
                                                            store.add(
                                                                &tokio_rustls::rustls::Certificate(
                                                                    key.root_ca().to_der()?,
                                                                ),
                                                            ).with_kind(crate::ErrorKind::OpenSsl)?;
                                                            store
                                                        })
                                                        .with_no_client_auth(),
                                                ))
                                                .connect(
                                                    key.key()
                                                        .internal_address()
                                                        .as_str()
                                                        .try_into()
                                                        .with_kind(crate::ErrorKind::OpenSsl)?,
                                                    tcp_stream,
                                                )
                                                .await
                                                .with_kind(crate::ErrorKind::OpenSsl)?,
                                            )
                                            .await?;
                                        } else {
                                            // Plaintext passthrough to the backend.
                                            tokio::io::copy_bidirectional(
                                                &mut tls_stream,
                                                &mut tcp_stream,
                                            )
                                            .await?;
                                        }
                                    } else {
                                        // 503
                                    }
                                    Ok::<_, Error>(())
                                }
                                .await
                                {
                                    tracing::error!("Error in VHostController on port {port}: {e}");
                                    tracing::debug!("{e:?}")
                                }
                            });
                        }
                        Err(e) => {
                            tracing::error!("Error in VHostController on port {port}: {e}");
                            tracing::debug!("{e:?}");
                        }
                    }
                }
            })
            .into(),
        })
    }
    /// Registers `target` under `hostname` (None = catch-all), returning a
    /// liveness handle; the binding is live while any clone of the returned
    /// `Arc` exists. Errors if the accept task has already exited.
    async fn add(&self, hostname: Option<String>, target: TargetInfo) -> Result<Arc<()>, Error> {
        if let Some(mapping) = Weak::upgrade(&self.mapping) {
            let mut writable = mapping.write().await;
            let mut targets = writable.remove(&hostname).unwrap_or_default();
            // Reuse the existing live handle for an identical target, so
            // duplicate registrations share one refcount.
            let rc = if let Some(rc) = Weak::upgrade(&targets.remove(&target).unwrap_or_default()) {
                rc
            } else {
                Arc::new(())
            };
            targets.insert(target, Arc::downgrade(&rc));
            writable.insert(hostname, targets);
            Ok(rc)
        } else {
            Err(Error::new(
                eyre!("VHost Service Thread has exited"),
                crate::ErrorKind::Network,
            ))
        }
    }
    /// Drops dead (fully released) bindings for `hostname`, removing the
    /// hostname entry entirely when none remain.
    async fn gc(&self, hostname: Option<String>) -> Result<(), Error> {
        if let Some(mapping) = Weak::upgrade(&self.mapping) {
            let mut writable = mapping.write().await;
            let mut targets = writable.remove(&hostname).unwrap_or_default();
            targets = targets
                .into_iter()
                .filter(|(_, rc)| rc.strong_count() > 0)
                .collect();
            if !targets.is_empty() {
                writable.insert(hostname, targets);
            }
            Ok(())
        } else {
            Err(Error::new(
                eyre!("VHost Service Thread has exited"),
                crate::ErrorKind::Network,
            ))
        }
    }
    /// True when no hostname is bound at all (the port can be released).
    async fn is_empty(&self) -> Result<bool, Error> {
        if let Some(mapping) = Weak::upgrade(&self.mapping) {
            Ok(mapping.read().await.is_empty())
        } else {
            Err(Error::new(
                eyre!("VHost Service Thread has exited"),
                crate::ErrorKind::Network,
            ))
        }
    }
}

View File

@@ -1,81 +0,0 @@
use std::collections::BTreeMap;
use std::net::SocketAddr;
use std::sync::Arc;
use tokio_rustls::rustls::ServerConfig;
use crate::net::cert_resolver::EmbassyCertResolver;
use crate::net::embassy_service_http_server::EmbassyServiceHTTPServer;
use crate::net::net_utils::ResourceFqdn;
use crate::net::HttpHandler;
use crate::Error;
pub struct VHOSTController {
pub service_servers: BTreeMap<u16, EmbassyServiceHTTPServer>,
pub cert_resolver: EmbassyCertResolver,
embassyd_addr: SocketAddr,
}
impl VHOSTController {
pub fn init(embassyd_addr: SocketAddr) -> Self {
Self {
embassyd_addr,
service_servers: BTreeMap::new(),
cert_resolver: EmbassyCertResolver::new(),
}
}
pub fn build_ssl_svr_cfg(&self) -> Result<Arc<ServerConfig>, Error> {
let ssl_cfg = ServerConfig::builder()
.with_safe_default_cipher_suites()
.with_safe_default_kx_groups()
.with_safe_default_protocol_versions()
.unwrap()
.with_no_client_auth()
.with_cert_resolver(Arc::new(self.cert_resolver.clone()));
Ok(Arc::new(ssl_cfg))
}
pub async fn add_server_or_handle(
&mut self,
external_svc_port: u16,
fqdn: ResourceFqdn,
svc_handler: HttpHandler,
is_ssl: bool,
) -> Result<(), Error> {
if let Some(server) = self.service_servers.get_mut(&external_svc_port) {
server.add_svc_handler_mapping(fqdn, svc_handler).await?;
} else {
self.add_server(is_ssl, external_svc_port, fqdn, svc_handler)
.await?;
}
Ok(())
}
async fn add_server(
&mut self,
is_ssl: bool,
external_svc_port: u16,
fqdn: ResourceFqdn,
svc_handler: HttpHandler,
) -> Result<(), Error> {
let ssl_cfg = if is_ssl {
Some(self.build_ssl_svr_cfg()?)
} else {
None
};
let mut new_service_server =
EmbassyServiceHTTPServer::new(self.embassyd_addr.ip(), external_svc_port, ssl_cfg)
.await?;
new_service_server
.add_svc_handler_mapping(fqdn.clone(), svc_handler)
.await?;
self.service_servers
.insert(external_svc_port, new_service_server);
Ok(())
}
}

View File

@@ -0,0 +1,61 @@
use std::convert::Infallible;
use std::net::SocketAddr;
use futures::future::ready;
use futures::FutureExt;
use helpers::NonDetachingJoinHandle;
use hyper::service::{make_service_fn, service_fn};
use hyper::Server;
use tokio::sync::oneshot;
use crate::context::{DiagnosticContext, InstallContext, RpcContext, SetupContext};
use crate::net::static_server::{
diag_ui_file_router, install_ui_file_router, main_ui_server_router, setup_ui_file_router,
};
use crate::net::HttpHandler;
use crate::Error;
/// Handle to a running hyper HTTP server bound to one socket address.
pub struct WebServer {
    // Fires graceful shutdown when a value is sent.
    shutdown: oneshot::Sender<()>,
    // The serving task; joined during `shutdown`.
    thread: NonDetachingJoinHandle<()>,
}
impl WebServer {
    /// Spawns a hyper server on `bind` that dispatches every request to
    /// `router`, returning a handle that can later shut it down gracefully.
    pub fn new(bind: SocketAddr, router: HttpHandler) -> Self {
        let (shutdown, shutdown_recv) = oneshot::channel();
        let thread = NonDetachingJoinHandle::from(tokio::spawn(async move {
            let result = Server::bind(&bind)
                .http1_preserve_header_case(true)
                .http1_title_case_headers(true)
                .serve(make_service_fn(move |_| {
                    // Each connection gets its own clone of the router.
                    let router = router.clone();
                    ready(Ok::<_, Infallible>(service_fn(move |req| router(req))))
                }))
                .with_graceful_shutdown(shutdown_recv.map(|_| ()))
                .await;
            if let Err(e) = result {
                tracing::error!("Spawning hyper server error: {}", e);
            }
        }));
        Self { shutdown, thread }
    }
    /// Signals graceful shutdown and waits for the server task to finish.
    pub async fn shutdown(self) {
        // The receiver may already be gone if the task exited on its own.
        let _ = self.shutdown.send(());
        self.thread.await.unwrap()
    }
    /// Serves the main UI router for a fully-initialized system.
    pub async fn main(bind: SocketAddr, ctx: RpcContext) -> Result<Self, Error> {
        let router = main_ui_server_router(ctx).await?;
        Ok(Self::new(bind, router))
    }
    /// Serves the initial-setup UI.
    pub async fn setup(bind: SocketAddr, ctx: SetupContext) -> Result<Self, Error> {
        let router = setup_ui_file_router(ctx).await?;
        Ok(Self::new(bind, router))
    }
    /// Serves the diagnostic UI shown when startup fails.
    pub async fn diagnostic(bind: SocketAddr, ctx: DiagnosticContext) -> Result<Self, Error> {
        let router = diag_ui_file_router(ctx).await?;
        Ok(Self::new(bind, router))
    }
    /// Serves the OS-install UI.
    pub async fn install(bind: SocketAddr, ctx: InstallContext) -> Result<Self, Error> {
        let router = install_ui_file_router(ctx).await?;
        Ok(Self::new(bind, router))
    }
}

View File

@@ -47,7 +47,7 @@ pub async fn country() -> Result<(), Error> {
}
#[command(display(display_none))]
#[instrument(skip(ctx, password))]
#[instrument(skip_all)]
pub async fn add(
#[context] ctx: RpcContext,
#[arg] ssid: String,
@@ -103,7 +103,7 @@ pub async fn add(
}
#[command(display(display_none))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn connect(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<(), Error> {
let wifi_manager = wifi_manager(&ctx)?;
if !ssid.is_ascii() {
@@ -155,7 +155,7 @@ pub async fn connect(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<
}
#[command(display(display_none))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn delete(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<(), Error> {
let wifi_manager = wifi_manager(&ctx)?;
if !ssid.is_ascii() {
@@ -293,7 +293,7 @@ fn display_wifi_list(info: Vec<WifiListOut>, matches: &ArgMatches) {
}
#[command(display(display_wifi_info))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn get(
#[context] ctx: RpcContext,
#[allow(unused_variables)]
@@ -347,7 +347,7 @@ pub async fn get(
}
#[command(rename = "get", display(display_wifi_list))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn get_available(
#[context] ctx: RpcContext,
#[allow(unused_variables)]
@@ -457,7 +457,7 @@ impl WpaCli {
WpaCli { interface }
}
#[instrument(skip(self, psk))]
#[instrument(skip_all)]
pub async fn set_add_network_low(&mut self, ssid: &Ssid, psk: &Psk) -> Result<(), Error> {
let _ = Command::new("nmcli")
.arg("-a")
@@ -473,7 +473,7 @@ impl WpaCli {
.await?;
Ok(())
}
#[instrument(skip(self, psk))]
#[instrument(skip_all)]
pub async fn add_network_low(&mut self, ssid: &Ssid, psk: &Psk) -> Result<(), Error> {
if self.find_networks(ssid).await?.is_empty() {
Command::new("nmcli")
@@ -567,7 +567,7 @@ impl WpaCli {
.await?;
Ok(())
}
#[instrument]
#[instrument(skip_all)]
pub async fn list_networks_low(&self) -> Result<BTreeMap<NetworkId, WifiInfo>, Error> {
let r = Command::new("nmcli")
.arg("-t")
@@ -596,7 +596,7 @@ impl WpaCli {
.collect::<BTreeMap<NetworkId, WifiInfo>>())
}
#[instrument]
#[instrument(skip_all)]
pub async fn list_wifi_low(&self) -> Result<WifiList, Error> {
let r = Command::new("nmcli")
.arg("-g")
@@ -681,7 +681,7 @@ impl WpaCli {
})
.collect())
}
#[instrument(skip(db))]
#[instrument(skip_all)]
pub async fn select_network(&mut self, db: impl DbHandle, ssid: &Ssid) -> Result<bool, Error> {
let m_id = self.check_active_network(ssid).await?;
match m_id {
@@ -717,7 +717,7 @@ impl WpaCli {
}
}
}
#[instrument]
#[instrument(skip_all)]
pub async fn get_current_network(&self) -> Result<Option<Ssid>, Error> {
let r = Command::new("iwgetid")
.arg(&self.interface)
@@ -733,7 +733,7 @@ impl WpaCli {
Ok(Some(Ssid(network.to_owned())))
}
}
#[instrument(skip(db))]
#[instrument(skip_all)]
pub async fn remove_network(&mut self, db: impl DbHandle, ssid: &Ssid) -> Result<bool, Error> {
let found_networks = self.find_networks(ssid).await?;
if found_networks.is_empty() {
@@ -745,7 +745,7 @@ impl WpaCli {
self.save_config(db).await?;
Ok(true)
}
#[instrument(skip(psk, db))]
#[instrument(skip_all)]
pub async fn set_add_network(
&mut self,
db: impl DbHandle,
@@ -757,7 +757,7 @@ impl WpaCli {
self.save_config(db).await?;
Ok(())
}
#[instrument(skip(psk, db))]
#[instrument(skip_all)]
pub async fn add_network(
&mut self,
db: impl DbHandle,
@@ -771,7 +771,7 @@ impl WpaCli {
}
}
#[instrument]
#[instrument(skip_all)]
pub async fn interface_connected(interface: &str) -> Result<bool, Error> {
let out = Command::new("ifconfig")
.arg(interface)
@@ -792,7 +792,7 @@ pub fn country_code_parse(code: &str, _matches: &ArgMatches) -> Result<CountryCo
})
}
#[instrument(skip(main_datadir))]
#[instrument(skip_all)]
pub async fn synchronize_wpa_supplicant_conf<P: AsRef<Path>>(
main_datadir: P,
wifi_iface: &str,

View File

@@ -23,7 +23,7 @@ pub async fn notification() -> Result<(), Error> {
}
#[command(display(display_serializable))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn list(
#[context] ctx: RpcContext,
#[arg] before: Option<i32>,
@@ -232,7 +232,7 @@ impl NotificationManager {
cache: Mutex::new(HashMap::new()),
}
}
#[instrument(skip(self, db))]
#[instrument(skip_all)]
pub async fn notify<Db: DbHandle, T: NotificationType>(
&self,
db: &mut Db,
@@ -254,7 +254,7 @@ impl NotificationManager {
.unread_notification_count()
.get_mut(db)
.await?;
let sql_package_id = package_id.map::<String, _>(|p| p.into());
let sql_package_id = package_id.as_ref().map(|p| &**p);
let sql_code = T::CODE;
let sql_level = format!("{}", level);
let sql_data =

View File

@@ -0,0 +1,3 @@
{boot} /boot vfat umask=0077 0 2
{efi} /boot/efi vfat umask=0077 0 1
{root} / ext4 defaults 0 1

View File

@@ -0,0 +1,122 @@
use color_eyre::eyre::eyre;
use gpt::disk::LogicalBlockSize;
use gpt::GptConfig;
use crate::disk::util::DiskInfo;
use crate::disk::OsPartitionInfo;
use crate::os_install::partition_for;
use crate::Error;
/// Writes a GPT layout for the OS partitions on `disk`.
///
/// With `overwrite`, the whole disk is wiped (protective MBR + fresh GPT) and
/// a `data` partition is created in the largest free region; without it, the
/// existing table is rewritten in place while preserving the partition that
/// holds the embassy data (which must start at or beyond sector 33556480).
///
/// Returns the device paths of the efi/boot/root partitions just created.
pub async fn partition(disk: &DiskInfo, overwrite: bool) -> Result<OsPartitionInfo, Error> {
    {
        // GPT manipulation is synchronous file I/O; run it off the async runtime.
        let disk = disk.clone();
        tokio::task::spawn_blocking(move || {
            let mut device = Box::new(
                std::fs::File::options()
                    .read(true)
                    .write(true)
                    .open(&disk.logicalname)?,
            );
            let (mut gpt, guid_part) = if overwrite {
                // Fresh protective MBR covering the whole disk (capped at the
                // 32-bit LBA maximum), then an empty GPT on top of it.
                let mbr = gpt::mbr::ProtectiveMBR::with_lb_size(
                    u32::try_from((disk.capacity / 512) - 1).unwrap_or(0xFF_FF_FF_FF),
                );
                mbr.overwrite_lba0(&mut device)?;
                (
                    GptConfig::new()
                        .writable(true)
                        .initialized(false)
                        .logical_block_size(LogicalBlockSize::Lb512)
                        .create_from_device(device, None)?,
                    None,
                )
            } else {
                let gpt = GptConfig::new()
                    .writable(true)
                    .initialized(true)
                    .logical_block_size(LogicalBlockSize::Lb512)
                    .open_from_device(device)?;
                // Scan existing partitions (GPT numbering is 1-based) looking
                // for the embassy data partition so it can be carried over.
                let mut guid_part = None;
                for (idx, part_info) in disk
                    .partitions
                    .iter()
                    .enumerate()
                    .map(|(idx, x)| (idx + 1, x))
                {
                    if let Some(entry) = gpt.partitions().get(&(idx as u32)) {
                        if entry.first_lba >= 33556480 {
                            // Past the OS region: remember the entry so it can
                            // be re-inserted after the table is rebuilt.
                            // NOTE(review): the `idx < 3` guard is undocumented
                            // — confirm why only low-numbered entries qualify.
                            if idx < 3 {
                                guid_part = Some(entry.clone())
                            }
                            break;
                        }
                        if part_info.guid.is_some() {
                            // A guid-bearing (data) partition inside the OS
                            // region cannot be preserved by this layout.
                            return Err(Error::new(
                                eyre!("Not enough space before embassy data"),
                                crate::ErrorKind::InvalidRequest,
                            ));
                        }
                    }
                }
                (gpt, guid_part)
            };
            // Drop every existing entry, then lay out efi/boot/root.
            gpt.update_partitions(Default::default())?;
            gpt.add_partition("efi", 100 * 1024 * 1024, gpt::partition_types::EFI, 0, None)?;
            gpt.add_partition(
                "boot",
                1024 * 1024 * 1024,
                gpt::partition_types::LINUX_FS,
                0,
                None,
            )?;
            gpt.add_partition(
                "root",
                15 * 1024 * 1024 * 1024,
                // Use the arch-specific discoverable root type when known.
                match *crate::ARCH {
                    "x86_64" => gpt::partition_types::LINUX_ROOT_X64,
                    "aarch64" => gpt::partition_types::LINUX_ROOT_ARM_64,
                    _ => gpt::partition_types::LINUX_FS,
                },
                0,
                None,
            )?;
            if overwrite {
                // Claim the largest remaining free region for the LVM data pool.
                gpt.add_partition(
                    "data",
                    gpt.find_free_sectors()
                        .iter()
                        .map(|(_, size)| *size * u64::from(*gpt.logical_block_size()))
                        .max()
                        .ok_or_else(|| {
                            Error::new(
                                eyre!("No free space left on device"),
                                crate::ErrorKind::BlockDevice,
                            )
                        })?,
                    gpt::partition_types::LINUX_LVM,
                    0,
                    None,
                )?;
            } else if let Some(guid_part) = guid_part {
                // Re-insert the preserved embassy data partition entry.
                let mut parts = gpt.partitions().clone();
                parts.insert(gpt.find_next_partition_id(), guid_part);
                gpt.update_partitions(parts)?;
            }
            gpt.write()?;
            Ok(())
        })
        .await
        .unwrap()?;
    }
    Ok(OsPartitionInfo {
        efi: Some(partition_for(&disk.logicalname, 1)),
        boot: partition_for(&disk.logicalname, 2),
        root: partition_for(&disk.logicalname, 3),
    })
}

View File

@@ -0,0 +1,91 @@
use color_eyre::eyre::eyre;
use mbrman::{MBRPartitionEntry, CHS, MBR};
use crate::disk::util::DiskInfo;
use crate::disk::OsPartitionInfo;
use crate::os_install::partition_for;
use crate::Error;
/// Writes an MBR (DOS) partition table for an OS install onto `disk` and
/// returns the device paths of the partitions it created.
///
/// Layout (512-byte sectors, partitions numbered from 1):
///   1. `boot` — FAT32 (0x0b), bootable, LBA 2048..2099200 (1 GiB)
///   2. `root` — Linux (0x83), LBA 2099200..33556480 (15 GiB)
///   3. `data` — Linux LVM (0x8e): the rest of the disk when `overwrite` is
///      set, otherwise the preserved pre-existing data partition.
///
/// The MBR layout has no EFI system partition (`efi: None`).
///
/// # Errors
/// Fails when a partition carrying a GUID sits below the data region, or on
/// any I/O / MBR library error.
pub async fn partition(disk: &DiskInfo, overwrite: bool) -> Result<OsPartitionInfo, Error> {
    {
        // NOTE(review): the sector count is truncated to u32 here — MBR
        // cannot address past 2 TiB; confirm larger disks are handled
        // (or rejected) upstream.
        let sectors = (disk.capacity / 512) as u32;
        let disk = disk.clone();
        // Partition-table manipulation is synchronous file I/O, so hand the
        // work to a blocking task.
        tokio::task::spawn_blocking(move || {
            let mut file = std::fs::File::options()
                .read(true)
                .write(true)
                .open(&disk.logicalname)?;
            let (mut mbr, guid_part) = if overwrite {
                // Fresh install: brand-new table with a random disk signature.
                (MBR::new_from(&mut file, 512, rand::random())?, None)
            } else {
                // Re-install: read the existing table and locate the embassy
                // data partition so it can survive the repartitioning.
                let mut mbr = MBR::read_from(&mut file, 512)?;
                let mut guid_part = None;
                // Partition indices are 1-based.
                for (idx, part_info) in disk
                    .partitions
                    .iter()
                    .enumerate()
                    .map(|(idx, x)| (idx + 1, x))
                {
                    if let Some(entry) = mbr.get_mut(idx) {
                        // LBA 33556480 is where the data partition begins;
                        // the first entry starting at or past it is treated
                        // as the data partition.
                        if entry.starting_lba >= 33556480 {
                            if idx < 3 {
                                // Pull the entry out of its slot (only done
                                // for slots 1-2); it is re-inserted as
                                // partition 3 below.
                                guid_part =
                                    Some(std::mem::replace(entry, MBRPartitionEntry::empty()))
                            }
                            break;
                        }
                        // A partition carrying data (it has a GUID) lives in
                        // the region the OS partitions need — refuse rather
                        // than destroy it.
                        if part_info.guid.is_some() {
                            return Err(Error::new(
                                eyre!("Not enough space before embassy data"),
                                crate::ErrorKind::InvalidRequest,
                            ));
                        }
                        // Dataless entry inside the OS region: clear it.
                        *entry = MBRPartitionEntry::empty();
                    }
                }
                (mbr, guid_part)
            };
            // Partition 1: boot — FAT32 (0x0b), marked bootable (0x80),
            // 1 GiB starting at the conventional 1 MiB alignment boundary.
            mbr[1] = MBRPartitionEntry {
                boot: 0x80,
                first_chs: CHS::empty(),
                sys: 0x0b,
                last_chs: CHS::empty(),
                starting_lba: 2048,
                sectors: 2099200 - 2048,
            };
            // Partition 2: root — Linux (0x83), 15 GiB.
            mbr[2] = MBRPartitionEntry {
                boot: 0,
                first_chs: CHS::empty(),
                sys: 0x83,
                last_chs: CHS::empty(),
                starting_lba: 2099200,
                sectors: 33556480 - 2099200,
            };
            if overwrite {
                // Partition 3: data — Linux LVM (0x8e), all remaining space.
                mbr[3] = MBRPartitionEntry {
                    boot: 0,
                    first_chs: CHS::empty(),
                    sys: 0x8e,
                    last_chs: CHS::empty(),
                    starting_lba: 33556480,
                    sectors: sectors - 33556480,
                }
            } else if let Some(guid_part) = guid_part {
                // Put the preserved data partition back as partition 3.
                mbr[3] = guid_part;
            }
            mbr.write_into(&mut file)?;
            Ok(())
        })
        .await
        // Propagate panics from the blocking task, then any disk error.
        .unwrap()?;
    }
    Ok(OsPartitionInfo {
        // Legacy-BIOS layout: no EFI system partition.
        efi: None,
        boot: partition_for(&disk.logicalname, 1),
        root: partition_for(&disk.logicalname, 2),
    })
}

View File

@@ -1,7 +1,6 @@
use std::path::{Path, PathBuf};
use color_eyre::eyre::eyre;
use mbrman::{MBRPartitionEntry, CHS, MBR};
use models::Error;
use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
@@ -10,12 +9,18 @@ use tokio::process::Command;
use crate::context::InstallContext;
use crate::disk::mount::filesystem::bind::Bind;
use crate::disk::mount::filesystem::block_dev::BlockDev;
use crate::disk::mount::filesystem::efivarfs::EfiVarFs;
use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::{MountGuard, TmpMountGuard};
use crate::disk::util::DiskInfo;
use crate::disk::util::{DiskInfo, PartitionTable};
use crate::disk::OsPartitionInfo;
use crate::net::utils::{find_eth_iface, find_wifi_iface};
use crate::util::serde::IoFormat;
use crate::util::{display_none, Invoke};
use crate::ARCH;
mod gpt;
mod mbr;
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
@@ -69,43 +74,6 @@ pub async fn list() -> Result<Vec<DiskInfo>, Error> {
.collect())
}
/// Scans `/sys/class/net` for the first interface that exposes a `wireless`
/// sysfs entry, i.e. the first Wi-Fi capable NIC.
///
/// Returns `Ok(None)` when no wireless interface is present.
pub async fn find_wifi_iface() -> Result<Option<String>, Error> {
    let mut entries = tokio::fs::read_dir("/sys/class/net").await?;
    while let Some(entry) = entries.next_entry().await? {
        // A NIC is wireless iff sysfs exposes a `wireless` subdirectory.
        let is_wireless = tokio::fs::metadata(entry.path().join("wireless"))
            .await
            .is_ok();
        if !is_wireless {
            continue;
        }
        if let Ok(name) = entry.file_name().into_string() {
            return Ok(Some(name));
        }
    }
    Ok(None)
}
/// Scans `/sys/class/net` for the first wired physical interface: one with a
/// `device` sysfs entry but no `wireless` entry.
///
/// # Errors
/// Returns a `Network` error when no such interface can be found.
pub async fn find_eth_iface() -> Result<String, Error> {
    let mut entries = tokio::fs::read_dir("/sys/class/net").await?;
    while let Some(entry) = entries.next_entry().await? {
        let path = entry.path();
        // Skip wireless NICs and virtual interfaces (those without `device`).
        let is_wireless = tokio::fs::metadata(path.join("wireless")).await.is_ok();
        let is_physical = tokio::fs::metadata(path.join("device")).await.is_ok();
        if is_wireless || !is_physical {
            continue;
        }
        if let Ok(name) = entry.file_name().into_string() {
            return Ok(name);
        }
    }
    Err(Error::new(
        eyre!("Could not detect ethernet interface"),
        crate::ErrorKind::Network,
    ))
}
pub fn partition_for(disk: impl AsRef<Path>, idx: usize) -> PathBuf {
let disk_path = disk.as_ref();
let (root, leaf) = if let (Some(root), Some(leaf)) = (
@@ -123,12 +91,30 @@ pub fn partition_for(disk: impl AsRef<Path>, idx: usize) -> PathBuf {
}
}
/// Partitions `disk` for an OS install, selecting the partition-table type.
///
/// An existing table type is reused when not overwriting. Otherwise the type
/// is chosen from the firmware: GPT when the system booted via EFI
/// (`/sys/firmware/efi` exists), MBR otherwise. The chosen type is recorded
/// back onto `disk` before delegating to the matching implementation.
async fn partition(disk: &mut DiskInfo, overwrite: bool) -> Result<OsPartitionInfo, Error> {
    let partition_type = match disk.partition_table {
        // Keep whatever is already on disk when we aren't wiping it.
        Some(existing) if !overwrite => existing,
        // Overwriting, or no table present: follow the firmware.
        _ => {
            if tokio::fs::metadata("/sys/firmware/efi").await.is_ok() {
                PartitionTable::Gpt
            } else {
                PartitionTable::Mbr
            }
        }
    };
    disk.partition_table = Some(partition_type);
    match partition_type {
        PartitionTable::Gpt => gpt::partition(disk, overwrite).await,
        PartitionTable::Mbr => mbr::partition(disk, overwrite).await,
    }
}
#[command(display(display_none))]
pub async fn execute(
#[arg] logicalname: PathBuf,
#[arg(short = 'o')] mut overwrite: bool,
) -> Result<(), Error> {
let disk = crate::disk::util::list(&Default::default())
let mut disk = crate::disk::util::list(&Default::default())
.await?
.into_iter()
.find(|d| &d.logicalname == &logicalname)
@@ -142,110 +128,60 @@ pub async fn execute(
let wifi_iface = find_wifi_iface().await?;
overwrite |= disk.guid.is_none() && disk.partitions.iter().all(|p| p.guid.is_none());
let sectors = (disk.capacity / 512) as u32;
tokio::task::spawn_blocking(move || {
let mut file = std::fs::File::options()
.read(true)
.write(true)
.open(&logicalname)?;
let (mut mbr, guid_part) = if overwrite {
(MBR::new_from(&mut file, 512, rand::random())?, None)
} else {
let mut mbr = MBR::read_from(&mut file, 512)?;
let mut guid_part = None;
for (idx, part_info) in disk
.partitions
.iter()
.enumerate()
.map(|(idx, x)| (idx + 1, x))
{
if let Some(entry) = mbr.get_mut(idx) {
if entry.starting_lba >= 33556480 {
if idx < 3 {
guid_part = Some(std::mem::replace(entry, MBRPartitionEntry::empty()))
}
break;
}
if part_info.guid.is_some() {
return Err(Error::new(
eyre!("Not enough space before embassy data"),
crate::ErrorKind::InvalidRequest,
));
}
*entry = MBRPartitionEntry::empty();
}
}
(mbr, guid_part)
};
mbr[1] = MBRPartitionEntry {
boot: 0x80,
first_chs: CHS::empty(),
sys: 0x0b,
last_chs: CHS::empty(),
starting_lba: 2048,
sectors: 2099200 - 2048,
};
mbr[2] = MBRPartitionEntry {
boot: 0,
first_chs: CHS::empty(),
sys: 0x83,
last_chs: CHS::empty(),
starting_lba: 2099200,
sectors: 33556480 - 2099200,
};
let part_info = partition(&mut disk, overwrite).await?;
if overwrite {
mbr[3] = MBRPartitionEntry {
boot: 0,
first_chs: CHS::empty(),
sys: 0x8e,
last_chs: CHS::empty(),
starting_lba: 33556480,
sectors: sectors - 33556480,
}
} else if let Some(guid_part) = guid_part {
mbr[3] = guid_part;
}
mbr.write_into(&mut file)?;
Ok(())
})
.await
.unwrap()?;
let boot_part = partition_for(&disk.logicalname, 1);
let root_part = partition_for(&disk.logicalname, 2);
if let Some(efi) = &part_info.efi {
Command::new("mkfs.vfat")
.arg(efi)
.invoke(crate::ErrorKind::DiskManagement)
.await?;
Command::new("fatlabel")
.arg(efi)
.arg("efi")
.invoke(crate::ErrorKind::DiskManagement)
.await?;
}
Command::new("mkfs.vfat")
.arg(&boot_part)
.arg(&part_info.boot)
.invoke(crate::ErrorKind::DiskManagement)
.await?;
Command::new("fatlabel")
.arg(&boot_part)
.arg(&part_info.boot)
.arg("boot")
.invoke(crate::ErrorKind::DiskManagement)
.await?;
Command::new("mkfs.ext4")
.arg(&root_part)
.arg(&part_info.root)
.invoke(crate::ErrorKind::DiskManagement)
.await?;
Command::new("e2label")
.arg(&root_part)
.arg(&part_info.root)
.arg("rootfs")
.invoke(crate::ErrorKind::DiskManagement)
.await?;
let rootfs = TmpMountGuard::mount(&BlockDev::new(&root_part), ReadWrite).await?;
let rootfs = TmpMountGuard::mount(&BlockDev::new(&part_info.root), ReadWrite).await?;
tokio::fs::create_dir(rootfs.as_ref().join("config")).await?;
tokio::fs::create_dir(rootfs.as_ref().join("next")).await?;
let current = rootfs.as_ref().join("current");
tokio::fs::create_dir(&current).await?;
tokio::fs::create_dir(current.join("boot")).await?;
let boot =
MountGuard::mount(&BlockDev::new(&boot_part), current.join("boot"), ReadWrite).await?;
let boot = MountGuard::mount(
&BlockDev::new(&part_info.boot),
current.join("boot"),
ReadWrite,
)
.await?;
let efi = if let Some(efi) = &part_info.efi {
Some(MountGuard::mount(&BlockDev::new(efi), current.join("boot/efi"), ReadWrite).await?)
} else {
None
};
Command::new("unsquashfs")
.arg("-n")
@@ -259,10 +195,7 @@ pub async fn execute(
tokio::fs::write(
rootfs.as_ref().join("config/config.yaml"),
IoFormat::Yaml.to_vec(&PostInstallConfig {
os_partitions: OsPartitionInfo {
boot: boot_part.clone(),
root: root_part.clone(),
},
os_partitions: part_info.clone(),
ethernet_interface: eth_iface,
wifi_interface: wifi_iface,
})?,
@@ -273,8 +206,13 @@ pub async fn execute(
current.join("etc/fstab"),
format!(
include_str!("fstab.template"),
boot = boot_part.display(),
root = root_part.display()
boot = part_info.boot.display(),
efi = part_info
.efi
.as_ref()
.map(|p| p.display().to_string())
.unwrap_or_else(|| "# N/A".to_owned()),
root = part_info.root.display(),
),
)
.await?;
@@ -293,26 +231,52 @@ pub async fn execute(
.await?;
let dev = MountGuard::mount(&Bind::new("/dev"), current.join("dev"), ReadWrite).await?;
let sys = MountGuard::mount(&Bind::new("/sys"), current.join("sys"), ReadWrite).await?;
let proc = MountGuard::mount(&Bind::new("/proc"), current.join("proc"), ReadWrite).await?;
let sys = MountGuard::mount(&Bind::new("/sys"), current.join("sys"), ReadWrite).await?;
let efivarfs = if let Some(efi) = &part_info.efi {
Some(
MountGuard::mount(
&EfiVarFs,
current.join("sys/firmware/efi/efivars"),
ReadWrite,
)
.await?,
)
} else {
None
};
Command::new("chroot")
.arg(&current)
.arg("update-grub")
.invoke(crate::ErrorKind::Grub)
.await?;
Command::new("chroot")
.arg(&current)
.arg("grub-install")
.arg("--target=i386-pc")
let mut install = Command::new("chroot");
install.arg(&current).arg("grub-install");
if part_info.efi.is_none() {
install.arg("--target=i386-pc");
} else {
match *ARCH {
"x86_64" => install.arg("--target=x86_64-efi"),
"aarch64" => install.arg("--target=arm64-efi"),
_ => &mut install,
};
}
install
.arg(&disk.logicalname)
.invoke(crate::ErrorKind::Grub)
.await?;
dev.unmount().await?;
sys.unmount().await?;
proc.unmount().await?;
boot.unmount().await?;
dev.unmount(false).await?;
if let Some(efivarfs) = efivarfs {
efivarfs.unmount(false).await?;
}
sys.unmount(false).await?;
proc.unmount(false).await?;
if let Some(efi) = efi {
efi.unmount(false).await?;
}
boot.unmount(false).await?;
rootfs.unmount().await?;
Ok(())

View File

@@ -2,7 +2,7 @@ use std::borrow::Cow;
use std::collections::{BTreeMap, BTreeSet, VecDeque};
use std::ffi::{OsStr, OsString};
use std::net::Ipv4Addr;
use std::path::PathBuf;
use std::path::{Path, PathBuf};
use std::time::Duration;
use async_stream::stream;
@@ -11,18 +11,21 @@ use color_eyre::eyre::eyre;
use color_eyre::Report;
use futures::future::Either as EitherFuture;
use futures::TryStreamExt;
use helpers::{NonDetachingJoinHandle, RpcClient};
use helpers::{NonDetachingJoinHandle, UnixRpcClient};
use models::{Id, ImageId};
use nix::sys::signal;
use nix::unistd::Pid;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use serde_json::Value;
use tokio::io::{AsyncBufRead, AsyncBufReadExt, BufReader};
use tokio::{
io::{AsyncBufRead, AsyncBufReadExt, BufReader},
time::timeout,
};
use tracing::instrument;
use super::ProcedureName;
use crate::context::RpcContext;
use crate::id::{Id, ImageId};
use crate::s9pk::manifest::{PackageId, SYSTEM_PACKAGE_ID};
use crate::util::serde::{Duration as SerdeDuration, IoFormat};
use crate::util::Version;
@@ -66,21 +69,29 @@ pub struct DockerContainer {
#[serde(default)]
pub system: bool,
}
impl DockerContainer {
/// Creates a new exec runner to which we pass commands to run.
/// The idea is that we send it a command and get the outputs filtered back through the manager.
/// This lets us run commands without the cost of `docker exec`, which is known to have
/// a delay of > 200 ms, which is not acceptable.
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn long_running_execute(
&self,
ctx: &RpcContext,
pkg_id: &PackageId,
pkg_version: &Version,
volumes: &Volumes,
) -> Result<(LongRunning, RpcClient), Error> {
) -> Result<(LongRunning, UnixRpcClient), Error> {
let container_name = DockerProcedure::container_name(pkg_id, None);
let socket_path =
Path::new("/tmp/embassy/containers").join(format!("{pkg_id}_{pkg_version}"));
if tokio::fs::metadata(&socket_path).await.is_ok() {
tokio::fs::remove_dir_all(&socket_path).await?;
}
tokio::fs::create_dir_all(&socket_path).await?;
let mut cmd = LongRunning::setup_long_running_docker_cmd(
self,
ctx,
@@ -88,20 +99,13 @@ impl DockerContainer {
volumes,
pkg_id,
pkg_version,
&socket_path,
)
.await?;
let mut handle = cmd.spawn().with_kind(crate::ErrorKind::Docker)?;
let client =
if let (Some(stdin), Some(stdout)) = (handle.stdin.take(), handle.stdout.take()) {
RpcClient::new(stdin, stdout)
} else {
return Err(Error::new(
eyre!("No stdin/stdout handle for container init"),
crate::ErrorKind::Incoherent,
));
};
let client = UnixRpcClient::new(socket_path.join("rpc.sock"));
let running_output = NonDetachingJoinHandle::from(tokio::spawn(async move {
if let Err(err) = handle
@@ -114,6 +118,19 @@ impl DockerContainer {
}
}));
{
let socket = socket_path.join("rpc.sock");
if let Err(_err) = timeout(Duration::from_secs(1), async move {
while tokio::fs::metadata(&socket).await.is_err() {
tokio::time::sleep(Duration::from_millis(10)).await;
}
})
.await
{
tracing::error!("Timed out waiting for init to create socket");
}
}
Ok((LongRunning { running_output }, client))
}
}
@@ -195,7 +212,7 @@ impl DockerProcedure {
Ok(())
}
#[instrument(skip(ctx, input))]
#[instrument(skip_all)]
pub async fn execute<I: Serialize, O: DeserializeOwned>(
&self,
ctx: &RpcContext,
@@ -376,7 +393,7 @@ impl DockerProcedure {
)
}
#[instrument(skip(_ctx, input))]
#[instrument(skip_all)]
pub async fn inject<I: Serialize, O: DeserializeOwned>(
&self,
_ctx: &RpcContext,
@@ -531,7 +548,7 @@ impl DockerProcedure {
)
}
#[instrument(skip(ctx, input))]
#[instrument(skip_all)]
pub async fn sandboxed<I: Serialize, O: DeserializeOwned>(
&self,
ctx: &RpcContext,
@@ -651,7 +668,7 @@ impl DockerProcedure {
}
}
pub fn uncontainer_name(name: &str) -> Option<(PackageId<&str>, Option<&str>)> {
pub fn uncontainer_name(name: &str) -> Option<(PackageId, Option<&str>)> {
let (pre_tld, _) = name.split_once('.')?;
if pre_tld.contains('_') {
let (pkg, name) = name.split_once('_')?;
@@ -699,7 +716,7 @@ impl DockerProcedure {
res.push(OsStr::new("--entrypoint").into());
res.push(OsStr::new(&self.entrypoint).into());
if self.system {
res.push(OsString::from(self.image.for_package(SYSTEM_PACKAGE_ID, None)).into());
res.push(OsString::from(self.image.for_package(&*SYSTEM_PACKAGE_ID, None)).into());
} else {
res.push(OsString::from(self.image.for_package(pkg_id, Some(pkg_version))).into());
}
@@ -771,9 +788,10 @@ impl LongRunning {
volumes: &Volumes,
pkg_id: &PackageId,
pkg_version: &Version,
socket_path: &Path,
) -> Result<tokio::process::Command, Error> {
const INIT_EXEC: &str = "/start9/embassy_container_init";
const BIND_LOCATION: &str = "/usr/lib/embassy/container";
const INIT_EXEC: &str = "/start9/bin/embassy_container_init";
const BIND_LOCATION: &str = "/usr/lib/embassy/container/";
tracing::trace!("setup_long_running_docker_cmd");
LongRunning::cleanup_previous_container(ctx, container_name).await?;
@@ -786,7 +804,7 @@ impl LongRunning {
.arg("'{{.Architecture}}'");
if docker.system {
cmd.arg(docker.image.for_package(SYSTEM_PACKAGE_ID, None));
cmd.arg(docker.image.for_package(&*SYSTEM_PACKAGE_ID, None));
} else {
cmd.arg(docker.image.for_package(pkg_id, Some(pkg_version)));
}
@@ -799,7 +817,14 @@ impl LongRunning {
.arg("--network=start9")
.arg(format!("--add-host=embassy:{}", Ipv4Addr::from(HOST_IP)))
.arg("--mount")
.arg(format!("type=bind,src={BIND_LOCATION},dst=/start9"))
.arg(format!(
"type=bind,src={BIND_LOCATION},dst=/start9/bin/,readonly"
))
.arg("--mount")
.arg(format!(
"type=bind,src={input},dst=/start9/sockets/",
input = socket_path.display()
))
.arg("--name")
.arg(&container_name)
.arg(format!("--hostname={}", &container_name))
@@ -831,7 +856,7 @@ impl LongRunning {
}
cmd.arg("--log-driver=journald");
if docker.system {
cmd.arg(docker.image.for_package(SYSTEM_PACKAGE_ID, None));
cmd.arg(docker.image.for_package(&*SYSTEM_PACKAGE_ID, None));
} else {
cmd.arg(docker.image.for_package(pkg_id, Some(pkg_version)));
}

View File

@@ -4,7 +4,7 @@ use std::time::Duration;
use color_eyre::eyre::eyre;
use embassy_container_init::{ProcessGroupId, SignalGroup, SignalGroupParams};
use helpers::RpcClient;
use helpers::UnixRpcClient;
pub use js_engine::JsError;
use js_engine::{JsExecutionEnvironment, PathForVolumeId};
use models::{ErrorKind, VolumeId};
@@ -57,7 +57,7 @@ impl JsProcedure {
Ok(())
}
#[instrument(skip(directory, input, rpc_client))]
#[instrument(skip_all)]
pub async fn execute<I: Serialize, O: DeserializeOwned>(
&self,
directory: &PathBuf,
@@ -68,7 +68,7 @@ impl JsProcedure {
input: Option<I>,
timeout: Option<Duration>,
gid: ProcessGroupId,
rpc_client: Option<Arc<RpcClient>>,
rpc_client: Option<Arc<UnixRpcClient>>,
) -> Result<Result<O, (i32, String)>, Error> {
let cleaner_client = rpc_client.clone();
let cleaner = GeneralGuard::new(move || {
@@ -96,7 +96,7 @@ impl JsProcedure {
)
.await?
.run_action(name, input, self.args.clone());
let output: ErrorValue = match timeout {
let output: Option<ErrorValue> = match timeout {
Some(timeout_duration) => tokio::time::timeout(timeout_duration, running_action)
.await
.map_err(|_| (JsError::Timeout, "Timed out. Retrying soon...".to_owned()))??,
@@ -111,7 +111,7 @@ impl JsProcedure {
Ok(res)
}
#[instrument(skip(ctx, input))]
#[instrument(skip_all)]
pub async fn sandboxed<I: Serialize, O: DeserializeOwned>(
&self,
ctx: &RpcContext,
@@ -134,7 +134,7 @@ impl JsProcedure {
.await?
.read_only_effects()
.run_action(name, input, self.args.clone());
let output: ErrorValue = match timeout {
let output: Option<ErrorValue> = match timeout {
Some(timeout_duration) => tokio::time::timeout(timeout_duration, running_action)
.await
.map_err(|_| (JsError::Timeout, "Timed out. Retrying soon...".to_owned()))??,
@@ -149,8 +149,9 @@ impl JsProcedure {
}
fn unwrap_known_error<O: DeserializeOwned>(
error_value: ErrorValue,
error_value: Option<ErrorValue>,
) -> Result<O, (JsError, String)> {
let error_value = error_value.unwrap_or_else(|| ErrorValue::Result(serde_json::Value::Null));
match error_value {
ErrorValue::Error(error) => Err((JsError::Javascript, error)),
ErrorValue::ErrorCode((code, message)) => Err((JsError::Code(code), message)),
@@ -553,6 +554,96 @@ async fn js_action_test_deep_dir_escape() {
.unwrap()
.unwrap();
}
#[tokio::test]
async fn js_action_test_zero_dir() {
let js_action = JsProcedure { args: vec![] };
let path: PathBuf = "test/js_action_execute/"
.parse::<PathBuf>()
.unwrap()
.canonicalize()
.unwrap();
let package_id = "test-package".parse().unwrap();
let package_version: Version = "0.3.0.3".parse().unwrap();
let name = ProcedureName::Action("test-zero-dir".parse().unwrap());
let volumes: Volumes = serde_json::from_value(serde_json::json!({
"main": {
"type": "data"
},
"compat": {
"type": "assets"
},
"filebrowser" :{
"package-id": "filebrowser",
"path": "data",
"readonly": true,
"type": "pointer",
"volume-id": "main",
}
}))
.unwrap();
let input: Option<serde_json::Value> = None;
let timeout = Some(Duration::from_secs(10));
js_action
.execute::<serde_json::Value, serde_json::Value>(
&path,
&package_id,
&package_version,
name,
&volumes,
input,
timeout,
ProcessGroupId(0),
None,
)
.await
.unwrap()
.unwrap();
}
#[tokio::test]
async fn js_action_test_read_dir() {
let js_action = JsProcedure { args: vec![] };
let path: PathBuf = "test/js_action_execute/"
.parse::<PathBuf>()
.unwrap()
.canonicalize()
.unwrap();
let package_id = "test-package".parse().unwrap();
let package_version: Version = "0.3.0.3".parse().unwrap();
let name = ProcedureName::Action("test-read-dir".parse().unwrap());
let volumes: Volumes = serde_json::from_value(serde_json::json!({
"main": {
"type": "data"
},
"compat": {
"type": "assets"
},
"filebrowser" :{
"package-id": "filebrowser",
"path": "data",
"readonly": true,
"type": "pointer",
"volume-id": "main",
}
}))
.unwrap();
let input: Option<serde_json::Value> = None;
let timeout = Some(Duration::from_secs(10));
js_action
.execute::<serde_json::Value, serde_json::Value>(
&path,
&package_id,
&package_version,
name,
&volumes,
input,
timeout,
ProcessGroupId(0),
None,
)
.await
.unwrap()
.unwrap();
}
#[tokio::test]
async fn js_rsync() {

View File

@@ -2,6 +2,7 @@ use std::collections::BTreeSet;
use std::time::Duration;
use color_eyre::eyre::eyre;
use models::ImageId;
use patch_db::HasModel;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
@@ -9,7 +10,6 @@ use tracing::instrument;
use self::docker::{DockerContainers, DockerProcedure};
use crate::context::RpcContext;
use crate::id::ImageId;
use crate::s9pk::manifest::PackageId;
use crate::util::Version;
use crate::volume::Volumes;
@@ -40,7 +40,7 @@ impl PackageProcedure {
_ => false,
}
}
#[instrument]
#[instrument(skip_all)]
pub fn validate(
&self,
container: &Option<DockerContainers>,
@@ -58,7 +58,7 @@ impl PackageProcedure {
}
}
#[instrument(skip(ctx, input))]
#[instrument(skip_all)]
pub async fn execute<I: Serialize, O: DeserializeOwned + 'static>(
&self,
ctx: &RpcContext,
@@ -121,7 +121,7 @@ impl PackageProcedure {
}
}
#[instrument(skip(ctx, input))]
#[instrument(skip_all)]
pub async fn sandboxed<I: Serialize, O: DeserializeOwned>(
&self,
container: &Option<DockerContainers>,

View File

@@ -18,7 +18,7 @@ pub async fn properties(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Res
Ok(fetch_properties(ctx, id).await?)
}
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn fetch_properties(ctx: RpcContext, id: PackageId) -> Result<Value, Error> {
let mut db = ctx.db.handle();

View File

@@ -42,7 +42,7 @@ impl<
> S9pkPacker<'a, W, RLicense, RInstructions, RIcon, RDockerImages, RAssets, RScripts>
{
/// BLOCKING
#[instrument(skip(self))]
#[instrument(skip_all)]
pub async fn pack(mut self, key: &ed25519_dalek::Keypair) -> Result<(), Error> {
let header_pos = self.writer.stream_position().await?;
if header_pos != 0 {

View File

@@ -76,6 +76,9 @@ pub struct Manifest {
pub dependencies: Dependencies,
#[model]
pub containers: Option<DockerContainers>,
#[serde(default)]
pub replaces: Vec<String>,
}
impl Manifest {

View File

@@ -31,7 +31,7 @@ pub mod reader;
pub const SIG_CONTEXT: &'static [u8] = b"s9pk";
#[command(cli_only, display(display_none))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn pack(#[context] ctx: SdkContext, #[arg] path: Option<PathBuf>) -> Result<(), Error> {
use tokio::fs::File;

View File

@@ -10,6 +10,7 @@ use color_eyre::eyre::eyre;
use digest_old::Output;
use ed25519_dalek::PublicKey;
use futures::TryStreamExt;
use models::ImageId;
use sha2_old::{Digest, Sha512};
use tokio::fs::File;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, ReadBuf};
@@ -18,12 +19,14 @@ use tracing::instrument;
use super::header::{FileSection, Header, TableOfContents};
use super::manifest::{Manifest, PackageId};
use super::SIG_CONTEXT;
use crate::id::ImageId;
use crate::install::progress::InstallProgressTracker;
use crate::s9pk::docker::DockerReader;
use crate::util::Version;
use crate::{Error, ResultExt};
const MAX_REPLACES: usize = 10;
const MAX_TITLE_LEN: usize = 30;
#[pin_project::pin_project]
#[derive(Debug)]
pub struct ReadHandle<'a, R = File> {
@@ -88,7 +91,7 @@ pub struct ImageTag {
pub version: Version,
}
impl ImageTag {
#[instrument]
#[instrument(skip_all)]
pub fn validate(&self, id: &PackageId, version: &Version) -> Result<(), Error> {
if id != &self.package_id {
return Err(Error::new(
@@ -165,7 +168,7 @@ impl<R: AsyncRead + AsyncSeek + Unpin + Send + Sync> S9pkReader<InstallProgressT
}
}
impl<R: AsyncRead + AsyncSeek + Unpin + Send + Sync> S9pkReader<R> {
#[instrument(skip(self))]
#[instrument(skip_all)]
pub async fn validate(&mut self) -> Result<(), Error> {
if self.toc.icon.length > 102_400 {
// 100 KiB
@@ -241,6 +244,25 @@ impl<R: AsyncRead + AsyncSeek + Unpin + Send + Sync> S9pkReader<R> {
));
}
if man.replaces.len() >= MAX_REPLACES {
return Err(Error::new(
eyre!("Cannot have more than {MAX_REPLACES} replaces"),
crate::ErrorKind::ValidateS9pk,
));
}
if let Some(too_big) = man.replaces.iter().find(|x| x.len() >= MAX_REPLACES) {
return Err(Error::new(
eyre!("We have found a replaces of ({too_big}) that exceeds the max length of {MAX_TITLE_LEN} "),
crate::ErrorKind::ValidateS9pk,
));
}
if man.title.len() >= MAX_TITLE_LEN {
return Err(Error::new(
eyre!("Cannot have more than a length of {MAX_TITLE_LEN} for title"),
crate::ErrorKind::ValidateS9pk,
));
}
if man.containers.is_some()
&& matches!(man.main, crate::procedure::PackageProcedure::Docker(_))
{
@@ -264,7 +286,7 @@ impl<R: AsyncRead + AsyncSeek + Unpin + Send + Sync> S9pkReader<R> {
Ok(())
}
#[instrument(skip(self))]
#[instrument(skip_all)]
pub async fn image_tags(&mut self) -> Result<Vec<ImageTag>, Error> {
let mut tar = tokio_tar::Archive::new(self.docker_images().await?);
let mut entries = tar.entries()?;
@@ -292,7 +314,7 @@ impl<R: AsyncRead + AsyncSeek + Unpin + Send + Sync> S9pkReader<R> {
crate::ErrorKind::ParseS9pk,
))
}
#[instrument(skip(rdr))]
#[instrument(skip_all)]
pub async fn from_reader(mut rdr: R, check_sig: bool) -> Result<Self, Error> {
let header = Header::deserialize(&mut rdr).await?;

View File

@@ -10,12 +10,13 @@ use patch_db::DbHandle;
use rpc_toolkit::command;
use rpc_toolkit::yajrc::RpcError;
use serde::{Deserialize, Serialize};
use sqlx::{Connection, Executor, Postgres};
use sqlx::Connection;
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use torut::onion::{OnionAddressV3, TorSecretKeyV3};
use torut::onion::OnionAddressV3;
use tracing::instrument;
use crate::account::AccountInfo;
use crate::backup::restore::recover_full_embassy;
use crate::backup::target::BackupTargetFS;
use crate::context::rpc::RpcContextConfig;
@@ -28,25 +29,11 @@ use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::TmpMountGuard;
use crate::disk::util::{pvscan, recovery_info, DiskInfo, EmbassyOsRecoveryInfo};
use crate::disk::REPAIR_DISK_PATH;
use crate::hostname::{get_hostname, HostNameReceipt, Hostname};
use crate::hostname::Hostname;
use crate::init::{init, InitResult};
use crate::middleware::encrypt::EncryptedWire;
use crate::net::ssl::SslManager;
use crate::{Error, ErrorKind, ResultExt};
/// Reads the stored account password hash from the `account` table in the
/// secret store.
#[instrument(skip(secrets))]
pub async fn password_hash<Ex>(secrets: &mut Ex) -> Result<String, Error>
where
    for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
    let row = sqlx::query!("SELECT password FROM account")
        .fetch_one(secrets)
        .await?;
    Ok(row.password)
}
#[command(subcommands(status, disk, attach, execute, cifs, complete, get_pubkey, exit))]
pub fn setup() -> Result<(), Error> {
Ok(())
@@ -73,30 +60,26 @@ async fn setup_init(
let mut secrets_tx = secrets_handle.begin().await?;
let mut db_tx = db_handle.begin().await?;
if let Some(password) = password {
let set_password_receipt = crate::auth::SetPasswordReceipt::new(&mut db_tx).await?;
crate::auth::set_password(
&mut db_tx,
&set_password_receipt,
&mut secrets_tx,
&password,
)
.await?;
}
let mut account = AccountInfo::load(&mut secrets_tx).await?;
let tor_key = crate::net::tor::os_key(&mut secrets_tx).await?;
if let Some(password) = password {
account.set_password(&password)?;
account.save(&mut secrets_tx).await?;
crate::db::DatabaseModel::new()
.server_info()
.password_hash()
.put(&mut db_tx, &account.password)
.await?;
}
db_tx.commit().await?;
secrets_tx.commit().await?;
let hostname_receipts = HostNameReceipt::new(&mut db_handle).await?;
let hostname = get_hostname(&mut db_handle, &hostname_receipts).await?;
let (_, root_ca) = SslManager::init(secret_store, &mut db_handle)
.await?
.export_root_ca()
.await?;
Ok((hostname, tor_key.public().get_onion_address(), root_ca))
Ok((
account.hostname,
account.key.tor_address(),
account.root_ca_cert,
))
}
#[command(rpc_only)]
@@ -320,7 +303,7 @@ pub async fn execute(
Ok(())
}
#[instrument(skip(ctx))]
#[instrument(skip_all)]
#[command(rpc_only)]
pub async fn complete(#[context] ctx: SetupContext) -> Result<SetupResult, Error> {
let (guid, setup_result) = if let Some((guid, setup_result)) = &*ctx.setup_result.read().await {
@@ -337,14 +320,14 @@ pub async fn complete(#[context] ctx: SetupContext) -> Result<SetupResult, Error
Ok(setup_result)
}
#[instrument(skip(ctx))]
#[instrument(skip_all)]
#[command(rpc_only)]
pub async fn exit(#[context] ctx: SetupContext) -> Result<(), Error> {
ctx.shutdown.send(()).expect("failed to shutdown");
Ok(())
}
#[instrument(skip(ctx, embassy_password, recovery_password))]
#[instrument(skip_all)]
pub async fn execute_inner(
ctx: SetupContext,
embassy_logicalname: PathBuf,
@@ -383,38 +366,21 @@ async fn fresh_setup(
ctx: &SetupContext,
embassy_password: &str,
) -> Result<(Hostname, OnionAddressV3, X509), Error> {
let password = argon2::hash_encoded(
embassy_password.as_bytes(),
&rand::random::<[u8; 16]>()[..],
&argon2::Config::default(),
)
.with_kind(crate::ErrorKind::PasswordHashGeneration)?;
let tor_key = TorSecretKeyV3::generate();
let key_vec = tor_key.as_bytes().to_vec();
let account = AccountInfo::new(embassy_password)?;
let sqlite_pool = ctx.secret_store().await?;
sqlx::query!(
"INSERT INTO account (id, password, tor_key) VALUES ($1, $2, $3) ON CONFLICT (id) DO UPDATE SET password = $2, tor_key = $3",
0,
password,
key_vec,
)
.execute(&mut sqlite_pool.acquire().await?)
.await?;
account.save(&sqlite_pool).await?;
sqlite_pool.close().await;
let InitResult { secret_store, db } =
let InitResult { secret_store, .. } =
init(&RpcContextConfig::load(ctx.config_path.clone()).await?).await?;
let mut handle = db.handle();
let receipts = crate::hostname::HostNameReceipt::new(&mut handle).await?;
let hostname = get_hostname(&mut handle, &receipts).await?;
let (_, root_ca) = SslManager::init(secret_store.clone(), &mut handle)
.await?
.export_root_ca()
.await?;
secret_store.close().await;
Ok((hostname, tor_key.public().get_onion_address(), root_ca))
Ok((
account.hostname.clone(),
account.key.tor_address(),
account.root_ca_cert.clone(),
))
}
#[instrument(skip(ctx, embassy_password, recovery_password))]
#[instrument(skip_all)]
async fn recover(
ctx: SetupContext,
guid: Arc<String>,
@@ -433,7 +399,7 @@ async fn recover(
.await
}
#[instrument(skip(ctx, embassy_password))]
#[instrument(skip_all)]
async fn migrate(
ctx: SetupContext,
guid: Arc<String>,
@@ -462,6 +428,7 @@ async fn migrate(
force: true,
ignore_existing: false,
exclude: Vec::new(),
no_permissions: false,
},
)
.await?;
@@ -473,6 +440,7 @@ async fn migrate(
force: true,
ignore_existing: false,
exclude: vec!["tmp".to_owned()],
no_permissions: false,
},
)
.await?;

View File

@@ -1,7 +1,6 @@
use std::path::PathBuf;
use std::sync::Arc;
use patch_db::{LockType, PatchDbHandle};
use rpc_toolkit::command;
use crate::context::RpcContext;
@@ -9,20 +8,25 @@ use crate::disk::main::export;
use crate::init::{STANDBY_MODE_PATH, SYSTEM_REBUILD_PATH};
use crate::sound::SHUTDOWN;
use crate::util::{display_none, Invoke};
use crate::{Error, ErrorKind};
use crate::{Error, ErrorKind, IS_RASPBERRY_PI};
#[derive(Debug, Clone)]
pub struct Shutdown {
pub datadir: PathBuf,
pub disk_guid: Option<Arc<String>>,
pub restart: bool,
pub db_handle: Option<Arc<PatchDbHandle>>,
}
impl Shutdown {
/// BLOCKING
pub fn execute(&self) {
use std::process::Command;
if self.restart {
tracing::info!("Beginning server restart");
} else {
tracing::info!("Beginning server shutdown");
}
let rt = tokio::runtime::Builder::new_current_thread()
.enable_all()
.build()
@@ -54,39 +58,43 @@ impl Shutdown {
tracing::debug!("{:?}", e);
}
}
if self.restart {
if !*IS_RASPBERRY_PI || self.restart {
if let Err(e) = SHUTDOWN.play().await {
tracing::error!("Error Playing Shutdown Song: {}", e);
tracing::debug!("{:?}", e);
}
} else {
tokio::fs::write(STANDBY_MODE_PATH, "").await.unwrap();
Command::new("sync")
.invoke(ErrorKind::Filesystem)
.await
.unwrap();
}
});
drop(rt);
if !self.restart {
std::fs::write(STANDBY_MODE_PATH, "").unwrap();
if *IS_RASPBERRY_PI {
if !self.restart {
std::fs::write(STANDBY_MODE_PATH, "").unwrap();
Command::new("sync").spawn().unwrap().wait().unwrap();
}
Command::new("reboot").spawn().unwrap().wait().unwrap();
} else {
if self.restart {
Command::new("reboot").spawn().unwrap().wait().unwrap();
} else {
Command::new("shutdown")
.arg("-h")
.arg("now")
.spawn()
.unwrap()
.wait()
.unwrap();
}
}
Command::new("reboot").spawn().unwrap().wait().unwrap();
}
}
#[command(display(display_none))]
pub async fn shutdown(#[context] ctx: RpcContext) -> Result<(), Error> {
let mut db = ctx.db.handle();
crate::db::DatabaseModel::new()
.lock(&mut db, LockType::Write)
.await?;
ctx.shutdown
.send(Some(Shutdown {
datadir: ctx.datadir.clone(),
disk_guid: Some(ctx.disk_guid.clone()),
restart: false,
db_handle: Some(Arc::new(db)),
}))
.map_err(|_| ())
.expect("receiver dropped");
@@ -95,16 +103,11 @@ pub async fn shutdown(#[context] ctx: RpcContext) -> Result<(), Error> {
#[command(display(display_none))]
pub async fn restart(#[context] ctx: RpcContext) -> Result<(), Error> {
let mut db = ctx.db.handle();
crate::db::DatabaseModel::new()
.lock(&mut db, LockType::Write)
.await?;
ctx.shutdown
.send(Some(Shutdown {
datadir: ctx.datadir.clone(),
disk_guid: Some(ctx.disk_guid.clone()),
restart: true,
db_handle: Some(Arc::new(db)),
}))
.map_err(|_| ())
.expect("receiver dropped");

View File

@@ -1,6 +1,5 @@
use std::cmp::Ordering;
use std::path::Path;
use std::time::{Duration, Instant};
use std::time::Duration;
use divrem::DivRem;
use proptest_derive::Arbitrary;
@@ -8,130 +7,46 @@ use tokio::process::Command;
use tracing::instrument;
use crate::util::{FileLock, Invoke};
use crate::{Error, ErrorKind, ResultExt};
use crate::{Error, ErrorKind};
lazy_static::lazy_static! {
static ref SEMITONE_K: f64 = 2f64.powf(1f64 / 12f64);
static ref A_4: f64 = 440f64;
static ref C_0: f64 = *A_4 / SEMITONE_K.powf(9f64) / 2f64.powf(4f64);
static ref EXPORT_FILE: &'static Path = Path::new("/sys/class/pwm/pwmchip0/export");
static ref UNEXPORT_FILE: &'static Path = Path::new("/sys/class/pwm/pwmchip0/unexport");
static ref PERIOD_FILE: &'static Path = Path::new("/sys/class/pwm/pwmchip0/pwm0/period");
static ref DUTY_FILE: &'static Path = Path::new("/sys/class/pwm/pwmchip0/pwm0/duty_cycle");
static ref SWITCH_FILE: &'static Path = Path::new("/sys/class/pwm/pwmchip0/pwm0/enable");
}
pub const SOUND_LOCK_FILE: &'static str = "/etc/embassy/sound.lock";
struct SoundInterface {
use_beep: bool,
guard: Option<FileLock>,
}
impl SoundInterface {
#[instrument]
#[instrument(skip_all)]
pub async fn lease() -> Result<Self, Error> {
let guard = FileLock::new(SOUND_LOCK_FILE, true).await?;
if Command::new("which")
.arg("beep")
.invoke(ErrorKind::NotFound)
.await
.is_ok()
{
Ok(SoundInterface {
use_beep: true,
guard: Some(guard),
})
} else {
tokio::fs::write(&*EXPORT_FILE, "0")
.await
.or_else(|e| {
if e.raw_os_error() == Some(16) {
Ok(())
} else {
Err(e)
}
})
.with_ctx(|_| (ErrorKind::SoundError, EXPORT_FILE.to_string_lossy()))?;
let instant = Instant::now();
while tokio::fs::metadata(&*PERIOD_FILE).await.is_err()
&& instant.elapsed() < Duration::from_secs(1)
{
tokio::time::sleep(Duration::from_millis(1)).await;
}
Ok(SoundInterface {
use_beep: false,
guard: Some(guard),
})
}
Ok(SoundInterface { guard: Some(guard) })
}
#[instrument(skip(self))]
async fn play_pwm(&mut self, note: &Note) -> Result<(), Error> {
let curr_period = tokio::fs::read_to_string(&*PERIOD_FILE)
.await
.with_ctx(|_| (ErrorKind::SoundError, PERIOD_FILE.to_string_lossy()))?;
if curr_period == "0\n" {
tokio::fs::write(&*PERIOD_FILE, "1000")
.await
.with_ctx(|_| (ErrorKind::SoundError, PERIOD_FILE.to_string_lossy()))?;
}
let new_period = ((1.0 / note.frequency()) * 1_000_000_000.0).round() as u64;
tokio::fs::write(&*DUTY_FILE, "0")
.await
.with_ctx(|_| (ErrorKind::SoundError, DUTY_FILE.to_string_lossy()))?;
tokio::fs::write(&*PERIOD_FILE, format!("{}", new_period))
.await
.with_ctx(|_| (ErrorKind::SoundError, PERIOD_FILE.to_string_lossy()))?;
tokio::fs::write(&*DUTY_FILE, format!("{}", new_period / 2))
.await
.with_ctx(|_| (ErrorKind::SoundError, DUTY_FILE.to_string_lossy()))?;
tokio::fs::write(&*SWITCH_FILE, "1")
.await
.with_ctx(|_| (ErrorKind::SoundError, SWITCH_FILE.to_string_lossy()))?;
Ok(())
}
#[instrument(skip(self))]
async fn stop_pwm(&mut self) -> Result<(), Error> {
tokio::fs::write(&*SWITCH_FILE, "0")
.await
.with_ctx(|_| (ErrorKind::SoundError, SWITCH_FILE.to_string_lossy()))
}
#[instrument(skip(self))]
#[instrument(skip_all)]
pub async fn close(mut self) -> Result<(), Error> {
if let Some(lock) = self.guard.take() {
lock.unlock().await?;
}
Ok(())
}
#[instrument(skip(self))]
#[instrument(skip_all)]
pub async fn play_for_time_slice(
&mut self,
tempo_qpm: u16,
note: &Note,
time_slice: &TimeSlice,
) -> Result<(), Error> {
if self.use_beep {
Command::new("beep")
.arg("-f")
.arg(note.frequency().to_string())
.arg("-l")
.arg(time_slice.to_duration(tempo_qpm).as_millis().to_string())
.invoke(ErrorKind::SoundError)
.await?;
} else {
if let Err(e) = async {
self.play_pwm(note).await?;
tokio::time::sleep(time_slice.to_duration(tempo_qpm) * 19 / 20).await;
self.stop_pwm().await?;
tokio::time::sleep(time_slice.to_duration(tempo_qpm) / 20).await;
Ok::<_, Error>(())
}
.await
{
// we could catch this error and propagate but I'd much prefer the original error bubble up
let _mute = self.stop_pwm().await;
return Err(e);
}
}
Command::new("beep")
.arg("-f")
.arg(note.frequency().to_string())
.arg("-l")
.arg(time_slice.to_duration(tempo_qpm).as_millis().to_string())
.invoke(ErrorKind::SoundError)
.await?;
Ok(())
}
}
@@ -144,7 +59,7 @@ impl<'a, T> Song<T>
where
T: IntoIterator<Item = (Option<Note>, TimeSlice)> + Clone,
{
#[instrument(skip(self))]
#[instrument(skip_all)]
pub async fn play(&self) -> Result<(), Error> {
let mut sound = SoundInterface::lease().await?;
for (note, slice) in self.note_sequence.clone() {
@@ -164,15 +79,8 @@ where
impl Drop for SoundInterface {
fn drop(&mut self) {
let use_beep = self.use_beep;
let guard = self.guard.take();
tokio::spawn(async move {
if !use_beep {
if let Err(e) = tokio::fs::write(&*UNEXPORT_FILE, "0").await {
tracing::error!("Failed to Unexport Sound Interface: {}", e);
tracing::debug!("{:?}", e);
}
}
if let Some(guard) = guard {
if let Err(e) = guard.unlock().await {
tracing::error!("Failed to drop Sound Interface File Lock: {}", e);

View File

@@ -4,7 +4,8 @@ use chrono::Utc;
use clap::ArgMatches;
use color_eyre::eyre::eyre;
use rpc_toolkit::command;
use sqlx::{Pool, Postgres};
use sqlx::{Executor, Pool, Postgres};
use ssh_key::private::Ed25519PrivateKey;
use tracing::instrument;
use crate::context::RpcContext;
@@ -56,7 +57,7 @@ pub fn ssh() -> Result<(), Error> {
}
#[command(display(display_none))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn add(#[context] ctx: RpcContext, #[arg] key: PubKey) -> Result<SshKeyResponse, Error> {
let pool = &ctx.secret_store;
// check fingerprint for duplicates
@@ -91,7 +92,7 @@ pub async fn add(#[context] ctx: RpcContext, #[arg] key: PubKey) -> Result<SshKe
}
}
#[command(display(display_none))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn delete(#[context] ctx: RpcContext, #[arg] fingerprint: String) -> Result<(), Error> {
let pool = &ctx.secret_store;
// check if fingerprint is in DB
@@ -141,7 +142,7 @@ fn display_all_ssh_keys(all: Vec<SshKeyResponse>, matches: &ArgMatches) {
}
#[command(display(display_all_ssh_keys))]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn list(
#[context] ctx: RpcContext,
#[allow(unused_variables)]
@@ -171,7 +172,7 @@ pub async fn list(
.collect())
}
#[instrument(skip(pool, dest))]
#[instrument(skip_all)]
pub async fn sync_keys_from_db<P: AsRef<Path>>(
pool: &Pool<Postgres>,
dest: P,

View File

@@ -2,11 +2,11 @@ use std::collections::{BTreeMap, BTreeSet};
use chrono::{DateTime, Utc};
pub use models::HealthCheckId;
use models::ImageId;
use serde::{Deserialize, Serialize};
use tracing::instrument;
use crate::context::RpcContext;
use crate::id::ImageId;
use crate::procedure::docker::DockerContainers;
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
@@ -18,7 +18,7 @@ use crate::{Error, ResultExt};
#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct HealthChecks(pub BTreeMap<HealthCheckId, HealthCheck>);
impl HealthChecks {
#[instrument]
#[instrument(skip_all)]
pub fn validate(
&self,
container: &Option<DockerContainers>,
@@ -71,7 +71,7 @@ pub struct HealthCheck {
pub timeout: Option<Duration>,
}
impl HealthCheck {
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn check(
&self,
ctx: &RpcContext,

View File

@@ -1,5 +1,6 @@
use std::fmt;
use chrono::Utc;
use color_eyre::eyre::eyre;
use futures::FutureExt;
use rpc_toolkit::command;
@@ -22,6 +23,11 @@ use crate::{Error, ErrorKind, ResultExt};
pub const SYSTEMD_UNIT: &'static str = "embassyd";
#[command]
pub async fn time() -> Result<String, Error> {
Ok(Utc::now().to_rfc3339())
}
#[command(
custom_cli(cli_logs(async, context(CliContext))),
subcommands(self(logs_nofollow(async)), logs_follow),
@@ -504,7 +510,7 @@ async fn launch_disk_task(
}
}
#[instrument]
#[instrument(skip_all)]
async fn get_temp() -> Result<Celsius, Error> {
let temp_file = "/sys/class/thermal/thermal_zone0/temp";
let milli = tokio::fs::read_to_string(temp_file)
@@ -544,7 +550,7 @@ impl ProcStat {
}
}
#[instrument]
#[instrument(skip_all)]
async fn get_proc_stat() -> Result<ProcStat, Error> {
use tokio::io::AsyncBufReadExt;
let mut cpu_line = String::new();
@@ -586,7 +592,7 @@ async fn get_proc_stat() -> Result<ProcStat, Error> {
}
}
#[instrument]
#[instrument(skip_all)]
async fn get_cpu_info(last: &mut ProcStat) -> Result<MetricsCpu, Error> {
let new = get_proc_stat().await?;
let total_old = last.total();
@@ -613,7 +619,7 @@ pub struct MemInfo {
swap_total: Option<u64>,
swap_free: Option<u64>,
}
#[instrument]
#[instrument(skip_all)]
async fn get_mem_info() -> Result<MetricsMemory, Error> {
let contents = tokio::fs::read_to_string("/proc/meminfo").await?;
let mut mem_info = MemInfo {
@@ -687,7 +693,7 @@ async fn get_mem_info() -> Result<MetricsMemory, Error> {
})
}
#[instrument]
#[instrument(skip_all)]
async fn get_disk_info() -> Result<MetricsDisk, Error> {
let package_used_task = get_used("/embassy-data/package-data");
let package_available_task = get_available("/embassy-data/package-data");

View File

@@ -41,7 +41,7 @@ lazy_static! {
display(display_update_result),
metadata(sync_db = true)
)]
#[instrument(skip(ctx))]
#[instrument(skip_all)]
pub async fn update_system(
#[context] ctx: RpcContext,
#[arg(rename = "marketplace-url")] marketplace_url: Url,
@@ -75,7 +75,7 @@ fn display_update_result(status: UpdateResult, _: &ArgMatches) {
}
}
#[instrument(skip(ctx))]
#[instrument(skip_all)]
async fn maybe_do_update(
ctx: RpcContext,
marketplace_url: Url,
@@ -194,11 +194,11 @@ async fn maybe_do_update(
Ok(rev)
}
#[instrument(skip(ctx, eos_url))]
#[instrument(skip_all)]
async fn do_update(ctx: RpcContext, eos_url: EosUrl) -> Result<(), Error> {
let mut rsync = Rsync::new(
eos_url.rsync_path()?,
"/media/embassy/next",
"/media/embassy/next/",
Default::default(),
)
.await?;
@@ -303,8 +303,9 @@ async fn sync_boot() -> Result<(), Error> {
RsyncOptions {
delete: false,
force: false,
ignore_existing: true,
ignore_existing: false,
exclude: Vec::new(),
no_permissions: false,
},
)
.await?
@@ -324,15 +325,15 @@ async fn sync_boot() -> Result<(), Error> {
.arg("update-grub")
.invoke(ErrorKind::MigrationFailed)
.await?;
boot_mnt.unmount().await?;
proc_mnt.unmount().await?;
sys_mnt.unmount().await?;
dev_mnt.unmount().await?;
boot_mnt.unmount(false).await?;
proc_mnt.unmount(false).await?;
sys_mnt.unmount(false).await?;
dev_mnt.unmount(false).await?;
}
Ok(())
}
#[instrument]
#[instrument(skip_all)]
async fn swap_boot_label() -> Result<(), Error> {
tokio::fs::write("/media/embassy/config/upgrade", b"").await?;
Ok(())

Some files were not shown because too many files have changed in this diff Show More