Merge branch 'next' of github.com:Start9Labs/start-os into rebase/integration/refactors

This commit is contained in:
Aiden McClelland
2023-10-18 17:55:09 -06:00
174 changed files with 5736 additions and 4682 deletions

21
LICENSE Normal file
View File

@@ -0,0 +1,21 @@
MIT License
Copyright (c) 2023 Start9 Labs, Inc.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@@ -1,42 +0,0 @@
# START9 NON-COMMERCIAL LICENSE v1
Version 1, 22 September 2022
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
### 1.Definitions
"License" means version 1 of the Start9 Non-Commercial License.
"Licensor" means the Start9 Labs, Inc, or its successor(s) in interest, or a future assignee of the copyright.
"You" (or "Your") means an individual or organization exercising permissions granted by this License.
"Source Code" for a work means the preferred form of the work for making modifications to it.
"Object Code" means any non-source form of a work, including the machine-language output by a compiler or assembler.
"Work" means any work of authorship, whether in Source or Object form, made available under this License.
"Derivative Work" means any work, whether in Source or Object form, that is based on (or derived from) the Work.
"Distribute" means to convey or to publish and generally has the same meaning here as under U.S. Copyright law.
"Sell" means practicing any or all of the rights granted to you under the License to provide to third parties, for a fee or other consideration (including, without limitation, fees for hosting, consulting, or support services), a product or service whose value derives, entirely or substantially, from the functionality of the Work or Derivative Work.
### 2. Grant of Rights
Subject to the terms of this license, the Licensor grants you, the licensee, a non-exclusive, worldwide, royalty-free copyright license to access, audit, copy, modify, compile, run, test, distribute, or otherwise use the Software.
### 3. Limitations
1. The grant of rights under the License does NOT include, and the License does NOT grant You the right to Sell the Work or Derivative Work.
2. If you Distribute the Work or Derivative Work, you expressly undertake not to remove or modify, in any manner, the copyright notices attached to the Work or displayed in any output of the Work when run, and to reproduce these notices, in an identical manner, in any distributed copies of the Work or Derivative Work together with a copy of this License.
3. If you Distribute a Derivative Work, it must carry prominent notices stating that it has been modified from the Work, providing a relevant date.
### 4. Contributions
You hereby grant to Licensor a perpetual, irrevocable, worldwide, non-exclusive, royalty-free license to use and exploit any Derivative Work of which you are the author.
### 5. Disclaimer
THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. LICENSOR HAS NO OBLIGATION TO SUPPORT RECIPIENTS OF THE SOFTWARE.

View File

@@ -19,7 +19,7 @@ FRONTEND_INSTALL_WIZARD_SRC := $(shell find frontend/projects/install-wizard)
PATCH_DB_CLIENT_SRC := $(shell find patch-db/client -not -path patch-db/client/dist -and -not -path patch-db/client/node_modules) PATCH_DB_CLIENT_SRC := $(shell find patch-db/client -not -path patch-db/client/dist -and -not -path patch-db/client/node_modules)
GZIP_BIN := $(shell which pigz || which gzip) GZIP_BIN := $(shell which pigz || which gzip)
TAR_BIN := $(shell which gtar || which tar) TAR_BIN := $(shell which gtar || which tar)
ALL_TARGETS := $(EMBASSY_BINS) system-images/compat/docker-images/$(ARCH).tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar $(EMBASSY_SRC) $(shell if [ "$(OS_ARCH)" = "raspberrypi" ]; then echo cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep; fi) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) ALL_TARGETS := $(EMBASSY_BINS) system-images/compat/docker-images/$(ARCH).tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar $(EMBASSY_SRC) $(shell if [ "$(OS_ARCH)" = "raspberrypi" ]; then echo cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep; fi) $(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]; then echo cargo-deps/$(ARCH)-unknown-linux-gnu/release/tokio-console; fi') $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE)
ifeq ($(REMOTE),) ifeq ($(REMOTE),)
mkdir = mkdir -p $1 mkdir = mkdir -p $1
@@ -75,7 +75,7 @@ format:
sdk: sdk:
cd backend/ && ./install-sdk.sh cd backend/ && ./install-sdk.sh
startos_raspberrypi.img: $(BUILD_SRC) startos.raspberrypi.squashfs $(VERSION_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep | sudo startos_raspberrypi.img: $(BUILD_SRC) startos.raspberrypi.squashfs $(VERSION_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) | sudo
./build/raspberrypi/make-image.sh ./build/raspberrypi/make-image.sh
# For creating os images. DO NOT USE # For creating os images. DO NOT USE
@@ -85,9 +85,11 @@ install: $(ALL_TARGETS)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/startd) $(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/startd)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-cli) $(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-cli)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-sdk) $(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-sdk)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-deno)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/avahi-alias) $(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/avahi-alias)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/embassy-cli) $(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/embassy-cli)
if [ "$(OS_ARCH)" = "raspberrypi" ]; then $(call cp,cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep,$(DESTDIR)/usr/bin/pi-beep); fi if [ "$(OS_ARCH)" = "raspberrypi" ]; then $(call cp,cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep,$(DESTDIR)/usr/bin/pi-beep); fi
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then $(call cp,cargo-deps/$(ARCH)-unknown-linux-gnu/release/tokio-console,$(DESTDIR)/usr/bin/tokio-console); fi
$(call mkdir,$(DESTDIR)/usr/lib) $(call mkdir,$(DESTDIR)/usr/lib)
$(call rm,$(DESTDIR)/usr/lib/embassy) $(call rm,$(DESTDIR)/usr/lib/embassy)
@@ -122,7 +124,7 @@ update:
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi @if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(call ssh,"sudo rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next/") $(call ssh,"sudo rsync -a --delete --force --info=progress2 /media/embassy/embassyfs/current/ /media/embassy/next/")
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/embassy/next OS_ARCH=$(OS_ARCH) $(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/embassy/next OS_ARCH=$(OS_ARCH)
$(call ssh,"sudo touch /media/embassy/config/upgrade && sudo sync && sudo reboot") $(call ssh,'sudo NO_SYNC=1 /media/embassy/next/usr/lib/embassy/scripts/chroot-and-upgrade "apt-get install -y $(shell cat ./build/lib/depends)"')
emulate-reflash: emulate-reflash:
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi @if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
@@ -130,14 +132,14 @@ emulate-reflash:
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/embassy/next OS_ARCH=$(OS_ARCH) $(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/embassy/next OS_ARCH=$(OS_ARCH)
$(call ssh,"sudo touch /media/embassy/config/upgrade && sudo rm -f /media/embassy/config/disk.guid && sudo sync && sudo reboot") $(call ssh,"sudo touch /media/embassy/config/upgrade && sudo rm -f /media/embassy/config/disk.guid && sudo sync && sudo reboot")
system-images/compat/docker-images/aarch64.tar system-images/compat/docker-images/x86_64.tar: $(COMPAT_SRC) | sudo system-images/compat/docker-images/aarch64.tar system-images/compat/docker-images/x86_64.tar: $(COMPAT_SRC) backend/Cargo.lock | sudo
cd system-images/compat && make cd system-images/compat && make && touch docker-images/*.tar
system-images/utils/docker-images/aarch64.tar system-images/utils/docker-images/x86_64.tar: $(UTILS_SRC) | sudo system-images/utils/docker-images/aarch64.tar system-images/utils/docker-images/x86_64.tar: $(UTILS_SRC) | sudo
cd system-images/utils && make cd system-images/utils && make && touch docker-images/*.tar
system-images/binfmt/docker-images/aarch64.tar system-images/binfmt/docker-images/x86_64.tar: $(BINFMT_SRC) | sudo system-images/binfmt/docker-images/aarch64.tar system-images/binfmt/docker-images/x86_64.tar: $(BINFMT_SRC) | sudo
cd system-images/binfmt && make cd system-images/binfmt && make && touch docker-images/*.tar
snapshots: libs/snapshot_creator/Cargo.toml snapshots: libs/snapshot_creator/Cargo.toml
cd libs/ && ./build-v8-snapshot.sh cd libs/ && ./build-v8-snapshot.sh
@@ -198,3 +200,6 @@ backend: $(EMBASSY_BINS)
cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep: | sudo cargo-deps/aarch64-unknown-linux-gnu/release/pi-beep: | sudo
ARCH=aarch64 ./build-cargo-dep.sh pi-beep ARCH=aarch64 ./build-cargo-dep.sh pi-beep
cargo-deps/$(ARCH)-unknown-linux-gnu/release/tokio-console: | sudo
ARCH=$(ARCH) ./build-cargo-dep.sh tokio-console

View File

@@ -1,32 +1,35 @@
<div align="center"> <div align="center">
<img src="frontend/projects/shared/assets/img/icon_pwa.png" alt="StartOS Logo" width="16%" /> <img src="frontend/projects/shared/assets/img/icon.png" alt="StartOS Logo" width="16%" />
<h1 style="margin-top: 0;">StartOS</h1> <h1 style="margin-top: 0;">StartOS</h1>
<a href="https://github.com/Start9Labs/start-os/releases"> <a href="https://github.com/Start9Labs/start-os/releases">
<img src="https://img.shields.io/github/v/tag/Start9Labs/start-os?color=success" /> <img alt="GitHub release (with filter)" src="https://img.shields.io/github/v/release/start9labs/start-os?logo=github">
</a> </a>
<a href="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml"> <a href="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml">
<img src="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml/badge.svg"> <img src="https://github.com/Start9Labs/start-os/actions/workflows/startos-iso.yaml/badge.svg">
</a> </a>
<a href="https://twitter.com/start9labs"> <a href="https://heyapollo.com/product/startos">
<img src="https://img.shields.io/twitter/follow/start9labs?label=Follow"> <img alt="Static Badge" src="https://img.shields.io/badge/apollo-review%20%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%E2%AD%90%20-slateblue">
</a> </a>
<a href="http://mastodon.start9labs.com"> <a href="https://twitter.com/start9labs">
<img alt="X (formerly Twitter) Follow" src="https://img.shields.io/twitter/follow/start9labs">
</a>
<a href="https://mastodon.start9labs.com">
<img src="https://img.shields.io/mastodon/follow/000000001?domain=https%3A%2F%2Fmastodon.start9labs.com&label=Follow&style=social"> <img src="https://img.shields.io/mastodon/follow/000000001?domain=https%3A%2F%2Fmastodon.start9labs.com&label=Follow&style=social">
</a> </a>
<a href="https://matrix.to/#/#community:matrix.start9labs.com"> <a href="https://matrix.to/#/#community:matrix.start9labs.com">
<img src="https://img.shields.io/badge/community-matrix-yellow"> <img alt="Static Badge" src="https://img.shields.io/badge/community-matrix-yellow?logo=matrix">
</a> </a>
<a href="https://t.me/start9_labs"> <a href="https://t.me/start9_labs">
<img src="https://img.shields.io/badge/community-telegram-informational"> <img alt="Static Badge" src="https://img.shields.io/badge/community-telegram-blue?logo=telegram">
</a> </a>
<a href="https://docs.start9.com"> <a href="https://docs.start9.com">
<img src="https://img.shields.io/badge/support-docs-important"> <img alt="Static Badge" src="https://img.shields.io/badge/docs-orange?label=%F0%9F%91%A4%20support">
</a> </a>
<a href="https://matrix.to/#/#community-dev:matrix.start9labs.com"> <a href="https://matrix.to/#/#community-dev:matrix.start9labs.com">
<img src="https://img.shields.io/badge/developer-matrix-blueviolet"> <img alt="Static Badge" src="https://img.shields.io/badge/developer-matrix-darkcyan?logo=matrix">
</a> </a>
<a href="https://start9.com"> <a href="https://start9.com">
<img src="https://img.shields.io/website?down_color=lightgrey&down_message=offline&up_color=green&up_message=online&url=https%3A%2F%2Fstart9.com"> <img alt="Website" src="https://img.shields.io/website?up_message=online&down_message=offline&url=https%3A%2F%2Fstart9.com&logo=website&label=%F0%9F%8C%90%20website">
</a> </a>
</div> </div>
<br /> <br />
@@ -35,7 +38,7 @@
Welcome to the era of Sovereign Computing Welcome to the era of Sovereign Computing
</h3> </h3>
<p> <p>
StartOS is a Debian-based Linux distro optimized for running a personal server. It facilitates the discovery, installation, network configuration, service configuration, data backup, dependency management, and health monitoring of self-hosted software services. StartOS is an open source Linux distribution optimized for running a personal server. It facilitates the discovery, installation, network configuration, service configuration, data backup, dependency management, and health monitoring of self-hosted software services.
</p> </p>
</div> </div>
<br /> <br />
@@ -65,7 +68,7 @@ There are multiple ways to contribute: work directly on StartOS, package a servi
To report security issues, please email our security team - security@start9.com. To report security issues, please email our security team - security@start9.com.
## 🌎 Marketplace ## 🌎 Marketplace
There are dozens of service available for StartOS, and new ones are being added all the time. Check out the full list of available services [here](https://marketplace.start9.com/marketplace). To read more about the Marketplace ecosystem, check out this [blog post](https://blog.start9.com/start9-marketplace-strategy/) There are dozens of services available for StartOS, and new ones are being added all the time. Check out the full list of available services [here](https://marketplace.start9.com/marketplace). To read more about the Marketplace ecosystem, check out this [blog post](https://blog.start9.com/start9-marketplace-strategy/)
## 🖥️ User Interface Screenshots ## 🖥️ User Interface Screenshots

View File

@@ -0,0 +1,16 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
},
"nullable": []
},
"hash": "1ce5254f27de971fd87f5ab66d300f2b22433c86617a0dbf796bf2170186dd2e"
}

View File

@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM ssh_keys WHERE fingerprint = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text"
]
},
"nullable": []
},
"hash": "21471490cdc3adb206274cc68e1ea745ffa5da4479478c1fd2158a45324b1930"
}

View File

@@ -0,0 +1,40 @@
{
"db_name": "PostgreSQL",
"query": "SELECT hostname, path, username, password FROM cifs_shares WHERE id = $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "hostname",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "path",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "username",
"type_info": "Text"
},
{
"ordinal": 3,
"name": "password",
"type_info": "Text"
}
],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": [
false,
false,
false,
true
]
},
"hash": "28ea34bbde836e0618c5fc9bb7c36e463c20c841a7d6a0eb15be0f24f4a928ec"
}

View File

@@ -0,0 +1,34 @@
{
"db_name": "PostgreSQL",
"query": "SELECT * FROM ssh_keys WHERE fingerprint = $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "fingerprint",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "openssh_pubkey",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "created_at",
"type_info": "Text"
}
],
"parameters": {
"Left": [
"Text"
]
},
"nullable": [
false,
false,
false
]
},
"hash": "4099028a5c0de578255bf54a67cef6cb0f1e9a4e158260700f1639dd4b438997"
}

View File

@@ -0,0 +1,50 @@
{
"db_name": "PostgreSQL",
"query": "SELECT * FROM session WHERE logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "logged_in",
"type_info": "Timestamp"
},
{
"ordinal": 2,
"name": "logged_out",
"type_info": "Timestamp"
},
{
"ordinal": 3,
"name": "last_active",
"type_info": "Timestamp"
},
{
"ordinal": 4,
"name": "user_agent",
"type_info": "Text"
},
{
"ordinal": 5,
"name": "metadata",
"type_info": "Text"
}
],
"parameters": {
"Left": []
},
"nullable": [
false,
false,
true,
false,
true,
false
]
},
"hash": "4691e3a2ce80b59009ac17124f54f925f61dc5ea371903e62cdffa5d7b67ca96"
}

View File

@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text"
]
},
"nullable": []
},
"hash": "4bcfbefb1eb3181343871a1cd7fc3afb81c2be5c681cfa8b4be0ce70610e9c3a"
}

View File

@@ -0,0 +1,20 @@
{
"db_name": "PostgreSQL",
"query": "SELECT password FROM account",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "password",
"type_info": "Text"
}
],
"parameters": {
"Left": []
},
"nullable": [
false
]
},
"hash": "629be61c3c341c131ddbbff0293a83dbc6afd07cae69d246987f62cf0cc35c2a"
}

View File

@@ -0,0 +1,23 @@
{
"db_name": "PostgreSQL",
"query": "SELECT key FROM tor WHERE package = $1 AND interface = $2",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "key",
"type_info": "Bytea"
}
],
"parameters": {
"Left": [
"Text",
"Text"
]
},
"nullable": [
false
]
},
"hash": "687688055e63d27123cdc89a5bbbd8361776290a9411d527eaf1fdb40bef399d"
}

View File

@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "UPDATE session SET last_active = CURRENT_TIMESTAMP WHERE id = $1 AND logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text"
]
},
"nullable": []
},
"hash": "6d35ccf780fb2bb62586dd1d3df9c1550a41ee580dad3f49d35cb843ebef10ca"
}

View File

@@ -0,0 +1,24 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO UPDATE SET package = EXCLUDED.package RETURNING key",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "key",
"type_info": "Bytea"
}
],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
},
"nullable": [
false
]
},
"hash": "770c1017734720453dc87b58c385b987c5af5807151ff71a59000014586752e0"
}

View File

@@ -0,0 +1,65 @@
{
"db_name": "PostgreSQL",
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications WHERE id < $1 ORDER BY id DESC LIMIT $2",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "package_id",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "created_at",
"type_info": "Timestamp"
},
{
"ordinal": 3,
"name": "code",
"type_info": "Int4"
},
{
"ordinal": 4,
"name": "level",
"type_info": "Text"
},
{
"ordinal": 5,
"name": "title",
"type_info": "Text"
},
{
"ordinal": 6,
"name": "message",
"type_info": "Text"
},
{
"ordinal": 7,
"name": "data",
"type_info": "Text"
}
],
"parameters": {
"Left": [
"Int4",
"Int8"
]
},
"nullable": [
false,
true,
false,
false,
false,
false,
false,
true
]
},
"hash": "7b64f032d507e8ffe37c41f4c7ad514a66c421a11ab04c26d89a7aa8f6b67210"
}

View File

@@ -0,0 +1,19 @@
{
"db_name": "PostgreSQL",
"query": "\n INSERT INTO account (\n id,\n server_id,\n hostname,\n password,\n network_key,\n root_ca_key_pem,\n root_ca_cert_pem\n ) VALUES (\n 0, $1, $2, $3, $4, $5, $6\n ) ON CONFLICT (id) DO UPDATE SET\n server_id = EXCLUDED.server_id,\n hostname = EXCLUDED.hostname,\n password = EXCLUDED.password,\n network_key = EXCLUDED.network_key,\n root_ca_key_pem = EXCLUDED.root_ca_key_pem,\n root_ca_cert_pem = EXCLUDED.root_ca_cert_pem\n ",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Bytea",
"Text",
"Text"
]
},
"nullable": []
},
"hash": "7c7a3549c997eb75bf964ea65fbb98a73045adf618696cd838d79203ef5383fb"
}

View File

@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM tor WHERE package = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text"
]
},
"nullable": []
},
"hash": "7e0649d839927e57fa03ee51a2c9f96a8bdb0fc97ee8a3c6df1069e1e2b98576"
}

View File

@@ -0,0 +1,16 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO tor (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
},
"nullable": []
},
"hash": "8951b9126fbf60dbb5997241e11e3526b70bccf3e407327917294a993bc17ed5"
}

View File

@@ -0,0 +1,64 @@
{
"db_name": "PostgreSQL",
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications ORDER BY id DESC LIMIT $1",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "package_id",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "created_at",
"type_info": "Timestamp"
},
{
"ordinal": 3,
"name": "code",
"type_info": "Int4"
},
{
"ordinal": 4,
"name": "level",
"type_info": "Text"
},
{
"ordinal": 5,
"name": "title",
"type_info": "Text"
},
{
"ordinal": 6,
"name": "message",
"type_info": "Text"
},
{
"ordinal": 7,
"name": "data",
"type_info": "Text"
}
],
"parameters": {
"Left": [
"Int8"
]
},
"nullable": [
false,
true,
false,
false,
false,
false,
false,
true
]
},
"hash": "94d471bb374b4965c6cbedf8c17bbf6bea226d38efaf6559923c79a36d5ca08c"
}

View File

@@ -0,0 +1,44 @@
{
"db_name": "PostgreSQL",
"query": "SELECT id, hostname, path, username, password FROM cifs_shares",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "hostname",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "path",
"type_info": "Text"
},
{
"ordinal": 3,
"name": "username",
"type_info": "Text"
},
{
"ordinal": 4,
"name": "password",
"type_info": "Text"
}
],
"parameters": {
"Left": []
},
"nullable": [
false,
false,
false,
false,
true
]
},
"hash": "95c4ab4c645f3302568c6ff13d85ab58252362694cf0f56999bf60194d20583a"
}

View File

@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM cifs_shares WHERE id = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": []
},
"hash": "a60d6e66719325b08dc4ecfacaf337527233c84eee758ac9be967906e5841d27"
}

View File

@@ -0,0 +1,32 @@
{
"db_name": "PostgreSQL",
"query": "SELECT fingerprint, openssh_pubkey, created_at FROM ssh_keys",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "fingerprint",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "openssh_pubkey",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "created_at",
"type_info": "Text"
}
],
"parameters": {
"Left": []
},
"nullable": [
false,
false,
false
]
},
"hash": "a6b0c8909a3a5d6d9156aebfb359424e6b5a1d1402e028219e21726f1ebd282e"
}

View File

@@ -0,0 +1,18 @@
{
"db_name": "PostgreSQL",
"query": "UPDATE cifs_shares SET hostname = $1, path = $2, username = $3, password = $4 WHERE id = $5",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Text",
"Int4"
]
},
"nullable": []
},
"hash": "b1147beaaabbed89f2ab8c1e13ec4393a9a8fde2833cf096af766a979d94dee6"
}

View File

@@ -0,0 +1,20 @@
{
"db_name": "PostgreSQL",
"query": "SELECT openssh_pubkey FROM ssh_keys",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "openssh_pubkey",
"type_info": "Text"
}
],
"parameters": {
"Left": []
},
"nullable": [
false
]
},
"hash": "d5117054072476377f3c4f040ea429d4c9b2cf534e76f35c80a2bf60e8599cca"
}

View File

@@ -0,0 +1,19 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO notifications (package_id, code, level, title, message, data) VALUES ($1, $2, $3, $4, $5, $6)",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Int4",
"Text",
"Text",
"Text",
"Text"
]
},
"nullable": []
},
"hash": "da71f94b29798d1738d2b10b9a721ea72db8cfb362e7181c8226d9297507c62b"
}

View File

@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM notifications WHERE id = $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": []
},
"hash": "e185203cf84e43b801dfb23b4159e34aeaef1154dcd3d6811ab504915497ccf7"
}

View File

@@ -0,0 +1,20 @@
{
"db_name": "PostgreSQL",
"query": "SELECT tor_key FROM account WHERE id = 0",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "tor_key",
"type_info": "Bytea"
}
],
"parameters": {
"Left": []
},
"nullable": [
true
]
},
"hash": "e545696735f202f9d13cf22a561f3ff3f9aed7f90027a9ba97634bcb47d772f0"
}

View File

@@ -0,0 +1,16 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO session (id, user_agent, metadata) VALUES ($1, $2, $3)",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text"
]
},
"nullable": []
},
"hash": "e5843c5b0e7819b29aa1abf2266799bd4f82e761837b526a0972c3d4439a264d"
}

View File

@@ -0,0 +1,40 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT\n network_keys.package,\n network_keys.interface,\n network_keys.key,\n tor.key AS \"tor_key?\"\n FROM\n network_keys\n LEFT JOIN\n tor\n ON\n network_keys.package = tor.package\n AND\n network_keys.interface = tor.interface\n WHERE\n network_keys.package = $1\n ",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "package",
"type_info": "Text"
},
{
"ordinal": 1,
"name": "interface",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "key",
"type_info": "Bytea"
},
{
"ordinal": 3,
"name": "tor_key?",
"type_info": "Bytea"
}
],
"parameters": {
"Left": [
"Text"
]
},
"nullable": [
false,
false,
false,
false
]
},
"hash": "e95322a8e2ae3b93f1e974b24c0b81803f1e9ec9e8ebbf15cafddfc1c5a028ed"
}

View File

@@ -0,0 +1,14 @@
{
"db_name": "PostgreSQL",
"query": "DELETE FROM notifications WHERE id < $1",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": []
},
"hash": "eb750adaa305bdbf3c5b70aaf59139c7b7569602adb58f2d6b3a94da4f167b0a"
}

View File

@@ -0,0 +1,25 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO cifs_shares (hostname, path, username, password) VALUES ($1, $2, $3, $4) RETURNING id",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Int4"
}
],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Text"
]
},
"nullable": [
false
]
},
"hash": "ecc765d8205c0876956f95f76944ac6a5f34dd820c4073b7728c7067aab9fded"
}

View File

@@ -0,0 +1,16 @@
{
"db_name": "PostgreSQL",
"query": "INSERT INTO ssh_keys (fingerprint, openssh_pubkey, created_at) VALUES ($1, $2, $3)",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text"
]
},
"nullable": []
},
"hash": "f6d1c5ef0f9d9577bea8382318967b9deb46da75788c7fe6082b43821c22d556"
}

View File

@@ -0,0 +1,20 @@
{
"db_name": "PostgreSQL",
"query": "SELECT network_key FROM account WHERE id = 0",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "network_key",
"type_info": "Bytea"
}
],
"parameters": {
"Left": []
},
"nullable": [
false
]
},
"hash": "f7d2dae84613bcef330f7403352cc96547f3f6dbec11bf2eadfaf53ad8ab51b5"
}

View File

@@ -0,0 +1,62 @@
{
"db_name": "PostgreSQL",
"query": "SELECT * FROM account WHERE id = 0",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "password",
"type_info": "Text"
},
{
"ordinal": 2,
"name": "tor_key",
"type_info": "Bytea"
},
{
"ordinal": 3,
"name": "server_id",
"type_info": "Text"
},
{
"ordinal": 4,
"name": "hostname",
"type_info": "Text"
},
{
"ordinal": 5,
"name": "network_key",
"type_info": "Bytea"
},
{
"ordinal": 6,
"name": "root_ca_key_pem",
"type_info": "Text"
},
{
"ordinal": 7,
"name": "root_ca_cert_pem",
"type_info": "Text"
}
],
"parameters": {
"Left": []
},
"nullable": [
false,
false,
true,
true,
true,
false,
false,
false
]
},
"hash": "fe6e4f09f3028e5b6b6259e86cbad285680ce157aae9d7837ac020c8b2945e7f"
}

2032
backend/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -14,7 +14,7 @@ keywords = [
name = "start-os" name = "start-os"
readme = "README.md" readme = "README.md"
repository = "https://github.com/Start9Labs/start-os" repository = "https://github.com/Start9Labs/start-os"
version = "0.3.4-rev.4" version = "0.3.5"
[lib] [lib]
name = "startos" name = "startos"
@@ -26,145 +26,150 @@ path = "src/main.rs"
[features] [features]
avahi = ["avahi-sys"] avahi = ["avahi-sys"]
default = ["cli", "sdk", "daemon", "js_engine"]
dev = []
unstable = ["patch-db/unstable"]
avahi-alias = ["avahi"] avahi-alias = ["avahi"]
cli = [] cli = []
sdk = []
daemon = [] daemon = []
default = ["cli", "sdk", "daemon", "js_engine"]
dev = []
podman = [] podman = []
sdk = []
unstable = ["console-subscriber", "tokio/tracing"]
[dependencies] [dependencies]
aes = { version = "0.7.5", features = ["ctr"] } aes = { version = "0.7.5", features = ["ctr"] }
async-compression = { version = "0.3.15", features = [ async-compression = { version = "0.4.4", features = [
"gzip", "gzip",
"brotli", "brotli",
"tokio", "tokio",
] } ] }
async-stream = "0.3.3" async-stream = "0.3.5"
async-trait = "0.1.56" async-trait = "0.1.74"
avahi-sys = { git = "https://github.com/Start9Labs/avahi-sys", version = "0.10.0", branch = "feature/dynamic-linking", features = [ avahi-sys = { git = "https://github.com/Start9Labs/avahi-sys", version = "0.10.0", branch = "feature/dynamic-linking", features = [
"dynamic", "dynamic",
], optional = true } ], optional = true }
base32 = "0.4.0" base32 = "0.4.0"
base64 = "0.13.0" base64 = "0.21.4"
base64ct = "1.5.1" base64ct = "1.6.0"
basic-cookies = "0.1.4" basic-cookies = "0.1.4"
bimap = { version = "0.6.2", features = ["serde"] } bimap = { version = "0.6.2", features = ["serde"] }
bytes = "1" bytes = "1"
chrono = { version = "0.4.19", features = ["serde"] } chrono = { version = "0.4.31", features = ["serde"] }
clap = "3.2.8" clap = "3.2.25"
color-eyre = "0.6.1" color-eyre = "0.6.2"
cookie = "0.16.2" console = "0.15.7"
cookie_store = "0.19.0" console-subscriber = { version = "0.2", optional = true }
cookie = "0.18.0"
cookie_store = "0.20.0"
current_platform = "0.2.0" current_platform = "0.2.0"
digest = "0.10.3" digest = "0.10.7"
digest-old = { package = "digest", version = "0.9.0" }
divrem = "1.0.0" divrem = "1.0.0"
ed25519 = { version = "1.5.2", features = ["pkcs8", "pem", "alloc"] } ed25519 = { version = "2.2.3", features = ["pkcs8", "pem", "alloc"] }
ed25519-dalek = { version = "1.0.1", features = ["serde"] } ed25519-dalek = { version = "2.0.0", features = [
"serde",
"hazmat",
"zeroize",
"rand_core",
"digest",
] }
embassy_container_init = { path = "../libs/embassy_container_init" }
emver = { version = "0.1.7", git = "https://github.com/Start9Labs/emver-rs.git", features = [ emver = { version = "0.1.7", git = "https://github.com/Start9Labs/emver-rs.git", features = [
"serde", "serde",
] } ] }
fd-lock-rs = "0.1.4" fd-lock-rs = "0.1.4"
futures = "0.3.21" futures = "0.3.28"
git-version = "0.3.5" git-version = "0.3.5"
gpt = "3.0.0" gpt = "3.1.0"
helpers = { path = "../libs/helpers" } helpers = { path = "../libs/helpers" }
embassy_container_init = { path = "../libs/embassy_container_init" }
hex = "0.4.3" hex = "0.4.3"
hmac = "0.12.1" hmac = "0.12.1"
http = "0.2.8" http = "0.2.9"
hyper = { version = "0.14.20", features = ["full"] } hyper = { version = "0.14.27", features = ["full"] }
hyper-ws-listener = "0.2.0" hyper-ws-listener = "0.3.0"
id-pool = { version = "0.2.2", features = [ id-pool = { version = "0.2.2", features = [
"u16", "u16",
"serde", "serde",
], default-features = false } ], default-features = false }
imbl = "2.0.0" imbl = "2.0.2"
imbl-value = { git = "https://github.com/Start9Labs/imbl-value.git" } imbl-value = { git = "https://github.com/Start9Labs/imbl-value.git" }
include_dir = "0.7.3" include_dir = "0.7.3"
indexmap = { version = "1.9.1", features = ["serde"] } indexmap = { version = "2.0.2", features = ["serde"] }
ipnet = { version = "2.7.1", features = ["serde"] } indicatif = { version = "0.17.7", features = ["tokio"] }
ipnet = { version = "2.8.0", features = ["serde"] }
iprange = { version = "0.6.7", features = ["serde"] } iprange = { version = "0.6.7", features = ["serde"] }
isocountry = "0.3.2" isocountry = "0.3.2"
itertools = "0.10.3" itertools = "0.11.0"
jaq-core = "0.10.0" jaq-core = "0.10.1"
jaq-std = "0.10.0" jaq-std = "0.10.0"
josekit = "0.8.1" josekit = "0.8.4"
js_engine = { path = '../libs/js_engine', optional = true } js_engine = { path = '../libs/js_engine', optional = true }
jsonpath_lib = { git = "https://github.com/Start9Labs/jsonpath.git" } jsonpath_lib = { git = "https://github.com/Start9Labs/jsonpath.git" }
lazy_static = "1.4.0" lazy_static = "1.4.0"
libc = "0.2.126" libc = "0.2.149"
log = "0.4.17" log = "0.4.20"
mbrman = "0.5.0" mbrman = "0.5.2"
models = { version = "*", path = "../libs/models" } models = { version = "*", path = "../libs/models" }
new_mime_guess = "4" new_mime_guess = "4"
nix = "0.25.0" nix = { version = "0.27.1", features = ["user", "process", "signal", "fs"] }
nom = "7.1.1" nom = "7.1.3"
num = "0.4.0" num = "0.4.1"
num_enum = "0.5.7" num_enum = "0.7.0"
openssh-keys = "0.5.0" openssh-keys = "0.6.2"
openssl = { version = "0.10.41", features = ["vendored"] } openssl = { version = "0.10.57", features = ["vendored"] }
p256 = { version = "0.13.2", features = ["pem"] }
patch-db = { version = "*", path = "../patch-db/patch-db", features = [ patch-db = { version = "*", path = "../patch-db/patch-db", features = [
"trace", "trace",
] } ] }
p256 = { version = "0.12.0", features = ["pem"] } pbkdf2 = "0.12.2"
pbkdf2 = "0.11.0" pin-project = "1.1.3"
pin-project = "1.0.11" pkcs8 = { version = "0.10.2", features = ["std"] }
pkcs8 = { version = "0.9.0", features = ["std"] }
prettytable-rs = "0.10.0" prettytable-rs = "0.10.0"
proptest = "1.0.0" proptest = "1.3.1"
proptest-derive = "0.3.0" proptest-derive = "0.4.0"
rand = { version = "0.8.5", features = ["std"] } rand = { version = "0.8.5", features = ["std"] }
rand-old = { package = "rand", version = "0.7.3" } regex = "1.10.2"
regex = "1.6.0" reqwest = { version = "0.11.22", features = ["stream", "json", "socks"] }
reqwest = { version = "0.11.11", features = ["stream", "json", "socks"] } reqwest_cookie_store = "0.6.0"
reqwest_cookie_store = "0.5.0" rpassword = "7.2.0"
rpassword = "7.0.0"
rpc-toolkit = "0.2.2" rpc-toolkit = "0.2.2"
rust-argon2 = "1.0.0" rust-argon2 = "2.0.0"
scopeguard = "1.1" # because avahi-sys fucks your shit up scopeguard = "1.1" # because avahi-sys fucks your shit up
serde = { version = "1.0.139", features = ["derive", "rc"] } serde = { version = "1.0", features = ["derive", "rc"] }
serde_cbor = { package = "ciborium", version = "0.2.0" } serde_cbor = { package = "ciborium", version = "0.2.1" }
serde_json = "1.0.93" serde_json = "1.0"
serde_toml = { package = "toml", version = "0.5.9" } serde_toml = { package = "toml", version = "0.8.2" }
serde_with = { version = "2.0.1", features = ["macros", "json"] } serde_with = { version = "3.4.0", features = ["macros", "json"] }
serde_yaml = "0.9.11" serde_yaml = "0.9.25"
sha2 = "0.10.2" sha2 = "0.10.2"
sha2-old = { package = "sha2", version = "0.9.9" }
simple-logging = "2.0.2" simple-logging = "2.0.2"
sqlx = { version = "0.6.0", features = [ sqlx = { version = "0.7.2", features = [
"chrono", "chrono",
"offline",
"runtime-tokio-rustls", "runtime-tokio-rustls",
"postgres", "postgres",
] } ] }
ssh-key = { version = "0.5.1", features = ["ed25519"] } sscanf = "0.4.1"
stderrlog = "0.5.3" ssh-key = { version = "0.6.2", features = ["ed25519"] }
tar = "0.4.38" stderrlog = "0.5.4"
thiserror = "1.0.31" tar = "0.4.40"
tokio = { version = "1.23", features = ["full"] } thiserror = "1.0.49"
tokio-stream = { version = "0.1.11", features = ["io-util", "sync", "net"] } tokio = { version = "1", features = ["full"] }
tokio-tar = { git = "https://github.com/dr-bonez/tokio-tar.git" } tokio-rustls = "0.24.1"
tokio-tungstenite = { version = "0.17.1", features = ["native-tls"] }
tokio-rustls = "0.23.4"
tokio-socks = "0.5.1" tokio-socks = "0.5.1"
tokio-util = { version = "0.7.3", features = ["io"] } tokio-stream = { version = "0.1.14", features = ["io-util", "sync", "net"] }
tokio-tar = { git = "https://github.com/dr-bonez/tokio-tar.git" }
tokio-tungstenite = { version = "0.20.1", features = ["native-tls"] }
tokio-util = { version = "0.7.9", features = ["io"] }
torut = "0.2.1" torut = "0.2.1"
tracing = "0.1.35" tracing = "0.1.39"
tracing-error = "0.2.0" tracing-error = "0.2.0"
tracing-futures = "0.2.5" tracing-futures = "0.2.5"
tracing-subscriber = { version = "0.3.14", features = ["env-filter"] } tracing-journald = "0.3.0"
trust-dns-server = "0.22.0" tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
typed-builder = "0.10.0" trust-dns-server = "0.23.1"
url = { version = "2.2.2", features = ["serde"] } typed-builder = "0.17.0"
urlencoding = "2.1.2" url = { version = "2.4.1", features = ["serde"] }
uuid = { version = "1.1.2", features = ["v4"] } urlencoding = "2.1.3"
zeroize = "1.5.7" uuid = { version = "1.4.1", features = ["v4"] }
indicatif = { version = "0.17.6", features = ["tokio"] } zeroize = "1.6.0"
console = "^0.15"
[profile.test] [profile.test]
opt-level = 3 opt-level = 3

View File

@@ -22,49 +22,38 @@ if tty -s; then
USE_TTY="-it" USE_TTY="-it"
fi fi
alias 'rust-gnu-builder'='docker run $USE_TTY --rm -e "OS_ARCH=$OS_ARCH" -v "$HOME/.cargo/registry":/usr/local/cargo/registry -v "$(pwd)":/home/rust/src -w /home/rust/src -P start9/rust-arm-cross:aarch64'
alias 'rust-musl-builder'='docker run $USE_TTY --rm -e "OS_ARCH=$OS_ARCH" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
cd .. cd ..
FLAGS="" FLAGS=""
RUSTFLAGS=""
if [[ "$ENVIRONMENT" =~ (^|-)podman($|-) ]]; then if [[ "$ENVIRONMENT" =~ (^|-)podman($|-) ]]; then
FLAGS="podman,$FLAGS" FLAGS="podman,$FLAGS"
fi fi
if [[ "$ENVIRONMENT" =~ (^|-)unstable($|-) ]]; then if [[ "$ENVIRONMENT" =~ (^|-)unstable($|-) ]]; then
FLAGS="unstable,$FLAGS" FLAGS="unstable,$FLAGS"
RUSTFLAGS="$RUSTFLAGS --cfg tokio_unstable"
fi fi
if [[ "$ENVIRONMENT" =~ (^|-)dev($|-) ]]; then if [[ "$ENVIRONMENT" =~ (^|-)dev($|-) ]]; then
FLAGS="dev,$FLAGS" FLAGS="dev,$FLAGS"
fi fi
alias 'rust-gnu-builder'='docker run $USE_TTY --rm -e "OS_ARCH=$OS_ARCH" -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/usr/local/cargo/registry -v "$(pwd)":/home/rust/src -w /home/rust/src -P start9/rust-arm-cross:aarch64'
alias 'rust-musl-builder'='docker run $USE_TTY --rm -e "OS_ARCH=$OS_ARCH" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
set +e set +e
fail= fail=
if [[ "$FLAGS" = "" ]]; then echo "FLAGS=\"$FLAGS\""
rust-gnu-builder sh -c "(cd backend && cargo build --release --locked --features avahi-alias, --target=$ARCH-unknown-linux-gnu)" echo "RUSTFLAGS=\"$RUSTFLAGS\""
if test $? -ne 0; then rust-gnu-builder sh -c "(cd backend && cargo build --release --features avahi-alias,$FLAGS --locked --target=$ARCH-unknown-linux-gnu)"
fail=true if test $? -ne 0; then
fi fail=true
for ARCH in x86_64 aarch64
do
rust-musl-builder sh -c "(cd libs && cargo build --release --locked --bin embassy_container_init )"
if test $? -ne 0; then
fail=true
fi
done
else
echo "FLAGS=$FLAGS"
rust-gnu-builder sh -c "(cd backend && cargo build --release --features avahi-alias,$FLAGS --locked --target=$ARCH-unknown-linux-gnu)"
if test $? -ne 0; then
fail=true
fi
for ARCH in x86_64 aarch64
do
rust-musl-builder sh -c "(cd libs && cargo build --release --locked --bin embassy_container_init)"
if test $? -ne 0; then
fail=true
fi
done
fi fi
for ARCH in x86_64 aarch64
do
rust-musl-builder sh -c "(cd libs && cargo build --release --locked --bin embassy_container_init)"
if test $? -ne 0; then
fail=true
fi
done
set -e set -e
cd backend cd backend

View File

@@ -1,744 +0,0 @@
{
"db": "PostgreSQL",
"1ce5254f27de971fd87f5ab66d300f2b22433c86617a0dbf796bf2170186dd2e": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
}
},
"query": "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING"
},
"21471490cdc3adb206274cc68e1ea745ffa5da4479478c1fd2158a45324b1930": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text"
]
}
},
"query": "DELETE FROM ssh_keys WHERE fingerprint = $1"
},
"28ea34bbde836e0618c5fc9bb7c36e463c20c841a7d6a0eb15be0f24f4a928ec": {
"describe": {
"columns": [
{
"name": "hostname",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "path",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "username",
"ordinal": 2,
"type_info": "Text"
},
{
"name": "password",
"ordinal": 3,
"type_info": "Text"
}
],
"nullable": [
false,
false,
false,
true
],
"parameters": {
"Left": [
"Int4"
]
}
},
"query": "SELECT hostname, path, username, password FROM cifs_shares WHERE id = $1"
},
"4099028a5c0de578255bf54a67cef6cb0f1e9a4e158260700f1639dd4b438997": {
"describe": {
"columns": [
{
"name": "fingerprint",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "openssh_pubkey",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "created_at",
"ordinal": 2,
"type_info": "Text"
}
],
"nullable": [
false,
false,
false
],
"parameters": {
"Left": [
"Text"
]
}
},
"query": "SELECT * FROM ssh_keys WHERE fingerprint = $1"
},
"4691e3a2ce80b59009ac17124f54f925f61dc5ea371903e62cdffa5d7b67ca96": {
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "logged_in",
"ordinal": 1,
"type_info": "Timestamp"
},
{
"name": "logged_out",
"ordinal": 2,
"type_info": "Timestamp"
},
{
"name": "last_active",
"ordinal": 3,
"type_info": "Timestamp"
},
{
"name": "user_agent",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "metadata",
"ordinal": 5,
"type_info": "Text"
}
],
"nullable": [
false,
false,
true,
false,
true,
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT * FROM session WHERE logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP"
},
"4bcfbefb1eb3181343871a1cd7fc3afb81c2be5c681cfa8b4be0ce70610e9c3a": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text"
]
}
},
"query": "UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = $1"
},
"629be61c3c341c131ddbbff0293a83dbc6afd07cae69d246987f62cf0cc35c2a": {
"describe": {
"columns": [
{
"name": "password",
"ordinal": 0,
"type_info": "Text"
}
],
"nullable": [
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT password FROM account"
},
"687688055e63d27123cdc89a5bbbd8361776290a9411d527eaf1fdb40bef399d": {
"describe": {
"columns": [
{
"name": "key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
false
],
"parameters": {
"Left": [
"Text",
"Text"
]
}
},
"query": "SELECT key FROM tor WHERE package = $1 AND interface = $2"
},
"6d35ccf780fb2bb62586dd1d3df9c1550a41ee580dad3f49d35cb843ebef10ca": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text"
]
}
},
"query": "UPDATE session SET last_active = CURRENT_TIMESTAMP WHERE id = $1 AND logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP"
},
"770c1017734720453dc87b58c385b987c5af5807151ff71a59000014586752e0": {
"describe": {
"columns": [
{
"name": "key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
false
],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
}
},
"query": "INSERT INTO network_keys (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO UPDATE SET package = EXCLUDED.package RETURNING key"
},
"7b64f032d507e8ffe37c41f4c7ad514a66c421a11ab04c26d89a7aa8f6b67210": {
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Int4"
},
{
"name": "package_id",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "created_at",
"ordinal": 2,
"type_info": "Timestamp"
},
{
"name": "code",
"ordinal": 3,
"type_info": "Int4"
},
{
"name": "level",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "title",
"ordinal": 5,
"type_info": "Text"
},
{
"name": "message",
"ordinal": 6,
"type_info": "Text"
},
{
"name": "data",
"ordinal": 7,
"type_info": "Text"
}
],
"nullable": [
false,
true,
false,
false,
false,
false,
false,
true
],
"parameters": {
"Left": [
"Int4",
"Int8"
]
}
},
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications WHERE id < $1 ORDER BY id DESC LIMIT $2"
},
"7c7a3549c997eb75bf964ea65fbb98a73045adf618696cd838d79203ef5383fb": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Bytea",
"Text",
"Text"
]
}
},
"query": "\n INSERT INTO account (\n id,\n server_id,\n hostname,\n password,\n network_key,\n root_ca_key_pem,\n root_ca_cert_pem\n ) VALUES (\n 0, $1, $2, $3, $4, $5, $6\n ) ON CONFLICT (id) DO UPDATE SET\n server_id = EXCLUDED.server_id,\n hostname = EXCLUDED.hostname,\n password = EXCLUDED.password,\n network_key = EXCLUDED.network_key,\n root_ca_key_pem = EXCLUDED.root_ca_key_pem,\n root_ca_cert_pem = EXCLUDED.root_ca_cert_pem\n "
},
"7e0649d839927e57fa03ee51a2c9f96a8bdb0fc97ee8a3c6df1069e1e2b98576": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text"
]
}
},
"query": "DELETE FROM tor WHERE package = $1"
},
"8951b9126fbf60dbb5997241e11e3526b70bccf3e407327917294a993bc17ed5": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Bytea"
]
}
},
"query": "INSERT INTO tor (package, interface, key) VALUES ($1, $2, $3) ON CONFLICT (package, interface) DO NOTHING"
},
"94d471bb374b4965c6cbedf8c17bbf6bea226d38efaf6559923c79a36d5ca08c": {
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Int4"
},
{
"name": "package_id",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "created_at",
"ordinal": 2,
"type_info": "Timestamp"
},
{
"name": "code",
"ordinal": 3,
"type_info": "Int4"
},
{
"name": "level",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "title",
"ordinal": 5,
"type_info": "Text"
},
{
"name": "message",
"ordinal": 6,
"type_info": "Text"
},
{
"name": "data",
"ordinal": 7,
"type_info": "Text"
}
],
"nullable": [
false,
true,
false,
false,
false,
false,
false,
true
],
"parameters": {
"Left": [
"Int8"
]
}
},
"query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications ORDER BY id DESC LIMIT $1"
},
"95c4ab4c645f3302568c6ff13d85ab58252362694cf0f56999bf60194d20583a": {
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Int4"
},
{
"name": "hostname",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "path",
"ordinal": 2,
"type_info": "Text"
},
{
"name": "username",
"ordinal": 3,
"type_info": "Text"
},
{
"name": "password",
"ordinal": 4,
"type_info": "Text"
}
],
"nullable": [
false,
false,
false,
false,
true
],
"parameters": {
"Left": []
}
},
"query": "SELECT id, hostname, path, username, password FROM cifs_shares"
},
"a60d6e66719325b08dc4ecfacaf337527233c84eee758ac9be967906e5841d27": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Int4"
]
}
},
"query": "DELETE FROM cifs_shares WHERE id = $1"
},
"a6b0c8909a3a5d6d9156aebfb359424e6b5a1d1402e028219e21726f1ebd282e": {
"describe": {
"columns": [
{
"name": "fingerprint",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "openssh_pubkey",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "created_at",
"ordinal": 2,
"type_info": "Text"
}
],
"nullable": [
false,
false,
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT fingerprint, openssh_pubkey, created_at FROM ssh_keys"
},
"b1147beaaabbed89f2ab8c1e13ec4393a9a8fde2833cf096af766a979d94dee6": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Text",
"Int4"
]
}
},
"query": "UPDATE cifs_shares SET hostname = $1, path = $2, username = $3, password = $4 WHERE id = $5"
},
"d5117054072476377f3c4f040ea429d4c9b2cf534e76f35c80a2bf60e8599cca": {
"describe": {
"columns": [
{
"name": "openssh_pubkey",
"ordinal": 0,
"type_info": "Text"
}
],
"nullable": [
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT openssh_pubkey FROM ssh_keys"
},
"da71f94b29798d1738d2b10b9a721ea72db8cfb362e7181c8226d9297507c62b": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Int4",
"Text",
"Text",
"Text",
"Text"
]
}
},
"query": "INSERT INTO notifications (package_id, code, level, title, message, data) VALUES ($1, $2, $3, $4, $5, $6)"
},
"e185203cf84e43b801dfb23b4159e34aeaef1154dcd3d6811ab504915497ccf7": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Int4"
]
}
},
"query": "DELETE FROM notifications WHERE id = $1"
},
"e545696735f202f9d13cf22a561f3ff3f9aed7f90027a9ba97634bcb47d772f0": {
"describe": {
"columns": [
{
"name": "tor_key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
true
],
"parameters": {
"Left": []
}
},
"query": "SELECT tor_key FROM account WHERE id = 0"
},
"e5843c5b0e7819b29aa1abf2266799bd4f82e761837b526a0972c3d4439a264d": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text"
]
}
},
"query": "INSERT INTO session (id, user_agent, metadata) VALUES ($1, $2, $3)"
},
"e95322a8e2ae3b93f1e974b24c0b81803f1e9ec9e8ebbf15cafddfc1c5a028ed": {
"describe": {
"columns": [
{
"name": "package",
"ordinal": 0,
"type_info": "Text"
},
{
"name": "interface",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "key",
"ordinal": 2,
"type_info": "Bytea"
},
{
"name": "tor_key?",
"ordinal": 3,
"type_info": "Bytea"
}
],
"nullable": [
false,
false,
false,
false
],
"parameters": {
"Left": [
"Text"
]
}
},
"query": "\n SELECT\n network_keys.package,\n network_keys.interface,\n network_keys.key,\n tor.key AS \"tor_key?\"\n FROM\n network_keys\n LEFT JOIN\n tor\n ON\n network_keys.package = tor.package\n AND\n network_keys.interface = tor.interface\n WHERE\n network_keys.package = $1\n "
},
"eb750adaa305bdbf3c5b70aaf59139c7b7569602adb58f2d6b3a94da4f167b0a": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Int4"
]
}
},
"query": "DELETE FROM notifications WHERE id < $1"
},
"ecc765d8205c0876956f95f76944ac6a5f34dd820c4073b7728c7067aab9fded": {
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Int4"
}
],
"nullable": [
false
],
"parameters": {
"Left": [
"Text",
"Text",
"Text",
"Text"
]
}
},
"query": "INSERT INTO cifs_shares (hostname, path, username, password) VALUES ($1, $2, $3, $4) RETURNING id"
},
"f6d1c5ef0f9d9577bea8382318967b9deb46da75788c7fe6082b43821c22d556": {
"describe": {
"columns": [],
"nullable": [],
"parameters": {
"Left": [
"Text",
"Text",
"Text"
]
}
},
"query": "INSERT INTO ssh_keys (fingerprint, openssh_pubkey, created_at) VALUES ($1, $2, $3)"
},
"f7d2dae84613bcef330f7403352cc96547f3f6dbec11bf2eadfaf53ad8ab51b5": {
"describe": {
"columns": [
{
"name": "network_key",
"ordinal": 0,
"type_info": "Bytea"
}
],
"nullable": [
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT network_key FROM account WHERE id = 0"
},
"fe6e4f09f3028e5b6b6259e86cbad285680ce157aae9d7837ac020c8b2945e7f": {
"describe": {
"columns": [
{
"name": "id",
"ordinal": 0,
"type_info": "Int4"
},
{
"name": "password",
"ordinal": 1,
"type_info": "Text"
},
{
"name": "tor_key",
"ordinal": 2,
"type_info": "Bytea"
},
{
"name": "server_id",
"ordinal": 3,
"type_info": "Text"
},
{
"name": "hostname",
"ordinal": 4,
"type_info": "Text"
},
{
"name": "network_key",
"ordinal": 5,
"type_info": "Bytea"
},
{
"name": "root_ca_key_pem",
"ordinal": 6,
"type_info": "Text"
},
{
"name": "root_ca_cert_pem",
"ordinal": 7,
"type_info": "Text"
}
],
"nullable": [
false,
false,
true,
true,
true,
false,
false,
false
],
"parameters": {
"Left": []
}
},
"query": "SELECT * FROM account WHERE id = 0"
}
}

View File

@@ -1,5 +1,5 @@
use ed25519_dalek::{ExpandedSecretKey, SecretKey}; use digest::Digest;
use models::ResultExt; use ed25519_dalek::SecretKey;
use openssl::pkey::{PKey, Private}; use openssl::pkey::{PKey, Private};
use openssl::x509::X509; use openssl::x509::X509;
use sqlx::PgExecutor; use sqlx::PgExecutor;
@@ -7,7 +7,8 @@ use sqlx::PgExecutor;
use crate::hostname::{generate_hostname, generate_id, Hostname}; use crate::hostname::{generate_hostname, generate_id, Hostname};
use crate::net::keys::Key; use crate::net::keys::Key;
use crate::net::ssl::{generate_key, make_root_cert}; use crate::net::ssl::{generate_key, make_root_cert};
use crate::Error; use crate::prelude::*;
use crate::util::crypto::ed25519_expand_key;
fn hash_password(password: &str) -> Result<String, Error> { fn hash_password(password: &str) -> Result<String, Error> {
argon2::hash_encoded( argon2::hash_encoded(
@@ -51,13 +52,23 @@ impl AccountInfo {
let server_id = r.server_id.unwrap_or_else(generate_id); let server_id = r.server_id.unwrap_or_else(generate_id);
let hostname = r.hostname.map(Hostname).unwrap_or_else(generate_hostname); let hostname = r.hostname.map(Hostname).unwrap_or_else(generate_hostname);
let password = r.password; let password = r.password;
let network_key = SecretKey::from_bytes(&r.network_key)?; let network_key = SecretKey::try_from(r.network_key).map_err(|e| {
Error::new(
eyre!("expected vec of len 32, got len {}", e.len()),
ErrorKind::ParseDbField,
)
})?;
let tor_key = if let Some(k) = &r.tor_key { let tor_key = if let Some(k) = &r.tor_key {
ExpandedSecretKey::from_bytes(k)? <[u8; 64]>::try_from(&k[..]).map_err(|_| {
Error::new(
eyre!("expected vec of len 64, got len {}", k.len()),
ErrorKind::ParseDbField,
)
})?
} else { } else {
ExpandedSecretKey::from(&network_key) ed25519_expand_key(&network_key)
}; };
let key = Key::from_pair(None, network_key.to_bytes(), tor_key.to_bytes()); let key = Key::from_pair(None, network_key, tor_key);
let root_ca_key = PKey::private_key_from_pem(r.root_ca_key_pem.as_bytes())?; let root_ca_key = PKey::private_key_from_pem(r.root_ca_key_pem.as_bytes())?;
let root_ca_cert = X509::from_pem(r.root_ca_cert_pem.as_bytes())?; let root_ca_cert = X509::from_pem(r.root_ca_cert_pem.as_bytes())?;

View File

@@ -134,7 +134,7 @@ pub async fn action(
let manifest = ctx let manifest = ctx
.db .db
.peek() .peek()
.await? .await
.as_package_data() .as_package_data()
.as_idx(&pkg_id) .as_idx(&pkg_id)
.or_not_found(&pkg_id)? .or_not_found(&pkg_id)?

View File

@@ -160,7 +160,7 @@ pub async fn login(
) -> Result<(), Error> { ) -> Result<(), Error> {
let password = password.unwrap_or_default().decrypt(&ctx)?; let password = password.unwrap_or_default().decrypt(&ctx)?;
let mut handle = ctx.secret_store.acquire().await?; let mut handle = ctx.secret_store.acquire().await?;
check_password_against_db(&mut handle, &password).await?; check_password_against_db(handle.as_mut(), &password).await?;
let hash_token = HashSessionToken::new(); let hash_token = HashSessionToken::new();
let user_agent = req.headers.get("user-agent").and_then(|h| h.to_str().ok()); let user_agent = req.headers.get("user-agent").and_then(|h| h.to_str().ok());
@@ -172,7 +172,7 @@ pub async fn login(
user_agent, user_agent,
metadata, metadata,
) )
.execute(&mut handle) .execute(handle.as_mut())
.await?; .await?;
res.headers.insert( res.headers.insert(
"set-cookie", "set-cookie",
@@ -263,7 +263,7 @@ pub async fn list(
sessions: sqlx::query!( sessions: sqlx::query!(
"SELECT * FROM session WHERE logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP" "SELECT * FROM session WHERE logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP"
) )
.fetch_all(&mut ctx.secret_store.acquire().await?) .fetch_all(ctx.secret_store.acquire().await?.as_mut())
.await? .await?
.into_iter() .into_iter()
.map(|row| { .map(|row| {

View File

@@ -56,16 +56,16 @@ pub async fn backup_all(
package_ids: Option<OrdSet<PackageId>>, package_ids: Option<OrdSet<PackageId>>,
#[arg] password: crate::auth::PasswordType, #[arg] password: crate::auth::PasswordType,
) -> Result<(), Error> { ) -> Result<(), Error> {
let db = ctx.db.peek().await?; let db = ctx.db.peek().await;
let old_password_decrypted = old_password let old_password_decrypted = old_password
.as_ref() .as_ref()
.unwrap_or(&password) .unwrap_or(&password)
.clone() .clone()
.decrypt(&ctx)?; .decrypt(&ctx)?;
let password = password.decrypt(&ctx)?; let password = password.decrypt(&ctx)?;
check_password_against_db(&mut ctx.secret_store.acquire().await?, &password).await?; check_password_against_db(ctx.secret_store.acquire().await?.as_mut(), &password).await?;
let fs = target_id let fs = target_id
.load(&mut ctx.secret_store.acquire().await?) .load(ctx.secret_store.acquire().await?.as_mut())
.await?; .await?;
let mut backup_guard = BackupMountGuard::mount( let mut backup_guard = BackupMountGuard::mount(
TmpMountGuard::mount(&fs, ReadWrite).await?, TmpMountGuard::mount(&fs, ReadWrite).await?,
@@ -265,7 +265,7 @@ async fn perform_backup(
} }
} }
let ui = ctx.db.peek().await?.into_ui().de()?; let ui = ctx.db.peek().await.into_ui().de()?;
let mut os_backup_file = AtomicFile::new( let mut os_backup_file = AtomicFile::new(
backup_guard.lock().await.as_ref().join("os-backup.cbor"), backup_guard.lock().await.as_ref().join("os-backup.cbor"),

View File

@@ -134,7 +134,7 @@ impl BackupActions {
let marketplace_url = ctx let marketplace_url = ctx
.db .db
.peek() .peek()
.await? .await
.as_package_data() .as_package_data()
.as_idx(&pkg_id) .as_idx(&pkg_id)
.or_not_found(pkg_id)? .or_not_found(pkg_id)?

View File

@@ -52,7 +52,7 @@ pub async fn restore_packages_rpc(
#[arg] password: String, #[arg] password: String,
) -> Result<(), Error> { ) -> Result<(), Error> {
let fs = target_id let fs = target_id
.load(&mut ctx.secret_store.acquire().await?) .load(ctx.secret_store.acquire().await?.as_mut())
.await?; .await?;
let backup_guard = let backup_guard =
BackupMountGuard::mount(TmpMountGuard::mount(&fs, ReadWrite).await?, &password).await?; BackupMountGuard::mount(TmpMountGuard::mount(&fs, ReadWrite).await?, &password).await?;
@@ -310,7 +310,7 @@ async fn assure_restoring(
let mut insert_packages = BTreeMap::new(); let mut insert_packages = BTreeMap::new();
for id in ids { for id in ids {
let peek = ctx.db.peek().await?; let peek = ctx.db.peek().await;
let model = peek.as_package_data().as_idx(&id); let model = peek.as_package_data().as_idx(&id);
@@ -402,7 +402,7 @@ async fn restore_package<'a>(
iface.to_string(), iface.to_string(),
k, k,
) )
.execute(&mut secrets_tx).await?; .execute(secrets_tx.as_mut()).await?;
} }
// DEPRECATED // DEPRECATED
for (iface, key) in metadata.tor_keys { for (iface, key) in metadata.tor_keys {
@@ -413,7 +413,7 @@ async fn restore_package<'a>(
iface.to_string(), iface.to_string(),
k, k,
) )
.execute(&mut secrets_tx).await?; .execute(secrets_tx.as_mut()).await?;
} }
secrets_tx.commit().await?; secrets_tx.commit().await?;
drop(secrets); drop(secrets);

View File

@@ -142,7 +142,7 @@ pub async fn list(
let mut sql_handle = ctx.secret_store.acquire().await?; let mut sql_handle = ctx.secret_store.acquire().await?;
let (disks_res, cifs) = tokio::try_join!( let (disks_res, cifs) = tokio::try_join!(
crate::disk::util::list(&ctx.os_partitions), crate::disk::util::list(&ctx.os_partitions),
cifs::list(&mut sql_handle), cifs::list(sql_handle.as_mut()),
)?; )?;
Ok(disks_res Ok(disks_res
.into_iter() .into_iter()
@@ -233,7 +233,7 @@ pub async fn info(
let guard = BackupMountGuard::mount( let guard = BackupMountGuard::mount(
TmpMountGuard::mount( TmpMountGuard::mount(
&target_id &target_id
.load(&mut ctx.secret_store.acquire().await?) .load(ctx.secret_store.acquire().await?.as_mut())
.await?, .await?,
ReadWrite, ReadWrite,
) )
@@ -271,7 +271,7 @@ pub async fn mount(
TmpMountGuard::mount( TmpMountGuard::mount(
&target_id &target_id
.clone() .clone()
.load(&mut ctx.secret_store.acquire().await?) .load(ctx.secret_store.acquire().await?.as_mut())
.await?, .await?,
ReadWrite, ReadWrite,
) )

View File

@@ -5,6 +5,8 @@ pub mod avahi_alias;
pub mod deprecated; pub mod deprecated;
#[cfg(feature = "cli")] #[cfg(feature = "cli")]
pub mod start_cli; pub mod start_cli;
#[cfg(feature = "js_engine")]
pub mod start_deno;
#[cfg(feature = "daemon")] #[cfg(feature = "daemon")]
pub mod start_init; pub mod start_init;
#[cfg(feature = "sdk")] #[cfg(feature = "sdk")]
@@ -16,6 +18,8 @@ fn select_executable(name: &str) -> Option<fn()> {
match name { match name {
#[cfg(feature = "avahi-alias")] #[cfg(feature = "avahi-alias")]
"avahi-alias" => Some(avahi_alias::main), "avahi-alias" => Some(avahi_alias::main),
#[cfg(feature = "js_engine")]
"start-deno" => Some(start_deno::main),
#[cfg(feature = "cli")] #[cfg(feature = "cli")]
"start-cli" => Some(start_cli::main), "start-cli" => Some(start_cli::main),
#[cfg(feature = "sdk")] #[cfg(feature = "sdk")]

View File

@@ -0,0 +1,145 @@
use clap::Arg;
use rpc_toolkit::command;
use rpc_toolkit::run_cli;
use rpc_toolkit::yajrc::RpcError;
use serde_json::Value;
use crate::context::CliContext;
use crate::procedure::js_scripts::ExecuteArgs;
use crate::s9pk::manifest::PackageId;
use crate::util::logger::EmbassyLogger;
use crate::util::serde::{display_serializable, parse_stdin_deserializable};
use crate::version::{Current, VersionT};
use crate::Error;
lazy_static::lazy_static! {
static ref VERSION_STRING: String = Current::new().semver().to_string();
}
#[command(subcommands(execute, sandbox))]
fn deno_api() -> Result<(), Error> {
Ok(())
}
#[command(cli_only, display(display_serializable))]
async fn execute(
#[arg(stdin, parse(parse_stdin_deserializable))] arg: ExecuteArgs,
) -> Result<Result<Value, (i32, String)>, Error> {
let ExecuteArgs {
procedure,
directory,
pkg_id,
pkg_version,
name,
volumes,
input,
} = arg;
PackageLogger::init(&pkg_id);
procedure
.execute_impl(&directory, &pkg_id, &pkg_version, name, &volumes, input)
.await
}
#[command(cli_only, display(display_serializable))]
async fn sandbox(
#[arg(stdin, parse(parse_stdin_deserializable))] arg: ExecuteArgs,
) -> Result<Result<Value, (i32, String)>, Error> {
let ExecuteArgs {
procedure,
directory,
pkg_id,
pkg_version,
name,
volumes,
input,
} = arg;
PackageLogger::init(&pkg_id);
procedure
.sandboxed_impl(&directory, &pkg_id, &pkg_version, &volumes, input, name)
.await
}
use tracing::Subscriber;
use tracing_subscriber::util::SubscriberInitExt;
#[derive(Clone)]
struct PackageLogger {}
impl PackageLogger {
fn base_subscriber(id: &PackageId) -> impl Subscriber {
use tracing_error::ErrorLayer;
use tracing_subscriber::prelude::*;
use tracing_subscriber::{fmt, EnvFilter};
let filter_layer = EnvFilter::builder()
.with_default_directive(
format!("{}=info", std::module_path!().split("::").next().unwrap())
.parse()
.unwrap(),
)
.from_env_lossy();
let fmt_layer = fmt::layer().with_writer(std::io::stderr).with_target(true);
let journald_layer = tracing_journald::layer()
.unwrap()
.with_syslog_identifier(format!("{id}.embassy"));
let sub = tracing_subscriber::registry()
.with(filter_layer)
.with(fmt_layer)
.with(journald_layer)
.with(ErrorLayer::default());
sub
}
pub fn init(id: &PackageId) -> Self {
Self::base_subscriber(id).init();
color_eyre::install().unwrap_or_else(|_| tracing::warn!("tracing too many times"));
Self {}
}
}
fn inner_main() -> Result<(), Error> {
run_cli!({
command: deno_api,
app: app => app
.name("StartOS Deno Executor")
.version(&**VERSION_STRING)
.arg(
clap::Arg::with_name("config")
.short('c')
.long("config")
.takes_value(true),
),
context: matches => {
CliContext::init(matches)?
},
exit: |e: RpcError| {
match e.data {
Some(Value::String(s)) => eprintln!("{}: {}", e.message, s),
Some(Value::Object(o)) => if let Some(Value::String(s)) = o.get("details") {
eprintln!("{}: {}", e.message, s);
if let Some(Value::String(s)) = o.get("debug") {
tracing::debug!("{}", s)
}
}
Some(a) => eprintln!("{}: {}", e.message, a),
None => eprintln!("{}", e.message),
}
std::process::exit(e.code);
}
});
Ok(())
}
pub fn main() {
match inner_main() {
Ok(_) => (),
Err(e) => {
eprintln!("{}", e.source);
tracing::debug!("{:?}", e.source);
drop(e.source);
std::process::exit(e.kind as i32)
}
}
}

View File

@@ -11,6 +11,7 @@ use crate::context::{DiagnosticContext, InstallContext, SetupContext};
use crate::disk::fsck::RepairStrategy; use crate::disk::fsck::RepairStrategy;
use crate::disk::main::DEFAULT_PASSWORD; use crate::disk::main::DEFAULT_PASSWORD;
use crate::disk::REPAIR_DISK_PATH; use crate::disk::REPAIR_DISK_PATH;
use crate::firmware::update_firmware;
use crate::init::STANDBY_MODE_PATH; use crate::init::STANDBY_MODE_PATH;
use crate::net::web_server::WebServer; use crate::net::web_server::WebServer;
use crate::shutdown::Shutdown; use crate::shutdown::Shutdown;
@@ -19,7 +20,14 @@ use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt, OS_ARCH}; use crate::{Error, ErrorKind, ResultExt, OS_ARCH};
#[instrument(skip_all)] #[instrument(skip_all)]
async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> { async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> {
if update_firmware().await?.0 {
return Ok(Some(Shutdown {
export_args: None,
restart: true,
}));
}
Command::new("ln") Command::new("ln")
.arg("-sf") .arg("-sf")
.arg("/usr/lib/embassy/scripts/fake-apt") .arg("/usr/lib/embassy/scripts/fake-apt")
@@ -146,7 +154,7 @@ async fn setup_or_init(cfg_path: Option<PathBuf>) -> Result<(), Error> {
crate::init::init(&cfg).await?; crate::init::init(&cfg).await?;
} }
Ok(()) Ok(None)
} }
async fn run_script_if_exists<P: AsRef<Path>>(path: P) { async fn run_script_if_exists<P: AsRef<Path>>(path: P) {
@@ -180,46 +188,47 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
run_script_if_exists("/media/embassy/config/preinit.sh").await; run_script_if_exists("/media/embassy/config/preinit.sh").await;
let res = if let Err(e) = setup_or_init(cfg_path.clone()).await { let res = match setup_or_init(cfg_path.clone()).await {
async move { Err(e) => {
tracing::error!("{}", e.source); async move {
tracing::debug!("{}", e.source); tracing::error!("{}", e.source);
crate::sound::BEETHOVEN.play().await?; tracing::debug!("{}", e.source);
crate::sound::BEETHOVEN.play().await?;
let ctx = DiagnosticContext::init( let ctx = DiagnosticContext::init(
cfg_path, cfg_path,
if tokio::fs::metadata("/media/embassy/config/disk.guid") if tokio::fs::metadata("/media/embassy/config/disk.guid")
.await .await
.is_ok() .is_ok()
{ {
Some(Arc::new( Some(Arc::new(
tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy tokio::fs::read_to_string("/media/embassy/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
.await? .await?
.trim() .trim()
.to_owned(), .to_owned(),
)) ))
} else { } else {
None None
}, },
e, e,
) )
.await?; .await?;
let server = WebServer::diagnostic( let server = WebServer::diagnostic(
SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80), SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80),
ctx.clone(), ctx.clone(),
) )
.await?; .await?;
let shutdown = ctx.shutdown.subscribe().recv().await.unwrap(); let shutdown = ctx.shutdown.subscribe().recv().await.unwrap();
server.shutdown().await; server.shutdown().await;
Ok(shutdown) Ok(shutdown)
}
.await
} }
.await Ok(s) => Ok(s),
} else {
Ok(None)
}; };
run_script_if_exists("/media/embassy/config/postinit.sh").await; run_script_if_exists("/media/embassy/config/postinit.sh").await;

View File

@@ -16,7 +16,7 @@ use crate::{Error, ErrorKind, ResultExt};
#[instrument(skip_all)] #[instrument(skip_all)]
async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> { async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error> {
let (rpc_ctx, server, shutdown) = { let (rpc_ctx, server, shutdown) = async {
let rpc_ctx = RpcContext::init( let rpc_ctx = RpcContext::init(
cfg_path, cfg_path,
Arc::new( Arc::new(
@@ -91,8 +91,9 @@ async fn inner_main(cfg_path: Option<PathBuf>) -> Result<Option<Shutdown>, Error
sig_handler.abort(); sig_handler.abort();
(rpc_ctx, server, shutdown) Ok::<_, Error>((rpc_ctx, server, shutdown))
}; }
.await?;
server.shutdown().await; server.shutdown().await;
rpc_ctx.shutdown().await?; rpc_ctx.shutdown().await?;

View File

@@ -14,9 +14,9 @@ use rpc_toolkit::command;
use tracing::instrument; use tracing::instrument;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::db::model::CurrentDependencies;
use crate::prelude::*; use crate::prelude::*;
use crate::s9pk::manifest::{Manifest, PackageId}; use crate::s9pk::manifest::{PackageId};
use crate::util::display_none; use crate::util::display_none;
use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat}; use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat};
use crate::Error; use crate::Error;
@@ -167,7 +167,7 @@ pub async fn get(
#[arg(long = "format")] #[arg(long = "format")]
format: Option<IoFormat>, format: Option<IoFormat>,
) -> Result<ConfigRes, Error> { ) -> Result<ConfigRes, Error> {
let db = ctx.db.peek().await?; let db = ctx.db.peek().await;
let manifest = db let manifest = db
.as_package_data() .as_package_data()
.as_idx(&id) .as_idx(&id)
@@ -256,7 +256,7 @@ pub async fn configure(
id: &PackageId, id: &PackageId,
configure_context: ConfigureContext, configure_context: ConfigureContext,
) -> Result<BTreeMap<PackageId, String>, Error> { ) -> Result<BTreeMap<PackageId, String>, Error> {
let db = ctx.db.peek().await?; let db = ctx.db.peek().await;
let package = db let package = db
.as_package_data() .as_package_data()
.as_idx(id) .as_idx(id)

View File

@@ -1696,7 +1696,6 @@ impl TorAddressPointer {
.db .db
.peek() .peek()
.await .await
.map_err(|e| ConfigurationError::SystemError(e))?
.as_package_data() .as_package_data()
.as_idx(&self.package_id) .as_idx(&self.package_id)
.and_then(|pde| pde.as_installed()) .and_then(|pde| pde.as_installed())
@@ -1739,7 +1738,6 @@ impl LanAddressPointer {
.db .db
.peek() .peek()
.await .await
.map_err(|e| ConfigurationError::SystemError(e))?
.as_package_data() .as_package_data()
.as_idx(&self.package_id) .as_idx(&self.package_id)
.and_then(|pde| pde.as_installed()) .and_then(|pde| pde.as_installed())
@@ -1775,11 +1773,7 @@ impl ConfigPointer {
Ok(self.select(&Value::Object(cfg.clone()))) Ok(self.select(&Value::Object(cfg.clone())))
} else { } else {
let id = &self.package_id; let id = &self.package_id;
let db = ctx let db = ctx.db.peek().await;
.db
.peek()
.await
.map_err(|e| ConfigurationError::SystemError(e))?;
let manifest = db.as_package_data().as_idx(id).map(|pde| pde.as_manifest()); let manifest = db.as_package_data().as_idx(id).map(|pde| pde.as_manifest());
let cfg_actions = manifest.and_then(|m| m.as_config().transpose_ref()); let cfg_actions = manifest.and_then(|m| m.as_config().transpose_ref());
if let (Some(manifest), Some(cfg_actions)) = (manifest, cfg_actions) { if let (Some(manifest), Some(cfg_actions)) = (manifest, cfg_actions) {
@@ -1900,10 +1894,11 @@ impl TorKeyPointer {
)); ));
} }
let key = Key::for_interface( let key = Key::for_interface(
&mut secrets secrets
.acquire() .acquire()
.await .await
.map_err(|e| ConfigurationError::SystemError(e.into()))?, .map_err(|e| ConfigurationError::SystemError(e.into()))?
.as_mut(),
Some((self.package_id.clone(), self.interface.clone())), Some((self.package_id.clone(), self.interface.clone())),
) )
.await .await

View File

@@ -6,8 +6,7 @@ use std::sync::Arc;
use clap::ArgMatches; use clap::ArgMatches;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use cookie::Cookie; use cookie_store::{CookieStore, RawCookie};
use cookie_store::CookieStore;
use josekit::jwk::Jwk; use josekit::jwk::Jwk;
use reqwest::Proxy; use reqwest::Proxy;
use reqwest_cookie_store::CookieStoreMutex; use reqwest_cookie_store::CookieStoreMutex;
@@ -111,7 +110,10 @@ impl CliContext {
}; };
if let Ok(local) = std::fs::read_to_string(LOCAL_AUTH_COOKIE_PATH) { if let Ok(local) = std::fs::read_to_string(LOCAL_AUTH_COOKIE_PATH) {
store store
.insert_raw(&Cookie::new("local", local), &"http://localhost".parse()?) .insert_raw(
&RawCookie::new("local", local),
&"http://localhost".parse()?,
)
.with_kind(crate::ErrorKind::Network)?; .with_kind(crate::ErrorKind::Network)?;
} }
store store

View File

@@ -22,6 +22,7 @@ use crate::account::AccountInfo;
use crate::core::rpc_continuations::{RequestGuid, RestHandler, RpcContinuation}; use crate::core::rpc_continuations::{RequestGuid, RestHandler, RpcContinuation};
use crate::db::model::{CurrentDependents, Database, PackageDataEntryMatchModelRef}; use crate::db::model::{CurrentDependents, Database, PackageDataEntryMatchModelRef};
use crate::db::prelude::PatchDbExt; use crate::db::prelude::PatchDbExt;
use crate::dependencies::compute_dependency_config_errs;
use crate::disk::OsPartitionInfo; use crate::disk::OsPartitionInfo;
use crate::init::init_postgres; use crate::init::init_postgres;
use crate::install::cleanup::{cleanup_failed, uninstall}; use crate::install::cleanup::{cleanup_failed, uninstall};
@@ -155,8 +156,7 @@ impl RpcContext {
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))), .unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
tor_proxy, tor_proxy,
base.dns_bind base.dns_bind
.as_ref() .as_deref()
.map(|v| v.as_slice())
.unwrap_or(&[SocketAddr::from(([127, 0, 0, 1], 53))]), .unwrap_or(&[SocketAddr::from(([127, 0, 0, 1], 53))]),
SslManager::new(&account)?, SslManager::new(&account)?,
&account.hostname, &account.hostname,
@@ -217,11 +217,8 @@ impl RpcContext {
}); });
let res = Self(seed.clone()); let res = Self(seed.clone());
res.cleanup().await?; res.cleanup_and_initialize().await?;
tracing::info!("Cleaned up transient states"); tracing::info!("Cleaned up transient states");
let peeked = res.db.peek().await?;
res.managers.init(res.clone(), peeked).await?;
tracing::info!("Initialized Package Managers");
Ok(res) Ok(res)
} }
@@ -236,7 +233,7 @@ impl RpcContext {
} }
#[instrument(skip(self))] #[instrument(skip(self))]
pub async fn cleanup(&self) -> Result<(), Error> { pub async fn cleanup_and_initialize(&self) -> Result<(), Error> {
self.db self.db
.mutate(|f| { .mutate(|f| {
let mut current_dependents = f let mut current_dependents = f
@@ -278,9 +275,10 @@ impl RpcContext {
Ok(()) Ok(())
}) })
.await?; .await?;
let peek = self.db.peek().await?;
let peek = self.db.peek().await;
for (package_id, package) in peek.as_package_data().as_entries()?.into_iter() { for (package_id, package) in peek.as_package_data().as_entries()?.into_iter() {
let package = package.clone();
let action = match package.as_match() { let action = match package.as_match() {
PackageDataEntryMatchModelRef::Installing(_) PackageDataEntryMatchModelRef::Installing(_)
| PackageDataEntryMatchModelRef::Restoring(_) | PackageDataEntryMatchModelRef::Restoring(_)
@@ -288,7 +286,12 @@ impl RpcContext {
cleanup_failed(self, &package_id).await cleanup_failed(self, &package_id).await
} }
PackageDataEntryMatchModelRef::Removing(_) => { PackageDataEntryMatchModelRef::Removing(_) => {
uninstall(self, &mut self.secret_store.acquire().await?, &package_id).await uninstall(
self,
self.secret_store.acquire().await?.as_mut(),
&package_id,
)
.await
} }
PackageDataEntryMatchModelRef::Installed(m) => { PackageDataEntryMatchModelRef::Installed(m) => {
let version = m.as_manifest().as_version().clone().de()?; let version = m.as_manifest().as_version().clone().de()?;
@@ -298,7 +301,7 @@ impl RpcContext {
&self.datadir, &self.datadir,
&package_id, &package_id,
&version, &version,
&volume_id, volume_id,
)) ))
.with_kind(ErrorKind::Filesystem)?; .with_kind(ErrorKind::Filesystem)?;
if tokio::fs::metadata(&tmp_path).await.is_ok() { if tokio::fs::metadata(&tmp_path).await.is_ok() {
@@ -314,7 +317,8 @@ impl RpcContext {
tracing::debug!("{:?}", e); tracing::debug!("{:?}", e);
} }
} }
self.db let peek = self
.db
.mutate(|v| { .mutate(|v| {
for (_, pde) in v.as_package_data_mut().as_entries_mut()? { for (_, pde) in v.as_package_data_mut().as_entries_mut()? {
let status = pde let status = pde
@@ -329,9 +333,49 @@ impl RpcContext {
MainStatus::Stopped MainStatus::Stopped
})?; })?;
} }
Ok(v.clone())
})
.await?;
self.managers.init(self.clone(), peek.clone()).await?;
tracing::info!("Initialized Package Managers");
let mut all_dependency_config_errs = BTreeMap::new();
for (package_id, package) in peek.as_package_data().as_entries()?.into_iter() {
let package = package.clone();
if let Some(current_dependencies) = package
.as_installed()
.and_then(|x| x.as_current_dependencies().de().ok())
{
let manifest = package.as_manifest().de()?;
all_dependency_config_errs.insert(
package_id.clone(),
compute_dependency_config_errs(
self,
&peek,
&manifest,
&current_dependencies,
&Default::default(),
)
.await?,
);
}
}
self.db
.mutate(|v| {
for (package_id, errs) in all_dependency_config_errs {
if let Some(config_errors) = v
.as_package_data_mut()
.as_idx_mut(&package_id)
.and_then(|pde| pde.as_installed_mut())
.map(|i| i.as_status_mut().as_dependency_config_errors_mut())
{
config_errors.ser(&errs)?;
}
}
Ok(()) Ok(())
}) })
.await?; .await?;
Ok(()) Ok(())
} }
@@ -389,7 +433,7 @@ impl RpcContext {
} }
impl AsRef<Jwk> for RpcContext { impl AsRef<Jwk> for RpcContext {
fn as_ref(&self) -> &Jwk { fn as_ref(&self) -> &Jwk {
&*CURRENT_SECRET &CURRENT_SECRET
} }
} }
impl Context for RpcContext {} impl Context for RpcContext {}
@@ -403,7 +447,7 @@ impl Deref for RpcContext {
tracing_error::SpanTrace::capture() tracing_error::SpanTrace::capture()
); );
} }
&*self.0 &self.0
} }
} }
impl Drop for RpcContext { impl Drop for RpcContext {

View File

@@ -7,8 +7,8 @@ use rpc_toolkit::Context;
use serde::Deserialize; use serde::Deserialize;
use tracing::instrument; use tracing::instrument;
use crate::prelude::*;
use crate::util::config::{load_config_from_paths, local_config_path}; use crate::util::config::{load_config_from_paths, local_config_path};
use crate::{Error, ResultExt};
#[derive(Debug, Default, Deserialize)] #[derive(Debug, Default, Deserialize)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
@@ -50,21 +50,21 @@ impl SdkContext {
} }
/// BLOCKING /// BLOCKING
#[instrument(skip_all)] #[instrument(skip_all)]
pub fn developer_key(&self) -> Result<ed25519_dalek::Keypair, Error> { pub fn developer_key(&self) -> Result<ed25519_dalek::SigningKey, Error> {
if !self.developer_key_path.exists() { if !self.developer_key_path.exists() {
return Err(Error::new(eyre!("Developer Key does not exist! Please run `embassy-sdk init` before running this command."), crate::ErrorKind::Uninitialized)); return Err(Error::new(eyre!("Developer Key does not exist! Please run `start-sdk init` before running this command."), crate::ErrorKind::Uninitialized));
} }
let pair = <ed25519::KeypairBytes as ed25519::pkcs8::DecodePrivateKey>::from_pkcs8_pem( let pair = <ed25519::KeypairBytes as ed25519::pkcs8::DecodePrivateKey>::from_pkcs8_pem(
&std::fs::read_to_string(&self.developer_key_path)?, &std::fs::read_to_string(&self.developer_key_path)?,
) )
.with_kind(crate::ErrorKind::Pem)?; .with_kind(crate::ErrorKind::Pem)?;
let secret = ed25519_dalek::SecretKey::from_bytes(&pair.secret_key[..])?; let secret = ed25519_dalek::SecretKey::try_from(&pair.secret_key[..]).map_err(|_| {
let public = if let Some(public) = pair.public_key { Error::new(
ed25519_dalek::PublicKey::from_bytes(&public[..])? eyre!("pkcs8 key is of incorrect length"),
} else { ErrorKind::OpenSsl,
(&secret).into() )
}; })?;
Ok(ed25519_dalek::Keypair { secret, public }) Ok(secret.into())
} }
} }
impl std::ops::Deref for SdkContext { impl std::ops::Deref for SdkContext {

View File

@@ -12,7 +12,7 @@ use crate::Error;
#[command(display(display_none), metadata(sync_db = true))] #[command(display(display_none), metadata(sync_db = true))]
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn start(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result<(), Error> { pub async fn start(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result<(), Error> {
let peek = ctx.db.peek().await?; let peek = ctx.db.peek().await;
let version = peek let version = peek
.as_package_data() .as_package_data()
.as_idx(&id) .as_idx(&id)
@@ -27,14 +27,15 @@ pub async fn start(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result<(
.get(&(id, version)) .get(&(id, version))
.await .await
.ok_or_else(|| Error::new(eyre!("Manager not found"), crate::ErrorKind::InvalidRequest))? .ok_or_else(|| Error::new(eyre!("Manager not found"), crate::ErrorKind::InvalidRequest))?
.start(); .start()
.await;
Ok(()) Ok(())
} }
#[command(display(display_none), metadata(sync_db = true))] #[command(display(display_none), metadata(sync_db = true))]
pub async fn stop(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result<MainStatus, Error> { pub async fn stop(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result<MainStatus, Error> {
let peek = ctx.db.peek().await?; let peek = ctx.db.peek().await;
let version = peek let version = peek
.as_package_data() .as_package_data()
.as_idx(&id) .as_idx(&id)
@@ -62,14 +63,15 @@ pub async fn stop(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result<Ma
.get(&(id, version)) .get(&(id, version))
.await .await
.ok_or_else(|| Error::new(eyre!("Manager not found"), crate::ErrorKind::InvalidRequest))? .ok_or_else(|| Error::new(eyre!("Manager not found"), crate::ErrorKind::InvalidRequest))?
.stop(); .stop()
.await;
Ok(last_statuts) Ok(last_statuts)
} }
#[command(display(display_none), metadata(sync_db = true))] #[command(display(display_none), metadata(sync_db = true))]
pub async fn restart(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result<(), Error> { pub async fn restart(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result<(), Error> {
let peek = ctx.db.peek().await?; let peek = ctx.db.peek().await;
let version = peek let version = peek
.as_package_data() .as_package_data()
.as_idx(&id) .as_idx(&id)
@@ -83,7 +85,8 @@ pub async fn restart(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Result
.get(&(id, version)) .get(&(id, version))
.await .await
.ok_or_else(|| Error::new(eyre!("Manager not found"), crate::ErrorKind::InvalidRequest))? .ok_or_else(|| Error::new(eyre!("Manager not found"), crate::ErrorKind::InvalidRequest))?
.restart(); .restart()
.await;
Ok(()) Ok(())
} }

View File

@@ -37,7 +37,7 @@ async fn ws_handler<
session: Option<(HasValidSession, HashSessionToken)>, session: Option<(HasValidSession, HashSessionToken)>,
ws_fut: WSFut, ws_fut: WSFut,
) -> Result<(), Error> { ) -> Result<(), Error> {
let (dump, sub) = ctx.db.dump_and_sub().await?; let (dump, sub) = ctx.db.dump_and_sub().await;
let mut stream = ws_fut let mut stream = ws_fut
.await .await
.with_kind(ErrorKind::Network)? .with_kind(ErrorKind::Network)?
@@ -82,6 +82,8 @@ async fn deal_with_messages(
mut sub: patch_db::Subscriber, mut sub: patch_db::Subscriber,
mut stream: WebSocketStream<Upgraded>, mut stream: WebSocketStream<Upgraded>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let mut timer = tokio::time::interval(tokio::time::Duration::from_secs(5));
loop { loop {
futures::select! { futures::select! {
_ = (&mut kill).fuse() => { _ = (&mut kill).fuse() => {
@@ -112,6 +114,13 @@ async fn deal_with_messages(
_ => (), _ => (),
} }
} }
// This is trying to give a health checks to the home to keep the ui alive.
_ = timer.tick().fuse() => {
stream
.send(Message::Ping(vec![]))
.await
.with_kind(crate::ErrorKind::Network)?;
}
} }
} }
} }
@@ -165,7 +174,7 @@ pub async fn subscribe(ctx: RpcContext, req: Request<Body>) -> Result<Response<B
Ok(res) Ok(res)
} }
#[command(subcommands(revisions, dump, put, apply))] #[command(subcommands(dump, put, apply))]
pub fn db() -> Result<(), RpcError> { pub fn db() -> Result<(), RpcError> {
Ok(()) Ok(())
} }
@@ -177,20 +186,6 @@ pub enum RevisionsRes {
Dump(Dump), Dump(Dump),
} }
#[command(display(display_serializable))]
pub async fn revisions(
#[context] ctx: RpcContext,
#[arg] since: u64,
#[allow(unused_variables)]
#[arg(long = "format")]
format: Option<IoFormat>,
) -> Result<RevisionsRes, Error> {
Ok(match ctx.db.sync(since).await? {
Ok(revs) => RevisionsRes::Revisions(revs),
Err(dump) => RevisionsRes::Dump(dump),
})
}
#[instrument(skip_all)] #[instrument(skip_all)]
async fn cli_dump( async fn cli_dump(
ctx: CliContext, ctx: CliContext,
@@ -198,7 +193,7 @@ async fn cli_dump(
path: Option<PathBuf>, path: Option<PathBuf>,
) -> Result<Dump, RpcError> { ) -> Result<Dump, RpcError> {
let dump = if let Some(path) = path { let dump = if let Some(path) = path {
PatchDb::open(path).await?.dump().await? PatchDb::open(path).await?.dump().await
} else { } else {
rpc_toolkit::command_helpers::call_remote( rpc_toolkit::command_helpers::call_remote(
ctx, ctx,
@@ -226,7 +221,7 @@ pub async fn dump(
#[arg] #[arg]
path: Option<PathBuf>, path: Option<PathBuf>,
) -> Result<Dump, Error> { ) -> Result<Dump, Error> {
Ok(ctx.db.dump().await?) Ok(ctx.db.dump().await)
} }
fn apply_expr(input: jaq_core::Val, expr: &str) -> Result<jaq_core::Val, Error> { fn apply_expr(input: jaq_core::Val, expr: &str) -> Result<jaq_core::Val, Error> {

View File

@@ -47,7 +47,7 @@ impl Database {
last_wifi_region: None, last_wifi_region: None,
eos_version_compat: Current::new().compat().clone(), eos_version_compat: Current::new().compat().clone(),
lan_address, lan_address,
tor_address: format!("http://{}", account.key.tor_address()) tor_address: format!("https://{}", account.key.tor_address())
.parse() .parse()
.unwrap(), .unwrap(),
ip_info: BTreeMap::new(), ip_info: BTreeMap::new(),
@@ -426,7 +426,7 @@ pub struct InstalledPackageInfo {
pub marketplace_url: Option<Url>, pub marketplace_url: Option<Url>,
#[serde(default)] #[serde(default)]
#[serde(with = "crate::util::serde::ed25519_pubkey")] #[serde(with = "crate::util::serde::ed25519_pubkey")]
pub developer_key: ed25519_dalek::PublicKey, pub developer_key: ed25519_dalek::VerifyingKey,
pub manifest: Manifest, pub manifest: Manifest,
pub last_backup: Option<DateTime<Utc>>, pub last_backup: Option<DateTime<Utc>>,
pub dependency_info: BTreeMap<PackageId, StaticDependencyInfo>, pub dependency_info: BTreeMap<PackageId, StaticDependencyInfo>,
@@ -483,6 +483,7 @@ pub struct StaticDependencyInfo {
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"] #[model = "Model<Self>"]
pub struct CurrentDependencyInfo { pub struct CurrentDependencyInfo {
#[serde(default)]
pub pointers: BTreeSet<PackagePointerSpec>, pub pointers: BTreeSet<PackagePointerSpec>,
pub health_checks: BTreeSet<HealthCheckId>, pub health_checks: BTreeSet<HealthCheckId>,
} }

View File

@@ -28,7 +28,7 @@ where
#[async_trait::async_trait] #[async_trait::async_trait]
pub trait PatchDbExt { pub trait PatchDbExt {
async fn peek(&self) -> Result<DatabaseModel, Error>; async fn peek(&self) -> DatabaseModel;
async fn mutate<U: UnwindSafe + Send>( async fn mutate<U: UnwindSafe + Send>(
&self, &self,
f: impl FnOnce(&mut DatabaseModel) -> Result<U, Error> + UnwindSafe + Send, f: impl FnOnce(&mut DatabaseModel) -> Result<U, Error> + UnwindSafe + Send,
@@ -40,8 +40,8 @@ pub trait PatchDbExt {
} }
#[async_trait::async_trait] #[async_trait::async_trait]
impl PatchDbExt for PatchDb { impl PatchDbExt for PatchDb {
async fn peek(&self) -> Result<DatabaseModel, Error> { async fn peek(&self) -> DatabaseModel {
Ok(DatabaseModel::from(self.dump().await?.value)) DatabaseModel::from(self.dump().await.value)
} }
async fn mutate<U: UnwindSafe + Send>( async fn mutate<U: UnwindSafe + Send>(
&self, &self,

View File

@@ -170,7 +170,7 @@ pub async fn configure_logic(
ctx: RpcContext, ctx: RpcContext,
(pkg_id, dependency_id): (PackageId, PackageId), (pkg_id, dependency_id): (PackageId, PackageId),
) -> Result<ConfigDryRes, Error> { ) -> Result<ConfigDryRes, Error> {
let db = ctx.db.peek().await?; let db = ctx.db.peek().await;
let pkg = db let pkg = db
.as_package_data() .as_package_data()
.as_idx(&pkg_id) .as_idx(&pkg_id)

View File

@@ -3,7 +3,8 @@ use std::io::Write;
use std::path::Path; use std::path::Path;
use ed25519::pkcs8::EncodePrivateKey; use ed25519::pkcs8::EncodePrivateKey;
use ed25519_dalek::Keypair; use ed25519::PublicKeyBytes;
use ed25519_dalek::{SigningKey, VerifyingKey};
use rpc_toolkit::command; use rpc_toolkit::command;
use tracing::instrument; use tracing::instrument;
@@ -21,11 +22,11 @@ pub fn init(#[context] ctx: SdkContext) -> Result<(), Error> {
.with_ctx(|_| (crate::ErrorKind::Filesystem, parent.display().to_string()))?; .with_ctx(|_| (crate::ErrorKind::Filesystem, parent.display().to_string()))?;
} }
tracing::info!("Generating new developer key..."); tracing::info!("Generating new developer key...");
let keypair = Keypair::generate(&mut rand_old::thread_rng()); let secret = SigningKey::generate(&mut rand::thread_rng());
tracing::info!("Writing key to {}", ctx.developer_key_path.display()); tracing::info!("Writing key to {}", ctx.developer_key_path.display());
let keypair_bytes = ed25519::KeypairBytes { let keypair_bytes = ed25519::KeypairBytes {
secret_key: keypair.secret.to_bytes(), secret_key: secret.to_bytes(),
public_key: Some(keypair.public.to_bytes()), public_key: Some(PublicKeyBytes(VerifyingKey::from(&secret).to_bytes())),
}; };
let mut dev_key_file = File::create(&ctx.developer_key_path)?; let mut dev_key_file = File::create(&ctx.developer_key_path)?;
dev_key_file.write_all( dev_key_file.write_all(

View File

@@ -9,7 +9,6 @@ use crate::disk::repair;
use crate::init::SYSTEM_REBUILD_PATH; use crate::init::SYSTEM_REBUILD_PATH;
use crate::logs::{fetch_logs, LogResponse, LogSource}; use crate::logs::{fetch_logs, LogResponse, LogSource};
use crate::shutdown::Shutdown; use crate::shutdown::Shutdown;
use crate::system::SYSTEMD_UNIT;
use crate::util::display_none; use crate::util::display_none;
use crate::Error; use crate::Error;
@@ -29,7 +28,7 @@ pub async fn logs(
#[arg] cursor: Option<String>, #[arg] cursor: Option<String>,
#[arg] before: bool, #[arg] before: bool,
) -> Result<LogResponse, Error> { ) -> Result<LogResponse, Error> {
Ok(fetch_logs(LogSource::Service(SYSTEMD_UNIT), limit, cursor, before).await?) Ok(fetch_logs(LogSource::System, limit, cursor, before).await?)
} }
#[command(display(display_none))] #[command(display(display_none))]
@@ -42,8 +41,10 @@ pub fn exit(#[context] ctx: DiagnosticContext) -> Result<(), Error> {
pub fn restart(#[context] ctx: DiagnosticContext) -> Result<(), Error> { pub fn restart(#[context] ctx: DiagnosticContext) -> Result<(), Error> {
ctx.shutdown ctx.shutdown
.send(Some(Shutdown { .send(Some(Shutdown {
datadir: ctx.datadir.clone(), export_args: ctx
disk_guid: ctx.disk_guid.clone(), .disk_guid
.clone()
.map(|guid| (guid, ctx.datadir.clone())),
restart: true, restart: true,
})) }))
.expect("receiver dropped"); .expect("receiver dropped");

View File

@@ -126,6 +126,7 @@ pub async fn create_fs<P: AsRef<Path>>(
Command::new("cryptsetup") Command::new("cryptsetup")
.arg("-q") .arg("-q")
.arg("luksOpen") .arg("luksOpen")
.arg("--allow-discards")
.arg(format!("--key-file={}", PASSWORD_PATH)) .arg(format!("--key-file={}", PASSWORD_PATH))
.arg(format!("--keyfile-size={}", password.len())) .arg(format!("--keyfile-size={}", password.len()))
.arg(&blockdev_path) .arg(&blockdev_path)

82
backend/src/firmware.rs Normal file
View File

@@ -0,0 +1,82 @@
use std::path::Path;
use std::process::Stdio;
use async_compression::tokio::bufread::GzipDecoder;
use tokio::fs::File;
use tokio::io::{AsyncRead, AsyncWriteExt, BufReader};
use tokio::process::Command;
use crate::disk::fsck::RequiresReboot;
use crate::prelude::*;
use crate::util::Invoke;
pub async fn update_firmware() -> Result<RequiresReboot, Error> {
let product_name = String::from_utf8(
Command::new("dmidecode")
.arg("-s")
.arg("system-product-name")
.invoke(ErrorKind::Firmware)
.await?,
)?
.trim()
.to_owned();
if product_name.is_empty() {
return Ok(RequiresReboot(false));
}
let firmware_dir = Path::new("/usr/lib/embassy/firmware").join(&product_name);
if tokio::fs::metadata(&firmware_dir).await.is_ok() {
let current_firmware = String::from_utf8(
Command::new("dmidecode")
.arg("-s")
.arg("bios-version")
.invoke(ErrorKind::Firmware)
.await?,
)?
.trim()
.to_owned();
if tokio::fs::metadata(firmware_dir.join(format!("{current_firmware}.rom.gz")))
.await
.is_err()
&& tokio::fs::metadata(firmware_dir.join(format!("{current_firmware}.rom")))
.await
.is_err()
{
let mut firmware_read_dir = tokio::fs::read_dir(&firmware_dir).await?;
while let Some(entry) = firmware_read_dir.next_entry().await? {
let filename = entry.file_name().to_string_lossy().into_owned();
let rdr: Option<Box<dyn AsyncRead + Unpin>> = if filename.ends_with(".rom.gz") {
Some(Box::new(GzipDecoder::new(BufReader::new(
File::open(entry.path()).await?,
))))
} else if filename.ends_with(".rom") {
Some(Box::new(File::open(entry.path()).await?))
} else {
None
};
if let Some(mut rdr) = rdr {
let mut flashrom = Command::new("flashrom")
.arg("-p")
.arg("internal")
.arg("-w-")
.stdin(Stdio::piped())
.spawn()?;
let mut rom_dest = flashrom.stdin.take().or_not_found("stdin")?;
tokio::io::copy(&mut rdr, &mut rom_dest).await?;
rom_dest.flush().await?;
rom_dest.shutdown().await?;
drop(rom_dest);
let o = flashrom.wait_with_output().await?;
if !o.status.success() {
return Err(Error::new(
eyre!("{}", std::str::from_utf8(&o.stderr)?),
ErrorKind::Firmware,
));
} else {
return Ok(RequiresReboot(true));
}
}
}
}
}
Ok(RequiresReboot(false))
}

View File

@@ -20,6 +20,9 @@ use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH;
use crate::prelude::*; use crate::prelude::*;
use crate::sound::BEP; use crate::sound::BEP;
use crate::system::time; use crate::system::time;
use crate::util::cpupower::{
current_governor, get_available_governors, set_governor, GOVERNOR_PERFORMANCE,
};
use crate::util::docker::{create_bridge_network, CONTAINER_DATADIR, CONTAINER_TOOL}; use crate::util::docker::{create_bridge_network, CONTAINER_DATADIR, CONTAINER_TOOL};
use crate::util::Invoke; use crate::util::Invoke;
use crate::{Error, ARCH}; use crate::{Error, ARCH};
@@ -200,13 +203,8 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
let account = AccountInfo::load(&secret_store).await?; let account = AccountInfo::load(&secret_store).await?;
let db = cfg.db(&account).await?; let db = cfg.db(&account).await?;
db.mutate(|d| {
let model = d.de()?;
d.ser(&model)
})
.await?;
tracing::info!("Opened PatchDB"); tracing::info!("Opened PatchDB");
let peek = db.peek().await?; let peek = db.peek().await;
let mut server_info = peek.as_server_info().de()?; let mut server_info = peek.as_server_info().de()?;
// write to ca cert store // write to ca cert store
@@ -268,6 +266,11 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
if tokio::fs::metadata(&tmp_dir).await.is_err() { if tokio::fs::metadata(&tmp_dir).await.is_err() {
tokio::fs::create_dir_all(&tmp_dir).await?; tokio::fs::create_dir_all(&tmp_dir).await?;
} }
let tmp_var = cfg.datadir().join(format!("package-data/tmp/var"));
if tokio::fs::metadata(&tmp_var).await.is_ok() {
tokio::fs::remove_dir_all(&tmp_var).await?;
}
crate::disk::mount::util::bind(&tmp_var, "/var/tmp", false).await?;
let tmp_docker = cfg let tmp_docker = cfg
.datadir() .datadir()
.join(format!("package-data/tmp/{CONTAINER_TOOL}")); .join(format!("package-data/tmp/{CONTAINER_TOOL}"));
@@ -341,6 +344,23 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
.await?; .await?;
tracing::info!("Enabled Docker QEMU Emulation"); tracing::info!("Enabled Docker QEMU Emulation");
if current_governor()
.await?
.map(|g| &g != &GOVERNOR_PERFORMANCE)
.unwrap_or(false)
{
tracing::info!("Setting CPU Governor to \"{}\"", GOVERNOR_PERFORMANCE);
if get_available_governors()
.await?
.contains(&GOVERNOR_PERFORMANCE)
{
set_governor(&GOVERNOR_PERFORMANCE).await?;
tracing::info!("Set CPU Governor");
} else {
tracing::warn!("CPU Governor \"{}\" Not Available", GOVERNOR_PERFORMANCE)
}
}
let mut warn_time_not_synced = true; let mut warn_time_not_synced = true;
for _ in 0..60 { for _ in 0..60 {
if check_time_is_synchronized().await? { if check_time_is_synchronized().await? {
@@ -375,6 +395,12 @@ pub async fn init(cfg: &RpcContextConfig) -> Result<InitResult, Error> {
crate::version::init(&db, &secret_store).await?; crate::version::init(&db, &secret_store).await?;
db.mutate(|d| {
let model = d.de()?;
d.ser(&model)
})
.await?;
if should_rebuild { if should_rebuild {
match tokio::fs::remove_file(SYSTEM_REBUILD_PATH).await { match tokio::fs::remove_file(SYSTEM_REBUILD_PATH).await {
Ok(()) => Ok(()), Ok(()) => Ok(()),

View File

@@ -62,7 +62,7 @@ pub async fn cleanup_failed(ctx: &RpcContext, id: &PackageId) -> Result<(), Erro
if let Some(version) = match ctx if let Some(version) = match ctx
.db .db
.peek() .peek()
.await? .await
.as_package_data() .as_package_data()
.as_idx(id) .as_idx(id)
.or_not_found(id)? .or_not_found(id)?
@@ -141,7 +141,7 @@ pub async fn uninstall<Ex>(ctx: &RpcContext, secrets: &mut Ex, id: &PackageId) -
where where
for<'a> &'a mut Ex: Executor<'a, Database = Postgres>, for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{ {
let db = ctx.db.peek().await?; let db = ctx.db.peek().await;
let entry = db let entry = db
.as_package_data() .as_package_data()
.as_idx(id) .as_idx(id)

View File

@@ -22,7 +22,7 @@ use serde_json::{json, Value};
use tokio::fs::{File, OpenOptions}; use tokio::fs::{File, OpenOptions};
use tokio::io::{AsyncRead, AsyncSeek, AsyncSeekExt, AsyncWriteExt}; use tokio::io::{AsyncRead, AsyncSeek, AsyncSeekExt, AsyncWriteExt};
use tokio::process::Command; use tokio::process::Command;
use tokio::sync::{oneshot, Mutex}; use tokio::sync::oneshot;
use tokio_stream::wrappers::ReadDirStream; use tokio_stream::wrappers::ReadDirStream;
use tracing::instrument; use tracing::instrument;
@@ -64,7 +64,7 @@ pub const PKG_WASM_DIR: &str = "package-data/wasm";
#[command(display(display_serializable))] #[command(display(display_serializable))]
pub async fn list(#[context] ctx: RpcContext) -> Result<Value, Error> { pub async fn list(#[context] ctx: RpcContext) -> Result<Value, Error> {
Ok(ctx.db.peek().await?.as_package_data().as_entries()? Ok(ctx.db.peek().await.as_package_data().as_entries()?
.iter() .iter()
.filter_map(|(id, pde)| { .filter_map(|(id, pde)| {
let status = match pde.as_match() { let status = match pde.as_match() {
@@ -626,9 +626,10 @@ pub async fn uninstall(
let return_id = id.clone(); let return_id = id.clone();
tokio::spawn(async move { tokio::spawn(async move {
if let Err(e) = if let Err(e) = async {
async { cleanup::uninstall(&ctx, &mut ctx.secret_store.acquire().await?, &id).await } cleanup::uninstall(&ctx, ctx.secret_store.acquire().await?.as_mut(), &id).await
.await }
.await
{ {
let err_str = format!("Uninstall of {} Failed: {}", id, e); let err_str = format!("Uninstall of {} Failed: {}", id, e);
tracing::error!("{}", err_str); tracing::error!("{}", err_str);
@@ -666,23 +667,11 @@ pub async fn download_install_s9pk(
) -> Result<(), Error> { ) -> Result<(), Error> {
let pkg_id = &temp_manifest.id; let pkg_id = &temp_manifest.id;
let version = &temp_manifest.version; let version = &temp_manifest.version;
let previous_state: Arc<Mutex<Option<MainStatus>>> = Default::default(); let db = ctx.db.peek().await;
let db = ctx.db.peek().await?;
let after_previous_state = previous_state.clone();
if let Result::<(), Error>::Err(e) = { if let Result::<(), Error>::Err(e) = {
let ctx = ctx.clone(); let ctx = ctx.clone();
async move { async move {
if db
.as_package_data()
.as_idx(&pkg_id)
.or_not_found(&pkg_id)?
.as_installed()
.is_some()
{
*previous_state.lock().await =
crate::control::stop(ctx.clone(), pkg_id.clone()).await.ok();
}
// // Build set of existing manifests // // Build set of existing manifests
let mut manifests = Vec::new(); let mut manifests = Vec::new();
for (_id, pkg) in db.as_package_data().as_entries()? { for (_id, pkg) in db.as_package_data().as_entries()? {
@@ -699,7 +688,7 @@ pub async fn download_install_s9pk(
for (p, lan) in cfg { for (p, lan) in cfg {
if p.0 == 80 && lan.ssl || p.0 == 443 && !lan.ssl { if p.0 == 80 && lan.ssl || p.0 == 443 && !lan.ssl {
return Err(Error::new( return Err(Error::new(
eyre!("SSL Conflict with embassyOS"), eyre!("SSL Conflict with StartOS"),
ErrorKind::LanPortConflict, ErrorKind::LanPortConflict,
)); ));
} }
@@ -779,15 +768,6 @@ pub async fn download_install_s9pk(
tracing::debug!("{:?}", e); tracing::debug!("{:?}", e);
} }
let previous_state = after_previous_state.lock().await;
if previous_state
.as_ref()
.map(|x| x.running())
.unwrap_or(false)
{
crate::control::start(ctx.clone(), pkg_id.clone()).await?;
}
Err(e) Err(e)
} else { } else {
Ok::<_, Error>(()) Ok::<_, Error>(())
@@ -807,7 +787,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
rdr.validated(); rdr.validated();
let developer_key = rdr.developer_key().clone(); let developer_key = rdr.developer_key().clone();
rdr.reset().await?; rdr.reset().await?;
let db = ctx.db.peek().await?; let db = ctx.db.peek().await;
tracing::info!("Install {}@{}: Unpacking Manifest", pkg_id, version); tracing::info!("Install {}@{}: Unpacking Manifest", pkg_id, version);
let manifest = progress let manifest = progress
@@ -845,7 +825,12 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
.await .await
.with_kind(crate::ErrorKind::Deserialization)?, .with_kind(crate::ErrorKind::Deserialization)?,
)), )),
Err(e) if e.status() == Some(StatusCode::BAD_REQUEST) => Ok(None), Err(e)
if e.status() == Some(StatusCode::BAD_REQUEST)
|| e.status() == Some(StatusCode::NOT_FOUND) =>
{
Ok(None)
}
Err(e) => Err(e), Err(e) => Err(e),
} }
.with_kind(crate::ErrorKind::Registry)? .with_kind(crate::ErrorKind::Registry)?
@@ -1033,6 +1018,12 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
) )
.await?; .await?;
let peek = ctx.db.peek().await;
let prev = peek
.as_package_data()
.as_idx(pkg_id)
.or_not_found(pkg_id)?
.de()?;
let mut sql_tx = ctx.secret_store.begin().await?; let mut sql_tx = ctx.secret_store.begin().await?;
tracing::info!("Install {}@{}: Creating volumes", pkg_id, version); tracing::info!("Install {}@{}: Creating volumes", pkg_id, version);
@@ -1040,7 +1031,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
tracing::info!("Install {}@{}: Created volumes", pkg_id, version); tracing::info!("Install {}@{}: Created volumes", pkg_id, version);
tracing::info!("Install {}@{}: Installing interfaces", pkg_id, version); tracing::info!("Install {}@{}: Installing interfaces", pkg_id, version);
let interface_addresses = manifest.interfaces.install(&mut sql_tx, pkg_id).await?; let interface_addresses = manifest.interfaces.install(sql_tx.as_mut(), pkg_id).await?;
tracing::info!( tracing::info!(
"Install {}@{}: Installed interfaces {:?}", "Install {}@{}: Installed interfaces {:?}",
pkg_id, pkg_id,
@@ -1095,17 +1086,10 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
CurrentDependents(deps) CurrentDependents(deps)
}; };
let peek = ctx.db.peek().await?;
let prev = peek
.as_package_data()
.as_idx(pkg_id)
.or_not_found(pkg_id)?
.de()?;
let installed = InstalledPackageInfo { let installed = InstalledPackageInfo {
status: Status { status: Status {
configured: manifest.config.is_none(), configured: manifest.config.is_none(),
main: MainStatus::Stopped, main: MainStatus::Stopped,
dependency_errors: Default::default(),
dependency_config_errors: compute_dependency_config_errs( dependency_config_errors: compute_dependency_config_errs(
&ctx, &ctx,
&peek, &peek,
@@ -1241,11 +1225,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
manager.configure(configure_context).await?; manager.configure(configure_context).await?;
} }
if auto_start { for to_configure in to_configure.into_iter().filter(|(dep, _)| dep != pkg_id) {
manager.start();
}
for to_configure in to_configure {
if let Err(e) = async { if let Err(e) = async {
ctx.managers ctx.managers
.get(&to_configure) .get(&to_configure)
@@ -1267,6 +1247,10 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin + Send + Sync>(
} }
} }
if auto_start {
manager.start().await;
}
tracing::info!("Install {}@{}: Complete", pkg_id, version); tracing::info!("Install {}@{}: Complete", pkg_id, version);
Ok(()) Ok(())

View File

@@ -28,6 +28,7 @@ pub mod developer;
pub mod diagnostic; pub mod diagnostic;
pub mod disk; pub mod disk;
pub mod error; pub mod error;
pub mod firmware;
pub mod hostname; pub mod hostname;
pub mod init; pub mod init;
pub mod inspect; pub mod inspect;

View File

@@ -198,7 +198,7 @@ fn deserialize_string_or_utf8_array<'de, D: serde::de::Deserializer<'de>>(
String::from_utf8( String::from_utf8(
std::iter::repeat_with(|| seq.next_element::<u8>().transpose()) std::iter::repeat_with(|| seq.next_element::<u8>().transpose())
.take_while(|a| a.is_some()) .take_while(|a| a.is_some())
.filter_map(|a| a) .flatten()
.collect::<Result<Vec<u8>, _>>()?, .collect::<Result<Vec<u8>, _>>()?,
) )
.map_err(serde::de::Error::custom) .map_err(serde::de::Error::custom)
@@ -207,13 +207,22 @@ fn deserialize_string_or_utf8_array<'de, D: serde::de::Deserializer<'de>>(
deserializer.deserialize_any(Visitor) deserializer.deserialize_any(Visitor)
} }
/// Defining how we are going to filter on a journalctl cli log.
/// Kernel: (-k --dmesg Show kernel message log from the current boot)
/// Unit: ( -u --unit=UNIT Show logs from the specified unit
/// --user-unit=UNIT Show logs from the specified user unit))
/// System: Unit is startd, but we also filter on the comm
/// Container: Filtering containers, like podman/docker is done by filtering on the CONTAINER_NAME
#[derive(Debug)] #[derive(Debug)]
pub enum LogSource { pub enum LogSource {
Kernel, Kernel,
Service(&'static str), Unit(&'static str),
System,
Container(PackageId), Container(PackageId),
} }
pub const SYSTEM_UNIT: &str = "startd";
#[command( #[command(
custom_cli(cli_logs(async, context(CliContext))), custom_cli(cli_logs(async, context(CliContext))),
subcommands(self(logs_nofollow(async)), logs_follow), subcommands(self(logs_nofollow(async)), logs_follow),
@@ -323,21 +332,15 @@ pub async fn cli_logs_generic_follow(
.into()) .into())
} }
}; };
base_url.set_scheme(ws_scheme).or_else(|_| { base_url
Err(Error::new( .set_scheme(ws_scheme)
eyre!("Cannot set URL scheme"), .map_err(|_| Error::new(eyre!("Cannot set URL scheme"), crate::ErrorKind::ParseUrl))?;
crate::ErrorKind::ParseUrl,
))
})?;
let (mut stream, _) = let (mut stream, _) =
// base_url is "http://127.0.0.1/", with a trailing slash, so we don't put a leading slash in this path: // base_url is "http://127.0.0.1/", with a trailing slash, so we don't put a leading slash in this path:
tokio_tungstenite::connect_async(format!("{}ws/rpc/{}", base_url, res.guid)).await?; tokio_tungstenite::connect_async(format!("{}ws/rpc/{}", base_url, res.guid)).await?;
while let Some(log) = stream.try_next().await? { while let Some(log) = stream.try_next().await? {
match log { if let Message::Text(log) = log {
Message::Text(log) => { println!("{}", serde_json::from_str::<LogEntry>(&log)?);
println!("{}", serde_json::from_str::<LogEntry>(&log)?);
}
_ => (),
} }
} }
@@ -361,11 +364,22 @@ pub async fn journalctl(
LogSource::Kernel => { LogSource::Kernel => {
cmd.arg("-k"); cmd.arg("-k");
} }
LogSource::Service(id) => { LogSource::Unit(id) => {
cmd.arg("-u"); cmd.arg("-u");
cmd.arg(id); cmd.arg(id);
} }
LogSource::System => {
cmd.arg("-u");
cmd.arg(SYSTEM_UNIT);
cmd.arg(format!("_COMM={}", SYSTEM_UNIT));
}
LogSource::Container(id) => { LogSource::Container(id) => {
#[cfg(feature = "podman")]
cmd.arg(format!(
"SYSLOG_IDENTIFIER={}",
DockerProcedure::container_name(&id, None)
));
#[cfg(not(feature = "podman"))]
cmd.arg(format!( cmd.arg(format!(
"CONTAINER_NAME={}", "CONTAINER_NAME={}",
DockerProcedure::container_name(&id, None) DockerProcedure::container_name(&id, None)
@@ -373,7 +387,7 @@ pub async fn journalctl(
} }
}; };
let cursor_formatted = format!("--after-cursor={}", cursor.clone().unwrap_or("")); let cursor_formatted = format!("--after-cursor={}", cursor.unwrap_or(""));
if cursor.is_some() { if cursor.is_some() {
cmd.arg(&cursor_formatted); cmd.arg(&cursor_formatted);
if before { if before {

View File

@@ -11,7 +11,7 @@ use crate::Error;
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn check(ctx: &RpcContext, id: &PackageId) -> Result<(), Error> { pub async fn check(ctx: &RpcContext, id: &PackageId) -> Result<(), Error> {
let (manifest, started) = { let (manifest, started) = {
let peeked = ctx.db.peek().await?; let peeked = ctx.db.peek().await;
let pde = peeked let pde = peeked
.as_package_data() .as_package_data()
.as_idx(id) .as_idx(id)

View File

@@ -53,7 +53,7 @@ impl ManageContainer {
let current_state = Arc::new(watch::channel(StartStop::Stop).0); let current_state = Arc::new(watch::channel(StartStop::Stop).0);
let desired_state = Arc::new( let desired_state = Arc::new(
watch::channel::<StartStop>( watch::channel::<StartStop>(
get_status(seed.ctx.db.peek().await?, &seed.manifest).into(), get_status(seed.ctx.db.peek().await, &seed.manifest).into(),
) )
.0, .0,
); );
@@ -103,7 +103,7 @@ impl ManageContainer {
&self, &self,
seed: &manager_seed::ManagerSeed, seed: &manager_seed::ManagerSeed,
) -> Result<(), Error> { ) -> Result<(), Error> {
let current_state = get_status(seed.ctx.db.peek().await?, &seed.manifest); let current_state = get_status(seed.ctx.db.peek().await, &seed.manifest);
self.override_main_status self.override_main_status
.send_modify(|x| *x = Some(current_state)); .send_modify(|x| *x = Some(current_state));
Ok(()) Ok(())

View File

@@ -111,7 +111,7 @@ pub struct Manager {
seed: Arc<ManagerSeed>, seed: Arc<ManagerSeed>,
manage_container: Arc<manager_container::ManageContainer>, manage_container: Arc<manager_container::ManageContainer>,
transition: Arc<watch::Sender<Arc<TransitionState>>>, transition: Arc<watch::Sender<TransitionState>>,
persistent_container: ManagerPersistentContainer, persistent_container: ManagerPersistentContainer,
pub gid: Arc<Gid>, pub gid: Arc<Gid>,
@@ -140,60 +140,67 @@ impl Manager {
}) })
} }
pub fn start(&self) { /// awaiting this does not wait for the start to complete
self._transition_abort(); pub async fn start(&self) {
self.manage_container.to_desired(StartStop::Start);
}
pub fn stop(&self) {
self._transition_abort();
self.manage_container.to_desired(StartStop::Stop);
}
pub fn restart(&self) {
if self._is_transition_restart() { if self._is_transition_restart() {
return; return;
} }
self._transition_replace(self._transition_restart()); self._transition_abort().await;
self.manage_container.to_desired(StartStop::Start);
} }
/// awaiting this does not wait for the stop to complete
pub async fn stop(&self) {
self._transition_abort().await;
self.manage_container.to_desired(StartStop::Stop);
}
/// awaiting this does not wait for the restart to complete
pub async fn restart(&self) {
if self._is_transition_restart()
&& *self.manage_container.desired_state().borrow() == StartStop::Stop
{
return;
}
if self.manage_container.desired_state().borrow().is_start() {
self._transition_replace(self._transition_restart()).await;
}
}
/// awaiting this does not wait for the restart to complete
pub async fn configure( pub async fn configure(
&self, &self,
configure_context: ConfigureContext, configure_context: ConfigureContext,
) -> Result<BTreeMap<PackageId, String>, Error> { ) -> Result<BTreeMap<PackageId, String>, Error> {
if self._is_transition_configure() { if self._is_transition_restart() {
return Ok(configure_context.breakages); self._transition_abort().await;
} else if self._is_transition_backup() {
return Err(Error::new(
eyre!("Can't configure because service is backing up"),
ErrorKind::InvalidRequest,
));
} }
let context = self.seed.ctx.clone(); let context = self.seed.ctx.clone();
let id = self.seed.manifest.id.clone(); let id = self.seed.manifest.id.clone();
let breakages = configure(context, id, configure_context).await?; let breakages = configure(context, id, configure_context).await?;
self._transition_replace({
let manage_container = self.manage_container.clone();
let state_reverter = DesiredStateReverter::new(manage_container.clone());
let transition = self.transition.clone(); self.restart().await;
TransitionState::Configuring(
tokio::spawn(async move {
manage_container.wait_for_desired(StartStop::Stop).await;
state_reverter.revert().await;
transition.send_replace(Default::default());
})
.into(),
)
});
Ok(breakages) Ok(breakages)
} }
/// awaiting this does not wait for the backup to complete
pub async fn backup(&self, backup_guard: BackupGuard) -> BackupReturn { pub async fn backup(&self, backup_guard: BackupGuard) -> BackupReturn {
if self._is_transition_backup() { if self._is_transition_backup() {
return BackupReturn::AlreadyRunning(PackageBackupReport { return BackupReturn::AlreadyRunning(PackageBackupReport {
error: Some("Can't do backup because service is in a backing up state".to_owned()), error: Some("Can't do backup because service is already backing up".to_owned()),
}); });
} }
let (transition_state, done) = self._transition_backup(backup_guard); let (transition_state, done) = self._transition_backup(backup_guard);
self._transition_replace(transition_state); self._transition_replace(transition_state).await;
done.await done.await
} }
pub async fn exit(&self) { pub async fn exit(&self) {
self._transition_abort(); self._transition_abort().await;
self.manage_container self.manage_container
.wait_for_desired(StartStop::Stop) .wait_for_desired(StartStop::Stop)
.await; .await;
@@ -220,19 +227,14 @@ impl Manager {
.map(|x| x.rpc_client()) .map(|x| x.rpc_client())
} }
fn _transition_abort(&self) { async fn _transition_abort(&self) {
if let Some(transition) = self
.transition
.send_replace(Default::default())
.join_handle()
{
(**transition).abort();
}
}
fn _transition_replace(&self, transition_state: TransitionState) {
self.transition self.transition
.send_replace(Arc::new(transition_state)) .send_replace(Default::default())
.abort(); .abort()
.await;
}
async fn _transition_replace(&self, transition_state: TransitionState) {
self.transition.send_replace(transition_state).abort().await;
} }
pub(super) fn perform_restart(&self) -> impl Future<Output = Result<(), Error>> + 'static { pub(super) fn perform_restart(&self) -> impl Future<Output = Result<(), Error>> + 'static {
@@ -265,7 +267,7 @@ impl Manager {
let manage_container = self.manage_container.clone(); let manage_container = self.manage_container.clone();
let seed = self.seed.clone(); let seed = self.seed.clone();
async move { async move {
let peek = seed.ctx.db.peek().await?; let peek = seed.ctx.db.peek().await;
let state_reverter = DesiredStateReverter::new(manage_container.clone()); let state_reverter = DesiredStateReverter::new(manage_container.clone());
let override_guard = let override_guard =
manage_container.set_override(get_status(peek, &seed.manifest).backing_up())?; manage_container.set_override(get_status(peek, &seed.manifest).backing_up())?;
@@ -322,15 +324,11 @@ impl Manager {
} }
fn _is_transition_restart(&self) -> bool { fn _is_transition_restart(&self) -> bool {
let transition = self.transition.borrow(); let transition = self.transition.borrow();
matches!(**transition, TransitionState::Restarting(_)) matches!(*transition, TransitionState::Restarting(_))
} }
fn _is_transition_backup(&self) -> bool { fn _is_transition_backup(&self) -> bool {
let transition = self.transition.borrow(); let transition = self.transition.borrow();
matches!(**transition, TransitionState::BackingUp(_)) matches!(*transition, TransitionState::BackingUp(_))
}
fn _is_transition_configure(&self) -> bool {
let transition = self.transition.borrow();
matches!(**transition, TransitionState::Configuring(_))
} }
} }
@@ -340,7 +338,7 @@ async fn configure(
id: PackageId, id: PackageId,
mut configure_context: ConfigureContext, mut configure_context: ConfigureContext,
) -> Result<BTreeMap<PackageId, String>, Error> { ) -> Result<BTreeMap<PackageId, String>, Error> {
let db = ctx.db.peek().await?; let db = ctx.db.peek().await;
let id = &id; let id = &id;
let ctx = &ctx; let ctx = &ctx;
let overrides = &mut configure_context.overrides; let overrides = &mut configure_context.overrides;
@@ -602,7 +600,7 @@ impl Drop for DesiredStateReverter {
type BackupDoneSender = oneshot::Sender<Result<PackageBackupInfo, Error>>; type BackupDoneSender = oneshot::Sender<Result<PackageBackupInfo, Error>>;
fn finish_up_backup_task( fn finish_up_backup_task(
transition: Arc<Sender<Arc<TransitionState>>>, transition: Arc<Sender<TransitionState>>,
send: BackupDoneSender, send: BackupDoneSender,
) -> impl FnOnce(Result<Result<PackageBackupInfo, Error>, Error>) -> BoxFuture<'static, ()> { ) -> impl FnOnce(Result<Result<PackageBackupInfo, Error>, Error>) -> BoxFuture<'static, ()> {
move |result| { move |result| {
@@ -761,7 +759,7 @@ async fn add_network_for_main(
for (id, interface) in &seed.manifest.interfaces.0 { for (id, interface) in &seed.manifest.interfaces.0 {
for (external, internal) in interface.lan_config.iter().flatten() { for (external, internal) in interface.lan_config.iter().flatten() {
svc.add_lan( svc.add_lan(
&mut tx, tx.as_mut(),
id.clone(), id.clone(),
external.0, external.0,
internal.internal, internal.internal,
@@ -770,13 +768,14 @@ async fn add_network_for_main(
.await?; .await?;
} }
for (external, internal) in interface.tor_config.iter().flat_map(|t| &t.port_mapping) { for (external, internal) in interface.tor_config.iter().flat_map(|t| &t.port_mapping) {
svc.add_tor(&mut tx, id.clone(), external.0, internal.0) svc.add_tor(tx.as_mut(), id.clone(), external.0, internal.0)
.await?; .await?;
} }
} }
for volume in seed.manifest.volumes.values() { for volume in seed.manifest.volumes.values() {
if let Volume::Certificate { interface_id } = volume { if let Volume::Certificate { interface_id } = volume {
svc.export_cert(&mut tx, interface_id, ip.into()).await?; svc.export_cert(tx.as_mut(), interface_id, ip.into())
.await?;
} }
} }
tx.commit().await?; tx.commit().await?;

View File

@@ -5,21 +5,26 @@ use helpers::NonDetachingJoinHandle;
pub(super) enum TransitionState { pub(super) enum TransitionState {
BackingUp(NonDetachingJoinHandle<()>), BackingUp(NonDetachingJoinHandle<()>),
Restarting(NonDetachingJoinHandle<()>), Restarting(NonDetachingJoinHandle<()>),
Configuring(NonDetachingJoinHandle<()>),
None, None,
} }
impl TransitionState { impl TransitionState {
pub(super) fn join_handle(&self) -> Option<&NonDetachingJoinHandle<()>> { pub(super) fn take(&mut self) -> Self {
std::mem::take(self)
}
pub(super) fn into_join_handle(self) -> Option<NonDetachingJoinHandle<()>> {
Some(match self { Some(match self {
TransitionState::BackingUp(a) => a, TransitionState::BackingUp(a) => a,
TransitionState::Restarting(a) => a, TransitionState::Restarting(a) => a,
TransitionState::Configuring(a) => a,
TransitionState::None => return None, TransitionState::None => return None,
}) })
} }
pub(super) fn abort(&self) { pub(super) async fn abort(&mut self) {
self.join_handle().map(|transition| transition.abort()); if let Some(s) = self.take().into_join_handle() {
if s.wait_for_abort().await.is_ok() {
tracing::trace!("transition completed before abort");
}
}
} }
} }

View File

@@ -47,7 +47,7 @@ impl HasLoggedOutSessions {
"UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = $1", "UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = $1",
session session
) )
.execute(&mut sqlx_conn) .execute(sqlx_conn.as_mut())
.await?; .await?;
for socket in open_authed_websockets.remove(&session).unwrap_or_default() { for socket in open_authed_websockets.remove(&session).unwrap_or_default() {
let _ = socket.send(()); let _ = socket.send(());
@@ -94,7 +94,7 @@ impl HasValidSession {
pub async fn from_session(session: &HashSessionToken, ctx: &RpcContext) -> Result<Self, Error> { pub async fn from_session(session: &HashSessionToken, ctx: &RpcContext) -> Result<Self, Error> {
let session_hash = session.hashed(); let session_hash = session.hashed();
let session = sqlx::query!("UPDATE session SET last_active = CURRENT_TIMESTAMP WHERE id = $1 AND logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP", session_hash) let session = sqlx::query!("UPDATE session SET last_active = CURRENT_TIMESTAMP WHERE id = $1 AND logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP", session_hash)
.execute(&mut ctx.secret_store.acquire().await?) .execute(ctx.secret_store.acquire().await?.as_mut())
.await?; .await?;
if session.rows_affected() == 0 { if session.rows_affected() == 0 {
return Err(Error::new( return Err(Error::new(

View File

@@ -1,4 +1,6 @@
use futures::FutureExt; use futures::FutureExt;
use http::HeaderValue;
use hyper::header::HeaderMap;
use rpc_toolkit::hyper::http::Error as HttpError; use rpc_toolkit::hyper::http::Error as HttpError;
use rpc_toolkit::hyper::{Body, Method, Request, Response}; use rpc_toolkit::hyper::{Body, Method, Request, Response};
use rpc_toolkit::rpc_server_helpers::{ use rpc_toolkit::rpc_server_helpers::{
@@ -6,24 +8,35 @@ use rpc_toolkit::rpc_server_helpers::{
}; };
use rpc_toolkit::Metadata; use rpc_toolkit::Metadata;
fn get_cors_headers(req: &Request<Body>) -> HeaderMap {
let mut res = HeaderMap::new();
if let Some(origin) = req.headers().get("Origin") {
res.insert("Access-Control-Allow-Origin", origin.clone());
}
if let Some(method) = req.headers().get("Access-Control-Request-Method") {
res.insert("Access-Control-Allow-Methods", method.clone());
}
if let Some(headers) = req.headers().get("Access-Control-Request-Headers") {
res.insert("Access-Control-Allow-Headers", headers.clone());
}
res.insert(
"Access-Control-Allow-Credentials",
HeaderValue::from_static("true"),
);
res
}
pub async fn cors<M: Metadata>( pub async fn cors<M: Metadata>(
req: &mut Request<Body>, req: &mut Request<Body>,
_metadata: M, _metadata: M,
) -> Result<Result<DynMiddlewareStage2, Response<Body>>, HttpError> { ) -> Result<Result<DynMiddlewareStage2, Response<Body>>, HttpError> {
let headers = get_cors_headers(req);
if req.method() == Method::OPTIONS { if req.method() == Method::OPTIONS {
Ok(Err(Response::builder() Ok(Err({
.header( let mut res = Response::new(Body::empty());
"Access-Control-Allow-Origin", res.headers_mut().extend(headers.into_iter());
if let Some(origin) = req.headers().get("origin").and_then(|s| s.to_str().ok()) { res
origin }))
} else {
"*"
},
)
.header("Access-Control-Allow-Methods", "*")
.header("Access-Control-Allow-Headers", "*")
.header("Access-Control-Allow-Credentials", "true")
.body(Body::empty())?))
} else { } else {
Ok(Ok(Box::new(|_, _| { Ok(Ok(Box::new(|_, _| {
async move { async move {
@@ -31,8 +44,7 @@ pub async fn cors<M: Metadata>(
async move { async move {
let res: DynMiddlewareStage4 = Box::new(|res| { let res: DynMiddlewareStage4 = Box::new(|res| {
async move { async move {
res.headers_mut() res.headers_mut().extend(headers.into_iter());
.insert("Access-Control-Allow-Origin", "*".parse()?);
Ok::<_, HttpError>(()) Ok::<_, HttpError>(())
} }
.boxed() .boxed()

View File

@@ -1,4 +1,3 @@
use color_eyre::eyre::eyre;
use futures::future::BoxFuture; use futures::future::BoxFuture;
use futures::FutureExt; use futures::FutureExt;
use http::HeaderValue; use http::HeaderValue;
@@ -11,7 +10,6 @@ use rpc_toolkit::yajrc::RpcMethod;
use rpc_toolkit::Metadata; use rpc_toolkit::Metadata;
use crate::context::RpcContext; use crate::context::RpcContext;
use crate::{Error, ResultExt};
pub fn db<M: Metadata>(ctx: RpcContext) -> DynMiddleware<M> { pub fn db<M: Metadata>(ctx: RpcContext) -> DynMiddleware<M> {
Box::new( Box::new(
@@ -20,51 +18,21 @@ pub fn db<M: Metadata>(ctx: RpcContext) -> DynMiddleware<M> {
-> BoxFuture<Result<Result<DynMiddlewareStage2, Response<Body>>, HttpError>> { -> BoxFuture<Result<Result<DynMiddlewareStage2, Response<Body>>, HttpError>> {
let ctx = ctx.clone(); let ctx = ctx.clone();
async move { async move {
let m2: DynMiddlewareStage2 = Box::new(move |req, rpc_req| { let m2: DynMiddlewareStage2 = Box::new(move |_req, rpc_req| {
async move { async move {
let seq = req.headers.remove("x-patch-sequence");
let sync_db = metadata let sync_db = metadata
.get(rpc_req.method.as_str(), "sync_db") .get(rpc_req.method.as_str(), "sync_db")
.unwrap_or(false); .unwrap_or(false);
let m3: DynMiddlewareStage3 = Box::new(move |res, _| { let m3: DynMiddlewareStage3 = Box::new(move |res, _| {
async move { async move {
if sync_db && seq.is_some() { if sync_db {
match async { res.headers.append(
let seq = seq "X-Patch-Sequence",
.ok_or_else(|| { HeaderValue::from_str(
Error::new( &ctx.db.sequence().await.to_string(),
eyre!("Missing X-Patch-Sequence"), )?,
crate::ErrorKind::InvalidRequest, );
)
})?
.to_str()
.with_kind(crate::ErrorKind::InvalidRequest)?
.parse()?;
let res = ctx.db.sync(seq).await?;
let json = match res {
Ok(revs) => serde_json::to_vec(&revs),
Err(dump) => serde_json::to_vec(&[dump]),
}
.with_kind(crate::ErrorKind::Serialization)?;
Ok::<_, Error>(base64::encode_config(
&json,
base64::URL_SAFE,
))
}
.await
{
Ok(a) => res
.headers
.append("X-Patch-Updates", HeaderValue::from_str(&a)?),
Err(e) => res.headers.append(
"X-Patch-Error",
HeaderValue::from_str(&base64::encode_config(
&e.to_string(),
base64::URL_SAFE,
))?,
),
};
} }
Ok(Ok(noop4())) Ok(Ok(noop4()))
} }

View File

@@ -13,7 +13,7 @@ pub fn pbkdf2(password: impl AsRef<[u8]>, salt: impl AsRef<[u8]>) -> CipherKey<A
salt.as_ref(), salt.as_ref(),
1000, 1000,
aeskey.as_mut_slice(), aeskey.as_mut_slice(),
); ).unwrap();
aeskey aeskey
} }

View File

@@ -13,8 +13,8 @@ use tokio::process::Command;
use tokio::sync::RwLock; use tokio::sync::RwLock;
use tracing::instrument; use tracing::instrument;
use trust_dns_server::authority::MessageResponseBuilder; use trust_dns_server::authority::MessageResponseBuilder;
use trust_dns_server::client::op::{Header, ResponseCode}; use trust_dns_server::proto::op::{Header, ResponseCode};
use trust_dns_server::client::rr::{Name, Record, RecordType}; use trust_dns_server::proto::rr::{Name, Record, RecordType};
use trust_dns_server::server::{Request, RequestHandler, ResponseHandler, ResponseInfo}; use trust_dns_server::server::{Request, RequestHandler, ResponseHandler, ResponseInfo};
use trust_dns_server::ServerFuture; use trust_dns_server::ServerFuture;
@@ -86,7 +86,7 @@ impl RequestHandler for Resolver {
Record::from_rdata( Record::from_rdata(
request.request_info().query.name().to_owned().into(), request.request_info().query.name().to_owned().into(),
0, 0,
trust_dns_server::client::rr::RData::A(ip), trust_dns_server::proto::rr::RData::A(ip.into()),
) )
}) })
.collect::<Vec<_>>(), .collect::<Vec<_>>(),

View File

@@ -1,5 +1,4 @@
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use ed25519_dalek::{ExpandedSecretKey, SecretKey};
use models::{Id, InterfaceId, PackageId}; use models::{Id, InterfaceId, PackageId};
use openssl::pkey::{PKey, Private}; use openssl::pkey::{PKey, Private};
use openssl::sha::Sha256; use openssl::sha::Sha256;
@@ -12,14 +11,15 @@ use tracing::instrument;
use zeroize::Zeroize; use zeroize::Zeroize;
use crate::net::ssl::CertPair; use crate::net::ssl::CertPair;
use crate::Error; use crate::prelude::*;
use crate::util::crypto::ed25519_expand_key;
// TODO: delete once we may change tor addresses // TODO: delete once we may change tor addresses
#[instrument(skip(secrets))] #[instrument(skip(secrets))]
async fn compat( async fn compat(
secrets: impl PgExecutor<'_>, secrets: impl PgExecutor<'_>,
interface: &Option<(PackageId, InterfaceId)>, interface: &Option<(PackageId, InterfaceId)>,
) -> Result<Option<ExpandedSecretKey>, Error> { ) -> Result<Option<[u8; 64]>, Error> {
if let Some((package, interface)) = interface { if let Some((package, interface)) = interface {
if let Some(r) = sqlx::query!( if let Some(r) = sqlx::query!(
"SELECT key FROM tor WHERE package = $1 AND interface = $2", "SELECT key FROM tor WHERE package = $1 AND interface = $2",
@@ -29,7 +29,12 @@ async fn compat(
.fetch_optional(secrets) .fetch_optional(secrets)
.await? .await?
{ {
Ok(Some(ExpandedSecretKey::from_bytes(&r.key)?)) Ok(Some(<[u8; 64]>::try_from(r.key).map_err(|e| {
Error::new(
eyre!("expected vec of len 64, got len {}", e.len()),
ErrorKind::ParseDbField,
)
})?))
} else { } else {
Ok(None) Ok(None)
} }
@@ -38,7 +43,12 @@ async fn compat(
.await? .await?
.tor_key .tor_key
{ {
Ok(Some(ExpandedSecretKey::from_bytes(&key)?)) Ok(Some(<[u8; 64]>::try_from(key).map_err(|e| {
Error::new(
eyre!("expected vec of len 64, got len {}", e.len()),
ErrorKind::ParseDbField,
)
})?))
} else { } else {
Ok(None) Ok(None)
} }
@@ -64,10 +74,7 @@ impl Key {
.unwrap_or_else(|| "embassy".to_owned()) .unwrap_or_else(|| "embassy".to_owned())
} }
pub fn tor_key(&self) -> TorSecretKeyV3 { pub fn tor_key(&self) -> TorSecretKeyV3 {
ed25519_dalek::ExpandedSecretKey::from_bytes(&self.tor_key) self.tor_key.into()
.unwrap()
.to_bytes()
.into()
} }
pub fn tor_address(&self) -> OnionAddressV3 { pub fn tor_address(&self) -> OnionAddressV3 {
self.tor_key().public().get_onion_address() self.tor_key().public().get_onion_address()
@@ -87,7 +94,7 @@ impl Key {
pub fn openssl_key_nistp256(&self) -> PKey<Private> { pub fn openssl_key_nistp256(&self) -> PKey<Private> {
let mut buf = self.base; let mut buf = self.base;
loop { loop {
if let Ok(k) = p256::SecretKey::from_be_bytes(&buf) { if let Ok(k) = p256::SecretKey::from_slice(&buf) {
return PKey::private_key_from_pkcs8(&*k.to_pkcs8_der().unwrap().as_bytes()) return PKey::private_key_from_pkcs8(&*k.to_pkcs8_der().unwrap().as_bytes())
.unwrap(); .unwrap();
} }
@@ -111,11 +118,7 @@ impl Key {
} }
} }
pub fn from_bytes(interface: Option<(PackageId, InterfaceId)>, bytes: [u8; 32]) -> Self { pub fn from_bytes(interface: Option<(PackageId, InterfaceId)>, bytes: [u8; 32]) -> Self {
Self::from_pair( Self::from_pair(interface, bytes, ed25519_expand_key(&bytes))
interface,
bytes,
ExpandedSecretKey::from(&SecretKey::from_bytes(&bytes).unwrap()).to_bytes(),
)
} }
pub fn new(interface: Option<(PackageId, InterfaceId)>) -> Self { pub fn new(interface: Option<(PackageId, InterfaceId)>) -> Self {
Self::from_bytes(interface, rand::random()) Self::from_bytes(interface, rand::random())
@@ -225,7 +228,7 @@ impl Key {
}; };
let mut res = Self::from_bytes(interface, actual); let mut res = Self::from_bytes(interface, actual);
if let Some(tor_key) = compat(secrets, &res.interface).await? { if let Some(tor_key) = compat(secrets, &res.interface).await? {
res.tor_key = tor_key.to_bytes(); res.tor_key = tor_key;
} }
Ok(res) Ok(res)
} }

View File

@@ -23,7 +23,7 @@ pub mod wifi;
pub const PACKAGE_CERT_PATH: &str = "/var/lib/embassy/ssl"; pub const PACKAGE_CERT_PATH: &str = "/var/lib/embassy/ssl";
#[command(subcommands(tor::tor, dhcp::dhcp))] #[command(subcommands(tor::tor, dhcp::dhcp, ssl::ssl))]
pub fn net() -> Result<(), Error> { pub fn net() -> Result<(), Error> {
Ok(()) Ok(())
} }

View File

@@ -159,6 +159,7 @@ impl NetController {
let dns = self.dns.add(Some(package.clone()), ip).await?; let dns = self.dns.add(Some(package.clone()), ip).await?;
Ok(NetService { Ok(NetService {
shutdown: false,
id: package, id: package,
ip, ip,
dns, dns,
@@ -225,6 +226,7 @@ impl NetController {
} }
pub struct NetService { pub struct NetService {
shutdown: bool,
id: PackageId, id: PackageId,
ip: Ipv4Addr, ip: Ipv4Addr,
dns: Arc<()>, dns: Arc<()>,
@@ -372,6 +374,7 @@ impl NetService {
Ok(()) Ok(())
} }
pub async fn remove_all(mut self) -> Result<(), Error> { pub async fn remove_all(mut self) -> Result<(), Error> {
self.shutdown = true;
let mut errors = ErrorCollection::new(); let mut errors = ErrorCollection::new();
if let Some(ctrl) = Weak::upgrade(&self.controller) { if let Some(ctrl) = Weak::upgrade(&self.controller) {
for ((_, external), (key, rcs)) in std::mem::take(&mut self.lan) { for ((_, external), (key, rcs)) in std::mem::take(&mut self.lan) {
@@ -385,9 +388,9 @@ impl NetService {
} }
std::mem::take(&mut self.dns); std::mem::take(&mut self.dns);
errors.handle(ctrl.dns.gc(Some(self.id.clone()), self.ip).await); errors.handle(ctrl.dns.gc(Some(self.id.clone()), self.ip).await);
self.ip = Ipv4Addr::new(0, 0, 0, 0);
errors.into_result() errors.into_result()
} else { } else {
tracing::warn!("NetService dropped after NetController is shutdown");
Err(Error::new( Err(Error::new(
eyre!("NetController is shutdown"), eyre!("NetController is shutdown"),
crate::ErrorKind::Network, crate::ErrorKind::Network,
@@ -398,11 +401,12 @@ impl NetService {
impl Drop for NetService { impl Drop for NetService {
fn drop(&mut self) { fn drop(&mut self) {
if self.ip != Ipv4Addr::new(0, 0, 0, 0) { if !self.shutdown {
tracing::debug!("Dropping NetService for {}", self.id); tracing::debug!("Dropping NetService for {}", self.id);
let svc = std::mem::replace( let svc = std::mem::replace(
self, self,
NetService { NetService {
shutdown: true,
id: Default::default(), id: Default::default(),
ip: Ipv4Addr::new(0, 0, 0, 0), ip: Ipv4Addr::new(0, 0, 0, 0),
dns: Default::default(), dns: Default::default(),

View File

@@ -13,14 +13,15 @@ use openssl::nid::Nid;
use openssl::pkey::{PKey, Private}; use openssl::pkey::{PKey, Private};
use openssl::x509::{X509Builder, X509Extension, X509NameBuilder, X509}; use openssl::x509::{X509Builder, X509Extension, X509NameBuilder, X509};
use openssl::*; use openssl::*;
use rpc_toolkit::command;
use tokio::sync::{Mutex, RwLock}; use tokio::sync::{Mutex, RwLock};
use tracing::instrument; use tracing::instrument;
use crate::account::AccountInfo; use crate::account::AccountInfo;
use crate::context::RpcContext;
use crate::hostname::Hostname; use crate::hostname::Hostname;
use crate::net::dhcp::ips; use crate::net::dhcp::ips;
use crate::net::keys::{Key, KeyInfo}; use crate::net::keys::{Key, KeyInfo};
use crate::s9pk::manifest::PackageId;
use crate::{Error, ErrorKind, ResultExt}; use crate::{Error, ErrorKind, ResultExt};
static CERTIFICATE_VERSION: i32 = 2; // X509 version 3 is actually encoded as '2' in the cert because fuck you. static CERTIFICATE_VERSION: i32 = 2; // X509 version 3 is actually encoded as '2' in the cert because fuck you.
@@ -415,3 +416,16 @@ pub fn make_leaf_cert(
let cert = builder.build(); let cert = builder.build();
Ok(cert) Ok(cert)
} }
#[command(subcommands(size))]
pub async fn ssl() -> Result<(), Error> {
Ok(())
}
#[command]
pub async fn size(#[context] ctx: RpcContext) -> Result<String, Error> {
Ok(format!(
"Cert Catch size: {}",
ctx.net_controller.ssl.cert_cache.read().await.len()
))
}

View File

@@ -139,7 +139,7 @@ pub async fn logs_nofollow(
_ctx: (), _ctx: (),
(limit, cursor, before, _): (Option<usize>, Option<String>, bool, bool), (limit, cursor, before, _): (Option<usize>, Option<String>, bool, bool),
) -> Result<LogResponse, Error> { ) -> Result<LogResponse, Error> {
fetch_logs(LogSource::Service(SYSTEMD_UNIT), limit, cursor, before).await fetch_logs(LogSource::Unit(SYSTEMD_UNIT), limit, cursor, before).await
} }
#[command(rpc_only, rename = "follow", display(display_none))] #[command(rpc_only, rename = "follow", display(display_none))]
@@ -147,7 +147,7 @@ pub async fn logs_follow(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[parent_data] (limit, _, _, _): (Option<usize>, Option<String>, bool, bool), #[parent_data] (limit, _, _, _): (Option<usize>, Option<String>, bool, bool),
) -> Result<LogFollowResponse, Error> { ) -> Result<LogFollowResponse, Error> {
follow_logs(ctx, LogSource::Service(SYSTEMD_UNIT), limit).await follow_logs(ctx, LogSource::Unit(SYSTEMD_UNIT), limit).await
} }
fn event_handler(_event: AsyncEvent<'static>) -> BoxFuture<'static, Result<(), ConnError>> { fn event_handler(_event: AsyncEvent<'static>) -> BoxFuture<'static, Result<(), ConnError>> {
@@ -305,7 +305,7 @@ async fn torctl(
.invoke(ErrorKind::Tor) .invoke(ErrorKind::Tor)
.await?; .await?;
let logs = journalctl(LogSource::Service(SYSTEMD_UNIT), 0, None, false, true).await?; let logs = journalctl(LogSource::Unit(SYSTEMD_UNIT), 0, None, false, true).await?;
let mut tcp_stream = None; let mut tcp_stream = None;
for _ in 0..60 { for _ in 0..60 {

View File

@@ -16,12 +16,13 @@ use tokio::sync::{Mutex, RwLock};
use tokio_rustls::rustls::server::Acceptor; use tokio_rustls::rustls::server::Acceptor;
use tokio_rustls::rustls::{RootCertStore, ServerConfig}; use tokio_rustls::rustls::{RootCertStore, ServerConfig};
use tokio_rustls::{LazyConfigAcceptor, TlsConnector}; use tokio_rustls::{LazyConfigAcceptor, TlsConnector};
use tracing::instrument;
use crate::net::keys::Key; use crate::net::keys::Key;
use crate::net::ssl::SslManager; use crate::net::ssl::SslManager;
use crate::net::utils::SingleAccept; use crate::net::utils::SingleAccept;
use crate::prelude::*;
use crate::util::io::{BackTrackingReader, TimeoutStream}; use crate::util::io::{BackTrackingReader, TimeoutStream};
use crate::Error;
// not allowed: <=1024, >=32768, 5355, 5432, 9050, 6010, 9051, 5353 // not allowed: <=1024, >=32768, 5355, 5432, 9050, 6010, 9051, 5353
@@ -36,6 +37,7 @@ impl VHostController {
servers: Mutex::new(BTreeMap::new()), servers: Mutex::new(BTreeMap::new()),
} }
} }
#[instrument(skip_all)]
pub async fn add( pub async fn add(
&self, &self,
key: Key, key: Key,
@@ -63,6 +65,7 @@ impl VHostController {
writable.insert(external, server); writable.insert(external, server);
Ok(rc?) Ok(rc?)
} }
#[instrument(skip_all)]
pub async fn gc(&self, hostname: Option<String>, external: u16) -> Result<(), Error> { pub async fn gc(&self, hostname: Option<String>, external: u16) -> Result<(), Error> {
let mut writable = self.servers.lock().await; let mut writable = self.servers.lock().await;
if let Some(server) = writable.remove(&external) { if let Some(server) = writable.remove(&external) {
@@ -93,6 +96,7 @@ struct VHostServer {
_thread: NonDetachingJoinHandle<()>, _thread: NonDetachingJoinHandle<()>,
} }
impl VHostServer { impl VHostServer {
#[instrument(skip_all)]
async fn new(port: u16, ssl: Arc<SslManager>) -> Result<Self, Error> { async fn new(port: u16, ssl: Arc<SslManager>) -> Result<Self, Error> {
// check if port allowed // check if port allowed
let listener = TcpListener::bind(SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), port)) let listener = TcpListener::bind(SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), port))

View File

@@ -174,7 +174,7 @@ pub async fn delete(#[context] ctx: RpcContext, #[arg] ssid: String) -> Result<(
pub struct WiFiInfo { pub struct WiFiInfo {
ssids: HashMap<Ssid, SignalStrength>, ssids: HashMap<Ssid, SignalStrength>,
connected: Option<Ssid>, connected: Option<Ssid>,
country: CountryCode, country: Option<CountryCode>,
ethernet: bool, ethernet: bool,
available_wifi: Vec<WifiListOut>, available_wifi: Vec<WifiListOut>,
} }
@@ -216,7 +216,7 @@ fn display_wifi_info(info: WiFiInfo, matches: &ArgMatches) {
.as_ref() .as_ref()
.and_then(|x| info.ssids.get(x)) .and_then(|x| info.ssids.get(x))
.map_or("[N/A]".to_owned(), |ss| format!("{}", ss.0)), .map_or("[N/A]".to_owned(), |ss| format!("{}", ss.0)),
&info.country.alpha2(), info.country.as_ref().map(|c| c.alpha2()).unwrap_or("00"),
&format!("{}", info.ethernet) &format!("{}", info.ethernet)
]); ]);
table_global.print_tty(false).unwrap(); table_global.print_tty(false).unwrap();
@@ -517,7 +517,7 @@ impl WpaCli {
Ok(()) Ok(())
} }
pub async fn get_country_low(&self) -> Result<CountryCode, Error> { pub async fn get_country_low(&self) -> Result<Option<CountryCode>, Error> {
let r = Command::new("iw") let r = Command::new("iw")
.arg("reg") .arg("reg")
.arg("get") .arg("get")
@@ -539,12 +539,16 @@ impl WpaCli {
ErrorKind::Wifi, ErrorKind::Wifi,
) )
})?[1]; })?[1];
Ok(CountryCode::for_alpha2(country).map_err(|_| { if country == "00" {
Error::new( Ok(None)
color_eyre::eyre::eyre!("Invalid Country Code: {}", country), } else {
ErrorKind::Wifi, Ok(Some(CountryCode::for_alpha2(country).map_err(|_| {
) Error::new(
})?) color_eyre::eyre::eyre!("Invalid Country Code: {}", country),
ErrorKind::Wifi,
)
})?))
}
} }
pub async fn remove_network_low(&mut self, id: NetworkId) -> Result<(), Error> { pub async fn remove_network_low(&mut self, id: NetworkId) -> Result<(), Error> {
let _ = Command::new("nmcli") let _ = Command::new("nmcli")
@@ -634,7 +638,7 @@ impl WpaCli {
Ok(()) Ok(())
} }
pub async fn save_config(&mut self, db: PatchDb) -> Result<(), Error> { pub async fn save_config(&mut self, db: PatchDb) -> Result<(), Error> {
let new_country = Some(self.get_country_low().await?); let new_country = self.get_country_low().await?;
db.mutate(|d| { db.mutate(|d| {
d.as_server_info_mut() d.as_server_info_mut()
.as_last_wifi_region_mut() .as_last_wifi_region_mut()

View File

@@ -236,7 +236,7 @@ impl NotificationManager {
subtype: T, subtype: T,
debounce_interval: Option<u32>, debounce_interval: Option<u32>,
) -> Result<(), Error> { ) -> Result<(), Error> {
let peek = db.peek().await?; let peek = db.peek().await;
if !self if !self
.should_notify(&package_id, &level, &title, debounce_interval) .should_notify(&package_id, &level, &title, debounce_interval)
.await .await

View File

@@ -1,24 +1,27 @@
use std::path::{Path, PathBuf};
use std::sync::Arc; use std::sync::Arc;
use std::time::Duration; use std::time::Duration;
use std::{
path::{Path, PathBuf},
process::Stdio,
};
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use color_eyre::Report; use embassy_container_init::ProcessGroupId;
use embassy_container_init::{ProcessGroupId, SignalGroup, SignalGroupParams}; use helpers::UnixRpcClient;
use helpers::{Address, AddressSchemaLocal, AddressSchemaOnion, Callback, OsApi, UnixRpcClient};
pub use js_engine::JsError; pub use js_engine::JsError;
use js_engine::{JsExecutionEnvironment, PathForVolumeId}; use js_engine::{JsExecutionEnvironment, PathForVolumeId};
use models::{ErrorKind, InterfaceId, VolumeId}; use models::VolumeId;
use serde::de::DeserializeOwned; use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use tokio::process::Command;
use tracing::instrument; use tracing::instrument;
use super::ProcedureName; use super::ProcedureName;
use crate::context::RpcContext; use crate::prelude::*;
use crate::s9pk::manifest::PackageId; use crate::s9pk::manifest::PackageId;
use crate::util::{GeneralGuard, Version}; use crate::util::io::to_json_async_writer;
use crate::util::Version;
use crate::volume::Volumes; use crate::volume::Volumes;
use crate::Error;
#[derive(Debug, Serialize, Deserialize, Clone)] #[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
@@ -46,56 +49,15 @@ impl PathForVolumeId for Volumes {
} }
} }
struct SandboxOsApi { #[derive(Clone, Debug, Deserialize, Serialize)]
_ctx: RpcContext, pub struct ExecuteArgs {
} pub procedure: JsProcedure,
#[async_trait::async_trait] pub directory: PathBuf,
impl OsApi for SandboxOsApi { pub pkg_id: PackageId,
#[allow(unused_variables)] pub pkg_version: Version,
async fn get_service_config( pub name: ProcedureName,
&self, pub volumes: Volumes,
id: PackageId, pub input: Option<serde_json::Value>,
path: &str,
callback: Option<Callback>,
) -> Result<Vec<serde_json::Value>, Report> {
Err(eyre!("Operation not permitted"))
}
#[allow(unused_variables)]
async fn bind_local(
&self,
internal_port: u16,
address_schema: AddressSchemaLocal,
) -> Result<Address, Report> {
Err(eyre!("Operation not permitted"))
}
#[allow(unused_variables)]
async fn bind_onion(
&self,
internal_port: u16,
address_schema: AddressSchemaOnion,
) -> Result<Address, Report> {
Err(eyre!("Operation not permitted"))
}
#[allow(unused_variables)]
async fn unbind_local(&self, id: InterfaceId, external: u16) -> Result<(), Report> {
Err(eyre!("Operation not permitted"))
}
#[allow(unused_variables)]
async fn unbind_onion(&self, id: InterfaceId, external: u16) -> Result<(), Report> {
Err(eyre!("Operation not permitted"))
}
fn set_started(&self) -> Result<(), Report> {
Err(eyre!("Operation not permitted"))
}
async fn restart(&self) -> Result<(), Report> {
Err(eyre!("Operation not permitted"))
}
async fn start(&self) -> Result<(), Report> {
Err(eyre!("Operation not permitted"))
}
async fn stop(&self) -> Result<(), Report> {
Err(eyre!("Operation not permitted"))
}
} }
#[derive(Clone, Debug, Default, Deserialize, Serialize)] #[derive(Clone, Debug, Default, Deserialize, Serialize)]
@@ -120,56 +82,54 @@ impl JsProcedure {
volumes: &Volumes, volumes: &Volumes,
input: Option<I>, input: Option<I>,
timeout: Option<Duration>, timeout: Option<Duration>,
gid: ProcessGroupId, _gid: ProcessGroupId,
rpc_client: Option<Arc<UnixRpcClient>>, _rpc_client: Option<Arc<UnixRpcClient>>,
os: Arc<dyn OsApi>,
) -> Result<Result<O, (i32, String)>, Error> { ) -> Result<Result<O, (i32, String)>, Error> {
let cleaner_client = rpc_client.clone(); let runner_argument = ExecuteArgs {
let cleaner = GeneralGuard::new(move || { procedure: self.clone(),
tokio::spawn(async move { directory: directory.clone(),
if let Some(client) = cleaner_client { pkg_id: pkg_id.clone(),
client pkg_version: pkg_version.clone(),
.request(SignalGroup, SignalGroupParams { gid, signal: 9 }) name,
.await volumes: volumes.clone(),
.map_err(|e| { input: input.and_then(|x| serde_json::to_value(x).ok()),
Error::new(eyre!("{}: {:?}", e.message, e.data), ErrorKind::Docker) };
}) let mut runner = Command::new("start-deno")
} else { .arg("execute")
Ok(()) .stdin(Stdio::piped())
} .stdout(Stdio::piped())
}) .stderr(Stdio::piped())
}); .kill_on_drop(true)
let res = async move { .spawn()?;
let running_action = JsExecutionEnvironment::load_from_package( to_json_async_writer(
os, &mut runner.stdin.take().or_not_found("stdin")?,
directory, &runner_argument,
pkg_id, )
pkg_version, .await?;
Box::new(volumes.clone()),
gid, let res = if let Some(timeout) = timeout {
rpc_client, tokio::time::timeout(timeout, runner.wait_with_output())
) .await
.await? .with_kind(ErrorKind::Timeout)??
.run_action(name, input, self.args.clone()); } else {
let output: Option<ErrorValue> = match timeout { runner.wait_with_output().await?
Some(timeout_duration) => tokio::time::timeout(timeout_duration, running_action) };
.await
.map_err(|_| (JsError::Timeout, "Timed out. Retrying soon...".to_owned()))??, if res.status.success() {
None => running_action.await?, serde_json::from_str::<Result<O, (i32, String)>>(std::str::from_utf8(&res.stdout)?)
}; .with_kind(ErrorKind::Deserialization)
let output: O = unwrap_known_error(output)?; } else {
Ok(output) Err(Error::new(
eyre!("{}", String::from_utf8(res.stderr)?),
ErrorKind::Javascript,
))
} }
.await
.map_err(|(error, message)| (error.as_code_num(), message));
cleaner.drop().await.unwrap()?;
Ok(res)
} }
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn sandboxed<I: Serialize, O: DeserializeOwned>( pub async fn sandboxed<I: Serialize, O: DeserializeOwned>(
&self, &self,
ctx: &RpcContext, directory: &PathBuf,
pkg_id: &PackageId, pkg_id: &PackageId,
pkg_version: &Version, pkg_version: &Version,
volumes: &Volumes, volumes: &Volumes,
@@ -177,25 +137,97 @@ impl JsProcedure {
timeout: Option<Duration>, timeout: Option<Duration>,
name: ProcedureName, name: ProcedureName,
) -> Result<Result<O, (i32, String)>, Error> { ) -> Result<Result<O, (i32, String)>, Error> {
Ok(async move { let runner_argument = ExecuteArgs {
procedure: self.clone(),
directory: directory.clone(),
pkg_id: pkg_id.clone(),
pkg_version: pkg_version.clone(),
name,
volumes: volumes.clone(),
input: input.and_then(|x| serde_json::to_value(x).ok()),
};
let mut runner = Command::new("start-deno")
.arg("sandbox")
.stdin(Stdio::piped())
.stdout(Stdio::piped())
.stderr(Stdio::piped())
.kill_on_drop(true)
.spawn()?;
to_json_async_writer(
&mut runner.stdin.take().or_not_found("stdin")?,
&runner_argument,
)
.await?;
let res = if let Some(timeout) = timeout {
tokio::time::timeout(timeout, runner.wait_with_output())
.await
.with_kind(ErrorKind::Timeout)??
} else {
runner.wait_with_output().await?
};
if res.status.success() {
serde_json::from_str::<Result<O, (i32, String)>>(std::str::from_utf8(&res.stdout)?)
.with_kind(ErrorKind::Deserialization)
} else {
Err(Error::new(
eyre!("{}", String::from_utf8(res.stderr)?),
ErrorKind::Javascript,
))
}
}
#[instrument(skip_all)]
pub async fn execute_impl<I: Serialize, O: DeserializeOwned>(
&self,
directory: &PathBuf,
pkg_id: &PackageId,
pkg_version: &Version,
name: ProcedureName,
volumes: &Volumes,
input: Option<I>,
) -> Result<Result<O, (i32, String)>, Error> {
let res = async move {
let running_action = JsExecutionEnvironment::load_from_package( let running_action = JsExecutionEnvironment::load_from_package(
Arc::new(SandboxOsApi { _ctx: ctx.clone() }), directory,
&ctx.datadir, pkg_id,
pkg_version,
Box::new(volumes.clone()),
)
.await?
.run_action(name, input, self.args.clone());
let output: Option<ErrorValue> = running_action.await?;
let output: O = unwrap_known_error(output)?;
Ok(output)
}
.await
.map_err(|(error, message)| (error.as_code_num(), message));
Ok(res)
}
#[instrument(skip_all)]
pub async fn sandboxed_impl<I: Serialize, O: DeserializeOwned>(
&self,
directory: &PathBuf,
pkg_id: &PackageId,
pkg_version: &Version,
volumes: &Volumes,
input: Option<I>,
name: ProcedureName,
) -> Result<Result<O, (i32, String)>, Error> {
Ok(async move {
let running_action = JsExecutionEnvironment::load_from_package(
directory,
pkg_id, pkg_id,
pkg_version, pkg_version,
Box::new(volumes.clone()), Box::new(volumes.clone()),
ProcessGroupId(0),
None,
) )
.await? .await?
.read_only_effects() .read_only_effects()
.run_action(name, input, self.args.clone()); .run_action(name, input, self.args.clone());
let output: Option<ErrorValue> = match timeout { let output: Option<ErrorValue> = running_action.await?;
Some(timeout_duration) => tokio::time::timeout(timeout_duration, running_action)
.await
.map_err(|_| (JsError::Timeout, "Timed out. Retrying soon...".to_owned()))??,
None => running_action.await?,
};
let output: O = unwrap_known_error(output)?; let output: O = unwrap_known_error(output)?;
Ok(output) Ok(output)
} }
@@ -873,134 +905,40 @@ mod tests {
} }
})) }))
.unwrap(); .unwrap();
let input: Option<serde_json::Value> = None; let package_id = "test-package".parse().unwrap();
let timeout = Some(Duration::from_secs(10)); let package_version: Version = "0.3.0.3".parse().unwrap();
js_action let name = ProcedureName::Action("test-disk-usage".parse().unwrap());
.execute::<serde_json::Value, serde_json::Value>( let volumes: Volumes = serde_json::from_value(serde_json::json!({
&path, "main": {
&package_id, "type": "data"
&package_version, },
name, "compat": {
&volumes, "type": "assets"
input, },
timeout, "filebrowser" :{
ProcessGroupId(0), "package-id": "filebrowser",
None, "path": "data",
Arc::new(OsApiMock::default()), "readonly": true,
) "type": "pointer",
.await "volume-id": "main",
.unwrap() }
.unwrap(); }))
} .unwrap();
#[tokio::test] let input: Option<serde_json::Value> = None;
async fn test_callback() { let timeout = Some(Duration::from_secs(10));
let api = Arc::new(OsApiMock::default()); js_action
let action_api = api.clone(); .execute::<serde_json::Value, serde_json::Value>(
let spawned = tokio::spawn(async move { &path,
let mut watching = api.config_callbacks.subscribe(); &package_id,
loop { &package_version,
if watching.borrow().is_empty() { name,
watching.changed().await.unwrap(); &volumes,
continue; input,
} timeout,
api.config_callbacks.send_modify(|x| { ProcessGroupId(0),
x[0].call(vec![json!("This is something across the wire!")]) None,
.map_err(|e| format!("Failed call")) )
.unwrap(); .await
}); .unwrap()
break;
}
});
let js_action = JsProcedure { args: vec![] };
let path: PathBuf = "test/js_action_execute/"
.parse::<PathBuf>()
.unwrap()
.canonicalize()
.unwrap();
let package_id = "test-package".parse().unwrap();
let package_version: Version = "0.3.0.3".parse().unwrap();
let name = ProcedureName::Action("test-callback".parse().unwrap());
let volumes: Volumes = serde_json::from_value(json!({
"main": {
"type": "data"
},
"compat": {
"type": "assets"
},
"filebrowser" :{
"package-id": "filebrowser",
"path": "data",
"readonly": true,
"type": "pointer",
"volume-id": "main",
}
}))
.unwrap(); .unwrap();
let input: Option<serde_json::Value> = None;
let timeout = Some(Duration::from_secs(10));
js_action
.execute::<serde_json::Value, serde_json::Value>(
&path,
&package_id,
&package_version,
name,
&volumes,
input,
timeout,
ProcessGroupId(0),
None,
action_api,
)
.await
.unwrap()
.unwrap();
spawned.await.unwrap();
}
#[tokio::test]
async fn js_disk_usage() {
let js_action = JsProcedure { args: vec![] };
let path: PathBuf = "test/js_action_execute/"
.parse::<PathBuf>()
.unwrap()
.canonicalize()
.unwrap();
let package_id = "test-package".parse().unwrap();
let package_version: Version = "0.3.0.3".parse().unwrap();
let name = ProcedureName::Action("test-disk-usage".parse().unwrap());
let volumes: Volumes = serde_json::from_value(serde_json::json!({
"main": {
"type": "data"
},
"compat": {
"type": "assets"
},
"filebrowser" :{
"package-id": "filebrowser",
"path": "data",
"readonly": true,
"type": "pointer",
"volume-id": "main",
}
}))
.unwrap();
let input: Option<serde_json::Value> = None;
let timeout = Some(Duration::from_secs(10));
dbg!(js_action
.execute::<serde_json::Value, serde_json::Value>(
&path,
&package_id,
&package_version,
name,
&volumes,
input,
timeout,
ProcessGroupId(0),
None,
Arc::new(OsApiMock::default()),
)
.await
.unwrap()
.unwrap());
}
} }

View File

@@ -21,8 +21,6 @@ pub mod docker;
pub mod js_scripts; pub mod js_scripts;
pub use models::ProcedureName; pub use models::ProcedureName;
// TODO: create RPC endpoint that looks up the appropriate action and calls `execute`
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)] #[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")] #[serde(rename_all = "kebab-case")]
#[serde(tag = "type")] #[serde(tag = "type")]
@@ -140,7 +138,15 @@ impl PackageProcedure {
#[cfg(feature = "js_engine")] #[cfg(feature = "js_engine")]
PackageProcedure::Script(procedure) => { PackageProcedure::Script(procedure) => {
procedure procedure
.sandboxed(ctx, pkg_id, pkg_version, volumes, input, timeout, name) .sandboxed(
&ctx.datadir,
pkg_id,
pkg_version,
volumes,
input,
timeout,
name,
)
.await .await
} }
} }
@@ -158,13 +164,15 @@ impl std::fmt::Display for PackageProcedure {
} }
} }
// TODO: make this not allocate
#[derive(Debug)] #[derive(Debug)]
pub struct NoOutput; pub struct NoOutput;
impl<'de> Deserialize<'de> for NoOutput { impl<'de> Deserialize<'de> for NoOutput {
fn deserialize<D>(_: D) -> Result<Self, D::Error> fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where where
D: serde::Deserializer<'de>, D: serde::Deserializer<'de>,
{ {
let _ = Value::deserialize(deserializer)?;
Ok(NoOutput) Ok(NoOutput)
} }
} }

View File

@@ -21,7 +21,7 @@ pub async fn properties(#[context] ctx: RpcContext, #[arg] id: PackageId) -> Res
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn fetch_properties(ctx: RpcContext, id: PackageId) -> Result<Value, Error> { pub async fn fetch_properties(ctx: RpcContext, id: PackageId) -> Result<Value, Error> {
let peek = ctx.db.peek().await?; let peek = ctx.db.peek().await;
let manifest = peek let manifest = peek
.as_package_data() .as_package_data()

View File

@@ -52,7 +52,7 @@ async fn do_index(
pkg: &Package, pkg: &Package,
) -> Result<(), Error> { ) -> Result<(), Error> {
url.set_path("/admin/v0/index"); url.set_path("/admin/v0/index");
let mut req = httpc let req = httpc
.post(url) .post(url)
.header(header::ACCEPT, "text/plain") .header(header::ACCEPT, "text/plain")
.basic_auth(user, Some(pass)) .basic_auth(user, Some(pass))
@@ -74,7 +74,7 @@ async fn do_upload(
body: Body, body: Body,
) -> Result<(), Error> { ) -> Result<(), Error> {
url.set_path("/admin/v0/upload"); url.set_path("/admin/v0/upload");
let mut req = httpc let req = httpc
.post(url) .post(url)
.header(header::ACCEPT, "text/plain") .header(header::ACCEPT, "text/plain")
.basic_auth(user, Some(pass)) .basic_auth(user, Some(pass))

View File

@@ -1,3 +1,4 @@
use base64::Engine;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use reqwest::{StatusCode, Url}; use reqwest::{StatusCode, Url};
use rpc_toolkit::command; use rpc_toolkit::command;
@@ -65,12 +66,11 @@ pub async fn get(#[context] ctx: RpcContext, #[arg] url: Url) -> Result<Value, E
Some(ctype) => Ok(Value::String(format!( Some(ctype) => Ok(Value::String(format!(
"data:{};base64,{}", "data:{};base64,{}",
ctype, ctype,
base64::encode_config( base64::engine::general_purpose::URL_SAFE.encode(
&response &response
.bytes() .bytes()
.await .await
.with_kind(crate::ErrorKind::Registry)?, .with_kind(crate::ErrorKind::Registry)?
base64::URL_SAFE
) )
))), ))),
_ => Err(Error::new( _ => Err(Error::new(

View File

@@ -1,4 +1,4 @@
use sha2_old::{Digest, Sha512}; use sha2::{Digest, Sha512};
use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt, SeekFrom}; use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt, SeekFrom};
use tracing::instrument; use tracing::instrument;
use typed_builder::TypedBuilder; use typed_builder::TypedBuilder;
@@ -43,7 +43,7 @@ impl<
{ {
/// BLOCKING /// BLOCKING
#[instrument(skip_all)] #[instrument(skip_all)]
pub async fn pack(mut self, key: &ed25519_dalek::Keypair) -> Result<(), Error> { pub async fn pack(mut self, key: &ed25519_dalek::SigningKey) -> Result<(), Error> {
let header_pos = self.writer.stream_position().await?; let header_pos = self.writer.stream_position().await?;
if header_pos != 0 { if header_pos != 0 {
tracing::warn!("Appending to non-empty file."); tracing::warn!("Appending to non-empty file.");
@@ -132,7 +132,7 @@ impl<
// header // header
let (hash, _) = writer.finish(); let (hash, _) = writer.finish();
self.writer.seek(SeekFrom::Start(header_pos)).await?; self.writer.seek(SeekFrom::Start(header_pos)).await?;
header.pubkey = key.public.clone(); header.pubkey = key.into();
header.signature = key.sign_prehashed(hash, Some(SIG_CONTEXT))?; header.signature = key.sign_prehashed(hash, Some(SIG_CONTEXT))?;
header header
.serialize(&mut self.writer) .serialize(&mut self.writer)

View File

@@ -1,7 +1,7 @@
use std::collections::BTreeMap; use std::collections::BTreeMap;
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use ed25519_dalek::{PublicKey, Signature}; use ed25519_dalek::{Signature, VerifyingKey};
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWriteExt}; use tokio::io::{AsyncRead, AsyncReadExt, AsyncWriteExt};
use crate::Error; use crate::Error;
@@ -11,15 +11,15 @@ pub const VERSION: u8 = 1;
#[derive(Debug)] #[derive(Debug)]
pub struct Header { pub struct Header {
pub pubkey: PublicKey, pub pubkey: VerifyingKey,
pub signature: Signature, pub signature: Signature,
pub table_of_contents: TableOfContents, pub table_of_contents: TableOfContents,
} }
impl Header { impl Header {
pub fn placeholder() -> Self { pub fn placeholder() -> Self {
Header { Header {
pubkey: PublicKey::default(), pubkey: VerifyingKey::default(),
signature: Signature::from_bytes(&[0; 64]).expect("Invalid ed25519 signature"), signature: Signature::from_bytes(&[0; 64]),
table_of_contents: Default::default(), table_of_contents: Default::default(),
} }
} }
@@ -28,7 +28,7 @@ impl Header {
writer.write_all(&MAGIC).await?; writer.write_all(&MAGIC).await?;
writer.write_all(&[VERSION]).await?; writer.write_all(&[VERSION]).await?;
writer.write_all(self.pubkey.as_bytes()).await?; writer.write_all(self.pubkey.as_bytes()).await?;
writer.write_all(self.signature.as_ref()).await?; writer.write_all(&self.signature.to_bytes()).await?;
self.table_of_contents.serialize(writer).await?; self.table_of_contents.serialize(writer).await?;
Ok(()) Ok(())
} }
@@ -51,11 +51,11 @@ impl Header {
} }
let mut pubkey_bytes = [0; 32]; let mut pubkey_bytes = [0; 32];
reader.read_exact(&mut pubkey_bytes).await?; reader.read_exact(&mut pubkey_bytes).await?;
let pubkey = PublicKey::from_bytes(&pubkey_bytes) let pubkey = VerifyingKey::from_bytes(&pubkey_bytes)
.map_err(|e| Error::new(e, crate::ErrorKind::ParseS9pk))?; .map_err(|e| Error::new(e, crate::ErrorKind::ParseS9pk))?;
let mut sig_bytes = [0; 64]; let mut sig_bytes = [0; 64];
reader.read_exact(&mut sig_bytes).await?; reader.read_exact(&mut sig_bytes).await?;
let signature = Signature::from_bytes(&sig_bytes).expect("Invalid ed25519 signature"); let signature = Signature::from_bytes(&sig_bytes);
let table_of_contents = TableOfContents::deserialize(reader).await?; let table_of_contents = TableOfContents::deserialize(reader).await?;
Ok(Header { Ok(Header {

View File

@@ -7,11 +7,11 @@ use std::str::FromStr;
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use color_eyre::eyre::eyre; use color_eyre::eyre::eyre;
use digest_old::Output; use digest::Output;
use ed25519_dalek::PublicKey; use ed25519_dalek::VerifyingKey;
use futures::TryStreamExt; use futures::TryStreamExt;
use models::ImageId; use models::ImageId;
use sha2_old::{Digest, Sha512}; use sha2::{Digest, Sha512};
use tokio::fs::File; use tokio::fs::File;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, ReadBuf}; use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, ReadBuf};
use tracing::instrument; use tracing::instrument;
@@ -147,7 +147,7 @@ impl FromStr for ImageTag {
pub struct S9pkReader<R: AsyncRead + AsyncSeek + Unpin + Send + Sync = File> { pub struct S9pkReader<R: AsyncRead + AsyncSeek + Unpin + Send + Sync = File> {
hash: Option<Output<Sha512>>, hash: Option<Output<Sha512>>,
hash_string: Option<String>, hash_string: Option<String>,
developer_key: PublicKey, developer_key: VerifyingKey,
toc: TableOfContents, toc: TableOfContents,
pos: u64, pos: u64,
rdr: R, rdr: R,
@@ -333,7 +333,7 @@ impl<R: AsyncRead + AsyncSeek + Unpin + Send + Sync> S9pkReader<R> {
self.hash_string.as_ref().map(|s| s.as_str()) self.hash_string.as_ref().map(|s| s.as_str())
} }
pub fn developer_key(&self) -> &PublicKey { pub fn developer_key(&self) -> &VerifyingKey {
&self.developer_key &self.developer_key
} }

View File

@@ -59,11 +59,11 @@ async fn setup_init(
let mut secrets_handle = secret_store.acquire().await?; let mut secrets_handle = secret_store.acquire().await?;
let mut secrets_tx = secrets_handle.begin().await?; let mut secrets_tx = secrets_handle.begin().await?;
let mut account = AccountInfo::load(&mut secrets_tx).await?; let mut account = AccountInfo::load(secrets_tx.as_mut()).await?;
if let Some(password) = password { if let Some(password) = password {
account.set_password(&password)?; account.set_password(&password)?;
account.save(&mut secrets_tx).await?; account.save(secrets_tx.as_mut()).await?;
db.mutate(|m| { db.mutate(|m| {
m.as_server_info_mut() m.as_server_info_mut()
.as_password_hash_mut() .as_password_hash_mut()

View File

@@ -13,8 +13,7 @@ use crate::{Error, OS_ARCH};
#[derive(Debug, Clone)] #[derive(Debug, Clone)]
pub struct Shutdown { pub struct Shutdown {
pub datadir: PathBuf, pub export_args: Option<(Arc<String>, PathBuf)>,
pub disk_guid: Option<Arc<String>>,
pub restart: bool, pub restart: bool,
} }
impl Shutdown { impl Shutdown {
@@ -55,8 +54,8 @@ impl Shutdown {
tracing::debug!("{:?}", e); tracing::debug!("{:?}", e);
} }
} }
if let Some(guid) = &self.disk_guid { if let Some((guid, datadir)) = &self.export_args {
if let Err(e) = export(guid, &self.datadir).await { if let Err(e) = export(guid, datadir).await {
tracing::error!("Error Exporting Volume Group: {}", e); tracing::error!("Error Exporting Volume Group: {}", e);
tracing::debug!("{:?}", e); tracing::debug!("{:?}", e);
} }
@@ -93,8 +92,7 @@ impl Shutdown {
pub async fn shutdown(#[context] ctx: RpcContext) -> Result<(), Error> { pub async fn shutdown(#[context] ctx: RpcContext) -> Result<(), Error> {
ctx.shutdown ctx.shutdown
.send(Some(Shutdown { .send(Some(Shutdown {
datadir: ctx.datadir.clone(), export_args: Some((ctx.disk_guid.clone(), ctx.datadir.clone())),
disk_guid: Some(ctx.disk_guid.clone()),
restart: false, restart: false,
})) }))
.map_err(|_| ()) .map_err(|_| ())
@@ -106,8 +104,7 @@ pub async fn shutdown(#[context] ctx: RpcContext) -> Result<(), Error> {
pub async fn restart(#[context] ctx: RpcContext) -> Result<(), Error> { pub async fn restart(#[context] ctx: RpcContext) -> Result<(), Error> {
ctx.shutdown ctx.shutdown
.send(Some(Shutdown { .send(Some(Shutdown {
datadir: ctx.datadir.clone(), export_args: Some((ctx.disk_guid.clone(), ctx.datadir.clone())),
disk_guid: Some(ctx.disk_guid.clone()),
restart: true, restart: true,
})) }))
.map_err(|_| ()) .map_err(|_| ())

View File

@@ -16,8 +16,6 @@ pub struct Status {
pub configured: bool, pub configured: bool,
pub main: MainStatus, pub main: MainStatus,
#[serde(default)] #[serde(default)]
pub dependency_errors: BTreeMap<(), ()>, // TODO: remove
#[serde(default)]
pub dependency_config_errors: DependencyConfigErrors, pub dependency_config_errors: DependencyConfigErrors,
} }

View File

@@ -23,8 +23,6 @@ use crate::util::serde::{display_serializable, IoFormat};
use crate::util::{display_none, Invoke}; use crate::util::{display_none, Invoke};
use crate::{Error, ErrorKind, ResultExt}; use crate::{Error, ErrorKind, ResultExt};
pub const SYSTEMD_UNIT: &'static str = "startd";
#[command(subcommands(zram))] #[command(subcommands(zram))]
pub async fn experimental() -> Result<(), Error> { pub async fn experimental() -> Result<(), Error> {
Ok(()) Ok(())
@@ -60,7 +58,7 @@ pub async fn enable_zram() -> Result<(), Error> {
#[command(display(display_none))] #[command(display(display_none))]
pub async fn zram(#[context] ctx: RpcContext, #[arg] enable: bool) -> Result<(), Error> { pub async fn zram(#[context] ctx: RpcContext, #[arg] enable: bool) -> Result<(), Error> {
let db = ctx.db.peek().await?; let db = ctx.db.peek().await;
let zram = db.as_server_info().as_zram().de()?; let zram = db.as_server_info().as_zram().de()?;
if enable == zram { if enable == zram {
@@ -130,7 +128,7 @@ pub async fn logs_nofollow(
_ctx: (), _ctx: (),
(limit, cursor, before, _): (Option<usize>, Option<String>, bool, bool), (limit, cursor, before, _): (Option<usize>, Option<String>, bool, bool),
) -> Result<LogResponse, Error> { ) -> Result<LogResponse, Error> {
fetch_logs(LogSource::Service(SYSTEMD_UNIT), limit, cursor, before).await fetch_logs(LogSource::System, limit, cursor, before).await
} }
#[command(rpc_only, rename = "follow", display(display_none))] #[command(rpc_only, rename = "follow", display(display_none))]
@@ -138,7 +136,7 @@ pub async fn logs_follow(
#[context] ctx: RpcContext, #[context] ctx: RpcContext,
#[parent_data] (limit, _, _, _): (Option<usize>, Option<String>, bool, bool), #[parent_data] (limit, _, _, _): (Option<usize>, Option<String>, bool, bool),
) -> Result<LogFollowResponse, Error> { ) -> Result<LogFollowResponse, Error> {
follow_logs(ctx, LogSource::Service(SYSTEMD_UNIT), limit).await follow_logs(ctx, LogSource::System, limit).await
} }
#[command( #[command(
@@ -590,7 +588,8 @@ async fn get_temp() -> Result<Celsius, Error> {
.flat_map(|(_, v)| v.as_object()) .flat_map(|(_, v)| v.as_object())
.flatten() .flatten()
.filter_map(|(k, v)| { .filter_map(|(k, v)| {
if k.ends_with("_input") { // we have seen so far that `temp1` is always a composite reading of some sort, so we should just use that for each chip
if k.trim() == "temp1_input" {
v.as_f64() v.as_f64()
} else { } else {
None None

View File

@@ -76,7 +76,7 @@ fn display_update_result(status: UpdateResult, _: &ArgMatches) {
#[instrument(skip_all)] #[instrument(skip_all)]
async fn maybe_do_update(ctx: RpcContext, marketplace_url: Url) -> Result<Option<()>, Error> { async fn maybe_do_update(ctx: RpcContext, marketplace_url: Url) -> Result<Option<()>, Error> {
let peeked = ctx.db.peek().await?; let peeked = ctx.db.peek().await;
let latest_version: Version = ctx let latest_version: Version = ctx
.client .client
.get(with_query_params( .get(with_query_params(
@@ -154,7 +154,7 @@ async fn maybe_do_update(ctx: RpcContext, marketplace_url: Url) -> Result<Option
ctx.db.clone(), ctx.db.clone(),
None, None,
NotificationLevel::Error, NotificationLevel::Error,
"embassyOS Update Failed".to_owned(), "StartOS Update Failed".to_owned(),
format!("Update was not successful because of {}", e), format!("Update was not successful because of {}", e),
(), (),
None, None,

View File

@@ -0,0 +1,125 @@
use std::borrow::Cow;
use std::collections::BTreeSet;
use imbl::OrdMap;
use tokio::process::Command;
use crate::prelude::*;
use crate::util::Invoke;
pub const GOVERNOR_PERFORMANCE: Governor = Governor(Cow::Borrowed("performance"));
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct Governor(Cow<'static, str>);
impl std::fmt::Display for Governor {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0.fmt(f)
}
}
impl std::ops::Deref for Governor {
type Target = str;
fn deref(&self) -> &Self::Target {
&*self.0
}
}
impl std::borrow::Borrow<str> for Governor {
fn borrow(&self) -> &str {
&**self
}
}
pub async fn get_available_governors() -> Result<BTreeSet<Governor>, Error> {
let raw = String::from_utf8(
Command::new("cpupower")
.arg("frequency-info")
.arg("-g")
.invoke(ErrorKind::CpuSettings)
.await?,
)?;
let mut for_cpu: OrdMap<u32, BTreeSet<Governor>> = OrdMap::new();
let mut current_cpu = None;
for line in raw.lines() {
if line.starts_with("analyzing") {
current_cpu = Some(
sscanf::sscanf!(line, "analyzing CPU {u32}:")
.map_err(|e| eyre!("{e}"))
.with_kind(ErrorKind::ParseSysInfo)?,
);
} else if let Some(rest) = line
.trim()
.strip_prefix("available cpufreq governors:")
.map(|s| s.trim())
{
if rest != "Not Available" {
for_cpu
.entry(current_cpu.ok_or_else(|| {
Error::new(
eyre!("governors listed before cpu"),
ErrorKind::ParseSysInfo,
)
})?)
.or_default()
.extend(
rest.split_ascii_whitespace()
.map(|g| Governor(Cow::Owned(g.to_owned()))),
);
}
}
}
Ok(for_cpu
.into_iter()
.fold(None, |acc: Option<BTreeSet<Governor>>, (_, x)| {
if let Some(acc) = acc {
Some(acc.intersection(&x).cloned().collect())
} else {
Some(x)
}
})
.unwrap_or_default()) // include only governors available for ALL cpus
}
pub async fn current_governor() -> Result<Option<Governor>, Error> {
let Some(raw) = Command::new("cpupower")
.arg("frequency-info")
.arg("-p")
.invoke(ErrorKind::CpuSettings)
.await
.and_then(|s| Ok(Some(String::from_utf8(s)?)))
.or_else(|e| {
if e.source
.to_string()
.contains("Unable to determine current policy")
{
Ok(None)
} else {
Err(e)
}
})?
else {
return Ok(None);
};
for line in raw.lines() {
if let Some(governor) = line
.trim()
.strip_prefix("The governor \"")
.and_then(|s| s.strip_suffix("\" may decide which speed to use"))
{
return Ok(Some(Governor(Cow::Owned(governor.to_owned()))));
}
}
Err(Error::new(
eyre!("Failed to parse cpupower output:\n{raw}"),
ErrorKind::ParseSysInfo,
))
}
pub async fn set_governor(governor: &Governor) -> Result<(), Error> {
Command::new("cpupower")
.arg("frequency-set")
.arg("-g")
.arg(&*governor.0)
.invoke(ErrorKind::CpuSettings)
.await?;
Ok(())
}

View File

@@ -0,0 +1,13 @@
use ed25519_dalek::hazmat::ExpandedSecretKey;
use ed25519_dalek::{SecretKey, EXPANDED_SECRET_KEY_LENGTH};
#[inline]
pub fn ed25519_expand_key(key: &SecretKey) -> [u8; EXPANDED_SECRET_KEY_LENGTH] {
let key = ExpandedSecretKey::from(key);
let mut bytes: [u8; 64] = [0u8; 64];
bytes[..32].copy_from_slice(key.scalar.as_bytes());
bytes[32..].copy_from_slice(&key.hash_prefix[..]);
bytes
}

Some files were not shown because too many files have changed in this diff Show More