Mirror of https://github.com/Start9Labs/start-os.git (synced 2026-03-26 10:21:52 +00:00)
Compare commits
157 Commits
SHA1: 244260e34a, 575ed06225, b6fdc57888, 758d7d89c2, 2db31b54e8, 99d16a37d5, 449968bc4e, b0a55593c1, 17ef97c375, 36e0ba0f06, b365a60c00, 88afb756f5, e2d58c2959, 3cfc333512, 89da50dd37, 9319314672, 6d805ae941, 8ba932aa36, b580f549a6, cb9c01d94b, f9b0f6ae35, 1b1ff05c81, 7b465ce10b, ee66395dfe, 31af6eeb76, e9a2d81bbe, 7d7f03da4f, 8966b62ec7, ec8d9b0da8, 38ba1251ef, 005c46cb06, 4b0ff07d70, f1e065a448, c82c6eaf34, b8f3759739, 70aba1605c, 2c5aa84fe7, 753f395b8d, f22f11eb58, 123f71cb86, 22af45fb6e, 0849df524a, 31952afe1e, 83755e93dc, 0fbcc11f99, d431fac7de, 53ca9b0420, a8749f574a, a9d839fd8f, 477d37f87d, d2195411a6, 1f5e6dbff6, 09c0448186, b318bf64f4, af1d2c1603, 1c11d3d08f, a4a8f33df0, 889cf03c1c, 0ac5b34f2d, 37304a9d92, 4ad9886517, 8e9d2b5314, 7916a2352f, 2b92d0f119, 961a9342fa, 3cde39c7ed, 09922c8dfa, 0390954a85, 948fb795f2, 452c8ea2d9, 9c41090a7a, 59eee33767, cc5e60ed90, 27bc493884, 75a2b2d2ab, 0b7d8b4db0, d05cd7de0d, b0068a333b, d947c2db13, 90e09c8c25, dbf59a7853, 4d89e3beba, 5a88f41718, 435956a272, 7854885465, 901ea6203e, 9217d00528, f234f894af, 4286edd78f, 334437f677, 183c5cda14, 45265453cb, 80a06272cc, 473213d14b, d53e295569, 18e2c610bc, e0c68c1911, 34729c4509, ca778b327b, bde6169746, 3dfbf2fffd, 34068ef633, e11729013f, cceef054ac, b8751e7add, 37344f99a7, 61bcd8720d, 6801ff996e, c8fc9a98bf, 52de5426ad, e7d0a81bfe, 4f3223d3ad, 4829637b46, 7f2494a26b, f7b5fb55d7, 2b6e54da1e, 1023916390, 6a0e9d5c0a, 7b4d657a2d, b7e86bf556, fa777bbd63, 2e7b2c15bc, 9bc0fc8f05, b354d30fe9, a253e95b5a, 7e4c0d660a, 6a8bf2b074, 16729ebffc, f44d432b6a, 93ee418f65, cd6bda2113, 4a007cea78, ab532b4432, ee98b91a29, 0294143b22, 2890798342, 2d44852ec4, b9de5755d1, 84463673e2, 56efe9811d, a6234e4507, e41b2f6ca9, 8cf000198f, cc6cbbfb07, 10d7a3d585, 864555bcf0, 5d3bc8cfa5, b130608a78, 6fa0a9762f, 17270e41fd, 4ac03293ed, 7c17e26480, 1ac711c864, 82c2adbc7b, 1dcf390ee9, a143e2581e, d7bdc15e49
.github/workflows/README.md (vendored, new file, 24 lines)
@@ -0,0 +1,24 @@
# This folder contains GitHub Actions workflows for building the project

## backend-pr

Runs: when a pull request targets the master branch and changes the libs/ and/or backend/ folders

This workflow uses the actions docker/setup-qemu-action@v1 and docker/setup-buildx-action@v1 to prepare the environment for aarch64 cross compilation using docker buildx.
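For reference, the two setup steps exactly as they appear in the workflows below — QEMU supplies user-mode emulation for arm64 binaries, and buildx provides the multi-platform builder that uses it:

```yaml
# Register QEMU binfmt handlers so arm64 containers can run on the amd64 runner
- name: Set up QEMU
  uses: docker/setup-qemu-action@v1

# Create a buildx builder capable of --platform=linux/arm64 builds
- name: Set up Docker Buildx
  uses: docker/setup-buildx-action@v1
```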
A matrix strategy is used for building the v8 snapshots, instead of the makefile, to allow parallel job execution, as shown below.
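The matrix in question, excerpted from backend-pr.yaml below; each `include` entry becomes an independent job, so the amd64 and arm64 snapshots build in parallel:

```yaml
strategy:
  matrix:
    target: [amd64, arm64]
    include:
      - target: amd64
        snapshot_command: ./build-v8-snapshot.sh
        artifact_name: js_snapshot
      - target: arm64
        snapshot_command: ./build-arm-v8-snapshot.sh
        artifact_name: arm_js_snapshot
```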
## frontend-pr

Runs: when a pull request targets the master branch and changes the frontend/ folder

This workflow builds the frontends.

## product

Runs: when a change to the master branch is made

This workflow builds everything, re-using the backend-pr and frontend-pr workflows.
The download and extraction order of artifacts is relevant to `make`, as it checks the file timestamps to decide which targets need to be executed.
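A minimal sketch of why this matters, assuming GNU tar and GNU make (the workflow extracts with `tar -m`, which stamps extracted files with the current time instead of the archived mtime, so a downloaded artifact always looks newer than the sources it was built from):

```sh
tar -mxvf backend.tar   # -m: extracted files get "now" as their mtime
make backend            # target is newer than its prerequisites: nothing to rebuild
```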
Result: eos.img

## a note on uploading artifacts

Artifacts are used to share data between jobs. File permissions are not maintained during artifact upload. Where file permissions are relevant, a workaround using tar has been used. See [here](https://github.com/actions/upload-artifact#maintaining-file-permissions-and-case-sensitive-files).
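The pattern as used in the workflows in this change — pack before upload, unpack after download — since upload-artifact zips files and drops their mode bits:

```yaml
# Producing job: wrap the binaries in a tarball so the executable bit survives
- name: 'Tar files to preserve file permissions'
  run: tar -cvf backend.tar backend/target/aarch64-unknown-linux-gnu/release/embassy*

- uses: actions/upload-artifact@v3
  with:
    name: backend
    path: backend.tar
```

The consuming job then downloads the artifact and runs `tar -mxvf backend.tar`.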
.github/workflows/backend-pr.yaml (vendored, new file, 104 lines)
@@ -0,0 +1,104 @@
```yaml
name: Backend PR

on:
  workflow_call:
  workflow_dispatch:
  pull_request:
    branches:
      - master
    paths:
      - 'backend/**'
      - 'libs/**'

jobs:
  libs:
    name: Build libs
    strategy:
      matrix:
        target: [amd64, arm64]
        include:
          - target: amd64
            snapshot_command: ./build-v8-snapshot.sh
            artifact_name: js_snapshot
            artifact_path: libs/js_engine/src/artifacts/JS_SNAPSHOT.bin
          - target: arm64
            snapshot_command: ./build-arm-v8-snapshot.sh
            artifact_name: arm_js_snapshot
            artifact_path: libs/js_engine/src/artifacts/ARM_JS_SNAPSHOT.bin
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: recursive

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - uses: actions/cache@v3
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-libs-${{ matrix.target }}-${{ hashFiles('libs/Cargo.lock') }}

      - name: Build v8 snapshot
        run: ${{ matrix.snapshot_command }}
        working-directory: libs

      - uses: actions/upload-artifact@v3
        with:
          name: ${{ matrix.artifact_name }}
          path: ${{ matrix.artifact_path }}

  backend:
    name: Build backend
    runs-on: ubuntu-latest
    needs: libs
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: recursive

      - name: Download arm_js_snapshot artifact
        uses: actions/download-artifact@v3
        with:
          name: arm_js_snapshot
          path: libs/js_engine/src/artifacts/

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true

      - uses: actions/cache@v3
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-backend-${{ hashFiles('backend/Cargo.lock') }}

      - name: Build backend
        run: make backend

      - name: 'Tar files to preserve file permissions'
        run: tar -cvf backend.tar backend/target/aarch64-unknown-linux-gnu/release/embassy*

      - uses: actions/upload-artifact@v3
        with:
          name: backend
          path: backend.tar
```
.github/workflows/frontend-pr.yaml (vendored, new file, 46 lines)
@@ -0,0 +1,46 @@
```yaml
name: Frontend PR

on:
  workflow_call:
  workflow_dispatch:
  pull_request:
    branches:
      - master
    paths:
      - 'frontend/**'

jobs:
  frontend:
    name: Build frontend
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: recursive

      - uses: actions/setup-node@v3
        with:
          node-version: 16

      - name: Get npm cache directory
        id: npm-cache-dir
        run: |
          echo "::set-output name=dir::$(npm config get cache)"

      - uses: actions/cache@v3
        id: npm-cache
        with:
          path: ${{ steps.npm-cache-dir.outputs.dir }}
          key: ${{ runner.os }}-node-${{ hashFiles('**/package-lock.json') }}
          restore-keys: |
            ${{ runner.os }}-node-

      - name: Build frontends
        run: make frontends

      - name: 'Tar files to preserve file permissions'
        run: tar -cvf frontend.tar frontend/dist frontend/config.json

      - uses: actions/upload-artifact@v3
        with:
          name: frontend
          path: frontend.tar
```
.github/workflows/product.yaml (vendored, new file, 142 lines)
@@ -0,0 +1,142 @@
```yaml
name: Build Pipeline

on:
  workflow_dispatch:
  push:
    branches:
      - master

jobs:
  compat:
    name: Build compat.tar
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: recursive

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          override: true

      - uses: actions/cache@v3
        with:
          path: |
            ~/.cargo/bin/
            ~/.cargo/registry/index/
            ~/.cargo/registry/cache/
            ~/.cargo/git/db/
            target/
          key: ${{ runner.os }}-cargo-compat-${{ hashFiles('**/system-images/compat/Cargo.lock') }}

      - name: Build image
        run: make system-images/compat/compat.tar

      - uses: actions/upload-artifact@v3
        with:
          name: compat.tar
          path: system-images/compat/compat.tar

  utils:
    name: Build utils.tar
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: recursive

      - name: Set up QEMU
        uses: docker/setup-qemu-action@v1

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1

      - name: Build image
        run: make system-images/utils/utils.tar

      - uses: actions/upload-artifact@v3
        with:
          name: utils.tar
          path: system-images/utils/utils.tar

  backend:
    uses: ./.github/workflows/backend-pr.yaml

  frontend:
    uses: ./.github/workflows/frontend-pr.yaml

  image:
    name: Build image
    runs-on: ubuntu-latest
    needs: [compat,utils,backend,frontend]
    steps:
      - uses: actions/checkout@v3
        with:
          submodules: recursive

      - name: Download compat.tar artifact
        uses: actions/download-artifact@v3
        with:
          name: compat.tar
          path: system-images/compat

      - name: Download utils.tar artifact
        uses: actions/download-artifact@v3
        with:
          name: utils.tar
          path: system-images/utils

      - name: Download js_snapshot artifact
        uses: actions/download-artifact@v3
        with:
          name: js_snapshot
          path: libs/js_engine/src/artifacts/

      - name: Download arm_js_snapshot artifact
        uses: actions/download-artifact@v3
        with:
          name: arm_js_snapshot
          path: libs/js_engine/src/artifacts/

      - name: Download backend artifact
        uses: actions/download-artifact@v3
        with:
          name: backend

      - name: 'Extract backend'
        run: tar -mxvf backend.tar

      - name: Download frontend artifact
        uses: actions/download-artifact@v3
        with:
          name: frontend

      - name: Skip frontend build
        run: |
          mkdir frontend/node_modules
          mkdir frontend/dist
          mkdir patch-db/client/node_modules
          mkdir patch-db/client/dist

      - name: 'Extract frontend'
        run: |
          tar -mxvf frontend.tar frontend/config.json
          tar -mxvf frontend.tar frontend/dist

      - name: Cache raspiOS
        id: cache-raspios
        uses: actions/cache@v3
        with:
          path: raspios.img
          key: cache-raspios

      - name: Build image
        run: "make V=1 eos.img --debug"
```
.gitignore (vendored, 4 lines changed)
@@ -8,3 +8,7 @@
 /product_key.txt
 /*_product_key.txt
 .vscode/settings.json
+deploy_web.sh
+deploy_web.sh
+secrets.db
+.vscode/
@@ -19,6 +19,7 @@ All types of contributions are encouraged and valued. See the [Table of Contents
 - [I Want To Contribute](#i-want-to-contribute)
 - [Reporting Bugs](#reporting-bugs)
 - [Suggesting Enhancements](#suggesting-enhancements)
+- [Project Structure](#project-structure)
 - [Your First Code Contribution](#your-first-code-contribution)
 - [Setting Up Your Development Environment](#setting-up-your-development-environment)
 - [Building The Image](#building-the-image)
@@ -134,22 +135,24 @@ Enhancement suggestions are tracked as [GitHub issues](https://github.com/Start9
 <!-- You might want to create an issue template for enhancement suggestions that can be used as a guide and that defines the structure of the information to be included. If you do so, reference it here in the description. -->
 
+### Project Structure
+
 EmbassyOS is composed of the following components. Please visit the README for each component to understand the dependency requirements and installation instructions.
-- [`ui`](ui/README.md) (Typescript Ionic Angular) is the code that is deployed to the browser to provide the user interface for EmbassyOS.
-- [`backend`] (backend/README.md) (Rust) is a command line utility, daemon, and software development kit that sets up and manages services and their environments, provides the interface for the ui, manages system state, and provides utilities for packaging services for EmbassyOS.
+- [`ui`](frontend/README.md) (Typescript Ionic Angular) is the code that is deployed to the browser to provide the user interface for EmbassyOS.
+- [`backend`](backend/README.md) (Rust) is a command line utility, daemon, and software development kit that sets up and manages services and their environments, provides the interface for the ui, manages system state, and provides utilities for packaging services for EmbassyOS.
 - `patch-db` - A diff based data store that is used to synchronize data between the front and backend.
-  - Notably, `patch-db` has a [client](patch-db/client/README.md) with its own dependency and installation requirements.
+  - Notably, `patch-db` has a [client](https://github.com/Start9Labs/patch-db/tree/master/client) with its own dependency and installation requirements.
 - `rpc-toolkit` - A library for generating an rpc server with cli bindings from Rust functions.
 - `system-images` - (Docker, Rust) A suite of utility Docker images that are preloaded with EmbassyOS to assist with functions relating to services (eg. configuration, backups, health checks).
-- [`setup-wizard`] (ui/README.md) - Code for the user interface that is displayed during the setup and recovery process for EmbassyOS.
-- [`diagnostic-ui`] (diagnostic-ui/README.md) - Code for the user interface that is displayed when something has gone wrong with starting up EmbassyOS, which provides helpful debugging tools.
+- [`setup-wizard`](frontend/README.md) - Code for the user interface that is displayed during the setup and recovery process for EmbassyOS.
+- [`diagnostic-ui`](frontend/README.md) - Code for the user interface that is displayed when something has gone wrong with starting up EmbassyOS, which provides helpful debugging tools.
 
 ### Your First Code Contribution
 
-#### Setting up your development environment
+#### Setting Up Your Development Environment
 
 First, clone the EmbassyOS repository and from the project root, pull in the submodules for dependent libraries.
 
-```
+```sh
 git clone https://github.com/Start9Labs/embassy-os.git
 git submodule update --init --recursive
 ```
@@ -188,7 +191,7 @@ The body of a pull request should contain sufficient description of what the cha
 You should include references to any relevant [issues](https://github.com/Start9Labs/embassy-os/issues).
 
 ### Rebasing Changes
-When a pull request conflicts with the target branch, you may be asked to rebase it on top of the current target branch. The git rebase command will take care of rebuilding your commits on top of the new base.
+When a pull request conflicts with the target branch, you may be asked to rebase it on top of the current target branch. The `git rebase` command will take care of rebuilding your commits on top of the new base.
 
 This project aims to have a clean git history, where code changes are only made in non-merge commits. This simplifies auditability because merge commits can be assumed to not contain arbitrary code changes.
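A minimal rebase sequence for the situation described above (standard git commands; `origin/master` as the target branch is illustrative):

```sh
git fetch origin               # pick up the current state of the target branch
git rebase origin/master       # replay your commits on top of it
# resolve any conflicts, `git add` the files, then `git rebase --continue`
git push --force-with-lease    # update the pull request branch
```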
Makefile (22 lines changed)
@@ -22,12 +22,16 @@ clean:
 	rm -f product_key.txt
 	rm -f system-images/**/*.tar
 	sudo rm -f $(EMBASSY_BINS)
+	rm -f frontend/config.json
 	rm -rf frontend/node_modules
 	rm -rf frontend/dist
 	rm -rf patch-db/client/node_modules
 	rm -rf patch-db/client/dist
 
+sdk:
+	cd backend/ && ./install-sdk.sh
+
 eos.img: $(EMBASSY_SRC) system-images/compat/compat.tar system-images/utils/utils.tar
 	! test -f eos.img || rm eos.img
 	if [ "$(NO_KEY)" = "1" ]; then NO_KEY=1 ./build/make-image.sh; else ./build/make-image.sh; fi
 
@@ -39,7 +43,7 @@ system-images/utils/utils.tar: $(UTILS_SRC)
 	cd system-images/utils && DOCKER_CLI_EXPERIMENTAL=enabled docker buildx build --tag start9/x_system/utils --platform=linux/arm64 -o type=docker,dest=utils.tar .
 
 raspios.img:
-	wget https://downloads.raspberrypi.org/raspios_lite_arm64/images/raspios_lite_arm64-2022-01-28/2022-01-28-raspios-bullseye-arm64-lite.zip
+	wget --continue https://downloads.raspberrypi.org/raspios_lite_arm64/images/raspios_lite_arm64-2022-01-28/2022-01-28-raspios-bullseye-arm64-lite.zip
 	unzip 2022-01-28-raspios-bullseye-arm64-lite.zip
 	mv 2022-01-28-raspios-bullseye-arm64-lite.img raspios.img
 
@@ -49,7 +53,11 @@ product_key.txt:
 	if [ "$(KEY)" != "" ]; then $(shell which echo) -n "$(KEY)" > product_key.txt; fi
 	echo >> product_key.txt
 
+snapshots: libs/snapshot-creator/Cargo.toml
+	cd libs/ && ./build-v8-snapshot.sh
+	cd libs/ && ./build-arm-v8-snapshot.sh
+
 $(EMBASSY_BINS): $(BACKEND_SRC)
 	cd backend && ./build-prod.sh
 
 frontend/node_modules: frontend/package.json
@@ -70,4 +78,10 @@ patch-db/client/dist: $(PATCH_DB_CLIENT_SRC) patch-db/client/node_modules
 	npm --prefix patch-db/client run build
 
 # this is a convenience step to build all frontends - it is not referenced elsewhere in this file
-frontend: frontend/node_modules $(EMBASSY_UIS)
+frontends: frontend/node_modules frontend/config.json $(EMBASSY_UIS)
+
+# this is a convenience step to build the UI
+ui: frontend/node_modules frontend/config.json frontend/dist/ui
+
+# this is a convenience step to build the backend
+backend: $(EMBASSY_BINS)
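A usage sketch of the convenience targets added above, run from the repository root:

```sh
make sdk        # install embassy-sdk via backend/install-sdk.sh
make snapshots  # build both v8 snapshots under libs/
make frontends  # build all frontends (plus frontend/config.json)
make backend    # build the aarch64 release binaries ($(EMBASSY_BINS))
```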
backend/Cargo.lock (generated, 1957 lines changed)
File diff suppressed because it is too large
@@ -14,7 +14,7 @@ keywords = [
 name = "embassy-os"
 readme = "README.md"
 repository = "https://github.com/Start9Labs/embassy-os"
-version = "0.3.0-rev.2"
+version = "0.3.1"
 
 [lib]
 name = "embassy"
@@ -38,46 +38,53 @@ path = "src/bin/embassy-cli.rs"
 
 [features]
 avahi = ["avahi-sys"]
 beta = []
-default = ["avahi", "sound", "metal"]
+default = ["avahi", "sound", "metal", "js_engine"]
 dev = []
 metal = []
 sound = []
 unstable = ["patch-db/unstable"]
 
 [dependencies]
 aes = { version = "0.7.5", features = ["ctr"] }
 async-stream = "0.3.3"
 async-trait = "0.1.51"
 avahi-sys = { git = "https://github.com/Start9Labs/avahi-sys", version = "0.10.0", branch = "feature/dynamic-linking", features = [
 	"dynamic",
 ], optional = true }
 base32 = "0.4.0"
 base64 = "0.13.0"
+base64ct = "1.5.0"
 basic-cookies = "0.1.4"
 bollard = "0.11.0"
 chrono = { version = "0.4.19", features = ["serde"] }
 clap = "2.33"
 color-eyre = "0.5"
 cookie_store = "0.15.0"
-digest = "0.9.0"
+digest = "0.10.3"
+digest-old = { package = "digest", version = "0.9.0" }
 divrem = "1.0.0"
 ed25519 = { version = "1.5.2", features = ["pkcs8", "pem", "alloc"] }
 ed25519-dalek = { version = "1.0.1", features = ["serde"] }
 emver = { version = "0.1.6", features = ["serde"] }
 fd-lock-rs = "0.1.3"
 futures = "0.3.17"
 git-version = "0.3.5"
 helpers = { path = "../libs/helpers" }
 hex = "0.4.3"
-hmac = "0.11.0"
+hmac = "0.12.1"
 http = "0.2.5"
 hyper = "0.14.13"
 hyper-ws-listener = { git = "https://github.com/Start9Labs/hyper-ws-listener.git", branch = "main" }
 imbl = "1.0.1"
-indexmap = { version = "1.7.0", features = ["serde"] }
+indexmap = { version = "1.8.1", features = ["serde"] }
 isocountry = "0.3.2"
 itertools = "0.10.1"
+js_engine = { path = '../libs/js_engine', optional = true }
 jsonpath_lib = "0.3.0"
 lazy_static = "1.4"
 libc = "0.2.103"
 log = "0.4.14"
 models = { version = "*", path = "../libs/models" }
 nix = "0.23.0"
 nom = "7.0.0"
 num = "0.4.0"
@@ -87,8 +94,9 @@ openssl = { version = "0.10.36", features = ["vendored"] }
 patch-db = { version = "*", path = "../patch-db/patch-db", features = [
 	"trace",
 ] }
-pbkdf2 = "0.9.0"
+pbkdf2 = "0.11.0"
 pin-project = "1.0.8"
+pkcs8 = { version = "0.9.0", features = ["std"] }
 platforms = "1.1.0"
 prettytable-rs = "0.8.0"
 proptest = "1.0.0"
@@ -106,7 +114,8 @@ serde_cbor = { package = "ciborium", version = "0.2.0" }
 serde_json = "1.0.68"
 serde_toml = { package = "toml", version = "0.5.8" }
 serde_yaml = "0.8.21"
-sha2 = "0.9.8"
+sha2 = "0.10.2"
+sha2-old = { package = "sha2", version = "0.9.8" }
 simple-logging = "2.0"
 sqlx = { version = "0.5.11", features = [
 	"chrono",
@@ -128,6 +137,7 @@ tracing = "0.1"
 tracing-error = "0.1"
 tracing-futures = "0.2"
 tracing-subscriber = "0.2"
+trust-dns-server = "0.21.2"
 typed-builder = "0.9.1"
 url = { version = "2.2.2", features = ["serde"] }
@@ -8,7 +8,12 @@ if [ "$0" != "./build-dev.sh" ]; then
 	exit 1
 fi
 
-alias 'rust-arm64-builder'='docker run --rm -it -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-arm-cross:aarch64'
+USE_TTY=
+if tty -s; then
+	USE_TTY="-it"
+fi
+
+alias 'rust-arm64-builder'='docker run $USE_TTY --rm -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-arm-cross:aarch64'
 
 cd ..
 rust-arm64-builder sh -c "(cd backend && cargo build)"
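A hedged note on the pattern these scripts adopt: `docker run -it` requests an interactive TTY, and on a CI runner with no terminal attached Docker aborts with "the input device is not a TTY". Probing stdin with `tty -s` keeps `-it` for interactive use and drops it otherwise:

```sh
USE_TTY=
if tty -s; then     # exits 0 only when stdin is a terminal
	USE_TTY="-it"
fi
docker run $USE_TTY --rm alpine true   # works both locally and in CI
```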
@@ -8,7 +8,12 @@ if [ "$0" != "./build-portable-dev.sh" ]; then
 	exit 1
 fi
 
-alias 'rust-musl-builder'='docker run --rm -it -v "$HOME"/.cargo/registry:/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-musl-cross:x86_64-musl'
+USE_TTY=
+if tty -s; then
+	USE_TTY="-it"
+fi
+
+alias 'rust-musl-builder'='docker run $USE_TTY --rm -v "$HOME"/.cargo/registry:/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-musl-cross:x86_64-musl'
 
 cd ..
 rust-musl-builder sh -c "(cd backend && cargo +beta build --target=x86_64-unknown-linux-musl --no-default-features)"
@@ -8,7 +8,12 @@ if [ "$0" != "./build-portable.sh" ]; then
 	exit 1
 fi
 
-alias 'rust-musl-builder'='docker run --rm -it -v "$HOME"/.cargo/registry:/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-musl-cross:x86_64-musl'
+USE_TTY=
+if tty -s; then
+	USE_TTY="-it"
+fi
+
+alias 'rust-musl-builder'='docker run $USE_TTY --rm -v "$HOME"/.cargo/registry:/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-musl-cross:x86_64-musl'
 
 cd ..
 rust-musl-builder sh -c "(cd backend && cargo +beta build --release --target=x86_64-unknown-linux-musl --no-default-features)"
@@ -8,21 +8,26 @@ if [ "$0" != "./build-prod.sh" ]; then
 	exit 1
 fi
 
-alias 'rust-arm64-builder'='docker run --rm -it -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src start9/rust-arm-cross:aarch64'
+USE_TTY=
+if tty -s; then
+	USE_TTY="-it"
+fi
+
+alias 'rust-arm64-builder'='docker run $USE_TTY --rm -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$(pwd)":/home/rust/src -P start9/rust-arm-cross:aarch64'
 
 cd ..
+FLAGS=""
 if [[ "$ENVIRONMENT" =~ (^|-)unstable($|-) ]]; then
-	if [[ "$ENVIRONMENT" =~ (^|-)beta($|-) ]]; then
-		rust-arm64-builder sh -c "(cd backend && cargo build --release --features beta,unstable)"
-	else
-		rust-arm64-builder sh -c "(cd backend && cargo build --release --features unstable)"
-	fi
+	FLAGS="unstable,$FLAGS"
+fi
+if [[ "$ENVIRONMENT" =~ (^|-)dev($|-) ]]; then
+	FLAGS="dev,$FLAGS"
+fi
+if [[ "$FLAGS" = "" ]]; then
+	rust-arm64-builder sh -c "(git config --global --add safe.directory '*'; cd backend && cargo build --release)"
 else
-	if [[ "$ENVIRONMENT" =~ (^|-)beta($|-) ]]; then
-		rust-arm64-builder sh -c "(cd backend && cargo build --release --features beta)"
-	else
-		rust-arm64-builder sh -c "(cd backend && cargo build --release)"
-	fi
+	echo "FLAGS=$FLAGS"
+	rust-arm64-builder sh -c "(git config --global --add safe.directory '*'; cd backend && cargo build --release --features $FLAGS)"
 fi
 cd backend
 #rust-arm64-builder aarch64-linux-gnu-strip target/aarch64-unknown-linux-gnu/release/embassyd
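A usage sketch of the new flag handling (behaviour read off the script above; `ENVIRONMENT` is matched as hyphen-separated tokens, and the accumulated FLAGS string keeps a trailing comma):

```sh
./build-prod.sh                           # FLAGS=""              -> cargo build --release
ENVIRONMENT=dev ./build-prod.sh           # FLAGS="dev,"          -> --features dev,
ENVIRONMENT=dev-unstable ./build-prod.sh  # FLAGS="dev,unstable," -> --features dev,unstable,
```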
@@ -1,12 +0,0 @@
-#!/bin/bash
-
-# Enter the backend directory, copy over the built EmbassyOS binaries and systemd services, edit the nginx config, then create the .ssh directory
-
-cp target/aarch64-unknown-linux-gnu/release/embassy-init /mnt/usr/local/bin
-cp target/aarch64-unknown-linux-gnu/release/embassyd /mnt/usr/local/bin
-cp target/aarch64-unknown-linux-gnu/release/embassy-cli /mnt/usr/local/bin
-cp *.service /mnt/etc/systemd/system/
-
-echo "application/wasm wasm;" | sudo tee -a "/mnt/etc/nginx/mime.types"
-
-mkdir -p /mnt/root/.ssh
@@ -6,7 +6,7 @@ Wants=avahi-daemon.service nginx.service tor.service
 
 [Service]
 Type=oneshot
-Environment=RUST_LOG=embassy_init=debug,embassy=debug
+Environment=RUST_LOG=embassy_init=debug,embassy=debug,js_engine=debug
 ExecStart=/usr/local/bin/embassy-init
 RemainAfterExit=true
 
@@ -5,10 +5,13 @@ Requires=embassy-init.service
 
 [Service]
 Type=simple
-Environment=RUST_LOG=embassyd=debug,embassy=debug
+Environment=RUST_LOG=embassyd=debug,embassy=debug,js_engine=debug
 ExecStart=/usr/local/bin/embassyd
 Restart=always
 RestartSec=3
+ManagedOOMPreference=avoid
+CPUAccounting=true
+CPUWeight=1000
 
 [Install]
 WantedBy=multi-user.target
@@ -8,4 +8,4 @@ if [ "$0" != "./install-sdk.sh" ]; then
 	exit 1
 fi
 
-cargo install --bin=embassy-sdk --path=. --no-default-features
+cargo install --bin=embassy-sdk --bin=embassy-cli --path=. --no-default-features --features=js_engine
@@ -1,47 +1,46 @@
 {
   "db": "SQLite",
   "10350f5a16f1b2a6ce91672ae5dc6acc46691bd8f901861545ec83c326a8ccef": {
-    "query": "INSERT INTO ssh_keys (fingerprint, openssh_pubkey, created_at) VALUES (?, ?, ?)",
     "describe": {
       "columns": [],
+      "nullable": [],
       "parameters": {
        "Right": 3
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "INSERT INTO ssh_keys (fingerprint, openssh_pubkey, created_at) VALUES (?, ?, ?)"
   },
   "118d59de5cf930d5a3b5667b2220e9a3d593bd84276beb2b76c93b2694b0fd72": {
-    "query": "INSERT INTO session (id, user_agent, metadata) VALUES (?, ?, ?)",
     "describe": {
       "columns": [],
+      "nullable": [],
      "parameters": {
        "Right": 3
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "INSERT INTO session (id, user_agent, metadata) VALUES (?, ?, ?)"
   },
   "165daa7d6a60cb42122373b2c5ac7d39399bcc99992f0002ee7bfef50a8daceb": {
-    "query": "DELETE FROM certificates WHERE id = 0 OR id = 1;",
     "describe": {
       "columns": [],
+      "nullable": [],
      "parameters": {
        "Right": 0
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "DELETE FROM certificates WHERE id = 0 OR id = 1;"
   },
   "177c4b9cc7901a3b906e5969b86b1c11e6acbfb8e86e98f197d7333030b17964": {
-    "query": "DELETE FROM notifications WHERE id = ?",
     "describe": {
       "columns": [],
+      "nullable": [],
      "parameters": {
        "Right": 1
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "DELETE FROM notifications WHERE id = ?"
   },
   "1b2242afa55e730b37b00929b656d80940b457ec86c234ddd0de917bd8872611": {
-    "query": "INSERT INTO cifs_shares (hostname, path, username, password) VALUES (?, ?, ?, ?) RETURNING id AS \"id: u32\"",
     "describe": {
       "columns": [
        {
@@ -50,36 +49,36 @@
          "type_info": "Int64"
        }
      ],
-      "parameters": {
-        "Right": 4
-      },
      "nullable": [
        false
-      ]
-    }
+      ],
+      "parameters": {
+        "Right": 4
+      }
+    },
+    "query": "INSERT INTO cifs_shares (hostname, path, username, password) VALUES (?, ?, ?, ?) RETURNING id AS \"id: u32\""
   },
   "1eee1fdc793919c391008854407143d7a11b4668486c11a760b49af49992f9f8": {
-    "query": "REPLACE INTO tor (package, interface, key) VALUES (?, 'main', ?)",
     "describe": {
       "columns": [],
+      "nullable": [],
      "parameters": {
        "Right": 2
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "REPLACE INTO tor (package, interface, key) VALUES (?, 'main', ?)"
   },
   "2932aa02735b6422fca4ba889abfb3de8598178d4690076dc278898753d9df62": {
-    "query": "UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = ?",
     "describe": {
       "columns": [],
+      "nullable": [],
      "parameters": {
        "Right": 1
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = ?"
   },
   "3502e58f2ab48fb4566d21c920c096f81acfa3ff0d02f970626a4dcd67bac71d": {
-    "query": "SELECT tor_key FROM account",
     "describe": {
       "columns": [
        {
@@ -88,16 +87,16 @@
          "type_info": "Blob"
        }
      ],
-      "parameters": {
-        "Right": 0
-      },
      "nullable": [
        false
-      ]
-    }
+      ],
+      "parameters": {
+        "Right": 0
+      }
+    },
+    "query": "SELECT tor_key FROM account"
   },
   "3e57a0e52b69f33e9411c13b03a5d82c5856d63f0375eb4c23b255a09c54f8b1": {
-    "query": "SELECT key FROM tor WHERE package = ? AND interface = ?",
     "describe": {
       "columns": [
        {
@@ -106,16 +105,16 @@
          "type_info": "Blob"
        }
      ],
-      "parameters": {
-        "Right": 2
-      },
      "nullable": [
        false
-      ]
-    }
+      ],
+      "parameters": {
+        "Right": 2
+      }
+    },
+    "query": "SELECT key FROM tor WHERE package = ? AND interface = ?"
   },
   "4691e3a2ce80b59009ac17124f54f925f61dc5ea371903e62cdffa5d7b67ca96": {
-    "query": "SELECT * FROM session WHERE logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP",
     "describe": {
       "columns": [
        {
@@ -149,9 +148,6 @@
          "type_info": "Text"
        }
      ],
-      "parameters": {
-        "Right": 0
-      },
      "nullable": [
        false,
        false,
@@ -159,51 +155,54 @@
        false,
        true,
        false
-      ]
-    }
+      ],
+      "parameters": {
+        "Right": 0
+      }
+    },
+    "query": "SELECT * FROM session WHERE logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP"
   },
   "530192a2a530ee6b92e5b98e1eb1bf6d1426c7b0cb2578593a367cb0bf2c3ca8": {
-    "query": "UPDATE certificates SET priv_key_pem = ?, certificate_pem = ?, updated_at = datetime('now') WHERE lookup_string = ?",
     "describe": {
       "columns": [],
+      "nullable": [],
      "parameters": {
        "Right": 3
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "UPDATE certificates SET priv_key_pem = ?, certificate_pem = ?, updated_at = datetime('now') WHERE lookup_string = ?"
   },
   "56b986f2a2b7091d9c3acdd78f75d9842242de1f4da8f3672f2793d9fb256928": {
-    "query": "DELETE FROM tor WHERE package = ?",
     "describe": {
       "columns": [],
+      "nullable": [],
      "parameters": {
        "Right": 1
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "DELETE FROM tor WHERE package = ?"
   },
   "5b114c450073f77f466c980a2541293f30087b57301c379630326e5e5c2fb792": {
-    "query": "REPLACE INTO tor (package, interface, key) VALUES (?, ?, ?)",
     "describe": {
       "columns": [],
+      "nullable": [],
      "parameters": {
        "Right": 3
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "REPLACE INTO tor (package, interface, key) VALUES (?, ?, ?)"
   },
   "5c47da44b9c84468e95a13fc47301989900f130b3b5899d1ee6664df3ed812ac": {
-    "query": "INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (0, ?, ?, NULL, datetime('now'), datetime('now'))",
     "describe": {
       "columns": [],
+      "nullable": [],
      "parameters": {
        "Right": 2
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (0, ?, ?, NULL, datetime('now'), datetime('now'))"
   },
   "629be61c3c341c131ddbbff0293a83dbc6afd07cae69d246987f62cf0cc35c2a": {
-    "query": "SELECT password FROM account",
     "describe": {
       "columns": [
        {
@@ -212,36 +211,36 @@
          "type_info": "Text"
        }
      ],
-      "parameters": {
-        "Right": 0
-      },
      "nullable": [
        false
-      ]
-    }
+      ],
+      "parameters": {
+        "Right": 0
+      }
+    },
+    "query": "SELECT password FROM account"
   },
   "63785dc5f193ea31e6f641a910c75857ccd288a3f6e9c4f704331531e4f0689f": {
-    "query": "UPDATE session SET last_active = CURRENT_TIMESTAMP WHERE id = ? AND logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP",
     "describe": {
       "columns": [],
+      "nullable": [],
      "parameters": {
        "Right": 1
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "UPDATE session SET last_active = CURRENT_TIMESTAMP WHERE id = ? AND logged_out IS NULL OR logged_out > CURRENT_TIMESTAMP"
   },
   "6440354d73a67c041ea29508b43b5f309d45837a44f1a562051ad540d894c7d6": {
-    "query": "DELETE FROM ssh_keys WHERE fingerprint = ?",
     "describe": {
       "columns": [],
+      "nullable": [],
      "parameters": {
        "Right": 1
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "DELETE FROM ssh_keys WHERE fingerprint = ?"
   },
   "65e6c3fbb138da5cf385af096fdd3c062b6e826e12a8a4b23e16fcc773004c29": {
-    "query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications WHERE id < ? ORDER BY id DESC LIMIT ?",
     "describe": {
       "columns": [
        {
@@ -285,9 +284,6 @@
          "type_info": "Text"
        }
      ],
-      "parameters": {
-        "Right": 2
-      },
      "nullable": [
        false,
        true,
@@ -297,11 +293,14 @@
        false,
        false,
        true
-      ]
-    }
+      ],
+      "parameters": {
+        "Right": 2
+      }
+    },
+    "query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications WHERE id < ? ORDER BY id DESC LIMIT ?"
   },
   "668f39c868f90cdbcc635858bac9e55ed73192ed2aec5c52dcfba9800a7a4a41": {
-    "query": "SELECT id AS \"id: u32\", hostname, path, username, password FROM cifs_shares",
     "describe": {
       "columns": [
        {
@@ -330,30 +329,30 @@
          "type_info": "Text"
        }
      ],
-      "parameters": {
-        "Right": 0
-      },
      "nullable": [
        false,
        false,
        false,
        false,
        true
-      ]
-    }
+      ],
+      "parameters": {
+        "Right": 0
+      }
+    },
+    "query": "SELECT id AS \"id: u32\", hostname, path, username, password FROM cifs_shares"
   },
   "6b9abc9e079cff975f8a7f07ff70548c7877ecae3be0d0f2d3f439a6713326c0": {
-    "query": "DELETE FROM notifications WHERE id < ?",
     "describe": {
       "columns": [],
+      "nullable": [],
      "parameters": {
        "Right": 1
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "DELETE FROM notifications WHERE id < ?"
   },
   "6c96d76bffcc5f03290d8d8544a58521345ed2a843a509b17bbcd6257bb81821": {
-    "query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE id = 1;",
     "describe": {
       "columns": [
        {
@@ -367,27 +366,37 @@
          "type_info": "Text"
        }
      ],
-      "parameters": {
-        "Right": 0
-      },
      "nullable": [
        false,
        false
-      ]
-    }
+      ],
+      "parameters": {
+        "Right": 0
+      }
+    },
+    "query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE id = 1;"
   },
   "7d548d2472fa3707bd17364b4800e229b9c2b1c0a22e245bf4e635b9b16b8c24": {
-    "query": "INSERT INTO certificates (priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (?, ?, ?, datetime('now'), datetime('now'))",
     "describe": {
       "columns": [],
+      "nullable": [],
      "parameters": {
        "Right": 3
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "INSERT INTO certificates (priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (?, ?, ?, datetime('now'), datetime('now'))"
   },
+  "82a8fa7eae8a73b5345015c72af024b4f21489b1d9b42235398d7eb8977fb132": {
+    "describe": {
+      "columns": [],
+      "nullable": [],
+      "parameters": {
+        "Right": 1
+      }
+    },
+    "query": "UPDATE account SET password = ?"
+  },
   "8595651866e7db772260bd79e19d55b7271fd795b82a99821c935a9237c1aa16": {
-    "query": "SELECT interface, key FROM tor WHERE package = ?",
     "describe": {
       "columns": [
        {
@@ -401,17 +410,17 @@
          "type_info": "Blob"
        }
      ],
-      "parameters": {
-        "Right": 1
-      },
      "nullable": [
        false,
        false
-      ]
-    }
+      ],
+      "parameters": {
+        "Right": 1
+      }
+    },
+    "query": "SELECT interface, key FROM tor WHERE package = ?"
   },
   "9496e17a73672ac3675e02efa7c4bf8bd479b866c0d31fa1e3a85ef159310a57": {
-    "query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE lookup_string = ?",
     "describe": {
       "columns": [
        {
@@ -425,47 +434,47 @@
          "type_info": "Text"
        }
      ],
-      "parameters": {
-        "Right": 1
-      },
      "nullable": [
        false,
        false
-      ]
-    }
+      ],
+      "parameters": {
+        "Right": 1
+      }
+    },
+    "query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE lookup_string = ?"
   },
   "9fcedab1ba34daa2c6ae97c5953c09821b35b55be75b0c66045ab31a2cf4553e": {
-    "query": "REPLACE INTO account (id, password, tor_key) VALUES (?, ?, ?)",
     "describe": {
       "columns": [],
+      "nullable": [],
      "parameters": {
        "Right": 3
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "REPLACE INTO account (id, password, tor_key) VALUES (?, ?, ?)"
   },
   "a1cbaac36d8e14c8c3e7276237c4824bff18861f91b0b08aa5791704c492acb7": {
-    "query": "INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (1, ?, ?, NULL, datetime('now'), datetime('now'))",
     "describe": {
       "columns": [],
+      "nullable": [],
      "parameters": {
        "Right": 2
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "INSERT INTO certificates (id, priv_key_pem, certificate_pem, lookup_string, created_at, updated_at) VALUES (1, ?, ?, NULL, datetime('now'), datetime('now'))"
   },
   "a4e7162322b28508310b9de7ebc891e619b881ff6d3ea09eba13da39626ab12f": {
-    "query": "UPDATE cifs_shares SET hostname = ?, path = ?, username = ?, password = ? WHERE id = ?",
     "describe": {
       "columns": [],
+      "nullable": [],
      "parameters": {
        "Right": 5
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "UPDATE cifs_shares SET hostname = ?, path = ?, username = ?, password = ? WHERE id = ?"
   },
   "a6b0c8909a3a5d6d9156aebfb359424e6b5a1d1402e028219e21726f1ebd282e": {
-    "query": "SELECT fingerprint, openssh_pubkey, created_at FROM ssh_keys",
     "describe": {
       "columns": [
        {
@@ -484,18 +493,18 @@
          "type_info": "Text"
        }
      ],
-      "parameters": {
-        "Right": 0
-      },
      "nullable": [
        false,
        false,
        false
-      ]
-    }
+      ],
+      "parameters": {
+        "Right": 0
+      }
+    },
+    "query": "SELECT fingerprint, openssh_pubkey, created_at FROM ssh_keys"
   },
   "abfdeea8cd10343b85f647d7abc5dc3bd0b5891101b143485938192ee3b8c907": {
-    "query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications ORDER BY id DESC LIMIT ?",
     "describe": {
       "columns": [
        {
@@ -539,9 +548,6 @@
          "type_info": "Text"
        }
      ],
-      "parameters": {
-        "Right": 1
-      },
      "nullable": [
        false,
        true,
@@ -551,21 +557,24 @@
        false,
        false,
        true
-      ]
-    }
+      ],
+      "parameters": {
+        "Right": 1
+      }
+    },
+    "query": "SELECT id, package_id, created_at, code, level, title, message, data FROM notifications ORDER BY id DESC LIMIT ?"
   },
   "b376d9e77e0861a9af2d1081ca48d14e83abc5a1546213d15bb570972c403beb": {
-    "query": "-- Add migration script here\nCREATE TABLE IF NOT EXISTS tor\n(\n package TEXT NOT NULL,\n interface TEXT NOT NULL,\n key BLOB NOT NULL CHECK (length(key) = 64),\n PRIMARY KEY (package, interface)\n);\nCREATE TABLE IF NOT EXISTS session\n(\n id TEXT NOT NULL PRIMARY KEY,\n logged_in TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\n logged_out TIMESTAMP,\n last_active TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\n user_agent TEXT,\n metadata TEXT NOT NULL DEFAULT 'null'\n);\nCREATE TABLE IF NOT EXISTS account\n(\n id INTEGER PRIMARY KEY CHECK (id = 0),\n password TEXT NOT NULL,\n tor_key BLOB NOT NULL CHECK (length(tor_key) = 64)\n);\nCREATE TABLE IF NOT EXISTS ssh_keys\n(\n fingerprint TEXT NOT NULL,\n openssh_pubkey TEXT NOT NULL,\n created_at TEXT NOT NULL,\n PRIMARY KEY (fingerprint)\n);\nCREATE TABLE IF NOT EXISTS certificates\n(\n id INTEGER PRIMARY KEY, -- Root = 0, Int = 1, Other = 2..\n priv_key_pem TEXT NOT NULL,\n certificate_pem TEXT NOT NULL,\n lookup_string TEXT UNIQUE,\n created_at TEXT,\n updated_at TEXT\n);\nCREATE TABLE IF NOT EXISTS notifications\n(\n id INTEGER PRIMARY KEY,\n package_id TEXT,\n created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\n code INTEGER NOT NULL,\n level TEXT NOT NULL,\n title TEXT NOT NULL,\n message TEXT NOT NULL,\n data TEXT\n);\nCREATE TABLE IF NOT EXISTS cifs_shares\n(\n id INTEGER PRIMARY KEY,\n hostname TEXT NOT NULL,\n path TEXT NOT NULL,\n username TEXT NOT NULL,\n password TEXT\n);",
     "describe": {
       "columns": [],
+      "nullable": [],
      "parameters": {
        "Right": 0
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "-- Add migration script here\nCREATE TABLE IF NOT EXISTS tor\n(\n package TEXT NOT NULL,\n interface TEXT NOT NULL,\n key BLOB NOT NULL CHECK (length(key) = 64),\n PRIMARY KEY (package, interface)\n);\nCREATE TABLE IF NOT EXISTS session\n(\n id TEXT NOT NULL PRIMARY KEY,\n logged_in TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\n logged_out TIMESTAMP,\n last_active TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\n user_agent TEXT,\n metadata TEXT NOT NULL DEFAULT 'null'\n);\nCREATE TABLE IF NOT EXISTS account\n(\n id INTEGER PRIMARY KEY CHECK (id = 0),\n password TEXT NOT NULL,\n tor_key BLOB NOT NULL CHECK (length(tor_key) = 64)\n);\nCREATE TABLE IF NOT EXISTS ssh_keys\n(\n fingerprint TEXT NOT NULL,\n openssh_pubkey TEXT NOT NULL,\n created_at TEXT NOT NULL,\n PRIMARY KEY (fingerprint)\n);\nCREATE TABLE IF NOT EXISTS certificates\n(\n id INTEGER PRIMARY KEY, -- Root = 0, Int = 1, Other = 2..\n priv_key_pem TEXT NOT NULL,\n certificate_pem TEXT NOT NULL,\n lookup_string TEXT UNIQUE,\n created_at TEXT,\n updated_at TEXT\n);\nCREATE TABLE IF NOT EXISTS notifications\n(\n id INTEGER PRIMARY KEY,\n package_id TEXT,\n created_at TIMESTAMP NOT NULL DEFAULT CURRENT_TIMESTAMP,\n code INTEGER NOT NULL,\n level TEXT NOT NULL,\n title TEXT NOT NULL,\n message TEXT NOT NULL,\n data TEXT\n);\nCREATE TABLE IF NOT EXISTS cifs_shares\n(\n id INTEGER PRIMARY KEY,\n hostname TEXT NOT NULL,\n path TEXT NOT NULL,\n username TEXT NOT NULL,\n password TEXT\n);"
   },
   "cc33fe2958fe7caeac6999a217f918a68b45ad596664170b4d07671c6ea49566": {
-    "query": "SELECT hostname, path, username, password FROM cifs_shares WHERE id = ?",
     "describe": {
       "columns": [
        {
@@ -589,19 +598,19 @@
          "type_info": "Text"
        }
      ],
-      "parameters": {
-        "Right": 1
-      },
      "nullable": [
        false,
        false,
        false,
        true
-      ]
-    }
+      ],
+      "parameters": {
+        "Right": 1
+      }
+    },
+    "query": "SELECT hostname, path, username, password FROM cifs_shares WHERE id = ?"
   },
   "d5117054072476377f3c4f040ea429d4c9b2cf534e76f35c80a2bf60e8599cca": {
-    "query": "SELECT openssh_pubkey FROM ssh_keys",
     "describe": {
       "columns": [
        {
@@ -610,36 +619,36 @@
          "type_info": "Text"
        }
      ],
-      "parameters": {
-        "Right": 0
-      },
      "nullable": [
        false
-      ]
-    }
+      ],
+      "parameters": {
+        "Right": 0
+      }
+    },
+    "query": "SELECT openssh_pubkey FROM ssh_keys"
   },
   "d54bd5b53f8c760e1f8cde604aa8b1bdc66e4e025a636bc44ffbcd788b5168fd": {
-    "query": "INSERT INTO notifications (package_id, code, level, title, message, data) VALUES (?, ?, ?, ?, ?, ?)",
     "describe": {
       "columns": [],
+      "nullable": [],
      "parameters": {
        "Right": 6
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "INSERT INTO notifications (package_id, code, level, title, message, data) VALUES (?, ?, ?, ?, ?, ?)"
   },
   "d79d608ceb862c15b741a6040044c6dd54a837a3a0c5594d15a6041c7bc68ea8": {
-    "query": "INSERT OR IGNORE INTO tor (package, interface, key) VALUES (?, ?, ?)",
     "describe": {
       "columns": [],
+      "nullable": [],
      "parameters": {
        "Right": 3
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "INSERT OR IGNORE INTO tor (package, interface, key) VALUES (?, ?, ?)"
   },
   "de2a5e90798d606047ab8180c044baac05469c0cdf151316bd58ee8c7196fdef": {
-    "query": "SELECT * FROM ssh_keys WHERE fingerprint = ?",
     "describe": {
       "columns": [
        {
@@ -658,18 +667,18 @@
          "type_info": "Text"
        }
      ],
-      "parameters": {
-        "Right": 1
-      },
      "nullable": [
        false,
        false,
        false
-      ]
-    }
+      ],
+      "parameters": {
+        "Right": 1
+      }
+    },
+    "query": "SELECT * FROM ssh_keys WHERE fingerprint = ?"
   },
   "ed848affa5bf92997cd441e3a50b3616b6724df3884bd9d199b3225e0bea8a54": {
-    "query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE id = 0;",
     "describe": {
       "columns": [
        {
@@ -683,23 +692,24 @@
          "type_info": "Text"
        }
      ],
-      "parameters": {
-        "Right": 0
-      },
      "nullable": [
        false,
        false
-      ]
-    }
+      ],
+      "parameters": {
+        "Right": 0
+      }
+    },
+    "query": "SELECT priv_key_pem, certificate_pem FROM certificates WHERE id = 0;"
   },
   "f63c8c5a8754b34a49ef5d67802fa2b72aa409bbec92ecc6901492092974b71a": {
-    "query": "DELETE FROM cifs_shares WHERE id = ?",
     "describe": {
       "columns": [],
+      "nullable": [],
      "parameters": {
        "Right": 1
-      },
-      "nullable": []
-    }
+      }
+    },
+    "query": "DELETE FROM cifs_shares WHERE id = ?"
   }
 }
@@ -1,76 +1,23 @@
 use std::collections::{BTreeMap, BTreeSet};
-use std::path::Path;
-use std::str::FromStr;
 use std::time::Duration;
 
 use clap::ArgMatches;
 use color_eyre::eyre::eyre;
 use indexmap::IndexSet;
 use patch_db::HasModel;
 use rpc_toolkit::command;
 use serde::{Deserialize, Serialize};
 use tracing::instrument;
 
 use self::docker::DockerAction;
 use crate::config::{Config, ConfigSpec};
 use crate::context::RpcContext;
-use crate::id::{Id, ImageId, InvalidId};
+use crate::id::{ ImageId};
+use crate::procedure::{PackageProcedure, ProcedureName};
 use crate::s9pk::manifest::PackageId;
 use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat};
 use crate::util::Version;
 use crate::volume::Volumes;
 use crate::{Error, ResultExt};
 
 pub mod docker;
 
 // TODO: create RPC endpoint that looks up the appropriate action and calls `execute`
 
-#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)]
-pub struct ActionId<S: AsRef<str> = String>(Id<S>);
-impl FromStr for ActionId {
-    type Err = InvalidId;
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        Ok(ActionId(Id::try_from(s.to_owned())?))
-    }
-}
-impl From<ActionId> for String {
-    fn from(value: ActionId) -> Self {
-        value.0.into()
-    }
-}
-impl<S: AsRef<str>> AsRef<ActionId<S>> for ActionId<S> {
-    fn as_ref(&self) -> &ActionId<S> {
-        self
-    }
-}
-impl<S: AsRef<str>> std::fmt::Display for ActionId<S> {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{}", &self.0)
-    }
-}
-impl<S: AsRef<str>> AsRef<str> for ActionId<S> {
-    fn as_ref(&self) -> &str {
-        self.0.as_ref()
-    }
-}
-impl<S: AsRef<str>> AsRef<Path> for ActionId<S> {
-    fn as_ref(&self) -> &Path {
-        self.0.as_ref().as_ref()
-    }
-}
-impl<'de, S> Deserialize<'de> for ActionId<S>
-where
-    S: AsRef<str>,
-    Id<S>: Deserialize<'de>,
-{
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: serde::de::Deserializer<'de>,
-    {
-        Ok(ActionId(Deserialize::deserialize(deserializer)?))
-    }
-}
-
+pub use models::ActionId;
 #[derive(Clone, Debug, Default, Deserialize, Serialize)]
 pub struct Actions(pub BTreeMap<ActionId, Action>);
 
@@ -103,7 +50,7 @@ pub struct Action {
     pub description: String,
     #[serde(default)]
     pub warning: Option<String>,
-    pub implementation: ActionImplementation,
+    pub implementation: PackageProcedure,
     pub allowed_statuses: IndexSet<DockerStatus>,
     #[serde(default)]
     pub input_spec: ConfigSpec,
@@ -141,7 +88,7 @@ impl Action {
             ctx,
             pkg_id,
             pkg_version,
-            Some(&format!("{}Action", action_id)),
+            ProcedureName::Action(action_id.clone()),
             volumes,
             input,
             true,
@@ -152,76 +99,6 @@
         }
     }
 }
 
-#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
-#[serde(rename_all = "kebab-case")]
-#[serde(tag = "type")]
-pub enum ActionImplementation {
-    Docker(DockerAction),
-}
-impl ActionImplementation {
-    #[instrument]
-    pub fn validate(
-        &self,
-        volumes: &Volumes,
-        image_ids: &BTreeSet<ImageId>,
-        expected_io: bool,
-    ) -> Result<(), color_eyre::eyre::Report> {
-        match self {
-            ActionImplementation::Docker(action) => {
-                action.validate(volumes, image_ids, expected_io)
-            }
-        }
-    }
-
-    #[instrument(skip(ctx, input))]
-    pub async fn execute<I: Serialize, O: for<'de> Deserialize<'de>>(
-        &self,
-        ctx: &RpcContext,
-        pkg_id: &PackageId,
-        pkg_version: &Version,
-        name: Option<&str>,
-        volumes: &Volumes,
-        input: Option<I>,
-        allow_inject: bool,
-        timeout: Option<Duration>,
-    ) -> Result<Result<O, (i32, String)>, Error> {
-        match self {
-            ActionImplementation::Docker(action) => {
-                action
-                    .execute(
-                        ctx,
-                        pkg_id,
-                        pkg_version,
-                        name,
-                        volumes,
-                        input,
-                        allow_inject,
-                        timeout,
-                    )
-                    .await
-            }
-        }
-    }
-
-    #[instrument(skip(ctx, input))]
-    pub async fn sandboxed<I: Serialize, O: for<'de> Deserialize<'de>>(
-        &self,
-        ctx: &RpcContext,
-        pkg_id: &PackageId,
-        pkg_version: &Version,
-        volumes: &Volumes,
-        input: Option<I>,
-        timeout: Option<Duration>,
-    ) -> Result<Result<O, (i32, String)>, Error> {
-        match self {
-            ActionImplementation::Docker(action) => {
-                action
-                    .sandboxed(ctx, pkg_id, pkg_version, volumes, input, timeout)
-                    .await
-            }
-        }
-    }
-}
-
 fn display_action_result(action_result: ActionResult, matches: &ArgMatches<'_>) {
     if matches.is_present("format") {
         return display_serializable(action_result, matches);
@@ -278,13 +155,3 @@ pub async fn action(
         ))
     }
 }
-
-pub struct NoOutput;
-impl<'de> Deserialize<'de> for NoOutput {
-    fn deserialize<D>(_: D) -> Result<Self, D::Error>
-    where
-        D: serde::Deserializer<'de>,
-    {
-        Ok(NoOutput)
-    }
-}
@@ -4,6 +4,7 @@ use std::marker::PhantomData;
|
||||
use chrono::{DateTime, Utc};
|
||||
use clap::ArgMatches;
|
||||
use color_eyre::eyre::eyre;
|
||||
use patch_db::{DbHandle, LockReceipt};
|
||||
use rpc_toolkit::command;
|
||||
use rpc_toolkit::command_helpers::prelude::{RequestParts, ResponseParts};
|
||||
use rpc_toolkit::yajrc::RpcError;
|
||||
@@ -18,7 +19,7 @@ use crate::util::display_none;
|
||||
use crate::util::serde::{display_serializable, IoFormat};
|
||||
use crate::{ensure_code, Error, ResultExt};
|
||||
|
||||
#[command(subcommands(login, logout, session))]
|
||||
#[command(subcommands(login, logout, session, reset_password))]
|
||||
pub fn auth() -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
@@ -256,3 +257,113 @@ pub async fn kill(
|
||||
HasLoggedOutSessions::new(ids.into_iter().map(KillSessionId), &ctx).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(ctx, old_password, new_password))]
|
||||
async fn cli_reset_password(
|
||||
ctx: CliContext,
|
||||
old_password: Option<String>,
|
||||
new_password: Option<String>,
|
||||
) -> Result<(), RpcError> {
|
||||
let old_password = if let Some(old_password) = old_password {
|
||||
old_password
|
||||
} else {
|
||||
rpassword::prompt_password_stdout("Current Password: ")?
|
||||
};
|
||||
|
||||
let new_password = if let Some(new_password) = new_password {
|
||||
new_password
|
||||
} else {
|
||||
let new_password = rpassword::prompt_password_stdout("New Password: ")?;
|
||||
if new_password != rpassword::prompt_password_stdout("Confirm: ")? {
|
||||
return Err(Error::new(
|
||||
eyre!("Passwords do not match"),
|
||||
crate::ErrorKind::IncorrectPassword,
|
||||
)
|
||||
.into());
|
||||
}
|
||||
new_password
|
||||
};
|
||||
|
||||
rpc_toolkit::command_helpers::call_remote(
|
||||
ctx,
|
||||
"auth.reset-password",
|
||||
serde_json::json!({ "old-password": old_password, "new-password": new_password }),
|
||||
PhantomData::<()>,
|
||||
)
|
||||
.await?
|
||||
.result?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub struct SetPasswordReceipt(LockReceipt<String, ()>);
|
||||
impl SetPasswordReceipt {
|
||||
pub async fn new<Db: DbHandle>(db: &mut Db) -> Result<Self, Error> {
|
||||
let mut locks = Vec::new();
|
||||
|
||||
let setup = Self::setup(&mut locks);
|
||||
Ok(setup(&db.lock_all(locks).await?)?)
|
||||
}
|
||||
|
||||
pub fn setup(
|
||||
locks: &mut Vec<patch_db::LockTargetId>,
|
||||
) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
|
||||
let password_hash = crate::db::DatabaseModel::new()
|
||||
.server_info()
|
||||
.password_hash()
|
||||
.make_locker(patch_db::LockType::Write)
|
||||
.add_to_keys(locks);
|
||||
move |skeleton_key| Ok(Self(password_hash.verify(skeleton_key)?))
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn set_password<Db: DbHandle, Ex>(
|
||||
db: &mut Db,
|
||||
receipt: &SetPasswordReceipt,
|
||||
secrets: &mut Ex,
|
||||
password: &str,
|
||||
) -> Result<(), Error>
|
||||
where
|
||||
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
|
||||
{
|
||||
let password = argon2::hash_encoded(
|
||||
password.as_bytes(),
|
||||
&rand::random::<[u8; 16]>()[..],
|
||||
&argon2::Config::default(),
|
||||
)
|
||||
.with_kind(crate::ErrorKind::PasswordHashGeneration)?;
|
||||
|
||||
sqlx::query!("UPDATE account SET password = ?", password,)
|
||||
.execute(secrets)
|
||||
.await?;
|
||||
|
||||
receipt.0.set(db, password).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
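For context, `set_password` above stores an encoded argon2 hash, with the salt and parameters embedded in the string. This is a minimal sketch of how such a hash is checked later, assuming the `rust-argon2` crate whose API the diff appears to use; the codebase's real entry point is `check_password_against_db` and may differ in detail:

```rust
// Illustrative only: verify a password against an encoded argon2 hash.
// `verify_encoded` re-derives the hash using the salt and config embedded
// in the encoded string, then compares the result.
fn check_password(encoded_hash: &str, candidate: &str) -> bool {
    argon2::verify_encoded(encoded_hash, candidate.as_bytes()).unwrap_or(false)
}
```
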
#[command(
    rename = "reset-password",
    custom_cli(cli_reset_password(async, context(CliContext))),
    display(display_none)
)]
#[instrument(skip(ctx, old_password, new_password))]
pub async fn reset_password(
    #[context] ctx: RpcContext,
    #[arg(rename = "old-password")] old_password: Option<String>,
    #[arg(rename = "new-password")] new_password: Option<String>,
) -> Result<(), Error> {
    let old_password = old_password.unwrap_or_default();
    let new_password = new_password.unwrap_or_default();

    let mut secrets = ctx.secret_store.acquire().await?;
    check_password_against_db(&mut secrets, &old_password).await?;

    let mut db = ctx.db.handle();

    let set_password_receipt = SetPasswordReceipt::new(&mut db).await?;

    set_password(&mut db, &set_password_receipt, &mut secrets, &new_password).await?;

    Ok(())
}

@@ -1,7 +1,8 @@
use std::collections::BTreeMap;
use std::collections::{BTreeMap, BTreeSet};
use std::sync::Arc;

use chrono::Utc;
use clap::ArgMatches;
use color_eyre::eyre::eyre;
use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
@@ -18,6 +19,7 @@ use super::PackageBackupReport;
use crate::auth::check_password_against_db;
use crate::backup::{BackupReport, ServerBackupReport};
use crate::context::RpcContext;
use crate::db::model::BackupProgress;
use crate::db::util::WithRevision;
use crate::disk::mount::backup::BackupMountGuard;
use crate::disk::mount::filesystem::ReadWrite;
@@ -112,12 +114,24 @@ impl Serialize for OsBackup {
    }
}

fn parse_comma_separated(arg: &str, _: &ArgMatches<'_>) -> Result<BTreeSet<PackageId>, Error> {
    arg.split(',')
        .map(|s| s.trim().parse().map_err(Error::from))
        .collect()
}

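For clarity, `--package-ids=bitcoind, lnd,btcpayserver` parses to the set `{bitcoind, btcpayserver, lnd}`: whitespace around each entry is trimmed, duplicates collapse in the `BTreeSet`, and any entry that is not a valid `PackageId` fails the whole parse. A standalone sketch of the same shape, illustrative only, using plain `String`s instead of `PackageId`:

```rust
use std::collections::BTreeSet;

// Illustrative stand-in for the parser above, minus clap and PackageId.
fn parse_comma_separated(arg: &str) -> BTreeSet<String> {
    arg.split(',').map(|s| s.trim().to_owned()).collect()
}

fn main() {
    let ids = parse_comma_separated("bitcoind, lnd,btcpayserver,lnd");
    // BTreeSet sorts and deduplicates: {"bitcoind", "btcpayserver", "lnd"}
    assert_eq!(ids.len(), 3);
}
```
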
#[command(rename = "create", display(display_none))]
#[instrument(skip(ctx, old_password, password))]
pub async fn backup_all(
    #[context] ctx: RpcContext,
    #[arg(rename = "target-id")] target_id: BackupTargetId,
    #[arg(rename = "old-password", long = "old-password")] old_password: Option<String>,
    #[arg(
        rename = "package-ids",
        long = "package-ids",
        parse(parse_comma_separated)
    )]
    package_ids: Option<BTreeSet<PackageId>>,
    #[arg] password: String,
) -> Result<WithRevision<()>, Error> {
    let mut db = ctx.db.handle();
@@ -130,17 +144,27 @@ pub async fn backup_all(
        old_password.as_ref().unwrap_or(&password),
    )
    .await?;
    let all_packages = crate::db::DatabaseModel::new()
        .package_data()
        .get(&mut db, false)
        .await?
        .0
        .keys()
        .into_iter()
        .cloned()
        .collect();
    let package_ids = package_ids.unwrap_or(all_packages);
    if old_password.is_some() {
        backup_guard.change_password(&password)?;
    }
    let revision = assure_backing_up(&mut db).await?;
    let revision = assure_backing_up(&mut db, &package_ids).await?;
    tokio::task::spawn(async move {
        let backup_res = perform_backup(&ctx, &mut db, backup_guard).await;
        let status_model = crate::db::DatabaseModel::new()
        let backup_res = perform_backup(&ctx, &mut db, backup_guard, &package_ids).await;
        let backup_progress = crate::db::DatabaseModel::new()
            .server_info()
            .status_info()
            .backing_up();
        status_model
            .backup_progress();
        backup_progress
            .clone()
            .lock(&mut db, LockType::Write)
            .await
@@ -207,8 +231,8 @@ pub async fn backup_all(
                .expect("failed to send notification");
            }
        }
        status_model
            .put(&mut db, &false)
        backup_progress
            .delete(&mut db)
            .await
            .expect("failed to change server status");
    });
@@ -218,23 +242,40 @@ pub async fn backup_all(
    })
}

#[instrument(skip(db))]
async fn assure_backing_up(db: &mut PatchDbHandle) -> Result<Option<Arc<Revision>>, Error> {
#[instrument(skip(db, packages))]
async fn assure_backing_up(
    db: &mut PatchDbHandle,
    packages: impl IntoIterator<Item = &PackageId>,
) -> Result<Option<Arc<Revision>>, Error> {
    let mut tx = db.begin().await?;
    let mut backing_up = crate::db::DatabaseModel::new()
        .server_info()
        .status_info()
        .backing_up()
        .backup_progress()
        .get_mut(&mut tx)
        .await?;

    if *backing_up {
    if backing_up
        .iter()
        .flat_map(|x| x.values())
        .fold(false, |acc, x| {
            if !x.complete {
                return true;
            }
            acc
        })
    {
        return Err(Error::new(
            eyre!("Server is already backing up!"),
            crate::ErrorKind::InvalidRequest,
        ));
    }
    *backing_up = true;
    *backing_up = Some(
        packages
            .into_iter()
            .map(|x| (x.clone(), BackupProgress { complete: false }))
            .collect(),
    );
    backing_up.save(&mut tx).await?;
    Ok(tx.commit(None).await?)
}
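One readability note on the guard above: the `fold(false, ...)` is just an existence check, and reads more directly as `any`. A behavior-preserving sketch over the same types:

```rust
// Equivalent to the fold: "is any tracked package's backup still incomplete?"
let in_progress = backing_up
    .iter()
    .flat_map(|x| x.values())
    .any(|progress| !progress.complete);
```
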
@@ -244,6 +285,7 @@ async fn perform_backup<Db: DbHandle>(
    ctx: &RpcContext,
    mut db: Db,
    mut backup_guard: BackupMountGuard<TmpMountGuard>,
    package_ids: &BTreeSet<PackageId>,
) -> Result<BTreeMap<PackageId, PackageBackupReport>, Error> {
    let mut backup_report = BTreeMap::new();

@@ -251,6 +293,8 @@ async fn perform_backup<Db: DbHandle>(
        .package_data()
        .keys(&mut db, false)
        .await?
        .into_iter()
        .filter(|id| package_ids.contains(id))
    {
        let mut tx = db.begin().await?; // for lock scope
        let installed_model = if let Some(installed_model) = crate::db::DatabaseModel::new()
@@ -268,9 +312,11 @@ async fn perform_backup<Db: DbHandle>(

        main_status_model.lock(&mut tx, LockType::Write).await?;
        let (started, health) = match main_status_model.get(&mut tx, true).await?.into_owned() {
            MainStatus::Starting => (Some(Utc::now()), Default::default()),
            MainStatus::Starting { .. } => (Some(Utc::now()), Default::default()),
            MainStatus::Running { started, health } => (Some(started), health.clone()),
            MainStatus::Stopped | MainStatus::Stopping => (None, Default::default()),
            MainStatus::Stopped | MainStatus::Stopping | MainStatus::Restarting => {
                (None, Default::default())
            }
            MainStatus::BackingUp { .. } => {
                backup_report.insert(
                    package_id,
@@ -341,7 +387,7 @@ async fn perform_backup<Db: DbHandle>(
            backup_guard
                .metadata
                .package_backups
                .insert(package_id, pkg_meta);
                .insert(package_id.clone(), pkg_meta);
        }

        main_status_model
@@ -353,6 +399,23 @@ async fn perform_backup<Db: DbHandle>(
                },
            )
            .await?;

        let mut backup_progress = crate::db::DatabaseModel::new()
            .server_info()
            .status_info()
            .backup_progress()
            .get_mut(&mut tx)
            .await?;
        if backup_progress.is_none() {
            *backup_progress = Some(Default::default());
        }
        if let Some(mut backup_progress) = backup_progress
            .as_mut()
            .and_then(|bp| bp.get_mut(&package_id))
        {
            (*backup_progress).complete = true;
        }
        backup_progress.save(&mut tx).await?;
        tx.save().await?;
    }

@@ -392,6 +455,5 @@ async fn perform_backup<Db: DbHandle>(
        .last_backup()
        .put(&mut db, &timestamp)
        .await?;

    Ok(backup_report)
}

@@ -12,12 +12,12 @@ use tokio::io::AsyncWriteExt;
use tracing::instrument;

use self::target::PackageBackupInfo;
use crate::action::{ActionImplementation, NoOutput};
use crate::context::RpcContext;
use crate::dependencies::reconfigure_dependents_with_live_pointers;
use crate::id::ImageId;
use crate::install::PKG_ARCHIVE_DIR;
use crate::net::interface::{InterfaceId, Interfaces};
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
use crate::util::serde::IoFormat;
use crate::util::{AtomicFile, Version};
@@ -64,8 +64,8 @@ struct BackupMetadata {

#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
pub struct BackupActions {
    pub create: ActionImplementation,
    pub restore: ActionImplementation,
    pub create: PackageProcedure,
    pub restore: PackageProcedure,
}
impl BackupActions {
    pub fn validate(&self, volumes: &Volumes, image_ids: &BTreeSet<ImageId>) -> Result<(), Error> {
@@ -99,7 +99,7 @@ impl BackupActions {
                ctx,
                pkg_id,
                pkg_version,
                Some("CreateBackup"),
                ProcedureName::CreateBackup,
                &volumes,
                None,
                false,
@@ -178,7 +178,7 @@ impl BackupActions {
                ctx,
                pkg_id,
                pkg_version,
                Some("RestoreBackup"),
                ProcedureName::RestoreBackup,
                &volumes,
                None,
                false,
@@ -240,7 +240,8 @@ impl BackupActions {
            .get(db, true)
            .await?;

        reconfigure_dependents_with_live_pointers(ctx, db, &entry).await?;
        let receipts = crate::config::ConfigReceipts::new(db).await?;
        reconfigure_dependents_with_live_pointers(ctx, db, &receipts, &entry).await?;

        Ok(())
    }

@@ -27,6 +27,7 @@ use crate::disk::mount::guard::TmpMountGuard;
use crate::install::progress::InstallProgress;
use crate::install::{download_install_s9pk, PKG_PUBLIC_DIR};
use crate::net::ssl::SslManager;
use crate::notifications::NotificationLevel;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::s9pk::reader::S9pkReader;
use crate::setup::RecoveryStatus;
@@ -34,7 +35,6 @@ use crate::util::display_none;
use crate::util::io::dir_size;
use crate::util::serde::IoFormat;
use crate::volume::{backup_dir, BACKUP_DIR, PKG_VOLUME_DIR};
use crate::{auth::check_password_against_db, notifications::NotificationLevel};
use crate::{Error, ResultExt};

fn parse_comma_separated(arg: &str, _: &ArgMatches<'_>) -> Result<Vec<PackageId>, Error> {
@@ -44,27 +44,22 @@ fn parse_comma_separated(arg: &str, _: &ArgMatches<'_>) -> Result<Vec<PackageId>
}

#[command(rename = "restore", display(display_none))]
#[instrument(skip(ctx, old_password, password))]
#[instrument(skip(ctx, password))]
pub async fn restore_packages_rpc(
    #[context] ctx: RpcContext,
    #[arg(parse(parse_comma_separated))] ids: Vec<PackageId>,
    #[arg(rename = "target-id")] target_id: BackupTargetId,
    #[arg(rename = "old-password", long = "old-password")] old_password: Option<String>,
    #[arg] password: String,
) -> Result<WithRevision<()>, Error> {
    let mut db = ctx.db.handle();
    check_password_against_db(&mut ctx.secret_store.acquire().await?, &password).await?;
    let fs = target_id
        .load(&mut ctx.secret_store.acquire().await?)
        .await?;
    let mut backup_guard = BackupMountGuard::mount(
    let backup_guard = BackupMountGuard::mount(
        TmpMountGuard::mount(&fs, ReadOnly).await?,
        old_password.as_ref().unwrap_or(&password),
        &password,
    )
    .await?;
    if old_password.is_some() {
        backup_guard.change_password(&password)?;
    }

    let (revision, backup_guard, tasks, _) =
        restore_packages(&ctx, &mut db, backup_guard, ids).await?;

@@ -6,7 +6,7 @@ use chrono::{DateTime, Utc};
use clap::ArgMatches;
use color_eyre::eyre::eyre;
use digest::generic_array::GenericArray;
use digest::Digest;
use digest::{Digest, OutputSizeUser};
use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use sha2::Sha256;
@@ -119,7 +119,9 @@ impl FileSystem for BackupTargetFS {
            BackupTargetFS::Cifs(a) => a.mount(mountpoint, mount_type).await,
        }
    }
    async fn source_hash(&self) -> Result<GenericArray<u8, <Sha256 as Digest>::OutputSize>, Error> {
    async fn source_hash(
        &self,
    ) -> Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error> {
        match self {
            BackupTargetFS::Disk(a) => a.source_hash().await,
            BackupTargetFS::Cifs(a) => a.source_hash().await,

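The `Digest` to `OutputSizeUser` change in `source_hash` appears to track the `digest` crate's 0.10 reorganization, where the `OutputSize` associated type moved from `Digest` onto the `OutputSizeUser` trait. A minimal sketch of the new spelling, assuming digest 0.10 and sha2:

```rust
use digest::{generic_array::GenericArray, OutputSizeUser};
use sha2::{Digest, Sha256};

// In digest 0.10, `OutputSize` lives on `OutputSizeUser`, so the hash output
// type is named through that trait (the `digest::Output<T>` alias does the same).
type Sha256Output = GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>;

fn hash(data: &[u8]) -> Sha256Output {
    Sha256::digest(data)
}
```
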
@@ -79,7 +79,7 @@ async fn setup_or_init(cfg_path: Option<&str>) -> Result<(), Error> {
    let guid_string = tokio::fs::read_to_string("/embassy-os/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
        .await?;
    let guid = guid_string.trim();
    let reboot = embassy::disk::main::import(
    let requires_reboot = embassy::disk::main::import(
        guid,
        cfg.datadir(),
        if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
@@ -95,7 +95,7 @@ async fn setup_or_init(cfg_path: Option<&str>) -> Result<(), Error> {
        .await
        .with_ctx(|_| (embassy::ErrorKind::Filesystem, REPAIR_DISK_PATH))?;
    }
    if reboot.0 {
    if requires_reboot.0 {
        embassy::disk::main::export(guid, cfg.datadir()).await?;
        Command::new("reboot")
            .invoke(embassy::ErrorKind::Unknown)

@@ -20,7 +20,7 @@ fn inner_main() -> Result<(), Error> {
    ),
    context: matches => {
        if let Err(_) = std::env::var("RUST_LOG") {
            std::env::set_var("RUST_LOG", "embassy=warn");
            std::env::set_var("RUST_LOG", "embassy=warn,js_engine=warn");
        }
        EmbassyLogger::init();
        SdkContext::init(matches)?

@@ -81,7 +81,11 @@ async fn inner_main(cfg_path: Option<&str>) -> Result<Option<Shutdown>, Error> {
        .expect("send shutdown signal");
    });

    rpc_ctx.set_nginx_conf(&mut rpc_ctx.db.handle()).await?;
    let mut db = rpc_ctx.db.handle();
    let receipts = embassy::context::rpc::RpcSetNginxReceipts::new(&mut db).await?;

    rpc_ctx.set_nginx_conf(&mut db, receipts).await?;
    drop(db);
    let auth = auth(rpc_ctx.clone());
    let ctx = rpc_ctx.clone();
    let server = rpc_server!({

@@ -7,10 +7,10 @@ use serde::{Deserialize, Serialize};
use tracing::instrument;

use super::{Config, ConfigSpec};
use crate::action::ActionImplementation;
use crate::context::RpcContext;
use crate::dependencies::Dependencies;
use crate::id::ImageId;
use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
use crate::status::health_check::HealthCheckId;
use crate::util::Version;
@@ -26,8 +26,8 @@ pub struct ConfigRes {

#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
pub struct ConfigActions {
    pub get: ActionImplementation,
    pub set: ActionImplementation,
    pub get: PackageProcedure,
    pub set: PackageProcedure,
}
impl ConfigActions {
    #[instrument]
@@ -53,7 +53,7 @@ impl ConfigActions {
                ctx,
                pkg_id,
                pkg_version,
                Some("GetConfig"),
                ProcedureName::GetConfig,
                volumes,
                None::<()>,
                false,
@@ -81,7 +81,7 @@ impl ConfigActions {
                ctx,
                pkg_id,
                pkg_version,
                Some("SetConfig"),
                ProcedureName::SetConfig,
                volumes,
                Some(input),
                false,
@@ -107,6 +107,7 @@ impl ConfigActions {
#[derive(Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct SetResult {
    #[serde(default)]
    #[serde(deserialize_with = "crate::util::serde::deserialize_from_str_opt")]
    #[serde(serialize_with = "crate::util::serde::serialize_display_opt")]
    pub signal: Option<Signal>,

@@ -6,7 +6,7 @@ use color_eyre::eyre::eyre;
use futures::future::{BoxFuture, FutureExt};
use indexmap::IndexSet;
use itertools::Itertools;
use patch_db::{DbHandle, LockType};
use patch_db::{DbHandle, LockReceipt, LockTarget, LockTargetId, LockType, Verifier};
use rand::SeedableRng;
use regex::Regex;
use rpc_toolkit::command;
@@ -14,17 +14,18 @@ use serde_json::Value;
use tracing::instrument;

use crate::context::RpcContext;
use crate::db::model::CurrentDependencyInfo;
use crate::db::model::{CurrentDependencies, CurrentDependencyInfo, CurrentDependents};
use crate::db::util::WithRevision;
use crate::dependencies::{
    add_dependent_to_current_dependents_lists, break_transitive, heal_all_dependents_transitive,
    BreakageRes, DependencyError, DependencyErrors, TaggedDependencyError,
    BreakTransitiveReceipts, BreakageRes, Dependencies, DependencyConfig, DependencyError,
    DependencyErrors, DependencyReceipt, TaggedDependencyError, TryHealReceipts,
};
use crate::install::cleanup::remove_from_current_dependents_lists;
use crate::install::cleanup::{remove_from_current_dependents_lists, UpdateDependencyReceipts};
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::util::display_none;
use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat};
use crate::{Error, ResultExt as _};
use crate::Error;

pub mod action;
pub mod spec;
@@ -33,8 +34,8 @@ pub mod util;
pub use spec::{ConfigSpec, Defaultable};
use util::NumRange;

use self::action::ConfigRes;
use self::spec::{PackagePointerSpec, ValueSpecPointer};
use self::action::{ConfigActions, ConfigRes};
use self::spec::{ConfigPointerReceipts, PackagePointerSpec, ValueSpecPointer};

pub type Config = serde_json::Map<String, Value>;
pub trait TypeOf {
@@ -163,6 +164,55 @@ pub fn config(#[arg] id: PackageId) -> Result<PackageId, Error> {
    Ok(id)
}

pub struct ConfigGetReceipts {
    manifest_volumes: LockReceipt<crate::volume::Volumes, ()>,
    manifest_version: LockReceipt<crate::util::Version, ()>,
    manifest_config: LockReceipt<Option<ConfigActions>, ()>,
}

impl ConfigGetReceipts {
    pub async fn new<'a>(db: &'a mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
        let mut locks = Vec::new();

        let setup = Self::setup(&mut locks, id);
        Ok(setup(&db.lock_all(locks).await?)?)
    }

    pub fn setup(
        locks: &mut Vec<LockTargetId>,
        id: &PackageId,
    ) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
        let manifest_version = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .and_then(|x| x.installed())
            .map(|x| x.manifest().version())
            .make_locker(LockType::Write)
            .add_to_keys(locks);
        let manifest_volumes = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .and_then(|x| x.installed())
            .map(|x| x.manifest().volumes())
            .make_locker(LockType::Write)
            .add_to_keys(locks);
        let manifest_config = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .and_then(|x| x.installed())
            .map(|x| x.manifest().config())
            .make_locker(LockType::Write)
            .add_to_keys(locks);
        move |skeleton_key| {
            Ok(Self {
                manifest_volumes: manifest_volumes.verify(skeleton_key)?,
                manifest_version: manifest_version.verify(skeleton_key)?,
                manifest_config: manifest_config.verify(skeleton_key)?,
            })
        }
    }
}

#[command(display(display_serializable))]
#[instrument(skip(ctx))]
pub async fn get(
@@ -173,29 +223,16 @@ pub async fn get(
    format: Option<IoFormat>,
) -> Result<ConfigRes, Error> {
    let mut db = ctx.db.handle();
    let pkg_model = crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(&id)
        .and_then(|m| m.installed())
        .expect(&mut db)
        .await
        .with_kind(crate::ErrorKind::NotFound)?;
    let action = pkg_model
        .clone()
        .manifest()
        .config()
        .get(&mut db, true)
    let receipts = ConfigGetReceipts::new(&mut db, &id).await?;
    let action = receipts
        .manifest_config
        .get(&mut db)
        .await?
        .to_owned()
        .ok_or_else(|| Error::new(eyre!("{} has no config", id), crate::ErrorKind::NotFound))?;
    let version = pkg_model
        .clone()
        .manifest()
        .version()
        .get(&mut db, true)
        .await?;
    let volumes = pkg_model.manifest().volumes().get(&mut db, true).await?;
    action.get(&ctx, &id, &*version, &*volumes).await

    let volumes = receipts.manifest_volumes.get(&mut db).await?;
    let version = receipts.manifest_version.get(&mut db).await?;
    action.get(&ctx, &id, &version, &volumes).await
}

#[command(
@@ -215,6 +252,157 @@ pub fn set(
    Ok((id, config, timeout.map(|d| *d), expire_id))
}

/// The new locking scheme finds all the locks an operation could possibly need and lifts them
/// into a single bundle that is acquired up front. That bundle is then passed down into the
/// functions that touch the db, so instead of taking locks deep inside the system, every lock
/// is already held and the db operation can proceed directly.
/// An `UnlockedLock` (here, a `LockReceipt`) carries two type parameters: the type of the value
/// being read or written in the db, and the type of the keys that must be supplied on each
/// get/set, because the lock paths contain wildcards.
pub struct ConfigReceipts {
    pub dependency_receipt: DependencyReceipt,
    pub config_receipts: ConfigPointerReceipts,
    pub update_dependency_receipts: UpdateDependencyReceipts,
    pub try_heal_receipts: TryHealReceipts,
    pub break_transitive_receipts: BreakTransitiveReceipts,
    configured: LockReceipt<bool, String>,
    config_actions: LockReceipt<ConfigActions, String>,
    dependencies: LockReceipt<Dependencies, String>,
    volumes: LockReceipt<crate::volume::Volumes, String>,
    version: LockReceipt<crate::util::Version, String>,
    manifest: LockReceipt<Manifest, String>,
    system_pointers: LockReceipt<Vec<spec::SystemPointerSpec>, String>,
    pub current_dependents: LockReceipt<CurrentDependents, String>,
    pub current_dependencies: LockReceipt<CurrentDependencies, String>,
    dependency_errors: LockReceipt<DependencyErrors, String>,
    manifest_dependencies_config: LockReceipt<DependencyConfig, (String, String)>,
}

impl ConfigReceipts {
    pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
        let mut locks = Vec::new();

        let setup = Self::setup(&mut locks);
        Ok(setup(&db.lock_all(locks).await?)?)
    }

    pub fn setup(locks: &mut Vec<LockTargetId>) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
        let dependency_receipt = DependencyReceipt::setup(locks);
        let config_receipts = ConfigPointerReceipts::setup(locks);
        let update_dependency_receipts = UpdateDependencyReceipts::setup(locks);
        let break_transitive_receipts = BreakTransitiveReceipts::setup(locks);
        let try_heal_receipts = TryHealReceipts::setup(locks);

        let configured: LockTarget<bool, String> = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .map(|x| x.status().configured())
            .make_locker(LockType::Write)
            .add_to_keys(locks);

        let config_actions = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .and_then(|x| x.manifest().config())
            .make_locker(LockType::Read)
            .add_to_keys(locks);

        let dependencies = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .map(|x| x.manifest().dependencies())
            .make_locker(LockType::Read)
            .add_to_keys(locks);

        let volumes = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .map(|x| x.manifest().volumes())
            .make_locker(LockType::Read)
            .add_to_keys(locks);

        let version = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .map(|x| x.manifest().version())
            .make_locker(LockType::Read)
            .add_to_keys(locks);

        let manifest = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .map(|x| x.manifest())
            .make_locker(LockType::Read)
            .add_to_keys(locks);

        let system_pointers = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .map(|x| x.system_pointers())
            .make_locker(LockType::Write)
            .add_to_keys(locks);

        let current_dependents = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .map(|x| x.current_dependents())
            .make_locker(LockType::Write)
            .add_to_keys(locks);

        let current_dependencies = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .map(|x| x.current_dependencies())
            .make_locker(LockType::Write)
            .add_to_keys(locks);

        let dependency_errors = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .map(|x| x.status().dependency_errors())
            .make_locker(LockType::Write)
            .add_to_keys(locks);

        let manifest_dependencies_config = crate::db::DatabaseModel::new()
            .package_data()
            .star()
            .installed()
            .and_then(|x| x.manifest().dependencies().star().config())
            .make_locker(LockType::Write)
            .add_to_keys(locks);

        move |skeleton_key| {
            Ok(Self {
                dependency_receipt: dependency_receipt(skeleton_key)?,
                config_receipts: config_receipts(skeleton_key)?,
                try_heal_receipts: try_heal_receipts(skeleton_key)?,
                break_transitive_receipts: break_transitive_receipts(skeleton_key)?,
                update_dependency_receipts: update_dependency_receipts(skeleton_key)?,
                configured: configured.verify(skeleton_key)?,
                config_actions: config_actions.verify(skeleton_key)?,
                dependencies: dependencies.verify(skeleton_key)?,
                volumes: volumes.verify(skeleton_key)?,
                version: version.verify(skeleton_key)?,
                manifest: manifest.verify(skeleton_key)?,
                system_pointers: system_pointers.verify(skeleton_key)?,
                current_dependents: current_dependents.verify(skeleton_key)?,
                current_dependencies: current_dependencies.verify(skeleton_key)?,
                dependency_errors: dependency_errors.verify(skeleton_key)?,
                manifest_dependencies_config: manifest_dependencies_config.verify(skeleton_key)?,
            })
        }
    }
}

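To make the two-phase shape of these receipt types easier to see, here is a minimal, self-contained sketch with toy types standing in for patch_db's `LockTarget`/`LockReceipt`/`Verifier`; the real patch_db API differs, this only illustrates the pattern:

```rust
struct LockTargetId(String); // a path we intend to lock
struct Verifier;             // proof that lock_all() succeeded

struct LockReceipt<T> {
    path: String,
    _marker: std::marker::PhantomData<T>,
}

struct MyReceipts {
    configured: LockReceipt<bool>,
}

impl MyReceipts {
    // Phase 1: declare every lock target we will need.
    fn setup(locks: &mut Vec<LockTargetId>) -> impl FnOnce(&Verifier) -> MyReceipts {
        let path = "/package-data/*/installed/status/configured".to_string();
        locks.push(LockTargetId(path.clone()));
        // Phase 2: once all locks are held, turn targets into typed receipts.
        move |_verifier| MyReceipts {
            configured: LockReceipt {
                path,
                _marker: std::marker::PhantomData,
            },
        }
    }
}

fn main() {
    let mut locks = Vec::new();
    let finish = MyReceipts::setup(&mut locks); // phase 1: collect targets
    // ... here the real code would do db.lock_all(locks).await? ...
    let receipts = finish(&Verifier); // phase 2: typed receipts
    assert_eq!(
        receipts.configured.path,
        "/package-data/*/installed/status/configured"
    );
}
```

The payoff is that lock acquisition is hoisted into a single `lock_all` call, which makes the lock ordering auditable and lets the `ensure_creation_of_config_paths_makes_sense` test further down assert the exact glob paths.
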
#[command(rename = "dry", display(display_serializable))]
#[instrument(skip(ctx))]
pub async fn set_dry(
@@ -229,6 +417,7 @@ pub async fn set_dry(
    let mut db = ctx.db.handle();
    let mut tx = db.begin().await?;
    let mut breakages = BTreeMap::new();
    let locks = ConfigReceipts::new(&mut tx).await?;
    configure(
        &ctx,
        &mut tx,
@@ -238,20 +427,11 @@ pub async fn set_dry(
        true,
        &mut BTreeMap::new(),
        &mut breakages,
        &locks,
    )
    .await?;
    crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(&id)
        .expect(&mut tx)
        .await?
        .installed()
        .expect(&mut tx)
        .await?
        .status()
        .configured()
        .put(&mut tx, &true)
        .await?;

    locks.configured.set(&mut tx, true, &id).await?;
    tx.abort().await?;
    Ok(BreakageRes(breakages))
}
@@ -264,6 +444,7 @@ pub async fn set_impl(
    let mut db = ctx.db.handle();
    let mut tx = db.begin().await?;
    let mut breakages = BTreeMap::new();
    let locks = ConfigReceipts::new(&mut tx).await?;
    configure(
        &ctx,
        &mut tx,
@@ -273,6 +454,7 @@ pub async fn set_impl(
        false,
        &mut BTreeMap::new(),
        &mut breakages,
        &locks,
    )
    .await?;
    Ok(WithRevision {
@@ -281,34 +463,27 @@ pub async fn set_impl(
    })
}

#[instrument(skip(ctx, db))]
pub async fn configure<Db: DbHandle>(
#[instrument(skip(ctx, db, receipts))]
pub async fn configure<'a, Db: DbHandle>(
    ctx: &RpcContext,
    db: &mut Db,
    db: &'a mut Db,
    id: &PackageId,
    config: Option<Config>,
    timeout: &Option<Duration>,
    dry_run: bool,
    overrides: &mut BTreeMap<PackageId, Config>,
    breakages: &mut BTreeMap<PackageId, TaggedDependencyError>,
    receipts: &ConfigReceipts,
) -> Result<(), Error> {
    configure_rec(ctx, db, id, config, timeout, dry_run, overrides, breakages).await?;
    crate::db::DatabaseModel::new()
        .package_data()
        .idx_model(&id)
        .expect(db)
        .await?
        .installed()
        .expect(db)
        .await?
        .status()
        .configured()
        .put(db, &true)
        .await?;
    configure_rec(
        ctx, db, id, config, timeout, dry_run, overrides, breakages, receipts,
    )
    .await?;
    receipts.configured.set(db, true, &id).await?;
    Ok(())
}

#[instrument(skip(ctx, db))]
#[instrument(skip(ctx, db, receipts))]
pub fn configure_rec<'a, Db: DbHandle>(
    ctx: &'a RpcContext,
    db: &'a mut Db,
@@ -318,48 +493,33 @@ pub fn configure_rec<'a, Db: DbHandle>(
    dry_run: bool,
    overrides: &'a mut BTreeMap<PackageId, Config>,
    breakages: &'a mut BTreeMap<PackageId, TaggedDependencyError>,
    receipts: &'a ConfigReceipts,
) -> BoxFuture<'a, Result<(), Error>> {
    async move {
        crate::db::DatabaseModel::new()
            .package_data()
            .lock(db, LockType::Write)
            .await?;
        // fetch data from db
        let pkg_model = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .and_then(|m| m.installed())
            .expect(db)
            .await
            .with_kind(crate::ErrorKind::NotFound)?;
        let action = pkg_model
            .clone()
            .manifest()
            .config()
            .get(db, true)
        let action = receipts
            .config_actions
            .get(db, id)
            .await?
            .to_owned()
            .ok_or_else(|| Error::new(eyre!("{} has no config", id), crate::ErrorKind::NotFound))?;
        let version = pkg_model.clone().manifest().version().get(db, true).await?;
        let dependencies = pkg_model
            .clone()
            .manifest()
            .dependencies()
            .get(db, true)
            .await?;
        let volumes = pkg_model.clone().manifest().volumes().get(db, true).await?;
        let is_needs_config = !*pkg_model
            .clone()
            .status()
            .configured()
            .get(db, true)
            .await?;
            .ok_or_else(not_found)?;
        let dependencies = receipts
            .dependencies
            .get(db, id)
            .await?
            .ok_or_else(not_found)?;
        let volumes = receipts.volumes.get(db, id).await?.ok_or_else(not_found)?;
        let is_needs_config = !receipts
            .configured
            .get(db, id)
            .await?
            .ok_or_else(not_found)?;
        let version = receipts.version.get(db, id).await?.ok_or_else(not_found)?;

        // get current config and current spec
        let ConfigRes {
            config: old_config,
            spec,
        } = action.get(ctx, id, &*version, &*volumes).await?;
        } = action.get(ctx, id, &version, &volumes).await?;

        // determine new config to use
        let mut config = if let Some(config) = config.or_else(|| old_config.clone()) {
@@ -368,45 +528,49 @@ pub fn configure_rec<'a, Db: DbHandle>(
            spec.gen(&mut rand::rngs::StdRng::from_entropy(), timeout)?
        };

        let manifest = crate::db::DatabaseModel::new()
            .package_data()
            .idx_model(id)
            .and_then(|m| m.installed())
            .map::<_, Manifest>(|i| i.manifest())
            .expect(db)
            .await?
            .get(db, true)
            .await
            .with_kind(crate::ErrorKind::NotFound)?;
        let manifest = receipts.manifest.get(db, id).await?.ok_or_else(not_found)?;

        spec.validate(&*manifest)?;
        spec.validate(&manifest)?;
        spec.matches(&config)?; // check that new config matches spec
        spec.update(ctx, db, &*manifest, &*overrides, &mut config)
            .await?; // dereference pointers in the new config
        spec.update(
            ctx,
            db,
            &manifest,
            &*overrides,
            &mut config,
            &receipts.config_receipts,
        )
        .await?; // dereference pointers in the new config

        // create backreferences to pointers
        let mut sys = pkg_model.clone().system_pointers().get_mut(db).await?;
        let mut sys = receipts
            .system_pointers
            .get(db, &id)
            .await?
            .ok_or_else(not_found)?;
        sys.truncate(0);
        let mut current_dependencies: BTreeMap<PackageId, CurrentDependencyInfo> = dependencies
            .0
            .iter()
            .filter_map(|(id, info)| {
                if info.requirement.required() {
                    Some((id.clone(), CurrentDependencyInfo::default()))
                } else {
                    None
                }
            })
            .collect();
        let mut current_dependencies: CurrentDependencies = CurrentDependencies(
            dependencies
                .0
                .iter()
                .filter_map(|(id, info)| {
                    if info.requirement.required() {
                        Some((id.clone(), CurrentDependencyInfo::default()))
                    } else {
                        None
                    }
                })
                .collect(),
        );
        for ptr in spec.pointers(&config)? {
            match ptr {
                ValueSpecPointer::Package(pkg_ptr) => {
                    if let Some(current_dependency) =
                        current_dependencies.get_mut(pkg_ptr.package_id())
                        current_dependencies.0.get_mut(pkg_ptr.package_id())
                    {
                        current_dependency.pointers.push(pkg_ptr);
                    } else {
                        current_dependencies.insert(
                        current_dependencies.0.insert(
                            pkg_ptr.package_id().to_owned(),
                            CurrentDependencyInfo {
                                pointers: vec![pkg_ptr],
@@ -418,20 +582,20 @@ pub fn configure_rec<'a, Db: DbHandle>(
                ValueSpecPointer::System(s) => sys.push(s),
            }
        }
        sys.save(db).await?;
        receipts.system_pointers.set(db, sys, &id).await?;

        let signal = if !dry_run {
            // run config action
            let res = action
                .set(ctx, id, &*version, &*dependencies, &*volumes, &config)
                .set(ctx, id, &version, &dependencies, &volumes, &config)
                .await?;

            // track dependencies with no pointers
            for (package_id, health_checks) in res.depends_on.into_iter() {
                if let Some(current_dependency) = current_dependencies.get_mut(&package_id) {
                if let Some(current_dependency) = current_dependencies.0.get_mut(&package_id) {
                    current_dependency.health_checks.extend(health_checks);
                } else {
                    current_dependencies.insert(
                    current_dependencies.0.insert(
                        package_id,
                        CurrentDependencyInfo {
                            pointers: Vec::new(),
@@ -442,79 +606,111 @@ pub fn configure_rec<'a, Db: DbHandle>(
            }

            // track dependency health checks
            current_dependencies = current_dependencies
                .into_iter()
                .filter(|(dep_id, _)| {
                    if dep_id != id && !manifest.dependencies.0.contains_key(dep_id) {
                        tracing::warn!("Illegal dependency specified: {}", dep_id);
                        false
                    } else {
                        true
                    }
                })
                .collect();
            current_dependencies = current_dependencies.map(|x| {
                x.into_iter()
                    .filter(|(dep_id, _)| {
                        if dep_id != id && !manifest.dependencies.0.contains_key(dep_id) {
                            tracing::warn!("Illegal dependency specified: {}", dep_id);
                            false
                        } else {
                            true
                        }
                    })
                    .collect()
            });
            res.signal
        } else {
            None
        };

        // update dependencies
        let mut deps = pkg_model.clone().current_dependencies().get_mut(db).await?;
        remove_from_current_dependents_lists(db, id, deps.keys()).await?; // remove previous
        add_dependent_to_current_dependents_lists(db, id, &current_dependencies).await?; // add new
        current_dependencies.remove(id);
        *deps = current_dependencies.clone();
        deps.save(db).await?;
        let mut errs = pkg_model
            .clone()
            .status()
            .dependency_errors()
            .get_mut(db)
        let prev_current_dependencies = receipts
            .current_dependencies
            .get(db, &id)
            .await?
            .unwrap_or_default();
        remove_from_current_dependents_lists(
            db,
            id,
            &prev_current_dependencies,
            &receipts.current_dependents,
        )
        .await?; // remove previous
        add_dependent_to_current_dependents_lists(
            db,
            id,
            &current_dependencies,
            &receipts.current_dependents,
        )
        .await?; // add new
        current_dependencies.0.remove(id);
        receipts
            .current_dependencies
            .set(db, current_dependencies.clone(), &id)
            .await?;
        *errs = DependencyErrors::init(ctx, db, &*manifest, &current_dependencies).await?;
        errs.save(db).await?;

        let errs = receipts
            .dependency_errors
            .get(db, &id)
            .await?
            .ok_or_else(not_found)?;
        tracing::warn!("Dependency Errors: {:?}", errs);
        let errs = DependencyErrors::init(
            ctx,
            db,
            &manifest,
            &current_dependencies,
            &receipts.dependency_receipt.try_heal,
        )
        .await?;
        receipts.dependency_errors.set(db, errs, &id).await?;

        // cache current config for dependents
        overrides.insert(id.clone(), config.clone());

        // handle dependents
        let dependents = pkg_model.clone().current_dependents().get(db, true).await?;
        let dependents = receipts
            .current_dependents
            .get(db, id)
            .await?
            .ok_or_else(not_found)?;
        let prev = if is_needs_config { None } else { old_config }
            .map(Value::Object)
            .unwrap_or_default();
        let next = Value::Object(config.clone());
        for (dependent, dep_info) in dependents.iter().filter(|(dep_id, _)| dep_id != &id) {
        for (dependent, dep_info) in dependents.0.iter().filter(|(dep_id, _)| dep_id != &id) {
            // check if config passes dependent check
            let dependent_model = crate::db::DatabaseModel::new()
                .package_data()
                .idx_model(dependent)
                .and_then(|pkg| pkg.installed())
                .expect(db)
                .await?;
            if let Some(cfg) = &*dependent_model
                .clone()
                .manifest()
                .dependencies()
                .idx_model(id)
                .expect(db)
                .await?
                .config()
                .get(db, true)
            if let Some(cfg) = receipts
                .manifest_dependencies_config
                .get(db, (&dependent, &id))
                .await?
            {
                let manifest = dependent_model.clone().manifest().get(db, true).await?;
                let manifest = receipts
                    .manifest
                    .get(db, &dependent)
                    .await?
                    .ok_or_else(not_found)?;
                if let Err(error) = cfg
                    .check(
                        ctx,
                        dependent,
                        &manifest.version,
                        &manifest.volumes,
                        id,
                        &config,
                    )
                    .await?
                {
                    let dep_err = DependencyError::ConfigUnsatisfied { error };
                    break_transitive(db, dependent, id, dep_err, breakages).await?;
                    break_transitive(
                        db,
                        dependent,
                        id,
                        dep_err,
                        breakages,
                        &receipts.break_transitive_receipts,
                    )
                    .await?;
                }

                // handle backreferences
@@ -523,6 +719,7 @@ pub fn configure_rec<'a, Db: DbHandle>(
                if cfg_ptr.select(&next) != cfg_ptr.select(&prev) {
                    if let Err(e) = configure_rec(
                        ctx, db, dependent, None, timeout, dry_run, overrides, breakages,
                        receipts,
                    )
                    .await
                    {
@@ -535,6 +732,7 @@ pub fn configure_rec<'a, Db: DbHandle>(
                            error: format!("{}", e),
                        },
                        breakages,
                        &receipts.break_transitive_receipts,
                    )
                    .await?;
                } else {
@@ -544,7 +742,7 @@ pub fn configure_rec<'a, Db: DbHandle>(
                }
            }
        }
        heal_all_dependents_transitive(ctx, db, id).await?;
        heal_all_dependents_transitive(ctx, db, id, &receipts.dependency_receipt).await?;
    }
}

@@ -568,3 +766,67 @@ pub fn configure_rec<'a, Db: DbHandle>(
    }
    .boxed()
}
#[instrument]
pub fn not_found() -> Error {
    Error::new(eyre!("Could not find"), crate::ErrorKind::Incoherent)
}

/// Double-check that the generated lock paths are what we expect them to be.
/// Earlier, the paths were not what we expected, so this test guards against regressions.
||||
#[tokio::test]
|
||||
async fn ensure_creation_of_config_paths_makes_sense() {
|
||||
let mut fake = patch_db::test_utils::NoOpDb();
|
||||
let config_locks = ConfigReceipts::new(&mut fake).await.unwrap();
|
||||
assert_eq!(
|
||||
&format!("{}", config_locks.configured.lock.glob),
|
||||
"/package-data/*/installed/status/configured"
|
||||
);
|
||||
assert_eq!(
|
||||
&format!("{}", config_locks.config_actions.lock.glob),
|
||||
"/package-data/*/installed/manifest/config"
|
||||
);
|
||||
assert_eq!(
|
||||
&format!("{}", config_locks.dependencies.lock.glob),
|
||||
"/package-data/*/installed/manifest/dependencies"
|
||||
);
|
||||
assert_eq!(
|
||||
&format!("{}", config_locks.volumes.lock.glob),
|
||||
"/package-data/*/installed/manifest/volumes"
|
||||
);
|
||||
assert_eq!(
|
||||
&format!("{}", config_locks.version.lock.glob),
|
||||
"/package-data/*/installed/manifest/version"
|
||||
);
|
||||
assert_eq!(
|
||||
&format!("{}", config_locks.volumes.lock.glob),
|
||||
"/package-data/*/installed/manifest/volumes"
|
||||
);
|
||||
assert_eq!(
|
||||
&format!("{}", config_locks.manifest.lock.glob),
|
||||
"/package-data/*/installed/manifest"
|
||||
);
|
||||
assert_eq!(
|
||||
&format!("{}", config_locks.manifest.lock.glob),
|
||||
"/package-data/*/installed/manifest"
|
||||
);
|
||||
assert_eq!(
|
||||
&format!("{}", config_locks.system_pointers.lock.glob),
|
||||
"/package-data/*/installed/system-pointers"
|
||||
);
|
||||
assert_eq!(
|
||||
&format!("{}", config_locks.current_dependents.lock.glob),
|
||||
"/package-data/*/installed/current-dependents"
|
||||
);
|
||||
assert_eq!(
|
||||
&format!("{}", config_locks.dependency_errors.lock.glob),
|
||||
"/package-data/*/installed/status/dependency-errors"
|
||||
);
|
||||
assert_eq!(
|
||||
&format!("{}", config_locks.manifest_dependencies_config.lock.glob),
|
||||
"/package-data/*/installed/manifest/dependencies/*/config"
|
||||
);
|
||||
assert_eq!(
|
||||
&format!("{}", config_locks.system_pointers.lock.glob),
|
||||
"/package-data/*/installed/system-pointers"
|
||||
);
|
||||
}
|
||||
|
||||
@@ -12,9 +12,10 @@ use async_trait::async_trait;
|
||||
use indexmap::{IndexMap, IndexSet};
|
||||
use itertools::Itertools;
|
||||
use jsonpath_lib::Compiled as CompiledJsonPath;
|
||||
use patch_db::{DbHandle, OptionModel};
|
||||
use patch_db::{DbHandle, LockReceipt, LockType};
|
||||
use rand::{CryptoRng, Rng};
|
||||
use regex::Regex;
|
||||
use serde::de::{MapAccess, Visitor};
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
use serde_json::{Number, Value};
|
||||
use sqlx::SqlitePool;
|
||||
@@ -44,6 +45,7 @@ pub trait ValueSpec {
|
||||
manifest: &Manifest,
|
||||
config_overrides: &BTreeMap<PackageId, Config>,
|
||||
value: &mut Value,
|
||||
receipts: &ConfigPointerReceipts,
|
||||
) -> Result<(), ConfigurationError>;
|
||||
// returns all pointers that are live in the provided config
|
||||
fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath>;
|
||||
@@ -160,9 +162,10 @@ where
|
||||
manifest: &Manifest,
|
||||
config_overrides: &BTreeMap<PackageId, Config>,
|
||||
value: &mut Value,
|
||||
receipts: &ConfigPointerReceipts,
|
||||
) -> Result<(), ConfigurationError> {
|
||||
self.inner
|
||||
.update(ctx, db, manifest, config_overrides, value)
|
||||
.update(ctx, db, manifest, config_overrides, value, receipts)
|
||||
.await
|
||||
}
|
||||
fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
|
||||
@@ -204,9 +207,10 @@ where
|
||||
manifest: &Manifest,
|
||||
config_overrides: &BTreeMap<PackageId, Config>,
|
||||
value: &mut Value,
|
||||
receipts: &ConfigPointerReceipts,
|
||||
) -> Result<(), ConfigurationError> {
|
||||
self.inner
|
||||
.update(ctx, db, manifest, config_overrides, value)
|
||||
.update(ctx, db, manifest, config_overrides, value, receipts)
|
||||
.await
|
||||
}
|
||||
fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
|
||||
@@ -281,9 +285,10 @@ where
|
||||
manifest: &Manifest,
|
||||
config_overrides: &BTreeMap<PackageId, Config>,
|
||||
value: &mut Value,
|
||||
receipts: &ConfigPointerReceipts,
|
||||
) -> Result<(), ConfigurationError> {
|
||||
self.inner
|
||||
.update(ctx, db, manifest, config_overrides, value)
|
||||
.update(ctx, db, manifest, config_overrides, value, receipts)
|
||||
.await
|
||||
}
|
||||
fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
|
||||
@@ -343,7 +348,7 @@ pub enum ValueSpecAny {
|
||||
Pointer(WithDescription<ValueSpecPointer>),
|
||||
}
|
||||
impl ValueSpecAny {
|
||||
pub fn name<'a>(&'a self) -> &'a str {
|
||||
pub fn name(&self) -> &'_ str {
|
||||
match self {
|
||||
ValueSpecAny::Boolean(b) => b.name.as_str(),
|
||||
ValueSpecAny::Enum(e) => e.name.as_str(),
|
||||
@@ -395,16 +400,41 @@ impl ValueSpec for ValueSpecAny {
|
||||
manifest: &Manifest,
|
||||
config_overrides: &BTreeMap<PackageId, Config>,
|
||||
value: &mut Value,
|
||||
receipts: &ConfigPointerReceipts,
|
||||
) -> Result<(), ConfigurationError> {
|
||||
match self {
|
||||
ValueSpecAny::Boolean(a) => a.update(ctx, db, manifest, config_overrides, value).await,
|
||||
ValueSpecAny::Enum(a) => a.update(ctx, db, manifest, config_overrides, value).await,
|
||||
ValueSpecAny::List(a) => a.update(ctx, db, manifest, config_overrides, value).await,
|
||||
ValueSpecAny::Number(a) => a.update(ctx, db, manifest, config_overrides, value).await,
|
||||
ValueSpecAny::Object(a) => a.update(ctx, db, manifest, config_overrides, value).await,
|
||||
ValueSpecAny::String(a) => a.update(ctx, db, manifest, config_overrides, value).await,
|
||||
ValueSpecAny::Union(a) => a.update(ctx, db, manifest, config_overrides, value).await,
|
||||
ValueSpecAny::Pointer(a) => a.update(ctx, db, manifest, config_overrides, value).await,
|
||||
ValueSpecAny::Boolean(a) => {
|
||||
a.update(ctx, db, manifest, config_overrides, value, receipts)
|
||||
.await
|
||||
}
|
||||
ValueSpecAny::Enum(a) => {
|
||||
a.update(ctx, db, manifest, config_overrides, value, receipts)
|
||||
.await
|
||||
}
|
||||
ValueSpecAny::List(a) => {
|
||||
a.update(ctx, db, manifest, config_overrides, value, receipts)
|
||||
.await
|
||||
}
|
||||
ValueSpecAny::Number(a) => {
|
||||
a.update(ctx, db, manifest, config_overrides, value, receipts)
|
||||
.await
|
||||
}
|
||||
ValueSpecAny::Object(a) => {
|
||||
a.update(ctx, db, manifest, config_overrides, value, receipts)
|
||||
.await
|
||||
}
|
||||
ValueSpecAny::String(a) => {
|
||||
a.update(ctx, db, manifest, config_overrides, value, receipts)
|
||||
.await
|
||||
}
|
||||
ValueSpecAny::Union(a) => {
|
||||
a.update(ctx, db, manifest, config_overrides, value, receipts)
|
||||
.await
|
||||
}
|
||||
ValueSpecAny::Pointer(a) => {
|
||||
a.update(ctx, db, manifest, config_overrides, value, receipts)
|
||||
.await
|
||||
}
|
||||
}
|
||||
}
|
||||
fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
|
||||
@@ -489,6 +519,7 @@ impl ValueSpec for ValueSpecBoolean {
|
||||
_manifest: &Manifest,
|
||||
_config_overrides: &BTreeMap<PackageId, Config>,
|
||||
_value: &mut Value,
|
||||
_receipts: &ConfigPointerReceipts,
|
||||
) -> Result<(), ConfigurationError> {
|
||||
Ok(())
|
||||
}
|
||||
@@ -578,6 +609,7 @@ impl ValueSpec for ValueSpecEnum {
|
||||
_manifest: &Manifest,
|
||||
_config_overrides: &BTreeMap<PackageId, Config>,
|
||||
_value: &mut Value,
|
||||
_receipts: &ConfigPointerReceipts,
|
||||
) -> Result<(), ConfigurationError> {
|
||||
Ok(())
|
||||
}
|
||||
@@ -664,12 +696,13 @@ where
|
||||
manifest: &Manifest,
|
||||
config_overrides: &BTreeMap<PackageId, Config>,
|
||||
value: &mut Value,
|
||||
receipts: &ConfigPointerReceipts,
|
||||
) -> Result<(), ConfigurationError> {
|
||||
if let Value::Array(ref mut ls) = value {
|
||||
for (i, val) in ls.into_iter().enumerate() {
|
||||
match self
|
||||
.spec
|
||||
.update(ctx, db, manifest, config_overrides, val)
|
||||
.update(ctx, db, manifest, config_overrides, val, receipts)
|
||||
.await
|
||||
{
|
||||
Err(ConfigurationError::NoMatch(e)) => {
|
||||
@@ -771,13 +804,29 @@ impl ValueSpec for ValueSpecList {
|
||||
manifest: &Manifest,
|
||||
config_overrides: &BTreeMap<PackageId, Config>,
|
||||
value: &mut Value,
|
||||
receipts: &ConfigPointerReceipts,
|
||||
    ) -> Result<(), ConfigurationError> {
        match self {
-           ValueSpecList::Enum(a) => a.update(ctx, db, manifest, config_overrides, value).await,
-           ValueSpecList::Number(a) => a.update(ctx, db, manifest, config_overrides, value).await,
-           ValueSpecList::Object(a) => a.update(ctx, db, manifest, config_overrides, value).await,
-           ValueSpecList::String(a) => a.update(ctx, db, manifest, config_overrides, value).await,
-           ValueSpecList::Union(a) => a.update(ctx, db, manifest, config_overrides, value).await,
+           ValueSpecList::Enum(a) => {
+               a.update(ctx, db, manifest, config_overrides, value, receipts)
+                   .await
+           }
+           ValueSpecList::Number(a) => {
+               a.update(ctx, db, manifest, config_overrides, value, receipts)
+                   .await
+           }
+           ValueSpecList::Object(a) => {
+               a.update(ctx, db, manifest, config_overrides, value, receipts)
+                   .await
+           }
+           ValueSpecList::String(a) => {
+               a.update(ctx, db, manifest, config_overrides, value, receipts)
+                   .await
+           }
+           ValueSpecList::Union(a) => {
+               a.update(ctx, db, manifest, config_overrides, value, receipts)
+                   .await
+           }
        }
    }
    fn pointers(&self, value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
@@ -898,6 +947,7 @@ impl ValueSpec for ValueSpecNumber {
        _manifest: &Manifest,
        _config_overrides: &BTreeMap<PackageId, Config>,
        _value: &mut Value,
+       _receipts: &ConfigPointerReceipts,
    ) -> Result<(), ConfigurationError> {
        Ok(())
    }
@@ -961,10 +1011,11 @@ impl ValueSpec for ValueSpecObject {
        manifest: &Manifest,
        config_overrides: &BTreeMap<PackageId, Config>,
        value: &mut Value,
+       receipts: &ConfigPointerReceipts,
    ) -> Result<(), ConfigurationError> {
        if let Value::Object(o) = value {
            self.spec
-               .update(ctx, db, manifest, config_overrides, o)
+               .update(ctx, db, manifest, config_overrides, o, receipts)
                .await
        } else {
            Err(ConfigurationError::NoMatch(NoMatchWithPath::new(
@@ -1063,16 +1114,20 @@ impl ConfigSpec {
        manifest: &Manifest,
        config_overrides: &BTreeMap<PackageId, Config>,
        cfg: &mut Config,
+       receipts: &ConfigPointerReceipts,
    ) -> Result<(), ConfigurationError> {
        for (k, vs) in self.0.iter() {
            match cfg.get_mut(k) {
                None => {
                    let mut v = Value::Null;
-                   vs.update(ctx, db, manifest, config_overrides, &mut v)
+                   vs.update(ctx, db, manifest, config_overrides, &mut v, receipts)
                        .await?;
                    cfg.insert(k.clone(), v);
                }
-               Some(v) => match vs.update(ctx, db, manifest, config_overrides, v).await {
+               Some(v) => match vs
+                   .update(ctx, db, manifest, config_overrides, v, receipts)
+                   .await
+               {
                    Err(ConfigurationError::NoMatch(e)) => {
                        Err(ConfigurationError::NoMatch(e.prepend(k.clone())))
                    }
@@ -1113,18 +1168,95 @@ pub struct Pattern {
    pub pattern_description: String,
}

-#[derive(Clone, Debug, Serialize, Deserialize)]
+#[derive(Clone, Debug, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct ValueSpecString {
    #[serde(flatten)]
    pub pattern: Option<Pattern>,
    #[serde(default)]
    pub textarea: bool,
    pub copyable: bool,
    #[serde(default)]
    pub masked: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    #[serde(default)]
    pub placeholder: Option<String>,
}
+impl<'de> Deserialize<'de> for ValueSpecString {
+   fn deserialize<D: Deserializer<'de>>(deserializer: D) -> Result<ValueSpecString, D::Error> {
+       struct ValueSpecStringVisitor;
+       impl<'de> Visitor<'de> for ValueSpecStringVisitor {
+           type Value = ValueSpecString;
+           fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
+               formatter.write_str("struct ValueSpecString")
+           }
+           fn visit_map<V: MapAccess<'de>>(self, mut map: V) -> Result<ValueSpecString, V::Error> {
+               let mut pattern = None;
+               let mut pattern_description = None;
+               let mut textarea = false;
+               let mut copyable = false;
+               let mut masked = false;
+               let mut placeholder = None;
+               while let Some::<String>(key) = map.next_key()? {
+                   if &key == "pattern" {
+                       if pattern.is_some() {
+                           return Err(serde::de::Error::duplicate_field("pattern"));
+                       } else {
+                           pattern = Some(
+                               Regex::new(&map.next_value::<String>()?)
+                                   .map_err(serde::de::Error::custom)?,
+                           );
+                       }
+                   } else if &key == "pattern-description" {
+                       if pattern_description.is_some() {
+                           return Err(serde::de::Error::duplicate_field("pattern-description"));
+                       } else {
+                           pattern_description = Some(map.next_value()?);
+                       }
+                   } else if &key == "textarea" {
+                       textarea = map.next_value()?;
+                   } else if &key == "copyable" {
+                       copyable = map.next_value()?;
+                   } else if &key == "masked" {
+                       masked = map.next_value()?;
+                   } else if &key == "placeholder" {
+                       if placeholder.is_some() {
+                           return Err(serde::de::Error::duplicate_field("placeholder"));
+                       } else {
+                           placeholder = Some(map.next_value()?);
+                       }
+                   }
+               }
+               let regex = match (pattern, pattern_description) {
+                   (None, None) => None,
+                   (Some(p), Some(d)) => Some(Pattern {
+                       pattern: p,
+                       pattern_description: d,
+                   }),
+                   (Some(_), None) => {
+                       return Err(serde::de::Error::missing_field("pattern-description"));
+                   }
+                   (None, Some(_)) => {
+                       return Err(serde::de::Error::missing_field("pattern"));
+                   }
+               };
+               Ok(ValueSpecString {
+                   pattern: regex,
+                   textarea,
+                   copyable,
+                   masked,
+                   placeholder,
+               })
+           }
+       }
+       const FIELDS: &'static [&'static str] = &[
+           "pattern",
+           "pattern-description",
+           "textarea",
+           "copyable",
+           "masked",
+           "placeholder",
+       ];
+       deserializer.deserialize_struct("ValueSpecString", FIELDS, ValueSpecStringVisitor)
+   }
+}
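The hand-written visitor above enforces what `#[derive(Deserialize)]` cannot: `pattern` and `pattern-description` must appear together or not at all, and the regex must compile at parse time. A minimal self-contained sketch of the same linked-fields rule (illustrative names only, not the crate's types):

```rust
use serde::de::{self, Deserializer, MapAccess, Visitor};
use std::fmt;

#[derive(Debug)]
struct Checked {
    // (regex source, human description): present together or absent together.
    pattern: Option<(String, String)>,
}

impl<'de> serde::Deserialize<'de> for Checked {
    fn deserialize<D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
        struct V;
        impl<'de> Visitor<'de> for V {
            type Value = Checked;
            fn expecting(&self, f: &mut fmt::Formatter) -> fmt::Result {
                f.write_str("struct Checked")
            }
            fn visit_map<M: MapAccess<'de>>(self, mut map: M) -> Result<Checked, M::Error> {
                let (mut pat, mut desc) = (None, None);
                while let Some(key) = map.next_key::<String>()? {
                    match key.as_str() {
                        "pattern" => pat = Some(map.next_value()?),
                        "pattern-description" => desc = Some(map.next_value()?),
                        _ => drop(map.next_value::<de::IgnoredAny>()?),
                    }
                }
                // Reject half-specified pairs, exactly like the match above.
                match (pat, desc) {
                    (None, None) => Ok(Checked { pattern: None }),
                    (Some(p), Some(d)) => Ok(Checked { pattern: Some((p, d)) }),
                    (Some(_), None) => Err(de::Error::missing_field("pattern-description")),
                    (None, Some(_)) => Err(de::Error::missing_field("pattern")),
                }
            }
        }
        d.deserialize_map(V)
    }
}
```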
#[async_trait]
impl ValueSpec for ValueSpecString {
    fn matches(&self, value: &Value) -> Result<(), NoMatchWithPath> {
@@ -1160,6 +1292,7 @@ impl ValueSpec for ValueSpecString {
        _manifest: &Manifest,
        _config_overrides: &BTreeMap<PackageId, Config>,
        _value: &mut Value,
+       _receipts: &ConfigPointerReceipts,
    ) -> Result<(), ConfigurationError> {
        Ok(())
    }
@@ -1192,10 +1325,7 @@ impl DefaultableWith for ValueSpecString {
        let candidate = spec.gen(rng);
        match (spec, &self.pattern) {
            (DefaultString::Entropy(_), Some(pattern))
-               if !pattern.pattern.is_match(&candidate) =>
-           {
-               ()
-           }
+               if !pattern.pattern.is_match(&candidate) => {}
            _ => {
                return Ok(Value::String(candidate));
            }
@@ -1371,6 +1501,7 @@ impl ValueSpec for ValueSpecUnion {
        manifest: &Manifest,
        config_overrides: &BTreeMap<PackageId, Config>,
        value: &mut Value,
+       receipts: &ConfigPointerReceipts,
    ) -> Result<(), ConfigurationError> {
        if let Value::Object(o) = value {
            match o.get(&self.tag.id) {
@@ -1381,7 +1512,10 @@ impl ValueSpec for ValueSpecUnion {
                None => Err(ConfigurationError::NoMatch(NoMatchWithPath::new(
                    MatchError::Union(tag.clone(), self.variants.keys().cloned().collect()),
                ))),
-               Some(spec) => spec.update(ctx, db, manifest, config_overrides, o).await,
+               Some(spec) => {
+                   spec.update(ctx, db, manifest, config_overrides, o, receipts)
+                       .await
+               }
            },
            Some(other) => Err(ConfigurationError::NoMatch(
                NoMatchWithPath::new(MatchError::InvalidType("string", other.type_of()))
@@ -1513,13 +1647,16 @@ impl ValueSpec for ValueSpecPointer {
        manifest: &Manifest,
        config_overrides: &BTreeMap<PackageId, Config>,
        value: &mut Value,
+       receipts: &ConfigPointerReceipts,
    ) -> Result<(), ConfigurationError> {
        match self {
            ValueSpecPointer::Package(a) => {
-               a.update(ctx, db, manifest, config_overrides, value).await
+               a.update(ctx, db, manifest, config_overrides, value, receipts)
+                   .await
            }
            ValueSpecPointer::System(a) => {
-               a.update(ctx, db, manifest, config_overrides, value).await
+               a.update(ctx, db, manifest, config_overrides, value, receipts)
+                   .await
            }
        }
    }
@@ -1563,12 +1700,17 @@ impl PackagePointerSpec {
        db: &mut Db,
        manifest: &Manifest,
        config_overrides: &BTreeMap<PackageId, Config>,
+       receipts: &ConfigPointerReceipts,
    ) -> Result<Value, ConfigurationError> {
        match &self {
            PackagePointerSpec::TorKey(key) => key.deref(&manifest.id, &ctx.secret_store).await,
-           PackagePointerSpec::TorAddress(tor) => tor.deref(db).await,
-           PackagePointerSpec::LanAddress(lan) => lan.deref(db).await,
-           PackagePointerSpec::Config(cfg) => cfg.deref(ctx, db, config_overrides).await,
+           PackagePointerSpec::TorAddress(tor) => {
+               tor.deref(db, &receipts.interface_addresses_receipt).await
+           }
+           PackagePointerSpec::LanAddress(lan) => {
+               lan.deref(db, &receipts.interface_addresses_receipt).await
+           }
+           PackagePointerSpec::Config(cfg) => cfg.deref(ctx, db, config_overrides, receipts).await,
        }
    }
}
@@ -1616,8 +1758,11 @@ impl ValueSpec for PackagePointerSpec {
        manifest: &Manifest,
        config_overrides: &BTreeMap<PackageId, Config>,
        value: &mut Value,
+       receipts: &ConfigPointerReceipts,
    ) -> Result<(), ConfigurationError> {
-       *value = self.deref(ctx, db, manifest, config_overrides).await?;
+       *value = self
+           .deref(ctx, db, manifest, config_overrides, receipts)
+           .await?;
        Ok(())
    }
    fn pointers(&self, _value: &Value) -> Result<BTreeSet<ValueSpecPointer>, NoMatchWithPath> {
@@ -1640,16 +1785,17 @@ pub struct TorAddressPointer {
    interface: InterfaceId,
}
impl TorAddressPointer {
-   async fn deref<Db: DbHandle>(&self, db: &mut Db) -> Result<Value, ConfigurationError> {
-       let addr = crate::db::DatabaseModel::new()
-           .package_data()
-           .idx_model(&self.package_id)
-           .and_then(|pde| pde.installed())
-           .and_then(|installed| installed.interface_addresses().idx_model(&self.interface))
-           .and_then(|addresses| addresses.tor_address())
-           .get(db, true)
+   async fn deref<Db: DbHandle>(
+       &self,
+       db: &mut Db,
+       receipt: &InterfaceAddressesReceipt,
+   ) -> Result<Value, ConfigurationError> {
+       let addr = receipt
+           .interface_addresses
+           .get(db, (&self.package_id, &self.interface))
            .await
-           .map_err(|e| ConfigurationError::SystemError(Error::from(e)))?;
+           .map_err(|e| ConfigurationError::SystemError(Error::from(e)))?
+           .and_then(|addresses| addresses.tor_address);
        Ok(addr.to_owned().map(Value::String).unwrap_or(Value::Null))
    }
}
@@ -1664,6 +1810,39 @@ impl fmt::Display for TorAddressPointer {
    }
}

+pub struct InterfaceAddressesReceipt {
+   interface_addresses: LockReceipt<crate::db::model::InterfaceAddresses, (String, String)>,
+}
+
+impl InterfaceAddressesReceipt {
+   pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
+       let mut locks = Vec::new();
+
+       let setup = Self::setup(&mut locks);
+       Ok(setup(&db.lock_all(locks).await?)?)
+   }
+
+   pub fn setup(
+       locks: &mut Vec<patch_db::LockTargetId>,
+   ) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
+       // let cleanup_receipts = CleanupFailedReceipts::setup(locks);
+
+       let interface_addresses = crate::db::DatabaseModel::new()
+           .package_data()
+           .star()
+           .installed()
+           .map(|x| x.interface_addresses().star())
+           .make_locker(LockType::Read)
+           .add_to_keys(locks);
+       move |skeleton_key| {
+           Ok(Self {
+               // cleanup_receipts: cleanup_receipts(skeleton_key)?,
+               interface_addresses: interface_addresses.verify(skeleton_key)?,
+           })
+       }
+   }
+}
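`InterfaceAddressesReceipt` is this hunk's instance of the receipt pattern the whole changeset adopts: enumerate every lock target up front, acquire them in one `lock_all`, then convert the grant into typed receipts that gate all later reads. A reduced, self-contained sketch of the shape, with stand-in types rather than patch_db's real API:

```rust
// All names here are hypothetical stand-ins for patch_db's API.
use std::marker::PhantomData;

struct LockTarget(String);
struct Guard; // stands in for the verified lock set returned by lock_all

struct Receipt<T> {
    path: String,
    _marker: PhantomData<T>,
}

// Phase 1: register a lock target; phase 2 happens when the returned
// closure is called with the guard, minting a typed receipt.
fn make_locker<T>(path: &str, locks: &mut Vec<LockTarget>) -> impl FnOnce(&Guard) -> Receipt<T> {
    locks.push(LockTarget(path.to_string()));
    let path = path.to_string();
    move |_guard| Receipt {
        path,
        _marker: PhantomData,
    }
}

fn main() {
    let mut locks = Vec::new();
    // Declare everything before acquiring anything (no partial acquisition).
    let addresses =
        make_locker::<String>("/package-data/*/installed/interface-addresses", &mut locks);
    let guard = Guard; // stands in for db.lock_all(locks).await?
    let receipt: Receipt<String> = addresses(&guard);
    println!("holding receipt for {}", receipt.path);
}
```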

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct LanAddressPointer {
@@ -1672,28 +1851,81 @@ pub struct LanAddressPointer {
}
impl fmt::Display for LanAddressPointer {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-       match self {
-           LanAddressPointer {
-               package_id,
-               interface,
-           } => write!(f, "{}: lan-address: {}", package_id, interface),
-       }
+       let LanAddressPointer {
+           package_id,
+           interface,
+       } = self;
+       write!(f, "{}: lan-address: {}", package_id, interface)
    }
}
impl LanAddressPointer {
-   async fn deref<Db: DbHandle>(&self, db: &mut Db) -> Result<Value, ConfigurationError> {
-       let addr = crate::db::DatabaseModel::new()
-           .package_data()
-           .idx_model(&self.package_id)
-           .and_then(|pde| pde.installed())
-           .and_then(|installed| installed.interface_addresses().idx_model(&self.interface))
-           .and_then(|addresses| addresses.lan_address())
-           .get(db, true)
+   async fn deref<Db: DbHandle>(
+       &self,
+       db: &mut Db,
+       receipts: &InterfaceAddressesReceipt,
+   ) -> Result<Value, ConfigurationError> {
+       let addr = receipts
+           .interface_addresses
+           .get(db, (&self.package_id, &self.interface))
            .await
-           .map_err(|e| ConfigurationError::SystemError(Error::from(e)))?;
+           .ok()
+           .flatten()
+           .and_then(|x| x.lan_address);
        Ok(addr.to_owned().map(Value::String).unwrap_or(Value::Null))
    }
}

+pub struct ConfigPointerReceipts {
+   interface_addresses_receipt: InterfaceAddressesReceipt,
+   manifest_volumes: LockReceipt<crate::volume::Volumes, String>,
+   manifest_version: LockReceipt<crate::util::Version, String>,
+   config_actions: LockReceipt<super::action::ConfigActions, String>,
+}
+
+impl ConfigPointerReceipts {
+   pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
+       let mut locks = Vec::new();
+
+       let setup = Self::setup(&mut locks);
+       Ok(setup(&db.lock_all(locks).await?)?)
+   }
+
+   pub fn setup(
+       locks: &mut Vec<patch_db::LockTargetId>,
+   ) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
+       let interface_addresses_receipt = InterfaceAddressesReceipt::setup(locks);
+
+       let manifest_volumes = crate::db::DatabaseModel::new()
+           .package_data()
+           .star()
+           .installed()
+           .map(|x| x.manifest().volumes())
+           .make_locker(LockType::Read)
+           .add_to_keys(locks);
+       let manifest_version = crate::db::DatabaseModel::new()
+           .package_data()
+           .star()
+           .installed()
+           .map(|x| x.manifest().version())
+           .make_locker(LockType::Read)
+           .add_to_keys(locks);
+       let config_actions = crate::db::DatabaseModel::new()
+           .package_data()
+           .star()
+           .installed()
+           .and_then(|x| x.manifest().config())
+           .make_locker(LockType::Read)
+           .add_to_keys(locks);
+       move |skeleton_key| {
+           Ok(Self {
+               interface_addresses_receipt: interface_addresses_receipt(skeleton_key)?,
+               manifest_volumes: manifest_volumes.verify(skeleton_key)?,
+               config_actions: config_actions.verify(skeleton_key)?,
+               manifest_version: manifest_version.verify(skeleton_key)?,
+           })
+       }
+   }
+}
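Note how receipts compose: `ConfigPointerReceipts::setup` threads the same `locks` vector through the nested `InterfaceAddressesReceipt::setup` before registering its own three lockers, so a single `lock_all` call acquires every target at once, and each receipt is then verified against the same skeleton key.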
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
#[serde(rename_all = "kebab-case")]
pub struct ConfigPointer {
@@ -1710,40 +1942,22 @@ impl ConfigPointer {
        ctx: &RpcContext,
        db: &mut Db,
        config_overrides: &BTreeMap<PackageId, Config>,
+       receipts: &ConfigPointerReceipts,
    ) -> Result<Value, ConfigurationError> {
        if let Some(cfg) = config_overrides.get(&self.package_id) {
            Ok(self.select(&Value::Object(cfg.clone())))
        } else {
-           let manifest_model: OptionModel<Manifest> = crate::db::DatabaseModel::new()
-               .package_data()
-               .idx_model(&self.package_id)
-               .and_then(|pde| pde.installed())
-               .map(|installed| installed.manifest())
-               .into();
-           let version = manifest_model
-               .clone()
-               .map(|manifest| manifest.version())
-               .get(db, true)
-               .await
-               .map_err(|e| ConfigurationError::SystemError(Error::from(e)))?;
-           let cfg_actions = manifest_model
-               .clone()
-               .and_then(|manifest| manifest.config())
-               .get(db, true)
-               .await
-               .map_err(|e| ConfigurationError::SystemError(Error::from(e)))?;
-           let volumes = manifest_model
-               .map(|manifest| manifest.volumes())
-               .get(db, true)
-               .await
-               .map_err(|e| ConfigurationError::SystemError(Error::from(e)))?;
+           let id = &self.package_id;
+           let version = receipts.manifest_version.get(db, id).await.ok().flatten();
+           let cfg_actions = receipts.config_actions.get(db, id).await.ok().flatten();
+           let volumes = receipts.manifest_volumes.get(db, id).await.ok().flatten();
            if let (Some(version), Some(cfg_actions), Some(volumes)) =
-               (&*version, &*cfg_actions, &*volumes)
+               (&version, &cfg_actions, &volumes)
            {
                let cfg_res = cfg_actions
-                   .get(&ctx, &self.package_id, version, volumes)
+                   .get(ctx, &self.package_id, version, volumes)
                    .await
-                   .map_err(|e| ConfigurationError::SystemError(Error::from(e)))?;
+                   .map_err(|e| ConfigurationError::SystemError(e))?;
                if let Some(cfg) = cfg_res.config {
                    Ok(self.select(&Value::Object(cfg)))
                } else {
@@ -1757,13 +1971,12 @@ impl ConfigPointer {
}
impl fmt::Display for ConfigPointer {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
-       match self {
-           ConfigPointer {
-               package_id,
-               selector,
-               ..
-           } => write!(f, "{}: config: {}", package_id, selector),
-       }
+       let ConfigPointer {
+           package_id,
+           selector,
+           ..
+       } = self;
+       write!(f, "{}: config: {}", package_id, selector)
    }
}

@@ -1909,6 +2122,8 @@ impl ValueSpec for SystemPointerSpec {
        _manifest: &Manifest,
        _config_overrides: &BTreeMap<PackageId, Config>,
        value: &mut Value,
+
+       _receipts: &ConfigPointerReceipts,
    ) -> Result<(), ConfigurationError> {
        *value = self.deref(db).await?;
        Ok(())
@@ -1926,3 +2141,42 @@ impl ValueSpec for SystemPointerSpec {
        false
    }
}
+
+#[test]
+fn invalid_regex_produces_error() {
+   assert!(
+       serde_yaml::from_reader::<_, ConfigSpec>(std::io::Cursor::new(include_bytes!(
+           "../../test/config-spec/lnd-invalid-regex.yaml"
+       )))
+       .is_err()
+   )
+}
+
+#[test]
+fn missing_pattern_description_produces_error() {
+   assert!(
+       serde_yaml::from_reader::<_, ConfigSpec>(std::io::Cursor::new(include_bytes!(
+           "../../test/config-spec/lnd-missing-pattern-description.yaml"
+       )))
+       .is_err()
+   )
+}
+
+#[test]
+fn missing_pattern_produces_error() {
+   assert!(
+       serde_yaml::from_reader::<_, ConfigSpec>(std::io::Cursor::new(include_bytes!(
+           "../../test/config-spec/lnd-missing-pattern.yaml"
+       )))
+       .is_err()
+   )
+}
+
+#[test]
+fn regex_control() {
+   let spec = serde_yaml::from_reader::<_, ConfigSpec>(std::io::Cursor::new(include_bytes!(
+       "../../test/config-spec/lnd-correct.yaml"
+   )))
+   .unwrap();
+   println!("{}", serde_json::to_string_pretty(&spec).unwrap());
+}

@@ -15,6 +15,7 @@ use rpc_toolkit::Context;
use serde::Deserialize;
use tracing::instrument;

+use crate::util::config::{load_config_from_paths, local_config_path};
use crate::ResultExt;

#[derive(Debug, Default, Deserialize)]
@@ -23,6 +24,7 @@ pub struct CliContextConfig {
    pub bind_rpc: Option<SocketAddr>,
    pub host: Option<Url>,
+   #[serde(deserialize_with = "crate::util::serde::deserialize_from_str_opt")]
+   #[serde(default)]
    pub proxy: Option<Url>,
    pub cookie_path: Option<PathBuf>,
}
@@ -38,6 +40,10 @@ pub struct CliContextSeed {
impl Drop for CliContextSeed {
    fn drop(&mut self) {
        let tmp = format!("{}.tmp", self.cookie_path.display());
+       let parent_dir = self.cookie_path.parent().unwrap_or(Path::new("/"));
+       if !parent_dir.exists() {
+           std::fs::create_dir_all(&parent_dir).unwrap();
+       }
        let mut writer = fd_lock_rs::FdLock::lock(
            File::create(&tmp).unwrap(),
            fd_lock_rs::LockType::Exclusive,
@@ -60,16 +66,16 @@ impl CliContext {
    /// BLOCKING
    #[instrument(skip(matches))]
    pub fn init(matches: &ArgMatches) -> Result<Self, crate::Error> {
-       let cfg_path = Path::new(matches.value_of("config").unwrap_or(crate::CONFIG_PATH));
-       let base = if cfg_path.exists() {
-           serde_yaml::from_reader(
-               File::open(cfg_path)
-                   .with_ctx(|_| (crate::ErrorKind::Filesystem, cfg_path.display().to_string()))?,
-           )
-           .with_kind(crate::ErrorKind::Deserialization)?
-       } else {
-           CliContextConfig::default()
-       };
+       let local_config_path = local_config_path();
+       let base: CliContextConfig = load_config_from_paths(
+           matches
+               .values_of("config")
+               .into_iter()
+               .flatten()
+               .map(|p| Path::new(p))
+               .chain(local_config_path.as_deref().into_iter())
+               .chain(std::iter::once(Path::new(crate::util::config::CONFIG_PATH))),
+       )?;
        let mut url = if let Some(host) = matches.value_of("host") {
            host.parse()?
        } else if let Some(host) = base.host {
@@ -88,7 +94,9 @@ impl CliContext {
        };

        let cookie_path = base.cookie_path.unwrap_or_else(|| {
-           cfg_path
+           local_config_path
+               .as_deref()
+               .unwrap_or_else(|| Path::new(crate::util::config::CONFIG_PATH))
                .parent()
                .unwrap_or(Path::new("/"))
                .join(".cookies.json")
@@ -149,3 +157,13 @@ impl Context for CliContext {
        &self.0.client
    }
}
+/// When we had an empty proxy the system wasn't working like it used to, which allowed empty proxy
+#[test]
+fn test_cli_proxy_empty() {
+   serde_yaml::from_str::<CliContextConfig>(
+       "
+bind_rpc:
+",
+   )
+   .unwrap();
+}
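The `init` rewrite above replaces the single-file load with `load_config_from_paths`, layering explicit `--config` flags over a per-user path over the system default. The helper's exact merge semantics aren't shown in this diff, so the sketch below is an assumption-labelled illustration in which earlier (higher-priority) paths win key by key:

```rust
// Illustrative only: the real load_config_from_paths lives in crate::util::config,
// and the merge order here (earlier paths win per key) is an assumption.
use std::path::Path;

fn load_layered<'a, T: serde::de::DeserializeOwned>(
    paths: impl Iterator<Item = &'a Path>,
) -> Result<T, Box<dyn std::error::Error>> {
    let mut merged = serde_yaml::Mapping::new();
    for path in paths {
        if !path.exists() {
            continue; // missing layers are skipped, not errors
        }
        let layer: serde_yaml::Mapping = serde_yaml::from_str(&std::fs::read_to_string(path)?)?;
        for (k, v) in layer {
            if !merged.contains_key(&k) {
                merged.insert(k, v); // first (highest-priority) occurrence wins
            }
        }
    }
    Ok(serde_yaml::from_value(serde_yaml::Value::Mapping(merged))?)
}
```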

@@ -28,7 +28,7 @@ impl DiagnosticContextConfig {
        let cfg_path = path
            .as_ref()
            .map(|p| p.as_ref())
-           .unwrap_or(Path::new(crate::CONFIG_PATH));
+           .unwrap_or(Path::new(crate::util::config::CONFIG_PATH));
        if let Some(f) = File::maybe_open(cfg_path)
            .await
            .with_ctx(|_| (crate::ErrorKind::Filesystem, cfg_path.display().to_string()))?

@@ -7,9 +7,8 @@ use std::sync::Arc;
use std::time::Duration;

use bollard::Docker;
use color_eyre::eyre::eyre;
-use patch_db::json_ptr::JsonPointer;
-use patch_db::{DbHandle, LockType, PatchDb, Revision};
+use patch_db::{DbHandle, LockReceipt, LockType, PatchDb, Revision};
use reqwest::Url;
use rpc_toolkit::url::Host;
use rpc_toolkit::Context;
@@ -24,7 +23,7 @@ use tracing::instrument;
use crate::core::rpc_continuations::{RequestGuid, RpcContinuation};
use crate::db::model::{Database, InstalledPackageDataEntry, PackageDataEntry};
use crate::hostname::{derive_hostname, derive_id, get_product_key};
-use crate::install::cleanup::{cleanup_failed, uninstall};
+use crate::install::cleanup::{cleanup_failed, uninstall, CleanupFailedReceipts};
use crate::manager::ManagerMap;
use crate::middleware::auth::HashSessionToken;
use crate::net::tor::os_key;
@@ -46,6 +45,7 @@ pub struct RpcContextConfig {
    pub bind_static: Option<SocketAddr>,
    pub tor_control: Option<SocketAddr>,
    pub tor_socks: Option<SocketAddr>,
+   pub dns_bind: Option<Vec<SocketAddr>>,
    pub revision_cache_size: Option<usize>,
    pub datadir: Option<PathBuf>,
    pub log_server: Option<Url>,
@@ -55,7 +55,7 @@ impl RpcContextConfig {
        let cfg_path = path
            .as_ref()
            .map(|p| p.as_ref())
-           .unwrap_or(Path::new(crate::CONFIG_PATH));
+           .unwrap_or(Path::new(crate::util::config::CONFIG_PATH));
        if let Some(f) = File::maybe_open(cfg_path)
            .await
            .with_ctx(|_| (crate::ErrorKind::Filesystem, cfg_path.display().to_string()))?
@@ -132,6 +132,71 @@ pub struct RpcContextSeed {
    pub wifi_manager: Arc<RwLock<WpaCli>>,
}

+pub struct RpcCleanReceipts {
+   cleanup_receipts: CleanupFailedReceipts,
+   packages: LockReceipt<crate::db::model::AllPackageData, ()>,
+   package: LockReceipt<crate::db::model::PackageDataEntry, String>,
+}
+
+impl RpcCleanReceipts {
+   pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
+       let mut locks = Vec::new();
+
+       let setup = Self::setup(&mut locks);
+       Ok(setup(&db.lock_all(locks).await?)?)
+   }
+
+   pub fn setup(
+       locks: &mut Vec<patch_db::LockTargetId>,
+   ) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
+       let cleanup_receipts = CleanupFailedReceipts::setup(locks);
+
+       let packages = crate::db::DatabaseModel::new()
+           .package_data()
+           .make_locker(LockType::Write)
+           .add_to_keys(locks);
+       let package = crate::db::DatabaseModel::new()
+           .package_data()
+           .star()
+           .make_locker(LockType::Write)
+           .add_to_keys(locks);
+       move |skeleton_key| {
+           Ok(Self {
+               cleanup_receipts: cleanup_receipts(skeleton_key)?,
+               packages: packages.verify(skeleton_key)?,
+               package: package.verify(skeleton_key)?,
+           })
+       }
+   }
+}
+
+pub struct RpcSetNginxReceipts {
+   server_info: LockReceipt<crate::db::model::ServerInfo, ()>,
+}
+
+impl RpcSetNginxReceipts {
+   pub async fn new(db: &'_ mut impl DbHandle) -> Result<Self, Error> {
+       let mut locks = Vec::new();
+
+       let setup = Self::setup(&mut locks);
+       Ok(setup(&db.lock_all(locks).await?)?)
+   }
+
+   pub fn setup(
+       locks: &mut Vec<patch_db::LockTargetId>,
+   ) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
+       let server_info = crate::db::DatabaseModel::new()
+           .server_info()
+           .make_locker(LockType::Read)
+           .add_to_keys(locks);
+       move |skeleton_key| {
+           Ok(Self {
+               server_info: server_info.verify(skeleton_key)?,
+           })
+       }
+   }
+}

#[derive(Clone)]
pub struct RpcContext(Arc<RpcContextSeed>);
impl RpcContext {
@@ -158,6 +223,10 @@ impl RpcContext {
            crate::net::tor::os_key(&mut secret_store.acquire().await?).await?,
            base.tor_control
                .unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
+           base.dns_bind
+               .as_ref()
+               .map(|v| v.as_slice())
+               .unwrap_or(&[SocketAddr::from(([127, 0, 0, 1], 53))]),
            secret_store.clone(),
            None,
        )
@@ -203,13 +272,15 @@ impl RpcContext {
        tracing::info!("Initialized Package Managers");
        Ok(res)
    }
-   #[instrument(skip(self, db))]
-   pub async fn set_nginx_conf<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error> {
+
+   #[instrument(skip(self, db, receipts))]
+   pub async fn set_nginx_conf<Db: DbHandle>(
+       &self,
+       db: &mut Db,
+       receipts: RpcSetNginxReceipts,
+   ) -> Result<(), Error> {
        tokio::fs::write("/etc/nginx/sites-available/default", {
-           let info = crate::db::DatabaseModel::new()
-               .server_info()
-               .get(db, true)
-               .await?;
+           let info = receipts.server_info.get(db).await?;
            format!(
                include_str!("../nginx/main-ui.conf.template"),
                lan_hostname = info.lan_address.host_str().unwrap(),
@@ -237,34 +308,19 @@ impl RpcContext {
        self.is_closed.store(true, Ordering::SeqCst);
        Ok(())
    }

    #[instrument(skip(self))]
    pub async fn cleanup(&self) -> Result<(), Error> {
        let mut db = self.db.handle();
-       crate::db::DatabaseModel::new()
-           .package_data()
-           .lock(&mut db, LockType::Write)
-           .await?;
-       for package_id in crate::db::DatabaseModel::new()
-           .package_data()
-           .keys(&mut db, true)
-           .await?
-       {
+       let receipts = RpcCleanReceipts::new(&mut db).await?;
+       for (package_id, package) in receipts.packages.get(&mut db).await?.0 {
            if let Err(e) = async {
-               let mut pde = crate::db::DatabaseModel::new()
-                   .package_data()
-                   .idx_model(&package_id)
-                   .get_mut(&mut db)
-                   .await?;
-               match pde.as_mut().ok_or_else(|| {
-                   Error::new(
-                       eyre!("Node does not exist: /package-data/{}", package_id),
-                       crate::ErrorKind::Database,
-                   )
-               })? {
+               match package {
                    PackageDataEntry::Installing { .. }
                    | PackageDataEntry::Restoring { .. }
                    | PackageDataEntry::Updating { .. } => {
-                       cleanup_failed(self, &mut db, &package_id).await?;
+                       cleanup_failed(self, &mut db, &package_id, &receipts.cleanup_receipts)
+                           .await?;
                    }
                    PackageDataEntry::Removing { .. } => {
                        uninstall(
@@ -276,30 +332,36 @@ impl RpcContext {
                        .await?;
                    }
                    PackageDataEntry::Installed {
-                       installed:
-                           InstalledPackageDataEntry {
-                               status: Status { main, .. },
-                               ..
-                           },
-                       ..
+                       installed,
+                       static_files,
+                       manifest,
                    } => {
-                       let new_main = match std::mem::replace(
-                           main,
-                           MainStatus::Stopped, /* placeholder */
-                       ) {
+                       let status = installed.status;
+                       let main = match status.main {
                            MainStatus::BackingUp { started, .. } => {
                                if let Some(_) = started {
-                                   MainStatus::Starting
+                                   MainStatus::Starting { restarting: false }
                                } else {
                                    MainStatus::Stopped
                                }
                            }
-                           MainStatus::Running { .. } => MainStatus::Starting,
-                           a => a,
+                           MainStatus::Running { .. } => {
+                               MainStatus::Starting { restarting: false }
+                           }
+                           a => a.clone(),
                        };
-                       *main = new_main;
-
-                       pde.save(&mut db).await?;
+                       let new_package = PackageDataEntry::Installed {
+                           installed: InstalledPackageDataEntry {
+                               status: Status { main, ..status },
+                               ..installed
+                           },
+                           static_files,
+                           manifest,
+                       };
+                       receipts
+                           .package
+                           .set(&mut db, new_package, &package_id)
+                           .await?;
                    }
                }
                Ok::<_, Error>(())
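The `cleanup` rewrite above also changes how the `Installed` arm persists its fix-up: instead of mutating `main` in place through `get_mut` and calling `pde.save`, it rebuilds the whole `PackageDataEntry` and writes it back through `receipts.package.set`, so every write goes through the same pre-acquired lock receipts as the reads.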
@@ -1,5 +1,3 @@
-use std::fs::File;
-use std::io::Read;
use std::path::{Path, PathBuf};
use std::sync::Arc;

@@ -9,6 +7,7 @@ use rpc_toolkit::Context;
use serde::Deserialize;
use tracing::instrument;

+use crate::util::config::{load_config_from_paths, local_config_path};
use crate::{Error, ResultExt};

#[derive(Debug, Default, Deserialize)]
@@ -28,22 +27,24 @@ impl SdkContext {
    /// BLOCKING
    #[instrument(skip(matches))]
    pub fn init(matches: &ArgMatches) -> Result<Self, crate::Error> {
-       let cfg_path = Path::new(matches.value_of("config").unwrap_or(crate::CONFIG_PATH));
-       let base = if cfg_path.exists() {
-           serde_yaml::from_reader(
-               File::open(cfg_path)
-                   .with_ctx(|_| (crate::ErrorKind::Filesystem, cfg_path.display().to_string()))?,
-           )
-           .with_kind(crate::ErrorKind::Deserialization)?
-       } else {
-           SdkContextConfig::default()
-       };
+       let local_config_path = local_config_path();
+       let base: SdkContextConfig = load_config_from_paths(
+           matches
+               .values_of("config")
+               .into_iter()
+               .flatten()
+               .map(|p| Path::new(p))
+               .chain(local_config_path.as_deref().into_iter())
+               .chain(std::iter::once(Path::new(crate::util::config::CONFIG_PATH))),
+       )?;
        Ok(SdkContext(Arc::new(SdkContextSeed {
            developer_key_path: base.developer_key_path.unwrap_or_else(|| {
-               cfg_path
+               local_config_path
+                   .as_deref()
+                   .unwrap_or_else(|| Path::new(crate::util::config::CONFIG_PATH))
                    .parent()
                    .unwrap_or(Path::new("/"))
-                   .join(".developer_key")
+                   .join("developer.key.pem")
            }),
        })))
    }
@@ -53,9 +54,17 @@ impl SdkContext {
        if !self.developer_key_path.exists() {
            return Err(Error::new(eyre!("Developer Key does not exist! Please run `embassy-sdk init` before running this command."), crate::ErrorKind::Uninitialized));
        }
-       let mut keypair_buf = [0; ed25519_dalek::KEYPAIR_LENGTH];
-       File::open(&self.developer_key_path)?.read_exact(&mut keypair_buf)?;
-       Ok(ed25519_dalek::Keypair::from_bytes(&keypair_buf)?)
+       let pair = <ed25519::KeypairBytes as ed25519::pkcs8::DecodePrivateKey>::from_pkcs8_pem(
+           &std::fs::read_to_string(&self.developer_key_path)?,
+       )
+       .with_kind(crate::ErrorKind::Pem)?;
+       let secret = ed25519_dalek::SecretKey::from_bytes(&pair.secret_key[..])?;
+       let public = if let Some(public) = pair.public_key {
+           ed25519_dalek::PublicKey::from_bytes(&public[..])?
+       } else {
+           (&secret).into()
+       };
+       Ok(ed25519_dalek::Keypair { secret, public })
    }
}
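The developer key moves from a raw 64-byte `ed25519_dalek::Keypair` dump to a PKCS#8 PEM document, with the public half re-derived when the PEM omits it. A standalone sketch of that loading path, assuming the `ed25519` crate (with its `pkcs8` feature) and `ed25519-dalek` 1.x, mirroring the calls in the diff above:

```rust
// Assumes the `ed25519` crate with its `pkcs8` feature and `ed25519-dalek` 1.x.
use ed25519::pkcs8::DecodePrivateKey;

fn load_keypair(pem: &str) -> Result<ed25519_dalek::Keypair, Box<dyn std::error::Error>> {
    let pair = ed25519::KeypairBytes::from_pkcs8_pem(pem)?;
    let secret = ed25519_dalek::SecretKey::from_bytes(&pair.secret_key[..])?;
    let public = match pair.public_key {
        // The PEM may carry the public key; otherwise derive it from the secret.
        Some(public) => ed25519_dalek::PublicKey::from_bytes(&public[..])?,
        None => (&secret).into(),
    };
    Ok(ed25519_dalek::Keypair { secret, public })
}
```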
impl std::ops::Deref for SdkContext {

@@ -45,7 +45,7 @@ impl SetupContextConfig {
        let cfg_path = path
            .as_ref()
            .map(|p| p.as_ref())
-           .unwrap_or(Path::new(crate::CONFIG_PATH));
+           .unwrap_or(Path::new(crate::util::config::CONFIG_PATH));
        if let Some(f) = File::maybe_open(cfg_path)
            .await
            .with_ctx(|_| (crate::ErrorKind::Filesystem, cfg_path.display().to_string()))?

@@ -1,7 +1,7 @@
use std::collections::BTreeMap;

use color_eyre::eyre::eyre;
-use patch_db::{DbHandle, LockType};
+use patch_db::{DbHandle, LockReceipt, LockType};
use rpc_toolkit::command;
use tracing::instrument;

@@ -9,13 +9,57 @@ use crate::context::RpcContext;
use crate::db::util::WithRevision;
use crate::dependencies::{
    break_all_dependents_transitive, heal_all_dependents_transitive, BreakageRes, DependencyError,
-   TaggedDependencyError,
+   DependencyReceipt, TaggedDependencyError,
};
use crate::s9pk::manifest::PackageId;
use crate::status::MainStatus;
use crate::util::display_none;
use crate::util::serde::display_serializable;
-use crate::{Error, ResultExt};
+use crate::Error;

+#[derive(Clone)]
+pub struct StartReceipts {
+   dependency_receipt: DependencyReceipt,
+   status: LockReceipt<MainStatus, ()>,
+   version: LockReceipt<crate::util::Version, ()>,
+}
+
+impl StartReceipts {
+   pub async fn new(db: &mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
+       let mut locks = Vec::new();
+
+       let setup = Self::setup(&mut locks, id);
+       Ok(setup(&db.lock_all(locks).await?)?)
+   }
+
+   pub fn setup(
+       locks: &mut Vec<patch_db::LockTargetId>,
+       id: &PackageId,
+   ) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
+       let dependency_receipt = DependencyReceipt::setup(locks);
+       let status = crate::db::DatabaseModel::new()
+           .package_data()
+           .idx_model(id)
+           .and_then(|x| x.installed())
+           .map(|x| x.status().main())
+           .make_locker(LockType::Write)
+           .add_to_keys(locks);
+       let version = crate::db::DatabaseModel::new()
+           .package_data()
+           .idx_model(id)
+           .and_then(|x| x.installed())
+           .map(|x| x.manifest().version())
+           .make_locker(LockType::Read)
+           .add_to_keys(locks);
+       move |skeleton_key| {
+           Ok(Self {
+               dependency_receipt: dependency_receipt(skeleton_key)?,
+               status: status.verify(skeleton_key)?,
+               version: version.verify(skeleton_key)?,
+           })
+       }
+   }
+}

#[command(display(display_none))]
#[instrument(skip(ctx))]
@@ -25,37 +69,16 @@ pub async fn start(
) -> Result<WithRevision<()>, Error> {
    let mut db = ctx.db.handle();
    let mut tx = db.begin().await?;
-   crate::db::DatabaseModel::new()
-       .package_data()
-       .lock(&mut tx, LockType::Write)
+   let receipts = StartReceipts::new(&mut tx, &id).await?;
+   let version = receipts.version.get(&mut tx).await?;
+   receipts
+       .status
+       .set(&mut tx, MainStatus::Starting { restarting: false })
        .await?;
-   let installed = crate::db::DatabaseModel::new()
-       .package_data()
-       .idx_model(&id)
-       .and_then(|pkg| pkg.installed())
-       .expect(&mut tx)
-       .await
-       .with_ctx(|_| {
-           (
-               crate::ErrorKind::NotFound,
-               format!("{} is not installed", id),
-           )
-       })?;
-   installed.lock(&mut tx, LockType::Read).await?;
-   let version = installed
-       .clone()
-       .manifest()
-       .version()
-       .get(&mut tx, true)
-       .await?
-       .to_owned();
-   let mut status = installed.status().main().get_mut(&mut tx).await?;
-
-   *status = MainStatus::Starting;
-   status.save(&mut tx).await?;
-   heal_all_dependents_transitive(&ctx, &mut tx, &id).await?;
+   heal_all_dependents_transitive(&ctx, &mut tx, &id, &receipts.dependency_receipt).await?;

    let revision = tx.commit(None).await?;
+   drop(receipts);

    ctx.managers
        .get(&(id, version))
@@ -69,6 +92,40 @@ pub async fn start(
        response: (),
    })
}
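One detail worth noticing in the new `start`: `drop(receipts)` is called explicitly right after `tx.commit`. The receipts hold the locks granted by `lock_all`, so dropping them before the manager is signalled (presumably) releases the package-data locks rather than holding them across the actual service start.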
+#[derive(Clone)]
+pub struct StopReceipts {
+   breaks: crate::dependencies::BreakTransitiveReceipts,
+   status: LockReceipt<MainStatus, ()>,
+}
+
+impl StopReceipts {
+   pub async fn new<'a>(db: &'a mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
+       let mut locks = Vec::new();
+
+       let setup = Self::setup(&mut locks, id);
+       Ok(setup(&db.lock_all(locks).await?)?)
+   }
+
+   pub fn setup(
+       locks: &mut Vec<patch_db::LockTargetId>,
+       id: &PackageId,
+   ) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
+       let breaks = crate::dependencies::BreakTransitiveReceipts::setup(locks);
+       let status = crate::db::DatabaseModel::new()
+           .package_data()
+           .idx_model(id)
+           .and_then(|x| x.installed())
+           .map(|x| x.status().main())
+           .make_locker(LockType::Write)
+           .add_to_keys(locks);
+       move |skeleton_key| {
+           Ok(Self {
+               breaks: breaks(skeleton_key)?,
+               status: status.verify(skeleton_key)?,
+           })
+       }
+   }
+}

#[instrument(skip(db))]
async fn stop_common<Db: DbHandle>(
@@ -77,27 +134,18 @@ async fn stop_common<Db: DbHandle>(
    breakages: &mut BTreeMap<PackageId, TaggedDependencyError>,
) -> Result<(), Error> {
    let mut tx = db.begin().await?;
-   let mut status = crate::db::DatabaseModel::new()
-       .package_data()
-       .idx_model(&id)
-       .and_then(|pkg| pkg.installed())
-       .expect(&mut tx)
-       .await
-       .with_ctx(|_| {
-           (
-               crate::ErrorKind::NotFound,
-               format!("{} is not installed", id),
-           )
-       })?
-       .status()
-       .main()
-       .get_mut(&mut tx)
-       .await?;
+   let receipts = StopReceipts::new(&mut tx, id).await?;
+   receipts.status.set(&mut tx, MainStatus::Stopping).await?;

-   *status = MainStatus::Stopping;
-   status.save(&mut tx).await?;
+   tx.save().await?;
-   break_all_dependents_transitive(db, &id, DependencyError::NotRunning, breakages).await?;
+   break_all_dependents_transitive(
+       db,
+       id,
+       DependencyError::NotRunning,
+       breakages,
+       &receipts.breaks,
+   )
+   .await?;

    Ok(())
}
@@ -136,3 +184,33 @@ pub async fn stop_impl(ctx: RpcContext, id: PackageId) -> Result<WithRevision<()
        response: (),
    })
}

+#[command(display(display_none))]
+pub async fn restart(
+   #[context] ctx: RpcContext,
+   #[arg] id: PackageId,
+) -> Result<WithRevision<()>, Error> {
+   let mut db = ctx.db.handle();
+   let mut tx = db.begin().await?;
+
+   let mut status = crate::db::DatabaseModel::new()
+       .package_data()
+       .idx_model(&id)
+       .and_then(|pde| pde.installed())
+       .map(|i| i.status().main())
+       .get_mut(&mut tx)
+       .await?;
+   if !matches!(&*status, Some(MainStatus::Running { .. })) {
+       return Err(Error::new(
+           eyre!("{} is not running", id),
+           crate::ErrorKind::InvalidRequest,
+       ));
+   }
+   *status = Some(MainStatus::Restarting);
+   status.save(&mut tx).await?;
+
+   Ok(WithRevision {
+       revision: tx.commit(None).await?,
+       response: (),
+   })
+}

@@ -51,7 +51,7 @@ impl Database {
            .parse()
            .unwrap(),
        status_info: ServerStatus {
-           backing_up: false,
+           backup_progress: None,
            updated: false,
            update_progress: None,
        },
@@ -99,10 +99,16 @@ pub struct ServerInfo {
    pub password_hash: String,
}

+#[derive(Debug, Default, Deserialize, Serialize, HasModel)]
+pub struct BackupProgress {
+   pub complete: bool,
+}
+
#[derive(Debug, Default, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct ServerStatus {
-   pub backing_up: bool,
+   #[model]
+   pub backup_progress: Option<BTreeMap<PackageId, BackupProgress>>,
    pub updated: bool,
    #[model]
    pub update_progress: Option<UpdateProgress>,
@@ -260,17 +266,66 @@ pub struct InstalledPackageDataEntry {
    #[model]
    pub manifest: Manifest,
    pub last_backup: Option<DateTime<Utc>>,
    #[model]
    pub system_pointers: Vec<SystemPointerSpec>,
    #[model]
    pub dependency_info: BTreeMap<PackageId, StaticDependencyInfo>,
    #[model]
-   pub current_dependents: BTreeMap<PackageId, CurrentDependencyInfo>,
+   pub current_dependents: CurrentDependents,
    #[model]
-   pub current_dependencies: BTreeMap<PackageId, CurrentDependencyInfo>,
+   pub current_dependencies: CurrentDependencies,
    #[model]
    pub interface_addresses: InterfaceAddressMap,
}

+#[derive(Debug, Clone, Deserialize, Serialize)]
+pub struct CurrentDependents(pub BTreeMap<PackageId, CurrentDependencyInfo>);
+impl CurrentDependents {
+   pub fn map(
+       mut self,
+       transform: impl Fn(
+           BTreeMap<PackageId, CurrentDependencyInfo>,
+       ) -> BTreeMap<PackageId, CurrentDependencyInfo>,
+   ) -> Self {
+       self.0 = transform(self.0);
+       self
+   }
+}
+impl Map for CurrentDependents {
+   type Key = PackageId;
+   type Value = CurrentDependencyInfo;
+   fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
+       self.0.get(key)
+   }
+}
+impl HasModel for CurrentDependents {
+   type Model = MapModel<Self>;
+}
+
+#[derive(Debug, Clone, Default, Deserialize, Serialize)]
+pub struct CurrentDependencies(pub BTreeMap<PackageId, CurrentDependencyInfo>);
+impl CurrentDependencies {
+   pub fn map(
+       mut self,
+       transform: impl Fn(
+           BTreeMap<PackageId, CurrentDependencyInfo>,
+       ) -> BTreeMap<PackageId, CurrentDependencyInfo>,
+   ) -> Self {
+       self.0 = transform(self.0);
+       self
+   }
+}
+impl Map for CurrentDependencies {
+   type Key = PackageId;
+   type Value = CurrentDependencyInfo;
+   fn get(&self, key: &Self::Key) -> Option<&Self::Value> {
+       self.0.get(key)
+   }
+}
+impl HasModel for CurrentDependencies {
+   type Model = MapModel<Self>;
+}

#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
pub struct StaticDependencyInfo {

@@ -1,25 +1,75 @@
-use patch_db::DbHandle;
+use patch_db::{DbHandle, LockReceipt, LockTargetId, LockType, Verifier};

use crate::s9pk::manifest::{Manifest, PackageId};
use crate::Error;

-pub async fn get_packages<Db: DbHandle>(db: &mut Db) -> Result<Vec<PackageId>, Error> {
-   let packages = crate::db::DatabaseModel::new()
-       .package_data()
-       .get(db, false)
-       .await?;
+pub struct PackageReceipts {
+   package_data: LockReceipt<super::model::AllPackageData, ()>,
+}
+
+impl PackageReceipts {
+   pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
+       let mut locks = Vec::new();
+
+       let setup = Self::setup(&mut locks);
+       Ok(setup(&db.lock_all(locks).await?)?)
+   }
+
+   pub fn setup(locks: &mut Vec<LockTargetId>) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
+       let package_data = crate::db::DatabaseModel::new()
+           .package_data()
+           .make_locker(LockType::Read)
+           .add_to_keys(locks);
+       move |skeleton_key| {
+           Ok(Self {
+               package_data: package_data.verify(&skeleton_key)?,
+           })
+       }
+   }
+}
+
+pub async fn get_packages<Db: DbHandle>(
+   db: &mut Db,
+   receipts: &PackageReceipts,
+) -> Result<Vec<PackageId>, Error> {
+   let packages = receipts.package_data.get(db).await?;
    Ok(packages.0.keys().cloned().collect())
}

+pub struct ManifestReceipts {
+   manifest: LockReceipt<Manifest, String>,
+}
+
+impl ManifestReceipts {
+   pub async fn new<'a>(db: &'a mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
+       let mut locks = Vec::new();
+
+       let setup = Self::setup(&mut locks, id);
+       Ok(setup(&db.lock_all(locks).await?)?)
+   }
+
+   pub fn setup(
+       locks: &mut Vec<LockTargetId>,
+       _id: &PackageId,
+   ) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
+       let manifest = crate::db::DatabaseModel::new()
+           .package_data()
+           .star()
+           .manifest()
+           .make_locker(LockType::Read)
+           .add_to_keys(locks);
+       move |skeleton_key| {
+           Ok(Self {
+               manifest: manifest.verify(&skeleton_key)?,
+           })
+       }
+   }
+}
+
pub async fn get_manifest<Db: DbHandle>(
    db: &mut Db,
    pkg: &PackageId,
+   receipts: &ManifestReceipts,
) -> Result<Option<Manifest>, Error> {
-   let mpde = crate::db::DatabaseModel::new()
-       .package_data()
-       .idx_model(pkg)
-       .get(db, false)
-       .await?
-       .into_owned();
-   Ok(mpde.map(|pde| pde.manifest()))
+   Ok(receipts.manifest.get(db, pkg).await?)
}
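A subtlety in the new `ManifestReceipts`: `setup` ignores its `_id` argument and builds its locker with `.star()`, so the read lock covers every package's manifest even though `get_manifest` only reads one, keyed by `pkg` at `get` time. The receipt stays reusable across packages at the cost of a broader lock.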
@@ -6,19 +6,20 @@ use color_eyre::eyre::eyre;
use emver::VersionRange;
use futures::future::BoxFuture;
use futures::FutureExt;
-use patch_db::{DbHandle, HasModel, LockType, Map, MapModel, PatchDbHandle};
+use patch_db::{
+   DbHandle, HasModel, LockReceipt, LockTargetId, LockType, Map, MapModel, PatchDbHandle, Verifier,
+};
use rand::SeedableRng;
use rpc_toolkit::command;
use serde::{Deserialize, Serialize};
use tracing::instrument;

-use crate::action::{ActionImplementation, NoOutput};
-use crate::config::action::ConfigRes;
+use crate::config::action::{ConfigActions, ConfigRes};
use crate::config::spec::PackagePointerSpec;
-use crate::config::{Config, ConfigSpec};
+use crate::config::{not_found, Config, ConfigReceipts, ConfigSpec};
use crate::context::RpcContext;
-use crate::db::model::{CurrentDependencyInfo, InstalledPackageDataEntry};
-use crate::error::ResultExt;
+use crate::db::model::{CurrentDependencies, CurrentDependents, InstalledPackageDataEntry};
+use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::status::health_check::{HealthCheckId, HealthCheckResult};
use crate::status::{MainStatus, Status};
@@ -55,6 +56,72 @@ pub enum DependencyError {
    Transitive, // { "type": "transitive" }
}

+#[derive(Clone)]
+pub struct TryHealReceipts {
+   status: LockReceipt<Status, String>,
+   manifest: LockReceipt<Manifest, String>,
+   manifest_version: LockReceipt<Version, String>,
+   current_dependencies: LockReceipt<CurrentDependencies, String>,
+   dependency_errors: LockReceipt<DependencyErrors, String>,
+}
+
+impl TryHealReceipts {
+   pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
+       let mut locks = Vec::new();
+
+       let setup = Self::setup(&mut locks);
+       Ok(setup(&db.lock_all(locks).await?)?)
+   }
+
+   pub fn setup(locks: &mut Vec<LockTargetId>) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
+       let manifest_version = crate::db::DatabaseModel::new()
+           .package_data()
+           .star()
+           .installed()
+           .map(|x| x.manifest().version())
+           .make_locker(LockType::Write)
+           .add_to_keys(locks);
+       let status = crate::db::DatabaseModel::new()
+           .package_data()
+           .star()
+           .installed()
+           .map(|x| x.status())
+           .make_locker(LockType::Write)
+           .add_to_keys(locks);
+       let manifest = crate::db::DatabaseModel::new()
+           .package_data()
+           .star()
+           .installed()
+           .map(|x| x.manifest())
+           .make_locker(LockType::Write)
+           .add_to_keys(locks);
+
+       let current_dependencies = crate::db::DatabaseModel::new()
+           .package_data()
+           .star()
+           .installed()
+           .map(|x| x.current_dependencies())
+           .make_locker(LockType::Write)
+           .add_to_keys(locks);
+       let dependency_errors = crate::db::DatabaseModel::new()
+           .package_data()
+           .star()
+           .installed()
+           .map(|x| x.status().dependency_errors())
+           .make_locker(LockType::Write)
+           .add_to_keys(locks);
+       move |skeleton_key| {
+           Ok(Self {
+               status: status.verify(skeleton_key)?,
+               manifest_version: manifest_version.verify(skeleton_key)?,
+               current_dependencies: current_dependencies.verify(skeleton_key)?,
+               manifest: manifest.verify(skeleton_key)?,
+               dependency_errors: dependency_errors.verify(skeleton_key)?,
+           })
+       }
+   }
+}

impl DependencyError {
    pub fn cmp_priority(&self, other: &DependencyError) -> std::cmp::Ordering {
        use std::cmp::Ordering::*;
@@ -114,7 +181,7 @@ impl DependencyError {
            (DependencyError::Transitive, _) => DependencyError::Transitive,
        }
    }
-   #[instrument(skip(ctx, db))]
+   #[instrument(skip(ctx, db, receipts))]
    pub fn try_heal<'a, Db: DbHandle>(
        self,
        ctx: &'a RpcContext,
@@ -123,42 +190,33 @@ impl DependencyError {
        dependency: &'a PackageId,
        mut dependency_config: Option<Config>,
        info: &'a DepInfo,
+       receipts: &'a TryHealReceipts,
    ) -> BoxFuture<'a, Result<Option<Self>, Error>> {
        async move {
            Ok(match self {
                DependencyError::NotInstalled => {
-                   if crate::db::DatabaseModel::new()
-                       .package_data()
-                       .idx_model(dependency)
-                       .and_then(|m| m.installed())
-                       .exists(db, true)
-                       .await?
-                   {
+                   if receipts.status.get(db, dependency).await?.is_some() {
                        DependencyError::IncorrectVersion {
                            expected: info.version.clone(),
                            received: Default::default(),
                        }
-                       .try_heal(ctx, db, id, dependency, dependency_config, info)
+                       .try_heal(ctx, db, id, dependency, dependency_config, info, receipts)
                        .await?
                    } else {
                        Some(DependencyError::NotInstalled)
                    }
                }
                DependencyError::IncorrectVersion { expected, .. } => {
-                   let version: Version = crate::db::DatabaseModel::new()
-                       .package_data()
-                       .idx_model(dependency)
-                       .and_then(|m| m.installed())
-                       .map(|m| m.manifest().version())
-                       .get(db, true)
+                   let version: Version = receipts
+                       .manifest_version
+                       .get(db, dependency)
                        .await?
-                       .into_owned()
+                       .unwrap_or_default();
                    if version.satisfies(&expected) {
                        DependencyError::ConfigUnsatisfied {
                            error: String::new(),
                        }
-                       .try_heal(ctx, db, id, dependency, dependency_config, info)
+                       .try_heal(ctx, db, id, dependency, dependency_config, info, receipts)
                        .await?
                    } else {
                        Some(DependencyError::IncorrectVersion {
@@ -168,24 +226,14 @@ impl DependencyError {
                    }
                }
                DependencyError::ConfigUnsatisfied { .. } => {
-                   let dependent_manifest = crate::db::DatabaseModel::new()
-                       .package_data()
-                       .idx_model(id)
-                       .and_then(|m| m.installed())
-                       .map::<_, Manifest>(|m| m.manifest())
-                       .expect(db)
-                       .await?
-                       .get(db, true)
-                       .await?;
-                   let dependency_manifest = crate::db::DatabaseModel::new()
-                       .package_data()
-                       .idx_model(dependency)
-                       .and_then(|m| m.installed())
-                       .map::<_, Manifest>(|m| m.manifest())
-                       .expect(db)
-                       .await?
-                       .get(db, true)
-                       .await?;
+                   let dependent_manifest =
+                       receipts.manifest.get(db, id).await?.ok_or_else(not_found)?;
+                   let dependency_manifest = receipts
+                       .manifest
+                       .get(db, dependency)
+                       .await?
+                       .ok_or_else(not_found)?;

                    let dependency_config = if let Some(cfg) = dependency_config.take() {
                        cfg
                    } else if let Some(cfg_info) = &dependency_manifest.config {
@@ -209,6 +257,7 @@ impl DependencyError {
                        id,
                        &dependent_manifest.version,
                        &dependent_manifest.volumes,
+                       dependency,
                        &dependency_config,
                    )
                    .await?
@@ -217,40 +266,39 @@ impl DependencyError {
                        }
                    }
                    DependencyError::NotRunning
-                       .try_heal(ctx, db, id, dependency, Some(dependency_config), info)
+                       .try_heal(
+                           ctx,
+                           db,
+                           id,
+                           dependency,
+                           Some(dependency_config),
+                           info,
+                           receipts,
+                       )
                        .await?
                }
                DependencyError::NotRunning => {
-                   let status = crate::db::DatabaseModel::new()
-                       .package_data()
-                       .idx_model(dependency)
-                       .and_then(|m| m.installed())
-                       .map::<_, Status>(|m| m.status())
-                       .expect(db)
-                       .await?
-                       .get(db, true)
-                       .await?;
+                   let status = receipts
+                       .status
+                       .get(db, dependency)
+                       .await?
+                       .ok_or_else(not_found)?;
                    if status.main.running() {
                        DependencyError::HealthChecksFailed {
                            failures: BTreeMap::new(),
                        }
-                       .try_heal(ctx, db, id, dependency, dependency_config, info)
+                       .try_heal(ctx, db, id, dependency, dependency_config, info, receipts)
                        .await?
                    } else {
                        Some(DependencyError::NotRunning)
                    }
                }
                DependencyError::HealthChecksFailed { .. } => {
-                   let status = crate::db::DatabaseModel::new()
-                       .package_data()
-                       .idx_model(dependency)
-                       .and_then(|m| m.installed())
-                       .map::<_, Status>(|m| m.status())
-                       .expect(db)
-                       .await?
-                       .get(db, true)
-                       .await?
-                       .into_owned();
+                   let status = receipts
+                       .status
+                       .get(db, dependency)
+                       .await?
+                       .ok_or_else(not_found)?;
                    match status.main {
                        MainStatus::BackingUp {
                            started: Some(_),
@@ -260,19 +308,14 @@ impl DependencyError {
                            let mut failures = BTreeMap::new();
                            for (check, res) in health {
                                if !matches!(res, HealthCheckResult::Success)
-                                   && crate::db::DatabaseModel::new()
-                                       .package_data()
-                                       .idx_model(id)
-                                       .and_then(|m| m.installed())
-                                       .and_then::<_, CurrentDependencyInfo>(|m| {
-                                           m.current_dependencies().idx_model(dependency)
-                                       })
-                                       .get(db, true)
+                                   && receipts
+                                       .current_dependencies
+                                       .get(db, id)
                                        .await?
-                                       .into_owned()
-                                       .map(|i| i.health_checks)
-                                       .unwrap_or_default()
-                                       .contains(&check)
+                                       .ok_or_else(not_found)?
+                                       .get(dependency)
+                                       .map(|x| x.health_checks.contains(&check))
+                                       .unwrap_or(false)
                                {
                                    failures.insert(check.clone(), res.clone());
                                }
@@ -281,27 +324,39 @@ impl DependencyError {
                            Some(DependencyError::HealthChecksFailed { failures })
                        } else {
                            DependencyError::Transitive
-                               .try_heal(ctx, db, id, dependency, dependency_config, info)
+                               .try_heal(
+                                   ctx,
+                                   db,
+                                   id,
+                                   dependency,
+                                   dependency_config,
+                                   info,
+                                   receipts,
+                               )
                                .await?
                        }
                    }
-                   MainStatus::Starting => {
+                   MainStatus::Starting { .. } | MainStatus::Restarting => {
                        DependencyError::Transitive
-                           .try_heal(ctx, db, id, dependency, dependency_config, info)
+                           .try_heal(
+                               ctx,
+                               db,
+                               id,
+                               dependency,
+                               dependency_config,
+                               info,
+                               receipts,
+                           )
                            .await?
                    }
                    _ => return Ok(Some(DependencyError::NotRunning)),
                }
            }
            DependencyError::Transitive => {
-               if crate::db::DatabaseModel::new()
-                   .package_data()
-                   .idx_model(dependency)
-                   .and_then(|m| m.installed())
-                   .map::<_, DependencyErrors>(|m| m.status().dependency_errors())
-                   .get(db, true)
+               if receipts
+                   .dependency_errors
+                   .get(db, dependency)
                    .await?
-                   .into_owned()
+                   .unwrap_or_default()
                    .0
                    .is_empty()
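Read top to bottom, the rewritten `try_heal` arms form an escalation chain: `NotInstalled` re-checks as `IncorrectVersion`, which re-checks as `ConfigUnsatisfied`, then `NotRunning`, then `HealthChecksFailed`, and finally `Transitive`; each arm either clears its own condition and delegates to the next, or returns itself as the still-standing error. Threading `receipts` through every hop lets the whole chain run against one set of pre-acquired locks instead of re-locking the database per arm.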
@@ -406,6 +461,7 @@ impl DepInfo {
|
||||
dependency_id: &PackageId,
|
||||
dependency_config: Option<Config>, // fetch if none
|
||||
dependent_id: &PackageId,
|
||||
receipts: &TryHealReceipts,
|
||||
) -> Result<Result<(), DependencyError>, Error> {
|
||||
Ok(
|
||||
if let Some(err) = DependencyError::NotInstalled
|
||||
@@ -416,6 +472,7 @@ impl DepInfo {
|
||||
dependency_id,
|
||||
dependency_config,
|
||||
self,
|
||||
receipts,
|
||||
)
|
||||
.await?
|
||||
{
|
||||
@@ -430,8 +487,8 @@ impl DepInfo {
|
||||
#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct DependencyConfig {
|
||||
check: ActionImplementation,
|
||||
auto_configure: ActionImplementation,
|
||||
check: PackageProcedure,
|
||||
auto_configure: PackageProcedure,
|
||||
}
|
||||
impl DependencyConfig {
|
||||
pub async fn check(
|
||||
@@ -440,6 +497,7 @@ impl DependencyConfig {
|
||||
dependent_id: &PackageId,
|
||||
dependent_version: &Version,
|
||||
dependent_volumes: &Volumes,
|
||||
dependency_id: &PackageId,
|
||||
dependency_config: &Config,
|
||||
) -> Result<Result<NoOutput, String>, Error> {
|
||||
Ok(self
|
||||
@@ -451,6 +509,7 @@ impl DependencyConfig {
|
||||
dependent_volumes,
|
||||
Some(dependency_config),
|
||||
None,
|
||||
ProcedureName::Check(dependency_id.clone()),
|
||||
)
|
||||
.await?
|
||||
.map_err(|(_, e)| e))
|
||||
@@ -471,12 +530,97 @@ impl DependencyConfig {
dependent_volumes,
Some(old),
None,
ProcedureName::AutoConfig(dependent_id.clone()),
)
.await?
.map_err(|e| Error::new(eyre!("{}", e.1), crate::ErrorKind::AutoConfigure))
}
}

pub struct DependencyConfigReceipts {
config: ConfigReceipts,
dependencies: LockReceipt<Dependencies, ()>,
dependency_volumes: LockReceipt<Volumes, ()>,
dependency_version: LockReceipt<Version, ()>,
dependency_config_action: LockReceipt<ConfigActions, ()>,
package_volumes: LockReceipt<Volumes, ()>,
package_version: LockReceipt<Version, ()>,
}

impl DependencyConfigReceipts {
pub async fn new<'a>(
db: &'a mut impl DbHandle,
package_id: &PackageId,
dependency_id: &PackageId,
) -> Result<Self, Error> {
let mut locks = Vec::new();

let setup = Self::setup(&mut locks, package_id, dependency_id);
Ok(setup(&db.lock_all(locks).await?)?)
}

pub fn setup(
locks: &mut Vec<LockTargetId>,
package_id: &PackageId,
dependency_id: &PackageId,
) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
let config = ConfigReceipts::setup(locks);
let dependencies = crate::db::DatabaseModel::new()
.package_data()
.idx_model(package_id)
.and_then(|x| x.installed())
.map(|x| x.manifest().dependencies())
.make_locker(LockType::Write)
.add_to_keys(locks);
let dependency_volumes = crate::db::DatabaseModel::new()
.package_data()
.idx_model(dependency_id)
.and_then(|x| x.installed())
.map(|x| x.manifest().volumes())
.make_locker(LockType::Write)
.add_to_keys(locks);
let dependency_version = crate::db::DatabaseModel::new()
.package_data()
.idx_model(dependency_id)
.and_then(|x| x.installed())
.map(|x| x.manifest().version())
.make_locker(LockType::Write)
.add_to_keys(locks);
let dependency_config_action = crate::db::DatabaseModel::new()
.package_data()
.idx_model(dependency_id)
.and_then(|x| x.installed())
.and_then(|x| x.manifest().config())
.make_locker(LockType::Write)
.add_to_keys(locks);
let package_volumes = crate::db::DatabaseModel::new()
.package_data()
.idx_model(package_id)
.and_then(|x| x.installed())
.map(|x| x.manifest().volumes())
.make_locker(LockType::Write)
.add_to_keys(locks);
let package_version = crate::db::DatabaseModel::new()
.package_data()
.idx_model(package_id)
.and_then(|x| x.installed())
.map(|x| x.manifest().version())
.make_locker(LockType::Write)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
config: config(skeleton_key)?,
dependencies: dependencies.verify(&skeleton_key)?,
dependency_volumes: dependency_volumes.verify(&skeleton_key)?,
dependency_version: dependency_version.verify(&skeleton_key)?,
dependency_config_action: dependency_config_action.verify(&skeleton_key)?,
package_volumes: package_volumes.verify(&skeleton_key)?,
package_version: package_version.verify(&skeleton_key)?,
})
}
}
}

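// Note: DependencyConfigReceipts above is one instance of the "receipts" pattern this
// changeset applies throughout: declare every database path a routine will touch, take
// all the locks in a single lock_all call, then verify each lock into a typed receipt
// that is used for all later reads and writes. A minimal sketch of the shape with a
// single locked path; the types mirror the patch_db calls shown in the diff, and the
// concrete path `server_info().version()` is only an illustrative example:

pub struct VersionReceipt {
    version: LockReceipt<Version, ()>,
}

impl VersionReceipt {
    pub async fn new(db: &mut impl DbHandle) -> Result<Self, Error> {
        let mut locks = Vec::new();
        let setup = Self::setup(&mut locks); // 1. collect lock targets
        let skeleton_key = db.lock_all(locks).await?; // 2. acquire everything at once
        setup(&skeleton_key) // 3. verify into typed receipts
    }

    pub fn setup(locks: &mut Vec<LockTargetId>) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
        let version = crate::db::DatabaseModel::new()
            .server_info()
            .version()
            .make_locker(LockType::Read)
            .add_to_keys(locks);
        move |skeleton_key| {
            Ok(Self {
                version: version.verify(skeleton_key)?,
            })
        }
    }
}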
#[command(
subcommands(self(configure_impl(async)), configure_dry),
display(display_none)
@@ -493,11 +637,14 @@ pub async fn configure_impl(
(pkg_id, dep_id): (PackageId, PackageId),
) -> Result<(), Error> {
let mut db = ctx.db.handle();
let receipts = DependencyConfigReceipts::new(&mut db, &pkg_id, &dep_id).await?;
let ConfigDryRes {
old_config: _,
new_config,
spec: _,
} = configure_logic(ctx.clone(), &mut db, (pkg_id, dep_id.clone())).await?;
} = configure_logic(ctx.clone(), &mut db, (pkg_id, dep_id.clone()), &receipts).await?;

let locks = &receipts.config;
Ok(crate::config::configure(
&ctx,
&mut db,
@@ -507,6 +654,7 @@ pub async fn configure_impl(
false,
&mut BTreeMap::new(),
&mut BTreeMap::new(),
locks,
)
.await?)
}
@@ -526,67 +674,25 @@ pub async fn configure_dry(
#[parent_data] (pkg_id, dependency_id): (PackageId, PackageId),
) -> Result<ConfigDryRes, Error> {
let mut db = ctx.db.handle();
configure_logic(ctx, &mut db, (pkg_id, dependency_id)).await
let receipts = DependencyConfigReceipts::new(&mut db, &pkg_id, &dependency_id).await?;
configure_logic(ctx, &mut db, (pkg_id, dependency_id), &receipts).await
}

pub async fn configure_logic(
ctx: RpcContext,
db: &mut PatchDbHandle,
(pkg_id, dependency_id): (PackageId, PackageId),
receipts: &DependencyConfigReceipts,
) -> Result<ConfigDryRes, Error> {
crate::db::DatabaseModel::new()
.package_data()
.lock(db, LockType::Read)
.await?;
let pkg_model = crate::db::DatabaseModel::new()
.package_data()
.idx_model(&pkg_id)
.and_then(|m| m.installed())
.expect(db)
.await
.with_kind(crate::ErrorKind::NotFound)?;
let pkg_version = pkg_model.clone().manifest().version().get(db, true).await?;
let pkg_volumes = pkg_model.clone().manifest().volumes().get(db, true).await?;
let dependency_model = crate::db::DatabaseModel::new()
.package_data()
.idx_model(&dependency_id)
.and_then(|m| m.installed())
.expect(db)
.await
.with_kind(crate::ErrorKind::NotFound)?;
let dependency_config_action = dependency_model
.clone()
.manifest()
.config()
.get(db, true)
.await?
.to_owned()
.ok_or_else(|| {
Error::new(
eyre!("{} has no config", dependency_id),
crate::ErrorKind::NotFound,
)
})?;
let dependency_version = dependency_model
.clone()
.manifest()
.version()
.get(db, true)
.await?;
let dependency_volumes = dependency_model
.clone()
.manifest()
.volumes()
.get(db, true)
.await?;
let dependencies = pkg_model
.clone()
.manifest()
.dependencies()
.get(db, true)
.await?;
let pkg_version = receipts.package_version.get(db).await?;
let pkg_volumes = receipts.package_volumes.get(db).await?;
let dependency_config_action = receipts.dependency_config_action.get(db).await?;
let dependency_version = receipts.dependency_version.get(db).await?;
let dependency_volumes = receipts.dependency_volumes.get(db).await?;
let dependencies = receipts.dependencies.get(db).await?;

let dependency = dependencies
.0
.get(&dependency_id)
.ok_or_else(|| {
Error::new(
@@ -617,8 +723,8 @@ pub async fn configure_logic(
.get(
&ctx,
&dependency_id,
&*dependency_version,
&*dependency_volumes,
&dependency_version,
&dependency_volumes,
)
.await?;

@@ -640,6 +746,7 @@ pub async fn configure_logic(
&pkg_volumes,
Some(&old_config),
None,
ProcedureName::AutoConfig(dependency_id.clone()),
)
.await?
.map_err(|e| Error::new(eyre!("{}", e.1), crate::ErrorKind::AutoConfigure))?;
@@ -650,29 +757,22 @@ pub async fn configure_logic(
spec,
})
}

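// Note: the configure_logic hunk above is the clearest before/after of the refactor.
// Six separate tree walks, each re-traversing and point-locking the model
// (`pkg_model.clone().manifest().version().get(db, true)` and friends), collapse into
// reads through receipts whose locks were all taken up front:

// before: traverse and lock per read
let pkg_version = pkg_model.clone().manifest().version().get(db, true).await?;
// after: the path was already locked once in DependencyConfigReceipts::new
let pkg_version = receipts.package_version.get(db).await?;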
#[instrument(skip(db, current_dependencies))]
pub async fn add_dependent_to_current_dependents_lists<
'a,
Db: DbHandle,
I: IntoIterator<Item = (&'a PackageId, &'a CurrentDependencyInfo)>,
>(
#[instrument(skip(db, current_dependencies, current_dependent_receipt))]
pub async fn add_dependent_to_current_dependents_lists<'a, Db: DbHandle>(
db: &mut Db,
dependent_id: &PackageId,
current_dependencies: I,
current_dependencies: &CurrentDependencies,
current_dependent_receipt: &LockReceipt<CurrentDependents, String>,
) -> Result<(), Error> {
for (dependency, dep_info) in current_dependencies {
if let Some(dependency_model) = crate::db::DatabaseModel::new()
.package_data()
.idx_model(&dependency)
.and_then(|pkg| pkg.installed())
.check(db)
.await?
for (dependency, dep_info) in &current_dependencies.0 {
if let Some(mut dependency_dependents) =
current_dependent_receipt.get(db, dependency).await?
{
dependency_model
.current_dependents()
.idx_model(dependent_id)
.put(db, &dep_info)
dependency_dependents
.0
.insert(dependent_id.clone(), dep_info.clone());
current_dependent_receipt
.set(db, dependency_dependents, dependency)
.await?;
}
}
@@ -696,10 +796,11 @@ impl DependencyErrors {
ctx: &RpcContext,
db: &mut Db,
manifest: &Manifest,
current_dependencies: &BTreeMap<PackageId, CurrentDependencyInfo>,
current_dependencies: &CurrentDependencies,
receipts: &TryHealReceipts,
) -> Result<DependencyErrors, Error> {
let mut res = BTreeMap::new();
for (dependency_id, info) in current_dependencies.keys().filter_map(|dependency_id| {
for (dependency_id, info) in current_dependencies.0.keys().filter_map(|dependency_id| {
manifest
.dependencies
.0
@@ -707,7 +808,7 @@ impl DependencyErrors {
.map(|info| (dependency_id, info))
}) {
if let Err(e) = info
.satisfied(ctx, db, dependency_id, None, &manifest.id)
.satisfied(ctx, db, dependency_id, None, &manifest.id, receipts)
.await?
{
res.insert(dependency_id.clone(), e);
@@ -735,49 +836,86 @@ pub async fn break_all_dependents_transitive<'a, Db: DbHandle>(
id: &'a PackageId,
error: DependencyError,
breakages: &'a mut BTreeMap<PackageId, TaggedDependencyError>,
receipts: &'a BreakTransitiveReceipts,
) -> Result<(), Error> {
for dependent in crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.and_then(|m| m.installed())
.expect(db)
for dependent in receipts
.current_dependents
.get(db, id)
.await?
.current_dependents()
.keys(db, true)
.await?
.into_iter()
.filter(|dependent| id != dependent)
.iter()
.flat_map(|x| x.0.keys())
.filter(|dependent| id != *dependent)
{
break_transitive(db, &dependent, id, error.clone(), breakages).await?;
break_transitive(db, dependent, id, error.clone(), breakages, receipts).await?;
}
Ok(())
}

#[instrument(skip(db))]
#[derive(Clone)]
pub struct BreakTransitiveReceipts {
pub dependency_receipt: DependencyReceipt,
dependency_errors: LockReceipt<DependencyErrors, String>,
current_dependents: LockReceipt<CurrentDependents, String>,
}

impl BreakTransitiveReceipts {
pub async fn new(db: &'_ mut impl DbHandle) -> Result<Self, Error> {
let mut locks = Vec::new();

let setup = Self::setup(&mut locks);
Ok(setup(&db.lock_all(locks).await?)?)
}

pub fn setup(locks: &mut Vec<LockTargetId>) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
let dependency_receipt = DependencyReceipt::setup(locks);
let dependency_errors = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.status().dependency_errors())
.make_locker(LockType::Write)
.add_to_keys(locks);
let current_dependents = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.current_dependents())
.make_locker(LockType::Exist)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
dependency_receipt: dependency_receipt(skeleton_key)?,
dependency_errors: dependency_errors.verify(skeleton_key)?,
current_dependents: current_dependents.verify(skeleton_key)?,
})
}
}
}

#[instrument(skip(db, receipts))]
pub fn break_transitive<'a, Db: DbHandle>(
db: &'a mut Db,
id: &'a PackageId,
dependency: &'a PackageId,
error: DependencyError,
breakages: &'a mut BTreeMap<PackageId, TaggedDependencyError>,
receipts: &'a BreakTransitiveReceipts,
) -> BoxFuture<'a, Result<(), Error>> {
async move {
let mut tx = db.begin().await?;
let model = crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.and_then(|m| m.installed())
.expect(&mut tx)
.await?;
let mut status = model.clone().status().get_mut(&mut tx).await?;
let mut dependency_errors = receipts
.dependency_errors
.get(&mut tx, id)
.await?
.ok_or_else(not_found)?;

let old = status.dependency_errors.0.remove(dependency);
let old = dependency_errors.0.remove(dependency);
let newly_broken = if let Some(e) = &old {
error.cmp_priority(&e) == Ordering::Greater
} else {
true
};
status.dependency_errors.0.insert(
dependency_errors.0.insert(
dependency.clone(),
if let Some(old) = old {
old.merge_with(error.clone())
@@ -793,12 +931,25 @@ pub fn break_transitive<'a, Db: DbHandle>(
error: error.clone(),
},
);
status.save(&mut tx).await?;
receipts
.dependency_errors
.set(&mut tx, dependency_errors, id)
.await?;

tx.save().await?;
break_all_dependents_transitive(db, id, DependencyError::Transitive, breakages).await?;
break_all_dependents_transitive(
db,
id,
DependencyError::Transitive,
breakages,
receipts,
)
.await?;
} else {
status.save(&mut tx).await?;
receipts
.dependency_errors
.set(&mut tx, dependency_errors, id)
.await?;

tx.save().await?;
}
@@ -808,68 +959,52 @@ pub fn break_transitive<'a, Db: DbHandle>(
.boxed()
}

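// Note: break_transitive (and heal_transitive below) return BoxFuture instead of being
// written as `async fn` because they recurse through break_all_dependents_transitive /
// heal_all_dependents_transitive, and a directly recursive async fn would have an
// infinitely sized future type. Boxing breaks the cycle; the same shape in miniature:

use futures::future::{BoxFuture, FutureExt};

fn visit(depth: u32) -> BoxFuture<'static, ()> {
    async move {
        if depth > 0 {
            visit(depth - 1).await; // recursion is fine behind a Box
        }
    }
    .boxed()
}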
#[instrument(skip(ctx, db))]
#[instrument(skip(ctx, db, locks))]
pub async fn heal_all_dependents_transitive<'a, Db: DbHandle>(
ctx: &'a RpcContext,
db: &'a mut Db,
id: &'a PackageId,
locks: &'a DependencyReceipt,
) -> Result<(), Error> {
for dependent in crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.and_then(|m| m.installed())
.expect(db)
let dependents = locks
.current_dependents
.get(db, id)
.await?
.current_dependents()
.keys(db, true)
.await?
.into_iter()
.filter(|dependent| id != dependent)
{
heal_transitive(ctx, db, &dependent, id).await?;
.ok_or_else(not_found)?;
for dependent in dependents.0.keys().filter(|dependent| id != *dependent) {
heal_transitive(ctx, db, dependent, id, locks).await?;
}
Ok(())
}

#[instrument(skip(ctx, db))]
#[instrument(skip(ctx, db, receipts))]
pub fn heal_transitive<'a, Db: DbHandle>(
ctx: &'a RpcContext,
db: &'a mut Db,
id: &'a PackageId,
dependency: &'a PackageId,
receipts: &'a DependencyReceipt,
) -> BoxFuture<'a, Result<(), Error>> {
async move {
let mut tx = db.begin().await?;
let model = crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.and_then(|m| m.installed())
.expect(&mut tx)
.await?;
let mut status = model.clone().status().get_mut(&mut tx).await?;
let mut status = receipts.status.get(db, id).await?.ok_or_else(not_found)?;

let old = status.dependency_errors.0.remove(dependency);

if let Some(old) = old {
let info = model
.manifest()
.dependencies()
.idx_model(dependency)
.expect(&mut tx)
let info = receipts
.dependency
.get(db, (id, dependency))
.await?
.get(&mut tx, true)
.await?;
.ok_or_else(not_found)?;
if let Some(new) = old
.try_heal(ctx, &mut tx, id, dependency, None, &*info)
.try_heal(ctx, db, id, dependency, None, &info, &receipts.try_heal)
.await?
{
status.dependency_errors.0.insert(dependency.clone(), new);
status.save(&mut tx).await?;
tx.save().await?;
receipts.status.set(db, status, id).await?;
} else {
status.save(&mut tx).await?;
tx.save().await?;
heal_all_dependents_transitive(ctx, db, id).await?;
receipts.status.set(db, status, id).await?;
heal_all_dependents_transitive(ctx, db, id, receipts).await?;
}
}

@@ -881,11 +1016,12 @@ pub fn heal_transitive<'a, Db: DbHandle>(
pub async fn reconfigure_dependents_with_live_pointers(
ctx: &RpcContext,
mut tx: impl DbHandle,
receipts: &ConfigReceipts,
pde: &InstalledPackageDataEntry,
) -> Result<(), Error> {
let dependents = &pde.current_dependents;
let me = &pde.manifest.id;
for (dependent_id, dependency_info) in dependents {
for (dependent_id, dependency_info) in &dependents.0 {
if dependency_info.pointers.iter().any(|ptr| match ptr {
// dependency id matches the package being uninstalled
PackagePointerSpec::TorAddress(ptr) => &ptr.package_id == me && dependent_id != me,
@@ -903,9 +1039,60 @@ pub async fn reconfigure_dependents_with_live_pointers(
false,
&mut BTreeMap::new(),
&mut BTreeMap::new(),
receipts,
)
.await?;
}
}
Ok(())
}

#[derive(Clone)]
pub struct DependencyReceipt {
pub try_heal: TryHealReceipts,
current_dependents: LockReceipt<CurrentDependents, String>,
status: LockReceipt<Status, String>,
dependency: LockReceipt<DepInfo, (String, String)>,
}

impl DependencyReceipt {
pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
let mut locks = Vec::new();

let setup = Self::setup(&mut locks);
Ok(setup(&db.lock_all(locks).await?)?)
}

pub fn setup(locks: &mut Vec<LockTargetId>) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
let try_heal = TryHealReceipts::setup(locks);
let dependency = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.manifest().dependencies().star())
.make_locker(LockType::Read)
.add_to_keys(locks);
let current_dependents = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.current_dependents())
.make_locker(LockType::Write)
.add_to_keys(locks);
let status = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.status())
.make_locker(LockType::Write)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
try_heal: try_heal(skeleton_key)?,
current_dependents: current_dependents.verify(skeleton_key)?,
status: status.verify(skeleton_key)?,
dependency: dependency.verify(skeleton_key)?,
})
}
}
}

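// Note: DependencyReceipt also shows how receipts compose. A nested receipt
// (TryHealReceipts) adds its lock targets to the same `locks` vector via
// `setup(locks)`, and its constructor closure is later called with the same skeleton
// key as the outer struct's own lockers, so one lock_all covers the whole tree.
// Shape only, with hypothetical Inner/Outer names:

pub fn setup(locks: &mut Vec<LockTargetId>) -> impl FnOnce(&Verifier) -> Result<Outer, Error> {
    let inner = InnerReceipts::setup(locks); // child contributes its targets first
    let status = crate::db::DatabaseModel::new()
        .package_data()
        .star()
        .installed()
        .map(|x| x.status())
        .make_locker(LockType::Write)
        .add_to_keys(locks);
    move |skeleton_key| {
        Ok(Outer {
            inner: inner(skeleton_key)?, // verified against the shared key
            status: status.verify(skeleton_key)?,
        })
    }
}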
@@ -2,6 +2,7 @@ use std::fs::File;
use std::io::Write;
use std::path::Path;

use ed25519::pkcs8::EncodePrivateKey;
use ed25519_dalek::Keypair;
use rpc_toolkit::command;
use tracing::instrument;
@@ -22,8 +23,17 @@ pub fn init(#[context] ctx: SdkContext) -> Result<(), Error> {
tracing::info!("Generating new developer key...");
let keypair = Keypair::generate(&mut rand::thread_rng());
tracing::info!("Writing key to {}", ctx.developer_key_path.display());
let keypair_bytes = ed25519::KeypairBytes {
secret_key: keypair.secret.to_bytes(),
public_key: Some(keypair.public.to_bytes()),
};
let mut dev_key_file = File::create(&ctx.developer_key_path)?;
dev_key_file.write_all(&keypair.to_bytes())?;
dev_key_file.write_all(
keypair_bytes
.to_pkcs8_pem(base64ct::LineEnding::default())
.with_kind(crate::ErrorKind::Pem)?
.as_bytes(),
)?;
dev_key_file.sync_all()?;
}
Ok(())

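// Note: the hunk above changes the developer key on disk from raw ed25519 keypair
// bytes to a standard PKCS#8 PEM document (hence the new ErrorKind::Pem later in this
// changeset). The conversion in isolation, assuming the crate versions implied by the
// diff (ed25519-dalek 1.x and the ed25519 crate with its pkcs8 feature enabled):

use ed25519::pkcs8::EncodePrivateKey;
use ed25519_dalek::Keypair;

fn keypair_to_pkcs8_pem(keypair: &Keypair) -> Result<String, ed25519::pkcs8::Error> {
    let keypair_bytes = ed25519::KeypairBytes {
        secret_key: keypair.secret.to_bytes(),
        public_key: Some(keypair.public.to_bytes()),
    };
    // to_pkcs8_pem returns a Zeroizing<String>; copy it out for the caller
    Ok(keypair_bytes
        .to_pkcs8_pem(base64ct::LineEnding::default())?
        .to_string())
}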
@@ -7,7 +7,7 @@ use futures::FutureExt;
use tokio::process::Command;
use tracing::instrument;

use crate::{Error, ResultExt};
use crate::Error;

#[derive(Debug, Clone, Copy)]
#[must_use]

@@ -3,7 +3,7 @@ use std::path::Path;

use async_trait::async_trait;
use digest::generic_array::GenericArray;
use digest::Digest;
use digest::{Digest, OutputSizeUser};
use serde::{Deserialize, Serialize};
use sha2::Sha256;

@@ -45,7 +45,9 @@ impl<LogicalName: AsRef<Path> + Send + Sync> FileSystem for BlockDev<LogicalName
) -> Result<(), Error> {
mount(self.logicalname.as_ref(), mountpoint, mount_type).await
}
async fn source_hash(&self) -> Result<GenericArray<u8, <Sha256 as Digest>::OutputSize>, Error> {
async fn source_hash(
&self,
) -> Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error> {
let mut sha = Sha256::new();
sha.update("BlockDev");
sha.update(

@@ -4,7 +4,7 @@ use std::path::{Path, PathBuf};

use async_trait::async_trait;
use digest::generic_array::GenericArray;
use digest::Digest;
use digest::{Digest, OutputSizeUser};
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use tokio::process::Command;
@@ -18,7 +18,7 @@ use crate::Error;
async fn resolve_hostname(hostname: &str) -> Result<IpAddr, Error> {
#[cfg(feature = "avahi")]
if hostname.ends_with(".local") {
return Ok(crate::net::mdns::resolve_mdns(hostname).await?);
return Ok(IpAddr::V4(crate::net::mdns::resolve_mdns(hostname).await?));
}
Ok(String::from_utf8(
Command::new("nmblookup")
@@ -93,7 +93,9 @@ impl FileSystem for Cifs {
)
.await
}
async fn source_hash(&self) -> Result<GenericArray<u8, <Sha256 as Digest>::OutputSize>, Error> {
async fn source_hash(
&self,
) -> Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error> {
let mut sha = Sha256::new();
sha.update("Cifs");
sha.update(self.hostname.as_bytes());

@@ -4,7 +4,7 @@ use std::path::Path;
use async_trait::async_trait;
use color_eyre::eyre::eyre;
use digest::generic_array::GenericArray;
use digest::Digest;
use digest::{Digest, OutputSizeUser};
use sha2::Sha256;
use tokio::io::{AsyncReadExt, AsyncWriteExt};

@@ -63,7 +63,9 @@ impl<EncryptedDir: AsRef<Path> + Send + Sync, Key: AsRef<str> + Send + Sync> Fil
) -> Result<(), Error> {
mount_ecryptfs(self.encrypted_dir.as_ref(), mountpoint, self.key.as_ref()).await
}
async fn source_hash(&self) -> Result<GenericArray<u8, <Sha256 as Digest>::OutputSize>, Error> {
async fn source_hash(
&self,
) -> Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error> {
let mut sha = Sha256::new();
sha.update("EcryptFS");
sha.update(

@@ -2,7 +2,7 @@ use std::path::Path;

use async_trait::async_trait;
use digest::generic_array::GenericArray;
use digest::Digest;
use digest::{Digest, OutputSizeUser};
use sha2::Sha256;

use super::{FileSystem, MountType, ReadOnly};
@@ -41,7 +41,9 @@ impl<S: AsRef<str> + Send + Sync> FileSystem for Label<S> {
) -> Result<(), Error> {
mount_label(self.label.as_ref(), mountpoint, mount_type).await
}
async fn source_hash(&self) -> Result<GenericArray<u8, <Sha256 as Digest>::OutputSize>, Error> {
async fn source_hash(
&self,
) -> Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error> {
let mut sha = Sha256::new();
sha.update("Label");
sha.update(self.label.as_ref().as_bytes());

@@ -2,7 +2,7 @@ use std::path::Path;

use async_trait::async_trait;
use digest::generic_array::GenericArray;
use digest::Digest;
use digest::OutputSizeUser;
use sha2::Sha256;

use crate::Error;
@@ -27,5 +27,7 @@ pub trait FileSystem {
mountpoint: P,
mount_type: MountType,
) -> Result<(), Error>;
async fn source_hash(&self) -> Result<GenericArray<u8, <Sha256 as Digest>::OutputSize>, Error>;
async fn source_hash(
&self,
) -> Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error>;
}

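// Note: the repeated source_hash signature change above tracks the digest crate's
// 0.9 -> 0.10 migration, in which the OutputSize associated type moved from Digest to
// the new OutputSizeUser trait. The return value is the same fixed-size array, just
// named through the new trait:

use digest::generic_array::GenericArray;
use digest::{Digest, OutputSizeUser};
use sha2::Sha256;

fn source_hash_of(parts: &[&[u8]]) -> GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize> {
    let mut sha = Sha256::new();
    for part in parts {
        sha.update(part); // feed each identifying component into the hash
    }
    sha.finalize()
}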
@@ -1,6 +1,7 @@
use std::fmt::Display;

use color_eyre::eyre::eyre;
use models::InvalidId;
use patch_db::Revision;
use rpc_toolkit::yajrc::RpcError;

@@ -30,7 +31,7 @@ pub enum ErrorKind {
InvalidOnionAddress = 22,
Pack = 23,
ValidateS9pk = 24,
DiskCorrupted = 25,
DiskCorrupted = 25, // Remove
Tor = 26,
ConfigGen = 27,
ParseNumber = 28,
@@ -64,6 +65,8 @@ pub enum ErrorKind {
InvalidBackupTargetId = 56,
ProductKeyMismatch = 57,
LanPortConflict = 58,
Javascript = 59,
Pem = 60,
}
impl ErrorKind {
pub fn as_str(&self) -> &'static str {
@@ -126,7 +129,9 @@ impl ErrorKind {
Incoherent => "Incoherent",
InvalidBackupTargetId => "Invalid Backup Target ID",
ProductKeyMismatch => "Incompatible Product Keys",
LanPortConflict => "Incompatible LAN port configuration",
LanPortConflict => "Incompatible LAN Port Configuration",
Javascript => "Javascript Engine Error",
Pem => "PEM Encoding Error",
}
}
}
@@ -142,6 +147,7 @@ pub struct Error {
pub kind: ErrorKind,
pub revision: Option<Revision>,
}

impl Display for Error {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}: {}", self.kind.as_str(), self.source)
@@ -156,6 +162,11 @@ impl Error {
}
}
}
impl From<InvalidId> for Error {
fn from(err: InvalidId) -> Self {
Error::new(err, crate::error::ErrorKind::InvalidPackageId)
}
}
impl From<std::io::Error> for Error {
fn from(e: std::io::Error) -> Self {
Error::new(e, ErrorKind::Filesystem)

@@ -1,142 +1,13 @@
use std::borrow::{Borrow, Cow};
use std::fmt::Debug;
use std::str::FromStr;

use serde::{Deserialize, Deserializer, Serialize, Serializer};
use serde::{Deserialize, Deserializer, Serialize};

use crate::util::Version;
use crate::Error;

pub const SYSTEM_ID: Id<&'static str> = Id("x_system");
pub use models::{Id, InvalidId, IdUnchecked, SYSTEM_ID};

#[derive(Debug, thiserror::Error)]
#[error("Invalid ID")]
pub struct InvalidId;
impl From<InvalidId> for Error {
fn from(err: InvalidId) -> Self {
Error::new(err, crate::error::ErrorKind::InvalidPackageId)
}
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub struct IdUnchecked<S: AsRef<str>>(pub S);
impl<'de> Deserialize<'de> for IdUnchecked<Cow<'de, str>> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
struct Visitor;
impl<'de> serde::de::Visitor<'de> for Visitor {
type Value = IdUnchecked<Cow<'de, str>>;
fn expecting(&self, formatter: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(formatter, "a valid ID")
}
fn visit_str<E>(self, v: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(IdUnchecked(Cow::Owned(v.to_owned())))
}
fn visit_string<E>(self, v: String) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(IdUnchecked(Cow::Owned(v)))
}
fn visit_borrowed_str<E>(self, v: &'de str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
Ok(IdUnchecked(Cow::Borrowed(v)))
}
}
deserializer.deserialize_any(Visitor)
}
}
impl<'de> Deserialize<'de> for IdUnchecked<String> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Ok(IdUnchecked(String::deserialize(deserializer)?))
}
}
impl<'de> Deserialize<'de> for IdUnchecked<&'de str> {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Ok(IdUnchecked(<&'de str>::deserialize(deserializer)?))
}
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Id<S: AsRef<str> = String>(S);
impl<S: AsRef<str>> Id<S> {
pub fn try_from(value: S) -> Result<Self, InvalidId> {
if value
.as_ref()
.chars()
.all(|c| c.is_ascii_lowercase() || c == '-')
{
Ok(Id(value))
} else {
Err(InvalidId)
}
}
}
impl<'a> Id<&'a str> {
pub fn owned(&self) -> Id {
Id(self.0.to_owned())
}
}
impl From<Id> for String {
fn from(value: Id) -> Self {
value.0
}
}
impl<S: AsRef<str>> std::ops::Deref for Id<S> {
type Target = S;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<S: AsRef<str>> std::fmt::Display for Id<S> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.0.as_ref())
}
}
impl<S: AsRef<str>> AsRef<str> for Id<S> {
fn as_ref(&self) -> &str {
self.0.as_ref()
}
}
impl<S: AsRef<str>> Borrow<str> for Id<S> {
fn borrow(&self) -> &str {
self.0.as_ref()
}
}
impl<'de, S> Deserialize<'de> for Id<S>
where
S: AsRef<str>,
IdUnchecked<S>: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
let unchecked: IdUnchecked<S> = Deserialize::deserialize(deserializer)?;
Id::try_from(unchecked.0).map_err(serde::de::Error::custom)
}
}
impl<S: AsRef<str>> Serialize for Id<S> {
fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
where
Ser: Serializer,
{
serializer.serialize_str(self.as_ref())
}
}

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)]
pub struct ImageId<S: AsRef<str> = String>(Id<S>);

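// Note: the hunk above deletes the whole local Id/IdUnchecked implementation in favor
// of re-exporting it from the shared `models` crate (which is also why error.rs now
// imports InvalidId from `models`). The validation rule itself is unchanged and small;
// as the deleted try_from shows, an id is valid when every character is an ASCII
// lowercase letter or a hyphen:

fn is_valid_id(value: &str) -> bool {
    value.chars().all(|c| c.is_ascii_lowercase() || c == '-')
}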
@@ -1,5 +1,6 @@
use std::time::Duration;

use patch_db::{DbHandle, LockReceipt, LockType};
use tokio::process::Command;

use crate::context::rpc::RpcContextConfig;
@@ -23,6 +24,48 @@ pub async fn check_time_is_synchronized() -> Result<bool, Error> {
== "NTPSynchronized=yes")
}

pub struct InitReceipts {
pub server_version: LockReceipt<crate::util::Version, ()>,
pub version_range: LockReceipt<emver::VersionRange, ()>,
pub last_wifi_region: LockReceipt<Option<isocountry::CountryCode>, ()>,
pub status_info: LockReceipt<ServerStatus, ()>,
}
impl InitReceipts {
pub async fn new(db: &mut impl DbHandle) -> Result<Self, Error> {
let mut locks = Vec::new();

let server_version = crate::db::DatabaseModel::new()
.server_info()
.version()
.make_locker(LockType::Write)
.add_to_keys(&mut locks);
let version_range = crate::db::DatabaseModel::new()
.server_info()
.eos_version_compat()
.make_locker(LockType::Write)
.add_to_keys(&mut locks);
let last_wifi_region = crate::db::DatabaseModel::new()
.server_info()
.last_wifi_region()
.make_locker(LockType::Write)
.add_to_keys(&mut locks);
let status_info = crate::db::DatabaseModel::new()
.server_info()
.status_info()
.into_model()
.make_locker(LockType::Write)
.add_to_keys(&mut locks);

let skeleton_key = db.lock_all(locks).await?;
Ok(Self {
server_version: server_version.verify(&skeleton_key)?,
version_range: version_range.verify(&skeleton_key)?,
status_info: status_info.verify(&skeleton_key)?,
last_wifi_region: last_wifi_region.verify(&skeleton_key)?,
})
}
}

pub async fn init(cfg: &RpcContextConfig, product_key: &str) -> Result<(), Error> {
let should_rebuild = tokio::fs::metadata(SYSTEM_REBUILD_PATH).await.is_ok();
let secret_store = cfg.secret_store().await?;
@@ -82,38 +125,30 @@ pub async fn init(cfg: &RpcContextConfig, product_key: &str) -> Result<(), Error
tracing::info!("Loaded Package Docker Images");
}

crate::ssh::sync_keys_from_db(&secret_store, "/root/.ssh/authorized_keys").await?;
crate::ssh::sync_keys_from_db(&secret_store, "/home/start9/.ssh/authorized_keys").await?;
tracing::info!("Synced SSH Keys");
let db = cfg.db(&secret_store, product_key).await?;

let mut handle = db.handle();
let receipts = InitReceipts::new(&mut handle).await?;

crate::net::wifi::synchronize_wpa_supplicant_conf(
&cfg.datadir().join("main"),
&*crate::db::DatabaseModel::new()
.server_info()
.last_wifi_region()
.get(&mut handle, false)
.await
.map_err(|_e| {
Error::new(
color_eyre::eyre::eyre!("Could not find the last wifi region"),
crate::ErrorKind::NotFound,
)
})?,
&receipts.last_wifi_region.get(&mut handle).await?,
)
.await?;
tracing::info!("Synchronized wpa_supplicant.conf");
let mut info = crate::db::DatabaseModel::new()
.server_info()
.get_mut(&mut handle)
receipts
.status_info
.set(
&mut handle,
ServerStatus {
updated: false,
update_progress: None,
backup_progress: None,
},
)
.await?;
info.status_info = ServerStatus {
backing_up: false,
updated: false,
update_progress: None,
};
info.save(&mut handle).await?;

let mut warn_time_not_synced = true;
for _ in 0..60 {
@@ -127,7 +162,7 @@ pub async fn init(cfg: &RpcContextConfig, product_key: &str) -> Result<(), Error
tracing::warn!("Timed out waiting for system time to synchronize");
}

crate::version::init(&mut handle).await?;
crate::version::init(&mut handle, &receipts).await?;

if should_rebuild {
tokio::fs::remove_file(SYSTEM_REBUILD_PATH).await?;

@@ -1,74 +1,96 @@
use std::collections::{BTreeMap, HashMap};
use std::collections::HashMap;

use bollard::image::ListImagesOptions;
use color_eyre::eyre::eyre;
use patch_db::{DbHandle, LockType, PatchDbHandle};
use patch_db::{DbHandle, LockReceipt, LockTargetId, LockType, PatchDbHandle, Verifier};
use sqlx::{Executor, Sqlite};
use tracing::instrument;

use super::{PKG_ARCHIVE_DIR, PKG_DOCKER_DIR};
use crate::config::{not_found, ConfigReceipts};
use crate::context::RpcContext;
use crate::db::model::{CurrentDependencyInfo, InstalledPackageDataEntry, PackageDataEntry};
use crate::dependencies::reconfigure_dependents_with_live_pointers;
use crate::db::model::{
AllPackageData, CurrentDependencies, CurrentDependents, InstalledPackageDataEntry,
PackageDataEntry,
};
use crate::dependencies::{
reconfigure_dependents_with_live_pointers, DependencyErrors, TryHealReceipts,
};
use crate::error::ErrorCollection;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::util::{Apply, Version};
use crate::volume::{asset_dir, script_dir};
use crate::Error;

#[instrument(skip(ctx, db, deps))]
pub async fn update_dependency_errors_of_dependents<
'a,
Db: DbHandle,
I: IntoIterator<Item = &'a PackageId>,
>(
pub struct UpdateDependencyReceipts {
try_heal: TryHealReceipts,
dependency_errors: LockReceipt<DependencyErrors, String>,
manifest: LockReceipt<Manifest, String>,
}
impl UpdateDependencyReceipts {
pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
let mut locks = Vec::new();

let setup = Self::setup(&mut locks);
Ok(setup(&db.lock_all(locks).await?)?)
}

pub fn setup(locks: &mut Vec<LockTargetId>) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
let dependency_errors = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.status().dependency_errors())
.make_locker(LockType::Write)
.add_to_keys(locks);
let manifest = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.manifest())
.make_locker(LockType::Write)
.add_to_keys(locks);
let try_heal = TryHealReceipts::setup(locks);
move |skeleton_key| {
Ok(Self {
dependency_errors: dependency_errors.verify(skeleton_key)?,
manifest: manifest.verify(skeleton_key)?,
try_heal: try_heal(skeleton_key)?,
})
}
}
}

#[instrument(skip(ctx, db, deps, receipts))]
pub async fn update_dependency_errors_of_dependents<'a, Db: DbHandle>(
ctx: &RpcContext,
db: &mut Db,
id: &PackageId,
deps: I,
deps: &CurrentDependents,
receipts: &UpdateDependencyReceipts,
) -> Result<(), Error> {
for dep in deps {
if let Some(man) = &*crate::db::DatabaseModel::new()
.package_data()
.idx_model(&dep)
.and_then(|m| m.installed())
.map::<_, Manifest>(|m| m.manifest())
.get(db, true)
.await?
{
for dep in deps.0.keys() {
if let Some(man) = receipts.manifest.get(db, dep).await? {
if let Err(e) = if let Some(info) = man.dependencies.0.get(id) {
info.satisfied(ctx, db, id, None, dep).await?
info.satisfied(ctx, db, id, None, dep, &receipts.try_heal)
.await?
} else {
Ok(())
} {
let mut errs = crate::db::DatabaseModel::new()
.package_data()
.idx_model(&dep)
.expect(db)
let mut errs = receipts
.dependency_errors
.get(db, dep)
.await?
.installed()
.expect(db)
.await?
.status()
.dependency_errors()
.get_mut(db)
.await?;
.ok_or_else(not_found)?;
errs.0.insert(id.clone(), e);
errs.save(db).await?;
receipts.dependency_errors.set(db, errs, dep).await?
} else {
let mut errs = crate::db::DatabaseModel::new()
.package_data()
.idx_model(&dep)
.expect(db)
let mut errs = receipts
.dependency_errors
.get(db, dep)
.await?
.installed()
.expect(db)
.await?
.status()
.dependency_errors()
.get_mut(db)
.await?;
.ok_or_else(not_found)?;
errs.0.remove(id);
errs.save(db).await?;
receipts.dependency_errors.set(db, errs, dep).await?
}
}
}
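// Note: with receipts, in-place mutation (`.get_mut(db)` ... `.save(db)`) becomes an
// explicit read-modify-write: get an owned copy, mutate it, set it back under the lock
// already held. The round-trip used in both branches above:

let mut errs = receipts
    .dependency_errors
    .get(db, dep)
    .await?
    .ok_or_else(not_found)?;
errs.0.insert(id.clone(), e); // or errs.0.remove(id) in the healthy branch
receipts.dependency_errors.set(db, errs, dep).await?;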
@@ -123,28 +145,66 @@ pub async fn cleanup(ctx: &RpcContext, id: &PackageId, version: &Version) -> Res
.await
.apply(|res| errors.handle(res));
}
let assets_path = asset_dir(&ctx.datadir, id, version);
if tokio::fs::metadata(&assets_path).await.is_ok() {
tokio::fs::remove_dir_all(&assets_path)
.await
.apply(|res| errors.handle(res));
}
let scripts_path = script_dir(&ctx.datadir, id, version);
if tokio::fs::metadata(&scripts_path).await.is_ok() {
tokio::fs::remove_dir_all(&scripts_path)
.await
.apply(|res| errors.handle(res));
}

errors.into_result()
}

#[instrument(skip(ctx, db))]
pub struct CleanupFailedReceipts {
package_data_entry: LockReceipt<PackageDataEntry, String>,
package_entries: LockReceipt<AllPackageData, ()>,
}

impl CleanupFailedReceipts {
pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
let mut locks = Vec::new();

let setup = Self::setup(&mut locks);
Ok(setup(&db.lock_all(locks).await?)?)
}

pub fn setup(locks: &mut Vec<LockTargetId>) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
let package_data_entry = crate::db::DatabaseModel::new()
.package_data()
.star()
.make_locker(LockType::Write)
.add_to_keys(locks);
let package_entries = crate::db::DatabaseModel::new()
.package_data()
.make_locker(LockType::Write)
.add_to_keys(locks);
move |skeleton_key| {
Ok(Self {
package_data_entry: package_data_entry.verify(skeleton_key).unwrap(),
package_entries: package_entries.verify(skeleton_key).unwrap(),
})
}
}
}

#[instrument(skip(ctx, db, receipts))]
pub async fn cleanup_failed<Db: DbHandle>(
ctx: &RpcContext,
db: &mut Db,
id: &PackageId,
receipts: &CleanupFailedReceipts,
) -> Result<(), Error> {
crate::db::DatabaseModel::new()
.package_data()
.lock(db, LockType::Write)
.await?;
let pde = crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.expect(db)
let pde = receipts
.package_data_entry
.get(db, id)
.await?
.get(db, true)
.await?
.into_owned();
.ok_or_else(not_found)?;
if let Some(manifest) = match &pde {
PackageDataEntry::Installing { manifest, .. }
| PackageDataEntry::Restoring { manifest, .. } => Some(manifest),
@@ -173,26 +233,25 @@ pub async fn cleanup_failed<Db: DbHandle>(

match pde {
PackageDataEntry::Installing { .. } | PackageDataEntry::Restoring { .. } => {
crate::db::DatabaseModel::new()
.package_data()
.remove(db, id)
.await?;
let mut entries = receipts.package_entries.get(db).await?;
entries.0.remove(id);
receipts.package_entries.set(db, entries).await?;
}
PackageDataEntry::Updating {
installed,
static_files,
..
} => {
crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.put(
receipts
.package_data_entry
.set(
db,
&PackageDataEntry::Installed {
PackageDataEntry::Installed {
manifest: installed.manifest.clone(),
installed,
static_files,
},
id,
)
.await?;
}
@@ -202,38 +261,74 @@ pub async fn cleanup_failed<Db: DbHandle>(
Ok(())
}

#[instrument(skip(db, current_dependencies))]
pub async fn remove_from_current_dependents_lists<
'a,
Db: DbHandle,
I: IntoIterator<Item = &'a PackageId>,
>(
#[instrument(skip(db, current_dependencies, current_dependent_receipt))]
pub async fn remove_from_current_dependents_lists<'a, Db: DbHandle>(
db: &mut Db,
id: &'a PackageId,
current_dependencies: I,
current_dependencies: &'a CurrentDependencies,
current_dependent_receipt: &LockReceipt<CurrentDependents, String>,
) -> Result<(), Error> {
for dep in current_dependencies.into_iter().chain(std::iter::once(id)) {
if let Some(current_dependents) = crate::db::DatabaseModel::new()
.package_data()
.idx_model(dep)
.and_then(|m| m.installed())
.map::<_, BTreeMap<PackageId, CurrentDependencyInfo>>(|m| m.current_dependents())
.check(db)
.await?
{
if current_dependents
.clone()
.idx_model(id)
.exists(db, true)
.await?
{
current_dependents.remove(db, id).await?
for dep in current_dependencies.0.keys().chain(std::iter::once(id)) {
if let Some(mut current_dependents) = current_dependent_receipt.get(db, dep).await? {
if current_dependents.0.remove(id).is_some() {
current_dependent_receipt
.set(db, current_dependents, dep)
.await?;
}
}
}
Ok(())
}
pub struct UninstallReceipts {
config: ConfigReceipts,
removing: LockReceipt<InstalledPackageDataEntry, ()>,
packages: LockReceipt<AllPackageData, ()>,
current_dependents: LockReceipt<CurrentDependents, String>,
update_dependency_receipts: UpdateDependencyReceipts,
}
impl UninstallReceipts {
pub async fn new<'a>(db: &'a mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
let mut locks = Vec::new();

let setup = Self::setup(&mut locks, id);
Ok(setup(&db.lock_all(locks).await?)?)
}

pub fn setup(
locks: &mut Vec<LockTargetId>,
id: &PackageId,
) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
let config = ConfigReceipts::setup(locks);
let removing = crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.and_then(|pde| pde.removing())
.make_locker(LockType::Write)
.add_to_keys(locks);

let current_dependents = crate::db::DatabaseModel::new()
.package_data()
.star()
.installed()
.map(|x| x.current_dependents())
.make_locker(LockType::Write)
.add_to_keys(locks);
let packages = crate::db::DatabaseModel::new()
.package_data()
.make_locker(LockType::Write)
.add_to_keys(locks);
let update_dependency_receipts = UpdateDependencyReceipts::setup(locks);
move |skeleton_key| {
Ok(Self {
config: config(skeleton_key)?,
removing: removing.verify(skeleton_key)?,
current_dependents: current_dependents.verify(skeleton_key)?,
update_dependency_receipts: update_dependency_receipts(skeleton_key)?,
packages: packages.verify(skeleton_key)?,
})
}
}
}
#[instrument(skip(ctx, secrets, db))]
pub async fn uninstall<Ex>(
ctx: &RpcContext,
@@ -245,44 +340,32 @@ where
for<'a> &'a mut Ex: Executor<'a, Database = Sqlite>,
{
let mut tx = db.begin().await?;
crate::db::DatabaseModel::new()
.package_data()
.lock(&mut tx, LockType::Write)
.await?;
let entry = crate::db::DatabaseModel::new()
.package_data()
.idx_model(id)
.and_then(|pde| pde.removing())
.get(&mut tx, true)
.await?
.into_owned()
.ok_or_else(|| {
Error::new(
eyre!("Package not in removing state: {}", id),
crate::ErrorKind::NotFound,
)
})?;
let receipts = UninstallReceipts::new(&mut tx, id).await?;
let entry = receipts.removing.get(&mut tx).await?;
cleanup(ctx, &entry.manifest.id, &entry.manifest.version).await?;

crate::db::DatabaseModel::new()
.package_data()
.remove(&mut tx, id)
.await?;

let packages = {
let mut packages = receipts.packages.get(&mut tx).await?;
packages.0.remove(id);
packages
};
receipts.packages.set(&mut tx, packages).await?;
// once we have removed the package entry, we can change all the dependent pointers to null
reconfigure_dependents_with_live_pointers(ctx, &mut tx, &entry).await?;
reconfigure_dependents_with_live_pointers(ctx, &mut tx, &receipts.config, &entry).await?;

remove_from_current_dependents_lists(
&mut tx,
&entry.manifest.id,
entry.current_dependencies.keys(),
&entry.current_dependencies,
&receipts.current_dependents,
)
.await?;
update_dependency_errors_of_dependents(
ctx,
&mut tx,
&entry.manifest.id,
entry.current_dependents.keys(),
&entry.current_dependents,
&receipts.update_dependency_receipts,
)
.await?;
let volumes = ctx

@@ -1,4 +1,4 @@
use std::collections::{BTreeMap, BTreeSet};
use std::collections::BTreeMap;
use std::io::SeekFrom;
use std::marker::PhantomData;
use std::path::{Path, PathBuf};
@@ -14,7 +14,7 @@ use futures::{FutureExt, StreamExt, TryStreamExt};
use http::header::CONTENT_LENGTH;
use http::{Request, Response, StatusCode};
use hyper::Body;
use patch_db::{DbHandle, LockType};
use patch_db::{DbHandle, LockReceipt, LockType};
use reqwest::Url;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{command, Context};
@@ -25,16 +25,18 @@ use tokio_stream::wrappers::ReadDirStream;
use tracing::instrument;

use self::cleanup::{cleanup_failed, remove_from_current_dependents_lists};
use crate::config::ConfigReceipts;
use crate::context::{CliContext, RpcContext};
use crate::core::rpc_continuations::{RequestGuid, RpcContinuation};
use crate::db::model::{
CurrentDependencyInfo, InstalledPackageDataEntry, PackageDataEntry, RecoveredPackageInfo,
StaticDependencyInfo, StaticFiles,
CurrentDependencies, CurrentDependencyInfo, CurrentDependents, InstalledPackageDataEntry,
PackageDataEntry, RecoveredPackageInfo, StaticDependencyInfo, StaticFiles,
};
use crate::db::util::WithRevision;
use crate::dependencies::{
add_dependent_to_current_dependents_lists, break_all_dependents_transitive,
reconfigure_dependents_with_live_pointers, BreakageRes, DependencyError, DependencyErrors,
reconfigure_dependents_with_live_pointers, BreakTransitiveReceipts, BreakageRes,
DependencyError, DependencyErrors,
};
use crate::install::cleanup::{cleanup, update_dependency_errors_of_dependents};
use crate::install::progress::{InstallProgress, InstallProgressTracker};
@@ -46,17 +48,17 @@ use crate::util::io::{copy_and_shutdown, response_to_reader};
use crate::util::serde::{display_serializable, IoFormat, Port};
use crate::util::{display_none, AsyncFileExt, Version};
use crate::version::{Current, VersionT};
use crate::volume::asset_dir;
use crate::volume::{asset_dir, script_dir};
use crate::{Error, ErrorKind, ResultExt};

pub mod cleanup;
pub mod progress;
pub mod update;

pub const PKG_ARCHIVE_DIR: &'static str = "package-data/archive";
pub const PKG_PUBLIC_DIR: &'static str = "package-data/public";
pub const PKG_DOCKER_DIR: &'static str = "package-data/docker";
pub const PKG_WASM_DIR: &'static str = "package-data/wasm";
pub const PKG_ARCHIVE_DIR: &str = "package-data/archive";
pub const PKG_PUBLIC_DIR: &str = "package-data/public";
pub const PKG_DOCKER_DIR: &str = "package-data/docker";
pub const PKG_WASM_DIR: &str = "package-data/wasm";

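// Note: dropping the explicit `&'static str` on the four consts above is purely
// cosmetic; in a `const` item an elided `&str` lifetime is already `'static`, so the
// two spellings are equivalent (clippy flags the long form as
// redundant_static_lifetimes):

pub const LONG: &'static str = "package-data/archive";
pub const SHORT: &str = "package-data/archive"; // same type, same lifetime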
#[command(display(display_serializable))]
pub async fn list(#[context] ctx: RpcContext) -> Result<Vec<(PackageId, Version)>, Error> {
@@ -191,7 +193,8 @@ pub async fn install(
Current::new().compat(),
platforms::TARGET_ARCH,
))
.await?,
.await?
.error_for_status()?,
),
&mut File::create(public_dir_path.join("LICENSE.md")).await?,
)
@@ -209,7 +212,8 @@ pub async fn install(
Current::new().compat(),
platforms::TARGET_ARCH,
))
.await?,
.await?
.error_for_status()?,
),
&mut File::create(public_dir_path.join("INSTRUCTIONS.md")).await?,
)
@@ -227,7 +231,8 @@ pub async fn install(
Current::new().compat(),
platforms::TARGET_ARCH,
))
.await?,
.await?
.error_for_status()?,
),
&mut File::create(public_dir_path.join(format!("icon.{}", icon_type))).await?,
)
@@ -329,9 +334,37 @@ pub async fn install(
pub async fn sideload(
#[context] ctx: RpcContext,
#[arg] manifest: Manifest,
#[arg] icon: Option<String>,
) -> Result<RequestGuid, Error> {
let new_ctx = ctx.clone();
let guid = RequestGuid::new();
if let Some(icon) = icon {
use tokio::io::AsyncWriteExt;

let public_dir_path = ctx
.datadir
.join(PKG_PUBLIC_DIR)
.join(&manifest.id)
.join(manifest.version.as_str());
tokio::fs::create_dir_all(&public_dir_path).await?;

let invalid_data_url =
|| Error::new(eyre!("Invalid Icon Data URL"), ErrorKind::InvalidRequest);
let data = icon
.strip_prefix(&format!(
"data:image/{};base64,",
manifest.assets.icon_type()
))
.ok_or_else(&invalid_data_url)?;
let mut icon_file =
File::create(public_dir_path.join(format!("icon.{}", manifest.assets.icon_type())))
.await?;
icon_file
.write_all(&base64::decode(data).with_kind(ErrorKind::InvalidRequest)?)
.await?;
icon_file.sync_all().await?;
}

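// Note: sideload now accepts the icon as a data URL
// ("data:image/<icon-type>;base64,<payload>"); the code above strips the expected
// prefix and base64-decodes the remainder. The same parse in isolation (base64 crate
// pre-0.21, matching the base64::decode call in the diff):

fn parse_icon_data_url(icon: &str, icon_type: &str) -> Option<Vec<u8>> {
    let payload = icon.strip_prefix(&format!("data:image/{};base64,", icon_type))?;
    base64::decode(payload).ok()
}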
let handler = Box::new(|req: Request<Body>| {
async move {
let content_length = match req.headers().get(CONTENT_LENGTH).map(|a| a.to_str()) {
@@ -447,7 +480,7 @@ pub async fn sideload(
});
let cont = RpcContinuation {
created_at: Instant::now(), // TODO
handler: handler,
handler,
};
// gc the map
let mut guard = ctx.rpc_stream_continuations.lock().await;
@@ -477,14 +510,21 @@ async fn cli_install(
let path = PathBuf::from(target);

// inspect manifest no verify
let manifest = crate::inspect::manifest(path.clone(), true, Some(IoFormat::Json)).await?;
let mut reader = S9pkReader::open(&path, false).await?;
let manifest = reader.manifest().await?;
let icon = reader.icon().await?.to_vec().await?;
let icon_str = format!(
"data:image/{};base64,{}",
manifest.assets.icon_type(),
base64::encode(&icon)
);

// rpc call remote sideload
tracing::debug!("calling package.sideload");
let guid = rpc_toolkit::command_helpers::call_remote(
ctx.clone(),
"package.sideload",
serde_json::json!({ "manifest": manifest }),
serde_json::json!({ "manifest": manifest, "icon": icon_str }),
PhantomData::<RequestGuid>,
)
.await?
@@ -495,8 +535,8 @@ async fn cli_install(
let file = tokio::fs::File::open(path).await?;
let content_length = file.metadata().await?.len();
let body = Body::wrap_stream(tokio_util::io::ReaderStream::new(file));
let client = reqwest::Client::new();
let res = client
let res = ctx
.client
.post(format!(
"{}://{}/rest/rpc/{}",
ctx.protocol(),
@@ -562,8 +602,15 @@ pub async fn uninstall_dry(
let mut db = ctx.db.handle();
let mut tx = db.begin().await?;
let mut breakages = BTreeMap::new();
break_all_dependents_transitive(&mut tx, &id, DependencyError::NotInstalled, &mut breakages)
.await?;
let receipts = BreakTransitiveReceipts::new(&mut tx).await?;
break_all_dependents_transitive(
&mut tx,
&id,
DependencyError::NotInstalled,
&mut breakages,
&receipts,
)
.await?;

tx.abort().await?;

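// Note: uninstall_dry computes its breakage report by running the real
// break_all_dependents_transitive inside a transaction and then aborting it, so the
// diagnosis never touches stored state. The dry-run shape:

let mut tx = db.begin().await?;
let receipts = BreakTransitiveReceipts::new(&mut tx).await?;
let mut breakages = BTreeMap::new();
break_all_dependents_transitive(
    &mut tx,
    &id,
    DependencyError::NotInstalled,
    &mut breakages,
    &receipts,
)
.await?;
tx.abort().await?; // discard every write; keep only the in-memory `breakages`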
@@ -678,6 +725,35 @@ pub async fn delete_recovered(
|
||||
})
|
||||
}
|
||||
|
||||
pub struct DownloadInstallReceipts {
|
||||
package_receipts: crate::db::package::PackageReceipts,
|
||||
manifest_receipts: crate::db::package::ManifestReceipts,
|
||||
}
|
||||
|
||||
impl DownloadInstallReceipts {
|
||||
pub async fn new<'a>(db: &'a mut impl DbHandle, id: &PackageId) -> Result<Self, Error> {
|
||||
let mut locks = Vec::new();
|
||||
|
||||
let setup = Self::setup(&mut locks, id);
|
||||
Ok(setup(&db.lock_all(locks).await?)?)
|
||||
}
|
||||
|
||||
pub fn setup(
|
||||
locks: &mut Vec<patch_db::LockTargetId>,
|
||||
id: &PackageId,
|
||||
) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
|
||||
let package_receipts = crate::db::package::PackageReceipts::setup(locks);
|
||||
let manifest_receipts = crate::db::package::ManifestReceipts::setup(locks, id);
|
||||
|
||||
move |skeleton_key| {
|
||||
Ok(Self {
|
||||
package_receipts: package_receipts(skeleton_key)?,
|
||||
manifest_receipts: manifest_receipts(skeleton_key)?,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
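`DownloadInstallReceipts` is the first of several receipt structs this diff introduces, and they all follow the same two-phase shape: `setup` registers every lock target into a shared `Vec`, `lock_all` acquires them in one batch, and the returned closure converts the resulting verifier into typed receipts that later reads and writes must present. A condensed sketch of the pattern — names mirror the code above, but the `bar()` model field is a hypothetical stand-in for a real lock target:

```rust
// Sketch of the receipt pattern used throughout this diff; illustrative,
// not patch_db's API beyond what the surrounding code already shows.
struct FooReceipts {
    bar: patch_db::LockReceipt<Bar, ()>, // `Bar` is hypothetical
}

impl FooReceipts {
    pub async fn new(db: &mut impl patch_db::DbHandle) -> Result<Self, Error> {
        let mut locks = Vec::new();
        let setup = Self::setup(&mut locks); // 1. declare lock targets
        Ok(setup(&db.lock_all(locks).await?)?) // 2. lock once, then verify
    }

    pub fn setup(
        locks: &mut Vec<patch_db::LockTargetId>,
    ) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
        let bar = crate::db::DatabaseModel::new()
            .bar() // hypothetical model accessor
            .make_locker(patch_db::LockType::Write)
            .add_to_keys(locks);
        move |skeleton_key| {
            Ok(Self {
                bar: bar.verify(skeleton_key)?,
            })
        }
    }
}
```

The payoff is that every database path a function touches is locked up front in one batch, instead of being locked ad hoc mid-transaction as in the removed code below.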
#[instrument(skip(ctx, temp_manifest, s9pk))]
|
||||
pub async fn download_install_s9pk(
|
||||
ctx: &RpcContext,
|
||||
@@ -692,14 +768,14 @@ pub async fn download_install_s9pk(
|
||||
if let Err(e) = async {
|
||||
let mut db_handle = ctx.db.handle();
|
||||
let mut tx = db_handle.begin().await?;
|
||||
let receipts = DownloadInstallReceipts::new(&mut tx, &pkg_id).await?;
|
||||
// Build set of existing manifests
|
||||
let mut manifests = Vec::new();
|
||||
for pkg in crate::db::package::get_packages(&mut tx).await? {
|
||||
match crate::db::package::get_manifest(&mut tx, &pkg).await? {
|
||||
Some(m) => {
|
||||
manifests.push(m);
|
||||
}
|
||||
None => {}
|
||||
for pkg in crate::db::package::get_packages(&mut tx, &receipts.package_receipts).await? {
|
||||
if let Some(m) =
|
||||
crate::db::package::get_manifest(&mut tx, &pkg, &receipts.manifest_receipts).await?
|
||||
{
|
||||
manifests.push(m);
|
||||
}
|
||||
}
|
||||
// Build map of current port -> ssl mappings
|
||||
@@ -732,6 +808,7 @@ pub async fn download_install_s9pk(
|
||||
}
|
||||
}
|
||||
}
|
||||
drop(receipts);
|
||||
tx.save().await?;
|
||||
drop(db_handle);
|
||||
|
||||
@@ -792,8 +869,9 @@ pub async fn download_install_s9pk(
|
||||
{
|
||||
let mut handle = ctx.db.handle();
|
||||
let mut tx = handle.begin().await?;
|
||||
let receipts = cleanup::CleanupFailedReceipts::new(&mut tx).await?;
|
||||
|
||||
if let Err(e) = cleanup_failed(&ctx, &mut tx, pkg_id).await {
|
||||
if let Err(e) = cleanup_failed(&ctx, &mut tx, pkg_id, &receipts).await {
|
||||
tracing::error!("Failed to clean up {}@{}: {}", pkg_id, version, e);
|
||||
tracing::debug!("{:?}", e);
|
||||
} else {
|
||||
@@ -805,6 +883,39 @@ pub async fn download_install_s9pk(
|
||||
}
|
||||
}
|
||||
|
||||
pub struct InstallS9Receipts {
|
||||
config: ConfigReceipts,
|
||||
|
||||
recovered_packages: LockReceipt<BTreeMap<PackageId, RecoveredPackageInfo>, ()>,
|
||||
}
|
||||
|
||||
impl InstallS9Receipts {
|
||||
pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
|
||||
let mut locks = Vec::new();
|
||||
|
||||
let setup = Self::setup(&mut locks);
|
||||
Ok(setup(&db.lock_all(locks).await?)?)
|
||||
}
|
||||
|
||||
pub fn setup(
|
||||
locks: &mut Vec<patch_db::LockTargetId>,
|
||||
) -> impl FnOnce(&patch_db::Verifier) -> Result<Self, Error> {
|
||||
let config = ConfigReceipts::setup(locks);
|
||||
|
||||
let recovered_packages = crate::db::DatabaseModel::new()
|
||||
.recovered_packages()
|
||||
.make_locker(LockType::Write)
|
||||
.add_to_keys(locks);
|
||||
|
||||
move |skeleton_key| {
|
||||
Ok(Self {
|
||||
config: config(skeleton_key)?,
|
||||
recovered_packages: recovered_packages.verify(skeleton_key)?,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(ctx, rdr))]
|
||||
pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
|
||||
ctx: &RpcContext,
|
||||
@@ -1046,6 +1157,18 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
|
||||
let mut tar = tokio_tar::Archive::new(rdr.assets().await?);
|
||||
tar.unpack(asset_dir).await?;
|
||||
|
||||
let script_dir = script_dir(&ctx.datadir, pkg_id, version);
|
||||
if tokio::fs::metadata(&script_dir).await.is_err() {
|
||||
tokio::fs::create_dir_all(&script_dir).await?;
|
||||
}
|
||||
if let Some(mut hdl) = rdr.scripts().await? {
|
||||
tokio::io::copy(
|
||||
&mut hdl,
|
||||
&mut File::create(script_dir.join("embassy.js")).await?,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
})
|
||||
.await?;
|
||||
@@ -1082,18 +1205,20 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
|
||||
tracing::info!("Install {}@{}: Created manager", pkg_id, version);
|
||||
|
||||
let static_files = StaticFiles::local(pkg_id, version, manifest.assets.icon_type());
|
||||
let current_dependencies: BTreeMap<_, _> = manifest
|
||||
.dependencies
|
||||
.0
|
||||
.iter()
|
||||
.filter_map(|(id, info)| {
|
||||
if info.requirement.required() {
|
||||
Some((id.clone(), CurrentDependencyInfo::default()))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect();
|
||||
let current_dependencies: CurrentDependencies = CurrentDependencies(
|
||||
manifest
|
||||
.dependencies
|
||||
.0
|
||||
.iter()
|
||||
.filter_map(|(id, info)| {
|
||||
if info.requirement.required() {
|
||||
Some((id.clone(), CurrentDependencyInfo::default()))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect(),
|
||||
);
|
||||
let current_dependents = {
|
||||
let mut deps = BTreeMap::new();
|
||||
for package in crate::db::DatabaseModel::new()
|
||||
@@ -1139,7 +1264,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
|
||||
deps.insert(package, dep);
|
||||
}
|
||||
}
|
||||
deps
|
||||
CurrentDependents(deps)
|
||||
};
|
||||
let mut pde = model
|
||||
.clone()
|
||||
@@ -1183,6 +1308,8 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
|
||||
},
|
||||
);
|
||||
pde.save(&mut tx).await?;
|
||||
let receipts = InstallS9Receipts::new(&mut tx).await?;
|
||||
// UpdateDependencyReceipts
|
||||
let mut dep_errs = model
|
||||
.expect(&mut tx)
|
||||
.await?
|
||||
@@ -1193,7 +1320,14 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
|
||||
.dependency_errors()
|
||||
.get_mut(&mut tx)
|
||||
.await?;
|
||||
*dep_errs = DependencyErrors::init(ctx, &mut tx, &manifest, ¤t_dependencies).await?;
|
||||
*dep_errs = DependencyErrors::init(
|
||||
ctx,
|
||||
&mut tx,
|
||||
&manifest,
|
||||
¤t_dependencies,
|
||||
&receipts.config.try_heal_receipts,
|
||||
)
|
||||
.await?;
|
||||
dep_errs.save(&mut tx).await?;
|
||||
|
||||
if let PackageDataEntry::Updating {
|
||||
@@ -1244,8 +1378,26 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
|
||||
false,
|
||||
&mut BTreeMap::new(),
|
||||
&mut BTreeMap::new(),
|
||||
&receipts.config,
|
||||
)
|
||||
.await?;
|
||||
} else {
|
||||
remove_from_current_dependents_lists(
|
||||
&mut tx,
|
||||
pkg_id,
|
||||
&prev.current_dependencies,
|
||||
&receipts.config.current_dependents,
|
||||
)
|
||||
.await?; // remove previous
|
||||
add_dependent_to_current_dependents_lists(
|
||||
&mut tx,
|
||||
pkg_id,
|
||||
¤t_dependencies,
|
||||
&receipts.config.current_dependents,
|
||||
)
|
||||
.await?; // add new
|
||||
}
|
||||
if configured || manifest.config.is_none() {
|
||||
let mut main_status = crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(pkg_id)
|
||||
@@ -1261,17 +1413,16 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
|
||||
*main_status = prev.status.main;
|
||||
main_status.save(&mut tx).await?;
|
||||
}
|
||||
remove_from_current_dependents_lists(&mut tx, pkg_id, prev.current_dependencies.keys())
|
||||
.await?; // remove previous
|
||||
add_dependent_to_current_dependents_lists(&mut tx, pkg_id, ¤t_dependencies).await?; // add new
|
||||
update_dependency_errors_of_dependents(
|
||||
ctx,
|
||||
&mut tx,
|
||||
pkg_id,
|
||||
current_dependents
|
||||
.keys()
|
||||
.chain(prev.current_dependents.keys())
|
||||
.collect::<BTreeSet<_>>(),
|
||||
&CurrentDependents({
|
||||
let mut current_dependents = current_dependents.0.clone();
|
||||
current_dependents.append(&mut prev.current_dependents.0.clone());
|
||||
current_dependents
|
||||
}),
|
||||
&receipts.config.update_dependency_receipts,
|
||||
)
|
||||
.await?;
|
||||
if &prev.manifest.version != version {
|
||||
@@ -1290,39 +1441,84 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
|
||||
&manifest.volumes,
|
||||
)
|
||||
.await?;
|
||||
add_dependent_to_current_dependents_lists(&mut tx, pkg_id, ¤t_dependencies).await?;
|
||||
update_dependency_errors_of_dependents(ctx, &mut tx, pkg_id, current_dependents.keys())
|
||||
.await?;
|
||||
add_dependent_to_current_dependents_lists(
|
||||
&mut tx,
|
||||
pkg_id,
|
||||
¤t_dependencies,
|
||||
&receipts.config.current_dependents,
|
||||
)
|
||||
.await?;
|
||||
update_dependency_errors_of_dependents(
|
||||
ctx,
|
||||
&mut tx,
|
||||
pkg_id,
|
||||
¤t_dependents,
|
||||
&receipts.config.update_dependency_receipts,
|
||||
)
|
||||
.await?;
|
||||
} else if let Some(recovered) = {
|
||||
// solve taxonomy escalation
|
||||
crate::db::DatabaseModel::new()
|
||||
.recovered_packages()
|
||||
.lock(&mut tx, LockType::Write)
|
||||
.await?;
|
||||
crate::db::DatabaseModel::new()
|
||||
.recovered_packages()
|
||||
.idx_model(pkg_id)
|
||||
.get(&mut tx, true)
|
||||
receipts
|
||||
.recovered_packages
|
||||
.get(&mut tx)
|
||||
.await?
|
||||
.into_owned()
|
||||
.remove(pkg_id)
|
||||
} {
|
||||
handle_recovered_package(recovered, manifest, ctx, pkg_id, version, &mut tx).await?;
|
||||
add_dependent_to_current_dependents_lists(&mut tx, pkg_id, ¤t_dependencies).await?;
|
||||
update_dependency_errors_of_dependents(ctx, &mut tx, pkg_id, current_dependents.keys())
|
||||
.await?;
|
||||
handle_recovered_package(
|
||||
recovered,
|
||||
manifest,
|
||||
ctx,
|
||||
pkg_id,
|
||||
version,
|
||||
&mut tx,
|
||||
&receipts.config,
|
||||
)
|
||||
.await?;
|
||||
add_dependent_to_current_dependents_lists(
|
||||
&mut tx,
|
||||
pkg_id,
|
||||
¤t_dependencies,
|
||||
&receipts.config.current_dependents,
|
||||
)
|
||||
.await?;
|
||||
update_dependency_errors_of_dependents(
|
||||
ctx,
|
||||
&mut tx,
|
||||
pkg_id,
|
||||
¤t_dependents,
|
||||
&receipts.config.update_dependency_receipts,
|
||||
)
|
||||
.await?;
|
||||
} else {
|
||||
add_dependent_to_current_dependents_lists(&mut tx, pkg_id, ¤t_dependencies).await?;
|
||||
update_dependency_errors_of_dependents(ctx, &mut tx, pkg_id, current_dependents.keys())
|
||||
.await?;
|
||||
add_dependent_to_current_dependents_lists(
|
||||
&mut tx,
|
||||
pkg_id,
|
||||
¤t_dependencies,
|
||||
&receipts.config.current_dependents,
|
||||
)
|
||||
.await?;
|
||||
update_dependency_errors_of_dependents(
|
||||
ctx,
|
||||
&mut tx,
|
||||
pkg_id,
|
||||
¤t_dependents,
|
||||
&receipts.config.update_dependency_receipts,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
crate::db::DatabaseModel::new()
|
||||
.recovered_packages()
|
||||
.remove(&mut tx, pkg_id)
|
||||
let recovered_packages = {
|
||||
let mut r = receipts.recovered_packages.get(&mut tx).await?;
|
||||
r.remove(pkg_id);
|
||||
r
|
||||
};
|
||||
receipts
|
||||
.recovered_packages
|
||||
.set(&mut tx, recovered_packages)
|
||||
.await?;
|
||||
|
||||
if let Some(installed) = pde.installed() {
|
||||
reconfigure_dependents_with_live_pointers(ctx, &mut tx, installed).await?;
|
||||
reconfigure_dependents_with_live_pointers(ctx, &mut tx, &receipts.config, installed)
|
||||
.await?;
|
||||
}
|
||||
|
||||
sql_tx.commit().await?;
|
||||
@@ -1333,7 +1529,7 @@ pub async fn install_s9pk<R: AsyncRead + AsyncSeek + Unpin>(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(ctx, tx))]
|
||||
#[instrument(skip(ctx, tx, receipts))]
|
||||
async fn handle_recovered_package(
|
||||
recovered: RecoveredPackageInfo,
|
||||
manifest: Manifest,
|
||||
@@ -1341,6 +1537,7 @@ async fn handle_recovered_package(
|
||||
pkg_id: &PackageId,
|
||||
version: &Version,
|
||||
tx: &mut patch_db::Transaction<&mut patch_db::PatchDbHandle>,
|
||||
receipts: &ConfigReceipts,
|
||||
) -> Result<(), Error> {
|
||||
let configured = if let Some(migration) =
|
||||
manifest
|
||||
@@ -1361,6 +1558,7 @@ async fn handle_recovered_package(
|
||||
false,
|
||||
&mut BTreeMap::new(),
|
||||
&mut BTreeMap::new(),
|
||||
&receipts,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
|
||||
@@ -44,10 +44,14 @@ impl InstallProgress {
|
||||
mut db: Db,
|
||||
) -> Result<(), Error> {
|
||||
while !self.download_complete.load(Ordering::SeqCst) {
|
||||
model.put(&mut db, &self).await?;
|
||||
let mut tx = db.begin().await?;
|
||||
model.put(&mut tx, &self).await?;
|
||||
tx.save().await?;
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
model.put(&mut db, &self).await?;
|
||||
let mut tx = db.begin().await?;
|
||||
model.put(&mut tx, &self).await?;
|
||||
tx.save().await?;
|
||||
Ok(())
|
||||
}
|
||||
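The progress-tracking hunks here change each periodic `model.put(&mut db, ...)` into a begin/put/save cycle, so every flush is committed as its own transaction rather than written through the raw handle. The general shape, as a sketch — `InstallProgressModel` is a stand-in alias for the model type the real signatures use:

```rust
// Sketch: re-persist progress once per second inside a fresh transaction
// until `done` flips, then write one final time so the last state lands.
use std::sync::atomic::{AtomicBool, Ordering};
use std::time::Duration;

async fn flush_until_done<Db: patch_db::DbHandle>(
    progress: &InstallProgress,
    model: &InstallProgressModel, // hypothetical alias for the model type
    mut db: Db,
    done: &AtomicBool,
) -> Result<(), Error> {
    while !done.load(Ordering::SeqCst) {
        let mut tx = db.begin().await?;
        model.put(&mut tx, progress).await?;
        tx.save().await?;
        tokio::time::sleep(Duration::from_secs(1)).await;
    }
    // final flush after the loop exits
    let mut tx = db.begin().await?;
    model.put(&mut tx, progress).await?;
    tx.save().await?;
    Ok(())
}
```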
pub async fn track_download_during<
|
||||
@@ -74,10 +78,14 @@ impl InstallProgress {
|
||||
complete: Arc<AtomicBool>,
|
||||
) -> Result<(), Error> {
|
||||
while !complete.load(Ordering::SeqCst) {
|
||||
model.put(&mut db, &self).await?;
|
||||
let mut tx = db.begin().await?;
|
||||
model.put(&mut tx, &self).await?;
|
||||
tx.save().await?;
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
model.put(&mut db, &self).await?;
|
||||
let mut tx = db.begin().await?;
|
||||
model.put(&mut tx, &self).await?;
|
||||
tx.save().await?;
|
||||
Ok(())
|
||||
}
|
||||
pub async fn track_read_during<
|
||||
|
||||
@@ -1,20 +1,66 @@
|
||||
use std::collections::BTreeMap;
|
||||
|
||||
use patch_db::{DbHandle, LockType};
|
||||
use patch_db::{DbHandle, LockReceipt, LockTargetId, LockType, Verifier};
|
||||
use rpc_toolkit::command;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::config::not_found;
|
||||
use crate::context::RpcContext;
|
||||
use crate::dependencies::{break_transitive, BreakageRes, DependencyError};
|
||||
use crate::db::model::CurrentDependents;
|
||||
use crate::dependencies::{
|
||||
break_transitive, BreakTransitiveReceipts, BreakageRes, DependencyError,
|
||||
};
|
||||
use crate::s9pk::manifest::PackageId;
|
||||
use crate::util::serde::display_serializable;
|
||||
use crate::util::Version;
|
||||
use crate::Error;
|
||||
|
||||
pub struct UpdateReceipts {
|
||||
break_receipts: BreakTransitiveReceipts,
|
||||
current_dependents: LockReceipt<CurrentDependents, String>,
|
||||
dependency: LockReceipt<crate::dependencies::DepInfo, (String, String)>,
|
||||
}
|
||||
|
||||
impl UpdateReceipts {
|
||||
pub async fn new<'a>(db: &'a mut impl DbHandle) -> Result<Self, Error> {
|
||||
let mut locks = Vec::new();
|
||||
|
||||
let setup = Self::setup(&mut locks);
|
||||
Ok(setup(&db.lock_all(locks).await?)?)
|
||||
}
|
||||
|
||||
pub fn setup(locks: &mut Vec<LockTargetId>) -> impl FnOnce(&Verifier) -> Result<Self, Error> {
|
||||
let break_receipts = BreakTransitiveReceipts::setup(locks);
|
||||
let current_dependents = crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.star()
|
||||
.installed()
|
||||
.map(|x| x.current_dependents())
|
||||
.make_locker(LockType::Write)
|
||||
.add_to_keys(locks);
|
||||
let dependency = crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.star()
|
||||
.installed()
|
||||
.map(|x| x.manifest().dependencies().star())
|
||||
.make_locker(LockType::Write)
|
||||
.add_to_keys(locks);
|
||||
move |skeleton_key| {
|
||||
Ok(Self {
|
||||
break_receipts: break_receipts(skeleton_key)?,
|
||||
current_dependents: current_dependents.verify(skeleton_key)?,
|
||||
dependency: dependency.verify(skeleton_key)?,
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[command(subcommands(dry))]
|
||||
pub async fn update() -> Result<(), Error> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip(ctx))]
|
||||
#[command(display(display_serializable))]
|
||||
pub async fn dry(
|
||||
#[context] ctx: RpcContext,
|
||||
@@ -24,49 +70,34 @@ pub async fn dry(
|
||||
let mut db = ctx.db.handle();
|
||||
let mut tx = db.begin().await?;
|
||||
let mut breakages = BTreeMap::new();
|
||||
crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.lock(&mut tx, LockType::Read)
|
||||
.await?;
|
||||
for dependent in crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(&id)
|
||||
.and_then(|m| m.installed())
|
||||
.expect(&mut tx)
|
||||
.await?
|
||||
.current_dependents()
|
||||
.keys(&mut tx, true)
|
||||
let receipts = UpdateReceipts::new(&mut tx).await?;
|
||||
|
||||
for dependent in receipts
|
||||
.current_dependents
|
||||
.get(&mut tx, &id)
|
||||
.await?
|
||||
.ok_or_else(not_found)?
|
||||
.0
|
||||
.keys()
|
||||
.into_iter()
|
||||
.filter(|dependent| &id != dependent)
|
||||
.filter(|dependent| &&id != dependent)
|
||||
{
|
||||
let version_req = crate::db::DatabaseModel::new()
|
||||
.package_data()
|
||||
.idx_model(&dependent)
|
||||
.and_then(|m| m.installed())
|
||||
.expect(&mut tx)
|
||||
.await?
|
||||
.manifest()
|
||||
.dependencies()
|
||||
.idx_model(&id)
|
||||
.expect(&mut tx)
|
||||
.await?
|
||||
.get(&mut tx, true)
|
||||
.await?
|
||||
.into_owned()
|
||||
.version;
|
||||
if !version.satisfies(&version_req) {
|
||||
break_transitive(
|
||||
&mut tx,
|
||||
&dependent,
|
||||
&id,
|
||||
DependencyError::IncorrectVersion {
|
||||
expected: version_req,
|
||||
received: version.clone(),
|
||||
},
|
||||
&mut breakages,
|
||||
)
|
||||
.await?;
|
||||
if let Some(dep_info) = receipts.dependency.get(&mut tx, (&dependent, &id)).await? {
|
||||
let version_req = dep_info.version;
|
||||
if !version.satisfies(&version_req) {
|
||||
break_transitive(
|
||||
&mut tx,
|
||||
&dependent,
|
||||
&id,
|
||||
DependencyError::IncorrectVersion {
|
||||
expected: version_req,
|
||||
received: version.clone(),
|
||||
},
|
||||
&mut breakages,
|
||||
&receipts.break_receipts,
|
||||
)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
}
|
||||
tx.abort().await?;
|
||||
|
||||
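The rewritten dry run above reads each dependent's declared requirement through the `dependency` receipt and records an `IncorrectVersion` breakage only when the candidate version fails it. The core decision, isolated as a sketch (`declared_requirements` is a hypothetical stand-in for the receipt lookups; `Version::satisfies`, `VersionRange`, and `DependencyError::IncorrectVersion` come from the surrounding code):

```rust
use std::collections::BTreeMap;

fn incorrect_version_breakages(
    version: &Version,
    declared_requirements: &BTreeMap<PackageId, VersionRange>,
) -> BTreeMap<PackageId, DependencyError> {
    declared_requirements
        .iter()
        // only dependents whose requirement the new version fails
        .filter(|(_, req)| !version.satisfies(req))
        .map(|(dependent, req)| {
            (
                dependent.clone(),
                DependencyError::IncorrectVersion {
                    expected: req.clone(),
                    received: version.clone(),
                },
            )
        })
        .collect()
}
```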
@@ -1,8 +1,4 @@
|
||||
pub const CONFIG_PATH: &str = "/etc/embassy/config.yaml";
|
||||
#[cfg(not(feature = "beta"))]
|
||||
pub const DEFAULT_MARKETPLACE: &str = "https://marketplace.start9.com";
|
||||
#[cfg(feature = "beta")]
|
||||
pub const DEFAULT_MARKETPLACE: &str = "https://beta-registry-0-3.start9labs.com";
|
||||
pub const BUFFER_SIZE: usize = 1024;
|
||||
pub const HOST_IP: [u8; 4] = [172, 18, 0, 1];
|
||||
|
||||
@@ -31,6 +27,7 @@ pub mod middleware;
|
||||
pub mod migration;
|
||||
pub mod net;
|
||||
pub mod notifications;
|
||||
pub mod procedure;
|
||||
pub mod properties;
|
||||
pub mod s9pk;
|
||||
pub mod setup;
|
||||
@@ -99,6 +96,7 @@ pub fn server() -> Result<(), RpcError> {
|
||||
config::config,
|
||||
control::start,
|
||||
control::stop,
|
||||
control::restart,
|
||||
logs::logs,
|
||||
properties::properties,
|
||||
dependencies::dependency,
|
||||
|
||||
@@ -12,8 +12,8 @@ use tokio::process::Command;
|
||||
use tokio_stream::wrappers::LinesStream;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::action::docker::DockerAction;
|
||||
use crate::error::ResultExt;
|
||||
use crate::procedure::docker::DockerProcedure;
|
||||
use crate::s9pk::manifest::PackageId;
|
||||
use crate::util::serde::Reversible;
|
||||
use crate::Error;
|
||||
@@ -158,7 +158,7 @@ pub async fn fetch_logs(
|
||||
LogSource::Container(id) => {
|
||||
cmd.arg(format!(
|
||||
"CONTAINER_NAME={}",
|
||||
DockerAction::container_name(&id, None)
|
||||
DockerProcedure::container_name(&id, None)
|
||||
));
|
||||
}
|
||||
};
|
||||
|
||||
@@ -5,7 +5,7 @@ use patch_db::{DbHandle, LockType};
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::context::RpcContext;
|
||||
use crate::dependencies::{break_transitive, DependencyError};
|
||||
use crate::dependencies::{break_transitive, heal_transitive, DependencyError};
|
||||
use crate::s9pk::manifest::PackageId;
|
||||
use crate::status::health_check::{HealthCheckId, HealthCheckResult};
|
||||
use crate::status::MainStatus;
|
||||
@@ -98,7 +98,11 @@ pub async fn check<Db: DbHandle>(
|
||||
|
||||
checkpoint.save().await?;
|
||||
|
||||
for (dependent, info) in &*current_dependents {
|
||||
tracing::debug!("Checking health of {}", id);
|
||||
let receipts = crate::dependencies::BreakTransitiveReceipts::new(&mut tx).await?;
|
||||
tracing::debug!("Got receipts {}", id);
|
||||
|
||||
for (dependent, info) in (*current_dependents).0.iter() {
|
||||
let failures: BTreeMap<HealthCheckId, HealthCheckResult> = health_results
|
||||
.iter()
|
||||
.filter(|(_, hc_res)| !matches!(hc_res, HealthCheckResult::Success { .. }))
|
||||
@@ -113,8 +117,11 @@ pub async fn check<Db: DbHandle>(
|
||||
id,
|
||||
DependencyError::HealthChecksFailed { failures },
|
||||
&mut BTreeMap::new(),
|
||||
&receipts,
|
||||
)
|
||||
.await?;
|
||||
} else {
|
||||
heal_transitive(ctx, &mut tx, &dependent, id, &receipts.dependency_receipt).await?;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -19,13 +19,13 @@ use tokio::sync::{Notify, RwLock};
|
||||
use torut::onion::TorSecretKeyV3;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::action::docker::DockerAction;
|
||||
use crate::action::{ActionImplementation, NoOutput};
|
||||
use crate::context::RpcContext;
|
||||
use crate::manager::sync::synchronizer;
|
||||
use crate::net::interface::InterfaceId;
|
||||
use crate::net::GeneratedCertificateMountPoint;
|
||||
use crate::notifications::NotificationLevel;
|
||||
use crate::procedure::docker::DockerProcedure;
|
||||
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
|
||||
use crate::s9pk::manifest::{Manifest, PackageId};
|
||||
use crate::status::MainStatus;
|
||||
use crate::util::{Container, NonDetachingJoinHandle, Version};
|
||||
@@ -293,6 +293,7 @@ async fn run_main(
|
||||
.net_controller
|
||||
.remove(
|
||||
&state.manifest.id,
|
||||
ip,
|
||||
state.manifest.interfaces.0.keys().cloned(),
|
||||
)
|
||||
.await?;
|
||||
@@ -312,7 +313,7 @@ async fn start_up_image(
|
||||
&rt_state.ctx,
|
||||
&rt_state.manifest.id,
|
||||
&rt_state.manifest.version,
|
||||
None,
|
||||
ProcedureName::Main,
|
||||
&rt_state.manifest.volumes,
|
||||
None,
|
||||
false,
|
||||
@@ -333,7 +334,7 @@ impl Manager {
|
||||
ctx,
|
||||
status: AtomicUsize::new(Status::Stopped as usize),
|
||||
on_stop,
|
||||
container_name: DockerAction::container_name(&manifest.id, None),
|
||||
container_name: DockerProcedure::container_name(&manifest.id, None),
|
||||
manifest,
|
||||
tor_keys,
|
||||
synchronized: Notify::new(),
|
||||
@@ -391,6 +392,11 @@ impl Manager {
|
||||
.commit_health_check_results
|
||||
.store(false, Ordering::SeqCst);
|
||||
let _ = self.shared.on_stop.send(OnStop::Exit);
|
||||
let action = match &self.shared.manifest.main {
|
||||
PackageProcedure::Docker(a) => a,
|
||||
#[cfg(feature = "js_engine")]
|
||||
PackageProcedure::Script(_) => return Ok(()),
|
||||
};
|
||||
match self
|
||||
.shared
|
||||
.ctx
|
||||
@@ -398,13 +404,11 @@ impl Manager {
|
||||
.stop_container(
|
||||
&self.shared.container_name,
|
||||
Some(StopContainerOptions {
|
||||
t: match &self.shared.manifest.main {
|
||||
ActionImplementation::Docker(a) => a,
|
||||
}
|
||||
.sigterm_timeout
|
||||
.map(|a| *a)
|
||||
.unwrap_or(Duration::from_secs(30))
|
||||
.as_secs_f64() as i64,
|
||||
t: action
|
||||
.sigterm_timeout
|
||||
.map(|a| *a)
|
||||
.unwrap_or(Duration::from_secs(30))
|
||||
.as_secs_f64() as i64,
|
||||
}),
|
||||
)
|
||||
.await
|
||||
@@ -542,19 +546,22 @@ async fn stop(shared: &ManagerSharedState) -> Result<(), Error> {
|
||||
) {
|
||||
resume(shared).await?;
|
||||
}
|
||||
let action = match &shared.manifest.main {
|
||||
PackageProcedure::Docker(a) => a,
|
||||
#[cfg(feature = "js_engine")]
|
||||
PackageProcedure::Script(_) => return Ok(()),
|
||||
};
|
||||
match shared
|
||||
.ctx
|
||||
.docker
|
||||
.stop_container(
|
||||
&shared.container_name,
|
||||
Some(StopContainerOptions {
|
||||
t: match &shared.manifest.main {
|
||||
ActionImplementation::Docker(a) => a,
|
||||
}
|
||||
.sigterm_timeout
|
||||
.map(|a| *a)
|
||||
.unwrap_or(Duration::from_secs(30))
|
||||
.as_secs_f64() as i64,
|
||||
t: action
|
||||
.sigterm_timeout
|
||||
.map(|a| *a)
|
||||
.unwrap_or(Duration::from_secs(30))
|
||||
.as_secs_f64() as i64,
|
||||
}),
|
||||
)
|
||||
.await
|
||||
|
||||
@@ -31,7 +31,10 @@ async fn synchronize_once(shared: &ManagerSharedState) -> Result<Status, Error>
|
||||
MainStatus::Stopping => {
|
||||
*status = MainStatus::Stopped;
|
||||
}
|
||||
MainStatus::Starting => {
|
||||
MainStatus::Restarting => {
|
||||
*status = MainStatus::Starting { restarting: true };
|
||||
}
|
||||
MainStatus::Starting { .. } => {
|
||||
start(shared).await?;
|
||||
}
|
||||
MainStatus::Running { started, .. } => {
|
||||
@@ -41,19 +44,19 @@ async fn synchronize_once(shared: &ManagerSharedState) -> Result<Status, Error>
|
||||
MainStatus::BackingUp { .. } => (),
|
||||
},
|
||||
Status::Starting => match *status {
|
||||
MainStatus::Stopped | MainStatus::Stopping => {
|
||||
MainStatus::Stopped | MainStatus::Stopping | MainStatus::Restarting => {
|
||||
stop(shared).await?;
|
||||
}
|
||||
MainStatus::Starting | MainStatus::Running { .. } => (),
|
||||
MainStatus::Starting { .. } | MainStatus::Running { .. } => (),
|
||||
MainStatus::BackingUp { .. } => {
|
||||
pause(shared).await?;
|
||||
}
|
||||
},
|
||||
Status::Running => match *status {
|
||||
MainStatus::Stopped | MainStatus::Stopping => {
|
||||
MainStatus::Stopped | MainStatus::Stopping | MainStatus::Restarting => {
|
||||
stop(shared).await?;
|
||||
}
|
||||
MainStatus::Starting => {
|
||||
MainStatus::Starting { .. } => {
|
||||
*status = MainStatus::Running {
|
||||
started: Utc::now(),
|
||||
health: BTreeMap::new(),
|
||||
@@ -65,10 +68,10 @@ async fn synchronize_once(shared: &ManagerSharedState) -> Result<Status, Error>
|
||||
}
|
||||
},
|
||||
Status::Paused => match *status {
|
||||
MainStatus::Stopped | MainStatus::Stopping => {
|
||||
MainStatus::Stopped | MainStatus::Stopping | MainStatus::Restarting => {
|
||||
stop(shared).await?;
|
||||
}
|
||||
MainStatus::Starting | MainStatus::Running { .. } => {
|
||||
MainStatus::Starting { .. } | MainStatus::Running { .. } => {
|
||||
resume(shared).await?;
|
||||
}
|
||||
MainStatus::BackingUp { .. } => (),
|
||||
|
||||
@@ -34,33 +34,21 @@ impl HasLoggedOutSessions {
|
||||
logged_out_sessions: impl IntoIterator<Item = impl AsLogoutSessionId>,
|
||||
ctx: &RpcContext,
|
||||
) -> Result<Self, Error> {
|
||||
let sessions = logged_out_sessions
|
||||
.into_iter()
|
||||
.by_ref()
|
||||
.map(|x| x.as_logout_session_id())
|
||||
.collect::<Vec<_>>();
|
||||
let mut open_authed_websockets = ctx.open_authed_websockets.lock().await;
|
||||
let mut sqlx_conn = ctx.secret_store.acquire().await?;
|
||||
for session in &sessions {
|
||||
for session in logged_out_sessions {
|
||||
let session = session.as_logout_session_id();
|
||||
sqlx::query!(
|
||||
"UPDATE session SET logged_out = CURRENT_TIMESTAMP WHERE id = ?",
|
||||
session
|
||||
)
|
||||
.execute(&mut sqlx_conn)
|
||||
.await?;
|
||||
}
|
||||
drop(sqlx_conn);
|
||||
for session in sessions {
|
||||
for socket in ctx
|
||||
.open_authed_websockets
|
||||
.lock()
|
||||
.await
|
||||
.remove(&session)
|
||||
.unwrap_or_default()
|
||||
{
|
||||
for socket in open_authed_websockets.remove(&session).unwrap_or_default() {
|
||||
let _ = socket.send(());
|
||||
}
|
||||
}
|
||||
Ok(Self(()))
|
||||
Ok(HasLoggedOutSessions(()))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -8,9 +8,10 @@ use patch_db::HasModel;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::action::ActionImplementation;
|
||||
use crate::context::RpcContext;
|
||||
use crate::id::ImageId;
|
||||
use crate::procedure::PackageProcedure;
|
||||
use crate::procedure::ProcedureName;
|
||||
use crate::s9pk::manifest::PackageId;
|
||||
use crate::util::Version;
|
||||
use crate::volume::Volumes;
|
||||
@@ -19,8 +20,8 @@ use crate::{Error, ResultExt};
|
||||
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct Migrations {
|
||||
pub from: IndexMap<VersionRange, ActionImplementation>,
|
||||
pub to: IndexMap<VersionRange, ActionImplementation>,
|
||||
pub from: IndexMap<VersionRange, PackageProcedure>,
|
||||
pub to: IndexMap<VersionRange, PackageProcedure>,
|
||||
}
|
||||
impl Migrations {
|
||||
#[instrument]
|
||||
@@ -64,7 +65,7 @@ impl Migrations {
|
||||
ctx,
|
||||
pkg_id,
|
||||
pkg_version,
|
||||
Some("Migration"), // Migrations cannot be executed concurrently
|
||||
ProcedureName::Migration, // Migrations cannot be executed concurrently
|
||||
volumes,
|
||||
Some(version),
|
||||
false,
|
||||
@@ -99,7 +100,7 @@ impl Migrations {
|
||||
ctx,
|
||||
pkg_id,
|
||||
pkg_version,
|
||||
Some("Migration"),
|
||||
ProcedureName::Migration,
|
||||
volumes,
|
||||
Some(version),
|
||||
false,
|
||||
|
||||
backend/src/net/dns.rs (new file, 173 lines)
@@ -0,0 +1,173 @@
|
||||
use std::borrow::Borrow;
|
||||
use std::collections::{BTreeMap, BTreeSet};
|
||||
use std::net::{Ipv4Addr, SocketAddr};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use futures::TryFutureExt;
|
||||
use helpers::NonDetachingJoinHandle;
|
||||
use models::PackageId;
|
||||
use tokio::net::{TcpListener, UdpSocket};
|
||||
use tokio::sync::RwLock;
|
||||
use trust_dns_server::authority::MessageResponseBuilder;
|
||||
use trust_dns_server::client::op::{Header, ResponseCode};
|
||||
use trust_dns_server::client::rr::{Name, Record, RecordType};
|
||||
use trust_dns_server::server::{Request, RequestHandler, ResponseHandler, ResponseInfo};
|
||||
use trust_dns_server::ServerFuture;
|
||||
|
||||
#[cfg(feature = "avahi")]
|
||||
use crate::net::mdns::resolve_mdns;
|
||||
use crate::{Error, ErrorKind, ResultExt};
|
||||
|
||||
pub struct DnsController {
|
||||
services: Arc<RwLock<BTreeMap<PackageId, BTreeSet<Ipv4Addr>>>>,
|
||||
#[allow(dead_code)]
|
||||
dns_server: NonDetachingJoinHandle<Result<(), Error>>,
|
||||
}
|
||||
|
||||
struct Resolver {
|
||||
services: Arc<RwLock<BTreeMap<PackageId, BTreeSet<Ipv4Addr>>>>,
|
||||
}
|
||||
impl Resolver {
|
||||
async fn resolve(&self, name: &Name) -> Option<Vec<Ipv4Addr>> {
|
||||
match name.iter().next_back() {
|
||||
#[cfg(feature = "avahi")]
|
||||
Some(b"local") => match resolve_mdns(&format!(
|
||||
"{}.local",
|
||||
name.iter()
|
||||
.rev()
|
||||
.skip(1)
|
||||
.next()
|
||||
.and_then(|v| std::str::from_utf8(v).ok())
|
||||
.unwrap_or_default()
|
||||
))
|
||||
.await
|
||||
{
|
||||
Ok(ip) => Some(vec![ip]),
|
||||
Err(e) => {
|
||||
tracing::error!("{}", e);
|
||||
tracing::debug!("{:?}", e);
|
||||
None
|
||||
}
|
||||
},
|
||||
Some(b"embassy") => {
|
||||
if let Some(pkg) = name.iter().rev().skip(1).next() {
|
||||
if let Some(ip) = self
|
||||
.services
|
||||
.read()
|
||||
.await
|
||||
.get(std::str::from_utf8(pkg).unwrap_or_default())
|
||||
{
|
||||
Some(ip.iter().copied().collect())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl RequestHandler for Resolver {
|
||||
async fn handle_request<R: ResponseHandler>(
|
||||
&self,
|
||||
request: &Request,
|
||||
mut response_handle: R,
|
||||
) -> ResponseInfo {
|
||||
let query = request.request_info().query;
|
||||
if let Some(ip) = self.resolve(query.name().borrow()).await {
|
||||
if query.query_type() != RecordType::A {
|
||||
tracing::warn!("Non A-Record requested for {}", query.name());
|
||||
}
|
||||
response_handle
|
||||
.send_response(
|
||||
MessageResponseBuilder::from_message_request(&*request).build(
|
||||
Header::response_from_request(request.header()),
|
||||
&ip.into_iter()
|
||||
.map(|ip| {
|
||||
Record::from_rdata(
|
||||
request.request_info().query.name().to_owned().into(),
|
||||
0,
|
||||
trust_dns_server::client::rr::RData::A(ip),
|
||||
)
|
||||
})
|
||||
.collect::<Vec<_>>(),
|
||||
[],
|
||||
[],
|
||||
[],
|
||||
),
|
||||
)
|
||||
.await
|
||||
} else {
|
||||
let mut res = Header::response_from_request(request.header());
|
||||
res.set_response_code(ResponseCode::NXDomain);
|
||||
response_handle
|
||||
.send_response(
|
||||
MessageResponseBuilder::from_message_request(&*request).build(
|
||||
res.into(),
|
||||
[],
|
||||
[],
|
||||
[],
|
||||
[],
|
||||
),
|
||||
)
|
||||
.await
|
||||
}
|
||||
.unwrap_or_else(|e| {
|
||||
tracing::error!("{}", e);
|
||||
tracing::debug!("{:?}", e);
|
||||
let mut res = Header::response_from_request(request.header());
|
||||
res.set_response_code(ResponseCode::ServFail);
|
||||
res.into()
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl DnsController {
|
||||
pub async fn init(bind: &[SocketAddr]) -> Result<Self, Error> {
|
||||
let services = Arc::new(RwLock::new(BTreeMap::new()));
|
||||
|
||||
let mut server = ServerFuture::new(Resolver {
|
||||
services: services.clone(),
|
||||
});
|
||||
server.register_listener(
|
||||
TcpListener::bind(bind)
|
||||
.await
|
||||
.with_kind(ErrorKind::Network)?,
|
||||
Duration::from_secs(30),
|
||||
);
|
||||
server.register_socket(UdpSocket::bind(bind).await.with_kind(ErrorKind::Network)?);
|
||||
|
||||
let dns_server = tokio::spawn(
|
||||
server
|
||||
.block_until_done()
|
||||
.map_err(|e| Error::new(e, ErrorKind::Network)),
|
||||
)
|
||||
.into();
|
||||
|
||||
Ok(Self {
|
||||
services,
|
||||
dns_server,
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn add(&self, pkg_id: &PackageId, ip: Ipv4Addr) {
|
||||
let mut writable = self.services.write().await;
|
||||
let mut ips = writable.remove(pkg_id).unwrap_or_default();
|
||||
ips.insert(ip);
|
||||
writable.insert(pkg_id.clone(), ips);
|
||||
}
|
||||
|
||||
pub async fn remove(&self, pkg_id: &PackageId, ip: Ipv4Addr) {
|
||||
let mut writable = self.services.write().await;
|
||||
let mut ips = writable.remove(pkg_id).unwrap_or_default();
|
||||
ips.remove(&ip);
|
||||
if !ips.is_empty() {
|
||||
writable.insert(pkg_id.clone(), ips);
|
||||
}
|
||||
}
|
||||
}
|
||||
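The new `DnsController` answers A-record queries for `<package>.embassy` out of an in-memory `BTreeMap<PackageId, BTreeSet<Ipv4Addr>>`, so registering a service is just map maintenance. A hypothetical usage sketch (the bind address is illustrative; the real caller threads `dns_bind` through `NetController::init`):

```rust
use std::net::{Ipv4Addr, SocketAddr};

async fn demo(pkg: &PackageId) -> Result<(), Error> {
    let bind: &[SocketAddr] = &["127.0.0.1:53".parse().unwrap()]; // illustrative
    let dns = DnsController::init(bind).await?;
    dns.add(pkg, Ipv4Addr::new(172, 18, 0, 2)).await; // "<pkg>.embassy" now resolves
    dns.remove(pkg, Ipv4Addr::new(172, 18, 0, 2)).await; // and now it doesn't
    Ok(())
}
```

Because each package maps to a `BTreeSet` of addresses, `add` and `remove` are idempotent per (package, IP) pair, and a package with multiple containers can expose several A records at once.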
@@ -1,5 +1,4 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::path::Path;
|
||||
|
||||
use color_eyre::eyre::eyre;
|
||||
use futures::TryStreamExt;
|
||||
@@ -16,6 +15,8 @@ use crate::s9pk::manifest::PackageId;
|
||||
use crate::util::serde::Port;
|
||||
use crate::{Error, ResultExt};
|
||||
|
||||
pub use models::InterfaceId;
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct Interfaces(pub BTreeMap<InterfaceId, Interface>); // TODO
|
||||
@@ -113,46 +114,6 @@ impl Interfaces {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)]
|
||||
pub struct InterfaceId<S: AsRef<str> = String>(Id<S>);
|
||||
impl<S: AsRef<str>> From<Id<S>> for InterfaceId<S> {
|
||||
fn from(id: Id<S>) -> Self {
|
||||
Self(id)
|
||||
}
|
||||
}
|
||||
impl<S: AsRef<str>> std::fmt::Display for InterfaceId<S> {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
write!(f, "{}", &self.0)
|
||||
}
|
||||
}
|
||||
impl<S: AsRef<str>> std::ops::Deref for InterfaceId<S> {
|
||||
type Target = S;
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&*self.0
|
||||
}
|
||||
}
|
||||
impl<S: AsRef<str>> AsRef<str> for InterfaceId<S> {
|
||||
fn as_ref(&self) -> &str {
|
||||
self.0.as_ref()
|
||||
}
|
||||
}
|
||||
impl<'de, S> Deserialize<'de> for InterfaceId<S>
|
||||
where
|
||||
S: AsRef<str>,
|
||||
Id<S>: Deserialize<'de>,
|
||||
{
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
Ok(InterfaceId(Deserialize::deserialize(deserializer)?))
|
||||
}
|
||||
}
|
||||
impl<S: AsRef<str>> AsRef<Path> for InterfaceId<S> {
|
||||
fn as_ref(&self) -> &Path {
|
||||
self.0.as_ref().as_ref()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::net::IpAddr;
|
||||
use std::net::Ipv4Addr;
|
||||
|
||||
use avahi_sys::{
|
||||
self, avahi_client_errno, avahi_entry_group_add_service, avahi_entry_group_commit,
|
||||
@@ -17,7 +17,7 @@ use crate::s9pk::manifest::PackageId;
|
||||
use crate::util::Invoke;
|
||||
use crate::Error;
|
||||
|
||||
pub async fn resolve_mdns(hostname: &str) -> Result<IpAddr, Error> {
|
||||
pub async fn resolve_mdns(hostname: &str) -> Result<Ipv4Addr, Error> {
|
||||
Ok(String::from_utf8(
|
||||
Command::new("avahi-resolve-host-name")
|
||||
.arg("-4")
|
||||
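`avahi-resolve-host-name -4 <host>` prints a single `<host>\t<ipv4>` line on success, and the truncated hunk above implies `resolve_mdns` extracts the second column of that output. A sketch of that parsing step under that assumption:

```rust
fn parse_avahi_output(stdout: &str) -> Option<std::net::Ipv4Addr> {
    // expected format: "<hostname>\t<address>\n"; take the second column
    stdout.split_whitespace().nth(1)?.parse().ok()
}

#[test]
fn parses_avahi_line() {
    assert_eq!(
        parse_avahi_output("myhost.local\t192.168.1.7\n"),
        Some(std::net::Ipv4Addr::new(192, 168, 1, 7))
    );
}
```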
@@ -94,12 +94,7 @@ impl MdnsControllerInner {
|
||||
std::ptr::null::<libc::c_char>(),
|
||||
);
|
||||
if res < avahi_sys::AVAHI_OK {
|
||||
let e_str = avahi_strerror(res);
|
||||
tracing::error!(
|
||||
"Could not add service to Avahi entry group: {:?}",
|
||||
std::ffi::CStr::from_ptr(e_str)
|
||||
);
|
||||
avahi_free(e_str as *mut c_void);
|
||||
log_str_error("add service to Avahi entry group", res);
|
||||
panic!("Failed to load Avahi services");
|
||||
}
|
||||
tracing::info!(
|
||||
@@ -129,12 +124,7 @@ impl MdnsControllerInner {
|
||||
self.hostname.len(),
|
||||
);
|
||||
if res < avahi_sys::AVAHI_OK {
|
||||
let e_str = avahi_strerror(res);
|
||||
tracing::error!(
|
||||
"Could not add CNAME record to Avahi entry group: {:?}",
|
||||
std::ffi::CStr::from_ptr(e_str)
|
||||
);
|
||||
avahi_free(e_str as *mut c_void);
|
||||
log_str_error("add CNAME record to Avahi entry group", res);
|
||||
panic!("Failed to load Avahi services");
|
||||
}
|
||||
tracing::info!("Published {:?}", lan_address_ptr);
|
||||
@@ -156,12 +146,7 @@ impl MdnsControllerInner {
|
||||
err_c,
|
||||
);
|
||||
if avahi_client == std::ptr::null_mut::<AvahiClient>() {
|
||||
let e_str = avahi_strerror(*box_err);
|
||||
tracing::error!(
|
||||
"Could not create avahi client: {:?}",
|
||||
std::ffi::CStr::from_ptr(e_str)
|
||||
);
|
||||
avahi_free(e_str as *mut c_void);
|
||||
log_str_error("create Avahi client", *box_err);
|
||||
panic!("Failed to create Avahi Client");
|
||||
}
|
||||
let group = avahi_sys::avahi_entry_group_new(
|
||||
@@ -170,12 +155,7 @@ impl MdnsControllerInner {
|
||||
std::ptr::null_mut(),
|
||||
);
|
||||
if group == std::ptr::null_mut() {
|
||||
let e_str = avahi_strerror(avahi_client_errno(avahi_client));
|
||||
tracing::error!(
|
||||
"Could not create avahi entry group: {:?}",
|
||||
std::ffi::CStr::from_ptr(e_str)
|
||||
);
|
||||
avahi_free(e_str as *mut c_void);
|
||||
log_str_error("create Avahi entry group", avahi_client_errno(avahi_client));
|
||||
panic!("Failed to create Avahi Entry Group");
|
||||
}
|
||||
let mut hostname_buf = vec![0];
|
||||
@@ -199,12 +179,7 @@ impl MdnsControllerInner {
|
||||
res.load_services();
|
||||
let commit_err = avahi_entry_group_commit(res.entry_group);
|
||||
if commit_err < avahi_sys::AVAHI_OK {
|
||||
let e_str = avahi_strerror(commit_err);
|
||||
tracing::error!(
|
||||
"Could not reset Avahi entry group: {:?}",
|
||||
std::ffi::CStr::from_ptr(e_str)
|
||||
);
|
||||
avahi_free(e_str as *mut c_void);
|
||||
log_str_error("reset Avahi entry group", commit_err);
|
||||
panic!("Failed to load Avahi services: reset");
|
||||
}
|
||||
res
|
||||
@@ -215,23 +190,13 @@ impl MdnsControllerInner {
|
||||
let mut res;
|
||||
res = avahi_entry_group_reset(self.entry_group);
|
||||
if res < avahi_sys::AVAHI_OK {
|
||||
let e_str = avahi_strerror(res);
|
||||
tracing::error!(
|
||||
"Could not reset Avahi entry group: {:?}",
|
||||
std::ffi::CStr::from_ptr(e_str)
|
||||
);
|
||||
avahi_free(e_str as *mut c_void);
|
||||
log_str_error("reset Avahi entry group", res);
|
||||
panic!("Failed to load Avahi services: reset");
|
||||
}
|
||||
self.load_services();
|
||||
res = avahi_entry_group_commit(self.entry_group);
|
||||
if res < avahi_sys::AVAHI_OK {
|
||||
let e_str = avahi_strerror(res);
|
||||
tracing::error!(
|
||||
"Could not commit Avahi entry group: {:?}",
|
||||
std::ffi::CStr::from_ptr(e_str)
|
||||
);
|
||||
avahi_free(e_str as *mut c_void);
|
||||
log_str_error("commit Avahi entry group", res);
|
||||
panic!("Failed to load Avahi services: commit");
|
||||
}
|
||||
}
|
||||
@@ -264,6 +229,18 @@ impl Drop for MdnsControllerInner {
|
||||
}
|
||||
}
|
||||
|
||||
fn log_str_error(action: &str, e: i32) {
|
||||
unsafe {
|
||||
let e_str = avahi_strerror(e);
|
||||
tracing::error!(
|
||||
"Could not {}: {:?}",
|
||||
action,
|
||||
std::ffi::CStr::from_ptr(e_str)
|
||||
);
|
||||
avahi_free(e_str as *mut c_void);
|
||||
}
|
||||
}
|
||||
|
||||
unsafe extern "C" fn entry_group_callback(
|
||||
_group: *mut avahi_sys::AvahiEntryGroup,
|
||||
state: avahi_sys::AvahiEntryGroupState,
|
||||
|
||||
@@ -14,11 +14,13 @@ use self::mdns::MdnsController;
|
||||
use self::nginx::NginxController;
|
||||
use self::ssl::SslManager;
|
||||
use self::tor::TorController;
|
||||
use crate::net::dns::DnsController;
|
||||
use crate::net::interface::TorConfig;
|
||||
use crate::net::nginx::InterfaceMetadata;
|
||||
use crate::s9pk::manifest::PackageId;
|
||||
use crate::Error;
|
||||
|
||||
pub mod dns;
|
||||
pub mod interface;
|
||||
#[cfg(feature = "avahi")]
|
||||
pub mod mdns;
|
||||
@@ -45,6 +47,7 @@ pub struct NetController {
|
||||
pub mdns: MdnsController,
|
||||
pub nginx: NginxController,
|
||||
pub ssl: SslManager,
|
||||
pub dns: DnsController,
|
||||
}
|
||||
impl NetController {
|
||||
#[instrument(skip(db))]
|
||||
@@ -52,6 +55,7 @@ impl NetController {
|
||||
embassyd_addr: SocketAddr,
|
||||
embassyd_tor_key: TorSecretKeyV3,
|
||||
tor_control: SocketAddr,
|
||||
dns_bind: &[SocketAddr],
|
||||
db: SqlitePool,
|
||||
import_root_ca: Option<(PKey<Private>, X509)>,
|
||||
) -> Result<Self, Error> {
|
||||
@@ -65,10 +69,11 @@ impl NetController {
|
||||
mdns: MdnsController::init(),
|
||||
nginx: NginxController::init(PathBuf::from("/etc/nginx"), &ssl).await?,
|
||||
ssl,
|
||||
dns: DnsController::init(dns_bind).await?,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn ssl_directory_for(&self, pkg_id: &PackageId) -> PathBuf {
|
||||
pub fn ssl_directory_for(pkg_id: &PackageId) -> PathBuf {
|
||||
PathBuf::from(format!("{}/{}", PACKAGE_CERT_PATH, pkg_id))
|
||||
}
|
||||
|
||||
@@ -92,7 +97,7 @@ impl NetController {
|
||||
Some(cfg) => Some((i.0, cfg, i.2)),
|
||||
})
|
||||
.collect::<Vec<(InterfaceId, TorConfig, TorSecretKeyV3)>>();
|
||||
let (tor_res, _, nginx_res) = tokio::join!(
|
||||
let (tor_res, _, nginx_res, _) = tokio::join!(
|
||||
self.tor.add(pkg_id, ip, interfaces_tor),
|
||||
{
|
||||
#[cfg(feature = "avahi")]
|
||||
@@ -123,7 +128,8 @@ impl NetController {
|
||||
)),
|
||||
});
|
||||
self.nginx.add(&self.ssl, pkg_id.clone(), ip, interfaces)
|
||||
}
|
||||
},
|
||||
self.dns.add(pkg_id, ip),
|
||||
);
|
||||
tor_res?;
|
||||
nginx_res?;
|
||||
@@ -135,9 +141,10 @@ impl NetController {
|
||||
pub async fn remove<I: IntoIterator<Item = InterfaceId> + Clone>(
|
||||
&self,
|
||||
pkg_id: &PackageId,
|
||||
ip: Ipv4Addr,
|
||||
interfaces: I,
|
||||
) -> Result<(), Error> {
|
||||
let (tor_res, _, nginx_res) = tokio::join!(
|
||||
let (tor_res, _, nginx_res, _) = tokio::join!(
|
||||
self.tor.remove(pkg_id, interfaces.clone()),
|
||||
{
|
||||
#[cfg(feature = "avahi")]
|
||||
@@ -146,7 +153,8 @@ impl NetController {
|
||||
let mdns_fut = futures::future::ready(());
|
||||
mdns_fut
|
||||
},
|
||||
self.nginx.remove(pkg_id)
|
||||
self.nginx.remove(pkg_id),
|
||||
self.dns.remove(pkg_id, ip),
|
||||
);
|
||||
tor_res?;
|
||||
nginx_res?;
|
||||
|
||||
@@ -14,5 +14,6 @@ server {{
|
||||
proxy_http_version 1.1;
|
||||
proxy_set_header Upgrade $http_upgrade;
|
||||
proxy_set_header Connection $connection_upgrade;
|
||||
{proxy_redirect_directive}
|
||||
}}
|
||||
}}
|
||||
|
||||
@@ -91,7 +91,7 @@ impl NginxControllerInner {
|
||||
for (id, meta) in interface_map.iter() {
|
||||
for (port, lan_port_config) in meta.lan_config.iter() {
|
||||
// get ssl certificate chain
|
||||
let (listen_args, ssl_certificate_line, ssl_certificate_key_line) =
|
||||
let (listen_args, ssl_certificate_line, ssl_certificate_key_line, proxy_redirect_directive) =
|
||||
if lan_port_config.ssl {
|
||||
// these have already been written by the net controller
|
||||
let package_path = nginx_root.join(format!("ssl/{}", package));
|
||||
@@ -115,9 +115,10 @@ impl NginxControllerInner {
|
||||
format!("{} ssl", port.0),
|
||||
format!("ssl_certificate {};", ssl_path_cert.to_str().unwrap()),
|
||||
format!("ssl_certificate_key {};", ssl_path_key.to_str().unwrap()),
|
||||
format!("proxy_redirect http://$host/ https://$host/;"),
|
||||
)
|
||||
} else {
|
||||
(format!("{}", port.0), String::from(""), String::from(""))
|
||||
(format!("{}", port.0), String::from(""), String::from(""), String::from(""))
|
||||
};
|
||||
// write nginx configs
|
||||
let nginx_conf_path = nginx_root.join(format!(
|
||||
@@ -135,6 +136,7 @@ impl NginxControllerInner {
|
||||
ssl_certificate_key_line = ssl_certificate_key_line,
|
||||
app_ip = ipv4,
|
||||
internal_port = lan_port_config.internal,
|
||||
proxy_redirect_directive = proxy_redirect_directive,
|
||||
),
|
||||
)
|
||||
.await
|
||||
|
||||
@@ -164,7 +164,8 @@ impl SslManager {
|
||||
let (root_key, root_cert) = match store.load_root_certificate().await? {
|
||||
None => {
|
||||
let root_key = generate_key()?;
|
||||
let root_cert = make_root_cert(&root_key)?;
|
||||
let server_id = crate::hostname::get_id().await?;
|
||||
let root_cert = make_root_cert(&root_key, &server_id)?;
|
||||
store.save_root_certificate(&root_key, &root_cert).await?;
|
||||
Ok::<_, Error>((root_key, root_cert))
|
||||
}
|
||||
@@ -307,7 +308,7 @@ fn generate_key() -> Result<PKey<Private>, Error> {
|
||||
Ok(key)
|
||||
}
|
||||
#[instrument]
|
||||
fn make_root_cert(root_key: &PKey<Private>) -> Result<X509, Error> {
|
||||
fn make_root_cert(root_key: &PKey<Private>, server_id: &str) -> Result<X509, Error> {
|
||||
let mut builder = X509Builder::new()?;
|
||||
builder.set_version(CERTIFICATE_VERSION)?;
|
||||
|
||||
@@ -320,7 +321,8 @@ fn make_root_cert(root_key: &PKey<Private>) -> Result<X509, Error> {
|
||||
builder.set_serial_number(&*rand_serial()?)?;
|
||||
|
||||
let mut subject_name_builder = X509NameBuilder::new()?;
|
||||
subject_name_builder.append_entry_by_text("CN", "Embassy Local Root CA")?;
|
||||
subject_name_builder
|
||||
.append_entry_by_text("CN", &format!("Embassy Local Root CA ({})", server_id))?;
|
||||
subject_name_builder.append_entry_by_text("O", "Start9")?;
|
||||
subject_name_builder.append_entry_by_text("OU", "Embassy")?;
|
||||
let subject_name = subject_name_builder.build();
|
||||
|
||||
@@ -351,7 +351,9 @@ impl TorControllerInner {
|
||||
.get_info("onions/current")
|
||||
.await?
|
||||
.lines()
|
||||
.map(|l| l.trim().parse().with_kind(ErrorKind::Tor))
|
||||
.map(|l| l.trim())
|
||||
.filter(|l| !l.is_empty())
|
||||
.map(|l| l.parse().with_kind(ErrorKind::Tor))
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,3 +1,9 @@
|
||||
|
||||
map $http_upgrade $connection_upgrade {
|
||||
default upgrade;
|
||||
'' $http_connection;
|
||||
}
|
||||
|
||||
server {
|
||||
listen 80 default_server;
|
||||
listen [::]:80 default_server;
|
||||
|
||||
backend/src/procedure/build.rs (new file, empty)
@@ -1,18 +1,25 @@
|
||||
use std::borrow::Cow;
|
||||
use std::collections::{BTreeMap, BTreeSet};
|
||||
use std::collections::{BTreeMap, BTreeSet, VecDeque};
|
||||
use std::ffi::{OsStr, OsString};
|
||||
use std::net::Ipv4Addr;
|
||||
use std::path::PathBuf;
|
||||
use std::time::Duration;
|
||||
|
||||
use async_stream::stream;
|
||||
use bollard::container::RemoveContainerOptions;
|
||||
use color_eyre::eyre::eyre;
|
||||
use color_eyre::Report;
|
||||
use futures::future::Either as EitherFuture;
|
||||
use futures::TryStreamExt;
|
||||
use helpers::NonDetachingJoinHandle;
|
||||
use nix::sys::signal;
|
||||
use nix::unistd::Pid;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use tokio::io::{AsyncBufRead, AsyncBufReadExt, BufReader};
|
||||
use tracing::instrument;
|
||||
|
||||
use super::ProcedureName;
|
||||
use crate::context::RpcContext;
|
||||
use crate::id::{Id, ImageId};
|
||||
use crate::s9pk::manifest::{PackageId, SYSTEM_PACKAGE_ID};
|
||||
@@ -36,7 +43,7 @@ lazy_static::lazy_static! {
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub struct DockerAction {
|
||||
pub struct DockerProcedure {
|
||||
pub image: ImageId,
|
||||
#[serde(default)]
|
||||
pub system: bool,
|
||||
@@ -54,7 +61,7 @@ pub struct DockerAction {
|
||||
#[serde(default)]
|
||||
pub sigterm_timeout: Option<SerdeDuration>,
|
||||
}
|
||||
impl DockerAction {
|
||||
impl DockerProcedure {
|
||||
pub fn validate(
|
||||
&self,
|
||||
volumes: &Volumes,
|
||||
@@ -87,12 +94,14 @@ impl DockerAction {
|
||||
ctx: &RpcContext,
|
||||
pkg_id: &PackageId,
|
||||
pkg_version: &Version,
|
||||
name: Option<&str>,
|
||||
name: ProcedureName,
|
||||
volumes: &Volumes,
|
||||
input: Option<I>,
|
||||
allow_inject: bool,
|
||||
timeout: Option<Duration>,
|
||||
) -> Result<Result<O, (i32, String)>, Error> {
|
||||
let name = name.docker_name();
|
||||
let name: Option<&str> = name.as_ref().map(|x| &**x);
|
||||
let mut cmd = tokio::process::Command::new("docker");
|
||||
if self.inject && allow_inject {
|
||||
cmd.arg("exec");
|
||||
@@ -166,14 +175,73 @@ impl DockerAction {
|
||||
Done(T),
|
||||
TimedOut,
|
||||
}
|
||||
|
||||
let io_format = self.io_format;
|
||||
let mut output = BufReader::new(
|
||||
handle
|
||||
.stdout
|
||||
.take()
|
||||
.ok_or_else(|| eyre!("Can't takeout stout"))
|
||||
.with_kind(crate::ErrorKind::Docker)?,
|
||||
);
|
||||
let output = NonDetachingJoinHandle::from(tokio::spawn(async move {
|
||||
match async {
|
||||
if let Some(format) = io_format {
|
||||
return match max_by_lines(&mut output, None).await {
|
||||
MaxByLines::Done(buffer) => {
|
||||
Ok::<Value, Error>(
|
||||
match format.from_slice(buffer.as_bytes()) {
|
||||
Ok(a) => a,
|
||||
Err(e) => {
|
||||
tracing::warn!(
|
||||
"Failed to deserialize stdout from {}: {}, falling back to UTF-8 string.",
|
||||
format,
|
||||
e
|
||||
);
|
||||
Value::String(buffer)
|
||||
}
|
||||
},
|
||||
)
|
||||
},
|
||||
MaxByLines::Error(e) => Err(e),
|
||||
MaxByLines::Overflow(buffer) => Ok(Value::String(buffer))
|
||||
}
|
||||
}
|
||||
|
||||
let lines = buf_reader_to_lines(&mut output, 1000).await?;
|
||||
if lines.is_empty() {
|
||||
return Ok(Value::Null);
|
||||
}
|
||||
|
||||
let joined_output = lines.join("\n");
|
||||
Ok(Value::String(joined_output))
|
||||
}.await {
|
||||
Ok(a) => Ok((a, output)),
|
||||
Err(e) => Err((e, output))
|
||||
}
|
||||
}));
|
||||
let err_output = BufReader::new(
|
||||
handle
|
||||
.stderr
|
||||
.take()
|
||||
.ok_or_else(|| eyre!("Can't takeout std err"))
|
||||
.with_kind(crate::ErrorKind::Docker)?,
|
||||
);
|
||||
|
||||
let err_output = NonDetachingJoinHandle::from(tokio::spawn(async move {
|
||||
let lines = buf_reader_to_lines(err_output, 1000).await?;
|
||||
let joined_output = lines.join("\n");
|
||||
Ok::<_, Error>(joined_output)
|
||||
}));
|
||||
|
||||
let res = tokio::select! {
|
||||
res = handle.wait_with_output() => Race::Done(res.with_kind(crate::ErrorKind::Docker)?),
|
||||
res = handle.wait() => Race::Done(res.with_kind(crate::ErrorKind::Docker)?),
|
||||
res = timeout_fut => {
|
||||
res?;
|
||||
Race::TimedOut
|
||||
},
|
||||
};
|
||||
let res = match res {
|
||||
let exit_status = match res {
|
||||
Race::Done(x) => x,
|
||||
Race::TimedOut => {
|
||||
if let Some(id) = id {
|
||||
@@ -183,32 +251,24 @@ impl DockerAction {
|
||||
return Ok(Err((143, "Timed out. Retrying soon...".to_owned())));
|
||||
}
|
||||
};
|
||||
Ok(if res.status.success() || res.status.code() == Some(143) {
|
||||
Ok(if let Some(format) = self.io_format {
|
||||
match format.from_slice(&res.stdout) {
|
||||
Ok(a) => a,
|
||||
Err(e) => {
|
||||
tracing::warn!(
|
||||
"Failed to deserialize stdout from {}: {}, falling back to UTF-8 string.",
|
||||
format,
|
||||
e
|
||||
);
|
||||
serde_json::from_value(String::from_utf8(res.stdout)?.into())
|
||||
.with_kind(crate::ErrorKind::Deserialization)?
|
||||
}
|
||||
}
|
||||
} else if res.stdout.is_empty() {
|
||||
serde_json::from_value(Value::Null).with_kind(crate::ErrorKind::Deserialization)?
|
||||
Ok(
|
||||
if exit_status.success() || exit_status.code() == Some(143) {
|
||||
Ok(serde_json::from_value(
|
||||
output
|
||||
.await
|
||||
.with_kind(crate::ErrorKind::Unknown)?
|
||||
.map(|(v, _)| v)
|
||||
.map_err(|(e, _)| tracing::warn!("{}", e))
|
||||
.unwrap_or_default(),
|
||||
)
|
||||
.with_kind(crate::ErrorKind::Deserialization)?)
|
||||
} else {
|
||||
serde_json::from_value(String::from_utf8(res.stdout)?.into())
|
||||
.with_kind(crate::ErrorKind::Deserialization)?
|
||||
})
|
||||
} else {
|
||||
Err((
|
||||
res.status.code().unwrap_or_default(),
|
||||
String::from_utf8(res.stderr)?,
|
||||
))
|
||||
})
|
||||
Err((
|
||||
exit_status.code().unwrap_or_default(),
|
||||
err_output.await.with_kind(crate::ErrorKind::Unknown)??,
|
||||
))
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
#[instrument(skip(ctx, input))]
|
||||
@@ -243,36 +303,84 @@ impl DockerAction {
|
||||
.await
|
||||
.with_kind(crate::ErrorKind::Docker)?;
|
||||
}
|
||||
let res = handle
|
||||
.wait_with_output()
|
||||
.await
|
||||
.with_kind(crate::ErrorKind::Docker)?;
|
||||
Ok(if res.status.success() || res.status.code() == Some(143) {
|
||||
Ok(if let Some(format) = &self.io_format {
|
||||
match format.from_slice(&res.stdout) {
|
||||
Ok(a) => a,
|
||||
Err(e) => {
|
||||
tracing::warn!(
|
||||
"Failed to deserialize stdout from {}: {}, falling back to UTF-8 string.",
|
||||
format,
|
||||
e
|
||||
);
|
||||
serde_json::from_value(String::from_utf8(res.stdout)?.into())
|
||||
.with_kind(crate::ErrorKind::Deserialization)?
|
||||
|
||||
let err_output = BufReader::new(
|
||||
handle
|
||||
.stderr
|
||||
.take()
|
||||
.ok_or_else(|| eyre!("Can't takeout std err"))
|
||||
.with_kind(crate::ErrorKind::Docker)?,
|
||||
);
|
||||
let err_output = NonDetachingJoinHandle::from(tokio::spawn(async move {
|
||||
let lines = buf_reader_to_lines(err_output, 1000).await?;
|
||||
let joined_output = lines.join("\n");
|
||||
Ok::<_, Error>(joined_output)
|
||||
}));
|
||||
|
||||
let io_format = self.io_format;
|
||||
let mut output = BufReader::new(
|
||||
handle
|
||||
.stdout
|
||||
.take()
|
||||
.ok_or_else(|| eyre!("Can't takeout stout"))
|
||||
.with_kind(crate::ErrorKind::Docker)?,
|
||||
);
|
||||
let output = NonDetachingJoinHandle::from(tokio::spawn(async move {
|
||||
match async {
|
||||
if let Some(format) = io_format {
|
||||
return match max_by_lines(&mut output, None).await {
|
||||
MaxByLines::Done(buffer) => {
|
||||
Ok::<Value, Error>(
|
||||
match format.from_slice(buffer.as_bytes()) {
|
||||
Ok(a) => a,
|
||||
Err(e) => {
|
||||
tracing::warn!(
|
||||
"Failed to deserialize stdout from {}: {}, falling back to UTF-8 string.",
|
||||
format,
|
||||
e
|
||||
);
|
||||
Value::String(buffer)
|
||||
}
|
||||
},
|
||||
)
|
||||
},
|
||||
MaxByLines::Error(e) => Err(e),
|
||||
MaxByLines::Overflow(buffer) => Ok(Value::String(buffer))
|
||||
}
|
||||
}
|
||||
} else if res.stdout.is_empty() {
|
||||
serde_json::from_value(Value::Null).with_kind(crate::ErrorKind::Deserialization)?
|
||||
|
||||
let lines = buf_reader_to_lines(&mut output, 1000).await?;
|
||||
if lines.is_empty() {
|
||||
return Ok(Value::Null);
|
||||
}
|
||||
|
||||
let joined_output = lines.join("\n");
|
||||
Ok(Value::String(joined_output))
|
||||
}.await {
|
||||
Ok(a) => Ok((a, output)),
|
||||
Err(e) => Err((e, output))
|
||||
}
|
||||
}));
|
||||
|
||||
let exit_status = handle.wait().await.with_kind(crate::ErrorKind::Docker)?;
|
||||
Ok(
|
||||
if exit_status.success() || exit_status.code() == Some(143) {
|
||||
Ok(serde_json::from_value(
|
||||
output
|
||||
.await
|
||||
.with_kind(crate::ErrorKind::Unknown)?
|
||||
.map(|(v, _)| v)
|
||||
.map_err(|(e, _)| tracing::warn!("{}", e))
|
||||
.unwrap_or_default(),
|
||||
)
|
||||
.with_kind(crate::ErrorKind::Deserialization)?)
|
||||
} else {
|
||||
serde_json::from_value(String::from_utf8(res.stdout)?.into())
|
||||
.with_kind(crate::ErrorKind::Deserialization)?
|
||||
})
|
||||
} else {
|
||||
Err((
|
||||
res.status.code().unwrap_or_default(),
|
||||
String::from_utf8(res.stderr)?,
|
||||
))
|
||||
})
|
||||
Err((
|
||||
exit_status.code().unwrap_or_default(),
|
||||
err_output.await.with_kind(crate::ErrorKind::Unknown)??,
|
||||
))
|
||||
},
|
||||
)
|
||||
}
|
||||
|
||||
pub fn container_name(pkg_id: &PackageId, name: Option<&str>) -> String {
|
||||
@@ -284,9 +392,9 @@ impl DockerAction {
|
||||
}
|
||||
|
||||
pub fn uncontainer_name(name: &str) -> Option<(PackageId<&str>, Option<&str>)> {
|
||||
let (pre_tld, _) = name.split_once(".")?;
|
||||
let (pre_tld, _) = name.split_once('.')?;
|
||||
if pre_tld.contains('_') {
|
||||
let (pkg, name) = name.split_once("_")?;
|
||||
let (pkg, name) = name.split_once('_')?;
|
||||
Some((Id::try_from(pkg).ok()?.into(), Some(name)))
|
||||
} else {
|
||||
Some((Id::try_from(pre_tld).ok()?.into(), None))
|
||||
@@ -313,7 +421,7 @@ impl DockerAction {
|
||||
} else {
|
||||
continue;
|
||||
};
|
||||
let src = volume.path_for(ctx, pkg_id, pkg_version, volume_id);
|
||||
let src = volume.path_for(&ctx.datadir, pkg_id, pkg_version, volume_id);
|
||||
if let Err(e) = tokio::fs::metadata(&src).await {
|
||||
tracing::warn!("{} not mounted to container: {}", src.display(), e);
|
||||
continue;
|
||||
@@ -352,3 +460,111 @@ impl DockerAction {
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
struct RingVec<T> {
|
||||
value: VecDeque<T>,
|
||||
capacity: usize,
|
||||
}
|
||||
impl<T> RingVec<T> {
|
||||
fn new(capacity: usize) -> Self {
|
||||
RingVec {
|
||||
value: VecDeque::with_capacity(capacity),
|
||||
capacity,
|
||||
}
|
||||
}
|
||||
fn push(&mut self, item: T) -> Option<T> {
|
||||
let popped_item = if self.value.len() == self.capacity {
|
||||
self.value.pop_front()
|
||||
} else {
|
||||
None
|
||||
};
|
||||
self.value.push_back(item);
|
||||
popped_item
|
||||
}
|
||||
}
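
RingVec is a fixed-capacity ring buffer: once full, every push evicts the oldest element, so only the most recent `capacity` items survive. That is what lets the log helpers below cap memory while still keeping the tail of the output. A minimal sketch of the behavior (hypothetical, module-internal usage):

    let mut ring = RingVec::new(3);
    for i in 1..=5 {
        // push returns the evicted element once the buffer is full
        let evicted = ring.push(i);
        assert_eq!(evicted, if i > 3 { Some(i - 3) } else { None });
    }
    // only the last 3 items remain
    assert_eq!(ring.value.iter().copied().collect::<Vec<_>>(), vec![3, 4, 5]);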

async fn buf_reader_to_lines(
reader: impl AsyncBufRead + Unpin,
limit: impl Into<Option<usize>>,
) -> Result<Vec<String>, Error> {
let lines = stream! {
let mut lines = reader.lines();
while let Some(line) = lines.next_line().await? {
yield Ok::<_, Report>(line);
}
};
let output: RingVec<String> = lines
.try_fold(
RingVec::new(limit.into().unwrap_or(1000)),
|mut acc, line| async move {
acc.push(line);
Ok(acc)
},
)
.await
.with_kind(crate::ErrorKind::Unknown)?;
let output: Vec<String> = output.value.into_iter().collect();
Ok(output)
}

enum MaxByLines {
Done(String),
Overflow(String),
Error(Error),
}

async fn max_by_lines(
reader: impl AsyncBufRead + Unpin,
max_items: impl Into<Option<usize>>,
) -> MaxByLines {
let mut answer = String::new();

let mut lines = reader.lines();
let mut has_over_blown = false;
let max_items = max_items.into().unwrap_or(10_000_000);

while let Some(line) = {
match lines.next_line().await {
Ok(a) => a,
Err(e) => return MaxByLines::Error(e.into()),
}
} {
if has_over_blown {
continue;
}
if !answer.is_empty() {
answer.push('\n');
}
answer.push_str(&line);
if answer.len() >= max_items {
has_over_blown = true;
tracing::warn!("Reading the buffer exceeding limits of {}", max_items);
}
}
if has_over_blown {
return MaxByLines::Overflow(answer);
}
MaxByLines::Done(answer)
}
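
max_by_lines concatenates lines until the accumulated string reaches max_items bytes; past that point it keeps draining the reader but stops appending, so the caller gets Overflow with the truncated prefix rather than an unbounded buffer, and any read failure short-circuits as Error. A sketch of the contract, assuming a tokio BufReader over an in-memory slice inside an async fn (these helpers are private to docker.rs, so this is illustrative only):

    use tokio::io::BufReader;

    let reader = BufReader::new(&b"alpha\nbeta\ngamma\n"[..]);
    match max_by_lines(reader, Some(8)).await {
        MaxByLines::Done(s) => println!("under the cap: {}", s),
        // "alpha\nbeta" is already 10 bytes >= 8, so "gamma" is dropped
        MaxByLines::Overflow(s) => println!("truncated to: {}", s),
        MaxByLines::Error(e) => eprintln!("read failed: {}", e),
    }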
#[cfg(test)]
mod tests {
use super::*;
/// Note: this is the requested capacity, not necessarily what VecDeque allocates.
/// VecDeque rounds its allocation up to the next bucket (e.g. up to 7 stays 7,
/// 8 through 15 becomes 15); that is just how with_capacity works.
const CAPACITY_IN: usize = 7;
#[test]
fn default_capacity_is_set() {
let ring: RingVec<usize> = RingVec::new(CAPACITY_IN);
assert_eq!(CAPACITY_IN, ring.value.capacity());
assert_eq!(0, ring.value.len());
}
#[test]
fn capacity_can_not_be_exceeded() {
let mut ring = RingVec::new(CAPACITY_IN);
for i in 1..100usize {
ring.push(i);
}
assert_eq!(CAPACITY_IN, ring.value.capacity());
assert_eq!(CAPACITY_IN, ring.value.len());
}
}
287
backend/src/procedure/js_scripts.rs
Normal file
@@ -0,0 +1,287 @@
use std::{
path::{Path, PathBuf},
time::Duration,
};

use models::VolumeId;
use serde::{Deserialize, Serialize};
use tracing::instrument;

use crate::{
context::RpcContext, s9pk::manifest::PackageId, util::Version, volume::Volumes, Error,
};

use js_engine::{JsExecutionEnvironment, PathForVolumeId};

use super::ProcedureName;

pub use js_engine::JsError;

#[derive(Debug, Serialize, Deserialize, Clone)]
#[serde(rename_all = "kebab-case")]
enum ErrorValue {
Error(String),
ErrorCode((i32, String)),
Result(serde_json::Value),
}

impl PathForVolumeId for Volumes {
fn path_for(
&self,
data_dir: &Path,
package_id: &PackageId,
version: &Version,
volume_id: &VolumeId,
) -> Option<PathBuf> {
let volume = self.get(volume_id)?;
Some(volume.path_for(data_dir, package_id, version, volume_id))
}

fn readonly(&self, volume_id: &VolumeId) -> bool {
self.get(volume_id).map(|x| x.readonly()).unwrap_or(false)
}
}

#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct JsProcedure {}

impl JsProcedure {
pub fn validate(&self, _volumes: &Volumes) -> Result<(), color_eyre::eyre::Report> {
Ok(())
}

#[instrument(skip(directory, input))]
pub async fn execute<I: Serialize, O: for<'de> Deserialize<'de>>(
&self,
directory: &PathBuf,
pkg_id: &PackageId,
pkg_version: &Version,
name: ProcedureName,
volumes: &Volumes,
input: Option<I>,
timeout: Option<Duration>,
) -> Result<Result<O, (i32, String)>, Error> {
Ok(async move {
let running_action = JsExecutionEnvironment::load_from_package(
directory,
pkg_id,
pkg_version,
Box::new(volumes.clone()),
)
.await?
.run_action(name, input);
let output: ErrorValue = match timeout {
Some(timeout_duration) => tokio::time::timeout(timeout_duration, running_action)
.await
.map_err(|_| (JsError::Timeout, "Timed out. Retrying soon...".to_owned()))??,
None => running_action.await?,
};
let output: O = unwrap_known_error(output)?;
Ok(output)
}
.await
.map_err(|(error, message)| (error.as_code_num(), message)))
}

#[instrument(skip(ctx, input))]
pub async fn sandboxed<I: Serialize, O: for<'de> Deserialize<'de>>(
&self,
ctx: &RpcContext,
pkg_id: &PackageId,
pkg_version: &Version,
volumes: &Volumes,
input: Option<I>,
timeout: Option<Duration>,
name: ProcedureName,
) -> Result<Result<O, (i32, String)>, Error> {
Ok(async move {
let running_action = JsExecutionEnvironment::load_from_package(
&ctx.datadir,
pkg_id,
pkg_version,
Box::new(volumes.clone()),
)
.await?
.read_only_effects()
.run_action(name, input);
let output: ErrorValue = match timeout {
Some(timeout_duration) => tokio::time::timeout(timeout_duration, running_action)
.await
.map_err(|_| (JsError::Timeout, "Timed out. Retrying soon...".to_owned()))??,
None => running_action.await?,
};
let output: O = unwrap_known_error(output)?;
Ok(output)
}
.await
.map_err(|(error, message)| (error.as_code_num(), message)))
}
}

fn unwrap_known_error<O: for<'de> Deserialize<'de>>(
error_value: ErrorValue,
) -> Result<O, (JsError, String)> {
match error_value {
ErrorValue::Error(error) => Err((JsError::Javascript, error)),
ErrorValue::ErrorCode((code, message)) => Err((JsError::Code(code), message)),
ErrorValue::Result(ref value) => match serde_json::from_value(value.clone()) {
Ok(a) => Ok(a),
Err(err) => {
tracing::error!("{}", err);
tracing::debug!("{:?}", err);
Err((
JsError::BoundryLayerSerDe,
format!(
"Couldn't convert output = {:#?} to the correct type",
serde_json::to_string_pretty(&error_value).unwrap_or_default()
),
))
}
},
}
}
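
For reference, with `#[serde(rename_all = "kebab-case")]` and serde's default externally tagged enum encoding, the JS runtime is expected to hand back one of three JSON shapes. This is an illustration inferred from the derive attributes, not a documented wire format:

    // {"error": "something threw"}        -> Err((JsError::Javascript, ...))
    // {"error-code": [2, "Not setup"]}    -> Err((JsError::Code(2), ...))
    // {"result": <any json>}              -> Ok(deserialized O)
    let v: ErrorValue =
        serde_json::from_str(r#"{"error-code": [2, "Not setup"]}"#).unwrap();

The js_action_execute_error test below relies on exactly the second shape.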

#[tokio::test]
async fn js_action_execute() {
let js_action = JsProcedure {};
let path: PathBuf = "test/js_action_execute/"
.parse::<PathBuf>()
.unwrap()
.canonicalize()
.unwrap();
let package_id = "test-package".parse().unwrap();
let package_version: Version = "0.3.0.3".parse().unwrap();
let name = ProcedureName::GetConfig;
let volumes: Volumes = serde_json::from_value(serde_json::json!({
"main": {
"type": "data"
},
"compat": {
"type": "assets"
},
"filebrowser" :{
"package-id": "filebrowser",
"path": "data",
"readonly": true,
"type": "pointer",
"volume-id": "main",
}
}))
.unwrap();
let input: Option<serde_json::Value> = Some(serde_json::json!({"test":123}));
let timeout = Some(Duration::from_secs(10));
let _output: crate::config::action::ConfigRes = js_action
.execute(
&path,
&package_id,
&package_version,
name,
&volumes,
input,
timeout,
)
.await
.unwrap()
.unwrap();
assert_eq!(
&std::fs::read_to_string(
"test/js_action_execute/package-data/volumes/test-package/data/main/test.log"
)
.unwrap(),
"This is a test"
);
std::fs::remove_file(
"test/js_action_execute/package-data/volumes/test-package/data/main/test.log",
)
.unwrap();
}

#[tokio::test]
async fn js_action_execute_error() {
let js_action = JsProcedure {};
let path: PathBuf = "test/js_action_execute/"
.parse::<PathBuf>()
.unwrap()
.canonicalize()
.unwrap();
let package_id = "test-package".parse().unwrap();
let package_version: Version = "0.3.0.3".parse().unwrap();
let name = ProcedureName::SetConfig;
let volumes: Volumes = serde_json::from_value(serde_json::json!({
"main": {
"type": "data"
},
"compat": {
"type": "assets"
},
"filebrowser" :{
"package-id": "filebrowser",
"path": "data",
"readonly": true,
"type": "pointer",
"volume-id": "main",
}
}))
.unwrap();
let input: Option<serde_json::Value> = None;
let timeout = Some(Duration::from_secs(10));
let output: Result<serde_json::Value, _> = js_action
.execute(
&path,
&package_id,
&package_version,
name,
&volumes,
input,
timeout,
)
.await
.unwrap();
assert_eq!("Err((2, \"Not setup\"))", &format!("{:?}", output));
}

#[tokio::test]
async fn js_action_fetch() {
let js_action = JsProcedure {};
let path: PathBuf = "test/js_action_execute/"
.parse::<PathBuf>()
.unwrap()
.canonicalize()
.unwrap();
let package_id = "test-package".parse().unwrap();
let package_version: Version = "0.3.0.3".parse().unwrap();
let name = ProcedureName::Action("fetch".parse().unwrap());
let volumes: Volumes = serde_json::from_value(serde_json::json!({
"main": {
"type": "data"
},
"compat": {
"type": "assets"
},
"filebrowser" :{
"package-id": "filebrowser",
"path": "data",
"readonly": true,
"type": "pointer",
"volume-id": "main",
}
}))
.unwrap();
let input: Option<serde_json::Value> = None;
let timeout = Some(Duration::from_secs(10));
js_action
.execute::<serde_json::Value, serde_json::Value>(
&path,
&package_id,
&package_version,
name,
&volumes,
input,
timeout,
)
.await
.unwrap()
.unwrap();
}
146
backend/src/procedure/mod.rs
Normal file
@@ -0,0 +1,146 @@
use std::collections::BTreeSet;
use std::time::Duration;

use patch_db::HasModel;
use serde::{Deserialize, Serialize};
use tracing::instrument;

use self::docker::DockerProcedure;
use crate::context::RpcContext;
use crate::id::ImageId;
use crate::s9pk::manifest::PackageId;
use crate::util::Version;
use crate::volume::Volumes;
use crate::Error;

pub mod docker;
#[cfg(feature = "js_engine")]
pub mod js_scripts;
pub use models::ProcedureName;

// TODO: create RPC endpoint that looks up the appropriate action and calls `execute`

#[derive(Clone, Debug, Deserialize, Serialize, HasModel)]
#[serde(rename_all = "kebab-case")]
#[serde(tag = "type")]
pub enum PackageProcedure {
Docker(DockerProcedure),

#[cfg(feature = "js_engine")]
Script(js_scripts::JsProcedure),
}
impl PackageProcedure {
pub fn is_script(&self) -> bool {
match self {
#[cfg(feature = "js_engine")]
Self::Script(_) => true,
_ => false,
}
}
#[instrument]
pub fn validate(
&self,
volumes: &Volumes,
image_ids: &BTreeSet<ImageId>,
expected_io: bool,
) -> Result<(), color_eyre::eyre::Report> {
match self {
PackageProcedure::Docker(action) => action.validate(volumes, image_ids, expected_io),

#[cfg(feature = "js_engine")]
PackageProcedure::Script(action) => action.validate(volumes),
}
}

#[instrument(skip(ctx, input))]
pub async fn execute<I: Serialize, O: for<'de> Deserialize<'de>>(
&self,
ctx: &RpcContext,
pkg_id: &PackageId,
pkg_version: &Version,
name: ProcedureName,
volumes: &Volumes,
input: Option<I>,
allow_inject: bool,
timeout: Option<Duration>,
) -> Result<Result<O, (i32, String)>, Error> {
tracing::trace!("Procedure execute {} {} - {:?}", self, pkg_id, name);
match self {
PackageProcedure::Docker(procedure) => {
procedure
.execute(
ctx,
pkg_id,
pkg_version,
name,
volumes,
input,
allow_inject,
timeout,
)
.await
}
#[cfg(feature = "js_engine")]
PackageProcedure::Script(procedure) => {
procedure
.execute(
&ctx.datadir,
pkg_id,
pkg_version,
name,
volumes,
input,
timeout,
)
.await
}
}
}
#[instrument(skip(ctx, input))]
pub async fn sandboxed<I: Serialize, O: for<'de> Deserialize<'de>>(
&self,
ctx: &RpcContext,
pkg_id: &PackageId,
pkg_version: &Version,
volumes: &Volumes,
input: Option<I>,
timeout: Option<Duration>,
name: ProcedureName,
) -> Result<Result<O, (i32, String)>, Error> {
tracing::trace!("Procedure sandboxed {} {} - {:?}", self, pkg_id, name);
match self {
PackageProcedure::Docker(procedure) => {
procedure
.sandboxed(ctx, pkg_id, pkg_version, volumes, input, timeout)
.await
}
#[cfg(feature = "js_engine")]
PackageProcedure::Script(procedure) => {
procedure
.sandboxed(ctx, pkg_id, pkg_version, volumes, input, timeout, name)
.await
}
}
}
}

impl std::fmt::Display for PackageProcedure {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
PackageProcedure::Docker(_) => write!(f, "Docker")?,
#[cfg(feature = "js_engine")]
PackageProcedure::Script(_) => write!(f, "JS")?,
}
Ok(())
}
}
#[derive(Debug)]
pub struct NoOutput;
impl<'de> Deserialize<'de> for NoOutput {
fn deserialize<D>(_: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
Ok(NoOutput)
}
}
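
NoOutput is a sink type: its Deserialize impl never touches the deserializer, so any procedure output, or none at all, deserializes successfully when the caller does not care about the result. A minimal sketch (hypothetical usage):

    let _: NoOutput = serde_json::from_value(serde_json::json!({"anything": "at all"})).unwrap();
    let _: NoOutput = serde_json::from_value(serde_json::Value::Null).unwrap();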

@@ -5,6 +5,7 @@ use serde_json::Value;
use tracing::instrument;

use crate::context::RpcContext;
use crate::procedure::ProcedureName;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::{Error, ErrorKind};

@@ -35,7 +36,7 @@ pub async fn fetch_properties(ctx: RpcContext, id: PackageId) -> Result<Value, E
&ctx,
&manifest.id,
&manifest.version,
Some(&format!("Properties-{}", rand::random::<u64>())),
ProcedureName::Properties,
&manifest.volumes,
None,
false,

@@ -1,25 +1,25 @@
use std::io::{Read, Seek, SeekFrom, Write};

use digest::Digest;
use sha2::Sha512;
use sha2_old::{Digest, Sha512};
use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt, SeekFrom};
use tracing::instrument;
use typed_builder::TypedBuilder;

use super::header::{FileSection, Header};
use super::manifest::Manifest;
use super::SIG_CONTEXT;
use crate::util::io::to_cbor_async_writer;
use crate::util::HashWriter;
use crate::{Error, ResultExt};

#[derive(TypedBuilder)]
pub struct S9pkPacker<
'a,
W: Write + Seek,
RLicense: Read,
RInstructions: Read,
RIcon: Read,
RDockerImages: Read,
RAssets: Read,
W: AsyncWriteExt + AsyncSeekExt,
RLicense: AsyncReadExt + Unpin,
RInstructions: AsyncReadExt + Unpin,
RIcon: AsyncReadExt + Unpin,
RDockerImages: AsyncReadExt + Unpin,
RAssets: AsyncReadExt + Unpin,
RScripts: AsyncReadExt + Unpin,
> {
writer: W,
manifest: &'a Manifest,
@@ -28,102 +28,117 @@ pub struct S9pkPacker<
icon: RIcon,
docker_images: RDockerImages,
assets: RAssets,
scripts: Option<RScripts>,
}
impl<
'a,
W: Write + Seek,
RLicense: Read,
RInstructions: Read,
RIcon: Read,
RDockerImages: Read,
RAssets: Read,
> S9pkPacker<'a, W, RLicense, RInstructions, RIcon, RDockerImages, RAssets>
W: AsyncWriteExt + AsyncSeekExt + Unpin,
RLicense: AsyncReadExt + Unpin,
RInstructions: AsyncReadExt + Unpin,
RIcon: AsyncReadExt + Unpin,
RDockerImages: AsyncReadExt + Unpin,
RAssets: AsyncReadExt + Unpin,
RScripts: AsyncReadExt + Unpin,
> S9pkPacker<'a, W, RLicense, RInstructions, RIcon, RDockerImages, RAssets, RScripts>
{
/// BLOCKING
#[instrument(skip(self))]
pub fn pack(mut self, key: &ed25519_dalek::Keypair) -> Result<(), Error> {
let header_pos = self.writer.stream_position()?;
pub async fn pack(mut self, key: &ed25519_dalek::Keypair) -> Result<(), Error> {
let header_pos = self.writer.stream_position().await?;
if header_pos != 0 {
tracing::warn!("Appending to non-empty file.");
}
let mut header = Header::placeholder();
header.serialize(&mut self.writer).with_ctx(|_| {
header.serialize(&mut self.writer).await.with_ctx(|_| {
(
crate::ErrorKind::Serialization,
"Writing Placeholder Header",
)
})?;
let mut position = self.writer.stream_position()?;
let mut position = self.writer.stream_position().await?;

let mut writer = HashWriter::new(Sha512::new(), &mut self.writer);
// manifest
serde_cbor::ser::into_writer(self.manifest, &mut writer).with_ctx(|_| {
(
crate::ErrorKind::Serialization,
"Serializing Manifest (CBOR)",
)
})?;
let new_pos = writer.inner_mut().stream_position()?;
to_cbor_async_writer(&mut writer, self.manifest).await?;
let new_pos = writer.inner_mut().stream_position().await?;
header.table_of_contents.manifest = FileSection {
position,
length: new_pos - position,
};
position = new_pos;
// license
std::io::copy(&mut self.license, &mut writer)
tokio::io::copy(&mut self.license, &mut writer)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, "Copying License"))?;
let new_pos = writer.inner_mut().stream_position()?;
let new_pos = writer.inner_mut().stream_position().await?;
header.table_of_contents.license = FileSection {
position,
length: new_pos - position,
};
position = new_pos;
// instructions
std::io::copy(&mut self.instructions, &mut writer)
tokio::io::copy(&mut self.instructions, &mut writer)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, "Copying Instructions"))?;
let new_pos = writer.inner_mut().stream_position()?;
let new_pos = writer.inner_mut().stream_position().await?;
header.table_of_contents.instructions = FileSection {
position,
length: new_pos - position,
};
position = new_pos;
// icon
std::io::copy(&mut self.icon, &mut writer)
tokio::io::copy(&mut self.icon, &mut writer)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, "Copying Icon"))?;
let new_pos = writer.inner_mut().stream_position()?;
let new_pos = writer.inner_mut().stream_position().await?;
header.table_of_contents.icon = FileSection {
position,
length: new_pos - position,
};
position = new_pos;
// docker_images
std::io::copy(&mut self.docker_images, &mut writer)
tokio::io::copy(&mut self.docker_images, &mut writer)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, "Copying Docker Images"))?;
let new_pos = writer.inner_mut().stream_position()?;
let new_pos = writer.inner_mut().stream_position().await?;
header.table_of_contents.docker_images = FileSection {
position,
length: new_pos - position,
};
position = new_pos;
// assets
std::io::copy(&mut self.assets, &mut writer)
tokio::io::copy(&mut self.assets, &mut writer)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, "Copying Assets"))?;
let new_pos = writer.inner_mut().stream_position()?;
let new_pos = writer.inner_mut().stream_position().await?;
header.table_of_contents.assets = FileSection {
position,
length: new_pos - position,
};
position = new_pos;
// scripts
if let Some(mut scripts) = self.scripts {
tokio::io::copy(&mut scripts, &mut writer)
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, "Copying Scripts"))?;
let new_pos = writer.inner_mut().stream_position().await?;
header.table_of_contents.scripts = Some(FileSection {
position,
length: new_pos - position,
});
position = new_pos;
}

// header
let (hash, _) = writer.finish();
self.writer.seek(SeekFrom::Start(header_pos))?;
self.writer.seek(SeekFrom::Start(header_pos)).await?;
header.pubkey = key.public.clone();
header.signature = key.sign_prehashed(hash, Some(SIG_CONTEXT))?;
header
.serialize(&mut self.writer)
.await
.with_ctx(|_| (crate::ErrorKind::Serialization, "Writing Header"))?;
self.writer.seek(SeekFrom::Start(position))?;
self.writer.seek(SeekFrom::Start(position)).await?;

Ok(())
}

@@ -1,9 +1,8 @@
use std::collections::BTreeMap;
use std::io::Write;

use color_eyre::eyre::eyre;
use ed25519_dalek::{PublicKey, Signature};
use tokio::io::{AsyncRead, AsyncReadExt};
use tokio::io::{AsyncRead, AsyncReadExt, AsyncWriteExt};

use crate::Error;

@@ -20,17 +19,17 @@ impl Header {
pub fn placeholder() -> Self {
Header {
pubkey: PublicKey::default(),
signature: Signature::new([0; 64]),
signature: Signature::from_bytes(&[0; 64]).expect("Invalid ed25519 signature"),
table_of_contents: Default::default(),
}
}
// MUST BE SAME SIZE REGARDLESS OF DATA
pub fn serialize<W: Write>(&self, mut writer: W) -> std::io::Result<()> {
writer.write_all(&MAGIC)?;
writer.write_all(&[VERSION])?;
writer.write_all(self.pubkey.as_bytes())?;
writer.write_all(self.signature.as_ref())?;
self.table_of_contents.serialize(writer)?;
pub async fn serialize<W: AsyncWriteExt + Unpin>(&self, mut writer: W) -> std::io::Result<()> {
writer.write_all(&MAGIC).await?;
writer.write_all(&[VERSION]).await?;
writer.write_all(self.pubkey.as_bytes()).await?;
writer.write_all(self.signature.as_ref()).await?;
self.table_of_contents.serialize(writer).await?;
Ok(())
}
pub async fn deserialize<R: AsyncRead + Unpin>(mut reader: R) -> Result<Self, Error> {
@@ -38,7 +37,7 @@ impl Header {
reader.read_exact(&mut magic).await?;
if magic != MAGIC {
return Err(Error::new(
eyre!("Incorrect Magic"),
eyre!("Incorrect Magic: {:?}", magic),
crate::ErrorKind::ParseS9pk,
));
}
@@ -46,7 +45,7 @@ impl Header {
reader.read_exact(&mut version).await?;
if version[0] != VERSION {
return Err(Error::new(
eyre!("Unknown Version"),
eyre!("Unknown Version: {}", version[0]),
crate::ErrorKind::ParseS9pk,
));
}
@@ -56,7 +55,7 @@ impl Header {
.map_err(|e| Error::new(e, crate::ErrorKind::ParseS9pk))?;
let mut sig_bytes = [0; 64];
reader.read_exact(&mut sig_bytes).await?;
let signature = Signature::new(sig_bytes);
let signature = Signature::from_bytes(&sig_bytes).expect("Invalid ed25519 signature");
let table_of_contents = TableOfContents::deserialize(reader).await?;

Ok(Header {
@@ -75,24 +74,34 @@ pub struct TableOfContents {
pub icon: FileSection,
pub docker_images: FileSection,
pub assets: FileSection,
pub scripts: Option<FileSection>,
}
impl TableOfContents {
pub fn serialize<W: Write>(&self, mut writer: W) -> std::io::Result<()> {
pub async fn serialize<W: AsyncWriteExt + Unpin>(&self, mut writer: W) -> std::io::Result<()> {
let len: u32 = ((1 + "manifest".len() + 16)
+ (1 + "license".len() + 16)
+ (1 + "instructions".len() + 16)
+ (1 + "icon".len() + 16)
+ (1 + "docker_images".len() + 16)
+ (1 + "assets".len() + 16)) as u32;
writer.write_all(&u32::to_be_bytes(len))?;
self.manifest.serialize_entry("manifest", &mut writer)?;
self.license.serialize_entry("license", &mut writer)?;
+ (1 + "assets".len() + 16)
+ (1 + "scripts".len() + 16)) as u32;
writer.write_all(&u32::to_be_bytes(len)).await?;
self.manifest
.serialize_entry("manifest", &mut writer)
.await?;
self.license.serialize_entry("license", &mut writer).await?;
self.instructions
.serialize_entry("instructions", &mut writer)?;
self.icon.serialize_entry("icon", &mut writer)?;
.serialize_entry("instructions", &mut writer)
.await?;
self.icon.serialize_entry("icon", &mut writer).await?;
self.docker_images
.serialize_entry("docker_images", &mut writer)?;
self.assets.serialize_entry("assets", &mut writer)?;
.serialize_entry("docker_images", &mut writer)
.await?;
self.assets.serialize_entry("assets", &mut writer).await?;
self.scripts
.unwrap_or_default()
.serialize_entry("scripts", &mut writer)
.await?;
Ok(())
}
pub async fn deserialize<R: AsyncRead + Unpin>(mut reader: R) -> std::io::Result<Self> {
@@ -131,6 +140,7 @@ impl TableOfContents {
icon: from_table(&table, "icon")?,
docker_images: from_table(&table, "docker_images")?,
assets: from_table(&table, "assets")?,
scripts: table.get("scripts".as_bytes()).cloned(),
})
}
}
@@ -141,11 +151,15 @@ pub struct FileSection {
pub length: u64,
}
impl FileSection {
pub fn serialize_entry<W: Write>(self, label: &str, mut writer: W) -> std::io::Result<()> {
writer.write_all(&[label.len() as u8])?;
writer.write_all(label.as_bytes())?;
writer.write_all(&u64::to_be_bytes(self.position))?;
writer.write_all(&u64::to_be_bytes(self.length))?;
pub async fn serialize_entry<W: AsyncWriteExt + Unpin>(
self,
label: &str,
mut writer: W,
) -> std::io::Result<()> {
writer.write_all(&[label.len() as u8]).await?;
writer.write_all(label.as_bytes()).await?;
writer.write_all(&u64::to_be_bytes(self.position)).await?;
writer.write_all(&u64::to_be_bytes(self.length)).await?;
Ok(())
}
pub async fn deserialize_entry<R: AsyncRead + Unpin>(

@@ -1,104 +1,24 @@
use std::borrow::Borrow;
use std::path::{Path, PathBuf};
use std::str::FromStr;

use color_eyre::eyre::eyre;
use patch_db::HasModel;
use serde::{Deserialize, Serialize, Serializer};
use serde::{Deserialize, Serialize};
use url::Url;

use crate::action::{ActionImplementation, Actions};
use crate::action::Actions;
use crate::backup::BackupActions;
use crate::config::action::ConfigActions;
use crate::dependencies::Dependencies;
use crate::id::{Id, InvalidId, SYSTEM_ID};
use crate::migration::Migrations;
use crate::net::interface::Interfaces;
use crate::procedure::PackageProcedure;
use crate::status::health_check::HealthChecks;
use crate::util::Version;
use crate::version::{Current, VersionT};
use crate::volume::Volumes;
use crate::Error;

pub const SYSTEM_PACKAGE_ID: PackageId<&'static str> = PackageId(SYSTEM_ID);

#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct PackageId<S: AsRef<str> = String>(Id<S>);
impl<'a> PackageId<&'a str> {
pub fn owned(&self) -> PackageId {
PackageId(self.0.owned())
}
}
impl FromStr for PackageId {
type Err = InvalidId;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(PackageId(Id::try_from(s.to_owned())?))
}
}
impl From<PackageId> for String {
fn from(value: PackageId) -> Self {
value.0.into()
}
}
impl<S: AsRef<str>> From<Id<S>> for PackageId<S> {
fn from(id: Id<S>) -> Self {
PackageId(id)
}
}
impl<S: AsRef<str>> std::ops::Deref for PackageId<S> {
type Target = S;
fn deref(&self) -> &Self::Target {
&*self.0
}
}
impl<S: AsRef<str>> AsRef<PackageId<S>> for PackageId<S> {
fn as_ref(&self) -> &PackageId<S> {
self
}
}
impl<S: AsRef<str>> std::fmt::Display for PackageId<S> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", &self.0)
}
}
impl<S: AsRef<str>> AsRef<str> for PackageId<S> {
fn as_ref(&self) -> &str {
self.0.as_ref()
}
}
impl<S: AsRef<str>> Borrow<str> for PackageId<S> {
fn borrow(&self) -> &str {
self.0.as_ref()
}
}
impl<S: AsRef<str>> AsRef<Path> for PackageId<S> {
fn as_ref(&self) -> &Path {
self.0.as_ref().as_ref()
}
}
impl<'de, S> Deserialize<'de> for PackageId<S>
where
S: AsRef<str>,
Id<S>: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
{
Ok(PackageId(Deserialize::deserialize(deserializer)?))
}
}
impl<S> Serialize for PackageId<S>
where
S: AsRef<str>,
{
fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
where
Ser: Serializer,
{
Serialize::serialize(&self.0, serializer)
}
}
pub use models::{PackageId, SYSTEM_PACKAGE_ID};

fn current_version() -> Version {
Current::new().semver().into()
@@ -128,12 +48,12 @@ pub struct Manifest {
#[serde(default)]
pub alerts: Alerts,
#[model]
pub main: ActionImplementation,
pub main: PackageProcedure,
pub health_checks: HealthChecks,
#[model]
pub config: Option<ConfigActions>,
#[model]
pub properties: Option<ActionImplementation>,
pub properties: Option<PackageProcedure>,
#[model]
pub volumes: Volumes,
// #[serde(default)]
@@ -153,6 +73,29 @@ pub struct Manifest {
pub dependencies: Dependencies,
}

impl Manifest {
pub fn package_procedures(&self) -> impl Iterator<Item = &PackageProcedure> {
use std::iter::once;
let main = once(&self.main);
let cfg_get = self.config.as_ref().map(|a| &a.get).into_iter();
let cfg_set = self.config.as_ref().map(|a| &a.set).into_iter();
let props = self.properties.iter();
let backups = vec![&self.backup.create, &self.backup.restore].into_iter();
let migrations = self
.migrations
.to
.values()
.chain(self.migrations.from.values());
let actions = self.actions.0.values().map(|a| &a.implementation);
main.chain(cfg_get)
.chain(cfg_set)
.chain(props)
.chain(backups)
.chain(migrations)
.chain(actions)
}
}
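
package_procedures flattens every procedure a manifest can declare (main, config get/set, properties, backup create/restore, migrations, actions) into one iterator, so later code can ask a single question about the whole package. The pack command further down uses it for exactly that:

    // whether an embassy.js must ship with the package
    let needs_script = manifest.package_procedures().any(|p| p.is_script());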

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct Assets {
@@ -166,6 +109,8 @@ pub struct Assets {
pub docker_images: Option<PathBuf>,
#[serde(default)]
pub assets: Option<PathBuf>,
#[serde(default)]
pub scripts: Option<PathBuf>,
}
impl Assets {
pub fn license_path(&self) -> &Path {
@@ -205,6 +150,12 @@ impl Assets {
.map(|a| a.as_path())
.unwrap_or(Path::new("assets"))
}
pub fn scripts_path(&self) -> &Path {
self.scripts
.as_ref()
.map(|a| a.as_path())
.unwrap_or(Path::new("scripts"))
}
}

#[derive(Clone, Debug, Deserialize, Serialize)]

@@ -13,7 +13,7 @@ use crate::s9pk::reader::S9pkReader;
use crate::util::display_none;
use crate::util::serde::IoFormat;
use crate::volume::Volume;
use crate::{Error, ResultExt};
use crate::{Error, ErrorKind, ResultExt};

pub mod builder;
pub mod header;
@@ -22,11 +22,10 @@ pub mod reader;

pub const SIG_CONTEXT: &'static [u8] = b"s9pk";

#[command(cli_only, display(display_none), blocking)]
#[command(cli_only, display(display_none))]
#[instrument(skip(ctx))]
pub fn pack(#[context] ctx: SdkContext, #[arg] path: Option<PathBuf>) -> Result<(), Error> {
use std::fs::File;
use std::io::Read;
pub async fn pack(#[context] ctx: SdkContext, #[arg] path: Option<PathBuf>) -> Result<(), Error> {
use tokio::fs::File;

let path = if let Some(path) = path {
path
@@ -34,11 +33,17 @@ pub fn pack(#[context] ctx: SdkContext, #[arg] path: Option<PathBuf>) -> Result<
std::env::current_dir()?
};
let manifest_value: Value = if path.join("manifest.toml").exists() {
IoFormat::Toml.from_reader(File::open(path.join("manifest.toml"))?)?
IoFormat::Toml
.from_async_reader(File::open(path.join("manifest.toml")).await?)
.await?
} else if path.join("manifest.yaml").exists() {
IoFormat::Yaml.from_reader(File::open(path.join("manifest.yaml"))?)?
IoFormat::Yaml
.from_async_reader(File::open(path.join("manifest.yaml")).await?)
.await?
} else if path.join("manifest.json").exists() {
IoFormat::Json.from_reader(File::open(path.join("manifest.json"))?)?
IoFormat::Json
.from_async_reader(File::open(path.join("manifest.json")).await?)
.await?
} else {
return Err(Error::new(
eyre!("manifest not found"),
@@ -54,61 +59,88 @@ pub fn pack(#[context] ctx: SdkContext, #[arg] path: Option<PathBuf>) -> Result<
}

let outfile_path = path.join(format!("{}.s9pk", manifest.id));
let mut outfile = File::create(outfile_path)?;
let mut outfile = File::create(outfile_path).await?;
S9pkPacker::builder()
.manifest(&manifest)
.writer(&mut outfile)
.license(
File::open(path.join(manifest.assets.license_path())).with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
manifest.assets.license_path().display().to_string(),
)
})?,
File::open(path.join(manifest.assets.license_path()))
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
manifest.assets.license_path().display().to_string(),
)
})?,
)
.icon(
File::open(path.join(manifest.assets.icon_path())).with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
manifest.assets.icon_path().display().to_string(),
)
})?,
File::open(path.join(manifest.assets.icon_path()))
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
manifest.assets.icon_path().display().to_string(),
)
})?,
)
.instructions(
File::open(path.join(manifest.assets.instructions_path())).with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
manifest.assets.instructions_path().display().to_string(),
)
})?,
File::open(path.join(manifest.assets.instructions_path()))
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
manifest.assets.instructions_path().display().to_string(),
)
})?,
)
.docker_images(
File::open(path.join(manifest.assets.docker_images_path())).with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
manifest.assets.docker_images_path().display().to_string(),
)
})?,
File::open(path.join(manifest.assets.docker_images_path()))
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
manifest.assets.docker_images_path().display().to_string(),
)
})?,
)
.assets({
let mut assets = tar::Builder::new(Vec::new()); // TODO: Ideally stream this? best not to buffer in memory
let mut assets = tokio_tar::Builder::new(Vec::new()); // TODO: Ideally stream this? best not to buffer in memory

for (asset_volume, _) in manifest
.volumes
.iter()
.filter(|(_, v)| matches!(v, &&Volume::Assets {}))
{
assets.append_dir_all(
asset_volume,
path.join(manifest.assets.assets_path()).join(asset_volume),
)?;
assets
.append_dir_all(
asset_volume,
path.join(manifest.assets.assets_path()).join(asset_volume),
)
.await?;
}

std::io::Cursor::new(assets.into_inner()?)
std::io::Cursor::new(assets.into_inner().await?)
})
.scripts({
let script_path = path.join(manifest.assets.scripts_path()).join("embassy.js");
let needs_script = manifest.package_procedures().any(|a| a.is_script());
let has_script = script_path.exists();
match (needs_script, has_script) {
(true, true) => Some(File::open(script_path).await?),
(true, false) => {
return Err(Error::new(eyre!("Script is declared in manifest, but no such script exists at ./scripts/embassy.js"), ErrorKind::Pack).into())
}
(false, true) => {
tracing::warn!("Manifest does not declare any actions that use scripts, but a script exists at ./scripts/embassy.js");
None
}
(false, false) => None
}
})
.build()
.pack(&ctx.developer_key()?)?;
outfile.sync_all()?;
.pack(&ctx.developer_key()?)
.await?;
outfile.sync_all().await?;

Ok(())
}

@@ -6,10 +6,10 @@ use std::str::FromStr;
use std::task::{Context, Poll};

use color_eyre::eyre::eyre;
use digest::Output;
use digest_old::Output;
use ed25519_dalek::PublicKey;
use futures::TryStreamExt;
use sha2::{Digest, Sha512};
use sha2_old::{Digest, Sha512};
use tokio::fs::File;
use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeek, AsyncSeekExt, ReadBuf, Take};
use tracing::instrument;
@@ -305,4 +305,11 @@ impl<R: AsyncRead + AsyncSeek + Unpin> S9pkReader<R> {
pub async fn assets<'a>(&'a mut self) -> Result<ReadHandle<'a, R>, Error> {
Ok(self.read_handle(self.toc.assets).await?)
}

pub async fn scripts<'a>(&'a mut self) -> Result<Option<ReadHandle<'a, R>>, Error> {
Ok(match self.toc.scripts {
None => None,
Some(a) => Some(self.read_handle(a).await?),
})
}
}

@@ -7,16 +7,17 @@ use std::time::Duration;

use color_eyre::eyre::eyre;
use digest::generic_array::GenericArray;
use digest::OutputSizeUser;
use futures::future::BoxFuture;
use futures::{FutureExt, TryFutureExt, TryStreamExt};
use nix::unistd::{Gid, Uid};
use openssl::x509::X509;
use patch_db::LockType;
use patch_db::{DbHandle, LockType};
use rpc_toolkit::command;
use rpc_toolkit::yajrc::RpcError;
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use sqlx::{Executor, Sqlite};
use sqlx::{Connection, Executor, Sqlite};
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use torut::onion::{OnionAddressV3, TorSecretKeyV3};
@@ -35,6 +36,7 @@ use crate::disk::mount::filesystem::cifs::Cifs;
use crate::disk::mount::filesystem::ReadOnly;
use crate::disk::mount::guard::TmpMountGuard;
use crate::disk::util::{pvscan, recovery_info, DiskListResponse, EmbassyOsRecoveryInfo};
use crate::disk::REPAIR_DISK_PATH;
use crate::hostname::PRODUCT_KEY_PATH;
use crate::id::Id;
use crate::init::init;
@@ -75,9 +77,7 @@ pub struct StatusRes {
#[command(rpc_only, metadata(authenticated = false))]
pub async fn status(#[context] ctx: SetupContext) -> Result<StatusRes, Error> {
Ok(StatusRes {
product_key: tokio::fs::metadata("/embassy-os/product_key.txt")
.await
.is_ok(),
product_key: tokio::fs::metadata(PRODUCT_KEY_PATH).await.is_ok(),
migrating: ctx.recovery_status.read().await.is_some(),
})
}
@@ -96,38 +96,87 @@ pub async fn list_disks() -> Result<DiskListResponse, Error> {
pub async fn attach(
#[context] ctx: SetupContext,
#[arg] guid: Arc<String>,
#[arg(rename = "embassy-password")] password: Option<String>,
) -> Result<SetupResult, Error> {
crate::disk::main::import(
let requires_reboot = crate::disk::main::import(
&*guid,
&ctx.datadir,
RepairStrategy::Preen,
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
RepairStrategy::Aggressive
} else {
RepairStrategy::Preen
},
DEFAULT_PASSWORD,
)
.await?;
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
tokio::fs::remove_file(REPAIR_DISK_PATH)
.await
.with_ctx(|_| (ErrorKind::Filesystem, REPAIR_DISK_PATH))?;
}
if requires_reboot.0 {
crate::disk::main::export(&*guid, &ctx.datadir).await?;
return Err(Error::new(
eyre!(
"Errors were corrected with your disk, but the Embassy must be restarted in order to proceed"
),
ErrorKind::DiskManagement,
));
}
let product_key = ctx.product_key().await?;
let product_key_path = Path::new("/embassy-data/main/product_key.txt");
if tokio::fs::metadata(product_key_path).await.is_ok() {
let pkey = tokio::fs::read_to_string(product_key_path).await?;
if pkey.trim() != &*ctx.product_key().await? {
let pkey = Arc::new(
tokio::fs::read_to_string(product_key_path)
.await?
.trim()
.to_owned(),
);
if pkey != product_key {
crate::disk::main::export(&*guid, &ctx.datadir).await?;
return Err(Error::new(
eyre!("The EmbassyOS product key does not match the supplied drive"),
eyre!(
"The EmbassyOS product key does not match the supplied drive: {}",
pkey
),
ErrorKind::ProductKeyMismatch,
));
}
}
init(
&RpcContextConfig::load(ctx.config_path.as_ref()).await?,
&*ctx.product_key().await?,
&*product_key,
)
.await?;
let secrets = ctx.secret_store().await?;
let tor_key = crate::net::tor::os_key(&mut secrets.acquire().await?).await?;
let db = ctx.db(&secrets).await?;
let mut secrets_handle = secrets.acquire().await?;
let mut db_handle = db.handle();
let mut secrets_tx = secrets_handle.begin().await?;
let mut db_tx = db_handle.begin().await?;

if let Some(password) = password {
let set_password_receipt = crate::auth::SetPasswordReceipt::new(&mut db_tx).await?;
crate::auth::set_password(
&mut db_tx,
&set_password_receipt,
&mut secrets_tx,
&password,
)
.await?;
}

let tor_key = crate::net::tor::os_key(&mut secrets_tx).await?;

db_tx.commit(None).await?;
secrets_tx.commit().await?;

let (_, root_ca) = SslManager::init(secrets).await?.export_root_ca().await?;
let setup_result = SetupResult {
tor_address: format!("http://{}", tor_key.public().get_onion_address()),
lan_address: format!(
"https://embassy-{}.local",
crate::hostname::derive_id(&*ctx.product_key().await?)
crate::hostname::derive_id(&*product_key)
),
root_ca: String::from_utf8(root_ca.to_pem()?)?,
};
@@ -308,7 +357,7 @@ pub async fn execute_inner(
)
.await?,
);
crate::disk::main::import(
let _ = crate::disk::main::import(
&*guid,
&ctx.datadir,
RepairStrategy::Preen,
@@ -352,7 +401,7 @@ pub async fn execute_inner(
})
.await
{
BEETHOVEN.play().await.unwrap_or_default(); // ignore error in playing the song
(&BEETHOVEN).play().await.unwrap_or_default(); // ignore error in playing the song
tracing::error!("Error recovering drive!: {}", e);
tracing::debug!("{:?}", e);
*ctx.recovery_status.write().await = Some(Err(e.into()));
@@ -451,7 +500,7 @@ async fn recover(

async fn shasum(
path: impl AsRef<Path>,
) -> Result<GenericArray<u8, <Sha256 as Digest>::OutputSize>, Error> {
) -> Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error> {
use tokio::io::AsyncReadExt;

let mut rdr = tokio::fs::File::open(path).await?;

@@ -112,19 +112,23 @@ pub struct Song<Notes> {
tempo_qpm: u16,
note_sequence: Notes,
}
impl<'a, T: 'a> Song<T>
impl<'a, T> Song<T>
where
&'a T: IntoIterator<Item = &'a (Option<Note>, TimeSlice)>,
T: IntoIterator<Item = (Option<Note>, TimeSlice)> + Clone,
{
#[instrument(skip(self))]
pub async fn play(&'a self) -> Result<(), Error> {
pub async fn play(&self) -> Result<(), Error> {
#[cfg(feature = "sound")]
{
let mut sound = SoundInterface::lease().await?;
for (note, slice) in &self.note_sequence {
for (note, slice) in self.note_sequence.clone() {
match note {
None => tokio::time::sleep(slice.to_duration(self.tempo_qpm)).await,
Some(n) => sound.play_for_time_slice(self.tempo_qpm, n, slice).await?,
Some(n) => {
sound
.play_for_time_slice(self.tempo_qpm, &n, &slice)
.await?
}
};
}
sound.close().await?;
@@ -285,6 +289,7 @@ pub fn circle_of_fourths(note: &Note) -> impl Iterator<Item = Note> {
iterate(|n| interval(&FOURTH, n), note)
}

#[derive(Clone, Debug)]
pub struct CircleOf<'a> {
current: Note,
duration: TimeSlice,

@@ -12,7 +12,7 @@ use crate::util::display_none;
use crate::util::serde::{display_serializable, IoFormat};
use crate::{Error, ErrorKind};

static SSH_AUTHORIZED_KEYS_FILE: &str = "/root/.ssh/authorized_keys";
static SSH_AUTHORIZED_KEYS_FILE: &str = "/home/start9/.ssh/authorized_keys";

#[derive(Debug, serde::Deserialize, serde::Serialize)]
pub struct PubKey(

@@ -1,48 +1,19 @@
use std::collections::{BTreeMap, BTreeSet};
use std::path::Path;

use chrono::{DateTime, Utc};
use serde::{Deserialize, Deserializer, Serialize};
use serde::{Deserialize, Serialize};
use tracing::instrument;

use crate::action::{ActionImplementation, NoOutput};
use crate::context::RpcContext;
use crate::id::{Id, ImageId};
use crate::id::{ImageId};
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
use crate::util::serde::Duration;
use crate::util::Version;
use crate::volume::Volumes;
use crate::{Error, ResultExt};

#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize)]
pub struct HealthCheckId<S: AsRef<str> = String>(Id<S>);
impl<S: AsRef<str>> std::fmt::Display for HealthCheckId<S> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", &self.0)
}
}
impl<S: AsRef<str>> AsRef<str> for HealthCheckId<S> {
fn as_ref(&self) -> &str {
self.0.as_ref()
}
}
impl<'de, S> Deserialize<'de> for HealthCheckId<S>
where
S: AsRef<str>,
Id<S>: Deserialize<'de>,
{
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
Ok(HealthCheckId(Deserialize::deserialize(deserializer)?))
}
}
impl<S: AsRef<str>> AsRef<Path> for HealthCheckId<S> {
fn as_ref(&self) -> &Path {
self.0.as_ref().as_ref()
}
}
pub use models::HealthCheckId;

#[derive(Clone, Debug, Deserialize, Serialize)]
pub struct HealthChecks(pub BTreeMap<HealthCheckId, HealthCheck>);
@@ -89,7 +60,7 @@ pub struct HealthCheck {
pub name: String,
pub success_message: Option<String>,
#[serde(flatten)]
implementation: ActionImplementation,
implementation: PackageProcedure,
pub timeout: Option<Duration>,
}
impl HealthCheck {
@@ -109,7 +80,7 @@ impl HealthCheck {
ctx,
pkg_id,
pkg_version,
Some(&format!("{}Health", id)),
ProcedureName::Health(id.clone()),
volumes,
Some(Utc::now().signed_duration_since(started).num_milliseconds()),
true,

@@ -24,8 +24,11 @@ pub struct Status {
#[serde(rename_all = "kebab-case")]
pub enum MainStatus {
Stopped,
Restarting,
Stopping,
Starting,
Starting {
restarting: bool,
},
Running {
started: DateTime<Utc>,
health: BTreeMap<HealthCheckId, HealthCheckResult>,
@@ -38,25 +41,26 @@ pub enum MainStatus {
impl MainStatus {
pub fn running(&self) -> bool {
match self {
MainStatus::Starting
MainStatus::Starting { .. }
| MainStatus::Running { .. }
| MainStatus::BackingUp {
started: Some(_), ..
} => true,
MainStatus::Stopped
| MainStatus::Stopping
| MainStatus::Restarting
| MainStatus::BackingUp { started: None, .. } => false,
}
}
pub fn stop(&mut self) {
match self {
MainStatus::Starting | MainStatus::Running { .. } => {
MainStatus::Starting { .. } | MainStatus::Running { .. } => {
*self = MainStatus::Stopping;
}
MainStatus::BackingUp { started, .. } => {
*started = None;
}
MainStatus::Stopped | MainStatus::Stopping => (),
MainStatus::Stopped | MainStatus::Stopping | MainStatus::Restarting => (),
}
}
}
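
Starting is now a struct variant carrying a restarting flag, and the new Restarting state is deliberately treated as not running. A quick sketch of the resulting semantics:

    let mut s = MainStatus::Starting { restarting: true };
    assert!(s.running()); // starting counts as running
    s.stop();             // and stop() moves it to Stopping
    assert!(matches!(s, MainStatus::Stopping));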

@@ -30,7 +30,9 @@ use crate::disk::mount::filesystem::{FileSystem, ReadWrite};
use crate::disk::mount::guard::TmpMountGuard;
use crate::disk::BOOT_RW_PATH;
use crate::notifications::NotificationLevel;
use crate::sound::{BEP, UPDATE_FAILED_1, UPDATE_FAILED_2, UPDATE_FAILED_3, UPDATE_FAILED_4};
use crate::sound::{
CIRCLE_OF_5THS_SHORT, UPDATE_FAILED_1, UPDATE_FAILED_2, UPDATE_FAILED_3, UPDATE_FAILED_4,
};
use crate::update::latest_information::LatestInformation;
use crate::util::Invoke;
use crate::version::{Current, VersionT};
@@ -204,9 +206,10 @@ async fn maybe_do_update(
Ok(()) => {
status.updated = true;
status.save(&mut db).await.expect("could not save status");
BEP.play().await.expect("could not bep");
BEP.play().await.expect("could not bep");
BEP.play().await.expect("could not bep");
CIRCLE_OF_5THS_SHORT
.play()
.await
.expect("could not play sound");
}
Err(e) => {
status.save(&mut db).await.expect("could not save status");

56
backend/src/util/config.rs
Normal file
@@ -0,0 +1,56 @@
use std::fs::File;
use std::path::{Path, PathBuf};

use serde::Deserialize;
use serde_json::Value;

use crate::util::serde::IoFormat;
use crate::{Config, Error, ResultExt};

pub const CONFIG_PATH: &str = "/etc/embassy/config.yaml";
pub const CONFIG_PATH_LOCAL: &str = ".embassy/config.yaml";

pub fn local_config_path() -> Option<PathBuf> {
if let Ok(home) = std::env::var("HOME") {
Some(Path::new(&home).join(CONFIG_PATH_LOCAL))
} else {
None
}
}

/// BLOCKING
pub fn load_config_from_paths<'a, T: for<'de> Deserialize<'de>>(
paths: impl IntoIterator<Item = impl AsRef<Path>>,
) -> Result<T, Error> {
let mut config = Default::default();
for path in paths {
if path.as_ref().exists() {
let format: IoFormat = path
.as_ref()
.extension()
.and_then(|s| s.to_str())
.map(|f| f.parse())
.transpose()?
.unwrap_or_default();
let new = format.from_reader(File::open(path)?)?;
config = merge_configs(config, new);
}
}
serde_json::from_value(Value::Object(config)).with_kind(crate::ErrorKind::Deserialization)
}

pub fn merge_configs(mut first: Config, second: Config) -> Config {
for (k, v) in second.into_iter() {
let new = match first.remove(&k) {
None => v,
Some(old) => match (old, v) {
(Value::Object(first), Value::Object(second)) => {
Value::Object(merge_configs(first, second))
}
(first, _) => first,
},
};
first.insert(k, new);
}
first
}
|
||||
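A standalone sketch of the merge semantics above, assuming `Config` is a `serde_json::Map<String, Value>` (which matches how `merge_configs` is typed here): earlier paths win on conflicts, and nested objects merge recursively.

use serde_json::{json, Map, Value};

// Same logic as merge_configs above, written against serde_json::Map directly.
fn merge(mut first: Map<String, Value>, second: Map<String, Value>) -> Map<String, Value> {
    for (k, v) in second {
        let new = match first.remove(&k) {
            None => v,
            Some(old) => match (old, v) {
                (Value::Object(a), Value::Object(b)) => Value::Object(merge(a, b)),
                (old, _) => old, // an existing scalar or array wins
            },
        };
        first.insert(k, new);
    }
    first
}

fn main() {
    let base = json!({"log": {"level": "info"}}).as_object().cloned().unwrap();
    let extra = json!({"log": {"level": "debug", "file": "/tmp/x"}})
        .as_object()
        .cloned()
        .unwrap();
    let merged = merge(base, extra);
    // "level" keeps "info" (first wins); "file" is merged in from the second map.
    assert_eq!(merged["log"]["level"], json!("info"));
    assert_eq!(merged["log"]["file"], json!("/tmp/x"));
}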
@@ -153,6 +153,17 @@ where
        .map_err(color_eyre::eyre::Error::from)
        .with_kind(crate::ErrorKind::Deserialization)
}
+pub async fn to_cbor_async_writer<T, W>(mut writer: W, value: &T) -> Result<(), crate::Error>
+where
+    T: serde::Serialize,
+    W: AsyncWrite + Unpin,
+{
+    let mut buffer = Vec::new();
+    serde_cbor::ser::into_writer(value, &mut buffer).with_kind(crate::ErrorKind::Serialization)?;
+    buffer.extend_from_slice(b"\n");
+    writer.write_all(&buffer).await?;
+    Ok(())
+}

pub async fn from_json_async_reader<T, R>(mut reader: R) -> Result<T, crate::Error>
where

@@ -1,4 +1,3 @@
use tracing::metadata::LevelFilter;
use tracing::Subscriber;
use tracing_subscriber::util::SubscriberInitExt;

@@ -1,31 +1,30 @@
use std::collections::BTreeMap;
use std::future::Future;
use std::hash::{Hash, Hasher};
use std::marker::PhantomData;
use std::ops::Deref;
use std::path::{Path, PathBuf};
use std::pin::Pin;
use std::process::Stdio;
use std::str::FromStr;
use std::sync::Arc;
use std::task::{Context, Poll};

use ::serde::{Deserialize, Deserializer, Serialize, Serializer};
use async_trait::async_trait;
use clap::ArgMatches;
use color_eyre::eyre::{self, eyre};
use digest::Digest;
use fd_lock_rs::FdLock;
use futures::future::BoxFuture;
use futures::FutureExt;
pub use helpers::NonDetachingJoinHandle;
use lazy_static::lazy_static;
use patch_db::{HasModel, Model};
pub use models::Version;
use pin_project::pin_project;
use sha2_old::Digest;
use tokio::fs::File;
use tokio::sync::{Mutex, OwnedMutexGuard, RwLock};
use tokio::task::{JoinError, JoinHandle};
use tracing::instrument;

use crate::shutdown::Shutdown;
use crate::{Error, ResultExt as _};

pub mod config;
pub mod io;
pub mod logger;
pub mod serde;
@@ -122,107 +121,6 @@ impl<T> SNone<T> {
    }
}
impl<T> SOption<T> for SNone<T> {}

-#[derive(Debug, Clone)]
-pub struct Version {
-    version: emver::Version,
-    string: String,
-}
-impl Version {
-    pub fn as_str(&self) -> &str {
-        self.string.as_str()
-    }
-}
-impl std::fmt::Display for Version {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        write!(f, "{}", self.string)
-    }
-}
-impl std::str::FromStr for Version {
-    type Err = <emver::Version as FromStr>::Err;
-    fn from_str(s: &str) -> Result<Self, Self::Err> {
-        Ok(Version {
-            string: s.to_owned(),
-            version: s.parse()?,
-        })
-    }
-}
-impl From<emver::Version> for Version {
-    fn from(v: emver::Version) -> Self {
-        Version {
-            string: v.to_string(),
-            version: v,
-        }
-    }
-}
-impl From<Version> for emver::Version {
-    fn from(v: Version) -> Self {
-        v.version
-    }
-}
-impl Default for Version {
-    fn default() -> Self {
-        Self::from(emver::Version::default())
-    }
-}
-impl Deref for Version {
-    type Target = emver::Version;
-    fn deref(&self) -> &Self::Target {
-        &self.version
-    }
-}
-impl AsRef<emver::Version> for Version {
-    fn as_ref(&self) -> &emver::Version {
-        &self.version
-    }
-}
-impl AsRef<str> for Version {
-    fn as_ref(&self) -> &str {
-        self.as_str()
-    }
-}
-impl PartialEq for Version {
-    fn eq(&self, other: &Version) -> bool {
-        self.version.eq(&other.version)
-    }
-}
-impl Eq for Version {}
-impl PartialOrd for Version {
-    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
-        self.version.partial_cmp(&other.version)
-    }
-}
-impl Ord for Version {
-    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
-        self.version.cmp(&other.version)
-    }
-}
-impl Hash for Version {
-    fn hash<H: Hasher>(&self, state: &mut H) {
-        self.version.hash(state)
-    }
-}
-impl<'de> Deserialize<'de> for Version {
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        let string = String::deserialize(deserializer)?;
-        let version = emver::Version::from_str(&string).map_err(::serde::de::Error::custom)?;
-        Ok(Self { string, version })
-    }
-}
-impl Serialize for Version {
-    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
-    where
-        S: Serializer,
-    {
-        self.string.serialize(serializer)
-    }
-}
-impl HasModel for Version {
-    type Model = Model<Version>;
-}

#[async_trait]
pub trait AsyncFileExt: Sized {
    async fn maybe_open<P: AsRef<Path> + Send + Sync>(path: P) -> std::io::Result<Option<Self>>;
@@ -289,11 +187,13 @@ impl<T> Container<T> {
    }
}

-pub struct HashWriter<H: Digest, W: std::io::Write> {
+#[pin_project]
+pub struct HashWriter<H: Digest, W: tokio::io::AsyncWrite> {
    hasher: H,
+    #[pin]
    writer: W,
}
-impl<H: Digest, W: std::io::Write> HashWriter<H, W> {
+impl<H: Digest, W: tokio::io::AsyncWrite> HashWriter<H, W> {
    pub fn new(hasher: H, writer: W) -> Self {
        HashWriter { hasher, writer }
    }
@@ -307,14 +207,31 @@ impl<H: Digest, W: std::io::Write> HashWriter<H, W> {
        &mut self.writer
    }
}
-impl<H: Digest, W: std::io::Write> std::io::Write for HashWriter<H, W> {
-    fn write(&mut self, buf: &[u8]) -> std::io::Result<usize> {
-        let written = self.writer.write(buf)?;
-        self.hasher.update(&buf[..written]);
-        Ok(written)
+impl<H: Digest, W: tokio::io::AsyncWrite> tokio::io::AsyncWrite for HashWriter<H, W> {
+    fn poll_write(
+        self: Pin<&mut Self>,
+        cx: &mut Context,
+        buf: &[u8],
+    ) -> Poll<std::io::Result<usize>> {
+        let this = self.project();
+        let written = tokio::io::AsyncWrite::poll_write(this.writer, cx, &buf);
+        match written {
+            // only update the hasher once
+            Poll::Ready(res) => {
+                if let Ok(n) = res {
+                    this.hasher.update(&buf[..n]);
+                }
+                Poll::Ready(res)
+            }
+            Poll::Pending => Poll::Pending,
+        }
    }
-    fn flush(&mut self) -> std::io::Result<()> {
-        self.writer.flush()
+    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context) -> Poll<std::io::Result<()>> {
+        self.project().writer.poll_flush(cx)
    }

+    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context) -> Poll<std::io::Result<()>> {
+        self.project().writer.poll_shutdown(cx)
+    }
}

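The pattern the rewritten `HashWriter` packages up is hash-while-writing: every byte range that reaches the sink is also fed to the digest, and in the async version the hasher is only updated once a `poll_write` actually reports `Ready(Ok(n))`. Without the wrapper, a caller interleaves the two steps by hand; a minimal sketch using the `sha2` and `tokio` crates:

use sha2::{Digest, Sha256};
use tokio::io::AsyncWriteExt;

#[tokio::main]
async fn main() -> std::io::Result<()> {
    let mut out = tokio::io::sink(); // stand-in for a file or socket
    let mut hasher = Sha256::new();

    // HashWriter fuses these two steps behind a single AsyncWrite.
    let chunk = b"hello world";
    out.write_all(chunk).await?;
    hasher.update(chunk);

    let digest = hasher.finalize();
    let hex: String = digest.iter().map(|b| format!("{:02x}", b)).collect();
    println!("sha256 = {}", hex);
    Ok(())
}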
@@ -333,31 +250,6 @@
    }
}

-#[pin_project::pin_project(PinnedDrop)]
-pub struct NonDetachingJoinHandle<T>(#[pin] JoinHandle<T>);
-impl<T> From<JoinHandle<T>> for NonDetachingJoinHandle<T> {
-    fn from(t: JoinHandle<T>) -> Self {
-        NonDetachingJoinHandle(t)
-    }
-}
-#[pin_project::pinned_drop]
-impl<T> PinnedDrop for NonDetachingJoinHandle<T> {
-    fn drop(self: std::pin::Pin<&mut Self>) {
-        let this = self.project();
-        this.0.into_ref().get_ref().abort()
-    }
-}
-impl<T> Future for NonDetachingJoinHandle<T> {
-    type Output = Result<T, JoinError>;
-    fn poll(
-        self: std::pin::Pin<&mut Self>,
-        cx: &mut std::task::Context<'_>,
-    ) -> std::task::Poll<Self::Output> {
-        let this = self.project();
-        this.0.poll(cx)
-    }
-}

pub struct GeneralGuard<F: FnOnce() -> T, T = ()>(Option<F>);
impl<F: FnOnce() -> T, T> GeneralGuard<F, T> {
    pub fn new(f: F) -> Self {

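The block removed here now lives in the `helpers` crate (see the `pub use helpers::NonDetachingJoinHandle;` import earlier in this diff). Its key behavior is abort-on-drop: dropping the handle cancels the spawned task instead of detaching it. A simplified model using plain `Drop` in place of `PinnedDrop` (the real type also implements `Future` via pin projection):

use tokio::task::JoinHandle;

struct AbortOnDrop<T>(JoinHandle<T>);

impl<T> Drop for AbortOnDrop<T> {
    fn drop(&mut self) {
        // a bare JoinHandle would let the task keep running in the background
        self.0.abort();
    }
}

#[tokio::main]
async fn main() {
    let handle = AbortOnDrop(tokio::spawn(async {
        tokio::time::sleep(std::time::Duration::from_secs(60)).await;
    }));
    drop(handle); // the task is aborted here, not leaked for 60 seconds
}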
@@ -379,6 +379,21 @@ impl IoFormat {
            }
        }
    }
+    pub async fn from_async_reader<
+        R: tokio::io::AsyncRead + Unpin,
+        T: for<'de> Deserialize<'de>,
+    >(
+        &self,
+        reader: R,
+    ) -> Result<T, Error> {
+        use crate::util::io::*;
+        match self {
+            IoFormat::Json | IoFormat::JsonPretty => from_json_async_reader(reader).await,
+            IoFormat::Yaml => from_yaml_async_reader(reader).await,
+            IoFormat::Cbor => from_cbor_async_reader(reader).await,
+            IoFormat::Toml | IoFormat::TomlPretty => from_toml_async_reader(reader).await,
+        }
+    }
    pub fn from_slice<T: for<'de> Deserialize<'de>>(&self, slice: &[u8]) -> Result<T, Error> {
        match self {
            IoFormat::Json | IoFormat::JsonPretty => {

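The new `from_async_reader` simply dispatches on the format tag and hands the reader to the matching async deserializer. A hypothetical round trip for the YAML arm; the `ServerInfo` struct is illustrative, and the synchronous `serde_yaml` call stands in for the async path so the sketch is runnable on its own:

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct ServerInfo {
    version: String,
}

#[tokio::main]
async fn main() {
    let yaml: &[u8] = b"version: 0.3.1\n"; // &[u8] implements AsyncRead
    // With the new API this would read:
    //   let info: ServerInfo = IoFormat::Yaml.from_async_reader(yaml).await?;
    // Equivalent synchronous check, for illustration:
    let info: ServerInfo = serde_yaml::from_slice(yaml).unwrap();
    assert_eq!(info.version, "0.3.1");
}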
@@ -2,27 +2,52 @@ use std::cmp::Ordering;

use async_trait::async_trait;
use color_eyre::eyre::eyre;
-use patch_db::json_ptr::JsonPointer;
-use patch_db::{DbHandle, LockType};
+use patch_db::DbHandle;
use rpc_toolkit::command;

-use crate::{Error, ResultExt};
+use crate::{init::InitReceipts, Error};

mod v0_3_0;
mod v0_3_0_1;
mod v0_3_0_2;
+mod v0_3_0_3;
+mod v0_3_1;

-pub type Current = v0_3_0_2::Version;
+pub type Current = v0_3_1::Version;

-#[derive(serde::Serialize, serde::Deserialize)]
+#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)]
#[serde(untagged)]
enum Version {
    V0_3_0(Wrapper<v0_3_0::Version>),
    V0_3_0_1(Wrapper<v0_3_0_1::Version>),
    V0_3_0_2(Wrapper<v0_3_0_2::Version>),
+    V0_3_0_3(Wrapper<v0_3_0_3::Version>),
+    V0_3_1(Wrapper<v0_3_1::Version>),
    Other(emver::Version),
}

+impl Version {
+    fn from_util_version(version: crate::util::Version) -> Self {
+        serde_json::to_value(version.clone())
+            .and_then(serde_json::from_value)
+            .unwrap_or_else(|_e| {
+                tracing::warn!("Can't deserialize: {:?} and falling back to other", version);
+                Version::Other(version.into_version())
+            })
+    }
+    #[cfg(test)]
+    fn as_sem_ver(&self) -> emver::Version {
+        match self {
+            Version::V0_3_0(Wrapper(x)) => x.semver(),
+            Version::V0_3_0_1(Wrapper(x)) => x.semver(),
+            Version::V0_3_0_2(Wrapper(x)) => x.semver(),
+            Version::V0_3_0_3(Wrapper(x)) => x.semver(),
+            Version::V0_3_1(Wrapper(x)) => x.semver(),
+            Version::Other(x) => x.clone(),
+        }
+    }
+}

#[async_trait]
pub trait VersionT
where

@@ -34,16 +59,18 @@
    fn compat(&self) -> &'static emver::VersionRange;
    async fn up<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error>;
    async fn down<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error>;
-    async fn commit<Db: DbHandle>(&self, db: &mut Db) -> Result<(), Error> {
-        crate::db::DatabaseModel::new()
-            .server_info()
-            .eos_version_compat()
-            .put(db, &self.compat())
+    async fn commit<Db: DbHandle>(
+        &self,
+        db: &mut Db,
+        receipts: &InitReceipts,
+    ) -> Result<(), Error> {
+        receipts
+            .version_range
+            .set(db, self.compat().clone())
            .await?;
-        crate::db::DatabaseModel::new()
-            .server_info()
-            .version()
-            .put(db, &self.semver().into())
+        receipts
+            .server_version
+            .set(db, self.semver().into())
            .await?;

        Ok(())
@@ -52,10 +79,11 @@
        &self,
        version: &V,
        db: &mut Db,
+        receipts: &InitReceipts,
    ) -> Result<(), Error> {
        match self.semver().cmp(&version.semver()) {
-            Ordering::Greater => self.rollback_to_unchecked(version, db).await,
-            Ordering::Less => version.migrate_from_unchecked(self, db).await,
+            Ordering::Greater => self.rollback_to_unchecked(version, db, receipts).await,
+            Ordering::Less => version.migrate_from_unchecked(self, db, receipts).await,
            Ordering::Equal => Ok(()),
        }
    }
@@ -63,31 +91,38 @@
        &self,
        version: &V,
        db: &mut Db,
+        receipts: &InitReceipts,
    ) -> Result<(), Error> {
        let previous = Self::Previous::new();
        if version.semver() != previous.semver() {
-            previous.migrate_from_unchecked(version, db).await?;
+            previous
+                .migrate_from_unchecked(version, db, receipts)
+                .await?;
        }
        tracing::info!("{} -> {}", previous.semver(), self.semver(),);
        self.up(db).await?;
-        self.commit(db).await?;
+        self.commit(db, receipts).await?;
        Ok(())
    }
    async fn rollback_to_unchecked<V: VersionT, Db: DbHandle>(
        &self,
        version: &V,
        db: &mut Db,
+        receipts: &InitReceipts,
    ) -> Result<(), Error> {
        let previous = Self::Previous::new();
        tracing::info!("{} -> {}", self.semver(), previous.semver(),);
        self.down(db).await?;
-        previous.commit(db).await?;
+        previous.commit(db, receipts).await?;
        if version.semver() != previous.semver() {
-            previous.rollback_to_unchecked(version, db).await?;
+            previous
+                .rollback_to_unchecked(version, db, receipts)
+                .await?;
        }
        Ok(())
    }
}
+#[derive(Debug, Clone)]
struct Wrapper<T>(T);
impl<T> serde::Serialize for Wrapper<T>
where
@@ -104,7 +139,7 @@
    fn deserialize<D: serde::Deserializer<'de>>(deserializer: D) -> Result<Self, D::Error> {
        let v = crate::util::Version::deserialize(deserializer)?;
        let version = T::new();
-        if &*v == &version.semver() {
+        if *v == version.semver() {
            Ok(Wrapper(version))
        } else {
            Err(serde::de::Error::custom("Mismatched Version"))
@@ -112,16 +147,17 @@
    }
}

-pub async fn init<Db: DbHandle>(db: &mut Db) -> Result<(), Error> {
-    let ptr: JsonPointer = "/server-info/version"
-        .parse()
-        .with_kind(crate::ErrorKind::Database)?;
-    db.lock(ptr.clone(), LockType::Write).await?;
-    let version: Version = db.get(&ptr).await?;
+pub async fn init<Db: DbHandle>(
+    db: &mut Db,
+    receipts: &crate::init::InitReceipts,
+) -> Result<(), Error> {
+    let version = Version::from_util_version(receipts.server_version.get(db).await?);
    match version {
-        Version::V0_3_0(v) => v.0.migrate_to(&Current::new(), db).await?,
-        Version::V0_3_0_1(v) => v.0.migrate_to(&Current::new(), db).await?,
-        Version::V0_3_0_2(v) => v.0.migrate_to(&Current::new(), db).await?,
+        Version::V0_3_0(v) => v.0.migrate_to(&Current::new(), db, receipts).await?,
+        Version::V0_3_0_1(v) => v.0.migrate_to(&Current::new(), db, receipts).await?,
+        Version::V0_3_0_2(v) => v.0.migrate_to(&Current::new(), db, receipts).await?,
+        Version::V0_3_0_3(v) => v.0.migrate_to(&Current::new(), db, receipts).await?,
+        Version::V0_3_1(v) => v.0.migrate_to(&Current::new(), db, receipts).await?,
        Version::Other(_) => {
            return Err(Error::new(
                eyre!("Cannot downgrade"),
@@ -132,10 +168,48 @@ pub async fn init<Db: DbHandle>(
    Ok(())
}

-pub const COMMIT_HASH: &'static str =
+pub const COMMIT_HASH: &str =
    git_version::git_version!(args = ["--always", "--abbrev=40", "--dirty=-modified"]);

#[command(rename = "git-info", local, metadata(authenticated = false))]
pub fn git_info() -> Result<&'static str, Error> {
    Ok(COMMIT_HASH)
}

+#[cfg(test)]
+mod tests {
+    use super::*;
+    use proptest::prelude::*;
+
+    fn em_version() -> impl Strategy<Value = emver::Version> {
+        any::<(usize, usize, usize, usize)>().prop_map(|(major, minor, patch, super_minor)| {
+            emver::Version::new(major, minor, patch, super_minor)
+        })
+    }
+
+    fn versions() -> impl Strategy<Value = Version> {
+        prop_oneof![
+            Just(Version::V0_3_0(Wrapper(v0_3_0::Version::new()))),
+            Just(Version::V0_3_0_1(Wrapper(v0_3_0_1::Version::new()))),
+            Just(Version::V0_3_0_2(Wrapper(v0_3_0_2::Version::new()))),
+            Just(Version::V0_3_0_3(Wrapper(v0_3_0_3::Version::new()))),
+            Just(Version::V0_3_1(Wrapper(v0_3_1::Version::new()))),
+            em_version().prop_map(Version::Other),
+        ]
+    }
+
+    proptest! {
+        #[test]
+        fn emversion_isomorphic_version(original in em_version()) {
+            let version = Version::from_util_version(original.clone().into());
+            let back = version.as_sem_ver();
+            prop_assert_eq!(original, back, "All versions should round trip");
+        }
+        #[test]
+        fn version_isomorphic_em_version(version in versions()) {
+            let sem_ver = version.as_sem_ver();
+            let back = Version::from_util_version(sem_ver.into());
+            prop_assert_eq!(format!("{:?}",version), format!("{:?}", back), "All versions should round trip");
+        }
+    }
+}

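`migrate_from_unchecked` recurses through `Self::Previous` until it reaches the installed version, then applies each `up()` and `commit()` on the way back out, so every intermediate migration runs exactly once. A toy model of that recursion (the `Vec` of applied steps stands in for the database writes):

#[derive(Clone, Copy, Debug, PartialEq)]
struct V(u32);

fn migrate(from: V, to: V, applied: &mut Vec<u32>) {
    if from.0 < to.0 {
        // recurse down to the version just below `to`, then apply `to`'s step
        migrate(from, V(to.0 - 1), applied);
        applied.push(to.0); // stands in for self.up(db) + self.commit(db, receipts)
    }
}

fn main() {
    let mut applied = Vec::new();
    migrate(V(1), V(4), &mut applied); // e.g. 0.3.0.1 -> 0.3.1
    assert_eq!(applied, vec![2, 3, 4]); // each intermediate migration ran once, in order
}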
@@ -14,6 +14,7 @@ lazy_static! {
    );
}

+#[derive(Debug, Clone)]
pub struct Version;
#[async_trait]
impl VersionT for Version {

@@ -11,6 +11,7 @@ use crate::util::Invoke;

const V0_3_0_1: emver::Version = emver::Version::new(0, 3, 0, 1);

+#[derive(Debug, Clone)]
pub struct Version;
#[async_trait]
impl VersionT for Version {

@@ -1,16 +1,10 @@
-use std::path::Path;

use emver::VersionRange;
-use tokio::process::Command;

use super::*;
-use crate::disk::quirks::{fetch_quirks, save_quirks, update_quirks};
-use crate::disk::BOOT_RW_PATH;
-use crate::update::query_mounted_label;
-use crate::util::Invoke;

const V0_3_0_2: emver::Version = emver::Version::new(0, 3, 0, 2);

+#[derive(Debug, Clone)]
pub struct Version;
#[async_trait]
impl VersionT for Version {

backend/src/version/v0_3_0_3.rs (new file, 27 lines)
@@ -0,0 +1,27 @@
use emver::VersionRange;

use super::*;

const V0_3_0_3: emver::Version = emver::Version::new(0, 3, 0, 3);

#[derive(Clone, Debug)]
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_3_0_2::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> emver::Version {
        V0_3_0_3
    }
    fn compat(&self) -> &'static VersionRange {
        &*v0_3_0::V0_3_0_COMPAT
    }
    async fn up<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> {
        Ok(())
    }
    async fn down<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> {
        Ok(())
    }
}
backend/src/version/v0_3_1.rs (new file, 36 lines)
@@ -0,0 +1,36 @@
use emver::VersionRange;

use super::*;

const V0_3_1: emver::Version = emver::Version::new(0, 3, 1, 0);
lazy_static::lazy_static! {
    pub static ref V0_3_1_COMPAT: VersionRange = VersionRange::Conj(
        Box::new(VersionRange::Anchor(
            emver::GTE,
            emver::Version::new(0, 3, 0, 0),
        )),
        Box::new(VersionRange::Anchor(emver::LTE, Current::new().semver())),
    );
}

#[derive(Clone, Debug)]
pub struct Version;
#[async_trait]
impl VersionT for Version {
    type Previous = v0_3_0_3::Version;
    fn new() -> Self {
        Version
    }
    fn semver(&self) -> emver::Version {
        V0_3_1
    }
    fn compat(&self) -> &'static VersionRange {
        &*V0_3_1_COMPAT
    }
    async fn up<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> {
        Ok(())
    }
    async fn down<Db: DbHandle>(&self, _db: &mut Db) -> Result<(), Error> {
        Ok(())
    }
}
@@ -1,77 +1,23 @@
-use std::borrow::Borrow;
use std::collections::BTreeMap;
use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};

use patch_db::{HasModel, Map, MapModel};
-use serde::{Deserialize, Deserializer, Serialize};
+use serde::{Deserialize, Serialize};
use tracing::instrument;

use crate::context::RpcContext;
-use crate::id::{Id, IdUnchecked};
use crate::net::interface::{InterfaceId, Interfaces};
use crate::net::NetController;
use crate::s9pk::manifest::PackageId;
use crate::util::Version;
use crate::{Error, ResultExt};

-pub const PKG_VOLUME_DIR: &'static str = "package-data/volumes";
-pub const BACKUP_DIR: &'static str = "/media/embassy-os/backups";
+pub use helpers::script_dir;
+pub use models::VolumeId;

-#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
-pub enum VolumeId<S: AsRef<str> = String> {
-    Backup,
-    Custom(Id<S>),
-}
-impl<S: AsRef<str>> std::fmt::Display for VolumeId<S> {
-    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
-        match self {
-            VolumeId::Backup => write!(f, "BACKUP"),
-            VolumeId::Custom(id) => write!(f, "{}", id),
-        }
-    }
-}
-impl<S: AsRef<str>> AsRef<str> for VolumeId<S> {
-    fn as_ref(&self) -> &str {
-        match self {
-            VolumeId::Backup => "BACKUP",
-            VolumeId::Custom(id) => id.as_ref(),
-        }
-    }
-}
-impl<S: AsRef<str>> Borrow<str> for VolumeId<S> {
-    fn borrow(&self) -> &str {
-        self.as_ref()
-    }
-}
-impl<S: AsRef<str>> AsRef<Path> for VolumeId<S> {
-    fn as_ref(&self) -> &Path {
-        AsRef::<str>::as_ref(self).as_ref()
-    }
-}
-impl<'de, S> Deserialize<'de> for VolumeId<S>
-where
-    S: AsRef<str>,
-    IdUnchecked<S>: Deserialize<'de>,
-{
-    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
-    where
-        D: Deserializer<'de>,
-    {
-        let unchecked: IdUnchecked<S> = Deserialize::deserialize(deserializer)?;
-        Ok(match unchecked.0.as_ref() {
-            "BACKUP" => VolumeId::Backup,
-            _ => VolumeId::Custom(Id::try_from(unchecked.0).map_err(serde::de::Error::custom)?),
-        })
-    }
-}
-impl<S: AsRef<str>> Serialize for VolumeId<S> {
-    fn serialize<Ser>(&self, serializer: Ser) -> Result<Ser::Ok, Ser::Error>
-    where
-        Ser: serde::Serializer,
-    {
-        serializer.serialize_str(self.as_ref())
-    }
-}
+pub const PKG_VOLUME_DIR: &str = "package-data/volumes";
+pub const BACKUP_DIR: &str = "/media/embassy-os/backups";

#[derive(Clone, Debug, Default, Deserialize, Serialize)]
pub struct Volumes(BTreeMap<VolumeId, Volume>);
@@ -93,20 +39,22 @@
        version: &Version,
    ) -> Result<(), Error> {
        for (volume_id, volume) in &self.0 {
-            volume.install(ctx, pkg_id, version, volume_id).await?; // TODO: concurrent?
+            volume
+                .install(&ctx.datadir, pkg_id, version, volume_id)
+                .await?; // TODO: concurrent?
        }
        Ok(())
    }
    pub fn get_path_for(
        &self,
-        ctx: &RpcContext,
+        path: &PathBuf,
        pkg_id: &PackageId,
        version: &Version,
        volume_id: &VolumeId,
    ) -> Option<PathBuf> {
        self.0
            .get(volume_id)
-            .map(|volume| volume.path_for(ctx, pkg_id, version, volume_id))
+            .map(|volume| volume.path_for(path, pkg_id, version, volume_id))
    }
    pub fn to_readonly(&self) -> Self {
        Volumes(
@@ -205,14 +153,14 @@
    }
    pub async fn install(
        &self,
-        ctx: &RpcContext,
+        path: &PathBuf,
        pkg_id: &PackageId,
        version: &Version,
        volume_id: &VolumeId,
    ) -> Result<(), Error> {
        match self {
            Volume::Data { .. } => {
-                tokio::fs::create_dir_all(self.path_for(ctx, pkg_id, version, volume_id)).await?;
+                tokio::fs::create_dir_all(self.path_for(path, pkg_id, version, volume_id)).await?;
            }
            _ => (),
        }
@@ -220,25 +168,25 @@
    }
    pub fn path_for(
        &self,
-        ctx: &RpcContext,
+        data_dir_path: impl AsRef<Path>,
        pkg_id: &PackageId,
        version: &Version,
        volume_id: &VolumeId,
    ) -> PathBuf {
        match self {
-            Volume::Data { .. } => data_dir(&ctx.datadir, pkg_id, volume_id),
-            Volume::Assets {} => asset_dir(&ctx.datadir, pkg_id, version).join(volume_id),
+            Volume::Data { .. } => data_dir(&data_dir_path, pkg_id, volume_id),
+            Volume::Assets {} => asset_dir(&data_dir_path, pkg_id, version).join(volume_id),
            Volume::Pointer {
                package_id,
                volume_id,
                path,
                ..
-            } => data_dir(&ctx.datadir, package_id, volume_id).join(if path.is_absolute() {
+            } => data_dir(&data_dir_path, package_id, volume_id).join(if path.is_absolute() {
                path.strip_prefix("/").unwrap()
            } else {
                path.as_ref()
            }),
-            Volume::Certificate { interface_id: _ } => ctx.net_controller.ssl_directory_for(pkg_id),
+            Volume::Certificate { interface_id: _ } => NetController::ssl_directory_for(pkg_id),
            Volume::Backup { .. } => backup_dir(pkg_id),
        }
    }

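The refactor above threads a plain data directory through `path_for` instead of the whole `RpcContext`, which makes path resolution callable without a live server context. A sketch under an assumed directory layout (the real `data_dir` helper may arrange segments differently):

use std::path::{Path, PathBuf};

// Hypothetical layout: <datadir>/package-data/volumes/<pkg>/data/<volume>
fn data_dir(datadir: impl AsRef<Path>, pkg_id: &str, volume_id: &str) -> PathBuf {
    datadir
        .as_ref()
        .join("package-data/volumes")
        .join(pkg_id)
        .join("data")
        .join(volume_id)
}

fn main() {
    let p = data_dir("/embassy-data", "lnd", "main");
    assert_eq!(
        p,
        PathBuf::from("/embassy-data/package-data/volumes/lnd/data/main")
    );
}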
backend/test/config-spec/lnd-correct.yaml (new file, 547 lines)
@@ -0,0 +1,547 @@
|
||||
control-tor-address:
|
||||
name: Control Tor Address
|
||||
description: The Tor address for the control interface.
|
||||
type: pointer
|
||||
subtype: package
|
||||
package-id: lnd
|
||||
target: tor-address
|
||||
interface: control
|
||||
peer-tor-address:
|
||||
name: Peer Tor Address
|
||||
description: The Tor address for the peer interface.
|
||||
type: pointer
|
||||
subtype: package
|
||||
package-id: lnd
|
||||
target: tor-address
|
||||
interface: peer
|
||||
watchtower-tor-address:
|
||||
name: Watchtower Tor Address
|
||||
description: The Tor address for the watchtower interface.
|
||||
type: pointer
|
||||
subtype: package
|
||||
package-id: lnd
|
||||
target: tor-address
|
||||
interface: watchtower
|
||||
alias:
|
||||
type: string
|
||||
name: Alias
|
||||
description: The public, human-readable name of your Lightning node
|
||||
nullable: true
|
||||
placeholder: Enter a value
|
||||
pattern: ".{1,32}"
|
||||
pattern-description: Must be at least 1 character and no more than 32 characters
|
||||
color:
|
||||
type: string
|
||||
name: Color
|
||||
description: The public color dot of your Lightning node
|
||||
nullable: false
|
||||
pattern: "[0-9a-fA-F]{6}"
|
||||
pattern-description: |
|
||||
Must be a valid 6 digit hexadecimal RGB value. The first two digits are red, middle two are green, and final two are
|
||||
blue
|
||||
default:
|
||||
charset: "a-f,0-9"
|
||||
len: 6
|
||||
accept-keysend:
|
||||
type: boolean
|
||||
name: Accept Keysend
|
||||
description: |
|
||||
Allow others to send payments directly to your public key through keysend instead of having to get a new invoice
|
||||
default: false
|
||||
accept-amp:
|
||||
type: boolean
|
||||
name: Accept Spontaneous AMPs
|
||||
description: |
|
||||
If enabled, spontaneous payments through AMP will be accepted. Payments to AMP
|
||||
invoices will be accepted regardless of this setting.
|
||||
default: false
|
||||
reject-htlc:
|
||||
type: boolean
|
||||
name: Reject Routing Requests
|
||||
description: |
|
||||
If true, LND will not forward any HTLCs that are meant as onward payments. This option will still allow LND to send
|
||||
HTLCs and receive HTLCs but lnd won't be used as a hop.
|
||||
default: false
|
||||
min-chan-size:
|
||||
type: number
|
||||
name: Minimum Channel Size
|
||||
description: |
|
||||
The smallest channel size that we should accept. Incoming channels smaller than this will be rejected.
|
||||
nullable: true
|
||||
range: "[1,16777215]"
|
||||
integral: true
|
||||
units: satoshis
|
||||
max-chan-size:
|
||||
type: number
|
||||
name: Maximum Channel Size
|
||||
description: |
|
||||
The largest channel size that we should accept. Incoming channels larger than this will be rejected.
|
||||
For non-Wumbo channels this limit remains 16777215 satoshis by default as specified in BOLT-0002. For wumbo
|
||||
channels this limit is 1,000,000,000 satoshis (10 BTC). Set this config option explicitly to restrict your maximum
|
||||
channel size to better align with your risk tolerance. Don't forget to enable Wumbo channels under 'Advanced,' if desired.
|
||||
nullable: true
|
||||
range: "[1,1000000000]"
|
||||
integral: true
|
||||
units: satoshis
|
||||
tor:
|
||||
type: object
|
||||
name: Tor Config
|
||||
nullable: false
|
||||
spec:
|
||||
use-tor-only:
|
||||
type: boolean
|
||||
name: Use Tor for all traffic
|
||||
description: >-
|
||||
Use the tor proxy even for connections that are reachable on clearnet. This will hide your node's public IP
|
||||
address, but will slow down your node's performance
|
||||
nullable: false
|
||||
default: false
|
||||
stream-isolation:
|
||||
type: boolean
|
||||
name: Stream Isolation
|
||||
description: >-
|
||||
Enable Tor stream isolation by randomizing user credentials for each
|
||||
connection. With this mode active, each connection will use a new circuit.
|
||||
This means that multiple applications (other than lnd) using Tor won't be mixed
|
||||
in with lnd's traffic.
|
||||
|
||||
This option may not be used when 'Use Tor for all traffic' is disabled, since direct
|
||||
connections compromise source IP privacy by default.
|
||||
nullable: false
|
||||
default: false
|
||||
bitcoind:
|
||||
type: union
|
||||
name: Bitcoin Core
|
||||
description: |
|
||||
The Bitcoin Core node to connect to:
|
||||
- internal: The Bitcoin Core and Proxy services installed to your Embassy
|
||||
- external: An unpruned Bitcoin Core node running on a different device
|
||||
tag:
|
||||
id: type
|
||||
name: Type
|
||||
variant-names:
|
||||
internal: Internal (Bitcoin Core)
|
||||
internal-proxy: Internal (Bitcoin Proxy)
|
||||
external: External
|
||||
description: |
|
||||
The Bitcoin Core node to connect to:
|
||||
- internal: The Bitcoin Core and Proxy services installed to your Embassy
|
||||
- external: An unpruned Bitcoin Core node running on a different device
|
||||
default: internal
|
||||
variants:
|
||||
internal:
|
||||
user:
|
||||
type: pointer
|
||||
name: RPC Username
|
||||
description: The username for Bitcoin Core's RPC interface
|
||||
subtype: package
|
||||
package-id: bitcoind
|
||||
target: config
|
||||
multi: false
|
||||
selector: "$.rpc.username"
|
||||
password:
|
||||
type: pointer
|
||||
name: RPC Password
|
||||
description: The password for Bitcoin Core's RPC interface
|
||||
subtype: package
|
||||
package-id: bitcoind
|
||||
target: config
|
||||
multi: false
|
||||
selector: "$.rpc.password"
|
||||
internal-proxy:
|
||||
user:
|
||||
type: pointer
|
||||
name: RPC Username
|
||||
description: The username for the RPC user allocated to lnd
|
||||
subtype: package
|
||||
package-id: btc-rpc-proxy
|
||||
target: config
|
||||
multi: false
|
||||
selector: '$.users[?(@.name == "lnd")].name'
|
||||
# index: 'users.[first(item => ''item.name = "lnd")].name'
|
||||
password:
|
||||
type: pointer
|
||||
name: RPC Password
|
||||
description: The password for the RPC user allocated to lnd
|
||||
subtype: package
|
||||
package-id: btc-rpc-proxy
|
||||
target: config
|
||||
multi: false
|
||||
selector: '$.users[?(@.name == "lnd")].password'
|
||||
# index: 'users.[first(item => ''item.name = "lnd")].password'
|
||||
external:
|
||||
connection-settings:
|
||||
type: union
|
||||
name: Connection Settings
|
||||
description: Information to connect to an external unpruned Bitcoin Core node
|
||||
tag:
|
||||
id: type
|
||||
name: Type
|
||||
description: |
|
||||
- Manual: Raw information for finding a Bitcoin Core node
|
||||
- Quick Connect: A Quick Connect URL for a Bitcoin Core node
|
||||
variant-names:
|
||||
manual: Manual
|
||||
quick-connect: Quick Connect
|
||||
default: quick-connect
|
||||
variants:
|
||||
manual:
|
||||
host:
|
||||
type: string
|
||||
name: Public Address
|
||||
description: The public address of your Bitcoin Core server
|
||||
nullable: false
|
||||
rpc-user:
|
||||
type: string
|
||||
name: RPC Username
|
||||
description: The username for the RPC user on your Bitcoin Core RPC server
|
||||
nullable: false
|
||||
rpc-password:
|
||||
type: string
|
||||
name: RPC Password
|
||||
description: The password for the RPC user on your Bitcoin Core RPC server
|
||||
nullable: false
|
||||
rpc-port:
|
||||
type: number
|
||||
name: RPC Port
|
||||
description: The port that your Bitcoin Core RPC server is bound to
|
||||
nullable: false
|
||||
range: "[0,65535]"
|
||||
integral: true
|
||||
default: 8332
|
||||
zmq-block-port:
|
||||
type: number
|
||||
name: ZeroMQ Block Port
|
||||
description: The port that your Bitcoin Core ZeroMQ server is bound to for raw blocks
|
||||
nullable: false
|
||||
range: "[0,65535]"
|
||||
integral: true
|
||||
default: 28332
|
||||
zmq-tx-port:
|
||||
type: number
|
||||
name: ZeroMQ Transaction Port
|
||||
description: The port that your Bitcoin Core ZeroMQ server is bound to for raw transactions
|
||||
nullable: false
|
||||
range: "[0,65535]"
|
||||
integral: true
|
||||
default: 28333
|
||||
quick-connect:
|
||||
quick-connect-url:
|
||||
type: string
|
||||
name: Quick Connect URL
|
||||
description: |
|
||||
The Quick Connect URL for your Bitcoin Core RPC server
|
||||
NOTE: LND will not accept a .onion url for this option
|
||||
nullable: false
|
||||
pattern: 'btcstandup://[^:]*:[^@]*@[a-zA-Z0-9.-]+:[0-9]+(/(\?(label=.+)?)?)?'
|
||||
pattern-description: Must be a valid Quick Connect URL. For help, check out https://github.com/BlockchainCommons/Gordian/blob/master/Docs/Quick-Connect-API.md
|
||||
zmq-block-port:
|
||||
type: number
|
||||
name: ZeroMQ Block Port
|
||||
description: The port that your Bitcoin Core ZeroMQ server is bound to for raw blocks
|
||||
nullable: false
|
||||
range: "[0,65535]"
|
||||
integral: true
|
||||
default: 28332
|
||||
zmq-tx-port:
|
||||
type: number
|
||||
name: ZeroMQ Transaction Port
|
||||
description: The port that your Bitcoin Core ZeroMQ server is bound to for raw transactions
|
||||
nullable: false
|
||||
range: "[0,65535]"
|
||||
integral: true
|
||||
default: 28333
|
||||
autopilot:
|
||||
type: object
|
||||
name: Autopilot
|
||||
description: Autopilot Settings
|
||||
nullable: false
|
||||
spec:
|
||||
enabled:
|
||||
type: boolean
|
||||
name: Enabled
|
||||
description: |
|
||||
If the autopilot agent should be active or not. The autopilot agent will
|
||||
attempt to AUTOMATICALLY OPEN CHANNELS to put your node in an advantageous
|
||||
position within the network graph. DO NOT ENABLE THIS IF YOU WANT TO MANAGE
|
||||
CHANNELS MANUALLY OR DO NOT UNDERSTAND IT.
|
||||
default: false
|
||||
private:
|
||||
type: boolean
|
||||
name: Private
|
||||
description: |
|
||||
Whether the channels created by the autopilot agent should be private or not.
|
||||
Private channels won't be announced to the network.
|
||||
default: false
|
||||
maxchannels:
|
||||
type: number
|
||||
name: Maximum Channels
|
||||
description: The maximum number of channels that should be created.
|
||||
nullable: false
|
||||
range: "[1,*)"
|
||||
integral: true
|
||||
default: 5
|
||||
allocation:
|
||||
type: number
|
||||
name: Allocation
|
||||
description: |
|
||||
The fraction of total funds that should be committed to automatic channel
|
||||
establishment. For example 60% means that 60% of the total funds available
|
||||
within the wallet should be used to automatically establish channels. The total
|
||||
amount of attempted channels will still respect the "Maximum Channels" parameter.
|
||||
nullable: false
|
||||
range: "[0,100]"
|
||||
integral: false
|
||||
default: 60
|
||||
units: "%"
|
||||
min-channel-size:
|
||||
type: number
|
||||
name: Minimum Channel Size
|
||||
description: The smallest channel that the autopilot agent should create.
|
||||
nullable: false
|
||||
range: "[0,*)"
|
||||
integral: true
|
||||
default: 20000
|
||||
units: "satoshis"
|
||||
max-channel-size:
|
||||
type: number
|
||||
name: Maximum Channel Size
|
||||
description: The largest channel that the autopilot agent should create.
|
||||
nullable: false
|
||||
range: "[0,*)"
|
||||
integral: true
|
||||
default: 16777215
|
||||
units: "satoshis"
|
||||
advanced:
|
||||
type: object
|
||||
name: Advanced
|
||||
description: Advanced Options
|
||||
nullable: false
|
||||
spec:
|
||||
min-confirmations:
|
||||
type: number
|
||||
name: Minimum Confirmations
|
||||
description: |
|
||||
The minimum number of confirmations each of your inputs in funding transactions
|
||||
created by the autopilot agent must have.
|
||||
nullable: false
|
||||
range: "[0,*)"
|
||||
integral: true
|
||||
default: 1
|
||||
units: blocks
|
||||
confirmation-target:
|
||||
type: number
|
||||
name: Confirmation Target
|
||||
description: The confirmation target (in blocks) for channels opened by autopilot.
|
||||
nullable: false
|
||||
range: "[0,*)"
|
||||
integral: true
|
||||
default: 1
|
||||
units: blocks
|
||||
advanced:
|
||||
type: object
|
||||
name: Advanced
|
||||
description: Advanced Options
|
||||
nullable: false
|
||||
spec:
|
||||
debug-level:
|
||||
type: enum
|
||||
name: Log Verbosity
|
||||
values:
|
||||
- trace
|
||||
- debug
|
||||
- info
|
||||
- warn
|
||||
- error
|
||||
- critical
|
||||
description: |
|
||||
Sets the level of log filtration. Trace is the most verbose, Critical is the least.
|
||||
default: info
|
||||
db-bolt-no-freelist-sync:
|
||||
type: boolean
|
||||
name: Disallow Bolt DB Freelist Sync
|
||||
description: |
|
||||
If true, prevents the database from syncing its freelist to disk.
|
||||
default: false
|
||||
db-bolt-auto-compact:
|
||||
type: boolean
|
||||
name: Compact Database on Startup
|
||||
description: |
|
||||
Performs database compaction on startup. This is necessary to keep disk usage down over time at the cost of
|
||||
having longer startup times.
|
||||
default: true
|
||||
db-bolt-auto-compact-min-age:
|
||||
type: number
|
||||
name: Minimum Autocompaction Age for Bolt DB
|
||||
description: |
|
||||
How long ago (in hours) the last compaction of a database file must be for it to be considered for auto
|
||||
compaction again. Can be set to 0 to compact on every startup.
|
||||
nullable: false
|
||||
range: "[0, *)"
|
||||
integral: true
|
||||
default: 168
|
||||
units: hours
|
||||
db-bolt-db-timeout:
|
||||
type: number
|
||||
name: Bolt DB Timeout
|
||||
description: How long should LND try to open the database before giving up?
|
||||
nullable: false
|
||||
range: "[1, 86400]"
|
||||
integral: true
|
||||
default: 60
|
||||
units: seconds
|
||||
recovery-window:
|
||||
type: number
|
||||
name: Recovery Window
|
||||
description: Number of blocks in the past that LND should scan for unknown transactions
|
||||
nullable: true
|
||||
range: "[1,*)"
|
||||
integral: true
|
||||
units: "blocks"
|
||||
payments-expiration-grace-period:
|
||||
type: number
|
||||
name: Payments Expiration Grace Period
|
||||
description: |
|
||||
A period to wait before force-closing channels with outgoing HTLCs that have timed out as a result of
this node initiating the payment. If specified, this grace period is taken into account in addition to
the current block-based deadline.
|
||||
nullable: false
|
||||
range: "[1,*)"
|
||||
integral: true
|
||||
default: 30
|
||||
units: "seconds"
|
||||
default-remote-max-htlcs:
|
||||
type: number
|
||||
name: Maximum Remote HTLCs
|
||||
description: |
|
||||
The default max_htlc applied when opening or accepting channels. This value limits the number of concurrent
|
||||
HTLCs that the remote party can add to the commitment. The maximum possible value is 483.
|
||||
nullable: false
|
||||
range: "[1,483]"
|
||||
integral: true
|
||||
default: 483
|
||||
units: htlcs
|
||||
max-channel-fee-allocation:
|
||||
type: number
|
||||
name: Maximum Channel Fee Allocation
|
||||
description: |
|
||||
The maximum percentage of total funds that can be allocated to a channel's commitment fee. This only applies for
|
||||
the initiator of the channel.
|
||||
nullable: false
|
||||
range: "[0.1, 1]"
|
||||
integral: false
|
||||
default: 0.5
|
||||
max-commit-fee-rate-anchors:
|
||||
type: number
|
||||
name: Maximum Commitment Fee for Anchor Channels
|
||||
description: |
|
||||
The maximum fee rate in sat/vbyte that will be used for commitments of channels of the anchors type. Must be
|
||||
large enough to ensure transaction propagation.
|
||||
nullable: false
|
||||
range: "[1,*)"
|
||||
integral: true
|
||||
default: 10
|
||||
protocol-wumbo-channels:
|
||||
type: boolean
|
||||
name: Enable Wumbo Channels
|
||||
description: |
|
||||
If set, then lnd will create and accept requests for channels larger than 0.16 BTC
|
||||
nullable: false
|
||||
default: false
|
||||
protocol-no-anchors:
|
||||
type: boolean
|
||||
name: Disable Anchor Channels
|
||||
description: |
|
||||
Set to disable support for anchor commitments. Anchor channels allow you to determine your fees at close time by
|
||||
using a Child Pays For Parent transaction.
|
||||
nullable: false
|
||||
default: false
|
||||
protocol-disable-script-enforced-lease:
|
||||
type: boolean
|
||||
name: Disable Script Enforced Channel Leases
|
||||
description: >-
|
||||
Set to disable support for script enforced lease channel commitments. If not
|
||||
set, lnd will accept these channels by default if the remote channel party
|
||||
proposes them. Note that lnd will require 1 UTXO to be reserved for this
|
||||
channel type if it is enabled.
|
||||
|
||||
Note: This may cause you to be unable to close a channel and your wallets may not understand why
|
||||
nullable: false
|
||||
default: false
|
||||
gc-canceled-invoices-on-startup:
|
||||
type: boolean
|
||||
name: Cleanup Canceled Invoices on Startup
|
||||
description: |
|
||||
If true, LND will attempt to garbage collect canceled invoices upon start.
|
||||
nullable: false
|
||||
default: false
|
||||
bitcoin:
|
||||
type: object
|
||||
name: Bitcoin Channel Configuration
|
||||
description: Configuration options for lightning network channel management operating over the Bitcoin network
|
||||
nullable: false
|
||||
spec:
|
||||
default-channel-confirmations:
|
||||
type: number
|
||||
name: Default Channel Confirmations
|
||||
description: |
|
||||
The default number of confirmations a channel must have before it's considered
|
||||
open. LND will require any incoming channel requests to wait this many
|
||||
confirmations before it considers the channel active.
|
||||
nullable: false
|
||||
range: "[1,6]"
|
||||
integral: true
|
||||
default: 3
|
||||
units: "blocks"
|
||||
min-htlc:
|
||||
type: number
|
||||
name: Minimum Incoming HTLC Size
|
||||
description: |
|
||||
The smallest HTLC LND will accept on your channels, in millisatoshis.
|
||||
nullable: false
|
||||
range: "[1,*)"
|
||||
integral: true
|
||||
default: 1
|
||||
units: "millisatoshis"
|
||||
min-htlc-out:
|
||||
type: number
|
||||
name: Minimum Outgoing HTLC Size
|
||||
description: |
|
||||
The smallest HTLC LND will send out on your channels, in millisatoshis.
|
||||
nullable: false
|
||||
range: "[1,*)"
|
||||
integral: true
|
||||
default: 1000
|
||||
units: "millisatoshis"
|
||||
base-fee:
|
||||
type: number
|
||||
name: Routing Base Fee
|
||||
description: |
|
||||
The base fee in millisatoshi you will charge for forwarding payments on your
|
||||
channels.
|
||||
nullable: false
|
||||
range: "[0,*)"
|
||||
integral: true
|
||||
default: 1000
|
||||
units: "millisatoshi"
|
||||
fee-rate:
|
||||
type: number
|
||||
name: Routing Fee Rate
|
||||
description: |
|
||||
The fee rate used when forwarding payments on your channels. The total fee
|
||||
charged is the Base Fee + (amount * Fee Rate / 1000000), where amount is the
|
||||
forwarded amount. Measured in sats per million
|
||||
nullable: false
|
||||
range: "[1,1000000)"
|
||||
integral: true
|
||||
default: 1
|
||||
units: "sats per million"
|
||||
time-lock-delta:
|
||||
type: number
|
||||
name: Time Lock Delta
|
||||
description: The CLTV delta we will subtract from a forwarded HTLC's timelock value.
|
||||
nullable: false
|
||||
range: "[6, 144]"
|
||||
integral: true
|
||||
default: 40
|
||||
units: "blocks"
|
||||
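Both fixtures in this directory exercise `type: pointer` entries, whose `selector` is a JSONPath into another package's live config (for example `$.rpc.username` on the bitcoind package). A hand-rolled resolution of that selector with `serde_json`, in place of a full JSONPath engine, to show what the backend ultimately computes:

use serde_json::json;

fn main() {
    // stand-in for the bitcoind package's current config
    let bitcoind = json!({ "rpc": { "username": "lnd", "password": "hunter2" } });

    // "$.rpc.username" resolved manually:
    let user = bitcoind["rpc"]["username"].as_str().unwrap();
    assert_eq!(user, "lnd");
}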
backend/test/config-spec/lnd-invalid-regex.yaml (new file, 546 lines)
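This fixture is identical to lnd-correct.yaml except for one deliberate defect: its `color` pattern is `[0-9a-fA-F]{6`, dropping the closing brace of the counted repetition, so the pattern cannot compile at all and a spec validator must reject the spec itself, not merely user values. A quick check of that failure mode using the `regex` crate (the backend's own config checker is what actually rejects this file):

use regex::Regex;

fn main() {
    // from lnd-correct.yaml: well-formed counted repetition
    assert!(Regex::new("[0-9a-fA-F]{6}").is_ok());
    // from lnd-invalid-regex.yaml: unclosed repetition `{6`
    assert!(Regex::new("[0-9a-fA-F]{6").is_err());
}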
@@ -0,0 +1,546 @@
|
||||
control-tor-address:
|
||||
name: Control Tor Address
|
||||
description: The Tor address for the control interface.
|
||||
type: pointer
|
||||
subtype: package
|
||||
package-id: lnd
|
||||
target: tor-address
|
||||
interface: control
|
||||
peer-tor-address:
|
||||
name: Peer Tor Address
|
||||
description: The Tor address for the peer interface.
|
||||
type: pointer
|
||||
subtype: package
|
||||
package-id: lnd
|
||||
target: tor-address
|
||||
interface: peer
|
||||
watchtower-tor-address:
|
||||
name: Watchtower Tor Address
|
||||
description: The Tor address for the watchtower interface.
|
||||
type: pointer
|
||||
subtype: package
|
||||
package-id: lnd
|
||||
target: tor-address
|
||||
interface: watchtower
|
||||
alias:
|
||||
type: string
|
||||
name: Alias
|
||||
description: The public, human-readable name of your Lightning node
|
||||
nullable: true
|
||||
pattern: ".{1,32}"
|
||||
pattern-description: Must be at least 1 character and no more than 32 characters
|
||||
color:
|
||||
type: string
|
||||
name: Color
|
||||
description: The public color dot of your Lightning node
|
||||
nullable: false
|
||||
pattern: "[0-9a-fA-F]{6"
|
||||
pattern-description: |
|
||||
Must be a valid 6 digit hexadecimal RGB value. The first two digits are red, middle two are green, and final two are
|
||||
blue
|
||||
default:
|
||||
charset: "a-f,0-9"
|
||||
len: 6
|
||||
accept-keysend:
|
||||
type: boolean
|
||||
name: Accept Keysend
|
||||
description: |
|
||||
Allow others to send payments directly to your public key through keysend instead of having to get a new invoice
|
||||
default: false
|
||||
accept-amp:
|
||||
type: boolean
|
||||
name: Accept Spontaneous AMPs
|
||||
description: |
|
||||
If enabled, spontaneous payments through AMP will be accepted. Payments to AMP
|
||||
invoices will be accepted regardless of this setting.
|
||||
default: false
|
||||
reject-htlc:
|
||||
type: boolean
|
||||
name: Reject Routing Requests
|
||||
description: |
|
||||
If true, LND will not forward any HTLCs that are meant as onward payments. This option will still allow LND to send
|
||||
HTLCs and receive HTLCs but lnd won't be used as a hop.
|
||||
default: false
|
||||
min-chan-size:
|
||||
type: number
|
||||
name: Minimum Channel Size
|
||||
description: |
|
||||
The smallest channel size that we should accept. Incoming channels smaller than this will be rejected.
|
||||
nullable: true
|
||||
range: "[1,16777215]"
|
||||
integral: true
|
||||
units: satoshis
|
||||
max-chan-size:
|
||||
type: number
|
||||
name: Maximum Channel Size
|
||||
description: |
|
||||
The largest channel size that we should accept. Incoming channels larger than this will be rejected.
|
||||
For non-Wumbo channels this limit remains 16777215 satoshis by default as specified in BOLT-0002. For wumbo
|
||||
channels this limit is 1,000,000,000 satoshis (10 BTC). Set this config option explicitly to restrict your maximum
|
||||
channel size to better align with your risk tolerance. Don't forget to enable Wumbo channels under 'Advanced,' if desired.
|
||||
nullable: true
|
||||
range: "[1,1000000000]"
|
||||
integral: true
|
||||
units: satoshis
|
||||
tor:
|
||||
type: object
|
||||
name: Tor Config
|
||||
nullable: false
|
||||
spec:
|
||||
use-tor-only:
|
||||
type: boolean
|
||||
name: Use Tor for all traffic
|
||||
description: >-
|
||||
Use the tor proxy even for connections that are reachable on clearnet. This will hide your node's public IP
|
||||
address, but will slow down your node's performance
|
||||
nullable: false
|
||||
default: false
|
||||
stream-isolation:
|
||||
type: boolean
|
||||
name: Stream Isolation
|
||||
description: >-
|
||||
Enable Tor stream isolation by randomizing user credentials for each
|
||||
connection. With this mode active, each connection will use a new circuit.
|
||||
This means that multiple applications (other than lnd) using Tor won't be mixed
|
||||
in with lnd's traffic.
|
||||
|
||||
This option may not be used when 'Use Tor for all traffic' is disabled, since direct
|
||||
connections compromise source IP privacy by default.
|
||||
nullable: false
|
||||
default: false
|
||||
bitcoind:
|
||||
type: union
|
||||
name: Bitcoin Core
|
||||
description: |
|
||||
The Bitcoin Core node to connect to:
|
||||
- internal: The Bitcoin Core and Proxy services installed to your Embassy
|
||||
- external: An unpruned Bitcoin Core node running on a different device
|
||||
tag:
|
||||
id: type
|
||||
name: Type
|
||||
variant-names:
|
||||
internal: Internal (Bitcoin Core)
|
||||
internal-proxy: Internal (Bitcoin Proxy)
|
||||
external: External
|
||||
description: |
|
||||
The Bitcoin Core node to connect to:
|
||||
- internal: The Bitcoin Core and Proxy services installed to your Embassy
|
||||
- external: An unpruned Bitcoin Core node running on a different device
|
||||
default: internal
|
||||
variants:
|
||||
internal:
|
||||
user:
|
||||
type: pointer
|
||||
name: RPC Username
|
||||
description: The username for Bitcoin Core's RPC interface
|
||||
subtype: package
|
||||
package-id: bitcoind
|
||||
target: config
|
||||
multi: false
|
||||
selector: "$.rpc.username"
|
||||
password:
|
||||
type: pointer
|
||||
name: RPC Password
|
||||
description: The password for Bitcoin Core's RPC interface
|
||||
subtype: package
|
||||
package-id: bitcoind
|
||||
target: config
|
||||
multi: false
|
||||
selector: "$.rpc.password"
|
||||
internal-proxy:
|
||||
user:
|
||||
type: pointer
|
||||
name: RPC Username
|
||||
description: The username for the RPC user allocated to lnd
|
||||
subtype: package
|
||||
package-id: btc-rpc-proxy
|
||||
target: config
|
||||
multi: false
|
||||
selector: '$.users[?(@.name == "lnd")].name'
|
||||
# index: 'users.[first(item => ''item.name = "lnd")].name'
|
||||
password:
|
||||
type: pointer
|
||||
name: RPC Password
|
||||
description: The password for the RPC user allocated to lnd
|
||||
subtype: package
|
||||
package-id: btc-rpc-proxy
|
||||
target: config
|
||||
multi: false
|
||||
selector: '$.users[?(@.name == "lnd")].password'
|
||||
# index: 'users.[first(item => ''item.name = "lnd")].password'
|
||||
external:
|
||||
connection-settings:
|
||||
type: union
|
||||
name: Connection Settings
|
||||
description: Information to connect to an external unpruned Bitcoin Core node
|
||||
tag:
|
||||
id: type
|
||||
name: Type
|
||||
description: |
|
||||
- Manual: Raw information for finding a Bitcoin Core node
|
||||
- Quick Connect: A Quick Connect URL for a Bitcoin Core node
|
||||
variant-names:
|
||||
manual: Manual
|
||||
quick-connect: Quick Connect
|
||||
default: quick-connect
|
||||
variants:
|
||||
manual:
|
||||
host:
|
||||
type: string
|
||||
name: Public Address
|
||||
description: The public address of your Bitcoin Core server
|
||||
nullable: false
|
||||
rpc-user:
|
||||
type: string
|
||||
name: RPC Username
|
||||
description: The username for the RPC user on your Bitcoin Core RPC server
|
||||
nullable: false
|
||||
rpc-password:
|
||||
type: string
|
||||
name: RPC Password
|
||||
description: The password for the RPC user on your Bitcoin Core RPC server
|
||||
nullable: false
|
||||
rpc-port:
|
||||
type: number
|
||||
name: RPC Port
|
||||
description: The port that your Bitcoin Core RPC server is bound to
|
||||
nullable: false
|
||||
range: "[0,65535]"
|
||||
integral: true
|
||||
default: 8332
|
||||
zmq-block-port:
|
||||
type: number
|
||||
name: ZeroMQ Block Port
|
||||
description: The port that your Bitcoin Core ZeroMQ server is bound to for raw blocks
|
||||
nullable: false
|
||||
range: "[0,65535]"
|
||||
integral: true
|
||||
default: 28332
|
||||
zmq-tx-port:
|
||||
type: number
|
||||
name: ZeroMQ Transaction Port
|
||||
description: The port that your Bitcoin Core ZeroMQ server is bound to for raw transactions
|
||||
nullable: false
|
||||
range: "[0,65535]"
|
||||
integral: true
|
||||
default: 28333
|
||||
quick-connect:
|
||||
quick-connect-url:
|
||||
type: string
|
||||
name: Quick Connect URL
|
||||
description: |
|
||||
The Quick Connect URL for your Bitcoin Core RPC server
|
||||
NOTE: LND will not accept a .onion url for this option
|
||||
nullable: false
|
||||
pattern: 'btcstandup://[^:]*:[^@]*@[a-zA-Z0-9.-]+:[0-9]+(/(\?(label=.+)?)?)?'
|
||||
pattern-description: Must be a valid Quick Connect URL. For help, check out https://github.com/BlockchainCommons/Gordian/blob/master/Docs/Quick-Connect-API.md
|
||||
zmq-block-port:
|
||||
type: number
|
||||
name: ZeroMQ Block Port
|
||||
description: The port that your Bitcoin Core ZeroMQ server is bound to for raw blocks
|
||||
nullable: false
|
||||
range: "[0,65535]"
|
||||
integral: true
|
||||
default: 28332
|
||||
            zmq-tx-port:
              type: number
              name: ZeroMQ Transaction Port
              description: The port that your Bitcoin Core ZeroMQ server is bound to for raw transactions
              nullable: false
              range: "[0,65535]"
              integral: true
              default: 28333
autopilot:
  type: object
  name: Autopilot
  description: Autopilot Settings
  nullable: false
  spec:
    enabled:
      type: boolean
      name: Enabled
      description: |
        Whether the autopilot agent should be active. The autopilot agent will
        attempt to AUTOMATICALLY OPEN CHANNELS to put your node in an advantageous
        position within the network graph. DO NOT ENABLE THIS IF YOU WANT TO MANAGE
        CHANNELS MANUALLY OR DO NOT UNDERSTAND IT.
      default: false
    private:
      type: boolean
      name: Private
      description: |
        Whether the channels created by the autopilot agent should be private or not.
        Private channels won't be announced to the network.
      default: false
    maxchannels:
      type: number
      name: Maximum Channels
      description: The maximum number of channels that should be created.
      nullable: false
      range: "[1,*)"
      integral: true
      default: 5
    allocation:
      type: number
      name: Allocation
      description: |
        The fraction of total funds that should be committed to automatic channel
        establishment. For example, 60% means that 60% of the total funds available
        within the wallet should be used to automatically establish channels. The total
        number of attempted channels will still respect the "Maximum Channels" parameter.
      nullable: false
      range: "[0,100]"
      integral: false
      default: 60
      units: "%"
    min-channel-size:
      type: number
      name: Minimum Channel Size
      description: The smallest channel that the autopilot agent should create.
      nullable: false
      range: "[0,*)"
      integral: true
      default: 20000
      units: "satoshis"
    max-channel-size:
      type: number
      name: Maximum Channel Size
      description: The largest channel that the autopilot agent should create.
      nullable: false
      range: "[0,*)"
      integral: true
      default: 16777215
      units: "satoshis"
    advanced:
      type: object
      name: Advanced
      description: Advanced Options
      nullable: false
      spec:
        min-confirmations:
          type: number
          name: Minimum Confirmations
          description: |
            The minimum number of confirmations each of your inputs in funding transactions
            created by the autopilot agent must have.
          nullable: false
          range: "[0,*)"
          integral: true
          default: 1
          units: blocks
        confirmation-target:
          type: number
          name: Confirmation Target
          description: The confirmation target (in blocks) for channels opened by autopilot.
          nullable: false
          range: "[0,*)"
          integral: true
          default: 1
          units: blocks
advanced:
  type: object
  name: Advanced
  description: Advanced Options
  nullable: false
  spec:
    debug-level:
      type: enum
      name: Log Verbosity
      values:
        - trace
        - debug
        - info
        - warn
        - error
        - critical
      description: |
        Sets the level of log filtration. Trace is the most verbose, Critical is the least.
      default: info
    db-bolt-no-freelist-sync:
      type: boolean
      name: Disallow Bolt DB Freelist Sync
      description: |
        If true, prevents the database from syncing its freelist to disk.
      default: false
    db-bolt-auto-compact:
      type: boolean
      name: Compact Database on Startup
      description: |
        Performs database compaction on startup. This is necessary to keep disk usage down over time, at the cost of
        longer startup times.
      default: true
    db-bolt-auto-compact-min-age:
      type: number
      name: Minimum Autocompaction Age for Bolt DB
      description: |
        How long ago (in hours) the last compaction of a database file must be for it to be considered for auto
        compaction again. Can be set to 0 to compact on every startup.
      nullable: false
      range: "[0, *)"
      integral: true
      default: 168
      units: hours
    db-bolt-db-timeout:
      type: number
      name: Bolt DB Timeout
      description: How long should LND try to open the database before giving up?
      nullable: false
      range: "[1, 86400]"
      integral: true
      default: 60
      units: seconds
    recovery-window:
      type: number
      name: Recovery Window
      description: Number of blocks in the past that LND should scan for unknown transactions
      nullable: true
      range: "[1,*)"
      integral: true
      units: "blocks"
    payments-expiration-grace-period:
      type: number
      name: Payments Expiration Grace Period
      description: |
        A period to wait before force-closing channels with outgoing HTLCs that have timed out and are the result of
        payments initiated by this node. If specified, this grace period is taken into account in addition to the
        current block-based deadline.
      nullable: false
      range: "[1,*)"
      integral: true
      default: 30
      units: "seconds"
    default-remote-max-htlcs:
      type: number
      name: Maximum Remote HTLCs
      description: |
        The default max_htlc applied when opening or accepting channels. This value limits the number of concurrent
        HTLCs that the remote party can add to the commitment. The maximum possible value is 483.
      nullable: false
      range: "[1,483]"
      integral: true
      default: 483
      units: htlcs
    max-channel-fee-allocation:
      type: number
      name: Maximum Channel Fee Allocation
      description: |
        The maximum percentage of total funds that can be allocated to a channel's commitment fee. This only applies to
        the initiator of the channel.
      nullable: false
      range: "[0.1, 1]"
      integral: false
      default: 0.5
    max-commit-fee-rate-anchors:
      type: number
      name: Maximum Commitment Fee for Anchor Channels
      description: |
        The maximum fee rate in sat/vbyte that will be used for commitments of channels of the anchors type. Must be
        large enough to ensure transaction propagation.
      nullable: false
      range: "[1,*)"
      integral: true
      default: 10
    protocol-wumbo-channels:
      type: boolean
      name: Enable Wumbo Channels
      description: |
        If set, then lnd will create and accept requests for channels larger than 0.16 BTC
      nullable: false
      default: false
    protocol-no-anchors:
      type: boolean
      name: Disable Anchor Channels
      description: |
        Set to disable support for anchor commitments. Anchor channels allow you to determine your fees at close time by
        using a Child Pays For Parent transaction.
      nullable: false
      default: false
    protocol-disable-script-enforced-lease:
      type: boolean
      name: Disable Script Enforced Channel Leases
      description: >-
        Set to disable support for script enforced lease channel commitments. If not
        set, lnd will accept these channels by default if the remote channel party
        proposes them. Note that lnd will require 1 UTXO to be reserved for this
        channel type if it is enabled.

        Note: This may cause you to be unable to close a channel, and your wallets may not understand why.
      nullable: false
      default: false
    gc-canceled-invoices-on-startup:
      type: boolean
      name: Cleanup Canceled Invoices on Startup
      description: |
        If true, LND will attempt to garbage collect canceled invoices upon start.
      nullable: false
      default: false
bitcoin:
  type: object
  name: Bitcoin Channel Configuration
  description: Configuration options for Lightning Network channel management operating over the Bitcoin network
  nullable: false
  spec:
    default-channel-confirmations:
      type: number
      name: Default Channel Confirmations
      description: |
        The default number of confirmations a channel must have before it's considered
        open. LND will require any incoming channel requests to wait this many
        confirmations before it considers the channel active.
      nullable: false
      range: "[1,6]"
      integral: true
      default: 3
      units: "blocks"
    min-htlc:
      type: number
      name: Minimum Incoming HTLC Size
      description: |
        The smallest HTLC LND will accept on your channels, in millisatoshis.
      nullable: false
      range: "[1,*)"
      integral: true
      default: 1
      units: "millisatoshis"
    min-htlc-out:
      type: number
      name: Minimum Outgoing HTLC Size
      description: |
        The smallest HTLC LND will send out on your channels, in millisatoshis.
      nullable: false
      range: "[1,*)"
      integral: true
      default: 1000
      units: "millisatoshis"
    base-fee:
      type: number
      name: Routing Base Fee
      description: |
        The base fee in millisatoshi you will charge for forwarding payments on your
        channels.
      nullable: false
      range: "[0,*)"
      integral: true
      default: 1000
      units: "millisatoshi"
    fee-rate:
      type: number
      name: Routing Fee Rate
      description: |
        The fee rate used when forwarding payments on your channels. The total fee
        charged is the Base Fee + (amount * Fee Rate / 1000000), where amount is the
        forwarded amount. Measured in sats per million.
      nullable: false
      range: "[1,1000000)"
      integral: true
      default: 1
      units: "sats per million"
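    # Worked example of the fee formula above (all values hypothetical):
    #   base-fee = 1000 msat, fee-rate = 500 sats per million
    #   forwarded amount = 100,000 sats = 100,000,000 msat
    #   total fee = 1000 + (100,000,000 * 500 / 1,000,000)
    #             = 1000 + 50,000 = 51,000 msat = 51 sats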
    time-lock-delta:
      type: number
      name: Time Lock Delta
      description: The CLTV delta we will subtract from a forwarded HTLC's timelock value.
      nullable: false
      range: "[6, 144]"
      integral: true
      default: 40
      units: "blocks"
backend/test/config-spec/lnd-missing-pattern-description.yaml (new file, 545 lines)
@@ -0,0 +1,545 @@
control-tor-address:
  name: Control Tor Address
  description: The Tor address for the control interface.
  type: pointer
  subtype: package
  package-id: lnd
  target: tor-address
  interface: control
peer-tor-address:
  name: Peer Tor Address
  description: The Tor address for the peer interface.
  type: pointer
  subtype: package
  package-id: lnd
  target: tor-address
  interface: peer
watchtower-tor-address:
  name: Watchtower Tor Address
  description: The Tor address for the watchtower interface.
  type: pointer
  subtype: package
  package-id: lnd
  target: tor-address
  interface: watchtower
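# The `type: pointer` entries above are presumably resolved by the host at
# runtime rather than filled in by the user; assuming that behavior, a
# resolved value might look like (hypothetical):
#   control-tor-address: "abcdefghijklmnop.onion"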
alias:
  type: string
  name: Alias
  description: The public, human-readable name of your Lightning node
  nullable: true
  pattern: ".{1,32}"
  pattern-description: Must be at least 1 character and no more than 32 characters
color:
  type: string
  name: Color
  description: The public color dot of your Lightning node
  nullable: false
  pattern: "[0-9a-fA-F]{6}"
  pattern-description: |
    Must be a valid 6 digit hexadecimal RGB value. The first two digits are red, middle two are green, and final two are
    blue
  default:
    charset: "a-f,0-9"
    len: 6
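# The `default` block above appears to describe a randomly generated value;
# assuming `charset` and `len` work as they read, a generated default would be
# a 6-character string over a-f and 0-9, e.g. (hypothetical):
#   color: "3fa2c1"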
accept-keysend:
  type: boolean
  name: Accept Keysend
  description: |
    Allow others to send payments directly to your public key through keysend instead of having to get a new invoice
  default: false
accept-amp:
  type: boolean
  name: Accept Spontaneous AMPs
  description: |
    If enabled, spontaneous payments through AMP will be accepted. Payments to AMP
    invoices will be accepted regardless of this setting.
  default: false
reject-htlc:
  type: boolean
  name: Reject Routing Requests
  description: |
    If true, LND will not forward any HTLCs that are meant as onward payments. LND will still be able to send and
    receive HTLCs, but it will not be used as a routing hop.
  default: false
min-chan-size:
  type: number
  name: Minimum Channel Size
  description: |
    The smallest channel size that we should accept. Incoming channels smaller than this will be rejected.
  nullable: true
  range: "[1,16777215]"
  integral: true
  units: satoshis
max-chan-size:
  type: number
  name: Maximum Channel Size
  description: |
    The largest channel size that we should accept. Incoming channels larger than this will be rejected.
    For non-Wumbo channels this limit remains 16777215 satoshis by default, as specified in BOLT-0002. For Wumbo
    channels this limit is 1,000,000,000 satoshis (10 BTC). Set this config option explicitly to restrict your maximum
    channel size to better align with your risk tolerance. Don't forget to enable Wumbo channels under 'Advanced', if desired.
  nullable: true
  range: "[1,1000000000]"
  integral: true
  units: satoshis
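# For context on the two limits above:
#   16777215 sat = 2^24 - 1 ~= 0.168 BTC (the pre-Wumbo cap from BOLT-0002)
#   1000000000 sat = 10 BTC (the Wumbo cap in this spec)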
tor:
  type: object
  name: Tor Config
  nullable: false
  spec:
    use-tor-only:
      type: boolean
      name: Use Tor for all traffic
      description: >-
        Use the Tor proxy even for connections that are reachable on clearnet. This will hide your node's public IP
        address, but will slow down your node's performance
      nullable: false
      default: false
    stream-isolation:
      type: boolean
      name: Stream Isolation
      description: >-
        Enable Tor stream isolation by randomizing user credentials for each
        connection. With this mode active, each connection will use a new circuit.
        This means that multiple applications (other than lnd) using Tor won't be mixed
        in with lnd's traffic.

        This option may not be used when 'Use Tor for all traffic' is disabled, since direct
        connections compromise source IP privacy by default.
      nullable: false
      default: false
bitcoind:
  type: union
  name: Bitcoin Core
  description: |
    The Bitcoin Core node to connect to:
    - internal: The Bitcoin Core and Proxy services installed to your Embassy
    - external: An unpruned Bitcoin Core node running on a different device
  tag:
    id: type
    name: Type
    variant-names:
      internal: Internal (Bitcoin Core)
      internal-proxy: Internal (Bitcoin Proxy)
      external: External
    description: |
      The Bitcoin Core node to connect to:
      - internal: The Bitcoin Core and Proxy services installed to your Embassy
      - external: An unpruned Bitcoin Core node running on a different device
  default: internal
  variants:
    internal:
      user:
        type: pointer
        name: RPC Username
        description: The username for Bitcoin Core's RPC interface
        subtype: package
        package-id: bitcoind
        target: config
        multi: false
        selector: "$.rpc.username"
      password:
        type: pointer
        name: RPC Password
        description: The password for Bitcoin Core's RPC interface
        subtype: package
        package-id: bitcoind
        target: config
        multi: false
        selector: "$.rpc.password"
    internal-proxy:
      user:
        type: pointer
        name: RPC Username
        description: The username for the RPC user allocated to lnd
        subtype: package
        package-id: btc-rpc-proxy
        target: config
        multi: false
        selector: '$.users[?(@.name == "lnd")].name'
        # index: 'users.[first(item => ''item.name = "lnd")].name'
      password:
        type: pointer
        name: RPC Password
        description: The password for the RPC user allocated to lnd
        subtype: package
        package-id: btc-rpc-proxy
        target: config
        multi: false
        selector: '$.users[?(@.name == "lnd")].password'
        # index: 'users.[first(item => ''item.name = "lnd")].password'
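    # The selector strings above are JSONPath expressions. Assuming
    # btc-rpc-proxy's config has the shape sketched here (hypothetical), the
    # two selectors pick out the name and password of its "lnd" user:
    #   users:
    #     - name: lnd
    #       password: hunter2
    #   '$.users[?(@.name == "lnd")].name'     -> "lnd"
    #   '$.users[?(@.name == "lnd")].password' -> "hunter2"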
    external:
      connection-settings:
        type: union
        name: Connection Settings
        description: Information to connect to an external unpruned Bitcoin Core node
        tag:
          id: type
          name: Type
          description: |
            - Manual: Raw information for finding a Bitcoin Core node
            - Quick Connect: A Quick Connect URL for a Bitcoin Core node
          variant-names:
            manual: Manual
            quick-connect: Quick Connect
        default: quick-connect
        variants:
          manual:
            host:
              type: string
              name: Public Address
              description: The public address of your Bitcoin Core server
              nullable: false
            rpc-user:
              type: string
              name: RPC Username
              description: The username for the RPC user on your Bitcoin Core RPC server
              nullable: false
            rpc-password:
              type: string
              name: RPC Password
              description: The password for the RPC user on your Bitcoin Core RPC server
              nullable: false
            rpc-port:
              type: number
              name: RPC Port
              description: The port that your Bitcoin Core RPC server is bound to
              nullable: false
              range: "[0,65535]"
              integral: true
              default: 8332
            zmq-block-port:
              type: number
              name: ZeroMQ Block Port
              description: The port that your Bitcoin Core ZeroMQ server is bound to for raw blocks
              nullable: false
              range: "[0,65535]"
              integral: true
              default: 28332
            zmq-tx-port:
              type: number
              name: ZeroMQ Transaction Port
              description: The port that your Bitcoin Core ZeroMQ server is bound to for raw transactions
              nullable: false
              range: "[0,65535]"
              integral: true
              default: 28333
          quick-connect:
            quick-connect-url:
              type: string
              name: Quick Connect URL
              description: |
                The Quick Connect URL for your Bitcoin Core RPC server
                NOTE: LND will not accept a .onion url for this option
              nullable: false
              pattern: 'btcstandup://[^:]*:[^@]*@[a-zA-Z0-9.-]+:[0-9]+(/(\?(label=.+)?)?)?'
            zmq-block-port:
              type: number
              name: ZeroMQ Block Port
              description: The port that your Bitcoin Core ZeroMQ server is bound to for raw blocks
              nullable: false
              range: "[0,65535]"
              integral: true
              default: 28332
            zmq-tx-port:
              type: number
              name: ZeroMQ Transaction Port
              description: The port that your Bitcoin Core ZeroMQ server is bound to for raw transactions
              nullable: false
              range: "[0,65535]"
              integral: true
              default: 28333
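# To show how the union's tag and variants combine, here is a hypothetical
# config fragment that would satisfy the external/manual path (all values
# made up):
#   bitcoind:
#     type: external
#     connection-settings:
#       type: manual
#       host: mynode.example.com
#       rpc-user: rpcuser
#       rpc-password: hunter2
#       rpc-port: 8332
#       zmq-block-port: 28332
#       zmq-tx-port: 28333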
autopilot:
  type: object
  name: Autopilot
  description: Autopilot Settings
  nullable: false
  spec:
    enabled:
      type: boolean
      name: Enabled
      description: |
        Whether the autopilot agent should be active. The autopilot agent will
        attempt to AUTOMATICALLY OPEN CHANNELS to put your node in an advantageous
        position within the network graph. DO NOT ENABLE THIS IF YOU WANT TO MANAGE
        CHANNELS MANUALLY OR DO NOT UNDERSTAND IT.
      default: false
    private:
      type: boolean
      name: Private
      description: |
        Whether the channels created by the autopilot agent should be private or not.
        Private channels won't be announced to the network.
      default: false
    maxchannels:
      type: number
      name: Maximum Channels
      description: The maximum number of channels that should be created.
      nullable: false
      range: "[1,*)"
      integral: true
      default: 5
    allocation:
      type: number
      name: Allocation
      description: |
        The fraction of total funds that should be committed to automatic channel
        establishment. For example, 60% means that 60% of the total funds available
        within the wallet should be used to automatically establish channels. The total
        number of attempted channels will still respect the "Maximum Channels" parameter.
      nullable: false
      range: "[0,100]"
      integral: false
      default: 60
      units: "%"
    min-channel-size:
      type: number
      name: Minimum Channel Size
      description: The smallest channel that the autopilot agent should create.
      nullable: false
      range: "[0,*)"
      integral: true
      default: 20000
      units: "satoshis"
    max-channel-size:
      type: number
      name: Maximum Channel Size
      description: The largest channel that the autopilot agent should create.
      nullable: false
      range: "[0,*)"
      integral: true
      default: 16777215
      units: "satoshis"
    advanced:
      type: object
      name: Advanced
      description: Advanced Options
      nullable: false
      spec:
        min-confirmations:
          type: number
          name: Minimum Confirmations
          description: |
            The minimum number of confirmations each of your inputs in funding transactions
            created by the autopilot agent must have.
          nullable: false
          range: "[0,*)"
          integral: true
          default: 1
          units: blocks
        confirmation-target:
          type: number
          name: Confirmation Target
          description: The confirmation target (in blocks) for channels opened by autopilot.
          nullable: false
          range: "[0,*)"
          integral: true
          default: 1
          units: blocks
advanced:
  type: object
  name: Advanced
  description: Advanced Options
  nullable: false
  spec:
    debug-level:
      type: enum
      name: Log Verbosity
      values:
        - trace
        - debug
        - info
        - warn
        - error
        - critical
      description: |
        Sets the level of log filtration. Trace is the most verbose, Critical is the least.
      default: info
    db-bolt-no-freelist-sync:
      type: boolean
      name: Disallow Bolt DB Freelist Sync
      description: |
        If true, prevents the database from syncing its freelist to disk.
      default: false
    db-bolt-auto-compact:
      type: boolean
      name: Compact Database on Startup
      description: |
        Performs database compaction on startup. This is necessary to keep disk usage down over time, at the cost of
        longer startup times.
      default: true
    db-bolt-auto-compact-min-age:
      type: number
      name: Minimum Autocompaction Age for Bolt DB
      description: |
        How long ago (in hours) the last compaction of a database file must be for it to be considered for auto
        compaction again. Can be set to 0 to compact on every startup.
      nullable: false
      range: "[0, *)"
      integral: true
      default: 168
      units: hours
    db-bolt-db-timeout:
      type: number
      name: Bolt DB Timeout
      description: How long should LND try to open the database before giving up?
      nullable: false
      range: "[1, 86400]"
      integral: true
      default: 60
      units: seconds
    recovery-window:
      type: number
      name: Recovery Window
      description: Number of blocks in the past that LND should scan for unknown transactions
      nullable: true
      range: "[1,*)"
      integral: true
      units: "blocks"
    payments-expiration-grace-period:
      type: number
      name: Payments Expiration Grace Period
      description: |
        A period to wait before force-closing channels with outgoing HTLCs that have timed out and are the result of
        payments initiated by this node. If specified, this grace period is taken into account in addition to the
        current block-based deadline.
      nullable: false
      range: "[1,*)"
      integral: true
      default: 30
      units: "seconds"
    default-remote-max-htlcs:
      type: number
      name: Maximum Remote HTLCs
      description: |
        The default max_htlc applied when opening or accepting channels. This value limits the number of concurrent
        HTLCs that the remote party can add to the commitment. The maximum possible value is 483.
      nullable: false
      range: "[1,483]"
      integral: true
      default: 483
      units: htlcs
    max-channel-fee-allocation:
      type: number
      name: Maximum Channel Fee Allocation
      description: |
        The maximum percentage of total funds that can be allocated to a channel's commitment fee. This only applies to
        the initiator of the channel.
      nullable: false
      range: "[0.1, 1]"
      integral: false
      default: 0.5
    max-commit-fee-rate-anchors:
      type: number
      name: Maximum Commitment Fee for Anchor Channels
      description: |
        The maximum fee rate in sat/vbyte that will be used for commitments of channels of the anchors type. Must be
        large enough to ensure transaction propagation.
      nullable: false
      range: "[1,*)"
      integral: true
      default: 10
    protocol-wumbo-channels:
      type: boolean
      name: Enable Wumbo Channels
      description: |
        If set, then lnd will create and accept requests for channels larger than 0.16 BTC
      nullable: false
      default: false
    protocol-no-anchors:
      type: boolean
      name: Disable Anchor Channels
      description: |
        Set to disable support for anchor commitments. Anchor channels allow you to determine your fees at close time by
        using a Child Pays For Parent transaction.
      nullable: false
      default: false
    protocol-disable-script-enforced-lease:
      type: boolean
      name: Disable Script Enforced Channel Leases
      description: >-
        Set to disable support for script enforced lease channel commitments. If not
        set, lnd will accept these channels by default if the remote channel party
        proposes them. Note that lnd will require 1 UTXO to be reserved for this
        channel type if it is enabled.

        Note: This may cause you to be unable to close a channel, and your wallets may not understand why.
      nullable: false
      default: false
    gc-canceled-invoices-on-startup:
      type: boolean
      name: Cleanup Canceled Invoices on Startup
      description: |
        If true, LND will attempt to garbage collect canceled invoices upon start.
      nullable: false
      default: false
bitcoin:
  type: object
  name: Bitcoin Channel Configuration
  description: Configuration options for Lightning Network channel management operating over the Bitcoin network
  nullable: false
  spec:
    default-channel-confirmations:
      type: number
      name: Default Channel Confirmations
      description: |
        The default number of confirmations a channel must have before it's considered
        open. LND will require any incoming channel requests to wait this many
        confirmations before it considers the channel active.
      nullable: false
      range: "[1,6]"
      integral: true
      default: 3
      units: "blocks"
    min-htlc:
      type: number
      name: Minimum Incoming HTLC Size
      description: |
        The smallest HTLC LND will accept on your channels, in millisatoshis.
      nullable: false
      range: "[1,*)"
      integral: true
      default: 1
      units: "millisatoshis"
    min-htlc-out:
      type: number
      name: Minimum Outgoing HTLC Size
      description: |
        The smallest HTLC LND will send out on your channels, in millisatoshis.
      nullable: false
      range: "[1,*)"
      integral: true
      default: 1000
      units: "millisatoshis"
    base-fee:
      type: number
      name: Routing Base Fee
      description: |
        The base fee in millisatoshi you will charge for forwarding payments on your
        channels.
      nullable: false
      range: "[0,*)"
      integral: true
      default: 1000
      units: "millisatoshi"
    fee-rate:
      type: number
      name: Routing Fee Rate
      description: |
        The fee rate used when forwarding payments on your channels. The total fee
        charged is the Base Fee + (amount * Fee Rate / 1000000), where amount is the
        forwarded amount. Measured in sats per million.
      nullable: false
      range: "[1,1000000)"
      integral: true
      default: 1
      units: "sats per million"
    time-lock-delta:
      type: number
      name: Time Lock Delta
      description: The CLTV delta we will subtract from a forwarded HTLC's timelock value.
      nullable: false
      range: "[6, 144]"
      integral: true
      default: 40
      units: "blocks"
Some files were not shown because too many files have changed in this diff.