Compare commits

...

82 Commits

Author SHA1 Message Date
Aiden McClelland
2191707b94 wip: iroh 2025-08-31 15:43:34 -06:00
Aiden McClelland
63a4bba19a fix gha sccache 2025-08-29 13:32:12 -06:00
Aiden McClelland
d64b80987c support for sccache 2025-08-29 13:07:29 -06:00
Aiden McClelland
fbea3c56e6 clean up logs 2025-08-29 11:59:36 -06:00
Aiden McClelland
58b6b5c4ea Merge branch 'feature/proxies' of github.com:Start9Labs/start-os into feature/proxies 2025-08-29 11:48:31 -06:00
Aiden McClelland
369e559518 fix file_stream and remove non-terminating test 2025-08-29 11:48:30 -06:00
Alex Inkin
ca39ffb9eb refactor: fix multiple comments (#3013)
* refactor: fix multiple comments

* styling changes, add documentation to sidebar

* translations for dns page

* refactor: subtle colors

* rearrange service page

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2025-08-29 11:37:34 -06:00
Aiden McClelland
8163db7ac3 socks5 proxy working 2025-08-29 11:19:30 -06:00
Aiden McClelland
b3b031ed47 wip: debugging tor 2025-08-27 15:10:54 -06:00
Matt Hill
c5fa09c4d4 handle wh file uploads 2025-08-27 10:06:39 -06:00
Alex Inkin
b7438ef155 refactor: refactor forms components and remove legacy Taiga UI package (#3012) 2025-08-27 09:57:49 -06:00
Matt Hill
2a27716e29 remove unnecessary truthy check 2025-08-26 22:16:57 -06:00
Matt Hill
7a94086d45 move status column in service list 2025-08-26 13:59:16 -06:00
Matt Hill
ec72fb4bfd fix showing dns records 2025-08-26 13:08:24 -06:00
Matt Hill
9eaaa85625 implement toggling gateways for service interface 2025-08-26 12:29:14 -06:00
Aiden McClelland
f876cd796e Merge branch 'feature/proxies' of github.com:Start9Labs/start-os into feature/proxies 2025-08-26 12:13:41 -06:00
Aiden McClelland
9fe9608560 misc fixes 2025-08-26 12:13:39 -06:00
Matt Hill
303f6a55ac Merge branch 'feature/proxies' of github.com:Start9Labs/start-os into feature/proxies 2025-08-26 11:37:12 -06:00
Aiden McClelland
ff686d3c52 Merge branch 'feature/proxies' of github.com:Start9Labs/start-os into feature/proxies 2025-08-25 19:29:52 -06:00
Aiden McClelland
f4cf94acd2 fix dns 2025-08-25 19:29:39 -06:00
Matt Hill
0709a5c242 reason instead of description 2025-08-24 10:24:48 -06:00
Matt Hill
701db35ca3 remove logs 2025-08-24 09:41:58 -06:00
Matt Hill
57bdc400b4 honor hidden form values 2025-08-24 09:40:24 -06:00
Matt Hill
611e19da26 placeholder for empty service interfaces table 2025-08-24 08:54:44 -06:00
Matt Hill
0e9b9fce3e simple renaming 2025-08-24 08:46:12 -06:00
Aiden McClelland
d6d91822cc coukd work 2025-08-22 08:53:38 -06:00
Aiden McClelland
5bee2cef96 fix deadlock 2025-08-21 18:40:53 -06:00
Aiden McClelland
359146f02c wip 2025-08-20 14:46:15 -06:00
Matt Hill
d564471825 more translations 2025-08-20 11:45:17 -06:00
Alex Inkin
931505ff08 fix: refactor legacy components (#3010)
* fix: comments

* fix: refactor legacy components

* remove default again

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2025-08-19 08:13:36 -06:00
Alex Inkin
0709ea65d7 fix: comments (#3009)
* fix: comments

* undo default

---------

Co-authored-by: Matt Hill <mattnine@protonmail.com>
2025-08-19 08:10:14 -06:00
Aiden McClelland
75a20ae5c5 it builds 2025-08-18 18:12:03 -06:00
Matt Hill
aaf2361909 add missing translations 2025-08-18 15:16:43 -06:00
Matt Hill
17c4f3a1e8 fix dns form 2025-08-17 09:01:09 -06:00
Matt Hill
a0a2c20b08 fix all types 2025-08-16 23:14:19 -06:00
Aiden McClelland
f7f0b7dc1a revert to ts-rs v9 2025-08-16 22:33:53 -06:00
Aiden McClelland
d06c443c7d clean up tech debt, bump dependencies 2025-08-15 18:32:27 -06:00
Aiden McClelland
7094d1d939 update types 2025-08-15 18:05:52 -06:00
Aiden McClelland
8f573386c6 with todos 2025-08-15 16:07:23 -06:00
Matt Hill
bfc88a2225 fix sort functions for public and private domains 2025-08-13 14:28:53 -06:00
Matt Hill
d5bb537368 dns 2025-08-13 13:27:05 -06:00
Matt Hill
3abae65b22 better icon for restart tor 2025-08-13 10:54:07 -06:00
Matt Hill
3848e8f2df restart tor instead of reset 2025-08-13 10:53:46 -06:00
Matt Hill
63323faa97 nix StartOS domains, implement public and private domains at interface scope 2025-08-11 23:01:31 -06:00
Matt Hill
e8b7a35d43 public domain, max width, descriptions for dns 2025-08-11 10:03:35 -06:00
waterplea
da9a1b99d9 fix: dns testing 2025-08-11 13:50:58 +07:00
Matt Hill
68780ccbdd forms for adding domain, rework things based on new ideas 2025-08-10 23:33:05 -06:00
Aiden McClelland
022f7134be wip: start-tunnel & fix build 2025-08-09 21:57:32 -06:00
Matt Hill
b4491a3f39 only translations left 2025-08-09 09:29:47 -06:00
waterplea
29ddfad9d7 fix: address comments 2025-08-09 17:45:31 +07:00
Matt Hill
86a24ec067 domains preferred 2025-08-08 21:00:32 -06:00
Matt Hill
35ace3997b MVP of service interface page 2025-08-08 20:57:16 -06:00
Aiden McClelland
4f24658d33 fix unnecessary export 2025-08-08 11:12:11 -06:00
Aiden McClelland
3a84cc97fe comments 2025-08-07 17:21:09 -06:00
Aiden McClelland
3845550e90 best address logic 2025-08-07 17:15:23 -06:00
Matt Hill
4d5ff1a97b start sorting addresses 2025-08-07 13:47:27 -06:00
Matt Hill
b864816033 better placeholder for no addresses 2025-08-07 09:08:41 -06:00
Matt Hill
2762076683 minor 2025-08-07 09:03:54 -06:00
Matt Hill
8796e41ea0 merge 2025-08-07 08:18:47 -06:00
waterplea
8edb7429f5 refactor: styles for interfaces page 2025-08-07 18:53:35 +07:00
Matt Hill
5109efcee2 different options for clearnet domains 2025-08-06 18:45:41 -06:00
Matt Hill
177232ab28 start service interface page, WIP 2025-08-06 17:55:21 -06:00
Aiden McClelland
d6dfaf8feb domains api + migration 2025-08-06 14:29:35 -06:00
Aiden McClelland
ea12251a7e add ip util to sdk 2025-08-06 11:14:41 -06:00
waterplea
b35a89da29 refactor: add file control to form service 2025-08-06 19:07:21 +07:00
Matt Hill
d8d1009417 domains mostly finished 2025-08-05 17:29:48 -06:00
Aiden McClelland
3835562200 fix fe types 2025-08-05 17:14:17 -06:00
Aiden McClelland
0d227e62dc Merge branch 'feature/proxies' of github.com:Start9Labs/start-os into feature/proxies 2025-08-05 17:07:27 -06:00
Aiden McClelland
10af26116d refactor public/private gateways 2025-08-05 17:07:25 -06:00
Matt Hill
f8b03ea917 certificate authorities 2025-08-05 13:03:04 -06:00
Matt Hill
4a2777c52f domains and acme refactor 2025-08-05 09:29:04 -06:00
waterplea
86dbf26253 refactor: gateways page 2025-08-05 17:39:48 +07:00
waterplea
32999fc55f refactor: domains page 2025-08-04 19:34:57 +07:00
Matt Hill
ea2b1f5920 edit instead of chnage acme and change gateway 2025-08-01 22:59:10 -06:00
Matt Hill
716ed64aa8 show and test dns 2025-07-31 19:57:04 -06:00
Matt Hill
f23659f4ea dont show hidden actions 2025-07-31 13:42:43 -06:00
Matt Hill
daf584b33e add domains and gateways, remove routers, fix docs links 2025-07-30 15:33:13 -06:00
Aiden McClelland
e6b7390a61 wip start-tunneld 2025-07-24 18:33:55 -06:00
Aiden McClelland
84f554269f proxy -> tunnel, implement backend apis 2025-07-23 15:44:57 -06:00
Matt Hill
21adce5c5d fix file type 2025-07-22 17:07:20 -06:00
Aiden McClelland
d3e7e37f59 backend changes 2025-07-22 16:48:16 -06:00
Matt Hill
4d9709eb1c add support for inbound proxies 2025-07-22 16:40:31 -06:00
493 changed files with 18728 additions and 17521 deletions

View File

@@ -93,8 +93,18 @@ jobs:
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v3
- name: Configure sccache
uses: actions/github-script@v7
with:
script: |
core.exportVariable('ACTIONS_RESULTS_URL', process.env.ACTIONS_RESULTS_URL || '');
core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '');
- name: Make
run: make ARCH=${{ matrix.arch }} compiled-${{ matrix.arch }}.tar
env:
SCCACHE_GHA_ENABLED: on
SCCACHE_GHA_VERSION: 0
- uses: actions/upload-artifact@v4
with:

3
.gitignore vendored
View File

@@ -1,8 +1,5 @@
.DS_Store
.idea
system-images/binfmt/binfmt.tar
system-images/compat/compat.tar
system-images/util/util.tar
/*.img
/*.img.gz
/*.img.xz

View File

@@ -1,3 +1,6 @@
ls-files = $(shell git ls-files --cached --others --exclude-standard $1)
PROFILE = release
PLATFORM_FILE := $(shell ./check-platform.sh)
ENVIRONMENT_FILE := $(shell ./check-environment.sh)
GIT_HASH_FILE := $(shell ./check-git-hash.sh)
@@ -9,23 +12,27 @@ IMAGE_TYPE=$(shell if [ "$(PLATFORM)" = raspberrypi ]; then echo img; else echo
WEB_UIS := web/dist/raw/ui/index.html web/dist/raw/setup-wizard/index.html web/dist/raw/install-wizard/index.html
COMPRESSED_WEB_UIS := web/dist/static/ui/index.html web/dist/static/setup-wizard/index.html web/dist/static/install-wizard/index.html
FIRMWARE_ROMS := ./firmware/$(PLATFORM) $(shell jq --raw-output '.[] | select(.platform[] | contains("$(PLATFORM)")) | "./firmware/$(PLATFORM)/" + .id + ".rom.gz"' build/lib/firmware.json)
BUILD_SRC := $(shell git ls-files build) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS)
DEBIAN_SRC := $(shell git ls-files debian/)
IMAGE_RECIPE_SRC := $(shell git ls-files image-recipe/)
BUILD_SRC := $(call ls-files, build) build/lib/depends build/lib/conflicts $(FIRMWARE_ROMS)
DEBIAN_SRC := $(call ls-files, debian/)
IMAGE_RECIPE_SRC := $(call ls-files, image-recipe/)
STARTD_SRC := core/startos/startd.service $(BUILD_SRC)
COMPAT_SRC := $(shell git ls-files system-images/compat/)
UTILS_SRC := $(shell git ls-files system-images/utils/)
BINFMT_SRC := $(shell git ls-files system-images/binfmt/)
CORE_SRC := $(shell git ls-files core) $(shell git ls-files --recurse-submodules patch-db) $(GIT_HASH_FILE)
WEB_SHARED_SRC := $(shell git ls-files web/projects/shared) $(shell git ls-files web/projects/marketplace) $(shell ls -p web/ | grep -v / | sed 's/^/web\//g') web/node_modules/.package-lock.json web/config.json patch-db/client/dist/index.js sdk/baseDist/package.json web/patchdb-ui-seed.json sdk/dist/package.json
WEB_UI_SRC := $(shell git ls-files web/projects/ui)
WEB_SETUP_WIZARD_SRC := $(shell git ls-files web/projects/setup-wizard)
WEB_INSTALL_WIZARD_SRC := $(shell git ls-files web/projects/install-wizard)
CORE_SRC := $(call ls-files, core) $(shell git ls-files --recurse-submodules patch-db) $(GIT_HASH_FILE)
WEB_SHARED_SRC := $(call ls-files, web/projects/shared) $(call ls-files, web/projects/marketplace) $(shell ls -p web/ | grep -v / | sed 's/^/web\//g') web/node_modules/.package-lock.json web/config.json patch-db/client/dist/index.js sdk/baseDist/package.json web/patchdb-ui-seed.json sdk/dist/package.json
WEB_UI_SRC := $(call ls-files, web/projects/ui)
WEB_SETUP_WIZARD_SRC := $(call ls-files, web/projects/setup-wizard)
WEB_INSTALL_WIZARD_SRC := $(call ls-files, web/projects/install-wizard)
PATCH_DB_CLIENT_SRC := $(shell git ls-files --recurse-submodules patch-db/client)
GZIP_BIN := $(shell which pigz || which gzip)
TAR_BIN := $(shell which gtar || which tar)
COMPILED_TARGETS := core/target/$(ARCH)-unknown-linux-musl/release/startbox core/target/$(ARCH)-unknown-linux-musl/release/containerbox system-images/compat/docker-images/$(ARCH).tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar container-runtime/rootfs.$(ARCH).squashfs
ALL_TARGETS := $(STARTD_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) cargo-deps/$(ARCH)-unknown-linux-musl/release/startos-backup-fs $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo cargo-deps/aarch64-unknown-linux-musl/release/pi-beep; fi) $(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]; then echo cargo-deps/$(ARCH)-unknown-linux-musl/release/tokio-console; fi') $(PLATFORM_FILE)
COMPILED_TARGETS := core/target/$(ARCH)-unknown-linux-musl/$(PROFILE)/startbox core/target/$(ARCH)-unknown-linux-musl/release/containerbox container-runtime/rootfs.$(ARCH).squashfs
ALL_TARGETS := $(STARTD_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) cargo-deps/$(ARCH)-unknown-linux-musl/release/startos-backup-fs $(PLATFORM_FILE) \
$(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then \
echo cargo-deps/aarch64-unknown-linux-musl/release/pi-beep; \
fi) \
$(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]; then \
echo cargo-deps/$(ARCH)-unknown-linux-musl/release/tokio-console; \
echo cargo-deps/$(ARCH)-unknown-linux-musl/release/flamegraph; \
fi')
REBUILD_TYPES = 1
ifeq ($(REMOTE),)
@@ -59,8 +66,6 @@ touch:
metadata: $(VERSION_FILE) $(PLATFORM_FILE) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE)
clean:
rm -f system-images/**/*.tar
rm -rf system-images/compat/target
rm -rf core/target
rm -rf core/startos/bindings
rm -rf web/.angular
@@ -95,17 +100,20 @@ test: | test-core test-sdk test-container-runtime
test-core: $(CORE_SRC) $(ENVIRONMENT_FILE)
./core/run-tests.sh
test-sdk: $(shell git ls-files sdk) sdk/base/lib/osBindings/index.ts
test-sdk: $(call ls-files, sdk) sdk/base/lib/osBindings/index.ts
cd sdk && make test
test-container-runtime: container-runtime/node_modules/.package-lock.json $(shell git ls-files container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json
test-container-runtime: container-runtime/node_modules/.package-lock.json $(call ls-files, container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json
cd container-runtime && npm test
cli:
cd core && ./install-cli.sh
./core/install-cli.sh
registry:
cd core && ./build-registrybox.sh
./core/build-registrybox.sh
tunnel:
./core/build-tunnelbox.sh
deb: results/$(BASENAME).deb
@@ -126,12 +134,14 @@ results/$(BASENAME).$(IMAGE_TYPE) results/$(BASENAME).squashfs: $(IMAGE_RECIPE_S
install: $(ALL_TARGETS)
$(call mkdir,$(DESTDIR)/usr/bin)
$(call mkdir,$(DESTDIR)/usr/sbin)
$(call cp,core/target/$(ARCH)-unknown-linux-musl/release/startbox,$(DESTDIR)/usr/bin/startbox)
$(call cp,core/target/$(ARCH)-unknown-linux-musl/$(PROFILE)/startbox,$(DESTDIR)/usr/bin/startbox)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/startd)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-cli)
$(call ln,/usr/bin/startbox,$(DESTDIR)/usr/bin/start-sdk)
if [ "$(PLATFORM)" = "raspberrypi" ]; then $(call cp,cargo-deps/aarch64-unknown-linux-musl/release/pi-beep,$(DESTDIR)/usr/bin/pi-beep); fi
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then $(call cp,cargo-deps/$(ARCH)-unknown-linux-musl/release/tokio-console,$(DESTDIR)/usr/bin/tokio-console); fi
if /bin/bash -c '[[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]'; then \
$(call cp,cargo-deps/$(ARCH)-unknown-linux-musl/release/tokio-console,$(DESTDIR)/usr/bin/tokio-console); \
$(call cp,cargo-deps/$(ARCH)-unknown-linux-musl/release/flamegraph,$(DESTDIR)/usr/bin/flamegraph); \
fi
$(call cp,cargo-deps/$(ARCH)-unknown-linux-musl/release/startos-backup-fs,$(DESTDIR)/usr/bin/startos-backup-fs)
$(call ln,/usr/bin/startos-backup-fs,$(DESTDIR)/usr/sbin/mount.backup-fs)
@@ -149,10 +159,6 @@ install: $(ALL_TARGETS)
$(call cp,GIT_HASH.txt,$(DESTDIR)/usr/lib/startos/GIT_HASH.txt)
$(call cp,VERSION.txt,$(DESTDIR)/usr/lib/startos/VERSION.txt)
$(call mkdir,$(DESTDIR)/usr/lib/startos/system-images)
$(call cp,system-images/compat/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/compat.tar)
$(call cp,system-images/utils/docker-images/$(ARCH).tar,$(DESTDIR)/usr/lib/startos/system-images/utils.tar)
$(call cp,firmware/$(PLATFORM),$(DESTDIR)/usr/lib/startos/firmware)
update-overlay: $(ALL_TARGETS)
@@ -164,10 +170,10 @@ update-overlay: $(ALL_TARGETS)
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) PLATFORM=$(PLATFORM)
$(call ssh,"sudo systemctl start startd")
wormhole: core/target/$(ARCH)-unknown-linux-musl/release/startbox
wormhole: core/target/$(ARCH)-unknown-linux-musl/$(PROFILE)/startbox
@echo "Paste the following command into the shell of your StartOS server:"
@echo
@wormhole send core/target/$(ARCH)-unknown-linux-musl/release/startbox 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo /usr/lib/startos/scripts/chroot-and-upgrade \"cd /usr/bin && rm startbox && wormhole receive --accept-file %s && chmod +x startbox\"\n", $$3 }'
@wormhole send core/target/$(ARCH)-unknown-linux-musl/$(PROFILE)/startbox 2>&1 | awk -Winteractive '/wormhole receive/ { printf "sudo /usr/lib/startos/scripts/chroot-and-upgrade \"cd /usr/bin && rm startbox && wormhole receive --accept-file %s && chmod +x startbox\"\n", $$3 }'
wormhole-deb: results/$(BASENAME).deb
@echo "Paste the following command into the shell of your StartOS server:"
@@ -187,10 +193,10 @@ update: $(ALL_TARGETS)
$(MAKE) install REMOTE=$(REMOTE) SSHPASS=$(SSHPASS) DESTDIR=/media/startos/next PLATFORM=$(PLATFORM)
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync "apt-get install -y $(shell cat ./build/lib/depends)"')
update-startbox: core/target/$(ARCH)-unknown-linux-musl/release/startbox # only update binary (faster than full update)
update-startbox: core/target/$(ARCH)-unknown-linux-musl/$(PROFILE)/startbox # only update binary (faster than full update)
@if [ -z "$(REMOTE)" ]; then >&2 echo "Must specify REMOTE" && false; fi
$(call ssh,'sudo /usr/lib/startos/scripts/chroot-and-upgrade --create')
$(call cp,core/target/$(ARCH)-unknown-linux-musl/release/startbox,/media/startos/next/usr/bin/startbox)
$(call cp,core/target/$(ARCH)-unknown-linux-musl/$(PROFILE)/startbox,/media/startos/next/usr/bin/startbox)
$(call ssh,'sudo /media/startos/next/usr/lib/startos/scripts/chroot-and-upgrade --no-sync true')
update-deb: results/$(BASENAME).deb # better than update, but only available from debian
@@ -235,20 +241,20 @@ sdk/base/lib/osBindings/index.ts: $(shell if [ "$(REBUILD_TYPES)" -ne 0 ]; then
rsync -ac --delete core/startos/bindings/ sdk/base/lib/osBindings/
touch sdk/base/lib/osBindings/index.ts
core/startos/bindings/index.ts: $(shell git ls-files core) $(ENVIRONMENT_FILE)
core/startos/bindings/index.ts: $(call ls-files, core) $(ENVIRONMENT_FILE)
rm -rf core/startos/bindings
./core/build-ts.sh
ls core/startos/bindings/*.ts | sed 's/core\/startos\/bindings\/\([^.]*\)\.ts/export { \1 } from ".\/\1";/g' | grep -v '"./index"' | tee core/startos/bindings/index.ts
npm --prefix sdk exec -- prettier --config ./sdk/base/package.json -w ./core/startos/bindings/*.ts
touch core/startos/bindings/index.ts
sdk/dist/package.json sdk/baseDist/package.json: $(shell git ls-files sdk) sdk/base/lib/osBindings/index.ts
sdk/dist/package.json sdk/baseDist/package.json: $(call ls-files, sdk) sdk/base/lib/osBindings/index.ts
(cd sdk && make bundle)
touch sdk/dist/package.json
touch sdk/baseDist/package.json
# TODO: make container-runtime its own makefile?
container-runtime/dist/index.js: container-runtime/node_modules/.package-lock.json $(shell git ls-files container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json
container-runtime/dist/index.js: container-runtime/node_modules/.package-lock.json $(call ls-files, container-runtime/src) container-runtime/package.json container-runtime/tsconfig.json
npm --prefix container-runtime run build
container-runtime/dist/node_modules/.package-lock.json container-runtime/dist/package.json container-runtime/dist/package-lock.json: container-runtime/package.json container-runtime/package-lock.json sdk/dist/package.json container-runtime/install-dist-deps.sh
@@ -264,18 +270,9 @@ build/lib/depends build/lib/conflicts: build/dpkg-deps/*
$(FIRMWARE_ROMS): build/lib/firmware.json download-firmware.sh $(PLATFORM_FILE)
./download-firmware.sh $(PLATFORM)
system-images/compat/docker-images/$(ARCH).tar: $(COMPAT_SRC)
cd system-images/compat && make docker-images/$(ARCH).tar && touch docker-images/$(ARCH).tar
system-images/utils/docker-images/$(ARCH).tar: $(UTILS_SRC)
cd system-images/utils && make docker-images/$(ARCH).tar && touch docker-images/$(ARCH).tar
system-images/binfmt/docker-images/$(ARCH).tar: $(BINFMT_SRC)
cd system-images/binfmt && make docker-images/$(ARCH).tar && touch docker-images/$(ARCH).tar
core/target/$(ARCH)-unknown-linux-musl/release/startbox: $(CORE_SRC) $(COMPRESSED_WEB_UIS) web/patchdb-ui-seed.json $(ENVIRONMENT_FILE)
ARCH=$(ARCH) ./core/build-startbox.sh
touch core/target/$(ARCH)-unknown-linux-musl/release/startbox
core/target/$(ARCH)-unknown-linux-musl/$(PROFILE)/startbox: $(CORE_SRC) $(COMPRESSED_WEB_UIS) web/patchdb-ui-seed.json $(ENVIRONMENT_FILE)
ARCH=$(ARCH) PROFILE=$(PROFILE) ./core/build-startbox.sh
touch core/target/$(ARCH)-unknown-linux-musl/$(PROFILE)/startbox
core/target/$(ARCH)-unknown-linux-musl/release/containerbox: $(CORE_SRC) $(ENVIRONMENT_FILE)
ARCH=$(ARCH) ./core/build-containerbox.sh
@@ -339,3 +336,6 @@ cargo-deps/$(ARCH)-unknown-linux-musl/release/tokio-console:
cargo-deps/$(ARCH)-unknown-linux-musl/release/startos-backup-fs:
ARCH=$(ARCH) PREINSTALL="apk add fuse3 fuse3-dev fuse3-static musl-dev pkgconfig" ./build-cargo-dep.sh --git https://github.com/Start9Labs/start-fs.git startos-backup-fs
cargo-deps/$(ARCH)-unknown-linux-musl/release/flamegraph:
ARCH=$(ARCH) PREINSTALL="apk add musl-dev pkgconfig" ./build-cargo-dep.sh flamegraph

View File

@@ -8,8 +8,8 @@ IFS="-" read -ra FEATURES <<< "$ENVIRONMENT"
feature_file_checker='
/^#/ { next }
/^\+ [a-z0-9]+$/ { next }
/^- [a-z0-9]+$/ { next }
/^\+ [a-z0-9-]+$/ { next }
/^- [a-z0-9-]+$/ { next }
{ exit 1 }
'

View File

@@ -1,2 +1,3 @@
+ gdb
+ heaptrack
+ heaptrack
+ linux-perf

View File

@@ -3,4 +3,4 @@ Description=StartOS Container Runtime Failure Handler
[Service]
Type=oneshot
ExecStart=/usr/bin/start-cli rebuild
ExecStart=/usr/bin/start-container rebuild

View File

@@ -35,13 +35,13 @@ const SOCKET_PATH = "/media/startos/rpc/host.sock"
let hostSystemId = 0
export type EffectContext = {
procedureId: string | null
eventId: string | null
callbacks?: CallbackHolder
constRetry?: () => void
}
const rpcRoundFor =
(procedureId: string | null) =>
(eventId: string | null) =>
<K extends T.EffectMethod | "clearCallbacks">(
method: K,
params: Record<string, unknown>,
@@ -52,7 +52,7 @@ const rpcRoundFor =
JSON.stringify({
id,
method,
params: { ...params, procedureId: procedureId || undefined },
params: { ...params, eventId: eventId ?? undefined },
}) + "\n",
)
})
@@ -103,8 +103,9 @@ const rpcRoundFor =
}
export function makeEffects(context: EffectContext): Effects {
const rpcRound = rpcRoundFor(context.procedureId)
const rpcRound = rpcRoundFor(context.eventId)
const self: Effects = {
eventId: context.eventId,
child: (name) =>
makeEffects({ ...context, callbacks: context.callbacks?.child(name) }),
constRetry: context.constRetry,

View File

@@ -242,11 +242,11 @@ export class RpcListener {
.when(runType, async ({ id, params }) => {
const system = this.system
const procedure = jsonPath.unsafeCast(params.procedure)
const { input, timeout, id: procedureId } = params
const { input, timeout, id: eventId } = params
const result = this.getResult(
procedure,
system,
procedureId,
eventId,
timeout,
input,
)
@@ -256,11 +256,11 @@ export class RpcListener {
.when(sandboxRunType, async ({ id, params }) => {
const system = this.system
const procedure = jsonPath.unsafeCast(params.procedure)
const { input, timeout, id: procedureId } = params
const { input, timeout, id: eventId } = params
const result = this.getResult(
procedure,
system,
procedureId,
eventId,
timeout,
input,
)
@@ -275,7 +275,7 @@ export class RpcListener {
const callbacks =
this.callbacks?.getChild("main") || this.callbacks?.child("main")
const effects = makeEffects({
procedureId: null,
eventId: null,
callbacks,
})
return handleRpc(
@@ -304,7 +304,7 @@ export class RpcListener {
}
await this._system.exit(
makeEffects({
procedureId: params.id,
eventId: params.id,
}),
target,
)
@@ -320,14 +320,14 @@ export class RpcListener {
const system = await this.getDependencies.system()
this.callbacks = new CallbackHolder(
makeEffects({
procedureId: params.id,
eventId: params.id,
}),
)
const callbacks = this.callbacks.child("init")
console.error("Initializing...")
await system.init(
makeEffects({
procedureId: params.id,
eventId: params.id,
callbacks,
}),
params.kind,
@@ -399,7 +399,7 @@ export class RpcListener {
private getResult(
procedure: typeof jsonPath._TYPE,
system: System,
procedureId: string,
eventId: string,
timeout: number | null | undefined,
input: any,
) {
@@ -410,7 +410,7 @@ export class RpcListener {
}
const callbacks = this.callbacks?.child(procedure)
const effects = makeEffects({
procedureId,
eventId,
callbacks,
})

View File

@@ -509,13 +509,18 @@ export class SystemForEmbassy implements System {
): Promise<T.ActionInput | null> {
if (actionId === "config") {
const config = await this.getConfig(effects, timeoutMs)
return { spec: config.spec, value: config.config }
return {
eventId: effects.eventId!,
spec: config.spec,
value: config.config,
}
} else if (actionId === "properties") {
return null
} else {
const oldSpec = this.manifest.actions?.[actionId]?.["input-spec"]
if (!oldSpec) return null
return {
eventId: effects.eventId!,
spec: transformConfigSpec(oldSpec as OldConfigSpec),
value: null,
}
@@ -1233,14 +1238,14 @@ async function updateConfig(
const url: string =
filled === null || filled.addressInfo === null
? ""
: catchFn(() =>
utils.hostnameInfoToAddress(
specValue.target === "lan-address"
: catchFn(
() =>
(specValue.target === "lan-address"
? filled.addressInfo!.localHostnames[0] ||
filled.addressInfo!.onionHostnames[0]
filled.addressInfo!.onionHostnames[0]
: filled.addressInfo!.onionHostnames[0] ||
filled.addressInfo!.localHostnames[0],
),
filled.addressInfo!.localHostnames[0]
).hostname.value,
) || ""
mutConfigValue[key] = url
}

View File

@@ -39,8 +39,10 @@ sudo cp container-runtime.service tmp/combined/lib/systemd/system/container-runt
sudo chown 0:0 tmp/combined/lib/systemd/system/container-runtime.service
sudo cp container-runtime-failure.service tmp/combined/lib/systemd/system/container-runtime-failure.service
sudo chown 0:0 tmp/combined/lib/systemd/system/container-runtime-failure.service
sudo cp ../core/target/$ARCH-unknown-linux-musl/release/containerbox tmp/combined/usr/bin/start-cli
sudo chown 0:0 tmp/combined/usr/bin/start-cli
sudo cp ../core/target/$ARCH-unknown-linux-musl/release/containerbox tmp/combined/usr/bin/start-container
echo -e '#!/bin/bash\nexec start-container $@' | sudo tee tmp/combined/usr/bin/start-cli # TODO: remove
sudo chmod +x tmp/combined/usr/bin/start-cli
sudo chown 0:0 tmp/combined/usr/bin/start-container
echo container-runtime | sha256sum | head -c 32 | cat - <(echo) | sudo tee tmp/combined/etc/machine-id
cat deb-install.sh | sudo systemd-nspawn --console=pipe -D tmp/combined $QEMU /bin/bash
sudo truncate -s 0 tmp/combined/etc/machine-id

5552
core/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -26,11 +26,11 @@ if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
alias 'rust-musl-builder'='docker run $USE_TTY --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$HOME/.cargo/git":/root/.cargo/git -v "$(pwd)":/home/rust/src -w /home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
source ./core/builder-alias.sh
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-musl-builder sh -c "cd core && cargo build --release --no-default-features --features container-runtime,$FEATURES --locked --bin containerbox --target=$ARCH-unknown-linux-musl"
rust-musl-builder sh -c "cd core && cargo build --release --no-default-features --features cli-container,$FEATURES --locked --bin containerbox --target=$ARCH-unknown-linux-musl"
if [ "$(ls -nd core/target/$ARCH-unknown-linux-musl/release/containerbox | awk '{ print $3 }')" != "$UID" ]; then
rust-musl-builder sh -c "cd core && chown -R $UID:$UID target && chown -R $UID:$UID /root/.cargo"
fi

View File

@@ -26,11 +26,11 @@ if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
alias 'rust-musl-builder'='docker run $USE_TTY --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$HOME/.cargo/git":/root/.cargo/git -v "$(pwd)":/home/rust/src -w /home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
source ./core/builder-alias.sh
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-musl-builder sh -c "cd core && cargo build --release --no-default-features --features cli,registry,$FEATURES --locked --bin registrybox --target=$ARCH-unknown-linux-musl"
rust-musl-builder sh -c "cd core && cargo build --release --no-default-features --features cli-registry,registry,$FEATURES --locked --bin registrybox --target=$ARCH-unknown-linux-musl"
if [ "$(ls -nd core/target/$ARCH-unknown-linux-musl/release/registrybox | awk '{ print $3 }')" != "$UID" ]; then
rust-musl-builder sh -c "cd core && chown -R $UID:$UID target && chown -R $UID:$UID /root/.cargo"
fi

View File

@@ -1,5 +1,10 @@
#!/bin/bash
PROFILE=${PROFILE:-release}
if [ "${PROFILE}" = "release" ]; then
BUILD_FLAGS="--release"
fi
cd "$(dirname "${BASH_SOURCE[0]}")"
set -ea
@@ -26,11 +31,11 @@ if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
alias 'rust-musl-builder'='docker run $USE_TTY --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$HOME/.cargo/git":/root/.cargo/git -v "$(pwd)":/home/rust/src -w /home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
source ./core/builder-alias.sh
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-musl-builder sh -c "cd core && cargo build --release --no-default-features --features cli,daemon,$FEATURES --locked --bin startbox --target=$ARCH-unknown-linux-musl"
if [ "$(ls -nd core/target/$ARCH-unknown-linux-musl/release/startbox | awk '{ print $3 }')" != "$UID" ]; then
rust-musl-builder sh -c "cd core && cargo build $BUILD_FLAGS --no-default-features --features cli,startd,$FEATURES --locked --bin startbox --target=$ARCH-unknown-linux-musl"
if [ "$(ls -nd core/target/$ARCH-unknown-linux-musl/${PROFILE}/startbox | awk '{ print $3 }')" != "$UID" ]; then
rust-musl-builder sh -c "cd core && chown -R $UID:$UID target && chown -R $UID:$UID /root/.cargo"
fi

View File

@@ -26,7 +26,7 @@ if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
alias 'rust-musl-builder'='docker run $USE_TTY --rm -e "RUSTFLAGS=$RUSTFLAGS" -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$HOME/.cargo/git":/root/.cargo/git -v "$(pwd)":/home/rust/src -w /home/rust/src -P messense/rust-musl-cross:$ARCH-musl'
source ./core/builder-alias.sh
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""

36
core/build-tunnelbox.sh Executable file
View File

@@ -0,0 +1,36 @@
#!/bin/bash
cd "$(dirname "${BASH_SOURCE[0]}")"
set -ea
shopt -s expand_aliases
if [ -z "$ARCH" ]; then
ARCH=$(uname -m)
fi
if [ "$ARCH" = "arm64" ]; then
ARCH="aarch64"
fi
USE_TTY=
if tty -s; then
USE_TTY="-it"
fi
cd ..
FEATURES="$(echo $ENVIRONMENT | sed 's/-/,/g')"
RUSTFLAGS=""
if [[ "${ENVIRONMENT}" =~ (^|-)unstable($|-) ]]; then
RUSTFLAGS="--cfg tokio_unstable"
fi
source ./core/builder-alias.sh
echo "FEATURES=\"$FEATURES\""
echo "RUSTFLAGS=\"$RUSTFLAGS\""
rust-musl-builder sh -c "cd core && cargo build --release --no-default-features --features cli-tunnel,tunnel,$FEATURES --locked --bin tunnelbox --target=$ARCH-unknown-linux-musl"
if [ "$(ls -nd core/target/$ARCH-unknown-linux-musl/release/tunnelbox | awk '{ print $3 }')" != "$UID" ]; then
rust-musl-builder sh -c "cd core && chown -R $UID:$UID target && chown -R $UID:$UID /root/.cargo"
fi

3
core/builder-alias.sh Normal file
View File

@@ -0,0 +1,3 @@
#!/bin/bash
alias 'rust-musl-builder'='docker run $USE_TTY --rm -e "RUSTFLAGS=$RUSTFLAGS" -e SCCACHE_GHA_ENABLED -e SCCACHE_GHA_VERSION -e ACTIONS_RESULTS_URL -e ACTIONS_RUNTIME_TOKEN -v "$HOME/.cargo/registry":/root/.cargo/registry -v "$HOME/.cargo/git":/root/.cargo/git -v "$HOME/.cache/sccache":/root/.cache/sccache -v "$(pwd)":/home/rust/src -w /home/rust/src -P start9/rust-musl-cross:$ARCH-musl'

View File

@@ -16,4 +16,4 @@ if [ "$PLATFORM" = "arm64" ]; then
PLATFORM="aarch64"
fi
cargo install --path=./startos --no-default-features --features=cli,docker,registry --bin start-cli --locked
cargo install --path=./startos --no-default-features --features=cli,docker --bin start-cli --locked

View File

@@ -6,12 +6,14 @@ edition = "2021"
# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[dependencies]
arti-client = { version = "0.33", default-features = false, git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit" }
axum = "0.8.4"
base64 = "0.22.1"
color-eyre = "0.6.2"
ed25519-dalek = { version = "2.0.0", features = ["serde"] }
gpt = "4.1.0"
lazy_static = "1.4"
lettre = { version = "0.11", default-features = false }
mbrman = "0.6.0"
exver = { version = "0.2.0", git = "https://github.com/Start9Labs/exver-rs.git", features = [
"serde",
@@ -29,16 +31,10 @@ rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git", branch =
rustls = "0.23"
serde = { version = "1.0", features = ["derive", "rc"] }
serde_json = "1.0"
sqlx = { version = "0.8.6", features = [
"chrono",
"runtime-tokio-rustls",
"postgres",
] }
ssh-key = "0.6.2"
ts-rs = { git = "https://github.com/dr-bonez/ts-rs.git", branch = "feature/top-level-as" } # "8"
ts-rs = "9"
thiserror = "2.0"
tokio = { version = "1", features = ["full"] }
torut = { git = "https://github.com/Start9Labs/torut.git", branch = "update/dependencies" }
tracing = "0.1.39"
yasi = "0.1.5"
yasi = { version = "0.1.6", features = ["serde", "ts-rs"] }
zbus = "5"

View File

@@ -1,3 +1,3 @@
// This file was generated by [ts-rs](https://github.com/Aleph-Alpha/ts-rs). Do not edit this file manually.
export type ServiceInterfaceId = string;
export type ServiceInterfaceId = string;

View File

@@ -94,6 +94,7 @@ pub enum ErrorKind {
DBus = 75,
InstallFailed = 76,
UpdateFailed = 77,
Smtp = 78,
}
impl ErrorKind {
pub fn as_str(&self) -> &'static str {
@@ -176,6 +177,7 @@ impl ErrorKind {
DBus => "DBus Error",
InstallFailed => "Install Failed",
UpdateFailed => "Update Failed",
Smtp => "SMTP Error",
}
}
}
@@ -288,11 +290,6 @@ impl From<patch_db::Error> for Error {
Error::new(e, ErrorKind::Database)
}
}
impl From<sqlx::Error> for Error {
fn from(e: sqlx::Error) -> Self {
Error::new(e, ErrorKind::Database)
}
}
impl From<ed25519_dalek::SignatureError> for Error {
fn from(e: ed25519_dalek::SignatureError) -> Self {
Error::new(e, ErrorKind::InvalidSignature)
@@ -303,11 +300,6 @@ impl From<std::net::AddrParseError> for Error {
Error::new(e, ErrorKind::ParseNetAddress)
}
}
impl From<torut::control::ConnError> for Error {
fn from(e: torut::control::ConnError) -> Self {
Error::new(e, ErrorKind::Tor)
}
}
impl From<ipnet::AddrParseError> for Error {
fn from(e: ipnet::AddrParseError) -> Self {
Error::new(e, ErrorKind::ParseNetAddress)
@@ -353,8 +345,8 @@ impl From<reqwest::Error> for Error {
Error::new(e, kind)
}
}
impl From<torut::onion::OnionAddressParseError> for Error {
fn from(e: torut::onion::OnionAddressParseError) -> Self {
impl From<arti_client::Error> for Error {
fn from(e: arti_client::Error) -> Self {
Error::new(e, ErrorKind::Tor)
}
}
@@ -380,6 +372,21 @@ impl From<patch_db::value::Error> for Error {
}
}
}
impl From<lettre::error::Error> for Error {
fn from(e: lettre::error::Error) -> Self {
Error::new(e, ErrorKind::Smtp)
}
}
impl From<lettre::transport::smtp::Error> for Error {
fn from(e: lettre::transport::smtp::Error) -> Self {
Error::new(e, ErrorKind::Smtp)
}
}
impl From<lettre::address::AddressError> for Error {
fn from(e: lettre::address::AddressError) -> Self {
Error::new(e, ErrorKind::Smtp)
}
}
#[derive(Clone, Deserialize, Serialize)]
pub struct ErrorData {

View File

@@ -0,0 +1,58 @@
use std::convert::Infallible;
use std::path::Path;
use std::str::FromStr;
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use yasi::InternedString;
/// Identifier for a network gateway, backed by an interned string.
/// Exported to TypeScript as a plain `string`.
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, TS)]
#[ts(type = "string")]
pub struct GatewayId(InternedString);
impl GatewayId {
    /// Borrows the identifier as a string slice.
    pub fn as_str(&self) -> &str {
        self.0.as_ref()
    }
}
impl<T> From<T> for GatewayId
where
T: Into<InternedString>,
{
fn from(value: T) -> Self {
Self(value.into())
}
}
impl FromStr for GatewayId {
type Err = Infallible;
fn from_str(s: &str) -> Result<Self, Self::Err> {
Ok(GatewayId(InternedString::intern(s)))
}
}
// Identity borrow, so APIs taking `impl AsRef<GatewayId>` accept a
// `GatewayId` directly as well as references to one.
impl AsRef<GatewayId> for GatewayId {
fn as_ref(&self) -> &GatewayId {
self
}
}
impl std::fmt::Display for GatewayId {
    /// Delegates directly to the inner interned string's `Display`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        std::fmt::Display::fmt(&self.0, f)
    }
}
impl AsRef<str> for GatewayId {
    /// Same view as [`GatewayId::as_str`].
    fn as_ref(&self) -> &str {
        self.as_str()
    }
}
impl AsRef<Path> for GatewayId {
    /// Lets a gateway id be used directly in path-building APIs.
    fn as_ref(&self) -> &Path {
        Path::new(self.as_str())
    }
}
impl<'de> Deserialize<'de> for GatewayId {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::de::Deserializer<'de>,
{
Ok(GatewayId(serde::Deserialize::deserialize(deserializer)?))
}
}

View File

@@ -60,20 +60,3 @@ impl AsRef<Path> for HostId {
self.0.as_ref().as_ref()
}
}
impl<'q> sqlx::Encode<'q, sqlx::Postgres> for HostId {
fn encode_by_ref(
&self,
buf: &mut <sqlx::Postgres as sqlx::Database>::ArgumentBuffer<'q>,
) -> Result<sqlx::encode::IsNull, sqlx::error::BoxDynError> {
<&str as sqlx::Encode<'q, sqlx::Postgres>>::encode_by_ref(&&**self, buf)
}
}
impl sqlx::Type<sqlx::Postgres> for HostId {
fn type_info() -> sqlx::postgres::PgTypeInfo {
<&str as sqlx::Type<sqlx::Postgres>>::type_info()
}
fn compatible(ty: &sqlx::postgres::PgTypeInfo) -> bool {
<&str as sqlx::Type<sqlx::Postgres>>::compatible(ty)
}
}

View File

@@ -6,6 +6,7 @@ use serde::{Deserialize, Deserializer, Serialize, Serializer};
use yasi::InternedString;
mod action;
mod gateway;
mod health_check;
mod host;
mod image;
@@ -16,6 +17,7 @@ mod service_interface;
mod volume;
pub use action::ActionId;
pub use gateway::GatewayId;
pub use health_check::HealthCheckId;
pub use host::HostId;
pub use image::ImageId;
@@ -116,20 +118,3 @@ impl Serialize for Id {
serializer.serialize_str(self)
}
}
impl<'q> sqlx::Encode<'q, sqlx::Postgres> for Id {
fn encode_by_ref(
&self,
buf: &mut <sqlx::Postgres as sqlx::Database>::ArgumentBuffer<'q>,
) -> Result<sqlx::encode::IsNull, sqlx::error::BoxDynError> {
<&str as sqlx::Encode<'q, sqlx::Postgres>>::encode_by_ref(&&**self, buf)
}
}
impl sqlx::Type<sqlx::Postgres> for Id {
fn type_info() -> sqlx::postgres::PgTypeInfo {
<&str as sqlx::Type<sqlx::Postgres>>::type_info()
}
fn compatible(ty: &sqlx::postgres::PgTypeInfo) -> bool {
<&str as sqlx::Type<sqlx::Postgres>>::compatible(ty)
}
}

View File

@@ -87,20 +87,3 @@ impl Serialize for PackageId {
Serialize::serialize(&self.0, serializer)
}
}
impl<'q> sqlx::Encode<'q, sqlx::Postgres> for PackageId {
fn encode_by_ref(
&self,
buf: &mut <sqlx::Postgres as sqlx::Database>::ArgumentBuffer<'q>,
) -> Result<sqlx::encode::IsNull, sqlx::error::BoxDynError> {
<&str as sqlx::Encode<'q, sqlx::Postgres>>::encode_by_ref(&&**self, buf)
}
}
impl sqlx::Type<sqlx::Postgres> for PackageId {
fn type_info() -> sqlx::postgres::PgTypeInfo {
<&str as sqlx::Type<sqlx::Postgres>>::type_info()
}
fn compatible(ty: &sqlx::postgres::PgTypeInfo) -> bool {
<&str as sqlx::Type<sqlx::Postgres>>::compatible(ty)
}
}

View File

@@ -44,23 +44,6 @@ impl AsRef<Path> for ServiceInterfaceId {
self.0.as_ref().as_ref()
}
}
impl<'q> sqlx::Encode<'q, sqlx::Postgres> for ServiceInterfaceId {
fn encode_by_ref(
&self,
buf: &mut <sqlx::Postgres as sqlx::Database>::ArgumentBuffer<'q>,
) -> Result<sqlx::encode::IsNull, sqlx::error::BoxDynError> {
<&str as sqlx::Encode<'q, sqlx::Postgres>>::encode_by_ref(&&**self, buf)
}
}
impl sqlx::Type<sqlx::Postgres> for ServiceInterfaceId {
fn type_info() -> sqlx::postgres::PgTypeInfo {
<&str as sqlx::Type<sqlx::Postgres>>::type_info()
}
fn compatible(ty: &sqlx::postgres::PgTypeInfo) -> bool {
<&str as sqlx::Type<sqlx::Postgres>>::compatible(ty)
}
}
impl FromStr for ServiceInterfaceId {
type Err = <Id as FromStr>::Err;
fn from_str(s: &str) -> Result<Self, Self::Err> {

View File

@@ -2,7 +2,7 @@
authors = ["Aiden McClelland <me@drbonez.dev>"]
description = "The core of StartOS"
documentation = "https://docs.rs/start-os"
edition = "2021"
edition = "2024"
keywords = [
"self-hosted",
"raspberry-pi",
@@ -14,7 +14,7 @@ keywords = [
name = "start-os"
readme = "README.md"
repository = "https://github.com/Start9Labs/start-os"
version = "0.4.0-alpha.9" # VERSION_BUMP
version = "0.4.0-alpha.10" # VERSION_BUMP
license = "MIT"
[lib]
@@ -37,18 +37,36 @@ path = "src/main.rs"
name = "registrybox"
path = "src/main.rs"
[[bin]]
name = "tunnelbox"
path = "src/main.rs"
[features]
cli = []
container-runtime = ["procfs", "pty-process"]
daemon = ["mail-send"]
registry = []
default = ["cli", "daemon", "registry", "container-runtime"]
cli = ["cli-startd", "cli-registry", "cli-tunnel"]
cli-container = ["procfs", "pty-process"]
cli-registry = []
cli-startd = []
cli-tunnel = []
default = ["cli", "startd", "registry", "cli-container", "tunnel"]
dev = []
unstable = ["console-subscriber", "tokio/tracing"]
docker = []
registry = []
startd = []
test = []
tunnel = []
unstable = ["console-subscriber", "tokio/tracing"]
[dependencies]
arti-client = { version = "0.33", features = [
"compression",
"experimental-api",
"rustls",
"static",
"tokio",
"ephemeral-keystore",
"onion-service-client",
"onion-service-service",
], default-features = false, git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit" }
aes = { version = "0.7.5", features = ["ctr"] }
async-acme = { version = "0.6.0", git = "https://github.com/dr-bonez/async-acme.git", features = [
"use_rustls",
@@ -63,26 +81,28 @@ async-stream = "0.3.5"
async-trait = "0.1.74"
axum = { version = "0.8.4", features = ["ws"] }
barrage = "0.2.3"
backhand = "0.21.0"
backhand = "0.23.0"
base32 = "0.5.0"
base64 = "0.22.1"
base64ct = "1.6.0"
basic-cookies = "0.1.4"
bech32 = "0.11.0"
blake3 = { version = "1.5.0", features = ["mmap", "rayon"] }
bytes = "1"
chrono = { version = "0.4.31", features = ["serde"] }
clap = { version = "4.4.12", features = ["string"] }
color-eyre = "0.6.2"
console = "0.15.7"
console = "0.16.0"
console-subscriber = { version = "0.4.1", optional = true }
const_format = "0.2.34"
cookie = "0.18.0"
cookie_store = "0.21.0"
cookie_store = "0.22.0"
der = { version = "0.7.9", features = ["derive", "pem"] }
digest = "0.10.7"
divrem = "1.0.0"
dns-lookup = "3.0.0"
ed25519 = { version = "2.2.3", features = ["pkcs8", "pem", "alloc"] }
ed25519-dalek = { version = "2.1.1", features = [
ed25519-dalek = { version = "2.2.0", features = [
"serde",
"zeroize",
"rand_core",
@@ -99,6 +119,8 @@ futures = "0.3.28"
gpt = "4.1.0"
helpers = { path = "../helpers" }
hex = "0.4.3"
hickory-client = "0.25.2"
hickory-server = "0.25.2"
hmac = "0.12.1"
http = "1.0.0"
http-body-util = "0.1"
@@ -116,14 +138,15 @@ id-pool = { version = "0.2.2", default-features = false, features = [
"serde",
"u16",
] }
imbl = "4.0.1"
imbl-value = "0.3.2"
imbl = { version = "6", features = ["serde", "small-chunks"] }
imbl-value = { version = "0.4.3", features = ["ts-rs"] }
include_dir = { version = "0.7.3", features = ["metadata"] }
indexmap = { version = "2.0.2", features = ["serde"] }
indicatif = { version = "0.17.7", features = ["tokio"] }
indicatif = { version = "0.18.0", features = ["tokio"] }
inotify = "0.11.0"
integer-encoding = { version = "4.0.0", features = ["tokio_async"] }
ipnet = { version = "2.8.0", features = ["serde"] }
iprange = { version = "0.6.7", features = ["serde"] }
iroh = { version = "0.91.2", features = ["discovery-pkarr-dht"] }
isocountry = "0.3.2"
itertools = "0.14.0"
jaq-core = "0.10.1"
@@ -132,7 +155,16 @@ josekit = "0.10.3"
jsonpath_lib = { git = "https://github.com/Start9Labs/jsonpath.git" }
lazy_async_pool = "0.3.3"
lazy_format = "2.0"
lazy_static = "1.4.0"
lazy_static = "1.5.0"
lettre = { version = "0.11.18", default-features = false, features = [
"smtp-transport",
"pool",
"hostname",
"builder",
"tokio1-rustls",
"rustls-platform-verifier",
"aws-lc-rs",
] }
libc = "0.2.149"
log = "0.4.20"
mio = "1"
@@ -165,22 +197,23 @@ pkcs8 = { version = "0.10.2", features = ["std"] }
prettytable-rs = "0.10.0"
procfs = { version = "0.17.0", optional = true }
proptest = "1.3.1"
proptest-derive = "0.5.0"
proptest-derive = "0.6.0"
pty-process = { version = "0.5.1", optional = true }
qrcode = "0.14.1"
rand = "0.9.0"
rand = "0.9.2"
regex = "1.10.2"
reqwest = { version = "0.12.4", features = ["stream", "json", "socks"] }
reqwest_cookie_store = "0.8.0"
reqwest_cookie_store = "0.9.0"
rpassword = "7.2.0"
rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git", branch = "master" }
rust-argon2 = "2.0.0"
rust-argon2 = "3.0.0"
rustyline-async = "0.4.1"
safelog = { version = "0.4.8", git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit" }
semver = { version = "1.0.20", features = ["serde"] }
serde = { version = "1.0", features = ["derive", "rc"] }
serde_cbor = { package = "ciborium", version = "0.2.1" }
serde_json = "1.0"
serde_toml = { package = "toml", version = "0.8.2" }
serde_toml = { package = "toml", version = "0.9.5" }
serde_urlencoded = "0.7"
serde_with = { version = "3.4.0", features = ["macros", "json"] }
serde_yaml = { package = "serde_yml", version = "0.0.12" }
@@ -189,12 +222,12 @@ sha2 = "0.10.2"
shell-words = "1"
signal-hook = "0.3.17"
simple-logging = "2.0.2"
socket2 = "0.5.7"
socket2 = { version = "0.6.0", features = ["all"] }
socks5-impl = { version = "0.7.2", features = ["server"] }
sqlx = { version = "0.8.6", features = [
"chrono",
"runtime-tokio-rustls",
"postgres",
] }
], default-features = false }
sscanf = "0.4.1"
ssh-key = { version = "0.6.2", features = ["ed25519"] }
tar = "0.4.40"
@@ -203,22 +236,33 @@ thiserror = "2.0.12"
textwrap = "0.16.1"
tokio = { version = "1.38.1", features = ["full"] }
tokio-rustls = "0.26.0"
tokio-socks = "0.5.1"
tokio-stream = { version = "0.1.14", features = ["io-util", "sync", "net"] }
tokio-tar = { git = "https://github.com/dr-bonez/tokio-tar.git" }
tokio-tungstenite = { version = "0.26.2", features = ["native-tls", "url"] }
tokio-tungstenite = { version = "0.27.0", features = ["native-tls", "url"] }
tokio-util = { version = "0.7.9", features = ["io"] }
torut = { git = "https://github.com/Start9Labs/torut.git", branch = "update/dependencies", features = [
"serialize",
] }
tor-cell = { version = "0.33", git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit" }
tor-hscrypto = { version = "0.33", features = [
"full",
], git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit" }
tor-hsservice = { version = "0.33", git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit" }
tor-keymgr = { version = "0.33", features = [
"ephemeral-keystore",
], git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit" }
tor-llcrypto = { version = "0.33", features = [
"full",
], git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit" }
tor-proto = { version = "0.33", git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit" }
tor-rtcompat = { version = "0.33", features = [
"tokio",
"rustls",
], git = "https://github.com/Start9Labs/arti.git", branch = "patch/disable-exit" }
tower-service = "0.3.3"
tracing = "0.1.39"
tracing-error = "0.2.0"
tracing-futures = "0.2.5"
tracing-journald = "0.3.0"
tracing-subscriber = { version = "0.3.17", features = ["env-filter"] }
trust-dns-server = "0.23.1"
ts-rs = { git = "https://github.com/dr-bonez/ts-rs.git", branch = "feature/top-level-as" } # "8.1.0"
ts-rs = "9.0.1"
typed-builder = "0.21.0"
unix-named-pipe = "0.2.0"
url = { version = "2.4.1", features = ["serde"] }
@@ -226,7 +270,6 @@ urlencoding = "2.1.3"
uuid = { version = "1.4.1", features = ["v4"] }
zbus = "5.1.1"
zeroize = "1.6.0"
mail-send = { git = "https://github.com/dr-bonez/mail-send.git", branch = "main", optional = true }
rustls = "0.23.20"
rustls-pki-types = { version = "1.10.1", features = ["alloc"] }

View File

@@ -1,12 +1,13 @@
use std::time::SystemTime;
use imbl_value::InternedString;
use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
use torut::onion::TorSecretKeyV3;
use crate::db::model::DatabaseModel;
use crate::hostname::{generate_hostname, generate_id, Hostname};
use crate::hostname::{Hostname, generate_hostname, generate_id};
use crate::net::ssl::{generate_key, make_root_cert};
use crate::net::tor::TorSecretKey;
use crate::prelude::*;
use crate::util::serde::Pem;
@@ -19,28 +20,28 @@ fn hash_password(password: &str) -> Result<String, Error> {
.with_kind(crate::ErrorKind::PasswordHashGeneration)
}
#[derive(Debug, Clone)]
#[derive(Clone)]
pub struct AccountInfo {
pub server_id: String,
pub hostname: Hostname,
pub password: String,
pub tor_keys: Vec<TorSecretKeyV3>,
pub tor_keys: Vec<TorSecretKey>,
pub root_ca_key: PKey<Private>,
pub root_ca_cert: X509,
pub ssh_key: ssh_key::PrivateKey,
pub compat_s9pk_key: ed25519_dalek::SigningKey,
pub developer_key: ed25519_dalek::SigningKey,
}
impl AccountInfo {
pub fn new(password: &str, start_time: SystemTime) -> Result<Self, Error> {
let server_id = generate_id();
let hostname = generate_hostname();
let tor_key = vec![TorSecretKeyV3::generate()];
let tor_key = vec![TorSecretKey::generate()];
let root_ca_key = generate_key()?;
let root_ca_cert = make_root_cert(&root_ca_key, &hostname, start_time)?;
let ssh_key = ssh_key::PrivateKey::from(ssh_key::private::Ed25519Keypair::random(
&mut ssh_key::rand_core::OsRng::default(),
));
let compat_s9pk_key =
let developer_key =
ed25519_dalek::SigningKey::generate(&mut ssh_key::rand_core::OsRng::default());
Ok(Self {
server_id,
@@ -50,7 +51,7 @@ impl AccountInfo {
root_ca_key,
root_ca_cert,
ssh_key,
compat_s9pk_key,
developer_key,
})
}
@@ -74,7 +75,7 @@ impl AccountInfo {
let root_ca_key = cert_store.as_root_key().de()?.0;
let root_ca_cert = cert_store.as_root_cert().de()?.0;
let ssh_key = db.as_private().as_ssh_privkey().de()?.0;
let compat_s9pk_key = db.as_private().as_compat_s9pk_key().de()?.0;
let compat_s9pk_key = db.as_private().as_developer_key().de()?.0;
Ok(Self {
server_id,
@@ -84,7 +85,7 @@ impl AccountInfo {
root_ca_key,
root_ca_cert,
ssh_key,
compat_s9pk_key,
developer_key: compat_s9pk_key,
})
}
@@ -103,7 +104,7 @@ impl AccountInfo {
&self
.tor_keys
.iter()
.map(|tor_key| tor_key.public().get_onion_address())
.map(|tor_key| tor_key.onion_address())
.collect(),
)?;
db.as_private_mut().as_password_mut().ser(&self.password)?;
@@ -111,8 +112,8 @@ impl AccountInfo {
.as_ssh_privkey_mut()
.ser(Pem::new_ref(&self.ssh_key))?;
db.as_private_mut()
.as_compat_s9pk_key_mut()
.ser(Pem::new_ref(&self.compat_s9pk_key))?;
.as_developer_key_mut()
.ser(Pem::new_ref(&self.developer_key))?;
let key_store = db.as_private_mut().as_key_store_mut();
for tor_key in &self.tor_keys {
key_store.as_onion_mut().insert_key(tor_key)?;
@@ -131,4 +132,17 @@ impl AccountInfo {
self.password = hash_password(password)?;
Ok(())
}
pub fn hostnames(&self) -> impl IntoIterator<Item = InternedString> + Send + '_ {
[
self.hostname.no_dot_host_name(),
self.hostname.local_domain_name(),
]
.into_iter()
.chain(
self.tor_keys
.iter()
.map(|k| InternedString::from_display(&k.onion_address())),
)
}
}

View File

@@ -4,7 +4,7 @@ use clap::{CommandFactory, FromArgMatches, Parser};
pub use models::ActionId;
use models::{PackageId, ReplayId};
use qrcode::QrCode;
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use rpc_toolkit::{Context, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize};
use tracing::instrument;
use ts_rs::TS;
@@ -14,7 +14,7 @@ use crate::db::model::package::TaskSeverity;
use crate::prelude::*;
use crate::rpc_continuations::Guid;
use crate::util::serde::{
display_serializable, HandlerExtSerde, StdinDeserializable, WithIoFormat,
HandlerExtSerde, StdinDeserializable, WithIoFormat, display_serializable,
};
pub fn action_api<C: Context>() -> ParentHandler<C> {
@@ -52,6 +52,7 @@ pub fn action_api<C: Context>() -> ParentHandler<C> {
#[ts(export)]
#[serde(rename_all = "camelCase")]
pub struct ActionInput {
pub event_id: Guid,
#[ts(type = "Record<string, unknown>")]
pub spec: Value,
#[ts(type = "Record<string, unknown> | null")]
@@ -270,6 +271,7 @@ pub fn display_action_result<T: Serialize>(
#[serde(rename_all = "camelCase")]
pub struct RunActionParams {
pub package_id: PackageId,
pub event_id: Option<Guid>,
pub action_id: ActionId,
#[ts(optional, type = "any")]
pub input: Option<Value>,
@@ -278,6 +280,7 @@ pub struct RunActionParams {
#[derive(Parser)]
struct CliRunActionParams {
pub package_id: PackageId,
pub event_id: Option<Guid>,
pub action_id: ActionId,
#[command(flatten)]
pub input: StdinDeserializable<Option<Value>>,
@@ -286,12 +289,14 @@ impl From<CliRunActionParams> for RunActionParams {
fn from(
CliRunActionParams {
package_id,
event_id,
action_id,
input,
}: CliRunActionParams,
) -> Self {
Self {
package_id,
event_id,
action_id,
input: input.0,
}
@@ -331,6 +336,7 @@ pub async fn run_action(
ctx: RpcContext,
RunActionParams {
package_id,
event_id,
action_id,
input,
}: RunActionParams,
@@ -340,7 +346,11 @@ pub async fn run_action(
.await
.as_ref()
.or_not_found(lazy_format!("Manager for {}", package_id))?
.run_action(Guid::new(), action_id, input.unwrap_or_default())
.run_action(
event_id.unwrap_or_default(),
action_id,
input.unwrap_or_default(),
)
.await
.map(|res| res.map(ActionResult::upcast))
}

View File

@@ -3,29 +3,27 @@ use std::collections::BTreeMap;
use chrono::{DateTime, Utc};
use clap::Parser;
use color_eyre::eyre::eyre;
use imbl_value::{json, InternedString};
use imbl_value::{InternedString, json};
use itertools::Itertools;
use josekit::jwk::Jwk;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{from_fn_async, Context, HandlerArgs, HandlerExt, ParentHandler};
use rpc_toolkit::{CallRemote, Context, HandlerArgs, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize};
use tokio::io::AsyncWriteExt;
use tracing::instrument;
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::db::model::DatabaseModel;
use crate::middleware::auth::{
AsLogoutSessionId, HasLoggedOutSessions, HashSessionToken, LoginRes,
AsLogoutSessionId, AuthContext, HasLoggedOutSessions, HashSessionToken, LoginRes,
};
use crate::prelude::*;
use crate::util::crypto::EncryptedWire;
use crate::util::io::create_file_mod;
use crate::util::serde::{display_serializable, HandlerExtSerde, WithIoFormat};
use crate::{ensure_code, Error, ResultExt};
use crate::util::serde::{HandlerExtSerde, WithIoFormat, display_serializable};
use crate::{Error, ResultExt, ensure_code};
#[derive(Debug, Clone, Default, Deserialize, Serialize, TS)]
#[ts(as = "BTreeMap::<String, Session>")]
pub struct Sessions(pub BTreeMap<InternedString, Session>);
impl Sessions {
pub fn new() -> Self {
@@ -112,31 +110,34 @@ impl std::str::FromStr for PasswordType {
})
}
}
pub fn auth<C: Context>() -> ParentHandler<C> {
pub fn auth<C: Context, AC: AuthContext>() -> ParentHandler<C>
where
CliContext: CallRemote<AC>,
{
ParentHandler::new()
.subcommand(
"login",
from_fn_async(login_impl)
from_fn_async(login_impl::<AC>)
.with_metadata("login", Value::Bool(true))
.no_cli(),
)
.subcommand(
"login",
from_fn_async(cli_login)
from_fn_async(cli_login::<AC>)
.no_display()
.with_about("Log in to StartOS server"),
.with_about("Log in a new auth session"),
)
.subcommand(
"logout",
from_fn_async(logout)
from_fn_async(logout::<AC>)
.with_metadata("get_session", Value::Bool(true))
.no_display()
.with_about("Log out of StartOS server")
.with_about("Log out of current auth session")
.with_call_remote::<CliContext>(),
)
.subcommand(
"session",
session::<C>().with_about("List or kill StartOS sessions"),
session::<C, AC>().with_about("List or kill auth sessions"),
)
.subcommand(
"reset-password",
@@ -146,7 +147,7 @@ pub fn auth<C: Context>() -> ParentHandler<C> {
"reset-password",
from_fn_async(cli_reset_password)
.no_display()
.with_about("Reset StartOS password"),
.with_about("Reset password"),
)
.subcommand(
"get-pubkey",
@@ -172,17 +173,20 @@ fn gen_pwd() {
}
#[instrument(skip_all)]
async fn cli_login(
async fn cli_login<C: AuthContext>(
HandlerArgs {
context: ctx,
parent_method,
method,
..
}: HandlerArgs<CliContext>,
) -> Result<(), RpcError> {
) -> Result<(), RpcError>
where
CliContext: CallRemote<C>,
{
let password = rpassword::prompt_password("Password: ")?;
ctx.call_remote::<RpcContext>(
ctx.call_remote::<C>(
&parent_method.into_iter().chain(method).join("."),
json!({
"password": password,
@@ -210,17 +214,11 @@ pub fn check_password(hash: &str, password: &str) -> Result<(), Error> {
Ok(())
}
pub fn check_password_against_db(db: &DatabaseModel, password: &str) -> Result<(), Error> {
let pw_hash = db.as_private().as_password().de()?;
check_password(&pw_hash, password)?;
Ok(())
}
#[derive(Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct LoginParams {
password: Option<PasswordType>,
password: String,
#[ts(skip)]
#[serde(rename = "__auth_userAgent")] // from Auth middleware
user_agent: Option<String>,
@@ -229,20 +227,18 @@ pub struct LoginParams {
}
#[instrument(skip_all)]
pub async fn login_impl(
ctx: RpcContext,
pub async fn login_impl<C: AuthContext>(
ctx: C,
LoginParams {
password,
user_agent,
ephemeral,
}: LoginParams,
) -> Result<LoginRes, Error> {
let password = password.unwrap_or_default().decrypt(&ctx)?;
let tok = if ephemeral {
check_password_against_db(&ctx.db.peek().await, &password)?;
C::check_password(&ctx.db().peek().await, &password)?;
let hash_token = HashSessionToken::new();
ctx.ephemeral_sessions.mutate(|s| {
ctx.ephemeral_sessions().mutate(|s| {
s.0.insert(
hash_token.hashed().clone(),
Session {
@@ -254,11 +250,11 @@ pub async fn login_impl(
});
Ok(hash_token.to_login_res())
} else {
ctx.db
ctx.db()
.mutate(|db| {
check_password_against_db(db, &password)?;
C::check_password(db, &password)?;
let hash_token = HashSessionToken::new();
db.as_private_mut().as_sessions_mut().insert(
C::access_sessions(db).insert(
hash_token.hashed(),
&Session {
logged_in: Utc::now(),
@@ -273,12 +269,7 @@ pub async fn login_impl(
.result
}?;
if tokio::fs::metadata("/media/startos/config/overlay/etc/shadow")
.await
.is_err()
{
write_shadow(&password).await?;
}
ctx.post_login_hook(&password).await?;
Ok(tok)
}
@@ -292,8 +283,8 @@ pub struct LogoutParams {
session: InternedString,
}
pub async fn logout(
ctx: RpcContext,
pub async fn logout<C: AuthContext>(
ctx: C,
LogoutParams { session }: LogoutParams,
) -> Result<Option<HasLoggedOutSessions>, Error> {
Ok(Some(
@@ -321,22 +312,25 @@ pub struct SessionList {
sessions: Sessions,
}
pub fn session<C: Context>() -> ParentHandler<C> {
pub fn session<C: Context, AC: AuthContext>() -> ParentHandler<C>
where
CliContext: CallRemote<AC>,
{
ParentHandler::new()
.subcommand(
"list",
from_fn_async(list)
from_fn_async(list::<AC>)
.with_metadata("get_session", Value::Bool(true))
.with_display_serializable()
.with_custom_display_fn(|handle, result| display_sessions(handle.params, result))
.with_about("Display all server sessions")
.with_about("Display all auth sessions")
.with_call_remote::<CliContext>(),
)
.subcommand(
"kill",
from_fn_async(kill)
from_fn_async(kill::<AC>)
.no_display()
.with_about("Terminate existing server session(s)")
.with_about("Terminate existing auth session(s)")
.with_call_remote::<CliContext>(),
)
}
@@ -385,12 +379,12 @@ pub struct ListParams {
// #[command(display(display_sessions))]
#[instrument(skip_all)]
pub async fn list(
ctx: RpcContext,
pub async fn list<C: AuthContext>(
ctx: C,
ListParams { session, .. }: ListParams,
) -> Result<SessionList, Error> {
let mut sessions = ctx.db.peek().await.into_private().into_sessions().de()?;
ctx.ephemeral_sessions.peek(|s| {
let mut sessions = C::access_sessions(&mut ctx.db().peek().await).de()?;
ctx.ephemeral_sessions().peek(|s| {
sessions
.0
.extend(s.0.iter().map(|(k, v)| (k.clone(), v.clone())))
@@ -424,7 +418,7 @@ pub struct KillParams {
}
#[instrument(skip_all)]
pub async fn kill(ctx: RpcContext, KillParams { ids }: KillParams) -> Result<(), Error> {
pub async fn kill<C: AuthContext>(ctx: C, KillParams { ids }: KillParams) -> Result<(), Error> {
HasLoggedOutSessions::new(ids.into_iter().map(KillSessionId::new), &ctx).await?;
Ok(())
}

View File

@@ -13,9 +13,8 @@ use tokio::io::AsyncWriteExt;
use tracing::instrument;
use ts_rs::TS;
use super::target::{BackupTargetId, PackageBackupInfo};
use super::PackageBackupReport;
use crate::auth::check_password_against_db;
use super::target::{BackupTargetId, PackageBackupInfo};
use crate::backup::os::OsBackup;
use crate::backup::{BackupReport, ServerBackupReport};
use crate::context::RpcContext;
@@ -24,7 +23,8 @@ use crate::db::model::{Database, DatabaseModel};
use crate::disk::mount::backup::BackupMountGuard;
use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::guard::{GenericMountGuard, TmpMountGuard};
use crate::notifications::{notify, NotificationLevel};
use crate::middleware::auth::AuthContext;
use crate::notifications::{NotificationLevel, notify};
use crate::prelude::*;
use crate::util::io::dir_copy;
use crate::util::serde::IoFormat;
@@ -170,7 +170,7 @@ pub async fn backup_all(
let ((fs, package_ids, server_id), status_guard) = (
ctx.db
.mutate(|db| {
check_password_against_db(db, &password)?;
RpcContext::check_password(db, &password)?;
let fs = target_id.load(db)?;
let package_ids = if let Some(ids) = package_ids {
ids.into_iter().collect()

View File

@@ -3,7 +3,7 @@ use std::collections::BTreeMap;
use chrono::{DateTime, Utc};
use models::{HostId, PackageId};
use reqwest::Url;
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use rpc_toolkit::{Context, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize};
use crate::context::CliContext;

View File

@@ -4,10 +4,10 @@ use openssl::x509::X509;
use patch_db::Value;
use serde::{Deserialize, Serialize};
use ssh_key::private::Ed25519Keypair;
use torut::onion::TorSecretKeyV3;
use crate::account::AccountInfo;
use crate::hostname::{generate_hostname, generate_id, Hostname};
use crate::hostname::{Hostname, generate_hostname, generate_id};
use crate::net::tor::TorSecretKey;
use crate::prelude::*;
use crate::util::crypto::ed25519_expand_key;
use crate::util::serde::{Base32, Base64, Pem};
@@ -36,7 +36,7 @@ impl<'de> Deserialize<'de> for OsBackup {
v => {
return Err(serde::de::Error::custom(&format!(
"Unknown backup version {v}"
)))
)));
}
})
}
@@ -85,8 +85,11 @@ impl OsBackupV0 {
&mut ssh_key::rand_core::OsRng::default(),
ssh_key::Algorithm::Ed25519,
)?,
tor_keys: vec![TorSecretKeyV3::from(self.tor_key.0)],
compat_s9pk_key: ed25519_dalek::SigningKey::generate(
tor_keys: TorSecretKey::from_bytes(self.tor_key.0)
.ok()
.into_iter()
.collect(),
developer_key: ed25519_dalek::SigningKey::generate(
&mut ssh_key::rand_core::OsRng::default(),
),
},
@@ -116,8 +119,11 @@ impl OsBackupV1 {
root_ca_key: self.root_ca_key.0,
root_ca_cert: self.root_ca_cert.0,
ssh_key: ssh_key::PrivateKey::from(Ed25519Keypair::from_seed(&self.net_key.0)),
tor_keys: vec![TorSecretKeyV3::from(ed25519_expand_key(&self.net_key.0))],
compat_s9pk_key: ed25519_dalek::SigningKey::from_bytes(&self.net_key),
tor_keys: TorSecretKey::from_bytes(ed25519_expand_key(&self.net_key.0))
.ok()
.into_iter()
.collect(),
developer_key: ed25519_dalek::SigningKey::from_bytes(&self.net_key),
},
ui: self.ui,
}
@@ -134,7 +140,7 @@ struct OsBackupV2 {
root_ca_key: Pem<PKey<Private>>, // PEM Encoded OpenSSL Key
root_ca_cert: Pem<X509>, // PEM Encoded OpenSSL X509 Certificate
ssh_key: Pem<ssh_key::PrivateKey>, // PEM Encoded OpenSSH Key
tor_keys: Vec<TorSecretKeyV3>, // Base64 Encoded Ed25519 Expanded Secret Key
tor_keys: Vec<TorSecretKey>, // Base64 Encoded Ed25519 Expanded Secret Key
compat_s9pk_key: Pem<ed25519_dalek::SigningKey>, // PEM Encoded ED25519 Key
ui: Value, // JSON Value
}
@@ -149,7 +155,7 @@ impl OsBackupV2 {
root_ca_cert: self.root_ca_cert.0,
ssh_key: self.ssh_key.0,
tor_keys: self.tor_keys,
compat_s9pk_key: self.compat_s9pk_key.0,
developer_key: self.compat_s9pk_key.0,
},
ui: self.ui,
}
@@ -162,7 +168,7 @@ impl OsBackupV2 {
root_ca_cert: Pem(backup.account.root_ca_cert.clone()),
ssh_key: Pem(backup.account.ssh_key.clone()),
tor_keys: backup.account.tor_keys.clone(),
compat_s9pk_key: Pem(backup.account.compat_s9pk_key.clone()),
compat_s9pk_key: Pem(backup.account.developer_key.clone()),
ui: backup.ui.clone(),
}
}

View File

@@ -2,7 +2,7 @@ use std::collections::BTreeMap;
use std::sync::Arc;
use clap::Parser;
use futures::{stream, StreamExt};
use futures::{StreamExt, stream};
use models::PackageId;
use patch_db::json_ptr::ROOT;
use serde::{Deserialize, Serialize};
@@ -11,6 +11,7 @@ use tracing::instrument;
use ts_rs::TS;
use super::target::BackupTargetId;
use crate::PLATFORM;
use crate::backup::os::OsBackup;
use crate::context::setup::SetupResult;
use crate::context::{RpcContext, SetupContext};
@@ -26,7 +27,6 @@ use crate::service::service_map::DownloadInstallFuture;
use crate::setup::SetupExecuteProgress;
use crate::system::sync_kiosk;
use crate::util::serde::IoFormat;
use crate::PLATFORM;
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]

View File

@@ -4,17 +4,17 @@ use std::path::{Path, PathBuf};
use clap::Parser;
use color_eyre::eyre::eyre;
use imbl_value::InternedString;
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use rpc_toolkit::{Context, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use super::{BackupTarget, BackupTargetId};
use crate::context::{CliContext, RpcContext};
use crate::db::model::DatabaseModel;
use crate::disk::mount::filesystem::cifs::Cifs;
use crate::disk::mount::filesystem::ReadOnly;
use crate::disk::mount::filesystem::cifs::Cifs;
use crate::disk::mount::guard::{GenericMountGuard, TmpMountGuard};
use crate::disk::util::{recovery_info, StartOsRecoveryInfo};
use crate::disk::util::{StartOsRecoveryInfo, recovery_info};
use crate::prelude::*;
use crate::util::serde::KeyVal;

View File

@@ -2,15 +2,15 @@ use std::collections::BTreeMap;
use std::path::{Path, PathBuf};
use chrono::{DateTime, Utc};
use clap::builder::ValueParserFactory;
use clap::Parser;
use clap::builder::ValueParserFactory;
use color_eyre::eyre::eyre;
use digest::generic_array::GenericArray;
use digest::OutputSizeUser;
use digest::generic_array::GenericArray;
use exver::Version;
use imbl_value::InternedString;
use models::{FromStrParser, PackageId};
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use rpc_toolkit::{Context, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use tokio::sync::Mutex;
@@ -27,10 +27,10 @@ use crate::disk::mount::filesystem::{FileSystem, MountType, ReadWrite};
use crate::disk::mount::guard::{GenericMountGuard, TmpMountGuard};
use crate::disk::util::PartitionInfo;
use crate::prelude::*;
use crate::util::serde::{
deserialize_from_str, display_serializable, serialize_display, HandlerExtSerde, WithIoFormat,
};
use crate::util::VersionString;
use crate::util::serde::{
HandlerExtSerde, WithIoFormat, deserialize_from_str, display_serializable, serialize_display,
};
pub mod cifs;

View File

@@ -2,41 +2,64 @@ use std::collections::VecDeque;
use std::ffi::OsString;
use std::path::Path;
#[cfg(feature = "container-runtime")]
#[cfg(feature = "cli-container")]
pub mod container_cli;
pub mod deprecated;
#[cfg(feature = "registry")]
#[cfg(any(feature = "registry", feature = "cli-registry"))]
pub mod registry;
#[cfg(feature = "cli")]
pub mod start_cli;
#[cfg(feature = "daemon")]
#[cfg(feature = "startd")]
pub mod start_init;
#[cfg(feature = "daemon")]
#[cfg(feature = "startd")]
pub mod startd;
#[cfg(any(feature = "tunnel", feature = "cli-tunnel"))]
pub mod tunnel;
fn select_executable(name: &str) -> Option<fn(VecDeque<OsString>)> {
match name {
#[cfg(feature = "cli")]
"start-cli" => Some(start_cli::main),
#[cfg(feature = "container-runtime")]
"start-cli" => Some(container_cli::main),
#[cfg(feature = "daemon")]
#[cfg(feature = "startd")]
"startd" => Some(startd::main),
#[cfg(feature = "registry")]
"registry" => Some(registry::main),
"embassy-cli" => Some(|_| deprecated::renamed("embassy-cli", "start-cli")),
"embassy-sdk" => Some(|_| deprecated::renamed("embassy-sdk", "start-sdk")),
#[cfg(feature = "startd")]
"embassyd" => Some(|_| deprecated::renamed("embassyd", "startd")),
#[cfg(feature = "startd")]
"embassy-init" => Some(|_| deprecated::removed("embassy-init")),
#[cfg(feature = "cli-startd")]
"start-cli" => Some(start_cli::main),
#[cfg(feature = "cli-startd")]
"embassy-cli" => Some(|_| deprecated::renamed("embassy-cli", "start-cli")),
#[cfg(feature = "cli-startd")]
"embassy-sdk" => Some(|_| deprecated::removed("embassy-sdk")),
#[cfg(feature = "cli-container")]
"start-container" => Some(container_cli::main),
#[cfg(feature = "registry")]
"start-registryd" => Some(registry::main),
#[cfg(feature = "cli-registry")]
"start-registry" => Some(registry::cli),
#[cfg(feature = "tunnel")]
"start-tunneld" => Some(tunnel::main),
#[cfg(feature = "cli-tunnel")]
"start-tunnel" => Some(tunnel::cli),
"contents" => Some(|_| {
#[cfg(feature = "cli")]
println!("start-cli");
#[cfg(feature = "container-runtime")]
println!("start-cli (container)");
#[cfg(feature = "daemon")]
#[cfg(feature = "startd")]
println!("startd");
#[cfg(feature = "cli-startd")]
println!("start-cli");
#[cfg(feature = "cli-container")]
println!("start-container");
#[cfg(feature = "registry")]
println!("registry");
println!("start-registryd");
#[cfg(feature = "cli-registry")]
println!("start-registry");
#[cfg(feature = "tunnel")]
println!("start-tunneld");
#[cfg(feature = "cli-tunnel")]
println!("start-tunnel");
}),
_ => None,
}

View File

@@ -2,9 +2,12 @@ use std::ffi::OsString;
use clap::Parser;
use futures::FutureExt;
use rpc_toolkit::CliApp;
use tokio::signal::unix::signal;
use tracing::instrument;
use crate::context::CliContext;
use crate::context::config::ClientConfig;
use crate::net::web_server::{Acceptor, WebServer};
use crate::prelude::*;
use crate::registry::context::{RegistryConfig, RegistryContext};
@@ -85,3 +88,30 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
}
}
}
pub fn cli(args: impl IntoIterator<Item = OsString>) {
LOGGER.enable();
if let Err(e) = CliApp::new(
|cfg: ClientConfig| Ok(CliContext::init(cfg.load()?)?),
crate::registry::registry_api(),
)
.run(args)
{
match e.data {
Some(serde_json::Value::String(s)) => eprintln!("{}: {}", e.message, s),
Some(serde_json::Value::Object(o)) => {
if let Some(serde_json::Value::String(s)) = o.get("details") {
eprintln!("{}: {}", e.message, s);
if let Some(serde_json::Value::String(s)) = o.get("debug") {
tracing::debug!("{}", s)
}
}
}
Some(a) => eprintln!("{}: {}", e.message, a),
None => eprintln!("{}", e.message),
}
std::process::exit(e.code);
}
}

View File

@@ -3,8 +3,8 @@ use std::ffi::OsString;
use rpc_toolkit::CliApp;
use serde_json::Value;
use crate::context::config::ClientConfig;
use crate::context::CliContext;
use crate::context::config::ClientConfig;
use crate::util::logger::LOGGER;
use crate::version::{Current, VersionT};

View File

@@ -1,4 +1,3 @@
use std::path::Path;
use std::sync::Arc;
use tokio::process::Command;
@@ -7,9 +6,9 @@ use tracing::instrument;
use crate::context::config::ServerConfig;
use crate::context::rpc::InitRpcContextPhases;
use crate::context::{DiagnosticContext, InitContext, InstallContext, RpcContext, SetupContext};
use crate::disk::REPAIR_DISK_PATH;
use crate::disk::fsck::RepairStrategy;
use crate::disk::main::DEFAULT_PASSWORD;
use crate::disk::REPAIR_DISK_PATH;
use crate::firmware::{check_for_firmware_update, update_firmware};
use crate::init::{InitPhases, STANDBY_MODE_PATH};
use crate::net::web_server::{UpgradableListener, WebServer};
@@ -48,7 +47,7 @@ async fn setup_or_init(
update_phase.complete();
reboot_phase.start();
return Ok(Err(Shutdown {
export_args: None,
disk_guid: None,
restart: true,
}));
}
@@ -103,7 +102,7 @@ async fn setup_or_init(
.expect("context dropped");
return Ok(Err(Shutdown {
export_args: None,
disk_guid: None,
restart: true,
}));
}
@@ -117,7 +116,9 @@ async fn setup_or_init(
server.serve_setup(ctx.clone());
let mut shutdown = ctx.shutdown.subscribe();
shutdown.recv().await.expect("context dropped");
if let Some(shutdown) = shutdown.recv().await.expect("context dropped") {
return Ok(Err(shutdown));
}
tokio::task::yield_now().await;
if let Err(e) = Command::new("killall")
@@ -136,7 +137,7 @@ async fn setup_or_init(
return Err(Error::new(
eyre!("Setup mode exited before setup completed"),
ErrorKind::Unknown,
))
));
}
}))
} else {
@@ -183,7 +184,7 @@ async fn setup_or_init(
let mut reboot_phase = handle.add_phase("Rebooting".into(), Some(1));
reboot_phase.start();
return Ok(Err(Shutdown {
export_args: Some((disk_guid, Path::new(DATA_DIR).to_owned())),
disk_guid: Some(disk_guid),
restart: true,
}));
}

View File

@@ -12,7 +12,7 @@ use tracing::instrument;
use crate::context::config::ServerConfig;
use crate::context::rpc::InitRpcContextPhases;
use crate::context::{DiagnosticContext, InitContext, RpcContext};
use crate::net::network_interface::SelfContainedNetworkInterfaceListener;
use crate::net::gateway::SelfContainedNetworkInterfaceListener;
use crate::net::web_server::{Acceptor, UpgradableListener, WebServer};
use crate::shutdown::Shutdown;
use crate::system::launch_metrics_task;
@@ -144,7 +144,7 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
let res = {
let rt = tokio::runtime::Builder::new_multi_thread()
.worker_threads(max(4, num_cpus::get()))
.worker_threads(max(1, num_cpus::get()))
.enable_all()
.build()
.expect("failed to initialize runtime");

View File

@@ -0,0 +1,117 @@
use std::ffi::OsString;
use clap::Parser;
use futures::FutureExt;
use rpc_toolkit::CliApp;
use tokio::signal::unix::signal;
use tracing::instrument;
use crate::context::CliContext;
use crate::context::config::ClientConfig;
use crate::net::web_server::{Acceptor, WebServer};
use crate::prelude::*;
use crate::tunnel::context::{TunnelConfig, TunnelContext};
use crate::util::logger::LOGGER;
#[instrument(skip_all)]
async fn inner_main(config: &TunnelConfig) -> Result<(), Error> {
let server = async {
let ctx = TunnelContext::init(config).await?;
let mut server = WebServer::new(Acceptor::bind([ctx.listen]).await?);
server.serve_tunnel(ctx.clone());
let mut shutdown_recv = ctx.shutdown.subscribe();
let sig_handler_ctx = ctx;
let sig_handler = tokio::spawn(async move {
use tokio::signal::unix::SignalKind;
futures::future::select_all(
[
SignalKind::interrupt(),
SignalKind::quit(),
SignalKind::terminate(),
]
.iter()
.map(|s| {
async move {
signal(*s)
.unwrap_or_else(|_| panic!("register {:?} handler", s))
.recv()
.await
}
.boxed()
}),
)
.await;
sig_handler_ctx
.shutdown
.send(())
.map_err(|_| ())
.expect("send shutdown signal");
});
shutdown_recv
.recv()
.await
.with_kind(crate::ErrorKind::Unknown)?;
sig_handler.abort();
Ok::<_, Error>(server)
}
.await?;
server.shutdown().await;
Ok(())
}
pub fn main(args: impl IntoIterator<Item = OsString>) {
LOGGER.enable();
let config = TunnelConfig::parse_from(args).load().unwrap();
let res = {
let rt = tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.expect("failed to initialize runtime");
rt.block_on(inner_main(&config))
};
match res {
Ok(()) => (),
Err(e) => {
eprintln!("{}", e.source);
tracing::debug!("{:?}", e.source);
drop(e.source);
std::process::exit(e.kind as i32)
}
}
}
pub fn cli(args: impl IntoIterator<Item = OsString>) {
LOGGER.enable();
if let Err(e) = CliApp::new(
|cfg: ClientConfig| Ok(CliContext::init(cfg.load()?)?),
crate::tunnel::api::tunnel_api(),
)
.run(args)
{
match e.data {
Some(serde_json::Value::String(s)) => eprintln!("{}: {}", e.message, s),
Some(serde_json::Value::Object(o)) => {
if let Some(serde_json::Value::String(s)) = o.get("details") {
eprintln!("{}: {}", e.message, s);
if let Some(serde_json::Value::String(s)) = o.get("debug") {
tracing::debug!("{}", s)
}
}
}
Some(a) => eprintln!("{}: {}", e.message, a),
None => eprintln!("{}", e.message),
}
std::process::exit(e.code);
}
}

View File

@@ -1,27 +1,32 @@
use std::fs::File;
use std::io::BufReader;
use std::net::SocketAddr;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use cookie_store::{CookieStore, RawCookie};
use cookie::{Cookie, Expiration, SameSite};
use cookie_store::CookieStore;
use imbl_value::InternedString;
use josekit::jwk::Jwk;
use once_cell::sync::OnceCell;
use reqwest::Proxy;
use reqwest_cookie_store::CookieStoreMutex;
use rpc_toolkit::reqwest::{Client, Url};
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{call_remote_http, CallRemote, Context, Empty};
use rpc_toolkit::{CallRemote, Context, Empty};
use tokio::net::TcpStream;
use tokio::runtime::Runtime;
use tokio_tungstenite::{MaybeTlsStream, WebSocketStream};
use tracing::instrument;
use super::setup::CURRENT_SECRET;
use crate::context::config::{local_config_path, ClientConfig};
use crate::context::config::{ClientConfig, local_config_path};
use crate::context::{DiagnosticContext, InitContext, InstallContext, RpcContext, SetupContext};
use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH;
use crate::developer::{OS_DEVELOPER_KEY_PATH, default_developer_key_path};
use crate::middleware::auth::AuthContext;
use crate::prelude::*;
use crate::rpc_continuations::Guid;
use crate::tunnel::context::TunnelContext;
#[derive(Debug)]
pub struct CliContextSeed {
@@ -29,6 +34,10 @@ pub struct CliContextSeed {
pub base_url: Url,
pub rpc_url: Url,
pub registry_url: Option<Url>,
pub registry_hostname: Option<InternedString>,
pub registry_listen: Option<SocketAddr>,
pub tunnel_addr: Option<SocketAddr>,
pub tunnel_listen: Option<SocketAddr>,
pub client: Client,
pub cookie_store: Arc<CookieStoreMutex>,
pub cookie_path: PathBuf,
@@ -55,9 +64,8 @@ impl Drop for CliContextSeed {
true,
)
.unwrap();
let mut store = self.cookie_store.lock().unwrap();
store.remove("localhost", "", "local");
store.save_json(&mut *writer).unwrap();
let store = self.cookie_store.lock().unwrap();
cookie_store::serde::json::save(&store, &mut *writer).unwrap();
writer.sync_all().unwrap();
std::fs::rename(tmp, &self.cookie_path).unwrap();
}
@@ -85,26 +93,14 @@ impl CliContext {
.unwrap_or(Path::new("/"))
.join(".cookies.json")
});
let cookie_store = Arc::new(CookieStoreMutex::new({
let mut store = if cookie_path.exists() {
CookieStore::load_json(BufReader::new(
File::open(&cookie_path)
.with_ctx(|_| (ErrorKind::Filesystem, cookie_path.display()))?,
))
.map_err(|e| eyre!("{}", e))
.with_kind(crate::ErrorKind::Deserialization)?
} else {
CookieStore::default()
};
if let Ok(local) = std::fs::read_to_string(LOCAL_AUTH_COOKIE_PATH) {
store
.insert_raw(
&RawCookie::new("local", local),
&"http://localhost".parse()?,
)
.with_kind(crate::ErrorKind::Network)?;
}
store
let cookie_store = Arc::new(CookieStoreMutex::new(if cookie_path.exists() {
cookie_store::serde::json::load(BufReader::new(
File::open(&cookie_path)
.with_ctx(|_| (ErrorKind::Filesystem, cookie_path.display()))?,
))
.unwrap_or_default()
} else {
CookieStore::default()
}));
Ok(CliContext(Arc::new(CliContextSeed {
@@ -129,9 +125,17 @@ impl CliContext {
Ok::<_, Error>(registry)
})
.transpose()?,
registry_hostname: config.registry_hostname,
registry_listen: config.registry_listen,
tunnel_addr: config.tunnel,
tunnel_listen: config.tunnel_listen,
client: {
let mut builder = Client::builder().cookie_provider(cookie_store.clone());
if let Some(proxy) = config.proxy {
if let Some(proxy) = config.proxy.or_else(|| {
config
.socks_listen
.and_then(|socks| format!("socks5h://{socks}").parse::<Url>().log_err())
}) {
builder =
builder.proxy(Proxy::all(proxy).with_kind(crate::ErrorKind::ParseUrl)?)
}
@@ -139,14 +143,9 @@ impl CliContext {
},
cookie_store,
cookie_path,
developer_key_path: config.developer_key_path.unwrap_or_else(|| {
local_config_path()
.as_deref()
.unwrap_or_else(|| Path::new(super::config::CONFIG_PATH))
.parent()
.unwrap_or(Path::new("/"))
.join("developer.key.pem")
}),
developer_key_path: config
.developer_key_path
.unwrap_or_else(default_developer_key_path),
developer_key: OnceCell::new(),
})))
}
@@ -155,20 +154,26 @@ impl CliContext {
#[instrument(skip_all)]
pub fn developer_key(&self) -> Result<&ed25519_dalek::SigningKey, Error> {
self.developer_key.get_or_try_init(|| {
if !self.developer_key_path.exists() {
return Err(Error::new(eyre!("Developer Key does not exist! Please run `start-cli init` before running this command."), crate::ErrorKind::Uninitialized));
}
let pair = <ed25519::KeypairBytes as ed25519::pkcs8::DecodePrivateKey>::from_pkcs8_pem(
&std::fs::read_to_string(&self.developer_key_path)?,
)
.with_kind(crate::ErrorKind::Pem)?;
let secret = ed25519_dalek::SecretKey::try_from(&pair.secret_key[..]).map_err(|_| {
Error::new(
eyre!("pkcs8 key is of incorrect length"),
ErrorKind::OpenSsl,
for path in [Path::new(OS_DEVELOPER_KEY_PATH), &self.developer_key_path] {
if !path.exists() {
continue;
}
let pair = <ed25519::KeypairBytes as ed25519::pkcs8::DecodePrivateKey>::from_pkcs8_pem(
&std::fs::read_to_string(&self.developer_key_path)?,
)
})?;
Ok(secret.into())
.with_kind(crate::ErrorKind::Pem)?;
let secret = ed25519_dalek::SecretKey::try_from(&pair.secret_key[..]).map_err(|_| {
Error::new(
eyre!("pkcs8 key is of incorrect length"),
ErrorKind::OpenSsl,
)
})?;
return Ok(secret.into())
}
Err(Error::new(
eyre!("Developer Key does not exist! Please run `start-cli init` before running this command."),
crate::ErrorKind::Uninitialized
))
})
}
@@ -185,7 +190,7 @@ impl CliContext {
eyre!("Cannot parse scheme from base URL"),
crate::ErrorKind::ParseUrl,
)
.into())
.into());
}
};
url.set_scheme(ws_scheme)
@@ -276,27 +281,90 @@ impl Context for CliContext {
}
impl CallRemote<RpcContext> for CliContext {
async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
call_remote_http(&self.client, self.rpc_url.clone(), method, params).await
if let Ok(local) = std::fs::read_to_string(RpcContext::LOCAL_AUTH_COOKIE_PATH) {
self.cookie_store
.lock()
.unwrap()
.insert_raw(
&Cookie::build(("local", local))
.domain("localhost")
.expires(Expiration::Session)
.same_site(SameSite::Strict)
.build(),
&"http://localhost".parse()?,
)
.with_kind(crate::ErrorKind::Network)?;
}
crate::middleware::signature::call_remote(
self,
self.rpc_url.clone(),
self.rpc_url.host_str().or_not_found("rpc url hostname")?,
method,
params,
)
.await
}
}
impl CallRemote<DiagnosticContext> for CliContext {
async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
call_remote_http(&self.client, self.rpc_url.clone(), method, params).await
if let Ok(local) = std::fs::read_to_string(TunnelContext::LOCAL_AUTH_COOKIE_PATH) {
self.cookie_store
.lock()
.unwrap()
.insert_raw(
&Cookie::build(("local", local))
.domain("localhost")
.expires(Expiration::Session)
.same_site(SameSite::Strict)
.build(),
&"http://localhost".parse()?,
)
.with_kind(crate::ErrorKind::Network)?;
}
crate::middleware::signature::call_remote(
self,
self.rpc_url.clone(),
self.rpc_url.host_str().or_not_found("rpc url hostname")?,
method,
params,
)
.await
}
}
impl CallRemote<InitContext> for CliContext {
async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
call_remote_http(&self.client, self.rpc_url.clone(), method, params).await
crate::middleware::signature::call_remote(
self,
self.rpc_url.clone(),
self.rpc_url.host_str().or_not_found("rpc url hostname")?,
method,
params,
)
.await
}
}
impl CallRemote<SetupContext> for CliContext {
async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
call_remote_http(&self.client, self.rpc_url.clone(), method, params).await
crate::middleware::signature::call_remote(
self,
self.rpc_url.clone(),
self.rpc_url.host_str().or_not_found("rpc url hostname")?,
method,
params,
)
.await
}
}
impl CallRemote<InstallContext> for CliContext {
async fn call_remote(&self, method: &str, params: Value, _: Empty) -> Result<Value, RpcError> {
call_remote_http(&self.client, self.rpc_url.clone(), method, params).await
crate::middleware::signature::call_remote(
self,
self.rpc_url.clone(),
self.rpc_url.host_str().or_not_found("rpc url hostname")?,
method,
params,
)
.await
}
}

View File

@@ -3,15 +3,16 @@ use std::net::SocketAddr;
use std::path::{Path, PathBuf};
use clap::Parser;
use imbl_value::InternedString;
use reqwest::Url;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use crate::MAIN_DATA;
use crate::disk::OsPartitionInfo;
use crate::prelude::*;
use crate::util::serde::IoFormat;
use crate::version::VersionT;
use crate::MAIN_DATA;
pub const DEVICE_CONFIG_PATH: &str = "/media/startos/config/config.yaml"; // "/media/startos/config/config.yaml";
pub const CONFIG_PATH: &str = "/etc/startos/config.yaml";
@@ -55,7 +56,6 @@ pub trait ContextConfig: DeserializeOwned + Default {
#[derive(Debug, Default, Deserialize, Serialize, Parser)]
#[serde(rename_all = "kebab-case")]
#[command(rename_all = "kebab-case")]
#[command(name = "start-cli")]
#[command(version = crate::version::Current::default().semver().to_string())]
pub struct ClientConfig {
#[arg(short = 'c', long)]
@@ -64,8 +64,18 @@ pub struct ClientConfig {
pub host: Option<Url>,
#[arg(short = 'r', long)]
pub registry: Option<Url>,
#[arg(long)]
pub registry_hostname: Option<InternedString>,
#[arg(skip)]
pub registry_listen: Option<SocketAddr>,
#[arg(short = 't', long)]
pub tunnel: Option<SocketAddr>,
#[arg(skip)]
pub tunnel_listen: Option<SocketAddr>,
#[arg(short = 'p', long)]
pub proxy: Option<Url>,
#[arg(skip)]
pub socks_listen: Option<SocketAddr>,
#[arg(long)]
pub cookie_path: Option<PathBuf>,
#[arg(long)]
@@ -78,6 +88,8 @@ impl ContextConfig for ClientConfig {
fn merge_with(&mut self, other: Self) {
self.host = self.host.take().or(other.host);
self.registry = self.registry.take().or(other.registry);
self.registry_hostname = self.registry_hostname.take().or(other.registry_hostname);
self.tunnel = self.tunnel.take().or(other.tunnel);
self.proxy = self.proxy.take().or(other.proxy);
self.cookie_path = self.cookie_path.take().or(other.cookie_path);
self.developer_key_path = self.developer_key_path.take().or(other.developer_key_path);
@@ -104,15 +116,15 @@ pub struct ServerConfig {
#[arg(skip)]
pub os_partitions: Option<OsPartitionInfo>,
#[arg(long)]
pub tor_control: Option<SocketAddr>,
#[arg(long)]
pub tor_socks: Option<SocketAddr>,
pub socks_listen: Option<SocketAddr>,
#[arg(long)]
pub revision_cache_size: Option<usize>,
#[arg(long)]
pub disable_encryption: Option<bool>,
#[arg(long)]
pub multi_arch_s9pks: Option<bool>,
#[arg(long)]
pub developer_key_path: Option<PathBuf>,
}
impl ContextConfig for ServerConfig {
fn next(&mut self) -> Option<PathBuf> {
@@ -121,14 +133,14 @@ impl ContextConfig for ServerConfig {
fn merge_with(&mut self, other: Self) {
self.ethernet_interface = self.ethernet_interface.take().or(other.ethernet_interface);
self.os_partitions = self.os_partitions.take().or(other.os_partitions);
self.tor_control = self.tor_control.take().or(other.tor_control);
self.tor_socks = self.tor_socks.take().or(other.tor_socks);
self.socks_listen = self.socks_listen.take().or(other.socks_listen);
self.revision_cache_size = self
.revision_cache_size
.take()
.or(other.revision_cache_size);
self.disable_encryption = self.disable_encryption.take().or(other.disable_encryption);
self.multi_arch_s9pks = self.multi_arch_s9pks.take().or(other.multi_arch_s9pks);
self.developer_key_path = self.developer_key_path.take().or(other.developer_key_path);
}
}

View File

@@ -1,15 +1,15 @@
use std::ops::Deref;
use std::sync::Arc;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::Context;
use rpc_toolkit::yajrc::RpcError;
use tokio::sync::broadcast::Sender;
use tracing::instrument;
use crate::Error;
use crate::context::config::ServerConfig;
use crate::rpc_continuations::RpcContinuations;
use crate::shutdown::Shutdown;
use crate::Error;
pub struct DiagnosticContextSeed {
pub shutdown: Sender<Shutdown>,

View File

@@ -6,10 +6,10 @@ use tokio::sync::broadcast::Sender;
use tokio::sync::watch;
use tracing::instrument;
use crate::Error;
use crate::context::config::ServerConfig;
use crate::progress::FullProgressTracker;
use crate::rpc_continuations::RpcContinuations;
use crate::Error;
pub struct InitContextSeed {
pub config: ServerConfig,

View File

@@ -5,9 +5,9 @@ use rpc_toolkit::Context;
use tokio::sync::broadcast::Sender;
use tracing::instrument;
use crate::Error;
use crate::net::utils::find_eth_iface;
use crate::rpc_continuations::RpcContinuations;
use crate::Error;
pub struct InstallContextSeed {
pub ethernet_interface: String,

View File

@@ -18,7 +18,7 @@ use models::{ActionId, PackageId};
use reqwest::{Client, Proxy};
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{CallRemote, Context, Empty};
use tokio::sync::{broadcast, oneshot, watch, Mutex, RwLock};
use tokio::sync::{broadcast, oneshot, watch, RwLock};
use tokio::time::Instant;
use tracing::instrument;
@@ -31,8 +31,9 @@ use crate::db::model::Database;
use crate::disk::OsPartitionInfo;
use crate::init::{check_time_is_synchronized, InitResult};
use crate::install::PKG_ARCHIVE_DIR;
use crate::lxc::{ContainerId, LxcContainer, LxcManager};
use crate::lxc::LxcManager;
use crate::net::net_controller::{NetController, NetService};
use crate::net::socks::DEFAULT_SOCKS_LISTEN;
use crate::net::utils::{find_eth_iface, find_wifi_iface};
use crate::net::web_server::{UpgradableListener, WebServerAcceptorSetter};
use crate::net::wifi::WpaCli;
@@ -46,7 +47,7 @@ use crate::shutdown::Shutdown;
use crate::util::io::delete_file;
use crate::util::lshw::LshwDevice;
use crate::util::sync::{SyncMutex, Watch};
use crate::DATA_DIR;
use crate::{DATA_DIR, HOST_IP};
pub struct RpcContextSeed {
is_closed: AtomicBool,
@@ -65,7 +66,6 @@ pub struct RpcContextSeed {
pub cancellable_installs: SyncMutex<BTreeMap<PackageId, oneshot::Sender<()>>>,
pub metrics_cache: Watch<Option<crate::system::Metrics>>,
pub shutdown: broadcast::Sender<Option<Shutdown>>,
pub tor_socks: SocketAddr,
pub lxc_manager: Arc<LxcManager>,
pub open_authed_continuations: OpenAuthedContinuations<Option<InternedString>>,
pub rpc_continuations: RpcContinuations,
@@ -75,12 +75,6 @@ pub struct RpcContextSeed {
pub client: Client,
pub start_time: Instant,
pub crons: SyncMutex<BTreeMap<Guid, NonDetachingJoinHandle<()>>>,
// #[cfg(feature = "dev")]
pub dev: Dev,
}
pub struct Dev {
pub lxc: Mutex<BTreeMap<ContainerId, LxcContainer>>,
}
pub struct Hardware {
@@ -138,10 +132,7 @@ impl RpcContext {
run_migrations,
}: InitRpcContextPhases,
) -> Result<Self, Error> {
let tor_proxy = config.tor_socks.unwrap_or(SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(127, 0, 0, 1),
9050,
)));
let socks_proxy = config.socks_listen.unwrap_or(DEFAULT_SOCKS_LISTEN);
let (shutdown, _) = tokio::sync::broadcast::channel(1);
load_db.start();
@@ -163,18 +154,9 @@ impl RpcContext {
{
(net_ctrl, os_net_service)
} else {
let net_ctrl = Arc::new(
NetController::init(
db.clone(),
config
.tor_control
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
tor_proxy,
&account.hostname,
)
.await?,
);
webserver.try_upgrade(|a| net_ctrl.net_iface.upgrade_listener(a))?;
let net_ctrl =
Arc::new(NetController::init(db.clone(), &account.hostname, socks_proxy).await?);
webserver.try_upgrade(|a| net_ctrl.net_iface.watcher.upgrade_listener(a))?;
let os_net_service = net_ctrl.os_bindings().await?;
(net_ctrl, os_net_service)
};
@@ -183,7 +165,7 @@ impl RpcContext {
let services = ServiceMap::default();
let metrics_cache = Watch::<Option<crate::system::Metrics>>::new(None);
let tor_proxy_url = format!("socks5h://{tor_proxy}");
let socks_proxy_url = format!("socks5h://{socks_proxy}");
let crons = SyncMutex::new(BTreeMap::new());
@@ -251,7 +233,6 @@ impl RpcContext {
cancellable_installs: SyncMutex::new(BTreeMap::new()),
metrics_cache,
shutdown,
tor_socks: tor_proxy,
lxc_manager: Arc::new(LxcManager::new()),
open_authed_continuations: OpenAuthedContinuations::new(),
rpc_continuations: RpcContinuations::new(),
@@ -267,21 +248,11 @@ impl RpcContext {
})?,
),
client: Client::builder()
.proxy(Proxy::custom(move |url| {
if url.host_str().map_or(false, |h| h.ends_with(".onion")) {
Some(tor_proxy_url.clone())
} else {
None
}
}))
.proxy(Proxy::all(socks_proxy_url)?)
.build()
.with_kind(crate::ErrorKind::ParseUrl)?,
start_time: Instant::now(),
crons,
// #[cfg(feature = "dev")]
dev: Dev {
lxc: Mutex::new(BTreeMap::new()),
},
});
let res = Self(seed.clone());

View File

@@ -10,14 +10,15 @@ use josekit::jwk::Jwk;
use patch_db::PatchDb;
use rpc_toolkit::Context;
use serde::{Deserialize, Serialize};
use tokio::sync::broadcast::Sender;
use tokio::sync::OnceCell;
use tokio::sync::broadcast::Sender;
use tracing::instrument;
use ts_rs::TS;
use crate::MAIN_DATA;
use crate::account::AccountInfo;
use crate::context::config::ServerConfig;
use crate::context::RpcContext;
use crate::context::config::ServerConfig;
use crate::disk::OsPartitionInfo;
use crate::hostname::Hostname;
use crate::net::web_server::{UpgradableListener, WebServer, WebServerAcceptorSetter};
@@ -25,8 +26,8 @@ use crate::prelude::*;
use crate::progress::FullProgressTracker;
use crate::rpc_continuations::{Guid, RpcContinuation, RpcContinuations};
use crate::setup::SetupProgress;
use crate::shutdown::Shutdown;
use crate::util::net::WebSocketExt;
use crate::MAIN_DATA;
lazy_static::lazy_static! {
pub static ref CURRENT_SECRET: Jwk = Jwk::generate_ec_key(josekit::jwk::alg::ec::EcCurve::P256).unwrap_or_else(|e| {
@@ -54,7 +55,7 @@ impl TryFrom<&AccountInfo> for SetupResult {
tor_addresses: value
.tor_keys
.iter()
.map(|tor_key| format!("https://{}", tor_key.public().get_onion_address()))
.map(|tor_key| format!("https://{}", tor_key.onion_address()))
.collect(),
hostname: value.hostname.clone(),
lan_address: value.hostname.lan_address(),
@@ -71,7 +72,8 @@ pub struct SetupContextSeed {
pub progress: FullProgressTracker,
pub task: OnceCell<NonDetachingJoinHandle<()>>,
pub result: OnceCell<Result<(SetupResult, RpcContext), Error>>,
pub shutdown: Sender<()>,
pub disk_guid: OnceCell<Arc<String>>,
pub shutdown: Sender<Option<Shutdown>>,
pub rpc_continuations: RpcContinuations,
}
@@ -97,6 +99,7 @@ impl SetupContext {
progress: FullProgressTracker::new(),
task: OnceCell::new(),
result: OnceCell::new(),
disk_guid: OnceCell::new(),
shutdown,
rpc_continuations: RpcContinuations::new(),
})))

View File

@@ -5,10 +5,10 @@ use serde::{Deserialize, Serialize};
use tracing::instrument;
use ts_rs::TS;
use crate::Error;
use crate::context::RpcContext;
use crate::prelude::*;
use crate::rpc_continuations::Guid;
use crate::Error;
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]

View File

@@ -12,7 +12,7 @@ use itertools::Itertools;
use patch_db::json_ptr::{JsonPointer, ROOT};
use patch_db::{DiffPatch, Dump, Revision};
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{from_fn_async, Context, HandlerArgs, HandlerExt, ParentHandler};
use rpc_toolkit::{Context, HandlerArgs, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize};
use tokio::sync::mpsc::{self, UnboundedReceiver};
use tokio::sync::watch;
@@ -23,7 +23,7 @@ use crate::context::{CliContext, RpcContext};
use crate::prelude::*;
use crate::rpc_continuations::{Guid, RpcContinuation};
use crate::util::net::WebSocketExt;
use crate::util::serde::{apply_expr, HandlerExtSerde};
use crate::util::serde::{HandlerExtSerde, apply_expr};
lazy_static::lazy_static! {
static ref PUBLIC: JsonPointer = "/public".parse().unwrap();

View File

@@ -12,6 +12,7 @@ use crate::net::forward::AvailablePorts;
use crate::net::keys::KeyStore;
use crate::notifications::Notifications;
use crate::prelude::*;
use crate::sign::AnyVerifyingKey;
use crate::ssh::SshKeys;
use crate::util::serde::Pem;
@@ -33,6 +34,9 @@ impl Database {
private: Private {
key_store: KeyStore::new(account)?,
password: account.password.clone(),
auth_pubkeys: [AnyVerifyingKey::Ed25519((&account.developer_key).into())]
.into_iter()
.collect(),
ssh_privkey: Pem(account.ssh_key.clone()),
ssh_pubkeys: SshKeys::new(),
available_ports: AvailablePorts::new(),
@@ -40,7 +44,7 @@ impl Database {
notifications: Notifications::new(),
cifs: CifsTargets::new(),
package_stores: BTreeMap::new(),
compat_s9pk_key: Pem(account.compat_s9pk_key.clone()),
developer_key: Pem(account.developer_key.clone()),
}, // TODO
})
}

View File

@@ -5,8 +5,8 @@ use chrono::{DateTime, Utc};
use exver::VersionRange;
use imbl_value::InternedString;
use models::{ActionId, DataUrl, HealthCheckId, HostId, PackageId, ReplayId, ServiceInterfaceId};
use patch_db::json_ptr::JsonPointer;
use patch_db::HasModel;
use patch_db::json_ptr::JsonPointer;
use reqwest::Url;
use serde::{Deserialize, Serialize};
use ts_rs::TS;
@@ -17,7 +17,7 @@ use crate::prelude::*;
use crate::progress::FullProgress;
use crate::s9pk::manifest::Manifest;
use crate::status::MainStatus;
use crate::util::serde::{is_partial_of, Pem};
use crate::util::serde::{Pem, is_partial_of};
#[derive(Debug, Default, Deserialize, Serialize, TS)]
#[ts(export)]
@@ -268,7 +268,7 @@ impl Model<PackageState> {
return Err(Error::new(
eyre!("could not determine package state to get manifest"),
ErrorKind::Database,
))
));
}
})
}
@@ -375,7 +375,6 @@ pub struct PackageDataEntry {
pub last_backup: Option<DateTime<Utc>>,
pub current_dependencies: CurrentDependencies,
pub actions: BTreeMap<ActionId, ActionMetadata>,
#[ts(as = "BTreeMap::<String, TaskEntry>")]
pub tasks: BTreeMap<ReplayId, TaskEntry>,
pub service_interfaces: BTreeMap<ServiceInterfaceId, ServiceInterface>,
pub hosts: Hosts,

View File

@@ -1,4 +1,4 @@
use std::collections::BTreeMap;
use std::collections::{BTreeMap, HashSet};
use models::PackageId;
use patch_db::{HasModel, Value};
@@ -10,6 +10,7 @@ use crate::net::forward::AvailablePorts;
use crate::net::keys::KeyStore;
use crate::notifications::Notifications;
use crate::prelude::*;
use crate::sign::AnyVerifyingKey;
use crate::ssh::SshKeys;
use crate::util::serde::Pem;
@@ -19,8 +20,9 @@ use crate::util::serde::Pem;
pub struct Private {
pub key_store: KeyStore,
pub password: String, // argon2 hash
#[serde(default = "generate_compat_key")]
pub compat_s9pk_key: Pem<ed25519_dalek::SigningKey>,
pub auth_pubkeys: HashSet<AnyVerifyingKey>,
#[serde(default = "generate_developer_key")]
pub developer_key: Pem<ed25519_dalek::SigningKey>,
pub ssh_privkey: Pem<ssh_key::PrivateKey>,
pub ssh_pubkeys: SshKeys,
pub available_ports: AvailablePorts,
@@ -31,7 +33,7 @@ pub struct Private {
pub package_stores: BTreeMap<PackageId, Value>,
}
pub fn generate_compat_key() -> Pem<ed25519_dalek::SigningKey> {
pub fn generate_developer_key() -> Pem<ed25519_dalek::SigningKey> {
Pem(ed25519_dalek::SigningKey::generate(
&mut ssh_key::rand_core::OsRng::default(),
))

View File

@@ -1,13 +1,15 @@
use std::collections::{BTreeMap, BTreeSet};
use std::net::{IpAddr, Ipv4Addr};
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
use chrono::{DateTime, Utc};
use exver::{Version, VersionRange};
use imbl::{OrdMap, OrdSet};
use imbl_value::InternedString;
use ipnet::IpNet;
use isocountry::CountryCode;
use itertools::Itertools;
use models::PackageId;
use lazy_static::lazy_static;
use models::{GatewayId, PackageId};
use openssl::hash::MessageDigest;
use patch_db::{HasModel, Value};
use serde::{Deserialize, Serialize};
@@ -16,8 +18,9 @@ use ts_rs::TS;
use crate::account::AccountInfo;
use crate::db::model::package::AllPackageData;
use crate::net::acme::AcmeProvider;
use crate::net::host::binding::{AddSslOptions, BindInfo, BindOptions, NetInfo};
use crate::net::forward::START9_BRIDGE_IFACE;
use crate::net::host::Host;
use crate::net::host::binding::{AddSslOptions, BindInfo, BindOptions, NetInfo};
use crate::net::utils::ipv6_is_local;
use crate::net::vhost::AlpnInfo;
use crate::prelude::*;
@@ -27,7 +30,7 @@ use crate::util::cpupower::Governor;
use crate::util::lshw::LshwDevice;
use crate::util::serde::MaybeUtf8String;
use crate::version::{Current, VersionT};
use crate::{ARCH, PLATFORM};
use crate::{ARCH, HOST_IP, PLATFORM};
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
@@ -71,26 +74,25 @@ impl Public {
net: NetInfo {
assigned_port: None,
assigned_ssl_port: Some(443),
public: false,
private_disabled: OrdSet::new(),
public_enabled: OrdSet::new(),
},
},
)]
.into_iter()
.collect(),
onions: account
.tor_keys
.iter()
.map(|k| k.public().get_onion_address())
.collect(),
domains: BTreeMap::new(),
onions: account.tor_keys.iter().map(|k| k.onion_address()).collect(),
public_domains: BTreeMap::new(),
private_domains: BTreeSet::new(),
hostname_info: BTreeMap::new(),
},
wifi: WifiInfo {
enabled: true,
..Default::default()
},
network_interfaces: BTreeMap::new(),
gateways: OrdMap::new(),
acme: BTreeMap::new(),
dns: Default::default(),
},
status_info: ServerStatus {
backup_progress: None,
@@ -186,11 +188,22 @@ pub struct ServerInfo {
pub struct NetworkInfo {
pub wifi: WifiInfo,
pub host: Host,
#[ts(as = "BTreeMap::<String, NetworkInterfaceInfo>")]
#[ts(as = "BTreeMap::<GatewayId, NetworkInterfaceInfo>")]
#[serde(default)]
pub network_interfaces: BTreeMap<InternedString, NetworkInterfaceInfo>,
pub gateways: OrdMap<GatewayId, NetworkInterfaceInfo>,
#[serde(default)]
pub acme: BTreeMap<AcmeProvider, AcmeSettings>,
#[serde(default)]
pub dns: DnsSettings,
}
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
/// Server-wide DNS resolver configuration.
pub struct DnsSettings {
    /// DNS server addresses — presumably those learned from DHCP leases on
    /// the network interfaces; TODO confirm against the code that populates this.
    pub dhcp_servers: Vec<SocketAddr>,
    /// Statically configured DNS servers. `None` appears to mean "no static
    /// override" (fall back to `dhcp_servers`) — confirm with callers.
    pub static_servers: Option<Vec<SocketAddr>>,
}
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel, TS)]
@@ -198,13 +211,65 @@ pub struct NetworkInfo {
#[model = "Model<Self>"]
#[ts(export)]
pub struct NetworkInterfaceInfo {
pub inbound: Option<bool>,
pub outbound: Option<bool>,
pub public: Option<bool>,
pub secure: Option<bool>,
pub ip_info: Option<IpInfo>,
}
impl NetworkInterfaceInfo {
pub fn inbound(&self) -> bool {
self.inbound.unwrap_or_else(|| {
    /// Static descriptor for the loopback interface (`lo`).
    ///
    /// Marked non-public and secure, with the standard `127.0.0.1/8` and
    /// `::1/128` subnets and both loopback addresses as LAN IPs. Returns
    /// `'static` references backed by `lazy_static`, so this never allocates
    /// after first use.
    pub fn loopback() -> (&'static GatewayId, &'static Self) {
        lazy_static! {
            static ref LO: GatewayId = GatewayId::from("lo");
            static ref LOOPBACK: NetworkInterfaceInfo = NetworkInterfaceInfo {
                public: Some(false),
                secure: Some(true),
                ip_info: Some(IpInfo {
                    name: "lo".into(),
                    // NOTE(review): scope id 1 is the conventional kernel index
                    // for lo — confirm this matches how scope_id is used elsewhere.
                    scope_id: 1,
                    device_type: None,
                    subnets: [
                        IpNet::new(Ipv4Addr::LOCALHOST.into(), 8).unwrap(),
                        IpNet::new(Ipv6Addr::LOCALHOST.into(), 128).unwrap(),
                    ]
                    .into_iter()
                    .collect(),
                    lan_ip: [
                        IpAddr::from(Ipv4Addr::LOCALHOST),
                        IpAddr::from(Ipv6Addr::LOCALHOST)
                    ]
                    .into_iter()
                    .collect(),
                    wan_ip: None,
                    ntp_servers: Default::default(),
                    dns_servers: Default::default(),
                }),
            };
        }
        (&*LO, &*LOOPBACK)
    }
    /// Static descriptor for the LXC container bridge (`START9_BRIDGE_IFACE`).
    ///
    /// Marked non-public and secure; its only subnet is the `HOST_IP`/24
    /// container network, with `HOST_IP` as the sole LAN address. Returns
    /// `'static` references backed by `lazy_static`.
    pub fn lxc_bridge() -> (&'static GatewayId, &'static Self) {
        lazy_static! {
            static ref LXCBR0: GatewayId = GatewayId::from(START9_BRIDGE_IFACE);
            static ref LXC_BRIDGE: NetworkInterfaceInfo = NetworkInterfaceInfo {
                public: Some(false),
                secure: Some(true),
                ip_info: Some(IpInfo {
                    name: START9_BRIDGE_IFACE.into(),
                    // NOTE(review): scope id 0 (unspecified) here vs 1 for lo —
                    // confirm consumers tolerate a zero scope id.
                    scope_id: 0,
                    device_type: None,
                    subnets: [IpNet::new(HOST_IP.into(), 24).unwrap()]
                        .into_iter()
                        .collect(),
                    lan_ip: [IpAddr::from(HOST_IP)].into_iter().collect(),
                    wan_ip: None,
                    ntp_servers: Default::default(),
                    dns_servers: Default::default(),
                }),
            };
        }
        (&*LXCBR0, &*LXC_BRIDGE)
    }
pub fn public(&self) -> bool {
self.public.unwrap_or_else(|| {
!self.ip_info.as_ref().map_or(true, |ip_info| {
let ip4s = ip_info
.subnets
@@ -218,11 +283,9 @@ impl NetworkInterfaceInfo {
})
.collect::<BTreeSet<_>>();
if !ip4s.is_empty() {
return ip4s.iter().all(|ip4| {
ip4.is_loopback()
|| (ip4.is_private() && !ip4.octets().starts_with(&[10, 59])) // reserving 10.59 for public wireguard configurations
|| ip4.is_link_local()
});
return ip4s
.iter()
.all(|ip4| ip4.is_loopback() || ip4.is_private() || ip4.is_link_local());
}
ip_info.subnets.iter().all(|ipnet| {
if let IpAddr::V6(ip6) = ipnet.addr() {
@@ -234,6 +297,14 @@ impl NetworkInterfaceInfo {
})
})
}
    /// Effective "secure" flag for this interface.
    ///
    /// An explicit `self.secure` override wins; otherwise the interface is
    /// considered secure only when its device type is Wireguard. With no
    /// `ip_info` at all, defaults to `false`.
    pub fn secure(&self) -> bool {
        self.secure.unwrap_or_else(|| {
            self.ip_info.as_ref().map_or(false, |ip_info| {
                ip_info.device_type == Some(NetworkInterfaceType::Wireguard)
            })
        })
    }
}
#[derive(Clone, Debug, Default, PartialEq, Eq, Deserialize, Serialize, TS, HasModel)]
@@ -246,10 +317,14 @@ pub struct IpInfo {
pub scope_id: u32,
pub device_type: Option<NetworkInterfaceType>,
#[ts(type = "string[]")]
pub subnets: BTreeSet<IpNet>,
pub subnets: OrdSet<IpNet>,
#[ts(type = "string[]")]
pub lan_ip: OrdSet<IpAddr>,
pub wan_ip: Option<Ipv4Addr>,
#[ts(type = "string[]")]
pub ntp_servers: BTreeSet<InternedString>,
pub ntp_servers: OrdSet<InternedString>,
#[ts(type = "string[]")]
pub dns_servers: OrdSet<IpAddr>,
}
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize, TS)]
@@ -269,6 +344,14 @@ pub struct AcmeSettings {
pub contact: Vec<String>,
}
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
/// Configuration stored per domain.
pub struct DomainSettings {
    /// Gateway associated with this domain — presumably the one its traffic
    /// is routed/served through; confirm against the domain-handling code.
    pub gateway: GatewayId,
}
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
#[model = "Model<Self>"]
#[ts(export)]

View File

@@ -3,6 +3,7 @@ use std::marker::PhantomData;
use std::str::FromStr;
use chrono::{DateTime, Utc};
use imbl::OrdMap;
pub use imbl_value::Value;
use patch_db::value::InternedString;
pub use patch_db::{HasModel, MutateResult, PatchDb};
@@ -199,6 +200,18 @@ where
}
}
/// Lets imbl's persistent `OrdMap` act as a patch-db `Map` model. Keys must
/// be string-like (`AsRef<str>`) so they can serve directly as JSON object
/// keys.
impl<A, B> Map for OrdMap<A, B>
where
    A: serde::Serialize + serde::de::DeserializeOwned + Clone + Ord + AsRef<str>,
    B: serde::Serialize + serde::de::DeserializeOwned + Clone,
{
    type Key = A;
    type Value = B;
    // Keys are already string-like, so conversion is infallible.
    fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error> {
        Ok(key.as_ref())
    }
}
impl<T: Map> Model<T>
where
T::Value: Serialize,

View File

@@ -5,9 +5,9 @@ use models::PackageId;
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use crate::Error;
use crate::prelude::*;
use crate::util::PathOrUrl;
use crate::Error;
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel, TS)]
#[model = "Model<Self>"]
@@ -37,7 +37,6 @@ pub struct DepInfo {
#[derive(Clone, Debug, Deserialize, Serialize, HasModel, TS)]
#[serde(rename_all = "camelCase")]
#[model = "Model<Self>"]
#[ts(export)]
pub struct DependencyMetadata {
#[ts(type = "string")]
pub title: InternedString,

View File

@@ -1,40 +1,57 @@
use std::fs::File;
use std::io::Write;
use std::path::Path;
use std::path::{Path, PathBuf};
use ed25519::pkcs8::EncodePrivateKey;
use ed25519::PublicKeyBytes;
use ed25519::pkcs8::EncodePrivateKey;
use ed25519_dalek::{SigningKey, VerifyingKey};
use tokio::io::AsyncWriteExt;
use tracing::instrument;
use crate::context::CliContext;
use crate::context::config::local_config_path;
use crate::prelude::*;
use crate::util::io::create_file_mod;
use crate::util::serde::Pem;
pub const OS_DEVELOPER_KEY_PATH: &str = "/run/startos/developer.key.pem";
/// Default location of the developer signing key: `developer.key.pem` in the
/// directory containing the local config file.
///
/// Falls back to the compiled-in `CONFIG_PATH` when no local config path is
/// set, and to `/` when that path has no parent directory.
pub fn default_developer_key_path() -> PathBuf {
    let local = local_config_path();
    let config_file = match local.as_deref() {
        Some(path) => path,
        None => Path::new(crate::context::config::CONFIG_PATH),
    };
    let config_dir = config_file.parent().unwrap_or(Path::new("/"));
    config_dir.join("developer.key.pem")
}
/// Serializes `secret` (with its derived public key) as a PKCS#8 PEM ed25519
/// keypair and writes it to `path`, syncing the file to disk before returning.
pub async fn write_developer_key(
    secret: &ed25519_dalek::SigningKey,
    path: impl AsRef<Path>,
) -> Result<(), Error> {
    // Bundle secret + derived public key so the PEM round-trips as a full keypair.
    let keypair_bytes = ed25519::KeypairBytes {
        secret_key: secret.to_bytes(),
        public_key: Some(PublicKeyBytes(VerifyingKey::from(secret).to_bytes())),
    };
    // NOTE(review): mode 0o046 grants no owner permission bits (group r,
    // other rw) — the same mode is used for the auth cookie elsewhere in this
    // codebase, so it appears deliberate; confirm against create_file_mod.
    let mut file = create_file_mod(path, 0o046).await?;
    file.write_all(
        keypair_bytes
            .to_pkcs8_pem(base64ct::LineEnding::default())
            .with_kind(crate::ErrorKind::Pem)?
            .as_bytes(),
    )
    .await?;
    // Ensure the key is durably on disk before callers depend on it.
    file.sync_all().await?;
    Ok(())
}
#[instrument(skip_all)]
pub fn init(ctx: CliContext) -> Result<(), Error> {
if !ctx.developer_key_path.exists() {
let parent = ctx.developer_key_path.parent().unwrap_or(Path::new("/"));
if !parent.exists() {
std::fs::create_dir_all(parent)
.with_ctx(|_| (crate::ErrorKind::Filesystem, parent.display().to_string()))?;
}
pub async fn init(ctx: CliContext) -> Result<(), Error> {
if tokio::fs::metadata(OS_DEVELOPER_KEY_PATH).await.is_ok() {
println!("Developer key already exists at {}", OS_DEVELOPER_KEY_PATH);
} else if tokio::fs::metadata(&ctx.developer_key_path).await.is_err() {
tracing::info!("Generating new developer key...");
let secret = SigningKey::generate(&mut ssh_key::rand_core::OsRng::default());
tracing::info!("Writing key to {}", ctx.developer_key_path.display());
let keypair_bytes = ed25519::KeypairBytes {
secret_key: secret.to_bytes(),
public_key: Some(PublicKeyBytes(VerifyingKey::from(&secret).to_bytes())),
};
let mut dev_key_file = File::create(&ctx.developer_key_path)
.with_ctx(|_| (ErrorKind::Filesystem, ctx.developer_key_path.display()))?;
dev_key_file.write_all(
keypair_bytes
.to_pkcs8_pem(base64ct::LineEnding::default())
.with_kind(crate::ErrorKind::Pem)?
.as_bytes(),
)?;
dev_key_file.sync_all()?;
write_developer_key(&secret, &ctx.developer_key_path).await?;
println!(
"New developer key generated at {}",
ctx.developer_key_path.display()

View File

@@ -1,9 +1,8 @@
use std::path::Path;
use std::sync::Arc;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{
from_fn, from_fn_async, CallRemoteHandler, Context, Empty, HandlerExt, ParentHandler,
CallRemoteHandler, Context, Empty, HandlerExt, ParentHandler, from_fn, from_fn_async,
};
use crate::context::{CliContext, DiagnosticContext, RpcContext};
@@ -12,7 +11,6 @@ use crate::init::SYSTEM_REBUILD_PATH;
use crate::prelude::*;
use crate::shutdown::Shutdown;
use crate::util::io::delete_file;
use crate::DATA_DIR;
pub fn diagnostic<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
@@ -70,10 +68,7 @@ pub fn error(ctx: DiagnosticContext) -> Result<Arc<RpcError>, Error> {
pub fn restart(ctx: DiagnosticContext) -> Result<(), Error> {
ctx.shutdown
.send(Shutdown {
export_args: ctx
.disk_guid
.clone()
.map(|guid| (guid, Path::new(DATA_DIR).to_owned())),
disk_guid: ctx.disk_guid.clone(),
restart: true,
})
.map_err(|_| eyre!("receiver dropped"))

View File

@@ -4,9 +4,9 @@ use std::path::Path;
use tokio::process::Command;
use tracing::instrument;
use crate::Error;
use crate::disk::fsck::RequiresReboot;
use crate::util::Invoke;
use crate::Error;
#[instrument(skip_all)]
pub async fn btrfs_check_readonly(logicalname: impl AsRef<Path>) -> Result<RequiresReboot, Error> {

View File

@@ -2,13 +2,13 @@ use std::ffi::OsStr;
use std::path::Path;
use color_eyre::eyre::eyre;
use futures::future::BoxFuture;
use futures::FutureExt;
use futures::future::BoxFuture;
use tokio::process::Command;
use tracing::instrument;
use crate::disk::fsck::RequiresReboot;
use crate::Error;
use crate::disk::fsck::RequiresReboot;
#[instrument(skip_all)]
pub async fn e2fsck_preen(

View File

@@ -3,10 +3,10 @@ use std::path::Path;
use color_eyre::eyre::eyre;
use tokio::process::Command;
use crate::Error;
use crate::disk::fsck::btrfs::{btrfs_check_readonly, btrfs_check_repair};
use crate::disk::fsck::ext4::{e2fsck_aggressive, e2fsck_preen};
use crate::util::Invoke;
use crate::Error;
pub mod btrfs;
pub mod ext4;
@@ -45,7 +45,7 @@ impl RepairStrategy {
return Err(Error::new(
eyre!("Unknown filesystem {fs}"),
crate::ErrorKind::DiskManagement,
))
));
}
}
}

View File

@@ -2,13 +2,13 @@ use std::path::{Path, PathBuf};
use itertools::Itertools;
use lazy_format::lazy_format;
use rpc_toolkit::{from_fn_async, CallRemoteHandler, Context, Empty, HandlerExt, ParentHandler};
use rpc_toolkit::{CallRemoteHandler, Context, Empty, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize};
use crate::Error;
use crate::context::{CliContext, RpcContext};
use crate::disk::util::DiskInfo;
use crate::util::serde::{display_serializable, HandlerExtSerde, WithIoFormat};
use crate::Error;
use crate::util::serde::{HandlerExtSerde, WithIoFormat, display_serializable};
pub mod fsck;
pub mod main;
@@ -96,14 +96,13 @@ fn display_disk_info(params: WithIoFormat<Empty>, args: Vec<DiskInfo>) -> Result
"N/A"
},
part.capacity,
if let Some(used) = part
&if let Some(used) = part
.used
.map(|u| format!("{:.2} GiB", u as f64 / 1024.0 / 1024.0 / 1024.0))
.as_ref()
{
used
} else {
"N/A"
"N/A".to_owned()
},
&if part.start_os.is_empty() {
"N/A".to_owned()

View File

@@ -10,8 +10,8 @@ use tracing::instrument;
use super::guard::{GenericMountGuard, TmpMountGuard};
use crate::auth::check_password;
use crate::backup::target::BackupInfo;
use crate::disk::mount::filesystem::backupfs::BackupFS;
use crate::disk::mount::filesystem::ReadWrite;
use crate::disk::mount::filesystem::backupfs::BackupFS;
use crate::disk::mount::guard::SubPath;
use crate::disk::util::StartOsRecoveryInfo;
use crate::util::crypto::{decrypt_slice, encrypt_slice};

View File

@@ -11,9 +11,9 @@ use tracing::instrument;
use ts_rs::TS;
use super::{FileSystem, MountType, ReadOnly};
use crate::Error;
use crate::disk::mount::guard::{GenericMountGuard, TmpMountGuard};
use crate::util::Invoke;
use crate::Error;
async fn resolve_hostname(hostname: &str) -> Result<IpAddr, Error> {
if let Ok(addr) = hostname.parse() {

View File

@@ -7,8 +7,8 @@ use serde::{Deserialize, Serialize};
use sha2::Sha256;
use super::{FileSystem, MountType};
use crate::util::Invoke;
use crate::Error;
use crate::util::Invoke;
pub async fn mount_httpdirfs(url: &Url, mountpoint: impl AsRef<Path>) -> Result<(), Error> {
tokio::fs::create_dir_all(mountpoint.as_ref()).await?;

View File

@@ -2,8 +2,8 @@ use std::ffi::OsStr;
use std::fmt::{Display, Write};
use std::path::Path;
use digest::generic_array::GenericArray;
use digest::OutputSizeUser;
use digest::generic_array::GenericArray;
use futures::Future;
use sha2::Sha256;
use tokio::process::Command;
@@ -106,6 +106,7 @@ pub trait FileSystem: Send + Sync {
}
fn source_hash(
&self,
) -> impl Future<Output = Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error>>
+ Send;
) -> impl Future<
Output = Result<GenericArray<u8, <Sha256 as OutputSizeUser>::OutputSize>, Error>,
> + Send;
}

View File

@@ -21,11 +21,8 @@ impl<P0: AsRef<Path>, P1: AsRef<Path>, P2: AsRef<Path>> OverlayFs<P0, P1, P2> {
Self { lower, upper, work }
}
}
impl<
P0: AsRef<Path> + Send + Sync,
P1: AsRef<Path> + Send + Sync,
P2: AsRef<Path> + Send + Sync,
> FileSystem for OverlayFs<P0, P1, P2>
impl<P0: AsRef<Path> + Send + Sync, P1: AsRef<Path> + Send + Sync, P2: AsRef<Path> + Send + Sync>
FileSystem for OverlayFs<P0, P1, P2>
{
fn mount_type(&self) -> Option<impl AsRef<str>> {
Some("overlay")

View File

@@ -10,8 +10,8 @@ use tracing::instrument;
use super::filesystem::{FileSystem, MountType, ReadOnly, ReadWrite};
use super::util::unmount;
use crate::util::{Invoke, Never};
use crate::Error;
use crate::util::{Invoke, Never};
pub const TMP_MOUNTPOINT: &'static str = "/media/startos/tmp";

View File

@@ -2,8 +2,8 @@ use std::path::Path;
use tracing::instrument;
use crate::util::Invoke;
use crate::Error;
use crate::util::Invoke;
pub async fn is_mountpoint(path: impl AsRef<Path>) -> Result<bool, Error> {
let is_mountpoint = tokio::process::Command::new("mountpoint")

View File

@@ -14,14 +14,14 @@ use serde::{Deserialize, Serialize};
use tokio::process::Command;
use tracing::instrument;
use super::mount::filesystem::block_dev::BlockDev;
use super::mount::filesystem::ReadOnly;
use super::mount::filesystem::block_dev::BlockDev;
use super::mount::guard::TmpMountGuard;
use crate::disk::mount::guard::GenericMountGuard;
use crate::disk::OsPartitionInfo;
use crate::disk::mount::guard::GenericMountGuard;
use crate::hostname::Hostname;
use crate::util::serde::IoFormat;
use crate::util::Invoke;
use crate::util::serde::IoFormat;
use crate::{Error, ResultExt as _};
#[derive(Clone, Copy, Debug, Deserialize, Serialize)]

View File

@@ -6,11 +6,11 @@ use serde::{Deserialize, Serialize};
use tokio::io::BufReader;
use tokio::process::Command;
use crate::PLATFORM;
use crate::disk::fsck::RequiresReboot;
use crate::prelude::*;
use crate::util::io::open_file;
use crate::util::Invoke;
use crate::PLATFORM;
use crate::util::io::open_file;
/// Part of the Firmware, look there for more about
#[derive(Debug, Clone, Deserialize, Serialize)]

View File

@@ -1,6 +1,6 @@
use imbl_value::InternedString;
use lazy_format::lazy_format;
use rand::{rng, Rng};
use rand::{Rng, rng};
use tokio::process::Command;
use tracing::instrument;
@@ -35,8 +35,8 @@ impl Hostname {
pub fn generate_hostname() -> Hostname {
let mut rng = rng();
let adjective = &ADJECTIVES[rng.gen_range(0..ADJECTIVES.len())];
let noun = &NOUNS[rng.gen_range(0..NOUNS.len())];
let adjective = &ADJECTIVES[rng.random_range(0..ADJECTIVES.len())];
let noun = &NOUNS[rng.random_range(0..NOUNS.len())];
Hostname(InternedString::from_display(&lazy_format!(
"{adjective}-{noun}"
)))

View File

@@ -1,18 +1,13 @@
use std::fs::Permissions;
use std::io::Cursor;
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::os::unix::fs::PermissionsExt;
use std::path::Path;
use std::sync::Arc;
use std::time::{Duration, SystemTime};
use axum::extract::ws::{self};
use color_eyre::eyre::eyre;
use axum::extract::ws;
use const_format::formatcp;
use futures::{StreamExt, TryStreamExt};
use itertools::Itertools;
use models::ResultExt;
use rand::random;
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerArgs, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use tokio::process::Command;
@@ -21,13 +16,14 @@ use ts_rs::TS;
use crate::account::AccountInfo;
use crate::context::config::ServerConfig;
use crate::context::{CliContext, InitContext};
use crate::context::{CliContext, InitContext, RpcContext};
use crate::db::model::public::ServerStatus;
use crate::db::model::Database;
use crate::disk::mount::util::unmount;
use crate::developer::OS_DEVELOPER_KEY_PATH;
use crate::hostname::Hostname;
use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH;
use crate::middleware::auth::AuthContext;
use crate::net::net_controller::{NetController, NetService};
use crate::net::socks::DEFAULT_SOCKS_LISTEN;
use crate::net::utils::find_wifi_iface;
use crate::net::web_server::{UpgradableListener, WebServerAcceptorSetter};
use crate::prelude::*;
@@ -38,7 +34,7 @@ use crate::rpc_continuations::{Guid, RpcContinuation};
use crate::s9pk::v2::pack::{CONTAINER_DATADIR, CONTAINER_TOOL};
use crate::ssh::SSH_DIR;
use crate::system::{get_mem_info, sync_kiosk};
use crate::util::io::{create_file, open_file, IOHook};
use crate::util::io::{open_file, IOHook};
use crate::util::lshw::lshw;
use crate::util::net::WebSocketExt;
use crate::util::{cpupower, Invoke};
@@ -167,28 +163,7 @@ pub async fn init(
}
local_auth.start();
tokio::fs::create_dir_all("/run/startos")
.await
.with_ctx(|_| (crate::ErrorKind::Filesystem, "mkdir -p /run/startos"))?;
if tokio::fs::metadata(LOCAL_AUTH_COOKIE_PATH).await.is_err() {
tokio::fs::write(
LOCAL_AUTH_COOKIE_PATH,
base64::encode(random::<[u8; 32]>()).as_bytes(),
)
.await
.with_ctx(|_| {
(
crate::ErrorKind::Filesystem,
format!("write {}", LOCAL_AUTH_COOKIE_PATH),
)
})?;
tokio::fs::set_permissions(LOCAL_AUTH_COOKIE_PATH, Permissions::from_mode(0o046)).await?;
Command::new("chown")
.arg("root:startos")
.arg(LOCAL_AUTH_COOKIE_PATH)
.invoke(crate::ErrorKind::Filesystem)
.await?;
}
RpcContext::init_auth_cookie().await?;
local_auth.complete();
load_database.start();
@@ -199,6 +174,16 @@ pub async fn init(
load_database.complete();
load_ssh_keys.start();
crate::developer::write_developer_key(
&peek.as_private().as_developer_key().de()?.0,
OS_DEVELOPER_KEY_PATH,
)
.await?;
Command::new("chown")
.arg("root:startos")
.arg(OS_DEVELOPER_KEY_PATH)
.invoke(ErrorKind::Filesystem)
.await?;
crate::ssh::sync_keys(
&Hostname(peek.as_public().as_server_info().as_hostname().de()?),
&peek.as_private().as_ssh_privkey().de()?,
@@ -206,6 +191,13 @@ pub async fn init(
SSH_DIR,
)
.await?;
crate::ssh::sync_keys(
&Hostname(peek.as_public().as_server_info().as_hostname().de()?),
&peek.as_private().as_ssh_privkey().de()?,
&Default::default(),
"/root/.ssh",
)
.await?;
load_ssh_keys.complete();
let account = AccountInfo::load(&peek)?;
@@ -214,17 +206,12 @@ pub async fn init(
let net_ctrl = Arc::new(
NetController::init(
db.clone(),
cfg.tor_control
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
cfg.tor_socks.unwrap_or(SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(127, 0, 0, 1),
9050,
))),
&account.hostname,
cfg.socks_listen.unwrap_or(DEFAULT_SOCKS_LISTEN),
)
.await?,
);
webserver.try_upgrade(|a| net_ctrl.net_iface.upgrade_listener(a))?;
webserver.try_upgrade(|a| net_ctrl.net_iface.watcher.upgrade_listener(a))?;
let os_net_service = net_ctrl.os_bindings().await?;
start_net.complete();

View File

@@ -4,17 +4,17 @@ use std::time::Duration;
use axum::extract::ws;
use clap::builder::ValueParserFactory;
use clap::{value_parser, CommandFactory, FromArgMatches, Parser};
use clap::{CommandFactory, FromArgMatches, Parser, value_parser};
use color_eyre::eyre::eyre;
use exver::VersionRange;
use futures::{AsyncWriteExt, StreamExt};
use imbl_value::{json, InternedString};
use imbl_value::{InternedString, json};
use itertools::Itertools;
use models::{FromStrParser, VersionString};
use reqwest::header::{HeaderMap, CONTENT_LENGTH};
use reqwest::Url;
use rpc_toolkit::yajrc::RpcError;
use reqwest::header::{CONTENT_LENGTH, HeaderMap};
use rpc_toolkit::HandlerArgs;
use rpc_toolkit::yajrc::RpcError;
use rustyline_async::ReadlineEvent;
use serde::{Deserialize, Serialize};
use tokio::sync::oneshot;
@@ -31,9 +31,9 @@ use crate::registry::package::get::GetPackageResponse;
use crate::rpc_continuations::{Guid, RpcContinuation};
use crate::s9pk::manifest::PackageId;
use crate::upload::upload;
use crate::util::Never;
use crate::util::io::open_file;
use crate::util::net::WebSocketExt;
use crate::util::Never;
pub const PKG_ARCHIVE_DIR: &str = "package-data/archive";
pub const PKG_PUBLIC_DIR: &str = "package-data/public";
@@ -253,7 +253,7 @@ pub async fn sideload(
.await;
tokio::spawn(async move {
if let Err(e) = async {
let key = ctx.db.peek().await.into_private().into_compat_s9pk_key();
let key = ctx.db.peek().await.into_private().into_developer_key();
ctx.services
.install(
@@ -483,7 +483,9 @@ pub async fn cli_install(
let version = if packages.best.len() == 1 {
packages.best.pop_first().map(|(k, _)| k).unwrap()
} else {
println!("Multiple flavors of {id} found. Please select one of the following versions to install:");
println!(
"Multiple flavors of {id} found. Please select one of the following versions to install:"
);
let version;
loop {
let (mut read, mut output) = rustyline_async::Readline::new("> ".into())

View File

@@ -60,10 +60,12 @@ pub mod s9pk;
pub mod service;
pub mod setup;
pub mod shutdown;
pub mod sign;
pub mod sound;
pub mod ssh;
pub mod status;
pub mod system;
pub mod tunnel;
pub mod update;
pub mod upload;
pub mod util;
@@ -77,8 +79,8 @@ pub use error::{Error, ErrorKind, ResultExt};
use imbl_value::Value;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{
from_fn, from_fn_async, from_fn_blocking, CallRemoteHandler, Context, Empty, HandlerExt,
ParentHandler,
CallRemoteHandler, Context, Empty, HandlerExt, ParentHandler, from_fn, from_fn_async,
from_fn_blocking,
};
use serde::{Deserialize, Serialize};
use ts_rs::TS;
@@ -89,7 +91,7 @@ use crate::context::{
use crate::disk::fsck::RequiresReboot;
use crate::registry::context::{RegistryContext, RegistryUrlParams};
use crate::system::kiosk;
use crate::util::serde::{display_serializable, HandlerExtSerde, WithIoFormat};
use crate::util::serde::{HandlerExtSerde, WithIoFormat, display_serializable};
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
@@ -148,13 +150,12 @@ pub fn main_api<C: Context>() -> ParentHandler<C> {
)
.subcommand(
"net",
net::net::<C>().with_about("Network commands related to tor and dhcp"),
net::net_api::<C>().with_about("Network commands related to tor and dhcp"),
)
.subcommand(
"auth",
auth::auth::<C>().with_about(
"Commands related to Authentication i.e. login, logout, reset-password",
),
auth::auth::<C, RpcContext>()
.with_about("Commands related to Authentication i.e. login, logout"),
)
.subcommand(
"db",
@@ -582,7 +583,7 @@ pub fn expanded_api() -> ParentHandler<CliContext> {
main_api()
.subcommand(
"init",
from_fn_blocking(developer::init)
from_fn_async(developer::init)
.no_display()
.with_about("Create developer key if it doesn't exist"),
)

View File

@@ -15,7 +15,7 @@ use itertools::Itertools;
use models::{FromStrParser, PackageId};
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{
from_fn_async, CallRemote, Context, Empty, HandlerArgs, HandlerExt, HandlerFor, ParentHandler,
CallRemote, Context, Empty, HandlerArgs, HandlerExt, HandlerFor, ParentHandler, from_fn_async,
};
use serde::de::{self, DeserializeOwned};
use serde::{Deserialize, Serialize};
@@ -30,9 +30,9 @@ use crate::error::ResultExt;
use crate::lxc::ContainerId;
use crate::prelude::*;
use crate::rpc_continuations::{Guid, RpcContinuation, RpcContinuations};
use crate::util::Invoke;
use crate::util::net::WebSocketExt;
use crate::util::serde::Reversible;
use crate::util::Invoke;
#[pin_project::pin_project]
pub struct LogStream {

View File

@@ -31,7 +31,7 @@ use crate::rpc_continuations::{Guid, RpcContinuation};
use crate::service::ServiceStats;
use crate::util::io::open_file;
use crate::util::rpc_client::UnixRpcClient;
use crate::util::{new_guid, Invoke};
use crate::util::{Invoke, new_guid};
const LXC_CONTAINER_DIR: &str = "/var/lib/lxc";
const RPC_DIR: &str = "media/startos/rpc"; // must not be absolute path

View File

@@ -1,29 +1,155 @@
use std::borrow::Borrow;
use std::collections::BTreeSet;
use std::future::Future;
use std::ops::Deref;
use std::sync::Arc;
use std::time::{Duration, Instant};
use axum::extract::Request;
use axum::response::Response;
use base64::Engine;
use basic_cookies::Cookie;
use chrono::Utc;
use color_eyre::eyre::eyre;
use digest::Digest;
use helpers::const_true;
use http::header::{COOKIE, USER_AGENT};
use http::HeaderValue;
use imbl_value::InternedString;
use http::header::{COOKIE, USER_AGENT};
use imbl_value::{InternedString, json};
use rand::random;
use rpc_toolkit::yajrc::INTERNAL_ERROR;
use rpc_toolkit::{Middleware, RpcRequest, RpcResponse};
use serde::{Deserialize, Serialize};
use sha2::Sha256;
use tokio::io::AsyncWriteExt;
use tokio::process::Command;
use tokio::sync::Mutex;
use crate::auth::{Sessions, check_password, write_shadow};
use crate::context::RpcContext;
use crate::db::model::Database;
use crate::middleware::signature::{SignatureAuth, SignatureAuthContext};
use crate::prelude::*;
use crate::rpc_continuations::OpenAuthedContinuations;
use crate::sign::AnyVerifyingKey;
use crate::util::Invoke;
use crate::util::io::{create_file_mod, read_file_to_string};
use crate::util::iter::TransposeResultIterExt;
use crate::util::serde::BASE64;
use crate::util::sync::SyncMutex;
pub const LOCAL_AUTH_COOKIE_PATH: &str = "/run/startos/rpc.authcookie";
/// Context capabilities required by the authentication middleware, layered on
/// top of signature-based auth ([`SignatureAuthContext`]).
pub trait AuthContext: SignatureAuthContext {
    /// Filesystem path of the local auth cookie used by same-host clients.
    const LOCAL_AUTH_COOKIE_PATH: &str;
    /// `user:group` spec passed to `chown` for the cookie file.
    const LOCAL_AUTH_COOKIE_OWNERSHIP: &str;

    /// (Re)creates the local auth cookie: 32 random bytes, base64-encoded,
    /// written with a restrictive mode, synced, then chowned to
    /// [`Self::LOCAL_AUTH_COOKIE_OWNERSHIP`].
    fn init_auth_cookie() -> impl Future<Output = Result<(), Error>> + Send {
        async {
            // NOTE(review): mode 0o046 has no owner bits — appears deliberate
            // (group/other access only); confirm against create_file_mod usage.
            let mut file = create_file_mod(Self::LOCAL_AUTH_COOKIE_PATH, 0o046).await?;
            file.write_all(BASE64.encode(random::<[u8; 32]>()).as_bytes())
                .await?;
            file.sync_all().await?;
            drop(file); // close the handle before chown runs
            Command::new("chown")
                .arg(Self::LOCAL_AUTH_COOKIE_OWNERSHIP)
                .arg(Self::LOCAL_AUTH_COOKIE_PATH)
                .invoke(crate::ErrorKind::Filesystem)
                .await?;
            Ok(())
        }
    }

    /// In-memory-only sessions (not persisted to the database).
    fn ephemeral_sessions(&self) -> &SyncMutex<Sessions>;
    /// Open RPC continuations keyed by session id, so logout can kill them.
    fn open_authed_continuations(&self) -> &OpenAuthedContinuations<Option<InternedString>>;
    /// Mutable access to the persisted session table in the database model.
    fn access_sessions(db: &mut Model<Self::Database>) -> &mut Model<Sessions>;
    /// Verifies `password` against credentials stored in the database.
    fn check_password(db: &Model<Self::Database>, password: &str) -> Result<(), Error>;
    /// Hook run after a successful password login; defaults to a no-op.
    #[allow(unused_variables)]
    fn post_login_hook(&self, password: &str) -> impl Future<Output = Result<(), Error>> + Send {
        async { Ok(()) }
    }
}
impl SignatureAuthContext for RpcContext {
    type Database = Database;
    // No extra request metadata is needed to authorize a pubkey.
    type AdditionalMetadata = ();
    // Nothing to carry forward into post_auth_hook.
    type CheckPubkeyRes = ();

    fn db(&self) -> &TypedPatchDb<Self::Database> {
        &self.db
    }

    /// Collects every name this server answers to — account hostnames plus
    /// public and private domains from the db — presumably so signatures are
    /// only accepted when scoped to one of them; TODO confirm how the
    /// signature middleware consumes this context.
    async fn sig_context(
        &self,
    ) -> impl IntoIterator<Item = Result<impl AsRef<str> + Send, Error>> + Send {
        let peek = self.db.peek().await;
        self.account
            .read()
            .await
            .hostnames()
            .into_iter()
            .map(Ok)
            .chain(
                // Public domains: keys of the public_domains map.
                peek.as_public()
                    .as_server_info()
                    .as_network()
                    .as_host()
                    .as_public_domains()
                    .keys()
                    .map(|k| k.into_iter())
                    .transpose(),
            )
            .chain(
                // Private domains: deserialized as a whole set.
                peek.as_public()
                    .as_server_info()
                    .as_network()
                    .as_host()
                    .as_private_domains()
                    .de()
                    .map(|k| k.into_iter())
                    .transpose(),
            )
            .collect::<Vec<_>>()
    }

    /// Accepts the request only if `pubkey` is present and is in the stored
    /// `auth_pubkeys` set; otherwise fails with `IncorrectPassword`.
    fn check_pubkey(
        db: &Model<Self::Database>,
        pubkey: Option<&AnyVerifyingKey>,
        _: Self::AdditionalMetadata,
    ) -> Result<Self::CheckPubkeyRes, Error> {
        if let Some(pubkey) = pubkey {
            if db.as_private().as_auth_pubkeys().de()?.contains(pubkey) {
                return Ok(());
            }
        }
        Err(Error::new(
            eyre!("Developer Key is not authorized"),
            ErrorKind::IncorrectPassword,
        ))
    }

    // No additional work after a successful pubkey check.
    async fn post_auth_hook(&self, _: Self::CheckPubkeyRes, _: &RpcRequest) -> Result<(), Error> {
        Ok(())
    }
}
impl AuthContext for RpcContext {
const LOCAL_AUTH_COOKIE_PATH: &str = "/run/startos/rpc.authcookie";
const LOCAL_AUTH_COOKIE_OWNERSHIP: &str = "root:startos";
fn ephemeral_sessions(&self) -> &SyncMutex<Sessions> {
&self.ephemeral_sessions
}
fn open_authed_continuations(&self) -> &OpenAuthedContinuations<Option<InternedString>> {
&self.open_authed_continuations
}
fn access_sessions(db: &mut Model<Self::Database>) -> &mut Model<Sessions> {
db.as_private_mut().as_sessions_mut()
}
fn check_password(db: &Model<Self::Database>, password: &str) -> Result<(), Error> {
check_password(&db.as_private().as_password().de()?, password)
}
async fn post_login_hook(&self, password: &str) -> Result<(), Error> {
if tokio::fs::metadata("/media/startos/config/overlay/etc/shadow")
.await
.is_err()
{
write_shadow(&password).await?;
}
Ok(())
}
}
#[derive(Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
@@ -40,25 +166,25 @@ pub trait AsLogoutSessionId {
pub struct HasLoggedOutSessions(());
impl HasLoggedOutSessions {
pub async fn new(
pub async fn new<C: AuthContext>(
sessions: impl IntoIterator<Item = impl AsLogoutSessionId>,
ctx: &RpcContext,
ctx: &C,
) -> Result<Self, Error> {
let to_log_out: BTreeSet<_> = sessions
.into_iter()
.map(|s| s.as_logout_session_id())
.collect();
for sid in &to_log_out {
ctx.open_authed_continuations.kill(&Some(sid.clone()))
ctx.open_authed_continuations().kill(&Some(sid.clone()))
}
ctx.ephemeral_sessions.mutate(|s| {
ctx.ephemeral_sessions().mutate(|s| {
for sid in &to_log_out {
s.0.remove(sid);
}
});
ctx.db
ctx.db()
.mutate(|db| {
let sessions = db.as_private_mut().as_sessions_mut();
let sessions = C::access_sessions(db);
for sid in &to_log_out {
sessions.remove(sid)?;
}
@@ -82,9 +208,9 @@ enum SessionType {
}
impl HasValidSession {
pub async fn from_header(
pub async fn from_header<C: AuthContext>(
header: Option<&HeaderValue>,
ctx: &RpcContext,
ctx: &C,
) -> Result<Self, Error> {
if let Some(cookie_header) = header {
let cookies = Cookie::parse(
@@ -94,7 +220,7 @@ impl HasValidSession {
)
.with_kind(crate::ErrorKind::Authorization)?;
if let Some(cookie) = cookies.iter().find(|c| c.get_name() == "local") {
if let Ok(s) = Self::from_local(cookie).await {
if let Ok(s) = Self::from_local::<C>(cookie).await {
return Ok(s);
}
}
@@ -111,12 +237,12 @@ impl HasValidSession {
))
}
pub async fn from_session(
pub async fn from_session<C: AuthContext>(
session_token: HashSessionToken,
ctx: &RpcContext,
ctx: &C,
) -> Result<Self, Error> {
let session_hash = session_token.hashed();
if !ctx.ephemeral_sessions.mutate(|s| {
if !ctx.ephemeral_sessions().mutate(|s| {
if let Some(session) = s.0.get_mut(session_hash) {
session.last_active = Utc::now();
true
@@ -124,10 +250,9 @@ impl HasValidSession {
false
}
}) {
ctx.db
ctx.db()
.mutate(|db| {
db.as_private_mut()
.as_sessions_mut()
C::access_sessions(db)
.as_idx_mut(session_hash)
.ok_or_else(|| {
Error::new(eyre!("UNAUTHORIZED"), crate::ErrorKind::Authorization)
@@ -143,8 +268,8 @@ impl HasValidSession {
Ok(Self(SessionType::Session(session_token)))
}
pub async fn from_local(local: &Cookie<'_>) -> Result<Self, Error> {
let token = tokio::fs::read_to_string(LOCAL_AUTH_COOKIE_PATH).await?;
pub async fn from_local<C: AuthContext>(local: &Cookie<'_>) -> Result<Self, Error> {
let token = read_file_to_string(C::LOCAL_AUTH_COOKIE_PATH).await?;
if local.get_value() == &*token {
Ok(Self(SessionType::Local))
} else {
@@ -258,6 +383,8 @@ pub struct Metadata {
login: bool,
#[serde(default)]
get_session: bool,
#[serde(default)]
get_signer: bool,
}
#[derive(Clone)]
@@ -267,6 +394,7 @@ pub struct Auth {
is_login: bool,
set_cookie: Option<HeaderValue>,
user_agent: Option<HeaderValue>,
signature_auth: SignatureAuth,
}
impl Auth {
pub fn new() -> Self {
@@ -276,62 +404,73 @@ impl Auth {
is_login: false,
set_cookie: None,
user_agent: None,
signature_auth: SignatureAuth::new(),
}
}
}
impl Middleware<RpcContext> for Auth {
impl<C: AuthContext> Middleware<C> for Auth {
type Metadata = Metadata;
async fn process_http_request(
&mut self,
_: &RpcContext,
context: &C,
request: &mut Request,
) -> Result<(), Response> {
self.cookie = request.headers_mut().remove(COOKIE);
self.user_agent = request.headers_mut().remove(USER_AGENT);
self.signature_auth
.process_http_request(context, request)
.await?;
Ok(())
}
async fn process_rpc_request(
&mut self,
context: &RpcContext,
context: &C,
metadata: Self::Metadata,
request: &mut RpcRequest,
) -> Result<(), RpcResponse> {
if metadata.login {
self.is_login = true;
let guard = self.rate_limiter.lock().await;
if guard.1.elapsed() < Duration::from_secs(20) && guard.0 >= 3 {
return Err(RpcResponse {
id: request.id.take(),
result: Err(Error::new(
async {
if metadata.login {
self.is_login = true;
let guard = self.rate_limiter.lock().await;
if guard.1.elapsed() < Duration::from_secs(20) && guard.0 >= 3 {
return Err(Error::new(
eyre!("Please limit login attempts to 3 per 20 seconds."),
crate::ErrorKind::RateLimited,
)
.into()),
});
}
if let Some(user_agent) = self.user_agent.as_ref().and_then(|h| h.to_str().ok()) {
request.params["__auth_userAgent"] = Value::String(Arc::new(user_agent.to_owned()))
// TODO: will this panic?
}
} else if metadata.authenticated {
match HasValidSession::from_header(self.cookie.as_ref(), &context).await {
Err(e) => {
return Err(RpcResponse {
id: request.id.take(),
result: Err(e.into()),
})
));
}
Ok(HasValidSession(SessionType::Session(s))) if metadata.get_session => {
request.params["__auth_session"] =
Value::String(Arc::new(s.hashed().deref().to_owned()));
if let Some(user_agent) = self.user_agent.as_ref().and_then(|h| h.to_str().ok()) {
request.params["__auth_userAgent"] =
Value::String(Arc::new(user_agent.to_owned()))
// TODO: will this panic?
}
_ => (),
} else if metadata.authenticated {
if self
.signature_auth
.process_rpc_request(
context,
from_value(json!({
"get_signer": metadata.get_signer
}))?,
request,
)
.await
.is_err()
{
match HasValidSession::from_header(self.cookie.as_ref(), context).await? {
HasValidSession(SessionType::Session(s)) if metadata.get_session => {
request.params["__auth_session"] =
Value::String(Arc::new(s.hashed().deref().to_owned()));
}
_ => (),
}
}
}
Ok(())
}
Ok(())
.await
.map_err(|e| RpcResponse::from_result(Err(e)))
}
async fn process_rpc_response(&mut self, _: &RpcContext, response: &mut RpcResponse) {
async fn process_rpc_response(&mut self, _: &C, response: &mut RpcResponse) {
if self.is_login {
let mut guard = self.rate_limiter.lock().await;
if guard.1.elapsed() < Duration::from_secs(20) {
@@ -349,7 +488,7 @@ impl Middleware<RpcContext> for Auth {
let login_res = from_value::<LoginRes>(res.clone())?;
self.set_cookie = Some(
HeaderValue::from_str(&format!(
"session={}; Path=/; SameSite=Lax; Expires=Fri, 31 Dec 9999 23:59:59 GMT;",
"session={}; Path=/; SameSite=Strict; Expires=Fri, 31 Dec 9999 23:59:59 GMT;",
login_res.session
))
.with_kind(crate::ErrorKind::Network)?,
@@ -361,7 +500,7 @@ impl Middleware<RpcContext> for Auth {
}
}
}
async fn process_http_response(&mut self, _: &RpcContext, response: &mut Response) {
async fn process_http_response(&mut self, _: &C, response: &mut Response) {
if let Some(set_cookie) = self.set_cookie.take() {
response.headers_mut().insert("set-cookie", set_cookie);
}

View File

@@ -1,6 +1,6 @@
use axum::response::Response;
use http::header::InvalidHeaderValue;
use http::HeaderValue;
use http::header::InvalidHeaderValue;
use rpc_toolkit::{Middleware, RpcRequest, RpcResponse};
use serde::Deserialize;

View File

@@ -1,3 +1,4 @@
pub mod auth;
pub mod cors;
pub mod db;
pub mod signature;

View File

@@ -1,45 +1,62 @@
use std::collections::BTreeMap;
use std::future::Future;
use std::sync::Arc;
use std::time::{Duration, Instant, SystemTime, UNIX_EPOCH};
use axum::body::Body;
use axum::extract::Request;
use axum::response::Response;
use chrono::Utc;
use http::HeaderValue;
use rpc_toolkit::yajrc::RpcError;
use rpc_toolkit::{Middleware, RpcRequest, RpcResponse};
use serde::{Deserialize, Serialize};
use tokio::io::AsyncWriteExt;
use rpc_toolkit::{Context, Middleware, RpcRequest, RpcResponse};
use serde::Deserialize;
use serde::de::DeserializeOwned;
use tokio::sync::Mutex;
use ts_rs::TS;
use url::Url;
use crate::context::CliContext;
use crate::prelude::*;
use crate::registry::context::RegistryContext;
use crate::registry::signer::commitment::request::RequestCommitment;
use crate::registry::signer::commitment::Commitment;
use crate::registry::signer::sign::{
AnySignature, AnySigningKey, AnyVerifyingKey, SignatureScheme,
};
use crate::sign::commitment::Commitment;
use crate::sign::commitment::request::RequestCommitment;
use crate::sign::{AnySignature, AnySigningKey, AnyVerifyingKey, SignatureScheme};
use crate::util::serde::Base64;
pub const AUTH_SIG_HEADER: &str = "X-StartOS-Registry-Auth-Sig";
/// Context capable of authenticating RPC requests by a detached signature
/// carried in the `AUTH_SIG_HEADER` HTTP header.
pub trait SignatureAuthContext: Context {
    /// Patch-db database model this context reads authorization data from.
    type Database: HasModel<Model = Model<Self::Database>> + Send + Sync;
    /// Extra, context-specific fields flattened into the middleware metadata.
    type AdditionalMetadata: DeserializeOwned + Send;
    /// Value produced by a successful `check_pubkey`, handed to `post_auth_hook`.
    type CheckPubkeyRes: Send;
    fn db(&self) -> &TypedPatchDb<Self::Database>;
    /// Candidate context strings a signature may be verified against; each item
    /// is a `Result` so lookup errors can surface lazily per candidate.
    fn sig_context(
        &self,
    ) -> impl Future<Output = impl IntoIterator<Item = Result<impl AsRef<str> + Send, Error>> + Send>
    + Send;
    /// Decides whether the (optional) signer pubkey is authorized, given the
    /// request's additional metadata.
    fn check_pubkey(
        db: &Model<Self::Database>,
        pubkey: Option<&AnyVerifyingKey>,
        metadata: Self::AdditionalMetadata,
    ) -> Result<Self::CheckPubkeyRes, Error>;
    /// Runs after `check_pubkey` succeeds (e.g. for audit logging).
    fn post_auth_hook(
        &self,
        check_pubkey_res: Self::CheckPubkeyRes,
        request: &RpcRequest,
    ) -> impl Future<Output = Result<(), Error>> + Send;
}
pub const AUTH_SIG_HEADER: &str = "X-StartOS-Auth-Sig";
#[derive(Deserialize)]
pub struct Metadata {
#[serde(default)]
admin: bool,
pub struct Metadata<Additional> {
#[serde(flatten)]
additional: Additional,
#[serde(default)]
get_signer: bool,
}
#[derive(Clone)]
pub struct Auth {
pub struct SignatureAuth {
nonce_cache: Arc<Mutex<BTreeMap<Instant, u64>>>, // for replay protection
signer: Option<Result<AnyVerifyingKey, RpcError>>,
}
impl Auth {
impl SignatureAuth {
pub fn new() -> Self {
Self {
nonce_cache: Arc::new(Mutex::new(BTreeMap::new())),
@@ -65,15 +82,6 @@ impl Auth {
}
}
#[derive(Serialize, Deserialize, TS)]
pub struct RegistryAdminLogRecord {
pub timestamp: String,
pub name: String,
#[ts(type = "{ id: string | number | null; method: string; params: any }")]
pub request: RpcRequest,
pub key: AnyVerifyingKey,
}
pub struct SignatureHeader {
pub commitment: RequestCommitment,
pub signer: AnyVerifyingKey,
@@ -120,13 +128,13 @@ impl SignatureHeader {
}
}
impl Middleware<RegistryContext> for Auth {
type Metadata = Metadata;
impl<C: SignatureAuthContext> Middleware<C> for SignatureAuth {
type Metadata = Metadata<C::AdditionalMetadata>;
async fn process_http_request(
&mut self,
ctx: &RegistryContext,
context: &C,
request: &mut Request,
) -> Result<(), Response> {
) -> Result<(), axum::response::Response> {
if request.headers().contains_key(AUTH_SIG_HEADER) {
self.signer = Some(
async {
@@ -138,15 +146,27 @@ impl Middleware<RegistryContext> for Auth {
request
.headers()
.get(AUTH_SIG_HEADER)
.or_not_found("missing X-StartOS-Registry-Auth-Sig")
.or_not_found(AUTH_SIG_HEADER)
.with_kind(ErrorKind::InvalidRequest)?,
)?;
signer.scheme().verify_commitment(
&signer,
&commitment,
&ctx.hostname,
&signature,
context.sig_context().await.into_iter().fold(
Err(Error::new(
eyre!("no valid signature context available to verify"),
ErrorKind::Authorization,
)),
|acc, x| {
if acc.is_ok() {
acc
} else {
signer.scheme().verify_commitment(
&signer,
&commitment,
x?.as_ref(),
&signature,
)
}
},
)?;
let now = SystemTime::now()
@@ -175,48 +195,83 @@ impl Middleware<RegistryContext> for Auth {
}
async fn process_rpc_request(
&mut self,
ctx: &RegistryContext,
context: &C,
metadata: Self::Metadata,
request: &mut RpcRequest,
) -> Result<(), RpcResponse> {
async move {
async {
let signer = self.signer.take().transpose()?;
if metadata.get_signer {
if let Some(signer) = &signer {
request.params["__auth_signer"] = to_value(signer)?;
}
}
if metadata.admin {
let signer = signer
.ok_or_else(|| Error::new(eyre!("UNAUTHORIZED"), ErrorKind::Authorization))?;
let db = ctx.db.peek().await;
let (guid, admin) = db.as_index().as_signers().get_signer_info(&signer)?;
if db.into_admins().de()?.contains(&guid) {
let mut log = tokio::fs::OpenOptions::new()
.create(true)
.append(true)
.open(ctx.datadir.join("admin.log"))
.await?;
log.write_all(
(serde_json::to_string(&RegistryAdminLogRecord {
timestamp: Utc::now().to_rfc3339(),
name: admin.name,
request: request.clone(),
key: signer,
})
.with_kind(ErrorKind::Serialization)?
+ "\n")
.as_bytes(),
)
.await?;
} else {
return Err(Error::new(eyre!("UNAUTHORIZED"), ErrorKind::Authorization));
}
}
let db = context.db().peek().await;
let res = C::check_pubkey(&db, signer.as_ref(), metadata.additional)?;
context.post_auth_hook(res, request).await?;
Ok(())
}
.await
.map_err(|e| RpcResponse::from_result(Err(e)))
.map_err(|e: Error| rpc_toolkit::RpcResponse::from_result(Err(e)))
}
}
pub async fn call_remote(
ctx: &CliContext,
url: Url,
sig_context: &str,
method: &str,
params: Value,
) -> Result<Value, RpcError> {
use reqwest::Method;
use reqwest::header::{ACCEPT, CONTENT_LENGTH, CONTENT_TYPE};
use rpc_toolkit::RpcResponse;
use rpc_toolkit::yajrc::{GenericRpcMethod, Id, RpcRequest};
let rpc_req = RpcRequest {
id: Some(Id::Number(0.into())),
method: GenericRpcMethod::<_, _, Value>::new(method),
params,
};
let body = serde_json::to_vec(&rpc_req)?;
let mut req = ctx
.client
.request(Method::POST, url)
.header(CONTENT_TYPE, "application/json")
.header(ACCEPT, "application/json")
.header(CONTENT_LENGTH, body.len());
if let Ok(key) = ctx.developer_key() {
req = req.header(
AUTH_SIG_HEADER,
SignatureHeader::sign(&AnySigningKey::Ed25519(key.clone()), &body, sig_context)?
.to_header(),
);
}
let res = req.body(body).send().await?;
if !res.status().is_success() {
let status = res.status();
let txt = res.text().await?;
let mut res = Err(Error::new(
eyre!("{}", status.canonical_reason().unwrap_or(status.as_str())),
ErrorKind::Network,
));
if !txt.is_empty() {
res = res.with_ctx(|_| (ErrorKind::Network, txt));
}
return res.map_err(From::from);
}
match res
.headers()
.get(CONTENT_TYPE)
.and_then(|v| v.to_str().ok())
{
Some("application/json") => {
serde_json::from_slice::<RpcResponse>(&*res.bytes().await?)
.with_kind(ErrorKind::Deserialization)?
.result
}
_ => Err(Error::new(eyre!("unknown content type"), ErrorKind::Network).into()),
}
}

View File

@@ -2,21 +2,21 @@ use std::collections::{BTreeMap, BTreeSet};
use std::str::FromStr;
use async_acme::acme::Identifier;
use clap::builder::ValueParserFactory;
use clap::Parser;
use clap::builder::ValueParserFactory;
use imbl_value::InternedString;
use itertools::Itertools;
use models::{ErrorData, FromStrParser};
use openssl::pkey::{PKey, Private};
use openssl::x509::X509;
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
use rpc_toolkit::{Context, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use url::Url;
use crate::context::{CliContext, RpcContext};
use crate::db::model::public::AcmeSettings;
use crate::db::model::Database;
use crate::db::model::public::AcmeSettings;
use crate::prelude::*;
use crate::util::serde::{Pem, Pkcs8Doc};
@@ -174,7 +174,7 @@ impl<'a> async_acme::cache::AcmeCache for AcmeCertCache<'a> {
}
}
pub fn acme<C: Context>() -> ParentHandler<C> {
pub fn acme_api<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"init",
@@ -257,7 +257,8 @@ pub async fn init(
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_server_info_mut().as_network_mut()
.as_server_info_mut()
.as_network_mut()
.as_acme_mut()
.insert(&provider, &AcmeSettings { contact })
})
@@ -279,7 +280,8 @@ pub async fn remove(
ctx.db
.mutate(|db| {
db.as_public_mut()
.as_server_info_mut().as_network_mut()
.as_server_info_mut()
.as_network_mut()
.as_acme_mut()
.remove(&provider)
})

View File

@@ -1,69 +1,332 @@
use std::borrow::Borrow;
use std::collections::BTreeMap;
use std::net::Ipv4Addr;
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr};
use std::sync::{Arc, Weak};
use std::time::Duration;
use clap::Parser;
use color_eyre::eyre::eyre;
use futures::future::BoxFuture;
use futures::{FutureExt, StreamExt, TryStreamExt};
use helpers::NonDetachingJoinHandle;
use models::PackageId;
use hickory_client::client::Client;
use hickory_client::proto::DnsHandle;
use hickory_client::proto::runtime::TokioRuntimeProvider;
use hickory_client::proto::tcp::TcpClientStream;
use hickory_client::proto::udp::UdpClientStream;
use hickory_client::proto::xfer::{DnsExchangeBackground, DnsRequestOptions};
use hickory_server::ServerFuture;
use hickory_server::authority::MessageResponseBuilder;
use hickory_server::proto::op::{Header, ResponseCode};
use hickory_server::proto::rr::{Name, Record, RecordType};
use hickory_server::server::{Request, RequestHandler, ResponseHandler, ResponseInfo};
use imbl::OrdMap;
use imbl_value::InternedString;
use itertools::Itertools;
use models::{GatewayId, OptionExt, PackageId};
use rpc_toolkit::{
Context, HandlerArgs, HandlerExt, ParentHandler, from_fn_async, from_fn_blocking,
};
use serde::{Deserialize, Serialize};
use tokio::net::{TcpListener, UdpSocket};
use tokio::process::Command;
use tokio::sync::RwLock;
use tracing::instrument;
use trust_dns_server::authority::MessageResponseBuilder;
use trust_dns_server::proto::op::{Header, ResponseCode};
use trust_dns_server::proto::rr::{Name, Record, RecordType};
use trust_dns_server::server::{Request, RequestHandler, ResponseHandler, ResponseInfo};
use trust_dns_server::ServerFuture;
use crate::net::forward::START9_BRIDGE_IFACE;
use crate::util::sync::Watch;
use crate::util::Invoke;
use crate::{Error, ErrorKind, ResultExt};
use crate::context::RpcContext;
use crate::db::model::Database;
use crate::db::model::public::NetworkInterfaceInfo;
use crate::net::gateway::NetworkInterfaceWatcher;
use crate::prelude::*;
use crate::util::io::file_string_stream;
use crate::util::serde::{HandlerExtSerde, display_serializable};
use crate::util::sync::{SyncRwLock, Watch};
/// CLI/RPC handler tree for DNS commands: `dns query` and `dns set-static`.
pub fn dns_api<C: Context>() -> ParentHandler<C> {
    ParentHandler::new()
        .subcommand(
            "query",
            // Blocking handler: query_dns uses the synchronous getaddrinfo API.
            from_fn_blocking(query_dns::<C>)
                .with_display_serializable()
                .with_custom_display_fn(|HandlerArgs { params, .. }, res| {
                    // Honor an explicit --format flag; otherwise print the bare
                    // IP, or nothing when the name did not resolve.
                    if let Some(format) = params.format {
                        return display_serializable(format, res);
                    }
                    if let Some(ip) = res {
                        println!("{}", ip)
                    }
                    Ok(())
                })
                .with_about("Test the DNS configuration for a domain"),
        )
        .subcommand(
            "set-static",
            from_fn_async(set_static_dns)
                .no_display()
                .with_about("Set static DNS servers"),
        )
}
/// Parameters for `dns query`.
#[derive(Deserialize, Serialize, Parser)]
pub struct QueryDnsParams {
    /// Fully-qualified domain name to resolve.
    pub fqdn: InternedString,
}
/// Resolves `fqdn` through the system resolver and returns the first IPv4
/// address found, or `None` when the name (or an A record for it) does not exist.
pub fn query_dns<C: Context>(
    _: C,
    QueryDnsParams { fqdn }: QueryDnsParams,
) -> Result<Option<Ipv4Addr>, Error> {
    // Restrict getaddrinfo to IPv4 results.
    let hints = dns_lookup::AddrInfoHints {
        flags: 0,
        address: libc::AF_INET,
        socktype: 0,
        protocol: 0,
    };
    let addrs = match dns_lookup::getaddrinfo(Some(&*fqdn), None, Some(hints)) {
        Ok(iter) => Some(iter),
        Err(e) => {
            // NXDOMAIN / no-data are a successful "not found", not a failure.
            if matches!(
                e.kind(),
                dns_lookup::LookupErrorKind::NoName | dns_lookup::LookupErrorKind::NoData
            ) {
                None
            } else {
                return Err(std::io::Error::from(e)).with_kind(ErrorKind::Network);
            }
        }
    };
    for entry in addrs.into_iter().flatten() {
        match entry.map(|info| info.sockaddr.ip()) {
            // First IPv4 answer wins.
            Ok(IpAddr::V4(v4)) => return Ok(Some(v4)),
            // Surface the first iteration error as-is.
            Err(e) => return Err(e.into()),
            _ => (),
        }
    }
    Ok(None)
}
/// Parameters for `dns set-static`.
#[derive(Deserialize, Serialize, Parser)]
pub struct SetStaticDnsParams {
    /// Upstream servers as `ip` or `ip:port` strings; `None` clears the static list.
    pub servers: Option<Vec<String>>,
}
/// Overwrites (or clears, when `servers` is `None`) the statically configured
/// upstream DNS servers in the database.
pub async fn set_static_dns(
    ctx: RpcContext,
    SetStaticDnsParams { servers }: SetStaticDnsParams,
) -> Result<(), Error> {
    // Accept either `ip:port` or a bare IP address (defaulting to port 53).
    let parse_server = |s: String| {
        s.parse::<SocketAddr>()
            .or_else(|_| s.parse::<IpAddr>().map(|a| (a, 53).into()))
    };
    ctx.db
        .mutate(|db| {
            let parsed = servers
                .map(|list| list.into_iter().map(parse_server).collect())
                .transpose()?;
            db.as_public_mut()
                .as_server_info_mut()
                .as_network_mut()
                .as_dns_mut()
                .as_static_servers_mut()
                .ser(&parsed)
        })
        .await
        .result
}
/// Names the embedded DNS server answers authoritatively. Each entry's
/// liveness is tracked by a `Weak<()>` guard: once the last strong `Arc<()>`
/// handed to a registrant is dropped, the entry is considered dead.
#[derive(Default)]
struct ResolveMap {
    // Private domains answered with this host's own addresses.
    private_domains: BTreeMap<InternedString, Weak<()>>,
    // Container IPv4 addresses per package; presumably the `None` key is a
    // system-wide/default entry — TODO confirm against registrants.
    services: BTreeMap<Option<PackageId>, BTreeMap<Ipv4Addr, Weak<()>>>,
}
pub struct DnsController {
services: Weak<RwLock<BTreeMap<Option<PackageId>, BTreeMap<Ipv4Addr, Weak<()>>>>>,
resolve: Weak<SyncRwLock<ResolveMap>>,
#[allow(dead_code)]
dns_server: NonDetachingJoinHandle<Result<(), Error>>,
dns_server: NonDetachingJoinHandle<()>,
}
/// Pool of upstream DNS clients — one hickory `Client` per configured
/// nameserver — kept in sync with `/run/systemd/resolve/resolv.conf` by a
/// background task.
struct DnsClient {
    // (nameserver address, connected client) pairs; replaced wholesale when
    // the effective nameserver list changes.
    client: Arc<SyncRwLock<Vec<(SocketAddr, hickory_client::client::Client)>>>,
    // Join handle for the resolv.conf watcher task.
    _thread: NonDetachingJoinHandle<()>,
}
impl DnsClient {
    /// Spawns a background task that watches `/run/systemd/resolve/resolv.conf`,
    /// mirrors the discovered nameservers into the database (dhcp_servers),
    /// honors a static-servers override from the database, and (re)connects an
    /// upstream client per nameserver — UDP first, falling back to TCP.
    pub fn new(db: TypedPatchDb<Database>) -> Self {
        let client = Arc::new(SyncRwLock::new(Vec::new()));
        Self {
            client: client.clone(),
            _thread: tokio::spawn(async move {
                // Outer loop: restart the whole watcher on any error.
                loop {
                    if let Err::<(), Error>(e) = async {
                        // Stream of resolv.conf contents; yields again on change.
                        let mut stream = file_string_stream("/run/systemd/resolve/resolv.conf")
                            .filter_map(|a| futures::future::ready(a.transpose()))
                            .boxed();
                        let mut conf: String = stream
                            .next()
                            .await
                            .or_not_found("/run/systemd/resolve/resolv.conf")??;
                        let mut prev_nameservers = Vec::new();
                        // Background driver futures for each upstream connection.
                        let mut bg = BTreeMap::<SocketAddr, BoxFuture<_>>::new();
                        loop {
                            let nameservers = conf
                                .lines()
                                .map(|l| l.trim())
                                .filter_map(|l| l.strip_prefix("nameserver "))
                                // NOTE(review): the first two `nameserver`
                                // entries are skipped — presumably this
                                // server's own stub listeners; confirm.
                                .skip(2)
                                .map(|n| {
                                    // Accept `ip:port` or bare IP (port 53).
                                    n.parse::<SocketAddr>()
                                        .or_else(|_| n.parse::<IpAddr>().map(|a| (a, 53).into()))
                                })
                                .collect::<Result<Vec<_>, _>>()?;
                            // Record the DHCP-provided servers in the db and
                            // read back any static override in one transaction.
                            let static_nameservers = db
                                .mutate(|db| {
                                    let dns = db
                                        .as_public_mut()
                                        .as_server_info_mut()
                                        .as_network_mut()
                                        .as_dns_mut();
                                    dns.as_dhcp_servers_mut().ser(&nameservers)?;
                                    dns.as_static_servers().de()
                                })
                                .await
                                .result?;
                            // Static servers, when configured, take precedence.
                            let nameservers = static_nameservers.unwrap_or(nameservers);
                            if nameservers != prev_nameservers {
                                // Reuse still-valid connections; connect the rest.
                                let mut existing: BTreeMap<_, _> =
                                    client.peek(|c| c.iter().cloned().collect());
                                let mut new = Vec::with_capacity(nameservers.len());
                                for addr in &nameservers {
                                    if let Some(existing) = existing.remove(addr) {
                                        new.push((*addr, existing));
                                    } else {
                                        // Try UDP first; fall back to TCP with a
                                        // 30s timeout if the UDP connect fails.
                                        let client = if let Ok((client, bg_thread)) =
                                            Client::connect(
                                                UdpClientStream::builder(
                                                    *addr,
                                                    TokioRuntimeProvider::new(),
                                                )
                                                .build(),
                                            )
                                            .await
                                        {
                                            bg.insert(*addr, bg_thread.boxed());
                                            client
                                        } else {
                                            let (stream, sender) = TcpClientStream::new(
                                                *addr,
                                                None,
                                                Some(Duration::from_secs(30)),
                                                TokioRuntimeProvider::new(),
                                            );
                                            let (client, bg_thread) =
                                                Client::new(stream, sender, None)
                                                    .await
                                                    .with_kind(ErrorKind::Network)?;
                                            bg.insert(*addr, bg_thread.boxed());
                                            client
                                        };
                                        new.push((*addr, client));
                                    }
                                }
                                // Drop drivers for servers no longer configured.
                                bg.retain(|n, _| nameservers.iter().any(|a| a == n));
                                prev_nameservers = nameservers;
                                client.replace(new);
                            }
                            // Drive all connection backgrounds while waiting for
                            // the next resolv.conf change (pending() keeps the
                            // join arm from ever completing on its own).
                            tokio::select! {
                                c = stream.next() => conf = c.or_not_found("/run/systemd/resolve/resolv.conf")??,
                                _ = futures::future::join(
                                    futures::future::join_all(bg.values_mut()),
                                    futures::future::pending::<()>(),
                                ) => (),
                            }
                        }
                    }
                    .await
                    {
                        tracing::error!("{e}");
                        tracing::debug!("{e:?}");
                    }
                }
            })
            .into(),
        }
    }
    /// Fans `query` out to every configured upstream, returning the in-flight
    /// exchanges for the caller to poll.
    fn lookup(
        &self,
        query: hickory_client::proto::op::Query,
        options: DnsRequestOptions,
    ) -> Vec<hickory_client::proto::xfer::DnsExchangeSend> {
        self.client.peek(|c| {
            c.iter()
                .map(|(_, c)| c.lookup(query.clone(), options.clone()))
                .collect()
        })
    }
}
struct Resolver {
services: Arc<RwLock<BTreeMap<Option<PackageId>, BTreeMap<Ipv4Addr, Weak<()>>>>>,
client: DnsClient,
net_iface: Watch<OrdMap<GatewayId, NetworkInterfaceInfo>>,
resolve: Arc<SyncRwLock<ResolveMap>>,
}
impl Resolver {
async fn resolve(&self, name: &Name) -> Option<Vec<Ipv4Addr>> {
match name.iter().next_back() {
Some(b"embassy") | Some(b"startos") => {
if let Some(pkg) = name.iter().rev().skip(1).next() {
if let Some(ip) = self.services.read().await.get(&Some(
std::str::from_utf8(pkg)
.unwrap_or_default()
.parse()
.unwrap_or_default(),
)) {
fn resolve(&self, name: &Name, src: IpAddr) -> Option<Vec<IpAddr>> {
self.resolve.peek(|r| {
if r.private_domains
.get(&*name.to_lowercase().to_ascii())
.map_or(false, |d| d.strong_count() > 0)
{
if let Some(res) = self.net_iface.peek(|i| {
i.values()
.chain([NetworkInterfaceInfo::lxc_bridge().1])
.flat_map(|i| i.ip_info.as_ref())
.find(|i| i.subnets.iter().any(|s| s.contains(&src)))
.map(|ip_info| {
let mut res = ip_info.subnets.iter().collect::<Vec<_>>();
res.sort_by_cached_key(|a| !a.contains(&src));
res.into_iter().map(|s| s.addr()).collect()
})
}) {
return Some(res);
}
}
match name.iter().next_back() {
Some(b"embassy") | Some(b"startos") => {
if let Some(pkg) = name.iter().rev().skip(1).next() {
if let Some(ip) = r.services.get(&Some(
std::str::from_utf8(pkg)
.unwrap_or_default()
.parse()
.unwrap_or_default(),
)) {
Some(
ip.iter()
.filter(|(_, rc)| rc.strong_count() > 0)
.map(|(ip, _)| (*ip).into())
.collect(),
)
} else {
None
}
} else if let Some(ip) = r.services.get(&None) {
Some(
ip.iter()
.filter(|(_, rc)| rc.strong_count() > 0)
.map(|(ip, _)| *ip)
.map(|(ip, _)| (*ip).into())
.collect(),
)
} else {
None
}
} else if let Some(ip) = self.services.read().await.get(&None) {
Some(
ip.iter()
.filter(|(_, rc)| rc.strong_count() > 0)
.map(|(ip, _)| *ip)
.collect(),
)
} else {
None
}
_ => None,
}
_ => None,
}
})
}
}
@@ -74,132 +337,200 @@ impl RequestHandler for Resolver {
request: &Request,
mut response_handle: R,
) -> ResponseInfo {
let query = request.request_info().query;
if let Some(ip) = self.resolve(query.name().borrow()).await {
match query.query_type() {
RecordType::A => {
response_handle
.send_response(
MessageResponseBuilder::from_message_request(&*request).build(
Header::response_from_request(request.header()),
&ip.into_iter()
.map(|ip| {
Record::from_rdata(
request.request_info().query.name().to_owned().into(),
0,
trust_dns_server::proto::rr::RData::A(ip.into()),
)
})
.collect::<Vec<_>>(),
[],
[],
[],
),
)
.await
match async {
let req = request.request_info()?;
let query = req.query;
if let Some(ip) = self.resolve(query.name().borrow(), req.src.ip()) {
match query.query_type() {
RecordType::A => {
response_handle
.send_response(
MessageResponseBuilder::from_message_request(&*request).build(
Header::response_from_request(request.header()),
&ip.into_iter()
.filter_map(|a| {
if let IpAddr::V4(a) = a { Some(a) } else { None }
})
.map(|ip| {
Record::from_rdata(
query.name().to_owned().into(),
0,
hickory_server::proto::rr::RData::A(ip.into()),
)
})
.collect::<Vec<_>>(),
[],
[],
[],
),
)
.await
}
RecordType::AAAA => {
response_handle
.send_response(
MessageResponseBuilder::from_message_request(&*request).build(
Header::response_from_request(request.header()),
&ip.into_iter()
.filter_map(|a| {
if let IpAddr::V6(a) = a { Some(a) } else { None }
})
.map(|ip| {
Record::from_rdata(
query.name().to_owned().into(),
0,
hickory_server::proto::rr::RData::AAAA(ip.into()),
)
})
.collect::<Vec<_>>(),
[],
[],
[],
),
)
.await
}
_ => {
let res = Header::response_from_request(request.header());
response_handle
.send_response(
MessageResponseBuilder::from_message_request(&*request).build(
res.into(),
[],
[],
[],
[],
),
)
.await
}
}
_ => {
let res = Header::response_from_request(request.header());
response_handle
.send_response(
MessageResponseBuilder::from_message_request(&*request).build(
res.into(),
[],
[],
[],
[],
),
)
.await
} else {
let query = query.original().clone();
let mut streams = self.client.lookup(query, DnsRequestOptions::default());
let mut err = None;
for stream in streams.iter_mut() {
match tokio::time::timeout(Duration::from_secs(5), stream.next()).await {
Ok(Some(Err(e))) => err = Some(e),
Ok(Some(Ok(msg))) => {
return response_handle
.send_response(
MessageResponseBuilder::from_message_request(&*request).build(
Header::response_from_request(request.header()),
msg.answers(),
msg.name_servers(),
&msg.soa().map(|s| s.to_owned().into_record_of_rdata()),
msg.additionals(),
),
)
.await;
}
_ => (),
}
}
if let Some(e) = err {
tracing::error!("{e}");
tracing::debug!("{e:?}");
}
let mut res = Header::response_from_request(request.header());
res.set_response_code(ResponseCode::ServFail);
response_handle
.send_response(
MessageResponseBuilder::from_message_request(&*request).build(
res,
[],
[],
[],
[],
),
)
.await
}
}
.await
{
Ok(a) => a,
Err(e) => {
tracing::error!("{}", e);
tracing::debug!("{:?}", e);
let mut res = Header::response_from_request(request.header());
res.set_response_code(ResponseCode::ServFail);
response_handle
.send_response(
MessageResponseBuilder::from_message_request(&*request).build(
res,
[],
[],
[],
[],
),
)
.await
.unwrap_or(res.into())
}
} else {
let mut res = Header::response_from_request(request.header());
res.set_response_code(ResponseCode::NXDomain);
response_handle
.send_response(
MessageResponseBuilder::from_message_request(&*request).build(
res.into(),
[],
[],
[],
[],
),
)
.await
}
.unwrap_or_else(|e| {
tracing::error!("{}", e);
tracing::debug!("{:?}", e);
let mut res = Header::response_from_request(request.header());
res.set_response_code(ResponseCode::ServFail);
res.into()
})
}
}
impl DnsController {
#[instrument(skip_all)]
pub async fn init(mut lxcbr_status: Watch<bool>) -> Result<Self, Error> {
let services = Arc::new(RwLock::new(BTreeMap::new()));
pub async fn init(
db: TypedPatchDb<Database>,
watcher: &NetworkInterfaceWatcher,
) -> Result<Self, Error> {
let resolve = Arc::new(SyncRwLock::new(ResolveMap::default()));
let mut server = ServerFuture::new(Resolver {
services: services.clone(),
client: DnsClient::new(db),
net_iface: watcher.subscribe(),
resolve: resolve.clone(),
});
let dns_server = tokio::spawn(async move {
server.register_listener(
TcpListener::bind((Ipv4Addr::LOCALHOST, 53))
let dns_server = tokio::spawn(
async move {
server.register_listener(
TcpListener::bind((Ipv6Addr::UNSPECIFIED, 53))
.await
.with_kind(ErrorKind::Network)?,
Duration::from_secs(30),
);
server.register_socket(
UdpSocket::bind((Ipv6Addr::UNSPECIFIED, 53))
.await
.with_kind(ErrorKind::Network)?,
);
server
.block_until_done()
.await
.with_kind(ErrorKind::Network)?,
Duration::from_secs(30),
);
server.register_socket(
UdpSocket::bind((Ipv4Addr::LOCALHOST, 53))
.await
.with_kind(ErrorKind::Network)?,
);
lxcbr_status.wait_for(|a| *a).await;
Command::new("resolvectl")
.arg("dns")
.arg(START9_BRIDGE_IFACE)
.arg("127.0.0.1")
.invoke(ErrorKind::Network)
.await?;
Command::new("resolvectl")
.arg("domain")
.arg(START9_BRIDGE_IFACE)
.arg("embassy")
.invoke(ErrorKind::Network)
.await?;
server
.block_until_done()
.await
.map_err(|e| Error::new(e, ErrorKind::Network))
})
.with_kind(ErrorKind::Network)
}
.map(|r| {
r.log_err();
}),
)
.into();
Ok(Self {
services: Arc::downgrade(&services),
resolve: Arc::downgrade(&resolve),
dns_server,
})
}
pub async fn add(&self, pkg_id: Option<PackageId>, ip: Ipv4Addr) -> Result<Arc<()>, Error> {
if let Some(services) = Weak::upgrade(&self.services) {
let mut writable = services.write().await;
let mut ips = writable.remove(&pkg_id).unwrap_or_default();
let rc = if let Some(rc) = Weak::upgrade(&ips.remove(&ip).unwrap_or_default()) {
rc
} else {
Arc::new(())
};
ips.insert(ip, Arc::downgrade(&rc));
writable.insert(pkg_id, ips);
Ok(rc)
pub fn add_service(&self, pkg_id: Option<PackageId>, ip: Ipv4Addr) -> Result<Arc<()>, Error> {
if let Some(resolve) = Weak::upgrade(&self.resolve) {
resolve.mutate(|writable| {
let ips = writable.services.entry(pkg_id).or_default();
let weak = ips.entry(ip).or_default();
let rc = if let Some(rc) = Weak::upgrade(&*weak) {
rc
} else {
let new = Arc::new(());
*weak = Arc::downgrade(&new);
new
};
Ok(rc)
})
} else {
Err(Error::new(
eyre!("DNS Server Thread has exited"),
@@ -208,17 +539,65 @@ impl DnsController {
}
}
pub async fn gc(&self, pkg_id: Option<PackageId>, ip: Ipv4Addr) -> Result<(), Error> {
if let Some(services) = Weak::upgrade(&self.services) {
let mut writable = services.write().await;
let mut ips = writable.remove(&pkg_id).unwrap_or_default();
if let Some(rc) = Weak::upgrade(&ips.remove(&ip).unwrap_or_default()) {
ips.insert(ip, Arc::downgrade(&rc));
}
if !ips.is_empty() {
writable.insert(pkg_id, ips);
}
Ok(())
/// Garbage-collects this package's entry for `ip` in the DNS service map:
/// the entry is dropped unless some holder still keeps its `Arc` alive.
pub fn gc_service(&self, pkg_id: Option<PackageId>, ip: Ipv4Addr) -> Result<(), Error> {
    if let Some(resolve) = Weak::upgrade(&self.resolve) {
        resolve.mutate(|writable| {
            // Take the whole ip-map for this package out of the resolver state.
            let mut ips = writable.services.remove(&pkg_id).unwrap_or_default();
            // Drop the entry for `ip`, but restore it if another holder still
            // keeps the Arc alive (Weak upgrade succeeds).
            if let Some(rc) = Weak::upgrade(&ips.remove(&ip).unwrap_or_default()) {
                ips.insert(ip, Arc::downgrade(&rc));
            }
            // Only reinstall the map if it still has entries left.
            if !ips.is_empty() {
                writable.services.insert(pkg_id, ips);
            }
            Ok(())
        })
    } else {
        // `resolve` is only reachable while the DNS server task is alive;
        // a failed upgrade means that task has terminated.
        Err(Error::new(
            eyre!("DNS Server Thread has exited"),
            crate::ErrorKind::Network,
        ))
    }
}
pub fn add_private_domain(&self, fqdn: InternedString) -> Result<Arc<()>, Error> {
if let Some(resolve) = Weak::upgrade(&self.resolve) {
resolve.mutate(|writable| {
let weak = writable.private_domains.entry(fqdn).or_default();
let rc = if let Some(rc) = Weak::upgrade(&*weak) {
rc
} else {
let new = Arc::new(());
*weak = Arc::downgrade(&new);
new
};
Ok(rc)
})
} else {
Err(Error::new(
eyre!("DNS Server Thread has exited"),
crate::ErrorKind::Network,
))
}
}
pub fn gc_private_domains<'a, BK: Ord + 'a>(
&self,
domains: impl IntoIterator<Item = &'a BK> + 'a,
) -> Result<(), Error>
where
InternedString: Borrow<BK>,
{
if let Some(resolve) = Weak::upgrade(&self.resolve) {
resolve.mutate(|writable| {
for domain in domains {
if let Some((k, v)) = writable.private_domains.remove_entry(domain) {
if v.strong_count() > 0 {
writable.private_domains.insert(k, v);
}
}
}
Ok(())
})
} else {
Err(Error::new(
eyre!("DNS Server Thread has exited"),

View File

@@ -1,19 +1,22 @@
use std::collections::{BTreeMap, BTreeSet};
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::net::{IpAddr, SocketAddr, SocketAddrV6};
use std::sync::{Arc, Weak};
use futures::channel::oneshot;
use helpers::NonDetachingJoinHandle;
use id_pool::IdPool;
use imbl_value::InternedString;
use imbl::OrdMap;
use models::GatewayId;
use serde::{Deserialize, Serialize};
use tokio::process::Command;
use tokio::sync::mpsc;
use crate::db::model::public::NetworkInterfaceInfo;
use crate::net::gateway::{DynInterfaceFilter, InterfaceFilter};
use crate::net::utils::ipv6_is_link_local;
use crate::prelude::*;
use crate::util::sync::Watch;
use crate::util::Invoke;
use crate::util::sync::Watch;
pub const START9_BRIDGE_IFACE: &str = "lxcbr0";
pub const FIRST_DYNAMIC_PRIVATE_PORT: u16 = 49152;
@@ -39,106 +42,162 @@ impl AvailablePorts {
}
}
#[derive(Debug)]
struct ForwardRequest {
public: bool,
external: u16,
target: SocketAddr,
filter: DynInterfaceFilter,
rc: Weak<()>,
}
#[derive(Debug, Default)]
struct ForwardState {
requested: BTreeMap<u16, ForwardRequest>,
current: BTreeMap<u16, BTreeMap<InternedString, SocketAddr>>,
struct ForwardEntry {
external: u16,
target: SocketAddr,
prev_filter: DynInterfaceFilter,
forwards: BTreeMap<SocketAddr, GatewayId>,
rc: Weak<()>,
}
impl ForwardState {
async fn sync(
impl ForwardEntry {
fn new(external: u16, target: SocketAddr, rc: Weak<()>) -> Self {
Self {
external,
target,
prev_filter: false.into_dyn(),
forwards: BTreeMap::new(),
rc,
}
}
fn take(&mut self) -> Self {
Self {
external: self.external,
target: self.target,
prev_filter: std::mem::replace(&mut self.prev_filter, false.into_dyn()),
forwards: std::mem::take(&mut self.forwards),
rc: self.rc.clone(),
}
}
async fn destroy(mut self) -> Result<(), Error> {
while let Some((source, interface)) = self.forwards.pop_first() {
unforward(interface.as_str(), source, self.target).await?;
}
Ok(())
}
async fn update(
&mut self,
interfaces: &BTreeMap<InternedString, (bool, Vec<Ipv4Addr>)>,
ip_info: &OrdMap<GatewayId, NetworkInterfaceInfo>,
filter: Option<DynInterfaceFilter>,
) -> Result<(), Error> {
let private_interfaces = interfaces
if self.rc.strong_count() == 0 {
return self.take().destroy().await;
}
let filter_ref = filter.as_ref().unwrap_or(&self.prev_filter);
let mut keep = BTreeSet::<SocketAddr>::new();
for (iface, info) in ip_info
.iter()
.filter(|(_, (public, _))| !*public)
.map(|(i, _)| i)
.collect::<BTreeSet<_>>();
let all_interfaces = interfaces.keys().collect::<BTreeSet<_>>();
self.requested.retain(|_, req| req.rc.strong_count() > 0);
for external in self
.requested
.keys()
.chain(self.current.keys())
.copied()
.collect::<BTreeSet<_>>()
.chain([NetworkInterfaceInfo::loopback()])
.filter(|(id, info)| filter_ref.filter(*id, *info))
{
match (
self.requested.get(&external),
self.current.get_mut(&external),
) {
(Some(req), Some(cur)) => {
let expected = if req.public {
&all_interfaces
} else {
&private_interfaces
if let Some(ip_info) = &info.ip_info {
for ipnet in &ip_info.subnets {
let addr = match ipnet.addr() {
IpAddr::V6(ip6) => SocketAddrV6::new(
ip6,
self.external,
0,
if ipv6_is_link_local(ip6) {
ip_info.scope_id
} else {
0
},
)
.into(),
ip => SocketAddr::new(ip, self.external),
};
let actual = cur.keys().collect::<BTreeSet<_>>();
let mut to_rm = actual
.difference(expected)
.copied()
.map(|i| (i.clone(), &interfaces[i].1))
.collect::<BTreeMap<_, _>>();
let mut to_add = expected
.difference(&actual)
.copied()
.map(|i| (i.clone(), &interfaces[i].1))
.collect::<BTreeMap<_, _>>();
for interface in actual.intersection(expected).copied() {
if cur[interface] != req.target {
to_rm.insert(interface.clone(), &interfaces[interface].1);
to_add.insert(interface.clone(), &interfaces[interface].1);
}
}
for (interface, ips) in to_rm {
for ip in ips {
unforward(&*interface, (*ip, external).into(), cur[&interface]).await?;
}
cur.remove(&interface);
}
for (interface, ips) in to_add {
cur.insert(interface.clone(), req.target);
for ip in ips {
forward(&*interface, (*ip, external).into(), cur[&interface]).await?;
}
keep.insert(addr);
if !self.forwards.contains_key(&addr) {
forward(iface.as_str(), addr, self.target).await?;
self.forwards.insert(addr, iface.clone());
}
}
(Some(req), None) => {
let cur = self.current.entry(external).or_default();
for interface in if req.public {
&all_interfaces
} else {
&private_interfaces
}
.into_iter()
.copied()
{
cur.insert(interface.clone(), req.target);
for ip in &interfaces[interface].1 {
forward(&**interface, (*ip, external).into(), req.target).await?;
}
}
}
(None, Some(cur)) => {
let to_rm = cur.keys().cloned().collect::<BTreeSet<_>>();
for interface in to_rm {
for ip in &interfaces[&interface].1 {
unforward(&*interface, (*ip, external).into(), cur[&interface]).await?;
}
cur.remove(&interface);
}
self.current.remove(&external);
}
_ => (),
}
}
let rm = self
.forwards
.keys()
.copied()
.filter(|a| !keep.contains(a))
.collect::<Vec<_>>();
for rm in rm {
if let Some((source, interface)) = self.forwards.remove_entry(&rm) {
unforward(interface.as_str(), source, self.target).await?;
}
}
if let Some(filter) = filter {
self.prev_filter = filter;
}
Ok(())
}
async fn update_request(
&mut self,
ForwardRequest {
external,
target,
filter,
rc,
}: ForwardRequest,
ip_info: &OrdMap<GatewayId, NetworkInterfaceInfo>,
) -> Result<(), Error> {
if external != self.external || target != self.target {
self.take().destroy().await?;
*self = Self::new(external, target, rc);
self.update(ip_info, Some(filter)).await?;
} else {
if self.prev_filter != filter {
self.update(ip_info, Some(filter)).await?;
}
self.rc = rc;
}
Ok(())
}
}
impl Drop for ForwardEntry {
    fn drop(&mut self) {
        // `Drop` cannot be async, so if any port forwards are still active we
        // move the entry's state onto a detached task to run the async teardown.
        if !self.forwards.is_empty() {
            let take = self.take();
            tokio::spawn(async move {
                // Failures are only logged: there is no caller left to report to.
                take.destroy().await.log_err();
            });
        }
    }
}
/// All active port-forward entries, keyed by the requested external port.
#[derive(Default)]
struct ForwardState {
    // external port -> the forward entry serving it
    state: BTreeMap<u16, ForwardEntry>,
}
impl ForwardState {
    /// Applies a new forward request, creating or updating the entry for its
    /// external port against the current interface map.
    async fn handle_request(
        &mut self,
        request: ForwardRequest,
        ip_info: &OrdMap<GatewayId, NetworkInterfaceInfo>,
    ) -> Result<(), Error> {
        self.state
            .entry(request.external)
            // Placeholder with a dead rc: `update_request` immediately
            // installs the requested target, filter, and rc.
            .or_insert_with(|| ForwardEntry::new(request.external, request.target, Weak::new()))
            .update_request(request, ip_info)
            .await
    }
    /// Re-syncs every entry against the current interface map, then drops
    /// entries that are fully released.
    async fn sync(
        &mut self,
        ip_info: &OrdMap<GatewayId, NetworkInterfaceInfo>,
    ) -> Result<(), Error> {
        for entry in self.state.values_mut() {
            entry.update(ip_info, None).await?;
        }
        // BUGFIX: previously `retain(|_, fwd| !fwd.forwards.is_empty())`,
        // which also dropped entries whose requester still holds the Arc but
        // whose filter matches no interface *right now* (e.g. the gateway is
        // down). That lost the request permanently, so the forward was never
        // re-established when a matching interface came back. Keep the entry
        // while its rc is alive; only drop it once both the rc is gone and
        // its forwards have been torn down.
        self.state
            .retain(|_, fwd| fwd.rc.strong_count() > 0 || !fwd.forwards.is_empty());
        Ok(())
    }
}
@@ -150,87 +209,37 @@ fn err_has_exited<T>(_: T) -> Error {
)
}
pub struct LanPortForwardController {
req: mpsc::UnboundedSender<(
Option<(u16, ForwardRequest)>,
oneshot::Sender<Result<(), Error>>,
)>,
pub struct PortForwardController {
req: mpsc::UnboundedSender<(Option<ForwardRequest>, oneshot::Sender<Result<(), Error>>)>,
_thread: NonDetachingJoinHandle<()>,
}
impl LanPortForwardController {
pub fn new(mut ip_info: Watch<BTreeMap<InternedString, NetworkInterfaceInfo>>) -> Self {
let (req_send, mut req_recv) = mpsc::unbounded_channel();
impl PortForwardController {
pub fn new(mut ip_info: Watch<OrdMap<GatewayId, NetworkInterfaceInfo>>) -> Self {
let (req_send, mut req_recv) = mpsc::unbounded_channel::<(
Option<ForwardRequest>,
oneshot::Sender<Result<(), Error>>,
)>();
let thread = NonDetachingJoinHandle::from(tokio::spawn(async move {
let mut state = ForwardState::default();
let mut interfaces = ip_info.peek_and_mark_seen(|ip_info| {
ip_info
.iter()
.map(|(iface, info)| {
(
iface.clone(),
(
info.inbound(),
info.ip_info.as_ref().map_or(Vec::new(), |i| {
i.subnets
.iter()
.filter_map(|s| {
if let IpAddr::V4(ip) = s.addr() {
Some(ip)
} else {
None
}
})
.collect()
}),
),
)
})
.collect()
});
let mut reply: Option<oneshot::Sender<Result<(), Error>>> = None;
let mut interfaces = ip_info.read_and_mark_seen();
loop {
tokio::select! {
msg = req_recv.recv() => {
if let Some((msg, re)) = msg {
if let Some((external, req)) = msg {
state.requested.insert(external, req);
if let Some(req) = msg {
re.send(state.handle_request(req, &interfaces).await).ok();
} else {
re.send(state.sync(&interfaces).await).ok();
}
reply = Some(re);
} else {
break;
}
}
_ = ip_info.changed() => {
interfaces = ip_info.peek(|ip_info| {
ip_info
.iter()
.map(|(iface, info)| (iface.clone(), (
info.inbound(),
info.ip_info.as_ref().map_or(Vec::new(), |i| {
i.subnets
.iter()
.filter_map(|s| {
if let IpAddr::V4(ip) = s.addr() {
Some(ip)
} else {
None
}
})
.collect()
}),
)))
.collect()
});
interfaces = ip_info.read();
state.sync(&interfaces).await.log_err();
}
}
let res = state.sync(&interfaces).await;
if let Err(e) = &res {
tracing::error!("Error in PortForwardController: {e}");
tracing::debug!("{e:?}");
}
if let Some(re) = reply.take() {
let _ = re.send(res);
}
}
}));
Self {
@@ -238,19 +247,22 @@ impl LanPortForwardController {
_thread: thread,
}
}
pub async fn add(&self, port: u16, public: bool, target: SocketAddr) -> Result<Arc<()>, Error> {
pub async fn add(
&self,
external: u16,
filter: impl InterfaceFilter,
target: SocketAddr,
) -> Result<Arc<()>, Error> {
let rc = Arc::new(());
let (send, recv) = oneshot::channel();
self.req
.send((
Some((
port,
ForwardRequest {
public,
target,
rc: Arc::downgrade(&rc),
},
)),
Some(ForwardRequest {
external,
target,
filter: filter.into_dyn(),
rc: Arc::downgrade(&rc),
}),
send,
))
.map_err(err_has_exited)?;

View File

@@ -1,47 +1,45 @@
use std::collections::BTreeSet;
use std::net::Ipv4Addr;
use clap::Parser;
use imbl_value::InternedString;
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerArgs, HandlerExt, ParentHandler};
use models::GatewayId;
use rpc_toolkit::{Context, Empty, HandlerArgs, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize};
use torut::onion::OnionAddressV3;
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::db::model::DatabaseModel;
use crate::net::acme::AcmeProvider;
use crate::net::host::{all_hosts, HostApiKind};
use crate::net::host::{HostApiKind, all_hosts};
use crate::net::tor::OnionAddress;
use crate::prelude::*;
use crate::util::serde::{display_serializable, HandlerExtSerde};
use crate::util::serde::{HandlerExtSerde, display_serializable};
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
#[serde(rename_all_fields = "camelCase")]
#[serde(tag = "kind")]
#[ts(export)]
pub enum HostAddress {
Onion {
#[ts(type = "string")]
address: OnionAddressV3,
address: OnionAddress,
},
Domain {
#[ts(type = "string")]
address: InternedString,
public: bool,
acme: Option<AcmeProvider>,
public: Option<PublicDomainConfig>,
},
}
#[derive(Debug, Deserialize, Serialize, TS)]
pub struct DomainConfig {
pub public: bool,
#[derive(Debug, Clone, Deserialize, Serialize, TS)]
pub struct PublicDomainConfig {
pub gateway: GatewayId,
pub acme: Option<AcmeProvider>,
}
fn check_duplicates(db: &DatabaseModel) -> Result<(), Error> {
let mut onions = BTreeSet::<OnionAddressV3>::new();
fn handle_duplicates(db: &mut DatabaseModel) -> Result<(), Error> {
let mut onions = BTreeSet::<OnionAddress>::new();
let mut domains = BTreeSet::<InternedString>::new();
let mut check_onion = |onion: OnionAddressV3| {
let check_onion = |onions: &mut BTreeSet<OnionAddress>, onion: OnionAddress| {
if onions.contains(&onion) {
return Err(Error::new(
eyre!("onion address {onion} is already in use"),
@@ -51,7 +49,7 @@ fn check_duplicates(db: &DatabaseModel) -> Result<(), Error> {
onions.insert(onion);
Ok(())
};
let mut check_domain = |domain: InternedString| {
let check_domain = |domains: &mut BTreeSet<InternedString>, domain: InternedString| {
if domains.contains(&domain) {
return Err(Error::new(
eyre!("domain {domain} is already in use"),
@@ -61,41 +59,96 @@ fn check_duplicates(db: &DatabaseModel) -> Result<(), Error> {
domains.insert(domain);
Ok(())
};
let mut not_in_use = Vec::new();
for host in all_hosts(db) {
let host = host?;
for onion in host.as_onions().de()? {
check_onion(onion)?;
let in_use = host.as_bindings().de()?.values().any(|v| v.enabled);
if !in_use {
not_in_use.push(host);
continue;
}
for domain in host.as_domains().keys()? {
check_domain(domain)?;
for onion in host.as_onions().de()? {
check_onion(&mut onions, onion)?;
}
for domain in host.as_public_domains().keys()? {
check_domain(&mut domains, domain)?;
}
for domain in host.as_private_domains().de()? {
check_domain(&mut domains, domain)?;
}
}
for host in not_in_use {
host.as_onions_mut()
.mutate(|o| Ok(o.retain(|o| !onions.contains(o))))?;
host.as_public_domains_mut()
.mutate(|d| Ok(d.retain(|d, _| !domains.contains(d))))?;
host.as_private_domains_mut()
.mutate(|d| Ok(d.retain(|d| !domains.contains(d))))?;
for onion in host.as_onions().de()? {
check_onion(&mut onions, onion)?;
}
for domain in host.as_public_domains().keys()? {
check_domain(&mut domains, domain)?;
}
for domain in host.as_private_domains().de()? {
check_domain(&mut domains, domain)?;
}
}
Ok(())
}
pub fn address_api<C: Context, Kind: HostApiKind>(
) -> ParentHandler<C, Kind::Params, Kind::InheritedParams> {
pub fn address_api<C: Context, Kind: HostApiKind>()
-> ParentHandler<C, Kind::Params, Kind::InheritedParams> {
ParentHandler::<C, Kind::Params, Kind::InheritedParams>::new()
.subcommand(
"domain",
ParentHandler::<C, Empty, Kind::Inheritance>::new()
.subcommand(
"add",
from_fn_async(add_domain::<Kind>)
.with_metadata("sync_db", Value::Bool(true))
.with_inherited(|_, a| a)
.no_display()
.with_about("Add an address to this host")
.with_call_remote::<CliContext>(),
"public",
ParentHandler::<C, Empty, Kind::Inheritance>::new()
.subcommand(
"add",
from_fn_async(add_public_domain::<Kind>)
.with_metadata("sync_db", Value::Bool(true))
.with_inherited(|_, a| a)
.no_display()
.with_about("Add a public domain to this host")
.with_call_remote::<CliContext>(),
)
.subcommand(
"remove",
from_fn_async(remove_public_domain::<Kind>)
.with_metadata("sync_db", Value::Bool(true))
.with_inherited(|_, a| a)
.no_display()
.with_about("Remove a public domain from this host")
.with_call_remote::<CliContext>(),
)
.with_inherited(|_, a| a),
)
.subcommand(
"remove",
from_fn_async(remove_domain::<Kind>)
.with_metadata("sync_db", Value::Bool(true))
.with_inherited(|_, a| a)
.no_display()
.with_about("Remove an address from this host")
.with_call_remote::<CliContext>(),
"private",
ParentHandler::<C, Empty, Kind::Inheritance>::new()
.subcommand(
"add",
from_fn_async(add_private_domain::<Kind>)
.with_metadata("sync_db", Value::Bool(true))
.with_inherited(|_, a| a)
.no_display()
.with_about("Add a private domain to this host")
.with_call_remote::<CliContext>(),
)
.subcommand(
"remove",
from_fn_async(remove_private_domain::<Kind>)
.with_metadata("sync_db", Value::Bool(true))
.with_inherited(|_, a| a)
.no_display()
.with_about("Remove a private domain from this host")
.with_call_remote::<CliContext>(),
)
.with_inherited(|_, a| a),
)
.with_inherited(Kind::inheritance),
)
@@ -131,7 +184,7 @@ pub fn address_api<C: Context, Kind: HostApiKind>(
use prettytable::*;
if let Some(format) = params.format {
display_serializable(format, res);
display_serializable(format, res)?;
return Ok(());
}
@@ -144,15 +197,20 @@ pub fn address_api<C: Context, Kind: HostApiKind>(
}
HostAddress::Domain {
address,
public,
acme,
public: Some(PublicDomainConfig { gateway, acme }),
} => {
table.add_row(row![
address,
*public,
&format!("YES ({gateway})"),
acme.as_ref().map(|a| a.0.as_str()).unwrap_or("NONE")
]);
}
HostAddress::Domain {
address,
public: None,
} => {
table.add_row(row![address, &format!("NO"), "N/A"]);
}
}
}
@@ -166,63 +224,109 @@ pub fn address_api<C: Context, Kind: HostApiKind>(
}
#[derive(Deserialize, Serialize, Parser)]
pub struct AddDomainParams {
pub domain: InternedString,
#[arg(long)]
pub private: bool,
pub struct AddPublicDomainParams {
pub fqdn: InternedString,
#[arg(long)]
pub acme: Option<AcmeProvider>,
pub gateway: GatewayId,
}
pub async fn add_domain<Kind: HostApiKind>(
pub async fn add_public_domain<Kind: HostApiKind>(
ctx: RpcContext,
AddDomainParams {
domain,
private,
AddPublicDomainParams {
fqdn,
acme,
}: AddDomainParams,
gateway,
}: AddPublicDomainParams,
inheritance: Kind::Inheritance,
) -> Result<(), Error> {
) -> Result<Option<Ipv4Addr>, Error> {
ctx.db
.mutate(|db| {
if let Some(acme) = &acme {
if !db.as_public().as_server_info().as_network().as_acme().contains_key(&acme)? {
if !db
.as_public()
.as_server_info()
.as_network()
.as_acme()
.contains_key(&acme)?
{
return Err(Error::new(eyre!("unknown acme provider {}, please run acme.init for this provider first", acme.0), ErrorKind::InvalidRequest));
}
}
Kind::host_for(&inheritance, db)?
.as_domains_mut()
.insert(
&domain,
&DomainConfig {
public: !private,
acme,
},
)?;
check_duplicates(db)
.as_public_domains_mut()
.insert(&fqdn, &PublicDomainConfig { acme, gateway })?;
handle_duplicates(db)
})
.await.result?;
.await
.result?;
Kind::sync_host(&ctx, inheritance).await?;
tokio::task::spawn_blocking(|| {
crate::net::dns::query_dns(ctx, crate::net::dns::QueryDnsParams { fqdn })
})
.await
.with_kind(ErrorKind::Unknown)?
}
/// CLI/RPC parameters identifying a domain by its fully-qualified name.
#[derive(Deserialize, Serialize, Parser)]
pub struct RemoveDomainParams {
    pub fqdn: InternedString,
}
/// Removes `fqdn` from this host's public domains, then re-syncs the host's
/// network configuration.
pub async fn remove_public_domain<Kind: HostApiKind>(
    ctx: RpcContext,
    RemoveDomainParams { fqdn }: RemoveDomainParams,
    inheritance: Kind::Inheritance,
) -> Result<(), Error> {
    ctx.db
        .mutate(|db| {
            Kind::host_for(&inheritance, db)?
                .as_public_domains_mut()
                .remove(&fqdn)
        })
        .await
        .result?;
    // Propagate the database change to the live networking stack.
    Kind::sync_host(&ctx, inheritance).await?;
    Ok(())
}
#[derive(Deserialize, Serialize, Parser)]
pub struct RemoveDomainParams {
pub domain: InternedString,
pub struct AddPrivateDomainParams {
pub fqdn: InternedString,
}
pub async fn remove_domain<Kind: HostApiKind>(
pub async fn add_private_domain<Kind: HostApiKind>(
ctx: RpcContext,
RemoveDomainParams { domain }: RemoveDomainParams,
AddPrivateDomainParams { fqdn }: AddPrivateDomainParams,
inheritance: Kind::Inheritance,
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
Kind::host_for(&inheritance, db)?
.as_domains_mut()
.remove(&domain)
.as_private_domains_mut()
.mutate(|d| Ok(d.insert(fqdn)))?;
handle_duplicates(db)
})
.await
.result?;
Kind::sync_host(&ctx, inheritance).await?;
Ok(())
}
pub async fn remove_private_domain<Kind: HostApiKind>(
ctx: RpcContext,
RemoveDomainParams { fqdn: domain }: RemoveDomainParams,
inheritance: Kind::Inheritance,
) -> Result<(), Error> {
ctx.db
.mutate(|db| {
Kind::host_for(&inheritance, db)?
.as_private_domains_mut()
.mutate(|d| Ok(d.remove(&domain)))
})
.await
.result?;
@@ -249,7 +353,7 @@ pub async fn add_onion<Kind: HostApiKind>(
ErrorKind::InvalidOnionAddress,
)
})?
.parse::<OnionAddressV3>()?;
.parse::<OnionAddress>()?;
ctx.db
.mutate(|db| {
db.as_private().as_key_store().as_onion().get_key(&onion)?;
@@ -257,7 +361,7 @@ pub async fn add_onion<Kind: HostApiKind>(
Kind::host_for(&inheritance, db)?
.as_onions_mut()
.mutate(|a| Ok(a.insert(onion)))?;
check_duplicates(db)
handle_duplicates(db)
})
.await
.result?;
@@ -280,7 +384,7 @@ pub async fn remove_onion<Kind: HostApiKind>(
ErrorKind::InvalidOnionAddress,
)
})?
.parse::<OnionAddressV3>()?;
.parse::<OnionAddress>()?;
ctx.db
.mutate(|db| {
Kind::host_for(&inheritance, db)?

View File

@@ -1,15 +1,18 @@
use std::collections::BTreeMap;
use std::collections::{BTreeMap, BTreeSet};
use std::str::FromStr;
use clap::builder::ValueParserFactory;
use clap::Parser;
use models::{FromStrParser, HostId};
use imbl::OrdSet;
use models::{FromStrParser, GatewayId, HostId};
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerArgs, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::db::model::public::NetworkInterfaceInfo;
use crate::net::forward::AvailablePorts;
use crate::net::gateway::InterfaceFilter;
use crate::net::host::HostApiKind;
use crate::net::vhost::AlpnInfo;
use crate::prelude::*;
@@ -50,11 +53,16 @@ pub struct BindInfo {
pub net: NetInfo,
}
#[derive(Clone, Copy, Debug, Deserialize, Serialize, TS, PartialEq, Eq, PartialOrd, Ord)]
#[derive(Clone, Debug, Deserialize, Serialize, TS, PartialEq, Eq, PartialOrd, Ord)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct NetInfo {
pub public: bool,
#[ts(as = "BTreeSet::<GatewayId>")]
#[serde(default)]
pub private_disabled: OrdSet<GatewayId>,
#[ts(as = "BTreeSet::<GatewayId>")]
#[serde(default)]
pub public_enabled: OrdSet<GatewayId>,
pub assigned_port: Option<u16>,
pub assigned_ssl_port: Option<u16>,
}
@@ -65,16 +73,19 @@ impl BindInfo {
if options.add_ssl.is_some() {
assigned_ssl_port = Some(available_ports.alloc()?);
}
if let Some(secure) = options.secure {
if !secure.ssl || !options.add_ssl.is_some() {
assigned_port = Some(available_ports.alloc()?);
}
if options
.secure
.map_or(true, |s| !(s.ssl && options.add_ssl.is_some()))
{
assigned_port = Some(available_ports.alloc()?);
}
Ok(Self {
enabled: true,
options,
net: NetInfo {
public: false,
private_disabled: OrdSet::new(),
public_enabled: OrdSet::new(),
assigned_port,
assigned_ssl_port,
},
@@ -88,7 +99,7 @@ impl BindInfo {
let Self { net: mut lan, .. } = self;
if options
.secure
.map_or(false, |s| !(s.ssl && options.add_ssl.is_some()))
.map_or(true, |s| !(s.ssl && options.add_ssl.is_some()))
// doesn't make sense to have 2 listening ports, both with ssl
{
lan.assigned_port = if let Some(port) = lan.assigned_port.take() {
@@ -122,6 +133,15 @@ impl BindInfo {
self.enabled = false;
}
}
impl InterfaceFilter for NetInfo {
    /// Public gateways are opt-in (`public_enabled`); private gateways are
    /// opt-out (`private_disabled`).
    fn filter(&self, id: &GatewayId, info: &NetworkInterfaceInfo) -> bool {
        match info.public() {
            true => self.public_enabled.contains(id),
            false => !self.private_disabled.contains(id),
        }
    }
}
#[derive(Debug, Clone, Copy, serde::Serialize, serde::Deserialize, TS)]
#[ts(export)]
@@ -165,12 +185,11 @@ pub fn binding<C: Context, Kind: HostApiKind>(
}
let mut table = Table::new();
table.add_row(row![bc => "INTERNAL PORT", "ENABLED", "PUBLIC", "EXTERNAL PORT", "EXTERNAL SSL PORT"]);
table.add_row(row![bc => "INTERNAL PORT", "ENABLED", "EXTERNAL PORT", "EXTERNAL SSL PORT"]);
for (internal, info) in res {
table.add_row(row![
internal,
info.enabled,
info.net.public,
if let Some(port) = info.net.assigned_port {
port.to_string()
} else {
@@ -192,12 +211,12 @@ pub fn binding<C: Context, Kind: HostApiKind>(
.with_call_remote::<CliContext>(),
)
.subcommand(
"set-public",
from_fn_async(set_public::<Kind>)
"set-gateway-enabled",
from_fn_async(set_gateway_enabled::<Kind>)
.with_metadata("sync_db", Value::Bool(true))
.with_inherited(Kind::inheritance)
.no_display()
.with_about("Add an binding to this host")
.with_about("Set whether this gateway should be enabled for this binding")
.with_call_remote::<CliContext>(),
)
}
@@ -215,29 +234,50 @@ pub async fn list_bindings<Kind: HostApiKind>(
#[derive(Deserialize, Serialize, Parser, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
pub struct BindingSetPublicParams {
pub struct BindingGatewaySetEnabledParams {
internal_port: u16,
gateway: GatewayId,
#[arg(long)]
public: Option<bool>,
enabled: Option<bool>,
}
pub async fn set_public<Kind: HostApiKind>(
pub async fn set_gateway_enabled<Kind: HostApiKind>(
ctx: RpcContext,
BindingSetPublicParams {
BindingGatewaySetEnabledParams {
internal_port,
public,
}: BindingSetPublicParams,
gateway,
enabled,
}: BindingGatewaySetEnabledParams,
inheritance: Kind::Inheritance,
) -> Result<(), Error> {
let enabled = enabled.unwrap_or(true);
let gateway_public = ctx
.net_controller
.net_iface
.watcher
.ip_info()
.get(&gateway)
.or_not_found(&gateway)?
.public();
ctx.db
.mutate(|db| {
Kind::host_for(&inheritance, db)?
.as_bindings_mut()
.mutate(|b| {
b.get_mut(&internal_port)
.or_not_found(internal_port)?
.net
.public = public.unwrap_or(true);
let net = &mut b.get_mut(&internal_port).or_not_found(internal_port)?.net;
if gateway_public {
if enabled {
net.public_enabled.insert(gateway);
} else {
net.public_enabled.remove(&gateway);
}
} else {
if enabled {
net.private_disabled.remove(&gateway);
} else {
net.private_disabled.insert(gateway);
}
}
Ok(())
})
})

View File

@@ -6,17 +6,17 @@ use clap::Parser;
use imbl_value::InternedString;
use itertools::Itertools;
use models::{HostId, PackageId};
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerExt, OrEmpty, ParentHandler};
use rpc_toolkit::{Context, Empty, HandlerExt, OrEmpty, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize};
use torut::onion::OnionAddressV3;
use ts_rs::TS;
use crate::context::RpcContext;
use crate::db::model::DatabaseModel;
use crate::net::forward::AvailablePorts;
use crate::net::host::address::{address_api, DomainConfig, HostAddress};
use crate::net::host::binding::{binding, BindInfo, BindOptions};
use crate::net::host::address::{HostAddress, PublicDomainConfig, address_api};
use crate::net::host::binding::{BindInfo, BindOptions, binding};
use crate::net::service_interface::HostnameInfo;
use crate::net::tor::OnionAddress;
use crate::prelude::*;
pub mod address;
@@ -29,12 +29,13 @@ pub mod binding;
pub struct Host {
pub bindings: BTreeMap<u16, BindInfo>,
#[ts(type = "string[]")]
pub onions: BTreeSet<OnionAddressV3>,
#[ts(as = "BTreeMap::<String, DomainConfig>")]
pub domains: BTreeMap<InternedString, DomainConfig>,
pub onions: BTreeSet<OnionAddress>,
pub public_domains: BTreeMap<InternedString, PublicDomainConfig>,
pub private_domains: BTreeSet<InternedString>,
/// COMPUTED: NetService::update
pub hostname_info: BTreeMap<u16, Vec<HostnameInfo>>, // internal port -> Hostnames
}
impl AsRef<Host> for Host {
fn as_ref(&self) -> &Host {
self
@@ -50,15 +51,20 @@ impl Host {
.cloned()
.map(|address| HostAddress::Onion { address })
.chain(
self.domains
self.public_domains
.iter()
.map(
|(address, DomainConfig { public, acme })| HostAddress::Domain {
address: address.clone(),
public: *public,
acme: acme.clone(),
},
),
.map(|(address, config)| HostAddress::Domain {
address: address.clone(),
public: Some(config.clone()),
}),
)
.chain(
self.private_domains
.iter()
.map(|address| HostAddress::Domain {
address: address.clone(),
public: None,
}),
)
}
}
@@ -115,24 +121,22 @@ pub fn host_for<'a>(
};
host_info(db, package_id)?.upsert(host_id, || {
let mut h = Host::new();
h.onions.insert(
tor_key
.or_not_found("generated tor key")?
.public()
.get_onion_address(),
);
h.onions
.insert(tor_key.or_not_found("generated tor key")?.onion_address());
Ok(h)
})
}
pub fn all_hosts(db: &DatabaseModel) -> impl Iterator<Item = Result<&Model<Host>, Error>> {
[Ok(db.as_public().as_server_info().as_network().as_host())]
pub fn all_hosts(db: &mut DatabaseModel) -> impl Iterator<Item = Result<&mut Model<Host>, Error>> {
use patch_db::DestructureMut;
let destructured = db.as_public_mut().destructure_mut();
[Ok(destructured.server_info.as_network_mut().as_host_mut())]
.into_iter()
.chain(
[db.as_public().as_package_data().as_entries()]
[destructured.package_data.as_entries_mut()]
.into_iter()
.flatten_ok()
.map(|entry| entry.and_then(|(_, v)| v.as_hosts().as_entries()))
.map(|entry| entry.and_then(|(_, v)| v.as_hosts_mut().as_entries_mut()))
.flatten_ok()
.map_ok(|(_, v)| v),
)

View File

@@ -0,0 +1,585 @@
use std::collections::{BTreeMap, BTreeSet};
use std::net::SocketAddr;
use std::str::FromStr;
use std::sync::{Arc, Weak};
use clap::Parser;
use color_eyre::eyre::eyre;
use futures::{FutureExt, StreamExt};
use helpers::NonDetachingJoinHandle;
use imbl_value::InternedString;
use iroh::{Endpoint, NodeId, SecretKey};
use itertools::Itertools;
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerExt, ParentHandler};
use serde::{Deserialize, Serialize};
use tokio::net::TcpStream;
use crate::context::{CliContext, RpcContext};
use crate::prelude::*;
use crate::util::actor::background::BackgroundJobQueue;
use crate::util::io::ReadWriter;
use crate::util::serde::{
deserialize_from_str, display_serializable, serialize_display, HandlerExtSerde, Pem,
PemEncoding, WithIoFormat,
};
use crate::util::sync::{SyncMutex, SyncRwLock, Watch};
// Bech32 human-readable prefix used when rendering node ids as addresses.
const HRP: bech32::Hrp = bech32::Hrp::parse_unchecked("iroh");

/// An iroh node id, displayed and parsed as `<bech32m>.p2p.start9.to`.
#[derive(Debug, Clone, Copy)]
pub struct IrohAddress(pub NodeId);
impl std::fmt::Display for IrohAddress {
    /// Renders the node id as bech32m under the `iroh` HRP, followed by the
    /// well-known `.p2p.start9.to` suffix.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        if bech32::encode_lower_to_fmt::<bech32::Bech32m, _>(f, HRP, self.0.as_bytes()).is_err() {
            return Err(std::fmt::Error);
        }
        f.write_str(".p2p.start9.to")
    }
}
impl FromStr for IrohAddress {
type Err = Error;
fn from_str(s: &str) -> Result<Self, Self::Err> {
if let Some(b32) = s.strip_suffix(".p2p.start9.to") {
let (hrp, data) = bech32::decode(b32).with_kind(ErrorKind::ParseNetAddress)?;
ensure_code!(
hrp == HRP,
ErrorKind::ParseNetAddress,
"not an iroh address"
);
Ok(Self(
NodeId::from_bytes(&*<Box<[u8; 32]>>::try_from(data).map_err(|_| {
Error::new(eyre!("invalid length"), ErrorKind::ParseNetAddress)
})?)
.with_kind(ErrorKind::ParseNetAddress)?,
))
} else {
Err(Error::new(
eyre!("Invalid iroh address"),
ErrorKind::ParseNetAddress,
))
}
}
}
impl Serialize for IrohAddress {
    // Serialized as its `Display` string (`<bech32m>.p2p.start9.to`).
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serialize_display(self, serializer)
    }
}
impl<'de> Deserialize<'de> for IrohAddress {
    // Deserialized from a string via the type's `FromStr` impl.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        deserialize_from_str(deserializer)
    }
}
impl PartialEq for IrohAddress {
    // Compare the underlying raw key bytes directly.
    fn eq(&self, other: &Self) -> bool {
        self.0.as_ref() == other.0.as_ref()
    }
}
// Byte equality is reflexive and total, so `Eq` is sound.
impl Eq for IrohAddress {}
impl PartialOrd for IrohAddress {
    /// Canonical delegation to the `Ord` impl (clippy:
    /// `non_canonical_partial_ord_impl`) — guarantees `partial_cmp` and `cmp`
    /// can never disagree, instead of re-deriving the ordering from the raw
    /// bytes independently.
    fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
        Some(self.cmp(other))
    }
}
impl Ord for IrohAddress {
    // Total order over the raw key bytes.
    fn cmp(&self, other: &Self) -> std::cmp::Ordering {
        self.0.as_ref().cmp(other.0.as_ref())
    }
}
/// Newtype over iroh's `SecretKey`; its public half derives an [`IrohAddress`].
#[derive(Clone, Debug)]
pub struct IrohSecretKey(pub SecretKey);
impl IrohSecretKey {
    /// The public address derived from this secret key.
    pub fn iroh_address(&self) -> IrohAddress {
        let public = self.0.public();
        IrohAddress(public)
    }
    /// Generates a fresh random key using the OS RNG.
    pub fn generate() -> Self {
        let mut rng = ssh_key::rand_core::OsRng::default();
        Self(SecretKey::generate(&mut rng))
    }
}
impl PemEncoding for IrohSecretKey {
    // Encoding/decoding round-trips through `ed25519_dalek::SigningKey`'s
    // PEM support, converting to/from the iroh key type.
    fn from_pem<E: serde::de::Error>(pem: &str) -> Result<Self, E> {
        ed25519_dalek::SigningKey::from_pem(pem)
            .map(From::from)
            .map(Self)
    }
    fn to_pem<E: serde::ser::Error>(&self) -> Result<String, E> {
        self.0.secret().to_pem()
    }
}
/// PEM-encoded iroh secret keys, indexed by the address each one derives.
#[derive(Default, Debug, Deserialize, Serialize)]
pub struct IrohKeyStore(BTreeMap<IrohAddress, Pem<IrohSecretKey>>);
impl Map for IrohKeyStore {
    type Key = IrohAddress;
    type Value = Pem<IrohSecretKey>;
    // Keys are rendered with the address's `Display` form.
    fn key_str(key: &Self::Key) -> Result<impl AsRef<str>, Error> {
        Self::key_string(key)
    }
    fn key_string(key: &Self::Key) -> Result<imbl_value::InternedString, Error> {
        Ok(InternedString::from_display(key))
    }
}
impl IrohKeyStore {
    /// Creates an empty key store.
    pub fn new() -> Self {
        Self::default()
    }
    /// Stores `key`, indexed by the address it derives.
    pub fn insert(&mut self, key: IrohSecretKey) {
        let addr = key.iroh_address();
        let pem = Pem::new(key);
        self.0.insert(addr, pem);
    }
}
impl Model<IrohKeyStore> {
    /// Generates a new secret key, persists it, and returns it.
    ///
    /// Fix: the previous body moved `key` into `Pem::new(key)` for the insert
    /// and then returned `Ok(key)` — a use-after-move. Delegating to
    /// [`Self::insert_key`] borrows the key (`Pem::new_ref`) instead, so it
    /// can still be returned to the caller.
    pub fn new_key(&mut self) -> Result<IrohSecretKey, Error> {
        let key = IrohSecretKey::generate();
        self.insert_key(&key)?;
        Ok(key)
    }
    /// Persists `key` under the address it derives.
    pub fn insert_key(&mut self, key: &IrohSecretKey) -> Result<(), Error> {
        self.insert(&key.iroh_address(), Pem::new_ref(key))
    }
    /// Looks up the secret key for `address`, erroring with "not found" if
    /// the store has no entry for it.
    pub fn get_key(&self, address: &IrohAddress) -> Result<IrohSecretKey, Error> {
        self.as_idx(address)
            .or_not_found(lazy_format!("private key for {address}"))?
            .de()
            .map(|k| k.0)
    }
}
/// CLI/RPC handler tree for `iroh` subcommands.
pub fn iroh_api<C: Context>() -> ParentHandler<C> {
    let list_services_handler = from_fn_async(list_services)
        .with_display_serializable()
        .with_custom_display_fn(|handle, result| display_services(handle.params, result))
        .with_about("Display the status of running iroh services")
        .with_call_remote::<CliContext>();
    ParentHandler::new()
        .subcommand("list-services", list_services_handler)
        .subcommand(
            "key",
            key::<C>().with_about("Manage the iroh service key store"),
        )
}
/// Handler tree for `iroh key` subcommands (generate / add / list).
pub fn key<C: Context>() -> ParentHandler<C> {
    let generate = from_fn_async(generate_key)
        .with_about("Generate an iroh service key and add it to the key store")
        .with_call_remote::<CliContext>();
    let add = from_fn_async(add_key)
        .with_about("Add an iroh service key to the key store")
        .with_call_remote::<CliContext>();
    let list = from_fn_async(list_keys)
        .with_custom_display_fn(|_, res| {
            // Plain one-address-per-line output for the CLI.
            res.into_iter().for_each(|addr| println!("{addr}"));
            Ok(())
        })
        .with_about("List iroh services with keys in the key store")
        .with_call_remote::<CliContext>();
    ParentHandler::new()
        .subcommand("generate", generate)
        .subcommand("add", add)
        .subcommand("list", list)
}
/// RPC: generates a new key in the private key store and returns its address.
pub async fn generate_key(ctx: RpcContext) -> Result<IrohAddress, Error> {
    ctx.db
        .mutate(|db| {
            let key = db
                .as_private_mut()
                .as_key_store_mut()
                .as_iroh_mut()
                .new_key()?;
            Ok(key.iroh_address())
        })
        .await
        .result
}
/// Parameters for [`add_key`]: the PEM-encoded secret key to add.
#[derive(Deserialize, Serialize, Parser)]
pub struct AddKeyParams {
    pub key: Pem<IrohSecretKey>,
}
/// RPC: stores a caller-supplied secret key and returns its derived address.
pub async fn add_key(
    ctx: RpcContext,
    AddKeyParams { key }: AddKeyParams,
) -> Result<IrohAddress, Error> {
    // The address is a pure derivation of the key, so compute it up front.
    let addr = key.iroh_address();
    ctx.db
        .mutate(|db| {
            let store = db.as_private_mut().as_key_store_mut().as_iroh_mut();
            store.insert_key(&key.0)
        })
        .await
        .result?;
    Ok(addr)
}
/// RPC: lists the addresses of all keys currently in the key store.
pub async fn list_keys(ctx: RpcContext) -> Result<BTreeSet<IrohAddress>, Error> {
    let db = ctx.db.peek().await;
    db.into_private().into_key_store().into_iroh().keys()
}
pub fn display_services(
params: WithIoFormat<Empty>,
services: BTreeMap<IrohAddress, IrohServiceInfo>,
) -> Result<(), Error> {
use prettytable::*;
if let Some(format) = params.format {
return display_serializable(format, services);
}
let mut table = Table::new();
table.add_row(row![bc => "ADDRESS", "BINDINGS"]);
for (service, info) in services {
let row = row![
&service.to_string(),
&info
.bindings
.into_iter()
.map(|((subdomain, port), addr)| lazy_format!("{subdomain}:{port} -> {addr}"))
.join("; ")
];
table.add_row(row);
}
table.print_tty(false)?;
Ok(())
}
/// Display/serialization info for a running iroh service:
/// `(subdomain, external port) -> local target address`.
///
/// NOTE(review): `IrohService::info()` later in this file constructs this
/// struct with a `state` field that is not declared here — the two are out
/// of sync (WIP); confirm which shape is intended.
#[derive(Debug, Serialize, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct IrohServiceInfo {
    pub bindings: BTreeMap<(InternedString, u16), SocketAddr>,
}
/// RPC: snapshot of all running iroh services from the net controller.
pub async fn list_services(
    ctx: RpcContext,
    _: Empty,
) -> Result<BTreeMap<IrohAddress, IrohServiceInfo>, Error> {
    let iroh = &ctx.net_controller.iroh;
    iroh.list_services().await
}
/// Cheaply-cloneable handle to the shared iroh controller state.
#[derive(Clone)]
pub struct IrohController(Arc<IrohControllerInner>);
struct IrohControllerInner {
    // NOTE(review): `client` is commented out, but `IrohController::service`
    // below still reads `self.0.client` — this cannot compile as-is (WIP).
    // client: Endpoint,
    /// Running services keyed by their public address.
    services: SyncMutex<BTreeMap<IrohAddress, IrohService>>,
}
impl IrohController {
    /// Creates a controller with an empty service table.
    pub fn new() -> Result<Self, Error> {
        Ok(Self(Arc::new(IrohControllerInner {
            services: SyncMutex::new(BTreeMap::new()),
        })))
    }
    /// Returns the running service for `key`, launching one if absent.
    ///
    /// NOTE(review): `self.0.client` does not exist (the field is commented
    /// out on `IrohControllerInner`), so this is WIP and will not compile.
    pub fn service(&self, key: IrohSecretKey) -> Result<IrohService, Error> {
        self.0.services.mutate(|s| {
            use std::collections::btree_map::Entry;
            let addr = key.iroh_address();
            match s.entry(addr) {
                Entry::Occupied(e) => Ok(e.get().clone()),
                Entry::Vacant(e) => Ok(e
                    .insert(IrohService::launch(self.0.client.clone(), key)?)
                    .clone()),
            }
        })
    }
    /// Garbage-collects services whose bindings have all been dropped.
    ///
    /// With `Some(addr)` only that service is checked; with `None` all
    /// services are. A service is removed from the table (and shut down
    /// outside the lock) when its `gc()` reports no live bindings.
    pub async fn gc(&self, addr: Option<IrohAddress>) -> Result<(), Error> {
        if let Some(addr) = addr {
            if let Some(s) = self.0.services.mutate(|s| {
                // `gc()` returns false once every binding is dead.
                let rm = if let Some(s) = s.get(&addr) {
                    !s.gc()
                } else {
                    false
                };
                if rm {
                    s.remove(&addr)
                } else {
                    None
                }
            }) {
                s.shutdown().await
            } else {
                Ok(())
            }
        } else {
            for s in self.0.services.mutate(|s| {
                // Collect the dead services so shutdown happens after the
                // mutex is released.
                let mut rm = Vec::new();
                s.retain(|_, s| {
                    if s.gc() {
                        true
                    } else {
                        rm.push(s.clone());
                        false
                    }
                });
                rm
            }) {
                s.shutdown().await?;
            }
            Ok(())
        }
    }
    /// Snapshot of every running service's info, for `list-services`.
    pub async fn list_services(&self) -> Result<BTreeMap<IrohAddress, IrohServiceInfo>, Error> {
        Ok(self
            .0
            .services
            .peek(|s| s.iter().map(|(a, s)| (a.clone(), s.info())).collect()))
    }
    /// Connects to `addr`:`port`. If the service is hosted locally, dials its
    /// live local TCP target directly; the remote path is unimplemented.
    ///
    /// NOTE(review): looks up `bindings` by bare `u16` port, but
    /// `IrohServiceData` declares bindings keyed by `(InternedString, u16)` —
    /// types do not line up yet (WIP), and the remote branch is `todo!()`.
    pub async fn connect_iroh(
        &self,
        addr: &IrohAddress,
        port: u16,
    ) -> Result<Box<dyn ReadWriter + Unpin + Send + Sync + 'static>, Error> {
        if let Some(target) = self.0.services.peek(|s| {
            s.get(addr).and_then(|s| {
                s.0.bindings.peek(|b| {
                    b.get(&port).and_then(|b| {
                        // Only targets whose keep-alive `Arc` is still held.
                        b.iter()
                            .find(|(_, rc)| rc.strong_count() > 0)
                            .map(|(a, _)| *a)
                    })
                })
            })
        }) {
            Ok(Box::new(
                TcpStream::connect(target)
                    .await
                    .with_kind(ErrorKind::Network)?,
            ))
        } else {
            todo!()
        }
    }
}
/// Handle to one running iroh service; clones share the same backing state.
#[derive(Clone)]
pub struct IrohService(Arc<IrohServiceData>);
/// Shared state behind an [`IrohService`].
///
/// NOTE(review): `launch` below assigns `service` from an
/// `Arc<SyncMutex<Option<_>>>` and builds `bindings` keyed by bare `u16` —
/// both conflict with the types declared here (WIP; confirm intended shapes).
struct IrohServiceData {
    service: Endpoint,
    // (subdomain, external port) -> candidate local targets; a target is
    // live while the `Weak` can still be upgraded.
    bindings: Arc<SyncRwLock<BTreeMap<(InternedString, u16), BTreeMap<SocketAddr, Weak<()>>>>>,
    _thread: NonDetachingJoinHandle<()>,
}
impl IrohService {
    /// Spawns the background task that publishes the service and proxies
    /// incoming streams to live local TCP bindings.
    ///
    /// NOTE(review): this body is copied from the Tor controller and is WIP —
    /// it takes an Arti `IrohClient<TokioRustlsRuntime>` watch, calls
    /// `launch_onion_service_with_hsid`, trims a `.onion` suffix from the
    /// nickname, and handles requests via `tor_hsservice`/`tor_cell` types.
    /// It also constructs `service`/`bindings` with types that conflict with
    /// the `IrohServiceData` declaration above. None of this is iroh yet.
    fn launch(
        mut client: Watch<(usize, IrohClient<TokioRustlsRuntime>)>,
        key: IrohSecretKey,
    ) -> Result<Self, Error> {
        let service = Arc::new(SyncMutex::new(None));
        let bindings = Arc::new(SyncRwLock::new(BTreeMap::<
            u16,
            BTreeMap<SocketAddr, Weak<()>>,
        >::new()));
        Ok(Self(Arc::new(IrohServiceData {
            service: service.clone(),
            bindings: bindings.clone(),
            _thread: tokio::spawn(async move {
                let (bg, mut runner) = BackgroundJobQueue::new();
                runner
                    .run_while(async {
                        // Relaunch the service whenever the client errors or
                        // is recycled (epoch change).
                        loop {
                            if let Err(e) = async {
                                client.wait_for(|(_,c)| c.bootstrap_status().ready_for_traffic()).await;
                                let epoch = client.peek(|(e, c)| {
                                    ensure_code!(c.bootstrap_status().ready_for_traffic(), ErrorKind::Iroh, "client recycled");
                                    Ok::<_, Error>(*e)
                                })?;
                                let (new_service, stream) = client.peek(|(_, c)| {
                                    c.launch_onion_service_with_hsid(
                                        IrohServiceConfigBuilder::default()
                                            .nickname(
                                                key.iroh_address()
                                                    .to_string()
                                                    .trim_end_matches(".onion")
                                                    .parse::<HsNickname>()
                                                    .with_kind(ErrorKind::Iroh)?,
                                            )
                                            .build()
                                            .with_kind(ErrorKind::Iroh)?,
                                        key.clone().0,
                                    )
                                    .with_kind(ErrorKind::Iroh)
                                })?;
                                // Drain status events; currently unused.
                                let mut status_stream = new_service.status_events();
                                bg.add_job(async move {
                                    while let Some(status) = status_stream.next().await {
                                        // TODO: health daemon?
                                    }
                                });
                                service.replace(Some(new_service));
                                let mut stream = tor_hsservice::handle_rend_requests(stream);
                                // Accept requests until the client is recycled.
                                while let Some(req) = tokio::select! {
                                    req = stream.next() => req,
                                    _ = client.wait_for(|(e, _)| *e != epoch) => None
                                } {
                                    bg.add_job({
                                        let bg = bg.clone();
                                        let bindings = bindings.clone();
                                        async move {
                                            if let Err(e) = async {
                                                // Only BEGIN requests are proxied; everything
                                                // else is rejected.
                                                let IncomingStreamRequest::Begin(begin) =
                                                    req.request()
                                                else {
                                                    return req
                                                        .reject(tor_cell::relaycell::msg::End::new_with_reason(
                                                            tor_cell::relaycell::msg::EndReason::DONE,
                                                        ))
                                                        .await
                                                        .with_kind(ErrorKind::Iroh);
                                                };
                                                // Pick any still-live target for the port.
                                                let Some(target) = bindings.peek(|b| {
                                                    b.get(&begin.port()).and_then(|a| {
                                                        a.iter()
                                                            .find(|(_, rc)| rc.strong_count() > 0)
                                                            .map(|(addr, _)| *addr)
                                                    })
                                                }) else {
                                                    return req
                                                        .reject(tor_cell::relaycell::msg::End::new_with_reason(
                                                            tor_cell::relaycell::msg::EndReason::DONE,
                                                        ))
                                                        .await
                                                        .with_kind(ErrorKind::Iroh);
                                                };
                                                // Accept and shuttle bytes both ways until
                                                // either side closes.
                                                bg.add_job(async move {
                                                    if let Err(e) = async {
                                                        let mut outgoing =
                                                            TcpStream::connect(target)
                                                                .await
                                                                .with_kind(ErrorKind::Network)?;
                                                        let mut incoming = req
                                                            .accept(Connected::new_empty())
                                                            .await
                                                            .with_kind(ErrorKind::Iroh)?;
                                                        if let Err(e) =
                                                            tokio::io::copy_bidirectional(
                                                                &mut outgoing,
                                                                &mut incoming,
                                                            )
                                                            .await
                                                        {
                                                            tracing::error!("Iroh Stream Error: {e}");
                                                            tracing::debug!("{e:?}");
                                                        }
                                                        Ok::<_, Error>(())
                                                    }
                                                    .await
                                                    {
                                                        tracing::trace!("Iroh Stream Error: {e}");
                                                        tracing::trace!("{e:?}");
                                                    }
                                                });
                                                Ok::<_, Error>(())
                                            }
                                            .await
                                            {
                                                tracing::trace!("Iroh Request Error: {e}");
                                                tracing::trace!("{e:?}");
                                            }
                                        }
                                    });
                                }
                                Ok::<_, Error>(())
                            }
                            .await
                            {
                                tracing::error!("Iroh Client Error: {e}");
                                tracing::debug!("{e:?}");
                            }
                        }
                    })
                    .await
            })
            .into(),
        })))
    }
    /// Registers proxy bindings `(subdomain, port, target)`, returning one
    /// keep-alive `Arc<()>` per binding; a binding stays live while any clone
    /// of its `Arc` is held (tracked via `Weak` in the table).
    pub fn proxy_all<Rcs: FromIterator<Arc<()>>>(
        &self,
        bindings: impl IntoIterator<Item = (InternedString, u16, SocketAddr)>,
    ) -> Rcs {
        self.0.bindings.mutate(|b| {
            bindings
                .into_iter()
                .map(|(subdomain, port, target)| {
                    let entry = b
                        .entry((subdomain, port))
                        .or_default()
                        .entry(target)
                        .or_default();
                    // Reuse an existing live handle, otherwise mint a new one.
                    if let Some(rc) = entry.upgrade() {
                        rc
                    } else {
                        let rc = Arc::new(());
                        *entry = Arc::downgrade(&rc);
                        rc
                    }
                })
                .collect()
        })
    }
    /// Prunes dead bindings; returns `true` while any binding remains live.
    pub fn gc(&self) -> bool {
        self.0.bindings.mutate(|b| {
            b.retain(|_, targets| {
                targets.retain(|_, rc| rc.strong_count() > 0);
                !targets.is_empty()
            });
            !b.is_empty()
        })
    }
    /// Stops the service: drops the published handle and aborts the
    /// background task.
    pub async fn shutdown(self) -> Result<(), Error> {
        self.0.service.replace(None);
        self.0._thread.abort();
        Ok(())
    }
    /// Current lifecycle state, defaulting to `Bootstrapping` before the
    /// service handle has been published.
    pub fn state(&self) -> IrohServiceState {
        self.0
            .service
            .peek(|s| s.as_ref().map(|s| s.status().state().into()))
            .unwrap_or(IrohServiceState::Bootstrapping)
    }
    /// Snapshot of the first live target per binding, for display.
    ///
    /// NOTE(review): constructs `IrohServiceInfo` with a `state` field that
    /// the struct declared earlier does not have — out of sync (WIP).
    pub fn info(&self) -> IrohServiceInfo {
        IrohServiceInfo {
            state: self.state(),
            bindings: self.0.bindings.peek(|b| {
                b.iter()
                    .filter_map(|(port, b)| {
                        b.iter()
                            .find(|(_, rc)| rc.strong_count() > 0)
                            .map(|(addr, _)| (*port, *addr))
                    })
                    .collect()
            }),
        }
    }
}

View File

@@ -2,14 +2,17 @@ use serde::{Deserialize, Serialize};
use crate::account::AccountInfo;
use crate::net::acme::AcmeCertStore;
use crate::net::iroh::IrohKeyStore;
use crate::net::ssl::CertStore;
use crate::net::tor::OnionStore;
use crate::net::tor::OnionKeyStore;
use crate::prelude::*;
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[model = "Model<Self>"]
pub struct KeyStore {
pub onion: OnionStore,
pub onion: OnionKeyStore,
#[serde(default)]
pub iroh: IrohKeyStore,
pub local_certs: CertStore,
#[serde(default)]
pub acme: AcmeCertStore,
@@ -17,7 +20,8 @@ pub struct KeyStore {
impl KeyStore {
pub fn new(account: &AccountInfo) -> Result<Self, Error> {
let mut res = Self {
onion: OnionStore::new(),
onion: OnionKeyStore::new(),
iroh: IrohKeyStore::new(),
local_certs: CertStore::new(account)?,
acme: AcmeCertStore::new(),
};

View File

@@ -3,34 +3,44 @@ use rpc_toolkit::{Context, HandlerExt, ParentHandler};
pub mod acme;
pub mod dns;
pub mod forward;
pub mod gateway;
pub mod host;
pub mod iroh;
pub mod keys;
pub mod mdns;
pub mod net_controller;
pub mod network_interface;
pub mod service_interface;
pub mod socks;
pub mod ssl;
pub mod static_server;
pub mod tor;
pub mod tunnel;
pub mod utils;
pub mod vhost;
pub mod web_server;
pub mod wifi;
pub fn net<C: Context>() -> ParentHandler<C> {
pub fn net_api<C: Context>() -> ParentHandler<C> {
ParentHandler::new()
.subcommand(
"tor",
tor::tor::<C>().with_about("Tor commands such as list-services, logs, and reset"),
tor::tor_api::<C>().with_about("Tor commands such as list-services, logs, and reset"),
)
.subcommand(
"acme",
acme::acme::<C>().with_about("Setup automatic clearnet certificate acquisition"),
acme::acme_api::<C>().with_about("Setup automatic clearnet certificate acquisition"),
)
.subcommand(
"network-interface",
network_interface::network_interface_api::<C>()
.with_about("View and edit network interface configurations"),
"dns",
dns::dns_api::<C>().with_about("Manage and query DNS"),
)
.subcommand(
"gateway",
gateway::gateway_api::<C>().with_about("View and edit gateway configurations"),
)
.subcommand(
"tunnel",
tunnel::tunnel_api::<C>().with_about("Manage tunnels"),
)
.subcommand(
"vhost",

View File

@@ -9,20 +9,25 @@ use ipnet::IpNet;
use models::{HostId, OptionExt, PackageId};
use tokio::sync::Mutex;
use tokio::task::JoinHandle;
use torut::onion::{OnionAddressV3, TorSecretKeyV3};
use tracing::instrument;
use crate::db::model::public::NetworkInterfaceInfo;
use crate::db::model::Database;
use crate::error::ErrorCollection;
use crate::hostname::Hostname;
use crate::net::dns::DnsController;
use crate::net::forward::LanPortForwardController;
use crate::net::forward::PortForwardController;
use crate::net::gateway::{
AndFilter, DynInterfaceFilter, IdFilter, InterfaceFilter, NetworkInterfaceController, OrFilter,
PublicFilter, SecureFilter,
};
use crate::net::host::address::HostAddress;
use crate::net::host::binding::{AddSslOptions, BindId, BindOptions};
use crate::net::host::{host_for, Host, Hosts};
use crate::net::network_interface::NetworkInterfaceController;
use crate::net::iroh::IrohController;
use crate::net::service_interface::{HostnameInfo, IpHostname, OnionHostname};
use crate::net::tor::TorController;
use crate::net::socks::SocksController;
use crate::net::tor::{OnionAddress, TorController, TorSecretKey};
use crate::net::utils::ipv6_is_local;
use crate::net::vhost::{AlpnInfo, TargetInfo, VHostController};
use crate::prelude::*;
@@ -33,10 +38,12 @@ use crate::HOST_IP;
pub struct NetController {
pub(crate) db: TypedPatchDb<Database>,
pub(super) tor: TorController,
pub(super) iroh: IrohController,
pub(super) vhost: VHostController,
pub(crate) net_iface: Arc<NetworkInterfaceController>,
pub(super) dns: DnsController,
pub(super) forward: LanPortForwardController,
pub(super) forward: PortForwardController,
pub(super) socks: SocksController,
pub(super) server_hostnames: Vec<Option<InternedString>>,
pub(crate) callbacks: Arc<ServiceCallbacks>,
}
@@ -44,18 +51,22 @@ pub struct NetController {
impl NetController {
pub async fn init(
db: TypedPatchDb<Database>,
tor_control: SocketAddr,
tor_socks: SocketAddr,
hostname: &Hostname,
socks_listen: SocketAddr,
) -> Result<Self, Error> {
let net_iface = Arc::new(NetworkInterfaceController::new(db.clone()));
let tor = TorController::new()?;
let iroh = IrohController::new()?;
let socks = SocksController::new(socks_listen, tor.clone())?;
Ok(Self {
db: db.clone(),
tor: TorController::new(tor_control, tor_socks),
vhost: VHostController::new(db, net_iface.clone()),
dns: DnsController::init(net_iface.lxcbr_status()).await?,
forward: LanPortForwardController::new(net_iface.subscribe()),
tor,
iroh,
vhost: VHostController::new(db.clone(), net_iface.clone()),
dns: DnsController::init(db, &net_iface.watcher).await?,
forward: PortForwardController::new(net_iface.watcher.subscribe()),
net_iface,
socks,
server_hostnames: vec![
// LAN IP
None,
@@ -78,7 +89,7 @@ impl NetController {
package: PackageId,
ip: Ipv4Addr,
) -> Result<NetService, Error> {
let dns = self.dns.add(Some(package.clone()), ip).await?;
let dns = self.dns.add_service(Some(package.clone()), ip)?;
let res = NetService::new(NetServiceData {
id: Some(package),
@@ -92,7 +103,7 @@ impl NetController {
}
pub async fn os_bindings(self: &Arc<Self>) -> Result<NetService, Error> {
let dns = self.dns.add(None, HOST_IP.into()).await?;
let dns = self.dns.add_service(None, HOST_IP.into())?;
let service = NetService::new(NetServiceData {
id: None,
@@ -126,9 +137,10 @@ impl NetController {
#[derive(Default, Debug)]
struct HostBinds {
forwards: BTreeMap<u16, (SocketAddr, bool, Arc<()>)>,
forwards: BTreeMap<u16, (SocketAddr, DynInterfaceFilter, Arc<()>)>,
vhosts: BTreeMap<(Option<InternedString>, u16), (TargetInfo, Arc<()>)>,
tor: BTreeMap<OnionAddressV3, (OrdMap<u16, SocketAddr>, Vec<Arc<()>>)>,
private_dns: BTreeMap<InternedString, Arc<()>>,
tor: BTreeMap<OnionAddress, (OrdMap<u16, SocketAddr>, Vec<Arc<()>>)>,
}
pub struct NetServiceData {
@@ -217,9 +229,10 @@ impl NetServiceData {
}
async fn update(&mut self, ctrl: &NetController, id: HostId, host: Host) -> Result<(), Error> {
let mut forwards: BTreeMap<u16, (SocketAddr, bool)> = BTreeMap::new();
let mut forwards: BTreeMap<u16, (SocketAddr, DynInterfaceFilter)> = BTreeMap::new();
let mut vhosts: BTreeMap<(Option<InternedString>, u16), TargetInfo> = BTreeMap::new();
let mut tor: BTreeMap<OnionAddressV3, (TorSecretKeyV3, OrdMap<u16, SocketAddr>)> =
let mut private_dns: BTreeSet<InternedString> = BTreeSet::new();
let mut tor: BTreeMap<OnionAddress, (TorSecretKey, OrdMap<u16, SocketAddr>)> =
BTreeMap::new();
let mut hostname_info: BTreeMap<u16, Vec<HostnameInfo>> = BTreeMap::new();
let binds = self.binds.entry(id.clone()).or_default();
@@ -228,7 +241,7 @@ impl NetServiceData {
// LAN
let server_info = peek.as_public().as_server_info();
let net_ifaces = ctrl.net_iface.ip_info();
let net_ifaces = ctrl.net_iface.watcher.ip_info();
let hostname = server_info.as_hostname().de()?;
for (port, bind) in &host.bindings {
if !bind.enabled {
@@ -255,7 +268,7 @@ impl NetServiceData {
vhosts.insert(
(hostname, external),
TargetInfo {
public: bind.net.public,
filter: bind.net.clone().into_dyn(),
acme: None,
addr,
connect_ssl: connect_ssl.clone(),
@@ -270,80 +283,135 @@ impl NetServiceData {
vhosts.insert(
(Some(hostname), external),
TargetInfo {
public: false,
filter: OrFilter(
IdFilter(
NetworkInterfaceInfo::loopback().0.clone(),
),
IdFilter(
NetworkInterfaceInfo::lxc_bridge().0.clone(),
),
)
.into_dyn(),
acme: None,
addr,
connect_ssl: connect_ssl.clone(),
},
);
); // TODO: wrap onion ssl stream directly in tor ctrl
}
}
HostAddress::Domain {
address,
public,
acme,
} => {
HostAddress::Domain { address, public } => {
if hostnames.insert(address.clone()) {
let address = Some(address.clone());
if ssl.preferred_external_port == 443 {
if public && bind.net.public {
if let Some(public) = &public {
vhosts.insert(
(address.clone(), 5443),
TargetInfo {
public: false,
acme: acme.clone(),
filter: AndFilter(
bind.net.clone(),
AndFilter(
IdFilter(public.gateway.clone()),
PublicFilter { public: false },
),
)
.into_dyn(),
acme: public.acme.clone(),
addr,
connect_ssl: connect_ssl.clone(),
},
);
vhosts.insert(
(address.clone(), 443),
TargetInfo {
filter: AndFilter(
bind.net.clone(),
OrFilter(
IdFilter(public.gateway.clone()),
PublicFilter { public: false },
),
)
.into_dyn(),
acme: public.acme.clone(),
addr,
connect_ssl: connect_ssl.clone(),
},
);
} else {
vhosts.insert(
(address.clone(), 443),
TargetInfo {
filter: AndFilter(
bind.net.clone(),
PublicFilter { public: false },
)
.into_dyn(),
acme: None,
addr,
connect_ssl: connect_ssl.clone(),
},
);
}
vhosts.insert(
(address.clone(), 443),
TargetInfo {
public: public && bind.net.public,
acme,
addr,
connect_ssl: connect_ssl.clone(),
},
);
} else {
vhosts.insert(
(address.clone(), external),
TargetInfo {
public: public && bind.net.public,
acme,
addr,
connect_ssl: connect_ssl.clone(),
},
);
if let Some(public) = public {
vhosts.insert(
(address.clone(), external),
TargetInfo {
filter: AndFilter(
bind.net.clone(),
IdFilter(public.gateway.clone()),
)
.into_dyn(),
acme: public.acme.clone(),
addr,
connect_ssl: connect_ssl.clone(),
},
);
} else {
vhosts.insert(
(address.clone(), external),
TargetInfo {
filter: bind.net.clone().into_dyn(),
acme: None,
addr,
connect_ssl: connect_ssl.clone(),
},
);
}
}
}
}
}
}
}
if let Some(security) = bind.options.secure {
if bind.options.add_ssl.is_some() && security.ssl {
// doesn't make sense to have 2 listening ports, both with ssl
} else {
let external = bind.net.assigned_port.or_not_found("assigned lan port")?;
forwards.insert(external, ((self.ip, *port).into(), bind.net.public));
}
if bind
.options
.secure
.map_or(true, |s| !(s.ssl && bind.options.add_ssl.is_some()))
{
let external = bind.net.assigned_port.or_not_found("assigned lan port")?;
forwards.insert(
external,
(
(self.ip, *port).into(),
AndFilter(
SecureFilter {
secure: bind.options.secure.is_some(),
},
bind.net.clone(),
)
.into_dyn(),
),
);
}
let mut bind_hostname_info: Vec<HostnameInfo> =
hostname_info.remove(port).unwrap_or_default();
for (interface, public, ip_info) in
net_ifaces.iter().filter_map(|(interface, info)| {
if let Some(ip_info) = &info.ip_info {
Some((interface, info.inbound(), ip_info))
} else {
None
}
})
for (interface, info) in net_ifaces
.iter()
.filter(|(id, info)| bind.net.filter(id, info))
{
if !public {
if !info.public() {
bind_hostname_info.push(HostnameInfo::Ip {
network_interface_id: interface.clone(),
gateway_id: interface.clone(),
public: false,
hostname: IpHostname::Local {
value: InternedString::from_display(&{
@@ -357,47 +425,42 @@ impl NetServiceData {
}
for address in host.addresses() {
if let HostAddress::Domain {
address,
public: domain_public,
..
address, public, ..
} = address
{
if !public || (domain_public && bind.net.public) {
if bind
.options
.add_ssl
.as_ref()
.map_or(false, |ssl| ssl.preferred_external_port == 443)
{
bind_hostname_info.push(HostnameInfo::Ip {
network_interface_id: interface.clone(),
public: public && domain_public && bind.net.public, // TODO: check if port forward is active
hostname: IpHostname::Domain {
domain: address.clone(),
subdomain: None,
port: None,
ssl_port: Some(443),
},
});
} else {
bind_hostname_info.push(HostnameInfo::Ip {
network_interface_id: interface.clone(),
public,
hostname: IpHostname::Domain {
domain: address.clone(),
subdomain: None,
port: bind.net.assigned_port,
ssl_port: bind.net.assigned_ssl_port,
},
});
}
if bind
.options
.add_ssl
.as_ref()
.map_or(false, |ssl| ssl.preferred_external_port == 443)
{
bind_hostname_info.push(HostnameInfo::Ip {
gateway_id: interface.clone(),
public: public.is_some(),
hostname: IpHostname::Domain {
value: address.clone(),
port: None,
ssl_port: Some(443),
},
});
} else {
bind_hostname_info.push(HostnameInfo::Ip {
gateway_id: interface.clone(),
public: public.is_some(),
hostname: IpHostname::Domain {
value: address.clone(),
port: bind.net.assigned_port,
ssl_port: bind.net.assigned_ssl_port,
},
});
}
}
}
if !public || bind.net.public {
if let Some(ip_info) = &info.ip_info {
let public = info.public();
if let Some(wan_ip) = ip_info.wan_ip.filter(|_| public) {
bind_hostname_info.push(HostnameInfo::Ip {
network_interface_id: interface.clone(),
gateway_id: interface.clone(),
public,
hostname: IpHostname::Ipv4 {
value: wan_ip,
@@ -411,7 +474,7 @@ impl NetServiceData {
IpNet::V4(net) => {
if !public {
bind_hostname_info.push(HostnameInfo::Ip {
network_interface_id: interface.clone(),
gateway_id: interface.clone(),
public,
hostname: IpHostname::Ipv4 {
value: net.addr(),
@@ -423,7 +486,7 @@ impl NetServiceData {
}
IpNet::V6(net) => {
bind_hostname_info.push(HostnameInfo::Ip {
network_interface_id: interface.clone(),
gateway_id: interface.clone(),
public: public && !ipv6_is_local(net.addr()),
hostname: IpHostname::Ipv6 {
value: net.addr(),
@@ -438,6 +501,7 @@ impl NetServiceData {
}
}
hostname_info.insert(*port, bind_hostname_info);
private_dns.append(&mut hostnames);
}
}
@@ -487,7 +551,7 @@ impl NetServiceData {
.as_key_store()
.as_onion()
.get_key(tor_addr)?;
tor.insert(key.public().get_onion_address(), (key, tor_binds.clone()));
tor.insert(key.onion_address(), (key, tor_binds.clone()));
for (internal, ports) in &tor_hostname_ports {
let mut bind_hostname_info = hostname_info.remove(internal).unwrap_or_default();
bind_hostname_info.push(HostnameInfo::Onion {
@@ -509,8 +573,8 @@ impl NetServiceData {
.collect::<BTreeSet<_>>();
for external in all {
let mut prev = binds.forwards.remove(&external);
if let Some((internal, public)) = forwards.remove(&external) {
prev = prev.filter(|(i, p, _)| i == &internal && *p == public);
if let Some((internal, filter)) = forwards.remove(&external) {
prev = prev.filter(|(i, f, _)| i == &internal && *f == filter);
binds.forwards.insert(
external,
if let Some(prev) = prev {
@@ -518,8 +582,8 @@ impl NetServiceData {
} else {
(
internal,
public,
ctrl.forward.add(external, public, internal).await?,
filter.clone(),
ctrl.forward.add(external, filter, internal).await?,
)
},
);
@@ -553,6 +617,22 @@ impl NetServiceData {
}
}
let mut rm = BTreeSet::new();
binds.private_dns.retain(|fqdn, _| {
if private_dns.remove(fqdn) {
true
} else {
rm.insert(fqdn.clone());
false
}
});
for fqdn in private_dns {
binds
.private_dns
.insert(fqdn.clone(), ctrl.dns.add_private_domain(fqdn)?);
}
ctrl.dns.gc_private_domains(&rm)?;
let all = binds
.tor
.keys()
@@ -568,17 +648,15 @@ impl NetServiceData {
if let Some(prev) = prev {
prev
} else {
let rcs = ctrl
.tor
.add(key, tor_binds.iter().map(|(k, v)| (*k, *v)).collect())
.await?;
let service = ctrl.tor.service(key)?;
let rcs = service.proxy_all(tor_binds.iter().map(|(k, v)| (*k, *v)));
(tor_binds, rcs)
},
);
} else {
if let Some((_, rc)) = prev {
drop(rc);
ctrl.tor.gc(Some(onion), None).await?;
ctrl.tor.gc(Some(onion)).await?;
}
}
}
@@ -662,7 +740,7 @@ impl NetService {
}
fn new(data: NetServiceData) -> Result<Self, Error> {
let mut ip_info = data.net_controller()?.net_iface.subscribe();
let mut ip_info = data.net_controller()?.net_iface.watcher.subscribe();
let data = Arc::new(Mutex::new(data));
let thread_data = data.clone();
let sync_task = tokio::spawn(async move {

View File

@@ -1,8 +1,7 @@
use std::net::{Ipv4Addr, Ipv6Addr};
use imbl_value::InternedString;
use lazy_format::lazy_format;
use models::{HostId, ServiceInterfaceId};
use models::{GatewayId, HostId, ServiceInterfaceId};
use serde::{Deserialize, Serialize};
use ts_rs::TS;
@@ -14,7 +13,7 @@ use ts_rs::TS;
pub enum HostnameInfo {
Ip {
#[ts(type = "string")]
network_interface_id: InternedString,
gateway_id: GatewayId,
public: bool,
hostname: IpHostname,
},
@@ -72,9 +71,7 @@ pub enum IpHostname {
},
Domain {
#[ts(type = "string")]
domain: InternedString,
#[ts(type = "string | null")]
subdomain: Option<InternedString>,
value: InternedString,
port: Option<u16>,
ssl_port: Option<u16>,
},
@@ -85,15 +82,7 @@ impl IpHostname {
Self::Ipv4 { value, .. } => InternedString::from_display(value),
Self::Ipv6 { value, .. } => InternedString::from_display(value),
Self::Local { value, .. } => value.clone(),
Self::Domain {
domain, subdomain, ..
} => {
if let Some(subdomain) = subdomain {
InternedString::from_display(&lazy_format!("{subdomain}.{domain}"))
} else {
domain.clone()
}
}
Self::Domain { value, .. } => value.clone(),
}
}
}

View File

@@ -0,0 +1,169 @@
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
use std::sync::Arc;
use helpers::NonDetachingJoinHandle;
use socks5_impl::protocol::{Address, Reply};
use socks5_impl::server::auth::NoAuth;
use socks5_impl::server::{AuthAdaptor, ClientConnection, Server};
use tokio::net::{TcpListener, TcpStream};
use crate::net::tor::TorController;
use crate::prelude::*;
use crate::util::actor::background::BackgroundJobQueue;
use crate::HOST_IP;
pub const DEFAULT_SOCKS_LISTEN: SocketAddr = SocketAddr::V4(SocketAddrV4::new(
Ipv4Addr::new(HOST_IP[0], HOST_IP[1], HOST_IP[2], HOST_IP[3]),
9050,
));
pub struct SocksController {
_thread: NonDetachingJoinHandle<()>,
}
impl SocksController {
pub fn new(listen: SocketAddr, tor: TorController) -> Result<Self, Error> {
let auth: AuthAdaptor<()> = Arc::new(NoAuth);
let listener = TcpListener::from_std(
mio::net::TcpListener::bind(listen)
.with_kind(ErrorKind::Network)?
.into(),
)
.with_kind(ErrorKind::Network)?;
Ok(Self {
_thread: tokio::spawn(async move {
let (bg, mut runner) = BackgroundJobQueue::new();
runner
.run_while(async {
let server = Server::new(listener, auth);
loop {
match server.accept().await {
Ok((stream, _)) => {
let tor = tor.clone();
bg.add_job(async move {
if let Err(e) = async {
match stream
.authenticate()
.await
.with_kind(ErrorKind::Network)?
.0
.wait_request()
.await
.with_kind(ErrorKind::Network)?
{
ClientConnection::Connect(
reply,
Address::DomainAddress(domain, port),
) if domain.ends_with(".onion") => {
if let Ok(mut target) = tor
.connect_onion(&domain.parse()?, port)
.await
{
let mut sock = reply
.reply(
Reply::Succeeded,
Address::unspecified(),
)
.await
.with_kind(ErrorKind::Network)?;
tokio::io::copy_bidirectional(
&mut sock,
&mut target,
)
.await
.with_kind(ErrorKind::Network)?;
} else {
let mut sock = reply
.reply(
Reply::HostUnreachable,
Address::unspecified(),
)
.await
.with_kind(ErrorKind::Network)?;
sock.shutdown()
.await
.with_kind(ErrorKind::Network)?;
}
}
ClientConnection::Connect(reply, addr) => {
if let Ok(mut target) = match addr {
Address::DomainAddress(domain, port) => {
TcpStream::connect((domain, port)).await
}
Address::SocketAddress(addr) => {
TcpStream::connect(addr).await
}
} {
let mut sock = reply
.reply(
Reply::Succeeded,
Address::unspecified(),
)
.await
.with_kind(ErrorKind::Network)?;
tokio::io::copy_bidirectional(
&mut sock,
&mut target,
)
.await
.with_kind(ErrorKind::Network)?;
} else {
let mut sock = reply
.reply(
Reply::HostUnreachable,
Address::unspecified(),
)
.await
.with_kind(ErrorKind::Network)?;
sock.shutdown()
.await
.with_kind(ErrorKind::Network)?;
}
}
ClientConnection::Bind(bind, _) => {
let mut sock = bind
.reply(
Reply::CommandNotSupported,
Address::unspecified(),
)
.await
.with_kind(ErrorKind::Network)?;
sock.shutdown()
.await
.with_kind(ErrorKind::Network)?;
}
ClientConnection::UdpAssociate(associate, _) => {
let mut sock = associate
.reply(
Reply::CommandNotSupported,
Address::unspecified(),
)
.await
.with_kind(ErrorKind::Network)?;
sock.shutdown()
.await
.with_kind(ErrorKind::Network)?;
}
}
Ok::<_, Error>(())
}
.await
{
tracing::error!("SOCKS5 Stream Error: {e}");
tracing::debug!("{e:?}");
}
});
}
Err(e) => {
tracing::error!("SOCKS5 TCP Accept Error: {e}");
tracing::debug!("{e:?}");
}
}
}
})
.await;
})
.into(),
})
}
}

View File

@@ -1,4 +1,4 @@
use std::cmp::{min, Ordering};
use std::cmp::{Ordering, min};
use std::collections::{BTreeMap, BTreeSet};
use std::net::IpAddr;
use std::path::Path;
@@ -13,18 +13,18 @@ use openssl::ec::{EcGroup, EcKey};
use openssl::hash::MessageDigest;
use openssl::nid::Nid;
use openssl::pkey::{PKey, Private};
use openssl::x509::{X509Builder, X509Extension, X509NameBuilder, X509};
use openssl::x509::{X509, X509Builder, X509Extension, X509NameBuilder};
use openssl::*;
use patch_db::HasModel;
use serde::{Deserialize, Serialize};
use tracing::instrument;
use crate::SOURCE_DATE;
use crate::account::AccountInfo;
use crate::hostname::Hostname;
use crate::init::check_time_is_synchronized;
use crate::prelude::*;
use crate::util::serde::Pem;
use crate::SOURCE_DATE;
#[derive(Debug, Deserialize, Serialize, HasModel)]
#[model = "Model<Self>"]

View File

@@ -6,11 +6,11 @@ use std::sync::Arc;
use std::time::UNIX_EPOCH;
use async_compression::tokio::bufread::GzipEncoder;
use axum::Router;
use axum::body::Body;
use axum::extract::{self as x, Request};
use axum::response::{Redirect, Response};
use axum::routing::{any, get};
use axum::Router;
use base64::display::Base64Display;
use digest::Digest;
use futures::future::ready;
@@ -37,12 +37,12 @@ use crate::middleware::auth::{Auth, HasValidSession};
use crate::middleware::cors::Cors;
use crate::middleware::db::SyncDb;
use crate::prelude::*;
use crate::registry::signer::commitment::merkle_archive::MerkleArchiveCommitment;
use crate::rpc_continuations::{Guid, RpcContinuations};
use crate::s9pk::S9pk;
use crate::s9pk::merkle_archive::source::FileSource;
use crate::s9pk::merkle_archive::source::http::HttpSource;
use crate::s9pk::merkle_archive::source::multi_cursor_file::MultiCursorFile;
use crate::s9pk::merkle_archive::source::FileSource;
use crate::s9pk::S9pk;
use crate::sign::commitment::merkle_archive::MerkleArchiveCommitment;
use crate::util::io::open_file;
use crate::util::net::SyncBody;
use crate::util::serde::BASE64;
@@ -55,10 +55,10 @@ const INTERNAL_SERVER_ERROR: &[u8] = b"Internal Server Error";
const PROXY_STRIP_HEADERS: &[&str] = &["cookie", "host", "origin", "referer", "user-agent"];
#[cfg(all(feature = "daemon", not(feature = "test")))]
#[cfg(all(feature = "startd", not(feature = "test")))]
const EMBEDDED_UIS: Dir<'_> =
include_dir::include_dir!("$CARGO_MANIFEST_DIR/../../web/dist/static");
#[cfg(not(all(feature = "daemon", not(feature = "test"))))]
#[cfg(not(all(feature = "startd", not(feature = "test"))))]
const EMBEDDED_UIS: Dir<'_> = Dir::new("", &[]);
#[derive(Clone)]

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,125 @@
use clap::Parser;
use imbl_value::InternedString;
use models::GatewayId;
use rpc_toolkit::{Context, HandlerExt, ParentHandler, from_fn_async};
use serde::{Deserialize, Serialize};
use tokio::process::Command;
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::db::model::public::NetworkInterfaceType;
use crate::prelude::*;
use crate::util::Invoke;
use crate::util::io::{TmpDir, write_file_atomic};
/// RPC/CLI surface for managing wireguard tunnels: `tunnel add` / `tunnel remove`.
pub fn tunnel_api<C: Context>() -> ParentHandler<C> {
    let add = from_fn_async(add_tunnel)
        .with_about("Add a new tunnel")
        .with_call_remote::<CliContext>();
    let remove = from_fn_async(remove_tunnel)
        .no_display()
        .with_about("Remove a tunnel")
        .with_call_remote::<CliContext>();
    ParentHandler::new()
        .subcommand("add", add)
        .subcommand("remove", remove)
}
/// Arguments for `tunnel add`: a wireguard config plus gateway metadata.
#[derive(Debug, Clone, Deserialize, Serialize, Parser, TS)]
#[ts(export)]
pub struct AddTunnelParams {
    // Human-readable name assigned to the resulting gateway.
    #[ts(type = "string")]
    name: InternedString,
    // Full wireguard config file contents, imported via `nmcli connection import`.
    config: String,
    // Whether the new gateway is marked public.
    public: bool,
}
/// Import a wireguard config as a new network gateway.
///
/// Picks the first unused `wgN` interface name, imports the config through
/// `nmcli`, waits for the interface to appear, then records the requested
/// `public` flag and display `name`. Returns the new gateway id.
///
/// # Errors
/// Fails if the db read, config write, `nmcli` invocation, or any
/// net-controller update fails, or if the interface watch channel closes
/// before the interface appears.
pub async fn add_tunnel(
    ctx: RpcContext,
    AddTunnelParams {
        name,
        config,
        public,
    }: AddTunnelParams,
) -> Result<GatewayId, Error> {
    let existing = ctx
        .db
        .peek()
        .await
        .into_public()
        .into_server_info()
        .into_network()
        .into_gateways()
        .keys()?;
    // Find the first wgN name not already used by an existing gateway.
    let mut iface = GatewayId::from("wg0");
    for id in 1.. {
        if !existing.contains(&iface) {
            break;
        }
        iface = InternedString::from_display(&lazy_format!("wg{id}")).into();
    }
    let tmpdir = TmpDir::new().await?;
    let conf = tmpdir.join(&iface).with_extension("conf");
    write_file_atomic(&conf, &config).await?;
    // Subscribe before importing so the interface's appearance can't be missed.
    let mut ifaces = ctx.net_controller.net_iface.watcher.subscribe();
    let import_res = Command::new("nmcli")
        .arg("connection")
        .arg("import")
        .arg("type")
        .arg("wireguard")
        .arg("file")
        .arg(&conf)
        .invoke(ErrorKind::Network)
        .await;
    // Clean up the temp config even if the import failed (previously a
    // failed import left the tmpdir undeleted until drop).
    tmpdir.delete().await?;
    import_res?;
    // wait_for returns Err if the watch channel is closed; this Result was
    // previously dropped silently.
    ifaces
        .wait_for(|ifaces| ifaces.contains_key(&iface))
        .await
        .with_kind(ErrorKind::Network)?;
    ctx.net_controller
        .net_iface
        .set_public(&iface, Some(public))
        .await?;
    ctx.net_controller.net_iface.set_name(&iface, &name).await?;
    Ok(iface)
}
/// Arguments for `tunnel remove`.
#[derive(Debug, Clone, Deserialize, Serialize, Parser, TS)]
#[ts(export)]
pub struct RemoveTunnelParams {
    // Gateway id of the tunnel interface to delete (e.g. "wg0").
    id: GatewayId,
}
/// Remove a previously added wireguard tunnel by gateway id.
///
/// Removal is idempotent: if the gateway is already gone (or has no ip
/// info), returns `Ok(())`. Refuses to delete an interface whose device
/// type is not wireguard, so only tunnels created via `tunnel add` can be
/// removed through this endpoint.
pub async fn remove_tunnel(
    ctx: RpcContext,
    RemoveTunnelParams { id }: RemoveTunnelParams,
) -> Result<(), Error> {
    let Some(existing) = ctx
        .db
        .peek()
        .await
        .into_public()
        .into_server_info()
        .into_network()
        .into_gateways()
        .into_idx(&id)
        .and_then(|e| e.into_ip_info().transpose())
    else {
        // Nothing to remove.
        return Ok(());
    };
    if existing.as_device_type().de()? != Some(NetworkInterfaceType::Wireguard) {
        // Message previously said "is not a proxy" — copy-paste from the
        // proxy code path; this check guards tunnel (wireguard) removal.
        return Err(Error::new(
            eyre!("network interface {id} is not a wireguard tunnel"),
            ErrorKind::InvalidRequest,
        ));
    }
    ctx.net_controller.net_iface.delete_iface(&id).await?;
    Ok(())
}

View File

@@ -2,7 +2,7 @@ use std::collections::{BTreeMap, BTreeSet};
use std::net::{IpAddr, SocketAddr};
use std::sync::{Arc, Weak};
use async_acme::acme::{Identifier, ACME_TLS_ALPN_NAME};
use async_acme::acme::{ACME_TLS_ALPN_NAME, Identifier};
use axum::body::Body;
use axum::extract::Request;
use axum::response::Response;
@@ -10,9 +10,11 @@ use color_eyre::eyre::eyre;
use futures::FutureExt;
use helpers::NonDetachingJoinHandle;
use http::Uri;
use imbl::OrdMap;
use imbl_value::InternedString;
use models::ResultExt;
use rpc_toolkit::{from_fn, Context, HandlerArgs, HandlerExt, ParentHandler};
use itertools::Itertools;
use models::{GatewayId, ResultExt};
use rpc_toolkit::{Context, HandlerArgs, HandlerExt, ParentHandler, from_fn};
use serde::{Deserialize, Serialize};
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;
@@ -25,21 +27,24 @@ use tokio_rustls::rustls::server::{Acceptor, ResolvesServerCert};
use tokio_rustls::rustls::sign::CertifiedKey;
use tokio_rustls::rustls::{RootCertStore, ServerConfig};
use tokio_rustls::{LazyConfigAcceptor, TlsConnector};
use tokio_stream::wrappers::WatchStream;
use tokio_stream::StreamExt;
use tokio_stream::wrappers::WatchStream;
use tracing::instrument;
use ts_rs::TS;
use crate::context::{CliContext, RpcContext};
use crate::db::model::Database;
use crate::db::model::public::NetworkInterfaceInfo;
use crate::net::acme::{AcmeCertCache, AcmeProvider};
use crate::net::network_interface::{
Accepted, NetworkInterfaceController, NetworkInterfaceListener,
use crate::net::gateway::{
Accepted, AnyFilter, DynInterfaceFilter, InterfaceFilter, NetworkInterfaceController,
NetworkInterfaceListener,
};
use crate::net::static_server::server_error;
use crate::prelude::*;
use crate::util::collections::EqSet;
use crate::util::io::BackTrackingIO;
use crate::util::serde::{display_serializable, HandlerExtSerde, MaybeUtf8String};
use crate::util::serde::{HandlerExtSerde, MaybeUtf8String, display_serializable};
use crate::util::sync::SyncMutex;
pub fn vhost_api<C: Context>() -> ParentHandler<C> {
@@ -51,12 +56,13 @@ pub fn vhost_api<C: Context>() -> ParentHandler<C> {
use prettytable::*;
if let Some(format) = params.format {
display_serializable(format, res);
display_serializable(format, res)?;
return Ok::<_, Error>(());
}
let mut table = Table::new();
table.add_row(row![bc => "FROM", "TO", "PUBLIC", "ACME", "CONNECT SSL", "ACTIVE"]);
table
.add_row(row![bc => "FROM", "TO", "GATEWAYS", "ACME", "CONNECT SSL", "ACTIVE"]);
for (external, targets) in res {
for (host, targets) in targets {
@@ -68,7 +74,7 @@ pub fn vhost_api<C: Context>() -> ParentHandler<C> {
external.0
),
target.addr,
target.public,
target.gateways.iter().join(", "),
target.acme.as_ref().map(|a| a.0.as_str()).unwrap_or("NONE"),
target.connect_ssl.is_ok(),
idx == 0
@@ -117,12 +123,7 @@ impl VHostController {
&self,
hostname: Option<InternedString>,
external: u16,
TargetInfo {
public,
acme,
addr,
connect_ssl,
}: TargetInfo,
target: TargetInfo,
) -> Result<Arc<()>, Error> {
self.servers.mutate(|writable| {
let server = if let Some(server) = writable.remove(&external) {
@@ -136,15 +137,7 @@ impl VHostController {
self.acme_tls_alpn_cache.clone(),
)?
};
let rc = server.add(
hostname,
TargetInfo {
public,
acme,
addr,
connect_ssl,
},
);
let rc = server.add(hostname, target);
writable.insert(external, server);
Ok(rc?)
})
@@ -152,8 +145,9 @@ impl VHostController {
pub fn dump_table(
&self,
) -> BTreeMap<JsonKey<u16>, BTreeMap<JsonKey<Option<InternedString>>, BTreeSet<TargetInfo>>>
) -> BTreeMap<JsonKey<u16>, BTreeMap<JsonKey<Option<InternedString>>, EqSet<ShowTargetInfo>>>
{
let ip_info = self.interfaces.watcher.ip_info();
self.servers.peek(|s| {
s.iter()
.map(|(k, v)| {
@@ -167,8 +161,7 @@ impl VHostController {
JsonKey::new(k.clone()),
v.iter()
.filter(|(_, v)| v.strong_count() > 0)
.map(|(k, _)| k)
.cloned()
.map(|(k, _)| ShowTargetInfo::new(k.clone(), &ip_info))
.collect(),
)
})
@@ -192,14 +185,45 @@ impl VHostController {
}
}
#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord)]
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub struct TargetInfo {
pub public: bool,
pub filter: DynInterfaceFilter,
pub acme: Option<AcmeProvider>,
pub addr: SocketAddr,
pub connect_ssl: Result<(), AlpnInfo>, // Ok: yes, connect using ssl, pass through alpn; Err: connect tcp, use provided strategy for alpn
}
/// Serializable view of a `TargetInfo` for `vhost` table dumps: the opaque
/// interface filter is resolved into the concrete set of gateway ids it
/// matched at dump time.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Ord)]
pub struct ShowTargetInfo {
    // Gateways accepted by the target's filter when the dump was taken.
    pub gateways: BTreeSet<GatewayId>,
    pub acme: Option<AcmeProvider>,
    pub addr: SocketAddr,
    pub connect_ssl: Result<(), AlpnInfo>, // Ok: yes, connect using ssl, pass through alpn; Err: connect tcp, use provided strategy for alpn
}
impl ShowTargetInfo {
    /// Build the display form of a [`TargetInfo`] by evaluating its interface
    /// filter against the current gateway table, keeping the ids it accepts.
    pub fn new(
        target: TargetInfo,
        ip_info: &OrdMap<GatewayId, NetworkInterfaceInfo>,
    ) -> Self {
        let TargetInfo {
            filter,
            acme,
            addr,
            connect_ssl,
        } = target;
        let gateways = ip_info
            .iter()
            .filter(|&(id, info)| filter.filter(id, info))
            .map(|(id, _)| id.clone())
            .collect();
        ShowTargetInfo {
            gateways,
            acme,
            addr,
            connect_ssl,
        }
    }
}
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize, TS)]
#[serde(rename_all = "camelCase")]
#[ts(export)]
@@ -222,6 +246,21 @@ struct VHostServer {
_thread: NonDetachingJoinHandle<()>,
}
impl<'a> From<&'a BTreeMap<Option<InternedString>, BTreeMap<TargetInfo, Weak<()>>>> for AnyFilter {
    /// Collapse the hostname → target mapping into one composite filter,
    /// taking only filters from targets that still have live references.
    fn from(value: &'a BTreeMap<Option<InternedString>, BTreeMap<TargetInfo, Weak<()>>>) -> Self {
        let live_filters = value
            .values()
            .flatten()
            .filter(|(_, rc)| rc.strong_count() > 0)
            .map(|(target, _)| target.filter.clone());
        Self(live_filters.collect())
    }
}
impl VHostServer {
async fn accept(
listener: &mut NetworkInterfaceListener,
@@ -233,35 +272,35 @@ impl VHostServer {
let accepted;
loop {
let any_public = mapping
.borrow()
.iter()
.any(|(_, targets)| targets.iter().any(|(target, _)| target.public));
let any_filter = AnyFilter::from(&*mapping.borrow());
let changed_public = mapping
.wait_for(|m| {
m.iter()
.any(|(_, targets)| targets.iter().any(|(target, _)| target.public))
!= any_public
})
let changed_filter = mapping
.wait_for(|m| any_filter != AnyFilter::from(m))
.boxed();
tokio::select! {
a = listener.accept(any_public) => {
a = listener.accept(&any_filter) => {
accepted = a?;
break;
}
_ = changed_public => {
tracing::debug!("port {} {} public bindings", listener.port(), if any_public { "no longer has" } else { "now has" });
_ = changed_filter => {
tracing::debug!("port {} filter changed", listener.port());
}
}
}
let check = listener.check_filter();
tokio::spawn(async move {
let bind = accepted.bind;
if let Err(e) =
Self::handle_stream(accepted, mapping, db, acme_tls_alpn_cache, crypto_provider)
.await
if let Err(e) = Self::handle_stream(
accepted,
check,
mapping,
db,
acme_tls_alpn_cache,
crypto_provider,
)
.await
{
tracing::error!("Error in VHostController on {bind}: {e}");
tracing::debug!("{e:?}")
@@ -273,11 +312,11 @@ impl VHostServer {
async fn handle_stream(
Accepted {
stream,
is_public,
wan_ip,
bind,
..
}: Accepted,
check_filter: impl FnOnce(SocketAddr, &DynInterfaceFilter) -> bool,
mapping: watch::Receiver<Mapping>,
db: TypedPatchDb<Database>,
acme_tls_alpn_cache: AcmeTlsAlpnCache,
@@ -431,10 +470,8 @@ impl VHostServer {
.map(|(target, _)| target.clone())
};
if let Some(target) = target {
if is_public && !target.public {
log::warn!(
"Rejecting connection from public interface to private bind: {bind} -> {target:?}"
);
if !check_filter(bind, &target.filter) {
log::warn!("Connection from {bind} to {target:?} rejected by filter");
return Ok(());
}
let peek = db.peek().await;
@@ -660,7 +697,10 @@ impl VHostServer {
crypto_provider: Arc<CryptoProvider>,
acme_tls_alpn_cache: AcmeTlsAlpnCache,
) -> Result<Self, Error> {
let mut listener = iface_ctrl.bind(port).with_kind(crate::ErrorKind::Network)?;
let mut listener = iface_ctrl
.watcher
.bind(port)
.with_kind(crate::ErrorKind::Network)?;
let (map_send, map_recv) = watch::channel(BTreeMap::new());
Ok(Self {
mapping: map_send,

Some files were not shown because too many files have changed in this diff Show More