mirror of
https://github.com/Start9Labs/start-os.git
synced 2026-03-26 02:11:53 +00:00
Merge branch 'next/minor' of github.com:Start9Labs/start-os into next/major
This commit is contained in:
40
CLEARNET.md
40
CLEARNET.md
@@ -1,40 +0,0 @@
|
||||
# Setting up clearnet for a service interface
|
||||
|
||||
NOTE: this guide is for HTTPS only! Other configurations may require a more bespoke setup depending on the service. Please consult the service documentation or the Start9 Community for help with non-HTTPS applications
|
||||
|
||||
## Initialize ACME certificate generation
|
||||
|
||||
The following command will register your device with an ACME certificate provider, such as letsencrypt
|
||||
|
||||
This only needs to be done once.
|
||||
|
||||
```
|
||||
start-cli net acme init --provider=letsencrypt --contact="mailto:me@drbonez.dev"
|
||||
```
|
||||
|
||||
- `provider` can be `letsencrypt`, `letsencrypt-staging` (useful if you're doing a lot of testing and want to avoid being rate limited), or the url of any provider that supports the [RFC8555](https://datatracker.ietf.org/doc/html/rfc8555) ACME api
|
||||
- `contact` can be any valid contact url, typically `mailto:` urls. it can be specified multiple times to set multiple contacts
|
||||
|
||||
## Whitelist a domain for ACME certificate acquisition
|
||||
|
||||
The following command will tell the OS to use ACME certificates instead of system signed ones for the provided url. In this example, `testing.drbonez.dev`
|
||||
|
||||
This must be done for every domain you wish to host on clearnet.
|
||||
|
||||
```
|
||||
start-cli net acme domain add "testing.drbonez.dev"
|
||||
```
|
||||
|
||||
## Forward clearnet port
|
||||
|
||||
Go into your router settings, and map port 443 on your router to port 5443 on your start-os device. This one port should cover most use cases
|
||||
|
||||
## Add domain to service host
|
||||
|
||||
The following command will tell the OS to route https requests from the WAN to the provided hostname to the specified service. In this example, we are adding `testing.drbonez.dev` to the host `ui-multi` on the package `hello-world`. To see a list of available host IDs for a given package, run `start-cli package host <PACKAGE> list`
|
||||
|
||||
This must be done for every domain you wish to host on clearnet.
|
||||
|
||||
```
|
||||
start-cli package host hello-world address ui-multi add testing.drbonez.dev
|
||||
```
|
||||
5
Makefile
5
Makefile
@@ -26,6 +26,7 @@ GZIP_BIN := $(shell which pigz || which gzip)
|
||||
TAR_BIN := $(shell which gtar || which tar)
|
||||
COMPILED_TARGETS := core/target/$(ARCH)-unknown-linux-musl/release/startbox core/target/$(ARCH)-unknown-linux-musl/release/containerbox system-images/compat/docker-images/$(ARCH).tar system-images/utils/docker-images/$(ARCH).tar system-images/binfmt/docker-images/$(ARCH).tar container-runtime/rootfs.$(ARCH).squashfs
|
||||
ALL_TARGETS := $(STARTD_SRC) $(ENVIRONMENT_FILE) $(GIT_HASH_FILE) $(VERSION_FILE) $(COMPILED_TARGETS) cargo-deps/$(ARCH)-unknown-linux-musl/release/startos-backup-fs $(shell if [ "$(PLATFORM)" = "raspberrypi" ]; then echo cargo-deps/aarch64-unknown-linux-musl/release/pi-beep; fi) $(shell /bin/bash -c 'if [[ "${ENVIRONMENT}" =~ (^|-)unstable($$|-) ]]; then echo cargo-deps/$(ARCH)-unknown-linux-musl/release/tokio-console; fi') $(PLATFORM_FILE)
|
||||
REBUILD_TYPES = 1
|
||||
|
||||
ifeq ($(REMOTE),)
|
||||
mkdir = mkdir -p $1
|
||||
@@ -80,6 +81,8 @@ clean:
|
||||
rm -rf container-runtime/dist
|
||||
rm -rf container-runtime/node_modules
|
||||
rm -f container-runtime/*.squashfs
|
||||
if [ -d container-runtime/tmp/combined ] && mountpoint container-runtime/tmp/combined; then sudo umount container-runtime/tmp/combined; fi
|
||||
if [ -d container-runtime/tmp/lower ] && mountpoint container-runtime/tmp/lower; then sudo umount container-runtime/tmp/lower; fi
|
||||
rm -rf container-runtime/tmp
|
||||
(cd sdk && make clean)
|
||||
rm -f ENVIRONMENT.txt
|
||||
@@ -226,7 +229,7 @@ container-runtime/node_modules/.package-lock.json: container-runtime/package.jso
|
||||
npm --prefix container-runtime ci
|
||||
touch container-runtime/node_modules/.package-lock.json
|
||||
|
||||
sdk/base/lib/osBindings/index.ts: core/startos/bindings/index.ts
|
||||
sdk/base/lib/osBindings/index.ts: $(shell if [ "$(REBUILD_TYPES)" -ne 0 ]; then echo core/startos/bindings/index.ts; fi)
|
||||
mkdir -p sdk/base/lib/osBindings
|
||||
rsync -ac --delete core/startos/bindings/ sdk/base/lib/osBindings/
|
||||
touch sdk/base/lib/osBindings/index.ts
|
||||
|
||||
@@ -11,6 +11,7 @@ cryptsetup
|
||||
curl
|
||||
dnsutils
|
||||
dmidecode
|
||||
dnsutils
|
||||
dosfstools
|
||||
e2fsprogs
|
||||
ecryptfs-utils
|
||||
@@ -57,4 +58,5 @@ systemd-timesyncd
|
||||
tor
|
||||
util-linux
|
||||
vim
|
||||
wireguard-tools
|
||||
wireless-tools
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
start-cli net dhcp update $interface
|
||||
@@ -4,7 +4,7 @@ set -e
|
||||
|
||||
# install dependencies
|
||||
/usr/bin/apt update
|
||||
/usr/bin/apt install --no-install-recommends -y xserver-xorg x11-xserver-utils xinit firefox-esr matchbox-window-manager libnss3-tools
|
||||
/usr/bin/apt install --no-install-recommends -y xserver-xorg x11-xserver-utils xinit firefox-esr matchbox-window-manager libnss3-tools p11-kit-modules
|
||||
|
||||
#Change a default preference set by stock debian firefox-esr
|
||||
sed -i 's|^pref("extensions.update.enabled", true);$|pref("extensions.update.enabled", false);|' /etc/firefox-esr/firefox-esr.js
|
||||
@@ -83,6 +83,8 @@ user_pref("toolkit.telemetry.updatePing.enabled", false);
|
||||
user_pref("toolkit.telemetry.cachedClientID", "");
|
||||
EOF
|
||||
|
||||
ln -sf /usr/lib/$(uname -m)-linux-gnu/pkcs11/p11-kit-trust.so /usr/lib/firefox-esr/libnssckbi.so
|
||||
|
||||
# create kiosk script
|
||||
cat > /home/kiosk/kiosk.sh << 'EOF'
|
||||
#!/bin/sh
|
||||
@@ -99,7 +101,9 @@ done
|
||||
killall firefox-esr
|
||||
) &
|
||||
matchbox-window-manager -use_titlebar no &
|
||||
firefox-esr http://localhost --profile /home/kiosk/fx-profile
|
||||
cp -r /home/kiosk/fx-profile /home/kiosk/fx-profile-tmp
|
||||
firefox-esr http://localhost --profile /home/kiosk/fx-profile-tmp
|
||||
rm -rf /home/kiosk/fx-profile-tmp
|
||||
EOF
|
||||
chmod +x /home/kiosk/kiosk.sh
|
||||
|
||||
|
||||
367
build/lib/scripts/wg-vps-setup
Executable file
367
build/lib/scripts/wg-vps-setup
Executable file
@@ -0,0 +1,367 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Colors for better output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
BLUE='\033[1;34m'
|
||||
YELLOW='\033[1;33m'
|
||||
NC='\033[0;37m' # No Color
|
||||
|
||||
# --- Constants ---
|
||||
readonly WIREGUARD_INSTALL_URL="https://raw.githubusercontent.com/start9labs/wg-vps-setup/master/wireguard-install.sh"
|
||||
readonly SSH_KEY_DIR="/home/start9/.ssh"
|
||||
readonly SSH_KEY_NAME="id_ed25519"
|
||||
readonly SSH_PRIVATE_KEY="$SSH_KEY_DIR/$SSH_KEY_NAME"
|
||||
readonly SSH_PUBLIC_KEY="$SSH_PRIVATE_KEY.pub"
|
||||
|
||||
# Store original arguments
|
||||
SCRIPT_ARGS=("$@")
|
||||
|
||||
# --- Functions ---
|
||||
|
||||
# Function to ensure script runs with root privileges by auto-elevating if needed
|
||||
check_root() {
|
||||
if [[ "$EUID" -ne 0 ]]; then
|
||||
exec sudo "$0" "${SCRIPT_ARGS[@]}"
|
||||
fi
|
||||
sudo chown -R start9:startos "$SSH_KEY_DIR"
|
||||
}
|
||||
|
||||
# Function to print banner
|
||||
print_banner() {
|
||||
echo -e "${BLUE}"
|
||||
echo "================================================"
|
||||
echo -e " ${NC}StartOS WireGuard VPS Setup Tool${BLUE} "
|
||||
echo "================================================"
|
||||
echo -e "${NC}"
|
||||
}
|
||||
|
||||
# Function to print usage
|
||||
print_usage() {
|
||||
echo -e "Usage: $0 [-h] [-i IP] [-u USERNAME] [-p PORT] [-k SSH_KEY]"
|
||||
echo "Options:"
|
||||
echo " -h Show this help message"
|
||||
echo " -i VPS IP address"
|
||||
echo " -u SSH username (default: root)"
|
||||
echo " -p SSH port (default: 22)"
|
||||
echo " -k Path to the custom SSH private key (optional)"
|
||||
echo " If no key is provided, the default key '$SSH_PRIVATE_KEY' will be used."
|
||||
}
|
||||
|
||||
# Function to display end message
|
||||
display_end_message() {
|
||||
echo -e "\n${BLUE}------------------------------------------------------------------${NC}"
|
||||
echo -e "${NC}WireGuard server setup complete!"
|
||||
echo -e "${BLUE}------------------------------------------------------------------${NC}"
|
||||
echo -e "\n${YELLOW}To expose your services to the Clearnet, use the following commands on your StartOS system (replace placeholders):${NC}"
|
||||
echo -e "\n ${YELLOW}1. Initialize ACME (This only needs to be done once):${NC}"
|
||||
echo " start-cli net acme init --provider=letsencrypt --contact=mailto:your-email@example.com"
|
||||
echo -e "\n ${YELLOW}2. Expose 'hello-world' on port 80 through VPS:${NC}"
|
||||
echo " start-cli package host hello-world binding ui-multi set-public 80"
|
||||
echo -e "\n ${YELLOW}3. Add a domain to your 'hello-world' service:${NC}"
|
||||
echo " start-cli package host hello-world address ui-multi domain add your-domain.example.com --acme=letsencrypt"
|
||||
echo -e "\n ${YELLOW}Replace '${NC}your-email@example.com${YELLOW}' with your actual email address, '${NC}your-domain.example.com${YELLOW}' with your actual domain and '${NC}hello-world${YELLOW}' with your actual service id.${NC}"
|
||||
echo -e "${BLUE}------------------------------------------------------------------${NC}"
|
||||
}
|
||||
|
||||
# Function to validate IP address
|
||||
validate_ip() {
|
||||
local ip=$1
|
||||
if [[ $ip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
|
||||
return 0
|
||||
else
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Function for configuring SSH key authentication on remote server
|
||||
configure_ssh_key_auth() {
|
||||
echo -e "${BLUE}Configuring SSH key authentication on remote server...${NC}"
|
||||
|
||||
ssh -i "$SSH_PRIVATE_KEY" -o StrictHostKeyChecking=no -p "$SSH_PORT" "$SSH_USER@$VPS_IP" '
|
||||
# Check if PubkeyAuthentication is commented out
|
||||
if grep -q "^#PubkeyAuthentication" /etc/ssh/sshd_config; then
|
||||
sed -i "s/^#PubkeyAuthentication.*/PubkeyAuthentication yes/" /etc/ssh/sshd_config
|
||||
# Check if PubkeyAuthentication exists but is not enabled
|
||||
elif grep -q "^PubkeyAuthentication" /etc/ssh/sshd_config; then
|
||||
sed -i "s/^PubkeyAuthentication.*/PubkeyAuthentication yes/" /etc/ssh/sshd_config
|
||||
# Add PubkeyAuthentication if it doesnt exist
|
||||
else
|
||||
echo "PubkeyAuthentication yes" >> /etc/ssh/sshd_config
|
||||
fi
|
||||
|
||||
# Configure AuthorizedKeysFile if needed
|
||||
if grep -q "^#AuthorizedKeysFile" /etc/ssh/sshd_config; then
|
||||
sed -i "s/^#AuthorizedKeysFile.*/AuthorizedKeysFile .ssh\/authorized_keys .ssh\/authorized_keys2/" /etc/ssh/sshd_config
|
||||
elif ! grep -q "^AuthorizedKeysFile" /etc/ssh/sshd_config; then
|
||||
echo "AuthorizedKeysFile .ssh/authorized_keys .ssh/authorized_keys2" >> /etc/ssh/sshd_config
|
||||
fi
|
||||
|
||||
# Reload SSH service
|
||||
systemctl reload sshd
|
||||
'
|
||||
}
|
||||
|
||||
# Function to handle StartOS connection (download only)
|
||||
handle_startos_connection() {
|
||||
echo -e "${BLUE}Fetching the WireGuard configuration file...${NC}"
|
||||
|
||||
# Fetch the client configuration file
|
||||
config_file=$(ssh -i "$SSH_PRIVATE_KEY" -o StrictHostKeyChecking=no -p "$SSH_PORT" "$SSH_USER@$VPS_IP" 'ls -t ~/*.conf 2>/dev/null | head -n 1')
|
||||
if [ -z "$config_file" ]; then
|
||||
echo -e "${RED}Error: No WireGuard configuration file found on the remote server.${NC}"
|
||||
return 1 # Exit with error
|
||||
fi
|
||||
CONFIG_NAME=$(basename "$config_file")
|
||||
|
||||
# Download the configuration file
|
||||
if ! scp -i "$SSH_PRIVATE_KEY" -o StrictHostKeyChecking=no -P "$SSH_PORT" "$SSH_USER@$VPS_IP":~/"$CONFIG_NAME" ./; then
|
||||
echo -e "${RED}Error: Failed to download the WireGuard configuration file.${NC}"
|
||||
return 1 # Exit with error
|
||||
fi
|
||||
echo -e "${GREEN}WireGuard configuration file '$CONFIG_NAME' downloaded successfully.${NC}"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Function to import WireGuard configuration
|
||||
import_wireguard_config() {
|
||||
local config_name="$1"
|
||||
if [ -z "$config_name" ]; then
|
||||
echo -e "${RED}Error: Configuration file name is missing.${NC}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
local connection_name=$(basename "$config_name" .conf) #Extract base name without extension
|
||||
|
||||
# Check if the connection with same name already exists
|
||||
if nmcli connection show --active | grep -q "^${connection_name}\s"; then
|
||||
read -r -p "A connection with the name '$connection_name' already exists. Do you want to override it? (y/N): " answer
|
||||
if [[ "$answer" =~ ^[Yy]$ ]]; then
|
||||
nmcli connection delete "$connection_name"
|
||||
if [ $? -ne 0 ]; then
|
||||
echo -e "${RED}Error: Failed to delete existing connection '$connection_name'.${NC}"
|
||||
return 1
|
||||
fi
|
||||
# Import if user chose to override or if connection did not exist
|
||||
if ! nmcli connection import type wireguard file "$config_name"; then
|
||||
echo -e "${RED}Error: Failed to import the WireGuard configuration using NetworkManager.${NC}"
|
||||
rm -f "$config_name"
|
||||
return 1
|
||||
fi
|
||||
echo -e "${GREEN}WireGuard configuration '$config_name' has been imported to NetworkManager.${NC}"
|
||||
rm -f "$config_name"
|
||||
display_end_message
|
||||
else
|
||||
echo -e "${BLUE}Skipping import of the WireGuard configuration.${NC}"
|
||||
rm -f "$config_name"
|
||||
return 0
|
||||
fi
|
||||
else
|
||||
# Import if connection did not exist
|
||||
if command -v nmcli &>/dev/null; then
|
||||
if ! nmcli connection import type wireguard file "$config_name"; then
|
||||
echo -e "${RED}Error: Failed to import the WireGuard configuration using NetworkManager.${NC}"
|
||||
rm -f "$config_name"
|
||||
return 1
|
||||
fi
|
||||
echo -e "${GREEN}WireGuard configuration '$config_name' has been imported to NetworkManager.${NC}"
|
||||
rm -f "$config_name"
|
||||
display_end_message
|
||||
else
|
||||
echo -e "${YELLOW}Warning: NetworkManager 'nmcli' not found. Configuration file '$config_name' saved in current directory.${NC}"
|
||||
echo -e "${YELLOW}Import the configuration to your StartOS manually by going to NetworkManager or using wg-quick up <config> command${NC}"
|
||||
fi
|
||||
fi
|
||||
return 0
|
||||
}
|
||||
|
||||
# Function to download the install script
|
||||
download_install_script() {
|
||||
echo -e "${BLUE}Downloading latest WireGuard install script...${NC}"
|
||||
# Download the script
|
||||
if ! curl -sSf "$WIREGUARD_INSTALL_URL" -o wireguard-install.sh; then
|
||||
echo -e "${RED}Failed to download WireGuard installation script.${NC}"
|
||||
return 1
|
||||
fi
|
||||
chmod +x wireguard-install.sh
|
||||
if [ $? -ne 0 ]; then
|
||||
echo -e "${RED}Failed to chmod +x wireguard install script.${NC}"
|
||||
return 1
|
||||
fi
|
||||
echo -e "${GREEN}WireGuard install script downloaded successfully!${NC}"
|
||||
return 0
|
||||
}
|
||||
|
||||
# Function to install WireGuard
|
||||
install_wireguard() {
|
||||
echo -e "\n${BLUE}Installing WireGuard...${NC}"
|
||||
|
||||
# Check if the install script exists
|
||||
if [ ! -f "wireguard-install.sh" ]; then
|
||||
echo -e "${RED}WireGuard install script is missing. Did it fail to download?${NC}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Run the remote install script and let it complete
|
||||
if ! ssh -o ConnectTimeout=60 -i "$SSH_PRIVATE_KEY" -o StrictHostKeyChecking=no -p "$SSH_PORT" -t "$SSH_USER@$VPS_IP" "bash -c 'export TERM=xterm-256color; export STARTOS_HOSTNAME=$(hostname); bash ~/wireguard-install.sh'"; then
|
||||
echo -e "${RED}WireGuard installation failed on remote server.${NC}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Test whether WireGuard installed successfully
|
||||
if ! ssh -q -o BatchMode=yes -o ConnectTimeout=5 -i "$SSH_PRIVATE_KEY" -o StrictHostKeyChecking=no -p "$SSH_PORT" "$SSH_USER@$VPS_IP" "test -f /etc/wireguard/wg0.conf"; then
|
||||
echo -e "\n${RED}WireGuard installation failed because /etc/wireguard/wg0.conf is missing, which means the script removed it.${NC}"
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo -e "\n${GREEN}WireGuard installation completed successfully!${NC}"
|
||||
return 0
|
||||
}
|
||||
|
||||
# --- Main Script ---
|
||||
# Initialize variables
|
||||
VPS_IP=""
|
||||
SSH_USER="root"
|
||||
SSH_PORT="22"
|
||||
CUSTOM_SSH_KEY=""
|
||||
CONFIG_NAME=""
|
||||
|
||||
# Check if the script is run as root before anything else
|
||||
check_root
|
||||
|
||||
# Print banner
|
||||
print_banner
|
||||
|
||||
# Parse command line arguments
|
||||
while getopts "hi:u:p:k:" opt; do
|
||||
case $opt in
|
||||
h)
|
||||
print_usage
|
||||
exit 0
|
||||
;;
|
||||
i)
|
||||
VPS_IP=$OPTARG
|
||||
;;
|
||||
u)
|
||||
SSH_USER=$OPTARG
|
||||
;;
|
||||
p)
|
||||
SSH_PORT=$OPTARG
|
||||
;;
|
||||
k)
|
||||
CUSTOM_SSH_KEY=$OPTARG
|
||||
;;
|
||||
\?)
|
||||
echo "Invalid option: -$OPTARG" >&2
|
||||
print_usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Check if custom SSH key is passed and update the private key variable
|
||||
if [ -n "$CUSTOM_SSH_KEY" ]; then
|
||||
if [ ! -f "$CUSTOM_SSH_KEY" ]; then
|
||||
echo -e "${RED}Custom SSH key '$CUSTOM_SSH_KEY' not found.${NC}"
|
||||
exit 1
|
||||
fi
|
||||
SSH_PRIVATE_KEY="$CUSTOM_SSH_KEY"
|
||||
SSH_PUBLIC_KEY="$CUSTOM_SSH_KEY.pub"
|
||||
else
|
||||
# Use default StartOS SSH key
|
||||
if [ ! -f "$SSH_PRIVATE_KEY" ]; then
|
||||
echo -e "${RED}No SSH key found at default location '$SSH_PRIVATE_KEY'. Please ensure StartOS SSH keys are properly configured.${NC}"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ ! -f "$SSH_PUBLIC_KEY" ]; then
|
||||
echo -e "${RED}Public key '$SSH_PUBLIC_KEY' not found. Please ensure both private and public keys exist.${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# If VPS_IP is not provided via command line, ask for it
|
||||
if [ -z "$VPS_IP" ]; then
|
||||
while true; do
|
||||
echo -n "Please enter your VPS IP address: "
|
||||
read VPS_IP
|
||||
if validate_ip "$VPS_IP"; then
|
||||
break
|
||||
else
|
||||
echo -e "${RED}Invalid IP address format. Please try again.${NC}"
|
||||
fi
|
||||
done
|
||||
fi
|
||||
|
||||
# Confirm SSH connection details
|
||||
echo -e "\n${GREEN}Connection details:${NC}"
|
||||
echo "VPS IP: $VPS_IP"
|
||||
echo "SSH User: $SSH_USER"
|
||||
echo "SSH Port: $SSH_PORT"
|
||||
|
||||
echo -e "\n${GREEN}Proceeding with SSH key-based authentication...${NC}\n"
|
||||
|
||||
# Copy SSH public key to the remote server
|
||||
if ! ssh-copy-id -i "$SSH_PUBLIC_KEY" -o StrictHostKeyChecking=no -p "$SSH_PORT" "$SSH_USER@$VPS_IP"; then
|
||||
echo -e "${RED}Failed to copy SSH key to the remote server. Please ensure you have correct credentials.${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo -e "${GREEN}SSH key-based authentication configured successfully!${NC}"
|
||||
|
||||
# Test SSH connection using key-based authentication
|
||||
echo -e "\nTesting SSH connection with key-based authentication..."
|
||||
if ! ssh -q -o BatchMode=yes -o ConnectTimeout=5 -i "$SSH_PRIVATE_KEY" -o StrictHostKeyChecking=no -p "$SSH_PORT" "$SSH_USER@$VPS_IP" 'grep -q "^PubkeyAuthentication yes" /etc/ssh/sshd_config'; then
|
||||
echo -e "\n${RED}SSH key-based authentication is not enabled on your VPS.${NC}"
|
||||
echo -e "\n${YELLOW}Would you like this script to automatically enable SSH key authentication? (y/N):${NC} "
|
||||
read -r answer
|
||||
|
||||
if [[ "$answer" =~ ^[Yy]$ ]]; then
|
||||
configure_ssh_key_auth
|
||||
else
|
||||
echo -e "\n${BLUE}------------------------------------------------------------------${NC}"
|
||||
echo -e "${YELLOW}To manually enable SSH key authentication:${NC}"
|
||||
echo -e "\n ${YELLOW}1. Connect to your VPS and edit sshd_config:${NC}"
|
||||
echo " nano /etc/ssh/sshd_config"
|
||||
echo -e "\n ${YELLOW}2. Find and uncomment or add the line:${NC}"
|
||||
echo " PubkeyAuthentication yes"
|
||||
echo -e "\n ${YELLOW}3. Restart the SSH service:${NC}"
|
||||
echo " systemctl restart sshd"
|
||||
echo -e "${BLUE}------------------------------------------------------------------${NC}"
|
||||
echo -e "\n${YELLOW}Please enable SSH key authentication and run this script again.${NC}"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
echo -e "${GREEN}SSH connection successful with key-based authentication!${NC}"
|
||||
|
||||
# Download the WireGuard install script locally
|
||||
if ! download_install_script; then
|
||||
echo -e "${RED}Failed to download the latest install script. Exiting...${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Upload the install script to the remote server
|
||||
if ! scp -i "$SSH_PRIVATE_KEY" -o StrictHostKeyChecking=no -P "$SSH_PORT" wireguard-install.sh "$SSH_USER@$VPS_IP":~/; then
|
||||
echo -e "${RED}Failed to upload WireGuard install script to the remote server.${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Install WireGuard on remote server using the downloaded script
|
||||
if ! install_wireguard; then
|
||||
echo -e "${RED}WireGuard installation failed.${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Remove the local install script
|
||||
rm wireguard-install.sh >/dev/null 2>&1
|
||||
|
||||
# Handle the StartOS config (download)
|
||||
if ! handle_startos_connection; then
|
||||
echo -e "${RED}StartOS configuration download failed!${NC}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Import the configuration
|
||||
if ! import_wireguard_config "$CONFIG_NAME"; then
|
||||
echo -e "${RED}StartOS configuration import failed or skipped!${NC}"
|
||||
fi
|
||||
2011
container-runtime/package-lock.json
generated
2011
container-runtime/package-lock.json
generated
File diff suppressed because it is too large
Load Diff
@@ -216,12 +216,6 @@ export function makeEffects(context: EffectContext): Effects {
|
||||
}) as ReturnType<T.Effects["getServiceInterface"]>
|
||||
},
|
||||
|
||||
getPrimaryUrl(...[options]: Parameters<T.Effects["getPrimaryUrl"]>) {
|
||||
return rpcRound("get-primary-url", {
|
||||
...options,
|
||||
callback: context.callbacks?.addCallback(options.callback) || null,
|
||||
}) as ReturnType<T.Effects["getPrimaryUrl"]>
|
||||
},
|
||||
getServicePortForward(
|
||||
...[options]: Parameters<T.Effects["getServicePortForward"]>
|
||||
) {
|
||||
|
||||
@@ -212,16 +212,22 @@ export class RpcListener {
|
||||
s.on("data", (a) =>
|
||||
Promise.resolve(a)
|
||||
.then((b) => b.toString())
|
||||
.then(logData("dataIn"))
|
||||
.then(jsonParse)
|
||||
.then(captureId)
|
||||
.then((x) => this.dealWithInput(x))
|
||||
.catch(mapError)
|
||||
.then(logData("response"))
|
||||
.then(writeDataToSocket)
|
||||
.catch((e) => {
|
||||
console.error(`Major error in socket handling: ${e}`)
|
||||
console.debug(`Data in: ${a.toString()}`)
|
||||
.then((buf) => {
|
||||
for (let s of buf.split("\n")) {
|
||||
if (s)
|
||||
Promise.resolve(s)
|
||||
.then(logData("dataIn"))
|
||||
.then(jsonParse)
|
||||
.then(captureId)
|
||||
.then((x) => this.dealWithInput(x))
|
||||
.catch(mapError)
|
||||
.then(logData("response"))
|
||||
.then(writeDataToSocket)
|
||||
.catch((e) => {
|
||||
console.error(`Major error in socket handling: ${e}`)
|
||||
console.debug(`Data in: ${a.toString()}`)
|
||||
})
|
||||
}
|
||||
}),
|
||||
)
|
||||
})
|
||||
@@ -390,7 +396,7 @@ export class RpcListener {
|
||||
|
||||
.defaultToLazy(() => {
|
||||
console.warn(
|
||||
`Coudln't parse the following input ${JSON.stringify(input)}`,
|
||||
`Couldn't parse the following input ${JSON.stringify(input)}`,
|
||||
)
|
||||
return {
|
||||
jsonrpc,
|
||||
|
||||
@@ -43,7 +43,7 @@ export class DockerProcedureContainer {
|
||||
) {
|
||||
const subcontainer = await SubContainer.of(
|
||||
effects,
|
||||
{ id: data.image },
|
||||
{ imageId: data.image },
|
||||
name,
|
||||
)
|
||||
|
||||
|
||||
@@ -113,7 +113,6 @@ export class MainLoop {
|
||||
}))
|
||||
.find((conf) => conf.internal == internalPort)
|
||||
await effects.bind({
|
||||
kind: "multi",
|
||||
id: interfaceId,
|
||||
internalPort,
|
||||
preferredExternalPort: torConf?.external || internalPort,
|
||||
|
||||
@@ -51,6 +51,7 @@ function todo(): never {
|
||||
const MANIFEST_LOCATION = "/usr/lib/startos/package/embassyManifest.json"
|
||||
export const EMBASSY_JS_LOCATION = "/usr/lib/startos/package/embassy.js"
|
||||
const EMBASSY_POINTER_PATH_PREFIX = "/embassyConfig" as utils.StorePath
|
||||
const EMBASSY_DEPENDS_ON_PATH_PREFIX = "/embassyDependsOn" as utils.StorePath
|
||||
|
||||
const matchResult = object({
|
||||
result: any,
|
||||
@@ -314,7 +315,7 @@ export class SystemForEmbassy implements System {
|
||||
)
|
||||
.catch(() => []),
|
||||
)
|
||||
await this.setDependencies(effects, oldDeps)
|
||||
await this.setDependencies(effects, oldDeps, false)
|
||||
}
|
||||
|
||||
async exit(): Promise<void> {
|
||||
@@ -401,6 +402,7 @@ export class SystemForEmbassy implements System {
|
||||
return [
|
||||
port,
|
||||
{
|
||||
protocol: null,
|
||||
secure: null,
|
||||
preferredExternalPort: Number.parseInt(
|
||||
torPort || lanPort || String(port),
|
||||
@@ -425,7 +427,6 @@ export class SystemForEmbassy implements System {
|
||||
name: interfaceValue.name,
|
||||
id: `${id}-${internal}`,
|
||||
description: interfaceValue.description,
|
||||
hasPrimary: false,
|
||||
type:
|
||||
interfaceValue.ui &&
|
||||
(origin.scheme === "http" || origin.sslScheme === "https")
|
||||
@@ -664,7 +665,7 @@ export class SystemForEmbassy implements System {
|
||||
),
|
||||
)
|
||||
const dependsOn = answer["depends-on"] ?? answer.dependsOn ?? {}
|
||||
await this.setDependencies(effects, dependsOn)
|
||||
await this.setDependencies(effects, dependsOn, true)
|
||||
return
|
||||
} else if (setConfigValue.type === "script") {
|
||||
const moduleCode = await this.moduleCode
|
||||
@@ -687,48 +688,47 @@ export class SystemForEmbassy implements System {
|
||||
}),
|
||||
)
|
||||
const dependsOn = answer["depends-on"] ?? answer.dependsOn ?? {}
|
||||
await this.setDependencies(effects, dependsOn)
|
||||
await this.setDependencies(effects, dependsOn, true)
|
||||
return
|
||||
}
|
||||
}
|
||||
private async setDependencies(
|
||||
effects: Effects,
|
||||
rawDepends: { [x: string]: readonly string[] },
|
||||
configuring: boolean,
|
||||
) {
|
||||
const dependsOn: Record<string, readonly string[] | null> = {
|
||||
const storedDependsOn = (await effects.store.get({
|
||||
packageId: this.manifest.id,
|
||||
path: EMBASSY_DEPENDS_ON_PATH_PREFIX,
|
||||
})) as Record<string, readonly string[]>
|
||||
|
||||
const requiredDeps = {
|
||||
...Object.fromEntries(
|
||||
Object.entries(this.manifest.dependencies || {})?.map((x) => [
|
||||
x[0],
|
||||
null,
|
||||
]) || [],
|
||||
Object.entries(this.manifest.dependencies || {})
|
||||
?.filter((x) => x[1].requirement.type === "required")
|
||||
.map((x) => [x[0], []]) || [],
|
||||
),
|
||||
...rawDepends,
|
||||
}
|
||||
|
||||
const dependsOn: Record<string, readonly string[]> = configuring
|
||||
? {
|
||||
...requiredDeps,
|
||||
...rawDepends,
|
||||
}
|
||||
: storedDependsOn
|
||||
? storedDependsOn
|
||||
: requiredDeps
|
||||
|
||||
await effects.store.set({
|
||||
path: EMBASSY_DEPENDS_ON_PATH_PREFIX,
|
||||
value: dependsOn,
|
||||
})
|
||||
|
||||
await effects.setDependencies({
|
||||
dependencies: Object.entries(dependsOn).flatMap(
|
||||
([key, value]): T.Dependencies => {
|
||||
const dependency = this.manifest.dependencies?.[key]
|
||||
if (!dependency) return []
|
||||
if (value == null) {
|
||||
const versionRange = dependency.version
|
||||
if (dependency.requirement.type === "required") {
|
||||
return [
|
||||
{
|
||||
id: key,
|
||||
versionRange,
|
||||
kind: "running",
|
||||
healthChecks: [],
|
||||
},
|
||||
]
|
||||
}
|
||||
return [
|
||||
{
|
||||
kind: "exists",
|
||||
id: key,
|
||||
versionRange,
|
||||
},
|
||||
]
|
||||
}
|
||||
const versionRange = dependency.version
|
||||
const kind = "running"
|
||||
return [
|
||||
|
||||
@@ -109,7 +109,7 @@ export const polyfillEffects = (
|
||||
return startSdk
|
||||
.runCommand(
|
||||
effects,
|
||||
{ id: manifest.main.image },
|
||||
{ imageId: manifest.main.image },
|
||||
commands,
|
||||
{},
|
||||
commands.join(" "),
|
||||
@@ -165,7 +165,7 @@ export const polyfillEffects = (
|
||||
await startSdk
|
||||
.runCommand(
|
||||
effects,
|
||||
{ id: manifest.main.image },
|
||||
{ imageId: manifest.main.image },
|
||||
commands,
|
||||
{
|
||||
mounts: [
|
||||
@@ -207,7 +207,7 @@ export const polyfillEffects = (
|
||||
await startSdk
|
||||
.runCommand(
|
||||
effects,
|
||||
{ id: manifest.main.image },
|
||||
{ imageId: manifest.main.image },
|
||||
commands,
|
||||
{
|
||||
mounts: [
|
||||
|
||||
@@ -74,8 +74,8 @@ export class SystemForStartOs implements System {
|
||||
async exit(): Promise<void> {}
|
||||
|
||||
async start(effects: Effects): Promise<void> {
|
||||
if (this.runningMain) return
|
||||
effects.constRetry = utils.once(() => effects.restart())
|
||||
if (this.runningMain) await this.stop()
|
||||
let mainOnTerm: () => Promise<void> | undefined
|
||||
const started = async (onTerm: () => Promise<void>) => {
|
||||
await effects.setMainStatus({ status: "running" })
|
||||
@@ -98,8 +98,11 @@ export class SystemForStartOs implements System {
|
||||
|
||||
async stop(): Promise<void> {
|
||||
if (this.runningMain) {
|
||||
await this.runningMain.stop()
|
||||
this.runningMain = undefined
|
||||
try {
|
||||
await this.runningMain.stop()
|
||||
} finally {
|
||||
this.runningMain = undefined
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -11,7 +11,7 @@ new RpcListener(getDependencies)
|
||||
|
||||
/**
|
||||
|
||||
So, this is going to be sent into a running comtainer along with any of the other node modules that are going to be needed and used.
|
||||
So, this is going to be sent into a running container along with any of the other node modules that are going to be needed and used.
|
||||
|
||||
Once the container is started, we will go into a loading/ await state.
|
||||
This is the init system, and it will always be running, and it will be waiting for a command to be sent to it.
|
||||
@@ -38,5 +38,5 @@ There are
|
||||
|
||||
/**
|
||||
TODO:
|
||||
Should I seperate those adapter in/out?
|
||||
Should I separate those adapter in/out?
|
||||
*/
|
||||
|
||||
@@ -18,6 +18,13 @@ sudo mount -t overlay -olowerdir=tmp/lower,upperdir=tmp/upper,workdir=tmp/work o
|
||||
QEMU=
|
||||
if [ "$ARCH" != "$(uname -m)" ]; then
|
||||
QEMU=/usr/bin/qemu-${ARCH}-static
|
||||
if ! which qemu-$ARCH-static > /dev/null; then
|
||||
>&2 echo qemu-user-static is required for cross-platform builds
|
||||
sudo umount tmp/combined
|
||||
sudo umount tmp/lower
|
||||
sudo rm -rf tmp
|
||||
exit 1
|
||||
fi
|
||||
sudo cp $(which qemu-$ARCH-static) tmp/combined${QEMU}
|
||||
fi
|
||||
|
||||
|
||||
1717
core/Cargo.lock
generated
1717
core/Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
@@ -11,7 +11,7 @@ futures = "0.3.28"
|
||||
lazy_async_pool = "0.3.3"
|
||||
models = { path = "../models" }
|
||||
pin-project = "1.1.3"
|
||||
rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git", branch = "refactor/no-dyn-ctx" }
|
||||
rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git", branch = "master" }
|
||||
serde = { version = "1.0", features = ["derive", "rc"] }
|
||||
serde_json = "1.0"
|
||||
tokio = { version = "1", features = ["full"] }
|
||||
|
||||
@@ -24,7 +24,8 @@ patch-db = { version = "*", path = "../../patch-db/patch-db", features = [
|
||||
rand = "0.8.5"
|
||||
regex = "1.10.2"
|
||||
reqwest = "0.12"
|
||||
rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git", branch = "refactor/no-dyn-ctx" }
|
||||
rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git", branch = "master" }
|
||||
rustls = "0.23"
|
||||
serde = { version = "1.0", features = ["derive", "rc"] }
|
||||
serde_json = "1.0"
|
||||
sqlx = { version = "0.7.2", features = [
|
||||
@@ -39,3 +40,4 @@ tokio = { version = "1", features = ["full"] }
|
||||
torut = { git = "https://github.com/Start9Labs/torut.git", branch = "update/dependencies" }
|
||||
tracing = "0.1.39"
|
||||
yasi = "0.1.5"
|
||||
zbus = "5"
|
||||
|
||||
@@ -168,6 +168,6 @@ fn doesnt_reallocate() {
|
||||
mime: InternedString::intern("png"),
|
||||
data: Cow::Borrowed(&random[..i]),
|
||||
};
|
||||
assert_eq!(dbg!(icon.to_string()).capacity(), icon.data_url_len());
|
||||
assert_eq!(icon.to_string().capacity(), icon.data_url_len());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -45,7 +45,7 @@ pub enum ErrorKind {
|
||||
ConfigGen = 27,
|
||||
ParseNumber = 28,
|
||||
Database = 29,
|
||||
InvalidPackageId = 30,
|
||||
InvalidId = 30,
|
||||
InvalidSignature = 31,
|
||||
Backup = 32,
|
||||
Restore = 33,
|
||||
@@ -90,6 +90,7 @@ pub enum ErrorKind {
|
||||
Lxc = 72,
|
||||
Cancelled = 73,
|
||||
Git = 74,
|
||||
DBus = 75,
|
||||
}
|
||||
impl ErrorKind {
|
||||
pub fn as_str(&self) -> &'static str {
|
||||
@@ -124,7 +125,7 @@ impl ErrorKind {
|
||||
ConfigGen => "Config Generation Error",
|
||||
ParseNumber => "Number Parsing Error",
|
||||
Database => "Database Error",
|
||||
InvalidPackageId => "Invalid Package ID",
|
||||
InvalidId => "Invalid ID",
|
||||
InvalidSignature => "Invalid Signature",
|
||||
Backup => "Backup Error",
|
||||
Restore => "Restore Error",
|
||||
@@ -169,6 +170,7 @@ impl ErrorKind {
|
||||
Lxc => "LXC Error",
|
||||
Cancelled => "Cancelled",
|
||||
Git => "Git Error",
|
||||
DBus => "DBus Error",
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -224,7 +226,7 @@ impl From<std::convert::Infallible> for Error {
|
||||
}
|
||||
impl From<InvalidId> for Error {
|
||||
fn from(err: InvalidId) -> Self {
|
||||
Error::new(err, ErrorKind::InvalidPackageId)
|
||||
Error::new(err, ErrorKind::InvalidId)
|
||||
}
|
||||
}
|
||||
impl From<std::io::Error> for Error {
|
||||
@@ -327,6 +329,16 @@ impl From<torut::onion::OnionAddressParseError> for Error {
|
||||
Error::new(e, ErrorKind::Tor)
|
||||
}
|
||||
}
|
||||
impl From<zbus::Error> for Error {
|
||||
fn from(e: zbus::Error) -> Self {
|
||||
Error::new(e, ErrorKind::DBus)
|
||||
}
|
||||
}
|
||||
impl From<rustls::Error> for Error {
|
||||
fn from(e: rustls::Error) -> Self {
|
||||
Error::new(e, ErrorKind::OpenSsl)
|
||||
}
|
||||
}
|
||||
impl From<patch_db::value::Error> for Error {
|
||||
fn from(value: patch_db::value::Error) -> Self {
|
||||
match value.kind {
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
use yasi::InternedString;
|
||||
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
#[error("Invalid ID")]
|
||||
pub struct InvalidId;
|
||||
#[error("Invalid ID: {0}")]
|
||||
pub struct InvalidId(pub(super) InternedString);
|
||||
|
||||
@@ -43,7 +43,7 @@ impl TryFrom<InternedString> for Id {
|
||||
if ID_REGEX.is_match(&value) {
|
||||
Ok(Id(value))
|
||||
} else {
|
||||
Err(InvalidId)
|
||||
Err(InvalidId(value))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -53,7 +53,7 @@ impl TryFrom<String> for Id {
|
||||
if ID_REGEX.is_match(&value) {
|
||||
Ok(Id(InternedString::intern(value)))
|
||||
} else {
|
||||
Err(InvalidId)
|
||||
Err(InvalidId(InternedString::intern(value)))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -63,7 +63,7 @@ impl TryFrom<&str> for Id {
|
||||
if ID_REGEX.is_match(value) {
|
||||
Ok(Id(InternedString::intern(value)))
|
||||
} else {
|
||||
Err(InvalidId)
|
||||
Err(InvalidId(InternedString::intern(value)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,7 +14,7 @@ keywords = [
|
||||
name = "start-os"
|
||||
readme = "README.md"
|
||||
repository = "https://github.com/Start9Labs/start-os"
|
||||
version = "0.3.6-alpha.9"
|
||||
version = "0.3.6-alpha.13" # VERSION_BUMP
|
||||
license = "MIT"
|
||||
|
||||
[lib]
|
||||
@@ -50,7 +50,7 @@ test = []
|
||||
|
||||
[dependencies]
|
||||
aes = { version = "0.7.5", features = ["ctr"] }
|
||||
async-acme = { version = "0.5.0", git = "https://github.com/dr-bonez/async-acme.git", features = [
|
||||
async-acme = { version = "0.6.0", git = "https://github.com/dr-bonez/async-acme.git", features = [
|
||||
"use_rustls",
|
||||
"use_tokio",
|
||||
] }
|
||||
@@ -62,7 +62,6 @@ async-compression = { version = "0.4.4", features = [
|
||||
async-stream = "0.3.5"
|
||||
async-trait = "0.1.74"
|
||||
axum = { version = "0.7.3", features = ["ws"] }
|
||||
axum-server = "0.6.0"
|
||||
barrage = "0.2.3"
|
||||
backhand = "0.18.0"
|
||||
base32 = "0.5.0"
|
||||
@@ -76,6 +75,7 @@ clap = "4.4.12"
|
||||
color-eyre = "0.6.2"
|
||||
console = "0.15.7"
|
||||
console-subscriber = { version = "0.3.0", optional = true }
|
||||
const_format = "0.2.34"
|
||||
cookie = "0.18.0"
|
||||
cookie_store = "0.21.0"
|
||||
der = { version = "0.7.9", features = ["derive", "pem"] }
|
||||
@@ -102,18 +102,22 @@ hex = "0.4.3"
|
||||
hmac = "0.12.1"
|
||||
http = "1.0.0"
|
||||
http-body-util = "0.1"
|
||||
hyper-util = { version = "0.1.5", features = [
|
||||
"tokio",
|
||||
hyper = { version = "1.5", features = ["server", "http1", "http2"] }
|
||||
hyper-util = { version = "0.1.10", features = [
|
||||
"server",
|
||||
"server-auto",
|
||||
"server-graceful",
|
||||
"service",
|
||||
"http1",
|
||||
"http2",
|
||||
"tokio",
|
||||
] }
|
||||
id-pool = { version = "0.2.2", default-features = false, features = [
|
||||
"serde",
|
||||
"u16",
|
||||
] }
|
||||
imbl = "2.0.3"
|
||||
imbl-value = { git = "https://github.com/Start9Labs/imbl-value.git" }
|
||||
imbl-value = "0.1.2"
|
||||
include_dir = { version = "0.7.3", features = ["metadata"] }
|
||||
indexmap = { version = "2.0.2", features = ["serde"] }
|
||||
indicatif = { version = "0.17.7", features = ["tokio"] }
|
||||
@@ -131,12 +135,14 @@ lazy_format = "2.0"
|
||||
lazy_static = "1.4.0"
|
||||
libc = "0.2.149"
|
||||
log = "0.4.20"
|
||||
mio = "1"
|
||||
mbrman = "0.5.2"
|
||||
models = { version = "*", path = "../models" }
|
||||
new_mime_guess = "4"
|
||||
nix = { version = "0.29.0", features = [
|
||||
"fs",
|
||||
"mount",
|
||||
"net",
|
||||
"process",
|
||||
"sched",
|
||||
"signal",
|
||||
@@ -166,7 +172,7 @@ regex = "1.10.2"
|
||||
reqwest = { version = "0.12.4", features = ["stream", "json", "socks"] }
|
||||
reqwest_cookie_store = "0.8.0"
|
||||
rpassword = "7.2.0"
|
||||
rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git", branch = "refactor/no-dyn-ctx" }
|
||||
rpc-toolkit = { git = "https://github.com/Start9Labs/rpc-toolkit.git", branch = "master" }
|
||||
rust-argon2 = "2.0.0"
|
||||
rustyline-async = "0.4.1"
|
||||
semver = { version = "1.0.20", features = ["serde"] }
|
||||
@@ -216,7 +222,11 @@ unix-named-pipe = "0.2.0"
|
||||
url = { version = "2.4.1", features = ["serde"] }
|
||||
urlencoding = "2.1.3"
|
||||
uuid = { version = "1.4.1", features = ["v4"] }
|
||||
zbus = "5.1.1"
|
||||
zeroize = "1.6.0"
|
||||
mail-send = { git = "https://github.com/dr-bonez/mail-send.git", branch = "main" }
|
||||
rustls = "0.23.20"
|
||||
rustls-pki-types = { version = "1.10.1", features = ["alloc"] }
|
||||
|
||||
[profile.test]
|
||||
opt-level = 3
|
||||
|
||||
@@ -24,7 +24,7 @@ pub struct AccountInfo {
|
||||
pub server_id: String,
|
||||
pub hostname: Hostname,
|
||||
pub password: String,
|
||||
pub tor_key: TorSecretKeyV3,
|
||||
pub tor_keys: Vec<TorSecretKeyV3>,
|
||||
pub root_ca_key: PKey<Private>,
|
||||
pub root_ca_cert: X509,
|
||||
pub ssh_key: ssh_key::PrivateKey,
|
||||
@@ -34,7 +34,7 @@ impl AccountInfo {
|
||||
pub fn new(password: &str, start_time: SystemTime) -> Result<Self, Error> {
|
||||
let server_id = generate_id();
|
||||
let hostname = generate_hostname();
|
||||
let tor_key = TorSecretKeyV3::generate();
|
||||
let tor_key = vec![TorSecretKeyV3::generate()];
|
||||
let root_ca_key = generate_key()?;
|
||||
let root_ca_cert = make_root_cert(&root_ca_key, &hostname, start_time)?;
|
||||
let ssh_key = ssh_key::PrivateKey::from(ssh_key::private::Ed25519Keypair::random(
|
||||
@@ -45,7 +45,7 @@ impl AccountInfo {
|
||||
server_id,
|
||||
hostname,
|
||||
password: hash_password(password)?,
|
||||
tor_key,
|
||||
tor_keys: tor_key,
|
||||
root_ca_key,
|
||||
root_ca_cert,
|
||||
ssh_key,
|
||||
@@ -58,8 +58,11 @@ impl AccountInfo {
|
||||
let hostname = Hostname(db.as_public().as_server_info().as_hostname().de()?);
|
||||
let password = db.as_private().as_password().de()?;
|
||||
let key_store = db.as_private().as_key_store();
|
||||
let tor_addr = db.as_public().as_server_info().as_onion_address().de()?;
|
||||
let tor_key = key_store.as_onion().get_key(&tor_addr)?;
|
||||
let tor_addrs = db.as_public().as_server_info().as_host().as_onions().de()?;
|
||||
let tor_keys = tor_addrs
|
||||
.into_iter()
|
||||
.map(|tor_addr| key_store.as_onion().get_key(&tor_addr))
|
||||
.collect::<Result<_, _>>()?;
|
||||
let cert_store = key_store.as_local_certs();
|
||||
let root_ca_key = cert_store.as_root_key().de()?.0;
|
||||
let root_ca_cert = cert_store.as_root_cert().de()?.0;
|
||||
@@ -70,7 +73,7 @@ impl AccountInfo {
|
||||
server_id,
|
||||
hostname,
|
||||
password,
|
||||
tor_key,
|
||||
tor_keys,
|
||||
root_ca_key,
|
||||
root_ca_cert,
|
||||
ssh_key,
|
||||
@@ -82,17 +85,16 @@ impl AccountInfo {
|
||||
let server_info = db.as_public_mut().as_server_info_mut();
|
||||
server_info.as_id_mut().ser(&self.server_id)?;
|
||||
server_info.as_hostname_mut().ser(&self.hostname.0)?;
|
||||
server_info
|
||||
.as_lan_address_mut()
|
||||
.ser(&self.hostname.lan_address().parse()?)?;
|
||||
server_info
|
||||
.as_pubkey_mut()
|
||||
.ser(&self.ssh_key.public_key().to_openssh()?)?;
|
||||
let onion_address = self.tor_key.public().get_onion_address();
|
||||
server_info.as_onion_address_mut().ser(&onion_address)?;
|
||||
server_info
|
||||
.as_tor_address_mut()
|
||||
.ser(&format!("https://{onion_address}").parse()?)?;
|
||||
server_info.as_host_mut().as_onions_mut().ser(
|
||||
&self
|
||||
.tor_keys
|
||||
.iter()
|
||||
.map(|tor_key| tor_key.public().get_onion_address())
|
||||
.collect(),
|
||||
)?;
|
||||
db.as_private_mut().as_password_mut().ser(&self.password)?;
|
||||
db.as_private_mut()
|
||||
.as_ssh_privkey_mut()
|
||||
@@ -101,7 +103,9 @@ impl AccountInfo {
|
||||
.as_compat_s9pk_key_mut()
|
||||
.ser(Pem::new_ref(&self.compat_s9pk_key))?;
|
||||
let key_store = db.as_private_mut().as_key_store_mut();
|
||||
key_store.as_onion_mut().insert_key(&self.tor_key)?;
|
||||
for tor_key in &self.tor_keys {
|
||||
key_store.as_onion_mut().insert_key(tor_key)?;
|
||||
}
|
||||
let cert_store = key_store.as_local_certs_mut();
|
||||
cert_store
|
||||
.as_root_key_mut()
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::fmt;
|
||||
|
||||
use clap::{CommandFactory, FromArgMatches, Parser};
|
||||
|
||||
@@ -187,9 +187,8 @@ pub fn check_password_against_db(db: &DatabaseModel, password: &str) -> Result<(
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Parser, TS)]
|
||||
#[derive(Deserialize, Serialize, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[command(rename_all = "kebab-case")]
|
||||
#[ts(export)]
|
||||
pub struct LoginParams {
|
||||
password: Option<PasswordType>,
|
||||
|
||||
@@ -85,7 +85,7 @@ impl OsBackupV0 {
|
||||
&mut rand::thread_rng(),
|
||||
ssh_key::Algorithm::Ed25519,
|
||||
)?,
|
||||
tor_key: TorSecretKeyV3::from(self.tor_key.0),
|
||||
tor_keys: vec![TorSecretKeyV3::from(self.tor_key.0)],
|
||||
compat_s9pk_key: ed25519_dalek::SigningKey::generate(&mut rand::thread_rng()),
|
||||
},
|
||||
ui: self.ui,
|
||||
@@ -114,7 +114,7 @@ impl OsBackupV1 {
|
||||
root_ca_key: self.root_ca_key.0,
|
||||
root_ca_cert: self.root_ca_cert.0,
|
||||
ssh_key: ssh_key::PrivateKey::from(Ed25519Keypair::from_seed(&self.net_key.0)),
|
||||
tor_key: TorSecretKeyV3::from(ed25519_expand_key(&self.net_key.0)),
|
||||
tor_keys: vec![TorSecretKeyV3::from(ed25519_expand_key(&self.net_key.0))],
|
||||
compat_s9pk_key: ed25519_dalek::SigningKey::from_bytes(&self.net_key),
|
||||
},
|
||||
ui: self.ui,
|
||||
@@ -132,7 +132,7 @@ struct OsBackupV2 {
|
||||
root_ca_key: Pem<PKey<Private>>, // PEM Encoded OpenSSL Key
|
||||
root_ca_cert: Pem<X509>, // PEM Encoded OpenSSL X509 Certificate
|
||||
ssh_key: Pem<ssh_key::PrivateKey>, // PEM Encoded OpenSSH Key
|
||||
tor_key: TorSecretKeyV3, // Base64 Encoded Ed25519 Expanded Secret Key
|
||||
tor_keys: Vec<TorSecretKeyV3>, // Base64 Encoded Ed25519 Expanded Secret Key
|
||||
compat_s9pk_key: Pem<ed25519_dalek::SigningKey>, // PEM Encoded ED25519 Key
|
||||
ui: Value, // JSON Value
|
||||
}
|
||||
@@ -146,7 +146,7 @@ impl OsBackupV2 {
|
||||
root_ca_key: self.root_ca_key.0,
|
||||
root_ca_cert: self.root_ca_cert.0,
|
||||
ssh_key: self.ssh_key.0,
|
||||
tor_key: self.tor_key,
|
||||
tor_keys: self.tor_keys,
|
||||
compat_s9pk_key: self.compat_s9pk_key.0,
|
||||
},
|
||||
ui: self.ui,
|
||||
@@ -159,7 +159,7 @@ impl OsBackupV2 {
|
||||
root_ca_key: Pem(backup.account.root_ca_key.clone()),
|
||||
root_ca_cert: Pem(backup.account.root_ca_cert.clone()),
|
||||
ssh_key: Pem(backup.account.ssh_key.clone()),
|
||||
tor_key: backup.account.tor_key.clone(),
|
||||
tor_keys: backup.account.tor_keys.clone(),
|
||||
compat_s9pk_key: Pem(backup.account.compat_s9pk_key.clone()),
|
||||
ui: backup.ui.clone(),
|
||||
}
|
||||
|
||||
@@ -18,7 +18,7 @@ use crate::db::model::Database;
|
||||
use crate::disk::mount::backup::BackupMountGuard;
|
||||
use crate::disk::mount::filesystem::ReadWrite;
|
||||
use crate::disk::mount::guard::{GenericMountGuard, TmpMountGuard};
|
||||
use crate::init::{init, InitResult};
|
||||
use crate::init::init;
|
||||
use crate::prelude::*;
|
||||
use crate::s9pk::S9pk;
|
||||
use crate::service::service_map::DownloadInstallFuture;
|
||||
@@ -109,12 +109,13 @@ pub async fn recover_full_embassy(
|
||||
db.put(&ROOT, &Database::init(&os_backup.account)?).await?;
|
||||
drop(db);
|
||||
|
||||
let InitResult { net_ctrl } = init(&ctx.config, init_phases).await?;
|
||||
let init_result = init(&ctx.webserver, &ctx.config, init_phases).await?;
|
||||
|
||||
let rpc_ctx = RpcContext::init(
|
||||
&ctx.webserver,
|
||||
&ctx.config,
|
||||
disk_guid.clone(),
|
||||
Some(net_ctrl),
|
||||
Some(init_result),
|
||||
rpc_ctx_phases,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -4,7 +4,7 @@ use rpc_toolkit::CliApp;
|
||||
use serde_json::Value;
|
||||
|
||||
use crate::service::cli::{ContainerCliContext, ContainerClientConfig};
|
||||
use crate::util::logger::EmbassyLogger;
|
||||
use crate::util::logger::LOGGER;
|
||||
use crate::version::{Current, VersionT};
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
@@ -12,7 +12,7 @@ lazy_static::lazy_static! {
|
||||
}
|
||||
|
||||
pub fn main(args: impl IntoIterator<Item = OsString>) {
|
||||
EmbassyLogger::init();
|
||||
LOGGER.enable();
|
||||
if let Err(e) = CliApp::new(
|
||||
|cfg: ContainerClientConfig| Ok(ContainerCliContext::init(cfg)),
|
||||
crate::service::effects::handler(),
|
||||
|
||||
@@ -5,16 +5,16 @@ use futures::FutureExt;
|
||||
use tokio::signal::unix::signal;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::net::web_server::WebServer;
|
||||
use crate::net::web_server::{Acceptor, WebServer};
|
||||
use crate::prelude::*;
|
||||
use crate::registry::context::{RegistryConfig, RegistryContext};
|
||||
use crate::util::logger::EmbassyLogger;
|
||||
use crate::util::logger::LOGGER;
|
||||
|
||||
#[instrument(skip_all)]
|
||||
async fn inner_main(config: &RegistryConfig) -> Result<(), Error> {
|
||||
let server = async {
|
||||
let ctx = RegistryContext::init(config).await?;
|
||||
let mut server = WebServer::new(ctx.listen);
|
||||
let mut server = WebServer::new(Acceptor::bind([ctx.listen]).await?);
|
||||
server.serve_registry(ctx.clone());
|
||||
|
||||
let mut shutdown_recv = ctx.shutdown.subscribe();
|
||||
@@ -63,7 +63,7 @@ async fn inner_main(config: &RegistryConfig) -> Result<(), Error> {
|
||||
}
|
||||
|
||||
pub fn main(args: impl IntoIterator<Item = OsString>) {
|
||||
EmbassyLogger::init();
|
||||
LOGGER.enable();
|
||||
|
||||
let config = RegistryConfig::parse_from(args).load().unwrap();
|
||||
|
||||
|
||||
@@ -5,7 +5,7 @@ use serde_json::Value;
|
||||
|
||||
use crate::context::config::ClientConfig;
|
||||
use crate::context::CliContext;
|
||||
use crate::util::logger::EmbassyLogger;
|
||||
use crate::util::logger::LOGGER;
|
||||
use crate::version::{Current, VersionT};
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
@@ -13,7 +13,8 @@ lazy_static::lazy_static! {
|
||||
}
|
||||
|
||||
pub fn main(args: impl IntoIterator<Item = OsString>) {
|
||||
EmbassyLogger::init();
|
||||
LOGGER.enable();
|
||||
|
||||
if let Err(e) = CliApp::new(
|
||||
|cfg: ClientConfig| Ok(CliContext::init(cfg.load()?)?),
|
||||
crate::expanded_api(),
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
|
||||
use tokio::process::Command;
|
||||
@@ -10,17 +11,17 @@ use crate::disk::fsck::RepairStrategy;
|
||||
use crate::disk::main::DEFAULT_PASSWORD;
|
||||
use crate::disk::REPAIR_DISK_PATH;
|
||||
use crate::firmware::{check_for_firmware_update, update_firmware};
|
||||
use crate::init::{InitPhases, InitResult, STANDBY_MODE_PATH};
|
||||
use crate::net::web_server::WebServer;
|
||||
use crate::init::{InitPhases, STANDBY_MODE_PATH};
|
||||
use crate::net::web_server::{UpgradableListener, WebServer};
|
||||
use crate::prelude::*;
|
||||
use crate::progress::FullProgressTracker;
|
||||
use crate::shutdown::Shutdown;
|
||||
use crate::util::Invoke;
|
||||
use crate::PLATFORM;
|
||||
use crate::{DATA_DIR, PLATFORM};
|
||||
|
||||
#[instrument(skip_all)]
|
||||
async fn setup_or_init(
|
||||
server: &mut WebServer,
|
||||
server: &mut WebServer<UpgradableListener>,
|
||||
config: &ServerConfig,
|
||||
) -> Result<Result<(RpcContext, FullProgressTracker), Shutdown>, Error> {
|
||||
if let Some(firmware) = check_for_firmware_update()
|
||||
@@ -111,7 +112,7 @@ async fn setup_or_init(
|
||||
.await
|
||||
.is_err()
|
||||
{
|
||||
let ctx = SetupContext::init(config)?;
|
||||
let ctx = SetupContext::init(server, config)?;
|
||||
|
||||
server.serve_setup(ctx.clone());
|
||||
|
||||
@@ -156,7 +157,7 @@ async fn setup_or_init(
|
||||
let disk_guid = Arc::new(String::from(guid_string.trim()));
|
||||
let requires_reboot = crate::disk::main::import(
|
||||
&**disk_guid,
|
||||
config.datadir(),
|
||||
DATA_DIR,
|
||||
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
|
||||
RepairStrategy::Aggressive
|
||||
} else {
|
||||
@@ -178,18 +179,26 @@ async fn setup_or_init(
|
||||
tracing::info!("Loaded Disk");
|
||||
|
||||
if requires_reboot.0 {
|
||||
tracing::info!("Rebooting...");
|
||||
let mut reboot_phase = handle.add_phase("Rebooting".into(), Some(1));
|
||||
reboot_phase.start();
|
||||
return Ok(Err(Shutdown {
|
||||
export_args: Some((disk_guid, config.datadir().to_owned())),
|
||||
export_args: Some((disk_guid, Path::new(DATA_DIR).to_owned())),
|
||||
restart: true,
|
||||
}));
|
||||
}
|
||||
|
||||
let InitResult { net_ctrl } = crate::init::init(config, init_phases).await?;
|
||||
let init_result =
|
||||
crate::init::init(&server.acceptor_setter(), config, init_phases).await?;
|
||||
|
||||
let rpc_ctx =
|
||||
RpcContext::init(config, disk_guid, Some(net_ctrl), rpc_ctx_phases).await?;
|
||||
let rpc_ctx = RpcContext::init(
|
||||
&server.acceptor_setter(),
|
||||
config,
|
||||
disk_guid,
|
||||
Some(init_result),
|
||||
rpc_ctx_phases,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok::<_, Error>(Ok((rpc_ctx, handle)))
|
||||
}
|
||||
@@ -203,7 +212,7 @@ async fn setup_or_init(
|
||||
|
||||
#[instrument(skip_all)]
|
||||
pub async fn main(
|
||||
server: &mut WebServer,
|
||||
server: &mut WebServer<UpgradableListener>,
|
||||
config: &ServerConfig,
|
||||
) -> Result<Result<(RpcContext, FullProgressTracker), Shutdown>, Error> {
|
||||
if &*PLATFORM == "raspberrypi" && tokio::fs::metadata(STANDBY_MODE_PATH).await.is_ok() {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
use std::cmp::max;
|
||||
use std::ffi::OsString;
|
||||
use std::net::{Ipv6Addr, SocketAddr};
|
||||
use std::net::IpAddr;
|
||||
use std::sync::Arc;
|
||||
|
||||
use clap::Parser;
|
||||
@@ -12,21 +12,27 @@ use tracing::instrument;
|
||||
use crate::context::config::ServerConfig;
|
||||
use crate::context::rpc::InitRpcContextPhases;
|
||||
use crate::context::{DiagnosticContext, InitContext, RpcContext};
|
||||
use crate::net::web_server::WebServer;
|
||||
use crate::net::network_interface::SelfContainedNetworkInterfaceListener;
|
||||
use crate::net::utils::ipv6_is_local;
|
||||
use crate::net::web_server::{Acceptor, UpgradableListener, WebServer};
|
||||
use crate::shutdown::Shutdown;
|
||||
use crate::system::launch_metrics_task;
|
||||
use crate::util::logger::EmbassyLogger;
|
||||
use crate::util::io::append_file;
|
||||
use crate::util::logger::LOGGER;
|
||||
use crate::{Error, ErrorKind, ResultExt};
|
||||
|
||||
#[instrument(skip_all)]
|
||||
async fn inner_main(
|
||||
server: &mut WebServer,
|
||||
server: &mut WebServer<UpgradableListener>,
|
||||
config: &ServerConfig,
|
||||
) -> Result<Option<Shutdown>, Error> {
|
||||
let rpc_ctx = if !tokio::fs::metadata("/run/startos/initialized")
|
||||
.await
|
||||
.is_ok()
|
||||
{
|
||||
LOGGER.set_logfile(Some(
|
||||
append_file("/run/startos/init.log").await?.into_std().await,
|
||||
));
|
||||
let (ctx, handle) = match super::start_init::main(server, &config).await? {
|
||||
Err(s) => return Ok(Some(s)),
|
||||
Ok(ctx) => ctx,
|
||||
@@ -34,6 +40,7 @@ async fn inner_main(
|
||||
tokio::fs::write("/run/startos/initialized", "").await?;
|
||||
|
||||
server.serve_main(ctx.clone());
|
||||
LOGGER.set_logfile(None);
|
||||
handle.complete();
|
||||
|
||||
ctx
|
||||
@@ -44,6 +51,7 @@ async fn inner_main(
|
||||
server.serve_init(init_ctx);
|
||||
|
||||
let ctx = RpcContext::init(
|
||||
&server.acceptor_setter(),
|
||||
config,
|
||||
Arc::new(
|
||||
tokio::fs::read_to_string("/media/startos/config/disk.guid") // unique identifier for volume group - keeps track of the disk that goes with your embassy
|
||||
@@ -131,7 +139,7 @@ async fn inner_main(
|
||||
}
|
||||
|
||||
pub fn main(args: impl IntoIterator<Item = OsString>) {
|
||||
EmbassyLogger::init();
|
||||
LOGGER.enable();
|
||||
|
||||
let config = ServerConfig::parse_from(args).load().unwrap();
|
||||
|
||||
@@ -142,7 +150,10 @@ pub fn main(args: impl IntoIterator<Item = OsString>) {
|
||||
.build()
|
||||
.expect("failed to initialize runtime");
|
||||
rt.block_on(async {
|
||||
let mut server = WebServer::new(SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), 80));
|
||||
let addrs = crate::net::utils::all_socket_addrs_for(80).await?;
|
||||
let mut server = WebServer::new(Acceptor::bind_upgradable(
|
||||
SelfContainedNetworkInterfaceListener::bind(80),
|
||||
));
|
||||
match inner_main(&mut server, &config).await {
|
||||
Ok(a) => {
|
||||
server.shutdown().await;
|
||||
|
||||
@@ -13,6 +13,7 @@ use crate::disk::OsPartitionInfo;
|
||||
use crate::init::init_postgres;
|
||||
use crate::prelude::*;
|
||||
use crate::util::serde::IoFormat;
|
||||
use crate::MAIN_DATA;
|
||||
|
||||
pub const DEVICE_CONFIG_PATH: &str = "/media/startos/config/config.yaml"; // "/media/startos/config/config.yaml";
|
||||
pub const CONFIG_PATH: &str = "/etc/startos/config.yaml";
|
||||
@@ -103,17 +104,11 @@ pub struct ServerConfig {
|
||||
#[arg(skip)]
|
||||
pub os_partitions: Option<OsPartitionInfo>,
|
||||
#[arg(long)]
|
||||
pub bind_rpc: Option<SocketAddr>,
|
||||
#[arg(long)]
|
||||
pub tor_control: Option<SocketAddr>,
|
||||
#[arg(long)]
|
||||
pub tor_socks: Option<SocketAddr>,
|
||||
#[arg(long)]
|
||||
pub dns_bind: Option<Vec<SocketAddr>>,
|
||||
#[arg(long)]
|
||||
pub revision_cache_size: Option<usize>,
|
||||
#[arg(short, long)]
|
||||
pub datadir: Option<PathBuf>,
|
||||
#[arg(long)]
|
||||
pub disable_encryption: Option<bool>,
|
||||
#[arg(long)]
|
||||
@@ -126,15 +121,12 @@ impl ContextConfig for ServerConfig {
|
||||
fn merge_with(&mut self, other: Self) {
|
||||
self.ethernet_interface = self.ethernet_interface.take().or(other.ethernet_interface);
|
||||
self.os_partitions = self.os_partitions.take().or(other.os_partitions);
|
||||
self.bind_rpc = self.bind_rpc.take().or(other.bind_rpc);
|
||||
self.tor_control = self.tor_control.take().or(other.tor_control);
|
||||
self.tor_socks = self.tor_socks.take().or(other.tor_socks);
|
||||
self.dns_bind = self.dns_bind.take().or(other.dns_bind);
|
||||
self.revision_cache_size = self
|
||||
.revision_cache_size
|
||||
.take()
|
||||
.or(other.revision_cache_size);
|
||||
self.datadir = self.datadir.take().or(other.datadir);
|
||||
self.disable_encryption = self.disable_encryption.take().or(other.disable_encryption);
|
||||
self.multi_arch_s9pks = self.multi_arch_s9pks.take().or(other.multi_arch_s9pks);
|
||||
}
|
||||
@@ -148,13 +140,8 @@ impl ServerConfig {
|
||||
self.load_path_rec(Some(CONFIG_PATH))?;
|
||||
Ok(self)
|
||||
}
|
||||
pub fn datadir(&self) -> &Path {
|
||||
self.datadir
|
||||
.as_deref()
|
||||
.unwrap_or_else(|| Path::new("/embassy-data"))
|
||||
}
|
||||
pub async fn db(&self) -> Result<PatchDb, Error> {
|
||||
let db_path = self.datadir().join("main").join("embassy.db");
|
||||
let db_path = Path::new(MAIN_DATA).join("embassy.db");
|
||||
let db = PatchDb::open(&db_path)
|
||||
.await
|
||||
.with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
|
||||
@@ -163,7 +150,7 @@ impl ServerConfig {
|
||||
}
|
||||
#[instrument(skip_all)]
|
||||
pub async fn secret_store(&self) -> Result<PgPool, Error> {
|
||||
init_postgres(self.datadir()).await?;
|
||||
init_postgres("/media/startos/data").await?;
|
||||
let secret_store =
|
||||
PgPool::connect_with(PgConnectOptions::new().database("secrets").username("root"))
|
||||
.await?;
|
||||
|
||||
@@ -1,5 +1,4 @@
|
||||
use std::ops::Deref;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::Arc;
|
||||
|
||||
use rpc_toolkit::yajrc::RpcError;
|
||||
@@ -13,7 +12,6 @@ use crate::shutdown::Shutdown;
|
||||
use crate::Error;
|
||||
|
||||
pub struct DiagnosticContextSeed {
|
||||
pub datadir: PathBuf,
|
||||
pub shutdown: Sender<Shutdown>,
|
||||
pub error: Arc<RpcError>,
|
||||
pub disk_guid: Option<Arc<String>>,
|
||||
@@ -25,7 +23,7 @@ pub struct DiagnosticContext(Arc<DiagnosticContextSeed>);
|
||||
impl DiagnosticContext {
|
||||
#[instrument(skip_all)]
|
||||
pub fn init(
|
||||
config: &ServerConfig,
|
||||
_config: &ServerConfig,
|
||||
disk_guid: Option<Arc<String>>,
|
||||
error: Error,
|
||||
) -> Result<Self, Error> {
|
||||
@@ -35,7 +33,6 @@ impl DiagnosticContext {
|
||||
let (shutdown, _) = tokio::sync::broadcast::channel(1);
|
||||
|
||||
Ok(Self(Arc::new(DiagnosticContextSeed {
|
||||
datadir: config.datadir().to_owned(),
|
||||
shutdown,
|
||||
disk_guid,
|
||||
error: Arc::new(error.into()),
|
||||
|
||||
@@ -2,7 +2,6 @@ use std::collections::{BTreeMap, BTreeSet};
|
||||
use std::future::Future;
|
||||
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
|
||||
use std::ops::Deref;
|
||||
use std::path::PathBuf;
|
||||
use std::sync::atomic::{AtomicBool, Ordering};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
@@ -27,10 +26,11 @@ use crate::auth::Sessions;
|
||||
use crate::context::config::ServerConfig;
|
||||
use crate::db::model::Database;
|
||||
use crate::disk::OsPartitionInfo;
|
||||
use crate::init::check_time_is_synchronized;
|
||||
use crate::init::{check_time_is_synchronized, InitResult};
|
||||
use crate::lxc::{ContainerId, LxcContainer, LxcManager};
|
||||
use crate::net::net_controller::{NetController, PreInitNetController};
|
||||
use crate::net::net_controller::{NetController, NetService};
|
||||
use crate::net::utils::{find_eth_iface, find_wifi_iface};
|
||||
use crate::net::web_server::{UpgradableListener, WebServerAcceptorSetter};
|
||||
use crate::net::wifi::WpaCli;
|
||||
use crate::prelude::*;
|
||||
use crate::progress::{FullProgressTracker, PhaseProgressTrackerHandle};
|
||||
@@ -47,13 +47,13 @@ pub struct RpcContextSeed {
|
||||
pub os_partitions: OsPartitionInfo,
|
||||
pub wifi_interface: Option<String>,
|
||||
pub ethernet_interface: String,
|
||||
pub datadir: PathBuf,
|
||||
pub disk_guid: Arc<String>,
|
||||
pub ephemeral_sessions: SyncMutex<Sessions>,
|
||||
pub db: TypedPatchDb<Database>,
|
||||
pub sync_db: watch::Sender<u64>,
|
||||
pub account: RwLock<AccountInfo>,
|
||||
pub net_controller: Arc<NetController>,
|
||||
pub os_net_service: NetService,
|
||||
pub s9pk_arch: Option<&'static str>,
|
||||
pub services: ServiceMap,
|
||||
pub metrics_cache: RwLock<Option<crate::system::Metrics>>,
|
||||
@@ -85,7 +85,7 @@ pub struct InitRpcContextPhases {
|
||||
load_db: PhaseProgressTrackerHandle,
|
||||
init_net_ctrl: PhaseProgressTrackerHandle,
|
||||
cleanup_init: CleanupInitPhases,
|
||||
// TODO: migrations
|
||||
run_migrations: PhaseProgressTrackerHandle,
|
||||
}
|
||||
impl InitRpcContextPhases {
|
||||
pub fn new(handle: &FullProgressTracker) -> Self {
|
||||
@@ -93,6 +93,7 @@ impl InitRpcContextPhases {
|
||||
load_db: handle.add_phase("Loading database".into(), Some(5)),
|
||||
init_net_ctrl: handle.add_phase("Initializing network".into(), Some(1)),
|
||||
cleanup_init: CleanupInitPhases::new(handle),
|
||||
run_migrations: handle.add_phase("Running migrations".into(), Some(10)),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -117,13 +118,15 @@ pub struct RpcContext(Arc<RpcContextSeed>);
|
||||
impl RpcContext {
|
||||
#[instrument(skip_all)]
|
||||
pub async fn init(
|
||||
webserver: &WebServerAcceptorSetter<UpgradableListener>,
|
||||
config: &ServerConfig,
|
||||
disk_guid: Arc<String>,
|
||||
net_ctrl: Option<PreInitNetController>,
|
||||
init_result: Option<InitResult>,
|
||||
InitRpcContextPhases {
|
||||
mut load_db,
|
||||
mut init_net_ctrl,
|
||||
cleanup_init,
|
||||
run_migrations,
|
||||
}: InitRpcContextPhases,
|
||||
) -> Result<Self, Error> {
|
||||
let tor_proxy = config.tor_socks.unwrap_or(SocketAddr::V4(SocketAddrV4::new(
|
||||
@@ -133,7 +136,7 @@ impl RpcContext {
|
||||
let (shutdown, _) = tokio::sync::broadcast::channel(1);
|
||||
|
||||
load_db.start();
|
||||
let db = if let Some(net_ctrl) = &net_ctrl {
|
||||
let db = if let Some(InitResult { net_ctrl, .. }) = &init_result {
|
||||
net_ctrl.db.clone()
|
||||
} else {
|
||||
TypedPatchDb::<Database>::load(config.db().await?).await?
|
||||
@@ -144,29 +147,28 @@ impl RpcContext {
|
||||
tracing::info!("Opened PatchDB");
|
||||
|
||||
init_net_ctrl.start();
|
||||
let net_controller = Arc::new(
|
||||
NetController::init(
|
||||
if let Some(net_ctrl) = net_ctrl {
|
||||
net_ctrl
|
||||
} else {
|
||||
PreInitNetController::init(
|
||||
db.clone(),
|
||||
config
|
||||
.tor_control
|
||||
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
|
||||
tor_proxy,
|
||||
&account.hostname,
|
||||
account.tor_key.clone(),
|
||||
)
|
||||
.await?
|
||||
},
|
||||
config
|
||||
.dns_bind
|
||||
.as_deref()
|
||||
.unwrap_or(&[SocketAddr::from(([127, 0, 0, 1], 53))]),
|
||||
)
|
||||
.await?,
|
||||
);
|
||||
let (net_controller, os_net_service) = if let Some(InitResult {
|
||||
net_ctrl,
|
||||
os_net_service,
|
||||
}) = init_result
|
||||
{
|
||||
(net_ctrl, os_net_service)
|
||||
} else {
|
||||
let net_ctrl = Arc::new(
|
||||
NetController::init(
|
||||
db.clone(),
|
||||
config
|
||||
.tor_control
|
||||
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
|
||||
tor_proxy,
|
||||
&account.hostname,
|
||||
)
|
||||
.await?,
|
||||
);
|
||||
webserver.try_upgrade(|a| net_ctrl.net_iface.upgrade_listener(a))?;
|
||||
let os_net_service = net_ctrl.os_bindings().await?;
|
||||
(net_ctrl, os_net_service)
|
||||
};
|
||||
init_net_ctrl.complete();
|
||||
tracing::info!("Initialized Net Controller");
|
||||
|
||||
@@ -210,7 +212,6 @@ impl RpcContext {
|
||||
|
||||
let seed = Arc::new(RpcContextSeed {
|
||||
is_closed: AtomicBool::new(false),
|
||||
datadir: config.datadir().to_path_buf(),
|
||||
os_partitions: config.os_partitions.clone().ok_or_else(|| {
|
||||
Error::new(
|
||||
eyre!("OS Partition Information Missing"),
|
||||
@@ -229,6 +230,7 @@ impl RpcContext {
|
||||
db,
|
||||
account: RwLock::new(account),
|
||||
net_controller,
|
||||
os_net_service,
|
||||
s9pk_arch: if config.multi_arch_s9pks.unwrap_or(false) {
|
||||
None
|
||||
} else {
|
||||
@@ -276,7 +278,9 @@ impl RpcContext {
|
||||
let res = Self(seed.clone());
|
||||
res.cleanup_and_initialize(cleanup_init).await?;
|
||||
tracing::info!("Cleaned up transient states");
|
||||
crate::version::post_init(&res).await?;
|
||||
|
||||
crate::version::post_init(&res, run_migrations).await?;
|
||||
tracing::info!("Completed migrations");
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
@@ -286,7 +290,6 @@ impl RpcContext {
|
||||
self.services.shutdown_all().await?;
|
||||
self.is_closed.store(true, Ordering::SeqCst);
|
||||
tracing::info!("RPC Context is shutdown");
|
||||
// TODO: shutdown http servers
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
use std::ops::Deref;
|
||||
use std::path::PathBuf;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
@@ -10,8 +10,6 @@ use josekit::jwk::Jwk;
|
||||
use patch_db::PatchDb;
|
||||
use rpc_toolkit::Context;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use sqlx::postgres::PgConnectOptions;
|
||||
use sqlx::PgPool;
|
||||
use tokio::sync::broadcast::Sender;
|
||||
use tokio::sync::OnceCell;
|
||||
use tracing::instrument;
|
||||
@@ -22,12 +20,13 @@ use crate::context::config::ServerConfig;
|
||||
use crate::context::RpcContext;
|
||||
use crate::disk::OsPartitionInfo;
|
||||
use crate::hostname::Hostname;
|
||||
use crate::init::init_postgres;
|
||||
use crate::net::web_server::{UpgradableListener, WebServer, WebServerAcceptorSetter};
|
||||
use crate::prelude::*;
|
||||
use crate::progress::FullProgressTracker;
|
||||
use crate::rpc_continuations::{Guid, RpcContinuation, RpcContinuations};
|
||||
use crate::setup::SetupProgress;
|
||||
use crate::util::net::WebSocketExt;
|
||||
use crate::MAIN_DATA;
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
pub static ref CURRENT_SECRET: Jwk = Jwk::generate_ec_key(josekit::jwk::alg::ec::EcCurve::P256).unwrap_or_else(|e| {
|
||||
@@ -41,7 +40,7 @@ lazy_static::lazy_static! {
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export)]
|
||||
pub struct SetupResult {
|
||||
pub tor_address: String,
|
||||
pub tor_addresses: Vec<String>,
|
||||
#[ts(type = "string")]
|
||||
pub hostname: Hostname,
|
||||
#[ts(type = "string")]
|
||||
@@ -52,7 +51,11 @@ impl TryFrom<&AccountInfo> for SetupResult {
|
||||
type Error = Error;
|
||||
fn try_from(value: &AccountInfo) -> Result<Self, Self::Error> {
|
||||
Ok(Self {
|
||||
tor_address: format!("https://{}", value.tor_key.public().get_onion_address()),
|
||||
tor_addresses: value
|
||||
.tor_keys
|
||||
.iter()
|
||||
.map(|tor_key| format!("https://{}", tor_key.public().get_onion_address()))
|
||||
.collect(),
|
||||
hostname: value.hostname.clone(),
|
||||
lan_address: value.hostname.lan_address(),
|
||||
root_ca: String::from_utf8(value.root_ca_cert.to_pem()?)?,
|
||||
@@ -61,6 +64,7 @@ impl TryFrom<&AccountInfo> for SetupResult {
|
||||
}
|
||||
|
||||
pub struct SetupContextSeed {
|
||||
pub webserver: WebServerAcceptorSetter<UpgradableListener>,
|
||||
pub config: ServerConfig,
|
||||
pub os_partitions: OsPartitionInfo,
|
||||
pub disable_encryption: bool,
|
||||
@@ -68,7 +72,6 @@ pub struct SetupContextSeed {
|
||||
pub task: OnceCell<NonDetachingJoinHandle<()>>,
|
||||
pub result: OnceCell<Result<(SetupResult, RpcContext), Error>>,
|
||||
pub shutdown: Sender<()>,
|
||||
pub datadir: PathBuf,
|
||||
pub rpc_continuations: RpcContinuations,
|
||||
}
|
||||
|
||||
@@ -76,10 +79,13 @@ pub struct SetupContextSeed {
|
||||
pub struct SetupContext(Arc<SetupContextSeed>);
|
||||
impl SetupContext {
|
||||
#[instrument(skip_all)]
|
||||
pub fn init(config: &ServerConfig) -> Result<Self, Error> {
|
||||
pub fn init(
|
||||
webserver: &WebServer<UpgradableListener>,
|
||||
config: &ServerConfig,
|
||||
) -> Result<Self, Error> {
|
||||
let (shutdown, _) = tokio::sync::broadcast::channel(1);
|
||||
let datadir = config.datadir().to_owned();
|
||||
Ok(Self(Arc::new(SetupContextSeed {
|
||||
webserver: webserver.acceptor_setter(),
|
||||
config: config.clone(),
|
||||
os_partitions: config.os_partitions.clone().ok_or_else(|| {
|
||||
Error::new(
|
||||
@@ -92,13 +98,12 @@ impl SetupContext {
|
||||
task: OnceCell::new(),
|
||||
result: OnceCell::new(),
|
||||
shutdown,
|
||||
datadir,
|
||||
rpc_continuations: RpcContinuations::new(),
|
||||
})))
|
||||
}
|
||||
#[instrument(skip_all)]
|
||||
pub async fn db(&self) -> Result<PatchDb, Error> {
|
||||
let db_path = self.datadir.join("main").join("embassy.db");
|
||||
let db_path = Path::new(MAIN_DATA).join("embassy.db");
|
||||
let db = PatchDb::open(&db_path)
|
||||
.await
|
||||
.with_ctx(|_| (crate::ErrorKind::Filesystem, db_path.display().to_string()))?;
|
||||
@@ -161,21 +166,30 @@ impl SetupContext {
|
||||
if let Err(e) = async {
|
||||
let mut stream =
|
||||
progress_tracker.stream(Some(Duration::from_millis(100)));
|
||||
while let Some(progress) = stream.next().await {
|
||||
ws.send(ws::Message::Text(
|
||||
serde_json::to_string(&progress)
|
||||
.with_kind(ErrorKind::Serialization)?,
|
||||
))
|
||||
.await
|
||||
.with_kind(ErrorKind::Network)?;
|
||||
if progress.overall.is_complete() {
|
||||
break;
|
||||
loop {
|
||||
tokio::select! {
|
||||
progress = stream.next() => {
|
||||
if let Some(progress) = progress {
|
||||
ws.send(ws::Message::Text(
|
||||
serde_json::to_string(&progress)
|
||||
.with_kind(ErrorKind::Serialization)?,
|
||||
))
|
||||
.await
|
||||
.with_kind(ErrorKind::Network)?;
|
||||
if progress.overall.is_complete() {
|
||||
return ws.normal_close("complete").await;
|
||||
}
|
||||
} else {
|
||||
return ws.normal_close("complete").await;
|
||||
}
|
||||
}
|
||||
msg = ws.recv() => {
|
||||
if msg.transpose().with_kind(ErrorKind::Network)?.is_none() {
|
||||
return Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ws.normal_close("complete").await?;
|
||||
|
||||
Ok::<_, Error>(())
|
||||
}
|
||||
.await
|
||||
{
|
||||
|
||||
@@ -198,17 +198,26 @@ pub async fn subscribe(
|
||||
session,
|
||||
|mut ws| async move {
|
||||
if let Err(e) = async {
|
||||
while let Some(rev) = sub.recv().await {
|
||||
ws.send(ws::Message::Text(
|
||||
serde_json::to_string(&rev).with_kind(ErrorKind::Serialization)?,
|
||||
))
|
||||
.await
|
||||
.with_kind(ErrorKind::Network)?;
|
||||
loop {
|
||||
tokio::select! {
|
||||
rev = sub.recv() => {
|
||||
if let Some(rev) = rev {
|
||||
ws.send(ws::Message::Text(
|
||||
serde_json::to_string(&rev).with_kind(ErrorKind::Serialization)?,
|
||||
))
|
||||
.await
|
||||
.with_kind(ErrorKind::Network)?;
|
||||
} else {
|
||||
return ws.normal_close("complete").await;
|
||||
}
|
||||
}
|
||||
msg = ws.recv() => {
|
||||
if msg.transpose().with_kind(ErrorKind::Network)?.is_none() {
|
||||
return Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ws.normal_close("complete").await?;
|
||||
|
||||
Ok::<_, Error>(())
|
||||
}
|
||||
.await
|
||||
{
|
||||
|
||||
@@ -1,28 +1,31 @@
|
||||
use std::collections::{BTreeMap, BTreeSet};
|
||||
use std::net::{Ipv4Addr, Ipv6Addr};
|
||||
use std::net::{IpAddr, Ipv4Addr};
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use exver::{Version, VersionRange};
|
||||
use imbl_value::InternedString;
|
||||
use ipnet::{Ipv4Net, Ipv6Net};
|
||||
use ipnet::IpNet;
|
||||
use isocountry::CountryCode;
|
||||
use itertools::Itertools;
|
||||
use models::PackageId;
|
||||
use openssl::hash::MessageDigest;
|
||||
use patch_db::{HasModel, Value};
|
||||
use reqwest::Url;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use torut::onion::OnionAddressV3;
|
||||
use ts_rs::TS;
|
||||
|
||||
use crate::account::AccountInfo;
|
||||
use crate::db::model::package::AllPackageData;
|
||||
use crate::net::utils::{get_iface_ipv4_addr, get_iface_ipv6_addr};
|
||||
use crate::net::acme::AcmeProvider;
|
||||
use crate::net::host::binding::{AddSslOptions, BindInfo, BindOptions, NetInfo};
|
||||
use crate::net::host::Host;
|
||||
use crate::net::utils::ipv6_is_local;
|
||||
use crate::net::vhost::AlpnInfo;
|
||||
use crate::prelude::*;
|
||||
use crate::progress::FullProgress;
|
||||
use crate::system::SmtpValue;
|
||||
use crate::util::cpupower::Governor;
|
||||
use crate::util::lshw::LshwDevice;
|
||||
use crate::util::serde::MaybeUtf8String;
|
||||
use crate::version::{Current, VersionT};
|
||||
use crate::{ARCH, PLATFORM};
|
||||
|
||||
@@ -38,7 +41,6 @@ pub struct Public {
|
||||
}
|
||||
impl Public {
|
||||
pub fn init(account: &AccountInfo) -> Result<Self, Error> {
|
||||
let lan_address = account.hostname.lan_address().parse().unwrap();
|
||||
Ok(Self {
|
||||
server_info: ServerInfo {
|
||||
arch: get_arch(),
|
||||
@@ -46,16 +48,44 @@ impl Public {
|
||||
id: account.server_id.clone(),
|
||||
version: Current::default().semver(),
|
||||
hostname: account.hostname.no_dot_host_name(),
|
||||
host: Host {
|
||||
bindings: [(
|
||||
80,
|
||||
BindInfo {
|
||||
enabled: false,
|
||||
options: BindOptions {
|
||||
preferred_external_port: 80,
|
||||
add_ssl: Some(AddSslOptions {
|
||||
preferred_external_port: 443,
|
||||
alpn: Some(AlpnInfo::Specified(vec![
|
||||
MaybeUtf8String("http/1.1".into()),
|
||||
MaybeUtf8String("h2".into()),
|
||||
])),
|
||||
}),
|
||||
secure: None,
|
||||
},
|
||||
net: NetInfo {
|
||||
assigned_port: None,
|
||||
assigned_ssl_port: Some(443),
|
||||
public: false,
|
||||
},
|
||||
},
|
||||
)]
|
||||
.into_iter()
|
||||
.collect(),
|
||||
onions: account
|
||||
.tor_keys
|
||||
.iter()
|
||||
.map(|k| k.public().get_onion_address())
|
||||
.collect(),
|
||||
domains: BTreeMap::new(),
|
||||
hostname_info: BTreeMap::new(),
|
||||
},
|
||||
last_backup: None,
|
||||
package_version_compat: Current::default().compat().clone(),
|
||||
post_init_migration_todos: BTreeSet::new(),
|
||||
lan_address,
|
||||
onion_address: account.tor_key.public().get_onion_address(),
|
||||
tor_address: format!("https://{}", account.tor_key.public().get_onion_address())
|
||||
.parse()
|
||||
.unwrap(),
|
||||
ip_info: BTreeMap::new(),
|
||||
acme: None,
|
||||
network_interfaces: BTreeMap::new(),
|
||||
acme: BTreeMap::new(),
|
||||
status_info: ServerStatus {
|
||||
backup_progress: None,
|
||||
updated: false,
|
||||
@@ -115,6 +145,7 @@ pub struct ServerInfo {
|
||||
pub id: String,
|
||||
#[ts(type = "string")]
|
||||
pub hostname: InternedString,
|
||||
pub host: Host,
|
||||
#[ts(type = "string")]
|
||||
pub version: Version,
|
||||
#[ts(type = "string")]
|
||||
@@ -123,15 +154,11 @@ pub struct ServerInfo {
|
||||
pub post_init_migration_todos: BTreeSet<Version>,
|
||||
#[ts(type = "string | null")]
|
||||
pub last_backup: Option<DateTime<Utc>>,
|
||||
#[ts(type = "string")]
|
||||
pub lan_address: Url,
|
||||
#[ts(type = "string")]
|
||||
pub onion_address: OnionAddressV3,
|
||||
/// for backwards compatibility
|
||||
#[ts(type = "string")]
|
||||
pub tor_address: Url,
|
||||
pub ip_info: BTreeMap<String, IpInfo>,
|
||||
pub acme: Option<AcmeSettings>,
|
||||
#[ts(as = "BTreeMap::<String, NetworkInterfaceInfo>")]
|
||||
#[serde(default)]
|
||||
pub network_interfaces: BTreeMap<InternedString, NetworkInterfaceInfo>,
|
||||
#[serde(default)]
|
||||
pub acme: BTreeMap<AcmeProvider, AcmeSettings>,
|
||||
#[serde(default)]
|
||||
pub status_info: ServerStatus,
|
||||
pub wifi: WifiInfo,
|
||||
@@ -151,43 +178,76 @@ pub struct ServerInfo {
|
||||
pub devices: Vec<LshwDevice>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
|
||||
#[derive(Clone, Debug, Default, Deserialize, Serialize, HasModel, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[model = "Model<Self>"]
|
||||
#[ts(export)]
|
||||
pub struct IpInfo {
|
||||
#[ts(type = "string | null")]
|
||||
pub ipv4_range: Option<Ipv4Net>,
|
||||
pub ipv4: Option<Ipv4Addr>,
|
||||
#[ts(type = "string | null")]
|
||||
pub ipv6_range: Option<Ipv6Net>,
|
||||
pub ipv6: Option<Ipv6Addr>,
|
||||
pub struct NetworkInterfaceInfo {
|
||||
pub public: Option<bool>,
|
||||
pub ip_info: Option<IpInfo>,
|
||||
}
|
||||
impl IpInfo {
|
||||
pub async fn for_interface(iface: &str) -> Result<Self, Error> {
|
||||
let (ipv4, ipv4_range) = get_iface_ipv4_addr(iface).await?.unzip();
|
||||
let (ipv6, ipv6_range) = get_iface_ipv6_addr(iface).await?.unzip();
|
||||
Ok(Self {
|
||||
ipv4_range,
|
||||
ipv4,
|
||||
ipv6_range,
|
||||
ipv6,
|
||||
impl NetworkInterfaceInfo {
|
||||
pub fn public(&self) -> bool {
|
||||
self.public.unwrap_or_else(|| {
|
||||
!self.ip_info.as_ref().map_or(true, |ip_info| {
|
||||
let ip4s = ip_info
|
||||
.subnets
|
||||
.iter()
|
||||
.filter_map(|ipnet| {
|
||||
if let IpAddr::V4(ip4) = ipnet.addr() {
|
||||
Some(ip4)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.collect::<BTreeSet<_>>();
|
||||
if !ip4s.is_empty() {
|
||||
return ip4s.iter().all(|ip4| {
|
||||
ip4.is_loopback()
|
||||
|| (ip4.is_private() && !ip4.octets().starts_with(&[10, 59])) // reserving 10.59 for public wireguard configurations
|
||||
|| ip4.is_link_local()
|
||||
});
|
||||
}
|
||||
ip_info.subnets.iter().all(|ipnet| {
|
||||
if let IpAddr::V6(ip6) = ipnet.addr() {
|
||||
ipv6_is_local(ip6)
|
||||
} else {
|
||||
true
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, Default, PartialEq, Eq, Deserialize, Serialize, TS)]
|
||||
#[ts(export)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct IpInfo {
|
||||
pub scope_id: u32,
|
||||
pub device_type: Option<NetworkInterfaceType>,
|
||||
#[ts(type = "string[]")]
|
||||
pub subnets: BTreeSet<IpNet>,
|
||||
pub wan_ip: Option<Ipv4Addr>,
|
||||
#[ts(type = "string[]")]
|
||||
pub ntp_servers: BTreeSet<InternedString>,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, Deserialize, Serialize, TS)]
|
||||
#[ts(export)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
pub enum NetworkInterfaceType {
|
||||
Ethernet,
|
||||
Wireless,
|
||||
Wireguard,
|
||||
}
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[model = "Model<Self>"]
|
||||
#[ts(export)]
|
||||
pub struct AcmeSettings {
|
||||
#[ts(type = "string")]
|
||||
pub provider: Url,
|
||||
/// email addresses for letsencrypt
|
||||
pub contact: Vec<String>,
|
||||
#[ts(type = "string[]")]
|
||||
/// domains to get letsencrypt certs for
|
||||
pub domains: BTreeSet<InternedString>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
|
||||
|
||||
@@ -10,7 +10,7 @@ use crate::context::{CliContext, DiagnosticContext, RpcContext};
|
||||
use crate::init::SYSTEM_REBUILD_PATH;
|
||||
use crate::shutdown::Shutdown;
|
||||
use crate::util::io::delete_file;
|
||||
use crate::Error;
|
||||
use crate::{Error, DATA_DIR};
|
||||
|
||||
pub fn diagnostic<C: Context>() -> ParentHandler<C> {
|
||||
ParentHandler::new()
|
||||
@@ -71,7 +71,7 @@ pub fn restart(ctx: DiagnosticContext) -> Result<(), Error> {
|
||||
export_args: ctx
|
||||
.disk_guid
|
||||
.clone()
|
||||
.map(|guid| (guid, ctx.datadir.clone())),
|
||||
.map(|guid| (guid, Path::new(DATA_DIR).to_owned())),
|
||||
restart: true,
|
||||
})
|
||||
.expect("receiver dropped");
|
||||
|
||||
@@ -7,7 +7,6 @@ use models::PackageId;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
use tracing::instrument;
|
||||
|
||||
use super::filesystem::ecryptfs::EcryptFS;
|
||||
use super::guard::{GenericMountGuard, TmpMountGuard};
|
||||
use crate::auth::check_password;
|
||||
use crate::backup::target::BackupInfo;
|
||||
|
||||
@@ -1,7 +1,6 @@
|
||||
use std::ffi::OsStr;
|
||||
use std::fmt::{Display, Write};
|
||||
use std::path::Path;
|
||||
use std::time::Duration;
|
||||
|
||||
use digest::generic_array::GenericArray;
|
||||
use digest::OutputSizeUser;
|
||||
|
||||
@@ -3,10 +3,12 @@ use std::io::Cursor;
|
||||
use std::net::{Ipv4Addr, SocketAddr, SocketAddrV4};
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, SystemTime};
|
||||
|
||||
use axum::extract::ws::{self};
|
||||
use color_eyre::eyre::eyre;
|
||||
use const_format::formatcp;
|
||||
use futures::{StreamExt, TryStreamExt};
|
||||
use itertools::Itertools;
|
||||
use models::ResultExt;
|
||||
@@ -23,21 +25,24 @@ use crate::context::{CliContext, InitContext};
|
||||
use crate::db::model::public::ServerStatus;
|
||||
use crate::db::model::Database;
|
||||
use crate::disk::mount::util::unmount;
|
||||
use crate::hostname::Hostname;
|
||||
use crate::middleware::auth::LOCAL_AUTH_COOKIE_PATH;
|
||||
use crate::net::net_controller::PreInitNetController;
|
||||
use crate::net::net_controller::{NetController, NetService};
|
||||
use crate::net::utils::find_wifi_iface;
|
||||
use crate::net::web_server::{UpgradableListener, WebServerAcceptorSetter};
|
||||
use crate::prelude::*;
|
||||
use crate::progress::{
|
||||
FullProgress, FullProgressTracker, PhaseProgressTrackerHandle, PhasedProgressBar,
|
||||
};
|
||||
use crate::rpc_continuations::{Guid, RpcContinuation};
|
||||
use crate::s9pk::v2::pack::{CONTAINER_DATADIR, CONTAINER_TOOL};
|
||||
use crate::ssh::SSH_AUTHORIZED_KEYS_FILE;
|
||||
use crate::ssh::SSH_DIR;
|
||||
use crate::system::get_mem_info;
|
||||
use crate::util::io::{create_file, IOHook};
|
||||
use crate::util::lshw::lshw;
|
||||
use crate::util::net::WebSocketExt;
|
||||
use crate::util::{cpupower, Invoke};
|
||||
use crate::Error;
|
||||
use crate::{Error, MAIN_DATA, PACKAGE_DATA};
|
||||
|
||||
pub const SYSTEM_REBUILD_PATH: &str = "/media/startos/config/system-rebuild";
|
||||
pub const STANDBY_MODE_PATH: &str = "/media/startos/config/standby";
|
||||
@@ -195,7 +200,8 @@ pub async fn init_postgres(datadir: impl AsRef<Path>) -> Result<(), Error> {
|
||||
}
|
||||
|
||||
pub struct InitResult {
|
||||
pub net_ctrl: PreInitNetController,
|
||||
pub net_ctrl: Arc<NetController>,
|
||||
pub os_net_service: NetService,
|
||||
}
|
||||
|
||||
pub struct InitPhases {
|
||||
@@ -213,7 +219,6 @@ pub struct InitPhases {
|
||||
enable_zram: PhaseProgressTrackerHandle,
|
||||
update_server_info: PhaseProgressTrackerHandle,
|
||||
launch_service_network: PhaseProgressTrackerHandle,
|
||||
run_migrations: PhaseProgressTrackerHandle,
|
||||
validate_db: PhaseProgressTrackerHandle,
|
||||
postinit: Option<PhaseProgressTrackerHandle>,
|
||||
}
|
||||
@@ -238,7 +243,6 @@ impl InitPhases {
|
||||
enable_zram: handle.add_phase("Enabling ZRAM".into(), Some(1)),
|
||||
update_server_info: handle.add_phase("Updating server info".into(), Some(1)),
|
||||
launch_service_network: handle.add_phase("Launching service intranet".into(), Some(1)),
|
||||
run_migrations: handle.add_phase("Running migrations".into(), Some(10)),
|
||||
validate_db: handle.add_phase("Validating database".into(), Some(1)),
|
||||
postinit: if Path::new("/media/startos/config/postinit.sh").exists() {
|
||||
Some(handle.add_phase("Running postinit.sh".into(), Some(5)))
|
||||
@@ -274,6 +278,7 @@ pub async fn run_script<P: AsRef<Path>>(path: P, mut progress: PhaseProgressTrac
|
||||
|
||||
#[instrument(skip_all)]
|
||||
pub async fn init(
|
||||
webserver: &WebServerAcceptorSetter<UpgradableListener>,
|
||||
cfg: &ServerConfig,
|
||||
InitPhases {
|
||||
preinit,
|
||||
@@ -290,7 +295,6 @@ pub async fn init(
|
||||
mut enable_zram,
|
||||
mut update_server_info,
|
||||
mut launch_service_network,
|
||||
run_migrations,
|
||||
mut validate_db,
|
||||
postinit,
|
||||
}: InitPhases,
|
||||
@@ -317,7 +321,7 @@ pub async fn init(
|
||||
})?;
|
||||
tokio::fs::set_permissions(LOCAL_AUTH_COOKIE_PATH, Permissions::from_mode(0o046)).await?;
|
||||
Command::new("chown")
|
||||
.arg("root:embassy")
|
||||
.arg("root:startos")
|
||||
.arg(LOCAL_AUTH_COOKIE_PATH)
|
||||
.invoke(crate::ErrorKind::Filesystem)
|
||||
.await?;
|
||||
@@ -334,8 +338,10 @@ pub async fn init(
|
||||
|
||||
load_ssh_keys.start();
|
||||
crate::ssh::sync_keys(
|
||||
&Hostname(peek.as_public().as_server_info().as_hostname().de()?),
|
||||
&peek.as_private().as_ssh_privkey().de()?,
|
||||
&peek.as_private().as_ssh_pubkeys().de()?,
|
||||
SSH_AUTHORIZED_KEYS_FILE,
|
||||
SSH_DIR,
|
||||
)
|
||||
.await?;
|
||||
load_ssh_keys.complete();
|
||||
@@ -344,22 +350,25 @@ pub async fn init(
|
||||
let account = AccountInfo::load(&peek)?;
|
||||
|
||||
start_net.start();
|
||||
let net_ctrl = PreInitNetController::init(
|
||||
db.clone(),
|
||||
cfg.tor_control
|
||||
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
|
||||
cfg.tor_socks.unwrap_or(SocketAddr::V4(SocketAddrV4::new(
|
||||
Ipv4Addr::new(127, 0, 0, 1),
|
||||
9050,
|
||||
))),
|
||||
&account.hostname,
|
||||
account.tor_key,
|
||||
)
|
||||
.await?;
|
||||
let net_ctrl = Arc::new(
|
||||
NetController::init(
|
||||
db.clone(),
|
||||
cfg.tor_control
|
||||
.unwrap_or(SocketAddr::from(([127, 0, 0, 1], 9051))),
|
||||
cfg.tor_socks.unwrap_or(SocketAddr::V4(SocketAddrV4::new(
|
||||
Ipv4Addr::new(127, 0, 0, 1),
|
||||
9050,
|
||||
))),
|
||||
&account.hostname,
|
||||
)
|
||||
.await?,
|
||||
);
|
||||
webserver.try_upgrade(|a| net_ctrl.net_iface.upgrade_listener(a))?;
|
||||
let os_net_service = net_ctrl.os_bindings().await?;
|
||||
start_net.complete();
|
||||
|
||||
mount_logs.start();
|
||||
let log_dir = cfg.datadir().join("main/logs");
|
||||
let log_dir = Path::new(MAIN_DATA).join("logs");
|
||||
if tokio::fs::metadata(&log_dir).await.is_err() {
|
||||
tokio::fs::create_dir_all(&log_dir).await?;
|
||||
}
|
||||
@@ -390,8 +399,6 @@ pub async fn init(
|
||||
mount_logs.complete();
|
||||
tracing::info!("Mounted Logs");
|
||||
|
||||
let mut server_info = peek.as_public().as_server_info().de()?;
|
||||
|
||||
load_ca_cert.start();
|
||||
// write to ca cert store
|
||||
tokio::fs::write(
|
||||
@@ -402,58 +409,46 @@ pub async fn init(
|
||||
Command::new("update-ca-certificates")
|
||||
.invoke(crate::ErrorKind::OpenSsl)
|
||||
.await?;
|
||||
if tokio::fs::metadata("/home/kiosk/profile").await.is_ok() {
|
||||
Command::new("certutil")
|
||||
.arg("-A")
|
||||
.arg("-n")
|
||||
.arg("StartOS Local Root CA")
|
||||
.arg("-t")
|
||||
.arg("TCu,Cuw,Tuw")
|
||||
.arg("-i")
|
||||
.arg("/usr/local/share/ca-certificates/startos-root-ca.crt")
|
||||
.arg("-d")
|
||||
.arg("/home/kiosk/fx-profile")
|
||||
.invoke(ErrorKind::OpenSsl)
|
||||
.await?;
|
||||
}
|
||||
load_ca_cert.complete();
|
||||
|
||||
load_wifi.start();
|
||||
crate::net::wifi::synchronize_wpa_supplicant_conf(
|
||||
&cfg.datadir().join("main"),
|
||||
&mut server_info.wifi,
|
||||
)
|
||||
.await?;
|
||||
let wifi_interface = find_wifi_iface().await?;
|
||||
let wifi = db
|
||||
.mutate(|db| {
|
||||
let wifi = db.as_public_mut().as_server_info_mut().as_wifi_mut();
|
||||
wifi.as_interface_mut().ser(&wifi_interface)?;
|
||||
wifi.de()
|
||||
})
|
||||
.await?;
|
||||
crate::net::wifi::synchronize_network_manager(MAIN_DATA, &wifi).await?;
|
||||
load_wifi.complete();
|
||||
tracing::info!("Synchronized WiFi");
|
||||
|
||||
init_tmp.start();
|
||||
let tmp_dir = cfg.datadir().join("package-data/tmp");
|
||||
let tmp_dir = Path::new(PACKAGE_DATA).join("tmp");
|
||||
if tokio::fs::metadata(&tmp_dir).await.is_ok() {
|
||||
tokio::fs::remove_dir_all(&tmp_dir).await?;
|
||||
}
|
||||
if tokio::fs::metadata(&tmp_dir).await.is_err() {
|
||||
tokio::fs::create_dir_all(&tmp_dir).await?;
|
||||
}
|
||||
let tmp_var = cfg.datadir().join(format!("package-data/tmp/var"));
|
||||
let tmp_var = Path::new(PACKAGE_DATA).join("tmp/var");
|
||||
if tokio::fs::metadata(&tmp_var).await.is_ok() {
|
||||
tokio::fs::remove_dir_all(&tmp_var).await?;
|
||||
}
|
||||
crate::disk::mount::util::bind(&tmp_var, "/var/tmp", false).await?;
|
||||
let downloading = cfg
|
||||
.datadir()
|
||||
.join(format!("package-data/archive/downloading"));
|
||||
let downloading = Path::new(PACKAGE_DATA).join("archive/downloading");
|
||||
if tokio::fs::metadata(&downloading).await.is_ok() {
|
||||
tokio::fs::remove_dir_all(&downloading).await?;
|
||||
}
|
||||
let tmp_docker = cfg
|
||||
.datadir()
|
||||
.join(format!("package-data/tmp/{CONTAINER_TOOL}"));
|
||||
let tmp_docker = Path::new(PACKAGE_DATA).join(formatcp!("tmp/{CONTAINER_TOOL}"));
|
||||
crate::disk::mount::util::bind(&tmp_docker, CONTAINER_DATADIR, false).await?;
|
||||
init_tmp.complete();
|
||||
|
||||
let server_info = db.peek().await.into_public().into_server_info();
|
||||
set_governor.start();
|
||||
let governor = if let Some(governor) = &server_info.governor {
|
||||
let selected_governor = server_info.as_governor().de()?;
|
||||
let governor = if let Some(governor) = &selected_governor {
|
||||
if cpupower::get_available_governors()
|
||||
.await?
|
||||
.contains(governor)
|
||||
@@ -474,11 +469,11 @@ pub async fn init(
|
||||
set_governor.complete();
|
||||
|
||||
sync_clock.start();
|
||||
server_info.ntp_synced = false;
|
||||
let mut ntp_synced = false;
|
||||
let mut not_made_progress = 0u32;
|
||||
for _ in 0..1800 {
|
||||
if check_time_is_synchronized().await? {
|
||||
server_info.ntp_synced = true;
|
||||
ntp_synced = true;
|
||||
break;
|
||||
}
|
||||
let t = SystemTime::now();
|
||||
@@ -495,7 +490,7 @@ pub async fn init(
|
||||
break;
|
||||
}
|
||||
}
|
||||
if !server_info.ntp_synced {
|
||||
if !ntp_synced {
|
||||
tracing::warn!("Timed out waiting for system time to synchronize");
|
||||
} else {
|
||||
tracing::info!("Syncronized system clock");
|
||||
@@ -503,16 +498,16 @@ pub async fn init(
|
||||
sync_clock.complete();
|
||||
|
||||
enable_zram.start();
|
||||
if server_info.zram {
|
||||
crate::system::enable_zram().await?
|
||||
if server_info.as_zram().de()? {
|
||||
crate::system::enable_zram().await?;
|
||||
tracing::info!("Enabled ZRAM");
|
||||
}
|
||||
enable_zram.complete();
|
||||
|
||||
update_server_info.start();
|
||||
server_info.ip_info = crate::net::dhcp::init_ips().await?;
|
||||
server_info.ram = get_mem_info().await?.total.0 as u64 * 1024 * 1024;
|
||||
server_info.devices = lshw().await?;
|
||||
server_info.status_info = ServerStatus {
|
||||
let ram = get_mem_info().await?.total.0 as u64 * 1024 * 1024;
|
||||
let devices = lshw().await?;
|
||||
let status_info = ServerStatus {
|
||||
updated: false,
|
||||
update_progress: None,
|
||||
backup_progress: None,
|
||||
@@ -520,10 +515,15 @@ pub async fn init(
|
||||
restarting: false,
|
||||
};
|
||||
db.mutate(|v| {
|
||||
v.as_public_mut().as_server_info_mut().ser(&server_info)?;
|
||||
let server_info = v.as_public_mut().as_server_info_mut();
|
||||
server_info.as_ntp_synced_mut().ser(&ntp_synced)?;
|
||||
server_info.as_ram_mut().ser(&ram)?;
|
||||
server_info.as_devices_mut().ser(&devices)?;
|
||||
server_info.as_status_info_mut().ser(&status_info)?;
|
||||
Ok(())
|
||||
})
|
||||
.await?;
|
||||
tracing::info!("Updated server info");
|
||||
update_server_info.complete();
|
||||
|
||||
launch_service_network.start();
|
||||
@@ -532,6 +532,7 @@ pub async fn init(
|
||||
.arg("lxc-net.service")
|
||||
.invoke(ErrorKind::Lxc)
|
||||
.await?;
|
||||
tracing::info!("Launched service intranet");
|
||||
launch_service_network.complete();
|
||||
|
||||
validate_db.start();
|
||||
@@ -540,6 +541,7 @@ pub async fn init(
|
||||
d.ser(&model)
|
||||
})
|
||||
.await?;
|
||||
tracing::info!("Validated database");
|
||||
validate_db.complete();
|
||||
|
||||
if let Some(progress) = postinit {
|
||||
@@ -548,7 +550,10 @@ pub async fn init(
|
||||
|
||||
tracing::info!("System initialized.");
|
||||
|
||||
Ok(InitResult { net_ctrl })
|
||||
Ok(InitResult {
|
||||
net_ctrl,
|
||||
os_net_service,
|
||||
})
|
||||
}
|
||||
|
||||
pub fn init_api<C: Context>() -> ParentHandler<C> {
|
||||
|
||||
@@ -2,6 +2,7 @@ use std::ops::Deref;
|
||||
use std::path::PathBuf;
|
||||
use std::time::Duration;
|
||||
|
||||
use axum::extract::ws;
|
||||
use clap::builder::ValueParserFactory;
|
||||
use clap::{value_parser, CommandFactory, FromArgMatches, Parser};
|
||||
use color_eyre::eyre::eyre;
|
||||
@@ -12,7 +13,7 @@ use itertools::Itertools;
|
||||
use models::{FromStrParser, VersionString};
|
||||
use reqwest::header::{HeaderMap, CONTENT_LENGTH};
|
||||
use reqwest::Url;
|
||||
use rpc_toolkit::yajrc::{GenericRpcMethod, RpcError};
|
||||
use rpc_toolkit::yajrc::RpcError;
|
||||
use rpc_toolkit::HandlerArgs;
|
||||
use rustyline_async::ReadlineEvent;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -188,7 +189,7 @@ pub async fn sideload(
|
||||
SideloadParams { session }: SideloadParams,
|
||||
) -> Result<SideloadResponse, Error> {
|
||||
let (upload, file) = upload(&ctx, session.clone()).await?;
|
||||
let (err_send, err_recv) = oneshot::channel::<Error>();
|
||||
let (err_send, mut err_recv) = oneshot::channel::<Error>();
|
||||
let progress = Guid::new();
|
||||
let progress_tracker = FullProgressTracker::new();
|
||||
let mut progress_listener = progress_tracker.stream(Some(Duration::from_millis(200)));
|
||||
@@ -198,43 +199,44 @@ pub async fn sideload(
|
||||
RpcContinuation::ws_authed(
|
||||
&ctx,
|
||||
session,
|
||||
|mut ws| {
|
||||
use axum::extract::ws::Message;
|
||||
async move {
|
||||
if let Err(e) = async {
|
||||
type RpcResponse = rpc_toolkit::yajrc::RpcResponse<
|
||||
GenericRpcMethod<&'static str, (), FullProgress>,
|
||||
>;
|
||||
|mut ws| async move {
|
||||
if let Err(e) = async {
|
||||
loop {
|
||||
tokio::select! {
|
||||
res = async {
|
||||
while let Some(progress) = progress_listener.next().await {
|
||||
ws.send(Message::Text(
|
||||
progress = progress_listener.next() => {
|
||||
if let Some(progress) = progress {
|
||||
ws.send(ws::Message::Text(
|
||||
serde_json::to_string(&progress)
|
||||
.with_kind(ErrorKind::Serialization)?,
|
||||
))
|
||||
.await
|
||||
.with_kind(ErrorKind::Network)?;
|
||||
if progress.overall.is_complete() {
|
||||
return ws.normal_close("complete").await;
|
||||
}
|
||||
} else {
|
||||
return ws.normal_close("complete").await;
|
||||
}
|
||||
Ok::<_, Error>(())
|
||||
} => res?,
|
||||
err = err_recv => {
|
||||
}
|
||||
msg = ws.recv() => {
|
||||
if msg.transpose().with_kind(ErrorKind::Network)?.is_none() {
|
||||
return Ok(())
|
||||
}
|
||||
}
|
||||
err = (&mut err_recv) => {
|
||||
if let Ok(e) = err {
|
||||
ws.close_result(Err::<&str, _>(e.clone_output())).await?;
|
||||
return Err(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
ws.normal_close("complete").await?;
|
||||
|
||||
Ok::<_, Error>(())
|
||||
}
|
||||
.await
|
||||
{
|
||||
tracing::error!("Error tracking sideload progress: {e}");
|
||||
tracing::debug!("{e:?}");
|
||||
}
|
||||
}
|
||||
.await
|
||||
{
|
||||
tracing::error!("Error tracking sideload progress: {e}");
|
||||
tracing::debug!("{e:?}");
|
||||
}
|
||||
},
|
||||
Duration::from_secs(600),
|
||||
),
|
||||
@@ -258,9 +260,9 @@ pub async fn sideload(
|
||||
}
|
||||
.await
|
||||
{
|
||||
let _ = err_send.send(e.clone_output());
|
||||
tracing::error!("Error sideloading package: {e}");
|
||||
tracing::debug!("{e:?}");
|
||||
let _ = err_send.send(e);
|
||||
}
|
||||
});
|
||||
Ok(SideloadResponse { upload, progress })
|
||||
|
||||
@@ -1,6 +1,11 @@
|
||||
use const_format::formatcp;
|
||||
|
||||
pub const DATA_DIR: &str = "/media/startos/data";
|
||||
pub const MAIN_DATA: &str = formatcp!("{DATA_DIR}/main");
|
||||
pub const PACKAGE_DATA: &str = formatcp!("{DATA_DIR}/package-data");
|
||||
pub const DEFAULT_REGISTRY: &str = "https://registry.start9.com";
|
||||
// pub const COMMUNITY_MARKETPLACE: &str = "https://community-registry.start9.com";
|
||||
pub const HOST_IP: [u8; 4] = [172, 18, 0, 1];
|
||||
pub const HOST_IP: [u8; 4] = [10, 0, 3, 1];
|
||||
pub use std::env::consts::ARCH;
|
||||
lazy_static::lazy_static! {
|
||||
pub static ref PLATFORM: String = {
|
||||
@@ -82,6 +87,7 @@ use crate::context::{
|
||||
CliContext, DiagnosticContext, InitContext, InstallContext, RpcContext, SetupContext,
|
||||
};
|
||||
use crate::disk::fsck::RequiresReboot;
|
||||
use crate::net::net;
|
||||
use crate::registry::context::{RegistryContext, RegistryUrlParams};
|
||||
use crate::util::serde::HandlerExtSerde;
|
||||
|
||||
@@ -295,13 +301,20 @@ pub fn server<C: Context>() -> ParentHandler<C> {
|
||||
.with_about("Set system smtp server and credentials")
|
||||
.with_call_remote::<CliContext>()
|
||||
)
|
||||
.subcommand(
|
||||
"test-smtp",
|
||||
from_fn_async(system::test_smtp)
|
||||
.no_display()
|
||||
.with_about("Send test email using provided smtp server and credentials")
|
||||
.with_call_remote::<CliContext>()
|
||||
)
|
||||
.subcommand(
|
||||
"clear-smtp",
|
||||
from_fn_async(system::clear_system_smtp)
|
||||
.no_display()
|
||||
.with_about("Remove system smtp server and credentials")
|
||||
.with_call_remote::<CliContext>()
|
||||
)
|
||||
).subcommand("host", net::host::server_host_api::<C>().with_about("Commands for modifying the host for the system ui"))
|
||||
}
|
||||
|
||||
pub fn package<C: Context>() -> ParentHandler<C> {
|
||||
@@ -415,7 +428,7 @@ pub fn package<C: Context>() -> ParentHandler<C> {
|
||||
.subcommand("attach", from_fn_async(service::cli_attach).no_display())
|
||||
.subcommand(
|
||||
"host",
|
||||
net::host::host::<C>().with_about("Manage network hosts for a package"),
|
||||
net::host::host_api::<C>().with_about("Manage network hosts for a package"),
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@@ -30,6 +30,7 @@ use crate::error::ResultExt;
|
||||
use crate::lxc::ContainerId;
|
||||
use crate::prelude::*;
|
||||
use crate::rpc_continuations::{Guid, RpcContinuation, RpcContinuations};
|
||||
use crate::util::net::WebSocketExt;
|
||||
use crate::util::serde::Reversible;
|
||||
use crate::util::Invoke;
|
||||
|
||||
@@ -80,34 +81,28 @@ async fn ws_handler(
|
||||
.with_kind(ErrorKind::Network)?;
|
||||
}
|
||||
|
||||
let mut ws_closed = false;
|
||||
while let Some(entry) = tokio::select! {
|
||||
a = logs.try_next() => Some(a?),
|
||||
a = stream.try_next() => { a.with_kind(crate::ErrorKind::Network)?; ws_closed = true; None }
|
||||
} {
|
||||
if let Some(entry) = entry {
|
||||
let (_, log_entry) = entry.log_entry()?;
|
||||
stream
|
||||
.send(ws::Message::Text(
|
||||
serde_json::to_string(&log_entry).with_kind(ErrorKind::Serialization)?,
|
||||
))
|
||||
.await
|
||||
.with_kind(ErrorKind::Network)?;
|
||||
loop {
|
||||
tokio::select! {
|
||||
entry = logs.try_next() => {
|
||||
if let Some(entry) = entry? {
|
||||
let (_, log_entry) = entry.log_entry()?;
|
||||
stream
|
||||
.send(ws::Message::Text(
|
||||
serde_json::to_string(&log_entry).with_kind(ErrorKind::Serialization)?,
|
||||
))
|
||||
.await
|
||||
.with_kind(ErrorKind::Network)?;
|
||||
} else {
|
||||
return stream.normal_close("complete").await;
|
||||
}
|
||||
},
|
||||
msg = stream.try_next() => {
|
||||
if msg.with_kind(crate::ErrorKind::Network)?.is_none() {
|
||||
return Ok(())
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if !ws_closed {
|
||||
stream
|
||||
.send(ws::Message::Close(Some(ws::CloseFrame {
|
||||
code: ws::close_code::NORMAL,
|
||||
reason: "Log Stream Finished".into(),
|
||||
})))
|
||||
.await
|
||||
.with_kind(ErrorKind::Network)?;
|
||||
drop(stream);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(serde::Serialize, serde::Deserialize, Debug, Clone)]
|
||||
|
||||
@@ -8,13 +8,11 @@ use rpc_toolkit::{
|
||||
use serde::{Deserialize, Serialize};
|
||||
use ts_rs::TS;
|
||||
|
||||
use crate::context::{CliContext, RpcContext};
|
||||
use crate::lxc::{ContainerId, LxcConfig};
|
||||
use crate::prelude::*;
|
||||
use crate::rpc_continuations::Guid;
|
||||
use crate::{
|
||||
context::{CliContext, RpcContext},
|
||||
service::ServiceStats,
|
||||
};
|
||||
use crate::service::ServiceStats;
|
||||
|
||||
pub fn lxc<C: Context>() -> ParentHandler<C> {
|
||||
ParentHandler::new()
|
||||
|
||||
@@ -1,8 +1,8 @@
|
||||
use std::collections::BTreeSet;
|
||||
use std::net::Ipv4Addr;
|
||||
use std::path::Path;
|
||||
use std::sync::{Arc, Weak};
|
||||
use std::time::Duration;
|
||||
use std::{collections::BTreeSet, ffi::OsString};
|
||||
|
||||
use clap::builder::ValueParserFactory;
|
||||
use futures::{AsyncWriteExt, StreamExt};
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use axum::body::Body;
|
||||
use axum::extract::Request;
|
||||
use axum::response::Response;
|
||||
use http::{HeaderMap, HeaderValue};
|
||||
use http::{HeaderMap, HeaderValue, Method};
|
||||
use rpc_toolkit::{Empty, Middleware};
|
||||
|
||||
#[derive(Clone)]
|
||||
@@ -52,6 +53,13 @@ impl<Context: Send + Sync + 'static> Middleware<Context> for Cors {
|
||||
request: &mut Request,
|
||||
) -> Result<(), Response> {
|
||||
self.get_cors_headers(request);
|
||||
if request.method() == Method::OPTIONS {
|
||||
let mut response = Response::new(Body::empty());
|
||||
response
|
||||
.headers_mut()
|
||||
.extend(std::mem::take(&mut self.headers));
|
||||
return Err(response);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
async fn process_http_response(&mut self, _: &Context, response: &mut Response) {
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
use std::collections::{BTreeMap, BTreeSet};
|
||||
use std::str::FromStr;
|
||||
|
||||
use async_acme::acme::Identifier;
|
||||
use clap::builder::ValueParserFactory;
|
||||
use clap::Parser;
|
||||
use imbl_value::InternedString;
|
||||
@@ -10,6 +11,7 @@ use openssl::pkey::{PKey, Private};
|
||||
use openssl::x509::X509;
|
||||
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use ts_rs::TS;
|
||||
use url::Url;
|
||||
|
||||
use crate::context::{CliContext, RpcContext};
|
||||
@@ -78,10 +80,18 @@ impl<'a> async_acme::cache::AcmeCache for AcmeCertCache<'a> {
|
||||
|
||||
async fn read_certificate(
|
||||
&self,
|
||||
domains: &[String],
|
||||
identifiers: &[Identifier],
|
||||
directory_url: &str,
|
||||
) -> Result<Option<(String, String)>, Self::Error> {
|
||||
let domains = JsonKey::new(domains.into_iter().map(InternedString::intern).collect());
|
||||
let identifiers = JsonKey::new(
|
||||
identifiers
|
||||
.into_iter()
|
||||
.map(|d| match d {
|
||||
Identifier::Dns(d) => d.into(),
|
||||
Identifier::Ip(ip) => InternedString::from_display(ip),
|
||||
})
|
||||
.collect(),
|
||||
);
|
||||
let directory_url = directory_url
|
||||
.parse::<Url>()
|
||||
.with_kind(ErrorKind::ParseUrl)?;
|
||||
@@ -94,7 +104,7 @@ impl<'a> async_acme::cache::AcmeCache for AcmeCertCache<'a> {
|
||||
.into_acme()
|
||||
.into_certs()
|
||||
.into_idx(&directory_url)
|
||||
.and_then(|a| a.into_idx(&domains))
|
||||
.and_then(|a| a.into_idx(&identifiers))
|
||||
else {
|
||||
return Ok(None);
|
||||
};
|
||||
@@ -120,13 +130,21 @@ impl<'a> async_acme::cache::AcmeCache for AcmeCertCache<'a> {
|
||||
|
||||
async fn write_certificate(
|
||||
&self,
|
||||
domains: &[String],
|
||||
identifiers: &[Identifier],
|
||||
directory_url: &str,
|
||||
key_pem: &str,
|
||||
certificate_pem: &str,
|
||||
) -> Result<(), Self::Error> {
|
||||
tracing::info!("Saving new certificate for {domains:?}");
|
||||
let domains = JsonKey::new(domains.into_iter().map(InternedString::intern).collect());
|
||||
tracing::info!("Saving new certificate for {identifiers:?}");
|
||||
let identifiers = JsonKey::new(
|
||||
identifiers
|
||||
.into_iter()
|
||||
.map(|d| match d {
|
||||
Identifier::Dns(d) => d.into(),
|
||||
Identifier::Ip(ip) => InternedString::from_display(ip),
|
||||
})
|
||||
.collect(),
|
||||
);
|
||||
let directory_url = directory_url
|
||||
.parse::<Url>()
|
||||
.with_kind(ErrorKind::ParseUrl)?;
|
||||
@@ -146,7 +164,7 @@ impl<'a> async_acme::cache::AcmeCache for AcmeCertCache<'a> {
|
||||
.as_acme_mut()
|
||||
.as_certs_mut()
|
||||
.upsert(&directory_url, || Ok(BTreeMap::new()))?
|
||||
.insert(&domains, &cert)
|
||||
.insert(&identifiers, &cert)
|
||||
})
|
||||
.await?;
|
||||
|
||||
@@ -159,18 +177,23 @@ pub fn acme<C: Context>() -> ParentHandler<C> {
|
||||
.subcommand(
|
||||
"init",
|
||||
from_fn_async(init)
|
||||
.with_metadata("sync_db", Value::Bool(true))
|
||||
.no_display()
|
||||
.with_about("Setup ACME certificate acquisition")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
.subcommand(
|
||||
"domain",
|
||||
domain::<C>()
|
||||
.with_about("Add, remove, or view domains for which to acquire ACME certificates"),
|
||||
"remove",
|
||||
from_fn_async(remove)
|
||||
.with_metadata("sync_db", Value::Bool(true))
|
||||
.no_display()
|
||||
.with_about("Setup ACME certificate acquisition")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
}
|
||||
|
||||
#[derive(Clone, Deserialize, Serialize)]
|
||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, TS)]
|
||||
#[ts(type = "string")]
|
||||
pub struct AcmeProvider(pub Url);
|
||||
impl FromStr for AcmeProvider {
|
||||
type Err = <Url as FromStr>::Err;
|
||||
@@ -180,9 +203,36 @@ impl FromStr for AcmeProvider {
|
||||
"letsencrypt-staging" => async_acme::acme::LETS_ENCRYPT_STAGING_DIRECTORY.parse(),
|
||||
s => s.parse(),
|
||||
}
|
||||
.map(|mut u: Url| {
|
||||
let path = u
|
||||
.path_segments()
|
||||
.into_iter()
|
||||
.flatten()
|
||||
.filter(|p| !p.is_empty())
|
||||
.map(|p| p.to_owned())
|
||||
.collect::<Vec<_>>();
|
||||
if let Ok(mut path_mut) = u.path_segments_mut() {
|
||||
path_mut.clear();
|
||||
path_mut.extend(path);
|
||||
}
|
||||
u
|
||||
})
|
||||
.map(Self)
|
||||
}
|
||||
}
|
||||
impl<'de> Deserialize<'de> for AcmeProvider {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
crate::util::serde::deserialize_from_str(deserializer)
|
||||
}
|
||||
}
|
||||
impl AsRef<str> for AcmeProvider {
|
||||
fn as_ref(&self) -> &str {
|
||||
self.0.as_str()
|
||||
}
|
||||
}
|
||||
impl ValueParserFactory for AcmeProvider {
|
||||
type Parser = FromStrParser<Self>;
|
||||
fn value_parser() -> Self::Parser {
|
||||
@@ -200,125 +250,36 @@ pub struct InitAcmeParams {
|
||||
|
||||
pub async fn init(
|
||||
ctx: RpcContext,
|
||||
InitAcmeParams {
|
||||
provider: AcmeProvider(provider),
|
||||
contact,
|
||||
}: InitAcmeParams,
|
||||
InitAcmeParams { provider, contact }: InitAcmeParams,
|
||||
) -> Result<(), Error> {
|
||||
ctx.db
|
||||
.mutate(|db| {
|
||||
db.as_public_mut()
|
||||
.as_server_info_mut()
|
||||
.as_acme_mut()
|
||||
.map_mutate(|acme| {
|
||||
Ok(Some(AcmeSettings {
|
||||
provider,
|
||||
contact,
|
||||
domains: acme.map(|acme| acme.domains).unwrap_or_default(),
|
||||
}))
|
||||
})
|
||||
.insert(&provider, &AcmeSettings { contact })
|
||||
})
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn domain<C: Context>() -> ParentHandler<C> {
|
||||
ParentHandler::new()
|
||||
.subcommand(
|
||||
"add",
|
||||
from_fn_async(add_domain)
|
||||
.no_display()
|
||||
.with_about("Add a domain for which to acquire ACME certificates")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
.subcommand(
|
||||
"remove",
|
||||
from_fn_async(remove_domain)
|
||||
.no_display()
|
||||
.with_about("Remove a domain for which to acquire ACME certificates")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
.subcommand(
|
||||
"list",
|
||||
from_fn_async(list_domains)
|
||||
.with_custom_display_fn(|_, res| {
|
||||
for domain in res {
|
||||
println!("{domain}")
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
.with_about("List domains for which to acquire ACME certificates")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Parser)]
|
||||
pub struct DomainParams {
|
||||
pub domain: InternedString,
|
||||
pub struct RemoveAcmeParams {
|
||||
#[arg(long)]
|
||||
pub provider: AcmeProvider,
|
||||
}
|
||||
|
||||
pub async fn add_domain(
|
||||
pub async fn remove(
|
||||
ctx: RpcContext,
|
||||
DomainParams { domain }: DomainParams,
|
||||
RemoveAcmeParams { provider }: RemoveAcmeParams,
|
||||
) -> Result<(), Error> {
|
||||
ctx.db
|
||||
.mutate(|db| {
|
||||
db.as_public_mut()
|
||||
.as_server_info_mut()
|
||||
.as_acme_mut()
|
||||
.transpose_mut()
|
||||
.ok_or_else(|| {
|
||||
Error::new(
|
||||
eyre!("Please call `start-cli net acme init` before adding a domain"),
|
||||
ErrorKind::InvalidRequest,
|
||||
)
|
||||
})?
|
||||
.as_domains_mut()
|
||||
.mutate(|domains| {
|
||||
domains.insert(domain);
|
||||
Ok(())
|
||||
})
|
||||
.remove(&provider)
|
||||
})
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn remove_domain(
|
||||
ctx: RpcContext,
|
||||
DomainParams { domain }: DomainParams,
|
||||
) -> Result<(), Error> {
|
||||
ctx.db
|
||||
.mutate(|db| {
|
||||
if let Some(acme) = db
|
||||
.as_public_mut()
|
||||
.as_server_info_mut()
|
||||
.as_acme_mut()
|
||||
.transpose_mut()
|
||||
{
|
||||
acme.as_domains_mut().mutate(|domains| {
|
||||
domains.remove(&domain);
|
||||
Ok(())
|
||||
})
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
})
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn list_domains(ctx: RpcContext) -> Result<BTreeSet<InternedString>, Error> {
|
||||
if let Some(acme) = ctx
|
||||
.db
|
||||
.peek()
|
||||
.await
|
||||
.into_public()
|
||||
.into_server_info()
|
||||
.into_acme()
|
||||
.transpose()
|
||||
{
|
||||
acme.into_domains().de()
|
||||
} else {
|
||||
Ok(BTreeSet::new())
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,99 +0,0 @@
|
||||
use std::collections::{BTreeMap, BTreeSet};
|
||||
use std::net::IpAddr;
|
||||
|
||||
use clap::Parser;
|
||||
use futures::TryStreamExt;
|
||||
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::sync::RwLock;
|
||||
use ts_rs::TS;
|
||||
|
||||
use crate::context::{CliContext, RpcContext};
|
||||
use crate::db::model::public::IpInfo;
|
||||
use crate::net::utils::{iface_is_physical, list_interfaces};
|
||||
use crate::prelude::*;
|
||||
use crate::Error;
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
static ref CACHED_IPS: RwLock<BTreeSet<IpAddr>> = RwLock::new(BTreeSet::new());
|
||||
}
|
||||
|
||||
async fn _ips() -> Result<BTreeSet<IpAddr>, Error> {
|
||||
Ok(init_ips()
|
||||
.await?
|
||||
.values()
|
||||
.flat_map(|i| {
|
||||
std::iter::empty()
|
||||
.chain(i.ipv4.map(IpAddr::from))
|
||||
.chain(i.ipv6.map(IpAddr::from))
|
||||
})
|
||||
.collect())
|
||||
}
|
||||
|
||||
pub async fn ips() -> Result<BTreeSet<IpAddr>, Error> {
|
||||
let ips = CACHED_IPS.read().await.clone();
|
||||
if !ips.is_empty() {
|
||||
return Ok(ips);
|
||||
}
|
||||
let ips = _ips().await?;
|
||||
*CACHED_IPS.write().await = ips.clone();
|
||||
Ok(ips)
|
||||
}
|
||||
|
||||
pub async fn init_ips() -> Result<BTreeMap<String, IpInfo>, Error> {
|
||||
let mut res = BTreeMap::new();
|
||||
let mut ifaces = list_interfaces();
|
||||
while let Some(iface) = ifaces.try_next().await? {
|
||||
if iface_is_physical(&iface).await {
|
||||
let ip_info = IpInfo::for_interface(&iface).await?;
|
||||
res.insert(iface, ip_info);
|
||||
}
|
||||
}
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
// #[command(subcommands(update))]
|
||||
pub fn dhcp<C: Context>() -> ParentHandler<C> {
|
||||
ParentHandler::new().subcommand(
|
||||
"update",
|
||||
from_fn_async::<_, _, (), Error, (RpcContext, UpdateParams)>(update)
|
||||
.no_display()
|
||||
.with_about("Update IP assigned by dhcp")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
}
|
||||
#[derive(Deserialize, Serialize, Parser, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[command(rename_all = "kebab-case")]
|
||||
pub struct UpdateParams {
|
||||
interface: String,
|
||||
}
|
||||
|
||||
pub async fn update(
|
||||
ctx: RpcContext,
|
||||
UpdateParams { interface }: UpdateParams,
|
||||
) -> Result<(), Error> {
|
||||
if iface_is_physical(&interface).await {
|
||||
let ip_info = IpInfo::for_interface(&interface).await?;
|
||||
ctx.db
|
||||
.mutate(|db| {
|
||||
db.as_public_mut()
|
||||
.as_server_info_mut()
|
||||
.as_ip_info_mut()
|
||||
.insert(&interface, &ip_info)
|
||||
})
|
||||
.await?;
|
||||
|
||||
let mut cached = CACHED_IPS.write().await;
|
||||
if cached.is_empty() {
|
||||
*cached = _ips().await?;
|
||||
} else {
|
||||
cached.extend(
|
||||
std::iter::empty()
|
||||
.chain(ip_info.ipv4.map(IpAddr::from))
|
||||
.chain(ip_info.ipv6.map(IpAddr::from)),
|
||||
);
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,6 +1,6 @@
|
||||
use std::borrow::Borrow;
|
||||
use std::collections::BTreeMap;
|
||||
use std::net::{Ipv4Addr, SocketAddr};
|
||||
use std::net::Ipv4Addr;
|
||||
use std::sync::{Arc, Weak};
|
||||
use std::time::Duration;
|
||||
|
||||
@@ -19,6 +19,7 @@ use trust_dns_server::server::{Request, RequestHandler, ResponseHandler, Respons
|
||||
use trust_dns_server::ServerFuture;
|
||||
|
||||
use crate::net::forward::START9_BRIDGE_IFACE;
|
||||
use crate::util::sync::Watch;
|
||||
use crate::util::Invoke;
|
||||
use crate::{Error, ErrorKind, ResultExt};
|
||||
|
||||
@@ -140,38 +141,46 @@ impl RequestHandler for Resolver {
|
||||
|
||||
impl DnsController {
|
||||
#[instrument(skip_all)]
|
||||
pub async fn init(bind: &[SocketAddr]) -> Result<Self, Error> {
|
||||
pub async fn init(mut lxcbr_status: Watch<bool>) -> Result<Self, Error> {
|
||||
let services = Arc::new(RwLock::new(BTreeMap::new()));
|
||||
|
||||
let mut server = ServerFuture::new(Resolver {
|
||||
services: services.clone(),
|
||||
});
|
||||
server.register_listener(
|
||||
TcpListener::bind(bind)
|
||||
.await
|
||||
.with_kind(ErrorKind::Network)?,
|
||||
Duration::from_secs(30),
|
||||
);
|
||||
server.register_socket(UdpSocket::bind(bind).await.with_kind(ErrorKind::Network)?);
|
||||
|
||||
Command::new("resolvectl")
|
||||
.arg("dns")
|
||||
.arg(START9_BRIDGE_IFACE)
|
||||
.arg("127.0.0.1")
|
||||
.invoke(ErrorKind::Network)
|
||||
.await?;
|
||||
Command::new("resolvectl")
|
||||
.arg("domain")
|
||||
.arg(START9_BRIDGE_IFACE)
|
||||
.arg("embassy")
|
||||
.invoke(ErrorKind::Network)
|
||||
.await?;
|
||||
let dns_server = tokio::spawn(async move {
|
||||
server.register_listener(
|
||||
TcpListener::bind((Ipv4Addr::LOCALHOST, 53))
|
||||
.await
|
||||
.with_kind(ErrorKind::Network)?,
|
||||
Duration::from_secs(30),
|
||||
);
|
||||
server.register_socket(
|
||||
UdpSocket::bind((Ipv4Addr::LOCALHOST, 53))
|
||||
.await
|
||||
.with_kind(ErrorKind::Network)?,
|
||||
);
|
||||
|
||||
lxcbr_status.wait_for(|a| *a).await;
|
||||
|
||||
Command::new("resolvectl")
|
||||
.arg("dns")
|
||||
.arg(START9_BRIDGE_IFACE)
|
||||
.arg("127.0.0.1")
|
||||
.invoke(ErrorKind::Network)
|
||||
.await?;
|
||||
Command::new("resolvectl")
|
||||
.arg("domain")
|
||||
.arg(START9_BRIDGE_IFACE)
|
||||
.arg("embassy")
|
||||
.invoke(ErrorKind::Network)
|
||||
.await?;
|
||||
|
||||
let dns_server = tokio::spawn(
|
||||
server
|
||||
.block_until_done()
|
||||
.map_err(|e| Error::new(e, ErrorKind::Network)),
|
||||
)
|
||||
.await
|
||||
.map_err(|e| Error::new(e, ErrorKind::Network))
|
||||
})
|
||||
.into();
|
||||
|
||||
Ok(Self {
|
||||
|
||||
@@ -1,13 +1,18 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::collections::{BTreeMap, BTreeSet};
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::{Arc, Weak};
|
||||
|
||||
use futures::channel::oneshot;
|
||||
use helpers::NonDetachingJoinHandle;
|
||||
use id_pool::IdPool;
|
||||
use imbl_value::InternedString;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::process::Command;
|
||||
use tokio::sync::Mutex;
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
use crate::db::model::public::NetworkInterfaceInfo;
|
||||
use crate::prelude::*;
|
||||
use crate::util::sync::Watch;
|
||||
use crate::util::Invoke;
|
||||
|
||||
pub const START9_BRIDGE_IFACE: &str = "lxcbr0";
|
||||
@@ -34,144 +39,269 @@ impl AvailablePorts {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
struct ForwardRequest {
|
||||
public: bool,
|
||||
target: SocketAddr,
|
||||
rc: Weak<()>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
struct ForwardState {
|
||||
requested: BTreeMap<u16, ForwardRequest>,
|
||||
current: BTreeMap<u16, BTreeMap<InternedString, SocketAddr>>,
|
||||
}
|
||||
impl ForwardState {
|
||||
async fn sync(&mut self, interfaces: &BTreeMap<InternedString, bool>) -> Result<(), Error> {
|
||||
let private_interfaces = interfaces
|
||||
.iter()
|
||||
.filter(|(_, public)| !*public)
|
||||
.map(|(i, _)| i)
|
||||
.collect::<BTreeSet<_>>();
|
||||
let all_interfaces = interfaces.keys().collect::<BTreeSet<_>>();
|
||||
self.requested.retain(|_, req| req.rc.strong_count() > 0);
|
||||
for external in self
|
||||
.requested
|
||||
.keys()
|
||||
.chain(self.current.keys())
|
||||
.copied()
|
||||
.collect::<BTreeSet<_>>()
|
||||
{
|
||||
match (
|
||||
self.requested.get(&external),
|
||||
self.current.get_mut(&external),
|
||||
) {
|
||||
(Some(req), Some(cur)) => {
|
||||
let expected = if req.public {
|
||||
&all_interfaces
|
||||
} else {
|
||||
&private_interfaces
|
||||
};
|
||||
let actual = cur.keys().collect::<BTreeSet<_>>();
|
||||
let mut to_rm = actual
|
||||
.difference(expected)
|
||||
.copied()
|
||||
.cloned()
|
||||
.collect::<BTreeSet<_>>();
|
||||
let mut to_add = expected
|
||||
.difference(&actual)
|
||||
.copied()
|
||||
.cloned()
|
||||
.collect::<BTreeSet<_>>();
|
||||
for interface in actual.intersection(expected).copied() {
|
||||
if cur[interface] != req.target {
|
||||
to_rm.insert(interface.clone());
|
||||
to_add.insert(interface.clone());
|
||||
}
|
||||
}
|
||||
for interface in to_rm {
|
||||
unforward(external, &*interface, cur[&interface]).await?;
|
||||
cur.remove(&interface);
|
||||
}
|
||||
for interface in to_add {
|
||||
forward(external, &*interface, req.target).await?;
|
||||
cur.insert(interface, req.target);
|
||||
}
|
||||
}
|
||||
(Some(req), None) => {
|
||||
let cur = self.current.entry(external).or_default();
|
||||
for interface in if req.public {
|
||||
&all_interfaces
|
||||
} else {
|
||||
&private_interfaces
|
||||
}
|
||||
.into_iter()
|
||||
.copied()
|
||||
.cloned()
|
||||
{
|
||||
forward(external, &*interface, req.target).await?;
|
||||
cur.insert(interface, req.target);
|
||||
}
|
||||
}
|
||||
(None, Some(cur)) => {
|
||||
let to_rm = cur.keys().cloned().collect::<BTreeSet<_>>();
|
||||
for interface in to_rm {
|
||||
unforward(external, &*interface, cur[&interface]).await?;
|
||||
cur.remove(&interface);
|
||||
}
|
||||
self.current.remove(&external);
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
fn err_has_exited<T>(_: T) -> Error {
|
||||
Error::new(
|
||||
eyre!("PortForwardController thread has exited"),
|
||||
ErrorKind::Unknown,
|
||||
)
|
||||
}
|
||||
|
||||
pub struct LanPortForwardController {
|
||||
forwards: Mutex<BTreeMap<u16, BTreeMap<SocketAddr, Weak<()>>>>,
|
||||
req: mpsc::UnboundedSender<(
|
||||
Option<(u16, ForwardRequest)>,
|
||||
oneshot::Sender<Result<(), Error>>,
|
||||
)>,
|
||||
_thread: NonDetachingJoinHandle<()>,
|
||||
}
|
||||
impl LanPortForwardController {
|
||||
pub fn new() -> Self {
|
||||
pub fn new(mut ip_info: Watch<BTreeMap<InternedString, NetworkInterfaceInfo>>) -> Self {
|
||||
let (req_send, mut req_recv) = mpsc::unbounded_channel();
|
||||
let thread = NonDetachingJoinHandle::from(tokio::spawn(async move {
|
||||
let mut state = ForwardState::default();
|
||||
let mut interfaces = ip_info.peek_and_mark_seen(|ip_info| {
|
||||
ip_info
|
||||
.iter()
|
||||
.map(|(iface, info)| (iface.clone(), info.public()))
|
||||
.collect()
|
||||
});
|
||||
let mut reply: Option<oneshot::Sender<Result<(), Error>>> = None;
|
||||
loop {
|
||||
tokio::select! {
|
||||
msg = req_recv.recv() => {
|
||||
if let Some((msg, re)) = msg {
|
||||
if let Some((external, req)) = msg {
|
||||
state.requested.insert(external, req);
|
||||
}
|
||||
reply = Some(re);
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
_ = ip_info.changed() => {
|
||||
interfaces = ip_info.peek(|ip_info| {
|
||||
ip_info
|
||||
.iter()
|
||||
.map(|(iface, info)| (iface.clone(), info.public()))
|
||||
.collect()
|
||||
});
|
||||
}
|
||||
}
|
||||
let res = state.sync(&interfaces).await;
|
||||
if let Err(e) = &res {
|
||||
tracing::error!("Error in PortForwardController: {e}");
|
||||
tracing::debug!("{e:?}");
|
||||
}
|
||||
if let Some(re) = reply.take() {
|
||||
let _ = re.send(res);
|
||||
}
|
||||
}
|
||||
}));
|
||||
Self {
|
||||
forwards: Mutex::new(BTreeMap::new()),
|
||||
req: req_send,
|
||||
_thread: thread,
|
||||
}
|
||||
}
|
||||
pub async fn add(&self, port: u16, addr: SocketAddr) -> Result<Arc<()>, Error> {
|
||||
let mut writable = self.forwards.lock().await;
|
||||
let (prev, mut forward) = if let Some(forward) = writable.remove(&port) {
|
||||
(
|
||||
forward.keys().next().cloned(),
|
||||
forward
|
||||
.into_iter()
|
||||
.filter(|(_, rc)| rc.strong_count() > 0)
|
||||
.collect(),
|
||||
)
|
||||
} else {
|
||||
(None, BTreeMap::new())
|
||||
};
|
||||
pub async fn add(&self, port: u16, public: bool, target: SocketAddr) -> Result<Arc<()>, Error> {
|
||||
let rc = Arc::new(());
|
||||
forward.insert(addr, Arc::downgrade(&rc));
|
||||
let next = forward.keys().next().cloned();
|
||||
if !forward.is_empty() {
|
||||
writable.insert(port, forward);
|
||||
}
|
||||
let (send, recv) = oneshot::channel();
|
||||
self.req
|
||||
.send((
|
||||
Some((
|
||||
port,
|
||||
ForwardRequest {
|
||||
public,
|
||||
target,
|
||||
rc: Arc::downgrade(&rc),
|
||||
},
|
||||
)),
|
||||
send,
|
||||
))
|
||||
.map_err(err_has_exited)?;
|
||||
|
||||
update_forward(port, prev, next).await?;
|
||||
Ok(rc)
|
||||
recv.await.map_err(err_has_exited)?.map(|_| rc)
|
||||
}
|
||||
pub async fn gc(&self, external: u16) -> Result<(), Error> {
|
||||
let mut writable = self.forwards.lock().await;
|
||||
let (prev, forward) = if let Some(forward) = writable.remove(&external) {
|
||||
(
|
||||
forward.keys().next().cloned(),
|
||||
forward
|
||||
.into_iter()
|
||||
.filter(|(_, rc)| rc.strong_count() > 0)
|
||||
.collect(),
|
||||
)
|
||||
} else {
|
||||
(None, BTreeMap::new())
|
||||
};
|
||||
let next = forward.keys().next().cloned();
|
||||
if !forward.is_empty() {
|
||||
writable.insert(external, forward);
|
||||
}
|
||||
pub async fn gc(&self) -> Result<(), Error> {
|
||||
let (send, recv) = oneshot::channel();
|
||||
self.req.send((None, send)).map_err(err_has_exited)?;
|
||||
|
||||
update_forward(external, prev, next).await
|
||||
recv.await.map_err(err_has_exited)?
|
||||
}
|
||||
}
|
||||
|
||||
async fn update_forward(
|
||||
external: u16,
|
||||
prev: Option<SocketAddr>,
|
||||
next: Option<SocketAddr>,
|
||||
) -> Result<(), Error> {
|
||||
if prev != next {
|
||||
if let Some(prev) = prev {
|
||||
unforward(START9_BRIDGE_IFACE, external, prev).await?;
|
||||
}
|
||||
if let Some(next) = next {
|
||||
forward(START9_BRIDGE_IFACE, external, next).await?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// iptables -I FORWARD -o br-start9 -p tcp -d 172.18.0.2 --dport 8333 -j ACCEPT
|
||||
// iptables -t nat -I PREROUTING -p tcp --dport 32768 -j DNAT --to 172.18.0.2:8333
|
||||
async fn forward(iface: &str, external: u16, addr: SocketAddr) -> Result<(), Error> {
|
||||
Command::new("iptables")
|
||||
.arg("-I")
|
||||
.arg("FORWARD")
|
||||
.arg("-o")
|
||||
.arg(iface)
|
||||
.arg("-p")
|
||||
.arg("tcp")
|
||||
.arg("-d")
|
||||
.arg(addr.ip().to_string())
|
||||
.arg("--dport")
|
||||
.arg(addr.port().to_string())
|
||||
.arg("-j")
|
||||
.arg("ACCEPT")
|
||||
.invoke(crate::ErrorKind::Network)
|
||||
.await?;
|
||||
Command::new("iptables")
|
||||
.arg("-t")
|
||||
.arg("nat")
|
||||
.arg("-I")
|
||||
.arg("PREROUTING")
|
||||
.arg("-p")
|
||||
.arg("tcp")
|
||||
.arg("--dport")
|
||||
.arg(external.to_string())
|
||||
.arg("-j")
|
||||
.arg("DNAT")
|
||||
.arg("--to")
|
||||
.arg(addr.to_string())
|
||||
.invoke(crate::ErrorKind::Network)
|
||||
.await?;
|
||||
async fn forward(external: u16, interface: &str, target: SocketAddr) -> Result<(), Error> {
|
||||
for proto in ["tcp", "udp"] {
|
||||
Command::new("iptables")
|
||||
.arg("-I")
|
||||
.arg("FORWARD")
|
||||
.arg("-i")
|
||||
.arg(interface)
|
||||
.arg("-o")
|
||||
.arg(START9_BRIDGE_IFACE)
|
||||
.arg("-p")
|
||||
.arg(proto)
|
||||
.arg("-d")
|
||||
.arg(target.ip().to_string())
|
||||
.arg("--dport")
|
||||
.arg(target.port().to_string())
|
||||
.arg("-j")
|
||||
.arg("ACCEPT")
|
||||
.invoke(crate::ErrorKind::Network)
|
||||
.await?;
|
||||
Command::new("iptables")
|
||||
.arg("-t")
|
||||
.arg("nat")
|
||||
.arg("-I")
|
||||
.arg("PREROUTING")
|
||||
.arg("-i")
|
||||
.arg(interface)
|
||||
.arg("-p")
|
||||
.arg(proto)
|
||||
.arg("--dport")
|
||||
.arg(external.to_string())
|
||||
.arg("-j")
|
||||
.arg("DNAT")
|
||||
.arg("--to")
|
||||
.arg(target.to_string())
|
||||
.invoke(crate::ErrorKind::Network)
|
||||
.await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// iptables -D FORWARD -o br-start9 -p tcp -d 172.18.0.2 --dport 8333 -j ACCEPT
|
||||
// iptables -t nat -D PREROUTING -p tcp --dport 32768 -j DNAT --to 172.18.0.2:8333
|
||||
async fn unforward(iface: &str, external: u16, addr: SocketAddr) -> Result<(), Error> {
|
||||
Command::new("iptables")
|
||||
.arg("-D")
|
||||
.arg("FORWARD")
|
||||
.arg("-o")
|
||||
.arg(iface)
|
||||
.arg("-p")
|
||||
.arg("tcp")
|
||||
.arg("-d")
|
||||
.arg(addr.ip().to_string())
|
||||
.arg("--dport")
|
||||
.arg(addr.port().to_string())
|
||||
.arg("-j")
|
||||
.arg("ACCEPT")
|
||||
.invoke(crate::ErrorKind::Network)
|
||||
.await?;
|
||||
Command::new("iptables")
|
||||
.arg("-t")
|
||||
.arg("nat")
|
||||
.arg("-D")
|
||||
.arg("PREROUTING")
|
||||
.arg("-p")
|
||||
.arg("tcp")
|
||||
.arg("--dport")
|
||||
.arg(external.to_string())
|
||||
.arg("-j")
|
||||
.arg("DNAT")
|
||||
.arg("--to")
|
||||
.arg(addr.to_string())
|
||||
.invoke(crate::ErrorKind::Network)
|
||||
.await?;
|
||||
async fn unforward(external: u16, interface: &str, target: SocketAddr) -> Result<(), Error> {
|
||||
for proto in ["tcp", "udp"] {
|
||||
Command::new("iptables")
|
||||
.arg("-D")
|
||||
.arg("FORWARD")
|
||||
.arg("-i")
|
||||
.arg(interface)
|
||||
.arg("-o")
|
||||
.arg(START9_BRIDGE_IFACE)
|
||||
.arg("-p")
|
||||
.arg(proto)
|
||||
.arg("-d")
|
||||
.arg(target.ip().to_string())
|
||||
.arg("--dport")
|
||||
.arg(target.port().to_string())
|
||||
.arg("-j")
|
||||
.arg("ACCEPT")
|
||||
.invoke(crate::ErrorKind::Network)
|
||||
.await?;
|
||||
Command::new("iptables")
|
||||
.arg("-t")
|
||||
.arg("nat")
|
||||
.arg("-D")
|
||||
.arg("PREROUTING")
|
||||
.arg("-i")
|
||||
.arg(interface)
|
||||
.arg("-p")
|
||||
.arg(proto)
|
||||
.arg("--dport")
|
||||
.arg(external.to_string())
|
||||
.arg("-j")
|
||||
.arg("DNAT")
|
||||
.arg("--to")
|
||||
.arg(target.to_string())
|
||||
.invoke(crate::ErrorKind::Network)
|
||||
.await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -1,17 +1,22 @@
|
||||
use std::fmt;
|
||||
use std::str::FromStr;
|
||||
use std::collections::BTreeSet;
|
||||
|
||||
use clap::builder::ValueParserFactory;
|
||||
use clap::Parser;
|
||||
use imbl_value::InternedString;
|
||||
use models::FromStrParser;
|
||||
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerArgs, HandlerExt, ParentHandler};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use torut::onion::OnionAddressV3;
|
||||
use ts_rs::TS;
|
||||
|
||||
use crate::context::{CliContext, RpcContext};
|
||||
use crate::db::model::DatabaseModel;
|
||||
use crate::net::acme::AcmeProvider;
|
||||
use crate::net::host::{all_hosts, HostApiKind};
|
||||
use crate::prelude::*;
|
||||
use crate::util::serde::{display_serializable, HandlerExtSerde};
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize, PartialEq, Eq, PartialOrd, Ord, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[derive(Clone, Debug, Deserialize, Serialize, TS)]
|
||||
#[serde(rename_all = "kebab-case")]
|
||||
#[serde(rename_all_fields = "camelCase")]
|
||||
#[serde(tag = "kind")]
|
||||
#[ts(export)]
|
||||
pub enum HostAddress {
|
||||
@@ -22,36 +27,278 @@ pub enum HostAddress {
|
||||
Domain {
|
||||
#[ts(type = "string")]
|
||||
address: InternedString,
|
||||
public: bool,
|
||||
acme: Option<AcmeProvider>,
|
||||
},
|
||||
}
|
||||
|
||||
impl FromStr for HostAddress {
|
||||
type Err = Error;
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
if let Some(addr) = s.strip_suffix(".onion") {
|
||||
Ok(HostAddress::Onion {
|
||||
address: addr
|
||||
.parse::<OnionAddressV3>()
|
||||
.with_kind(ErrorKind::ParseUrl)?,
|
||||
})
|
||||
} else {
|
||||
Ok(HostAddress::Domain { address: s.into() })
|
||||
}
|
||||
}
|
||||
#[derive(Debug, Deserialize, Serialize, TS)]
|
||||
pub struct DomainConfig {
|
||||
pub public: bool,
|
||||
pub acme: Option<AcmeProvider>,
|
||||
}
|
||||
|
||||
impl fmt::Display for HostAddress {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
Self::Onion { address } => write!(f, "{address}"),
|
||||
Self::Domain { address } => write!(f, "{address}"),
|
||||
fn check_duplicates(db: &DatabaseModel) -> Result<(), Error> {
|
||||
let mut onions = BTreeSet::<OnionAddressV3>::new();
|
||||
let mut domains = BTreeSet::<InternedString>::new();
|
||||
let mut check_onion = |onion: OnionAddressV3| {
|
||||
if onions.contains(&onion) {
|
||||
return Err(Error::new(
|
||||
eyre!("onion address {onion} is already in use"),
|
||||
ErrorKind::InvalidRequest,
|
||||
));
|
||||
}
|
||||
onions.insert(onion);
|
||||
Ok(())
|
||||
};
|
||||
let mut check_domain = |domain: InternedString| {
|
||||
if domains.contains(&domain) {
|
||||
return Err(Error::new(
|
||||
eyre!("domain {domain} is already in use"),
|
||||
ErrorKind::InvalidRequest,
|
||||
));
|
||||
}
|
||||
domains.insert(domain);
|
||||
Ok(())
|
||||
};
|
||||
for host in all_hosts(db) {
|
||||
let host = host?;
|
||||
for onion in host.as_onions().de()? {
|
||||
check_onion(onion)?;
|
||||
}
|
||||
for domain in host.as_domains().keys()? {
|
||||
check_domain(domain)?;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
impl ValueParserFactory for HostAddress {
|
||||
type Parser = FromStrParser<Self>;
|
||||
fn value_parser() -> Self::Parser {
|
||||
Self::Parser::new()
|
||||
}
|
||||
pub fn address_api<C: Context, Kind: HostApiKind>(
|
||||
) -> ParentHandler<C, Kind::Params, Kind::InheritedParams> {
|
||||
ParentHandler::<C, Kind::Params, Kind::InheritedParams>::new()
|
||||
.subcommand(
|
||||
"domain",
|
||||
ParentHandler::<C, Empty, Kind::Inheritance>::new()
|
||||
.subcommand(
|
||||
"add",
|
||||
from_fn_async(add_domain::<Kind>)
|
||||
.with_metadata("sync_db", Value::Bool(true))
|
||||
.with_inherited(|_, a| a)
|
||||
.no_display()
|
||||
.with_about("Add an address to this host")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
.subcommand(
|
||||
"remove",
|
||||
from_fn_async(remove_domain::<Kind>)
|
||||
.with_metadata("sync_db", Value::Bool(true))
|
||||
.with_inherited(|_, a| a)
|
||||
.no_display()
|
||||
.with_about("Remove an address from this host")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
.with_inherited(Kind::inheritance),
|
||||
)
|
||||
.subcommand(
|
||||
"onion",
|
||||
ParentHandler::<C, Empty, Kind::Inheritance>::new()
|
||||
.subcommand(
|
||||
"add",
|
||||
from_fn_async(add_onion::<Kind>)
|
||||
.with_metadata("sync_db", Value::Bool(true))
|
||||
.with_inherited(|_, a| a)
|
||||
.no_display()
|
||||
.with_about("Add an address to this host")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
.subcommand(
|
||||
"remove",
|
||||
from_fn_async(remove_onion::<Kind>)
|
||||
.with_metadata("sync_db", Value::Bool(true))
|
||||
.with_inherited(|_, a| a)
|
||||
.no_display()
|
||||
.with_about("Remove an address from this host")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
.with_inherited(Kind::inheritance),
|
||||
)
|
||||
.subcommand(
|
||||
"list",
|
||||
from_fn_async(list_addresses::<Kind>)
|
||||
.with_inherited(Kind::inheritance)
|
||||
.with_display_serializable()
|
||||
.with_custom_display_fn(|HandlerArgs { params, .. }, res| {
|
||||
use prettytable::*;
|
||||
|
||||
if let Some(format) = params.format {
|
||||
display_serializable(format, res);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let mut table = Table::new();
|
||||
table.add_row(row![bc => "ADDRESS", "PUBLIC", "ACME PROVIDER"]);
|
||||
for address in &res {
|
||||
match address {
|
||||
HostAddress::Onion { address } => {
|
||||
table.add_row(row![address, true, "N/A"]);
|
||||
}
|
||||
HostAddress::Domain {
|
||||
address,
|
||||
public,
|
||||
acme,
|
||||
} => {
|
||||
table.add_row(row![
|
||||
address,
|
||||
*public,
|
||||
acme.as_ref().map(|a| a.0.as_str()).unwrap_or("NONE")
|
||||
]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
table.print_tty(false)?;
|
||||
|
||||
Ok(())
|
||||
})
|
||||
.with_about("List addresses for this host")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Parser)]
|
||||
pub struct AddDomainParams {
|
||||
pub domain: InternedString,
|
||||
#[arg(long)]
|
||||
pub private: bool,
|
||||
#[arg(long)]
|
||||
pub acme: Option<AcmeProvider>,
|
||||
}
|
||||
|
||||
pub async fn add_domain<Kind: HostApiKind>(
|
||||
ctx: RpcContext,
|
||||
AddDomainParams {
|
||||
domain,
|
||||
private,
|
||||
acme,
|
||||
}: AddDomainParams,
|
||||
inheritance: Kind::Inheritance,
|
||||
) -> Result<(), Error> {
|
||||
ctx.db
|
||||
.mutate(|db| {
|
||||
if let Some(acme) = &acme {
|
||||
if !db.as_public().as_server_info().as_acme().contains_key(&acme)? {
|
||||
return Err(Error::new(eyre!("unknown acme provider {}, please run acme.init for this provider first", acme.0), ErrorKind::InvalidRequest));
|
||||
}
|
||||
}
|
||||
|
||||
Kind::host_for(&inheritance, db)?
|
||||
.as_domains_mut()
|
||||
.insert(
|
||||
&domain,
|
||||
&DomainConfig {
|
||||
public: !private,
|
||||
acme,
|
||||
},
|
||||
)?;
|
||||
check_duplicates(db)
|
||||
})
|
||||
.await?;
|
||||
Kind::sync_host(&ctx, inheritance).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Parser)]
|
||||
pub struct RemoveDomainParams {
|
||||
pub domain: InternedString,
|
||||
}
|
||||
|
||||
pub async fn remove_domain<Kind: HostApiKind>(
|
||||
ctx: RpcContext,
|
||||
RemoveDomainParams { domain }: RemoveDomainParams,
|
||||
inheritance: Kind::Inheritance,
|
||||
) -> Result<(), Error> {
|
||||
ctx.db
|
||||
.mutate(|db| {
|
||||
Kind::host_for(&inheritance, db)?
|
||||
.as_domains_mut()
|
||||
.remove(&domain)
|
||||
})
|
||||
.await?;
|
||||
Kind::sync_host(&ctx, inheritance).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Parser)]
|
||||
pub struct OnionParams {
|
||||
pub onion: String,
|
||||
}
|
||||
|
||||
pub async fn add_onion<Kind: HostApiKind>(
|
||||
ctx: RpcContext,
|
||||
OnionParams { onion }: OnionParams,
|
||||
inheritance: Kind::Inheritance,
|
||||
) -> Result<(), Error> {
|
||||
let onion = onion
|
||||
.strip_suffix(".onion")
|
||||
.ok_or_else(|| {
|
||||
Error::new(
|
||||
eyre!("onion hostname must end in .onion"),
|
||||
ErrorKind::InvalidOnionAddress,
|
||||
)
|
||||
})?
|
||||
.parse::<OnionAddressV3>()?;
|
||||
ctx.db
|
||||
.mutate(|db| {
|
||||
db.as_private().as_key_store().as_onion().get_key(&onion)?;
|
||||
|
||||
Kind::host_for(&inheritance, db)?
|
||||
.as_onions_mut()
|
||||
.mutate(|a| Ok(a.insert(onion)))?;
|
||||
check_duplicates(db)
|
||||
})
|
||||
.await?;
|
||||
|
||||
Kind::sync_host(&ctx, inheritance).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn remove_onion<Kind: HostApiKind>(
|
||||
ctx: RpcContext,
|
||||
OnionParams { onion }: OnionParams,
|
||||
inheritance: Kind::Inheritance,
|
||||
) -> Result<(), Error> {
|
||||
let onion = onion
|
||||
.strip_suffix(".onion")
|
||||
.ok_or_else(|| {
|
||||
Error::new(
|
||||
eyre!("onion hostname must end in .onion"),
|
||||
ErrorKind::InvalidOnionAddress,
|
||||
)
|
||||
})?
|
||||
.parse::<OnionAddressV3>()?;
|
||||
ctx.db
|
||||
.mutate(|db| {
|
||||
Kind::host_for(&inheritance, db)?
|
||||
.as_onions_mut()
|
||||
.mutate(|a| Ok(a.remove(&onion)))
|
||||
})
|
||||
.await?;
|
||||
|
||||
Kind::sync_host(&ctx, inheritance).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn list_addresses<Kind: HostApiKind>(
|
||||
ctx: RpcContext,
|
||||
_: Empty,
|
||||
inheritance: Kind::Inheritance,
|
||||
) -> Result<Vec<HostAddress>, Error> {
|
||||
Ok(Kind::host_for(&inheritance, &mut ctx.db.peek().await)?
|
||||
.de()?
|
||||
.addresses()
|
||||
.collect())
|
||||
}
|
||||
|
||||
@@ -1,13 +1,19 @@
|
||||
use std::collections::BTreeMap;
|
||||
use std::str::FromStr;
|
||||
|
||||
use clap::builder::ValueParserFactory;
|
||||
use clap::Parser;
|
||||
use models::{FromStrParser, HostId};
|
||||
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerArgs, HandlerExt, ParentHandler};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use ts_rs::TS;
|
||||
|
||||
use crate::context::{CliContext, RpcContext};
|
||||
use crate::net::forward::AvailablePorts;
|
||||
use crate::net::host::HostApiKind;
|
||||
use crate::net::vhost::AlpnInfo;
|
||||
use crate::prelude::*;
|
||||
use crate::util::serde::{display_serializable, HandlerExtSerde};
|
||||
|
||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize, TS)]
|
||||
#[ts(export)]
|
||||
@@ -41,12 +47,14 @@ impl FromStr for BindId {
|
||||
pub struct BindInfo {
|
||||
pub enabled: bool,
|
||||
pub options: BindOptions,
|
||||
pub lan: LanInfo,
|
||||
pub net: NetInfo,
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Deserialize, Serialize, TS, PartialEq, Eq, PartialOrd, Ord)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export)]
|
||||
pub struct LanInfo {
|
||||
pub struct NetInfo {
|
||||
pub public: bool,
|
||||
pub assigned_port: Option<u16>,
|
||||
pub assigned_ssl_port: Option<u16>,
|
||||
}
|
||||
@@ -63,7 +71,8 @@ impl BindInfo {
|
||||
Ok(Self {
|
||||
enabled: true,
|
||||
options,
|
||||
lan: LanInfo {
|
||||
net: NetInfo {
|
||||
public: false,
|
||||
assigned_port,
|
||||
assigned_ssl_port,
|
||||
},
|
||||
@@ -74,7 +83,7 @@ impl BindInfo {
|
||||
available_ports: &mut AvailablePorts,
|
||||
options: BindOptions,
|
||||
) -> Result<Self, Error> {
|
||||
let Self { mut lan, .. } = self;
|
||||
let Self { net: mut lan, .. } = self;
|
||||
if options
|
||||
.secure
|
||||
.map_or(false, |s| !(s.ssl && options.add_ssl.is_some()))
|
||||
@@ -104,7 +113,7 @@ impl BindInfo {
|
||||
Ok(Self {
|
||||
enabled: true,
|
||||
options,
|
||||
lan,
|
||||
net: lan,
|
||||
})
|
||||
}
|
||||
pub fn disable(&mut self) {
|
||||
@@ -137,3 +146,99 @@ pub struct AddSslOptions {
|
||||
// pub add_x_forwarded_headers: bool, // TODO
|
||||
pub alpn: Option<AlpnInfo>,
|
||||
}
|
||||
|
||||
pub fn binding<C: Context, Kind: HostApiKind>(
|
||||
) -> ParentHandler<C, Kind::Params, Kind::InheritedParams> {
|
||||
ParentHandler::<C, Kind::Params, Kind::InheritedParams>::new()
|
||||
.subcommand(
|
||||
"list",
|
||||
from_fn_async(list_bindings::<Kind>)
|
||||
.with_inherited(Kind::inheritance)
|
||||
.with_display_serializable()
|
||||
.with_custom_display_fn(|HandlerArgs { params, .. }, res| {
|
||||
use prettytable::*;
|
||||
|
||||
if let Some(format) = params.format {
|
||||
return Ok(display_serializable(format, res));
|
||||
}
|
||||
|
||||
let mut table = Table::new();
|
||||
table.add_row(row![bc => "INTERNAL PORT", "ENABLED", "PUBLIC", "EXTERNAL PORT", "EXTERNAL SSL PORT"]);
|
||||
for (internal, info) in res {
|
||||
table.add_row(row![
|
||||
internal,
|
||||
info.enabled,
|
||||
info.net.public,
|
||||
if let Some(port) = info.net.assigned_port {
|
||||
port.to_string()
|
||||
} else {
|
||||
"N/A".to_owned()
|
||||
},
|
||||
if let Some(port) = info.net.assigned_ssl_port {
|
||||
port.to_string()
|
||||
} else {
|
||||
"N/A".to_owned()
|
||||
},
|
||||
]);
|
||||
}
|
||||
|
||||
table.print_tty(false).unwrap();
|
||||
|
||||
Ok(())
|
||||
})
|
||||
.with_about("List bindinges for this host")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
.subcommand(
|
||||
"set-public",
|
||||
from_fn_async(set_public::<Kind>)
|
||||
.with_metadata("sync_db", Value::Bool(true))
|
||||
.with_inherited(Kind::inheritance)
|
||||
.no_display()
|
||||
.with_about("Add an binding to this host")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
}
|
||||
|
||||
pub async fn list_bindings<Kind: HostApiKind>(
|
||||
ctx: RpcContext,
|
||||
_: Empty,
|
||||
inheritance: Kind::Inheritance,
|
||||
) -> Result<BTreeMap<u16, BindInfo>, Error> {
|
||||
Kind::host_for(&inheritance, &mut ctx.db.peek().await)?
|
||||
.as_bindings()
|
||||
.de()
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Parser, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export)]
|
||||
pub struct BindingSetPublicParams {
|
||||
internal_port: u16,
|
||||
#[arg(long)]
|
||||
public: Option<bool>,
|
||||
}
|
||||
|
||||
pub async fn set_public<Kind: HostApiKind>(
|
||||
ctx: RpcContext,
|
||||
BindingSetPublicParams {
|
||||
internal_port,
|
||||
public,
|
||||
}: BindingSetPublicParams,
|
||||
inheritance: Kind::Inheritance,
|
||||
) -> Result<(), Error> {
|
||||
ctx.db
|
||||
.mutate(|db| {
|
||||
Kind::host_for(&inheritance, db)?
|
||||
.as_bindings_mut()
|
||||
.mutate(|b| {
|
||||
b.get_mut(&internal_port)
|
||||
.or_not_found(internal_port)?
|
||||
.net
|
||||
.public = public.unwrap_or(true);
|
||||
Ok(())
|
||||
})
|
||||
})
|
||||
.await?;
|
||||
Kind::sync_host(&ctx, inheritance).await
|
||||
}
|
||||
|
||||
@@ -1,31 +1,37 @@
|
||||
use std::collections::{BTreeMap, BTreeSet};
|
||||
use std::future::Future;
|
||||
use std::panic::RefUnwindSafe;
|
||||
|
||||
use clap::Parser;
|
||||
use imbl_value::InternedString;
|
||||
use itertools::Itertools;
|
||||
use models::{HostId, PackageId};
|
||||
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerExt, ParentHandler};
|
||||
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerExt, OrEmpty, ParentHandler};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use torut::onion::OnionAddressV3;
|
||||
use ts_rs::TS;
|
||||
|
||||
use crate::context::{CliContext, RpcContext};
|
||||
use crate::context::RpcContext;
|
||||
use crate::db::model::DatabaseModel;
|
||||
use crate::net::forward::AvailablePorts;
|
||||
use crate::net::host::address::HostAddress;
|
||||
use crate::net::host::binding::{BindInfo, BindOptions};
|
||||
use crate::net::host::address::{address_api, DomainConfig, HostAddress};
|
||||
use crate::net::host::binding::{binding, BindInfo, BindOptions};
|
||||
use crate::net::service_interface::HostnameInfo;
|
||||
use crate::prelude::*;
|
||||
|
||||
pub mod address;
|
||||
pub mod binding;
|
||||
|
||||
#[derive(Debug, Deserialize, Serialize, HasModel, TS)]
|
||||
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[model = "Model<Self>"]
|
||||
#[ts(export)]
|
||||
pub struct Host {
|
||||
pub kind: HostKind,
|
||||
pub bindings: BTreeMap<u16, BindInfo>,
|
||||
pub addresses: BTreeSet<HostAddress>,
|
||||
#[ts(type = "string[]")]
|
||||
pub onions: BTreeSet<OnionAddressV3>,
|
||||
#[ts(as = "BTreeMap::<String, DomainConfig>")]
|
||||
pub domains: BTreeMap<InternedString, DomainConfig>,
|
||||
/// COMPUTED: NetService::update
|
||||
pub hostname_info: BTreeMap<u16, Vec<HostnameInfo>>, // internal port -> Hostnames
|
||||
}
|
||||
@@ -35,29 +41,28 @@ impl AsRef<Host> for Host {
|
||||
}
|
||||
}
|
||||
impl Host {
|
||||
pub fn new(kind: HostKind) -> Self {
|
||||
Self {
|
||||
kind,
|
||||
bindings: BTreeMap::new(),
|
||||
addresses: BTreeSet::new(),
|
||||
hostname_info: BTreeMap::new(),
|
||||
}
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
pub fn addresses(&self) -> impl Iterator<Item = &HostAddress> {
|
||||
// TODO: handle primary
|
||||
self.addresses.iter()
|
||||
pub fn addresses<'a>(&'a self) -> impl Iterator<Item = HostAddress> + 'a {
|
||||
self.onions
|
||||
.iter()
|
||||
.cloned()
|
||||
.map(|address| HostAddress::Onion { address })
|
||||
.chain(
|
||||
self.domains
|
||||
.iter()
|
||||
.map(
|
||||
|(address, DomainConfig { public, acme })| HostAddress::Domain {
|
||||
address: address.clone(),
|
||||
public: *public,
|
||||
acme: acme.clone(),
|
||||
},
|
||||
),
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Deserialize, Serialize, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export)]
|
||||
pub enum HostKind {
|
||||
Multi,
|
||||
// Single,
|
||||
// Static,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Deserialize, Serialize, HasModel, TS)]
|
||||
#[model = "Model<Self>"]
|
||||
#[ts(export)]
|
||||
@@ -76,10 +81,12 @@ impl Map for Hosts {
|
||||
|
||||
pub fn host_for<'a>(
|
||||
db: &'a mut DatabaseModel,
|
||||
package_id: &PackageId,
|
||||
package_id: Option<&PackageId>,
|
||||
host_id: &HostId,
|
||||
host_kind: HostKind,
|
||||
) -> Result<&'a mut Model<Host>, Error> {
|
||||
let Some(package_id) = package_id else {
|
||||
return Ok(db.as_public_mut().as_server_info_mut().as_host_mut());
|
||||
};
|
||||
fn host_info<'a>(
|
||||
db: &'a mut DatabaseModel,
|
||||
package_id: &PackageId,
|
||||
@@ -103,23 +110,31 @@ pub fn host_for<'a>(
|
||||
None
|
||||
};
|
||||
host_info(db, package_id)?.upsert(host_id, || {
|
||||
let mut h = Host::new(host_kind);
|
||||
h.addresses.insert(HostAddress::Onion {
|
||||
address: tor_key
|
||||
let mut h = Host::new();
|
||||
h.onions.insert(
|
||||
tor_key
|
||||
.or_not_found("generated tor key")?
|
||||
.public()
|
||||
.get_onion_address(),
|
||||
});
|
||||
);
|
||||
Ok(h)
|
||||
})
|
||||
}
|
||||
|
||||
pub fn all_hosts(db: &DatabaseModel) -> impl Iterator<Item = Result<&Model<Host>, Error>> {
|
||||
[Ok(db.as_public().as_server_info().as_host())]
|
||||
.into_iter()
|
||||
.chain(
|
||||
[db.as_public().as_package_data().as_entries()]
|
||||
.into_iter()
|
||||
.flatten_ok()
|
||||
.map(|entry| entry.and_then(|(_, v)| v.as_hosts().as_entries()))
|
||||
.flatten_ok()
|
||||
.map_ok(|(_, v)| v),
|
||||
)
|
||||
}
|
||||
|
||||
impl Model<Host> {
|
||||
pub fn set_kind(&mut self, kind: HostKind) -> Result<(), Error> {
|
||||
match (self.as_kind().de()?, kind) {
|
||||
(HostKind::Multi, HostKind::Multi) => Ok(()),
|
||||
}
|
||||
}
|
||||
pub fn add_binding(
|
||||
&mut self,
|
||||
available_ports: &mut AvailablePorts,
|
||||
@@ -139,16 +154,78 @@ impl Model<Host> {
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Parser)]
|
||||
pub struct HostParams {
|
||||
pub struct RequiresPackageId {
|
||||
package: PackageId,
|
||||
}
|
||||
|
||||
pub fn host<C: Context>() -> ParentHandler<C, HostParams> {
|
||||
ParentHandler::<C, HostParams>::new()
|
||||
#[derive(Deserialize, Serialize, Parser)]
|
||||
pub struct RequiresHostId {
|
||||
host: HostId,
|
||||
}
|
||||
|
||||
pub trait HostApiKind: 'static {
|
||||
type Params: Send + Sync + 'static;
|
||||
type InheritedParams: Send + Sync + 'static;
|
||||
type Inheritance: RefUnwindSafe + OrEmpty<Self::Inheritance> + Send + Sync + 'static;
|
||||
fn inheritance(params: Self::Params, inherited: Self::InheritedParams) -> Self::Inheritance;
|
||||
fn host_for<'a>(
|
||||
inheritance: &Self::Inheritance,
|
||||
db: &'a mut DatabaseModel,
|
||||
) -> Result<&'a mut Model<Host>, Error>;
|
||||
fn sync_host(
|
||||
ctx: &RpcContext,
|
||||
inheritance: Self::Inheritance,
|
||||
) -> impl Future<Output = Result<(), Error>> + Send;
|
||||
}
|
||||
pub struct ForPackage;
|
||||
impl HostApiKind for ForPackage {
|
||||
type Params = RequiresHostId;
|
||||
type InheritedParams = PackageId;
|
||||
type Inheritance = (PackageId, HostId);
|
||||
fn inheritance(
|
||||
RequiresHostId { host }: Self::Params,
|
||||
package: Self::InheritedParams,
|
||||
) -> Self::Inheritance {
|
||||
(package, host)
|
||||
}
|
||||
fn host_for<'a>(
|
||||
(package, host): &Self::Inheritance,
|
||||
db: &'a mut DatabaseModel,
|
||||
) -> Result<&'a mut Model<Host>, Error> {
|
||||
host_for(db, Some(package), host)
|
||||
}
|
||||
async fn sync_host(ctx: &RpcContext, (package, host): Self::Inheritance) -> Result<(), Error> {
|
||||
let service = ctx.services.get(&package).await;
|
||||
let service_ref = service.as_ref().or_not_found(&package)?;
|
||||
service_ref.sync_host(host).await?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
pub struct ForServer;
|
||||
impl HostApiKind for ForServer {
|
||||
type Params = Empty;
|
||||
type InheritedParams = Empty;
|
||||
type Inheritance = Empty;
|
||||
fn inheritance(_: Self::Params, _: Self::InheritedParams) -> Self::Inheritance {
|
||||
Empty {}
|
||||
}
|
||||
fn host_for<'a>(
|
||||
_: &Self::Inheritance,
|
||||
db: &'a mut DatabaseModel,
|
||||
) -> Result<&'a mut Model<Host>, Error> {
|
||||
host_for(db, None, &HostId::default())
|
||||
}
|
||||
async fn sync_host(ctx: &RpcContext, _: Self::Inheritance) -> Result<(), Error> {
|
||||
ctx.os_net_service.sync_host(HostId::default()).await
|
||||
}
|
||||
}
|
||||
|
||||
pub fn host_api<C: Context>() -> ParentHandler<C, RequiresPackageId> {
|
||||
ParentHandler::<C, RequiresPackageId>::new()
|
||||
.subcommand(
|
||||
"list",
|
||||
from_fn_async(list_hosts)
|
||||
.with_inherited(|HostParams { package }, _| package)
|
||||
.with_inherited(|RequiresPackageId { package }, _| package)
|
||||
.with_custom_display_fn(|_, ids| {
|
||||
for id in ids {
|
||||
println!("{id}")
|
||||
@@ -159,8 +236,19 @@ pub fn host<C: Context>() -> ParentHandler<C, HostParams> {
|
||||
)
|
||||
.subcommand(
|
||||
"address",
|
||||
address::<C>().with_inherited(|HostParams { package }, _| package),
|
||||
address_api::<C, ForPackage>()
|
||||
.with_inherited(|RequiresPackageId { package }, _| package),
|
||||
)
|
||||
.subcommand(
|
||||
"binding",
|
||||
binding::<C, ForPackage>().with_inherited(|RequiresPackageId { package }, _| package),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn server_host_api<C: Context>() -> ParentHandler<C> {
|
||||
ParentHandler::<C>::new()
|
||||
.subcommand("address", address_api::<C, ForServer>())
|
||||
.subcommand("binding", binding::<C, ForServer>())
|
||||
}
|
||||
|
||||
pub async fn list_hosts(
|
||||
@@ -178,122 +266,3 @@ pub async fn list_hosts(
|
||||
.into_hosts()
|
||||
.keys()
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Parser)]
|
||||
pub struct AddressApiParams {
|
||||
host: HostId,
|
||||
}
|
||||
|
||||
pub fn address<C: Context>() -> ParentHandler<C, AddressApiParams, PackageId> {
|
||||
ParentHandler::<C, AddressApiParams, PackageId>::new()
|
||||
.subcommand(
|
||||
"add",
|
||||
from_fn_async(add_address)
|
||||
.with_inherited(|AddressApiParams { host }, package| (package, host))
|
||||
.no_display()
|
||||
.with_about("Add an address to this host")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
.subcommand(
|
||||
"remove",
|
||||
from_fn_async(remove_address)
|
||||
.with_inherited(|AddressApiParams { host }, package| (package, host))
|
||||
.no_display()
|
||||
.with_about("Remove an address from this host")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
.subcommand(
|
||||
"list",
|
||||
from_fn_async(list_addresses)
|
||||
.with_inherited(|AddressApiParams { host }, package| (package, host))
|
||||
.with_custom_display_fn(|_, res| {
|
||||
for address in res {
|
||||
println!("{address}")
|
||||
}
|
||||
Ok(())
|
||||
})
|
||||
.with_about("List addresses for this host")
|
||||
.with_call_remote::<CliContext>(),
|
||||
)
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, Parser)]
|
||||
pub struct AddressParams {
|
||||
pub address: HostAddress,
|
||||
}
|
||||
|
||||
pub async fn add_address(
|
||||
ctx: RpcContext,
|
||||
AddressParams { address }: AddressParams,
|
||||
(package, host): (PackageId, HostId),
|
||||
) -> Result<(), Error> {
|
||||
ctx.db
|
||||
.mutate(|db| {
|
||||
if let HostAddress::Onion { address } = address {
|
||||
db.as_private()
|
||||
.as_key_store()
|
||||
.as_onion()
|
||||
.get_key(&address)?;
|
||||
}
|
||||
|
||||
db.as_public_mut()
|
||||
.as_package_data_mut()
|
||||
.as_idx_mut(&package)
|
||||
.or_not_found(&package)?
|
||||
.as_hosts_mut()
|
||||
.as_idx_mut(&host)
|
||||
.or_not_found(&host)?
|
||||
.as_addresses_mut()
|
||||
.mutate(|a| Ok(a.insert(address)))
|
||||
})
|
||||
.await?;
|
||||
let service = ctx.services.get(&package).await;
|
||||
let service_ref = service.as_ref().or_not_found(&package)?;
|
||||
service_ref.update_host(host).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn remove_address(
|
||||
ctx: RpcContext,
|
||||
AddressParams { address }: AddressParams,
|
||||
(package, host): (PackageId, HostId),
|
||||
) -> Result<(), Error> {
|
||||
ctx.db
|
||||
.mutate(|db| {
|
||||
db.as_public_mut()
|
||||
.as_package_data_mut()
|
||||
.as_idx_mut(&package)
|
||||
.or_not_found(&package)?
|
||||
.as_hosts_mut()
|
||||
.as_idx_mut(&host)
|
||||
.or_not_found(&host)?
|
||||
.as_addresses_mut()
|
||||
.mutate(|a| Ok(a.remove(&address)))
|
||||
})
|
||||
.await?;
|
||||
let service = ctx.services.get(&package).await;
|
||||
let service_ref = service.as_ref().or_not_found(&package)?;
|
||||
service_ref.update_host(host).await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn list_addresses(
|
||||
ctx: RpcContext,
|
||||
_: Empty,
|
||||
(package, host): (PackageId, HostId),
|
||||
) -> Result<BTreeSet<HostAddress>, Error> {
|
||||
ctx.db
|
||||
.peek()
|
||||
.await
|
||||
.into_public()
|
||||
.into_package_data()
|
||||
.into_idx(&package)
|
||||
.or_not_found(&package)?
|
||||
.into_hosts()
|
||||
.into_idx(&host)
|
||||
.or_not_found(&host)?
|
||||
.into_addresses()
|
||||
.de()
|
||||
}
|
||||
|
||||
@@ -21,7 +21,9 @@ impl KeyStore {
|
||||
local_certs: CertStore::new(account)?,
|
||||
acme: AcmeCertStore::new(),
|
||||
};
|
||||
res.onion.insert(account.tor_key.clone());
|
||||
for tor_key in account.tor_keys.iter().cloned() {
|
||||
res.onion.insert(tor_key);
|
||||
}
|
||||
Ok(res)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
use rpc_toolkit::{Context, HandlerExt, ParentHandler};
|
||||
|
||||
pub mod acme;
|
||||
pub mod dhcp;
|
||||
pub mod dns;
|
||||
pub mod forward;
|
||||
pub mod host;
|
||||
pub mod keys;
|
||||
pub mod mdns;
|
||||
pub mod net_controller;
|
||||
pub mod network_interface;
|
||||
pub mod service_interface;
|
||||
pub mod ssl;
|
||||
pub mod static_server;
|
||||
@@ -17,20 +17,23 @@ pub mod vhost;
|
||||
pub mod web_server;
|
||||
pub mod wifi;
|
||||
|
||||
pub const PACKAGE_CERT_PATH: &str = "/var/lib/embassy/ssl";
|
||||
|
||||
pub fn net<C: Context>() -> ParentHandler<C> {
|
||||
ParentHandler::new()
|
||||
.subcommand(
|
||||
"tor",
|
||||
tor::tor::<C>().with_about("Tor commands such as list-services, logs, and reset"),
|
||||
)
|
||||
.subcommand(
|
||||
"dhcp",
|
||||
dhcp::dhcp::<C>().with_about("Command to update IP assigned from dhcp"),
|
||||
)
|
||||
.subcommand(
|
||||
"acme",
|
||||
acme::acme::<C>().with_about("Setup automatic clearnet certificate acquisition"),
|
||||
)
|
||||
.subcommand(
|
||||
"network-interface",
|
||||
network_interface::network_interface_api::<C>()
|
||||
.with_about("View and edit network interface configurations"),
|
||||
)
|
||||
.subcommand(
|
||||
"vhost",
|
||||
vhost::vhost_api::<C>().with_about("Manage ssl virtual host proxy"),
|
||||
)
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
1113
core/startos/src/net/network_interface.rs
Normal file
1113
core/startos/src/net/network_interface.rs
Normal file
File diff suppressed because it is too large
Load Diff
@@ -12,7 +12,8 @@ use ts_rs::TS;
|
||||
#[serde(tag = "kind")]
|
||||
pub enum HostnameInfo {
|
||||
Ip {
|
||||
network_interface_id: String,
|
||||
#[ts(type = "string")]
|
||||
network_interface_id: InternedString,
|
||||
public: bool,
|
||||
hostname: IpHostname,
|
||||
},
|
||||
@@ -43,6 +44,8 @@ pub enum IpHostname {
|
||||
},
|
||||
Ipv6 {
|
||||
value: Ipv6Addr,
|
||||
#[serde(default)]
|
||||
scope_id: u32,
|
||||
port: Option<u16>,
|
||||
ssl_port: Option<u16>,
|
||||
},
|
||||
@@ -69,7 +72,6 @@ pub struct ServiceInterface {
|
||||
pub id: ServiceInterfaceId,
|
||||
pub name: String,
|
||||
pub description: String,
|
||||
pub has_primary: bool,
|
||||
pub masked: bool,
|
||||
pub address_info: AddressInfo,
|
||||
#[serde(rename = "type")]
|
||||
|
||||
@@ -17,7 +17,6 @@ use openssl::x509::{X509Builder, X509Extension, X509NameBuilder, X509};
|
||||
use openssl::*;
|
||||
use patch_db::HasModel;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::time::Instant;
|
||||
use tracing::instrument;
|
||||
|
||||
use crate::account::AccountInfo;
|
||||
|
||||
@@ -8,15 +8,15 @@ use std::time::UNIX_EPOCH;
|
||||
use async_compression::tokio::bufread::GzipEncoder;
|
||||
use axum::body::Body;
|
||||
use axum::extract::{self as x, Request};
|
||||
use axum::response::Response;
|
||||
use axum::routing::{any, get, post};
|
||||
use axum::response::{Redirect, Response};
|
||||
use axum::routing::{any, get};
|
||||
use axum::Router;
|
||||
use base64::display::Base64Display;
|
||||
use digest::Digest;
|
||||
use futures::future::ready;
|
||||
use http::header::{
|
||||
ACCEPT_ENCODING, ACCEPT_RANGES, CACHE_CONTROL, CONNECTION, CONTENT_ENCODING, CONTENT_LENGTH,
|
||||
CONTENT_RANGE, CONTENT_TYPE, ETAG, RANGE,
|
||||
CONTENT_RANGE, CONTENT_TYPE, ETAG, HOST, RANGE,
|
||||
};
|
||||
use http::request::Parts as RequestParts;
|
||||
use http::{HeaderValue, Method, StatusCode};
|
||||
@@ -26,7 +26,6 @@ use new_mime_guess::MimeGuess;
|
||||
use openssl::hash::MessageDigest;
|
||||
use openssl::x509::X509;
|
||||
use rpc_toolkit::{Context, HttpServer, Server};
|
||||
use sqlx::query;
|
||||
use tokio::io::{AsyncRead, AsyncReadExt, AsyncSeekExt, BufReader};
|
||||
use tokio_util::io::ReaderStream;
|
||||
use url::Url;
|
||||
@@ -47,7 +46,7 @@ use crate::s9pk::S9pk;
|
||||
use crate::util::io::open_file;
|
||||
use crate::util::net::SyncBody;
|
||||
use crate::util::serde::BASE64;
|
||||
use crate::{diagnostic_api, init_api, install_api, main_api, setup_api};
|
||||
use crate::{diagnostic_api, init_api, install_api, main_api, setup_api, DATA_DIR};
|
||||
|
||||
const NOT_FOUND: &[u8] = b"Not Found";
|
||||
const METHOD_NOT_ALLOWED: &[u8] = b"Method Not Allowed";
|
||||
@@ -230,6 +229,20 @@ pub fn refresher() -> Router {
|
||||
}))
|
||||
}
|
||||
|
||||
pub fn redirecter() -> Router {
|
||||
Router::new().fallback(get(|request: Request| async move {
|
||||
Redirect::temporary(&format!(
|
||||
"https://{}{}",
|
||||
request
|
||||
.headers()
|
||||
.get(HOST)
|
||||
.and_then(|s| s.to_str().ok())
|
||||
.unwrap_or("localhost"),
|
||||
request.uri()
|
||||
))
|
||||
}))
|
||||
}
|
||||
|
||||
async fn proxy_request(ctx: RpcContext, request: Request, url: String) -> Result<Response, Error> {
|
||||
if_authorized(&ctx, request, |mut request| async {
|
||||
for header in PROXY_STRIP_HEADERS {
|
||||
@@ -253,7 +266,7 @@ fn s9pk_router(ctx: RpcContext) -> Router {
|
||||
let (parts, _) = request.into_parts();
|
||||
match FileData::from_path(
|
||||
&parts,
|
||||
&ctx.datadir
|
||||
&Path::new(DATA_DIR)
|
||||
.join(PKG_ARCHIVE_DIR)
|
||||
.join("installed")
|
||||
.join(s9pk),
|
||||
@@ -279,7 +292,7 @@ fn s9pk_router(ctx: RpcContext) -> Router {
|
||||
let s9pk = S9pk::deserialize(
|
||||
&MultiCursorFile::from(
|
||||
open_file(
|
||||
ctx.datadir
|
||||
Path::new(DATA_DIR)
|
||||
.join(PKG_ARCHIVE_DIR)
|
||||
.join("installed")
|
||||
.join(s9pk),
|
||||
|
||||
@@ -1,16 +1,32 @@
|
||||
use std::net::{Ipv4Addr, Ipv6Addr, SocketAddr};
|
||||
use std::collections::BTreeMap;
|
||||
use std::net::{IpAddr, Ipv4Addr, Ipv6Addr, SocketAddr, SocketAddrV6};
|
||||
use std::path::Path;
|
||||
|
||||
use async_stream::try_stream;
|
||||
use color_eyre::eyre::eyre;
|
||||
use futures::stream::BoxStream;
|
||||
use futures::{StreamExt, TryStreamExt};
|
||||
use ipnet::{Ipv4Net, Ipv6Net};
|
||||
use helpers::NonDetachingJoinHandle;
|
||||
use imbl_value::InternedString;
|
||||
use ipnet::{IpNet, Ipv4Net, Ipv6Net};
|
||||
use nix::net::if_::if_nametoindex;
|
||||
use tokio::net::{TcpListener, TcpStream};
|
||||
use tokio::process::Command;
|
||||
|
||||
use crate::db::model::public::NetworkInterfaceInfo;
|
||||
use crate::net::network_interface::NetworkInterfaceListener;
|
||||
use crate::net::web_server::Accept;
|
||||
use crate::prelude::*;
|
||||
use crate::util::sync::Watch;
|
||||
use crate::util::Invoke;
|
||||
use crate::Error;
|
||||
|
||||
pub fn ipv6_is_link_local(addr: Ipv6Addr) -> bool {
|
||||
(addr.segments()[0] & 0xffc0) == 0xfe80
|
||||
}
|
||||
|
||||
pub fn ipv6_is_local(addr: Ipv6Addr) -> bool {
|
||||
addr.is_loopback() || (addr.segments()[0] & 0xfe00) == 0xfc00 || ipv6_is_link_local(addr)
|
||||
}
|
||||
|
||||
fn parse_iface_ip(output: &str) -> Result<Vec<&str>, Error> {
|
||||
let output = output.trim();
|
||||
@@ -112,6 +128,55 @@ pub async fn find_eth_iface() -> Result<String, Error> {
|
||||
))
|
||||
}
|
||||
|
||||
pub async fn all_socket_addrs_for(port: u16) -> Result<Vec<(InternedString, SocketAddr)>, Error> {
|
||||
let mut res = Vec::new();
|
||||
|
||||
let raw = String::from_utf8(
|
||||
Command::new("ip")
|
||||
.arg("-o")
|
||||
.arg("addr")
|
||||
.arg("show")
|
||||
.invoke(ErrorKind::ParseSysInfo)
|
||||
.await?,
|
||||
)?;
|
||||
let err = |item: &str, lineno: usize, line: &str| {
|
||||
Error::new(
|
||||
eyre!("failed to parse ip info ({item}[line:{lineno}]) from {line:?}"),
|
||||
ErrorKind::ParseSysInfo,
|
||||
)
|
||||
};
|
||||
for (idx, line) in raw
|
||||
.lines()
|
||||
.map(|l| l.trim())
|
||||
.enumerate()
|
||||
.filter(|(_, l)| !l.is_empty())
|
||||
{
|
||||
let mut split = line.split_whitespace();
|
||||
let _num = split.next();
|
||||
let ifname = split.next().ok_or_else(|| err("ifname", idx, line))?;
|
||||
let _kind = split.next();
|
||||
let ipnet_str = split.next().ok_or_else(|| err("ipnet", idx, line))?;
|
||||
let ipnet = ipnet_str
|
||||
.parse::<IpNet>()
|
||||
.with_ctx(|_| (ErrorKind::ParseSysInfo, err("ipnet", idx, ipnet_str)))?;
|
||||
match ipnet.addr() {
|
||||
IpAddr::V4(ip4) => res.push((ifname.into(), SocketAddr::new(ip4.into(), port))),
|
||||
IpAddr::V6(ip6) => res.push((
|
||||
ifname.into(),
|
||||
SocketAddr::V6(SocketAddrV6::new(
|
||||
ip6,
|
||||
port,
|
||||
0,
|
||||
if_nametoindex(ifname)
|
||||
.with_ctx(|_| (ErrorKind::ParseSysInfo, "reading scope_id"))?,
|
||||
)),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
pub struct TcpListeners {
|
||||
listeners: Vec<TcpListener>,
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -1,134 +1,278 @@
|
||||
use std::convert::Infallible;
|
||||
use std::future::Future;
|
||||
use std::net::SocketAddr;
|
||||
use std::ops::Deref;
|
||||
use std::sync::atomic::AtomicBool;
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::task::Poll;
|
||||
use std::time::Duration;
|
||||
|
||||
use axum::extract::Request;
|
||||
use axum::Router;
|
||||
use axum_server::Handle;
|
||||
use bytes::Bytes;
|
||||
use futures::future::{ready, BoxFuture};
|
||||
use futures::future::Either;
|
||||
use futures::FutureExt;
|
||||
use helpers::NonDetachingJoinHandle;
|
||||
use tokio::sync::{oneshot, watch};
|
||||
use hyper_util::rt::{TokioIo, TokioTimer};
|
||||
use tokio::net::{TcpListener, TcpStream};
|
||||
use tokio::sync::oneshot;
|
||||
|
||||
use crate::context::{DiagnosticContext, InitContext, InstallContext, RpcContext, SetupContext};
|
||||
use crate::net::network_interface::{
|
||||
NetworkInterfaceListener, SelfContainedNetworkInterfaceListener,
|
||||
};
|
||||
use crate::net::static_server::{
|
||||
diagnostic_ui_router, init_ui_router, install_ui_router, main_ui_router, refresher,
|
||||
diagnostic_ui_router, init_ui_router, install_ui_router, main_ui_router, redirecter, refresher,
|
||||
setup_ui_router,
|
||||
};
|
||||
use crate::prelude::*;
|
||||
use crate::util::actor::background::BackgroundJobQueue;
|
||||
use crate::util::sync::Watch;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct SwappableRouter(watch::Sender<Router>);
|
||||
impl SwappableRouter {
|
||||
pub fn new(router: Router) -> Self {
|
||||
Self(watch::channel(router).0)
|
||||
}
|
||||
pub fn swap(&self, router: Router) {
|
||||
let _ = self.0.send_replace(router);
|
||||
}
|
||||
pub struct Accepted {
|
||||
pub https_redirect: bool,
|
||||
pub stream: TcpStream,
|
||||
}
|
||||
|
||||
pub struct SwappableRouterService {
|
||||
router: watch::Receiver<Router>,
|
||||
changed: Option<BoxFuture<'static, ()>>,
|
||||
pub trait Accept {
|
||||
fn poll_accept(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<Accepted, Error>>;
|
||||
}
|
||||
impl SwappableRouterService {
|
||||
fn router(&self) -> Router {
|
||||
self.router.borrow().clone()
|
||||
}
|
||||
fn changed(&mut self, cx: &mut std::task::Context<'_>) -> Poll<()> {
|
||||
let mut changed = if let Some(changed) = self.changed.take() {
|
||||
changed
|
||||
} else {
|
||||
let mut router = self.router.clone();
|
||||
async move {
|
||||
router.changed().await;
|
||||
|
||||
impl Accept for Vec<TcpListener> {
|
||||
fn poll_accept(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<Accepted, Error>> {
|
||||
for listener in &*self {
|
||||
if let Poll::Ready((stream, _)) = listener.poll_accept(cx)? {
|
||||
return Poll::Ready(Ok(Accepted {
|
||||
https_redirect: false,
|
||||
stream,
|
||||
}));
|
||||
}
|
||||
.boxed()
|
||||
};
|
||||
if changed.poll_unpin(cx).is_ready() {
|
||||
return Poll::Ready(());
|
||||
}
|
||||
self.changed = Some(changed);
|
||||
Poll::Pending
|
||||
}
|
||||
}
|
||||
impl Clone for SwappableRouterService {
|
||||
fn clone(&self) -> Self {
|
||||
impl Accept for NetworkInterfaceListener {
|
||||
fn poll_accept(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<Accepted, Error>> {
|
||||
NetworkInterfaceListener::poll_accept(self, cx, true).map(|res| {
|
||||
res.map(|a| Accepted {
|
||||
https_redirect: a.is_public,
|
||||
stream: a.stream,
|
||||
})
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<A: Accept, B: Accept> Accept for Either<A, B> {
|
||||
fn poll_accept(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<Accepted, Error>> {
|
||||
match self {
|
||||
Either::Left(a) => a.poll_accept(cx),
|
||||
Either::Right(b) => b.poll_accept(cx),
|
||||
}
|
||||
}
|
||||
}
|
||||
impl<A: Accept> Accept for Option<A> {
|
||||
fn poll_accept(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<Accepted, Error>> {
|
||||
match self {
|
||||
None => Poll::Pending,
|
||||
Some(a) => a.poll_accept(cx),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[pin_project::pin_project]
|
||||
pub struct Acceptor<A: Accept> {
|
||||
acceptor: Watch<A>,
|
||||
}
|
||||
impl<A: Accept + Send + Sync + 'static> Acceptor<A> {
|
||||
pub fn new(acceptor: A) -> Self {
|
||||
Self {
|
||||
router: self.router.clone(),
|
||||
changed: None,
|
||||
acceptor: Watch::new(acceptor),
|
||||
}
|
||||
}
|
||||
|
||||
fn poll_changed(&mut self, cx: &mut std::task::Context<'_>) -> Poll<()> {
|
||||
self.acceptor.poll_changed(cx)
|
||||
}
|
||||
|
||||
fn poll_accept(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<Accepted, Error>> {
|
||||
let _ = self.poll_changed(cx);
|
||||
self.acceptor.peek_mut(|a| a.poll_accept(cx))
|
||||
}
|
||||
|
||||
async fn accept(&mut self) -> Result<Accepted, Error> {
|
||||
std::future::poll_fn(|cx| self.poll_accept(cx)).await
|
||||
}
|
||||
}
|
||||
impl<B> tower_service::Service<Request<B>> for SwappableRouterService
|
||||
where
|
||||
B: axum::body::HttpBody<Data = Bytes> + Send + 'static,
|
||||
B::Error: Into<axum::BoxError>,
|
||||
{
|
||||
type Response = <Router as tower_service::Service<Request<B>>>::Response;
|
||||
type Error = <Router as tower_service::Service<Request<B>>>::Error;
|
||||
type Future = <Router as tower_service::Service<Request<B>>>::Future;
|
||||
#[inline]
|
||||
fn poll_ready(&mut self, cx: &mut std::task::Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
if self.changed(cx).is_ready() {
|
||||
return Poll::Ready(Ok(()));
|
||||
}
|
||||
tower_service::Service::<Request<B>>::poll_ready(&mut self.router(), cx)
|
||||
}
|
||||
fn call(&mut self, req: Request<B>) -> Self::Future {
|
||||
self.router().call(req)
|
||||
impl Acceptor<Vec<TcpListener>> {
|
||||
pub async fn bind(listen: impl IntoIterator<Item = SocketAddr>) -> Result<Self, Error> {
|
||||
Ok(Self::new(
|
||||
futures::future::try_join_all(listen.into_iter().map(TcpListener::bind)).await?,
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> tower_service::Service<T> for SwappableRouter {
|
||||
type Response = SwappableRouterService;
|
||||
type Error = Infallible;
|
||||
type Future = futures::future::Ready<Result<Self::Response, Self::Error>>;
|
||||
#[inline]
|
||||
fn poll_ready(
|
||||
&mut self,
|
||||
_: &mut std::task::Context<'_>,
|
||||
) -> std::task::Poll<Result<(), Self::Error>> {
|
||||
Poll::Ready(Ok(()))
|
||||
}
|
||||
fn call(&mut self, _: T) -> Self::Future {
|
||||
ready(Ok(SwappableRouterService {
|
||||
router: self.0.subscribe(),
|
||||
changed: None,
|
||||
}))
|
||||
pub type UpgradableListener =
|
||||
Option<Either<SelfContainedNetworkInterfaceListener, NetworkInterfaceListener>>;
|
||||
|
||||
impl Acceptor<UpgradableListener> {
|
||||
pub fn bind_upgradable(listener: SelfContainedNetworkInterfaceListener) -> Self {
|
||||
Self::new(Some(Either::Left(listener)))
|
||||
}
|
||||
}
|
||||
|
||||
pub struct WebServer {
|
||||
pub struct WebServerAcceptorSetter<A: Accept> {
|
||||
acceptor: Watch<A>,
|
||||
}
|
||||
impl<A: Accept, B: Accept> WebServerAcceptorSetter<Option<Either<A, B>>> {
|
||||
pub fn try_upgrade<F: FnOnce(A) -> Result<B, Error>>(&self, f: F) -> Result<(), Error> {
|
||||
let mut res = Ok(());
|
||||
self.acceptor.send_modify(|a| {
|
||||
*a = match a.take() {
|
||||
Some(Either::Left(a)) => match f(a) {
|
||||
Ok(b) => Some(Either::Right(b)),
|
||||
Err(e) => {
|
||||
res = Err(e);
|
||||
None
|
||||
}
|
||||
},
|
||||
x => x,
|
||||
}
|
||||
});
|
||||
res
|
||||
}
|
||||
}
|
||||
impl<A: Accept> Deref for WebServerAcceptorSetter<A> {
|
||||
type Target = Watch<A>;
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&self.acceptor
|
||||
}
|
||||
}
|
||||
|
||||
pub struct WebServer<A: Accept> {
|
||||
shutdown: oneshot::Sender<()>,
|
||||
router: SwappableRouter,
|
||||
router: Watch<Option<Router>>,
|
||||
acceptor: Watch<A>,
|
||||
thread: NonDetachingJoinHandle<()>,
|
||||
}
|
||||
impl WebServer {
|
||||
pub fn new(bind: SocketAddr) -> Self {
|
||||
let router = SwappableRouter::new(refresher());
|
||||
let thread_router = router.clone();
|
||||
impl<A: Accept + Send + Sync + 'static> WebServer<A> {
|
||||
pub fn acceptor_setter(&self) -> WebServerAcceptorSetter<A> {
|
||||
WebServerAcceptorSetter {
|
||||
acceptor: self.acceptor.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new(mut acceptor: Acceptor<A>) -> Self {
|
||||
let acceptor_send = acceptor.acceptor.clone();
|
||||
let router = Watch::<Option<Router>>::new(None);
|
||||
let service = router.clone_unseen();
|
||||
let (shutdown, shutdown_recv) = oneshot::channel();
|
||||
let thread = NonDetachingJoinHandle::from(tokio::spawn(async move {
|
||||
let handle = Handle::new();
|
||||
let mut server = axum_server::bind(bind).handle(handle.clone());
|
||||
server.http_builder().http1().preserve_header_case(true);
|
||||
server.http_builder().http1().title_case_headers(true);
|
||||
#[derive(Clone)]
|
||||
struct QueueRunner {
|
||||
queue: Arc<RwLock<Option<BackgroundJobQueue>>>,
|
||||
}
|
||||
impl<Fut> hyper::rt::Executor<Fut> for QueueRunner
|
||||
where
|
||||
Fut: Future + Send + 'static,
|
||||
{
|
||||
fn execute(&self, fut: Fut) {
|
||||
if let Some(q) = &*self.queue.read().unwrap() {
|
||||
q.add_job(fut);
|
||||
} else {
|
||||
tracing::warn!("job queued after shutdown");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let (Err(e), _) = tokio::join!(server.serve(thread_router), async {
|
||||
let _ = shutdown_recv.await;
|
||||
handle.graceful_shutdown(Some(Duration::from_secs(0)));
|
||||
}) {
|
||||
tracing::error!("Spawning hyper server error: {}", e);
|
||||
struct SwappableRouter(Watch<Option<Router>>, bool);
|
||||
impl hyper::service::Service<hyper::Request<hyper::body::Incoming>> for SwappableRouter {
|
||||
type Response = <Router as tower_service::Service<
|
||||
hyper::Request<hyper::body::Incoming>,
|
||||
>>::Response;
|
||||
type Error = <Router as tower_service::Service<
|
||||
hyper::Request<hyper::body::Incoming>,
|
||||
>>::Error;
|
||||
type Future = <Router as tower_service::Service<
|
||||
hyper::Request<hyper::body::Incoming>,
|
||||
>>::Future;
|
||||
|
||||
fn call(&self, req: hyper::Request<hyper::body::Incoming>) -> Self::Future {
|
||||
use tower_service::Service;
|
||||
|
||||
if self.1 {
|
||||
redirecter().call(req)
|
||||
} else {
|
||||
let router = self.0.read();
|
||||
if let Some(mut router) = router {
|
||||
router.call(req)
|
||||
} else {
|
||||
refresher().call(req)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let accept = AtomicBool::new(true);
|
||||
let queue_cell = Arc::new(RwLock::new(None));
|
||||
let graceful = hyper_util::server::graceful::GracefulShutdown::new();
|
||||
let mut server = hyper_util::server::conn::auto::Builder::new(QueueRunner {
|
||||
queue: queue_cell.clone(),
|
||||
});
|
||||
server
|
||||
.http1()
|
||||
.timer(TokioTimer::new())
|
||||
.title_case_headers(true)
|
||||
.preserve_header_case(true)
|
||||
.http2()
|
||||
.timer(TokioTimer::new())
|
||||
.enable_connect_protocol()
|
||||
.keep_alive_interval(Duration::from_secs(60))
|
||||
.keep_alive_timeout(Duration::from_secs(300));
|
||||
let (queue, mut runner) = BackgroundJobQueue::new();
|
||||
*queue_cell.write().unwrap() = Some(queue.clone());
|
||||
|
||||
let handler = async {
|
||||
loop {
|
||||
if let Err(e) = async {
|
||||
let accepted = acceptor.accept().await?;
|
||||
queue.add_job(
|
||||
graceful.watch(
|
||||
server
|
||||
.serve_connection_with_upgrades(
|
||||
TokioIo::new(accepted.stream),
|
||||
SwappableRouter(service.clone(), accepted.https_redirect),
|
||||
)
|
||||
.into_owned(),
|
||||
),
|
||||
);
|
||||
|
||||
Ok::<_, Error>(())
|
||||
}
|
||||
.await
|
||||
{
|
||||
tracing::error!("Error accepting HTTP connection: {e}");
|
||||
tracing::debug!("{e:?}");
|
||||
}
|
||||
}
|
||||
}
|
||||
.boxed();
|
||||
|
||||
tokio::select! {
|
||||
_ = shutdown_recv => (),
|
||||
_ = handler => (),
|
||||
_ = &mut runner => (),
|
||||
}
|
||||
|
||||
accept.store(false, std::sync::atomic::Ordering::SeqCst);
|
||||
drop(queue);
|
||||
drop(queue_cell.write().unwrap().take());
|
||||
|
||||
if !runner.is_empty() {
|
||||
runner.await;
|
||||
}
|
||||
}));
|
||||
Self {
|
||||
shutdown,
|
||||
router,
|
||||
thread,
|
||||
acceptor: acceptor_send,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -138,7 +282,7 @@ impl WebServer {
|
||||
}
|
||||
|
||||
pub fn serve_router(&mut self, router: Router) {
|
||||
self.router.swap(router)
|
||||
self.router.send(Some(router))
|
||||
}
|
||||
|
||||
pub fn serve_main(&mut self, ctx: RpcContext) {
|
||||
|
||||
@@ -298,7 +298,7 @@ fn display_wifi_info(params: WithIoFormat<Empty>, info: WifiListInfo) {
|
||||
let mut table_global = Table::new();
|
||||
table_global.add_row(row![bc =>
|
||||
"CONNECTED",
|
||||
"SIGNAL_STRENGTH",
|
||||
"SIGNAL STRENGTH",
|
||||
"COUNTRY",
|
||||
"ETHERNET",
|
||||
]);
|
||||
@@ -306,12 +306,12 @@ fn display_wifi_info(params: WithIoFormat<Empty>, info: WifiListInfo) {
|
||||
&info
|
||||
.connected
|
||||
.as_ref()
|
||||
.map_or("[N/A]".to_owned(), |c| c.0.clone()),
|
||||
.map_or("N/A".to_owned(), |c| c.0.clone()),
|
||||
&info
|
||||
.connected
|
||||
.as_ref()
|
||||
.and_then(|x| info.ssids.get(x))
|
||||
.map_or("[N/A]".to_owned(), |ss| format!("{}", ss.0)),
|
||||
.map_or("N/A".to_owned(), |ss| format!("{}", ss.0)),
|
||||
info.country.as_ref().map(|c| c.alpha2()).unwrap_or("00"),
|
||||
&format!("{}", info.ethernet)
|
||||
]);
|
||||
@@ -897,32 +897,28 @@ impl TypedValueParser for CountryCodeParser {
|
||||
}
|
||||
|
||||
#[instrument(skip_all)]
|
||||
pub async fn synchronize_wpa_supplicant_conf<P: AsRef<Path>>(
|
||||
pub async fn synchronize_network_manager<P: AsRef<Path>>(
|
||||
main_datadir: P,
|
||||
wifi: &mut WifiInfo,
|
||||
wifi: &WifiInfo,
|
||||
) -> Result<(), Error> {
|
||||
wifi.interface = find_wifi_iface().await?;
|
||||
let Some(wifi_iface) = &wifi.interface else {
|
||||
return Ok(());
|
||||
};
|
||||
let persistent = main_datadir.as_ref().join("system-connections");
|
||||
tracing::debug!("persistent: {:?}", persistent);
|
||||
// let supplicant = Path::new("/etc/wpa_supplicant.conf");
|
||||
|
||||
if tokio::fs::metadata(&persistent).await.is_err() {
|
||||
tokio::fs::create_dir_all(&persistent).await?;
|
||||
}
|
||||
crate::disk::mount::util::bind(&persistent, "/etc/NetworkManager/system-connections", false)
|
||||
.await?;
|
||||
// if tokio::fs::metadata(&supplicant).await.is_err() {
|
||||
// tokio::fs::write(&supplicant, include_str!("wpa_supplicant.conf.base")).await?;
|
||||
// }
|
||||
|
||||
Command::new("systemctl")
|
||||
.arg("restart")
|
||||
.arg("NetworkManager")
|
||||
.invoke(ErrorKind::Wifi)
|
||||
.await?;
|
||||
|
||||
let Some(wifi_iface) = &wifi.interface else {
|
||||
return Ok(());
|
||||
};
|
||||
|
||||
Command::new("ifconfig")
|
||||
.arg(wifi_iface)
|
||||
.arg("up")
|
||||
|
||||
@@ -13,11 +13,11 @@ use serde::{Deserialize, Serialize};
|
||||
use tracing::instrument;
|
||||
use ts_rs::TS;
|
||||
|
||||
use crate::backup::BackupReport;
|
||||
use crate::context::{CliContext, RpcContext};
|
||||
use crate::db::model::DatabaseModel;
|
||||
use crate::db::model::{Database, DatabaseModel};
|
||||
use crate::prelude::*;
|
||||
use crate::util::serde::HandlerExtSerde;
|
||||
use crate::{backup::BackupReport, db::model::Database};
|
||||
|
||||
// #[command(subcommands(list, delete, delete_before, create))]
|
||||
pub fn notification<C: Context>() -> ParentHandler<C> {
|
||||
|
||||
@@ -50,7 +50,7 @@ pub async fn partition(disk: &DiskInfo, overwrite: bool) -> Result<OsPartitionIn
|
||||
if part_info.guid.is_some() {
|
||||
if entry.first_lba < if use_efi { 33759266 } else { 33570850 } {
|
||||
return Err(Error::new(
|
||||
eyre!("Not enough space before embassy data"),
|
||||
eyre!("Not enough space before StartOS data"),
|
||||
crate::ErrorKind::InvalidRequest,
|
||||
));
|
||||
}
|
||||
|
||||
@@ -6,3 +6,20 @@ pub use tracing::instrument;
|
||||
pub use crate::db::prelude::*;
|
||||
pub use crate::ensure_code;
|
||||
pub use crate::error::{Error, ErrorCollection, ErrorKind, ResultExt};
|
||||
|
||||
#[macro_export]
|
||||
macro_rules! dbg {
|
||||
() => {{
|
||||
tracing::debug!("[{}:{}:{}]", file!(), line!(), column!());
|
||||
}};
|
||||
($e:expr) => {{
|
||||
let e = $e;
|
||||
tracing::debug!("[{}:{}:{}] {} = {e:?}", file!(), line!(), column!(), stringify!($e));
|
||||
e
|
||||
}};
|
||||
($($e:expr),+) => {
|
||||
($(
|
||||
crate::dbg!($e)
|
||||
),+)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,7 +19,6 @@ use crate::context::config::{ContextConfig, CONFIG_PATH};
|
||||
use crate::context::{CliContext, RpcContext};
|
||||
use crate::prelude::*;
|
||||
use crate::registry::auth::{SignatureHeader, AUTH_SIG_HEADER};
|
||||
use crate::registry::device_info::{DeviceInfo, DEVICE_INFO_HEADER};
|
||||
use crate::registry::signer::sign::AnySigningKey;
|
||||
use crate::registry::RegistryDatabase;
|
||||
use crate::rpc_continuations::RpcContinuations;
|
||||
|
||||
@@ -2,7 +2,6 @@ use std::collections::{BTreeMap, BTreeSet};
|
||||
|
||||
use axum::Router;
|
||||
use futures::future::ready;
|
||||
use imbl_value::InternedString;
|
||||
use models::DataUrl;
|
||||
use rpc_toolkit::{from_fn_async, Context, HandlerExt, ParentHandler, Server};
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -11,13 +10,13 @@ use ts_rs::TS;
|
||||
use crate::context::CliContext;
|
||||
use crate::middleware::cors::Cors;
|
||||
use crate::net::static_server::{bad_request, not_found, server_error};
|
||||
use crate::net::web_server::WebServer;
|
||||
use crate::net::web_server::{Accept, WebServer};
|
||||
use crate::prelude::*;
|
||||
use crate::registry::auth::Auth;
|
||||
use crate::registry::context::RegistryContext;
|
||||
use crate::registry::device_info::DeviceInfoMiddleware;
|
||||
use crate::registry::os::index::OsIndex;
|
||||
use crate::registry::package::index::{Category, PackageIndex};
|
||||
use crate::registry::package::index::PackageIndex;
|
||||
use crate::registry::signer::SignerInfo;
|
||||
use crate::rpc_continuations::Guid;
|
||||
use crate::util::serde::HandlerExtSerde;
|
||||
@@ -144,7 +143,7 @@ pub fn registry_router(ctx: RegistryContext) -> Router {
|
||||
)
|
||||
}
|
||||
|
||||
impl WebServer {
|
||||
impl<A: Accept + Send + Sync + 'static> WebServer<A> {
|
||||
pub fn serve_registry(&mut self, ctx: RegistryContext) {
|
||||
self.serve_router(registry_router(ctx))
|
||||
}
|
||||
|
||||
@@ -72,7 +72,6 @@ pub struct PackageVersionInfo {
|
||||
pub icon: DataUrl<'static>,
|
||||
pub description: Description,
|
||||
pub release_notes: String,
|
||||
#[ts(type = "string")]
|
||||
pub git_hash: GitHash,
|
||||
#[ts(type = "string")]
|
||||
pub license: InternedString,
|
||||
|
||||
@@ -24,10 +24,10 @@ impl MerkleArchiveCommitment {
|
||||
pub fn from_query(query: &str) -> Result<Option<Self>, Error> {
|
||||
let mut root_sighash = None;
|
||||
let mut root_maxsize = None;
|
||||
for (k, v) in form_urlencoded::parse(dbg!(query).as_bytes()) {
|
||||
for (k, v) in form_urlencoded::parse(query.as_bytes()) {
|
||||
match &*k {
|
||||
"rootSighash" => {
|
||||
root_sighash = Some(dbg!(v).parse()?);
|
||||
root_sighash = Some(v.parse()?);
|
||||
}
|
||||
"rootMaxsize" => {
|
||||
root_maxsize = Some(v.parse()?);
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
use std::path::Path;
|
||||
|
||||
use tokio::process::Command;
|
||||
use ts_rs::TS;
|
||||
|
||||
use crate::prelude::*;
|
||||
use crate::util::Invoke;
|
||||
|
||||
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
|
||||
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize, TS)]
|
||||
#[ts(type = "string")]
|
||||
pub struct GitHash(String);
|
||||
|
||||
impl GitHash {
|
||||
@@ -31,6 +33,31 @@ impl GitHash {
|
||||
}
|
||||
Ok(GitHash(hash))
|
||||
}
|
||||
pub fn load_sync() -> Option<GitHash> {
|
||||
let mut hash = String::from_utf8(
|
||||
std::process::Command::new("git")
|
||||
.arg("rev-parse")
|
||||
.arg("HEAD")
|
||||
.output()
|
||||
.ok()?
|
||||
.stdout,
|
||||
)
|
||||
.ok()?;
|
||||
if !std::process::Command::new("git")
|
||||
.arg("diff-index")
|
||||
.arg("--quiet")
|
||||
.arg("HEAD")
|
||||
.arg("--")
|
||||
.output()
|
||||
.ok()?
|
||||
.status
|
||||
.success()
|
||||
{
|
||||
hash += "-modified";
|
||||
}
|
||||
|
||||
Some(GitHash(hash))
|
||||
}
|
||||
}
|
||||
|
||||
impl AsRef<str> for GitHash {
|
||||
|
||||
@@ -3,7 +3,6 @@ use std::path::Path;
|
||||
|
||||
use color_eyre::eyre::eyre;
|
||||
use exver::{Version, VersionRange};
|
||||
use helpers::const_true;
|
||||
use imbl_value::InternedString;
|
||||
pub use models::PackageId;
|
||||
use models::{mime, ImageId, VolumeId};
|
||||
@@ -62,8 +61,8 @@ pub struct Manifest {
|
||||
pub dependencies: Dependencies,
|
||||
#[serde(default)]
|
||||
pub hardware_requirements: HardwareRequirements,
|
||||
#[serde(default)]
|
||||
#[ts(type = "string | null")]
|
||||
#[ts(optional)]
|
||||
#[serde(default = "GitHash::load_sync")]
|
||||
pub git_hash: Option<GitHash>,
|
||||
#[serde(default = "current_version")]
|
||||
#[ts(type = "string")]
|
||||
|
||||
@@ -294,7 +294,7 @@ impl CallbackHandler {
|
||||
}
|
||||
}
|
||||
pub async fn call(mut self, args: Vector<Value>) -> Result<(), Error> {
|
||||
dbg!(eyre!("callback fired: {}", self.handle.is_active()));
|
||||
crate::dbg!(eyre!("callback fired: {}", self.handle.is_active()));
|
||||
if let Some(seed) = self.seed.upgrade() {
|
||||
seed.persistent_container
|
||||
.callback(self.handle.take(), args)
|
||||
|
||||
@@ -17,11 +17,11 @@ use crate::db::model::package::{
|
||||
use crate::disk::mount::filesystem::bind::Bind;
|
||||
use crate::disk::mount::filesystem::idmapped::IdMapped;
|
||||
use crate::disk::mount::filesystem::{FileSystem, MountType};
|
||||
use crate::rpc_continuations::Guid;
|
||||
use crate::service::effects::prelude::*;
|
||||
use crate::status::health_check::NamedHealthCheckResult;
|
||||
use crate::util::Invoke;
|
||||
use crate::volume::data_dir;
|
||||
use crate::DATA_DIR;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
|
||||
#[ts(export)]
|
||||
@@ -55,7 +55,7 @@ pub async fn mount(
|
||||
let context = context.deref()?;
|
||||
let subpath = subpath.unwrap_or_default();
|
||||
let subpath = subpath.strip_prefix("/").unwrap_or(&subpath);
|
||||
let source = data_dir(&context.seed.ctx.datadir, &package_id, &volume_id).join(subpath);
|
||||
let source = data_dir(DATA_DIR, &package_id, &volume_id).join(subpath);
|
||||
if tokio::fs::metadata(&source).await.is_err() {
|
||||
tokio::fs::create_dir_all(&source).await?;
|
||||
}
|
||||
|
||||
@@ -130,10 +130,6 @@ pub fn handler<C: Context>() -> ParentHandler<C> {
|
||||
"get-host-info",
|
||||
from_fn_async(net::host::get_host_info).no_cli(),
|
||||
)
|
||||
.subcommand(
|
||||
"get-primary-url",
|
||||
from_fn_async(net::host::get_primary_url).no_cli(),
|
||||
)
|
||||
.subcommand(
|
||||
"get-container-ip",
|
||||
from_fn_async(net::info::get_container_ip).no_cli(),
|
||||
|
||||
@@ -1,14 +1,12 @@
|
||||
use models::{HostId, PackageId};
|
||||
|
||||
use crate::net::host::binding::{BindId, BindOptions, LanInfo};
|
||||
use crate::net::host::HostKind;
|
||||
use crate::net::host::binding::{BindId, BindOptions, NetInfo};
|
||||
use crate::service::effects::prelude::*;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export)]
|
||||
pub struct BindParams {
|
||||
kind: HostKind,
|
||||
id: HostId,
|
||||
internal_port: u16,
|
||||
#[serde(flatten)]
|
||||
@@ -17,15 +15,18 @@ pub struct BindParams {
|
||||
pub async fn bind(
|
||||
context: EffectContext,
|
||||
BindParams {
|
||||
kind,
|
||||
id,
|
||||
internal_port,
|
||||
options,
|
||||
}: BindParams,
|
||||
) -> Result<(), Error> {
|
||||
let context = context.deref()?;
|
||||
let mut svc = context.seed.persistent_container.net_service.lock().await;
|
||||
svc.bind(kind, id, internal_port, options).await
|
||||
context
|
||||
.seed
|
||||
.persistent_container
|
||||
.net_service
|
||||
.bind(id, internal_port, options)
|
||||
.await
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, TS, Parser)]
|
||||
@@ -41,8 +42,12 @@ pub async fn clear_bindings(
|
||||
ClearBindingsParams { except }: ClearBindingsParams,
|
||||
) -> Result<(), Error> {
|
||||
let context = context.deref()?;
|
||||
let mut svc = context.seed.persistent_container.net_service.lock().await;
|
||||
svc.clear_bindings(except.into_iter().collect()).await?;
|
||||
context
|
||||
.seed
|
||||
.persistent_container
|
||||
.net_service
|
||||
.clear_bindings(except.into_iter().collect())
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -53,15 +58,36 @@ pub struct GetServicePortForwardParams {
|
||||
#[ts(optional)]
|
||||
package_id: Option<PackageId>,
|
||||
host_id: HostId,
|
||||
internal_port: u32,
|
||||
internal_port: u16,
|
||||
}
|
||||
pub async fn get_service_port_forward(
|
||||
context: EffectContext,
|
||||
data: GetServicePortForwardParams,
|
||||
) -> Result<LanInfo, Error> {
|
||||
let internal_port = data.internal_port as u16;
|
||||
|
||||
GetServicePortForwardParams {
|
||||
package_id,
|
||||
host_id,
|
||||
internal_port,
|
||||
}: GetServicePortForwardParams,
|
||||
) -> Result<NetInfo, Error> {
|
||||
let context = context.deref()?;
|
||||
let net_service = context.seed.persistent_container.net_service.lock().await;
|
||||
net_service.get_lan_port(data.host_id, internal_port)
|
||||
|
||||
let package_id = package_id.unwrap_or_else(|| context.seed.id.clone());
|
||||
|
||||
Ok(context
|
||||
.seed
|
||||
.ctx
|
||||
.db
|
||||
.peek()
|
||||
.await
|
||||
.as_public()
|
||||
.as_package_data()
|
||||
.as_idx(&package_id)
|
||||
.or_not_found(&package_id)?
|
||||
.as_hosts()
|
||||
.as_idx(&host_id)
|
||||
.or_not_found(&host_id)?
|
||||
.as_bindings()
|
||||
.de()?
|
||||
.get(&internal_port)
|
||||
.or_not_found(lazy_format!("binding for port {internal_port}"))?
|
||||
.net)
|
||||
}
|
||||
|
||||
@@ -1,35 +1,10 @@
|
||||
use models::{HostId, PackageId};
|
||||
|
||||
use crate::net::host::address::HostAddress;
|
||||
use crate::net::host::Host;
|
||||
use crate::service::effects::callbacks::CallbackHandler;
|
||||
use crate::service::effects::prelude::*;
|
||||
use crate::service::rpc::CallbackId;
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, TS)]
|
||||
#[ts(export)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct GetPrimaryUrlParams {
|
||||
#[ts(optional)]
|
||||
package_id: Option<PackageId>,
|
||||
host_id: HostId,
|
||||
#[ts(optional)]
|
||||
callback: Option<CallbackId>,
|
||||
}
|
||||
pub async fn get_primary_url(
|
||||
context: EffectContext,
|
||||
GetPrimaryUrlParams {
|
||||
package_id,
|
||||
host_id,
|
||||
callback,
|
||||
}: GetPrimaryUrlParams,
|
||||
) -> Result<Option<HostAddress>, Error> {
|
||||
let context = context.deref()?;
|
||||
let package_id = package_id.unwrap_or_else(|| context.seed.id.clone());
|
||||
|
||||
Ok(None) // TODO
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize, TS)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
#[ts(export)]
|
||||
|
||||
@@ -4,6 +4,5 @@ use crate::service::effects::prelude::*;
|
||||
|
||||
pub async fn get_container_ip(context: EffectContext) -> Result<Ipv4Addr, Error> {
|
||||
let context = context.deref()?;
|
||||
let net_service = context.seed.persistent_container.net_service.lock().await;
|
||||
Ok(net_service.get_ip())
|
||||
Ok(context.seed.persistent_container.net_service.get_ip().await)
|
||||
}
|
||||
|
||||
@@ -15,7 +15,6 @@ pub struct ExportServiceInterfaceParams {
|
||||
id: ServiceInterfaceId,
|
||||
name: String,
|
||||
description: String,
|
||||
has_primary: bool,
|
||||
masked: bool,
|
||||
address_info: AddressInfo,
|
||||
r#type: ServiceInterfaceType,
|
||||
@@ -26,7 +25,6 @@ pub async fn export_service_interface(
|
||||
id,
|
||||
name,
|
||||
description,
|
||||
has_primary,
|
||||
masked,
|
||||
address_info,
|
||||
r#type,
|
||||
@@ -39,7 +37,6 @@ pub async fn export_service_interface(
|
||||
id: id.clone(),
|
||||
name,
|
||||
description,
|
||||
has_primary,
|
||||
masked,
|
||||
address_info,
|
||||
interface_type: r#type,
|
||||
|
||||
@@ -51,10 +51,16 @@ pub async fn get_ssl_certificate(
|
||||
.iter()
|
||||
.map(|(_, m)| m.as_hosts().as_entries())
|
||||
.flatten_ok()
|
||||
.map_ok(|(_, m)| m.as_addresses().de())
|
||||
.map_ok(|(_, m)| {
|
||||
Ok(m.as_onions()
|
||||
.de()?
|
||||
.iter()
|
||||
.map(InternedString::from_display)
|
||||
.chain(m.as_domains().keys()?)
|
||||
.collect::<Vec<_>>())
|
||||
})
|
||||
.map(|a| a.and_then(|a| a))
|
||||
.flatten_ok()
|
||||
.map_ok(|a| InternedString::from_display(&a))
|
||||
.try_collect::<_, BTreeSet<_>, _>()?;
|
||||
for hostname in &hostnames {
|
||||
if let Some(internal) = hostname
|
||||
@@ -135,10 +141,16 @@ pub async fn get_ssl_key(
|
||||
.into_iter()
|
||||
.map(|m| m.as_hosts().as_entries())
|
||||
.flatten_ok()
|
||||
.map_ok(|(_, m)| m.as_addresses().de())
|
||||
.map_ok(|(_, m)| {
|
||||
Ok(m.as_onions()
|
||||
.de()?
|
||||
.iter()
|
||||
.map(InternedString::from_display)
|
||||
.chain(m.as_domains().keys()?)
|
||||
.collect::<Vec<_>>())
|
||||
})
|
||||
.map(|a| a.and_then(|a| a))
|
||||
.flatten_ok()
|
||||
.map_ok(|a| InternedString::from_display(&a))
|
||||
.try_collect::<_, BTreeSet<_>, _>()?;
|
||||
for hostname in &hostnames {
|
||||
if let Some(internal) = hostname
|
||||
|
||||
@@ -26,7 +26,7 @@ pub async fn get_store(
|
||||
callback,
|
||||
}: GetStoreParams,
|
||||
) -> Result<Value, Error> {
|
||||
dbg!(&callback);
|
||||
crate::dbg!(&callback);
|
||||
let context = context.deref()?;
|
||||
let peeked = context.seed.ctx.db.peek().await;
|
||||
let package_id = package_id.unwrap_or(context.seed.id.clone());
|
||||
|
||||
@@ -4,12 +4,11 @@ use imbl_value::InternedString;
|
||||
use models::ImageId;
|
||||
use tokio::process::Command;
|
||||
|
||||
use crate::disk::mount::filesystem::overlayfs::OverlayGuard;
|
||||
use crate::rpc_continuations::Guid;
|
||||
use crate::service::effects::prelude::*;
|
||||
use crate::service::persistent_container::Subcontainer;
|
||||
use crate::util::Invoke;
|
||||
use crate::{
|
||||
disk::mount::filesystem::overlayfs::OverlayGuard, service::persistent_container::Subcontainer,
|
||||
};
|
||||
|
||||
#[cfg(feature = "container-runtime")]
|
||||
mod sync;
|
||||
|
||||
@@ -48,7 +48,7 @@ use crate::util::net::WebSocketExt;
|
||||
use crate::util::serde::{NoOutput, Pem};
|
||||
use crate::util::Never;
|
||||
use crate::volume::data_dir;
|
||||
use crate::CAP_1_KiB;
|
||||
use crate::{CAP_1_KiB, DATA_DIR, PACKAGE_DATA};
|
||||
|
||||
pub mod action;
|
||||
pub mod cli;
|
||||
@@ -149,10 +149,10 @@ impl ServiceRef {
|
||||
.values()
|
||||
.flat_map(|h| h.bindings.values())
|
||||
.flat_map(|b| {
|
||||
b.lan
|
||||
b.net
|
||||
.assigned_port
|
||||
.into_iter()
|
||||
.chain(b.lan.assigned_ssl_port)
|
||||
.chain(b.net.assigned_ssl_port)
|
||||
}),
|
||||
);
|
||||
Ok(())
|
||||
@@ -167,17 +167,18 @@ impl ServiceRef {
|
||||
{
|
||||
let state = pde.state_info.expect_removing()?;
|
||||
for volume_id in &state.manifest.volumes {
|
||||
let path = data_dir(&ctx.datadir, &state.manifest.id, volume_id);
|
||||
let path = data_dir(DATA_DIR, &state.manifest.id, volume_id);
|
||||
if tokio::fs::metadata(&path).await.is_ok() {
|
||||
tokio::fs::remove_dir_all(&path).await?;
|
||||
}
|
||||
}
|
||||
let logs_dir = ctx.datadir.join("logs").join(&state.manifest.id);
|
||||
let logs_dir = Path::new(PACKAGE_DATA)
|
||||
.join("logs")
|
||||
.join(&state.manifest.id);
|
||||
if tokio::fs::metadata(&logs_dir).await.is_ok() {
|
||||
tokio::fs::remove_dir_all(&logs_dir).await?;
|
||||
}
|
||||
let archive_path = ctx
|
||||
.datadir
|
||||
let archive_path = Path::new(PACKAGE_DATA)
|
||||
.join("archive")
|
||||
.join("installed")
|
||||
.join(&state.manifest.id);
|
||||
@@ -278,7 +279,7 @@ impl Service {
|
||||
let ctx = ctx.clone();
|
||||
move |s9pk: S9pk, i: Model<PackageDataEntry>| async move {
|
||||
for volume_id in &s9pk.as_manifest().volumes {
|
||||
let path = data_dir(&ctx.datadir, &s9pk.as_manifest().id, volume_id);
|
||||
let path = data_dir(DATA_DIR, &s9pk.as_manifest().id, volume_id);
|
||||
if tokio::fs::metadata(&path).await.is_err() {
|
||||
tokio::fs::create_dir_all(&path).await?;
|
||||
}
|
||||
@@ -291,7 +292,7 @@ impl Service {
|
||||
Self::new(ctx, s9pk, start_stop).await.map(Some)
|
||||
}
|
||||
};
|
||||
let s9pk_dir = ctx.datadir.join(PKG_ARCHIVE_DIR).join("installed"); // TODO: make this based on hash
|
||||
let s9pk_dir = Path::new(DATA_DIR).join(PKG_ARCHIVE_DIR).join("installed"); // TODO: make this based on hash
|
||||
let s9pk_path = s9pk_dir.join(id).with_extension("s9pk");
|
||||
let Some(entry) = ctx
|
||||
.db
|
||||
@@ -604,27 +605,11 @@ impl Service {
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn update_host(&self, host_id: HostId) -> Result<(), Error> {
|
||||
let host = self
|
||||
.seed
|
||||
.ctx
|
||||
.db
|
||||
.peek()
|
||||
.await
|
||||
.as_public()
|
||||
.as_package_data()
|
||||
.as_idx(&self.seed.id)
|
||||
.or_not_found(&self.seed.id)?
|
||||
.as_hosts()
|
||||
.as_idx(&host_id)
|
||||
.or_not_found(&host_id)?
|
||||
.de()?;
|
||||
pub async fn sync_host(&self, host_id: HostId) -> Result<(), Error> {
|
||||
self.seed
|
||||
.persistent_container
|
||||
.net_service
|
||||
.lock()
|
||||
.await
|
||||
.update(host_id, host)
|
||||
.sync_host(host_id)
|
||||
.await
|
||||
}
|
||||
}
|
||||
@@ -934,7 +919,6 @@ pub async fn attach(
|
||||
.with_kind(ErrorKind::Network)?;
|
||||
current_out = "stdout";
|
||||
}
|
||||
dbg!(¤t_out);
|
||||
ws.send(Message::Binary(out))
|
||||
.await
|
||||
.with_kind(ErrorKind::Network)?;
|
||||
@@ -948,7 +932,6 @@ pub async fn attach(
|
||||
.with_kind(ErrorKind::Network)?;
|
||||
current_out = "stderr";
|
||||
}
|
||||
dbg!(¤t_out);
|
||||
ws.send(Message::Binary(err))
|
||||
.await
|
||||
.with_kind(ErrorKind::Network)?;
|
||||
|
||||
@@ -39,7 +39,7 @@ use crate::util::io::create_file;
|
||||
use crate::util::rpc_client::UnixRpcClient;
|
||||
use crate::util::Invoke;
|
||||
use crate::volume::data_dir;
|
||||
use crate::ARCH;
|
||||
use crate::{ARCH, DATA_DIR, PACKAGE_DATA};
|
||||
|
||||
const RPC_CONNECT_TIMEOUT: Duration = Duration::from_secs(10);
|
||||
|
||||
@@ -110,7 +110,7 @@ pub struct PersistentContainer {
|
||||
pub(super) images: BTreeMap<ImageId, Arc<MountGuard>>,
|
||||
pub(super) subcontainers: Arc<Mutex<BTreeMap<Guid, Subcontainer>>>,
|
||||
pub(super) state: Arc<watch::Sender<ServiceState>>,
|
||||
pub(super) net_service: Mutex<NetService>,
|
||||
pub(super) net_service: NetService,
|
||||
destroyed: bool,
|
||||
}
|
||||
|
||||
@@ -121,8 +121,8 @@ impl PersistentContainer {
|
||||
.lxc_manager
|
||||
.create(
|
||||
Some(
|
||||
&ctx.datadir
|
||||
.join("package-data/logs")
|
||||
&Path::new(PACKAGE_DATA)
|
||||
.join("logs")
|
||||
.join(&s9pk.as_manifest().id),
|
||||
),
|
||||
LxcConfig::default(),
|
||||
@@ -157,7 +157,7 @@ impl PersistentContainer {
|
||||
.await?;
|
||||
let mount = MountGuard::mount(
|
||||
&IdMapped::new(
|
||||
Bind::new(data_dir(&ctx.datadir, &s9pk.as_manifest().id, volume)),
|
||||
Bind::new(data_dir(DATA_DIR, &s9pk.as_manifest().id, volume)),
|
||||
0,
|
||||
100000,
|
||||
65536,
|
||||
@@ -285,7 +285,7 @@ impl PersistentContainer {
|
||||
images,
|
||||
subcontainers: Arc::new(Mutex::new(BTreeMap::new())),
|
||||
state: Arc::new(watch::channel(ServiceState::new(start)).0),
|
||||
net_service: Mutex::new(net_service),
|
||||
net_service,
|
||||
destroyed: false,
|
||||
})
|
||||
}
|
||||
@@ -452,7 +452,7 @@ impl PersistentContainer {
|
||||
#[instrument(skip_all)]
|
||||
pub async fn exit(mut self) -> Result<(), Error> {
|
||||
if let Some(destroy) = self.destroy(false) {
|
||||
dbg!(destroy.await)?;
|
||||
destroy.await?;
|
||||
}
|
||||
tracing::info!("Service for {} exited", self.s9pk.as_manifest().id);
|
||||
|
||||
|
||||
@@ -155,7 +155,7 @@ impl serde::Serialize for Sandbox {
|
||||
pub struct CallbackId(u64);
|
||||
impl CallbackId {
|
||||
pub fn register(self, container: &PersistentContainer) -> CallbackHandle {
|
||||
dbg!(eyre!(
|
||||
crate::dbg!(eyre!(
|
||||
"callback {} registered for {}",
|
||||
self.0,
|
||||
container.s9pk.as_manifest().id
|
||||
|
||||
@@ -36,7 +36,7 @@ impl Actor for ServiceActor {
|
||||
ServiceActorLoopNext::DontWait => (),
|
||||
}
|
||||
}
|
||||
})
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
@@ -92,7 +92,6 @@ async fn service_actor_loop(
|
||||
..
|
||||
} => MainStatus::Stopped,
|
||||
};
|
||||
let previous = i.as_status().de()?;
|
||||
i.as_status_mut().ser(&main_status)?;
|
||||
return Ok(previous
|
||||
.major_changes(&main_status)
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use color_eyre::eyre::eyre;
|
||||
use futures::future::BoxFuture;
|
||||
use futures::{Future, FutureExt};
|
||||
use futures::stream::FuturesUnordered;
|
||||
use futures::{Future, FutureExt, StreamExt};
|
||||
use helpers::NonDetachingJoinHandle;
|
||||
use imbl::OrdMap;
|
||||
use imbl_value::InternedString;
|
||||
@@ -27,6 +29,7 @@ use crate::service::start_stop::StartStop;
|
||||
use crate::service::{LoadDisposition, Service, ServiceRef};
|
||||
use crate::status::MainStatus;
|
||||
use crate::util::serde::Pem;
|
||||
use crate::DATA_DIR;
|
||||
|
||||
pub type DownloadInstallFuture = BoxFuture<'static, Result<InstallFuture, Error>>;
|
||||
pub type InstallFuture = BoxFuture<'static, Result<(), Error>>;
|
||||
@@ -66,8 +69,12 @@ impl ServiceMap {
|
||||
progress.start();
|
||||
let ids = ctx.db.peek().await.as_public().as_package_data().keys()?;
|
||||
progress.set_total(ids.len() as u64);
|
||||
for id in ids {
|
||||
if let Err(e) = self.load(ctx, &id, LoadDisposition::Retry).await {
|
||||
let mut jobs = FuturesUnordered::new();
|
||||
for id in &ids {
|
||||
jobs.push(self.load(ctx, id, LoadDisposition::Retry));
|
||||
}
|
||||
while let Some(res) = jobs.next().await {
|
||||
if let Err(e) = res {
|
||||
tracing::error!("Error loading installed package as service: {e}");
|
||||
tracing::debug!("{e:?}");
|
||||
}
|
||||
@@ -220,8 +227,7 @@ impl ServiceMap {
|
||||
Ok(async move {
|
||||
let (installed_path, sync_progress_task) = reload_guard
|
||||
.handle(async {
|
||||
let download_path = ctx
|
||||
.datadir
|
||||
let download_path = Path::new(DATA_DIR)
|
||||
.join(PKG_ARCHIVE_DIR)
|
||||
.join("downloading")
|
||||
.join(&id)
|
||||
@@ -251,8 +257,7 @@ impl ServiceMap {
|
||||
file.sync_all().await?;
|
||||
download_progress.complete();
|
||||
|
||||
let installed_path = ctx
|
||||
.datadir
|
||||
let installed_path = Path::new(DATA_DIR)
|
||||
.join(PKG_ARCHIVE_DIR)
|
||||
.join("installed")
|
||||
.join(&id)
|
||||
|
||||
@@ -15,6 +15,7 @@ use crate::service::ServiceActor;
|
||||
use crate::util::actor::background::BackgroundJobQueue;
|
||||
use crate::util::actor::{ConflictBuilder, Handler};
|
||||
use crate::util::future::RemoteCancellable;
|
||||
use crate::util::serde::NoOutput;
|
||||
|
||||
pub(in crate::service) struct Backup {
|
||||
pub path: PathBuf,
|
||||
@@ -48,7 +49,7 @@ impl Handler<Backup> for ServiceActor {
|
||||
.mount_backup(path, ReadWrite)
|
||||
.await?;
|
||||
seed.persistent_container
|
||||
.execute(id, ProcedureName::CreateBackup, Value::Null, None)
|
||||
.execute::<NoOutput>(id, ProcedureName::CreateBackup, Value::Null, None)
|
||||
.await?;
|
||||
backup_guard.unmount(true).await?;
|
||||
|
||||
|
||||
@@ -11,6 +11,7 @@ use crate::service::ServiceActor;
|
||||
use crate::util::actor::background::BackgroundJobQueue;
|
||||
use crate::util::actor::{ConflictBuilder, Handler};
|
||||
use crate::util::future::RemoteCancellable;
|
||||
use crate::util::serde::NoOutput;
|
||||
|
||||
pub(in crate::service) struct Restore {
|
||||
pub path: PathBuf,
|
||||
@@ -38,7 +39,7 @@ impl Handler<Restore> for ServiceActor {
|
||||
.mount_backup(path, ReadOnly)
|
||||
.await?;
|
||||
seed.persistent_container
|
||||
.execute(id, ProcedureName::RestoreBackup, Value::Null, None)
|
||||
.execute::<NoOutput>(id, ProcedureName::RestoreBackup, Value::Null, None)
|
||||
.await?;
|
||||
backup_guard.unmount(true).await?;
|
||||
|
||||
@@ -48,7 +49,7 @@ impl Handler<Restore> for ServiceActor {
|
||||
Ok::<_, Error>(())
|
||||
}
|
||||
.map(|x| {
|
||||
if let Err(err) = dbg!(x) {
|
||||
if let Err(err) = x {
|
||||
tracing::debug!("{:?}", err);
|
||||
tracing::warn!("{}", err);
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
|
||||
use color_eyre::eyre::eyre;
|
||||
use const_format::formatcp;
|
||||
use josekit::jwk::Jwk;
|
||||
use patch_db::json_ptr::ROOT;
|
||||
use rpc_toolkit::yajrc::RpcError;
|
||||
@@ -30,7 +31,7 @@ use crate::disk::mount::guard::{GenericMountGuard, TmpMountGuard};
|
||||
use crate::disk::util::{pvscan, recovery_info, DiskInfo, StartOsRecoveryInfo};
|
||||
use crate::disk::REPAIR_DISK_PATH;
|
||||
use crate::init::{init, InitPhases, InitResult};
|
||||
use crate::net::net_controller::PreInitNetController;
|
||||
use crate::net::net_controller::NetController;
|
||||
use crate::net::ssl::root_ca_start_time;
|
||||
use crate::prelude::*;
|
||||
use crate::progress::{FullProgress, PhaseProgressTrackerHandle};
|
||||
@@ -38,7 +39,7 @@ use crate::rpc_continuations::Guid;
|
||||
use crate::util::crypto::EncryptedWire;
|
||||
use crate::util::io::{create_file, dir_copy, dir_size, Counter};
|
||||
use crate::util::Invoke;
|
||||
use crate::{Error, ErrorKind, ResultExt};
|
||||
use crate::{Error, ErrorKind, ResultExt, DATA_DIR, MAIN_DATA, PACKAGE_DATA};
|
||||
|
||||
pub fn setup<C: Context>() -> ParentHandler<C> {
|
||||
ParentHandler::new()
|
||||
@@ -79,10 +80,11 @@ async fn setup_init(
|
||||
ctx: &SetupContext,
|
||||
password: Option<String>,
|
||||
init_phases: InitPhases,
|
||||
) -> Result<(AccountInfo, PreInitNetController), Error> {
|
||||
let InitResult { net_ctrl } = init(&ctx.config, init_phases).await?;
|
||||
) -> Result<(AccountInfo, InitResult), Error> {
|
||||
let init_result = init(&ctx.webserver, &ctx.config, init_phases).await?;
|
||||
|
||||
let account = net_ctrl
|
||||
let account = init_result
|
||||
.net_ctrl
|
||||
.db
|
||||
.mutate(|m| {
|
||||
let mut account = AccountInfo::load(m)?;
|
||||
@@ -98,7 +100,7 @@ async fn setup_init(
|
||||
})
|
||||
.await?;
|
||||
|
||||
Ok((account, net_ctrl))
|
||||
Ok((account, init_result))
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Serialize, TS)]
|
||||
@@ -140,7 +142,7 @@ pub async fn attach(
|
||||
disk_phase.start();
|
||||
let requires_reboot = crate::disk::main::import(
|
||||
&*disk_guid,
|
||||
&setup_ctx.datadir,
|
||||
DATA_DIR,
|
||||
if tokio::fs::metadata(REPAIR_DISK_PATH).await.is_ok() {
|
||||
RepairStrategy::Aggressive
|
||||
} else {
|
||||
@@ -155,7 +157,7 @@ pub async fn attach(
|
||||
.with_ctx(|_| (ErrorKind::Filesystem, REPAIR_DISK_PATH))?;
|
||||
}
|
||||
if requires_reboot.0 {
|
||||
crate::disk::main::export(&*disk_guid, &setup_ctx.datadir).await?;
|
||||
crate::disk::main::export(&*disk_guid, DATA_DIR).await?;
|
||||
return Err(Error::new(
|
||||
eyre!(
|
||||
"Errors were corrected with your disk, but the server must be restarted in order to proceed"
|
||||
@@ -167,7 +169,7 @@ pub async fn attach(
|
||||
|
||||
let (account, net_ctrl) = setup_init(&setup_ctx, password, init_phases).await?;
|
||||
|
||||
let rpc_ctx = RpcContext::init(&setup_ctx.config, disk_guid, Some(net_ctrl), rpc_ctx_phases).await?;
|
||||
let rpc_ctx = RpcContext::init(&setup_ctx.webserver, &setup_ctx.config, disk_guid, Some(net_ctrl), rpc_ctx_phases).await?;
|
||||
|
||||
Ok(((&account).try_into()?, rpc_ctx))
|
||||
})?;
|
||||
@@ -391,18 +393,13 @@ pub async fn execute_inner(
|
||||
crate::disk::main::create(
|
||||
&[start_os_logicalname],
|
||||
&pvscan().await?,
|
||||
&ctx.datadir,
|
||||
DATA_DIR,
|
||||
encryption_password,
|
||||
)
|
||||
.await?,
|
||||
);
|
||||
let _ = crate::disk::main::import(
|
||||
&*guid,
|
||||
&ctx.datadir,
|
||||
RepairStrategy::Preen,
|
||||
encryption_password,
|
||||
)
|
||||
.await?;
|
||||
let _ = crate::disk::main::import(&*guid, DATA_DIR, RepairStrategy::Preen, encryption_password)
|
||||
.await?;
|
||||
disk_phase.complete();
|
||||
|
||||
let progress = SetupExecuteProgress {
|
||||
@@ -456,9 +453,16 @@ async fn fresh_setup(
|
||||
db.put(&ROOT, &Database::init(&account)?).await?;
|
||||
drop(db);
|
||||
|
||||
let InitResult { net_ctrl } = init(&ctx.config, init_phases).await?;
|
||||
let init_result = init(&ctx.webserver, &ctx.config, init_phases).await?;
|
||||
|
||||
let rpc_ctx = RpcContext::init(&ctx.config, guid, Some(net_ctrl), rpc_ctx_phases).await?;
|
||||
let rpc_ctx = RpcContext::init(
|
||||
&ctx.webserver,
|
||||
&ctx.config,
|
||||
guid,
|
||||
Some(init_result),
|
||||
rpc_ctx_phases,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(((&account).try_into()?, rpc_ctx))
|
||||
}
|
||||
@@ -513,10 +517,10 @@ async fn migrate(
|
||||
)
|
||||
.await?;
|
||||
|
||||
let main_transfer_args = ("/media/startos/migrate/main/", "/embassy-data/main/");
|
||||
let main_transfer_args = ("/media/startos/migrate/main/", formatcp!("{MAIN_DATA}/"));
|
||||
let package_data_transfer_args = (
|
||||
"/media/startos/migrate/package-data/",
|
||||
"/embassy-data/package-data/",
|
||||
formatcp!("{PACKAGE_DATA}/"),
|
||||
);
|
||||
|
||||
let tmpdir = Path::new(package_data_transfer_args.0).join("tmp");
|
||||
@@ -571,7 +575,14 @@ async fn migrate(
|
||||
|
||||
let (account, net_ctrl) = setup_init(&ctx, Some(start_os_password), init_phases).await?;
|
||||
|
||||
let rpc_ctx = RpcContext::init(&ctx.config, guid, Some(net_ctrl), rpc_ctx_phases).await?;
|
||||
let rpc_ctx = RpcContext::init(
|
||||
&ctx.webserver,
|
||||
&ctx.config,
|
||||
guid,
|
||||
Some(net_ctrl),
|
||||
rpc_ctx_phases,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(((&account).try_into()?, rpc_ctx))
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use std::path::PathBuf;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::context::RpcContext;
|
||||
@@ -7,7 +7,7 @@ use crate::init::{STANDBY_MODE_PATH, SYSTEM_REBUILD_PATH};
|
||||
use crate::prelude::*;
|
||||
use crate::sound::SHUTDOWN;
|
||||
use crate::util::Invoke;
|
||||
use crate::PLATFORM;
|
||||
use crate::{DATA_DIR, PLATFORM};
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Shutdown {
|
||||
@@ -87,7 +87,7 @@ pub async fn shutdown(ctx: RpcContext) -> Result<(), Error> {
|
||||
.await?;
|
||||
ctx.shutdown
|
||||
.send(Some(Shutdown {
|
||||
export_args: Some((ctx.disk_guid.clone(), ctx.datadir.clone())),
|
||||
export_args: Some((ctx.disk_guid.clone(), Path::new(DATA_DIR).to_owned())),
|
||||
restart: false,
|
||||
}))
|
||||
.map_err(|_| ())
|
||||
@@ -107,7 +107,7 @@ pub async fn restart(ctx: RpcContext) -> Result<(), Error> {
|
||||
.await?;
|
||||
ctx.shutdown
|
||||
.send(Some(Shutdown {
|
||||
export_args: Some((ctx.disk_guid.clone(), ctx.datadir.clone())),
|
||||
export_args: Some((ctx.disk_guid.clone(), Path::new(DATA_DIR).to_owned())),
|
||||
restart: true,
|
||||
}))
|
||||
.map_err(|_| ())
|
||||
|
||||
@@ -3,20 +3,23 @@ use std::path::Path;
|
||||
|
||||
use clap::builder::ValueParserFactory;
|
||||
use clap::Parser;
|
||||
use color_eyre::eyre::eyre;
|
||||
use imbl_value::InternedString;
|
||||
use models::FromStrParser;
|
||||
use rpc_toolkit::{from_fn_async, Context, Empty, HandlerExt, ParentHandler};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::fs::OpenOptions;
|
||||
use tokio::process::Command;
|
||||
use tracing::instrument;
|
||||
use ts_rs::TS;
|
||||
|
||||
use crate::context::{CliContext, RpcContext};
|
||||
use crate::hostname::Hostname;
|
||||
use crate::prelude::*;
|
||||
use crate::util::io::create_file;
|
||||
use crate::util::serde::{display_serializable, HandlerExtSerde, WithIoFormat};
|
||||
use crate::util::serde::{display_serializable, HandlerExtSerde, Pem, WithIoFormat};
|
||||
use crate::util::Invoke;
|
||||
|
||||
pub const SSH_AUTHORIZED_KEYS_FILE: &str = "/home/start9/.ssh/authorized_keys";
|
||||
pub const SSH_DIR: &str = "/home/start9/.ssh";
|
||||
|
||||
#[derive(Clone, Debug, Deserialize, Serialize)]
|
||||
pub struct SshKeys(BTreeMap<InternedString, WithTimeData<SshPubKey>>);
|
||||
@@ -143,7 +146,7 @@ pub async fn add(ctx: RpcContext, AddParams { key }: AddParams) -> Result<SshKey
|
||||
))
|
||||
})
|
||||
.await?;
|
||||
sync_keys(&keys, SSH_AUTHORIZED_KEYS_FILE).await?;
|
||||
sync_pubkeys(&keys, SSH_DIR).await?;
|
||||
Ok(res)
|
||||
}
|
||||
|
||||
@@ -175,7 +178,7 @@ pub async fn delete(
|
||||
}
|
||||
})
|
||||
.await?;
|
||||
sync_keys(&keys, SSH_AUTHORIZED_KEYS_FILE).await
|
||||
sync_pubkeys(&keys, SSH_DIR).await
|
||||
}
|
||||
|
||||
fn display_all_ssh_keys(params: WithIoFormat<Empty>, result: Vec<SshKeyResponse>) {
|
||||
@@ -226,23 +229,90 @@ pub async fn list(ctx: RpcContext) -> Result<Vec<SshKeyResponse>, Error> {
|
||||
}
|
||||
|
||||
#[instrument(skip_all)]
|
||||
pub async fn sync_keys<P: AsRef<Path>>(keys: &SshKeys, dest: P) -> Result<(), Error> {
|
||||
pub async fn sync_keys<P: AsRef<Path>>(
|
||||
hostname: &Hostname,
|
||||
privkey: &Pem<ssh_key::PrivateKey>,
|
||||
pubkeys: &SshKeys,
|
||||
ssh_dir: P,
|
||||
) -> Result<(), Error> {
|
||||
use tokio::io::AsyncWriteExt;
|
||||
|
||||
let dest = dest.as_ref();
|
||||
let ssh_dir = dest.parent().ok_or_else(|| {
|
||||
Error::new(
|
||||
eyre!("SSH Key File cannot be \"/\""),
|
||||
crate::ErrorKind::Filesystem,
|
||||
)
|
||||
})?;
|
||||
let ssh_dir = ssh_dir.as_ref();
|
||||
if tokio::fs::metadata(ssh_dir).await.is_err() {
|
||||
tokio::fs::create_dir_all(ssh_dir).await?;
|
||||
}
|
||||
let mut f = create_file(dest).await?;
|
||||
for key in keys.0.values() {
|
||||
|
||||
let id_alg = if privkey.0.algorithm().is_ed25519() {
|
||||
"id_ed25519"
|
||||
} else if privkey.0.algorithm().is_ecdsa() {
|
||||
"id_ecdsa"
|
||||
} else if privkey.0.algorithm().is_rsa() {
|
||||
"id_rsa"
|
||||
} else {
|
||||
"id_unknown"
|
||||
};
|
||||
|
||||
let privkey_path = ssh_dir.join(id_alg);
|
||||
let mut f = OpenOptions::new()
|
||||
.create(true)
|
||||
.write(true)
|
||||
.mode(0o600)
|
||||
.open(&privkey_path)
|
||||
.await
|
||||
.with_ctx(|_| {
|
||||
(
|
||||
ErrorKind::Filesystem,
|
||||
lazy_format!("create {privkey_path:?}"),
|
||||
)
|
||||
})?;
|
||||
f.write_all(privkey.to_string().as_bytes()).await?;
|
||||
f.write_all(b"\n").await?;
|
||||
f.sync_all().await?;
|
||||
let mut f = create_file(ssh_dir.join(id_alg).with_extension("pub")).await?;
|
||||
f.write_all(
|
||||
(privkey
|
||||
.0
|
||||
.public_key()
|
||||
.to_openssh()
|
||||
.with_kind(ErrorKind::OpenSsh)?
|
||||
+ " start9@"
|
||||
+ &*hostname.0)
|
||||
.as_bytes(),
|
||||
)
|
||||
.await?;
|
||||
f.write_all(b"\n").await?;
|
||||
f.sync_all().await?;
|
||||
|
||||
let mut f = create_file(ssh_dir.join("authorized_keys")).await?;
|
||||
for key in pubkeys.0.values() {
|
||||
f.write_all(key.0.to_key_format().as_bytes()).await?;
|
||||
f.write_all(b"\n").await?;
|
||||
}
|
||||
|
||||
Command::new("chown")
|
||||
.arg("-R")
|
||||
.arg("start9:startos")
|
||||
.arg(ssh_dir)
|
||||
.invoke(ErrorKind::Filesystem)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[instrument(skip_all)]
|
||||
pub async fn sync_pubkeys<P: AsRef<Path>>(pubkeys: &SshKeys, ssh_dir: P) -> Result<(), Error> {
|
||||
use tokio::io::AsyncWriteExt;
|
||||
|
||||
let ssh_dir = ssh_dir.as_ref();
|
||||
if tokio::fs::metadata(ssh_dir).await.is_err() {
|
||||
tokio::fs::create_dir_all(ssh_dir).await?;
|
||||
}
|
||||
|
||||
let mut f = create_file(ssh_dir.join("authorized_keys")).await?;
|
||||
for key in pubkeys.0.values() {
|
||||
f.write_all(key.0.to_key_format().as_bytes()).await?;
|
||||
f.write_all(b"\n").await?;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -80,7 +80,7 @@ impl MainStatus {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn backing_up(self) -> Self {
|
||||
pub fn backing_up(&self) -> Self {
|
||||
MainStatus::BackingUp {
|
||||
on_complete: if self.running() {
|
||||
StartStop::Start
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user