From 4e7d33b07f5bbac1be9629e5a44b4d9361568ac1 Mon Sep 17 00:00:00 2001 From: Shadowy Super Coder Date: Wed, 11 Feb 2026 09:20:16 -0700 Subject: [PATCH] Fix mount propagation so container-internal mounts are visible to dependent services Volume bind mounts into LXC containers inherited private propagation from the host source path, which prevented mounts made inside a container (e.g. NAS mounts via postinit.sh) from propagating back to the host. Dependent services bind-mounting the same volume from the host side would never see these internal mounts. Self-bind each host volume directory and mark it rshared so that container-internal mounts propagate back to the host path. Mark dependency mounts as rslave so they receive propagated mounts but cannot propagate mounts back to the source service. Because rshared propagation means mounts can survive container teardown, add defense-in-depth to uninstall cleanup: unmount any remaining mounts under the package volume path, then refuse to delete if any persist, preventing remove_dir_all from traversing into a live NFS/NAS mount and destroying data. --- core/src/disk/mount/util.rs | 18 +++++++++++++ core/src/service/effects/dependency.rs | 15 +++++++++-- core/src/service/persistent_container.rs | 34 +++++++++++++++++++++++- core/src/service/uninstall.rs | 17 ++++++++++++ 4 files changed, 81 insertions(+), 3 deletions(-) diff --git a/core/src/disk/mount/util.rs b/core/src/disk/mount/util.rs index 327bb2169..4106f6cb5 100644 --- a/core/src/disk/mount/util.rs +++ b/core/src/disk/mount/util.rs @@ -61,6 +61,24 @@ pub async fn unmount>(mountpoint: P, lazy: bool) -> Result<(), Er Ok(()) } +/// Returns true if any mountpoints exist under (or at) the given path. 
+pub async fn has_mounts_under<P: AsRef<Path>>(path: P) -> Result<bool, Error> { + let path = path.as_ref(); + let canonical_path = tokio::fs::canonicalize(path) + .await + .with_ctx(|_| (ErrorKind::Filesystem, lazy_format!("canonicalize {path:?}")))?; + + let mounts_content = tokio::fs::read_to_string("/proc/mounts") + .await + .with_ctx(|_| (ErrorKind::Filesystem, "read /proc/mounts"))?; + + Ok(mounts_content.lines().any(|line| { + line.split_whitespace() + .nth(1) + .map_or(false, |mp| Path::new(mp).starts_with(&canonical_path)) + })) +} + /// Unmounts all mountpoints under (and including) the given path, in reverse /// depth order so that nested mounts are unmounted before their parents. #[instrument(skip_all)] diff --git a/core/src/service/effects/dependency.rs b/core/src/service/effects/dependency.rs index 7cf233452..b998385c4 100644 --- a/core/src/service/effects/dependency.rs +++ b/core/src/service/effects/dependency.rs @@ -6,6 +6,8 @@ use clap::builder::ValueParserFactory; use exver::VersionRange; use rust_i18n::t; +use tokio::process::Command; + use crate::db::model::package::{ CurrentDependencies, CurrentDependencyInfo, CurrentDependencyKind, ManifestPreference, TaskEntry, @@ -19,7 +21,7 @@ use crate::service::effects::callbacks::CallbackHandler; use crate::service::effects::prelude::*; use crate::service::rpc::CallbackId; use crate::status::health_check::NamedHealthCheckResult; -use crate::util::{FromStrParser, VersionString}; +use crate::util::{FromStrParser, Invoke, VersionString}; use crate::volume::data_dir; use crate::{DATA_DIR, HealthCheckId, PackageId, ReplayId, VolumeId}; @@ -90,7 +92,7 @@ pub async fn mount( ), ) .mount( - mountpoint, + &mountpoint, if readonly { MountType::ReadOnly } else { @@ -99,6 +101,15 @@ ) .await?; + // Make the dependency mount a slave so it receives propagated mounts + // (e.g. NAS mounts from the source service) but cannot propagate + // mounts back to the source service's volume. 
+ Command::new("mount") + .arg("--make-rslave") + .arg(&mountpoint) + .invoke(ErrorKind::Filesystem) + .await?; + Ok(()) } diff --git a/core/src/service/persistent_container.rs b/core/src/service/persistent_container.rs index 7c73f7bf0..031dcd085 100644 --- a/core/src/service/persistent_container.rs +++ b/core/src/service/persistent_container.rs @@ -20,6 +20,7 @@ use crate::disk::mount::filesystem::loop_dev::LoopDev; use crate::disk::mount::filesystem::overlayfs::OverlayGuard; use crate::disk::mount::filesystem::{MountType, ReadOnly}; use crate::disk::mount::guard::{GenericMountGuard, MountGuard}; +use crate::disk::mount::util::{is_mountpoint, unmount}; use crate::lxc::{HOST_RPC_SERVER_SOCKET, LxcConfig, LxcContainer}; use crate::net::net_controller::NetService; use crate::prelude::*; @@ -76,6 +77,7 @@ pub struct PersistentContainer { pub(super) rpc_client: UnixRpcClient, pub(super) rpc_server: watch::Sender, ShutdownHandle)>>, js_mount: MountGuard, + host_volume_binds: BTreeMap, volumes: BTreeMap, assets: Vec, pub(super) images: BTreeMap>, @@ -120,6 +122,7 @@ impl PersistentContainer { .is_ok(); let mut volumes = BTreeMap::new(); + let mut host_volume_binds = BTreeMap::new(); // TODO: remove once packages are reconverted let added = if is_compat { @@ -128,13 +131,35 @@ impl PersistentContainer { BTreeSet::default() }; for volume in s9pk.as_manifest().volumes.union(&added) { + let host_volume_dir = data_dir(DATA_DIR, &s9pk.as_manifest().id, volume); + + // Self-bind the host volume directory and mark it rshared so that + // mounts created inside the container (e.g. NAS mounts from + // postinit.sh) propagate back to the host path and are visible to + // dependent services that bind-mount the same volume. + if is_mountpoint(&host_volume_dir).await? 
{ + unmount(&host_volume_dir, true).await?; + } + let host_bind = MountGuard::mount( + &Bind::new(&host_volume_dir), + &host_volume_dir, + MountType::ReadWrite, + ) + .await?; + Command::new("mount") + .arg("--make-rshared") + .arg(&host_volume_dir) + .invoke(ErrorKind::Filesystem) + .await?; + host_volume_binds.insert(volume.clone(), host_bind); + let mountpoint = lxc_container .rootfs_dir() .join("media/startos/volumes") .join(volume); let mount = MountGuard::mount( &IdMapped::new( - Bind::new(data_dir(DATA_DIR, &s9pk.as_manifest().id, volume)), + Bind::new(&host_volume_dir), vec![IdMap { from_id: 0, to_id: 100000, @@ -296,6 +321,7 @@ impl PersistentContainer { rpc_server: watch::channel(None).0, // procedures: Default::default(), js_mount, + host_volume_binds, volumes, assets, images, @@ -439,6 +465,7 @@ impl PersistentContainer { let rpc_server = self.rpc_server.send_replace(None); let js_mount = self.js_mount.take(); let volumes = std::mem::take(&mut self.volumes); + let host_volume_binds = std::mem::take(&mut self.host_volume_binds); let assets = std::mem::take(&mut self.assets); let images = std::mem::take(&mut self.images); let subcontainers = self.subcontainers.clone(); @@ -461,6 +488,11 @@ impl PersistentContainer { for (_, volume) in volumes { errs.handle(volume.unmount(true).await); } + // Unmount host-side shared binds after the rootfs-side volume + // mounts. Use delete_mountpoint=false to preserve the data dirs. 
+ for (_, host_bind) in host_volume_binds { + errs.handle(host_bind.unmount(false).await); + } for assets in assets { errs.handle(assets.unmount(true).await); } diff --git a/core/src/service/uninstall.rs b/core/src/service/uninstall.rs index a924e8cb0..5334e6661 100644 --- a/core/src/service/uninstall.rs +++ b/core/src/service/uninstall.rs @@ -4,6 +4,7 @@ use imbl::vector; use crate::context::RpcContext; use crate::db::model::package::{InstalledState, InstallingInfo, InstallingState, PackageState}; +use crate::disk::mount::util::{has_mounts_under, unmount_all_under}; use crate::prelude::*; use crate::volume::PKG_VOLUME_DIR; use crate::{DATA_DIR, PACKAGE_DATA, PackageId}; @@ -81,6 +82,22 @@ pub async fn cleanup(ctx: &RpcContext, id: &PackageId, soft: bool) -> Result<(), if !soft { let path = Path::new(DATA_DIR).join(PKG_VOLUME_DIR).join(&manifest.id); if tokio::fs::metadata(&path).await.is_ok() { + // Best-effort cleanup of any propagated mounts (e.g. NAS) + // that survived container destroy or were never cleaned up + // (force-uninstall skips destroy entirely). + unmount_all_under(&path, true).await.log_err(); + // Hard check: refuse to delete if mounts are still active, + // to avoid traversing into a live NFS/NAS mount. + if has_mounts_under(&path).await? { + return Err(Error::new( + eyre!( + "Refusing to remove {}: active mounts remain under this path. \ + Unmount them manually and retry.", + path.display() + ), + ErrorKind::Filesystem, + )); + } tokio::fs::remove_dir_all(&path).await?; } let logs_dir = Path::new(PACKAGE_DATA).join("logs").join(&manifest.id);