rename frontend to web and update contributing guide (#2509)

* rename frontend to web and update contributing guide

* rename this time

* fix build

* restructure rust code

* update documentation

* update descriptions

* Update CONTRIBUTING.md

Co-authored-by: J H <2364004+Blu-J@users.noreply.github.com>

---------

Co-authored-by: Aiden McClelland <me@drbonez.dev>
Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
Co-authored-by: J H <2364004+Blu-J@users.noreply.github.com>
This commit is contained in:
Matt Hill
2023-11-13 14:22:23 -07:00
committed by GitHub
parent 871f78b570
commit 86567e7fa5
968 changed files with 812 additions and 6672 deletions

View File

@@ -0,0 +1,241 @@
use std::path::PathBuf;
use std::sync::Arc;
use models::OptionExt;
use sqlx::{Executor, Postgres};
use tracing::instrument;
use super::PKG_ARCHIVE_DIR;
use crate::context::RpcContext;
use crate::db::model::{
CurrentDependencies, Database, PackageDataEntry, PackageDataEntryInstalled,
PackageDataEntryMatchModelRef,
};
use crate::error::ErrorCollection;
use crate::prelude::*;
use crate::s9pk::manifest::PackageId;
use crate::util::{Apply, Version};
use crate::volume::{asset_dir, script_dir};
use crate::Error;
#[instrument(skip_all)]
/// Removes all artifacts belonging to one installed version of a package:
/// its docker images, its archive on disk, and its asset/script directories.
///
/// Failures are collected rather than short-circuited, so as much as possible
/// is cleaned up; the combined result is returned at the end.
pub async fn cleanup(ctx: &RpcContext, id: &PackageId, version: &Version) -> Result<(), Error> {
    let mut errors = ErrorCollection::new();

    // Stop tracking the manager for this package/version pair.
    ctx.managers.remove(&(id.clone(), version.clone())).await;

    // Remove every docker image for this package version, concurrently.
    // Equivalent to: docker images start9/$APP_ID/*:$VERSION -q | xargs docker rmi
    let images = crate::util::docker::images_for(id, version).await?;
    let removals = images.into_iter().map(|sha| async {
        let sha = sha; // move into future
        crate::util::docker::remove_image(&sha).await
    });
    errors.extend(futures::future::join_all(removals).await);

    // Remove on-disk state: the package archive, then assets, then scripts.
    let archive_dir = ctx
        .datadir
        .join(PKG_ARCHIVE_DIR)
        .join(id)
        .join(version.as_str());
    for dir in [
        archive_dir,
        asset_dir(&ctx.datadir, id, version),
        script_dir(&ctx.datadir, id, version),
    ] {
        // Only attempt removal when the directory actually exists.
        if tokio::fs::metadata(&dir).await.is_ok() {
            tokio::fs::remove_dir_all(&dir)
                .await
                .apply(|res| errors.handle(res));
        }
    }

    errors.into_result()
}
#[instrument(skip_all)]
/// Cleans up after a failed install, restore, or update of `id`, then repairs
/// the package's database entry so it reflects a consistent state.
pub async fn cleanup_failed(ctx: &RpcContext, id: &PackageId) -> Result<(), Error> {
    // First decide which version's artifacts (if any) should be removed,
    // based on the state the package entry was left in.
    if let Some(version) = match ctx
        .db
        .peek()
        .await
        .as_package_data()
        .as_idx(id)
        .or_not_found(id)?
        .as_match()
    {
        // A fresh install/restore never completed: remove that version.
        PackageDataEntryMatchModelRef::Installing(m) => Some(m.as_manifest().as_version().de()?),
        PackageDataEntryMatchModelRef::Restoring(m) => Some(m.as_manifest().as_version().de()?),
        PackageDataEntryMatchModelRef::Updating(m) => {
            // For a failed update, only remove the *new* version's artifacts,
            // and only when it differs from the still-installed version.
            let manifest_version = m.as_manifest().as_version().de()?;
            let installed = m.as_installed().as_manifest().as_version().de()?;
            if manifest_version != installed {
                Some(manifest_version)
            } else {
                None // do not remove existing data
            }
        }
        _ => {
            tracing::warn!("{}: Nothing to clean up!", id);
            None
        }
    } {
        cleanup(ctx, id, &version).await?;
    }
    // Then fix up the database entry itself.
    ctx.db
        .mutate(|v| {
            match v
                .clone()
                .into_package_data()
                .into_idx(id)
                .or_not_found(id)?
                .as_match()
            {
                // Nothing was ever installed: drop the entry entirely.
                PackageDataEntryMatchModelRef::Installing(_)
                | PackageDataEntryMatchModelRef::Restoring(_) => {
                    v.as_package_data_mut().remove(id)?;
                }
                // A failed update leaves the previously installed version in
                // place: rewrite the entry back to plain Installed.
                PackageDataEntryMatchModelRef::Updating(pde) => {
                    v.as_package_data_mut()
                        .as_idx_mut(id)
                        .or_not_found(id)?
                        .ser(&PackageDataEntry::Installed(PackageDataEntryInstalled {
                            manifest: pde.as_installed().as_manifest().de()?,
                            static_files: pde.as_static_files().de()?,
                            installed: pde.as_installed().de()?,
                        }))?;
                }
                _ => (),
            }
            Ok(())
        })
        .await
}
#[instrument(skip_all)]
/// Removes `id` from the current-dependents list of every package it depends
/// on (and from its own list, via the chained `once(id)`).
pub fn remove_from_current_dependents_lists(
    db: &mut Model<Database>,
    id: &PackageId,
    current_dependencies: &CurrentDependencies,
) -> Result<(), Error> {
    for dependency in current_dependencies.0.keys().chain(std::iter::once(id)) {
        // Packages that are missing or not installed simply have no
        // dependents list to update, so they are skipped.
        let dependents = db
            .as_package_data_mut()
            .as_idx_mut(dependency)
            .and_then(|pkg| pkg.as_installed_mut())
            .map(|installed| installed.as_current_dependents_mut());
        if let Some(dependents) = dependents {
            dependents.remove(id)?;
        }
    }
    Ok(())
}
#[instrument(skip_all)]
/// Fully uninstalls package `id` (which must already be in the Removing
/// state): removes its artifacts, volume data, network keys, and database
/// entry, while preserving any volume paths still mounted by dependents.
pub async fn uninstall<Ex>(ctx: &RpcContext, secrets: &mut Ex, id: &PackageId) -> Result<(), Error>
where
    for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
    let db = ctx.db.peek().await;
    // Fails unless the entry is in the Removing state.
    let entry = db
        .as_package_data()
        .as_idx(id)
        .or_not_found(id)?
        .expect_as_removing()?;
    // Collect the volume pointer paths of every *other* installed package that
    // currently depends on this one — those paths must survive the cleanup.
    let dependents_paths: Vec<PathBuf> = entry
        .as_removing()
        .as_current_dependents()
        .keys()?
        .into_iter()
        .filter(|x| x != id)
        .flat_map(|x| db.as_package_data().as_idx(&x))
        .flat_map(|x| x.as_installed())
        .flat_map(|x| x.as_manifest().as_volumes().de())
        .flat_map(|x| x.values().cloned().collect::<Vec<_>>())
        .flat_map(|x| x.pointer_path(&ctx.datadir))
        .collect();
    let volume_dir = ctx
        .datadir
        .join(crate::volume::PKG_VOLUME_DIR)
        .join(&*entry.as_manifest().as_id().de()?);
    let version = entry.as_removing().as_manifest().as_version().de()?;
    tracing::debug!(
        "Cleaning up {:?} except for {:?}",
        volume_dir,
        dependents_paths
    );
    // Remove images/archive/assets/scripts, then volume data (sparing
    // dependent mounts), then secrets-database network keys.
    cleanup(ctx, id, &version).await?;
    cleanup_folder(volume_dir, Arc::new(dependents_paths)).await;
    remove_network_keys(secrets, id).await?;
    // Finally drop the entry and scrub this id from dependents lists.
    ctx.db
        .mutate(|d| {
            d.as_package_data_mut().remove(id)?;
            remove_from_current_dependents_lists(
                d,
                id,
                &entry.as_removing().as_current_dependencies().de()?,
            )
        })
        .await
}
#[instrument(skip_all)]
/// Deletes the network keys and tor records stored for package `id` in the
/// secrets database.
pub async fn remove_network_keys<Ex>(secrets: &mut Ex, id: &PackageId) -> Result<(), Error>
where
    for<'a> &'a mut Ex: Executor<'a, Database = Postgres>,
{
    sqlx::query!("DELETE FROM network_keys WHERE package = $1", &*id)
        .execute(&mut *secrets)
        .await?;
    sqlx::query!("DELETE FROM tor WHERE package = $1", &*id)
        .execute(&mut *secrets)
        .await?;
    Ok(())
}
/// Needed to remove, without removing the folders that are mounted in the
/// other docker containers (`dependents_volumes`).
///
/// Recursively deletes `path`: a directory containing no dependent volume is
/// removed wholesale; a directory containing one is descended into so only
/// its safe children are removed. Returns a boxed future because it recurses.
/// All filesystem errors are deliberately ignored (best-effort cleanup).
pub fn cleanup_folder(
    path: PathBuf,
    dependents_volumes: Arc<Vec<PathBuf>>,
) -> futures::future::BoxFuture<'static, ()> {
    Box::pin(async move {
        // Path is already gone (or unreadable): nothing to do.
        let meta_data = match tokio::fs::metadata(&path).await {
            Ok(a) => a,
            Err(_e) => {
                return;
            }
        };
        if !meta_data.is_dir() {
            // A plain file can never be a protected mount point: remove it.
            // (These traces are routine control flow, so debug — not error.)
            tracing::debug!("is_not dir, remove {:?}", path);
            let _ = tokio::fs::remove_file(&path).await;
            return;
        }
        // No dependent volume lives at or below this directory, so it is safe
        // to delete wholesale. `Path::starts_with` already returns true on
        // equality, so the former separate `v == &path` check was redundant.
        if !dependents_volumes.iter().any(|v| v.starts_with(&path)) {
            tracing::debug!("No parents, remove {:?}", path);
            let _ = tokio::fs::remove_dir_all(&path).await;
            return;
        }
        let mut read_dir = match tokio::fs::read_dir(&path).await {
            Ok(a) => a,
            Err(_e) => {
                return;
            }
        };
        // Some dependent volume is inside this directory: recurse so that only
        // entries not containing a protected path get removed.
        tracing::debug!("Parents, recurse {:?}", path);
        while let Some(entry) = read_dir.next_entry().await.ok().flatten() {
            let entry_path = entry.path();
            cleanup_folder(entry_path, dependents_volumes.clone()).await;
        }
    })
}

File diff suppressed because it is too large Load Diff

Binary file not shown.

After

Width:  |  Height:  |  Size: 280 KiB

View File

@@ -0,0 +1,228 @@
use std::future::Future;
use std::io::SeekFrom;
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
use std::sync::Arc;
use std::task::{Context, Poll};
use std::time::Duration;
use models::{OptionExt, PackageId};
use serde::{Deserialize, Serialize};
use tokio::io::{AsyncRead, AsyncSeek, AsyncWrite};
use crate::db::model::Database;
use crate::prelude::*;
/// Shared, atomically-updated progress record for a package install, covering
/// the download, validation, and unpack phases.
#[derive(Debug, Deserialize, Serialize, HasModel, Default)]
#[serde(rename_all = "kebab-case")]
#[model = "Model<Self>"]
pub struct InstallProgress {
    // Total size in bytes, when known up front.
    pub size: Option<u64>,
    // Bytes downloaded so far.
    pub downloaded: AtomicU64,
    pub download_complete: AtomicBool,
    // Bytes read while validating the package.
    pub validated: AtomicU64,
    pub validation_complete: AtomicBool,
    // Bytes read while unpacking the package.
    pub unpacked: AtomicU64,
    pub unpack_complete: AtomicBool,
}
impl InstallProgress {
    /// Creates a fresh progress record with every counter zeroed and every
    /// phase flag cleared.
    pub fn new(size: Option<u64>) -> Self {
        InstallProgress {
            size,
            downloaded: AtomicU64::new(0),
            download_complete: AtomicBool::new(false),
            validated: AtomicU64::new(0),
            validation_complete: AtomicBool::new(false),
            unpacked: AtomicU64::new(0),
            unpack_complete: AtomicBool::new(false),
        }
    }
    /// Marks the download phase as finished.
    pub fn download_complete(&self) {
        self.download_complete.store(true, Ordering::SeqCst)
    }
    /// Repeatedly serializes this record into the package's `install-progress`
    /// DB field (every 300 ms) until the download-complete flag is set, then
    /// writes one final snapshot.
    pub async fn track_download(self: Arc<Self>, db: PatchDb, id: PackageId) -> Result<(), Error> {
        let write_progress = |model: &mut Model<Database>| {
            model
                .as_package_data_mut()
                .as_idx_mut(&id)
                .or_not_found(&id)?
                .as_install_progress_mut()
                .or_not_found("install-progress")?
                .ser(&self)
        };
        while !self.download_complete.load(Ordering::SeqCst) {
            db.mutate(&write_progress).await?;
            tokio::time::sleep(Duration::from_millis(300)).await;
        }
        db.mutate(&write_progress).await
    }
    /// Runs `f` while a spawned [`Self::track_download`] task mirrors progress
    /// into the database; stops the tracker when `f` finishes. A tracker
    /// failure is surfaced before `f`'s own result.
    pub async fn track_download_during<
        F: FnOnce() -> Fut,
        Fut: Future<Output = Result<T, Error>>,
        T,
    >(
        self: &Arc<Self>,
        db: PatchDb,
        id: &PackageId,
        f: F,
    ) -> Result<T, Error> {
        let tracker = tokio::spawn(self.clone().track_download(db.clone(), id.clone()));
        let outcome = f().await;
        // Tell the tracker loop to exit, then wait for its final write.
        self.download_complete.store(true, Ordering::SeqCst);
        tracker.await.unwrap()?;
        outcome
    }
    /// Like [`Self::track_download`], but loops until the caller-supplied
    /// `complete` flag is set instead of the download flag.
    pub async fn track_read(
        self: Arc<Self>,
        db: PatchDb,
        id: PackageId,
        complete: Arc<AtomicBool>,
    ) -> Result<(), Error> {
        let write_progress = |model: &mut Model<Database>| {
            model
                .as_package_data_mut()
                .as_idx_mut(&id)
                .or_not_found(&id)?
                .as_install_progress_mut()
                .or_not_found("install-progress")?
                .ser(&self)
        };
        while !complete.load(Ordering::SeqCst) {
            db.mutate(&write_progress).await?;
            tokio::time::sleep(Duration::from_millis(300)).await;
        }
        db.mutate(&write_progress).await
    }
    /// Runs `f` while a spawned [`Self::track_read`] task mirrors progress into
    /// the database; stops the tracker when `f` finishes. A tracker failure is
    /// surfaced before `f`'s own result.
    pub async fn track_read_during<
        F: FnOnce() -> Fut,
        Fut: Future<Output = Result<T, Error>>,
        T,
    >(
        self: &Arc<Self>,
        db: PatchDb,
        id: &PackageId,
        f: F,
    ) -> Result<T, Error> {
        let complete = Arc::new(AtomicBool::new(false));
        let tracker =
            tokio::spawn(
                self.clone()
                    .track_read(db.clone(), id.clone(), complete.clone()),
            );
        let outcome = f().await;
        complete.store(true, Ordering::SeqCst);
        tracker.await.unwrap()?;
        outcome
    }
}
/// Wraps a reader/writer and mirrors the byte counts flowing through it into
/// a shared [`InstallProgress`].
#[pin_project::pin_project]
#[derive(Debug)]
pub struct InstallProgressTracker<RW> {
    #[pin]
    inner: RW,
    // While true, bytes read count toward `validated`; after `validated()` is
    // called they count toward `unpacked`.
    validating: bool,
    progress: Arc<InstallProgress>,
}
impl<RW> InstallProgressTracker<RW> {
pub fn new(inner: RW, progress: Arc<InstallProgress>) -> Self {
InstallProgressTracker {
inner,
validating: true,
progress,
}
}
pub fn validated(&mut self) {
self.progress
.validation_complete
.store(true, Ordering::SeqCst);
self.validating = false;
}
}
impl<W: AsyncWrite> AsyncWrite for InstallProgressTracker<W> {
    /// Forwards the write and counts every byte the inner writer accepted as
    /// downloaded.
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, std::io::Error>> {
        let this = self.project();
        let res = this.inner.poll_write(cx, buf);
        if let Poll::Ready(Ok(written)) = &res {
            this.progress
                .downloaded
                .fetch_add(*written as u64, Ordering::SeqCst);
        }
        res
    }
    /// Pure passthrough to the inner writer.
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), std::io::Error>> {
        self.project().inner.poll_flush(cx)
    }
    /// Pure passthrough to the inner writer.
    fn poll_shutdown(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
    ) -> Poll<Result<(), std::io::Error>> {
        self.project().inner.poll_shutdown(cx)
    }
    /// Vectored variant of [`Self::poll_write`], with the same accounting.
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[std::io::IoSlice<'_>],
    ) -> Poll<Result<usize, std::io::Error>> {
        let this = self.project();
        let res = this.inner.poll_write_vectored(cx, bufs);
        if let Poll::Ready(Ok(written)) = &res {
            this.progress
                .downloaded
                .fetch_add(*written as u64, Ordering::SeqCst);
        }
        res
    }
}
impl<R: AsyncRead> AsyncRead for InstallProgressTracker<R> {
    /// Forwards the read and adds the number of newly-filled bytes to the
    /// counter for the current phase (validation or unpack).
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut tokio::io::ReadBuf<'_>,
    ) -> Poll<std::io::Result<()>> {
        let this = self.project();
        // Bytes already in the buffer before this read; the delta afterwards
        // is what the inner reader produced.
        let before = buf.filled().len() as u64;
        let res = this.inner.poll_read(cx, buf);
        if let Poll::Ready(Ok(())) = &res {
            let counter = if *this.validating {
                &this.progress.validated
            } else {
                &this.progress.unpacked
            };
            counter.fetch_add(buf.filled().len() as u64 - before, Ordering::SeqCst);
        }
        res
    }
}
impl<R: AsyncSeek> AsyncSeek for InstallProgressTracker<R> {
    /// Pure passthrough to the inner seeker.
    fn start_seek(self: Pin<&mut Self>, position: SeekFrom) -> std::io::Result<()> {
        self.project().inner.start_seek(position)
    }
    /// Forwards completion and, on success, overwrites the current phase's
    /// counter with the new absolute offset.
    fn poll_complete(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<std::io::Result<u64>> {
        let this = self.project();
        let res = this.inner.poll_complete(cx);
        if let Poll::Ready(Ok(offset)) = &res {
            let counter = if *this.validating {
                &this.progress.validated
            } else {
                &this.progress.unpacked
            };
            counter.store(*offset, Ordering::SeqCst);
        }
        res
    }
}

View File

@@ -0,0 +1,18 @@
use std::collections::BTreeMap;
use rpc_toolkit::command;
use tracing::instrument;
use crate::config::not_found;
use crate::context::RpcContext;
use crate::db::model::CurrentDependents;
use crate::prelude::*;
use crate::s9pk::manifest::PackageId;
use crate::util::serde::display_serializable;
use crate::util::Version;
use crate::Error;
/// Parent RPC command grouping the `dry` subcommand; a no-op on its own.
#[command(subcommands(dry))]
pub async fn update() -> Result<(), Error> {
    Ok(())
}