Feat/js action (#1437)

* Feat: js action

wip: Getting async js

feat: Have execute get action config

feat: Read + Write

chore: Add typing for globals

chore: Change the default path, include error on missing function, and add json File Read Write

chore: Change the default path, include error on missing function, and add json File Read Write

wip: Fix the unit test

wip: Fix the unit test

feat: module loading

* fix: Change the source + add input

* fix: single thread runtime

* fix: Smaller fixes

* Apply suggestions from code review

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>

* fix: pr

Co-authored-by: Aiden McClelland <3732071+dr-bonez@users.noreply.github.com>
This commit is contained in:
J M
2022-05-19 18:02:50 -06:00
committed by GitHub
parent 2b6e54da1e
commit f7b5fb55d7
21 changed files with 2486 additions and 75 deletions

826
backend/Cargo.lock generated

File diff suppressed because it is too large Load Diff

View File

@@ -61,6 +61,8 @@ color-eyre = "0.5"
cookie_store = "0.15.0"
digest = "0.9.0"
divrem = "1.0.0"
deno_core = "0.134.0"
deno_ast = {version="0.14.0", features = ["transpiling"]}
ed25519-dalek = { version = "1.0.1", features = ["serde"] }
emver = { version = "0.1.6", features = ["serde"] }
fd-lock-rs = "0.1.3"

View File

@@ -12,8 +12,9 @@ use tracing::instrument;
use crate::config::{Config, ConfigSpec};
use crate::context::RpcContext;
use crate::id::{Id, ImageId, InvalidId};
use crate::procedure::PackageProcedure;
use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
use crate::status::health_check::HealthCheckId;
use crate::util::serde::{display_serializable, parse_stdin_deserializable, IoFormat};
use crate::util::Version;
use crate::volume::Volumes;
@@ -135,7 +136,7 @@ impl Action {
ctx,
pkg_id,
pkg_version,
Some(&format!("{}Action", action_id)),
ProcedureName::Action(action_id.clone()),
volumes,
input,
true,

View File

@@ -17,7 +17,7 @@ use crate::dependencies::reconfigure_dependents_with_live_pointers;
use crate::id::ImageId;
use crate::install::PKG_ARCHIVE_DIR;
use crate::net::interface::{InterfaceId, Interfaces};
use crate::procedure::{NoOutput, PackageProcedure};
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
use crate::util::serde::IoFormat;
use crate::util::{AtomicFile, Version};
@@ -99,7 +99,7 @@ impl BackupActions {
ctx,
pkg_id,
pkg_version,
Some("CreateBackup"),
ProcedureName::CreateBackup,
&volumes,
None,
false,
@@ -178,7 +178,7 @@ impl BackupActions {
ctx,
pkg_id,
pkg_version,
Some("RestoreBackup"),
ProcedureName::RestoreBackup,
&volumes,
None,
false,

View File

@@ -10,7 +10,7 @@ use super::{Config, ConfigSpec};
use crate::context::RpcContext;
use crate::dependencies::Dependencies;
use crate::id::ImageId;
use crate::procedure::PackageProcedure;
use crate::procedure::{PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
use crate::status::health_check::HealthCheckId;
use crate::util::Version;
@@ -53,7 +53,7 @@ impl ConfigActions {
ctx,
pkg_id,
pkg_version,
Some("GetConfig"),
ProcedureName::GetConfig,
volumes,
None::<()>,
false,
@@ -81,7 +81,7 @@ impl ConfigActions {
ctx,
pkg_id,
pkg_version,
Some("SetConfig"),
ProcedureName::SetConfig,
volumes,
Some(input),
false,

View File

@@ -270,6 +270,7 @@ impl RpcContext {
tracing::info!("Initialized Package Managers");
Ok(res)
}
#[instrument(skip(self, db, receipts))]
pub async fn set_nginx_conf<Db: DbHandle>(
&self,

View File

@@ -64,6 +64,7 @@ pub enum ErrorKind {
InvalidBackupTargetId = 56,
ProductKeyMismatch = 57,
LanPortConflict = 58,
Javascript = 59,
}
impl ErrorKind {
pub fn as_str(&self) -> &'static str {
@@ -127,6 +128,7 @@ impl ErrorKind {
InvalidBackupTargetId => "Invalid Backup Target ID",
ProductKeyMismatch => "Incompatible Product Keys",
LanPortConflict => "Incompatible LAN port configuration",
Javascript => "Javascript engine error",
}
}
}

View File

@@ -59,6 +59,7 @@ pub const PKG_ARCHIVE_DIR: &str = "package-data/archive";
pub const PKG_PUBLIC_DIR: &str = "package-data/public";
pub const PKG_DOCKER_DIR: &str = "package-data/docker";
pub const PKG_WASM_DIR: &str = "package-data/wasm";
pub const PKG_SCRIPT_DIR: &str = "package-data/scripts";
#[command(display(display_serializable))]
pub async fn list(#[context] ctx: RpcContext) -> Result<Vec<(PackageId, Version)>, Error> {

View File

@@ -25,7 +25,7 @@ use crate::net::interface::InterfaceId;
use crate::net::GeneratedCertificateMountPoint;
use crate::notifications::NotificationLevel;
use crate::procedure::docker::DockerProcedure;
use crate::procedure::{NoOutput, PackageProcedure};
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::status::MainStatus;
use crate::util::{Container, NonDetachingJoinHandle, Version};
@@ -312,7 +312,7 @@ async fn start_up_image(
&rt_state.ctx,
&rt_state.manifest.id,
&rt_state.manifest.version,
None,
ProcedureName::Main,
&rt_state.manifest.volumes,
None,
false,
@@ -391,6 +391,10 @@ impl Manager {
.commit_health_check_results
.store(false, Ordering::SeqCst);
let _ = self.shared.on_stop.send(OnStop::Exit);
let action = match &self.shared.manifest.main {
PackageProcedure::Docker(a) => a,
PackageProcedure::Script(_) => return Ok(()),
};
match self
.shared
.ctx
@@ -398,13 +402,11 @@ impl Manager {
.stop_container(
&self.shared.container_name,
Some(StopContainerOptions {
t: match &self.shared.manifest.main {
PackageProcedure::Docker(a) => a,
}
.sigterm_timeout
.map(|a| *a)
.unwrap_or(Duration::from_secs(30))
.as_secs_f64() as i64,
t: action
.sigterm_timeout
.map(|a| *a)
.unwrap_or(Duration::from_secs(30))
.as_secs_f64() as i64,
}),
)
.await
@@ -542,19 +544,21 @@ async fn stop(shared: &ManagerSharedState) -> Result<(), Error> {
) {
resume(shared).await?;
}
let action = match &shared.manifest.main {
PackageProcedure::Docker(a) => a,
PackageProcedure::Script(_) => return Ok(()),
};
match shared
.ctx
.docker
.stop_container(
&shared.container_name,
Some(StopContainerOptions {
t: match &shared.manifest.main {
PackageProcedure::Docker(a) => a,
}
.sigterm_timeout
.map(|a| *a)
.unwrap_or(Duration::from_secs(30))
.as_secs_f64() as i64,
t: action
.sigterm_timeout
.map(|a| *a)
.unwrap_or(Duration::from_secs(30))
.as_secs_f64() as i64,
}),
)
.await

View File

@@ -11,6 +11,7 @@ use tracing::instrument;
use crate::context::RpcContext;
use crate::id::ImageId;
use crate::procedure::PackageProcedure;
use crate::procedure::ProcedureName;
use crate::s9pk::manifest::PackageId;
use crate::util::Version;
use crate::volume::Volumes;
@@ -64,7 +65,7 @@ impl Migrations {
ctx,
pkg_id,
pkg_version,
Some("Migration"), // Migrations cannot be executed concurrently
ProcedureName::Migration, // Migrations cannot be executed concurrently
volumes,
Some(version),
false,
@@ -99,7 +100,7 @@ impl Migrations {
ctx,
pkg_id,
pkg_version,
Some("Migration"),
ProcedureName::Migration,
volumes,
Some(version),
false,

View File

@@ -68,7 +68,7 @@ impl NetController {
})
}
pub fn ssl_directory_for(&self, pkg_id: &PackageId) -> PathBuf {
pub fn ssl_directory_for(pkg_id: &PackageId) -> PathBuf {
PathBuf::from(format!("{}/{}", PACKAGE_CERT_PATH, pkg_id))
}

View File

@@ -21,6 +21,8 @@ use crate::util::Version;
use crate::volume::{VolumeId, Volumes};
use crate::{Error, ResultExt, HOST_IP};
use super::ProcedureName;
pub const NET_TLD: &str = "embassy";
lazy_static::lazy_static! {
@@ -87,12 +89,14 @@ impl DockerProcedure {
ctx: &RpcContext,
pkg_id: &PackageId,
pkg_version: &Version,
name: Option<&str>,
name: ProcedureName,
volumes: &Volumes,
input: Option<I>,
allow_inject: bool,
timeout: Option<Duration>,
) -> Result<Result<O, (i32, String)>, Error> {
let name = name.docker_name();
let name: Option<&str> = name.as_ref().map(|x| &**x);
let mut cmd = tokio::process::Command::new("docker");
if self.inject && allow_inject {
cmd.arg("exec");
@@ -313,7 +317,7 @@ impl DockerProcedure {
} else {
continue;
};
let src = volume.path_for(ctx, pkg_id, pkg_version, volume_id);
let src = volume.path_for(&ctx.datadir, pkg_id, pkg_version, volume_id);
if let Err(e) = tokio::fs::metadata(&src).await {
tracing::warn!("{} not mounted to container: {}", src.display(), e);
continue;

View File

@@ -0,0 +1,659 @@
use std::{path::PathBuf, time::Duration};
use serde::{Deserialize, Serialize};
use tracing::instrument;
use crate::{
context::RpcContext, s9pk::manifest::PackageId, util::Version, volume::Volumes, Error,
};
use self::js_runtime::JsExecutionEnvironment;
use super::ProcedureName;
/// Transpiled JavaScript source of a package's `embassy.js` entrypoint,
/// served to the deno runtime by the module loader.
#[derive(Serialize, Deserialize, Debug, Default, Clone)]
pub struct JsCode(String);

/// Failure categories for JS execution; the discriminant is used directly as
/// the `i32` error code returned to callers (mirroring a process exit code).
#[derive(Debug, Clone, Copy)]
pub enum JsError {
    Unknown = 1,
    Javascript = 2,
    Engine = 3,
    BoundryLayerSerDe = 4,
    Tokio = 5,
    FileSystem = 6,
    // NOTE(review): 143 matches the conventional 128+SIGTERM exit status for
    // a timed-out process — presumably chosen deliberately; confirm.
    Timeout = 143,
}

/// A procedure implemented by the package's JS script rather than a Docker
/// container. Carries no configuration of its own: everything needed at run
/// time (script path, function name) is derived from the package and the
/// `ProcedureName`.
#[derive(Clone, Debug, Deserialize, Serialize)]
#[serde(rename_all = "kebab-case")]
pub struct JsProcedure {}
impl JsProcedure {
    /// Validation hook matching `DockerProcedure::validate`'s shape; JS
    /// procedures currently have nothing to check (`volumes` is unused).
    pub fn validate(&self, volumes: &Volumes) -> Result<(), color_eyre::eyre::Report> {
        Ok(())
    }

    /// Run the named procedure from the package's `embassy.js` with full
    /// read/write effects, optionally bounded by `timeout`.
    ///
    /// Returns `Ok(Ok(output))` on success or `Ok(Err((code, message)))` on a
    /// JS-side failure, where `code` is a `JsError` discriminant — mirroring
    /// the exit-code/stderr pair of `DockerProcedure::execute`.
    #[instrument(skip(directory, input))]
    pub async fn execute<I: Serialize, O: for<'de> Deserialize<'de>>(
        &self,
        directory: &PathBuf,
        pkg_id: &PackageId,
        pkg_version: &Version,
        name: ProcedureName,
        volumes: &Volumes,
        input: Option<I>,
        timeout: Option<Duration>,
    ) -> Result<Result<O, (i32, String)>, Error> {
        // Inner async block collects `(JsError, String)` failures, which are
        // converted to the `(i32, String)` caller-facing form at the end.
        Ok(async move {
            let running_action = JsExecutionEnvironment::load_from_package(
                directory,
                pkg_id,
                pkg_version,
                volumes.clone(),
            )
            .await?
            .with_effects()
            .run_action(name, input);
            // Apply the timeout around the whole action; expiry maps to the
            // dedicated Timeout code so callers can distinguish and retry.
            let output: O = match timeout {
                Some(timeout_duration) => tokio::time::timeout(timeout_duration, running_action)
                    .await
                    .map_err(|_| (JsError::Timeout, "Timed out. Retrying soon...".to_owned()))??,
                None => running_action.await?,
            };
            Ok(output)
        }
        .await
        .map_err(|(error, message)| (error as i32, message)))
    }

    /// Like [`Self::execute`], but with read-only effects (mutating ops are
    /// rejected inside the JS sandbox).
    ///
    /// NOTE(review): this always invokes `/getConfig` (`ProcedureName::GetConfig`)
    /// regardless of which procedure is being sandboxed — confirm that
    /// sandboxed execution is only ever used for config dry-runs.
    #[instrument(skip(ctx, input))]
    pub async fn sandboxed<I: Serialize, O: for<'de> Deserialize<'de>>(
        &self,
        ctx: &RpcContext,
        pkg_id: &PackageId,
        pkg_version: &Version,
        volumes: &Volumes,
        input: Option<I>,
        timeout: Option<Duration>,
    ) -> Result<Result<O, (i32, String)>, Error> {
        Ok(async move {
            let running_action = JsExecutionEnvironment::load_from_package(
                &ctx.datadir,
                pkg_id,
                pkg_version,
                volumes.clone(),
            )
            .await?
            .read_only_effects()
            .run_action(ProcedureName::GetConfig, input);
            let output: O = match timeout {
                Some(timeout_duration) => tokio::time::timeout(timeout_duration, running_action)
                    .await
                    .map_err(|_| (JsError::Timeout, "Timed out. Retrying soon...".to_owned()))??,
                None => running_action.await?,
            };
            Ok(output)
        }
        .await
        .map_err(|(error, message)| (error as i32, message)))
    }
}
mod js_runtime {
use deno_core::anyhow::{anyhow, bail};
use deno_core::error::AnyError;
use deno_core::resolve_import;
use deno_core::JsRuntime;
use deno_core::ModuleLoader;
use deno_core::ModuleSource;
use deno_core::ModuleSourceFuture;
use deno_core::ModuleSpecifier;
use deno_core::ModuleType;
use deno_core::RuntimeOptions;
use deno_core::{Extension, OpDecl};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::sync::Arc;
use std::{path::PathBuf, pin::Pin};
use tokio::io::AsyncReadExt;
use crate::s9pk::manifest::PackageId;
use crate::util::Version;
use crate::volume::{script_dir, Volumes};
use super::super::ProcedureName;
use super::{JsCode, JsError};
/// Per-run context stored in the deno extension's op state and exposed
/// wholesale to the script through the `get_context` op.
#[derive(Clone, Deserialize, Serialize)]
struct JsContext {
    // When true, mutating ops (write_file/remove_file/remove_dir/create_dir)
    // refuse to run.
    sandboxed: bool,
    datadir: PathBuf,
    // JSON-pointer-style name of the function to invoke, e.g. "/getConfig"
    // (see `ProcedureName::js_function_name`).
    run_function: String,
    version: Version,
    package_id: PackageId,
    volumes: Arc<Volumes>,
    input: Value,
}

/// Shared slot that the `set_value` op writes the script's result into; read
/// back by `JsExecutionEnvironment::execute` after the event loop finishes.
#[derive(Clone, Default)]
struct AnswerState(std::sync::Arc<deno_core::parking_lot::Mutex<Value>>);

/// Module loader that serves only three in-memory modules: the saved `Deno`
/// global, the bundled `loadModule.js` bootstrap, and the package's own code.
#[derive(Clone, Debug)]
struct ModsLoader {
    code: JsCode,
}
impl ModuleLoader for ModsLoader {
    /// Resolve an import specifier against its referrer.
    ///
    /// The package's own module (anything whose referrer contains "embassy")
    /// may not import at all. Resolution failures are propagated as module
    /// errors rather than panicking: the specifier comes from untrusted
    /// package JS, so a malformed one must not take down the runtime
    /// (previously this was `.unwrap()`).
    fn resolve(
        &self,
        specifier: &str,
        referrer: &str,
        _is_main: bool,
    ) -> Result<ModuleSpecifier, AnyError> {
        if referrer.contains("embassy") {
            bail!("Embassy.js cannot import anything else");
        }
        Ok(resolve_import(specifier, referrer)?)
    }

    /// Serve module source for the three whitelisted specifiers; everything
    /// else (and any dynamic import) is rejected.
    fn load(
        &self,
        module_specifier: &ModuleSpecifier,
        maybe_referrer: Option<ModuleSpecifier>,
        is_dyn_import: bool,
    ) -> Pin<Box<ModuleSourceFuture>> {
        let module_specifier = module_specifier.as_str().to_owned();
        let module = match &*module_specifier {
            // Snapshot of the real `Deno` global, taken before the global is
            // nulled out so package code can't reach it directly.
            "file:///deno_global.js" => Ok(ModuleSource {
                module_url_specified: "file:///deno_global.js".to_string(),
                module_url_found: "file:///deno_global.js".to_string(),
                code: "const old_deno = Deno; Deno = null; export default old_deno"
                    .as_bytes()
                    .to_vec()
                    .into_boxed_slice(),
                module_type: ModuleType::JavaScript,
            }),
            // Bundled bootstrap that wires effects and invokes the package fn.
            "file:///loadModule.js" => Ok(ModuleSource {
                module_url_specified: "file:///loadModule.js".to_string(),
                module_url_found: "file:///loadModule.js".to_string(),
                code: include_str!("./js_scripts/loadModule.js")
                    .as_bytes()
                    .to_vec()
                    .into_boxed_slice(),
                module_type: ModuleType::JavaScript,
            }),
            // The package's own embassy.js, read from disk at load time.
            "file:///embassy.js" => Ok(ModuleSource {
                module_url_specified: "file:///embassy.js".to_string(),
                module_url_found: "file:///embassy.js".to_string(),
                code: self.code.0.as_bytes().to_vec().into_boxed_slice(),
                module_type: ModuleType::JavaScript,
            }),
            x => Err(anyhow!("Not allowed to import: {}", x)),
        };
        Box::pin(async move {
            if is_dyn_import {
                bail!("Will not import dynamic");
            }
            match &maybe_referrer {
                Some(x) if x.as_str() == "file:///embassy.js" => {
                    bail!("Embassy is not allowed to import")
                }
                _ => (),
            }
            module
        })
    }
}
/// Everything needed to spin up a deno runtime for one procedure call:
/// the package's script, the ops it is allowed to call, and the context
/// those ops read.
pub struct JsExecutionEnvironment {
    // Propagated into JsContext; gates mutating ops.
    sandboxed: bool,
    // The embassy data directory the volume paths are resolved under.
    base_directory: PathBuf,
    module_loader: ModsLoader,
    package_id: PackageId,
    version: Version,
    // Op declarations registered with the runtime; empty until
    // `with_effects`/`read_only_effects` is called.
    operations: Vec<OpDecl>,
    volumes: Arc<Volumes>,
}
impl JsExecutionEnvironment {
    /// Read the package's `embassy.js` from the scripts directory
    /// (`script_dir(datadir, pkg, version)/embassy.js`) and build an
    /// environment with no ops registered yet.
    ///
    /// Errors are `(JsError::FileSystem, message)` if the script is missing
    /// or unreadable.
    pub async fn load_from_package(
        data_directory: impl AsRef<std::path::Path>,
        package_id: &crate::s9pk::manifest::PackageId,
        version: &crate::util::Version,
        volumes: Volumes,
    ) -> Result<Self, (JsError, String)> {
        let data_dir = data_directory.as_ref();
        let base_directory = data_dir;
        let js_code = JsCode({
            let file_path = script_dir(data_dir, package_id, version).join("embassy.js");
            let mut file = match tokio::fs::File::open(file_path.clone()).await {
                Ok(x) => x,
                Err(e) => {
                    tracing::debug!("{:?}", e);
                    return Err((
                        JsError::FileSystem,
                        format!("The file opening '{:?}' created error: {}", file_path, e),
                    ));
                }
            };
            let mut buffer = Default::default();
            if let Err(err) = file.read_to_string(&mut buffer).await {
                tracing::debug!("{:?}", err);
                return Err((
                    JsError::FileSystem,
                    format!("The file reading created error: {}", err),
                ));
            };
            buffer
        });
        Ok(Self {
            base_directory: base_directory.to_owned(),
            module_loader: ModsLoader { code: js_code },
            operations: Default::default(),
            package_id: package_id.clone(),
            version: version.clone(),
            volumes: Arc::new(volumes),
            sandboxed: false,
        })
    }
    /// Register the full op set but flag the context as sandboxed, so the
    /// write-capable ops bail at call time (restriction lives in Rust, per
    /// the note on `mod fns`).
    pub fn read_only_effects(mut self) -> Self {
        self.sandboxed = true;
        self.with_effects()
    }
    /// Register every op exposed to the script (filesystem, logging, and the
    /// context/input/answer plumbing).
    pub fn with_effects(mut self) -> Self {
        self.operations = vec![
            fns::read_file::decl(),
            fns::write_file::decl(),
            fns::create_dir::decl(),
            fns::remove_dir::decl(),
            fns::get_context::decl(),
            fns::log_trace::decl(),
            fns::log_warn::decl(),
            fns::log_error::decl(),
            fns::log_debug::decl(),
            fns::log_info::decl(),
            fns::current_function::decl(),
            fns::set_value::decl(),
            fns::remove_file::decl(),
            fns::is_sandboxed::decl(),
            fns::get_input::decl(),
        ];
        self
    }
    /// Serialize the input, run the script on a blocking thread (the deno
    /// runtime is driven synchronously there — see `execute`), and
    /// deserialize the value the script stored via `set_value`.
    ///
    /// Serde failures on either boundary are reported as `BoundryLayerSerDe`.
    pub async fn run_action<I: Serialize, O: for<'de> Deserialize<'de>>(
        self,
        procedure_name: ProcedureName,
        input: Option<I>,
    ) -> Result<O, (JsError, String)> {
        let input = match serde_json::to_value(input) {
            Ok(a) => a,
            Err(err) => {
                tracing::error!("{}", err);
                tracing::debug!("{:?}", err);
                return Err((
                    JsError::BoundryLayerSerDe,
                    "Couldn't convert input".to_string(),
                ));
            }
        };
        // NonDetachingJoinHandle aborts the blocking task if this future is
        // dropped (e.g. by the timeout wrapper in JsProcedure::execute).
        let safer_handle: crate::util::NonDetachingJoinHandle<_> =
            tokio::task::spawn_blocking(move || self.execute(procedure_name, input)).into();
        let output = safer_handle
            .await
            .map_err(|err| (JsError::Tokio, format!("Tokio gave us the error: {}", err)))??;
        match serde_json::from_value(output.clone()) {
            Ok(x) => Ok(x),
            Err(err) => {
                tracing::error!("{}", err);
                tracing::debug!("{:?}", err);
                return Err((
                    JsError::BoundryLayerSerDe,
                    format!(
                        "Couldn't convert output = {:#?} to the correct type",
                        serde_json::to_string_pretty(&output).unwrap_or_default()
                    ),
                ));
            }
        }
    }
    /// Build the deno runtime, evaluate `loadModule.js` to completion, and
    /// return whatever the script last wrote through `set_value`.
    ///
    /// NOTE(review): runs on a spawn_blocking thread and relies on
    /// `Handle::current()` finding the enclosing tokio runtime to `block_on`
    /// the (non-Send) JsRuntime future — confirm this is always entered from
    /// within the runtime.
    fn execute(
        &self,
        procedure_name: ProcedureName,
        input: Value,
    ) -> Result<Value, (JsError, String)> {
        let base_directory = self.base_directory.clone();
        let answer_state = AnswerState::default();
        let ext_answer_state = answer_state.clone();
        let js_ctx = JsContext {
            datadir: base_directory,
            run_function: procedure_name.js_function_name(),
            package_id: self.package_id.clone(),
            volumes: self.volumes.clone(),
            version: self.version.clone(),
            sandboxed: self.sandboxed,
            input,
        };
        // Ops read JsContext and the answer slot out of the extension state.
        let ext = Extension::builder()
            .ops(self.operations.clone())
            .state(move |state| {
                state.put(ext_answer_state.clone());
                state.put(js_ctx.clone());
                Ok(())
            })
            .build();
        let loader = std::rc::Rc::new(self.module_loader.clone());
        let mut runtime = JsRuntime::new(RuntimeOptions {
            module_loader: Some(loader),
            extensions: vec![ext],
            ..Default::default()
        });
        let future = async move {
            let mod_id = runtime
                .load_main_module(&"file:///loadModule.js".parse().unwrap(), None)
                .await?;
            // Start evaluation, drive the event loop to completion, then
            // surface evaluation errors before event-loop errors.
            let evaluated = runtime.mod_evaluate(mod_id);
            let res = runtime.run_event_loop(false).await;
            evaluated.await??;
            res?;
            Ok::<_, AnyError>(())
        };
        tokio::runtime::Handle::current()
            .block_on(future)
            .map_err(|e| {
                tracing::debug!("{:?}", e);
                (JsError::Javascript, format!("Execution error: {}", e))
            })?;
        // Whatever the script last passed to `set_value` (Null if it never did).
        let answer = answer_state.0.lock().clone();
        Ok(answer)
    }
}
/// Note: Make sure that we have the assumption that all these methods are callable at any time, and all call restrictions should be in rust
mod fns {
    use deno_core::{anyhow::bail, error::AnyError, *};
    use serde_json::Value;
    use std::{convert::TryFrom, path::PathBuf};

    use crate::volume::VolumeId;

    use super::{AnswerState, JsContext};

    /// Shared validation for every filesystem op: look up the volume, enforce
    /// sandbox/readonly rules for mutating ops, and resolve `path_in` inside
    /// the volume without letting it escape.
    ///
    /// Security: `Path::starts_with` compares components literally, so a `..`
    /// component in `path_in` could walk out of the volume while the joined
    /// path still "starts with" it. Parent-dir components are therefore
    /// rejected outright before the prefix check.
    fn resolve_in_volume(
        ctx: &JsContext,
        volume_id: &VolumeId,
        path_in: &PathBuf,
        for_write: bool,
    ) -> Result<PathBuf, AnyError> {
        if for_write && ctx.sandboxed {
            bail!("Cannot write in sandbox mode");
        }
        let volume = match ctx.volumes.get(volume_id) {
            Some(a) => a,
            None => {
                bail!("There is no {} in volumes", volume_id);
            }
        };
        if for_write && volume.readonly() {
            bail!("Volume {} is readonly", volume_id);
        }
        let volume_path = volume.path_for(&ctx.datadir, &ctx.package_id, &ctx.version, volume_id);
        let new_file = volume_path.join(path_in);
        if path_in
            .components()
            .any(|c| matches!(c, std::path::Component::ParentDir))
            || !new_file.starts_with(&volume_path)
        {
            bail!("Path has broken away from parent");
        }
        Ok(new_file)
    }

    /// Read a UTF-8 file from the given volume. Permitted even in sandbox mode.
    #[op]
    async fn read_file(
        ctx: JsContext,
        volume_id: VolumeId,
        path_in: PathBuf,
    ) -> Result<String, AnyError> {
        let new_file = resolve_in_volume(&ctx, &volume_id, &path_in, false)?;
        let answer = tokio::fs::read_to_string(new_file).await?;
        Ok(answer)
    }

    /// Create or overwrite a file in the given volume. Rejected in sandbox
    /// mode and on readonly volumes.
    #[op]
    async fn write_file(
        ctx: JsContext,
        volume_id: VolumeId,
        path_in: PathBuf,
        write: String,
    ) -> Result<(), AnyError> {
        let new_file = resolve_in_volume(&ctx, &volume_id, &path_in, true)?;
        tokio::fs::write(new_file, write).await?;
        Ok(())
    }

    /// Delete a single file in the given volume (same restrictions as writes).
    #[op]
    async fn remove_file(
        ctx: JsContext,
        volume_id: VolumeId,
        path_in: PathBuf,
    ) -> Result<(), AnyError> {
        let new_file = resolve_in_volume(&ctx, &volume_id, &path_in, true)?;
        tokio::fs::remove_file(new_file).await?;
        Ok(())
    }

    /// Recursively delete a directory in the given volume (same restrictions
    /// as writes).
    #[op]
    async fn remove_dir(
        ctx: JsContext,
        volume_id: VolumeId,
        path_in: PathBuf,
    ) -> Result<(), AnyError> {
        let new_file = resolve_in_volume(&ctx, &volume_id, &path_in, true)?;
        tokio::fs::remove_dir_all(new_file).await?;
        Ok(())
    }

    /// Recursively create a directory in the given volume (same restrictions
    /// as writes).
    #[op]
    async fn create_dir(
        ctx: JsContext,
        volume_id: VolumeId,
        path_in: PathBuf,
    ) -> Result<(), AnyError> {
        let new_file = resolve_in_volume(&ctx, &volume_id, &path_in, true)?;
        tokio::fs::create_dir_all(new_file).await?;
        Ok(())
    }

    /// JSON-pointer name of the procedure function the script should run.
    #[op]
    fn current_function(state: &mut OpState) -> Result<String, AnyError> {
        let ctx = state.borrow::<JsContext>();
        Ok(ctx.run_function.clone())
    }

    /// Forward a script message to tracing at TRACE level, tagged with the
    /// package and function.
    #[op]
    fn log_trace(state: &mut OpState, input: String) -> Result<(), AnyError> {
        let ctx = state.borrow::<JsContext>();
        tracing::trace!(
            package_id = tracing::field::display(&ctx.package_id),
            run_function = tracing::field::display(&ctx.run_function),
            "{}",
            input
        );
        Ok(())
    }

    /// Forward a script message to tracing at WARN level.
    #[op]
    fn log_warn(state: &mut OpState, input: String) -> Result<(), AnyError> {
        let ctx = state.borrow::<JsContext>();
        tracing::warn!(
            package_id = tracing::field::display(&ctx.package_id),
            run_function = tracing::field::display(&ctx.run_function),
            "{}",
            input
        );
        Ok(())
    }

    /// Forward a script message to tracing at ERROR level.
    #[op]
    fn log_error(state: &mut OpState, input: String) -> Result<(), AnyError> {
        let ctx = state.borrow::<JsContext>();
        tracing::error!(
            package_id = tracing::field::display(&ctx.package_id),
            run_function = tracing::field::display(&ctx.run_function),
            "{}",
            input
        );
        Ok(())
    }

    /// Forward a script message to tracing at DEBUG level.
    #[op]
    fn log_debug(state: &mut OpState, input: String) -> Result<(), AnyError> {
        let ctx = state.borrow::<JsContext>();
        tracing::debug!(
            package_id = tracing::field::display(&ctx.package_id),
            run_function = tracing::field::display(&ctx.run_function),
            "{}",
            input
        );
        Ok(())
    }

    /// Forward a script message to tracing at INFO level.
    #[op]
    fn log_info(state: &mut OpState, input: String) -> Result<(), AnyError> {
        let ctx = state.borrow::<JsContext>();
        tracing::info!(
            package_id = tracing::field::display(&ctx.package_id),
            run_function = tracing::field::display(&ctx.run_function),
            "{}",
            input
        );
        Ok(())
    }

    /// Hand the whole per-run context to the script.
    #[op]
    fn get_context(state: &mut OpState) -> Result<JsContext, AnyError> {
        let ctx = state.borrow::<JsContext>();
        Ok(ctx.clone())
    }

    /// The JSON input value passed to this procedure invocation.
    #[op]
    fn get_input(state: &mut OpState) -> Result<Value, AnyError> {
        let ctx = state.borrow::<JsContext>();
        Ok(ctx.input.clone())
    }

    /// Store the script's result; read back by the host after the event loop
    /// finishes.
    #[op]
    fn set_value(state: &mut OpState, value: Value) -> Result<(), AnyError> {
        let mut answer = state.borrow::<AnswerState>().0.lock();
        *answer = value;
        Ok(())
    }

    /// Whether this run is sandboxed (mutating ops will be rejected).
    #[op]
    fn is_sandboxed(state: &mut OpState) -> Result<bool, AnyError> {
        let ctx = state.borrow::<JsContext>();
        Ok(ctx.sandboxed)
    }
}
}
// Integration-style test: runs the fixture script under
// test/js_action_execute/package-data/scripts/... as a GetConfig procedure
// and checks the side-effect file it writes. Requires the on-disk fixture
// tree to exist; cleans up the written file afterwards.
#[tokio::test]
async fn js_action_execute() {
    let js_action = JsProcedure {};
    let path: PathBuf = "test/js_action_execute/".parse().unwrap();
    let package_id = "test-package".parse().unwrap();
    let package_version: Version = "0.3.0.3".parse().unwrap();
    let name = ProcedureName::GetConfig;
    // Exercise all three volume kinds (data, assets, pointer).
    let volumes: Volumes = serde_json::from_value(serde_json::json!({
        "main": {
            "type": "data"
        },
        "compat": {
            "type": "assets"
        },
        "filebrowser" :{
            "package-id": "filebrowser",
            "path": "data",
            "readonly": true,
            "type": "pointer",
            "volume-id": "main",
        }
    }))
    .unwrap();
    let input: Option<serde_json::Value> = Some(serde_json::json!({"test":123}));
    let timeout = None;
    // Outer unwrap: host-side Error; inner unwrap: (code, message) from JS.
    let _output: crate::config::action::ConfigRes = js_action
        .execute(
            &path,
            &package_id,
            &package_version,
            name,
            &volumes,
            input,
            timeout,
        )
        .await
        .unwrap()
        .unwrap();
    // The fixture script is expected to have written this file via write_file.
    assert_eq!(
        &std::fs::read_to_string(
            "test/js_action_execute/package-data/volumes/test-package/data/main/test.log"
        )
        .unwrap(),
        "This is a test"
    );
    std::fs::remove_file(
        "test/js_action_execute/package-data/volumes/test-package/data/main/test.log",
    )
    .unwrap();
}

View File

@@ -0,0 +1,100 @@
//@ts-check
// Bootstrap module: captures the (nulled-out) Deno global, wraps the host ops
// into an `effects` object, and invokes the procedure function named by the
// host on the package's embassy.js module.
// @ts-ignore
import Deno from "/deno_global.js";
// @ts-ignore
import * as mainModule from "/embassy.js";

/**
 * This is using the simplified json pointer spec, using no escapes and arrays
 * @param {object} obj
 * @param {string} pointer
 * @returns
 */
function jsonPointerValue(obj, pointer) {
  const paths = pointer.substring(1).split("/");
  for (const path of paths) {
    if (obj == null) {
      return null;
    }
    obj = (obj || {})[path];
  }
  return obj;
}

// @ts-ignore
const context = Deno.core.opSync("get_context");
// @ts-ignore
const writeFile = ({ path, volumeId, toWrite }) =>
  Deno.core.opAsync("write_file", context, volumeId, path, toWrite);
// @ts-ignore
const readFile = ({ volumeId, path }) =>
  Deno.core.opAsync("read_file", context, volumeId, path);
// @ts-ignore
const removeFile = ({ volumeId, path }) =>
  Deno.core.opAsync("remove_file", context, volumeId, path);
// @ts-ignore
const isSandboxed = () => Deno.core.opSync("is_sandboxed");
// @ts-ignore
const writeJsonFile = ({ volumeId, path, toWrite }) =>
  Deno.core.opAsync(
    "write_file",
    context,
    volumeId,
    path,
    JSON.stringify(toWrite)
  );
// Fix: opAsync returns a Promise, so it must be awaited before JSON.parse —
// previously this parsed "[object Promise]" and always threw.
// @ts-ignore
const readJsonFile = async ({ volumeId, path }) =>
  JSON.parse(await Deno.core.opAsync("read_file", context, volumeId, path));
// @ts-ignore
const createDir = ({ volumeId, path }) =>
  Deno.core.opAsync("create_dir", context, volumeId, path);
// @ts-ignore
const removeDir = ({ volumeId, path }) =>
  Deno.core.opAsync("remove_dir", context, volumeId, path);
// @ts-ignore
const trace = (x) => Deno.core.opSync("log_trace", x);
// @ts-ignore
const warn = (x) => Deno.core.opSync("log_warn", x);
// @ts-ignore
const error = (x) => Deno.core.opSync("log_error", x);
// @ts-ignore
const debug = (x) => Deno.core.opSync("log_debug", x);
// @ts-ignore
const info = (x) => Deno.core.opSync("log_info", x);
// @ts-ignore
const currentFunction = Deno.core.opSync("current_function");
//@ts-ignore
const input = Deno.core.opSync("get_input");
// @ts-ignore
const setState = (x) => Deno.core.opSync("set_value", x);
const effects = {
  writeFile,
  readFile,
  writeJsonFile,
  readJsonFile,
  error,
  warn,
  debug,
  trace,
  info,
  isSandboxed,
  removeFile,
  createDir,
  removeDir,
};
const runFunction = jsonPointerValue(mainModule, currentFunction);
(async () => {
  if (typeof runFunction !== "function") {
    // Fix: `${{ currentFunction }}` interpolated an object literal and
    // printed "[object Object]"; interpolate the name itself.
    error(`Expecting ${currentFunction} to be a function`);
    throw new Error(`Expecting ${currentFunction} to be a function`);
  }
  const answer = await runFunction(effects, input);
  setState(answer);
})();

View File

@@ -6,14 +6,60 @@ use serde::{Deserialize, Serialize};
use tracing::instrument;
use self::docker::DockerProcedure;
use self::js_scripts::JsProcedure;
use crate::action::ActionId;
use crate::context::RpcContext;
use crate::id::ImageId;
use crate::s9pk::manifest::PackageId;
use crate::status::health_check::HealthCheckId;
use crate::util::Version;
use crate::volume::Volumes;
use crate::Error;
pub mod docker;
pub mod js_scripts;
#[derive(Debug, Clone)]
pub enum ProcedureName {
Main, // Usually just run container
CreateBackup,
RestoreBackup,
GetConfig,
SetConfig,
Migration,
Properties,
Health(HealthCheckId),
Action(ActionId),
}
impl ProcedureName {
fn docker_name(&self) -> Option<String> {
match self {
ProcedureName::Main => None,
ProcedureName::CreateBackup => Some("CreateBackup".to_string()),
ProcedureName::RestoreBackup => Some("RestoreBackup".to_string()),
ProcedureName::GetConfig => Some("GetConfig".to_string()),
ProcedureName::SetConfig => Some("SetConfig".to_string()),
ProcedureName::Migration => Some("Migration".to_string()),
ProcedureName::Properties => Some(format!("Properties-{}", rand::random::<u64>())),
ProcedureName::Health(id) => Some(format!("{}Health", id)),
ProcedureName::Action(id) => Some(format!("{}Action", id)),
}
}
fn js_function_name(&self) -> String {
match self {
ProcedureName::Main => todo!(),
ProcedureName::CreateBackup => "/createBackup".to_string(),
ProcedureName::RestoreBackup => "/restoreBackup".to_string(),
ProcedureName::GetConfig => "/getConfig".to_string(),
ProcedureName::SetConfig => "/setConfig".to_string(),
ProcedureName::Migration => "/migration".to_string(),
ProcedureName::Properties => "/properties".to_string(),
ProcedureName::Health(id) => format!("/health/{}", id),
ProcedureName::Action(id) => format!("/action/{}", id),
}
}
}
// TODO: create RPC endpoint that looks up the appropriate action and calls `execute`
@@ -22,6 +68,7 @@ pub mod docker;
#[serde(tag = "type")]
pub enum PackageProcedure {
Docker(DockerProcedure),
Script(JsProcedure),
}
impl PackageProcedure {
#[instrument]
@@ -33,6 +80,8 @@ impl PackageProcedure {
) -> Result<(), color_eyre::eyre::Report> {
match self {
PackageProcedure::Docker(action) => action.validate(volumes, image_ids, expected_io),
PackageProcedure::Script(action) => action.validate(volumes),
}
}
@@ -42,15 +91,15 @@ impl PackageProcedure {
ctx: &RpcContext,
pkg_id: &PackageId,
pkg_version: &Version,
name: Option<&str>,
name: ProcedureName,
volumes: &Volumes,
input: Option<I>,
allow_inject: bool,
timeout: Option<Duration>,
) -> Result<Result<O, (i32, String)>, Error> {
match self {
PackageProcedure::Docker(action) => {
action
PackageProcedure::Docker(procedure) => {
procedure
.execute(
ctx,
pkg_id,
@@ -63,6 +112,19 @@ impl PackageProcedure {
)
.await
}
PackageProcedure::Script(procedure) => {
procedure
.execute(
&ctx.datadir,
pkg_id,
pkg_version,
name,
volumes,
input,
timeout,
)
.await
}
}
}
#[instrument(skip(ctx, input))]
@@ -76,8 +138,13 @@ impl PackageProcedure {
timeout: Option<Duration>,
) -> Result<Result<O, (i32, String)>, Error> {
match self {
PackageProcedure::Docker(action) => {
action
PackageProcedure::Docker(procedure) => {
procedure
.sandboxed(ctx, pkg_id, pkg_version, volumes, input, timeout)
.await
}
PackageProcedure::Script(procedure) => {
procedure
.sandboxed(ctx, pkg_id, pkg_version, volumes, input, timeout)
.await
}

View File

@@ -5,6 +5,7 @@ use serde_json::Value;
use tracing::instrument;
use crate::context::RpcContext;
use crate::procedure::ProcedureName;
use crate::s9pk::manifest::{Manifest, PackageId};
use crate::{Error, ErrorKind};
@@ -35,7 +36,7 @@ pub async fn fetch_properties(ctx: RpcContext, id: PackageId) -> Result<Value, E
&ctx,
&manifest.id,
&manifest.version,
Some(&format!("Properties-{}", rand::random::<u64>())),
ProcedureName::Properties,
&manifest.volumes,
None,
false,

View File

@@ -7,7 +7,7 @@ use tracing::instrument;
use crate::context::RpcContext;
use crate::id::{Id, ImageId};
use crate::procedure::{NoOutput, PackageProcedure};
use crate::procedure::{NoOutput, PackageProcedure, ProcedureName};
use crate::s9pk::manifest::PackageId;
use crate::util::serde::Duration;
use crate::util::Version;
@@ -109,7 +109,7 @@ impl HealthCheck {
ctx,
pkg_id,
pkg_version,
Some(&format!("{}Health", id)),
ProcedureName::Health(id.clone()),
volumes,
Some(Utc::now().signed_duration_since(started).num_milliseconds()),
true,

View File

@@ -9,7 +9,9 @@ use tracing::instrument;
use crate::context::RpcContext;
use crate::id::{Id, IdUnchecked};
use crate::install::PKG_SCRIPT_DIR;
use crate::net::interface::{InterfaceId, Interfaces};
use crate::net::NetController;
use crate::s9pk::manifest::PackageId;
use crate::util::Version;
use crate::{Error, ResultExt};
@@ -93,20 +95,22 @@ impl Volumes {
version: &Version,
) -> Result<(), Error> {
for (volume_id, volume) in &self.0 {
volume.install(ctx, pkg_id, version, volume_id).await?; // TODO: concurrent?
volume
.install(&ctx.datadir, pkg_id, version, volume_id)
.await?; // TODO: concurrent?
}
Ok(())
}
pub fn get_path_for(
&self,
ctx: &RpcContext,
path: &PathBuf,
pkg_id: &PackageId,
version: &Version,
volume_id: &VolumeId,
) -> Option<PathBuf> {
self.0
.get(volume_id)
.map(|volume| volume.path_for(ctx, pkg_id, version, volume_id))
.map(|volume| volume.path_for(path, pkg_id, version, volume_id))
}
pub fn to_readonly(&self) -> Self {
Volumes(
@@ -165,8 +169,7 @@ pub fn asset_dir<P: AsRef<Path>>(datadir: P, pkg_id: &PackageId, version: &Versi
pub fn script_dir<P: AsRef<Path>>(datadir: P, pkg_id: &PackageId, version: &Version) -> PathBuf {
datadir
.as_ref()
.join("package-data")
.join("scripts")
.join(&*PKG_SCRIPT_DIR)
.join(pkg_id)
.join(version.as_str())
}
@@ -214,14 +217,14 @@ impl Volume {
}
pub async fn install(
&self,
ctx: &RpcContext,
path: &PathBuf,
pkg_id: &PackageId,
version: &Version,
volume_id: &VolumeId,
) -> Result<(), Error> {
match self {
Volume::Data { .. } => {
tokio::fs::create_dir_all(self.path_for(ctx, pkg_id, version, volume_id)).await?;
tokio::fs::create_dir_all(self.path_for(path, pkg_id, version, volume_id)).await?;
}
_ => (),
}
@@ -229,25 +232,25 @@ impl Volume {
}
pub fn path_for(
&self,
ctx: &RpcContext,
data_dir_path: impl AsRef<Path>,
pkg_id: &PackageId,
version: &Version,
volume_id: &VolumeId,
) -> PathBuf {
match self {
Volume::Data { .. } => data_dir(&ctx.datadir, pkg_id, volume_id),
Volume::Assets {} => asset_dir(&ctx.datadir, pkg_id, version).join(volume_id),
Volume::Data { .. } => data_dir(&data_dir_path, pkg_id, volume_id),
Volume::Assets {} => asset_dir(&data_dir_path, pkg_id, version).join(volume_id),
Volume::Pointer {
package_id,
volume_id,
path,
..
} => data_dir(&ctx.datadir, package_id, volume_id).join(if path.is_absolute() {
} => data_dir(&data_dir_path, package_id, volume_id).join(if path.is_absolute() {
path.strip_prefix("/").unwrap()
} else {
path.as_ref()
}),
Volume::Certificate { interface_id: _ } => ctx.net_controller.ssl_directory_for(pkg_id),
Volume::Certificate { interface_id: _ } => NetController::ssl_directory_for(pkg_id),
Volume::Backup { .. } => backup_dir(pkg_id),
}
}

View File

@@ -0,0 +1,7 @@
// Ambient declarations for the entrypoints a package's JS action module may
// export. Each may return its result directly or as a Promise.
import {Effects, Config, ConfigRes, SetResult, Properties} from './types';
// Returns the user-facing properties for this package.
export function properties(effects: Effects): Properties | Promise<Properties>;
// Produces the current config (if any) together with the spec describing its shape.
export function getConfig(effects: Effects): ConfigRes | Promise<ConfigRes>;
// Applies `input` as the new config and reports resulting dependency requirements.
export function setConfig(effects: Effects, input: Config): SetResult | Promise<SetResult>;

View File

@@ -0,0 +1,593 @@
// @ts-check
/**
 * Returns the package properties payload.
 * @returns {string}
 */
export function properties() {
  const placeholder = "Anything here";
  return placeholder;
}
/**
 * Example `getConfig` entrypoint. Exercises the host-provided file-system and
 * logging effects, then returns a static config spec (modeled on the LND
 * package's configuration).
 * @param {import('./types').Effects} effects
 * @returns {Promise<import('./types').ConfigRes>}
 */
export async function getConfig(effects) {
// Exercise the file-system effects (write/create/remove/read) against the
// 'main' volume — presumably here to smoke-test the effects API; TODO confirm.
await effects.writeFile({
path: "./test.log",
toWrite: "This is a test",
volumeId: 'main',
});
await effects.createDir({
path: "./testing",
volumeId: 'main',});
await effects.writeFile({
path: "./testing/test2.log",
toWrite: "This is a test",
volumeId: 'main',
});
await effects.removeFile({
path: "./testing/test2.log",
volumeId: 'main',
})
await effects.removeDir({
path: "./testing",
volumeId: 'main',});
// Read back the file written above and log its contents.
effects.debug(`Read results are ${await effects.readFile({
path: "./test.log",
volumeId: 'main',
})}`)
// Exercise each log level exposed by the host.
effects.trace('trace')
effects.debug('debug')
effects.warn('warn')
effects.error('error')
effects.info('info')
// No existing config is returned (`config` omitted); only the spec below.
return {
spec: {
"control-tor-address": {
"name": "Control Tor Address",
"description": "The Tor address for the control interface.",
"type": "pointer",
"subtype": "package",
"package-id": "lnd",
"target": "tor-address",
"interface": "control"
},
"peer-tor-address": {
"name": "Peer Tor Address",
"description": "The Tor address for the peer interface.",
"type": "pointer",
"subtype": "package",
"package-id": "lnd",
"target": "tor-address",
"interface": "peer"
},
"watchtower-tor-address": {
"name": "Watchtower Tor Address",
"description": "The Tor address for the watchtower interface.",
"type": "pointer",
"subtype": "package",
"package-id": "lnd",
"target": "tor-address",
"interface": "watchtower"
},
"alias": {
"type": "string",
"name": "Alias",
"description": "The public, human-readable name of your Lightning node",
"nullable": true,
"pattern": ".{1,32}",
"pattern-description": "Must be at least 1 character and no more than 32 characters"
},
"color": {
"type": "string",
"name": "Color",
"description": "The public color dot of your Lightning node",
"nullable": false,
"pattern": "[0-9a-fA-F]{6}",
"pattern-description": "Must be a valid 6 digit hexadecimal RGB value. The first two digits are red, middle two are green, and final two are\nblue\n",
"default": {
"charset": "a-f,0-9",
"len": 6
}
},
"accept-keysend": {
"type": "boolean",
"name": "Accept Keysend",
"description": "Allow others to send payments directly to your public key through keysend instead of having to get a new invoice\n",
"default": false
},
"accept-amp": {
"type": "boolean",
"name": "Accept Spontaneous AMPs",
"description": "If enabled, spontaneous payments through AMP will be accepted. Payments to AMP\ninvoices will be accepted regardless of this setting.\n",
"default": false
},
"reject-htlc": {
"type": "boolean",
"name": "Reject Routing Requests",
"description": "If true, LND will not forward any HTLCs that are meant as onward payments. This option will still allow LND to send\nHTLCs and receive HTLCs but lnd won't be used as a hop.\n",
"default": false
},
"min-chan-size": {
"type": "number",
"name": "Minimum Channel Size",
"description": "The smallest channel size that we should accept. Incoming channels smaller than this will be rejected.\n",
"nullable": true,
"range": "[1,16777215]",
"integral": true,
"units": "satoshis"
},
"max-chan-size": {
"type": "number",
"name": "Maximum Channel Size",
"description": "The largest channel size that we should accept. Incoming channels larger than this will be rejected.\nFor non-Wumbo channels this limit remains 16777215 satoshis by default as specified in BOLT-0002. For wumbo\nchannels this limit is 1,000,000,000 satoshis (10 BTC). Set this config option explicitly to restrict your maximum\nchannel size to better align with your risk tolerance. Don't forget to enable Wumbo channels under 'Advanced,' if desired.\n",
"nullable": true,
"range": "[1,1000000000]",
"integral": true,
"units": "satoshis"
},
"tor": {
"type": "object",
"name": "Tor Config",
"spec": {
"use-tor-only": {
"type": "boolean",
"name": "Use Tor for all traffic",
"description": "Use the tor proxy even for connections that are reachable on clearnet. This will hide your node's public IP address, but will slow down your node's performance",
"default": false
},
"stream-isolation": {
"type": "boolean",
"name": "Stream Isolation",
"description": "Enable Tor stream isolation by randomizing user credentials for each connection. With this mode active, each connection will use a new circuit. This means that multiple applications (other than lnd) using Tor won't be mixed in with lnd's traffic.\nThis option may not be used when 'Use Tor for all traffic' is disabled, since direct connections compromise source IP privacy by default.",
"default": false
}
}
},
"bitcoind": {
"type": "union",
"name": "Bitcoin Core",
"description": "The Bitcoin Core node to connect to:\n  - internal: The Bitcoin Core and Proxy services installed to your Embassy\n  - external: An unpruned Bitcoin Core node running on a different device\n",
"tag": {
"id": "type",
"name": "Type",
"variant-names": {
"internal": "Internal (Bitcoin Core)",
"internal-proxy": "Internal (Bitcoin Proxy)",
"external": "External"
},
"description": "The Bitcoin Core node to connect to:\n  - internal: The Bitcoin Core and Proxy services installed to your Embassy\n  - external: An unpruned Bitcoin Core node running on a different device\n"
},
"default": "internal",
"variants": {
"internal": {
"user": {
"type": "pointer",
"name": "RPC Username",
"description": "The username for Bitcoin Core's RPC interface",
"subtype": "package",
"package-id": "bitcoind",
"target": "config",
"multi": false,
"selector": "$.rpc.username"
},
"password": {
"type": "pointer",
"name": "RPC Password",
"description": "The password for Bitcoin Core's RPC interface",
"subtype": "package",
"package-id": "bitcoind",
"target": "config",
"multi": false,
"selector": "$.rpc.password"
}
},
"internal-proxy": {
"user": {
"type": "pointer",
"name": "RPC Username",
"description": "The username for the RPC user allocated to lnd",
"subtype": "package",
"package-id": "btc-rpc-proxy",
"target": "config",
"multi": false,
"selector": "$.users[?(@.name == \"lnd\")].name"
},
"password": {
"type": "pointer",
"name": "RPC Password",
"description": "The password for the RPC user allocated to lnd",
"subtype": "package",
"package-id": "btc-rpc-proxy",
"target": "config",
"multi": false,
"selector": "$.users[?(@.name == \"lnd\")].password"
}
},
"external": {
"connection-settings": {
"type": "union",
"name": "Connection Settings",
"description": "Information to connect to an external unpruned Bitcoin Core node",
"tag": {
"id": "type",
"name": "Type",
"description": "- Manual: Raw information for finding a Bitcoin Core node\n- Quick Connect: A Quick Connect URL for a Bitcoin Core node\n",
"variant-names": {
"manual": "Manual",
"quick-connect": "Quick Connect"
}
},
"default": "quick-connect",
"variants": {
"manual": {
"host": {
"type": "string",
"name": "Public Address",
"description": "The public address of your Bitcoin Core server",
"nullable": false
},
"rpc-user": {
"type": "string",
"name": "RPC Username",
"description": "The username for the RPC user on your Bitcoin Core RPC server",
"nullable": false
},
"rpc-password": {
"type": "string",
"name": "RPC Password",
"description": "The password for the RPC user on your Bitcoin Core RPC server",
"nullable": false
},
"rpc-port": {
"type": "number",
"name": "RPC Port",
"description": "The port that your Bitcoin Core RPC server is bound to",
"nullable": false,
"range": "[0,65535]",
"integral": true,
"default": 8332
},
"zmq-block-port": {
"type": "number",
"name": "ZeroMQ Block Port",
"description": "The port that your Bitcoin Core ZeroMQ server is bound to for raw blocks",
"nullable": false,
"range": "[0,65535]",
"integral": true,
"default": 28332
},
"zmq-tx-port": {
"type": "number",
"name": "ZeroMQ Transaction Port",
"description": "The port that your Bitcoin Core ZeroMQ server is bound to for raw transactions",
"nullable": false,
"range": "[0,65535]",
"integral": true,
"default": 28333
}
},
"quick-connect": {
"quick-connect-url": {
"type": "string",
"name": "Quick Connect URL",
"description": "The Quick Connect URL for your Bitcoin Core RPC server\nNOTE: LND will not accept a .onion url for this option\n",
"nullable": false,
"pattern": "btcstandup://[^:]*:[^@]*@[a-zA-Z0-9.-]+:[0-9]+(/(\\?(label=.+)?)?)?",
"pattern-description": "Must be a valid Quick Connect URL. For help, check out https://github.com/BlockchainCommons/Gordian/blob/master/Docs/Quick-Connect-API.md"
},
"zmq-block-port": {
"type": "number",
"name": "ZeroMQ Block Port",
"description": "The port that your Bitcoin Core ZeroMQ server is bound to for raw blocks",
"nullable": false,
"range": "[0,65535]",
"integral": true,
"default": 28332
},
"zmq-tx-port": {
"type": "number",
"name": "ZeroMQ Transaction Port",
"description": "The port that your Bitcoin Core ZeroMQ server is bound to for raw transactions",
"nullable": false,
"range": "[0,65535]",
"integral": true,
"default": 28333
}
}
}
}
}
}
},
"autopilot": {
"type": "object",
"name": "Autopilot",
"description": "Autopilot Settings",
"spec": {
"enabled": {
"type": "boolean",
"name": "Enabled",
"description": "If the autopilot agent should be active or not. The autopilot agent will\nattempt to AUTOMATICALLY OPEN CHANNELS to put your node in an advantageous\nposition within the network graph. DO NOT ENABLE THIS IF YOU WANT TO MANAGE \nCHANNELS MANUALLY OR DO NOT UNDERSTAND IT.\n",
"default": false
},
"private": {
"type": "boolean",
"name": "Private",
"description": "Whether the channels created by the autopilot agent should be private or not.\nPrivate channels won't be announced to the network.\n",
"default": false
},
"maxchannels": {
"type": "number",
"name": "Maximum Channels",
"description": "The maximum number of channels that should be created.",
"nullable": false,
"range": "[1,*)",
"integral": true,
"default": 5
},
"allocation": {
"type": "number",
"name": "Allocation",
"description": "The fraction of total funds that should be committed to automatic channel\nestablishment. For example 60% means that 60% of the total funds available\nwithin the wallet should be used to automatically establish channels. The total\namount of attempted channels will still respect the \"Maximum Channels\" parameter.\n",
"nullable": false,
"range": "[0,100]",
"integral": false,
"default": 60,
"units": "%"
},
"min-channel-size": {
"type": "number",
"name": "Minimum Channel Size",
"description": "The smallest channel that the autopilot agent should create.",
"nullable": false,
"range": "[0,*)",
"integral": true,
"default": 20000,
"units": "satoshis"
},
"max-channel-size": {
"type": "number",
"name": "Maximum Channel Size",
"description": "The largest channel that the autopilot agent should create.",
"nullable": false,
"range": "[0,*)",
"integral": true,
"default": 16777215,
"units": "satoshis"
},
"advanced": {
"type": "object",
"name": "Advanced",
"description": "Advanced Options",
"spec": {
"min-confirmations": {
"type": "number",
"name": "Minimum Confirmations",
"description": "The minimum number of confirmations each of your inputs in funding transactions\ncreated by the autopilot agent must have.\n",
"nullable": false,
"range": "[0,*)",
"integral": true,
"default": 1,
"units": "blocks"
},
"confirmation-target": {
"type": "number",
"name": "Confirmation Target",
"description": "The confirmation target (in blocks) for channels opened by autopilot.",
"nullable": false,
"range": "[0,*)",
"integral": true,
"default": 1,
"units": "blocks"
}
}
}
}
},
"advanced": {
"type": "object",
"name": "Advanced",
"description": "Advanced Options",
"spec": {
"debug-level": {
"type": "enum",
"name": "Log Verbosity",
"values": [
"trace",
"debug",
"info",
"warn",
"error",
"critical"
],
"description": "Sets the level of log filtration. Trace is the most verbose, Critical is the least.\n",
"default": "info",
"value-names": {}
},
"db-bolt-no-freelist-sync": {
"type": "boolean",
"name": "Disallow Bolt DB Freelist Sync",
"description": "If true, prevents the database from syncing its freelist to disk.\n",
"default": false
},
"db-bolt-auto-compact": {
"type": "boolean",
"name": "Compact Database on Startup",
"description": "Performs database compaction on startup. This is necessary to keep disk usage down over time at the cost of\nhaving longer startup times.\n",
"default": true
},
"db-bolt-auto-compact-min-age": {
"type": "number",
"name": "Minimum Autocompaction Age for Bolt DB",
"description": "How long ago (in hours) the last compaction of a database file must be for it to be considered for auto\ncompaction again. Can be set to 0 to compact on every startup.\n",
"nullable": false,
"range": "[0, *)",
"integral": true,
"default": 168,
"units": "hours"
},
"db-bolt-db-timeout": {
"type": "number",
"name": "Bolt DB Timeout",
"description": "How long should LND try to open the database before giving up?",
"nullable": false,
"range": "[1, 86400]",
"integral": true,
"default": 60,
"units": "seconds"
},
"recovery-window": {
"type": "number",
"name": "Recovery Window",
"description": "Number of blocks in the past that LND should scan for unknown transactions",
"nullable": true,
"range": "[1,*)",
"integral": true,
"units": "blocks"
},
"payments-expiration-grace-period": {
"type": "number",
"name": "Payments Expiration Grace Period",
"description": "A period to wait before for closing channels with outgoing htlcs that have timed out and are a result of this\nnodes instead payment. In addition to our current block based deadline, is specified this grace period will\nalso be taken into account.\n",
"nullable": false,
"range": "[1,*)",
"integral": true,
"default": 30,
"units": "seconds"
},
"default-remote-max-htlcs": {
"type": "number",
"name": "Maximum Remote HTLCs",
"description": "The default max_htlc applied when opening or accepting channels. This value limits the number of concurrent\nHTLCs that the remote party can add to the commitment. The maximum possible value is 483.\n",
"nullable": false,
"range": "[1,483]",
"integral": true,
"default": 483,
"units": "htlcs"
},
"max-channel-fee-allocation": {
"type": "number",
"name": "Maximum Channel Fee Allocation",
"description": "The maximum percentage of total funds that can be allocated to a channel's commitment fee. This only applies for\nthe initiator of the channel.\n",
"nullable": false,
"range": "[0.1, 1]",
"integral": false,
"default": 0.5
},
"max-commit-fee-rate-anchors": {
"type": "number",
"name": "Maximum Commitment Fee for Anchor Channels",
"description": "The maximum fee rate in sat/vbyte that will be used for commitments of channels of the anchors type. Must be\nlarge enough to ensure transaction propagation.\n",
"nullable": false,
"range": "[1,*)",
"integral": true,
"default": 10
},
"protocol-wumbo-channels": {
"type": "boolean",
"name": "Enable Wumbo Channels",
"description": "If set, then lnd will create and accept requests for channels larger than 0.16 BTC\n",
"default": false
},
"protocol-no-anchors": {
"type": "boolean",
"name": "Disable Anchor Channels",
"description": "Set to disable support for anchor commitments. Anchor channels allow you to determine your fees at close time by\nusing a Child Pays For Parent transaction.\n",
"default": false
},
"protocol-disable-script-enforced-lease": {
"type": "boolean",
"name": "Disable Script Enforced Channel Leases",
"description": "Set to disable support for script enforced lease channel commitments. If not set, lnd will accept these channels by default if the remote channel party proposes them. Note that lnd will require 1 UTXO to be reserved for this channel type if it is enabled.\nNote: This may cause you to be unable to close a channel and your wallets may not understand why",
"default": false
},
"gc-canceled-invoices-on-startup": {
"type": "boolean",
"name": "Cleanup Canceled Invoices on Startup",
"description": "If true, LND will attempt to garbage collect canceled invoices upon start.\n",
"default": false
},
"bitcoin": {
"type": "object",
"name": "Bitcoin Channel Configuration",
"description": "Configuration options for lightning network channel management operating over the Bitcoin network",
"spec": {
"default-channel-confirmations": {
"type": "number",
"name": "Default Channel Confirmations",
"description": "The default number of confirmations a channel must have before it's considered\nopen. LND will require any incoming channel requests to wait this many\nconfirmations before it considers the channel active.\n",
"nullable": false,
"range": "[1,6]",
"integral": true,
"default": 3,
"units": "blocks"
},
"min-htlc": {
"type": "number",
"name": "Minimum Incoming HTLC Size",
"description": "The smallest HTLC LND will to accept on your channels, in millisatoshis.\n",
"nullable": false,
"range": "[1,*)",
"integral": true,
"default": 1,
"units": "millisatoshis"
},
"min-htlc-out": {
"type": "number",
"name": "Minimum Outgoing HTLC Size",
"description": "The smallest HTLC LND will send out on your channels, in millisatoshis.\n",
"nullable": false,
"range": "[1,*)",
"integral": true,
"default": 1000,
"units": "millisatoshis"
},
"base-fee": {
"type": "number",
"name": "Routing Base Fee",
"description": "The base fee in millisatoshi you will charge for forwarding payments on your\nchannels.\n",
"nullable": false,
"range": "[0,*)",
"integral": true,
"default": 1000,
"units": "millisatoshi"
},
"fee-rate": {
"type": "number",
"name": "Routing Fee Rate",
"description": "The fee rate used when forwarding payments on your channels. The total fee\ncharged is the Base Fee + (amount * Fee Rate / 1000000), where amount is the\nforwarded amount. Measured in sats per million\n",
"nullable": false,
"range": "[1,1000000)",
"integral": true,
"default": 1,
"units": "sats per million"
},
"time-lock-delta": {
"type": "number",
"name": "Time Lock Delta",
"description": "The CLTV delta we will subtract from a forwarded HTLC's timelock value.",
"nullable": false,
"range": "[6, 144]",
"integral": true,
"default": 40,
"units": "blocks"
}
}
}
}
}
}
}
}
/**
 * Applies a new configuration for the package.
 * Currently a no-op that reports no dependency changes.
 * @param {import ("./types").Effects} effects
 * @param {import("./types").Config} input
 * @returns {Promise<import("./types").SetResult>}
 */
export async function setConfig(effects, input) {
  const result = { "depends-on": {} };
  return result;
}

View File

@@ -0,0 +1,191 @@
// Host-side effects handed to every package script. All file operations are
// scoped to a named volume; paths are resolved relative to that volume's root.
export type Effects = {
    // Writes `toWrite` to `path` inside the given volume.
    writeFile(input: {path: string, volumeId: string, toWrite: string}): Promise<void>,
    // Reads the file at `path` in the given volume and returns its contents.
    readFile(input: {volumeId: string, path: string}): Promise<string>,
    createDir(input: {volumeId: string, path: string}): Promise<string>,
    removeDir(input: {volumeId: string, path: string}): Promise<string>,
    removeFile(input: {volumeId: string, path: string}): Promise<void>,
    // NOTE(review): the JSON variants are typed synchronously while the plain
    // read/write return Promises — confirm against the host implementation.
    writeJsonFile(input: {volumeId: string, path: string, toWrite: object}): void,
    readJsonFile(input: {volumeId: string, path: string}): object,
    // Logging at the host's five severity levels.
    // (Fixed parameter-name typo `whatToPrin` -> `whatToPrint`; parameter names
    // in type signatures are documentation-only, so this is interface-safe.)
    trace(whatToPrint: string),
    warn(whatToPrint: string),
    error(whatToPrint: string),
    debug(whatToPrint: string),
    info(whatToPrint: string),
    // True when running in the sandboxed (dry-run) evaluation environment.
    is_sandboxed(): boolean,
}
// Result payload returned by a package action.
export type ActionResult = {
version: "0",
message: string,
value?: string,
copyable: boolean,
qr: boolean,
}
// Returned by getConfig: the current config (if any) plus the spec describing it.
export type ConfigRes = {
config?: Config,
spec: ConfigSpec,
}
// A package's configuration: free-form key/value map.
export type Config = {
[value: string]: any
}
// Maps each config key to the spec describing its value.
export type ConfigSpec = {
[value: string]: ValueSpecAny
}
// Adds an optional default value to a spec.
export type WithDefault<T, Default> = T & {
default?: Default
}
// Adds human-readable metadata to a spec.
// NOTE(review): `String` (wrapper type) is used here where `string` is
// conventional; left as-is since narrowing it could affect assignability.
export type WithDescription<T> = T & {
description?: String,
name: string,
warning?: string,
}
// Spec for list-valued config entries; `range` bounds the list length.
export type ListSpec<T> = {
spec: T,
range: string
}
// Discriminates a union member by its `type` field.
export type Tag<T extends string, V> = V & {
type: T
}
// Discriminates a union member by its `subtype` field.
export type Subtype<T extends string, V> = V & {
subtype: T
}
// Discriminates a union member by its `target` field.
export type Target<T extends string, V> = V & {
"target": T
}
// Uniqueness constraint over list entries: a key, any/all combinators, or none.
export type UniqueBy =
|{
any: UniqueBy[],
}
| {
all: UniqueBy[]
}
| string
| null
// Marks a spec as explicitly nullable or not.
export type WithNullable<T> = T & {
nullable: boolean
}
// Default for string values: a literal, or a random-generation recipe.
// NOTE(review): `String` wrapper type again — see WithDescription.
export type DefaultString = String | {
charset?: string,
len: number
}
// Spec for string values; the `{}` arm means pattern constraints are optional.
export type ValueSpecString= ({} | {
pattern: string,
'pattern-description': string
}) & {
copyable?: boolean,
masked?: boolean,
placeholder?: string
}
// Spec for numeric values.
export type ValueSpecNumber = {
range?: string,
integral?: boolean,
units?: string,
placeholder?: number,
}
// Spec for boolean values (no extra constraints).
export type ValueSpecBoolean = {}
// Any single config value spec, discriminated by `type`.
export type ValueSpecAny =
| Tag<'boolean', WithDescription<WithDefault<ValueSpecBoolean, boolean>>>
| Tag<'string', WithDescription<WithDefault<WithNullable<ValueSpecString>, DefaultString>>>
| Tag<'number', WithDescription<WithDefault<WithNullable<ValueSpecNumber>, number>>>
| Tag<'enum', WithDescription<WithDefault<{
values: string[],
"value-names": {
[key: string]: string
}
}, string>>>
| Tag<'list', ValueSpecList>
| Tag<'object', WithDescription<WithDefault<ValueSpecObject, Config>>>
| Tag<'union', WithDescription<WithDefault<ValueSpecUnion, string>>>
| Tag<'pointer', WithDescription<
| Subtype<'package',
| Target<'tor-key', {
'package-id': string
interface: string
}>
| Target<'tor-address', {
'package-id': string,
interface: string
} >
| Target<'lan-address',{
'package-id': string,
interface: string
} >
| Target<'config', {
'package-id': string,
selector: string,
multi: boolean
}>
>
| Subtype<'system', {}>
>>
// Spec for tagged-union values; `tag` describes the discriminant field.
export type ValueSpecUnion = {
tag: {
id: string,
name: string,
description?: string,
"variant-names": {
[key: string]: string,
}
},
variants: {
[key: string]: ConfigSpec
},
"display-as"?: string,
"unique-by"?: UniqueBy
}
// Spec for nested-object values.
export type ValueSpecObject = {
spec : ConfigSpec,
'display-as'?: string,
"unique-by"?: UniqueBy
}
// Spec for list values, discriminated by element `subtype`.
export type ValueSpecList =
| Subtype<'boolean', WithDescription<WithDefault<ListSpec<ValueSpecBoolean>, boolean>>>
| Subtype<'string', WithDescription<WithDefault<ListSpec<ValueSpecString>, string>>>
| Subtype<'number', WithDescription<WithDefault<ListSpec<ValueSpecNumber>, number>>>
| Subtype<'enum', WithDescription<WithDefault<{
values: string[],
"value-names": {
[key: string]: string
}
}, string>>>
// Returned by setConfig: optional signal plus the package's dependency map.
export type SetResult = {
signal?: string,
'depends-on': {
[packageId: string]: string[]
}
}
// Version-2 package properties: named string or nested-object entries.
export type PackagePropertiesV2 = {
[name: string]: PackagePropertyObject | PackagePropertyString
}
// A single displayable string property.
export type PackagePropertyString = {
type: 'string',
description?: string,
value: string,
copyable?: boolean,
qr?: boolean,
masked?: boolean,
}
// A nested group of properties.
export type PackagePropertyObject = {
value: PackagePropertiesV2;
type: "object";
description: string;
}
// Top-level properties payload returned by the `properties` entrypoint.
export type Properties = {
version: 2,
data: PackagePropertiesV2
}